Merge branch 'for-linus' of git://ftp.arm.linux.org.uk/~rmk/linux-arm
author          Linus Torvalds <torvalds@linux-foundation.org>
                Tue, 7 Jul 2015 22:19:09 +0000 (15:19 -0700)
committer       Linus Torvalds <torvalds@linux-foundation.org>
                Tue, 7 Jul 2015 22:19:09 +0000 (15:19 -0700)
Pull ARM updates from Russell King:
 "These are late by a week; they should have been merged during the
  merge window, but unfortunately, the ARM kernel build/boot farms were
  indicating random failures, and it wasn't clear whether the cause was
  something in these changes or something during the merge window.

  This is a set of merge window fixes with some documentation additions"

* 'for-linus' of git://ftp.arm.linux.org.uk/~rmk/linux-arm:
  ARM: avoid unwanted GCC memset()/memcpy() optimisations for IO variants
  ARM: pgtable: document mapping types
  ARM: io: convert ioremap*() to functions
  ARM: io: fix ioremap_wt() implementation
  ARM: io: document ARM specific behaviour of ioremap*() implementations
  ARM: fix lockdep unannotated irqs-off warning
  ARM: 8397/1: fix vdsomunge not to depend on glibc specific error.h
  ARM: add helpful message when truncating physical memory
  ARM: add help text for HIGHPTE configuration entry
  ARM: fix DEBUG_SET_MODULE_RONX build dependencies
  ARM: 8396/1: use phys_addr_t in pfn_to_kaddr()
  ARM: 8394/1: update memblock limit after mapping lowmem
  ARM: 8393/1: smp: Fix suspicious RCU usage with ipi tracepoints
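
The first item in the list above, "avoid unwanted GCC memset()/memcpy() optimisations for IO variants", concerns a hazard where the compiler recognises a memset()/memcpy() call pattern and rewrites accesses to __iomem regions with ordinary (mergeable, reorderable) stores. The snippet below is only an editorial sketch of that hazard and of one generic way to avoid it -- routing the IO variant through a differently named helper that performs volatile byte stores -- and does not reproduce the actual kernel patch; the helper name is hypothetical.

#include <stddef.h>

/*
 * Illustration only (hypothetical name, not the kernel's implementation).
 * If memset_io() were a plain macro over memset(), GCC could apply its
 * builtin memset() transformations (store merging, widening, reordering),
 * which is unsafe for device memory.  A separately named helper doing
 * byte-wide volatile stores is immune to that pattern matching.
 */
static void mmioset_sketch(volatile unsigned char *dst, int c, size_t count)
{
	while (count--)
		*dst++ = (unsigned char)c;
}

#define memset_io(addr, c, count) mmioset_sketch((addr), (c), (count))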

1352 files changed:
CREDITS
Documentation/ABI/testing/sysfs-driver-toshiba_haps [new file with mode: 0644]
Documentation/clk.txt
Documentation/devicetree/bindings/arc/archs-idu-intc.txt [new file with mode: 0644]
Documentation/devicetree/bindings/arc/archs-intc.txt [new file with mode: 0644]
Documentation/devicetree/bindings/arc/axs101.txt [new file with mode: 0644]
Documentation/devicetree/bindings/arc/axs103.txt [new file with mode: 0644]
Documentation/devicetree/bindings/arm/mediatek/mediatek,apmixedsys.txt [new file with mode: 0644]
Documentation/devicetree/bindings/arm/mediatek/mediatek,infracfg.txt [new file with mode: 0644]
Documentation/devicetree/bindings/arm/mediatek/mediatek,pericfg.txt [new file with mode: 0644]
Documentation/devicetree/bindings/arm/mediatek/mediatek,topckgen.txt [new file with mode: 0644]
Documentation/devicetree/bindings/clock/amlogic,meson8b-clkc.txt [new file with mode: 0644]
Documentation/devicetree/bindings/clock/bcm-cygnus-clock.txt [deleted file]
Documentation/devicetree/bindings/clock/brcm,iproc-clocks.txt [new file with mode: 0644]
Documentation/devicetree/bindings/clock/clock-bindings.txt
Documentation/devicetree/bindings/clock/csr,atlas7-car.txt [new file with mode: 0644]
Documentation/devicetree/bindings/clock/emev2-clock.txt
Documentation/devicetree/bindings/clock/keystone-pll.txt
Documentation/devicetree/bindings/clock/lpc1850-ccu.txt [new file with mode: 0644]
Documentation/devicetree/bindings/clock/lpc1850-cgu.txt [new file with mode: 0644]
Documentation/devicetree/bindings/clock/marvell,pxa1928.txt [new file with mode: 0644]
Documentation/devicetree/bindings/clock/mvebu-gated-clock.txt
Documentation/devicetree/bindings/clock/nvidia,tegra124-car.txt
Documentation/devicetree/bindings/clock/renesas,cpg-div6-clocks.txt
Documentation/devicetree/bindings/clock/renesas,cpg-mstp-clocks.txt
Documentation/devicetree/bindings/clock/renesas,rcar-gen2-cpg-clocks.txt
Documentation/devicetree/bindings/clock/renesas,rz-cpg-clocks.txt
Documentation/devicetree/bindings/clock/st,stm32-rcc.txt [new file with mode: 0644]
Documentation/devicetree/bindings/clock/sunxi.txt
Documentation/devicetree/bindings/clock/ti,cdce925.txt [new file with mode: 0644]
Documentation/devicetree/bindings/hwlock/hwlock.txt [new file with mode: 0644]
Documentation/devicetree/bindings/hwlock/omap-hwspinlock.txt [new file with mode: 0644]
Documentation/devicetree/bindings/hwlock/qcom-hwspinlock.txt [new file with mode: 0644]
Documentation/devicetree/bindings/hwlock/sirf,hwspinlock.txt [new file with mode: 0644]
Documentation/devicetree/bindings/leds/leds-aat1290.txt [new file with mode: 0644]
Documentation/devicetree/bindings/leds/leds-bcm6328.txt [new file with mode: 0644]
Documentation/devicetree/bindings/leds/leds-bcm6358.txt [new file with mode: 0644]
Documentation/devicetree/bindings/leds/leds-ktd2692.txt [new file with mode: 0644]
Documentation/devicetree/bindings/leds/leds-tlc591xx.txt [new file with mode: 0644]
Documentation/devicetree/bindings/mfd/tps6507x.txt [changed mode: 0755->0644]
Documentation/devicetree/bindings/net/marvell-armada-370-neta.txt
Documentation/devicetree/bindings/pinctrl/lantiq,falcon-pinumx.txt [deleted file]
Documentation/devicetree/bindings/pinctrl/lantiq,pinctrl-falcon.txt [new file with mode: 0644]
Documentation/devicetree/bindings/pinctrl/lantiq,pinctrl-xway.txt [new file with mode: 0644]
Documentation/devicetree/bindings/pinctrl/lantiq,xway-pinumx.txt [deleted file]
Documentation/devicetree/bindings/remoteproc/wkup_m3_rproc.txt [new file with mode: 0644]
Documentation/devicetree/bindings/usb/atmel-usb.txt
Documentation/devicetree/bindings/vendor-prefixes.txt
Documentation/devicetree/bindings/watchdog/digicolor-wdt.txt [new file with mode: 0644]
Documentation/devicetree/bindings/watchdog/omap-wdt.txt
Documentation/filesystems/caching/backend-api.txt
Documentation/filesystems/caching/fscache.txt
Documentation/filesystems/dax.txt
Documentation/filesystems/porting
Documentation/hwmon/submitting-patches
Documentation/hwmon/w83792d
Documentation/hwspinlock.txt
Documentation/ioctl/ioctl-number.txt
Documentation/kernel-parameters.txt
Documentation/leds/leds-class-flash.txt
Documentation/leds/leds-lp5523.txt
Documentation/ntb.txt [new file with mode: 0644]
Documentation/remoteproc.txt
Documentation/target/tcm_mod_builder.py
Documentation/target/tcm_mod_builder.txt
Documentation/target/tcmu-design.txt
Documentation/watchdog/watchdog-kernel-api.txt
Documentation/watchdog/watchdog-parameters.txt
Documentation/x86/boot.txt
Documentation/x86/entry_64.txt
Kbuild
MAINTAINERS
Makefile
arch/arc/Kconfig
arch/arc/Makefile
arch/arc/boot/dts/Makefile
arch/arc/boot/dts/angel4.dts [deleted file]
arch/arc/boot/dts/axc001.dtsi [new file with mode: 0644]
arch/arc/boot/dts/axc003.dtsi [new file with mode: 0644]
arch/arc/boot/dts/axc003_idu.dtsi [new file with mode: 0644]
arch/arc/boot/dts/axs101.dts [new file with mode: 0644]
arch/arc/boot/dts/axs103.dts [new file with mode: 0644]
arch/arc/boot/dts/axs103_idu.dts [new file with mode: 0644]
arch/arc/boot/dts/axs10x_mb.dtsi [new file with mode: 0644]
arch/arc/boot/dts/nsim_700.dts [new file with mode: 0644]
arch/arc/boot/dts/nsim_hs.dts [new file with mode: 0644]
arch/arc/boot/dts/nsim_hs_idu.dts [new file with mode: 0644]
arch/arc/boot/dts/nsimosci_hs.dts [new file with mode: 0644]
arch/arc/boot/dts/nsimosci_hs_idu.dts [new file with mode: 0644]
arch/arc/boot/dts/vdk_axc003.dtsi [new file with mode: 0644]
arch/arc/boot/dts/vdk_axc003_idu.dtsi [new file with mode: 0644]
arch/arc/boot/dts/vdk_axs10x_mb.dtsi [new file with mode: 0644]
arch/arc/boot/dts/vdk_hs38.dts [new file with mode: 0644]
arch/arc/boot/dts/vdk_hs38_smp.dts [new file with mode: 0644]
arch/arc/configs/axs101_defconfig [new file with mode: 0644]
arch/arc/configs/axs103_defconfig [new file with mode: 0644]
arch/arc/configs/axs103_smp_defconfig [new file with mode: 0644]
arch/arc/configs/nsim_700_defconfig
arch/arc/configs/nsim_hs_defconfig [new file with mode: 0644]
arch/arc/configs/nsim_hs_smp_defconfig [new file with mode: 0644]
arch/arc/configs/nsimosci_defconfig
arch/arc/configs/nsimosci_hs_defconfig [new file with mode: 0644]
arch/arc/configs/nsimosci_hs_smp_defconfig [new file with mode: 0644]
arch/arc/configs/tb10x_defconfig
arch/arc/configs/vdk_hs38_defconfig [new file with mode: 0644]
arch/arc/configs/vdk_hs38_smp_defconfig [new file with mode: 0644]
arch/arc/include/asm/Kbuild
arch/arc/include/asm/arcregs.h
arch/arc/include/asm/atomic.h
arch/arc/include/asm/barrier.h [new file with mode: 0644]
arch/arc/include/asm/bitops.h
arch/arc/include/asm/cache.h
arch/arc/include/asm/cacheflush.h
arch/arc/include/asm/cmpxchg.h
arch/arc/include/asm/delay.h
arch/arc/include/asm/dma-mapping.h
arch/arc/include/asm/elf.h
arch/arc/include/asm/entry-arcv2.h [new file with mode: 0644]
arch/arc/include/asm/entry-compact.h [new file with mode: 0644]
arch/arc/include/asm/entry.h
arch/arc/include/asm/io.h
arch/arc/include/asm/irq.h
arch/arc/include/asm/irqflags-arcv2.h [new file with mode: 0644]
arch/arc/include/asm/irqflags-compact.h [new file with mode: 0644]
arch/arc/include/asm/irqflags.h
arch/arc/include/asm/mcip.h [new file with mode: 0644]
arch/arc/include/asm/mmu.h
arch/arc/include/asm/pgtable.h
arch/arc/include/asm/processor.h
arch/arc/include/asm/ptrace.h
arch/arc/include/asm/spinlock.h
arch/arc/include/asm/thread_info.h
arch/arc/include/asm/uaccess.h
arch/arc/include/uapi/asm/page.h
arch/arc/kernel/Makefile
arch/arc/kernel/asm-offsets.c
arch/arc/kernel/devtree.c
arch/arc/kernel/entry-arcv2.S [new file with mode: 0644]
arch/arc/kernel/entry-compact.S [new file with mode: 0644]
arch/arc/kernel/entry.S
arch/arc/kernel/head.S
arch/arc/kernel/intc-arcv2.c [new file with mode: 0644]
arch/arc/kernel/intc-compact.c [new file with mode: 0644]
arch/arc/kernel/irq.c
arch/arc/kernel/mcip.c [new file with mode: 0644]
arch/arc/kernel/perf_event.c
arch/arc/kernel/process.c
arch/arc/kernel/ptrace.c
arch/arc/kernel/setup.c
arch/arc/kernel/signal.c
arch/arc/kernel/smp.c
arch/arc/kernel/stacktrace.c
arch/arc/kernel/time.c
arch/arc/kernel/troubleshoot.c
arch/arc/lib/Makefile
arch/arc/lib/memcmp.S
arch/arc/lib/memcpy-archs.S [new file with mode: 0644]
arch/arc/lib/memset-archs.S [new file with mode: 0644]
arch/arc/lib/strcmp-archs.S [new file with mode: 0644]
arch/arc/mm/Makefile
arch/arc/mm/cache.c [new file with mode: 0644]
arch/arc/mm/cache_arc700.c [deleted file]
arch/arc/mm/dma.c
arch/arc/mm/tlb.c
arch/arc/mm/tlbex.S
arch/arc/plat-arcfpga/Kconfig [deleted file]
arch/arc/plat-arcfpga/Makefile [deleted file]
arch/arc/plat-arcfpga/include/plat/smp.h [deleted file]
arch/arc/plat-arcfpga/platform.c [deleted file]
arch/arc/plat-arcfpga/smp.c [deleted file]
arch/arc/plat-axs10x/Kconfig [new file with mode: 0644]
arch/arc/plat-axs10x/Makefile [new file with mode: 0644]
arch/arc/plat-axs10x/axs10x.c [new file with mode: 0644]
arch/arc/plat-sim/Kconfig [new file with mode: 0644]
arch/arc/plat-sim/Makefile [new file with mode: 0644]
arch/arc/plat-sim/platform.c [new file with mode: 0644]
arch/arm/boot/compressed/libfdt_env.h
arch/arm/boot/dts/armada-370-xp.dtsi
arch/arm/boot/dts/armada-370.dtsi
arch/arm/boot/dts/armada-xp-mv78260.dtsi
arch/arm/boot/dts/armada-xp-mv78460.dtsi
arch/arm/boot/dts/armada-xp.dtsi
arch/arm/boot/dts/at91sam9g45.dtsi
arch/arm/boot/dts/at91sam9x5.dtsi
arch/arm/boot/dts/atlas7.dtsi
arch/arm/boot/dts/sama5d3.dtsi
arch/arm/boot/dts/sama5d4.dtsi
arch/arm/configs/multi_v7_defconfig
arch/arm/include/asm/xen/hypervisor.h
arch/arm/include/asm/xen/page.h
arch/arm/kernel/entry-armv.S
arch/arm/kernel/setup.c
arch/arm/mach-at91/pm.c
arch/arm/mach-bcm/Kconfig
arch/arm/mach-dove/include/mach/irqs.h
arch/arm/mach-dove/irq.c
arch/arm/mach-exynos/exynos.c
arch/arm/mach-imx/pm-imx5.c
arch/arm/mach-imx/pm-imx6.c
arch/arm/mach-lpc32xx/irq.c
arch/arm/mach-mvebu/headsmp-a9.S
arch/arm/mach-mvebu/platsmp-a9.c
arch/arm/mach-mvebu/pm-board.c
arch/arm/mach-omap2/Makefile
arch/arm/mach-omap2/hwspinlock.c [deleted file]
arch/arm/mach-rockchip/platsmp.c
arch/arm/mach-socfpga/pm.c
arch/arm/mach-vexpress/spc.c
arch/arm/xen/enlighten.c
arch/arm/xen/mm.c
arch/arm/xen/p2m.c
arch/arm64/boot/dts/apm/apm-storm.dtsi
arch/arm64/kernel/cpuidle.c
arch/arm64/kernel/hw_breakpoint.c
arch/arm64/kernel/perf_event.c
arch/arm64/kernel/setup.c
arch/arm64/kernel/smp.c
arch/arm64/kernel/traps.c
arch/arm64/mm/fault.c
arch/arm64/mm/hugetlbpage.c
arch/arm64/mm/mmu.c
arch/arm64/net/bpf_jit.h
arch/arm64/net/bpf_jit_comp.c
arch/avr32/mach-at32ap/extint.c
arch/blackfin/kernel/trace.c
arch/cris/arch-v10/drivers/eeprom.c
arch/cris/arch-v32/mm/intmem.c
arch/frv/mb93090-mb00/flash.c
arch/ia64/hp/sim/simscsi.c
arch/ia64/mm/init.c
arch/ia64/mm/numa.c
arch/ia64/sn/kernel/mca.c
arch/m68k/mac/psc.c
arch/mips/ath25/ar2315.c
arch/mips/ath25/ar5312.c
arch/mips/cavium-octeon/Makefile
arch/mips/mti-sead3/Makefile
arch/mips/pci/pci-ar2315.c
arch/mips/ralink/irq.c
arch/mn10300/kernel/irq.c
arch/mn10300/unit-asb2303/flash.c
arch/nios2/kernel/time.c
arch/parisc/kernel/pdc_cons.c
arch/parisc/kernel/perf.c
arch/powerpc/boot/libfdt_env.h
arch/powerpc/boot/of.h
arch/powerpc/kernel/Makefile
arch/powerpc/kernel/time.c
arch/powerpc/mm/hugetlbpage.c
arch/powerpc/platforms/83xx/suspend.c
arch/powerpc/platforms/cell/spufs/inode.c
arch/powerpc/platforms/ps3/time.c
arch/powerpc/sysdev/fsl_lbc.c
arch/s390/hypfs/inode.c
arch/s390/kernel/perf_cpum_sf.c
arch/sh/boards/mach-highlander/psw.c
arch/sh/boards/mach-landisk/psw.c
arch/tile/kernel/stack.c
arch/tile/kernel/usb.c
arch/tile/mm/elf.c
arch/um/drivers/hostaudio_kern.c
arch/unicore32/kernel/fpu-ucf64.c
arch/x86/Kconfig
arch/x86/configs/xen.config [new file with mode: 0644]
arch/x86/crypto/aesni-intel_glue.c
arch/x86/include/asm/intel_pmc_ipc.h [new file with mode: 0644]
arch/x86/include/asm/kvm_host.h
arch/x86/include/uapi/asm/hyperv.h
arch/x86/kernel/bootflag.c
arch/x86/kernel/cpu/common.c
arch/x86/kernel/cpu/perf_event.c
arch/x86/kernel/cpu/perf_event_intel_bts.c
arch/x86/kernel/cpu/perf_event_intel_pt.c
arch/x86/kernel/devicetree.c
arch/x86/kernel/fpu/init.c
arch/x86/kernel/kexec-bzimage64.c
arch/x86/kernel/process_32.c
arch/x86/kernel/process_64.c
arch/x86/kernel/setup.c
arch/x86/kernel/vsmp_64.c
arch/x86/kvm/i8254.c
arch/x86/kvm/lapic.c
arch/x86/kvm/lapic.h
arch/x86/kvm/mmu_audit.c
arch/x86/kvm/x86.c
arch/x86/lib/usercopy.c
arch/x86/platform/intel-mid/intel_mid_vrtc.c
arch/x86/platform/uv/uv_nmi.c
arch/xtensa/platforms/iss/network.c
crypto/asymmetric_keys/pkcs7_key_type.c
drivers/acpi/Kconfig
drivers/acpi/acpica/accommon.h
drivers/acpi/acpica/acglobal.h
drivers/acpi/acpica/acinterp.h
drivers/acpi/acpica/aclocal.h
drivers/acpi/acpica/acnamesp.h
drivers/acpi/acpica/acobject.h
drivers/acpi/acpica/acstruct.h
drivers/acpi/acpica/acutils.h
drivers/acpi/acpica/dsfield.c
drivers/acpi/acpica/dsinit.c
drivers/acpi/acpica/dsobject.c
drivers/acpi/acpica/dsutils.c
drivers/acpi/acpica/dswload.c
drivers/acpi/acpica/evgpeinit.c
drivers/acpi/acpica/exconfig.c
drivers/acpi/acpica/exconvrt.c
drivers/acpi/acpica/exdebug.c
drivers/acpi/acpica/exdump.c
drivers/acpi/acpica/exfield.c
drivers/acpi/acpica/exfldio.c
drivers/acpi/acpica/exmisc.c
drivers/acpi/acpica/exnames.c
drivers/acpi/acpica/exoparg2.c
drivers/acpi/acpica/exoparg3.c
drivers/acpi/acpica/exregion.c
drivers/acpi/acpica/exstorob.c
drivers/acpi/acpica/exutils.c
drivers/acpi/acpica/hwxfsleep.c
drivers/acpi/acpica/nsaccess.c
drivers/acpi/acpica/nsconvert.c
drivers/acpi/acpica/nsdump.c
drivers/acpi/acpica/nseval.c
drivers/acpi/acpica/nsinit.c
drivers/acpi/acpica/nsparse.c
drivers/acpi/acpica/nsrepair2.c
drivers/acpi/acpica/nssearch.c
drivers/acpi/acpica/nsutils.c
drivers/acpi/acpica/nsxfeval.c
drivers/acpi/acpica/nsxfname.c
drivers/acpi/acpica/psutils.c
drivers/acpi/acpica/rscreate.c
drivers/acpi/acpica/rsmisc.c
drivers/acpi/acpica/rsutils.c
drivers/acpi/acpica/rsxface.c
drivers/acpi/acpica/tbdata.c
drivers/acpi/acpica/tbfadt.c
drivers/acpi/acpica/tbfind.c
drivers/acpi/acpica/tbinstal.c
drivers/acpi/acpica/tbprint.c
drivers/acpi/acpica/tbutils.c
drivers/acpi/acpica/tbxface.c
drivers/acpi/acpica/tbxfload.c
drivers/acpi/acpica/utalloc.c
drivers/acpi/acpica/utbuffer.c
drivers/acpi/acpica/utcache.c
drivers/acpi/acpica/utcopy.c
drivers/acpi/acpica/utdebug.c
drivers/acpi/acpica/utglobal.c
drivers/acpi/acpica/utids.c
drivers/acpi/acpica/utmisc.c
drivers/acpi/acpica/utosi.c
drivers/acpi/acpica/utpredef.c
drivers/acpi/acpica/utprint.c
drivers/acpi/acpica/utstring.c
drivers/acpi/acpica/uttrack.c
drivers/acpi/acpica/utxface.c
drivers/acpi/acpica/utxfinit.c
drivers/acpi/blacklist.c
drivers/acpi/internal.h
drivers/acpi/osl.c
drivers/acpi/resource.c
drivers/base/node.c
drivers/base/property.c
drivers/block/drbd/drbd_debugfs.c
drivers/block/loop.c
drivers/block/null_blk.c
drivers/block/nvme-core.c
drivers/block/rbd.c
drivers/block/xen-blkback/blkback.c
drivers/block/xen-blkback/common.h
drivers/block/xen-blkfront.c
drivers/char/agp/intel-gtt.c
drivers/char/ipmi/ipmi_watchdog.c
drivers/clk/Kconfig
drivers/clk/Makefile
drivers/clk/at91/clk-main.c
drivers/clk/at91/clk-master.c
drivers/clk/at91/clk-programmable.c
drivers/clk/at91/clk-slow.c
drivers/clk/at91/clk-smd.c
drivers/clk/at91/clk-usb.c
drivers/clk/at91/pmc.c
drivers/clk/bcm/Kconfig
drivers/clk/bcm/Makefile
drivers/clk/bcm/clk-cygnus.c [new file with mode: 0644]
drivers/clk/bcm/clk-iproc-armpll.c [new file with mode: 0644]
drivers/clk/bcm/clk-iproc-asiu.c [new file with mode: 0644]
drivers/clk/bcm/clk-iproc-pll.c [new file with mode: 0644]
drivers/clk/bcm/clk-iproc.h [new file with mode: 0644]
drivers/clk/bcm/clk-kona-setup.c
drivers/clk/bcm/clk-kona.c
drivers/clk/bcm/clk-kona.h
drivers/clk/berlin/berlin2-pll.c
drivers/clk/clk-asm9260.c
drivers/clk/clk-axm5516.c
drivers/clk/clk-cdce706.c
drivers/clk/clk-cdce925.c [new file with mode: 0644]
drivers/clk/clk-composite.c
drivers/clk/clk-conf.c
drivers/clk/clk-divider.c
drivers/clk/clk-fixed-factor.c
drivers/clk/clk-fixed-rate.c
drivers/clk/clk-fractional-divider.c
drivers/clk/clk-gate.c
drivers/clk/clk-gpio-gate.c
drivers/clk/clk-ls1x.c
drivers/clk/clk-max-gen.c
drivers/clk/clk-max77686.c
drivers/clk/clk-max77802.c
drivers/clk/clk-moxart.c
drivers/clk/clk-mux.c
drivers/clk/clk-nomadik.c
drivers/clk/clk-si5351.c
drivers/clk/clk-stm32f4.c [new file with mode: 0644]
drivers/clk/clk-u300.c
drivers/clk/clk-xgene.c
drivers/clk/clk.c
drivers/clk/hisilicon/Kconfig [new file with mode: 0644]
drivers/clk/hisilicon/Makefile
drivers/clk/hisilicon/clk-hi3620.c
drivers/clk/hisilicon/clk-hi6220.c [new file with mode: 0644]
drivers/clk/hisilicon/clk-hix5hd2.c
drivers/clk/hisilicon/clk.c
drivers/clk/hisilicon/clk.h
drivers/clk/hisilicon/clkdivider-hi6220.c [new file with mode: 0644]
drivers/clk/keystone/pll.c
drivers/clk/mediatek/Makefile [new file with mode: 0644]
drivers/clk/mediatek/clk-gate.c [new file with mode: 0644]
drivers/clk/mediatek/clk-gate.h [new file with mode: 0644]
drivers/clk/mediatek/clk-mt8135.c [new file with mode: 0644]
drivers/clk/mediatek/clk-mt8173.c [new file with mode: 0644]
drivers/clk/mediatek/clk-mtk.c [new file with mode: 0644]
drivers/clk/mediatek/clk-mtk.h [new file with mode: 0644]
drivers/clk/mediatek/clk-pll.c [new file with mode: 0644]
drivers/clk/mediatek/reset.c [new file with mode: 0644]
drivers/clk/meson/Makefile [new file with mode: 0644]
drivers/clk/meson/clk-cpu.c [new file with mode: 0644]
drivers/clk/meson/clk-pll.c [new file with mode: 0644]
drivers/clk/meson/clkc.c [new file with mode: 0644]
drivers/clk/meson/clkc.h [new file with mode: 0644]
drivers/clk/meson/meson8b-clkc.c [new file with mode: 0644]
drivers/clk/mmp/Makefile
drivers/clk/mmp/clk-apbc.c
drivers/clk/mmp/clk-apmu.c
drivers/clk/mmp/clk-mmp2.c
drivers/clk/mmp/clk-of-mmp2.c
drivers/clk/mmp/clk-of-pxa168.c
drivers/clk/mmp/clk-of-pxa1928.c [new file with mode: 0644]
drivers/clk/mmp/clk-of-pxa910.c
drivers/clk/mvebu/armada-370.c
drivers/clk/mxs/clk-imx23.c
drivers/clk/mxs/clk-imx28.c
drivers/clk/mxs/clk.h
drivers/clk/nxp/Makefile [new file with mode: 0644]
drivers/clk/nxp/clk-lpc18xx-ccu.c [new file with mode: 0644]
drivers/clk/nxp/clk-lpc18xx-cgu.c [new file with mode: 0644]
drivers/clk/pistachio/clk-pll.c
drivers/clk/pxa/clk-pxa.h
drivers/clk/rockchip/clk-cpu.c
drivers/clk/rockchip/clk-mmc-phase.c
drivers/clk/rockchip/clk-pll.c
drivers/clk/rockchip/clk-rk3188.c
drivers/clk/rockchip/clk-rk3288.c
drivers/clk/rockchip/clk.c
drivers/clk/rockchip/clk.h
drivers/clk/samsung/Makefile
drivers/clk/samsung/clk-cpu.c [new file with mode: 0644]
drivers/clk/samsung/clk-cpu.h [new file with mode: 0644]
drivers/clk/samsung/clk-exynos4.c
drivers/clk/samsung/clk-exynos5260.c
drivers/clk/samsung/clk-exynos5420.c
drivers/clk/samsung/clk-exynos5433.c
drivers/clk/samsung/clk-pll.c
drivers/clk/samsung/clk-s3c2410-dclk.c
drivers/clk/samsung/clk-s5pv210.c
drivers/clk/samsung/clk.c
drivers/clk/samsung/clk.h
drivers/clk/shmobile/clk-emev2.c
drivers/clk/sirf/Makefile
drivers/clk/sirf/clk-atlas7.c [new file with mode: 0644]
drivers/clk/sirf/clk-common.c
drivers/clk/socfpga/Makefile
drivers/clk/socfpga/clk-gate-a10.c [new file with mode: 0644]
drivers/clk/socfpga/clk-gate.c
drivers/clk/socfpga/clk-periph-a10.c [new file with mode: 0644]
drivers/clk/socfpga/clk-periph.c
drivers/clk/socfpga/clk-pll-a10.c [new file with mode: 0644]
drivers/clk/socfpga/clk-pll.c
drivers/clk/socfpga/clk.c
drivers/clk/socfpga/clk.h
drivers/clk/st/clk-flexgen.c
drivers/clk/st/clkgen-fsyn.c
drivers/clk/st/clkgen-mux.c
drivers/clk/st/clkgen-pll.c
drivers/clk/sunxi/clk-mod0.c
drivers/clk/sunxi/clk-sun9i-core.c
drivers/clk/sunxi/clk-sunxi.c
drivers/clk/sunxi/clk-usb.c
drivers/clk/tegra/Kconfig [new file with mode: 0644]
drivers/clk/tegra/Makefile
drivers/clk/tegra/clk-emc.c [new file with mode: 0644]
drivers/clk/tegra/clk-tegra124.c
drivers/clk/tegra/clk-tegra30.c
drivers/clk/tegra/clk.h
drivers/clk/ti/clk-dra7-atl.c
drivers/clk/ti/clk.c
drivers/clk/ti/clockdomain.c
drivers/clk/ti/dpll.c
drivers/clk/ti/fapll.c
drivers/clk/ux500/u8500_clk.c
drivers/clk/ux500/u8500_of_clk.c
drivers/clk/versatile/clk-sp810.c
drivers/clk/zynq/clkc.c
drivers/clocksource/exynos_mct.c
drivers/cpufreq/Kconfig.arm
drivers/cpufreq/Makefile
drivers/cpufreq/exynos-cpufreq.c
drivers/cpufreq/exynos-cpufreq.h
drivers/cpufreq/exynos4210-cpufreq.c [deleted file]
drivers/cpufreq/s5pv210-cpufreq.c
drivers/cpuidle/cpuidle-at91.c
drivers/cpuidle/cpuidle-calxeda.c
drivers/cpuidle/cpuidle-powernv.c
drivers/cpuidle/cpuidle-zynq.c
drivers/crypto/marvell/cesa.c
drivers/crypto/mv_cesa.c
drivers/crypto/qat/qat_common/adf_accel_engine.c
drivers/crypto/qat/qat_common/adf_transport.c
drivers/dma/dmatest.c
drivers/dma/mmp_tdma.c
drivers/edac/octeon_edac-l2c.c
drivers/edac/octeon_edac-lmc.c
drivers/edac/octeon_edac-pc.c
drivers/firmware/efi/efi.c
drivers/firmware/efi/libstub/Makefile
drivers/gpio/gpio-bcm-kona.c
drivers/gpio/gpio-dwapb.c
drivers/gpio/gpio-msic.c
drivers/gpio/gpiolib.c
drivers/gpu/drm/amd/amdgpu/amdgpu.h
drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
drivers/gpu/drm/amd/amdgpu/cik.c
drivers/gpu/drm/amd/amdgpu/cikd.h
drivers/gpu/drm/amd/amdgpu/cz_dpm.c
drivers/gpu/drm/amd/amdgpu/cz_dpm.h
drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
drivers/gpu/drm/amd/amdgpu/vi.c
drivers/gpu/drm/drm_crtc.c
drivers/gpu/drm/i915/i915_gem_gtt.c
drivers/gpu/drm/i915/i915_reg.h
drivers/gpu/drm/i915/intel_display.c
drivers/gpu/drm/i915/intel_dp.c
drivers/gpu/drm/i915/intel_drv.h
drivers/gpu/drm/i915/intel_panel.c
drivers/gpu/drm/nouveau/nouveau_gem.c
drivers/gpu/drm/radeon/cik.c
drivers/gpu/drm/radeon/cik_sdma.c
drivers/gpu/drm/radeon/radeon_audio.c
drivers/gpu/drm/radeon/radeon_fb.c
drivers/gpu/drm/radeon/radeon_ttm.c
drivers/gpu/drm/radeon/radeon_vm.c
drivers/gpu/drm/rockchip/rockchip_drm_drv.c
drivers/gpu/drm/rockchip/rockchip_drm_vop.c
drivers/hsi/controllers/omap_ssi.h
drivers/hwmon/dell-smm-hwmon.c
drivers/hwmon/mcp3021.c
drivers/hwmon/nct7802.c
drivers/hwmon/w83627ehf.c
drivers/hwmon/w83792d.c
drivers/hwspinlock/Kconfig
drivers/hwspinlock/Makefile
drivers/hwspinlock/hwspinlock_core.c
drivers/hwspinlock/omap_hwspinlock.c
drivers/hwspinlock/qcom_hwspinlock.c [new file with mode: 0644]
drivers/hwspinlock/sirf_hwspinlock.c [new file with mode: 0644]
drivers/ide/ide.c
drivers/infiniband/hw/ehca/ipz_pt_fn.c
drivers/infiniband/hw/ipath/ipath_fs.c
drivers/infiniband/hw/qib/qib_fs.c
drivers/infiniband/ulp/isert/ib_isert.c
drivers/infiniband/ulp/srp/ib_srp.c
drivers/infiniband/ulp/srpt/ib_srpt.c
drivers/infiniband/ulp/srpt/ib_srpt.h
drivers/input/input.c
drivers/input/joystick/xpad.c
drivers/input/keyboard/imx_keypad.c
drivers/input/misc/ati_remote2.c
drivers/input/misc/axp20x-pek.c
drivers/input/mouse/psmouse-base.c
drivers/input/serio/Kconfig
drivers/input/touchscreen/Kconfig
drivers/input/touchscreen/Makefile
drivers/input/touchscreen/edt-ft5x06.c
drivers/input/touchscreen/of_touchscreen.c
drivers/input/touchscreen/tsc2005.c
drivers/input/touchscreen/wdt87xx_i2c.c [new file with mode: 0644]
drivers/iommu/amd_iommu.c
drivers/iommu/arm-smmu-v3.c
drivers/iommu/arm-smmu.c
drivers/iommu/iommu.c
drivers/irqchip/irqchip.h
drivers/leds/Kconfig
drivers/leds/Makefile
drivers/leds/led-class.c
drivers/leds/led-core.c
drivers/leds/leds-aat1290.c [new file with mode: 0644]
drivers/leds/leds-bcm6328.c [new file with mode: 0644]
drivers/leds/leds-bcm6358.c [new file with mode: 0644]
drivers/leds/leds-cobalt-raq.c
drivers/leds/leds-gpio.c
drivers/leds/leds-ktd2692.c [new file with mode: 0644]
drivers/leds/leds-lp5523.c
drivers/leds/leds-lp55xx-common.c
drivers/leds/leds-max77693.c [new file with mode: 0644]
drivers/leds/leds-tlc591xx.c [new file with mode: 0644]
drivers/leds/leds.h
drivers/mailbox/pl320-ipc.c
drivers/md/bcache/journal.c
drivers/md/bcache/super.c
drivers/md/bcache/util.h
drivers/md/bitmap.c
drivers/md/md.c
drivers/media/platform/coda/coda-common.c
drivers/media/v4l2-core/Kconfig
drivers/media/v4l2-core/Makefile
drivers/media/v4l2-core/v4l2-async.c
drivers/media/v4l2-core/v4l2-flash-led-class.c [new file with mode: 0644]
drivers/memstick/host/jmb38x_ms.c
drivers/memstick/host/r592.c
drivers/mfd/asic3.c
drivers/misc/lis3lv02d/lis3lv02d.c
drivers/misc/mei/bus.c
drivers/misc/mei/init.c
drivers/misc/mei/nfc.c
drivers/mmc/host/omap_hsmmc.c
drivers/mtd/ubi/block.c
drivers/net/Kconfig
drivers/net/ethernet/amd/xgbe/xgbe-desc.c
drivers/net/ethernet/apm/xgene/xgene_enet_main.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
drivers/net/ethernet/broadcom/genet/bcmgenet.h
drivers/net/ethernet/broadcom/genet/bcmmii.c
drivers/net/ethernet/cavium/liquidio/lio_ethtool.c
drivers/net/ethernet/cavium/liquidio/octeon_device.c
drivers/net/ethernet/cavium/liquidio/octeon_droq.c
drivers/net/ethernet/cavium/liquidio/request_manager.c
drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
drivers/net/ethernet/cisco/enic/enic_main.c
drivers/net/ethernet/cisco/enic/vnic_rq.h
drivers/net/ethernet/freescale/Kconfig
drivers/net/ethernet/freescale/fec.h
drivers/net/ethernet/freescale/fec_main.c
drivers/net/ethernet/icplus/ipg.c
drivers/net/ethernet/icplus/ipg.h
drivers/net/ethernet/intel/e1000e/ich8lan.c
drivers/net/ethernet/intel/e1000e/netdev.c
drivers/net/ethernet/intel/i40evf/i40e_txrx.c
drivers/net/ethernet/intel/i40evf/i40evf.h
drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c
drivers/net/ethernet/intel/i40evf/i40evf_main.c
drivers/net/ethernet/intel/igb/e1000_82575.c
drivers/net/ethernet/intel/igb/e1000_defines.h
drivers/net/ethernet/intel/igb/igb_main.c
drivers/net/ethernet/marvell/mvneta.c
drivers/net/ethernet/mellanox/mlx4/en_netdev.c
drivers/net/ethernet/mellanox/mlx4/en_rx.c
drivers/net/ethernet/mellanox/mlx4/en_tx.c
drivers/net/ethernet/mellanox/mlx4/intf.c
drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
drivers/net/ethernet/myricom/myri10ge/myri10ge.c
drivers/net/ethernet/renesas/ravb_ptp.c
drivers/net/ethernet/sis/sis900.h
drivers/net/ethernet/stmicro/stmmac/mmc_core.c
drivers/net/ethernet/via/Kconfig
drivers/net/macvtap.c
drivers/net/ntb_netdev.c
drivers/net/phy/bcm7xxx.c
drivers/net/phy/mdio-bcm-unimac.c
drivers/net/phy/phy_device.c
drivers/net/phy/vitesse.c
drivers/net/tun.c
drivers/net/vmxnet3/vmxnet3_int.h
drivers/net/wireless/ath/wil6210/main.c
drivers/net/wireless/libertas_tf/if_usb.c
drivers/net/xen-netback/netback.c
drivers/net/xen-netfront.c
drivers/ntb/Kconfig
drivers/ntb/Makefile
drivers/ntb/hw/Kconfig [new file with mode: 0644]
drivers/ntb/hw/Makefile [new file with mode: 0644]
drivers/ntb/hw/intel/Kconfig [new file with mode: 0644]
drivers/ntb/hw/intel/Makefile [new file with mode: 0644]
drivers/ntb/hw/intel/ntb_hw_intel.c [new file with mode: 0644]
drivers/ntb/hw/intel/ntb_hw_intel.h [new file with mode: 0644]
drivers/ntb/ntb.c [new file with mode: 0644]
drivers/ntb/ntb_hw.c [deleted file]
drivers/ntb/ntb_hw.h [deleted file]
drivers/ntb/ntb_regs.h [deleted file]
drivers/ntb/ntb_transport.c
drivers/ntb/test/Kconfig [new file with mode: 0644]
drivers/ntb/test/Makefile [new file with mode: 0644]
drivers/ntb/test/ntb_pingpong.c [new file with mode: 0644]
drivers/ntb/test/ntb_tool.c [new file with mode: 0644]
drivers/of/Kconfig
drivers/of/Makefile
drivers/of/address.c
drivers/of/base.c
drivers/of/device.c
drivers/of/fdt.c
drivers/of/irq.c
drivers/of/overlay.c
drivers/pci/host/pci-keystone.c
drivers/pci/xen-pcifront.c
drivers/pcmcia/xxs1500_ss.c
drivers/pinctrl/mediatek/pinctrl-mtk-common.c
drivers/pinctrl/pinctrl-adi2.c
drivers/pinctrl/pinctrl-st.c
drivers/pinctrl/samsung/pinctrl-exynos.c
drivers/pinctrl/samsung/pinctrl-s3c24xx.c
drivers/pinctrl/samsung/pinctrl-s3c64xx.c
drivers/pinctrl/sunxi/pinctrl-sunxi.c
drivers/platform/goldfish/pdev_bus.c
drivers/platform/x86/Kconfig
drivers/platform/x86/Makefile
drivers/platform/x86/asus-wmi.c
drivers/platform/x86/dell-laptop.c
drivers/platform/x86/dell-rbtn.c [new file with mode: 0644]
drivers/platform/x86/dell-rbtn.h [new file with mode: 0644]
drivers/platform/x86/ideapad-laptop.c
drivers/platform/x86/intel_pmc_ipc.c [new file with mode: 0644]
drivers/platform/x86/pvpanic.c
drivers/platform/x86/tc1100-wmi.c
drivers/platform/x86/toshiba_acpi.c
drivers/platform/x86/toshiba_bluetooth.c
drivers/platform/x86/toshiba_haps.c
drivers/power/reset/syscon-reboot.c
drivers/power/test_power.c
drivers/regulator/max77802.c
drivers/remoteproc/Kconfig
drivers/remoteproc/Makefile
drivers/remoteproc/da8xx_remoteproc.c
drivers/remoteproc/remoteproc_core.c
drivers/remoteproc/remoteproc_internal.h
drivers/remoteproc/ste_modem_rproc.c
drivers/remoteproc/wkup_m3_rproc.c [new file with mode: 0644]
drivers/s390/kvm/virtio_ccw.c
drivers/scsi/cxgbi/libcxgbi.h
drivers/scsi/qla2xxx/qla_target.c
drivers/scsi/qla2xxx/qla_target.h
drivers/scsi/qla2xxx/tcm_qla2xxx.c
drivers/scsi/qla2xxx/tcm_qla2xxx.h
drivers/scsi/scsi_debug.c
drivers/sh/intc/core.c
drivers/sh/intc/virq.c
drivers/soc/qcom/spm.c
drivers/soc/tegra/pmc.c
drivers/soc/versatile/soc-realview.c
drivers/target/iscsi/iscsi_target.c
drivers/target/iscsi/iscsi_target_configfs.c
drivers/target/iscsi/iscsi_target_erl0.c
drivers/target/iscsi/iscsi_target_erl0.h
drivers/target/iscsi/iscsi_target_login.c
drivers/target/iscsi/iscsi_target_login.h
drivers/target/iscsi/iscsi_target_parameters.c
drivers/target/iscsi/iscsi_target_parameters.h
drivers/target/iscsi/iscsi_target_tmr.c
drivers/target/iscsi/iscsi_target_tpg.c
drivers/target/iscsi/iscsi_target_util.c
drivers/target/iscsi/iscsi_target_util.h
drivers/target/loopback/tcm_loop.c
drivers/target/loopback/tcm_loop.h
drivers/target/sbp/sbp_target.c
drivers/target/sbp/sbp_target.h
drivers/target/target_core_alua.c
drivers/target/target_core_alua.h
drivers/target/target_core_configfs.c
drivers/target/target_core_device.c
drivers/target/target_core_fabric_configfs.c
drivers/target/target_core_fabric_lib.c
drivers/target/target_core_file.c
drivers/target/target_core_file.h
drivers/target/target_core_hba.c
drivers/target/target_core_iblock.c
drivers/target/target_core_internal.h
drivers/target/target_core_pr.c
drivers/target/target_core_pr.h
drivers/target/target_core_pscsi.c
drivers/target/target_core_rd.c
drivers/target/target_core_sbc.c
drivers/target/target_core_spc.c
drivers/target/target_core_stat.c
drivers/target/target_core_tmr.c
drivers/target/target_core_tpg.c
drivers/target/target_core_transport.c
drivers/target/target_core_ua.c
drivers/target/target_core_ua.h
drivers/target/target_core_user.c
drivers/target/target_core_xcopy.c
drivers/target/tcm_fc/tcm_fc.h
drivers/target/tcm_fc/tfc_cmd.c
drivers/target/tcm_fc/tfc_conf.c
drivers/target/tcm_fc/tfc_io.c
drivers/target/tcm_fc/tfc_sess.c
drivers/thermal/intel_powerclamp.c
drivers/tty/hvc/hvc_iucv.c
drivers/tty/hvc/hvc_xen.c
drivers/tty/metag_da.c
drivers/tty/serial/8250/8250_omap.c
drivers/tty/serial/omap-serial.c
drivers/tty/sysrq.c
drivers/usb/atm/ueagle-atm.c
drivers/usb/gadget/function/f_mass_storage.c
drivers/usb/gadget/function/storage_common.c
drivers/usb/gadget/legacy/tcm_usb_gadget.c
drivers/usb/gadget/legacy/tcm_usb_gadget.h
drivers/vhost/Kconfig
drivers/vhost/scsi.c
drivers/vhost/vhost.c
drivers/vhost/vhost.h
drivers/video/fbdev/omap2/dss/dss.c
drivers/video/fbdev/uvesafb.c
drivers/video/fbdev/vt8623fb.c
drivers/virtio/virtio_mmio.c
drivers/virtio/virtio_pci_common.c
drivers/virtio/virtio_pci_common.h
drivers/virtio/virtio_pci_legacy.c
drivers/virtio/virtio_pci_modern.c
drivers/watchdog/Kconfig
drivers/watchdog/Makefile
drivers/watchdog/at91sam9_wdt.c
drivers/watchdog/da9062_wdt.c [new file with mode: 0644]
drivers/watchdog/digicolor_wdt.c [new file with mode: 0644]
drivers/watchdog/dw_wdt.c
drivers/watchdog/gpio_wdt.c
drivers/watchdog/hpwdt.c
drivers/watchdog/imgpdc_wdt.c
drivers/watchdog/imx2_wdt.c
drivers/watchdog/max63xx_wdt.c
drivers/watchdog/mena21_wdt.c
drivers/watchdog/omap_wdt.c
drivers/watchdog/omap_wdt.h
drivers/watchdog/st_lpc_wdt.c
drivers/watchdog/watchdog_core.c
drivers/xen/events/events_base.c
drivers/xen/events/events_fifo.c
drivers/xen/gntdev.c
drivers/xen/grant-table.c
drivers/xen/manage.c
drivers/xen/tmem.c
drivers/xen/xen-scsiback.c
drivers/xen/xenbus/xenbus_client.c
drivers/xen/xenbus/xenbus_probe.c
fs/adfs/super.c
fs/affs/affs.h
fs/affs/amigaffs.c
fs/affs/inode.c
fs/affs/symlink.c
fs/autofs4/autofs_i.h
fs/befs/befs.h
fs/binfmt_elf.c
fs/block_dev.c
fs/btrfs/file.c
fs/cachefiles/internal.h
fs/cachefiles/namei.c
fs/ceph/acl.c
fs/ceph/addr.c
fs/ceph/caps.c
fs/ceph/dir.c
fs/ceph/file.c
fs/ceph/inode.c
fs/ceph/mds_client.c
fs/ceph/mds_client.h
fs/ceph/snap.c
fs/ceph/super.c
fs/ceph/super.h
fs/ceph/xattr.c
fs/coda/coda_linux.h
fs/configfs/inode.c
fs/configfs/mount.c
fs/coredump.c
fs/dax.c
fs/dcache.c
fs/debugfs/inode.c
fs/devpts/inode.c
fs/exofs/dir.c
fs/ext2/dir.c
fs/ext4/extents.c
fs/ext4/inode.c
fs/ext4/mballoc.c
fs/ext4/migrate.c
fs/ext4/super.c
fs/file.c
fs/file_table.c
fs/freevxfs/vxfs_lookup.c
fs/fscache/cookie.c
fs/fscache/internal.h
fs/fscache/object.c
fs/fscache/operation.c
fs/fscache/page.c
fs/fscache/stats.c
fs/fuse/cuse.c
fs/fuse/dev.c
fs/fuse/file.c
fs/fuse/fuse_i.h
fs/fuse/inode.c
fs/hfs/hfs_fs.h
fs/hfsplus/hfsplus_fs.h
fs/hpfs/hpfs_fn.h
fs/inode.c
fs/internal.h
fs/jffs2/os-linux.h
fs/jfs/jfs_incore.h
fs/kernfs/dir.c
fs/kernfs/inode.c
fs/libfs.c
fs/minix/dir.c
fs/minix/minix.h
fs/mount.h
fs/namei.c
fs/namespace.c
fs/ncpfs/dir.c
fs/nfs/callback.c
fs/nfs/callback_proc.c
fs/nfs/callback_xdr.c
fs/nfs/client.c
fs/nfs/dir.c
fs/nfs/file.c
fs/nfs/flexfilelayout/flexfilelayout.c
fs/nfs/flexfilelayout/flexfilelayout.h
fs/nfs/flexfilelayout/flexfilelayoutdev.c
fs/nfs/inode.c
fs/nfs/nfs3xdr.c
fs/nfs/nfs42.h
fs/nfs/nfs42proc.c
fs/nfs/nfs42xdr.c
fs/nfs/nfs4_fs.h
fs/nfs/nfs4client.c
fs/nfs/nfs4file.c
fs/nfs/nfs4getroot.c
fs/nfs/nfs4idmap.c
fs/nfs/nfs4proc.c
fs/nfs/nfs4state.c
fs/nfs/nfs4xdr.c
fs/nfs/pagelist.c
fs/nfs/pnfs.c
fs/nfs/pnfs.h
fs/nfs/super.c
fs/nfs/write.c
fs/nilfs2/dir.c
fs/nilfs2/inode.c
fs/notify/inotify/inotify_user.c
fs/ntfs/file.c
fs/ntfs/inode.h
fs/open.c
fs/overlayfs/inode.c
fs/overlayfs/overlayfs.h
fs/overlayfs/readdir.c
fs/overlayfs/super.c
fs/posix_acl.c
fs/proc/base.c
fs/proc/generic.c
fs/proc/inode.c
fs/proc/internal.h
fs/proc/nommu.c
fs/proc/proc_sysctl.c
fs/proc/root.c
fs/proc/task_mmu.c
fs/proc/task_nommu.c
fs/proc_namespace.c
fs/pstore/inode.c
fs/qnx6/dir.c
fs/seq_file.c
fs/squashfs/squashfs_fs_i.h
fs/super.c
fs/sysfs/dir.c
fs/sysfs/mount.c
fs/sysv/dir.c
fs/sysv/sysv.h
fs/tracefs/inode.c
fs/udf/udf_i.h
fs/ufs/balloc.c
fs/ufs/dir.c
fs/ufs/ialloc.c
fs/ufs/inode.c
fs/ufs/namei.c
fs/ufs/super.c
fs/ufs/ufs.h
fs/xfs/xfs_file.c
include/acpi/acnames.h
include/acpi/acoutput.h
include/acpi/acpi_bus.h
include/acpi/acpixf.h
include/acpi/actbl.h
include/acpi/actbl1.h
include/acpi/actbl2.h
include/acpi/actbl3.h
include/acpi/actypes.h
include/acpi/platform/acenv.h
include/acpi/platform/acenvex.h
include/acpi/platform/acgcc.h
include/acpi/video.h
include/asm-generic/barrier.h
include/drm/drm_mem_util.h
include/dt-bindings/clock/bcm-cygnus.h [new file with mode: 0644]
include/dt-bindings/clock/hi6220-clock.h [new file with mode: 0644]
include/dt-bindings/clock/lpc18xx-ccu.h [new file with mode: 0644]
include/dt-bindings/clock/lpc18xx-cgu.h [new file with mode: 0644]
include/dt-bindings/clock/marvell,mmp2.h
include/dt-bindings/clock/marvell,pxa168.h
include/dt-bindings/clock/marvell,pxa1928.h [new file with mode: 0644]
include/dt-bindings/clock/marvell,pxa910.h
include/dt-bindings/clock/meson8b-clkc.h [new file with mode: 0644]
include/dt-bindings/clock/mt8135-clk.h [new file with mode: 0644]
include/dt-bindings/clock/mt8173-clk.h [new file with mode: 0644]
include/dt-bindings/reset-controller/mt8135-resets.h [new file with mode: 0644]
include/dt-bindings/reset-controller/mt8173-resets.h [new file with mode: 0644]
include/linux/acpi.h
include/linux/backing-dev-defs.h
include/linux/backing-dev.h
include/linux/buffer_head.h
include/linux/ceph/libceph.h
include/linux/ceph/osd_client.h
include/linux/clk-provider.h
include/linux/compiler.h
include/linux/crc-t10dif.h
include/linux/crush/crush.h
include/linux/crush/hash.h
include/linux/crush/mapper.h
include/linux/dcache.h
include/linux/device.h
include/linux/fdtable.h
include/linux/fs.h
include/linux/fscache-cache.h
include/linux/genalloc.h
include/linux/gfp.h
include/linux/gpio/consumer.h
include/linux/hwspinlock.h
include/linux/init.h
include/linux/input/touchscreen.h
include/linux/irq.h
include/linux/irqchip.h
include/linux/irqdesc.h
include/linux/irqnr.h
include/linux/kernel.h
include/linux/kernfs.h
include/linux/leds.h
include/linux/libfdt_env.h
include/linux/memblock.h
include/linux/mm.h
include/linux/mmzone.h
include/linux/module.h
include/linux/moduleparam.h
include/linux/nfs4.h
include/linux/nfs_fs.h
include/linux/nfs_fs_sb.h
include/linux/nfs_page.h
include/linux/nfs_xdr.h
include/linux/ntb.h
include/linux/ntb_transport.h [new file with mode: 0644]
include/linux/of.h
include/linux/of_device.h
include/linux/of_fdt.h
include/linux/pagemap.h
include/linux/platform_data/wkup_m3.h [new file with mode: 0644]
include/linux/platform_device.h
include/linux/preempt.h
include/linux/rbtree.h
include/linux/rbtree_augmented.h
include/linux/rbtree_latch.h [new file with mode: 0644]
include/linux/rcupdate.h
include/linux/remoteproc.h
include/linux/scatterlist.h
include/linux/sched.h
include/linux/seq_file.h
include/linux/seqlock.h
include/linux/sunrpc/bc_xprt.h
include/linux/sunrpc/clnt.h
include/linux/sunrpc/sched.h
include/linux/sunrpc/xprt.h
include/linux/sunrpc/xprtrdma.h
include/linux/sysctl.h
include/linux/sysfs.h
include/linux/virtio_byteorder.h
include/linux/virtio_config.h
include/linux/vringh.h
include/linux/watchdog.h
include/media/v4l2-flash-led-class.h [new file with mode: 0644]
include/media/v4l2-subdev.h
include/net/ax25.h
include/net/sock.h
include/target/iscsi/iscsi_target_core.h
include/target/target_core_backend.h
include/target/target_core_backend_configfs.h [deleted file]
include/target/target_core_base.h
include/target/target_core_configfs.h [deleted file]
include/target/target_core_fabric.h
include/uapi/drm/amdgpu_drm.h
include/uapi/linux/fuse.h
include/uapi/linux/if_tun.h
include/uapi/linux/in.h
include/uapi/linux/libc-compat.h
include/uapi/linux/vhost.h
init/Kconfig
init/main.c
ipc/msg.c
ipc/sem.c
ipc/shm.c
ipc/util.c
ipc/util.h
kernel/Makefile
kernel/cgroup.c
kernel/configs/xen.config [new file with mode: 0644]
kernel/events/core.c
kernel/events/internal.h
kernel/events/ring_buffer.c
kernel/gcov/base.c
kernel/gcov/gcc_4_7.c
kernel/jump_label.c
kernel/kexec.c
kernel/module.c
kernel/panic.c
kernel/params.c
kernel/power/Kconfig
kernel/power/hibernate.c
kernel/printk/printk.c
kernel/relay.c
kernel/sched/core.c
kernel/sched/debug.c
kernel/sched/fair.c
kernel/sched/sched.h
kernel/sched/stats.h
kernel/sysctl.c
kernel/time/Makefile
kernel/time/timekeeping.c
kernel/time/timer.c
kernel/workqueue.c
lib/Kconfig.debug
lib/Makefile
lib/bug.c
lib/crc-t10dif.c
lib/debug_info.c [new file with mode: 0644]
lib/genalloc.c
lib/list_sort.c
lib/rbtree.c
lib/scatterlist.c
mm/Kconfig
mm/backing-dev.c
mm/bootmem.c
mm/filemap.c
mm/internal.h
mm/memblock.c
mm/memory.c
mm/mm_init.c
mm/nobootmem.c
mm/nommu.c
mm/page_alloc.c
mm/page_owner.c
mm/slab_common.c
mm/swapfile.c
net/9p/client.c
net/ax25/af_ax25.c
net/ax25/ax25_in.c
net/bluetooth/hidp/core.c
net/bluetooth/l2cap_core.c
net/ceph/ceph_common.c
net/ceph/crush/crush.c
net/ceph/crush/crush_ln_table.h
net/ceph/crush/hash.c
net/ceph/crush/mapper.c
net/ceph/messenger.c
net/ceph/mon_client.c
net/ceph/osd_client.c
net/ceph/osdmap.c
net/ceph/pagevec.c
net/core/flow_dissector.c
net/core/sock.c
net/dsa/slave.c
net/ipv4/fib_semantics.c
net/ipv4/netfilter.c
net/mac80211/rate.c
net/sched/cls_flower.c
net/sctp/output.c
net/sctp/socket.c
net/sunrpc/Makefile
net/sunrpc/auth.c
net/sunrpc/backchannel_rqst.c
net/sunrpc/bc_svc.c [deleted file]
net/sunrpc/clnt.c
net/sunrpc/debugfs.c
net/sunrpc/svc.c
net/sunrpc/xprt.c
net/sunrpc/xprtrdma/fmr_ops.c
net/sunrpc/xprtrdma/frwr_ops.c
net/sunrpc/xprtrdma/physical_ops.c
net/sunrpc/xprtrdma/rpc_rdma.c
net/sunrpc/xprtrdma/transport.c
net/sunrpc/xprtrdma/verbs.c
net/sunrpc/xprtrdma/xprt_rdma.h
net/sunrpc/xprtsock.c
net/tipc/bcast.c
net/tipc/link.c
net/tipc/link.h
scripts/dtc/checks.c
scripts/dtc/data.c
scripts/dtc/dtc-lexer.l
scripts/dtc/dtc-lexer.lex.c_shipped
scripts/dtc/dtc-parser.tab.c_shipped
scripts/dtc/dtc-parser.tab.h_shipped
scripts/dtc/dtc-parser.y
scripts/dtc/dtc.c
scripts/dtc/dtc.h
scripts/dtc/flattree.c
scripts/dtc/fstree.c
scripts/dtc/libfdt/Makefile.libfdt
scripts/dtc/libfdt/fdt.c
scripts/dtc/libfdt/fdt.h
scripts/dtc/libfdt/fdt_empty_tree.c
scripts/dtc/libfdt/fdt_ro.c
scripts/dtc/libfdt/fdt_rw.c
scripts/dtc/libfdt/fdt_sw.c
scripts/dtc/libfdt/fdt_wip.c
scripts/dtc/libfdt/libfdt.h
scripts/dtc/libfdt/libfdt_env.h
scripts/dtc/libfdt/libfdt_internal.h
scripts/dtc/livetree.c
scripts/dtc/srcpos.c
scripts/dtc/srcpos.h
scripts/dtc/treesource.c
scripts/dtc/update-dtc-source.sh
scripts/dtc/util.c
scripts/dtc/util.h
scripts/dtc/version_gen.h
scripts/gdb/linux/dmesg.py
scripts/gdb/linux/lists.py [new file with mode: 0644]
scripts/gdb/linux/symbols.py
scripts/gdb/linux/tasks.py
scripts/gdb/linux/utils.py
scripts/gdb/vmlinux-gdb.py
scripts/kconfig/Makefile
scripts/kconfig/expr.c
scripts/kconfig/expr.h
scripts/kconfig/symbol.c
scripts/kconfig/zconf.l
scripts/kconfig/zconf.lex.c_shipped
scripts/kconfig/zconf.tab.c_shipped
scripts/kconfig/zconf.y
scripts/link-vmlinux.sh
scripts/sortextable.c
scripts/tags.sh
security/apparmor/lsm.c
security/inode.c
security/integrity/ima/ima_crypto.c
security/selinux/selinuxfs.c
security/smack/smackfs.c
sound/core/ctljack.c
sound/core/init.c
sound/core/memalloc.c
sound/pci/hda/hda_intel.c
sound/pci/hda/patch_hdmi.c
sound/pci/hda/patch_realtek.c
sound/pci/hda/patch_via.c
tools/build/Makefile.build
tools/include/linux/compiler.h
tools/include/linux/export.h [deleted file]
tools/include/linux/rbtree.h [new file with mode: 0644]
tools/include/linux/rbtree_augmented.h [new file with mode: 0644]
tools/lib/rbtree.c [new file with mode: 0644]
tools/perf/Documentation/perf-stat.txt
tools/perf/MANIFEST
tools/perf/Makefile
tools/perf/Makefile.perf
tools/perf/builtin-inject.c
tools/perf/builtin-kmem.c
tools/perf/builtin-kvm.c
tools/perf/builtin-mem.c
tools/perf/builtin-report.c
tools/perf/builtin-stat.c
tools/perf/builtin-top.c
tools/perf/builtin-trace.c
tools/perf/config/Makefile
tools/perf/tests/Build
tools/perf/tests/builtin-test.c
tools/perf/tests/code-reading.c
tools/perf/tests/keep-tracking.c
tools/perf/tests/make
tools/perf/tests/mmap-basic.c
tools/perf/tests/mmap-thread-lookup.c
tools/perf/tests/openat-syscall-all-cpus.c
tools/perf/tests/openat-syscall-tp-fields.c
tools/perf/tests/openat-syscall.c
tools/perf/tests/switch-tracking.c
tools/perf/tests/tests.h
tools/perf/tests/thread-map.c [new file with mode: 0644]
tools/perf/ui/browsers/hists.c
tools/perf/util/Build
tools/perf/util/auxtrace.c
tools/perf/util/auxtrace.h
tools/perf/util/cloexec.c
tools/perf/util/cpumap.c
tools/perf/util/cpumap.h
tools/perf/util/event.c
tools/perf/util/evlist.c
tools/perf/util/evlist.h
tools/perf/util/evsel.c
tools/perf/util/evsel.h
tools/perf/util/header.c
tools/perf/util/include/linux/rbtree.h [deleted file]
tools/perf/util/include/linux/rbtree_augmented.h [deleted file]
tools/perf/util/machine.c
tools/perf/util/parse-events.c
tools/perf/util/parse-events.l
tools/perf/util/pmu.c
tools/perf/util/probe-event.c
tools/perf/util/python-ext-sources
tools/perf/util/python.c
tools/perf/util/record.c
tools/perf/util/session.c
tools/perf/util/stat.c
tools/perf/util/stat.h
tools/perf/util/svghelper.c
tools/perf/util/symbol.c
tools/perf/util/thread_map.c
tools/perf/util/thread_map.h
tools/power/acpi/common/getopt.c
tools/power/acpi/man/acpidump.8
tools/power/acpi/os_specific/service_layers/oslinuxtbl.c
tools/power/acpi/os_specific/service_layers/osunixmap.c
tools/power/acpi/tools/acpidump/acpidump.h
tools/power/acpi/tools/acpidump/apdump.c
tools/power/acpi/tools/acpidump/apfiles.c
tools/power/acpi/tools/acpidump/apmain.c
virt/kvm/kvm_main.c

diff --git a/CREDITS b/CREDITS
index 4df764ebe217236e027e9deccf2569d519af09dc..1d616640bbf64a960a881c56ed6f5b0a5bd17b28 100644 (file)
--- a/CREDITS
+++ b/CREDITS
@@ -2740,6 +2740,10 @@ S: C/ Mieses 20, 9-B
 S: Valladolid 47009
 S: Spain
 
+N: Jens Osterkamp
+E: jens@de.ibm.com
+D: Maintainer of Spidernet network driver for Cell
+
 N: Gadi Oxman
 E: gadio@netvision.net.il
 D: Original author and maintainer of IDE/ATAPI floppy/tape drivers
diff --git a/Documentation/ABI/testing/sysfs-driver-toshiba_haps b/Documentation/ABI/testing/sysfs-driver-toshiba_haps
new file mode 100644 (file)
index 0000000..a662370
--- /dev/null
@@ -0,0 +1,20 @@
+What:          /sys/devices/LNXSYSTM:00/LNXSYBUS:00/TOS620A:00/protection_level
+Date:          August 16, 2014
+KernelVersion: 3.17
+Contact:       Azael Avalos <coproscefalo@gmail.com>
+Description:   This file controls the built-in accelerometer protection level,
+               valid values are:
+                       * 0 -> Disabled
+                       * 1 -> Low
+                       * 2 -> Medium
+                       * 3 -> High
+               The default protection value is set to 2 (Medium).
+Users:         KToshiba
+
+What:          /sys/devices/LNXSYSTM:00/LNXSYBUS:00/TOS620A:00/reset_protection
+Date:          August 16, 2014
+KernelVersion: 3.17
+Contact:       Azael Avalos <coproscefalo@gmail.com>
+Description:   This file turns off the built-in accelerometer for a few
+               seconds and then restore normal operation. Accepting 1 as the
+               only parameter.
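
The two attributes documented in the hunk above form a small sysfs interface. As a usage illustration (editorial addition, not part of the commit), the C snippet below writes a new protection level and then issues a reset, assuming the TOS620A device path quoted in the ABI text exists on the running system.

#include <stdio.h>

/*
 * User-space sketch of the ABI described above.  The base path is the one
 * given in the documentation; whether it is present depends on the
 * platform (Toshiba HAPS hardware).
 */
int main(void)
{
	const char *base = "/sys/devices/LNXSYSTM:00/LNXSYBUS:00/TOS620A:00";
	char path[256];
	FILE *f;

	/* Raise the protection level to 3 (High). */
	snprintf(path, sizeof(path), "%s/protection_level", base);
	f = fopen(path, "w");
	if (f) {
		fputs("3\n", f);
		fclose(f);
	}

	/* reset_protection accepts 1 as its only parameter. */
	snprintf(path, sizeof(path), "%s/reset_protection", base);
	f = fopen(path, "w");
	if (f) {
		fputs("1\n", f);
		fclose(f);
	}
	return 0;
}
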
diff --git a/Documentation/clk.txt b/Documentation/clk.txt
index 0e4f90aa1c136eaa40c9d93e3586bbffdd180de4..f463bdc37f885c80d4614d6c9ab729c739e298d5 100644 (file)
@@ -230,30 +230,7 @@ clk_register(...)
 
 See the basic clock types in drivers/clk/clk-*.c for examples.
 
-       Part 5 - static initialization of clock data
-
-For platforms with many clocks (often numbering into the hundreds) it
-may be desirable to statically initialize some clock data.  This
-presents a problem since the definition of struct clk should be hidden
-from everyone except for the clock core in drivers/clk/clk.c.
-
-To get around this problem struct clk's definition is exposed in
-include/linux/clk-private.h along with some macros for more easily
-initializing instances of the basic clock types.  These clocks must
-still be initialized with the common clock framework via a call to
-__clk_init.
-
-clk-private.h must NEVER be included by code which implements struct
-clk_ops callbacks, nor must it be included by any logic which pokes
-around inside of struct clk at run-time.  To do so is a layering
-violation.
-
-To better enforce this policy, always follow this simple rule: any
-statically initialized clock data MUST be defined in a separate file
-from the logic that implements its ops.  Basically separate the logic
-from the data and all is well.
-
-       Part 6 - Disabling clock gating of unused clocks
+       Part 5 - Disabling clock gating of unused clocks
 
 Sometimes during development it can be useful to be able to bypass the
 default disabling of unused clocks. For example, if drivers aren't enabling
@@ -264,7 +241,7 @@ are sorted out.
 To bypass this disabling, include "clk_ignore_unused" in the bootargs to the
 kernel.
 
-       Part 7 - Locking
+       Part 6 - Locking
 
 The common clock framework uses two global locks, the prepare lock and the
 enable lock.
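
Editorial note on the locking section retained above: in the common clock framework the prepare lock is a sleeping mutex and the enable lock is a spinlock, so clk_prepare()/clk_unprepare() may only be called from contexts that can sleep, while clk_enable()/clk_disable() are safe in atomic context. A hedged consumer-side sketch of that split (generic clk API usage, not code from this commit):

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/platform_device.h>

/*
 * Typical consumer pattern under the two-lock scheme: prepare (may sleep,
 * takes the prepare mutex) during probe, enable (atomic-safe, takes the
 * enable spinlock) where the clock is actually needed.  Error handling is
 * abbreviated.
 */
static int example_probe(struct platform_device *pdev)
{
	struct clk *clk;
	int ret;

	clk = devm_clk_get(&pdev->dev, NULL);	/* first (unnamed) clock */
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	ret = clk_prepare(clk);			/* sleeping context only */
	if (ret)
		return ret;

	ret = clk_enable(clk);			/* atomic context is fine */
	if (ret) {
		clk_unprepare(clk);
		return ret;
	}

	/* ... program the hardware ... */

	clk_disable(clk);
	clk_unprepare(clk);
	return 0;
}
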
diff --git a/Documentation/devicetree/bindings/arc/archs-idu-intc.txt b/Documentation/devicetree/bindings/arc/archs-idu-intc.txt
new file mode 100644 (file)
index 0000000..0dcb7c7
--- /dev/null
@@ -0,0 +1,46 @@
+* ARC-HS Interrupt Distribution Unit
+
+  This optional 2nd level interrupt controller can be used in SMP configurations for
+  dynamic IRQ routing, load balancing of common/external IRQs towards core intc.
+
+Properties:
+
+- compatible: "snps,archs-idu-intc"
+- interrupt-controller: This is an interrupt controller.
+- interrupt-parent: <reference to parent core intc>
+- #interrupt-cells: Must be <2>.
+- interrupts: <...> specifies the upstream core irqs
+
+  First cell specifies the "common" IRQ from peripheral to IDU
+  Second cell specifies the irq distribution mode to cores
+     0=Round Robin; 1=cpu0, 2=cpu1, 4=cpu2, 8=cpu3
+
+  intc accessed via the special ARC AUX register interface, hence "reg" property
+  is not specified.
+
+Example:
+       core_intc: core-interrupt-controller {
+               compatible = "snps,archs-intc";
+               interrupt-controller;
+               #interrupt-cells = <1>;
+       };
+
+       idu_intc: idu-interrupt-controller {
+               compatible = "snps,archs-idu-intc";
+               interrupt-controller;
+               interrupt-parent = <&core_intc>;
+
+               /*
+                * <hwirq  distribution>
+                * distribution: 0=RR; 1=cpu0, 2=cpu1, 4=cpu2, 8=cpu3
+                */
+               #interrupt-cells = <2>;
+
+               /* upstream core irqs: downstream these are "COMMON" irq 0,1..  */
+               interrupts = <24 25 26 27 28 29 30 31>;
+       };
+
+       some_device: serial@c0fc1000 {
+               interrupt-parent = <&idu_intc>;
+               interrupts = <0 0>;     /* upstream idu IRQ #24, Round Robin */
+       };
diff --git a/Documentation/devicetree/bindings/arc/archs-intc.txt b/Documentation/devicetree/bindings/arc/archs-intc.txt
new file mode 100644 (file)
index 0000000..69f326d
--- /dev/null
@@ -0,0 +1,22 @@
+* ARC-HS incore Interrupt Controller (Provided by cores implementing ARCv2 ISA)
+
+Properties:
+
+- compatible: "snps,archs-intc"
+- interrupt-controller: This is an interrupt controller.
+- #interrupt-cells: Must be <1>.
+
+  Single Cell "interrupts" property of a device specifies the IRQ number
+  between 16 to 256
+
+  intc accessed via the special ARC AUX register interface, hence "reg" property
+  is not specified.
+
+Example:
+
+       intc: interrupt-controller {
+               compatible = "snps,archs-intc";
+               interrupt-controller;
+               #interrupt-cells = <1>;
+               interrupts = <16 17 18 19 20 21 22 23 24 25>;
+       };
diff --git a/Documentation/devicetree/bindings/arc/axs101.txt b/Documentation/devicetree/bindings/arc/axs101.txt
new file mode 100644 (file)
index 0000000..48290d5
--- /dev/null
@@ -0,0 +1,7 @@
+Synopsys DesignWare ARC Software Development Platforms Device Tree Bindings
+---------------------------------------------------------------------------
+
+SDP Main Board with an AXC001 CPU Card hoisting ARC700 core in silicon
+
+Required root node properties:
+    - compatible = "snps,axs101", "snps,arc-sdp";
diff --git a/Documentation/devicetree/bindings/arc/axs103.txt b/Documentation/devicetree/bindings/arc/axs103.txt
new file mode 100644 (file)
index 0000000..6eea862
--- /dev/null
@@ -0,0 +1,8 @@
+Synopsys DesignWare ARC Software Development Platforms Device Tree Bindings
+---------------------------------------------------------------------------
+
+SDP Main Board with an AXC003 FPGA Card which can contain various flavours of
+HS38x cores.
+
+Required root node properties:
+    - compatible = "snps,axs103", "snps,arc-sdp";
diff --git a/Documentation/devicetree/bindings/arm/mediatek/mediatek,apmixedsys.txt b/Documentation/devicetree/bindings/arm/mediatek/mediatek,apmixedsys.txt
new file mode 100644 (file)
index 0000000..936166f
--- /dev/null
@@ -0,0 +1,23 @@
+Mediatek apmixedsys controller
+==============================
+
+The Mediatek apmixedsys controller provides the PLLs to the system.
+
+Required Properties:
+
+- compatible: Should be:
+       - "mediatek,mt8135-apmixedsys"
+       - "mediatek,mt8173-apmixedsys"
+- #clock-cells: Must be 1
+
+The apmixedsys controller uses the common clk binding from
+Documentation/devicetree/bindings/clock/clock-bindings.txt
+The available clocks are defined in dt-bindings/clock/mt*-clk.h.
+
+Example:
+
+apmixedsys: clock-controller@10209000 {
+       compatible = "mediatek,mt8173-apmixedsys";
+       reg = <0 0x10209000 0 0x1000>;
+       #clock-cells = <1>;
+};
diff --git a/Documentation/devicetree/bindings/arm/mediatek/mediatek,infracfg.txt b/Documentation/devicetree/bindings/arm/mediatek/mediatek,infracfg.txt
new file mode 100644 (file)
index 0000000..f6cd3e4
--- /dev/null
@@ -0,0 +1,30 @@
+Mediatek infracfg controller
+============================
+
+The Mediatek infracfg controller provides various clocks and reset
+outputs to the system.
+
+Required Properties:
+
+- compatible: Should be:
+       - "mediatek,mt8135-infracfg", "syscon"
+       - "mediatek,mt8173-infracfg", "syscon"
+- #clock-cells: Must be 1
+- #reset-cells: Must be 1
+
+The infracfg controller uses the common clk binding from
+Documentation/devicetree/bindings/clock/clock-bindings.txt
+The available clocks are defined in dt-bindings/clock/mt*-clk.h.
+Also it uses the common reset controller binding from
+Documentation/devicetree/bindings/reset/reset.txt.
+The available reset outputs are defined in
+dt-bindings/reset-controller/mt*-resets.h
+
+Example:
+
+infracfg: power-controller@10001000 {
+       compatible = "mediatek,mt8173-infracfg", "syscon";
+       reg = <0 0x10001000 0 0x1000>;
+       #clock-cells = <1>;
+       #reset-cells = <1>;
+};
diff --git a/Documentation/devicetree/bindings/arm/mediatek/mediatek,pericfg.txt b/Documentation/devicetree/bindings/arm/mediatek/mediatek,pericfg.txt
new file mode 100644 (file)
index 0000000..f25b854
--- /dev/null
@@ -0,0 +1,30 @@
+Mediatek pericfg controller
+===========================
+
+The Mediatek pericfg controller provides various clocks and reset
+outputs to the system.
+
+Required Properties:
+
+- compatible: Should be:
+       - "mediatek,mt8135-pericfg", "syscon"
+       - "mediatek,mt8173-pericfg", "syscon"
+- #clock-cells: Must be 1
+- #reset-cells: Must be 1
+
+The pericfg controller uses the common clk binding from
+Documentation/devicetree/bindings/clock/clock-bindings.txt
+The available clocks are defined in dt-bindings/clock/mt*-clk.h.
+Also it uses the common reset controller binding from
+Documentation/devicetree/bindings/reset/reset.txt.
+The available reset outputs are defined in
+dt-bindings/reset-controller/mt*-resets.h
+
+Example:
+
+pericfg: power-controller@10003000 {
+       compatible = "mediatek,mt8173-pericfg", "syscon";
+       reg = <0 0x10003000 0 0x1000>;
+       #clock-cells = <1>;
+       #reset-cells = <1>;
+};
diff --git a/Documentation/devicetree/bindings/arm/mediatek/mediatek,topckgen.txt b/Documentation/devicetree/bindings/arm/mediatek/mediatek,topckgen.txt
new file mode 100644 (file)
index 0000000..f9e9179
--- /dev/null
@@ -0,0 +1,23 @@
+Mediatek topckgen controller
+============================
+
+The Mediatek topckgen controller provides various clocks to the system.
+
+Required Properties:
+
+- compatible: Should be:
+       - "mediatek,mt8135-topckgen"
+       - "mediatek,mt8173-topckgen"
+- #clock-cells: Must be 1
+
+The topckgen controller uses the common clk binding from
+Documentation/devicetree/bindings/clock/clock-bindings.txt
+The available clocks are defined in dt-bindings/clock/mt*-clk.h.
+
+Example:
+
+topckgen: power-controller@10000000 {
+       compatible = "mediatek,mt8173-topckgen";
+       reg = <0 0x10000000 0 0x1000>;
+       #clock-cells = <1>;
+};
diff --git a/Documentation/devicetree/bindings/clock/amlogic,meson8b-clkc.txt b/Documentation/devicetree/bindings/clock/amlogic,meson8b-clkc.txt
new file mode 100644 (file)
index 0000000..2b7b3fa
--- /dev/null
@@ -0,0 +1,40 @@
+* Amlogic Meson8b Clock and Reset Unit
+
+The Amlogic Meson8b clock controller generates and supplies clocks to various
+controllers within the SoC.
+
+Required Properties:
+
+- compatible: should be "amlogic,meson8b-clkc"
+- reg: it must be composed of two tuples:
+       0) physical base address of the xtal register and length of memory
+          mapped region.
+       1) physical base address of the clock controller and length of memory
+          mapped region.
+
+- #clock-cells: should be 1.
+
+Each clock is assigned an identifier and client nodes can use this identifier
+to specify the clock which they consume. All available clocks are defined as
+preprocessor macros in the dt-bindings/clock/meson8b-clkc.h header and can be
+used in device tree sources.
+
+Example: Clock controller node:
+
+       clkc: clock-controller@c1104000 {
+               #clock-cells = <1>;
+               compatible = "amlogic,meson8b-clkc";
+               reg = <0xc1108000 0x4>, <0xc1104000 0x460>;
+       };
+
+
+Example: UART controller node that consumes the clock generated by the clock
+  controller:
+
+       uart_AO: serial@c81004c0 {
+               compatible = "amlogic,meson-uart";
+               reg = <0xc81004c0 0x14>;
+               interrupts = <0 90 1>;
+               clocks = <&clkc CLKID_CLK81>;
+               status = "disabled";
+       };
diff --git a/Documentation/devicetree/bindings/clock/bcm-cygnus-clock.txt b/Documentation/devicetree/bindings/clock/bcm-cygnus-clock.txt
deleted file mode 100644 (file)
index 00d26ed..0000000
+++ /dev/null
@@ -1,34 +0,0 @@
-Broadcom Cygnus Clocks
-
-This binding uses the common clock binding:
-Documentation/devicetree/bindings/clock/clock-bindings.txt
-
-Currently various "fixed" clocks are declared for peripheral drivers that use
-the common clock framework to reference their core clocks. Proper support of
-these clocks will be added later
-
-Device tree example:
-
-       clocks {
-               #address-cells = <1>;
-               #size-cells = <1>;
-               ranges;
-
-               osc: oscillator {
-                       compatible = "fixed-clock";
-                       #clock-cells = <1>;
-                       clock-frequency = <25000000>;
-               };
-
-               apb_clk: apb_clk {
-                       compatible = "fixed-clock";
-                       #clock-cells = <0>;
-                       clock-frequency = <1000000000>;
-               };
-
-               periph_clk: periph_clk {
-                       compatible = "fixed-clock";
-                       #clock-cells = <0>;
-                       clock-frequency = <500000000>;
-               };
-       };
diff --git a/Documentation/devicetree/bindings/clock/brcm,iproc-clocks.txt b/Documentation/devicetree/bindings/clock/brcm,iproc-clocks.txt
new file mode 100644 (file)
index 0000000..da8d9bb
--- /dev/null
@@ -0,0 +1,132 @@
+Broadcom iProc Family Clocks
+
+This binding uses the common clock binding:
+    Documentation/devicetree/bindings/clock/clock-bindings.txt
+
+The iProc clock controller manages clocks that are common to the iProc family.
+An SoC from the iProc family may have several PLLs, e.g., ARMPLL, GENPLL,
+LCPLL0, MIPIPLL, etc., all derived from an onboard crystal. Each PLL
+comprises several leaf clocks.
+
+Required properties for a PLL and its leaf clocks:
+
+- compatible:
+    Should have a value of the form "brcm,<soc>-<pll>". For example, GENPLL on
+Cygnus has a compatible string of "brcm,cygnus-genpll"
+
+- #clock-cells:
+    Have a value of <1> since there is more than one leaf clock for a given PLL
+
+- reg:
+    Define the base and range of the I/O address space that contains the iProc
+clock control registers required for the PLL
+
+- clocks:
+    The input parent clock phandle for the PLL. For most iProc PLLs, this is an
+onboard crystal with a fixed rate
+
+- clock-output-names:
+    An ordered list of strings defining the names of the clocks
+
+Example:
+
+       osc: oscillator {
+               #clock-cells = <0>;
+               compatible = "fixed-clock";
+               clock-frequency = <25000000>;
+       };
+
+       genpll: genpll {
+               #clock-cells = <1>;
+               compatible = "brcm,cygnus-genpll";
+               reg = <0x0301d000 0x2c>, <0x0301c020 0x4>;
+               clocks = <&osc>;
+               clock-output-names = "genpll", "axi21", "250mhz", "ihost_sys",
+                                    "enet_sw", "audio_125", "can";
+       };
+
+Required properties for ASIU clocks:
+
+ASIU clocks are a special case. These clocks are derived directly from the
+reference clock of the onboard crystal
+
+- compatible:
+    Should have a value of the form "brcm,<soc>-asiu-clk". For example, ASIU
+clocks for Cygnus have a compatible string of "brcm,cygnus-asiu-clk"
+
+- #clock-cells:
+    Have a value of <1> since there is more than one ASIU clock
+
+- reg:
+    Define the base and range of the I/O address space that contains the iProc
+clock control registers required for ASIU clocks
+
+- clocks:
+    The input parent clock phandle for the ASIU clock, i.e., the onboard
+crystal
+
+- clock-output-names:
+    An ordered list of strings defining the names of the ASIU clocks
+
+Example:
+
+       osc: oscillator {
+               #clock-cells = <0>;
+               compatible = "fixed-clock";
+               clock-frequency = <25000000>;
+       };
+
+       asiu_clks: asiu_clks {
+               #clock-cells = <1>;
+               compatible = "brcm,cygnus-asiu-clk";
+               reg = <0x0301d048 0xc>, <0x180aa024 0x4>;
+               clocks = <&osc>;
+               clock-output-names = "keypad", "adc/touch", "pwm";
+       };
+
+Cygnus
+------
+PLL and leaf clock compatible strings for Cygnus are:
+    "brcm,cygnus-armpll"
+    "brcm,cygnus-genpll"
+    "brcm,cygnus-lcpll0"
+    "brcm,cygnus-mipipll"
+    "brcm,cygnus-asiu-clk"
+
+The following table defines the set of PLL/clock indices and IDs for Cygnus.
+These clock IDs are defined in:
+    "include/dt-bindings/clock/bcm-cygnus.h"
+
+    Clock      Source (Parent)  Index   ID
+    ---        -----            -----   ---------
+    crystal    N/A              N/A     N/A
+
+    armpll     crystal          N/A     N/A
+
+    keypad     crystal (ASIU)   0       BCM_CYGNUS_ASIU_KEYPAD_CLK
+    adc/tsc    crystal (ASIU)   1       BCM_CYGNUS_ASIU_ADC_CLK
+    pwm        crystal (ASIU)   2       BCM_CYGNUS_ASIU_PWM_CLK
+
+    genpll     crystal          0       BCM_CYGNUS_GENPLL
+    axi21      genpll           1       BCM_CYGNUS_GENPLL_AXI21_CLK
+    250mhz     genpll           2       BCM_CYGNUS_GENPLL_250MHZ_CLK
+    ihost_sys  genpll           3       BCM_CYGNUS_GENPLL_IHOST_SYS_CLK
+    enet_sw    genpll           4       BCM_CYGNUS_GENPLL_ENET_SW_CLK
+    audio_125  genpll           5       BCM_CYGNUS_GENPLL_AUDIO_125_CLK
+    can        genpll           6       BCM_CYGNUS_GENPLL_CAN_CLK
+
+    lcpll0     crystal          0       BCM_CYGNUS_LCPLL0
+    pcie_phy   lcpll0           1       BCM_CYGNUS_LCPLL0_PCIE_PHY_REF_CLK
+    ddr_phy    lcpll0           2       BCM_CYGNUS_LCPLL0_DDR_PHY_CLK
+    sdio       lcpll0           3       BCM_CYGNUS_LCPLL0_SDIO_CLK
+    usb_phy    lcpll0           4       BCM_CYGNUS_LCPLL0_USB_PHY_REF_CLK
+    smart_card lcpll0           5       BCM_CYGNUS_LCPLL0_SMART_CARD_CLK
+    ch5_unused lcpll0           6       BCM_CYGNUS_LCPLL0_CH5_UNUSED
+
+    mipipll    crystal          0       BCM_CYGNUS_MIPIPLL
+    ch0_unused mipipll          1       BCM_CYGNUS_MIPIPLL_CH0_UNUSED
+    ch1_lcd    mipipll          2       BCM_CYGNUS_MIPIPLL_CH1_LCD
+    ch2_v3d    mipipll          3       BCM_CYGNUS_MIPIPLL_CH2_V3D
+    ch3_unused mipipll          4       BCM_CYGNUS_MIPIPLL_CH3_UNUSED
+    ch4_unused mipipll          5       BCM_CYGNUS_MIPIPLL_CH4_UNUSED
+    ch5_unused mipipll          6       BCM_CYGNUS_MIPIPLL_CH5_UNUSED
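+
+Example of a clock consumer (an illustrative sketch only; the consumer node,
+its unit address and the chosen leaf clock are hypothetical and merely show
+how a clock ID from dt-bindings/clock/bcm-cygnus.h is referenced):
+
+       eth: ethernet@18042000 {
+               ...
+               clocks = <&genpll BCM_CYGNUS_GENPLL_ENET_SW_CLK>;
+               ...
+       };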
index 06fc6d541c8936c67ea609265ab1b56d3c280a57..2ec489eebe723afb0f6cf1700d7869e9d84f0ac6 100644 (file)
@@ -138,9 +138,10 @@ Some platforms may require initial configuration of default parent clocks
 and clock frequencies. Such a configuration can be specified in a device tree
 node through assigned-clocks, assigned-clock-parents and assigned-clock-rates
 properties. The assigned-clock-parents property should contain a list of parent
-clocks in form of phandle and clock specifier pairs, the assigned-clock-parents
-property the list of assigned clock frequency values - corresponding to clocks
-listed in the assigned-clocks property.
+clocks in the form of phandle and clock specifier pairs, and the
+assigned-clock-rates property should contain a list of frequencies in Hz. Both
+these properties should correspond to the clocks listed in the assigned-clocks
+property.
 
 To skip setting the parent or rate of a clock, its corresponding entry should
 be set to 0, or it can be omitted if it is not followed by any non-zero entry.
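+
+For example (an illustrative sketch only; the consumer node, the clock
+controller phandles, the specifier values and the rates below are
+hypothetical):
+
+       serial@1000 {
+               clocks = <&clkcon 16>, <&clkcon 17>;
+               /* reparent the first clock, leave the second one alone */
+               assigned-clocks = <&clkcon 16>, <&clkcon 17>;
+               assigned-clock-parents = <&pll 2>;
+               /* skip the rate of the first clock, set the second to 460800 Hz */
+               assigned-clock-rates = <0>, <460800>;
+       };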
diff --git a/Documentation/devicetree/bindings/clock/csr,atlas7-car.txt b/Documentation/devicetree/bindings/clock/csr,atlas7-car.txt
new file mode 100644 (file)
index 0000000..54d6d13
--- /dev/null
@@ -0,0 +1,55 @@
+* Clock and reset bindings for CSR atlas7
+
+Required properties:
+- compatible: Should be "sirf,atlas7-car"
+- reg: Address and length of the register set
+- #clock-cells: Should be <1>
+- #reset-cells: Should be <1>
+
+The clock consumer should specify the desired clock by having the clock
+ID in its "clocks" phandle cell.
+The clock IDs are listed in atlas7_clks, defined in drivers/clk/sirf/clk-atlas7.c.
+
+The reset consumer should specify the desired reset by having the reset
+ID in its "resets" phandle cell.
+The reset IDs are listed in atlas7_reset_unit, defined in drivers/clk/sirf/clk-atlas7.c.
+
+Examples: Clock and reset controller node:
+
+car: clock-controller@18620000 {
+       compatible = "sirf,atlas7-car";
+       reg = <0x18620000 0x1000>;
+       #clock-cells = <1>;
+       #reset-cells = <1>;
+};
+
+Examples: Consumers using clock or reset:
+
+timer@10dc0000 {
+       compatible = "sirf,macro-tick";
+       reg = <0x10dc0000 0x1000>;
+       clocks = <&car 54>;
+       interrupts = <0 0 0>,
+                  <0 1 0>,
+                  <0 2 0>,
+                  <0 49 0>,
+                  <0 50 0>,
+                  <0 51 0>;
+};
+
+uart1: uart@18020000 {
+       cell-index = <1>;
+       compatible = "sirf,macro-uart";
+       reg = <0x18020000 0x1000>;
+       clocks = <&car 95>;
+       interrupts = <0 18 0>;
+       fifosize = <32>;
+};
+
+vpp@13110000 {
+       compatible = "sirf,prima2-vpp";
+       reg = <0x13110000 0x10000>;
+       interrupts = <0 31 0>;
+       clocks = <&car 85>;
+       resets = <&car 29>;
+};
index 60bbb1a8c69a385274f2aeaafd8961c0a5dd7984..268ca615459e754900f979bda8542b3c5d2cbac3 100644 (file)
@@ -52,7 +52,7 @@ usia_u0_sclk: usia_u0_sclk {
 
 Example of consumer:
 
-uart@e1020000 {
+serial@e1020000 {
        compatible = "renesas,em-uart";
        reg = <0xe1020000 0x38>;
        interrupts = <0 8 0>;
index 225990f79b7c577f50594cec23e1abb889bc5502..47570d20721599a1c68e950ebf5598e913f0c8a1 100644 (file)
@@ -15,8 +15,8 @@ Required properties:
 - compatible : shall be "ti,keystone,main-pll-clock" or "ti,keystone,pll-clock"
 - clocks : parent clock phandle
 - reg - pll control0 and pll multiplier registers
-- reg-names : control and multiplier. The multiplier is applicable only for
-               main pll clock
+- reg-names : control, multiplier and post-divider. The multiplier and
+               post-divider registers are applicable only for main pll clock
 - fixed-postdiv : fixed post divider value. If absent, use clkod register bits
                for postdiv
 
@@ -25,8 +25,8 @@ Example:
                #clock-cells = <0>;
                compatible = "ti,keystone,main-pll-clock";
                clocks = <&refclksys>;
-               reg = <0x02620350 4>, <0x02310110 4>;
-               reg-names = "control", "multiplier";
+               reg = <0x02620350 4>, <0x02310110 4>, <0x02310108 4>;
+               reg-names = "control", "multiplier", "post-divider";
                fixed-postdiv = <2>;
        };
 
diff --git a/Documentation/devicetree/bindings/clock/lpc1850-ccu.txt b/Documentation/devicetree/bindings/clock/lpc1850-ccu.txt
new file mode 100644 (file)
index 0000000..fa97c12
--- /dev/null
@@ -0,0 +1,77 @@
+* NXP LPC1850 Clock Control Unit (CCU)
+
+Each CGU base clock has several clock branches which can be turned on
+or off independently by the Clock Control Units CCU1 or CCU2. The
+branch clocks are distributed between CCU1 and CCU2.
+
+ - Above text taken from NXP LPC1850 User Manual.
+
+This binding uses the common clock binding:
+    Documentation/devicetree/bindings/clock/clock-bindings.txt
+
+Required properties:
+- compatible:
+       Should be "nxp,lpc1850-ccu"
+- reg:
+       Shall define the base and range of the address space
+       containing clock control registers
+- #clock-cells:
+       Shall have value <1>.  The permitted clock-specifier values
+       are the branch clock names defined in table below.
+- clocks:
+       Shall contain a list of phandles for the base clocks routed
+       from the CGU to the specific CCU. See mapping of base clocks
+       and CCU in table below.
+- clock-names:
+       Shall contain a list of names for the base clocks routed
+       from the CGU to the specific CCU. Valid CCU clock names:
+       "base_usb0_clk",  "base_periph_clk", "base_usb1_clk",
+       "base_cpu_clk",   "base_spifi_clk",  "base_spi_clk",
+       "base_apb1_clk",  "base_apb3_clk",   "base_adchs_clk",
+       "base_sdio_clk",  "base_ssp0_clk",   "base_ssp1_clk",
+       "base_uart0_clk", "base_uart1_clk",  "base_uart2_clk",
+       "base_uart3_clk", "base_audio_clk"
+
+Which branch clocks are available on the CCU depends on the
+specific LPC part. Check the user manual for your specific part.
+
+A list of CCU clocks can be found in dt-bindings/clock/lpc18xx-ccu.h.
+
+Example board file:
+
+soc {
+       ccu1: clock-controller@40051000 {
+               compatible = "nxp,lpc1850-ccu";
+               reg = <0x40051000 0x1000>;
+               #clock-cells = <1>;
+               clocks = <&cgu BASE_APB3_CLK>,   <&cgu BASE_APB1_CLK>,
+                        <&cgu BASE_SPIFI_CLK>,  <&cgu BASE_CPU_CLK>,
+                        <&cgu BASE_PERIPH_CLK>, <&cgu BASE_USB0_CLK>,
+                        <&cgu BASE_USB1_CLK>,   <&cgu BASE_SPI_CLK>;
+               clock-names = "base_apb3_clk",   "base_apb1_clk",
+                             "base_spifi_clk",  "base_cpu_clk",
+                             "base_periph_clk", "base_usb0_clk",
+                             "base_usb1_clk",   "base_spi_clk";
+       };
+
+       ccu2: clock-controller@40052000 {
+               compatible = "nxp,lpc1850-ccu";
+               reg = <0x40052000 0x1000>;
+               #clock-cells = <1>;
+               clocks = <&cgu BASE_AUDIO_CLK>, <&cgu BASE_UART3_CLK>,
+                        <&cgu BASE_UART2_CLK>, <&cgu BASE_UART1_CLK>,
+                        <&cgu BASE_UART0_CLK>, <&cgu BASE_SSP1_CLK>,
+                        <&cgu BASE_SSP0_CLK>,  <&cgu BASE_SDIO_CLK>;
+               clock-names = "base_audio_clk", "base_uart3_clk",
+                             "base_uart2_clk", "base_uart1_clk",
+                             "base_uart0_clk", "base_ssp1_clk",
+                             "base_ssp0_clk",  "base_sdio_clk";
+       };
+
+       /* A user of CCU branch clocks */
+       uart1: serial@40082000 {
+               ...
+               clocks = <&ccu2 CLK_APB0_UART1>, <&ccu1 CLK_CPU_UART1>;
+               ...
+       };
+};
diff --git a/Documentation/devicetree/bindings/clock/lpc1850-cgu.txt b/Documentation/devicetree/bindings/clock/lpc1850-cgu.txt
new file mode 100644 (file)
index 0000000..2cc32a9
--- /dev/null
@@ -0,0 +1,131 @@
+* NXP LPC1850 Clock Generation Unit (CGU)
+
+The CGU generates multiple independent clocks for the core and the
+peripheral blocks of the LPC18xx. Each independent clock is called
+a base clock and itself is one of the inputs to the two Clock
+Control Units (CCUs) which control the branch clocks to the
+individual peripherals.
+
+The CGU selects the inputs to the clock generators from multiple
+clock sources, controls the clock generation, and routes the outputs
+of the clock generators through the clock source bus to the output
+stages. Each output stage provides an independent clock source and
+corresponds to one of the base clocks for the LPC18xx.
+
+ - Above text taken from NXP LPC1850 User Manual.
+
+
+This binding uses the common clock binding:
+    Documentation/devicetree/bindings/clock/clock-bindings.txt
+
+Required properties:
+- compatible:
+       Should be "nxp,lpc1850-cgu"
+- reg:
+       Shall define the base and range of the address space
+       containing clock control registers
+- #clock-cells:
+       Shall have value <1>.  The permitted clock-specifier values
+       are the base clock numbers defined below.
+- clocks:
+       Shall contain a list of phandles for the external input
+       sources to the CGU. The list shall be in the following
+       order: xtal, 32khz, enet_rx_clk, enet_tx_clk, gp_clkin.
+- clock-indices:
+       Shall be an ordered list of numbers defining the base clock
+       number provided by the CGU.
+- clock-output-names:
+       Shall be an ordered list of strings defining the names of
+       the clocks provided by the CGU.
+
+Which base clocks are available on the CGU depends on the
+specific LPC part. Base clocks are numbered from 0 to 27.
+
+Number:                Name:                   Description:
+ 0             BASE_SAFE_CLK           Base safe clock (always on) for WWDT
+ 1             BASE_USB0_CLK           Base clock for USB0
+ 2             BASE_PERIPH_CLK         Base clock for Cortex-M0SUB subsystem,
+                                       SPI, and SGPIO
+ 3             BASE_USB1_CLK           Base clock for USB1
+ 4             BASE_CPU_CLK            System base clock for ARM Cortex-M core
+                                       and APB peripheral blocks #0 and #2
+ 5             BASE_SPIFI_CLK          Base clock for SPIFI
+ 6             BASE_SPI_CLK            Base clock for SPI
+ 7             BASE_PHY_RX_CLK         Base clock for Ethernet PHY Receive clock
+ 8             BASE_PHY_TX_CLK         Base clock for Ethernet PHY Transmit clock
+ 9             BASE_APB1_CLK           Base clock for APB peripheral block # 1
+10             BASE_APB3_CLK           Base clock for APB peripheral block # 3
+11             BASE_LCD_CLK            Base clock for LCD
+12             BASE_ADCHS_CLK          Base clock for ADCHS
+13             BASE_SDIO_CLK           Base clock for SD/MMC
+14             BASE_SSP0_CLK           Base clock for SSP0
+15             BASE_SSP1_CLK           Base clock for SSP1
+16             BASE_UART0_CLK          Base clock for UART0
+17             BASE_UART1_CLK          Base clock for UART1
+18             BASE_UART2_CLK          Base clock for UART2
+19             BASE_UART3_CLK          Base clock for UART3
+20             BASE_OUT_CLK            Base clock for CLKOUT pin
+21-24          -                       Reserved
+25             BASE_AUDIO_CLK          Base clock for audio system (I2S)
+26             BASE_CGU_OUT0_CLK       Base clock for CGU_OUT0 clock output
+27             BASE_CGU_OUT1_CLK       Base clock for CGU_OUT1 clock output
+
+BASE_PERIPH_CLK and BASE_SPI_CLK are only available on LPC43xx.
+BASE_ADCHS_CLK is only available on LPC4370.
+
+
+Example board file:
+
+/ {
+       clocks {
+               xtal: xtal {
+                       compatible = "fixed-clock";
+                       #clock-cells = <0>;
+                       clock-frequency = <12000000>;
+               };
+
+               xtal32: xtal32 {
+                       compatible = "fixed-clock";
+                       #clock-cells = <0>;
+                       clock-frequency = <32768>;
+               };
+
+               enet_rx_clk: enet_rx_clk {
+                       compatible = "fixed-clock";
+                       #clock-cells = <0>;
+                       clock-frequency = <0>;
+                       clock-output-names = "enet_rx_clk";
+               };
+
+               enet_tx_clk: enet_tx_clk {
+                       compatible = "fixed-clock";
+                       #clock-cells = <0>;
+                       clock-frequency = <0>;
+                       clock-output-names = "enet_tx_clk";
+               };
+
+               gp_clkin: gp_clkin {
+                       compatible = "fixed-clock";
+                       #clock-cells = <0>;
+                       clock-frequency = <0>;
+                       clock-output-names = "gp_clkin";
+               };
+       };
+
+       soc {
+               cgu: clock-controller@40050000 {
+                       compatible = "nxp,lpc1850-cgu";
+                       reg = <0x40050000 0x1000>;
+                       #clock-cells = <1>;
+                       clocks = <&xtal>, <&creg_clk 1>, <&enet_rx_clk>, <&enet_tx_clk>, <&gp_clkin>;
+               };
+
+               /* A CGU and CCU clock consumer */
+               lcdc: lcdc@40008000 {
+                       ...
+                       clocks = <&cgu BASE_LCD_CLK>, <&ccu1 CLK_CPU_LCD>;
+                       clock-names = "clcdclk", "apb_pclk";
+                       ...
+               };
+       };
+};
diff --git a/Documentation/devicetree/bindings/clock/marvell,pxa1928.txt b/Documentation/devicetree/bindings/clock/marvell,pxa1928.txt
new file mode 100644 (file)
index 0000000..809c5a2
--- /dev/null
@@ -0,0 +1,21 @@
+* Marvell PXA1928 Clock Controllers
+
+The PXA1928 clock subsystem generates and supplies clocks to various
+controllers within the PXA1928 SoC. The PXA1928 contains three clock controller
+blocks, called APMU, MPMU, and APBC, roughly corresponding to internal buses.
+
+Required Properties:
+
+- compatible: should be one of the following.
+  - "marvell,pxa1928-apmu" - APMU controller compatible
+  - "marvell,pxa1928-mpmu" - MPMU controller compatible
+  - "marvell,pxa1928-apbc" - APBC controller compatible
+- reg: physical base address of the clock controller and length of memory mapped
+  region.
+- #clock-cells: should be 1.
+- #reset-cells: should be 1.
+
+Each clock is assigned an identifier and client nodes use the clock controller
+phandle and this identifier to specify the clock which they consume.
+
+All these identifiers can be found in <dt-bindings/clock/marvell,pxa1928.h>.
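+
+Example (an illustrative sketch only; the unit address and register size are
+hypothetical placeholders rather than values taken from a PXA1928 datasheet):
+
+apmu: clock-controller@d4282800 {
+       compatible = "marvell,pxa1928-apmu";
+       reg = <0xd4282800 0x400>;
+       #clock-cells = <1>;
+       #reset-cells = <1>;
+};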
index 31c7c0c1ce8f6b47a625f8b6f41eafc6a35d402a..660e64912cceb69146f1ea6b4a18e8b533944223 100644 (file)
@@ -19,6 +19,7 @@ ID    Clock   Peripheral
 9      pex1    PCIe Cntrl 1
 15     sata0   SATA Host 0
 17     sdio    SDHCI Host
+23     crypto  CESA (crypto engine)
 25     tdm     Time Division Mplx
 28     ddr     DDR Cntrl
 30     sata1   SATA Host 1
index c6620bc9670364315bcc8687ec81c016e7a89719..7f02fb4ca4adb020a9b7990e6db5532e0fc2abe5 100644 (file)
@@ -20,15 +20,38 @@ Required properties :
 - #reset-cells : Should be 1.
   In clock consumers, this cell represents the bit number in the CAR's
   array of CLK_RST_CONTROLLER_RST_DEVICES_* registers.
+- nvidia,external-memory-controller : phandle of the external memory
+  controller (EMC) node.
+
+The node should contain an "emc-timings" subnode for each supported RAM type
+(see field RAM_CODE in register PMC_STRAPPING_OPT_A).
+
+Required properties for "emc-timings" nodes :
+- nvidia,ram-code : Should contain the value of RAM_CODE this timing set
+  is used for.
+
+Each "emc-timings" node should contain a "timing" subnode for every supported
+EMC clock rate.
+
+Required properties for "timing" nodes :
+- clock-frequency : Should contain the memory clock rate to which this timing
+relates.
+- nvidia,parent-clock-frequency : Should contain the rate at which the current
+parent of the EMC clock should be running at this timing.
+- clocks : Must contain an entry for each entry in clock-names.
+  See ../clocks/clock-bindings.txt for details.
+- clock-names : Must include the following entries:
+  - emc-parent : the clock that should be the parent of the EMC clock at this
+timing.
 
 Example SoC include file:
 
 / {
-       tegra_car: clock {
+       tegra_car: clock@60006000 {
                compatible = "nvidia,tegra124-car";
                reg = <0x60006000 0x1000>;
                #clock-cells = <1>;
                #reset-cells = <1>;
+               nvidia,external-memory-controller = <&emc>;
        };
 
        usb@c5004000 {
@@ -62,4 +85,23 @@ Example board file:
        &tegra_car {
                clocks = <&clk_32k> <&osc>;
        };
+
+       clock@60006000 {
+               emc-timings-3 {
+                       nvidia,ram-code = <3>;
+
+                       timing-12750000 {
+                               clock-frequency = <12750000>;
+                               nvidia,parent-clock-frequency = <408000000>;
+                               clocks = <&tegra_car TEGRA124_CLK_PLL_P>;
+                               clock-names = "emc-parent";
+                       };
+                       timing-20400000 {
+                               clock-frequency = <20400000>;
+                               nvidia,parent-clock-frequency = <408000000>;
+                               clocks = <&tegra_car TEGRA124_CLK_PLL_P>;
+                               clock-names = "emc-parent";
+                       };
+               };
+       };
 };
index 054f65f9319cd71c74936dcab565901559a9d280..5ddb68418655d569e047ec91891d5c6b41bda49e 100644 (file)
@@ -10,9 +10,11 @@ Required Properties:
     - "renesas,r8a73a4-div6-clock" for R8A73A4 (R-Mobile APE6) DIV6 clocks
     - "renesas,r8a7740-div6-clock" for R8A7740 (R-Mobile A1) DIV6 clocks
     - "renesas,r8a7790-div6-clock" for R8A7790 (R-Car H2) DIV6 clocks
-    - "renesas,r8a7791-div6-clock" for R8A7791 (R-Car M2) DIV6 clocks
+    - "renesas,r8a7791-div6-clock" for R8A7791 (R-Car M2-W) DIV6 clocks
+    - "renesas,r8a7793-div6-clock" for R8A7793 (R-Car M2-N) DIV6 clocks
+    - "renesas,r8a7794-div6-clock" for R8A7794 (R-Car E2) DIV6 clocks
     - "renesas,sh73a0-div6-clock" for SH73A0 (SH-Mobile AG5) DIV6 clocks
-    - "renesas,cpg-div6-clock" for generic DIV6 clocks
+    and "renesas,cpg-div6-clock" as a fallback.
   - reg: Base address and length of the memory resource used by the DIV6 clock
   - clocks: Reference to the parent clock(s); either one, four, or eight
     clocks must be specified.  For clocks with multiple parents, invalid
index 0a80fa70ca265c0f2c7b7657d7270f47d6f53a45..16ed18155160f58c9ce17dce42f50f1527261f72 100644 (file)
@@ -13,12 +13,14 @@ Required Properties:
     - "renesas,r7s72100-mstp-clocks" for R7S72100 (RZ) MSTP gate clocks
     - "renesas,r8a73a4-mstp-clocks" for R8A73A4 (R-Mobile APE6) MSTP gate clocks
     - "renesas,r8a7740-mstp-clocks" for R8A7740 (R-Mobile A1) MSTP gate clocks
+    - "renesas,r8a7778-mstp-clocks" for R8A7778 (R-Car M1) MSTP gate clocks
     - "renesas,r8a7779-mstp-clocks" for R8A7779 (R-Car H1) MSTP gate clocks
     - "renesas,r8a7790-mstp-clocks" for R8A7790 (R-Car H2) MSTP gate clocks
-    - "renesas,r8a7791-mstp-clocks" for R8A7791 (R-Car M2) MSTP gate clocks
+    - "renesas,r8a7791-mstp-clocks" for R8A7791 (R-Car M2-W) MSTP gate clocks
+    - "renesas,r8a7793-mstp-clocks" for R8A7793 (R-Car M2-N) MSTP gate clocks
     - "renesas,r8a7794-mstp-clocks" for R8A7794 (R-Car E2) MSTP gate clocks
     - "renesas,sh73a0-mstp-clocks" for SH73A0 (SH-MobileAG5) MSTP gate clocks
-    - "renesas,cpg-mstp-clock" for generic MSTP gate clocks
+    and "renesas,cpg-mstp-clocks" as a fallback.
   - reg: Base address and length of the I/O mapped registers used by the MSTP
     clocks. The first register is the clock control register and is mandatory.
     The second register is the clock status register and is optional when not
index b02944fba9de4f8696d9f6ac7845acb96937aeed..56f111bd3e456619ae872fd49825f48b11604c02 100644 (file)
@@ -10,7 +10,7 @@ Required Properties:
     - "renesas,r8a7791-cpg-clocks" for the r8a7791 CPG
     - "renesas,r8a7793-cpg-clocks" for the r8a7793 CPG
     - "renesas,r8a7794-cpg-clocks" for the r8a7794 CPG
-    - "renesas,rcar-gen2-cpg-clocks" for the generic R-Car Gen2 CPG
+    and "renesas,rcar-gen2-cpg-clocks" as a fallback.
 
   - reg: Base address and length of the memory resource used by the CPG
 
index 98a257492522cd45d967189ef14f0df7dcb60207..b0f7ddb8cdb13750e1673e458fa91d2cfa7e44d5 100644 (file)
@@ -7,7 +7,7 @@ Required Properties:
 
   - compatible: Must be one of
     - "renesas,r7s72100-cpg-clocks" for the r7s72100 CPG
-    - "renesas,rz-cpg-clocks" for the generic RZ CPG
+    and "renesas,rz-cpg-clocks" as a fallback.
   - reg: Base address and length of the memory resource used by the CPG
   - clocks: References to possible parent clocks. Order must match clock modes
     in the datasheet. For the r7s72100, this is extal, usb_x1.
diff --git a/Documentation/devicetree/bindings/clock/st,stm32-rcc.txt b/Documentation/devicetree/bindings/clock/st,stm32-rcc.txt
new file mode 100644 (file)
index 0000000..fee3205
--- /dev/null
@@ -0,0 +1,65 @@
+STMicroelectronics STM32 Reset and Clock Controller
+===================================================
+
+The RCC IP is both a reset and a clock controller. This documentation only
+describes the clock part.
+
+Please also refer to clock-bindings.txt in this directory for common clock
+controller binding usage.
+
+Required properties:
+- compatible: Should be "st,stm32f42xx-rcc"
+- reg: should be register base and length as documented in the
+  datasheet
+- #clock-cells: 2, device nodes should specify the clock in their "clocks"
+  property, containing a phandle to the clock device node, an index selecting
+  between gated clocks and other clocks, and an index specifying the clock to
+  use.
+
+Example:
+
+       rcc: rcc@40023800 {
+               #clock-cells = <2>;
+               compatible = "st,stm32f42xx-rcc", "st,stm32-rcc";
+               reg = <0x40023800 0x400>;
+       };
+
+Specifying gated clocks
+=======================
+
+The primary index must be set to 0.
+
+The secondary index is the bit number within the RCC register bank, starting
+from the first RCC clock enable register (RCC_AHB1ENR, address offset 0x30).
+
+It is calculated as: index = register_offset / 4 * 32 + bit_offset.
+Where bit_offset is the bit offset within the register (LSB is 0, MSB is 31).
+
+Example:
+
+       /* Gated clock, AHB1 bit 0 (GPIOA) */
+       ... {
+               clocks = <&rcc 0 0>;
+       };
+
+       /* Gated clock, AHB2 bit 4 (CRYP) */
+       ... {
+               clocks = <&rcc 0 36>;
+       };
+
+Specifying other clocks
+=======================
+
+The primary index must be set to 1.
+
+The secondary index is bound with the following magic numbers:
+
+       0       SYSTICK
+       1       FCLK
+
+Example:
+
+       /* Misc clock, FCLK */
+       ... {
+               clocks = <&rcc 1 1>;
+       };
index 4fa11af3d378ef281e43717490c012263b901122..8a47b77abfca677e234fdd618ee029935e0861dc 100644 (file)
@@ -67,6 +67,7 @@ Required properties:
        "allwinner,sun4i-a10-usb-clk" - for usb gates + resets on A10 / A20
        "allwinner,sun5i-a13-usb-clk" - for usb gates + resets on A13
        "allwinner,sun6i-a31-usb-clk" - for usb gates + resets on A31
+       "allwinner,sun8i-a23-usb-clk" - for usb gates + resets on A23
        "allwinner,sun9i-a80-usb-mod-clk" - for usb gates + resets on A80
        "allwinner,sun9i-a80-usb-phy-clk" - for usb phy gates + resets on A80
 
diff --git a/Documentation/devicetree/bindings/clock/ti,cdce925.txt b/Documentation/devicetree/bindings/clock/ti,cdce925.txt
new file mode 100644 (file)
index 0000000..4c7669a
--- /dev/null
@@ -0,0 +1,42 @@
+Binding for TI CDCE925 programmable I2C clock synthesizers.
+
+Reference
+This binding uses the common clock binding[1].
+
+[1] Documentation/devicetree/bindings/clock/clock-bindings.txt
+[2] http://www.ti.com/product/cdce925
+
+The driver provides clock sources for each output Y1 through Y5.
+
+Required properties:
+ - compatible: Shall be "ti,cdce925"
+ - reg: I2C device address.
+ - clocks: Points to a fixed parent clock that provides the input frequency.
+ - #clock-cells: From common clock bindings: Shall be 1.
+
+Optional properties:
+ - xtal-load-pf: Crystal load-capacitor value to fine-tune performance on a
+                 board, or to compensate for external influences.
+
+For both PLL1 and PLL2 an optional child node can be used to specify spread
+spectrum clocking parameters for a board.
+  - spread-spectrum: SSC mode as defined in the data sheet.
+  - spread-spectrum-center: Use "centered" mode instead of "max" mode. When
+    present, the clock runs at the requested frequency on average. Otherwise
+    the requested frequency is the maximum value of the SSC range.
+
+
+Example:
+
+       clockgen: cdce925pw@64 {
+               compatible = "cdce925";
+               reg = <0x64>;
+               clocks = <&xtal_27Mhz>;
+               #clock-cells = <1>;
+               xtal-load-pf = <5>;
+               /* PLL options to get SSC 1% centered */
+               PLL2 {
+                       spread-spectrum = <4>;
+                       spread-spectrum-center;
+               };
+       };
diff --git a/Documentation/devicetree/bindings/hwlock/hwlock.txt b/Documentation/devicetree/bindings/hwlock/hwlock.txt
new file mode 100644 (file)
index 0000000..085d1f5
--- /dev/null
@@ -0,0 +1,59 @@
+Generic hwlock bindings
+=======================
+
+Generic bindings that are common to all the platform-specific hwlock driver
+implementations.
+
+Please also look through the individual platform-specific hwlock binding
+documents to identify any additional properties specific to that
+platform.
+
+hwlock providers:
+=================
+
+Required properties:
+- #hwlock-cells:        Specifies the number of cells needed to represent a
+                        specific lock.
+
+hwlock users:
+=============
+
+Consumers that require specific hwlock(s) should specify them using the
+property "hwlocks", and an optional "hwlock-names" property.
+
+Required properties:
+- hwlocks:              List of phandle to a hwlock provider node and an
+                        associated hwlock args specifier as indicated by
+                        #hwlock-cells. The list can have just a single hwlock
+                        or multiple hwlocks, with each hwlock represented by
+                        a phandle and a corresponding args specifier.
+
+Optional properties:
+- hwlock-names:         List of hwlock name strings defined in the same order
+                        as the hwlocks, with one name per hwlock. Consumers can
+                        use the hwlock-names to match and get a specific hwlock.
+
+
+1. Example of a node using a single specific hwlock:
+
+The following example has a node requesting a hwlock in the bank defined by
+the node hwlock1. hwlock1 is a hwlock provider with an argument specifier
+of length 1.
+
+       node {
+               ...
+               hwlocks = <&hwlock1 2>;
+               ...
+       };
+
+2. Example of a node using multiple specific hwlocks:
+
+The following example has a node requesting two hwlocks, a hwlock within
+the hwlock device node 'hwlock1' with #hwlock-cells value of 1, and another
+hwlock within the hwlock device node 'hwlock2' with #hwlock-cells value of 2.
+
+       node {
+               ...
+               hwlocks = <&hwlock1 2>, <&hwlock2 0 3>;
+               ...
+       };
diff --git a/Documentation/devicetree/bindings/hwlock/omap-hwspinlock.txt b/Documentation/devicetree/bindings/hwlock/omap-hwspinlock.txt
new file mode 100644 (file)
index 0000000..2c9804f
--- /dev/null
@@ -0,0 +1,26 @@
+OMAP4+ HwSpinlock Driver
+========================
+
+Required properties:
+- compatible:          Should be "ti,omap4-hwspinlock" for
+                           OMAP44xx, OMAP54xx, AM33xx, AM43xx, DRA7xx SoCs
+- reg:                 Contains the hwspinlock module register address space
+                       (base address and length)
+- ti,hwmods:           Name of the hwmod associated with the hwspinlock device
+- #hwlock-cells:       Should be 1. The OMAP hwspinlock users will use a
+                       0-indexed relative hwlock number as the argument
+                       specifier value for requesting a specific hwspinlock
+                       within a hwspinlock bank.
+
+Please look at the generic hwlock binding for usage information for consumers,
+"Documentation/devicetree/bindings/hwlock/hwlock.txt"
+
+Example:
+
+/* OMAP4 */
+hwspinlock: spinlock@4a0f6000 {
+       compatible = "ti,omap4-hwspinlock";
+       reg = <0x4a0f6000 0x1000>;
+       ti,hwmods = "spinlock";
+       #hwlock-cells = <1>;
+};
diff --git a/Documentation/devicetree/bindings/hwlock/qcom-hwspinlock.txt b/Documentation/devicetree/bindings/hwlock/qcom-hwspinlock.txt
new file mode 100644 (file)
index 0000000..4563f52
--- /dev/null
@@ -0,0 +1,39 @@
+Qualcomm Hardware Mutex Block:
+
+The hardware block provides mutexes utilized between different processors on
+the SoC as part of the communication protocol used by these processors.
+
+- compatible:
+       Usage: required
+       Value type: <string>
+       Definition: must be one of:
+                   "qcom,sfpb-mutex",
+                   "qcom,tcsr-mutex"
+
+- syscon:
+       Usage: required
+       Value type: <prop-encoded-array>
+       Definition: three cells containing:
+                   syscon phandle
+                   offset of the hwmutex block within the syscon
+                   stride of the hwmutex registers
+
+- #hwlock-cells:
+       Usage: required
+       Value type: <u32>
+       Definition: must be 1, the specified cell represents the lock id
+                   (hwlock standard property, see hwlock.txt)
+
+Example:
+
+       tcsr_mutex_block: syscon@fd484000 {
+               compatible = "syscon";
+               reg = <0xfd484000 0x2000>;
+       };
+
+       hwlock@fd484000 {
+               compatible = "qcom,tcsr-mutex";
+               syscon = <&tcsr_mutex_block 0 0x80>;
+
+               #hwlock-cells = <1>;
+       };
diff --git a/Documentation/devicetree/bindings/hwlock/sirf,hwspinlock.txt b/Documentation/devicetree/bindings/hwlock/sirf,hwspinlock.txt
new file mode 100644 (file)
index 0000000..9bb1240
--- /dev/null
@@ -0,0 +1,28 @@
+SIRF Hardware spinlock device Binding
+-----------------------------------------------
+
+Required properties :
+- compatible : shall contain only one of the following:
+       "sirf,hwspinlock"
+
+- reg : the register address of hwspinlock
+
+- #hwlock-cells : hwlock users only use the hwlock id to represent a specific
+       hwlock, so the number of cells should be <1> here.
+
+Please look at the generic hwlock binding for usage information for consumers,
+"Documentation/devicetree/bindings/hwlock/hwlock.txt"
+
+Example of hwlock provider:
+       hwlock {
+               compatible = "sirf,hwspinlock";
+               reg = <0x13240000 0x00010000>;
+               #hwlock-cells = <1>;
+       };
+
+Example of hwlock users:
+       node {
+               ...
+               hwlocks = <&hwlock 2>;
+               ...
+       };
diff --git a/Documentation/devicetree/bindings/leds/leds-aat1290.txt b/Documentation/devicetree/bindings/leds/leds-aat1290.txt
new file mode 100644 (file)
index 0000000..c05ed91
--- /dev/null
@@ -0,0 +1,73 @@
+* Skyworks Solutions, Inc. AAT1290 Current Regulator for Flash LEDs
+
+The device is controlled through two pins: FL_EN and EN_SET. The pins, when
+asserted high, enable flash strobe and movie mode (max 1/2 of flash current)
+respectively. In order to add the capability of selecting the strobe signal
+source (e.g. CPU or camera sensor) an additional switch is required,
+independent of the flash chip. The switch is controlled with pin control.
+
+Required properties:
+
+- compatible : Must be "skyworks,aat1290".
+- flen-gpios : Must be device tree identifier of the flash device FL_EN pin.
+- enset-gpios : Must be device tree identifier of the flash device EN_SET pin.
+
+Optional properties:
+- pinctrl-names : Must contain entries: "default", "host", "isp". Entries
+               "default" and "host" must refer to the same pin configuration
+               node, which sets the host as a strobe signal provider. Entry
+               "isp" must refer to the pin configuration node, which sets the
+               ISP as a strobe signal provider.
+
+A discrete LED element connected to the device must be represented by a child
+node - see Documentation/devicetree/bindings/leds/common.txt.
+
+Required properties of the LED child node:
+- led-max-microamp : see Documentation/devicetree/bindings/leds/common.txt
+- flash-max-microamp : see Documentation/devicetree/bindings/leds/common.txt
+                       Maximum flash LED supply current can be calculated using
+                       the following formula: I = 1A * 162kohm / Rset.
+- flash-timeout-us : see Documentation/devicetree/bindings/leds/common.txt
+                     Maximum flash timeout can be calculated using the
+                     following formula: T = 8.82 * 10^9 * Ct.
+
+Optional properties of the LED child node:
+- label : see Documentation/devicetree/bindings/leds/common.txt
+
+Example (with Ct = 220nF and Rset = 160kohm, on an exynos4412-trats2 board with
+a switch that allows routing the strobe signal either from the host or from
+the camera sensor):
+
+#include "exynos4412.dtsi"
+
+aat1290 {
+       compatible = "skyworks,aat1290";
+       flen-gpios = <&gpj1 1 GPIO_ACTIVE_HIGH>;
+       enset-gpios = <&gpj1 2 GPIO_ACTIVE_HIGH>;
+
+       pinctrl-names = "default", "host", "isp";
+       pinctrl-0 = <&camera_flash_host>;
+       pinctrl-1 = <&camera_flash_host>;
+       pinctrl-2 = <&camera_flash_isp>;
+
+       camera_flash: flash-led {
+               label = "aat1290-flash";
+               led-max-microamp = <520833>;
+               flash-max-microamp = <1012500>;
+               flash-timeout-us = <1940000>;
+       };
+};
+
+&pinctrl_0 {
+       camera_flash_host: camera-flash-host {
+               samsung,pins = "gpj1-0";
+               samsung,pin-function = <1>;
+               samsung,pin-val = <0>;
+       };
+
+       camera_flash_isp: camera-flash-isp {
+               samsung,pins = "gpj1-0";
+               samsung,pin-function = <1>;
+               samsung,pin-val = <1>;
+       };
+};
diff --git a/Documentation/devicetree/bindings/leds/leds-bcm6328.txt b/Documentation/devicetree/bindings/leds/leds-bcm6328.txt
new file mode 100644 (file)
index 0000000..f9e36ad
--- /dev/null
@@ -0,0 +1,309 @@
+LEDs connected to Broadcom BCM6328 controller
+
+This controller is present on BCM6318, BCM6328, BCM6362 and BCM63268.
+In these SoCs it's possible to control LEDs either as GPIOs or by hardware.
+However, on some devices there are Serial LEDs (LEDs connected to a 74x164
+controller), which can either be controlled by software (by exporting the
+74x164 as spi-gpio; see Documentation/devicetree/bindings/gpio/gpio-74x164.txt)
+or by hardware using this driver.
+Some of these Serial LEDs are hardware controlled (e.g. ethernet LEDs) and
+exporting the 74x164 as spi-gpio prevents those LEDs from being hardware
+controlled, so the only way to keep them working is to use this driver.
+
+The BCM6328 LED controller has a HWDIS register, which controls whether a LED
+should be controlled by a hardware signal instead of the MODE register value,
+with 0 meaning hardware control enabled and 1 meaning hardware control
+disabled. This is usually 1:1 for hardware to LED signals, but through the
+activity/link registers you have some limited control over rerouting the LEDs
+(as explained later in brcm,link-signal-sources). Even if a LED is hardware
+controlled you can still make it blink or force it on while the hardware has
+it off, but you can't turn it off if the hardware decides to light it up. For
+this reason, hardware controlled LEDs aren't registered as LED class devices.
+
+Required properties:
+  - compatible : should be "brcm,bcm6328-leds".
+  - #address-cells : must be 1.
+  - #size-cells : must be 0.
+  - reg : BCM6328 LED controller address and size.
+
+Optional properties:
+  - brcm,serial-leds : Boolean, enables Serial LEDs.
+    Default : false
+
+Each LED is represented as a sub-node of the brcm,bcm6328-leds device.
+
+LED sub-node required properties:
+  - reg : LED pin number (only LEDs 0 to 23 are valid).
+
+LED sub-node optional properties:
+  a) Optional properties for sub-nodes related to software controlled LEDs:
+    - label : see Documentation/devicetree/bindings/leds/common.txt
+    - active-low : Boolean, makes LED active low.
+      Default : false
+    - default-state : see
+      Documentation/devicetree/bindings/leds/leds-gpio.txt
+    - linux,default-trigger : see
+      Documentation/devicetree/bindings/leds/common.txt
+
+  b) Optional properties for sub-nodes related to hardware controlled LEDs:
+    - brcm,hardware-controlled : Boolean, makes this LED hardware controlled.
+      Default : false
+    - brcm,link-signal-sources : An array of hardware link
+      signal sources. Up to four link hardware signals can get muxed into
+      these LEDs. Only valid for LEDs 0 to 7, where LED signals 0 to 3 may
+      be muxed to LEDs 0 to 3, and signals 4 to 7 may be muxed to LEDs
+      4 to 7. A signal can be muxed to more than one LED, and one LED can
+      have more than one source signal.
+    - brcm,activity-signal-sources : An array of hardware activity
+      signal sources. Up to four activity hardware signals can get muxed into
+      these LEDs. Only valid for LEDs 0 to 7, where LED signals 0 to 3 may
+      be muxed to LEDs 0 to 3, and signals 4 to 7 may be muxed to LEDs
+      4 to 7. A signal can be muxed to more than one LED, and one LED can
+      have more than one source signal.
+
+Examples:
+Scenario 1 : BCM6328 with 4 EPHY LEDs
+       leds0: led-controller@10000800 {
+               compatible = "brcm,bcm6328-leds";
+               #address-cells = <1>;
+               #size-cells = <0>;
+               reg = <0x10000800 0x24>;
+
+               alarm_red@2 {
+                       reg = <2>;
+                       active-low;
+                       label = "red:alarm";
+               };
+               inet_green@3 {
+                       reg = <3>;
+                       active-low;
+                       label = "green:inet";
+               };
+               power_green@4 {
+                       reg = <4>;
+                       active-low;
+                       label = "green:power";
+                       default-state = "on";
+               };
+               ephy0_spd@17 {
+                       reg = <17>;
+                       brcm,hardware-controlled;
+               };
+               ephy1_spd@18 {
+                       reg = <18>;
+                       brcm,hardware-controlled;
+               };
+               ephy2_spd@19 {
+                       reg = <19>;
+                       brcm,hardware-controlled;
+               };
+               ephy3_spd@20 {
+                       reg = <20>;
+                       brcm,hardware-controlled;
+               };
+       };
+
+Scenario 2 : BCM63268 with Serial/GPHY0 LEDs
+       leds0: led-controller@10001900 {
+               compatible = "brcm,bcm6328-leds";
+               #address-cells = <1>;
+               #size-cells = <0>;
+               reg = <0x10001900 0x24>;
+               brcm,serial-leds;
+
+               gphy0_spd0@0 {
+                       reg = <0>;
+                       brcm,hardware-controlled;
+                       brcm,link-signal-sources = <0>;
+               };
+               gphy0_spd1@1 {
+                       reg = <1>;
+                       brcm,hardware-controlled;
+                       brcm,link-signal-sources = <1>;
+               };
+               inet_red@2 {
+                       reg = <2>;
+                       active-low;
+                       label = "red:inet";
+               };
+               dsl_green@3 {
+                       reg = <3>;
+                       active-low;
+                       label = "green:dsl";
+               };
+               usb_green@4 {
+                       reg = <4>;
+                       active-low;
+                       label = "green:usb";
+               };
+               wps_green@7 {
+                       reg = <7>;
+                       active-low;
+                       label = "green:wps";
+               };
+               inet_green@8 {
+                       reg = <8>;
+                       active-low;
+                       label = "green:inet";
+               };
+               ephy0_act@9 {
+                       reg = <9>;
+                       brcm,hardware-controlled;
+               };
+               ephy1_act@10 {
+                       reg = <10>;
+                       brcm,hardware-controlled;
+               };
+               ephy2_act@11 {
+                       reg = <11>;
+                       brcm,hardware-controlled;
+               };
+               gphy0_act@12 {
+                       reg = <12>;
+                       brcm,hardware-controlled;
+               };
+               ephy0_spd@13 {
+                       reg = <13>;
+                       brcm,hardware-controlled;
+               };
+               ephy1_spd@14 {
+                       reg = <14>;
+                       brcm,hardware-controlled;
+               };
+               ephy2_spd@15 {
+                       reg = <15>;
+                       brcm,hardware-controlled;
+               };
+               power_green@20 {
+                       reg = <20>;
+                       active-low;
+                       label = "green:power";
+                       default-state = "on";
+               };
+       };
+
+Scenario 3 : BCM6362 with 1 LED for each EPHY
+       leds0: led-controller@10001900 {
+               compatible = "brcm,bcm6328-leds";
+               #address-cells = <1>;
+               #size-cells = <0>;
+               reg = <0x10001900 0x24>;
+
+               usb@0 {
+                       reg = <0>;
+                       brcm,hardware-controlled;
+                       brcm,link-signal-sources = <0>;
+                       brcm,activity-signal-sources = <0>;
+                       /* USB link/activity routed to USB LED */
+               };
+               inet@1 {
+                       reg = <1>;
+                       brcm,hardware-controlled;
+                       brcm,activity-signal-sources = <1>;
+                       /* INET activity routed to INET LED */
+               };
+               ephy0@4 {
+                       reg = <4>;
+                       brcm,hardware-controlled;
+                       brcm,link-signal-sources = <4>;
+                       /* EPHY0 link routed to EPHY0 LED */
+               };
+               ephy1@5 {
+                       reg = <5>;
+                       brcm,hardware-controlled;
+                       brcm,link-signal-sources = <5>;
+                       /* EPHY1 link routed to EPHY1 LED */
+               };
+               ephy2@6 {
+                       reg = <6>;
+                       brcm,hardware-controlled;
+                       brcm,link-signal-sources = <6>;
+                       /* EPHY2 link routed to EPHY2 LED */
+               };
+               ephy3@7 {
+                       reg = <7>;
+                       brcm,hardware-controlled;
+                       brcm,link-signal-sources = <7>;
+                       /* EPHY3 link routed to EPHY3 LED */
+               };
+               power_green@20 {
+                       reg = <20>;
+                       active-low;
+                       label = "green:power";
+                       default-state = "on";
+               };
+       };
+
+Scenario 4 : BCM6362 with 1 LED for all EPHYs
+       leds0: led-controller@10001900 {
+               compatible = "brcm,bcm6328-leds";
+               #address-cells = <1>;
+               #size-cells = <0>;
+               reg = <0x10001900 0x24>;
+
+               usb@0 {
+                       reg = <0>;
+                       brcm,hardware-controlled;
+                       brcm,link-signal-sources = <0 1>;
+                       brcm,activity-signal-sources = <0 1>;
+                       /* USB/INET link/activity routed to USB LED */
+               };
+               ephy@4 {
+                       reg = <4>;
+                       brcm,hardware-controlled;
+                       brcm,link-signal-sources = <4 5 6 7>;
+                       /* EPHY0/1/2/3 link routed to EPHY0 LED */
+               };
+               power_green@20 {
+                       reg = <20>;
+                       active-low;
+                       label = "green:power";
+                       default-state = "on";
+               };
+       };
+
+Scenario 5 : BCM6362 with EPHY LEDs swapped
+       leds0: led-controller@10001900 {
+               compatible = "brcm,bcm6328-leds";
+               #address-cells = <1>;
+               #size-cells = <0>;
+               reg = <0x10001900 0x24>;
+
+               usb@0 {
+                       reg = <0>;
+                       brcm,hardware-controlled;
+                       brcm,link-signal-sources = <0>;
+                       brcm,activity-signal-sources = <0 1>;
+                       /* USB link/act and INET act routed to USB LED */
+               };
+               ephy0@4 {
+                       reg = <4>;
+                       brcm,hardware-controlled;
+                       brcm,link-signal-sources = <7>;
+                       /* EPHY3 link routed to EPHY0 LED */
+               };
+               ephy1@5 {
+                       reg = <5>;
+                       brcm,hardware-controlled;
+                       brcm,link-signal-sources = <6>;
+                       /* EPHY2 link routed to EPHY1 LED */
+               };
+               ephy2@6 {
+                       reg = <6>;
+                       brcm,hardware-controlled;
+                       brcm,link-signal-sources = <5>;
+                       /* EPHY1 link routed to EPHY2 LED */
+               };
+               ephy3@7 {
+                       reg = <7>;
+                       brcm,hardware-controlled;
+                       brcm,link-signal-sources = <4>;
+                       /* EPHY0 link routed to EPHY3 LED */
+               };
+               power_green@20 {
+                       reg = <20>;
+                       active-low;
+                       label = "green:power";
+                       default-state = "on";
+               };
+       };
diff --git a/Documentation/devicetree/bindings/leds/leds-bcm6358.txt b/Documentation/devicetree/bindings/leds/leds-bcm6358.txt
new file mode 100644 (file)
index 0000000..b22a55b
--- /dev/null
@@ -0,0 +1,145 @@
+LEDs connected to Broadcom BCM6358 controller
+
+This controller is present on BCM6358 and BCM6368.
+In these SoCs there are Serial LEDs (LEDs connected to a 74x164 controller),
+which can either be controlled by software (by exporting the 74x164 as
+spi-gpio; see Documentation/devicetree/bindings/gpio/gpio-74x164.txt) or
+by hardware using this driver.
+
+Required properties:
+  - compatible : should be "brcm,bcm6358-leds".
+  - #address-cells : must be 1.
+  - #size-cells : must be 0.
+  - reg : BCM6358 LED controller address and size.
+
+Optional properties:
+  - brcm,clk-div : SCK signal divider. Possible values are 1, 2, 4 and 8.
+    Default : 1
+  - brcm,clk-dat-low : Boolean, makes clock and data signals active low.
+    Default : false
+
+Each LED is represented as a sub-node of the brcm,bcm6358-leds device.
+
+LED sub-node required properties:
+  - reg : LED pin number (only LEDs 0 to 31 are valid).
+
+LED sub-node optional properties:
+  - label : see Documentation/devicetree/bindings/leds/common.txt
+  - active-low : Boolean, makes LED active low.
+    Default : false
+  - default-state : see
+    Documentation/devicetree/bindings/leds/leds-gpio.txt
+  - linux,default-trigger : see
+    Documentation/devicetree/bindings/leds/common.txt
+
+Examples:
+Scenario 1 : BCM6358
+       leds0: led-controller@fffe00d0 {
+               compatible = "brcm,bcm6358-leds";
+               #address-cells = <1>;
+               #size-cells = <0>;
+               reg = <0xfffe00d0 0x8>;
+
+               alarm_white {
+                       reg = <0>;
+                       active-low;
+                       label = "white:alarm";
+               };
+               tv_white {
+                       reg = <2>;
+                       active-low;
+                       label = "white:tv";
+               };
+               tel_white {
+                       reg = <3>;
+                       active-low;
+                       label = "white:tel";
+               };
+               adsl_white {
+                       reg = <4>;
+                       active-low;
+                       label = "white:adsl";
+               };
+       };
+
+Scenario 2 : BCM6368
+       leds0: led-controller@100000d0 {
+               compatible = "brcm,bcm6358-leds";
+               #address-cells = <1>;
+               #size-cells = <0>;
+               reg = <0x100000d0 0x8>;
+               brcm,clk-dat-low;
+               brcm,clk-div = <4>;
+
+               power_red {
+                       reg = <0>;
+                       active-low;
+                       label = "red:power";
+               };
+               power_green {
+                       reg = <1>;
+                       active-low;
+                       label = "green:power";
+                       default-state = "on";
+               };
+               power_blue {
+                       reg = <2>;
+                       label = "blue:power";
+               };
+               broadband_red {
+                       reg = <3>;
+                       active-low;
+                       label = "red:broadband";
+               };
+               broadband_green {
+                       reg = <4>;
+                       label = "green:broadband";
+               };
+               broadband_blue {
+                       reg = <5>;
+                       active-low;
+                       label = "blue:broadband";
+               };
+               wireless_red {
+                       reg = <6>;
+                       active-low;
+                       label = "red:wireless";
+               };
+               wireless_green {
+                       reg = <7>;
+                       active-low;
+                       label = "green:wireless";
+               };
+               wireless_blue {
+                       reg = <8>;
+                       label = "blue:wireless";
+               };
+               phone_red {
+                       reg = <9>;
+                       active-low;
+                       label = "red:phone";
+               };
+               phone_green {
+                       reg = <10>;
+                       active-low;
+                       label = "green:phone";
+               };
+               phone_blue {
+                       reg = <11>;
+                       label = "blue:phone";
+               };
+               upgrading_red {
+                       reg = <12>;
+                       active-low;
+                       label = "red:upgrading";
+               };
+               upgrading_green {
+                       reg = <13>;
+                       active-low;
+                       label = "green:upgrading";
+               };
+               upgrading_blue {
+                       reg = <14>;
+                       label = "blue:upgrading";
+               };
+       };
diff --git a/Documentation/devicetree/bindings/leds/leds-ktd2692.txt b/Documentation/devicetree/bindings/leds/leds-ktd2692.txt
new file mode 100644 (file)
index 0000000..8537374
--- /dev/null
@@ -0,0 +1,50 @@
+* Kinetic Technologies - KTD2692 Flash LED Driver
+
+KTD2692 is a driver IC for high-power flash LEDs.
+It uses ExpressWire single-wire programming for maximum flexibility.
+
+The ExpressWire interface through the CTRL pin can control LED on/off,
+enable/disable the IC, and set the Movie (max 1/3 of flash current) /
+Flash mode current, flash timeout and LVP (low voltage protection).
+
+Also, when the AUX pin is pulled high while the CTRL pin is high,
+the LED current will be ramped up to the flash-mode current level.
+
+Required properties:
+- compatible : Should be "kinetic,ktd2692".
+- ctrl-gpios : Specifier of the GPIO connected to CTRL pin.
+- aux-gpios : Specifier of the GPIO connected to AUX pin.
+
+Optional properties:
+- vin-supply : "vin" LED supply (2.7V to 5.5V).
+  See Documentation/devicetree/bindings/regulator/regulator.txt
+
+A discrete LED element connected to the device must be represented by a child
+node - See Documentation/devicetree/bindings/leds/common.txt
+
+Required properties for flash LED child nodes:
+  See Documentation/devicetree/bindings/leds/common.txt
+- led-max-microamp : Minimum threshold for timer protection
+  is defined internally (maximum 300 mA).
+- flash-max-microamp : Flash LED maximum current
+  Formula : I(mA) = 15000 / Rset.
+- flash-max-timeout-us : Flash LED maximum timeout.
+
+Optional properties for flash LED child nodes:
+- label : See Documentation/devicetree/bindings/leds/common.txt
+
+Example:
+
+ktd2692 {
+       compatible = "kinetic,ktd2692";
+       ctrl-gpios = <&gpc0 1 0>;
+       aux-gpios = <&gpc0 2 0>;
+       vin-supply = <&vbat>;
+
+       flash-led {
+               label = "ktd2692-flash";
+               led-max-microamp = <300000>;
+               flash-max-microamp = <1500000>;
+               flash-max-timeout-us = <1835000>;
+       };
+};
diff --git a/Documentation/devicetree/bindings/leds/leds-tlc591xx.txt b/Documentation/devicetree/bindings/leds/leds-tlc591xx.txt
new file mode 100644 (file)
index 0000000..3bbbf70
--- /dev/null
@@ -0,0 +1,40 @@
+LEDs connected to tlc59116 or tlc59108
+
+Required properties
+- compatible: should be "ti,tlc59116" or "ti,tlc59108"
+- #address-cells: must be 1
+- #size-cells: must be 0
+- reg: typically 0x68
+
+Each LED is represented as a sub-node of the ti,tlc59116 device.
+See Documentation/devicetree/bindings/leds/common.txt
+
+LED sub-node properties:
+- reg: number of the LED line, 0 to 15 (tlc59116) or 0 to 7 (tlc59108)
+- label: (optional) name of LED
+- linux,default-trigger : (optional)
+
+Examples:
+
+tlc59116@68 {
+       #address-cells = <1>;
+       #size-cells = <0>;
+       compatible = "ti,tlc59116";
+       reg = <0x68>;
+
+       wan@0 {
+               label = "wrt1900ac:amber:wan";
+               reg = <0x0>;
+       };
+
+       2g@2 {
+               label = "wrt1900ac:white:2g";
+               reg = <0x2>;
+       };
+
+       alive@9 {
+               label = "wrt1900ac:green:alive";
+               reg = <0x9>;
+               linux,default-trigger = "heartbeat";
+       };
+};
index 750d577e8083ee3f96c8bf823c986c162d4ac5b3..f5a8ca29aff06e84d49e3c75caf125b35c660055 100644 (file)
@@ -1,7 +1,7 @@
 * Marvell Armada 370 / Armada XP Ethernet Controller (NETA)
 
 Required properties:
-- compatible: should be "marvell,armada-370-neta".
+- compatible: "marvell,armada-370-neta" or "marvell,armada-xp-neta".
 - reg: address and length of the register set for the device.
 - interrupts: interrupt for the device
 - phy: See ethernet.txt file in the same directory.
diff --git a/Documentation/devicetree/bindings/pinctrl/lantiq,falcon-pinumx.txt b/Documentation/devicetree/bindings/pinctrl/lantiq,falcon-pinumx.txt
deleted file mode 100644 (file)
index ac4da9f..0000000
+++ /dev/null
@@ -1,83 +0,0 @@
-Lantiq FALCON pinmux controller
-
-Required properties:
-- compatible: "lantiq,pinctrl-falcon"
-- reg: Should contain the physical address and length of the gpio/pinmux
-  register range
-
-Please refer to pinctrl-bindings.txt in this directory for details of the
-common pinctrl bindings used by client devices, including the meaning of the
-phrase "pin configuration node".
-
-Lantiq's pin configuration nodes act as a container for an arbitrary number of
-subnodes. Each of these subnodes represents some desired configuration for a
-pin, a group, or a list of pins or groups. This configuration can include the
-mux function to select on those group(s), and two pin configuration parameters:
-pull-up and open-drain
-
-The name of each subnode is not important as long as it is unique; all subnodes
-should be enumerated and processed purely based on their content.
-
-Each subnode only affects those parameters that are explicitly listed. In
-other words, a subnode that lists a mux function but no pin configuration
-parameters implies no information about any pin configuration parameters.
-Similarly, a pin subnode that describes a pullup parameter implies no
-information about e.g. the mux function.
-
-We support 2 types of nodes.
-
-Definition of mux function groups:
-
-Required subnode-properties:
-- lantiq,groups : An array of strings. Each string contains the name of a group.
-  Valid values for these names are listed below.
-- lantiq,function: A string containing the name of the function to mux to the
-  group. Valid values for function names are listed below.
-
-Valid values for group and function names:
-
-  mux groups:
-    por, ntr, ntr8k, hrst, mdio, bootled, asc0, spi, spi cs0, spi cs1, i2c,
-    jtag, slic, pcm, asc1
-
-  functions:
-    rst, ntr, mdio, led, asc, spi, i2c, jtag, slic, pcm
-
-
-Definition of pin configurations:
-
-Required subnode-properties:
-- lantiq,pins : An array of strings. Each string contains the name of a pin.
-  Valid values for these names are listed below.
-
-Optional subnode-properties:
-- lantiq,pull: Integer, representing the pull-down/up to apply to the pin.
-    0: none, 1: down
-- lantiq,drive-current: Boolean, enables drive-current
-- lantiq,slew-rate: Boolean, enables slew-rate
-
-Example:
-       pinmux0 {
-               compatible = "lantiq,pinctrl-falcon";
-               pinctrl-names = "default";
-               pinctrl-0 = <&state_default>;
-
-               state_default: pinmux {
-                       asc0 {
-                               lantiq,groups = "asc0";
-                               lantiq,function = "asc";
-                       };
-                       ntr {
-                               lantiq,groups = "ntr8k";
-                               lantiq,function = "ntr";
-                       };
-                       i2c {
-                               lantiq,groups = "i2c";
-                               lantiq,function = "i2c";
-                       };
-                       hrst {
-                               lantiq,groups = "hrst";
-                               lantiq,function = "rst";
-                       };
-               };
-       };
diff --git a/Documentation/devicetree/bindings/pinctrl/lantiq,pinctrl-falcon.txt b/Documentation/devicetree/bindings/pinctrl/lantiq,pinctrl-falcon.txt
new file mode 100644 (file)
index 0000000..ac4da9f
--- /dev/null
@@ -0,0 +1,83 @@
+Lantiq FALCON pinmux controller
+
+Required properties:
+- compatible: "lantiq,pinctrl-falcon"
+- reg: Should contain the physical address and length of the gpio/pinmux
+  register range
+
+Please refer to pinctrl-bindings.txt in this directory for details of the
+common pinctrl bindings used by client devices, including the meaning of the
+phrase "pin configuration node".
+
+Lantiq's pin configuration nodes act as a container for an arbitrary number of
+subnodes. Each of these subnodes represents some desired configuration for a
+pin, a group, or a list of pins or groups. This configuration can include the
+mux function to select on those group(s), and two pin configuration parameters:
+pull-up and open-drain.
+
+The name of each subnode is not important as long as it is unique; all subnodes
+should be enumerated and processed purely based on their content.
+
+Each subnode only affects those parameters that are explicitly listed. In
+other words, a subnode that lists a mux function but no pin configuration
+parameters implies no information about any pin configuration parameters.
+Similarly, a pin subnode that describes a pullup parameter implies no
+information about e.g. the mux function.
+
+We support 2 types of nodes.
+
+Definition of mux function groups:
+
+Required subnode-properties:
+- lantiq,groups : An array of strings. Each string contains the name of a group.
+  Valid values for these names are listed below.
+- lantiq,function: A string containing the name of the function to mux to the
+  group. Valid values for function names are listed below.
+
+Valid values for group and function names:
+
+  mux groups:
+    por, ntr, ntr8k, hrst, mdio, bootled, asc0, spi, spi cs0, spi cs1, i2c,
+    jtag, slic, pcm, asc1
+
+  functions:
+    rst, ntr, mdio, led, asc, spi, i2c, jtag, slic, pcm
+
+
+Definition of pin configurations:
+
+Required subnode-properties:
+- lantiq,pins : An array of strings. Each string contains the name of a pin.
+  Valid values for these names are listed below.
+
+Optional subnode-properties:
+- lantiq,pull: Integer, representing the pull-down/up to apply to the pin.
+    0: none, 1: down
+- lantiq,drive-current: Boolean, enables drive-current
+- lantiq,slew-rate: Boolean, enables slew-rate
+
+Example:
+       pinmux0 {
+               compatible = "lantiq,pinctrl-falcon";
+               pinctrl-names = "default";
+               pinctrl-0 = <&state_default>;
+
+               state_default: pinmux {
+                       asc0 {
+                               lantiq,groups = "asc0";
+                               lantiq,function = "asc";
+                       };
+                       ntr {
+                               lantiq,groups = "ntr8k";
+                               lantiq,function = "ntr";
+                       };
+                       i2c {
+                               lantiq,groups = "i2c";
+                               lantiq,function = "i2c";
+                       };
+                       hrst {
+                               lantiq,groups = "hrst";
+                               lantiq,function = "rst";
+                       };
+               };
+       };
diff --git a/Documentation/devicetree/bindings/pinctrl/lantiq,pinctrl-xway.txt b/Documentation/devicetree/bindings/pinctrl/lantiq,pinctrl-xway.txt
new file mode 100644 (file)
index 0000000..e89b467
--- /dev/null
@@ -0,0 +1,97 @@
+Lantiq XWAY pinmux controller
+
+Required properties:
+- compatible: "lantiq,pinctrl-xway" or "lantiq,pinctrl-xr9"
+- reg: Should contain the physical address and length of the gpio/pinmux
+  register range
+
+Please refer to pinctrl-bindings.txt in this directory for details of the
+common pinctrl bindings used by client devices, including the meaning of the
+phrase "pin configuration node".
+
+Lantiq's pin configuration nodes act as a container for an arbitrary number of
+subnodes. Each of these subnodes represents some desired configuration for a
+pin, a group, or a list of pins or groups. This configuration can include the
+mux function to select on those group(s), and two pin configuration parameters:
+pull-up and open-drain.
+
+The name of each subnode is not important as long as it is unique; all subnodes
+should be enumerated and processed purely based on their content.
+
+Each subnode only affects those parameters that are explicitly listed. In
+other words, a subnode that lists a mux function but no pin configuration
+parameters implies no information about any pin configuration parameters.
+Similarly, a pin subnode that describes a pullup parameter implies no
+information about e.g. the mux function.
+
+We support 2 types of nodes.
+
+Definition of mux function groups:
+
+Required subnode-properties:
+- lantiq,groups : An array of strings. Each string contains the name of a group.
+  Valid values for these names are listed below.
+- lantiq,function: A string containing the name of the function to mux to the
+  group. Valid values for function names are listed below.
+
+Valid values for group and function names:
+
+  mux groups:
+    exin0, exin1, exin2, jtag, ebu a23, ebu a24, ebu a25, ebu clk, ebu cs1,
+    ebu wait, nand ale, nand cs1, nand cle, spi, spi_cs1, spi_cs2, spi_cs3,
+    spi_cs4, spi_cs5, spi_cs6, asc0, asc0 cts rts, stp, nmi, gpt1, gpt2,
+    gpt3, clkout0, clkout1, clkout2, clkout3, gnt1, gnt2, gnt3, req1, req2,
+    req3
+
+  additional mux groups (XR9 only):
+    mdio, nand rdy, nand rd, exin3, exin4, gnt4, req4
+
+  functions:
+    spi, asc, cgu, jtag, exin, stp, gpt, nmi, pci, ebu, mdio
+
+
+
+Definition of pin configurations:
+
+Required subnode-properties:
+- lantiq,pins : An array of strings. Each string contains the name of a pin.
+  Valid values for these names are listed below.
+
+Optional subnode-properties:
+- lantiq,pull: Integer, representing the pull-down/up to apply to the pin.
+    0: none, 1: down, 2: up.
+- lantiq,open-drain: Boolean, enables open-drain on the defined pin.
+
+Valid values for XWAY pin names:
+  Pinconf pins can be referenced via the names io0-io31.
+
+Valid values for XR9 pin names:
+  Pinconf pins can be referenced via the names io0-io55.
+
+Example:
+       gpio: pinmux@E100B10 {
+               compatible = "lantiq,pinctrl-xway";
+               pinctrl-names = "default";
+               pinctrl-0 = <&state_default>;
+
+               #gpio-cells = <2>;
+               gpio-controller;
+               reg = <0xE100B10 0xA0>;
+
+               state_default: pinmux {
+                       stp {
+                               lantiq,groups = "stp";
+                               lantiq,function = "stp";
+                       };
+                       pci {
+                               lantiq,groups = "gnt1";
+                               lantiq,function = "pci";
+                       };
+                       conf_out {
+                               lantiq,pins = "io4", "io5", "io6"; /* stp */
+                               lantiq,open-drain;
+                               lantiq,pull = <0>;
+                       };
+               };
+       };
+
diff --git a/Documentation/devicetree/bindings/pinctrl/lantiq,xway-pinumx.txt b/Documentation/devicetree/bindings/pinctrl/lantiq,xway-pinumx.txt
deleted file mode 100644 (file)
index e89b467..0000000
+++ /dev/null
@@ -1,97 +0,0 @@
-Lantiq XWAY pinmux controller
-
-Required properties:
-- compatible: "lantiq,pinctrl-xway" or "lantiq,pinctrl-xr9"
-- reg: Should contain the physical address and length of the gpio/pinmux
-  register range
-
-Please refer to pinctrl-bindings.txt in this directory for details of the
-common pinctrl bindings used by client devices, including the meaning of the
-phrase "pin configuration node".
-
-Lantiq's pin configuration nodes act as a container for an arbitrary number of
-subnodes. Each of these subnodes represents some desired configuration for a
-pin, a group, or a list of pins or groups. This configuration can include the
-mux function to select on those group(s), and two pin configuration parameters:
-pull-up and open-drain
-
-The name of each subnode is not important as long as it is unique; all subnodes
-should be enumerated and processed purely based on their content.
-
-Each subnode only affects those parameters that are explicitly listed. In
-other words, a subnode that lists a mux function but no pin configuration
-parameters implies no information about any pin configuration parameters.
-Similarly, a pin subnode that describes a pullup parameter implies no
-information about e.g. the mux function.
-
-We support 2 types of nodes.
-
-Definition of mux function groups:
-
-Required subnode-properties:
-- lantiq,groups : An array of strings. Each string contains the name of a group.
-  Valid values for these names are listed below.
-- lantiq,function: A string containing the name of the function to mux to the
-  group. Valid values for function names are listed below.
-
-Valid values for group and function names:
-
-  mux groups:
-    exin0, exin1, exin2, jtag, ebu a23, ebu a24, ebu a25, ebu clk, ebu cs1,
-    ebu wait, nand ale, nand cs1, nand cle, spi, spi_cs1, spi_cs2, spi_cs3,
-    spi_cs4, spi_cs5, spi_cs6, asc0, asc0 cts rts, stp, nmi , gpt1, gpt2,
-    gpt3, clkout0, clkout1, clkout2, clkout3, gnt1, gnt2, gnt3, req1, req2,
-    req3
-
-  additional mux groups (XR9 only):
-    mdio, nand rdy, nand rd, exin3, exin4, gnt4, req4
-
-  functions:
-    spi, asc, cgu, jtag, exin, stp, gpt, nmi, pci, ebu, mdio
-
-
-
-Definition of pin configurations:
-
-Required subnode-properties:
-- lantiq,pins : An array of strings. Each string contains the name of a pin.
-  Valid values for these names are listed below.
-
-Optional subnode-properties:
-- lantiq,pull: Integer, representing the pull-down/up to apply to the pin.
-    0: none, 1: down, 2: up.
-- lantiq,open-drain: Boolean, enables open-drain on the defined pin.
-
-Valid values for XWAY pin names:
-  Pinconf pins can be referenced via the names io0-io31.
-
-Valid values for XR9 pin names:
-  Pinconf pins can be referenced via the names io0-io55.
-
-Example:
-       gpio: pinmux@E100B10 {
-               compatible = "lantiq,pinctrl-xway";
-               pinctrl-names = "default";
-               pinctrl-0 = <&state_default>;
-
-               #gpio-cells = <2>;
-               gpio-controller;
-               reg = <0xE100B10 0xA0>;
-
-               state_default: pinmux {
-                       stp {
-                               lantiq,groups = "stp";
-                               lantiq,function = "stp";
-                       };
-                       pci {
-                               lantiq,groups = "gnt1";
-                               lantiq,function = "pci";
-                       };
-                       conf_out {
-                               lantiq,pins = "io4", "io5", "io6"; /* stp */
-                               lantiq,open-drain;
-                               lantiq,pull = <0>;
-                       };
-               };
-       };
-
diff --git a/Documentation/devicetree/bindings/remoteproc/wkup_m3_rproc.txt b/Documentation/devicetree/bindings/remoteproc/wkup_m3_rproc.txt
new file mode 100644 (file)
index 0000000..3a70073
--- /dev/null
@@ -0,0 +1,52 @@
+TI Wakeup M3 Remoteproc Driver
+==============================
+
+The TI AM33xx and AM43xx family of devices use a small Cortex M3 co-processor
+(commonly referred to as Wakeup M3 or CM3) to help with various low power tasks
+that cannot be controlled from the MPU. This CM3 processor requires a firmware
+binary to accomplish this. The wkup_m3 remoteproc driver handles the loading of
+the firmware and booting of the CM3.
+
+Wkup M3 Device Node:
+====================
+A wkup_m3 device node is used to represent the Wakeup M3 processor instance
+within the SoC. It is added as a child node of the parent interconnect bus
+(l4_wkup) through which it is accessible to the MPU.
+
+Required properties:
+--------------------
+- compatible:          Should be one of,
+                               "ti,am3352-wkup-m3" for AM33xx SoCs
+                               "ti,am4372-wkup-m3" for AM43xx SoCs
+- reg:                 Should contain the address ranges for the two internal
+                       memory regions, UMEM and DMEM. The parent node should
+                       provide an appropriate ranges property for properly
+                       translating these into bus addresses.
+- reg-names:           Contains the corresponding names for the two memory
+                       regions. These should be named "umem" & "dmem".
+- ti,hwmods:           Name of the hwmod associated with the wkupm3 device.
+- ti,pm-firmware:      Name of firmware file to be used for loading and
+                       booting the wkup_m3 remote processor.
+
+Example:
+--------
+/* AM33xx */
+ocp {
+        l4_wkup: l4_wkup@44c00000 {
+               compatible = "am335-l4-wkup", "simple-bus";
+               ranges = <0 0x44c00000 0x400000>;
+               #address-cells = <1>;
+               #size-cells = <1>;
+
+               wkup_m3: wkup_m3@100000 {
+                       compatible = "ti,am3352-wkup-m3";
+                       reg = <0x100000 0x4000>,
+                             <0x180000 0x2000>;
+                       reg-names = "umem", "dmem";
+                       ti,hwmods = "wkup_m3";
+                       ti,pm-firmware = "am335x-pm-firmware.elf";
+               };
+       };
+
+       ...
+};
index 1be8d7a26c15fff480c9d9644914eeee0bec03e9..5883b73ea1b56053fbafb98f473b1f2c8a349c62 100644 (file)
@@ -79,9 +79,9 @@ Atmel High-Speed USB device controller
 
 Required properties:
  - compatible: Should be one of the following
-              "at91sam9rl-udc"
-              "at91sam9g45-udc"
-              "sama5d3-udc"
+              "atmel,at91sam9rl-udc"
+              "atmel,at91sam9g45-udc"
+              "atmel,sama5d3-udc"
  - reg: Address and length of the register set for the device
  - interrupts: Should contain usba interrupt
  - clocks: Should reference the peripheral and host clocks
index 7b607761b7748c19899f407b062cdcaa514ea2d2..d444757c4d9ec3e56c261e0adf330de187251466 100644 (file)
@@ -114,6 +114,7 @@ isee        ISEE 2007 S.L.
 isil   Intersil
 karo   Ka-Ro electronics GmbH
 keymile        Keymile GmbH
+kinetic Kinetic Technologies
 lacie  LaCie
 lantiq Lantiq Semiconductor
 lenovo Lenovo Group Ltd.
@@ -226,4 +227,5 @@ xillybus    Xillybus Ltd.
 xlnx   Xilinx
 zyxel  ZyXEL Communications Corp.
 zarlink        Zarlink Semiconductor
+zii    Zodiac Inflight Innovations
 zte    ZTE Corp.
diff --git a/Documentation/devicetree/bindings/watchdog/digicolor-wdt.txt b/Documentation/devicetree/bindings/watchdog/digicolor-wdt.txt
new file mode 100644 (file)
index 0000000..a882967
--- /dev/null
@@ -0,0 +1,25 @@
+Conexant Digicolor SoCs Watchdog timer
+
+The watchdog functionality in Conexant Digicolor SoCs relies on the so called
+"Agent Communication" block. This block includes the eight programmable system
+timer counters. The first timer (called "Timer A") is the only one that can be
+used as watchdog.
+
+Required properties:
+
+- compatible : Should be "cnxt,cx92755-wdt"
+- reg : Specifies base physical address and size of the registers
+- clocks : phandle; specifies the clock that drives the timer
+
+Optional properties:
+
+- timeout-sec : Contains the watchdog timeout in seconds
+
+Example:
+
+       watchdog@f0000fc0 {
+               compatible = "cnxt,cx92755-wdt";
+               reg = <0xf0000fc0 0x8>;
+               clocks = <&main_clk>;
+               timeout-sec = <15>;
+       };
index c227970671ea3fafba27935405475f2376bf78fd..1fa20e453a2d00207fb72408b3c84db066d3ed9a 100644 (file)
@@ -1,10 +1,11 @@
 TI Watchdog Timer (WDT) Controller for OMAP
 
 Required properties:
-compatible:
-- "ti,omap3-wdt" for OMAP3
-- "ti,omap4-wdt" for OMAP4
-- ti,hwmods: Name of the hwmod associated to the WDT
+- compatible : "ti,omap3-wdt" for OMAP3 or "ti,omap4-wdt" for OMAP4
+- ti,hwmods : Name of the hwmod associated to the WDT
+
+Optional properties:
+- timeout-sec : default watchdog timeout in seconds
 
 Examples:
 
index 277d1e810670d3678b1991b059758a69a497edef..c0bd5677271bcf07880db101bd1059e4d45c4b93 100644 (file)
@@ -676,6 +676,29 @@ FS-Cache provides some utilities that a cache backend may make use of:
      as possible.
 
 
+ (*) Indicate that a stale object was found and discarded:
+
+       void fscache_object_retrying_stale(struct fscache_object *object);
+
+     This is called to indicate that the lookup procedure found an object in
+     the cache that the netfs decided was stale.  The object has been
+     discarded from the cache and the lookup will be performed again.
+
+
+ (*) Indicate that the caching backend killed an object:
+
+       void fscache_object_mark_killed(struct fscache_object *object,
+                                       enum fscache_why_object_killed why);
+
+     This is called to indicate that the cache backend preemptively killed an
+     object.  The why parameter should be set to indicate the reason:
+
+       FSCACHE_OBJECT_IS_STALE - the object was stale and needs discarding.
+       FSCACHE_OBJECT_NO_SPACE - there was insufficient cache space
+       FSCACHE_OBJECT_WAS_RETIRED - the object was retired when relinquished.
+       FSCACHE_OBJECT_WAS_CULLED - the object was culled to make space.
+
+
  (*) Get and release references on a retrieval record:
 
        void fscache_get_retrieval(struct fscache_retrieval *op);
index 770267af5b3e2c992eb081d2827a77acccd1ae7e..50f0a5757f48cb654f33130fcabc2db87f197a12 100644 (file)
@@ -284,8 +284,9 @@ proc files.
                enq=N   Number of times async ops queued for processing
                can=N   Number of async ops cancelled
                rej=N   Number of async ops rejected due to object lookup/create failure
+               ini=N   Number of async ops initialised
                dfr=N   Number of async ops queued for deferred release
-               rel=N   Number of async ops released
+               rel=N   Number of async ops released (should equal ini=N when idle)
                gc=N    Number of deferred-release async ops garbage collected
        CacheOp alo=N   Number of in-progress alloc_object() cache ops
                luo=N   Number of in-progress lookup_object() cache ops
@@ -303,6 +304,10 @@ proc files.
                wrp=N   Number of in-progress write_page() cache ops
                ucp=N   Number of in-progress uncache_page() cache ops
                dsp=N   Number of in-progress dissociate_pages() cache ops
+       CacheEv nsp=N   Number of object lookups/creations rejected due to lack of space
+               stl=N   Number of stale objects deleted
+               rtr=N   Number of objects retired when relinquished
+               cul=N   Number of objects culled
 
 
  (*) /proc/fs/fscache/histogram
index baf41118660d6019b475eb27d58abe83e1aea0d4..7af2851d667c7ab0733ff177c7c1ca91d33bf85e 100644 (file)
@@ -18,8 +18,10 @@ Usage
 -----
 
 If you have a block device which supports DAX, you can make a filesystem
-on it as usual.  When mounting it, use the -o dax option manually
-or add 'dax' to the options in /etc/fstab.
+on it as usual.  The DAX code currently only supports files with a block
+size equal to your kernel's PAGE_SIZE, so you may need to specify a block
+size when creating the filesystem.  When mounting it, use the "-o dax"
+option on the command line or add 'dax' to the options in /etc/fstab.
 
 
 Implementation Tips for Block Driver Writers
index 68f1c9106573f40df371e01d14946c52df98405d..f24d1b8339576e96c46045f5da8f275ee9250056 100644 (file)
@@ -500,3 +500,7 @@ in your dentry operations instead.
        dentry,  it does not get nameidata at all and it gets called only when cookie
        is non-NULL.  Note that link body isn't available anymore, so if you need it,
        store it as cookie.
+--
+[mandatory]
+       __fd_install() & fd_install() can now sleep. Callers should not
+       hold a spinlock or other resources that do not allow a schedule.
index 3d1bac399a220b074ce542eba81c0fcba505ce99..d201828d202ff8de99579ee64cd24b2ce89a391d 100644 (file)
@@ -81,6 +81,13 @@ increase the chances of your change being accepted.
 
 * Provide a detect function if and only if a chip can be detected reliably.
 
+* Only the following I2C addresses shall be probed: 0x18-0x1f, 0x28-0x2f,
+  0x48-0x4f, 0x58, 0x5c, 0x73 and 0x77. Probing other addresses is strongly
+  discouraged as it is known to cause trouble with other (non-hwmon) I2C
+  chips. If your chip lives at an address which can't be probed then the
+  device will have to be instantiated explicitly (which is always better
+  anyway.)
+
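+  As a purely illustrative sketch, a driver for a chip that answers at
+  0x48-0x4b would express this with the usual address list handed to the I2C
+  core through the driver's .address_list/.detect fields:
+
+       static const unsigned short normal_i2c[] = {
+               0x48, 0x49, 0x4a, 0x4b, I2C_CLIENT_END };
+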
 * Avoid writing to chip registers in the detect function. If you have to write,
   only do it after you have already gathered enough data to be certain that the
   detection is going to be successful.
index 53f7b6866fec4d51c4049ba6c700d58e1950a046..f2ffc402ea4519b6d9ba140ebfe72f88d1f67f4f 100644 (file)
@@ -8,6 +8,7 @@ Supported chips:
     Datasheet: http://www.winbond.com.tw
 
 Author: Shane Huang (Winbond)
+Updated: Roger Lucas
 
 
 Module Parameters
@@ -38,9 +39,16 @@ parameter; this will put it into a more well-behaved state first.
 The driver implements three temperature sensors, seven fan rotation speed
 sensors, nine voltage sensors, and two automatic fan regulation
 strategies called: Smart Fan I (Thermal Cruise mode) and Smart Fan II.
-Automatic fan control mode is possible only for fan1-fan3. Fan4-fan7 can run
-synchronized with selected fan (fan1-fan3). This functionality and manual PWM
-control for fan4-fan7 is not yet implemented.
+
+The driver also implements up to seven fan control outputs: pwm1-7.  Pwm1-7
+can be configured to PWM output or analogue DC output via their associated
+pwmX_mode. Outputs pwm4 through pwm7 may or may not be present depending on
+how the W83792D was configured by the BIOS.
+
+Automatic fan control mode is possible only for fan1-fan3.
+
+For all pwmX outputs, a value of 0 means minimum fan speed and a value of
+255 means maximum fan speed.
 
 Temperatures are measured in degrees Celsius and measurement resolution is 1
 degC for temp1 and 0.5 degC for temp2 and temp3. An alarm is triggered when
@@ -157,14 +165,14 @@ for each fan.
 /sys files
 ----------
 
-pwm[1-3] - this file stores PWM duty cycle or DC value (fan speed) in range:
+pwm[1-7] - this file stores PWM duty cycle or DC value (fan speed) in range:
        0 (stop) to 255 (full)
 pwm[1-3]_enable - this file controls mode of fan/temperature control:
             * 0 Disabled
             * 1 Manual mode
             * 2 Smart Fan II
             * 3 Thermal Cruise
-pwm[1-3]_mode - Select PWM of DC mode
+pwm[1-7]_mode - Select PWM or DC mode
             * 0 DC
             * 1 PWM
 thermal_cruise[1-3] - Selects the desired temperature for cruise (degC)
index 62f7d4ea6e26459b74cdccbca9f7a3532ab4900b..61c1ee98e59f2137b8b250d2b469d4d949cca9b3 100644 (file)
@@ -48,6 +48,16 @@ independent, drivers.
      ids for predefined purposes.
      Should be called from a process context (might sleep).
 
+  int of_hwspin_lock_get_id(struct device_node *np, int index);
+   - retrieve the global lock id for an OF phandle-based specific lock.
+     This function provides a means for DT users of a hwspinlock module
+     to get the global lock id of a specific hwspinlock, so that it can
+     be requested using the normal hwspin_lock_request_specific() API.
+     The function returns a lock id number on success, -EPROBE_DEFER if
+     the hwspinlock device is not yet registered with the core, or other
+     error values.
+     Should be called from a process context (might sleep).
+
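+     For instance, a driver whose device tree node references a hwspinlock
+     could do roughly the following (error handling trimmed; the phandle
+     property name comes from the consumer binding, not from this API):
+
+       int id;
+       struct hwspinlock *hwlock;
+
+       id = of_hwspin_lock_get_id(dev->of_node, 0);
+       if (id < 0)
+               return id;              /* may be -EPROBE_DEFER */
+
+       hwlock = hwspin_lock_request_specific(id);
+       if (!hwlock)
+               return -EBUSY;
+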
   int hwspin_lock_free(struct hwspinlock *hwlock);
    - free a previously-assigned hwspinlock; returns 0 on success, or an
      appropriate error code on failure (e.g. -EINVAL if the hwspinlock
index 51f4221657bff5b03c9a8ef44116d6e27ef27423..611c52267d24812423821a9f062a407414f86e36 100644 (file)
@@ -321,6 +321,7 @@ Code  Seq#(hex)     Include File            Comments
 0xDB   00-0F   drivers/char/mwave/mwavepub.h
 0xDD   00-3F   ZFCP device driver      see drivers/s390/scsi/
                                        <mailto:aherrman@de.ibm.com>
+0xE5   00-3F   linux/fuse.h
 0xEC   00-01   drivers/platform/chrome/cros_ec_dev.h   ChromeOS EC driver
 0xF3   00-3F   drivers/usb/misc/sisusbvga/sisusb.h     sisfb (in development)
                                        <mailto:thomas@winischhofer.net>
index afe7e2bbbc23cbc01eef6224824f1c4d199833c7..1d6f0459cd7bbe531b7acc2d85722ff62185729e 100644 (file)
@@ -293,6 +293,12 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
        acpi_os_name=   [HW,ACPI] Tell ACPI BIOS the name of the OS
                        Format: To spoof as Windows 98: ="Microsoft Windows"
 
+       acpi_rev_override [ACPI] Override the _REV object to return 5 (instead
+                       of 2 which is mandated by ACPI 6) as the supported ACPI
+                       specification revision (when using this switch, it may
+                       be necessary to carry out a cold reboot _twice_ in a
+                       row to make it take effect on the platform firmware).
+
        acpi_osi=       [HW,ACPI] Modify list of supported OS interface strings
                        acpi_osi="string1"      # add string1
                        acpi_osi="!string2"     # remove string2
index 19bb67355424a053127f19b12f347a9cdafd513a..8da3c6f4b60b8d095056f98a5da495d6d971931e 100644 (file)
@@ -20,3 +20,54 @@ Following sysfs attributes are exposed for controlling flash LED devices:
        - max_flash_timeout
        - flash_strobe
        - flash_fault
+
+
+V4L2 flash wrapper for flash LEDs
+=================================
+
+A LED subsystem driver can also be controlled from the Video4Linux2 (V4L2)
+subsystem. In order to enable this, the CONFIG_V4L2_FLASH_LED_CLASS symbol
+has to be defined in the kernel config.
+
+The driver must call the v4l2_flash_init function to get registered in the
+V4L2 subsystem. The function takes six arguments:
+- dev       : flash device, e.g. an I2C device
+- of_node   : of_node of the LED, may be NULL if the same as device's
+- fled_cdev : LED flash class device to wrap
+- iled_cdev : LED flash class device representing indicator LED associated with
+             fled_cdev, may be NULL
+- ops : V4L2 specific ops
+       * external_strobe_set - defines the source of the flash LED strobe -
+               V4L2_CID_FLASH_STROBE control or external source, typically
+               a sensor, which makes it possible to synchronise the flash
+               strobe start with exposure start,
+       * intensity_to_led_brightness and led_brightness_to_intensity - perform
+               enum led_brightness <-> V4L2 intensity conversion in a device
+               specific manner - they can be used for devices with non-linear
+               LED current scale.
+- config : configuration for V4L2 Flash sub-device
+       * dev_name - the name of the media entity, unique in the system,
+       * flash_faults - bitmask of flash faults that the LED flash class
+               device can report; corresponding LED_FAULT* bit definitions are
+               available in <linux/led-class-flash.h>,
+       * torch_intensity - constraints for the LED in TORCH mode
+               in microamperes,
+       * indicator_intensity - constraints for the indicator LED
+               in microamperes,
+       * has_external_strobe - determines whether the flash strobe source
+               can be switched to external,
+
+On remove, the v4l2_flash_release function has to be called; it takes one
+argument - the struct v4l2_flash pointer returned previously by
+v4l2_flash_init. This function can be safely called with a NULL or error
+pointer argument.
+
+Please refer to drivers/leds/leds-max77693.c for an exemplary usage of the
+v4l2 flash wrapper.
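+
+For orientation only, the registration boils down to something like the
+following sketch. The my_flash_* names are made up, the fault bits are just
+examples, and the exact layout of struct v4l2_flash_config (e.g. dev_name
+being a fixed-size string) should be taken from the header, not from this
+snippet; dev and fled_cdev come from the driver's own probe path.
+
+       struct v4l2_flash_config cfg = {};
+       struct v4l2_flash *v4l2_flash;
+
+       strlcpy(cfg.dev_name, "my-flash", sizeof(cfg.dev_name));
+       cfg.flash_faults = LED_FAULT_OVER_VOLTAGE | LED_FAULT_TIMEOUT;
+       cfg.has_external_strobe = 1;
+
+       /* of_node == NULL: same node as the device;
+        * iled_cdev == NULL: no indicator LED */
+       v4l2_flash = v4l2_flash_init(dev, NULL, fled_cdev, NULL,
+                                    &my_flash_v4l2_ops, &cfg);
+       if (IS_ERR(v4l2_flash))
+               return PTR_ERR(v4l2_flash);
+
+       /* on driver removal */
+       v4l2_flash_release(v4l2_flash);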
+
+Once the V4L2 sub-device is registered by the driver which created the Media
+controller device, the sub-device node acts just as a node of a native V4L2
+flash API device would. The calls are simply routed to the LED flash API.
+
+Opening the V4L2 flash sub-device makes the LED subsystem sysfs interface
+unavailable. The interface is re-enabled after the V4L2 flash sub-device
+is closed.
index 5b3e91d4ac5912011f0f4dfc59e1022f9ac24476..0dbbd279c9b92ea4fa579a2a94e5cf3a8e9227d6 100644 (file)
@@ -49,6 +49,36 @@ There are two ways to run LED patterns.
 2) Firmware interface - LP55xx common interface
   For the details, please refer to 'firmware' section in leds-lp55xx.txt
 
+LP5523 has three master faders. If a channel is mapped to one of
+the master faders, its output is dimmed based on the value of the master
+fader.
+
+For example,
+
+  echo "123000123" > master_fader_leds
+
+creates the following channel-fader mappings:
+
+  channel 0,6 to master_fader1
+  channel 1,7 to master_fader2
+  channel 2,8 to master_fader3
+
+Then, to have 25% of the original output on channel 0,6:
+
+  echo 64 > master_fader1
+
+To have 0% of the original output (i.e. no output) channel 1,7:
+
+  echo 0 > master_fader2
+
+To have 100% of the original output (i.e. no dimming) on channel 2,8:
+
+  echo 255 > master_fader3
+
+To clear all master fader controls:
+
+  echo "000000000" > master_fader_leds
+
 Selftest uses always the current from the platform data.
 
 Each channel contains led current settings.
diff --git a/Documentation/ntb.txt b/Documentation/ntb.txt
new file mode 100644 (file)
index 0000000..1d9bbab
--- /dev/null
@@ -0,0 +1,127 @@
+# NTB Drivers
+
+NTB (Non-Transparent Bridge) is a type of PCI-Express bridge chip that connects
+the separate memory systems of two computers to the same PCI-Express fabric.
+Existing NTB hardware supports a common feature set, including scratchpad
+registers, doorbell registers, and memory translation windows.  Scratchpad
+registers are read-and-writable registers that are accessible from either side
+of the device, so that peers can exchange a small amount of information at a
+fixed address.  Doorbell registers provide a way for peers to send interrupt
+events.  Memory windows allow translated read and write access to the peer
+memory.
+
+## NTB Core Driver (ntb)
+
+The NTB core driver defines an API wrapping the common feature set, and allows
+clients interested in NTB features to discover the NTB devices supported by
+hardware drivers.  The term "client" is used here to mean an upper layer
+component making use of the NTB api.  The term "driver," or "hardware driver,"
+is used here to mean a driver for a specific vendor and model of NTB hardware.
+
+## NTB Client Drivers
+
+NTB client drivers should register with the NTB core driver.  After
+registering, the client probe and remove functions will be called appropriately
+as ntb hardware, or hardware drivers, are inserted and removed.  The
+registration uses the Linux Device framework, so it should feel familiar to
+anyone who has written a pci driver.
+
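+The shape of a minimal client is sketched below, assuming the client ops and
+registration helpers declared in include/linux/ntb.h; verify the exact
+signatures against that header before relying on them.
+
+       static int my_probe(struct ntb_client *client, struct ntb_dev *ntb)
+       {
+               /* claim the device, set up doorbells/memory windows, ... */
+               return 0;
+       }
+
+       static void my_remove(struct ntb_client *client, struct ntb_dev *ntb)
+       {
+               /* tear everything down again */
+       }
+
+       static struct ntb_client my_client = {
+               .ops = {
+                       .probe = my_probe,
+                       .remove = my_remove,
+               },
+       };
+
+       /* typically from module init/exit */
+       ntb_register_client(&my_client);
+       ntb_unregister_client(&my_client);
+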
+### NTB Transport Client (ntb\_transport) and NTB Netdev (ntb\_netdev)
+
+The primary client for NTB is the Transport client, used in tandem with NTB
+Netdev.  These drivers function together to create a logical link to the peer,
+across the ntb, to exchange packets of network data.  The Transport client
+establishes a logical link to the peer, and creates queue pairs to exchange
+messages and data.  The NTB Netdev then creates an ethernet device using a
+Transport queue pair.  Network data is copied between socket buffers and the
+Transport queue pair buffer.  The Transport client may be used for other things
+besides Netdev; however, no other applications have yet been written.
+
+### NTB Ping Pong Test Client (ntb\_pingpong)
+
+The Ping Pong test client serves as a demonstration to exercise the doorbell
+and scratchpad registers of NTB hardware, and as an example simple NTB client.
+Ping Pong enables the link when started, waits for the NTB link to come up, and
+then proceeds to read and write the doorbell scratchpad registers of the NTB.
+The peers interrupt each other using a bit mask of doorbell bits, which is
+shifted by one in each round, to test the behavior of multiple doorbell bits
+and interrupt vectors.  The Ping Pong driver also reads the first local
+scratchpad, and writes the value plus one to the first peer scratchpad, each
+round before writing the peer doorbell register.
+
+Module Parameters:
+
+* unsafe - Some hardware has known issues with scratchpad and doorbell
+       registers.  By default, Ping Pong will not attempt to exercise such
+       hardware.  You may override this behavior at your own risk by setting
+       unsafe=1.
+* delay\_ms - Specify the delay between receiving a doorbell
+       interrupt event and setting the peer doorbell register for the next
+       round.
+* init\_db - Specify the doorbell bits to start new series of rounds.  A new
+       series begins once all the doorbell bits have been shifted out of
+       range.
+* dyndbg - It is suggested to specify dyndbg=+p when loading this module, and
+       then to observe debugging output on the console.
+
+### NTB Tool Test Client (ntb\_tool)
+
+The Tool test client serves primarily for debugging ntb hardware and drivers.
+The Tool provides access through debugfs for reading, setting, and clearing the
+NTB doorbell, and reading and writing scratchpads.
+
+The Tool does not currently have any module parameters.
+
+Debugfs Files:
+
+* *debugfs*/ntb\_tool/*hw*/ - A directory in debugfs will be created for each
+       NTB device probed by the tool.  This directory is shortened to *hw*
+       below.
+* *hw*/db - This file is used to read, set, and clear the local doorbell.  Not
+       all operations may be supported by all hardware.  To read the doorbell,
+       read the file.  To set the doorbell, write `s` followed by the bits to
+       set (eg: `echo 's 0x0101' > db`).  To clear the doorbell, write `c`
+       followed by the bits to clear.
+* *hw*/mask - This file is used to read, set, and clear the local doorbell mask.
+       See *db* for details.
+* *hw*/peer\_db - This file is used to read, set, and clear the peer doorbell.
+       See *db* for details.
+* *hw*/peer\_mask - This file is used to read, set, and clear the peer doorbell
+       mask.  See *db* for details.
+* *hw*/spad - This file is used to read and write local scratchpads.  To read
+       the values of all scratchpads, read the file.  To write values, write a
+       series of pairs of scratchpad number and value
+       (eg: `echo '4 0x123 7 0xabc' > spad`
+       # to set scratchpads `4` and `7` to `0x123` and `0xabc`, respectively).
+* *hw*/peer\_spad - This file is used to read and write peer scratchpads.  See
+       *spad* for details.
+
+## NTB Hardware Drivers
+
+NTB hardware drivers should register devices with the NTB core driver.  After
+registering, clients' probe and remove functions will be called.
+
+### NTB Intel Hardware Driver (ntb\_hw\_intel)
+
+The Intel hardware driver supports NTB on Xeon and Atom CPUs.
+
+Module Parameters:
+
+* b2b\_mw\_idx - If the peer ntb is to be accessed via a memory window, then use
+       this memory window to access the peer ntb.  A value of zero or positive
+       starts from the first mw idx, and a negative value starts from the last
+       mw idx.  Both sides MUST set the same value here!  The default value is
+       `-1`.
+* b2b\_mw\_share - If the peer ntb is to be accessed via a memory window, and if
+       the memory window is large enough, still allow the client to use the
+       second half of the memory window for address translation to the peer.
+* xeon\_b2b\_usd\_bar2\_addr64 - If using B2B topology on Xeon hardware, use
+       this 64 bit address on the bus between the NTB devices for the window
+       at BAR2, on the upstream side of the link.
+* xeon\_b2b\_usd\_bar4\_addr64 - See *xeon\_b2b\_usd\_bar2\_addr64*.
+* xeon\_b2b\_usd\_bar4\_addr32 - See *xeon\_b2b\_usd\_bar2\_addr64*.
+* xeon\_b2b\_usd\_bar5\_addr32 - See *xeon\_b2b\_usd\_bar2\_addr64*.
+* xeon\_b2b\_dsd\_bar2\_addr64 - See *xeon\_b2b\_usd\_bar2\_addr64*.
+* xeon\_b2b\_dsd\_bar4\_addr64 - See *xeon\_b2b\_usd\_bar2\_addr64*.
+* xeon\_b2b\_dsd\_bar4\_addr32 - See *xeon\_b2b\_usd\_bar2\_addr64*.
+* xeon\_b2b\_dsd\_bar5\_addr32 - See *xeon\_b2b\_usd\_bar2\_addr64*.
index e6469fdcf89a0344b017f90c821019750d8e9039..ef0219fa4bb4cf5beb9078293a92b3ccbcbe0d48 100644 (file)
@@ -51,6 +51,12 @@ cost.
         rproc_shutdown() returns, and users can still use it with a subsequent
         rproc_boot(), if needed.
 
+  struct rproc *rproc_get_by_phandle(phandle phandle)
+    - Find an rproc handle using a device tree phandle. Returns the rproc
+      handle on success, and NULL on failure. This function increments
+      the remote processor's refcount, so always use rproc_put() to
+      decrement it back once rproc isn't needed anymore.
+
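+      As an illustration (the "example,rproc" phandle property name is made
+      up for this sketch; np is the consumer's device tree node):
+
+        u32 ph;
+        struct rproc *rproc;
+        int ret;
+
+        if (of_property_read_u32(np, "example,rproc", &ph))
+                return -ENODEV;
+
+        rproc = rproc_get_by_phandle(ph);
+        if (!rproc)
+                return -EPROBE_DEFER;
+
+        ret = rproc_boot(rproc);
+        /* ... use the remote processor ... */
+        rproc_shutdown(rproc);
+        rproc_put(rproc);
+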
 3. Typical usage
 
 #include <linux/remoteproc.h>
index 6085e1f19c9d59fa49b5a3d4d369da6426ae526e..949de191fcdc1939c9b160eb7a809afcdbc847af 100755 (executable)
@@ -50,15 +50,6 @@ def tcm_mod_build_FC_include(fabric_mod_dir_var, fabric_mod_name):
        buf = "#define " + fabric_mod_name.upper() + "_VERSION  \"v0.1\"\n"
        buf += "#define " + fabric_mod_name.upper() + "_NAMELEN 32\n"
        buf += "\n"
-       buf += "struct " + fabric_mod_name + "_nacl {\n"
-       buf += "        /* Binary World Wide unique Port Name for FC Initiator Nport */\n"
-       buf += "        u64 nport_wwpn;\n"
-       buf += "        /* ASCII formatted WWPN for FC Initiator Nport */\n"
-       buf += "        char nport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
-       buf += "        /* Returned by " + fabric_mod_name + "_make_nodeacl() */\n"
-       buf += "        struct se_node_acl se_node_acl;\n"
-       buf += "};\n"
-       buf += "\n"
        buf += "struct " + fabric_mod_name + "_tpg {\n"
        buf += "        /* FC lport target portal group tag for TCM */\n"
        buf += "        u16 lport_tpgt;\n"
@@ -69,8 +60,6 @@ def tcm_mod_build_FC_include(fabric_mod_dir_var, fabric_mod_name):
        buf += "};\n"
        buf += "\n"
        buf += "struct " + fabric_mod_name + "_lport {\n"
-       buf += "        /* SCSI protocol the lport is providing */\n"
-       buf += "        u8 lport_proto_id;\n"
        buf += "        /* Binary World Wide unique Port Name for FC Target Lport */\n"
        buf += "        u64 lport_wwpn;\n"
        buf += "        /* ASCII formatted WWPN for FC Target Lport */\n"
@@ -105,14 +94,6 @@ def tcm_mod_build_SAS_include(fabric_mod_dir_var, fabric_mod_name):
        buf = "#define " + fabric_mod_name.upper() + "_VERSION  \"v0.1\"\n"
        buf += "#define " + fabric_mod_name.upper() + "_NAMELEN 32\n"
        buf += "\n"
-       buf += "struct " + fabric_mod_name + "_nacl {\n"
-       buf += "        /* Binary World Wide unique Port Name for SAS Initiator port */\n"
-       buf += "        u64 iport_wwpn;\n"
-       buf += "        /* ASCII formatted WWPN for Sas Initiator port */\n"
-       buf += "        char iport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
-       buf += "        /* Returned by " + fabric_mod_name + "_make_nodeacl() */\n"
-       buf += "        struct se_node_acl se_node_acl;\n"
-       buf += "};\n\n"
        buf += "struct " + fabric_mod_name + "_tpg {\n"
        buf += "        /* SAS port target portal group tag for TCM */\n"
        buf += "        u16 tport_tpgt;\n"
@@ -122,8 +103,6 @@ def tcm_mod_build_SAS_include(fabric_mod_dir_var, fabric_mod_name):
        buf += "        struct se_portal_group se_tpg;\n"
        buf += "};\n\n"
        buf += "struct " + fabric_mod_name + "_tport {\n"
-       buf += "        /* SCSI protocol the tport is providing */\n"
-       buf += "        u8 tport_proto_id;\n"
        buf += "        /* Binary World Wide unique Port Name for SAS Target port */\n"
        buf += "        u64 tport_wwpn;\n"
        buf += "        /* ASCII formatted WWPN for SAS Target port */\n"
@@ -158,12 +137,6 @@ def tcm_mod_build_iSCSI_include(fabric_mod_dir_var, fabric_mod_name):
        buf = "#define " + fabric_mod_name.upper() + "_VERSION  \"v0.1\"\n"
        buf += "#define " + fabric_mod_name.upper() + "_NAMELEN 32\n"
        buf += "\n"
-       buf += "struct " + fabric_mod_name + "_nacl {\n"
-       buf += "        /* ASCII formatted InitiatorName */\n"
-       buf += "        char iport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
-       buf += "        /* Returned by " + fabric_mod_name + "_make_nodeacl() */\n"
-       buf += "        struct se_node_acl se_node_acl;\n"
-       buf += "};\n\n"
        buf += "struct " + fabric_mod_name + "_tpg {\n"
        buf += "        /* iSCSI target portal group tag for TCM */\n"
        buf += "        u16 tport_tpgt;\n"
@@ -173,8 +146,6 @@ def tcm_mod_build_iSCSI_include(fabric_mod_dir_var, fabric_mod_name):
        buf += "        struct se_portal_group se_tpg;\n"
        buf += "};\n\n"
        buf += "struct " + fabric_mod_name + "_tport {\n"
-       buf += "        /* SCSI protocol the tport is providing */\n"
-       buf += "        u8 tport_proto_id;\n"
        buf += "        /* ASCII formatted TargetName for IQN */\n"
        buf += "        char tport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
        buf += "        /* Returned by " + fabric_mod_name + "_make_tport() */\n"
@@ -232,61 +203,12 @@ def tcm_mod_build_configfs(proto_ident, fabric_mod_dir_var, fabric_mod_name):
        buf += "#include <target/target_core_base.h>\n"
        buf += "#include <target/target_core_fabric.h>\n"
        buf += "#include <target/target_core_fabric_configfs.h>\n"
-       buf += "#include <target/target_core_configfs.h>\n"
        buf += "#include <target/configfs_macros.h>\n\n"
        buf += "#include \"" + fabric_mod_name + "_base.h\"\n"
        buf += "#include \"" + fabric_mod_name + "_fabric.h\"\n\n"
 
        buf += "static const struct target_core_fabric_ops " + fabric_mod_name + "_ops;\n\n"
 
-       buf += "static struct se_node_acl *" + fabric_mod_name + "_make_nodeacl(\n"
-       buf += "        struct se_portal_group *se_tpg,\n"
-       buf += "        struct config_group *group,\n"
-       buf += "        const char *name)\n"
-       buf += "{\n"
-       buf += "        struct se_node_acl *se_nacl, *se_nacl_new;\n"
-       buf += "        struct " + fabric_mod_name + "_nacl *nacl;\n"
-
-       if proto_ident == "FC" or proto_ident == "SAS":
-               buf += "        u64 wwpn = 0;\n"
-
-       buf += "        u32 nexus_depth;\n\n"
-       buf += "        /* " + fabric_mod_name + "_parse_wwn(name, &wwpn, 1) < 0)\n"
-       buf += "                return ERR_PTR(-EINVAL); */\n"
-       buf += "        se_nacl_new = " + fabric_mod_name + "_alloc_fabric_acl(se_tpg);\n"
-       buf += "        if (!se_nacl_new)\n"
-       buf += "                return ERR_PTR(-ENOMEM);\n"
-       buf += "//#warning FIXME: Hardcoded nexus depth in " + fabric_mod_name + "_make_nodeacl()\n"
-       buf += "        nexus_depth = 1;\n"
-       buf += "        /*\n"
-       buf += "         * se_nacl_new may be released by core_tpg_add_initiator_node_acl()\n"
-       buf += "         * when converting a NodeACL from demo mode -> explict\n"
-       buf += "         */\n"
-       buf += "        se_nacl = core_tpg_add_initiator_node_acl(se_tpg, se_nacl_new,\n"
-       buf += "                                name, nexus_depth);\n"
-       buf += "        if (IS_ERR(se_nacl)) {\n"
-       buf += "                " + fabric_mod_name + "_release_fabric_acl(se_tpg, se_nacl_new);\n"
-       buf += "                return se_nacl;\n"
-       buf += "        }\n"
-       buf += "        /*\n"
-       buf += "         * Locate our struct " + fabric_mod_name + "_nacl and set the FC Nport WWPN\n"
-       buf += "         */\n"
-       buf += "        nacl = container_of(se_nacl, struct " + fabric_mod_name + "_nacl, se_node_acl);\n"
-
-       if proto_ident == "FC" or proto_ident == "SAS":
-               buf += "        nacl->" + fabric_mod_init_port + "_wwpn = wwpn;\n"
-
-       buf += "        /* " + fabric_mod_name + "_format_wwn(&nacl->" + fabric_mod_init_port + "_name[0], " + fabric_mod_name.upper() + "_NAMELEN, wwpn); */\n\n"
-       buf += "        return se_nacl;\n"
-       buf += "}\n\n"
-       buf += "static void " + fabric_mod_name + "_drop_nodeacl(struct se_node_acl *se_acl)\n"
-       buf += "{\n"
-       buf += "        struct " + fabric_mod_name + "_nacl *nacl = container_of(se_acl,\n"
-       buf += "                                struct " + fabric_mod_name + "_nacl, se_node_acl);\n"
-       buf += "        core_tpg_del_initiator_node_acl(se_acl->se_tpg, se_acl, 1);\n"
-       buf += "        kfree(nacl);\n"
-       buf += "}\n\n"
-
        buf += "static struct se_portal_group *" + fabric_mod_name + "_make_tpg(\n"
        buf += "        struct se_wwn *wwn,\n"
        buf += "        struct config_group *group,\n"
@@ -309,8 +231,7 @@ def tcm_mod_build_configfs(proto_ident, fabric_mod_dir_var, fabric_mod_name):
        buf += "        tpg->" + fabric_mod_port + " = " + fabric_mod_port + ";\n"
        buf += "        tpg->" + fabric_mod_port + "_tpgt = tpgt;\n\n"
        buf += "        ret = core_tpg_register(&" + fabric_mod_name + "_ops, wwn,\n"
-       buf += "                                &tpg->se_tpg, tpg,\n"
-       buf += "                                TRANSPORT_TPG_TYPE_NORMAL);\n"
+       buf += "                                &tpg->se_tpg, SCSI_PROTOCOL_SAS);\n"
        buf += "        if (ret < 0) {\n"
        buf += "                kfree(tpg);\n"
        buf += "                return NULL;\n"
@@ -372,21 +293,13 @@ def tcm_mod_build_configfs(proto_ident, fabric_mod_dir_var, fabric_mod_name):
        buf += "static const struct target_core_fabric_ops " + fabric_mod_name + "_ops = {\n"
        buf += "        .module                         = THIS_MODULE,\n"
        buf += "        .name                           = " + fabric_mod_name + ",\n"
-       buf += "        .get_fabric_proto_ident         = " + fabric_mod_name + "_get_fabric_proto_ident,\n"
        buf += "        .get_fabric_name                = " + fabric_mod_name + "_get_fabric_name,\n"
-       buf += "        .get_fabric_proto_ident         = " + fabric_mod_name + "_get_fabric_proto_ident,\n"
        buf += "        .tpg_get_wwn                    = " + fabric_mod_name + "_get_fabric_wwn,\n"
        buf += "        .tpg_get_tag                    = " + fabric_mod_name + "_get_tag,\n"
-       buf += "        .tpg_get_default_depth          = " + fabric_mod_name + "_get_default_depth,\n"
-       buf += "        .tpg_get_pr_transport_id        = " + fabric_mod_name + "_get_pr_transport_id,\n"
-       buf += "        .tpg_get_pr_transport_id_len    = " + fabric_mod_name + "_get_pr_transport_id_len,\n"
-       buf += "        .tpg_parse_pr_out_transport_id  = " + fabric_mod_name + "_parse_pr_out_transport_id,\n"
        buf += "        .tpg_check_demo_mode            = " + fabric_mod_name + "_check_false,\n"
        buf += "        .tpg_check_demo_mode_cache      = " + fabric_mod_name + "_check_true,\n"
        buf += "        .tpg_check_demo_mode_write_protect = " + fabric_mod_name + "_check_true,\n"
        buf += "        .tpg_check_prod_mode_write_protect = " + fabric_mod_name + "_check_false,\n"
-       buf += "        .tpg_alloc_fabric_acl           = " + fabric_mod_name + "_alloc_fabric_acl,\n"
-       buf += "        .tpg_release_fabric_acl         = " + fabric_mod_name + "_release_fabric_acl,\n"
        buf += "        .tpg_get_inst_index             = " + fabric_mod_name + "_tpg_get_inst_index,\n"
        buf += "        .release_cmd                    = " + fabric_mod_name + "_release_cmd,\n"
        buf += "        .shutdown_session               = " + fabric_mod_name + "_shutdown_session,\n"
@@ -396,7 +309,6 @@ def tcm_mod_build_configfs(proto_ident, fabric_mod_dir_var, fabric_mod_name):
        buf += "        .write_pending                  = " + fabric_mod_name + "_write_pending,\n"
        buf += "        .write_pending_status           = " + fabric_mod_name + "_write_pending_status,\n"
        buf += "        .set_default_node_attributes    = " + fabric_mod_name + "_set_default_node_attrs,\n"
-       buf += "        .get_task_tag                   = " + fabric_mod_name + "_get_task_tag,\n"
        buf += "        .get_cmd_state                  = " + fabric_mod_name + "_get_cmd_state,\n"
        buf += "        .queue_data_in                  = " + fabric_mod_name + "_queue_data_in,\n"
        buf += "        .queue_status                   = " + fabric_mod_name + "_queue_status,\n"
@@ -409,12 +321,6 @@ def tcm_mod_build_configfs(proto_ident, fabric_mod_dir_var, fabric_mod_name):
        buf += "        .fabric_drop_wwn                = " + fabric_mod_name + "_drop_" + fabric_mod_port + ",\n"
        buf += "        .fabric_make_tpg                = " + fabric_mod_name + "_make_tpg,\n"
        buf += "        .fabric_drop_tpg                = " + fabric_mod_name + "_drop_tpg,\n"
-       buf += "        .fabric_post_link               = NULL,\n"
-       buf += "        .fabric_pre_unlink              = NULL,\n"
-       buf += "        .fabric_make_np                 = NULL,\n"
-       buf += "        .fabric_drop_np                 = NULL,\n"
-       buf += "        .fabric_make_nodeacl            = " + fabric_mod_name + "_make_nodeacl,\n"
-       buf += "        .fabric_drop_nodeacl            = " + fabric_mod_name + "_drop_nodeacl,\n"
        buf += "\n"
        buf += "        .tfc_wwn_attrs                  = " + fabric_mod_name + "_wwn_attrs;\n"
        buf += "};\n\n"
@@ -507,7 +413,6 @@ def tcm_mod_dump_fabric_ops(proto_ident, fabric_mod_dir_var, fabric_mod_name):
        buf += "#include <scsi/scsi_proto.h>\n"
        buf += "#include <target/target_core_base.h>\n"
        buf += "#include <target/target_core_fabric.h>\n"
-       buf += "#include <target/target_core_configfs.h>\n\n"
        buf += "#include \"" + fabric_mod_name + "_base.h\"\n"
        buf += "#include \"" + fabric_mod_name + "_fabric.h\"\n\n"
 
@@ -539,35 +444,6 @@ def tcm_mod_dump_fabric_ops(proto_ident, fabric_mod_dir_var, fabric_mod_name):
                        bufi += "char *" + fabric_mod_name + "_get_fabric_name(void);\n"
                        continue
 
-               if re.search('get_fabric_proto_ident', fo):
-                       buf += "u8 " + fabric_mod_name + "_get_fabric_proto_ident(struct se_portal_group *se_tpg)\n"
-                       buf += "{\n"
-                       buf += "        struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
-                       buf += "                                struct " + fabric_mod_name + "_tpg, se_tpg);\n"
-                       buf += "        struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n"
-                       buf += "        u8 proto_id;\n\n"
-                       buf += "        switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n"
-                       if proto_ident == "FC":
-                               buf += "        case SCSI_PROTOCOL_FCP:\n"
-                               buf += "        default:\n"
-                               buf += "                proto_id = fc_get_fabric_proto_ident(se_tpg);\n"
-                               buf += "                break;\n"
-                       elif proto_ident == "SAS":
-                               buf += "        case SCSI_PROTOCOL_SAS:\n"
-                               buf += "        default:\n"
-                               buf += "                proto_id = sas_get_fabric_proto_ident(se_tpg);\n"
-                               buf += "                break;\n"
-                       elif proto_ident == "iSCSI":
-                               buf += "        case SCSI_PROTOCOL_ISCSI:\n"
-                               buf += "        default:\n"
-                               buf += "                proto_id = iscsi_get_fabric_proto_ident(se_tpg);\n"
-                               buf += "                break;\n"
-
-                       buf += "        }\n\n"
-                       buf += "        return proto_id;\n"
-                       buf += "}\n\n"
-                       bufi += "u8 " + fabric_mod_name + "_get_fabric_proto_ident(struct se_portal_group *);\n"
-
                if re.search('get_wwn', fo):
                        buf += "char *" + fabric_mod_name + "_get_fabric_wwn(struct se_portal_group *se_tpg)\n"
                        buf += "{\n"
@@ -587,150 +463,6 @@ def tcm_mod_dump_fabric_ops(proto_ident, fabric_mod_dir_var, fabric_mod_name):
                        buf += "}\n\n"
                        bufi += "u16 " + fabric_mod_name + "_get_tag(struct se_portal_group *);\n"
 
-               if re.search('get_default_depth', fo):
-                       buf += "u32 " + fabric_mod_name + "_get_default_depth(struct se_portal_group *se_tpg)\n"
-                       buf += "{\n"
-                       buf += "        return 1;\n"
-                       buf += "}\n\n"
-                       bufi += "u32 " + fabric_mod_name + "_get_default_depth(struct se_portal_group *);\n"
-
-               if re.search('get_pr_transport_id\)\(', fo):
-                       buf += "u32 " + fabric_mod_name + "_get_pr_transport_id(\n"
-                       buf += "        struct se_portal_group *se_tpg,\n"
-                       buf += "        struct se_node_acl *se_nacl,\n"
-                       buf += "        struct t10_pr_registration *pr_reg,\n"
-                       buf += "        int *format_code,\n"
-                       buf += "        unsigned char *buf)\n"
-                       buf += "{\n"
-                       buf += "        struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
-                       buf += "                                struct " + fabric_mod_name + "_tpg, se_tpg);\n"
-                       buf += "        struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n"
-                       buf += "        int ret = 0;\n\n"
-                       buf += "        switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n"
-                       if proto_ident == "FC":
-                               buf += "        case SCSI_PROTOCOL_FCP:\n"
-                               buf += "        default:\n"
-                               buf += "                ret = fc_get_pr_transport_id(se_tpg, se_nacl, pr_reg,\n"
-                               buf += "                                        format_code, buf);\n"
-                               buf += "                break;\n"
-                       elif proto_ident == "SAS":
-                               buf += "        case SCSI_PROTOCOL_SAS:\n"
-                               buf += "        default:\n"
-                               buf += "                ret = sas_get_pr_transport_id(se_tpg, se_nacl, pr_reg,\n"
-                               buf += "                                        format_code, buf);\n"
-                               buf += "                break;\n"
-                       elif proto_ident == "iSCSI":
-                               buf += "        case SCSI_PROTOCOL_ISCSI:\n"
-                               buf += "        default:\n"
-                               buf += "                ret = iscsi_get_pr_transport_id(se_tpg, se_nacl, pr_reg,\n"
-                               buf += "                                        format_code, buf);\n"
-                               buf += "                break;\n"
-
-                       buf += "        }\n\n"
-                       buf += "        return ret;\n"
-                       buf += "}\n\n"
-                       bufi += "u32 " + fabric_mod_name + "_get_pr_transport_id(struct se_portal_group *,\n"
-                       bufi += "                       struct se_node_acl *, struct t10_pr_registration *,\n"
-                       bufi += "                       int *, unsigned char *);\n"
-
-               if re.search('get_pr_transport_id_len\)\(', fo):
-                       buf += "u32 " + fabric_mod_name + "_get_pr_transport_id_len(\n"
-                       buf += "        struct se_portal_group *se_tpg,\n"
-                       buf += "        struct se_node_acl *se_nacl,\n"
-                       buf += "        struct t10_pr_registration *pr_reg,\n"
-                       buf += "        int *format_code)\n"
-                       buf += "{\n"
-                       buf += "        struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
-                       buf += "                                struct " + fabric_mod_name + "_tpg, se_tpg);\n"
-                       buf += "        struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n"
-                       buf += "        int ret = 0;\n\n"
-                       buf += "        switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n"
-                       if proto_ident == "FC":
-                               buf += "        case SCSI_PROTOCOL_FCP:\n"
-                               buf += "        default:\n"
-                               buf += "                ret = fc_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,\n"
-                               buf += "                                        format_code);\n"
-                               buf += "                break;\n"
-                       elif proto_ident == "SAS":
-                               buf += "        case SCSI_PROTOCOL_SAS:\n"
-                               buf += "        default:\n"
-                               buf += "                ret = sas_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,\n"
-                               buf += "                                        format_code);\n"
-                               buf += "                break;\n"
-                       elif proto_ident == "iSCSI":
-                               buf += "        case SCSI_PROTOCOL_ISCSI:\n"
-                               buf += "        default:\n"
-                               buf += "                ret = iscsi_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,\n"
-                               buf += "                                        format_code);\n"
-                               buf += "                break;\n"
-
-
-                       buf += "        }\n\n"
-                       buf += "        return ret;\n"
-                       buf += "}\n\n"
-                       bufi += "u32 " + fabric_mod_name + "_get_pr_transport_id_len(struct se_portal_group *,\n"
-                       bufi += "                       struct se_node_acl *, struct t10_pr_registration *,\n"
-                       bufi += "                       int *);\n"
-
-               if re.search('parse_pr_out_transport_id\)\(', fo):
-                       buf += "char *" + fabric_mod_name + "_parse_pr_out_transport_id(\n"
-                       buf += "        struct se_portal_group *se_tpg,\n"
-                       buf += "        const char *buf,\n"
-                       buf += "        u32 *out_tid_len,\n"
-                       buf += "        char **port_nexus_ptr)\n"
-                       buf += "{\n"
-                       buf += "        struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
-                       buf += "                                struct " + fabric_mod_name + "_tpg, se_tpg);\n"
-                       buf += "        struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n"
-                       buf += "        char *tid = NULL;\n\n"
-                       buf += "        switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n"
-                       if proto_ident == "FC":
-                               buf += "        case SCSI_PROTOCOL_FCP:\n"
-                               buf += "        default:\n"
-                               buf += "                tid = fc_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,\n"
-                               buf += "                                        port_nexus_ptr);\n"
-                       elif proto_ident == "SAS":
-                               buf += "        case SCSI_PROTOCOL_SAS:\n"
-                               buf += "        default:\n"
-                               buf += "                tid = sas_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,\n"
-                               buf += "                                        port_nexus_ptr);\n"
-                       elif proto_ident == "iSCSI":
-                               buf += "        case SCSI_PROTOCOL_ISCSI:\n"
-                               buf += "        default:\n"
-                               buf += "                tid = iscsi_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,\n"
-                               buf += "                                        port_nexus_ptr);\n"
-
-                       buf += "        }\n\n"
-                       buf += "        return tid;\n"
-                       buf += "}\n\n"
-                       bufi += "char *" + fabric_mod_name + "_parse_pr_out_transport_id(struct se_portal_group *,\n"
-                       bufi += "                       const char *, u32 *, char **);\n"
-
-               if re.search('alloc_fabric_acl\)\(', fo):
-                       buf += "struct se_node_acl *" + fabric_mod_name + "_alloc_fabric_acl(struct se_portal_group *se_tpg)\n"
-                       buf += "{\n"
-                       buf += "        struct " + fabric_mod_name + "_nacl *nacl;\n\n"
-                       buf += "        nacl = kzalloc(sizeof(struct " + fabric_mod_name + "_nacl), GFP_KERNEL);\n"
-                       buf += "        if (!nacl) {\n"
-                       buf += "                printk(KERN_ERR \"Unable to allocate struct " + fabric_mod_name + "_nacl\\n\");\n"
-                       buf += "                return NULL;\n"
-                       buf += "        }\n\n"
-                       buf += "        return &nacl->se_node_acl;\n"
-                       buf += "}\n\n"
-                       bufi += "struct se_node_acl *" + fabric_mod_name + "_alloc_fabric_acl(struct se_portal_group *);\n"
-
-               if re.search('release_fabric_acl\)\(', fo):
-                       buf += "void " + fabric_mod_name + "_release_fabric_acl(\n"
-                       buf += "        struct se_portal_group *se_tpg,\n"
-                       buf += "        struct se_node_acl *se_nacl)\n"
-                       buf += "{\n"
-                       buf += "        struct " + fabric_mod_name + "_nacl *nacl = container_of(se_nacl,\n"
-                       buf += "                        struct " + fabric_mod_name + "_nacl, se_node_acl);\n"
-                       buf += "        kfree(nacl);\n"
-                       buf += "}\n\n"
-                       bufi += "void " + fabric_mod_name + "_release_fabric_acl(struct se_portal_group *,\n"
-                       bufi += "                       struct se_node_acl *);\n"
-
                if re.search('tpg_get_inst_index\)\(', fo):
                        buf += "u32 " + fabric_mod_name + "_tpg_get_inst_index(struct se_portal_group *se_tpg)\n"
                        buf += "{\n"
@@ -787,13 +519,6 @@ def tcm_mod_dump_fabric_ops(proto_ident, fabric_mod_dir_var, fabric_mod_name):
                        buf += "}\n\n"
                        bufi += "void " + fabric_mod_name + "_set_default_node_attrs(struct se_node_acl *);\n"
 
-               if re.search('get_task_tag\)\(', fo):
-                       buf += "u32 " + fabric_mod_name + "_get_task_tag(struct se_cmd *se_cmd)\n"
-                       buf += "{\n"
-                       buf += "        return 0;\n"
-                       buf += "}\n\n"
-                       bufi += "u32 " + fabric_mod_name + "_get_task_tag(struct se_cmd *);\n"
-
                if re.search('get_cmd_state\)\(', fo):
                        buf += "int " + fabric_mod_name + "_get_cmd_state(struct se_cmd *se_cmd)\n"
                        buf += "{\n"
index 84533d8e747f298d557a4d18159a8a01a96db141..ae22f70055403d70425711e5fbda19eeea823a94 100644 (file)
@@ -13,8 +13,8 @@ fabric skeleton, by simply using:
 This script will create a new drivers/target/$TCM_NEW_MOD/, and will do the following
 
        *) Generate new API callers for drivers/target/target_core_fabric_configs.c logic
-          ->make_nodeacl(), ->drop_nodeacl(), ->make_tpg(), ->drop_tpg()
-          ->make_wwn(), ->drop_wwn().  These are created into $TCM_NEW_MOD/$TCM_NEW_MOD_configfs.c
+          ->make_tpg(), ->drop_tpg(), ->make_wwn(), ->drop_wwn().  These are created
+          into $TCM_NEW_MOD/$TCM_NEW_MOD_configfs.c
        *) Generate basic infrastructure for loading/unloading LKMs and TCM/ConfigFS fabric module
           using a skeleton struct target_core_fabric_ops API template.
        *) Based on user defined T10 Proto_Ident for the new fabric module being built,
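
   As an illustration of what these generated callers look like, a sketch of
   the ->make_tpg()/->drop_tpg() pair for a hypothetical fabric named "demo"
   is shown below.  This is distilled from the generator script above, not
   verbatim script output; the tpgt parsing and the struct demo_tpg layout
   are illustrative assumptions, and the code would live in the generated
   demo_configfs.c together with the target_core_base.h/target_core_fabric.h
   includes shown earlier:

	static struct se_portal_group *demo_make_tpg(struct se_wwn *wwn,
						     struct config_group *group,
						     const char *name)
	{
		struct demo_tpg *tpg;
		unsigned long tpgt;
		int ret;

		/* Expect a "tpgt_<N>" config_group name (illustrative parsing) */
		if (strstr(name, "tpgt_") != name ||
		    kstrtoul(name + 5, 10, &tpgt) || tpgt > UINT_MAX)
			return ERR_PTR(-EINVAL);

		tpg = kzalloc(sizeof(*tpg), GFP_KERNEL);
		if (!tpg)
			return ERR_PTR(-ENOMEM);
		tpg->tpgt = tpgt;

		ret = core_tpg_register(&demo_ops, wwn, &tpg->se_tpg,
					SCSI_PROTOCOL_SAS);
		if (ret < 0) {
			kfree(tpg);
			return NULL;
		}
		return &tpg->se_tpg;
	}

	static void demo_drop_tpg(struct se_portal_group *se_tpg)
	{
		struct demo_tpg *tpg = container_of(se_tpg,
					struct demo_tpg, se_tpg);

		core_tpg_deregister(se_tpg);
		kfree(tpg);
	}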
index 263b907517ac2cd14e3b8472f4bc23f4aa8aae07..bef81e42788f2b8e48238b63318afd556e843979 100644 (file)
@@ -152,7 +152,7 @@ overall shared memory region, not the entry. The data in/out buffers
 are accessible via the req.iov[] array. iov_cnt contains the number of
 entries in iov[] needed to describe either the Data-In or Data-Out
 buffers. For bidirectional commands, iov_cnt specifies how many iovec
-entries cover the Data-Out area, and iov_bidi_count specifies how many
+entries cover the Data-Out area, and iov_bidi_cnt specifies how many
 iovec entries immediately after that in iov[] cover the Data-In
 area. Just like other fields, iov.iov_base is an offset from the start
 of the region.
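
For illustration, a userspace handler could walk a bidirectional command's
iovecs roughly as follows.  This is a minimal sketch: handle_data_out() and
handle_data_in() are hypothetical helpers, and the field names/types are
assumed to match the tcmu uapi header.

	#include <stddef.h>
	#include <stdint.h>
	#include <sys/uio.h>
	#include <linux/target_core_user.h>

	void handle_data_out(void *buf, size_t len);	/* hypothetical */
	void handle_data_in(void *buf, size_t len);	/* hypothetical */

	/* mb_base is the mmap()ed start of the shared memory region;
	 * iov_base values are offsets from that base, as described above. */
	static void walk_bidi_iovecs(void *mb_base, struct tcmu_cmd_entry *ent)
	{
		struct iovec *iov = ent->req.iov;
		uint32_t i;

		/* the first iov_cnt entries cover the Data-Out area ... */
		for (i = 0; i < ent->req.iov_cnt; i++)
			handle_data_out((char *)mb_base + (uintptr_t)iov[i].iov_base,
					iov[i].iov_len);

		/* ... the next iov_bidi_cnt entries cover the Data-In area */
		for (; i < ent->req.iov_cnt + ent->req.iov_bidi_cnt; i++)
			handle_data_in((char *)mb_base + (uintptr_t)iov[i].iov_base,
				       iov[i].iov_len);
	}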
index a0438f3957cad82a29ee8f90753709e9d673056c..d8b0d336770617208a29ab832ef66f9866f38f34 100644 (file)
@@ -36,6 +36,10 @@ The watchdog_unregister_device routine deregisters a registered watchdog timer
 device. The parameter of this routine is the pointer to the registered
 watchdog_device structure.
 
+The watchdog subsystem includes a registration deferral mechanism,
+which allows a watchdog to be registered as early as needed during
+the boot process.
+
 The watchdog device structure looks like this:
 
 struct watchdog_device {
@@ -52,6 +56,7 @@ struct watchdog_device {
        void *driver_data;
        struct mutex lock;
        unsigned long status;
+       struct list_head deferred;
 };
 
 It contains the following fields:
@@ -80,6 +85,8 @@ It contains following fields:
   information about the status of the device (Like: is the watchdog timer
   running/active, is the nowayout bit set, is the device opened via
   the /dev/watchdog interface or not, ...).
+* deferred: entry in wtd_deferred_reg_list which is used to
+  register watchdogs that were initialized early in the boot process.
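
For example, a driver whose watchdog must come up very early can simply
call watchdog_register_device() from its early init path; if the watchdog
core is not ready yet, the device is parked on the deferral list and the
registration is completed later.  A minimal sketch (my_wdt_info, my_wdt_ops
and the chosen initcall level are illustrative assumptions):

	#include <linux/init.h>
	#include <linux/watchdog.h>

	static const struct watchdog_info my_wdt_info = {
		.identity = "my_wdt",		/* hypothetical identity */
	};

	static const struct watchdog_ops my_wdt_ops = {
		/* .owner, .start, .stop, ... would go here */
	};

	static struct watchdog_device my_wdd = {
		.info    = &my_wdt_info,
		.ops     = &my_wdt_ops,
		.timeout = 30,
	};

	static int __init my_wdt_init(void)
	{
		/* Safe even before the watchdog core has finished its own
		 * initialization: the core defers the registration. */
		return watchdog_register_device(&my_wdd);
	}
	subsys_initcall(my_wdt_init);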
 
 The list of watchdog operations is defined as:
 
index 692791cc674c44f20398c8b8f64bd652d6ea9f12..9f9ec9f76039404a15114c97fe67535fe41fa88c 100644 (file)
@@ -208,6 +208,9 @@ nowayout: Watchdog cannot be stopped once started
 -------------------------------------------------
 omap_wdt:
 timer_margin: initial watchdog timeout (in seconds)
+early_enable: Watchdog is started on module insertion (default=0)
+nowayout: Watchdog cannot be stopped once started
+       (default=kernel config parameter)
 -------------------------------------------------
 orion_wdt:
 heartbeat: Initial watchdog heartbeat in seconds
index 7c1f9fad667460ff143b867ca5ce5d629560e876..9da6f3512249621ce8bc02dfa6d3b9e5688c1042 100644 (file)
@@ -406,7 +406,7 @@ Protocol:   2.00+
        - If 0, the protected-mode code is loaded at 0x10000.
        - If 1, the protected-mode code is loaded at 0x100000.
 
-  Bit 1 (kernel internal): ALSR_FLAG
+  Bit 1 (kernel internal): KASLR_FLAG
        - Used internally by the compressed kernel to communicate
          KASLR status to kernel proper.
          If 1, KASLR enabled.
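
  For reference, kernel-proper code would typically test this along the
  following lines (a sketch, assuming the usual KASLR_FLAG/boot_params
  definitions from asm/bootparam.h; this is not code from the patch above):

	#include <linux/types.h>
	#include <asm/bootparam.h>

	static bool kaslr_was_enabled(const struct boot_params *bp)
	{
		/* KASLR_FLAG is bit 1 of hdr.loadflags */
		return bp->hdr.loadflags & KASLR_FLAG;
	}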
index 33884d15612599805f922f386474109ab44998ed..c1df8eba9dfd4dca604dd11c06b40fa4413bc89b 100644 (file)
@@ -1,14 +1,14 @@
 This file documents some of the kernel entries in
-arch/x86/kernel/entry_64.S.  A lot of this explanation is adapted from
+arch/x86/entry/entry_64.S.  A lot of this explanation is adapted from
 an email from Ingo Molnar:
 
 http://lkml.kernel.org/r/<20110529191055.GC9835%40elte.hu>
 
 The x86 architecture has quite a few different ways to jump into
 kernel code.  Most of these entry points are registered in
-arch/x86/kernel/traps.c and implemented in arch/x86/kernel/entry_64.S
-for 64-bit, arch/x86/kernel/entry_32.S for 32-bit and finally
-arch/x86/ia32/ia32entry.S which implements the 32-bit compatibility
+arch/x86/kernel/traps.c and implemented in arch/x86/entry/entry_64.S
+for 64-bit, arch/x86/entry/entry_32.S for 32-bit and finally
+arch/x86/entry/entry_64_compat.S which implements the 32-bit compatibility
 syscall entry points and thus provides for 32-bit processes the
 ability to execute syscalls when running on 64-bit kernels.
 
diff --git a/Kbuild b/Kbuild
index df99a5f53beb880482871e99453bf04ef2f0fb06..f55cefd9bf29a2fa2746f7039f2481dfdceadfc7 100644 (file)
--- a/Kbuild
+++ b/Kbuild
@@ -52,7 +52,6 @@ $(obj)/$(bounds-file): kernel/bounds.s FORCE
 
 timeconst-file := include/generated/timeconst.h
 
-#always  += $(timeconst-file)
 targets += $(timeconst-file)
 
 quiet_cmd_gentimeconst = GEN     $@
index 0e6b09150aad52720d8f98b1802e0a4005e688af..8133cefb6b6e28715197a86aad555c29edbb7aa1 100644 (file)
@@ -2026,10 +2026,10 @@ S:      Maintained
 F:     drivers/net/hamradio/baycom*
 
 BCACHE (BLOCK LAYER CACHE)
-M:     Kent Overstreet <kmo@daterainc.com>
+M:     Kent Overstreet <kent.overstreet@gmail.com>
 L:     linux-bcache@vger.kernel.org
 W:     http://bcache.evilpiepirate.org
-S:     Maintained:
+S:     Maintained
 F:     drivers/md/bcache/
 
 BDISP ST MEDIA DRIVER
@@ -2280,7 +2280,7 @@ S:        Maintained
 F:     arch/mips/bmips/*
 F:     arch/mips/include/asm/mach-bmips/*
 F:     arch/mips/kernel/*bmips*
-F:     arch/mips/boot/dts/bcm*.dts*
+F:     arch/mips/boot/dts/brcm/bcm*.dts*
 F:     drivers/irqchip/irq-bcm7*
 F:     drivers/irqchip/irq-brcmstb*
 
@@ -2339,7 +2339,7 @@ M:        Ray Jui <rjui@broadcom.com>
 L:     bcm-kernel-feedback-list@broadcom.com
 S:     Supported
 F:     drivers/gpio/gpio-bcm-kona.c
-F:     Documentation/devicetree/bindings/gpio/gpio-bcm-kona.txt
+F:     Documentation/devicetree/bindings/gpio/brcm,kona-gpio.txt
 
 BROADCOM NVRAM DRIVER
 M:     Rafał Miłecki <zajec5@gmail.com>
@@ -2763,7 +2763,7 @@ F:        Documentation/devicetree/bindings/media/coda.txt
 F:     drivers/media/platform/coda/
 
 COMMON CLK FRAMEWORK
-M:     Mike Turquette <mturquette@linaro.org>
+M:     Michael Turquette <mturquette@baylibre.com>
 M:     Stephen Boyd <sboyd@codeaurora.org>
 L:     linux-clk@vger.kernel.org
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/clk/linux.git
@@ -3216,6 +3216,11 @@ L:       platform-driver-x86@vger.kernel.org
 S:     Maintained
 F:     drivers/platform/x86/dell-laptop.c
 
+DELL LAPTOP RBTN DRIVER
+M:     Pali Rohár <pali.rohar@gmail.com>
+S:     Maintained
+F:     drivers/platform/x86/dell-rbtn.*
+
 DELL LAPTOP FREEFALL DRIVER
 M:     Pali Rohár <pali.rohar@gmail.com>
 S:     Maintained
@@ -4425,9 +4430,11 @@ FUSE: FILESYSTEM IN USERSPACE
 M:     Miklos Szeredi <miklos@szeredi.hu>
 L:     fuse-devel@lists.sourceforge.net
 W:     http://fuse.sourceforge.net/
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/mszeredi/fuse.git
 S:     Maintained
 F:     fs/fuse/
 F:     include/uapi/linux/fuse.h
+F:     Documentation/filesystems/fuse.txt
 
 FUTURE DOMAIN TMC-16x0 SCSI DRIVER (16-bit)
 M:     Rik Faith <faith@cs.unc.edu>
@@ -5285,11 +5292,10 @@ INTEL ASoC BDW/HSW DRIVERS
 M:     Jie Yang <yang.jie@linux.intel.com>
 L:     alsa-devel@alsa-project.org (moderated for non-subscribers)
 S:     Supported
-F:     sound/soc/intel/sst-haswell*
-F:     sound/soc/intel/sst-dsp*
-F:     sound/soc/intel/sst-firmware.c
-F:     sound/soc/intel/broadwell.c
-F:     sound/soc/intel/haswell.c
+F:     sound/soc/intel/common/sst-dsp*
+F:     sound/soc/intel/common/sst-firmware.c
+F:     sound/soc/intel/boards/broadwell.c
+F:     sound/soc/intel/haswell/
 
 INTEL C600 SERIES SAS CONTROLLER DRIVER
 M:     Intel SCU Linux support <intel-linux-scu@intel.com>
@@ -5464,6 +5470,13 @@ F:       include/linux/mei_cl_bus.h
 F:     drivers/misc/mei/*
 F:     Documentation/misc-devices/mei/*
 
+INTEL PMC IPC DRIVER
+M:     Zha Qipeng <qipeng.zha@intel.com>
+L:     platform-driver-x86@vger.kernel.org
+S:     Maintained
+F:     drivers/platform/x86/intel_pmc_ipc.c
+F:     arch/x86/include/asm/intel_pmc_ipc.h
+
 IOC3 ETHERNET DRIVER
 M:     Ralf Baechle <ralf@linux-mips.org>
 L:     linux-mips@linux-mips.org
@@ -6787,9 +6800,8 @@ S:        Maintained
 F:     drivers/platform/x86/msi-laptop.c
 
 MSI WMI SUPPORT
-M:     Anisse Astier <anisse@astier.eu>
 L:     platform-driver-x86@vger.kernel.org
-S:     Supported
+S:     Orphan
 F:     drivers/platform/x86/msi-wmi.c
 
 MSI001 MEDIA DRIVER
@@ -7019,7 +7031,6 @@ L:        nbd-general@lists.sourceforge.net
 T:     git git://git.pengutronix.de/git/mpa/linux-nbd.git
 F:     Documentation/blockdev/nbd.txt
 F:     drivers/block/nbd.c
-F:     include/linux/nbd.h
 F:     include/uapi/linux/nbd.h
 
 NETWORK DROP MONITOR
@@ -7208,15 +7219,25 @@ F:      drivers/power/bq27x00_battery.c
 F:     drivers/power/isp1704_charger.c
 F:     drivers/power/rx51_battery.c
 
-NTB DRIVER
+NTB DRIVER CORE
 M:     Jon Mason <jdmason@kudzu.us>
 M:     Dave Jiang <dave.jiang@intel.com>
+M:     Allen Hubbe <Allen.Hubbe@emc.com>
 S:     Supported
 W:     https://github.com/jonmason/ntb/wiki
 T:     git git://github.com/jonmason/ntb.git
 F:     drivers/ntb/
 F:     drivers/net/ntb_netdev.c
 F:     include/linux/ntb.h
+F:     include/linux/ntb_transport.h
+
+NTB INTEL DRIVER
+M:     Jon Mason <jdmason@kudzu.us>
+M:     Dave Jiang <dave.jiang@intel.com>
+S:     Supported
+W:     https://github.com/jonmason/ntb/wiki
+T:     git git://github.com/jonmason/ntb.git
+F:     drivers/ntb/hw/intel/
 
 NTFS FILESYSTEM
 M:     Anton Altaparmakov <anton@tuxera.com>
@@ -7360,7 +7381,6 @@ M:        Ohad Ben-Cohen <ohad@wizery.com>
 L:     linux-omap@vger.kernel.org
 S:     Maintained
 F:     drivers/hwspinlock/omap_hwspinlock.c
-F:     arch/arm/mach-omap2/hwspinlock.c
 
 OMAP MMC SUPPORT
 M:     Jarkko Lavinen <jarkko.lavinen@nokia.com>
@@ -7647,7 +7667,6 @@ F:        arch/*/include/asm/paravirt.h
 PARIDE DRIVERS FOR PARALLEL PORT IDE DEVICES
 M:     Tim Waugh <tim@cyberelk.net>
 L:     linux-parport@lists.infradead.org (subscribers-only)
-W:     http://www.torque.net/linux-pp.html
 S:     Maintained
 F:     Documentation/blockdev/paride.txt
 F:     drivers/block/paride/
@@ -9091,9 +9110,9 @@ S:        Supported
 F:     drivers/net/ethernet/emulex/benet/
 
 EMULEX ONECONNECT ROCE DRIVER
-M:     Selvin Xavier <selvin.xavier@emulex.com>
-M:     Devesh Sharma <devesh.sharma@emulex.com>
-M:     Mitesh Ahuja <mitesh.ahuja@emulex.com>
+M:     Selvin Xavier <selvin.xavier@avagotech.com>
+M:     Devesh Sharma <devesh.sharma@avagotech.com>
+M:     Mitesh Ahuja <mitesh.ahuja@avagotech.com>
 L:     linux-rdma@vger.kernel.org
 W:     http://www.emulex.com
 S:     Supported
@@ -9593,7 +9612,6 @@ F:        include/uapi/linux/spi/
 
 SPIDERNET NETWORK DRIVER for CELL
 M:     Ishizaki Kou <kou.ishizaki@toshiba.co.jp>
-M:     Jens Osterkamp <jens@de.ibm.com>
 L:     netdev@vger.kernel.org
 S:     Supported
 F:     Documentation/networking/spider_net.txt
@@ -9832,6 +9850,13 @@ F:       arch/arc/
 F:     Documentation/devicetree/bindings/arc/
 F:     drivers/tty/serial/arc_uart.c
 
+SYNOPSYS ARC SDP platform support
+M:     Alexey Brodkin <abrodkin@synopsys.com>
+S:     Supported
+F:     arch/arc/plat-axs10x
+F:     arch/arc/boot/dts/ax*
+F:     Documentation/devicetree/bindings/arc/axs10*
+
 SYSTEM CONFIGURATION (SYSCON)
 M:     Lee Jones <lee.jones@linaro.org>
 M:     Arnd Bergmann <arnd@arndb.de>
@@ -11347,6 +11372,13 @@ L:     zd1211-devs@lists.sourceforge.net (subscribers-only)
 S:     Maintained
 F:     drivers/net/wireless/zd1211rw/
 
+ZPOOL COMPRESSED PAGE STORAGE API
+M:     Dan Streetman <ddstreet@ieee.org>
+L:     linux-mm@kvack.org
+S:     Maintained
+F:     mm/zpool.c
+F:     include/linux/zpool.h
+
 ZR36067 VIDEO FOR LINUX DRIVER
 L:     mjpeg-users@lists.sourceforge.net
 L:     linux-media@vger.kernel.org
index 6c6f14628f329d0ba10f5632fb362c818c437ff5..13270c0a93363f079ccc3a112bfa7d7d80c7f429 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 4
-PATCHLEVEL = 1
+PATCHLEVEL = 2
 SUBLEVEL = 0
-EXTRAVERSION =
+EXTRAVERSION = -rc1
 NAME = Hurr durr I'ma sheep
 
 # *DOCUMENTATION*
@@ -335,15 +335,6 @@ endif
 export KBUILD_MODULES KBUILD_BUILTIN
 export KBUILD_CHECKSRC KBUILD_SRC KBUILD_EXTMOD
 
-ifneq ($(CC),)
-ifeq ($(shell $(CC) -v 2>&1 | grep -c "clang version"), 1)
-COMPILER := clang
-else
-COMPILER := gcc
-endif
-export COMPILER
-endif
-
 # We need some generic definitions (do not try to remake the file).
 scripts/Kbuild.include: ;
 include scripts/Kbuild.include
@@ -670,6 +661,13 @@ endif
 endif
 KBUILD_CFLAGS += $(stackp-flag)
 
+ifeq ($(shell $(CC) -v 2>&1 | grep -c "clang version"), 1)
+COMPILER := clang
+else
+COMPILER := gcc
+endif
+export COMPILER
+
 ifeq ($(COMPILER),clang)
 KBUILD_CPPFLAGS += $(call cc-option,-Qunused-arguments,)
 KBUILD_CPPFLAGS += $(call cc-option,-Wno-unknown-warning-option,)
index df94ac1f75b6ac517784364cdeff6781eead9a6b..e7cee0a5c56dfa80222d8286a63342b10c07bc42 100644 (file)
@@ -81,17 +81,37 @@ menu "ARC Architecture Configuration"
 
 menu "ARC Platform/SoC/Board"
 
-source "arch/arc/plat-arcfpga/Kconfig"
+source "arch/arc/plat-sim/Kconfig"
 source "arch/arc/plat-tb10x/Kconfig"
+source "arch/arc/plat-axs10x/Kconfig"
 #New platform adds here
 
 endmenu
 
+choice
+       prompt "ARC Instruction Set"
+       default ISA_ARCOMPACT
+
+config ISA_ARCOMPACT
+       bool "ARCompact ISA"
+       help
+         The original ARC ISA of ARC600/700 cores
+
+config ISA_ARCV2
+       bool "ARC ISA v2"
+       help
+         ISA for the Next Generation ARC-HS cores
+
+endchoice
+
 menu "ARC CPU Configuration"
 
 choice
        prompt "ARC Core"
-       default ARC_CPU_770
+       default ARC_CPU_770 if ISA_ARCOMPACT
+       default ARC_CPU_HS if ISA_ARCV2
+
+if ISA_ARCOMPACT
 
 config ARC_CPU_750D
        bool "ARC750D"
@@ -100,7 +120,7 @@ config ARC_CPU_750D
 
 config ARC_CPU_770
        bool "ARC770"
-       select ARC_CPU_REL_4_10
+       select ARC_HAS_SWAPE
        help
          Support for ARC770 core introduced with Rel 4.10 (Summer 2011)
          This core has a bunch of cool new features:
@@ -109,6 +129,27 @@ config ARC_CPU_770
          -Caches: New Prog Model, Region Flush
          -Insns: endian swap, load-locked/store-conditional, time-stamp-ctr
 
+endif  #ISA_ARCOMPACT
+
+config ARC_CPU_HS
+       bool "ARC-HS"
+       depends on ISA_ARCV2
+       help
+         Support for ARC HS38x Cores based on ARCv2 ISA
+         The notable features are:
+           - SMP configurations of up to 4 cores with coherency
+           - Optional L2 Cache and IO-Coherency
+           - Revised Interrupt Architecture (multiple priorities, reg banks,
+               auto stack switch, auto regfile save/restore)
+           - MMUv4 (PIPT dcache, Huge Pages)
+           - Instructions for
+               * 64bit load/store: LDD, STD
+               * Hardware assisted divide/remainder: DIV, REM
+               * Function prologue/epilogue: ENTER_S, LEAVE_S
+               * IRQ enable/disable: CLRI, SETI
+               * pop count: FFS, FLS
+               * SETcc, BMSKN, XBFU...
+
 endchoice
 
 config CPU_BIG_ENDIAN
@@ -117,17 +158,13 @@ config CPU_BIG_ENDIAN
        help
          Build kernel for Big Endian Mode of ARC CPU
 
-# If a platform can't work with 0x8000_0000 based dma_addr_t
-config ARC_PLAT_NEEDS_CPU_TO_DMA
-       bool
-
 config SMP
-       bool "Symmetric Multi-Processing (Incomplete)"
+       bool "Symmetric Multi-Processing"
        default n
+       select ARC_HAS_COH_CACHES if ISA_ARCV2
+       select ARC_MCIP if ISA_ARCV2
        help
-         This enables support for systems with more than one CPU. If you have
-         a system with only one CPU, say N. If you have a system with more
-         than one CPU, say Y.
+         This enables support for systems with more than one CPU.
 
 if SMP
 
@@ -137,13 +174,20 @@ config ARC_HAS_COH_CACHES
 config ARC_HAS_REENTRANT_IRQ_LV2
        def_bool n
 
-endif
+config ARC_MCIP
+       bool "ARConnect Multicore IP (MCIP) Support "
+       depends on ISA_ARCV2
+       help
+         This IP block enables SMP in ARC-HS38 cores.
+         It provides cross-core interrupts, multi-core debug,
+         hardware semaphores, shared memory, etc.
 
 config NR_CPUS
        int "Maximum number of CPUs (2-4096)"
        range 2 4096
-       depends on SMP
-       default "2"
+       default "4"
+
+endif  #SMP
 
 menuconfig ARC_CACHE
        bool "Enable Cache Support"
@@ -185,7 +229,7 @@ config ARC_CACHE_PAGES
 
 config ARC_CACHE_VIPT_ALIASING
        bool "Support VIPT Aliasing D$"
-       depends on ARC_HAS_DCACHE
+       depends on ARC_HAS_DCACHE && ISA_ARCOMPACT
        default n
 
 endif  #ARC_CACHE
@@ -226,9 +270,10 @@ config ARC_HAS_HW_MPY
          Multiplier. Otherwise a software multiply lib is used
 
 choice
-       prompt "ARC700 MMU Version"
+       prompt "MMU Version"
        default ARC_MMU_V3 if ARC_CPU_770
        default ARC_MMU_V2 if ARC_CPU_750D
+       default ARC_MMU_V4 if ARC_CPU_HS
 
 config ARC_MMU_V1
        bool "MMU v1"
@@ -249,6 +294,10 @@ config ARC_MMU_V3
          Variable Page size (1k-16k), var JTLB size 128 x (2 or 4)
          Shared Address Spaces (SASID)
 
+config ARC_MMU_V4
+       bool "MMU v4"
+       depends on ISA_ARCV2
+
 endchoice
 
 
@@ -271,6 +320,8 @@ config ARC_PAGE_SIZE_4K
 
 endchoice
 
+if ISA_ARCOMPACT
+
 config ARC_COMPACT_IRQ_LEVELS
        bool "ARCompact IRQ Priorities: High(2)/Low(1)"
        default n
@@ -290,7 +341,7 @@ config ARC_IRQ5_LV2
 config ARC_IRQ6_LV2
        bool
 
-endif
+endif  #ARC_COMPACT_IRQ_LEVELS
 
 config ARC_FPU_SAVE_RESTORE
        bool "Enable FPU state persistence across context switch"
@@ -303,32 +354,53 @@ config ARC_FPU_SAVE_RESTORE
          based on actual usage of FPU by a task. Thus our implementation does
          this for all tasks in the system.
 
+endif  #ISA_ARCOMPACT
+
 config ARC_CANT_LLSC
        def_bool n
 
-menuconfig ARC_CPU_REL_4_10
-       bool "Enable support for Rel 4.10 features"
-       default n
-       help
-         -ARC770 (and dependent features) enabled
-         -ARC750 also shares some of the new features with 770
-
 config ARC_HAS_LLSC
        bool "Insn: LLOCK/SCOND (efficient atomic ops)"
        default y
-       depends on ARC_CPU_770 && !ARC_CANT_LLSC
+       depends on !ARC_CPU_750D && !ARC_CANT_LLSC
 
 config ARC_HAS_SWAPE
        bool "Insn: SWAPE (endian-swap)"
        default y
-       depends on ARC_CPU_REL_4_10
 
-config ARC_HAS_RTSC
-       bool "Insn: RTSC (64-bit r/o cycle counter)"
+if ISA_ARCV2
+
+config ARC_HAS_LL64
+       bool "Insn: 64bit LDD/STD"
+       help
+         Enable gcc to generate 64-bit load/store instructions
+         ISA mandates even/odd registers to allow encoding of two
+         dest operands with 2 possible source operands.
        default y
-       depends on ARC_CPU_REL_4_10
+
+config ARC_HAS_RTC
+       bool "Local 64-bit r/o cycle counter"
+       default n
        depends on !SMP
 
+config ARC_HAS_GRTC
+       bool "SMP synchronized 64-bit cycle counter"
+       default y
+       depends on SMP
+
+config ARC_NUMBER_OF_INTERRUPTS
+       int "Number of interrupts"
+       range 8 240
+       default 32
+       help
+         This defines the number of interrupts on the ARCv2HS core.
+         It affects the size of the vector table.
+         The initial 8 IRQs are fixed (Timer, ICI etc) and although configurable
+         in hardware, Linux assumes they are always present to keep things
+         simple.
+
+endif  # ISA_ARCV2
+
 endmenu   # "ARC CPU Configuration"
 
 config LINUX_LINK_BASE
@@ -354,8 +426,10 @@ config ARC_CURR_IN_REG
 
 config ARC_EMUL_UNALIGNED
        bool "Emulate unaligned memory access (userspace only)"
+       default n
        select SYSCTL_ARCH_UNALIGN_NO_WARN
        select SYSCTL_ARCH_UNALIGN_ALLOW
+       depends on ISA_ARCOMPACT
        help
          This enables misaligned 16 & 32 bit memory access from user space.
          Use ONLY-IF-ABS-NECESSARY as it will be very slow and also can hide
@@ -378,9 +452,10 @@ menuconfig ARC_DBG
        bool "ARC debugging"
        default y
 
+if ARC_DBG
+
 config ARC_DW2_UNWIND
        bool "Enable DWARF specific kernel stack unwind"
-       depends on ARC_DBG
        default y
        select KALLSYMS
        help
@@ -394,18 +469,38 @@ config ARC_DW2_UNWIND
 
 config ARC_DBG_TLB_PARANOIA
        bool "Paranoia Checks in Low Level TLB Handlers"
-       depends on ARC_DBG
        default n
 
 config ARC_DBG_TLB_MISS_COUNT
        bool "Profile TLB Misses"
        default n
        select DEBUG_FS
-       depends on ARC_DBG
        help
          Counts number of I and D TLB Misses and exports them via Debugfs
          The counters can be cleared via Debugfs as well
 
+if SMP
+
+config ARC_IPI_DBG
+       bool "Debug Inter Core interrupts"
+       default n
+
+endif
+
+endif
+
+config ARC_UBOOT_SUPPORT
+       bool "Support uboot arg Handling"
+       default n
+       help
+         ARC Linux by default checks the uboot-provided args as pointers to
+         an external cmdline or DTB. However, this breaks in the absence of
+         uboot, e.g. when booting directly from the Metaware debugger, since
+         the registers are not zeroed out on reset by mdb and/or ARCv2 based
+         cores; the bogus register contents look like uboot args to the
+         kernel, which then chokes.
+         So only enable uboot arg checking/processing if uboot is known to
+         be in play.
+
 config ARC_BUILTIN_DTB_NAME
        string "Built in DTB"
        help
index db72fec0e160fc8e67fe17c5faf9c6ef54c8bcdb..6107062c01115dbea8a56e02bce254a8ba5b91af 100644 (file)
@@ -9,12 +9,14 @@
 UTS_MACHINE := arc
 
 ifeq ($(CROSS_COMPILE),)
-CROSS_COMPILE := arc-linux-uclibc-
+CROSS_COMPILE := arc-linux-
 endif
 
 KBUILD_DEFCONFIG := nsim_700_defconfig
 
-cflags-y       += -mA7 -fno-common -pipe -fno-builtin -D__linux__
+cflags-y       += -fno-common -pipe -fno-builtin -D__linux__
+cflags-$(CONFIG_ISA_ARCOMPACT) += -mA7
+cflags-$(CONFIG_ISA_ARCV2)     += -mcpu=archs
 
 ifdef CONFIG_ARC_CURR_IN_REG
 # For a global register definition, make sure it gets passed to every file
@@ -33,7 +35,11 @@ cflags-$(atleast_gcc44)                      += -fsection-anchors
 
 cflags-$(CONFIG_ARC_HAS_LLSC)          += -mlock
 cflags-$(CONFIG_ARC_HAS_SWAPE)         += -mswape
-cflags-$(CONFIG_ARC_HAS_RTSC)          += -mrtsc
+
+ifndef CONFIG_ARC_HAS_LL64
+cflags-$(CONFIG_ISA_ARCV2)             += -mno-ll64
+endif
+
 cflags-$(CONFIG_ARC_DW2_UNWIND)                += -fasynchronous-unwind-tables
 
 # By default gcc 4.8 generates dwarf4 which kernel unwinder can't grok
@@ -81,8 +87,9 @@ core-y                += arch/arc/
 # w/o this dtb won't embed into kernel binary
 core-y         += arch/arc/boot/dts/
 
-core-$(CONFIG_ARC_PLAT_FPGA_LEGACY)    += arch/arc/plat-arcfpga/
-core-$(CONFIG_ARC_PLAT_TB10X)          += arch/arc/plat-tb10x/
+core-$(CONFIG_ARC_PLAT_SIM)    += arch/arc/plat-sim/
+core-$(CONFIG_ARC_PLAT_TB10X)  += arch/arc/plat-tb10x/
+core-$(CONFIG_ARC_PLAT_AXS10X) += arch/arc/plat-axs10x/
 
 drivers-$(CONFIG_OPROFILE)     += arch/arc/oprofile/
 
index faf240e29ec2cebc1b6f7762b0c0556de37f1a8f..b0e3f19bbd07e32cb57c91f803302603ebe429fc 100644 (file)
@@ -1,5 +1,5 @@
 # Built-in dtb
-builtindtb-y           := angel4
+builtindtb-y           := nsim_700
 
 ifneq ($(CONFIG_ARC_BUILTIN_DTB_NAME),"")
        builtindtb-y    := $(patsubst "%",%,$(CONFIG_ARC_BUILTIN_DTB_NAME))
diff --git a/arch/arc/boot/dts/angel4.dts b/arch/arc/boot/dts/angel4.dts
deleted file mode 100644 (file)
index 3b076fb..0000000
+++ /dev/null
@@ -1,70 +0,0 @@
-/*
- * Copyright (C) 2012 Synopsys, Inc. (www.synopsys.com)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-/dts-v1/;
-
-/include/ "skeleton.dtsi"
-
-/ {
-       compatible = "snps,arc-angel4";
-       clock-frequency = <80000000>;   /* 80 MHZ */
-       #address-cells = <1>;
-       #size-cells = <1>;
-       interrupt-parent = <&intc>;
-
-       chosen {
-               bootargs = "earlycon=arc_uart,mmio32,0xc0fc1000,115200n8 console=ttyARC0,115200n8";
-       };
-
-       aliases {
-               serial0 = &arcuart0;
-       };
-
-       fpga {
-               compatible = "simple-bus";
-               #address-cells = <1>;
-               #size-cells = <1>;
-
-               /* child and parent address space 1:1 mapped */
-               ranges;
-
-               intc: interrupt-controller {
-                       compatible = "snps,arc700-intc";
-                       interrupt-controller;
-                       #interrupt-cells = <1>;
-               };
-
-               arcuart0: serial@c0fc1000 {
-                       compatible = "snps,arc-uart";
-                       reg = <0xc0fc1000 0x100>;
-                       interrupts = <5>;
-                       clock-frequency = <80000000>;
-                       current-speed = <115200>;
-                       status = "okay";
-               };
-
-               ethernet@c0fc2000 {
-                       compatible = "snps,arc-emac";
-                       reg = <0xc0fc2000 0x3c>;
-                       interrupts = <6>;
-                       mac-address = [ 00 11 22 33 44 55 ];
-                       clock-frequency = <80000000>;
-                       max-speed = <100>;
-                       phy = <&phy0>;
-
-                       #address-cells = <1>;
-                       #size-cells = <0>;
-                       phy0: ethernet-phy@0 {
-                               reg = <1>;
-                       };
-               };
-
-               arcpmu0: pmu {
-                       compatible = "snps,arc700-pct";
-               };
-       };
-};
diff --git a/arch/arc/boot/dts/axc001.dtsi b/arch/arc/boot/dts/axc001.dtsi
new file mode 100644 (file)
index 0000000..a5e2726
--- /dev/null
@@ -0,0 +1,100 @@
+/*
+ * Copyright (C) 2013-15 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/*
+ * Device tree for AXC001 770D/EM6/AS221 CPU card
+ * Note that this file only supports the 770D CPU
+ */
+
+/ {
+       compatible = "snps,arc";
+       clock-frequency = <750000000>;  /* 750 MHZ */
+       #address-cells = <1>;
+       #size-cells = <1>;
+
+       cpu_card {
+               compatible = "simple-bus";
+               #address-cells = <1>;
+               #size-cells = <1>;
+
+               ranges = <0x00000000 0xf0000000 0x10000000>;
+
+               cpu_intc: arc700-intc@cpu {
+                       compatible = "snps,arc700-intc";
+                       interrupt-controller;
+                       #interrupt-cells = <1>;
+               };
+
+               /*
+                * this GPIO block ORs all interrupts on CPU card (creg,..)
+                * to uplink only 1 IRQ to ARC core intc
+                */
+               dw-apb-gpio@0x2000 {
+                       compatible = "snps,dw-apb-gpio";
+                       reg = < 0x2000 0x80 >;
+                       #address-cells = <1>;
+                       #size-cells = <0>;
+
+                       ictl_intc: gpio-controller@0 {
+                               compatible = "snps,dw-apb-gpio-port";
+                               gpio-controller;
+                               #gpio-cells = <2>;
+                               snps,nr-gpios = <30>;
+                               reg = <0>;
+                               interrupt-controller;
+                               #interrupt-cells = <2>;
+                               interrupt-parent = <&cpu_intc>;
+                               interrupts = <15>;
+                       };
+               };
+
+               debug_uart: dw-apb-uart@0x5000 {
+                       compatible = "snps,dw-apb-uart";
+                       reg = <0x5000 0x100>;
+                       clock-frequency = <33333000>;
+                       interrupt-parent = <&ictl_intc>;
+                       interrupts = <19 4>;
+                       baud = <115200>;
+                       reg-shift = <2>;
+                       reg-io-width = <4>;
+               };
+
+               arcpmu0: pmu {
+                       compatible = "snps,arc700-pct";
+               };
+       };
+
+       /*
+        * This INTC is actually connected to DW APB GPIO
+        * which acts as a wire between MB INTC and CPU INTC.
+        * GPIO INTC is configured in platform init code
+        * and here we mimic direct connection from MB INTC to
+        * CPU INTC, thus we set "interrupts = <7>" instead of
+        * "interrupts = <12>"
+        *
+        * This intc actually resides on MB, but we move it here to
+        * avoid duplicating the MB dtsi file given that IRQ from
+        * this intc to cpu intc are different for axs101 and axs103
+        */
+       mb_intc: dw-apb-ictl@0xe0012000 {
+               #interrupt-cells = <1>;
+               compatible = "snps,dw-apb-ictl";
+               reg = < 0xe0012000 0x200 >;
+               interrupt-controller;
+               interrupt-parent = <&cpu_intc>;
+               interrupts = < 7 >;
+       };
+
+       memory {
+               #address-cells = <1>;
+               #size-cells = <1>;
+               ranges = <0x00000000 0x80000000 0x40000000>;
+               device_type = "memory";
+               reg = <0x00000000 0x20000000>;  /* 512MiB */
+       };
+};
diff --git a/arch/arc/boot/dts/axc003.dtsi b/arch/arc/boot/dts/axc003.dtsi
new file mode 100644 (file)
index 0000000..15c8d62
--- /dev/null
@@ -0,0 +1,102 @@
+/*
+ * Copyright (C) 2014-15 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/*
+ * Device tree for AXC003 CPU card: HS38x UP configuration
+ */
+
+/ {
+       compatible = "snps,arc";
+       clock-frequency = <75000000>;
+       #address-cells = <1>;
+       #size-cells = <1>;
+
+       cpu_card {
+               compatible = "simple-bus";
+               #address-cells = <1>;
+               #size-cells = <1>;
+
+               ranges = <0x00000000 0xf0000000 0x10000000>;
+
+               cpu_intc: archs-intc@cpu {
+                       compatible = "snps,archs-intc";
+                       interrupt-controller;
+                       #interrupt-cells = <1>;
+               };
+
+               /*
+                * this GPIO block ORs all interrupts on CPU card (creg,..)
+                * to uplink only 1 IRQ to ARC core intc
+                */
+               dw-apb-gpio@0x2000 {
+                       compatible = "snps,dw-apb-gpio";
+                       reg = < 0x2000 0x80 >;
+                       #address-cells = <1>;
+                       #size-cells = <0>;
+
+                       ictl_intc: gpio-controller@0 {
+                               compatible = "snps,dw-apb-gpio-port";
+                               gpio-controller;
+                               #gpio-cells = <2>;
+                               snps,nr-gpios = <30>;
+                               reg = <0>;
+                               interrupt-controller;
+                               #interrupt-cells = <2>;
+                               interrupt-parent = <&cpu_intc>;
+                               interrupts = <25>;
+                       };
+               };
+
+               debug_uart: dw-apb-uart@0x5000 {
+                       compatible = "snps,dw-apb-uart";
+                       reg = <0x5000 0x100>;
+                       clock-frequency = <33333000>;
+                       interrupt-parent = <&ictl_intc>;
+                       interrupts = <2 4>;
+                       baud = <115200>;
+                       reg-shift = <2>;
+                       reg-io-width = <4>;
+               };
+
+               arcpct0: pct {
+                       compatible = "snps,archs-pct";
+                       #interrupt-cells = <1>;
+                       interrupt-parent = <&cpu_intc>;
+                       interrupts = <20>;
+               };
+       };
+
+       /*
+        * This INTC is actually connected to DW APB GPIO
+        * which acts as a wire between MB INTC and CPU INTC.
+        * GPIO INTC is configured in platform init code
+        * and here we mimic direct connection from MB INTC to
+        * CPU INTC, thus we set "interrupts = <7>" instead of
+        * "interrupts = <12>"
+        *
+        * This intc actually resides on MB, but we move it here to
+        * avoid duplicating the MB dtsi file given that IRQ from
+        * this intc to cpu intc are different for axs101 and axs103
+        */
+       mb_intc: dw-apb-ictl@0xe0012000 {
+               #interrupt-cells = <1>;
+               compatible = "snps,dw-apb-ictl";
+               reg = < 0xe0012000 0x200 >;
+               interrupt-controller;
+               interrupt-parent = <&cpu_intc>;
+               interrupts = < 24 >;
+       };
+
+       memory {
+               #address-cells = <1>;
+               #size-cells = <1>;
+               ranges = <0x00000000 0x80000000 0x40000000>;
+               device_type = "memory";
+               reg = <0x00000000 0x20000000>;  /* 512MiB */
+       };
+};
diff --git a/arch/arc/boot/dts/axc003_idu.dtsi b/arch/arc/boot/dts/axc003_idu.dtsi
new file mode 100644 (file)
index 0000000..199d428
--- /dev/null
@@ -0,0 +1,126 @@
+/*
+ * Copyright (C) 2014, 2015 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/*
+ * Device tree for AXC003 CPU card: HS38x2 (Dual Core) with IDU intc
+ */
+
+/ {
+       compatible = "snps,arc";
+       clock-frequency = <75000000>;
+       #address-cells = <1>;
+       #size-cells = <1>;
+
+       cpu_card {
+               compatible = "simple-bus";
+               #address-cells = <1>;
+               #size-cells = <1>;
+
+               ranges = <0x00000000 0xf0000000 0x10000000>;
+
+               cpu_intc: archs-intc@cpu {
+                       compatible = "snps,archs-intc";
+                       interrupt-controller;
+                       #interrupt-cells = <1>;
+               };
+
+               idu_intc: idu-interrupt-controller {
+                       compatible = "snps,archs-idu-intc";
+                       interrupt-controller;
+                       interrupt-parent = <&cpu_intc>;
+
+                       /*
+                        * <hwirq  distribution>
+                        * distribution: 0=RR; 1=cpu0, 2=cpu1, 4=cpu2, 8=cpu3
+                        */
+                       #interrupt-cells = <2>;
+
+                       /*
+                        * upstream irqs to core intc - downstream these are
+                        * "COMMON" irq 0,1..
+                        */
+                       interrupts = <24 25>;
+               };
+
+               /*
+                * this GPIO block ORs all interrupts on CPU card (creg,..)
+                * to uplink only 1 IRQ to ARC core intc
+                */
+               dw-apb-gpio@0x2000 {
+                       compatible = "snps,dw-apb-gpio";
+                       reg = < 0x2000 0x80 >;
+                       #address-cells = <1>;
+                       #size-cells = <0>;
+
+                       ictl_intc: gpio-controller@0 {
+                               compatible = "snps,dw-apb-gpio-port";
+                               gpio-controller;
+                               #gpio-cells = <2>;
+                               snps,nr-gpios = <30>;
+                               reg = <0>;
+                               interrupt-controller;
+                               #interrupt-cells = <2>;
+                               interrupt-parent = <&idu_intc>;
+
+                               /*
+                                * cmn irq 1 -> cpu irq 25
+                                * Distribute to cpu0 only
+                                */
+                               interrupts = <1 1>;
+                       };
+               };
+
+               debug_uart: dw-apb-uart@0x5000 {
+                       compatible = "snps,dw-apb-uart";
+                       reg = <0x5000 0x100>;
+                       clock-frequency = <33333000>;
+                       interrupt-parent = <&ictl_intc>;
+                       interrupts = <2 4>;
+                       baud = <115200>;
+                       reg-shift = <2>;
+                       reg-io-width = <4>;
+               };
+
+               arcpct0: pct {
+                       compatible = "snps,archs-pct";
+                       #interrupt-cells = <1>;
+                       interrupt-parent = <&cpu_intc>;
+                       interrupts = <20>;
+               };
+       };
+
+       /*
+        * This INTC is actually connected to DW APB GPIO
+        * which acts as a wire between MB INTC and CPU INTC.
+        * GPIO INTC is configured in platform init code
+        * and here we mimic a direct connection from MB INTC to
+        * CPU INTC, thus we set "interrupts = <0 1>" instead of
+        * "interrupts = <12>"
+        *
+        * This intc actually resides on the MB, but we move it here to
+        * avoid duplicating the MB dtsi file, given that the IRQs from
+        * this intc to the cpu intc differ between axs101 and axs103
+        */
+       mb_intc: dw-apb-ictl@0xe0012000 {
+               #interrupt-cells = <1>;
+               compatible = "snps,dw-apb-ictl";
+               reg = < 0xe0012000 0x200 >;
+               interrupt-controller;
+               interrupt-parent = <&idu_intc>;
+               interrupts = <0 1>;     /* cmn irq 0 -> cpu irq 24
+                                          distribute to cpu0 only */
+       };
+
+       memory {
+               #address-cells = <1>;
+               #size-cells = <1>;
+               ranges = <0x00000000 0x80000000 0x40000000>;
+               device_type = "memory";
+               reg = <0x00000000 0x20000000>;  /* 512MiB */
+       };
+};
diff --git a/arch/arc/boot/dts/axs101.dts b/arch/arc/boot/dts/axs101.dts
new file mode 100644 (file)
index 0000000..3f9b058
--- /dev/null
@@ -0,0 +1,21 @@
+/*
+ * Copyright (C) 2013-15 Synopsys, Inc. (www.synopsys.com)
+ *
+ * ARC AXS101 S/W development platform
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+/dts-v1/;
+
+/include/ "axc001.dtsi"
+/include/ "axs10x_mb.dtsi"
+
+/ {
+       compatible = "snps,axs101", "snps,arc-sdp";
+
+       chosen {
+               bootargs = "earlycon=uart8250,mmio32,0xe0022000,115200n8 console=tty0 console=ttyS3,115200n8 consoleblank=0";
+       };
+};
diff --git a/arch/arc/boot/dts/axs103.dts b/arch/arc/boot/dts/axs103.dts
new file mode 100644 (file)
index 0000000..e6d0e31
--- /dev/null
@@ -0,0 +1,24 @@
+/*
+ * Copyright (C) 2014-15 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/*
+ * Device Tree for AXS103 SDP with AXS10X Main Board and
+ * AXC003 FPGA Card (with UP bitfile)
+ */
+/dts-v1/;
+
+/include/ "axc003.dtsi"
+/include/ "axs10x_mb.dtsi"
+
+/ {
+       compatible = "snps,axs103", "snps,arc-sdp";
+
+       chosen {
+               bootargs = "earlycon=uart8250,mmio32,0xe0022000,115200n8 console=ttyS3,115200n8 debug print-fatal-signals=1";
+       };
+};
diff --git a/arch/arc/boot/dts/axs103_idu.dts b/arch/arc/boot/dts/axs103_idu.dts
new file mode 100644 (file)
index 0000000..f999fef
--- /dev/null
@@ -0,0 +1,24 @@
+/*
+ * Copyright (C) 2014-15 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/*
+ * Device Tree for AXS103 SDP with AXS10X Main Board and
+ * AXC003 FPGA Card (with SMP bitfile)
+ */
+/dts-v1/;
+
+/include/ "axc003_idu.dtsi"
+/include/ "axs10x_mb.dtsi"
+
+/ {
+       compatible = "snps,axs103", "snps,arc-sdp";
+
+       chosen {
+               bootargs = "earlycon=uart8250,mmio32,0xe0022000,115200n8 console=ttyS3,115200n8 debug print-fatal-signals=1";
+       };
+};
diff --git a/arch/arc/boot/dts/axs10x_mb.dtsi b/arch/arc/boot/dts/axs10x_mb.dtsi
new file mode 100644 (file)
index 0000000..f3db321
--- /dev/null
@@ -0,0 +1,224 @@
+/*
+ * Support for peripherals on the AXS10x mainboard
+ *
+ * Copyright (C) 2013-15 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/ {
+       axs10x_mb {
+               compatible = "simple-bus";
+               #address-cells = <1>;
+               #size-cells = <1>;
+               ranges = <0x00000000 0xe0000000 0x10000000>;
+               interrupt-parent = <&mb_intc>;
+
+               clocks {
+                       i2cclk: i2cclk {
+                               compatible = "fixed-clock";
+                               clock-frequency = <50000000>;
+                               #clock-cells = <0>;
+                       };
+
+                       apbclk: apbclk {
+                               compatible = "fixed-clock";
+                               clock-frequency = <50000000>;
+                               #clock-cells = <0>;
+                       };
+
+                       mmcclk: mmcclk {
+                               compatible = "fixed-clock";
+                               clock-frequency = <50000000>;
+                               #clock-cells = <0>;
+                       };
+               };
+
+               ethernet@0x18000 {
+                       #interrupt-cells = <1>;
+                       compatible = "snps,dwmac";
+                       reg = < 0x18000 0x2000 >;
+                       interrupts = < 4 >;
+                       interrupt-names = "macirq";
+                       phy-mode = "rgmii";
+                       snps,pbl = < 32 >;
+                       clocks = <&apbclk>;
+                       clock-names = "stmmaceth";
+               };
+
+               ehci@0x40000 {
+                       compatible = "generic-ehci";
+                       reg = < 0x40000 0x100 >;
+                       interrupts = < 8 >;
+               };
+
+               ohci@0x60000 {
+                       compatible = "generic-ohci";
+                       reg = < 0x60000 0x100 >;
+                       interrupts = < 8 >;
+               };
+
+               /*
+                * According to the DW Mobile Storage databook, the "Hold
+                * Register" must be used if the card is enumerated in SDR12
+                * or SDR25 mode.
+                *
+                * Utilization of the "Hold Register" is already implemented
+                * via dw_mci_pltfm_prepare_command(), which in turn gets
+                * used through the dw_mci_drv_data->prepare_command
+                * call-back. This call-back is used on the Altera SoCFPGA
+                * platform, so we may reuse it by claiming compatibility
+                * with their "altr,socfpga-dw-mshc".
+                *
+                * Most probably "Hold Register" utilization is a platform-
+                * independent requirement, which means that a single unified
+                * "snps,dw-mshc" should be enough for all users of DW MMC once
+                * dw_mci_pltfm_prepare_command() is used in generic platform
+                * code.
+                */
+               mmc@0x15000 {
+                       compatible = "altr,socfpga-dw-mshc";
+                       reg = < 0x15000 0x400 >;
+                       num-slots = < 1 >;
+                       fifo-depth = < 16 >;
+                       card-detect-delay = < 200 >;
+                       clocks = <&apbclk>, <&mmcclk>;
+                       clock-names = "biu", "ciu";
+                       interrupts = < 7 >;
+                       bus-width = < 4 >;
+               };
+
+               uart@0x20000 {
+                       compatible = "snps,dw-apb-uart";
+                       reg = <0x20000 0x100>;
+                       clock-frequency = <33333333>;
+                       interrupts = <17>;
+                       baud = <115200>;
+                       reg-shift = <2>;
+                       reg-io-width = <4>;
+               };
+
+               uart@0x21000 {
+                       compatible = "snps,dw-apb-uart";
+                       reg = <0x21000 0x100>;
+                       clock-frequency = <33333333>;
+                       interrupts = <18>;
+                       baud = <115200>;
+                       reg-shift = <2>;
+                       reg-io-width = <4>;
+               };
+
+               /* UART muxed with USB data port (ttyS3) */
+               uart@0x22000 {
+                       compatible = "snps,dw-apb-uart";
+                       reg = <0x22000 0x100>;
+                       clock-frequency = <33333333>;
+                       interrupts = <19>;
+                       baud = <115200>;
+                       reg-shift = <2>;
+                       reg-io-width = <4>;
+               };
+
+               i2c@0x1d000 {
+                       compatible = "snps,designware-i2c";
+                       reg = <0x1d000 0x100>;
+                       clock-frequency = <400000>;
+                       clocks = <&i2cclk>;
+                       interrupts = <14>;
+               };
+
+               i2c@0x1e000 {
+                       compatible = "snps,designware-i2c";
+                       reg = <0x1e000 0x100>;
+                       clock-frequency = <400000>;
+                       clocks = <&i2cclk>;
+                       interrupts = <15>;
+               };
+
+               i2c@0x1f000 {
+                       compatible = "snps,designware-i2c";
+                       #address-cells = <1>;
+                       #size-cells = <0>;
+                       reg = <0x1f000 0x100>;
+                       clock-frequency = <400000>;
+                       clocks = <&i2cclk>;
+                       interrupts = <16>;
+
+                       eeprom@0x54 {
+                               compatible = "24c01";
+                               reg = <0x54>;
+                               pagesize = <0x8>;
+                       };
+
+                       eeprom@0x57 {
+                               compatible = "24c04";
+                               reg = <0x57>;
+                               pagesize = <0x8>;
+                       };
+               };
+
+               gpio0: gpio@13000 {
+                       compatible = "snps,dw-apb-gpio";
+                       reg = <0x13000 0x1000>;
+                       #address-cells = <1>;
+                       #size-cells = <0>;
+
+                       gpio0_banka: gpio-controller@0 {
+                               compatible = "snps,dw-apb-gpio-port";
+                               gpio-controller;
+                               #gpio-cells = <2>;
+                               snps,nr-gpios = <32>;
+                               reg = <0>;
+                       };
+
+                       gpio0_bankb: gpio-controller@1 {
+                               compatible = "snps,dw-apb-gpio-port";
+                               gpio-controller;
+                               #gpio-cells = <2>;
+                               snps,nr-gpios = <8>;
+                               reg = <1>;
+                       };
+
+                       gpio0_bankc: gpio-controller@2 {
+                               compatible = "snps,dw-apb-gpio-port";
+                               gpio-controller;
+                               #gpio-cells = <2>;
+                               snps,nr-gpios = <8>;
+                               reg = <2>;
+                       };
+               };
+
+               gpio1: gpio@14000 {
+                       compatible = "snps,dw-apb-gpio";
+                       reg = <0x14000 0x1000>;
+                       #address-cells = <1>;
+                       #size-cells = <0>;
+
+                       gpio1_banka: gpio-controller@0 {
+                               compatible = "snps,dw-apb-gpio-port";
+                               gpio-controller;
+                               #gpio-cells = <2>;
+                               snps,nr-gpios = <30>;
+                               reg = <0>;
+                       };
+
+                       gpio1_bankb: gpio-controller@1 {
+                               compatible = "snps,dw-apb-gpio-port";
+                               gpio-controller;
+                               #gpio-cells = <2>;
+                               snps,nr-gpios = <10>;
+                               reg = <1>;
+                       };
+
+                       gpio1_bankc: gpio-controller@2 {
+                               compatible = "snps,dw-apb-gpio-port";
+                               gpio-controller;
+                               #gpio-cells = <2>;
+                               snps,nr-gpios = <8>;
+                               reg = <2>;
+                       };
+               };
+       };
+};
diff --git a/arch/arc/boot/dts/nsim_700.dts b/arch/arc/boot/dts/nsim_700.dts
new file mode 100644 (file)
index 0000000..105a001
--- /dev/null
@@ -0,0 +1,70 @@
+/*
+ * Copyright (C) 2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+/dts-v1/;
+
+/include/ "skeleton.dtsi"
+
+/ {
+       compatible = "snps,nsim";
+       clock-frequency = <80000000>;   /* 80 MHZ */
+       #address-cells = <1>;
+       #size-cells = <1>;
+       interrupt-parent = <&intc>;
+
+       chosen {
+               bootargs = "earlycon=arc_uart,mmio32,0xc0fc1000,115200n8 console=ttyARC0,115200n8";
+       };
+
+       aliases {
+               serial0 = &arcuart0;
+       };
+
+       fpga {
+               compatible = "simple-bus";
+               #address-cells = <1>;
+               #size-cells = <1>;
+
+               /* child and parent address space 1:1 mapped */
+               ranges;
+
+               intc: interrupt-controller {
+                       compatible = "snps,arc700-intc";
+                       interrupt-controller;
+                       #interrupt-cells = <1>;
+               };
+
+               arcuart0: serial@c0fc1000 {
+                       compatible = "snps,arc-uart";
+                       reg = <0xc0fc1000 0x100>;
+                       interrupts = <5>;
+                       clock-frequency = <80000000>;
+                       current-speed = <115200>;
+                       status = "okay";
+               };
+
+               ethernet@c0fc2000 {
+                       compatible = "snps,arc-emac";
+                       reg = <0xc0fc2000 0x3c>;
+                       interrupts = <6>;
+                       mac-address = [ 00 11 22 33 44 55 ];
+                       clock-frequency = <80000000>;
+                       max-speed = <100>;
+                       phy = <&phy0>;
+
+                       #address-cells = <1>;
+                       #size-cells = <0>;
+                       phy0: ethernet-phy@0 {
+                               reg = <1>;
+                       };
+               };
+
+               arcpmu0: pmu {
+                       compatible = "snps,arc700-pct";
+               };
+       };
+};
diff --git a/arch/arc/boot/dts/nsim_hs.dts b/arch/arc/boot/dts/nsim_hs.dts
new file mode 100644 (file)
index 0000000..911f069
--- /dev/null
@@ -0,0 +1,53 @@
+/*
+ * Copyright (C) 2014-15 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+/dts-v1/;
+
+/include/ "skeleton.dtsi"
+
+/ {
+       compatible = "snps,nsim_hs";
+       interrupt-parent = <&core_intc>;
+
+       chosen {
+               bootargs = "earlycon=arc_uart,mmio32,0xc0fc1000,115200n8 console=ttyARC0,115200n8";
+       };
+
+       aliases {
+               serial0 = &arcuart0;
+       };
+
+       fpga {
+               compatible = "simple-bus";
+               #address-cells = <1>;
+               #size-cells = <1>;
+
+               /* child and parent address space 1:1 mapped */
+               ranges;
+
+               core_intc: core-interrupt-controller {
+                       compatible = "snps,archs-intc";
+                       interrupt-controller;
+                       #interrupt-cells = <1>;
+               };
+
+               arcuart0: serial@c0fc1000 {
+                       compatible = "snps,arc-uart";
+                       reg = <0xc0fc1000 0x100>;
+                       interrupts = <24>;
+                       clock-frequency = <80000000>;
+                       current-speed = <115200>;
+                       status = "okay";
+               };
+
+               arcpct0: pct {
+                       compatible = "snps,archs-pct";
+                       #interrupt-cells = <1>;
+                       interrupts = <20>;
+               };
+       };
+};
diff --git a/arch/arc/boot/dts/nsim_hs_idu.dts b/arch/arc/boot/dts/nsim_hs_idu.dts
new file mode 100644 (file)
index 0000000..46ab319
--- /dev/null
@@ -0,0 +1,72 @@
+/*
+ * Copyright (C) 2014-15 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+/dts-v1/;
+
+/include/ "skeleton.dtsi"
+
+/ {
+       compatible = "snps,nsim_hs";
+       interrupt-parent = <&core_intc>;
+
+       chosen {
+               bootargs = "earlycon=arc_uart,mmio32,0xc0fc1000,115200n8 console=ttyARC0,115200n8";
+       };
+
+       aliases {
+               serial0 = &arcuart0;
+       };
+
+       fpga {
+               compatible = "simple-bus";
+               #address-cells = <1>;
+               #size-cells = <1>;
+
+               /* child and parent address space 1:1 mapped */
+               ranges;
+
+               core_intc: core-interrupt-controller {
+                       compatible = "snps,archs-intc";
+                       interrupt-controller;
+                       #interrupt-cells = <1>;
+               };
+
+               idu_intc: idu-interrupt-controller {
+                       compatible = "snps,archs-idu-intc";
+                       interrupt-controller;
+                       interrupt-parent = <&core_intc>;
+
+                       /*
+                        * <hwirq  distribution>
+                        * distribution: 0=RR; 1=cpu0, 2=cpu1, 4=cpu2, 8=cpu3
+                        */
+                       #interrupt-cells = <2>;
+
+                       /*
+                        * upstream irqs to core intc - downstream these are
+                        * "COMMON" irq 0,1..
+                        */
+                       interrupts = <24 25 26 27 28 29 30 31>;
+               };
+
+               arcuart0: serial@c0fc1000 {
+                       compatible = "snps,arc-uart";
+                       reg = <0xc0fc1000 0x100>;
+                       interrupt-parent = <&idu_intc>;
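+                       /* cmn irq 0 -> core irq 24; RR distribute to all cpus */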
+                       interrupts = <0 0>;
+                       clock-frequency = <80000000>;
+                       current-speed = <115200>;
+                       status = "okay";
+               };
+
+               arcpct0: pct {
+                       compatible = "snps,archs-pct";
+                       #interrupt-cells = <1>;
+                       interrupts = <20>;
+               };
+       };
+};
diff --git a/arch/arc/boot/dts/nsimosci_hs.dts b/arch/arc/boot/dts/nsimosci_hs.dts
new file mode 100644 (file)
index 0000000..d64a96f
--- /dev/null
@@ -0,0 +1,80 @@
+/*
+ * Copyright (C) 2014-15 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+/dts-v1/;
+
+/include/ "skeleton.dtsi"
+
+/ {
+       compatible = "snps,nsimosci_hs";
+       clock-frequency = <20000000>;   /* 20 MHZ */
+       #address-cells = <1>;
+       #size-cells = <1>;
+       interrupt-parent = <&core_intc>;
+
+       chosen {
+               /* this is for console on PGU */
+               /* bootargs = "console=tty0 consoleblank=0"; */
+               /* this is for console on serial */
+               bootargs = "earlycon=uart8250,mmio32,0xf0000000,115200n8 console=tty0 console=ttyS0,115200n8 consoleblank=0 debug";
+       };
+
+       aliases {
+               serial0 = &uart0;
+       };
+
+       fpga {
+               compatible = "simple-bus";
+               #address-cells = <1>;
+               #size-cells = <1>;
+
+               /* child and parent address space 1:1 mapped */
+               ranges;
+
+               core_intc: core-interrupt-controller {
+                       compatible = "snps,archs-intc";
+                       interrupt-controller;
+                       #interrupt-cells = <1>;
+               };
+
+               uart0: serial@f0000000 {
+                       compatible = "ns8250";
+                       reg = <0xf0000000 0x2000>;
+                       interrupts = <24>;
+                       clock-frequency = <3686400>;
+                       baud = <115200>;
+                       reg-shift = <2>;
+                       reg-io-width = <4>;
+                       no-loopback-test = <1>;
+               };
+
+               pgu0: pgu@f9000000 {
+                       compatible = "snps,arcpgufb";
+                       reg = <0xf9000000 0x400>;
+               };
+
+               ps2: ps2@f9001000 {
+                       compatible = "snps,arc_ps2";
+                       reg = <0xf9000400 0x14>;
+                       interrupts = <27>;
+                       interrupt-names = "arc_ps2_irq";
+               };
+
+               eth0: ethernet@f0003000 {
+                       compatible = "snps,oscilan";
+                       reg = <0xf0003000 0x44>;
+                       interrupts = <25>, <26>;
+                       interrupt-names = "rx", "tx";
+               };
+
+               arcpct0: pct {
+                       compatible = "snps,archs-pct";
+                       #interrupt-cells = <1>;
+                       interrupts = <20>;
+               };
+       };
+};
diff --git a/arch/arc/boot/dts/nsimosci_hs_idu.dts b/arch/arc/boot/dts/nsimosci_hs_idu.dts
new file mode 100644 (file)
index 0000000..f6bf0ca
--- /dev/null
@@ -0,0 +1,101 @@
+/*
+ * Copyright (C) 2014-15 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+/dts-v1/;
+
+/include/ "skeleton.dtsi"
+
+/ {
+       compatible = "snps,nsimosci_hs";
+       clock-frequency = <5000000>;    /* 5 MHZ */
+       #address-cells = <1>;
+       #size-cells = <1>;
+       interrupt-parent = <&core_intc>;
+
+       chosen {
+               /* this is for console on serial */
+               bootargs = "earlycon=uart8250,mmio32,0xf0000000,115200n8 console=tty0 console=ttyS0,115200n8 consoleblank=0 debug";
+       };
+
+       aliases {
+               serial0 = &uart0;
+       };
+
+       fpga {
+               compatible = "simple-bus";
+               #address-cells = <1>;
+               #size-cells = <1>;
+
+               /* child and parent address space 1:1 mapped */
+               ranges;
+
+               core_intc: core-interrupt-controller {
+                       compatible = "snps,archs-intc";
+                       interrupt-controller;
+                       #interrupt-cells = <1>;
+/*                     interrupts = <16 17 18 19 20 21 22 23 24 25>; */
+               };
+
+               idu_intc: idu-interrupt-controller {
+                       compatible = "snps,archs-idu-intc";
+                       interrupt-controller;
+                       interrupt-parent = <&core_intc>;
+
+                       /*
+                        * <hwirq  distribution>
+                        * distribution: 0=RR; 1=cpu0, 2=cpu1, 4=cpu2, 8=cpu3
+                        */
+                       #interrupt-cells = <2>;
+
+                       /*
+                        * upstream irqs to core intc - downstream these are
+                        * "COMMON" irq 0,1..
+                        */
+                       interrupts = <24 25 26 27 28 29 30 31>;
+               };
+
+               uart0: serial@f0000000 {
+                       compatible = "ns8250";
+                       reg = <0xf0000000 0x2000>;
+                       interrupt-parent = <&idu_intc>;
+                       interrupts = <0 0>; /* cmn irq 0 -> cpu irq 24
+                                               RR distribute to all cpus */
+                       clock-frequency = <3686400>;
+                       baud = <115200>;
+                       reg-shift = <2>;
+                       reg-io-width = <4>;
+                       no-loopback-test = <1>;
+               };
+
+               pgu0: pgu@f9000000 {
+                       compatible = "snps,arcpgufb";
+                       reg = <0xf9000000 0x400>;
+               };
+
+               ps2: ps2@f9001000 {
+                       compatible = "snps,arc_ps2";
+                       reg = <0xf9000400 0x14>;
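+                       /* cmn irq 3 -> core irq 27; distribution 0 = RR across cpus */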
+                       interrupts = <3 0>;
+                       interrupt-parent = <&idu_intc>;
+                       interrupt-names = "arc_ps2_irq";
+               };
+
+               eth0: ethernet@f0003000 {
+                       compatible = "snps,oscilan";
+                       reg = <0xf0003000 0x44>;
+                       interrupt-parent = <&idu_intc>;
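+                       /* cmn irqs 1,2 -> core irqs 25,26; distribution 2 = cpu1 only */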
+                       interrupts = <1 2>, <2 2>;
+                       interrupt-names = "rx", "tx";
+               };
+
+               arcpct0: pct {
+                       compatible = "snps,archs-pct";
+                       #interrupt-cells = <1>;
+                       interrupts = <20>;
+               };
+       };
+};
diff --git a/arch/arc/boot/dts/vdk_axc003.dtsi b/arch/arc/boot/dts/vdk_axc003.dtsi
new file mode 100644 (file)
index 0000000..9393fd9
--- /dev/null
@@ -0,0 +1,61 @@
+/*
+ * Copyright (C) 2013, 2014 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/*
+ * Device tree for AXC003 CPU card: HS38x UP configuration (VDK version)
+ */
+
+/ {
+       compatible = "snps,arc";
+       clock-frequency = <50000000>;
+       #address-cells = <1>;
+       #size-cells = <1>;
+
+       cpu_card {
+               compatible = "simple-bus";
+               #address-cells = <1>;
+               #size-cells = <1>;
+
+               ranges = <0x00000000 0xf0000000 0x10000000>;
+
+               cpu_intc: archs-intc@cpu {
+                       compatible = "snps,archs-intc";
+                       interrupt-controller;
+                       #interrupt-cells = <1>;
+               };
+
+               debug_uart: dw-apb-uart@0x5000 {
+                       compatible = "snps,dw-apb-uart";
+                       reg = <0x5000 0x100>;
+                       clock-frequency = <2403200>;
+                       interrupt-parent = <&cpu_intc>;
+                       interrupts = <19>;
+                       baud = <115200>;
+                       reg-shift = <2>;
+                       reg-io-width = <4>;
+               };
+
+       };
+
+       mb_intc: dw-apb-ictl@0xe0012000 {
+               #interrupt-cells = <1>;
+               compatible = "snps,dw-apb-ictl";
+               reg = < 0xe0012000 0x200 >;
+               interrupt-controller;
+               interrupt-parent = <&cpu_intc>;
+               interrupts = < 18 >;
+       };
+
+       memory {
+               #address-cells = <1>;
+               #size-cells = <1>;
+               ranges = <0x00000000 0x80000000 0x40000000>;
+               device_type = "memory";
+               reg = <0x00000000 0x20000000>;  /* 512MiB */
+       };
+};
diff --git a/arch/arc/boot/dts/vdk_axc003_idu.dtsi b/arch/arc/boot/dts/vdk_axc003_idu.dtsi
new file mode 100644 (file)
index 0000000..9bee8ed
--- /dev/null
@@ -0,0 +1,76 @@
+/*
+ * Copyright (C) 2014, 2015 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/*
+ * Device tree for AXC003 CPU card:
+ * HS38x2 (Dual Core) with IDU intc (VDK version)
+ */
+
+/ {
+       compatible = "snps,arc";
+       clock-frequency = <50000000>;
+       #address-cells = <1>;
+       #size-cells = <1>;
+
+       cpu_card {
+               compatible = "simple-bus";
+               #address-cells = <1>;
+               #size-cells = <1>;
+
+               ranges = <0x00000000 0xf0000000 0x10000000>;
+
+               cpu_intc: archs-intc@cpu {
+                       compatible = "snps,archs-intc";
+                       interrupt-controller;
+                       #interrupt-cells = <1>;
+               };
+
+               idu_intc: idu-interrupt-controller {
+                       compatible = "snps,archs-idu-intc";
+                       interrupt-controller;
+                       interrupt-parent = <&cpu_intc>;
+
+                       /*
+                        * <hwirq  distribution>
+                        * distribution: 0=RR; 1=cpu0, 2=cpu1, 4=cpu2, 8=cpu3
+                        */
+                       #interrupt-cells = <2>;
+
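+                       /*
+                        * upstream irqs to core intc - downstream these are
+                        * "COMMON" irq 0,1..
+                        */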
+                       interrupts = <24 25 26 27>;
+               };
+
+               debug_uart: dw-apb-uart@0x5000 {
+                       compatible = "snps,dw-apb-uart";
+                       reg = <0x5000 0x100>;
+                       clock-frequency = <2403200>;
+                       interrupt-parent = <&idu_intc>;
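+                       /* cmn irq 2 -> core irq 26; distribution 0 = RR */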
+                       interrupts = <2 0>;
+                       baud = <115200>;
+                       reg-shift = <2>;
+                       reg-io-width = <4>;
+               };
+
+       };
+
+       mb_intc: dw-apb-ictl@0xe0012000 {
+               #interrupt-cells = <1>;
+               compatible = "snps,dw-apb-ictl";
+               reg = < 0xe0012000 0x200 >;
+               interrupt-controller;
+               interrupt-parent = <&idu_intc>;
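+               /* cmn irq 0 -> core irq 24; distribution 0 = RR */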
+               interrupts = < 0 0 >;
+       };
+
+       memory {
+               #address-cells = <1>;
+               #size-cells = <1>;
+               ranges = <0x00000000 0x80000000 0x40000000>;
+               device_type = "memory";
+               reg = <0x00000000 0x20000000>;  /* 512MiB */
+       };
+};
diff --git a/arch/arc/boot/dts/vdk_axs10x_mb.dtsi b/arch/arc/boot/dts/vdk_axs10x_mb.dtsi
new file mode 100644 (file)
index 0000000..45cd665
--- /dev/null
@@ -0,0 +1,93 @@
+/*
+ * Support for peripherals on the AXS10x mainboard (VDK version)
+ *
+ * Copyright (C) 2013-15 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/ {
+       axs10x_mb_vdk {
+               compatible = "simple-bus";
+               #address-cells = <1>;
+               #size-cells = <1>;
+               ranges = <0x00000000 0xe0000000 0x10000000>;
+               interrupt-parent = <&mb_intc>;
+
+               clocks {
+                       apbclk: apbclk {
+                               compatible = "fixed-clock";
+                               clock-frequency = <50000000>;
+                               #clock-cells = <0>;
+                       };
+
+               };
+
+               ethernet@0x18000 {
+                       #interrupt-cells = <1>;
+                       compatible = "snps,dwmac";
+                       reg = < 0x18000 0x2000 >;
+                       interrupts = < 4 >;
+                       interrupt-names = "macirq";
+                       phy-mode = "rgmii";
+                       snps,phy-addr = < 0 >;  // VDK model phy address is 0
+                       snps,pbl = < 32 >;
+                       clocks = <&apbclk>;
+                       clock-names = "stmmaceth";
+               };
+
+               ehci@0x40000 {
+                       compatible = "generic-ehci";
+                       reg = < 0x40000 0x100 >;
+                       interrupts = < 8 >;
+               };
+
+               uart@0x20000 {
+                       compatible = "snps,dw-apb-uart";
+                       reg = <0x20000 0x100>;
+                       clock-frequency = <2403200>;
+                       interrupts = <17>;
+                       baud = <115200>;
+                       reg-shift = <2>;
+                       reg-io-width = <4>;
+               };
+
+               uart@0x21000 {
+                       compatible = "snps,dw-apb-uart";
+                       reg = <0x21000 0x100>;
+                       clock-frequency = <2403200>;
+                       interrupts = <18>;
+                       baud = <115200>;
+                       reg-shift = <2>;
+                       reg-io-width = <4>;
+               };
+
+               uart@0x22000 {
+                       compatible = "snps,dw-apb-uart";
+                       reg = <0x22000 0x100>;
+                       clock-frequency = <2403200>;
+                       interrupts = <19>;
+                       baud = <115200>;
+                       reg-shift = <2>;
+                       reg-io-width = <4>;
+               };
+
+               /*
+                * PGU output is sent directly to the virtual LCD screen;
+                * the HDMI controller is not modelled
+                */
+               pgu@0x17000 {
+                       compatible = "snps,arcpgufb";
+                       reg = <0x17000 0x400>;
+                       clock-frequency = <51000000>; /* PGU clock is initialized in the init function */
+                       /* interrupts = <5>;   PGU interrupt not used; this vector is used for the ps2 below */
+               };
+
+               /*
+                * The VDK has an additional PS/2 keyboard/mouse interface
+                * integrated in the LCD screen model
+                */
+               ps2: ps2@e0017400 {
+                       compatible = "snps,arc_ps2";
+                       reg = <0x17400 0x14>;
+                       interrupts = <5>;
+                       interrupt-names = "arc_ps2_irq";
+               };
+       };
+};
diff --git a/arch/arc/boot/dts/vdk_hs38.dts b/arch/arc/boot/dts/vdk_hs38.dts
new file mode 100644 (file)
index 0000000..5d803dd
--- /dev/null
@@ -0,0 +1,21 @@
+/*
+ * Copyright (C) 2013 Synopsys, Inc. (www.synopsys.com)
+ *
+ * ARC HS38 Virtual Development Kit (VDK)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+/dts-v1/;
+
+/include/ "vdk_axc003.dtsi"
+/include/ "vdk_axs10x_mb.dtsi"
+
+/ {
+       compatible = "snps,axs103";
+
+       chosen {
+               bootargs = "earlycon=uart8250,mmio32,0xe0022000,115200n8 console=tty0 console=ttyS3,115200n8 consoleblank=0";
+       };
+};
diff --git a/arch/arc/boot/dts/vdk_hs38_smp.dts b/arch/arc/boot/dts/vdk_hs38_smp.dts
new file mode 100644 (file)
index 0000000..031a5bc
--- /dev/null
@@ -0,0 +1,21 @@
+/*
+ * Copyright (C) 2013 Synopsys, Inc. (www.synopsys.com)
+ *
+ * ARC HS38 Virtual Development Kit, SMP version (VDK)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+/dts-v1/;
+
+/include/ "vdk_axc003_idu.dtsi"
+/include/ "vdk_axs10x_mb.dtsi"
+
+/ {
+       compatible = "snps,axs103";
+
+       chosen {
+               bootargs = "earlycon=uart8250,mmio32,0xe0022000,115200n8 console=tty0 console=ttyS3,115200n8 consoleblank=0";
+       };
+};
diff --git a/arch/arc/configs/axs101_defconfig b/arch/arc/configs/axs101_defconfig
new file mode 100644 (file)
index 0000000..562dac6
--- /dev/null
@@ -0,0 +1,111 @@
+CONFIG_CROSS_COMPILE="arc-linux-uclibc-"
+CONFIG_DEFAULT_HOSTNAME="ARCLinux"
+# CONFIG_SWAP is not set
+CONFIG_SYSVIPC=y
+CONFIG_POSIX_MQUEUE=y
+# CONFIG_CROSS_MEMORY_ATTACH is not set
+CONFIG_NO_HZ_IDLE=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_NAMESPACES=y
+# CONFIG_UTS_NS is not set
+# CONFIG_PID_NS is not set
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_INITRAMFS_SOURCE="../arc_initramfs/"
+CONFIG_EMBEDDED=y
+CONFIG_PERF_EVENTS=y
+# CONFIG_VM_EVENT_COUNTERS is not set
+# CONFIG_SLUB_DEBUG is not set
+# CONFIG_COMPAT_BRK is not set
+CONFIG_MODULES=y
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_ARC_PLAT_AXS10X=y
+CONFIG_AXS101=y
+CONFIG_ARC_CACHE_LINE_SHIFT=5
+CONFIG_ARC_BUILTIN_DTB_NAME="axs101"
+CONFIG_PREEMPT=y
+# CONFIG_COMPACTION is not set
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_NET_KEY=y
+CONFIG_INET=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+CONFIG_IP_PNP_RARP=y
+# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
+# CONFIG_INET_XFRM_MODE_TUNNEL is not set
+# CONFIG_INET_XFRM_MODE_BEET is not set
+# CONFIG_IPV6 is not set
+# CONFIG_STANDALONE is not set
+# CONFIG_PREVENT_FIRMWARE_BUILD is not set
+# CONFIG_FIRMWARE_IN_KERNEL is not set
+CONFIG_SCSI=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_NETDEVICES=y
+# CONFIG_NET_VENDOR_ARC is not set
+# CONFIG_NET_VENDOR_BROADCOM is not set
+# CONFIG_NET_VENDOR_INTEL is not set
+# CONFIG_NET_VENDOR_MARVELL is not set
+# CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_NATSEMI is not set
+# CONFIG_NET_VENDOR_SEEQ is not set
+CONFIG_STMMAC_ETH=y
+# CONFIG_NET_VENDOR_VIA is not set
+# CONFIG_NET_VENDOR_WIZNET is not set
+CONFIG_NATIONAL_PHY=y
+# CONFIG_USB_NET_DRIVERS is not set
+CONFIG_INPUT_EVDEV=y
+CONFIG_MOUSE_PS2_TOUCHKIT=y
+CONFIG_MOUSE_SERIAL=y
+CONFIG_MOUSE_SYNAPTICS_USB=y
+# CONFIG_LEGACY_PTYS is not set
+# CONFIG_DEVKMEM is not set
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_8250_DW=y
+CONFIG_SERIAL_OF_PLATFORM=y
+# CONFIG_HW_RANDOM is not set
+CONFIG_I2C=y
+CONFIG_I2C_CHARDEV=y
+CONFIG_I2C_DESIGNWARE_PLATFORM=y
+# CONFIG_HWMON is not set
+CONFIG_FB=y
+# CONFIG_VGA_CONSOLE is not set
+CONFIG_FRAMEBUFFER_CONSOLE=y
+CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y
+CONFIG_LOGO=y
+# CONFIG_LOGO_LINUX_MONO is not set
+# CONFIG_LOGO_LINUX_VGA16 is not set
+# CONFIG_LOGO_LINUX_CLUT224 is not set
+CONFIG_USB_EHCI_HCD=y
+CONFIG_USB_EHCI_HCD_PLATFORM=y
+CONFIG_USB_OHCI_HCD=y
+CONFIG_USB_OHCI_HCD_PLATFORM=y
+CONFIG_USB_STORAGE=y
+CONFIG_MMC=y
+CONFIG_MMC_SDHCI=y
+CONFIG_MMC_SDHCI_PLTFM=y
+CONFIG_MMC_DW=y
+CONFIG_MMC_DW_IDMAC=y
+# CONFIG_IOMMU_SUPPORT is not set
+CONFIG_EXT3_FS=y
+CONFIG_EXT4_FS=y
+CONFIG_MSDOS_FS=y
+CONFIG_VFAT_FS=y
+CONFIG_NTFS_FS=y
+CONFIG_TMPFS=y
+CONFIG_JFFS2_FS=y
+CONFIG_NFS_FS=y
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_ISO8859_1=y
+# CONFIG_ENABLE_WARN_DEPRECATED is not set
+# CONFIG_ENABLE_MUST_CHECK is not set
+CONFIG_STRIP_ASM_SYMS=y
+CONFIG_LOCKUP_DETECTOR=y
+CONFIG_DEFAULT_HUNG_TASK_TIMEOUT=10
+# CONFIG_SCHED_DEBUG is not set
+# CONFIG_DEBUG_PREEMPT is not set
+# CONFIG_FTRACE is not set
diff --git a/arch/arc/configs/axs103_defconfig b/arch/arc/configs/axs103_defconfig
new file mode 100644 (file)
index 0000000..83a6d8d
--- /dev/null
@@ -0,0 +1,117 @@
+CONFIG_CROSS_COMPILE="arc-linux-uclibc-"
+CONFIG_DEFAULT_HOSTNAME="ARCLinux"
+# CONFIG_SWAP is not set
+CONFIG_SYSVIPC=y
+CONFIG_POSIX_MQUEUE=y
+# CONFIG_CROSS_MEMORY_ATTACH is not set
+CONFIG_NO_HZ_IDLE=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_NAMESPACES=y
+# CONFIG_UTS_NS is not set
+# CONFIG_PID_NS is not set
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_INITRAMFS_SOURCE="../../arc_initramfs_hs/"
+CONFIG_EMBEDDED=y
+CONFIG_PERF_EVENTS=y
+# CONFIG_VM_EVENT_COUNTERS is not set
+# CONFIG_SLUB_DEBUG is not set
+# CONFIG_COMPAT_BRK is not set
+CONFIG_MODULES=y
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_ARC_PLAT_AXS10X=y
+CONFIG_AXS103=y
+CONFIG_ISA_ARCV2=y
+CONFIG_ARC_BUILTIN_DTB_NAME="axs103"
+CONFIG_PREEMPT=y
+# CONFIG_COMPACTION is not set
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_NET_KEY=y
+CONFIG_INET=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+CONFIG_IP_PNP_RARP=y
+# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
+# CONFIG_INET_XFRM_MODE_TUNNEL is not set
+# CONFIG_INET_XFRM_MODE_BEET is not set
+# CONFIG_IPV6 is not set
+# CONFIG_STANDALONE is not set
+# CONFIG_PREVENT_FIRMWARE_BUILD is not set
+# CONFIG_FIRMWARE_IN_KERNEL is not set
+CONFIG_MTD=y
+CONFIG_MTD_CMDLINE_PARTS=y
+CONFIG_MTD_BLOCK=y
+CONFIG_MTD_NAND=y
+CONFIG_MTD_NAND_AXS=y
+CONFIG_SCSI=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_NETDEVICES=y
+# CONFIG_NET_VENDOR_ARC is not set
+# CONFIG_NET_VENDOR_BROADCOM is not set
+# CONFIG_NET_VENDOR_INTEL is not set
+# CONFIG_NET_VENDOR_MARVELL is not set
+# CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_NATSEMI is not set
+# CONFIG_NET_VENDOR_SEEQ is not set
+CONFIG_STMMAC_ETH=y
+# CONFIG_NET_VENDOR_VIA is not set
+# CONFIG_NET_VENDOR_WIZNET is not set
+CONFIG_NATIONAL_PHY=y
+# CONFIG_USB_NET_DRIVERS is not set
+CONFIG_INPUT_EVDEV=y
+CONFIG_MOUSE_PS2_TOUCHKIT=y
+CONFIG_MOUSE_SERIAL=y
+CONFIG_MOUSE_SYNAPTICS_USB=y
+# CONFIG_LEGACY_PTYS is not set
+# CONFIG_DEVKMEM is not set
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_8250_DW=y
+CONFIG_SERIAL_OF_PLATFORM=y
+# CONFIG_HW_RANDOM is not set
+CONFIG_I2C=y
+CONFIG_I2C_CHARDEV=y
+CONFIG_I2C_DESIGNWARE_PLATFORM=y
+# CONFIG_HWMON is not set
+CONFIG_FB=y
+# CONFIG_VGA_CONSOLE is not set
+CONFIG_FRAMEBUFFER_CONSOLE=y
+CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y
+CONFIG_LOGO=y
+# CONFIG_LOGO_LINUX_MONO is not set
+# CONFIG_LOGO_LINUX_VGA16 is not set
+# CONFIG_LOGO_LINUX_CLUT224 is not set
+CONFIG_USB=y
+CONFIG_USB_EHCI_HCD=y
+CONFIG_USB_EHCI_HCD_PLATFORM=y
+CONFIG_USB_OHCI_HCD=y
+CONFIG_USB_OHCI_HCD_PLATFORM=y
+CONFIG_USB_STORAGE=y
+CONFIG_MMC=y
+CONFIG_MMC_SDHCI=y
+CONFIG_MMC_SDHCI_PLTFM=y
+CONFIG_MMC_DW=y
+CONFIG_MMC_DW_IDMAC=y
+# CONFIG_IOMMU_SUPPORT is not set
+CONFIG_EXT3_FS=y
+CONFIG_EXT4_FS=y
+CONFIG_MSDOS_FS=y
+CONFIG_VFAT_FS=y
+CONFIG_NTFS_FS=y
+CONFIG_TMPFS=y
+CONFIG_JFFS2_FS=y
+CONFIG_NFS_FS=y
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_ISO8859_1=y
+# CONFIG_ENABLE_WARN_DEPRECATED is not set
+# CONFIG_ENABLE_MUST_CHECK is not set
+CONFIG_STRIP_ASM_SYMS=y
+CONFIG_LOCKUP_DETECTOR=y
+CONFIG_DEFAULT_HUNG_TASK_TIMEOUT=10
+# CONFIG_SCHED_DEBUG is not set
+# CONFIG_DEBUG_PREEMPT is not set
+# CONFIG_FTRACE is not set
diff --git a/arch/arc/configs/axs103_smp_defconfig b/arch/arc/configs/axs103_smp_defconfig
new file mode 100644 (file)
index 0000000..f1e1c84
--- /dev/null
@@ -0,0 +1,118 @@
+CONFIG_CROSS_COMPILE="arc-linux-uclibc-"
+CONFIG_DEFAULT_HOSTNAME="ARCLinux"
+# CONFIG_SWAP is not set
+CONFIG_SYSVIPC=y
+CONFIG_POSIX_MQUEUE=y
+# CONFIG_CROSS_MEMORY_ATTACH is not set
+CONFIG_NO_HZ_IDLE=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_NAMESPACES=y
+# CONFIG_UTS_NS is not set
+# CONFIG_PID_NS is not set
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_INITRAMFS_SOURCE="../../arc_initramfs_hs/"
+CONFIG_EMBEDDED=y
+CONFIG_PERF_EVENTS=y
+# CONFIG_VM_EVENT_COUNTERS is not set
+# CONFIG_COMPAT_BRK is not set
+CONFIG_SLAB=y
+CONFIG_MODULES=y
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_ARC_PLAT_AXS10X=y
+CONFIG_AXS103=y
+CONFIG_ISA_ARCV2=y
+CONFIG_SMP=y
+CONFIG_ARC_BUILTIN_DTB_NAME="axs103_idu"
+CONFIG_PREEMPT=y
+# CONFIG_COMPACTION is not set
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_NET_KEY=y
+CONFIG_INET=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+CONFIG_IP_PNP_RARP=y
+# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
+# CONFIG_INET_XFRM_MODE_TUNNEL is not set
+# CONFIG_INET_XFRM_MODE_BEET is not set
+# CONFIG_IPV6 is not set
+# CONFIG_STANDALONE is not set
+# CONFIG_PREVENT_FIRMWARE_BUILD is not set
+# CONFIG_FIRMWARE_IN_KERNEL is not set
+CONFIG_MTD=y
+CONFIG_MTD_CMDLINE_PARTS=y
+CONFIG_MTD_BLOCK=y
+CONFIG_MTD_NAND=y
+CONFIG_MTD_NAND_AXS=y
+CONFIG_SCSI=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_NETDEVICES=y
+# CONFIG_NET_VENDOR_ARC is not set
+# CONFIG_NET_VENDOR_BROADCOM is not set
+# CONFIG_NET_VENDOR_INTEL is not set
+# CONFIG_NET_VENDOR_MARVELL is not set
+# CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_NATSEMI is not set
+# CONFIG_NET_VENDOR_SEEQ is not set
+CONFIG_STMMAC_ETH=y
+# CONFIG_NET_VENDOR_VIA is not set
+# CONFIG_NET_VENDOR_WIZNET is not set
+CONFIG_NATIONAL_PHY=y
+# CONFIG_USB_NET_DRIVERS is not set
+CONFIG_INPUT_EVDEV=y
+CONFIG_MOUSE_PS2_TOUCHKIT=y
+CONFIG_MOUSE_SERIAL=y
+CONFIG_MOUSE_SYNAPTICS_USB=y
+# CONFIG_LEGACY_PTYS is not set
+# CONFIG_DEVKMEM is not set
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_8250_DW=y
+CONFIG_SERIAL_OF_PLATFORM=y
+# CONFIG_HW_RANDOM is not set
+CONFIG_I2C=y
+CONFIG_I2C_CHARDEV=y
+CONFIG_I2C_DESIGNWARE_PLATFORM=y
+# CONFIG_HWMON is not set
+CONFIG_FB=y
+# CONFIG_VGA_CONSOLE is not set
+CONFIG_FRAMEBUFFER_CONSOLE=y
+CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y
+CONFIG_LOGO=y
+# CONFIG_LOGO_LINUX_MONO is not set
+# CONFIG_LOGO_LINUX_VGA16 is not set
+# CONFIG_LOGO_LINUX_CLUT224 is not set
+CONFIG_USB=y
+CONFIG_USB_EHCI_HCD=y
+CONFIG_USB_EHCI_HCD_PLATFORM=y
+CONFIG_USB_OHCI_HCD=y
+CONFIG_USB_OHCI_HCD_PLATFORM=y
+CONFIG_USB_STORAGE=y
+CONFIG_MMC=y
+CONFIG_MMC_SDHCI=y
+CONFIG_MMC_SDHCI_PLTFM=y
+CONFIG_MMC_DW=y
+CONFIG_MMC_DW_IDMAC=y
+# CONFIG_IOMMU_SUPPORT is not set
+CONFIG_EXT3_FS=y
+CONFIG_EXT4_FS=y
+CONFIG_MSDOS_FS=y
+CONFIG_VFAT_FS=y
+CONFIG_NTFS_FS=y
+CONFIG_TMPFS=y
+CONFIG_JFFS2_FS=y
+CONFIG_NFS_FS=y
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_ISO8859_1=y
+# CONFIG_ENABLE_WARN_DEPRECATED is not set
+# CONFIG_ENABLE_MUST_CHECK is not set
+CONFIG_STRIP_ASM_SYMS=y
+CONFIG_LOCKUP_DETECTOR=y
+CONFIG_DEFAULT_HUNG_TASK_TIMEOUT=10
+# CONFIG_SCHED_DEBUG is not set
+# CONFIG_DEBUG_PREEMPT is not set
+# CONFIG_FTRACE is not set
index ef4d3bc7b6c05fdaa414c3c0312bfb829ab769e9..138f9d8879570a8b329415d9238c710fa6e5b032 100644 (file)
@@ -1,4 +1,4 @@
-CONFIG_CROSS_COMPILE="arc-linux-uclibc-"
+CONFIG_CROSS_COMPILE="arc-linux-"
 # CONFIG_LOCALVERSION_AUTO is not set
 CONFIG_DEFAULT_HOSTNAME="ARCLinux"
 # CONFIG_SWAP is not set
@@ -22,9 +22,8 @@ CONFIG_MODULES=y
 # CONFIG_BLK_DEV_BSG is not set
 # CONFIG_IOSCHED_DEADLINE is not set
 # CONFIG_IOSCHED_CFQ is not set
-CONFIG_ARC_PLAT_FPGA_LEGACY=y
-# CONFIG_ARC_HAS_RTSC is not set
-CONFIG_ARC_BUILTIN_DTB_NAME="angel4"
+CONFIG_ARC_PLAT_SIM=y
+CONFIG_ARC_BUILTIN_DTB_NAME="nsim_700"
 CONFIG_PREEMPT=y
 # CONFIG_COMPACTION is not set
 # CONFIG_CROSS_MEMORY_ATTACH is not set
diff --git a/arch/arc/configs/nsim_hs_defconfig b/arch/arc/configs/nsim_hs_defconfig
new file mode 100644 (file)
index 0000000..f761a7c
--- /dev/null
@@ -0,0 +1,64 @@
+CONFIG_CROSS_COMPILE="arc-linux-uclibc-"
+# CONFIG_LOCALVERSION_AUTO is not set
+CONFIG_DEFAULT_HOSTNAME="ARCLinux"
+# CONFIG_SWAP is not set
+CONFIG_SYSVIPC=y
+CONFIG_POSIX_MQUEUE=y
+# CONFIG_CROSS_MEMORY_ATTACH is not set
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_NAMESPACES=y
+# CONFIG_UTS_NS is not set
+# CONFIG_PID_NS is not set
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_INITRAMFS_SOURCE="../arc_initramfs_hs/"
+CONFIG_KALLSYMS_ALL=y
+CONFIG_EMBEDDED=y
+# CONFIG_SLUB_DEBUG is not set
+# CONFIG_COMPAT_BRK is not set
+CONFIG_KPROBES=y
+CONFIG_MODULES=y
+# CONFIG_LBDAF is not set
+# CONFIG_BLK_DEV_BSG is not set
+# CONFIG_IOSCHED_DEADLINE is not set
+# CONFIG_IOSCHED_CFQ is not set
+CONFIG_ARC_PLAT_SIM=y
+CONFIG_ISA_ARCV2=y
+CONFIG_ARC_BUILTIN_DTB_NAME="nsim_hs"
+CONFIG_PREEMPT=y
+# CONFIG_COMPACTION is not set
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_UNIX_DIAG=y
+CONFIG_NET_KEY=y
+CONFIG_INET=y
+# CONFIG_IPV6 is not set
+# CONFIG_STANDALONE is not set
+# CONFIG_PREVENT_FIRMWARE_BUILD is not set
+# CONFIG_FIRMWARE_IN_KERNEL is not set
+# CONFIG_BLK_DEV is not set
+# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
+# CONFIG_INPUT_KEYBOARD is not set
+# CONFIG_INPUT_MOUSE is not set
+# CONFIG_SERIO is not set
+# CONFIG_LEGACY_PTYS is not set
+# CONFIG_DEVKMEM is not set
+CONFIG_SERIAL_ARC=y
+CONFIG_SERIAL_ARC_CONSOLE=y
+# CONFIG_HW_RANDOM is not set
+# CONFIG_HWMON is not set
+# CONFIG_VGA_CONSOLE is not set
+# CONFIG_HID is not set
+# CONFIG_USB_SUPPORT is not set
+# CONFIG_IOMMU_SUPPORT is not set
+CONFIG_EXT2_FS=y
+CONFIG_EXT2_FS_XATTR=y
+CONFIG_TMPFS=y
+# CONFIG_MISC_FILESYSTEMS is not set
+CONFIG_NFS_FS=y
+# CONFIG_ENABLE_WARN_DEPRECATED is not set
+# CONFIG_ENABLE_MUST_CHECK is not set
+# CONFIG_DEBUG_PREEMPT is not set
+CONFIG_XZ_DEC=y
diff --git a/arch/arc/configs/nsim_hs_smp_defconfig b/arch/arc/configs/nsim_hs_smp_defconfig
new file mode 100644 (file)
index 0000000..dc6f74f
--- /dev/null
@@ -0,0 +1,63 @@
+CONFIG_CROSS_COMPILE="arc-linux-uclibc-"
+# CONFIG_LOCALVERSION_AUTO is not set
+CONFIG_DEFAULT_HOSTNAME="ARCLinux"
+# CONFIG_SWAP is not set
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_NAMESPACES=y
+# CONFIG_UTS_NS is not set
+# CONFIG_PID_NS is not set
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_INITRAMFS_SOURCE="../arc_initramfs_hs/"
+CONFIG_KALLSYMS_ALL=y
+CONFIG_EMBEDDED=y
+# CONFIG_SLUB_DEBUG is not set
+# CONFIG_COMPAT_BRK is not set
+CONFIG_KPROBES=y
+CONFIG_MODULES=y
+# CONFIG_LBDAF is not set
+# CONFIG_BLK_DEV_BSG is not set
+# CONFIG_IOSCHED_DEADLINE is not set
+# CONFIG_IOSCHED_CFQ is not set
+CONFIG_ARC_PLAT_SIM=y
+CONFIG_ARC_BOARD_ML509=y
+CONFIG_ISA_ARCV2=y
+CONFIG_SMP=y
+CONFIG_ARC_BUILTIN_DTB_NAME="nsim_hs_idu"
+CONFIG_PREEMPT=y
+# CONFIG_COMPACTION is not set
+# CONFIG_CROSS_MEMORY_ATTACH is not set
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_UNIX_DIAG=y
+CONFIG_NET_KEY=y
+CONFIG_INET=y
+# CONFIG_IPV6 is not set
+# CONFIG_STANDALONE is not set
+# CONFIG_PREVENT_FIRMWARE_BUILD is not set
+# CONFIG_FIRMWARE_IN_KERNEL is not set
+# CONFIG_BLK_DEV is not set
+# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
+# CONFIG_INPUT_KEYBOARD is not set
+# CONFIG_INPUT_MOUSE is not set
+# CONFIG_SERIO is not set
+# CONFIG_LEGACY_PTYS is not set
+# CONFIG_DEVKMEM is not set
+CONFIG_SERIAL_ARC=y
+CONFIG_SERIAL_ARC_CONSOLE=y
+# CONFIG_HW_RANDOM is not set
+# CONFIG_HWMON is not set
+# CONFIG_VGA_CONSOLE is not set
+# CONFIG_HID is not set
+# CONFIG_USB_SUPPORT is not set
+# CONFIG_IOMMU_SUPPORT is not set
+CONFIG_EXT2_FS=y
+CONFIG_EXT2_FS_XATTR=y
+CONFIG_TMPFS=y
+# CONFIG_MISC_FILESYSTEMS is not set
+CONFIG_NFS_FS=y
+# CONFIG_ENABLE_WARN_DEPRECATED is not set
+# CONFIG_ENABLE_MUST_CHECK is not set
+CONFIG_XZ_DEC=y
index d2ac4e56ba1dd6955c43044aa0ec8da7e062c785..31e1d95764ff91dc10fe80d936a5613e6f713cc4 100644 (file)
@@ -1,4 +1,4 @@
-CONFIG_CROSS_COMPILE="arc-linux-uclibc-"
+CONFIG_CROSS_COMPILE="arc-linux-"
 # CONFIG_LOCALVERSION_AUTO is not set
 CONFIG_DEFAULT_HOSTNAME="ARCLinux"
 # CONFIG_SWAP is not set
@@ -23,8 +23,7 @@ CONFIG_MODULES=y
 # CONFIG_BLK_DEV_BSG is not set
 # CONFIG_IOSCHED_DEADLINE is not set
 # CONFIG_IOSCHED_CFQ is not set
-CONFIG_ARC_PLAT_FPGA_LEGACY=y
-# CONFIG_ARC_HAS_RTSC is not set
+CONFIG_ARC_PLAT_SIM=y
 CONFIG_ARC_BUILTIN_DTB_NAME="nsimosci"
 # CONFIG_COMPACTION is not set
 CONFIG_NET=y
diff --git a/arch/arc/configs/nsimosci_hs_defconfig b/arch/arc/configs/nsimosci_hs_defconfig
new file mode 100644 (file)
index 0000000..3fef0a2
--- /dev/null
@@ -0,0 +1,73 @@
+CONFIG_CROSS_COMPILE="arc-linux-uclibc-"
+# CONFIG_LOCALVERSION_AUTO is not set
+CONFIG_DEFAULT_HOSTNAME="ARCLinux"
+# CONFIG_SWAP is not set
+CONFIG_SYSVIPC=y
+# CONFIG_CROSS_MEMORY_ATTACH is not set
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_NAMESPACES=y
+# CONFIG_UTS_NS is not set
+# CONFIG_PID_NS is not set
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_INITRAMFS_SOURCE="../arc_initramfs_hs/"
+CONFIG_KALLSYMS_ALL=y
+CONFIG_EMBEDDED=y
+# CONFIG_SLUB_DEBUG is not set
+# CONFIG_COMPAT_BRK is not set
+CONFIG_KPROBES=y
+CONFIG_MODULES=y
+# CONFIG_LBDAF is not set
+# CONFIG_BLK_DEV_BSG is not set
+# CONFIG_IOSCHED_DEADLINE is not set
+# CONFIG_IOSCHED_CFQ is not set
+CONFIG_ARC_PLAT_SIM=y
+CONFIG_ISA_ARCV2=y
+CONFIG_ARC_BUILTIN_DTB_NAME="nsimosci_hs"
+# CONFIG_COMPACTION is not set
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_UNIX_DIAG=y
+CONFIG_NET_KEY=y
+CONFIG_INET=y
+# CONFIG_IPV6 is not set
+# CONFIG_STANDALONE is not set
+# CONFIG_PREVENT_FIRMWARE_BUILD is not set
+# CONFIG_FIRMWARE_IN_KERNEL is not set
+# CONFIG_BLK_DEV is not set
+CONFIG_NETDEVICES=y
+CONFIG_NET_OSCI_LAN=y
+CONFIG_INPUT_EVDEV=y
+# CONFIG_MOUSE_PS2_ALPS is not set
+# CONFIG_MOUSE_PS2_LOGIPS2PP is not set
+# CONFIG_MOUSE_PS2_SYNAPTICS is not set
+# CONFIG_MOUSE_PS2_TRACKPOINT is not set
+CONFIG_MOUSE_PS2_TOUCHKIT=y
+# CONFIG_SERIO_SERPORT is not set
+CONFIG_SERIO_ARC_PS2=y
+# CONFIG_LEGACY_PTYS is not set
+# CONFIG_DEVKMEM is not set
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_8250_NR_UARTS=1
+CONFIG_SERIAL_8250_RUNTIME_UARTS=1
+CONFIG_SERIAL_OF_PLATFORM=y
+# CONFIG_HW_RANDOM is not set
+# CONFIG_HWMON is not set
+CONFIG_FB=y
+# CONFIG_VGA_CONSOLE is not set
+CONFIG_FRAMEBUFFER_CONSOLE=y
+CONFIG_LOGO=y
+# CONFIG_HID is not set
+# CONFIG_USB_SUPPORT is not set
+# CONFIG_IOMMU_SUPPORT is not set
+CONFIG_EXT2_FS=y
+CONFIG_EXT2_FS_XATTR=y
+CONFIG_TMPFS=y
+# CONFIG_MISC_FILESYSTEMS is not set
+CONFIG_NFS_FS=y
+# CONFIG_ENABLE_WARN_DEPRECATED is not set
+# CONFIG_ENABLE_MUST_CHECK is not set
diff --git a/arch/arc/configs/nsimosci_hs_smp_defconfig b/arch/arc/configs/nsimosci_hs_smp_defconfig
new file mode 100644 (file)
index 0000000..5178483
--- /dev/null
@@ -0,0 +1,93 @@
+CONFIG_CROSS_COMPILE="arc-linux-uclibc-"
+CONFIG_DEFAULT_HOSTNAME="ARCLinux"
+# CONFIG_SWAP is not set
+CONFIG_SYSVIPC=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+# CONFIG_UTS_NS is not set
+# CONFIG_PID_NS is not set
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_INITRAMFS_SOURCE="../arc_initramfs_hs/"
+# CONFIG_COMPAT_BRK is not set
+CONFIG_KPROBES=y
+CONFIG_MODULES=y
+# CONFIG_LBDAF is not set
+# CONFIG_BLK_DEV_BSG is not set
+# CONFIG_IOSCHED_DEADLINE is not set
+# CONFIG_IOSCHED_CFQ is not set
+CONFIG_ARC_PLAT_SIM=y
+CONFIG_ARC_BOARD_ML509=y
+CONFIG_ISA_ARCV2=y
+CONFIG_SMP=y
+CONFIG_ARC_HAS_LL64=y
+# CONFIG_ARC_HAS_RTSC is not set
+CONFIG_ARC_BUILTIN_DTB_NAME="nsimosci_hs_idu"
+CONFIG_PREEMPT=y
+# CONFIG_COMPACTION is not set
+# CONFIG_CROSS_MEMORY_ATTACH is not set
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_PACKET_DIAG=y
+CONFIG_UNIX=y
+CONFIG_UNIX_DIAG=y
+CONFIG_NET_KEY=y
+CONFIG_INET=y
+# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
+# CONFIG_INET_XFRM_MODE_TUNNEL is not set
+# CONFIG_INET_XFRM_MODE_BEET is not set
+# CONFIG_INET_LRO is not set
+# CONFIG_IPV6 is not set
+# CONFIG_WIRELESS is not set
+# CONFIG_STANDALONE is not set
+# CONFIG_PREVENT_FIRMWARE_BUILD is not set
+# CONFIG_FIRMWARE_IN_KERNEL is not set
+# CONFIG_BLK_DEV is not set
+CONFIG_NETDEVICES=y
+# CONFIG_NET_VENDOR_ARC is not set
+# CONFIG_NET_CADENCE is not set
+# CONFIG_NET_VENDOR_BROADCOM is not set
+# CONFIG_NET_VENDOR_INTEL is not set
+# CONFIG_NET_VENDOR_MARVELL is not set
+# CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_NATSEMI is not set
+# CONFIG_NET_VENDOR_SEEQ is not set
+# CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_VIA is not set
+# CONFIG_NET_VENDOR_WIZNET is not set
+CONFIG_NET_OSCI_LAN=y
+# CONFIG_WLAN is not set
+CONFIG_INPUT_EVDEV=y
+CONFIG_MOUSE_PS2_TOUCHKIT=y
+# CONFIG_SERIO_SERPORT is not set
+CONFIG_SERIO_LIBPS2=y
+CONFIG_SERIO_ARC_PS2=y
+CONFIG_VT_HW_CONSOLE_BINDING=y
+# CONFIG_LEGACY_PTYS is not set
+# CONFIG_DEVKMEM is not set
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_8250_NR_UARTS=1
+CONFIG_SERIAL_8250_RUNTIME_UARTS=1
+CONFIG_SERIAL_8250_DW=y
+CONFIG_SERIAL_OF_PLATFORM=y
+# CONFIG_HW_RANDOM is not set
+# CONFIG_HWMON is not set
+CONFIG_FB=y
+CONFIG_ARCPGU_RGB888=y
+CONFIG_ARCPGU_DISPTYPE=0
+# CONFIG_VGA_CONSOLE is not set
+CONFIG_FRAMEBUFFER_CONSOLE=y
+CONFIG_LOGO=y
+# CONFIG_HID is not set
+# CONFIG_USB_SUPPORT is not set
+# CONFIG_IOMMU_SUPPORT is not set
+CONFIG_EXT2_FS=y
+CONFIG_EXT2_FS_XATTR=y
+CONFIG_TMPFS=y
+# CONFIG_MISC_FILESYSTEMS is not set
+CONFIG_NFS_FS=y
+# CONFIG_ENABLE_WARN_DEPRECATED is not set
+# CONFIG_ENABLE_MUST_CHECK is not set
+CONFIG_FTRACE=y
index 6be6492442d640d3aba09b42239650995aa6e5be..3b4dc9cebcf15234f3d42a4efd2e40b9c5bd4150 100644 (file)
@@ -1,4 +1,4 @@
-CONFIG_CROSS_COMPILE="arc-linux-uclibc-"
+CONFIG_CROSS_COMPILE="arc-linux-"
 # CONFIG_LOCALVERSION_AUTO is not set
 CONFIG_DEFAULT_HOSTNAME="tb10x"
 CONFIG_SYSVIPC=y
@@ -26,7 +26,6 @@ CONFIG_MODULE_UNLOAD=y
 # CONFIG_BLOCK is not set
 CONFIG_ARC_PLAT_TB10X=y
 CONFIG_ARC_CACHE_LINE_SHIFT=5
-# CONFIG_ARC_HAS_RTSC is not set
 CONFIG_ARC_STACK_NONEXEC=y
 CONFIG_HZ=250
 CONFIG_ARC_BUILTIN_DTB_NAME="abilis_tb100_dvk"
diff --git a/arch/arc/configs/vdk_hs38_defconfig b/arch/arc/configs/vdk_hs38_defconfig
new file mode 100644 (file)
index 0000000..ef35ef3
--- /dev/null
@@ -0,0 +1,102 @@
+CONFIG_CROSS_COMPILE="arc-linux-uclibc-"
+# CONFIG_LOCALVERSION_AUTO is not set
+CONFIG_DEFAULT_HOSTNAME="ARCLinux"
+# CONFIG_CROSS_MEMORY_ATTACH is not set
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_EMBEDDED=y
+CONFIG_PERF_EVENTS=y
+# CONFIG_VM_EVENT_COUNTERS is not set
+# CONFIG_SLUB_DEBUG is not set
+# CONFIG_COMPAT_BRK is not set
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_ARC_PLAT_AXS10X=y
+CONFIG_AXS103=y
+CONFIG_ISA_ARCV2=y
+CONFIG_ARC_UBOOT_SUPPORT=y
+CONFIG_ARC_BUILTIN_DTB_NAME="vdk_hs38"
+CONFIG_PREEMPT=y
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_NET_KEY=y
+CONFIG_INET=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+CONFIG_IP_PNP_RARP=y
+# CONFIG_IPV6 is not set
+CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
+# CONFIG_STANDALONE is not set
+# CONFIG_PREVENT_FIRMWARE_BUILD is not set
+# CONFIG_FIRMWARE_IN_KERNEL is not set
+CONFIG_MTD=y
+CONFIG_MTD_CMDLINE_PARTS=y
+CONFIG_MTD_BLOCK=y
+CONFIG_MTD_SLRAM=y
+CONFIG_BLK_DEV_RAM=y
+CONFIG_SCSI=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_NETDEVICES=y
+# CONFIG_NET_VENDOR_ARC is not set
+# CONFIG_NET_VENDOR_BROADCOM is not set
+# CONFIG_NET_VENDOR_INTEL is not set
+# CONFIG_NET_VENDOR_MARVELL is not set
+# CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_NATSEMI is not set
+# CONFIG_NET_VENDOR_SEEQ is not set
+CONFIG_STMMAC_ETH=y
+# CONFIG_NET_VENDOR_VIA is not set
+# CONFIG_NET_VENDOR_WIZNET is not set
+CONFIG_NATIONAL_PHY=y
+CONFIG_MOUSE_PS2_TOUCHKIT=y
+CONFIG_SERIO_ARC_PS2=y
+# CONFIG_LEGACY_PTYS is not set
+# CONFIG_DEVKMEM is not set
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_8250_DW=y
+CONFIG_SERIAL_OF_PLATFORM=y
+# CONFIG_HW_RANDOM is not set
+# CONFIG_HWMON is not set
+CONFIG_FB=y
+CONFIG_ARCPGU_RGB888=y
+CONFIG_ARCPGU_DISPTYPE=0
+# CONFIG_VGA_CONSOLE is not set
+CONFIG_FRAMEBUFFER_CONSOLE=y
+CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y
+CONFIG_LOGO=y
+# CONFIG_LOGO_LINUX_MONO is not set
+# CONFIG_LOGO_LINUX_VGA16 is not set
+# CONFIG_LOGO_LINUX_CLUT224 is not set
+CONFIG_USB=y
+CONFIG_USB_EHCI_HCD=y
+# CONFIG_USB_EHCI_TT_NEWSCHED is not set
+CONFIG_USB_EHCI_HCD_PLATFORM=y
+CONFIG_USB_OHCI_HCD=y
+CONFIG_USB_OHCI_HCD_PLATFORM=y
+CONFIG_USB_STORAGE=y
+CONFIG_USB_SERIAL=y
+# CONFIG_IOMMU_SUPPORT is not set
+CONFIG_EXT3_FS=y
+CONFIG_EXT4_FS=y
+CONFIG_MSDOS_FS=y
+CONFIG_VFAT_FS=y
+CONFIG_NTFS_FS=y
+CONFIG_TMPFS=y
+CONFIG_JFFS2_FS=y
+CONFIG_NFS_FS=y
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_ISO8859_1=y
+# CONFIG_ENABLE_WARN_DEPRECATED is not set
+# CONFIG_ENABLE_MUST_CHECK is not set
+CONFIG_STRIP_ASM_SYMS=y
+CONFIG_DEBUG_SHIRQ=y
+CONFIG_LOCKUP_DETECTOR=y
+CONFIG_DEFAULT_HUNG_TASK_TIMEOUT=10
+# CONFIG_SCHED_DEBUG is not set
+# CONFIG_DEBUG_PREEMPT is not set
+# CONFIG_FTRACE is not set
diff --git a/arch/arc/configs/vdk_hs38_smp_defconfig b/arch/arc/configs/vdk_hs38_smp_defconfig
new file mode 100644 (file)
index 0000000..634509e
--- /dev/null
@@ -0,0 +1,104 @@
+CONFIG_CROSS_COMPILE="arc-linux-uclibc-"
+# CONFIG_LOCALVERSION_AUTO is not set
+CONFIG_DEFAULT_HOSTNAME="ARCLinux"
+# CONFIG_CROSS_MEMORY_ATTACH is not set
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_EMBEDDED=y
+CONFIG_PERF_EVENTS=y
+# CONFIG_VM_EVENT_COUNTERS is not set
+# CONFIG_SLUB_DEBUG is not set
+# CONFIG_COMPAT_BRK is not set
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_ARC_PLAT_AXS10X=y
+CONFIG_AXS103=y
+CONFIG_ISA_ARCV2=y
+CONFIG_SMP=y
+# CONFIG_ARC_HAS_GRTC is not set
+CONFIG_ARC_UBOOT_SUPPORT=y
+CONFIG_ARC_BUILTIN_DTB_NAME="vdk_hs38_smp"
+CONFIG_PREEMPT=y
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_NET_KEY=y
+CONFIG_INET=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+CONFIG_IP_PNP_RARP=y
+# CONFIG_IPV6 is not set
+CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
+# CONFIG_STANDALONE is not set
+# CONFIG_PREVENT_FIRMWARE_BUILD is not set
+# CONFIG_FIRMWARE_IN_KERNEL is not set
+CONFIG_MTD=y
+CONFIG_MTD_CMDLINE_PARTS=y
+CONFIG_MTD_BLOCK=y
+CONFIG_MTD_SLRAM=y
+CONFIG_BLK_DEV_RAM=y
+CONFIG_SCSI=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_NETDEVICES=y
+# CONFIG_NET_VENDOR_ARC is not set
+# CONFIG_NET_VENDOR_BROADCOM is not set
+# CONFIG_NET_VENDOR_INTEL is not set
+# CONFIG_NET_VENDOR_MARVELL is not set
+# CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_NATSEMI is not set
+# CONFIG_NET_VENDOR_SEEQ is not set
+CONFIG_STMMAC_ETH=y
+# CONFIG_NET_VENDOR_VIA is not set
+# CONFIG_NET_VENDOR_WIZNET is not set
+CONFIG_NATIONAL_PHY=y
+CONFIG_MOUSE_PS2_TOUCHKIT=y
+CONFIG_SERIO_ARC_PS2=y
+# CONFIG_LEGACY_PTYS is not set
+# CONFIG_DEVKMEM is not set
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_8250_DW=y
+CONFIG_SERIAL_OF_PLATFORM=y
+# CONFIG_HW_RANDOM is not set
+# CONFIG_HWMON is not set
+CONFIG_FB=y
+CONFIG_ARCPGU_RGB888=y
+CONFIG_ARCPGU_DISPTYPE=0
+# CONFIG_VGA_CONSOLE is not set
+CONFIG_FRAMEBUFFER_CONSOLE=y
+CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y
+CONFIG_LOGO=y
+# CONFIG_LOGO_LINUX_MONO is not set
+# CONFIG_LOGO_LINUX_VGA16 is not set
+# CONFIG_LOGO_LINUX_CLUT224 is not set
+CONFIG_USB=y
+CONFIG_USB_EHCI_HCD=y
+# CONFIG_USB_EHCI_TT_NEWSCHED is not set
+CONFIG_USB_EHCI_HCD_PLATFORM=y
+CONFIG_USB_OHCI_HCD=y
+CONFIG_USB_OHCI_HCD_PLATFORM=y
+CONFIG_USB_STORAGE=y
+CONFIG_USB_SERIAL=y
+# CONFIG_IOMMU_SUPPORT is not set
+CONFIG_EXT3_FS=y
+CONFIG_EXT4_FS=y
+CONFIG_MSDOS_FS=y
+CONFIG_VFAT_FS=y
+CONFIG_NTFS_FS=y
+CONFIG_TMPFS=y
+CONFIG_JFFS2_FS=y
+CONFIG_NFS_FS=y
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_ISO8859_1=y
+# CONFIG_ENABLE_WARN_DEPRECATED is not set
+# CONFIG_ENABLE_MUST_CHECK is not set
+CONFIG_STRIP_ASM_SYMS=y
+CONFIG_DEBUG_SHIRQ=y
+CONFIG_LOCKUP_DETECTOR=y
+CONFIG_DEFAULT_HUNG_TASK_TIMEOUT=10
+# CONFIG_SCHED_DEBUG is not set
+# CONFIG_DEBUG_PREEMPT is not set
+# CONFIG_FTRACE is not set
index 769b312c1abb5406c34f52a2db5201a490be8a0b..1a80cc91a03ba323f8418dfb705098a00f031671 100644 (file)
@@ -1,5 +1,4 @@
 generic-y += auxvec.h
-generic-y += barrier.h
 generic-y += bitsperlong.h
 generic-y += bugs.h
 generic-y += clkdev.h
index e2b1b1211b0d4ddbb527ed7239eaf05759dfae27..070f58827a5c12c2e19469ff4280f7c69e0f36a3 100644 (file)
@@ -16,6 +16,8 @@
 #define ARC_REG_PERIBASE_BCR   0x69
 #define ARC_REG_FP_BCR         0x6B    /* ARCompact: Single-Precision FPU */
 #define ARC_REG_DPFP_BCR       0x6C    /* ARCompact: Dbl Precision FPU */
+#define ARC_REG_FP_V2_BCR      0xc8    /* ARCv2 FPU */
+#define ARC_REG_SLC_BCR                0xce
 #define ARC_REG_DCCM_BCR       0x74    /* DCCM Present + SZ */
 #define ARC_REG_TIMERS_BCR     0x75
 #define ARC_REG_AP_BCR         0x76
@@ -31,6 +33,7 @@
 #define ARC_REG_BPU_BCR                0xc0
 #define ARC_REG_ISA_CFG_BCR    0xc1
 #define ARC_REG_RTT_BCR                0xF2
+#define ARC_REG_IRQ_BCR                0xF3
 #define ARC_REG_SMART_BCR      0xFF
 
 /* status32 Bits Positions */
@@ -51,6 +54,7 @@
  * [15: 8] = Exception Cause Code
  * [ 7: 0] = Exception Parameters (for certain types only)
  */
+#ifdef CONFIG_ISA_ARCOMPACT
 #define ECR_V_MEM_ERR                  0x01
 #define ECR_V_INSN_ERR                 0x02
 #define ECR_V_MACH_CHK                 0x20
 #define ECR_V_DTLB_MISS                        0x22
 #define ECR_V_PROTV                    0x23
 #define ECR_V_TRAP                     0x25
+#else
+#define ECR_V_MEM_ERR                  0x01
+#define ECR_V_INSN_ERR                 0x02
+#define ECR_V_MACH_CHK                 0x03
+#define ECR_V_ITLB_MISS                        0x04
+#define ECR_V_DTLB_MISS                        0x05
+#define ECR_V_PROTV                    0x06
+#define ECR_V_TRAP                     0x09
+#endif
 
 /* DTLB Miss and Protection Violation Cause Codes */
 
@@ -76,9 +89,6 @@
 #define ECR_C_BIT_DTLB_LD_MISS         8
 #define ECR_C_BIT_DTLB_ST_MISS         9
 
-/* Dummy ECR values for Interrupts */
-#define event_IRQ1             0x0031abcd
-#define event_IRQ2             0x0032abcd
 
 /* Auxiliary registers */
 #define AUX_IDENTITY           4
@@ -204,9 +214,11 @@ struct bcr_identity {
 
 struct bcr_isa {
 #ifdef CONFIG_CPU_BIG_ENDIAN
-       unsigned int pad1:23, atomic1:1, ver:8;
+       unsigned int div_rem:4, pad2:4, ldd:1, unalign:1, atomic:1, be:1,
+                    pad1:11, atomic1:1, ver:8;
 #else
-       unsigned int ver:8, atomic1:1, pad1:23;
+       unsigned int ver:8, atomic1:1, pad1:11, be:1, atomic:1, unalign:1,
+                    ldd:1, pad2:4, div_rem:4;
 #endif
 };
 
@@ -269,11 +281,19 @@ struct bcr_fp_arcompact {
 #endif
 };
 
+struct bcr_fp_arcv2 {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+       unsigned int pad2:15, dp:1, pad1:7, sp:1, ver:8;
+#else
+       unsigned int ver:8, sp:1, pad1:7, dp:1, pad2:15;
+#endif
+};
+
 struct bcr_timer {
 #ifdef CONFIG_CPU_BIG_ENDIAN
-       unsigned int pad2:15, rtsc:1, pad1:6, t1:1, t0:1, ver:8;
+       unsigned int pad2:15, rtsc:1, pad1:5, rtc:1, t1:1, t0:1, ver:8;
 #else
-       unsigned int ver:8, t0:1, t1:1, pad1:6, rtsc:1, pad2:15;
+       unsigned int ver:8, t0:1, t1:1, rtc:1, pad1:5, rtsc:1, pad2:15;
 #endif
 };
 
@@ -285,6 +305,14 @@ struct bcr_bpu_arcompact {
 #endif
 };
 
+struct bcr_bpu_arcv2 {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+       unsigned int pad:6, fbe:2, tqe:2, ts:4, ft:1, rse:2, pte:3, bce:3, ver:8;
+#else
+       unsigned int ver:8, bce:3, pte:3, rse:2, ft:1, ts:4, tqe:2, fbe:2, pad:6;
+#endif
+};
+
 struct bcr_generic {
 #ifdef CONFIG_CPU_BIG_ENDIAN
        unsigned int pad:24, ver:8;
@@ -299,11 +327,12 @@ struct bcr_generic {
  */
 
 struct cpuinfo_arc_mmu {
-       unsigned int ver, pg_sz, sets, ways, u_dtlb, u_itlb, num_tlb;
+       unsigned int ver:4, pg_sz_k:8, s_pg_sz_m:8, u_dtlb:6, u_itlb:6;
+       unsigned int num_tlb:16, sets:12, ways:4;
 };
 
 struct cpuinfo_arc_cache {
-       unsigned int sz_k:8, line_len:8, assoc:4, ver:4, alias:1, vipt:1, pad:6;
+       unsigned int sz_k:14, line_len:8, assoc:4, ver:4, alias:1, vipt:1;
 };
 
 struct cpuinfo_arc_bpu {
@@ -315,14 +344,13 @@ struct cpuinfo_arc_ccm {
 };
 
 struct cpuinfo_arc {
-       struct cpuinfo_arc_cache icache, dcache;
+       struct cpuinfo_arc_cache icache, dcache, slc;
        struct cpuinfo_arc_mmu mmu;
        struct cpuinfo_arc_bpu bpu;
        struct bcr_identity core;
        struct bcr_isa isa;
        struct bcr_timer timers;
        unsigned int vec_base;
-       unsigned int uncached_base;
        struct cpuinfo_arc_ccm iccm, dccm;
        struct {
                unsigned int swap:1, norm:1, minmax:1, barrel:1, crc:1, pad1:3,
@@ -336,6 +364,22 @@ struct cpuinfo_arc {
 
 extern struct cpuinfo_arc cpuinfo_arc700[];
 
+static inline int is_isa_arcv2(void)
+{
+       return IS_ENABLED(CONFIG_ISA_ARCV2);
+}
+
+static inline int is_isa_arcompact(void)
+{
+       return IS_ENABLED(CONFIG_ISA_ARCOMPACT);
+}
+
+#if defined(CONFIG_ISA_ARCOMPACT) && !defined(_CPU_DEFAULT_A7)
+#error "Toolchain not configured for ARCompact builds"
+#elif defined(CONFIG_ISA_ARCV2) && !defined(_CPU_DEFAULT_HS)
+#error "Toolchain not configured for ARCv2 builds"
+#endif
+
 #endif /* __ASSEMBLY__ */
 
 #endif /* _ASM_ARC_ARCREGS_H */
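The hunk above adds ARCv2 build-config registers (FP_V2, SLC and IRQ BCRs), widens the cpuinfo bitfields, and introduces is_isa_arcv2()/is_isa_arcompact(), which fold to compile-time constants via IS_ENABLED(). A minimal sketch of how probe code might branch on those helpers, assuming the existing read_aux_reg() accessor; fpu_probe_sketch() and the printed text are illustrative, not the kernel's actual setup code:

    #include <linux/kernel.h>
    #include <linux/string.h>
    #include <asm/arcregs.h>

    static void fpu_probe_sketch(void)
    {
            unsigned int bcr;

            if (is_isa_arcv2()) {
                    struct bcr_fp_arcv2 fp;

                    bcr = read_aux_reg(ARC_REG_FP_V2_BCR);
                    memcpy(&fp, &bcr, sizeof(fp));  /* decode the bitfield layout above */
                    if (fp.ver)
                            pr_info("ARCv2 FPU: sp=%u dp=%u\n", fp.sp, fp.dp);
            } else {
                    bcr = read_aux_reg(ARC_REG_FP_BCR);
                    if (bcr & 0xff)                 /* ver lives in the low byte */
                            pr_info("ARCompact single-precision FPU present\n");
            }
    }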
index 9917a45fc430d042a4f59006abf84ceedad1bca7..03484cb4d16d2eb4fada0095ee427726c23bd2e1 100644 (file)
 
 #define atomic_set(v, i) (((v)->counter) = (i))
 
+#ifdef CONFIG_ISA_ARCV2
+#define PREFETCHW      "       prefetchw   [%1]        \n"
+#else
+#define PREFETCHW
+#endif
+
 #define ATOMIC_OP(op, c_op, asm_op)                                    \
 static inline void atomic_##op(int i, atomic_t *v)                     \
 {                                                                      \
        unsigned int temp;                                              \
                                                                        \
        __asm__ __volatile__(                                           \
-       "1:     llock   %0, [%1]        \n"                             \
+       "1:                             \n"                             \
+       PREFETCHW                                                       \
+       "       llock   %0, [%1]        \n"                             \
        "       " #asm_op " %0, %0, %2  \n"                             \
        "       scond   %0, [%1]        \n"                             \
        "       bnz     1b              \n"                             \
@@ -43,8 +51,16 @@ static inline int atomic_##op##_return(int i, atomic_t *v)           \
 {                                                                      \
        unsigned int temp;                                              \
                                                                        \
+       /*                                                              \
+        * Explicit full memory barrier needed before/after as          \
+        * LLOCK/SCOND themselves don't provide any such semantics      \
+        */                                                             \
+       smp_mb();                                                       \
+                                                                       \
        __asm__ __volatile__(                                           \
-       "1:     llock   %0, [%1]        \n"                             \
+       "1:                             \n"                             \
+       PREFETCHW                                                       \
+       "       llock   %0, [%1]        \n"                             \
        "       " #asm_op " %0, %0, %2  \n"                             \
        "       scond   %0, [%1]        \n"                             \
        "       bnz     1b              \n"                             \
@@ -52,6 +68,8 @@ static inline int atomic_##op##_return(int i, atomic_t *v)            \
        : "r"(&v->counter), "ir"(i)                                     \
        : "cc");                                                        \
                                                                        \
+       smp_mb();                                                       \
+                                                                       \
        return temp;                                                    \
 }
 
@@ -105,6 +123,9 @@ static inline int atomic_##op##_return(int i, atomic_t *v)          \
        unsigned long flags;                                            \
        unsigned long temp;                                             \
                                                                        \
+       /*                                                              \
+        * spin lock/unlock provides the needed smp_mb() before/after   \
+        */                                                             \
        atomic_ops_lock(flags);                                         \
        temp = v->counter;                                              \
        temp c_op i;                                                    \
@@ -142,9 +163,19 @@ ATOMIC_OP(and, &=, and)
 #define __atomic_add_unless(v, a, u)                                   \
 ({                                                                     \
        int c, old;                                                     \
+                                                                       \
+       /*                                                              \
+        * Explicit full memory barrier needed before/after as          \
+        * LLOCK/SCOND themselves don't provide any such semantics      \
+        */                                                             \
+       smp_mb();                                                       \
+                                                                       \
        c = atomic_read(v);                                             \
        while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c)\
                c = old;                                                \
+                                                                       \
+       smp_mb();                                                       \
+                                                                       \
        c;                                                              \
 })
 
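Functionally, the changes above keep the non-returning atomics (atomic_add() and friends) barrier-free while making the value-returning and conditional variants full barriers, which is what generic kernel code assumes. A usage sketch of the contract the new smp_mb() pairs in __atomic_add_unless() back; struct obj, get_obj() and put_obj() are illustrative names only:

    #include <linux/types.h>
    #include <linux/atomic.h>

    struct obj {
            atomic_t refcnt;        /* 0 means the object is already dying */
    };

    /* Take a reference only if the object is still live; relies on the
     * fully ordered atomic_add_unless()/__atomic_add_unless() path above. */
    static bool get_obj(struct obj *o)
    {
            return atomic_inc_not_zero(&o->refcnt);
    }

    static void put_obj(struct obj *o)
    {
            if (atomic_dec_and_test(&o->refcnt))    /* also a full barrier */
                    ;                               /* free the object here */
    }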
diff --git a/arch/arc/include/asm/barrier.h b/arch/arc/include/asm/barrier.h
new file mode 100644 (file)
index 0000000..a720998
--- /dev/null
@@ -0,0 +1,48 @@
+/*
+ * Copyright (C) 2014-15 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ASM_BARRIER_H
+#define __ASM_BARRIER_H
+
+#ifdef CONFIG_ISA_ARCV2
+
+/*
+ * ARCv2 based HS38 cores are in-order issue, but still weakly ordered
+ * due to micro-arch buffering/queuing of load/store, cache hit vs. miss ...
+ *
+ * Explicit barrier provided by DMB instruction
+ *  - Operand supports fine grained load/store/load+store semantics
+ *  - Ensures that selected memory operation issued before it will complete
+ *    before any subsequent memory operation of same type
+ *  - DMB guarantees SMP as well as local barrier semantics
+ *    (asm-generic/barrier.h ensures sane smp_*mb if not defined here, i.e.
+ *    UP: barrier(), SMP: smp_*mb == *mb)
+ *  - DSYNC provides DMB+completion_of_cache_bpu_maintenance_ops hence not needed
+ *    in the general case. Plus it only provides a full barrier.
+ */
+
+#define mb()   asm volatile("dmb 3\n" : : : "memory")
+#define rmb()  asm volatile("dmb 1\n" : : : "memory")
+#define wmb()  asm volatile("dmb 2\n" : : : "memory")
+
+#endif
+
+#ifdef CONFIG_ISA_ARCOMPACT
+
+/*
+ * ARCompact based cores (ARC700) only have the SYNC instruction, which is super
+ * heavyweight as it flushes the pipeline as well.
+ * There are no real SMP implementations of such cores.
+ */
+
+#define mb()   asm volatile("sync\n" : : : "memory")
+#endif
+
+#include <asm-generic/barrier.h>
+
+#endif
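With the definitions above, asm-generic/barrier.h derives the smp_* variants (so smp_wmb() ends up as "dmb 2" on an ARCv2 SMP build, and a plain compiler barrier on UP). A classic message-passing pairing these barriers are meant to order, as a sketch; 'flag' and 'payload' are illustrative variables only:

    #include <linux/compiler.h>
    #include <asm/barrier.h>

    static int payload;
    static int flag;

    static void writer(void)
    {
            payload = 123;
            smp_wmb();              /* dmb 2 on ARCv2 SMP: payload visible before flag */
            WRITE_ONCE(flag, 1);
    }

    static int reader(void)
    {
            if (READ_ONCE(flag)) {
                    smp_rmb();      /* dmb 1 on ARCv2 SMP: flag read before payload */
                    return payload;
            }
            return 0;
    }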
index 4051e9525939fd9050b06b048bec8e1a47307d7b..99fe118d3730bc050263e5be7dd3423ab659d46c 100644 (file)
 #include <linux/types.h>
 #include <linux/compiler.h>
 #include <asm/barrier.h>
+#ifndef CONFIG_ARC_HAS_LLSC
+#include <asm/smp.h>
+#endif
 
-/*
- * Hardware assisted read-modify-write using ARC700 LLOCK/SCOND insns.
- * The Kconfig glue ensures that in SMP, this is only set if the container
- * SoC/platform has cross-core coherent LLOCK/SCOND
- */
 #if defined(CONFIG_ARC_HAS_LLSC)
 
-static inline void set_bit(unsigned long nr, volatile unsigned long *m)
-{
-       unsigned int temp;
-
-       m += nr >> 5;
-
-       /*
-        * ARC ISA micro-optimization:
-        *
-        * Instructions dealing with bitpos only consider lower 5 bits (0-31)
-        * e.g (x << 33) is handled like (x << 1) by ASL instruction
-        *  (mem pointer still needs adjustment to point to next word)
-        *
-        * Hence the masking to clamp @nr arg can be elided in general.
-        *
-        * However if @nr is a constant (above assumed it in a register),
-        * and greater than 31, gcc can optimize away (x << 33) to 0,
-        * as overflow, given the 32-bit ISA. Thus masking needs to be done
-        * for constant @nr, but no code is generated due to const prop.
-        */
-       if (__builtin_constant_p(nr))
-               nr &= 0x1f;
-
-       __asm__ __volatile__(
-       "1:     llock   %0, [%1]        \n"
-       "       bset    %0, %0, %2      \n"
-       "       scond   %0, [%1]        \n"
-       "       bnz     1b      \n"
-       : "=&r"(temp)
-       : "r"(m), "ir"(nr)
-       : "cc");
-}
-
-static inline void clear_bit(unsigned long nr, volatile unsigned long *m)
-{
-       unsigned int temp;
-
-       m += nr >> 5;
-
-       if (__builtin_constant_p(nr))
-               nr &= 0x1f;
-
-       __asm__ __volatile__(
-       "1:     llock   %0, [%1]        \n"
-       "       bclr    %0, %0, %2      \n"
-       "       scond   %0, [%1]        \n"
-       "       bnz     1b      \n"
-       : "=&r"(temp)
-       : "r"(m), "ir"(nr)
-       : "cc");
-}
-
-static inline void change_bit(unsigned long nr, volatile unsigned long *m)
-{
-       unsigned int temp;
-
-       m += nr >> 5;
-
-       if (__builtin_constant_p(nr))
-               nr &= 0x1f;
+/*
+ * Hardware assisted Atomic-R-M-W
+ */
 
-       __asm__ __volatile__(
-       "1:     llock   %0, [%1]        \n"
-       "       bxor    %0, %0, %2      \n"
-       "       scond   %0, [%1]        \n"
-       "       bnz     1b              \n"
-       : "=&r"(temp)
-       : "r"(m), "ir"(nr)
-       : "cc");
+#define BIT_OP(op, c_op, asm_op)                                       \
+static inline void op##_bit(unsigned long nr, volatile unsigned long *m)\
+{                                                                      \
+       unsigned int temp;                                              \
+                                                                       \
+       m += nr >> 5;                                                   \
+                                                                       \
+       /*                                                              \
+        * ARC ISA micro-optimization:                                  \
+        *                                                              \
+        * Instructions dealing with bitpos only consider lower 5 bits  \
+        * e.g (x << 33) is handled like (x << 1) by ASL instruction    \
+        *  (mem pointer still needs adjustment to point to next word)  \
+        *                                                              \
+        * Hence the masking to clamp @nr arg can be elided in general. \
+        *                                                              \
+        * However if @nr is a constant (above assumed in a register),  \
+        * and greater than 31, gcc can optimize away (x << 33) to 0,   \
+        * as overflow, given the 32-bit ISA. Thus masking needs to be  \
+        * done for const @nr, but no code is generated due to gcc      \
+        * const prop.                                                  \
+        */                                                             \
+       if (__builtin_constant_p(nr))                                   \
+               nr &= 0x1f;                                             \
+                                                                       \
+       __asm__ __volatile__(                                           \
+       "1:     llock       %0, [%1]            \n"                     \
+       "       " #asm_op " %0, %0, %2  \n"                             \
+       "       scond       %0, [%1]            \n"                     \
+       "       bnz         1b                  \n"                     \
+       : "=&r"(temp)   /* Early clobber, to prevent reg reuse */       \
+       : "r"(m),       /* Not "m": llock only supports reg direct addr mode */ \
+         "ir"(nr)                                                      \
+       : "cc");                                                        \
 }
 
 /*
@@ -108,75 +75,38 @@ static inline void change_bit(unsigned long nr, volatile unsigned long *m)
  * Since ARC lacks an equivalent h/w primitive, the bit is set unconditionally
  * and the old value of the bit is returned
  */
-static inline int test_and_set_bit(unsigned long nr, volatile unsigned long *m)
-{
-       unsigned long old, temp;
-
-       m += nr >> 5;
-
-       if (__builtin_constant_p(nr))
-               nr &= 0x1f;
-
-       __asm__ __volatile__(
-       "1:     llock   %0, [%2]        \n"
-       "       bset    %1, %0, %3      \n"
-       "       scond   %1, [%2]        \n"
-       "       bnz     1b              \n"
-       : "=&r"(old), "=&r"(temp)
-       : "r"(m), "ir"(nr)
-       : "cc");
-
-       return (old & (1 << nr)) != 0;
-}
-
-static inline int
-test_and_clear_bit(unsigned long nr, volatile unsigned long *m)
-{
-       unsigned int old, temp;
-
-       m += nr >> 5;
-
-       if (__builtin_constant_p(nr))
-               nr &= 0x1f;
-
-       __asm__ __volatile__(
-       "1:     llock   %0, [%2]        \n"
-       "       bclr    %1, %0, %3      \n"
-       "       scond   %1, [%2]        \n"
-       "       bnz     1b              \n"
-       : "=&r"(old), "=&r"(temp)
-       : "r"(m), "ir"(nr)
-       : "cc");
-
-       return (old & (1 << nr)) != 0;
-}
-
-static inline int
-test_and_change_bit(unsigned long nr, volatile unsigned long *m)
-{
-       unsigned int old, temp;
-
-       m += nr >> 5;
-
-       if (__builtin_constant_p(nr))
-               nr &= 0x1f;
-
-       __asm__ __volatile__(
-       "1:     llock   %0, [%2]        \n"
-       "       bxor    %1, %0, %3      \n"
-       "       scond   %1, [%2]        \n"
-       "       bnz     1b              \n"
-       : "=&r"(old), "=&r"(temp)
-       : "r"(m), "ir"(nr)
-       : "cc");
-
-       return (old & (1 << nr)) != 0;
+#define TEST_N_BIT_OP(op, c_op, asm_op)                                        \
+static inline int test_and_##op##_bit(unsigned long nr, volatile unsigned long *m)\
+{                                                                      \
+       unsigned long old, temp;                                        \
+                                                                       \
+       m += nr >> 5;                                                   \
+                                                                       \
+       if (__builtin_constant_p(nr))                                   \
+               nr &= 0x1f;                                             \
+                                                                       \
+       /*                                                              \
+        * Explicit full memory barrier needed before/after as          \
+        * LLOCK/SCOND themselves don't provide any such semantics      \
+        */                                                             \
+       smp_mb();                                                       \
+                                                                       \
+       __asm__ __volatile__(                                           \
+       "1:     llock       %0, [%2]    \n"                             \
+       "       " #asm_op " %1, %0, %3  \n"                             \
+       "       scond       %1, [%2]    \n"                             \
+       "       bnz         1b          \n"                             \
+       : "=&r"(old), "=&r"(temp)                                       \
+       : "r"(m), "ir"(nr)                                              \
+       : "cc");                                                        \
+                                                                       \
+       smp_mb();                                                       \
+                                                                       \
+       return (old & (1 << nr)) != 0;                                  \
 }
 
 #else  /* !CONFIG_ARC_HAS_LLSC */
 
-#include <asm/smp.h>
-
 /*
  * Non hardware assisted Atomic-R-M-W
  * Locking would change to irq-disabling only (UP) and spinlocks (SMP)
@@ -193,108 +123,43 @@ test_and_change_bit(unsigned long nr, volatile unsigned long *m)
  *             at compile time)
  */
 
-static inline void set_bit(unsigned long nr, volatile unsigned long *m)
-{
-       unsigned long temp, flags;
-       m += nr >> 5;
-
-       if (__builtin_constant_p(nr))
-               nr &= 0x1f;
-
-       bitops_lock(flags);
-
-       temp = *m;
-       *m = temp | (1UL << nr);
-
-       bitops_unlock(flags);
-}
-
-static inline void clear_bit(unsigned long nr, volatile unsigned long *m)
-{
-       unsigned long temp, flags;
-       m += nr >> 5;
-
-       if (__builtin_constant_p(nr))
-               nr &= 0x1f;
-
-       bitops_lock(flags);
-
-       temp = *m;
-       *m = temp & ~(1UL << nr);
-
-       bitops_unlock(flags);
-}
-
-static inline void change_bit(unsigned long nr, volatile unsigned long *m)
-{
-       unsigned long temp, flags;
-       m += nr >> 5;
-
-       if (__builtin_constant_p(nr))
-               nr &= 0x1f;
-
-       bitops_lock(flags);
-
-       temp = *m;
-       *m = temp ^ (1UL << nr);
-
-       bitops_unlock(flags);
-}
-
-static inline int test_and_set_bit(unsigned long nr, volatile unsigned long *m)
-{
-       unsigned long old, flags;
-       m += nr >> 5;
-
-       if (__builtin_constant_p(nr))
-               nr &= 0x1f;
-
-       bitops_lock(flags);
-
-       old = *m;
-       *m = old | (1 << nr);
-
-       bitops_unlock(flags);
-
-       return (old & (1 << nr)) != 0;
-}
-
-static inline int
-test_and_clear_bit(unsigned long nr, volatile unsigned long *m)
-{
-       unsigned long old, flags;
-       m += nr >> 5;
-
-       if (__builtin_constant_p(nr))
-               nr &= 0x1f;
-
-       bitops_lock(flags);
-
-       old = *m;
-       *m = old & ~(1 << nr);
-
-       bitops_unlock(flags);
-
-       return (old & (1 << nr)) != 0;
+#define BIT_OP(op, c_op, asm_op)                                       \
+static inline void op##_bit(unsigned long nr, volatile unsigned long *m)\
+{                                                                      \
+       unsigned long temp, flags;                                      \
+       m += nr >> 5;                                                   \
+                                                                       \
+       if (__builtin_constant_p(nr))                                   \
+               nr &= 0x1f;                                             \
+                                                                       \
+       /*                                                              \
+        * spin lock/unlock provide the needed smp_mb() before/after    \
+        */                                                             \
+       bitops_lock(flags);                                             \
+                                                                       \
+       temp = *m;                                                      \
+       *m = temp c_op (1UL << nr);                                     \
+                                                                       \
+       bitops_unlock(flags);                                           \
 }
 
-static inline int
-test_and_change_bit(unsigned long nr, volatile unsigned long *m)
-{
-       unsigned long old, flags;
-       m += nr >> 5;
-
-       if (__builtin_constant_p(nr))
-               nr &= 0x1f;
-
-       bitops_lock(flags);
-
-       old = *m;
-       *m = old ^ (1 << nr);
-
-       bitops_unlock(flags);
-
-       return (old & (1 << nr)) != 0;
+#define TEST_N_BIT_OP(op, c_op, asm_op)                                        \
+static inline int test_and_##op##_bit(unsigned long nr, volatile unsigned long *m)\
+{                                                                      \
+       unsigned long old, flags;                                       \
+       m += nr >> 5;                                                   \
+                                                                       \
+       if (__builtin_constant_p(nr))                                   \
+               nr &= 0x1f;                                             \
+                                                                       \
+       bitops_lock(flags);                                             \
+                                                                       \
+       old = *m;                                                       \
+       *m = old c_op (1 << nr);                                        \
+                                                                       \
+       bitops_unlock(flags);                                           \
+                                                                       \
+       return (old & (1 << nr)) != 0;                                  \
 }
 
 #endif /* CONFIG_ARC_HAS_LLSC */
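Both halves above (LLSC and lock-based) now emit the same API through these generator macros; the BIT_OPS() instantiations further down in this patch produce the usual set_bit()/test_and_set_bit() family with unchanged semantics. A usage sketch of the generated interface ('irq_pending' and the two helpers are illustrative only):

    #include <linux/types.h>
    #include <linux/bitops.h>

    static DECLARE_BITMAP(irq_pending, 64);

    static void mark_pending(int hwirq)
    {
            set_bit(hwirq, irq_pending);    /* atomic LLOCK/BSET/SCOND retry loop */
    }

    static bool take_pending(int hwirq)
    {
            /* test_and_clear_bit() is fully ordered: smp_mb() before and after */
            return test_and_clear_bit(hwirq, irq_pending);
    }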
@@ -303,86 +168,51 @@ test_and_change_bit(unsigned long nr, volatile unsigned long *m)
  * Non atomic variants
  **************************************/
 
-static inline void __set_bit(unsigned long nr, volatile unsigned long *m)
-{
-       unsigned long temp;
-       m += nr >> 5;
-
-       if (__builtin_constant_p(nr))
-               nr &= 0x1f;
-
-       temp = *m;
-       *m = temp | (1UL << nr);
-}
-
-static inline void __clear_bit(unsigned long nr, volatile unsigned long *m)
-{
-       unsigned long temp;
-       m += nr >> 5;
-
-       if (__builtin_constant_p(nr))
-               nr &= 0x1f;
-
-       temp = *m;
-       *m = temp & ~(1UL << nr);
-}
-
-static inline void __change_bit(unsigned long nr, volatile unsigned long *m)
-{
-       unsigned long temp;
-       m += nr >> 5;
-
-       if (__builtin_constant_p(nr))
-               nr &= 0x1f;
-
-       temp = *m;
-       *m = temp ^ (1UL << nr);
-}
-
-static inline int
-__test_and_set_bit(unsigned long nr, volatile unsigned long *m)
-{
-       unsigned long old;
-       m += nr >> 5;
-
-       if (__builtin_constant_p(nr))
-               nr &= 0x1f;
-
-       old = *m;
-       *m = old | (1 << nr);
-
-       return (old & (1 << nr)) != 0;
+#define __BIT_OP(op, c_op, asm_op)                                     \
+static inline void __##op##_bit(unsigned long nr, volatile unsigned long *m)   \
+{                                                                      \
+       unsigned long temp;                                             \
+       m += nr >> 5;                                                   \
+                                                                       \
+       if (__builtin_constant_p(nr))                                   \
+               nr &= 0x1f;                                             \
+                                                                       \
+       temp = *m;                                                      \
+       *m = temp c_op (1UL << nr);                                     \
 }
 
-static inline int
-__test_and_clear_bit(unsigned long nr, volatile unsigned long *m)
-{
-       unsigned long old;
-       m += nr >> 5;
-
-       if (__builtin_constant_p(nr))
-               nr &= 0x1f;
-
-       old = *m;
-       *m = old & ~(1 << nr);
-
-       return (old & (1 << nr)) != 0;
+#define __TEST_N_BIT_OP(op, c_op, asm_op)                              \
+static inline int __test_and_##op##_bit(unsigned long nr, volatile unsigned long *m)\
+{                                                                      \
+       unsigned long old;                                              \
+       m += nr >> 5;                                                   \
+                                                                       \
+       if (__builtin_constant_p(nr))                                   \
+               nr &= 0x1f;                                             \
+                                                                       \
+       old = *m;                                                       \
+       *m = old c_op (1 << nr);                                        \
+                                                                       \
+       return (old & (1 << nr)) != 0;                                  \
 }
 
-static inline int
-__test_and_change_bit(unsigned long nr, volatile unsigned long *m)
-{
-       unsigned long old;
-       m += nr >> 5;
-
-       if (__builtin_constant_p(nr))
-               nr &= 0x1f;
-
-       old = *m;
-       *m = old ^ (1 << nr);
-
-       return (old & (1 << nr)) != 0;
-}
+#define BIT_OPS(op, c_op, asm_op)                                      \
+                                                                       \
+       /* set_bit(), clear_bit(), change_bit() */                      \
+       BIT_OP(op, c_op, asm_op)                                        \
+                                                                       \
+       /* test_and_set_bit(), test_and_clear_bit(), test_and_change_bit() */\
+       TEST_N_BIT_OP(op, c_op, asm_op)                                 \
+                                                                       \
+       /* __set_bit(), __clear_bit(), __change_bit() */                \
+       __BIT_OP(op, c_op, asm_op)                                      \
+                                                                       \
+       /* __test_and_set_bit(), __test_and_clear_bit(), __test_and_change_bit() */\
+       __TEST_N_BIT_OP(op, c_op, asm_op)
+
+BIT_OPS(set, |, bset)
+BIT_OPS(clear, & ~, bclr)
+BIT_OPS(change, ^, bxor)
 
 /*
  * This routine doesn't need to be atomic.
@@ -402,6 +232,8 @@ test_bit(unsigned int nr, const volatile unsigned long *addr)
        return ((mask & *addr) != 0);
 }
 
+#ifdef CONFIG_ISA_ARCOMPACT
+
 /*
  * Count the number of zeros, starting from MSB
  * Helper for fls( ) friends
@@ -494,6 +326,75 @@ static inline __attribute__ ((const)) int __ffs(unsigned long word)
        return ffs(word) - 1;
 }
 
+#else  /* CONFIG_ISA_ARCV2 */
+
+/*
+ * fls = Find Last Set in word
+ * @result: [1-32]
+ * fls(1) = 1, fls(0x80000000) = 32, fls(0) = 0
+ */
+static inline __attribute__ ((const)) int fls(unsigned long x)
+{
+       int n;
+
+       asm volatile(
+       "       fls.f   %0, %1          \n"  /* 0:31; 0(Z) if src 0 */
+       "       add.nz  %0, %0, 1       \n"  /* 0:31 -> 1:32 */
+       : "=r"(n)       /* Early clobber not needed */
+       : "r"(x)
+       : "cc");
+
+       return n;
+}
+
+/*
+ * __fls: Similar to fls, but zero based (0-31). Also 0 if no bit set
+ */
+static inline __attribute__ ((const)) int __fls(unsigned long x)
+{
+       /* FLS insn has exactly the same semantics as the API */
+       return  __builtin_arc_fls(x);
+}
+
+/*
+ * ffs = Find First Set in word (LSB to MSB)
+ * @result: [1-32], 0 if all 0's
+ */
+static inline __attribute__ ((const)) int ffs(unsigned long x)
+{
+       int n;
+
+       asm volatile(
+       "       ffs.f   %0, %1          \n"  /* 0:31; 31(Z) if src 0 */
+       "       add.nz  %0, %0, 1       \n"  /* 0:31 -> 1:32 */
+       "       mov.z   %0, 0           \n"  /* 31(Z)-> 0 */
+       : "=r"(n)       /* Early clobber not needed */
+       : "r"(x)
+       : "cc");
+
+       return n;
+}
+
+/*
+ * __ffs: Similar to ffs, but zero based (0-31)
+ */
+static inline __attribute__ ((const)) int __ffs(unsigned long x)
+{
+       int n;
+
+       asm volatile(
+       "       ffs.f   %0, %1          \n"  /* 0:31; 31(Z) if src 0 */
+       "       mov.z   %0, 0           \n"  /* 31(Z)-> 0 */
+       : "=r"(n)
+       : "r"(x)
+       : "cc");
+
+       return n;
+
+}
+
+#endif /* CONFIG_ISA_ARCOMPACT */
+
 /*
  * ffz = Find First Zero in word.
  * @return:[0-31], 32 if all 1's
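For reference, the ARCv2 FLS/FFS based helpers above follow the standard kernel conventions: fls(0)==0, fls(1)==1, fls(0x80000000)==32, ffs(0)==0, and the __-prefixed variants are zero-based (returning 0 for an all-zero word, as the comments above state). A portable sketch with the same semantics, assuming a 32-bit unsigned long as on ARC; this is an illustration, not kernel code:

    static inline int ref_fls(unsigned long x)
    {
            return x ? 32 - __builtin_clz(x) : 0;   /* fls(1)=1, fls(0x80000000)=32 */
    }

    static inline int ref_ffs(unsigned long x)
    {
            return __builtin_ffs((int)x);           /* 1-based; ffs(0)=0 */
    }

    static inline int ref___ffs(unsigned long x)
    {
            return x ? __builtin_ctz(x) : 0;        /* 0-based; 0 if no bit set */
    }

    static inline int ref___fls(unsigned long x)
    {
            return x ? 31 - __builtin_clz(x) : 0;   /* 0-based; 0 if no bit set */
    }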
index 7861255da32d64aa62e03d6fa881f84d9abf3b2f..d67345d3e2d444b1d357c004cbb1e33508d93060 100644 (file)
@@ -60,7 +60,7 @@ extern void read_decode_cache_bcr(void);
 #define ARC_REG_IC_IVIC                0x10
 #define ARC_REG_IC_CTRL                0x11
 #define ARC_REG_IC_IVIL                0x19
-#if defined(CONFIG_ARC_MMU_V3)
+#if defined(CONFIG_ARC_MMU_V3) || defined(CONFIG_ARC_MMU_V4)
 #define ARC_REG_IC_PTAG                0x1E
 #endif
 
@@ -74,12 +74,24 @@ extern void read_decode_cache_bcr(void);
 #define ARC_REG_DC_IVDL                0x4A
 #define ARC_REG_DC_FLSH                0x4B
 #define ARC_REG_DC_FLDL                0x4C
-#if defined(CONFIG_ARC_MMU_V3)
 #define ARC_REG_DC_PTAG                0x5C
-#endif
 
 /* Bit val in DC_CTRL */
 #define DC_CTRL_INV_MODE_FLUSH  0x40
 #define DC_CTRL_FLUSH_STATUS    0x100
 
+/* System-level cache (L2 cache) related Auxiliary registers */
+#define ARC_REG_SLC_CFG                0x901
+#define ARC_REG_SLC_CTRL       0x903
+#define ARC_REG_SLC_FLUSH      0x904
+#define ARC_REG_SLC_INVALIDATE 0x905
+#define ARC_REG_SLC_RGN_START  0x914
+#define ARC_REG_SLC_RGN_END    0x916
+
+/* Bit val in SLC_CONTROL */
+#define SLC_CTRL_IM            0x040
+#define SLC_CTRL_DISABLE       0x001
+#define SLC_CTRL_BUSY          0x100
+#define SLC_CTRL_RGN_OP_INV    0x200
+
 #endif /* _ASM_CACHE_H */
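The new SLC (L2) auxiliary registers support region-based maintenance: program a start/end address, select the operation in SLC_CTRL, then poll the busy bit. A sequencing sketch using only the definitions above and the existing read_aux_reg()/write_aux_reg() accessors; the write order (END before START, with the START write assumed to kick off the operation) and the helper name are assumptions, not the kernel's actual SLC code:

    static void slc_region_inv_sketch(unsigned long start, unsigned long sz)
    {
            unsigned int ctrl = read_aux_reg(ARC_REG_SLC_CTRL);

            /* request invalidate (rather than flush-n-invalidate) region ops */
            write_aux_reg(ARC_REG_SLC_CTRL, ctrl | SLC_CTRL_RGN_OP_INV);

            /* program the region; writing START is assumed to start the op */
            write_aux_reg(ARC_REG_SLC_RGN_END, start + sz - 1);
            write_aux_reg(ARC_REG_SLC_RGN_START, start);

            /* wait for the region operation to drain */
            while (read_aux_reg(ARC_REG_SLC_CTRL) & SLC_CTRL_BUSY)
                    cpu_relax();
    }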
index 6abc4972bc93bbba015c3ebe92ea40926c7413a8..0992d3dbcc65f66e4e97925703ec9dc113a7b9a4 100644 (file)
@@ -34,9 +34,7 @@ void flush_cache_all(void);
 void flush_icache_range(unsigned long start, unsigned long end);
 void __sync_icache_dcache(unsigned long paddr, unsigned long vaddr, int len);
 void __inv_icache_page(unsigned long paddr, unsigned long vaddr);
-void ___flush_dcache_page(unsigned long paddr, unsigned long vaddr);
-#define __flush_dcache_page(p, v)      \
-               ___flush_dcache_page((unsigned long)p, (unsigned long)v)
+void __flush_dcache_page(unsigned long paddr, unsigned long vaddr);
 
 #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
 
index 03cd6894855d614237851515d1cafb6d2a56908b..44fd531f4d7b93a9df7bff6dec976af5e571506c 100644 (file)
@@ -10,6 +10,8 @@
 #define __ASM_ARC_CMPXCHG_H
 
 #include <linux/types.h>
+
+#include <asm/barrier.h>
 #include <asm/smp.h>
 
 #ifdef CONFIG_ARC_HAS_LLSC
@@ -19,16 +21,25 @@ __cmpxchg(volatile void *ptr, unsigned long expected, unsigned long new)
 {
        unsigned long prev;
 
+       /*
+        * Explicit full memory barrier needed before/after as
+        * LLOCK/SCOND themselves don't provide any such semantics
+        */
+       smp_mb();
+
        __asm__ __volatile__(
        "1:     llock   %0, [%1]        \n"
        "       brne    %0, %2, 2f      \n"
        "       scond   %3, [%1]        \n"
        "       bnz     1b              \n"
        "2:                             \n"
-       : "=&r"(prev)
-       : "r"(ptr), "ir"(expected),
-         "r"(new) /* can't be "ir". scond can't take limm for "b" */
-       : "cc");
+       : "=&r"(prev)   /* Early clobber, to prevent reg reuse */
+       : "r"(ptr),     /* Not "m": llock only supports reg direct addr mode */
+         "ir"(expected),
+         "r"(new)      /* can't be "ir". scond can't take LIMM for "b" */
+       : "cc", "memory"); /* so that gcc knows memory is being written here */
+
+       smp_mb();
 
        return prev;
 }
@@ -42,6 +53,9 @@ __cmpxchg(volatile void *ptr, unsigned long expected, unsigned long new)
        int prev;
        volatile unsigned long *p = ptr;
 
+       /*
+        * spin lock/unlock provide the needed smp_mb() before/after
+        */
        atomic_ops_lock(flags);
        prev = *p;
        if (prev == expected)
@@ -77,12 +91,16 @@ static inline unsigned long __xchg(unsigned long val, volatile void *ptr,
 
        switch (size) {
        case 4:
+               smp_mb();
+
                __asm__ __volatile__(
                "       ex  %0, [%1]    \n"
                : "+r"(val)
                : "r"(ptr)
                : "memory");
 
+               smp_mb();
+
                return val;
        }
        return __xchg_bad_pointer();
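The caller-visible effect of the changes above is that cmpxchg() (and the 4-byte xchg()) keep the architecture-independent full-barrier contract, so the usual retry loops need no extra fencing. A sketch; 'stat_max' and update_max() are illustrative names only:

    #include <linux/compiler.h>
    #include <asm/cmpxchg.h>

    static unsigned long stat_max;

    static void update_max(unsigned long val)
    {
            unsigned long cur = READ_ONCE(stat_max);

            while (val > cur) {
                    unsigned long old = cmpxchg(&stat_max, cur, val);

                    if (old == cur)
                            break;          /* we installed val */
                    cur = old;              /* lost the race; re-evaluate */
            }
    }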
index 43de302569815073bb4d4f23cb98b60a5e0c0552..08e7e2a16ac176a597ceb21c3b0f399b6ad98ea6 100644 (file)
 static inline void __delay(unsigned long loops)
 {
        __asm__ __volatile__(
-       "1:     sub.f %0, %0, 1 \n"
-       "       jpnz 1b         \n"
-       : "+r"(loops)
-       :
-       : "cc");
+       "       lp  1f  \n"
+       "       nop     \n"
+       "1:             \n"
+       : "+l"(loops));
 }
 
 extern void __bad_udelay(void);
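The new __delay() body drives lp_count directly via the "+l" constraint so the wait becomes a zero-overhead hardware loop; behaviourally it still just burns roughly 'loops' iterations, i.e. something like this C sketch (illustration only):

    static inline void __delay_ref(unsigned long loops)
    {
            while (loops--)
                    barrier();      /* stop the compiler from removing the spin */
    }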
index 45b8e0cea1764d2b9e4a0b0d1f27b787eb845870..2d28ba939d8edc71c693442b4464076f840ea5ff 100644 (file)
 #include <asm-generic/dma-coherent.h>
 #include <asm/cacheflush.h>
 
-#ifndef CONFIG_ARC_PLAT_NEEDS_CPU_TO_DMA
-/*
- * dma_map_* API take cpu addresses, which is kernel logical address in the
- * untranslated address space (0x8000_0000) based. The dma address (bus addr)
- * ideally needs to be 0x0000_0000 based hence these glue routines.
- * However given that intermediate bus bridges can ignore the high bit, we can
- * do with these routines being no-ops.
- * If a platform/device comes up which sriclty requires 0 based bus addr
- * (e.g. AHB-PCI bridge on Angel4 board), then it can provide it's own versions
- */
-#define plat_dma_addr_to_kernel(dev, addr) ((unsigned long)(addr))
-#define plat_kernel_addr_to_dma(dev, ptr) ((dma_addr_t)(ptr))
-
-#else
-#include <plat/dma_addr.h>
-#endif
-
 void *dma_alloc_noncoherent(struct device *dev, size_t size,
                            dma_addr_t *dma_handle, gfp_t gfp);
 
@@ -94,7 +77,7 @@ dma_map_single(struct device *dev, void *cpu_addr, size_t size,
               enum dma_data_direction dir)
 {
        _dma_cache_sync((unsigned long)cpu_addr, size, dir);
-       return plat_kernel_addr_to_dma(dev, cpu_addr);
+       return (dma_addr_t)cpu_addr;
 }
 
 static inline void
@@ -147,16 +130,14 @@ static inline void
 dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
                        size_t size, enum dma_data_direction dir)
 {
-       _dma_cache_sync(plat_dma_addr_to_kernel(dev, dma_handle), size,
-                       DMA_FROM_DEVICE);
+       _dma_cache_sync(dma_handle, size, DMA_FROM_DEVICE);
 }
 
 static inline void
 dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
                           size_t size, enum dma_data_direction dir)
 {
-       _dma_cache_sync(plat_dma_addr_to_kernel(dev, dma_handle), size,
-                       DMA_TO_DEVICE);
+       _dma_cache_sync(dma_handle, size, DMA_TO_DEVICE);
 }
 
 static inline void
@@ -164,8 +145,7 @@ dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
                              unsigned long offset, size_t size,
                              enum dma_data_direction direction)
 {
-       _dma_cache_sync(plat_dma_addr_to_kernel(dev, dma_handle) + offset,
-                       size, DMA_FROM_DEVICE);
+       _dma_cache_sync(dma_handle + offset, size, DMA_FROM_DEVICE);
 }
 
 static inline void
@@ -173,27 +153,28 @@ dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
                                 unsigned long offset, size_t size,
                                 enum dma_data_direction direction)
 {
-       _dma_cache_sync(plat_dma_addr_to_kernel(dev, dma_handle) + offset,
-                       size, DMA_TO_DEVICE);
+       _dma_cache_sync(dma_handle + offset, size, DMA_TO_DEVICE);
 }
 
 static inline void
-dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
+dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sglist, int nelems,
                    enum dma_data_direction dir)
 {
        int i;
+       struct scatterlist *sg;
 
-       for (i = 0; i < nelems; i++, sg++)
+       for_each_sg(sglist, sg, nelems, i)
                _dma_cache_sync((unsigned int)sg_virt(sg), sg->length, dir);
 }
 
 static inline void
-dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
-                      enum dma_data_direction dir)
+dma_sync_sg_for_device(struct device *dev, struct scatterlist *sglist,
+                      int nelems, enum dma_data_direction dir)
 {
        int i;
+       struct scatterlist *sg;
 
-       for (i = 0; i < nelems; i++, sg++)
+       for_each_sg(sglist, sg, nelems, i)
                _dma_cache_sync((unsigned int)sg_virt(sg), sg->length, dir);
 }
 
index a262828576839d9d48e5e2be18ae65b9fad815e5..51a99e25fe338251b23fe00bf2b1849b6a7252a2 100644 (file)
 /* These ELF defines belong to uapi but libc elf.h already defines them */
 #define EM_ARCOMPACT           93
 
+#define EM_ARCV2               195     /* ARCv2 Cores */
+
+#define EM_ARC_INUSE           (IS_ENABLED(CONFIG_ISA_ARCOMPACT) ? \
+                                       EM_ARCOMPACT : EM_ARCV2)
+
 /* ARC Relocations (kernel Modules only) */
 #define  R_ARC_32              0x4
 #define  R_ARC_32_ME           0x1B
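EM_ARC_INUSE collapses to the ELF machine ID matching the configured ISA, so binaries built for the other ISA are rejected with a single comparison. A loader-side sketch (illustrative only; the real check is the arch's elf_check_arch()):

    #include <linux/elf.h>

    static inline int arc_elf_machine_ok(const struct elf32_hdr *hdr)
    {
            /* EM_ARCOMPACT (93) on ARCompact kernels, EM_ARCV2 (195) on ARCv2 */
            return hdr->e_machine == EM_ARC_INUSE;
    }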
diff --git a/arch/arc/include/asm/entry-arcv2.h b/arch/arc/include/asm/entry-arcv2.h
new file mode 100644 (file)
index 0000000..b5ff87e
--- /dev/null
@@ -0,0 +1,190 @@
+
+#ifndef __ASM_ARC_ENTRY_ARCV2_H
+#define __ASM_ARC_ENTRY_ARCV2_H
+
+#include <asm/asm-offsets.h>
+#include <asm/irqflags-arcv2.h>
+#include <asm/thread_info.h>   /* For THREAD_SIZE */
+
+/*------------------------------------------------------------------------*/
+.macro INTERRUPT_PROLOGUE      called_from
+
+       ; Before jumping to Interrupt Vector, hardware micro-ops did the following:
+       ;   1. SP auto-switched to kernel mode stack
+       ;   2. STATUS32.Z flag set to U mode at time of interrupt (U:1, K:0)
+       ;   3. Auto saved: r0-r11, blink, LPE,LPS,LPC, JLI,LDI,EI, PC, STAT32
+       ;
+       ; Now manually save: r12, sp, fp, gp, r25
+
+       PUSH    r12
+
+       ; Saving pt_regs->sp correctly requires some extra work due to the way
+       ; Auto stack switch works
+       ;  - U mode: retrieve it from AUX_USER_SP
+       ;  - K mode: add the offset from current SP where H/w starts auto push
+       ;
+       ; Utilize the fact that Z bit is set if Intr taken in U mode
+       mov.nz  r9, sp
+       add.nz  r9, r9, SZ_PT_REGS - PT_sp - 4
+       bnz     1f
+
+       lr      r9, [AUX_USER_SP]
+1:
+       PUSH    r9      ; SP
+
+       PUSH    fp
+       PUSH    gp
+
+#ifdef CONFIG_ARC_CURR_IN_REG
+       PUSH    r25                     ; user_r25
+       GET_CURR_TASK_ON_CPU    r25
+#else
+       sub     sp, sp, 4
+#endif
+
+.ifnc \called_from, exception
+       sub     sp, sp, 12      ; BTA/ECR/orig_r0 placeholder per pt_regs
+.endif
+
+.endm
+
+/*------------------------------------------------------------------------*/
+.macro INTERRUPT_EPILOGUE      called_from
+
+.ifnc \called_from, exception
+       add     sp, sp, 12      ; skip BTA/ECR/orig_r0 placeholders
+.endif
+
+#ifdef CONFIG_ARC_CURR_IN_REG
+       POP     r25
+#else
+       add     sp, sp, 4
+#endif
+
+       POP     gp
+       POP     fp
+
+       ; Don't touch AUX_USER_SP if returning to K mode (Z bit set)
+       ; (Z bit set on K mode is inverse of INTERRUPT_PROLOGUE)
+       add.z   sp, sp, 4
+       bz      1f
+
+       POPAX   AUX_USER_SP
+1:
+       POP     r12
+
+.endm
+
+/*------------------------------------------------------------------------*/
+.macro EXCEPTION_PROLOGUE
+
+       ; Before jumping to Exception Vector, hardware micro-ops did the following:
+       ;   1. SP auto-switched to kernel mode stack
+       ;   2. STATUS32.Z flag set to U mode at time of interrupt (U:1,K:0)
+       ;
+       ; Now manually save the complete reg file
+
+       PUSH    r9              ; freeup a register: slot of erstatus
+
+       PUSHAX  eret
+       sub     sp, sp, 12      ; skip JLI, LDI, EI
+       PUSH    lp_count
+       PUSHAX  lp_start
+       PUSHAX  lp_end
+       PUSH    blink
+
+       PUSH    r11
+       PUSH    r10
+
+       ld.as   r9,  [sp, 10]   ; load stashed r9 (status32 stack slot)
+       lr      r10, [erstatus]
+       st.as   r10, [sp, 10]   ; save status32 at its right stack slot
+
+       PUSH    r9
+       PUSH    r8
+       PUSH    r7
+       PUSH    r6
+       PUSH    r5
+       PUSH    r4
+       PUSH    r3
+       PUSH    r2
+       PUSH    r1
+       PUSH    r0
+
+       ; -- for interrupts, regs above are auto-saved by h/w in that order --
+       ; Now do what ISR prologue does (manually save r12, sp, fp, gp, r25)
+       ;
+       ; Set Z flag if this was from U mode (expected by INTERRUPT_PROLOGUE)
+       ; Although H/w exception micro-ops do set Z flag for U mode (just like
+       ; for interrupts), it could get clobbered in case we soft land here from
+       ; a TLB Miss exception handler (tlbex.S)
+
+       and     r10, r10, STATUS_U_MASK
+       xor.f   0, r10, STATUS_U_MASK
+
+       INTERRUPT_PROLOGUE  exception
+
+       PUSHAX  erbta
+       PUSHAX  ecr             ; r9 contains ECR, expected by EV_Trap
+
+       PUSH    r0              ; orig_r0
+.endm
+
+/*------------------------------------------------------------------------*/
+.macro EXCEPTION_EPILOGUE
+
+       ; Assumes r0 has PT_status32
+       btst   r0, STATUS_U_BIT ; Z flag set if K, used in INTERRUPT_EPILOGUE
+
+       add     sp, sp, 8       ; orig_r0/ECR don't need restoring
+       POPAX   erbta
+
+       INTERRUPT_EPILOGUE  exception
+
+       POP     r0
+       POP     r1
+       POP     r2
+       POP     r3
+       POP     r4
+       POP     r5
+       POP     r6
+       POP     r7
+       POP     r8
+       POP     r9
+       POP     r10
+       POP     r11
+
+       POP     blink
+       POPAX   lp_end
+       POPAX   lp_start
+
+       POP     r9
+       mov     lp_count, r9
+
+       add     sp, sp, 12      ; skip JLI, LDI, EI
+       POPAX   eret
+       POPAX   erstatus
+
+       ld.as   r9, [sp, -12]   ; reload r9 which got clobbered
+.endm
+
+.macro FAKE_RET_FROM_EXCPN
+       lr      r9, [status32]
+       bic     r9, r9, (STATUS_U_MASK|STATUS_DE_MASK|STATUS_AE_MASK)
+       or      r9, r9, (STATUS_L_MASK|STATUS_IE_MASK)
+       kflag   r9
+.endm
+
+/* Get thread_info of "current" tsk */
+.macro GET_CURR_THR_INFO_FROM_SP  reg
+       bmskn \reg, sp, THREAD_SHIFT - 1
+.endm
+
+/* Get CPU-ID of this core */
+.macro  GET_CPU_ID  reg
+       lr  \reg, [identity]
+       xbfu \reg, \reg, 0xE8   /* 00111    01000 */
+                               /* M = 8-1  N = 8 */
+.endm
+
+#endif
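GET_CPU_ID above uses XBFU to pull a bit field out of the IDENTITY aux register; per the encoding comment (M = 8-1, N = 8) that is 8 bits starting at bit 8. The C equivalent, as a sketch (arc_cpu_id_sketch() is a made-up name; read_aux_reg() is the existing accessor, AUX_IDENTITY the register index defined earlier in this patch):

    static inline unsigned int arc_cpu_id_sketch(void)
    {
            unsigned int identity = read_aux_reg(AUX_IDENTITY);

            return (identity >> 8) & 0xff;  /* same field XBFU extracts above */
    }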
diff --git a/arch/arc/include/asm/entry-compact.h b/arch/arc/include/asm/entry-compact.h
new file mode 100644 (file)
index 0000000..415443c
--- /dev/null
@@ -0,0 +1,307 @@
+/*
+ * Copyright (C) 2014-15 Synopsys, Inc. (www.synopsys.com)
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Vineetg: March 2009 (Supporting 2 levels of Interrupts)
+ *  Stack switching code can no longer rely on the fact that
+ *  if we are NOT in user mode, stack is switched to kernel mode.
+ *  e.g. L2 IRQ interrupted an L1 ISR which had not yet completed
+ *  its prologue including stack switching from user mode
+ *
+ * Vineetg: Aug 28th 2008: Bug #94984
+ *  -Zero Overhead Loop Context should be cleared when entering IRQ/Excp/Trap
+ *   Normally CPU does this automatically, however when doing FAKE rtie,
+ *   we also need to explicitly do this. The problem in macros
+ *   FAKE_RET_FROM_EXCPN and FAKE_RET_FROM_EXCPN_LOCK_IRQ was that this bit
+ *   was being "CLEARED" rather then "SET". Actually "SET" clears ZOL context
+ *   was being "CLEARED" rather than "SET". Actually "SET" clears ZOL context
+ * Vineetg: May 5th 2008
+ *  -Modified CALLEE_REG save/restore macros to handle the fact that
+ *      r25 contains the kernel current task ptr
+ *  - Defined Stack Switching Macro to be reused in all intr/excp hdlrs
+ *  - Shaved off 11 instructions from RESTORE_ALL_INT1 by using the
+ *      address Write back load ld.ab instead of separate ld/add instn
+ *
+ * Amit Bhor, Sameer Dhavale: Codito Technologies 2004
+ */
+
+#ifndef __ASM_ARC_ENTRY_COMPACT_H
+#define __ASM_ARC_ENTRY_COMPACT_H
+
+#include <asm/asm-offsets.h>
+#include <asm/irqflags-compact.h>
+#include <asm/thread_info.h>   /* For THREAD_SIZE */
+
+/*--------------------------------------------------------------
+ * Switch to Kernel Mode stack if SP points to User Mode stack
+ *
+ * Entry   : r9 contains pre-IRQ/exception/trap status32
+ * Exit    : SP set to K mode stack
+ *           SP at the time of entry (K/U) saved @ pt_regs->sp
+ * Clobbers: r9
+ *-------------------------------------------------------------*/
+
+.macro SWITCH_TO_KERNEL_STK
+
+       /* User Mode when this happened ? Yes: Proceed to switch stack */
+       bbit1   r9, STATUS_U_BIT, 88f
+
+       /* OK we were already in kernel mode when this event happened, thus can
+        * assume SP is kernel mode SP. _NO_ need to do any stack switching
+        */
+
+#ifdef CONFIG_ARC_COMPACT_IRQ_LEVELS
+       /* However....
+        * If Level 2 Interrupts enabled, we may end up with a corner case:
+        * 1. User Task executing
+        * 2. L1 IRQ taken, ISR starts (CPU auto-switched to KERNEL mode)
+        * 3. But before it could switch SP from USER to KERNEL stack
+        *      an L2 IRQ "Interrupts" L1
+        * That way although L2 IRQ happened in Kernel mode, stack is still
+        * not switched.
+        * To handle this, we may need to switch stack even if in kernel mode
+        * provided SP has values in range of USER mode stack ( < 0x7000_0000 )
+        */
+       brlo sp, VMALLOC_START, 88f
+
+       /* TODO: vineetg:
+        * We need to be a bit more cautious here. What if a kernel bug in
+        * the L1 ISR caused SP to go whacko (some small value which looks like
+        * USER stk) and then we take L2 ISR.
+        * Above brlo alone would treat it as a valid L1-L2 scenario
+        * instead of shouting aloud
+        * The only feasible way is to make sure this L2 happened in
+        * L1 prologue ONLY i.e. ilink2 is less than a pre-set marker in
+        * L1 ISR before it switches stack
+        */
+
+#endif
+
+    /*------Intr/Excp happened in kernel mode, SP already set up ------ */
+       /* save it nevertheless @ pt_regs->sp for uniformity */
+
+       b.d     66f
+       st      sp, [sp, PT_sp - SZ_PT_REGS]
+
+88: /*------Intr/Excp happened in user mode, "switch" stack ------ */
+
+       GET_CURR_TASK_ON_CPU   r9
+
+       /* With current tsk in r9, get its kernel mode stack base */
+       GET_TSK_STACK_BASE  r9, r9
+
+       /* save U mode SP @ pt_regs->sp */
+       st      sp, [r9, PT_sp - SZ_PT_REGS]
+
+       /* final SP switch */
+       mov     sp, r9
+66:
+.endm
+
+/*------------------------------------------------------------
+ * "FAKE" a rtie to return from CPU Exception context
+ * This is to re-enable Exceptions within exception
+ * Look at EV_ProtV to see how this is actually used
+ *-------------------------------------------------------------*/
+
+.macro FAKE_RET_FROM_EXCPN
+
+       ld  r9, [sp, PT_status32]
+       bic r9, r9, (STATUS_U_MASK|STATUS_DE_MASK)
+       bset  r9, r9, STATUS_L_BIT
+       sr  r9, [erstatus]
+       mov r9, 55f
+       sr  r9, [eret]
+
+       rtie
+55:
+.endm
+
+/*--------------------------------------------------------------
+ * For early Exception/ISR Prologue, a core reg is temporarily needed to
+ * code the rest of the prologue (stack switching). This is done by stashing
+ * it in memory (non-SMP case) or in the SCRATCH0 Aux Reg (SMP).
+ *
+ * Before saving the full regfile - this reg is restored back, only
+ * to be saved again on kernel mode stack, as part of pt_regs.
+ *-------------------------------------------------------------*/
+.macro PROLOG_FREEUP_REG       reg, mem
+#ifdef CONFIG_SMP
+       sr  \reg, [ARC_REG_SCRATCH_DATA0]
+#else
+       st  \reg, [\mem]
+#endif
+.endm
+
+.macro PROLOG_RESTORE_REG      reg, mem
+#ifdef CONFIG_SMP
+       lr  \reg, [ARC_REG_SCRATCH_DATA0]
+#else
+       ld  \reg, [\mem]
+#endif
+.endm
+
+/*--------------------------------------------------------------
+ * Exception Entry prologue
+ * -Switches stack to K mode (if not already)
+ * -Saves the register file
+ *
+ * After this it is safe to call the "C" handlers
+ *-------------------------------------------------------------*/
+.macro EXCEPTION_PROLOGUE
+
+       /* Need at least 1 reg to code the early exception prologue */
+       PROLOG_FREEUP_REG r9, @ex_saved_reg1
+
+       /* U/K mode at time of exception (stack not switched if already K) */
+       lr  r9, [erstatus]
+
+       /* ARC700 doesn't provide auto-stack switching */
+       SWITCH_TO_KERNEL_STK
+
+#ifdef CONFIG_ARC_CURR_IN_REG
+       /* Treat r25 as scratch reg (save on stack) and load with "current" */
+       PUSH    r25
+       GET_CURR_TASK_ON_CPU   r25
+#else
+       sub     sp, sp, 4
+#endif
+
+       st.a    r0, [sp, -8]    /* orig_r0 needed for syscall (skip ECR slot) */
+       sub     sp, sp, 4       /* skip pt_regs->sp, already saved above */
+
+       /* Restore r9 used to code the early prologue */
+       PROLOG_RESTORE_REG  r9, @ex_saved_reg1
+
+       /* now we are ready to save the regfile */
+       SAVE_R0_TO_R12
+       PUSH    gp
+       PUSH    fp
+       PUSH    blink
+       PUSHAX  eret
+       PUSHAX  erstatus
+       PUSH    lp_count
+       PUSHAX  lp_end
+       PUSHAX  lp_start
+       PUSHAX  erbta
+
+       lr      r9, [ecr]
+       st      r9, [sp, PT_event]    /* EV_Trap expects r9 to have ECR */
+.endm
+
+/*--------------------------------------------------------------
+ * Restore all registers used by system call or Exceptions
+ * SP should always be pointing to the next free stack element
+ * when entering this macro.
+ *
+ * NOTE:
+ *
+ * It is recommended that lp_count/ilink1/ilink2 not be used as a dest reg
+ * for memory load operations. If used that way, interrupts are deferred
+ * by hardware, and that is not good.
+ *-------------------------------------------------------------*/
+.macro EXCEPTION_EPILOGUE
+       POPAX   erbta
+       POPAX   lp_start
+       POPAX   lp_end
+
+       POP     r9
+       mov     lp_count, r9    ;LD to lp_count is not allowed
+
+       POPAX   erstatus
+       POPAX   eret
+       POP     blink
+       POP     fp
+       POP     gp
+       RESTORE_R12_TO_R0
+
+       ld  sp, [sp] /* restore original sp */
+       /* orig_r0, ECR, user_r25 skipped automatically */
+.endm
+
+/* Dummy ECR values for Interrupts */
+#define event_IRQ1             0x0031abcd
+#define event_IRQ2             0x0032abcd
+
+.macro INTERRUPT_PROLOGUE  LVL
+
+       /* free up r9 as scratchpad */
+       PROLOG_FREEUP_REG r9, @int\LVL\()_saved_reg
+
+       /* Which mode (user/kernel) was the system in when the intr occurred */
+       lr  r9, [status32_l\LVL\()]
+
+       SWITCH_TO_KERNEL_STK
+
+#ifdef CONFIG_ARC_CURR_IN_REG
+       /* Treat r25 as scratch reg (save on stack) and load with "current" */
+       PUSH    r25
+       GET_CURR_TASK_ON_CPU   r25
+#else
+       sub     sp, sp, 4
+#endif
+
+       PUSH    0x003\LVL\()abcd    /* Dummy ECR */
+       sub     sp, sp, 8           /* skip orig_r0 (not needed)
+                                      skip pt_regs->sp, already saved above */
+
+       /* Restore r9 used to code the early prologue */
+       PROLOG_RESTORE_REG  r9, @int\LVL\()_saved_reg
+
+       SAVE_R0_TO_R12
+       PUSH    gp
+       PUSH    fp
+       PUSH    blink
+       PUSH    ilink\LVL\()
+       PUSHAX  status32_l\LVL\()
+       PUSH    lp_count
+       PUSHAX  lp_end
+       PUSHAX  lp_start
+       PUSHAX  bta_l\LVL\()
+.endm
+
+/*--------------------------------------------------------------
+ * Restore all registers used by interrupt handlers.
+ *
+ * NOTE:
+ *
+ * It is recommended that lp_count/ilink1/ilink2 not be used as a dest reg
+ * for memory load operations. If used that way, interrupts are deferred
+ * by hardware, and that is not good.
+ *-------------------------------------------------------------*/
+.macro INTERRUPT_EPILOGUE  LVL
+       POPAX   bta_l\LVL\()
+       POPAX   lp_start
+       POPAX   lp_end
+
+       POP     r9
+       mov     lp_count, r9    ;LD to lp_count is not allowed
+
+       POPAX   status32_l\LVL\()
+       POP     ilink\LVL\()
+       POP     blink
+       POP     fp
+       POP     gp
+       RESTORE_R12_TO_R0
+
+       ld  sp, [sp] /* restore original sp */
+       /* orig_r0, ECR, user_r25 skipped automatically */
+.endm
+
+/* Get thread_info of "current" tsk */
+.macro GET_CURR_THR_INFO_FROM_SP  reg
+       bic \reg, sp, (THREAD_SIZE - 1)
+.endm
+
+/* Get CPU-ID of this core */
+.macro  GET_CPU_ID  reg
+       lr  \reg, [identity]
+       lsr \reg, \reg, 8
+       bmsk \reg, \reg, 7
+.endm
+
+#endif  /* __ASM_ARC_ENTRY_COMPACT_H */
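As a reading aid (not part of the commit), the stack-switch decision encoded by SWITCH_TO_KERNEL_STK above can be restated as a small C sketch; needs_stack_switch is an invented name, while STATUS_U_MASK, VMALLOC_START and the config symbol come from the headers in this series.

/* Sketch: does the low-level prologue have to move SP to the kernel stack? */
static inline int needs_stack_switch(unsigned long pre_event_status32,
                                     unsigned long sp)
{
        if (pre_event_status32 & STATUS_U_MASK)
                return 1;       /* event taken in user mode: always switch */
#ifdef CONFIG_ARC_COMPACT_IRQ_LEVELS
        if (sp < VMALLOC_START)
                return 1;       /* L2 IRQ hit the L1 prologue before its own switch */
#endif
        return 0;               /* already running on a kernel mode stack */
}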
index 884081099f800fd6b4ba133bc1f1746084845c49..ad7860c5ce153c731264f770067e653b70568fd5 100644 (file)
@@ -1,45 +1,27 @@
 /*
+ * Copyright (C) 2014-15 Synopsys, Inc. (www.synopsys.com)
  * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
- *
- * Vineetg: March 2009 (Supporting 2 levels of Interrupts)
- *  Stack switching code can no longer reliably rely on the fact that
- *  if we are NOT in user mode, stack is switched to kernel mode.
- *  e.g. L2 IRQ interrupted a L1 ISR which had not yet completed
- *  it's prologue including stack switching from user mode
- *
- * Vineetg: Aug 28th 2008: Bug #94984
- *  -Zero Overhead Loop Context shd be cleared when entering IRQ/EXcp/Trap
- *   Normally CPU does this automatically, however when doing FAKE rtie,
- *   we also need to explicitly do this. The problem in macros
- *   FAKE_RET_FROM_EXCPN and FAKE_RET_FROM_EXCPN_LOCK_IRQ was that this bit
- *   was being "CLEARED" rather then "SET". Actually "SET" clears ZOL context
- *
- * Vineetg: May 5th 2008
- *  -Modified CALLEE_REG save/restore macros to handle the fact that
- *      r25 contains the kernel current task ptr
- *  - Defined Stack Switching Macro to be reused in all intr/excp hdlrs
- *  - Shaved off 11 instructions from RESTORE_ALL_INT1 by using the
- *      address Write back load ld.ab instead of seperate ld/add instn
- *
- * Amit Bhor, Sameer Dhavale: Codito Technologies 2004
  */
 
 #ifndef __ASM_ARC_ENTRY_H
 #define __ASM_ARC_ENTRY_H
 
-#ifdef __ASSEMBLY__
 #include <asm/unistd.h>                /* For NR_syscalls definition */
-#include <asm/asm-offsets.h>
 #include <asm/arcregs.h>
 #include <asm/ptrace.h>
 #include <asm/processor.h>     /* For VMALLOC_START */
-#include <asm/thread_info.h>   /* For THREAD_SIZE */
 #include <asm/mmu.h>
 
+#ifdef CONFIG_ISA_ARCOMPACT
+#include <asm/entry-compact.h> /* ISA specific bits */
+#else
+#include <asm/entry-arcv2.h>
+#endif
+
 /* Note on the LD/ST addr modes with addr reg wback
  *
  * LD.a same as LD.aw
        POP     r13
 .endm
 
-#define OFF_USER_R25_FROM_R24  (SZ_CALLEE_REGS + SZ_PT_REGS - 8)/4
-
 /*--------------------------------------------------------------
  * Collect User Mode callee regs as struct callee_regs - needed by
  * fork/do_signal/unaligned-access-emulation.
  *-------------------------------------------------------------*/
 .macro SAVE_CALLEE_SAVED_USER
 
+       mov     r12, sp         ; save SP as ref to pt_regs
        SAVE_R13_TO_R24
 
 #ifdef CONFIG_ARC_CURR_IN_REG
-       ; Retrieve orig r25 and save it on stack
-       ld.as   r12, [sp, OFF_USER_R25_FROM_R24]
-       st.a    r12, [sp, -4]
+       ; Retrieve orig r25 and save it with rest of callee_regs
+       ld.as   r12, [r12, PT_user_r25]
+       PUSH    r12
 #else
        PUSH    r25
 #endif
 .macro RESTORE_CALLEE_SAVED_USER
 
 #ifdef CONFIG_ARC_CURR_IN_REG
-       ld.ab   r12, [sp, 4]
-       st.as   r12, [sp, OFF_USER_R25_FROM_R24]
+       POP     r12
 #else
        POP     r25
 #endif
        RESTORE_R24_TO_R13
+
+       ; SP is back to start of pt_regs
+#ifdef CONFIG_ARC_CURR_IN_REG
+       st.as   r12, [sp, PT_user_r25]
+#endif
 .endm
 
 /*--------------------------------------------------------------
 
 .endm
 
-/*--------------------------------------------------------------
- * Switch to Kernel Mode stack if SP points to User Mode stack
- *
- * Entry   : r9 contains pre-IRQ/exception/trap status32
- * Exit    : SP is set to kernel mode stack pointer
- *           If CURR_IN_REG, r25 set to "current" task pointer
- * Clobbers: r9
- *-------------------------------------------------------------*/
-
-.macro SWITCH_TO_KERNEL_STK
-
-       /* User Mode when this happened ? Yes: Proceed to switch stack */
-       bbit1   r9, STATUS_U_BIT, 88f
-
-       /* OK we were already in kernel mode when this event happened, thus can
-        * assume SP is kernel mode SP. _NO_ need to do any stack switching
-        */
-
-#ifdef CONFIG_ARC_COMPACT_IRQ_LEVELS
-       /* However....
-        * If Level 2 Interrupts enabled, we may end up with a corner case:
-        * 1. User Task executing
-        * 2. L1 IRQ taken, ISR starts (CPU auto-switched to KERNEL mode)
-        * 3. But before it could switch SP from USER to KERNEL stack
-        *      a L2 IRQ "Interrupts" L1
-        * Thay way although L2 IRQ happened in Kernel mode, stack is still
-        * not switched.
-        * To handle this, we may need to switch stack even if in kernel mode
-        * provided SP has values in range of USER mode stack ( < 0x7000_0000 )
-        */
-       brlo sp, VMALLOC_START, 88f
-
-       /* TODO: vineetg:
-        * We need to be a bit more cautious here. What if a kernel bug in
-        * L1 ISR, caused SP to go whaco (some small value which looks like
-        * USER stk) and then we take L2 ISR.
-        * Above brlo alone would treat it as a valid L1-L2 sceanrio
-        * instead of shouting alound
-        * The only feasible way is to make sure this L2 happened in
-        * L1 prelogue ONLY i.e. ilink2 is less than a pre-set marker in
-        * L1 ISR before it switches stack
-        */
-
-#endif
-
-       /* Save Pre Intr/Exception KERNEL MODE SP on kernel stack
-        * safe-keeping not really needed, but it keeps the epilogue code
-        * (SP restore) simpler/uniform.
-        */
-       b.d     66f
-       mov     r9, sp
-
-88: /*------Intr/Ecxp happened in user mode, "switch" stack ------ */
-
-       GET_CURR_TASK_ON_CPU   r9
-
-       /* With current tsk in r9, get it's kernel mode stack base */
-       GET_TSK_STACK_BASE  r9, r9
-
-66:
-#ifdef CONFIG_ARC_CURR_IN_REG
-       /*
-        * Treat r25 as scratch reg, save it on stack first
-        * Load it with current task pointer
-        */
-       st      r25, [r9, -4]
-       GET_CURR_TASK_ON_CPU   r25
-#endif
-
-       /* Save Pre Intr/Exception User SP on kernel stack */
-       st.a    sp, [r9, -16]   ; Make room for orig_r0, ECR, user_r25
-
-       /* CAUTION:
-        * SP should be set at the very end when we are done with everything
-        * In case of 2 levels of interrupt we depend on value of SP to assume
-        * that everything else is done (loading r25 etc)
-        */
-
-       /* set SP to point to kernel mode stack */
-       mov sp, r9
-
-       /* ----- Stack Switched to kernel Mode, Now save REG FILE ----- */
-
-.endm
-
-/*------------------------------------------------------------
- * "FAKE" a rtie to return from CPU Exception context
- * This is to re-enable Exceptions within exception
- * Look at EV_ProtV to see how this is actually used
- *-------------------------------------------------------------*/
-
-.macro FAKE_RET_FROM_EXCPN  reg
-
-       ld  \reg, [sp, PT_status32]
-       bic  \reg, \reg, (STATUS_U_MASK|STATUS_DE_MASK)
-       bset \reg, \reg, STATUS_L_BIT
-       sr  \reg, [erstatus]
-       mov \reg, 55f
-       sr  \reg, [eret]
-
-       rtie
-55:
-.endm
-
-/*
- * @reg [OUT] &thread_info of "current"
- */
-.macro GET_CURR_THR_INFO_FROM_SP  reg
-       bic \reg, sp, (THREAD_SIZE - 1)
-.endm
-
 /*
  * @reg [OUT] thread_info->flags of "current"
  */
        ld  \reg, [\reg, THREAD_INFO_FLAGS]
 .endm
 
-/*--------------------------------------------------------------
- * For early Exception Prologue, a core reg is temporarily needed to
- * code the rest of prolog (stack switching). This is done by stashing
- * it to memory (non-SMP case) or SCRATCH0 Aux Reg (SMP).
- *
- * Before saving the full regfile - this reg is restored back, only
- * to be saved again on kernel mode stack, as part of pt_regs.
- *-------------------------------------------------------------*/
-.macro EXCPN_PROLOG_FREEUP_REG reg
-#ifdef CONFIG_SMP
-       sr  \reg, [ARC_REG_SCRATCH_DATA0]
-#else
-       st  \reg, [@ex_saved_reg1]
-#endif
-.endm
-
-.macro EXCPN_PROLOG_RESTORE_REG        reg
-#ifdef CONFIG_SMP
-       lr  \reg, [ARC_REG_SCRATCH_DATA0]
-#else
-       ld  \reg, [@ex_saved_reg1]
-#endif
-.endm
-
-/*--------------------------------------------------------------
- * Exception Entry prologue
- * -Switches stack to K mode (if not already)
- * -Saves the register file
- *
- * After this it is safe to call the "C" handlers
- *-------------------------------------------------------------*/
-.macro EXCEPTION_PROLOGUE
-
-       /* Need at least 1 reg to code the early exception prologue */
-       EXCPN_PROLOG_FREEUP_REG r9
-
-       /* U/K mode at time of exception (stack not switched if already K) */
-       lr  r9, [erstatus]
-
-       /* ARC700 doesn't provide auto-stack switching */
-       SWITCH_TO_KERNEL_STK
-
-       /* save the regfile */
-       SAVE_ALL_SYS
-.endm
-
-/*--------------------------------------------------------------
- * Save all registers used by Exceptions (TLB Miss, Prot-V, Mem err etc)
- * Requires SP to be already switched to kernel mode Stack
- * sp points to the next free element on the stack at exit of this macro.
- * Registers are pushed / popped in the order defined in struct ptregs
- * in asm/ptrace.h
- * Note that syscalls are implemented via TRAP which is also a exception
- * from CPU's point of view
- *-------------------------------------------------------------*/
-.macro SAVE_ALL_SYS
-
-       lr      r9, [ecr]
-       st      r9, [sp, 8]    /* ECR */
-       st      r0, [sp, 4]    /* orig_r0, needed only for sys calls */
-
-       /* Restore r9 used to code the early prologue */
-       EXCPN_PROLOG_RESTORE_REG  r9
-
-       SAVE_R0_TO_R12
-       PUSH    gp
-       PUSH    fp
-       PUSH    blink
-       PUSHAX  eret
-       PUSHAX  erstatus
-       PUSH    lp_count
-       PUSHAX  lp_end
-       PUSHAX  lp_start
-       PUSHAX  erbta
-.endm
-
-/*--------------------------------------------------------------
- * Restore all registers used by system call or Exceptions
- * SP should always be pointing to the next free stack element
- * when entering this macro.
- *
- * NOTE:
- *
- * It is recommended that lp_count/ilink1/ilink2 not be used as a dest reg
- * for memory load operations. If used in that way interrupts are deffered
- * by hardware and that is not good.
- *-------------------------------------------------------------*/
-.macro RESTORE_ALL_SYS
-       POPAX   erbta
-       POPAX   lp_start
-       POPAX   lp_end
-
-       POP     r9
-       mov     lp_count, r9    ;LD to lp_count is not allowed
-
-       POPAX   erstatus
-       POPAX   eret
-       POP     blink
-       POP     fp
-       POP     gp
-       RESTORE_R12_TO_R0
-
-       ld  sp, [sp] /* restore original sp */
-       /* orig_r0, ECR, user_r25 skipped automatically */
-.endm
-
-
-/*--------------------------------------------------------------
- * Save all registers used by interrupt handlers.
- *-------------------------------------------------------------*/
-.macro SAVE_ALL_INT1
-
-       /* restore original r9 to be saved as part of reg-file */
-#ifdef CONFIG_SMP
-       lr  r9, [ARC_REG_SCRATCH_DATA0]
-#else
-       ld  r9, [@int1_saved_reg]
-#endif
-
-       /* now we are ready to save the remaining context :) */
-       st      event_IRQ1, [sp, 8]    /* Dummy ECR */
-       st      0, [sp, 4]    /* orig_r0 , N/A for IRQ */
-
-       SAVE_R0_TO_R12
-       PUSH    gp
-       PUSH    fp
-       PUSH    blink
-       PUSH    ilink1
-       PUSHAX  status32_l1
-       PUSH    lp_count
-       PUSHAX  lp_end
-       PUSHAX  lp_start
-       PUSHAX  bta_l1
-.endm
-
-.macro SAVE_ALL_INT2
-
-       /* TODO-vineetg: SMP we can't use global nor can we use
-       *   SCRATCH0 as we do for int1 because while int1 is using
-       *   it, int2 can come
-       */
-       /* retsore original r9 , saved in sys_saved_r9 */
-       ld  r9, [@int2_saved_reg]
-
-       /* now we are ready to save the remaining context :) */
-       st      event_IRQ2, [sp, 8]    /* Dummy ECR */
-       st      0, [sp, 4]    /* orig_r0 , N/A for IRQ */
-
-       SAVE_R0_TO_R12
-       PUSH    gp
-       PUSH    fp
-       PUSH    blink
-       PUSH    ilink2
-       PUSHAX  status32_l2
-       PUSH    lp_count
-       PUSHAX  lp_end
-       PUSHAX  lp_start
-       PUSHAX  bta_l2
-.endm
-
-/*--------------------------------------------------------------
- * Restore all registers used by interrupt handlers.
- *
- * NOTE:
- *
- * It is recommended that lp_count/ilink1/ilink2 not be used as a dest reg
- * for memory load operations. If used in that way interrupts are deffered
- * by hardware and that is not good.
- *-------------------------------------------------------------*/
-
-.macro RESTORE_ALL_INT1
-       POPAX   bta_l1
-       POPAX   lp_start
-       POPAX   lp_end
-
-       POP     r9
-       mov     lp_count, r9    ;LD to lp_count is not allowed
-
-       POPAX   status32_l1
-       POP     ilink1
-       POP     blink
-       POP     fp
-       POP     gp
-       RESTORE_R12_TO_R0
-
-       ld  sp, [sp] /* restore original sp */
-       /* orig_r0, ECR, user_r25 skipped automatically */
-.endm
-
-.macro RESTORE_ALL_INT2
-       POPAX   bta_l2
-       POPAX   lp_start
-       POPAX   lp_end
-
-       POP     r9
-       mov     lp_count, r9    ;LD to lp_count is not allowed
-
-       POPAX   status32_l2
-       POP     ilink2
-       POP     blink
-       POP     fp
-       POP     gp
-       RESTORE_R12_TO_R0
-
-       ld  sp, [sp] /* restore original sp */
-       /* orig_r0, ECR, user_r25 skipped automatically */
-.endm
-
-
-/* Get CPU-ID of this core */
-.macro  GET_CPU_ID  reg
-       lr  \reg, [identity]
-       lsr \reg, \reg, 8
-       bmsk \reg, \reg, 7
-.endm
-
 #ifdef CONFIG_SMP
 
 /*-------------------------------------------------
 
 #endif /* CONFIG_ARC_CURR_IN_REG */
 
-#endif  /* __ASSEMBLY__ */
-
 #endif  /* __ASM_ARC_ENTRY_H */
index 7cc4ced5dbf4e4894c6b7d594a9810df74ab26d0..694ece8a024372bef7fd24b3ccb1e9dc323b4afe 100644 (file)
@@ -99,9 +99,45 @@ static inline void __raw_writel(u32 w, volatile void __iomem *addr)
 
 }
 
-#define readb_relaxed readb
-#define readw_relaxed readw
-#define readl_relaxed readl
+#ifdef CONFIG_ISA_ARCV2
+#include <asm/barrier.h>
+#define __iormb()              rmb()
+#define __iowmb()              wmb()
+#else
+#define __iormb()              do { } while (0)
+#define __iowmb()              do { } while (0)
+#endif
+
+/*
+ * MMIO can also get buffered/optimized in the micro-arch, so barriers are needed.
+ * Based on the ARM model for the typical use cases:
+ *
+ *     <ST [DMA buffer]>
+ *     <writel MMIO "go" reg>
+ *  or:
+ *     <readl MMIO "status" reg>
+ *     <LD [DMA buffer]>
+ *
+ * http://lkml.kernel.org/r/20150622133656.GG1583@arm.com
+ */
+#define readb(c)               ({ u8  __v = readb_relaxed(c); __iormb(); __v; })
+#define readw(c)               ({ u16 __v = readw_relaxed(c); __iormb(); __v; })
+#define readl(c)               ({ u32 __v = readl_relaxed(c); __iormb(); __v; })
+
+#define writeb(v,c)            ({ __iowmb(); writeb_relaxed(v,c); })
+#define writew(v,c)            ({ __iowmb(); writew_relaxed(v,c); })
+#define writel(v,c)            ({ __iowmb(); writel_relaxed(v,c); })
+
+/*
+ * Relaxed API for drivers which can handle any ordering themselves
+ */
+#define readb_relaxed(c)       __raw_readb(c)
+#define readw_relaxed(c)       __raw_readw(c)
+#define readl_relaxed(c)       __raw_readl(c)
+
+#define writeb_relaxed(v,c)    __raw_writeb(v,c)
+#define writew_relaxed(v,c)    __raw_writew(v,c)
+#define writel_relaxed(v,c)    __raw_writel(v,c)
 
 #include <asm-generic/io.h>
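For illustration only (not from this commit): a driver-side sketch of the pattern the barrier comment above refers to. DMA_GO_REG, DMA_STAT_REG, DMA_DONE_BIT and struct dma_desc are invented names.

/* Producer: plain stores fill the descriptor; writel()'s __iowmb() makes
 * them visible to the device before the MMIO "go" store. */
static void start_dma(void __iomem *regs, struct dma_desc *d, dma_addr_t buf)
{
        d->addr = buf;
        d->len  = PAGE_SIZE;
        writel(1, regs + DMA_GO_REG);
}

/* Consumer: readl()'s __iormb() orders the status read before any loads
 * from the buffer the device just wrote via DMA. */
static bool dma_done(void __iomem *regs)
{
        return readl(regs + DMA_STAT_REG) & DMA_DONE_BIT;
}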
 
index f38652fb2ed772566b0a47cf51a01183a329004c..bc51036373261c6068292830adae89a80b5d0c9c 100644 (file)
 #define NR_IRQS                128 /* allow some CPU external IRQ handling */
 
 /* Platform Independent IRQs */
+#ifdef CONFIG_ISA_ARCOMPACT
 #define TIMER0_IRQ      3
 #define TIMER1_IRQ      4
+#else
+#define TIMER0_IRQ      16
+#define TIMER1_IRQ      17
+#define IPI_IRQ         19
+#endif
 
 #include <linux/interrupt.h>
 #include <asm-generic/irq.h>
diff --git a/arch/arc/include/asm/irqflags-arcv2.h b/arch/arc/include/asm/irqflags-arcv2.h
new file mode 100644 (file)
index 0000000..ad481c2
--- /dev/null
@@ -0,0 +1,124 @@
+/*
+ * Copyright (C) 2014-15 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ASM_IRQFLAGS_ARCV2_H
+#define __ASM_IRQFLAGS_ARCV2_H
+
+#include <asm/arcregs.h>
+
+/* status32 Bits */
+#define STATUS_AD_BIT  19   /* Disable Align chk: core supports non-aligned */
+#define STATUS_IE_BIT  31
+
+#define STATUS_AD_MASK         (1<<STATUS_AD_BIT)
+#define STATUS_IE_MASK         (1<<STATUS_IE_BIT)
+
+#define AUX_USER_SP            0x00D
+#define AUX_IRQ_CTRL           0x00E
+#define AUX_IRQ_ACT            0x043   /* Active Intr across all levels */
+#define AUX_IRQ_LVL_PEND       0x200   /* Pending Intr across all levels */
+#define AUX_IRQ_PRIORITY       0x206
+#define ICAUSE                 0x40a
+#define AUX_IRQ_SELECT         0x40b
+#define AUX_IRQ_ENABLE         0x40c
+
+/* Was Intr taken in User Mode */
+#define AUX_IRQ_ACT_BIT_U      31
+
+/* 0 is highest level, but taken by FIRQs, if present in design */
+#define ARCV2_IRQ_DEF_PRIO             0
+
+/* seed value for status register */
+#define ISA_INIT_STATUS_BITS   (STATUS_IE_MASK | STATUS_AD_MASK | \
+                                       (ARCV2_IRQ_DEF_PRIO << 1))
+
+#ifndef __ASSEMBLY__
+
+/*
+ * Save IRQ state and disable IRQs
+ */
+static inline long arch_local_irq_save(void)
+{
+       unsigned long flags;
+
+       __asm__ __volatile__("  clri %0 \n" : "=r" (flags) : : "memory");
+
+       return flags;
+}
+
+/*
+ * restore saved IRQ state
+ */
+static inline void arch_local_irq_restore(unsigned long flags)
+{
+       __asm__ __volatile__("  seti %0 \n" : : "r" (flags) : "memory");
+}
+
+/*
+ * Unconditionally Enable IRQs
+ */
+static inline void arch_local_irq_enable(void)
+{
+       unsigned int irqact = read_aux_reg(AUX_IRQ_ACT);
+
+       if (irqact & 0xffff)
+               write_aux_reg(AUX_IRQ_ACT, irqact & ~0xffff);
+
+       __asm__ __volatile__("  seti    \n" : : : "memory");
+}
+
+/*
+ * Unconditionally Disable IRQs
+ */
+static inline void arch_local_irq_disable(void)
+{
+       __asm__ __volatile__("  clri    \n" : : : "memory");
+}
+
+/*
+ * save IRQ state
+ */
+static inline long arch_local_save_flags(void)
+{
+       unsigned long temp;
+
+       __asm__ __volatile__(
+       "       lr  %0, [status32]      \n"
+       : "=&r"(temp)
+       :
+       : "memory");
+
+       return temp;
+}
+
+/*
+ * Query IRQ state
+ */
+static inline int arch_irqs_disabled_flags(unsigned long flags)
+{
+       return !(flags & (STATUS_IE_MASK));
+}
+
+static inline int arch_irqs_disabled(void)
+{
+       return arch_irqs_disabled_flags(arch_local_save_flags());
+}
+
+#else
+
+.macro IRQ_DISABLE  scratch
+       clri
+.endm
+
+.macro IRQ_ENABLE  scratch
+       seti
+.endm
+
+#endif /* __ASSEMBLY__ */
+
+#endif
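A minimal usage sketch (editorial, not part of the commit) of how the clri/seti based helpers above are reached through the generic irqflags API; evt_count and bump_event_count are made-up names.

static DEFINE_PER_CPU(unsigned long, evt_count);

static void bump_event_count(void)
{
        unsigned long flags;

        local_irq_save(flags);          /* -> arch_local_irq_save(): clri */
        __this_cpu_inc(evt_count);      /* safe from local interrupts */
        local_irq_restore(flags);       /* -> arch_local_irq_restore(): seti */
}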
diff --git a/arch/arc/include/asm/irqflags-compact.h b/arch/arc/include/asm/irqflags-compact.h
new file mode 100644 (file)
index 0000000..aa80557
--- /dev/null
@@ -0,0 +1,183 @@
+/*
+ * Copyright (C) 2014-15 Synopsys, Inc. (www.synopsys.com)
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ASM_IRQFLAGS_ARCOMPACT_H
+#define __ASM_IRQFLAGS_ARCOMPACT_H
+
+/* vineetg: March 2010 : local_irq_save( ) optimisation
+ *  -Remove explicit mov of current status32 into reg, that is not needed
+ *  -Use BIC  insn instead of INVERTED + AND
+ *  -Conditionally disable interrupts (if they are not enabled, don't disable)
+*/
+
+#include <asm/arcregs.h>
+
+/* status32 Reg bits related to Interrupt Handling */
+#define STATUS_E1_BIT          1       /* Int 1 enable */
+#define STATUS_E2_BIT          2       /* Int 2 enable */
+#define STATUS_A1_BIT          3       /* Int 1 active */
+#define STATUS_A2_BIT          4       /* Int 2 active */
+
+#define STATUS_E1_MASK         (1<<STATUS_E1_BIT)
+#define STATUS_E2_MASK         (1<<STATUS_E2_BIT)
+#define STATUS_A1_MASK         (1<<STATUS_A1_BIT)
+#define STATUS_A2_MASK         (1<<STATUS_A2_BIT)
+#define STATUS_IE_MASK         (STATUS_E1_MASK | STATUS_E2_MASK)
+
+/* Other Interrupt Handling related Aux regs */
+#define AUX_IRQ_LEV            0x200   /* IRQ Priority: L1 or L2 */
+#define AUX_IRQ_HINT           0x201   /* For generating Soft Interrupts */
+#define AUX_IRQ_LV12           0x43    /* interrupt level register */
+
+#define AUX_IENABLE            0x40c
+#define AUX_ITRIGGER           0x40d
+#define AUX_IPULSE             0x415
+
+#define ISA_INIT_STATUS_BITS   STATUS_IE_MASK
+
+#ifndef __ASSEMBLY__
+
+/******************************************************************
+ * IRQ Control Macros
+ *
+ * All of them have "memory" clobber (compiler barrier) which is needed to
+ * ensure that LD/ST requiring irq safety (R-M-W when LLSC is not available)
+ * are redone after IRQs are re-enabled (and gcc doesn't reuse stale register)
+ *
+ * Noted at the time of Abilis Timer List corruption
+ *     Orig Bug + Rejected solution    : https://lkml.org/lkml/2013/3/29/67
+ *     Reasoning                       : https://lkml.org/lkml/2013/4/8/15
+ *
+ ******************************************************************/
+
+/*
+ * Save IRQ state and disable IRQs
+ */
+static inline long arch_local_irq_save(void)
+{
+       unsigned long temp, flags;
+
+       __asm__ __volatile__(
+       "       lr  %1, [status32]      \n"
+       "       bic %0, %1, %2          \n"
+       "       and.f 0, %1, %2 \n"
+       "       flag.nz %0              \n"
+       : "=r"(temp), "=r"(flags)
+       : "n"((STATUS_E1_MASK | STATUS_E2_MASK))
+       : "memory", "cc");
+
+       return flags;
+}
+
+/*
+ * restore saved IRQ state
+ */
+static inline void arch_local_irq_restore(unsigned long flags)
+{
+
+       __asm__ __volatile__(
+       "       flag %0                 \n"
+       :
+       : "r"(flags)
+       : "memory");
+}
+
+/*
+ * Unconditionally Enable IRQs
+ */
+extern void arch_local_irq_enable(void);
+
+/*
+ * Unconditionally Disable IRQs
+ */
+static inline void arch_local_irq_disable(void)
+{
+       unsigned long temp;
+
+       __asm__ __volatile__(
+       "       lr  %0, [status32]      \n"
+       "       and %0, %0, %1          \n"
+       "       flag %0                 \n"
+       : "=&r"(temp)
+       : "n"(~(STATUS_E1_MASK | STATUS_E2_MASK))
+       : "memory");
+}
+
+/*
+ * save IRQ state
+ */
+static inline long arch_local_save_flags(void)
+{
+       unsigned long temp;
+
+       __asm__ __volatile__(
+       "       lr  %0, [status32]      \n"
+       : "=&r"(temp)
+       :
+       : "memory");
+
+       return temp;
+}
+
+/*
+ * Query IRQ state
+ */
+static inline int arch_irqs_disabled_flags(unsigned long flags)
+{
+       return !(flags & (STATUS_E1_MASK
+#ifdef CONFIG_ARC_COMPACT_IRQ_LEVELS
+                       | STATUS_E2_MASK
+#endif
+               ));
+}
+
+static inline int arch_irqs_disabled(void)
+{
+       return arch_irqs_disabled_flags(arch_local_save_flags());
+}
+
+#else
+
+#ifdef CONFIG_TRACE_IRQFLAGS
+
+.macro TRACE_ASM_IRQ_DISABLE
+       bl      trace_hardirqs_off
+.endm
+
+.macro TRACE_ASM_IRQ_ENABLE
+       bl      trace_hardirqs_on
+.endm
+
+#else
+
+.macro TRACE_ASM_IRQ_DISABLE
+.endm
+
+.macro TRACE_ASM_IRQ_ENABLE
+.endm
+
+#endif
+
+.macro IRQ_DISABLE  scratch
+       lr      \scratch, [status32]
+       bic     \scratch, \scratch, (STATUS_E1_MASK | STATUS_E2_MASK)
+       flag    \scratch
+       TRACE_ASM_IRQ_DISABLE
+.endm
+
+.macro IRQ_ENABLE  scratch
+       lr      \scratch, [status32]
+       or      \scratch, \scratch, (STATUS_E1_MASK | STATUS_E2_MASK)
+       flag    \scratch
+       TRACE_ASM_IRQ_ENABLE
+.endm
+
+#endif /* __ASSEMBLY__ */
+
+#endif
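A hedged illustration of why the "memory" clobber discussed above matters; shared_count is an invented variable that is also updated from an interrupt handler.

static int shared_count;        /* also updated from an ISR */

static void bump(void)
{
        unsigned long flags;

        flags = arch_local_irq_save();
        /* the "memory" clobber forces gcc to (re)load shared_count here,
         * i.e. after IRQs are off, instead of reusing a stale register
         * read before the flag instruction */
        shared_count++;
        arch_local_irq_restore(flags);
}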
index 27ecc6975a5845dee5960197bded8edeff261cbc..59bc6a64f75da77c9cf1972ba35625d74651d51e 100644 (file)
@@ -1,4 +1,5 @@
 /*
+ * Copyright (C) 2014-15 Synopsys, Inc. (www.synopsys.com)
  * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
  *
  * This program is free software; you can redistribute it and/or modify
 #ifndef __ASM_ARC_IRQFLAGS_H
 #define __ASM_ARC_IRQFLAGS_H
 
-/* vineetg: March 2010 : local_irq_save( ) optimisation
- *  -Remove explicit mov of current status32 into reg, that is not needed
- *  -Use BIC  insn instead of INVERTED + AND
- *  -Conditionally disable interrupts (if they are not enabled, don't disable)
-*/
-
-#include <asm/arcregs.h>
-
-/* status32 Reg bits related to Interrupt Handling */
-#define STATUS_E1_BIT          1       /* Int 1 enable */
-#define STATUS_E2_BIT          2       /* Int 2 enable */
-#define STATUS_A1_BIT          3       /* Int 1 active */
-#define STATUS_A2_BIT          4       /* Int 2 active */
-
-#define STATUS_E1_MASK         (1<<STATUS_E1_BIT)
-#define STATUS_E2_MASK         (1<<STATUS_E2_BIT)
-#define STATUS_A1_MASK         (1<<STATUS_A1_BIT)
-#define STATUS_A2_MASK         (1<<STATUS_A2_BIT)
-
-/* Other Interrupt Handling related Aux regs */
-#define AUX_IRQ_LEV            0x200   /* IRQ Priority: L1 or L2 */
-#define AUX_IRQ_HINT           0x201   /* For generating Soft Interrupts */
-#define AUX_IRQ_LV12           0x43    /* interrupt level register */
-
-#define AUX_IENABLE            0x40c
-#define AUX_ITRIGGER           0x40d
-#define AUX_IPULSE             0x415
-
-#ifndef __ASSEMBLY__
-
-/******************************************************************
- * IRQ Control Macros
- *
- * All of them have "memory" clobber (compiler barrier) which is needed to
- * ensure that LD/ST requiring irq safetly (R-M-W when LLSC is not available)
- * are redone after IRQs are re-enabled (and gcc doesn't reuse stale register)
- *
- * Noted at the time of Abilis Timer List corruption
- *     Orig Bug + Rejected solution    : https://lkml.org/lkml/2013/3/29/67
- *     Reasoning                       : https://lkml.org/lkml/2013/4/8/15
- *
- ******************************************************************/
-
-/*
- * Save IRQ state and disable IRQs
- */
-static inline long arch_local_irq_save(void)
-{
-       unsigned long temp, flags;
-
-       __asm__ __volatile__(
-       "       lr  %1, [status32]      \n"
-       "       bic %0, %1, %2          \n"
-       "       and.f 0, %1, %2 \n"
-       "       flag.nz %0              \n"
-       : "=r"(temp), "=r"(flags)
-       : "n"((STATUS_E1_MASK | STATUS_E2_MASK))
-       : "memory", "cc");
-
-       return flags;
-}
-
-/*
- * restore saved IRQ state
- */
-static inline void arch_local_irq_restore(unsigned long flags)
-{
-
-       __asm__ __volatile__(
-       "       flag %0                 \n"
-       :
-       : "r"(flags)
-       : "memory");
-}
-
-/*
- * Unconditionally Enable IRQs
- */
-extern void arch_local_irq_enable(void);
-
-/*
- * Unconditionally Disable IRQs
- */
-static inline void arch_local_irq_disable(void)
-{
-       unsigned long temp;
-
-       __asm__ __volatile__(
-       "       lr  %0, [status32]      \n"
-       "       and %0, %0, %1          \n"
-       "       flag %0                 \n"
-       : "=&r"(temp)
-       : "n"(~(STATUS_E1_MASK | STATUS_E2_MASK))
-       : "memory");
-}
-
-/*
- * save IRQ state
- */
-static inline long arch_local_save_flags(void)
-{
-       unsigned long temp;
-
-       __asm__ __volatile__(
-       "       lr  %0, [status32]      \n"
-       : "=&r"(temp)
-       :
-       : "memory");
-
-       return temp;
-}
-
-/*
- * Query IRQ state
- */
-static inline int arch_irqs_disabled_flags(unsigned long flags)
-{
-       return !(flags & (STATUS_E1_MASK
-#ifdef CONFIG_ARC_COMPACT_IRQ_LEVELS
-                       | STATUS_E2_MASK
-#endif
-               ));
-}
-
-static inline int arch_irqs_disabled(void)
-{
-       return arch_irqs_disabled_flags(arch_local_save_flags());
-}
-
-#else
-
-#ifdef CONFIG_TRACE_IRQFLAGS
-
-.macro TRACE_ASM_IRQ_DISABLE
-       bl      trace_hardirqs_off
-.endm
-
-.macro TRACE_ASM_IRQ_ENABLE
-       bl      trace_hardirqs_on
-.endm
-
+#ifdef CONFIG_ISA_ARCOMPACT
+#include <asm/irqflags-compact.h>
 #else
-
-.macro TRACE_ASM_IRQ_DISABLE
-.endm
-
-.macro TRACE_ASM_IRQ_ENABLE
-.endm
-
+#include <asm/irqflags-arcv2.h>
 #endif
 
-.macro IRQ_DISABLE  scratch
-       lr      \scratch, [status32]
-       bic     \scratch, \scratch, (STATUS_E1_MASK | STATUS_E2_MASK)
-       flag    \scratch
-       TRACE_ASM_IRQ_DISABLE
-.endm
-
-.macro IRQ_ENABLE  scratch
-       lr      \scratch, [status32]
-       or      \scratch, \scratch, (STATUS_E1_MASK | STATUS_E2_MASK)
-       flag    \scratch
-       TRACE_ASM_IRQ_ENABLE
-.endm
-
-#endif /* __ASSEMBLY__ */
-
 #endif
diff --git a/arch/arc/include/asm/mcip.h b/arch/arc/include/asm/mcip.h
new file mode 100644 (file)
index 0000000..52c11f0
--- /dev/null
@@ -0,0 +1,94 @@
+/*
+ * ARConnect IP Support (Multi core enabler: Cross core IPI, RTC ...)
+ *
+ * Copyright (C) 2014-15 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ASM_MCIP_H
+#define __ASM_MCIP_H
+
+#ifdef CONFIG_ISA_ARCV2
+
+#include <asm/arcregs.h>
+
+#define ARC_REG_MCIP_BCR       0x0d0
+#define ARC_REG_MCIP_CMD       0x600
+#define ARC_REG_MCIP_WDATA     0x601
+#define ARC_REG_MCIP_READBACK  0x602
+
+struct mcip_cmd {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+       unsigned int pad:8, param:16, cmd:8;
+#else
+       unsigned int cmd:8, param:16, pad:8;
+#endif
+
+#define CMD_INTRPT_GENERATE_IRQ                0x01
+#define CMD_INTRPT_GENERATE_ACK                0x02
+#define CMD_INTRPT_READ_STATUS         0x03
+#define CMD_INTRPT_CHECK_SOURCE                0x04
+
+/* Semaphore Commands */
+#define CMD_SEMA_CLAIM_AND_READ                0x11
+#define CMD_SEMA_RELEASE               0x12
+
+#define CMD_DEBUG_SET_MASK             0x34
+#define CMD_DEBUG_SET_SELECT           0x36
+
+#define CMD_GRTC_READ_LO               0x42
+#define CMD_GRTC_READ_HI               0x43
+
+#define CMD_IDU_ENABLE                 0x71
+#define CMD_IDU_DISABLE                        0x72
+#define CMD_IDU_SET_MODE               0x74
+#define CMD_IDU_SET_DEST               0x76
+#define CMD_IDU_SET_MASK               0x7C
+
+#define IDU_M_TRIG_LEVEL               0x0
+#define IDU_M_TRIG_EDGE                        0x1
+
+#define IDU_M_DISTRI_RR                        0x0
+#define IDU_M_DISTRI_DEST              0x2
+};
+
+/*
+ * MCIP programming model
+ *
+ * - Simple commands write {cmd:8,param:16} to MCIP_CMD aux reg
+ *   (param could be irq, common_irq, core_id ...)
+ * - More involved commands set up MCIP_WDATA with cmd-specific data
+ *   before invoking the simple command
+ */
+static inline void __mcip_cmd(unsigned int cmd, unsigned int param)
+{
+       struct mcip_cmd buf;
+
+       buf.pad = 0;
+       buf.cmd = cmd;
+       buf.param = param;
+
+       WRITE_AUX(ARC_REG_MCIP_CMD, buf);
+}
+
+/*
+ * Setup additional data for a cmd
+ * Callers need to lock to ensure atomicity
+ */
+static inline void __mcip_cmd_data(unsigned int cmd, unsigned int param,
+                                  unsigned int data)
+{
+       write_aux_reg(ARC_REG_MCIP_WDATA, data);
+
+       __mcip_cmd(cmd, param);
+}
+
+extern void mcip_init_early_smp(void);
+extern void mcip_init_smp(unsigned int cpu);
+
+#endif
+
+#endif
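Usage sketch for the helpers above (illustrative only; ipi_send and idu_set_rr are invented wrappers, and callers of __mcip_cmd_data() are assumed to hold a suitable lock, as the comment requires).

/* A "simple" command: only cmd + param are needed */
static void ipi_send(int cpu)
{
        __mcip_cmd(CMD_INTRPT_GENERATE_IRQ, cpu);
}

/* A command with extra data: WDATA is written first, then the command.
 * Here: common irq 'irq' set to level trigger, round-robin distribution. */
static void idu_set_rr(unsigned int irq)
{
        __mcip_cmd_data(CMD_IDU_SET_MODE, irq,
                        IDU_M_TRIG_LEVEL | IDU_M_DISTRI_RR);
}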
index 8c84ae98c33767937ce52f33c8cab6daddabe278..0f9c3eb5327e4494f4a310e62e194c4457c08bea 100644 (file)
 #define CONFIG_ARC_MMU_VER 2
 #elif defined(CONFIG_ARC_MMU_V3)
 #define CONFIG_ARC_MMU_VER 3
+#elif defined(CONFIG_ARC_MMU_V4)
+#define CONFIG_ARC_MMU_VER 4
 #endif
 
 /* MMU Management regs */
 #define ARC_REG_MMU_BCR                0x06f
+#if (CONFIG_ARC_MMU_VER < 4)
 #define ARC_REG_TLBPD0         0x405
 #define ARC_REG_TLBPD1         0x406
 #define ARC_REG_TLBINDEX       0x407
 #define ARC_REG_TLBCOMMAND     0x408
 #define ARC_REG_PID            0x409
 #define ARC_REG_SCRATCH_DATA0  0x418
+#else
+#define ARC_REG_TLBPD0         0x460
+#define ARC_REG_TLBPD1         0x461
+#define ARC_REG_TLBINDEX       0x464
+#define ARC_REG_TLBCOMMAND     0x465
+#define ARC_REG_PID            0x468
+#define ARC_REG_SCRATCH_DATA0  0x46c
+#endif
 
 /* Bits in MMU PID register */
-#define MMU_ENABLE             (1 << 31)       /* Enable MMU for process */
+#define __TLB_ENABLE           (1 << 31)
+#define __PROG_ENABLE          (1 << 30)
+#define MMU_ENABLE             (__TLB_ENABLE | __PROG_ENABLE)
 
 /* Error code if probe fails */
 #define TLB_LKUP_ERR           0x80000000
 
+#if (CONFIG_ARC_MMU_VER < 4)
 #define TLB_DUP_ERR    (TLB_LKUP_ERR | 0x00000001)
+#else
+#define TLB_DUP_ERR    (TLB_LKUP_ERR | 0x40000000)
+#endif
 
 /* TLB Commands */
 #define TLBWrite    0x1
 #define TLBIVUTLB   0x6                /* explicitly inv uTLBs */
 #endif
 
+#if (CONFIG_ARC_MMU_VER >= 4)
+#define TLBInsertEntry 0x7
+#define TLBDeleteEntry 0x8
+#endif
+
 #ifndef __ASSEMBLY__
 
 typedef struct {
index 9615fe1701c60af212b1d20e0d626120956462e1..1281718802f7c8e4d3f71bdf50b3affd57fd66d6 100644 (file)
 #define _PAGE_READ          (1<<3)     /* Page has user read perm (H) */
 #define _PAGE_ACCESSED      (1<<4)     /* Page is accessed (S) */
 #define _PAGE_MODIFIED      (1<<5)     /* Page modified (dirty) (S) */
+
+#if (CONFIG_ARC_MMU_VER >= 4)
+#define _PAGE_WTHRU         (1<<7)     /* Page cache mode write-thru (H) */
+#endif
+
 #define _PAGE_GLOBAL        (1<<8)     /* Page is global (H) */
 #define _PAGE_PRESENT       (1<<9)     /* TLB entry is valid (H) */
+
+#if (CONFIG_ARC_MMU_VER >= 4)
+#define _PAGE_SZ            (1<<10)    /* Page Size indicator (H) */
+#endif
+
 #define _PAGE_SHARED_CODE   (1<<11)    /* Shared Code page with cmn vaddr
                                           usable for shared TLB entries (H) */
 #endif
index 52312cb5dbe21490b48e21343ab8f82b7eecfc0e..ee682d8e0213c5c6c2fac2d70f39dff23b15854a 100644 (file)
@@ -77,7 +77,7 @@ struct task_struct;
  */
 #define TSK_K_ESP(tsk)         (tsk->thread.ksp)
 
-#define TSK_K_REG(tsk, off)    (*((unsigned int *)(TSK_K_ESP(tsk) + \
+#define TSK_K_REG(tsk, off)    (*((unsigned long *)(TSK_K_ESP(tsk) + \
                                        sizeof(struct callee_regs) + off)))
 
 #define TSK_K_BLINK(tsk)       TSK_K_REG(tsk, 4)
@@ -100,29 +100,26 @@ extern unsigned int get_wchan(struct task_struct *p);
 
 #endif /* !__ASSEMBLY__ */
 
-/* Kernels Virtual memory area.
- * Unlike other architectures(MIPS, sh, cris ) ARC 700 does not have a
- * "kernel translated" region (like KSEG2 in MIPS). So we use a upper part
- * of the translated bottom 2GB for kernel virtual memory and protect
- * these pages from user accesses by disabling Ru, Eu and Wu.
+/*
+ * System Memory Map on ARC
+ *
+ * ---------------------------- (lower 2G, Translated) -------------------------
+ * 0x0000_0000         0x5FFF_FFFF     (user vaddr: TASK_SIZE)
+ * 0x6000_0000         0x6FFF_FFFF     (reserved gutter between U/K)
+ * 0x7000_0000         0x7FFF_FFFF     (kvaddr: vmalloc/modules/pkmap..)
+ *
+ * PAGE_OFFSET ---------------- (Upper 2G, Untranslated) -----------------------
+ * 0x8000_0000         0xBFFF_FFFF     (kernel direct mapped)
+ * 0xC000_0000         0xFFFF_FFFF     (peripheral uncached space)
+ * -----------------------------------------------------------------------------
  */
-#define VMALLOC_SIZE   (0x10000000)    /* 256M */
-#define VMALLOC_START  (PAGE_OFFSET - VMALLOC_SIZE)
-#define VMALLOC_END    (PAGE_OFFSET)
+#define VMALLOC_START  0x70000000
+#define VMALLOC_SIZE   (PAGE_OFFSET - VMALLOC_START)
+#define VMALLOC_END    (VMALLOC_START + VMALLOC_SIZE)
 
-/* Most of the architectures seem to be keeping some kind of padding between
- * userspace TASK_SIZE and PAGE_OFFSET. i.e TASK_SIZE != PAGE_OFFSET.
- */
 #define USER_KERNEL_GUTTER    0x10000000
 
-/* User address space:
- * On ARC700, CPU allows the entire lower half of 32 bit address space to be
- * translated. Thus potentially 2G (0:0x7FFF_FFFF) could be User vaddr space.
- * However we steal 256M for kernel addr (0x7000_0000:0x7FFF_FFFF) and another
- * 256M (0x6000_0000:0x6FFF_FFFF) is gutter between user/kernel spaces
- * Thus total User vaddr space is (0:0x5FFF_FFFF)
- */
-#define TASK_SIZE      (PAGE_OFFSET - VMALLOC_SIZE - USER_KERNEL_GUTTER)
+#define TASK_SIZE      (VMALLOC_START - USER_KERNEL_GUTTER)
 
 #define STACK_TOP       TASK_SIZE
 #define STACK_TOP_MAX   STACK_TOP
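A quick editorial sanity check of the new constants (assuming the default PAGE_OFFSET of 0x8000_0000; check_mem_map is an invented helper, not part of the commit).

/*
 * VMALLOC_SIZE = PAGE_OFFSET - VMALLOC_START        = 0x1000_0000 (256M)
 * TASK_SIZE    = VMALLOC_START - USER_KERNEL_GUTTER = 0x6000_0000 (user 0..1.5G)
 */
static inline void check_mem_map(void)
{
        BUILD_BUG_ON(VMALLOC_SIZE != 0x10000000);
        BUILD_BUG_ON(TASK_SIZE    != 0x60000000);
}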
index 1bfeec2c0558c2f6f91142105bee0c6ff70c7a75..91755972b9a25222c37a36e6b76dfd758cdbe771 100644 (file)
@@ -16,6 +16,7 @@
 
 /* THE pt_regs: Defines how regs are saved during entry into kernel */
 
+#ifdef CONFIG_ISA_ARCOMPACT
 struct pt_regs {
 
        /* Real registers */
@@ -56,6 +57,48 @@ struct pt_regs {
 
        long user_r25;
 };
+#else
+
+struct pt_regs {
+
+       long orig_r0;
+
+       union {
+               struct {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+                       unsigned long state:8, ecr_vec:8,
+                                     ecr_cause:8, ecr_param:8;
+#else
+                       unsigned long ecr_param:8, ecr_cause:8,
+                                     ecr_vec:8, state:8;
+#endif
+               };
+               unsigned long event;
+       };
+
+       long bta;       /* bta_l1, bta_l2, erbta */
+
+       long user_r25;
+
+       long r26;       /* gp */
+       long fp;
+       long sp;        /* user/kernel sp depending on where we came from  */
+
+       long r12;
+
+       /*------- Below list auto saved by h/w -----------*/
+       long r0, r1, r2, r3, r4, r5, r6, r7, r8, r9, r10, r11;
+
+       long blink;
+       long lp_end, lp_start, lp_count;
+
+       long ei, ldi, jli;
+
+       long ret;
+       long status32;
+};
+
+#endif
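A small sketch (editorial; show_ecr is an invented helper) of how the anonymous union above lets handlers look at the same word either as the raw event or as decoded ECR fields.

static void show_ecr(struct pt_regs *regs)
{
        pr_info("ECR 0x%08lx: vec %u cause %u param %u\n",
                regs->event, (unsigned int)regs->ecr_vec,
                (unsigned int)regs->ecr_cause, (unsigned int)regs->ecr_param);
}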
 
 /* Callee saved registers - need to be saved only when you are scheduled out */
 
index b6a8c2dfbe6e42cd51def893784f0780bc67264e..e1651df6a93d5bc8ab0af3a833c7c6ffd23acacc 100644 (file)
@@ -22,24 +22,46 @@ static inline void arch_spin_lock(arch_spinlock_t *lock)
 {
        unsigned int tmp = __ARCH_SPIN_LOCK_LOCKED__;
 
+       /*
+        * This smp_mb() is technically superfluous; we only need the one
+        * after the lock to provide the ACQUIRE semantics.
+        * However, doing the "right" thing was regressing hackbench,
+        * so keep this, pending further investigation.
+        */
+       smp_mb();
+
        __asm__ __volatile__(
        "1:     ex  %0, [%1]            \n"
        "       breq  %0, %2, 1b        \n"
        : "+&r" (tmp)
        : "r"(&(lock->slock)), "ir"(__ARCH_SPIN_LOCK_LOCKED__)
        : "memory");
+
+       /*
+        * ACQUIRE barrier to ensure loads/stores after taking the lock
+        * don't "bleed up" out of the critical section (leak-in is allowed)
+        * http://www.spinics.net/lists/kernel/msg2010409.html
+        *
+        * ARCv2 only has load-load, store-store and all-all barriers,
+        * thus we need the full all-all barrier
+        */
+       smp_mb();
 }
 
 static inline int arch_spin_trylock(arch_spinlock_t *lock)
 {
        unsigned int tmp = __ARCH_SPIN_LOCK_LOCKED__;
 
+       smp_mb();
+
        __asm__ __volatile__(
        "1:     ex  %0, [%1]            \n"
        : "+r" (tmp)
        : "r"(&(lock->slock))
        : "memory");
 
+       smp_mb();
+
        return (tmp == __ARCH_SPIN_LOCK_UNLOCKED__);
 }
 
@@ -47,12 +69,22 @@ static inline void arch_spin_unlock(arch_spinlock_t *lock)
 {
        unsigned int tmp = __ARCH_SPIN_LOCK_UNLOCKED__;
 
+       /*
+        * RELEASE barrier: given the instructions available on ARCv2, a full
+        * barrier is the only option
+        */
+       smp_mb();
+
        __asm__ __volatile__(
        "       ex  %0, [%1]            \n"
        : "+r" (tmp)
        : "r"(&(lock->slock))
        : "memory");
 
+       /*
+        * superfluous, but keeping for now - see pairing version in
+        * arch_spin_lock above
+        */
        smp_mb();
 }
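To make the barrier reasoning above concrete, an editorial sketch (update_under_lock and its arguments are invented): the smp_mb() after the EX that takes the lock (ACQUIRE) keeps the store from moving before the lock is actually held, and the smp_mb() before the EX in unlock (RELEASE) makes it visible before the lock is dropped.

static void update_under_lock(arch_spinlock_t *lock, int *shared)
{
        arch_spin_lock(lock);   /* EX + smp_mb(): ACQUIRE */
        *shared = 42;           /* stays inside the critical section */
        arch_spin_unlock(lock); /* smp_mb() + EX: RELEASE */
}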
 
index aca0d5a45c7b84c2bdc0f95da1549ad7d3f1af68..3af67455659af49d9f9d53b67ac26024cf3b77b9 100644 (file)
@@ -25,6 +25,7 @@
 #endif
 
 #define THREAD_SIZE     (PAGE_SIZE << THREAD_SIZE_ORDER)
+#define THREAD_SHIFT   (PAGE_SHIFT + THREAD_SIZE_ORDER)
 
 #ifndef __ASSEMBLY__
 
index 30c9baffa96f1f3a5cab5d6ec6fe83b9f4e86318..d1da6032b715a7fea35d71fcedeeeb8cdfe590e4 100644 (file)
@@ -659,31 +659,30 @@ static inline unsigned long __arc_clear_user(void __user *to, unsigned long n)
 static inline long
 __arc_strncpy_from_user(char *dst, const char __user *src, long count)
 {
-       long res = count;
+       long res = 0;
        char val;
-       unsigned int hw_count;
 
        if (count == 0)
                return 0;
 
        __asm__ __volatile__(
-       "       lp 2f           \n"
+       "       lp      3f                      \n"
        "1:     ldb.ab  %3, [%2, 1]             \n"
-       "       breq.d  %3, 0, 2f               \n"
+       "       breq.d  %3, 0, 3f               \n"
        "       stb.ab  %3, [%1, 1]             \n"
-       "2:     sub %0, %6, %4                  \n"
-       "3:     ;nop                            \n"
+       "       add     %0, %0, 1       # Num of NON NULL bytes copied  \n"
+       "3:                                                             \n"
        "       .section .fixup, \"ax\"         \n"
        "       .align 4                        \n"
-       "4:     mov %0, %5                      \n"
+       "4:     mov %0, %4              # sets @res as -EFAULT  \n"
        "       j   3b                          \n"
        "       .previous                       \n"
        "       .section __ex_table, \"a\"      \n"
        "       .align 4                        \n"
        "       .word   1b, 4b                  \n"
        "       .previous                       \n"
-       : "=r"(res), "+r"(dst), "+r"(src), "=&r"(val), "=l"(hw_count)
-       : "g"(-EFAULT), "ir"(count), "4"(count) /* this "4" seeds lp_count */
+       : "+r"(res), "+r"(dst), "+r"(src), "=r"(val)
+       : "g"(-EFAULT), "l"(count)
        : "memory");
 
        return res;
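A hedged C restatement of the semantics the rewritten asm now implements (the real code is the hand-written loop above plus the .fixup path that turns a faulting user load into -EFAULT; strncpy_from_user_semantics is an invented name).

static long strncpy_from_user_semantics(char *dst, const char *src, long count)
{
        long res = 0;

        while (count-- > 0) {
                char c = *src++;        /* user load; a fault yields -EFAULT */
                *dst++ = c;             /* the NUL byte is copied too ...    */
                if (c == '\0')
                        break;          /* ... but not counted               */
                res++;
        }
        return res;                     /* number of non-NUL bytes copied */
}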
index e5d41e08240c561f4299d2b3ce6c6e30d7d3a236..9d129a2a1351951465b22fd020f568abe1843b7a 100644 (file)
@@ -30,7 +30,7 @@
 #define PAGE_OFFSET    (0x80000000)
 #else
 #define PAGE_SIZE      (1UL << PAGE_SHIFT)     /* Default 8K */
-#define PAGE_OFFSET    (0x80000000UL)  /* Kernel starts at 2G onwards */
+#define PAGE_OFFSET    (0x80000000UL)          /* Kernel starts at 2G onwards */
 #endif
 
 #define PAGE_MASK      (~(PAGE_SIZE-1))
index 113f2033da9f096a45588a40b00193c8305cf693..e7f3625a19b51dc5f117e13fc71b2930bbb4b52d 100644 (file)
@@ -8,12 +8,14 @@
 # Pass UTS_MACHINE for user_regset definition
 CFLAGS_ptrace.o                += -DUTS_MACHINE='"$(UTS_MACHINE)"'
 
-obj-y  := arcksyms.o setup.o irq.o time.o reset.o ptrace.o entry.o process.o
+obj-y  := arcksyms.o setup.o irq.o time.o reset.o ptrace.o process.o devtree.o
 obj-y  += signal.o traps.o sys.o troubleshoot.o stacktrace.o disasm.o clk.o
-obj-y  += devtree.o
+obj-$(CONFIG_ISA_ARCOMPACT)            += entry-compact.o intc-compact.o
+obj-$(CONFIG_ISA_ARCV2)                        += entry-arcv2.o intc-arcv2.o
 
 obj-$(CONFIG_MODULES)                  += arcksyms.o module.o
 obj-$(CONFIG_SMP)                      += smp.o
+obj-$(CONFIG_ARC_MCIP)                 += mcip.o
 obj-$(CONFIG_ARC_DW2_UNWIND)           += unwind.o
 obj-$(CONFIG_KPROBES)                  += kprobes.o
 obj-$(CONFIG_ARC_EMUL_UNALIGNED)       += unaligned.o
index 6c3aa0edb9b5bc1914d006b2a22b766f71adba9c..ecaf34e9235c20dba4729d79321bef0d3b462a37 100644 (file)
@@ -37,6 +37,8 @@ int main(void)
 
        DEFINE(TASK_ACT_MM, offsetof(struct task_struct, active_mm));
        DEFINE(TASK_TGID, offsetof(struct task_struct, tgid));
+       DEFINE(TASK_PID, offsetof(struct task_struct, pid));
+       DEFINE(TASK_COMM, offsetof(struct task_struct, comm));
 
        DEFINE(MM_CTXT, offsetof(struct mm_struct, context));
        DEFINE(MM_PGD, offsetof(struct mm_struct, pgd));
@@ -56,8 +58,11 @@ int main(void)
        DEFINE(PT_r5, offsetof(struct pt_regs, r5));
        DEFINE(PT_r6, offsetof(struct pt_regs, r6));
        DEFINE(PT_r7, offsetof(struct pt_regs, r7));
+       DEFINE(PT_ret, offsetof(struct pt_regs, ret));
 
        DEFINE(SZ_CALLEE_REGS, sizeof(struct callee_regs));
        DEFINE(SZ_PT_REGS, sizeof(struct pt_regs));
+       DEFINE(PT_user_r25, offsetof(struct pt_regs, user_r25));
+
        return 0;
 }
index e32b54abff51fdc6b16894ce466fd16d2063bd7f..7e844fd8213fda0c0e41f6dc436c8a348f2afd52 100644 (file)
@@ -32,6 +32,8 @@ static void __init arc_set_early_base_baud(unsigned long dt_root)
 
        if (of_flat_dt_is_compatible(dt_root, "abilis,arc-tb10x"))
                arc_base_baud = core_clk/3;
+       else if (of_flat_dt_is_compatible(dt_root, "snps,arc-sdp"))
+               arc_base_baud = 33333333;       /* Fixed 33MHz clk (AXS10x) */
        else
                arc_base_baud = core_clk;
 }
diff --git a/arch/arc/kernel/entry-arcv2.S b/arch/arc/kernel/entry-arcv2.S
new file mode 100644 (file)
index 0000000..bd7105d
--- /dev/null
@@ -0,0 +1,239 @@
+/*
+ * ARCv2 ISA based core Low Level Intr/Traps/Exceptions(non-TLB) Handling
+ *
+ * Copyright (C) 2013 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/linkage.h>   /* ARC_{ENTRY,EXIT} */
+#include <asm/entry.h>       /* SAVE_ALL_{INT1,INT2,TRAP...} */
+#include <asm/errno.h>
+#include <asm/arcregs.h>
+#include <asm/irqflags.h>
+
+       .cpu HS
+
+#define VECTOR .word
+
+;############################ Vector Table #################################
+
+       .section .vector,"a",@progbits
+       .align 4
+
+# Initial 16 slots are Exception Vectors
+VECTOR stext                   ; Restart Vector (jump to entry point)
+VECTOR mem_service             ; Mem exception
+VECTOR instr_service           ; Instrn Error
+VECTOR EV_MachineCheck         ; Fatal Machine check
+VECTOR EV_TLBMissI             ; Instruction TLB miss
+VECTOR EV_TLBMissD             ; Data TLB miss
+VECTOR EV_TLBProtV             ; Protection Violation
+VECTOR EV_PrivilegeV           ; Privilege Violation
+VECTOR EV_SWI                  ; Software Breakpoint
+VECTOR EV_Trap                 ; Trap exception
+VECTOR EV_Extension            ; Extn Instruction Exception
+VECTOR EV_DivZero              ; Divide by Zero
+VECTOR EV_DCError              ; Data Cache Error
+VECTOR EV_Misaligned           ; Misaligned Data Access
+VECTOR reserved                ; Reserved slots
+VECTOR reserved                ; Reserved slots
+
+# Begin Interrupt Vectors
+VECTOR handle_interrupt        ; (16) Timer0
+VECTOR handle_interrupt        ; unused (Timer1)
+VECTOR handle_interrupt        ; unused (WDT)
+VECTOR handle_interrupt        ; (19) ICI (inter core interrupt)
+VECTOR handle_interrupt
+VECTOR handle_interrupt
+VECTOR handle_interrupt
+VECTOR handle_interrupt        ; (23) End of fixed IRQs
+
+.rept CONFIG_ARC_NUMBER_OF_INTERRUPTS - 8
+       VECTOR  handle_interrupt
+.endr
+
+       .section .text, "ax",@progbits
+
+res_service:           ; processor restart
+       flag    0x1     ; not implemented
+       nop
+       nop
+
+reserved:              ; processor restart
+       rtie            ; jump to processor initializations
+
+;##################### Interrupt Handling ##############################
+
+ENTRY(handle_interrupt)
+
+       INTERRUPT_PROLOGUE  irq
+
+       clri            ; To make status32.IE agree with CPU internal state
+
+       lr  r0, [ICAUSE]
+
+       mov   blink, ret_from_exception
+
+       b.d  arch_do_IRQ
+       mov r1, sp
+
+END(handle_interrupt)
+
+;################### Non TLB Exception Handling #############################
+
+ENTRY(EV_SWI)
+       flag 1
+END(EV_SWI)
+
+ENTRY(EV_DivZero)
+       flag 1
+END(EV_DivZero)
+
+ENTRY(EV_DCError)
+       flag 1
+END(EV_DCError)
+
+ENTRY(EV_Misaligned)
+
+       EXCEPTION_PROLOGUE
+
+       lr  r0, [efa]   ; Faulting Data address
+       mov r1, sp
+
+       FAKE_RET_FROM_EXCPN
+
+       SAVE_CALLEE_SAVED_USER
+       mov r2, sp              ; callee_regs
+
+       bl  do_misaligned_access
+
+       ; TBD: optimize - do this only if a callee reg was involved
+       ; either a dst of emulated LD/ST or src with address-writeback
+       RESTORE_CALLEE_SAVED_USER
+
+       b   ret_from_exception
+END(EV_Misaligned)
+
+; ---------------------------------------------
+; Protection Violation Exception Handler
+; ---------------------------------------------
+
+ENTRY(EV_TLBProtV)
+
+       EXCEPTION_PROLOGUE
+
+       lr  r0, [efa]   ; Faulting Data address
+       mov r1, sp      ; pt_regs
+
+       FAKE_RET_FROM_EXCPN
+
+       mov blink, ret_from_exception
+       b   do_page_fault
+
+END(EV_TLBProtV)
+
+; From the Linux standpoint, a Slow Path I/D TLB Miss is the same as ProtV, as they
+; need to call do_page_fault().
+; ECR in pt_regs provides whether access was R/W/X
+
+.global        call_do_page_fault
+.set call_do_page_fault, EV_TLBProtV
+
+;############# Common Handlers for ARCompact and ARCv2 ##############
+
+#include "entry.S"
+
+;############# Return from Intr/Excp/Trap (ARCv2 ISA Specifics) ##############
+;
+; Restore the saved sys context (common exit-path for EXCPN/IRQ/Trap)
+; An IRQ should definitely not happen between now and rtie;
+; both entry points to here already disable interrupts
+
+.Lrestore_regs:
+
+       ld      r0, [sp, PT_status32]   ; U/K mode at time of entry
+       lr      r10, [AUX_IRQ_ACT]
+
+       bmsk    r11, r10, 15    ; AUX_IRQ_ACT.ACTIVE
+       breq    r11, 0, .Lexcept_ret    ; No intr active, ret from Exception
+
+;####### Return from Intr #######
+
+debug_marker_l1:
+       bbit1.nt r0, STATUS_DE_BIT, .Lintr_ret_to_delay_slot
+
+.Lisr_ret_fast_path:
+       ; Handle special case #1: (Entry via Exception, Return via IRQ)
+       ;
+       ; Exception in U mode, preempted in kernel, Intr taken (K mode), orig
+       ; task now returning to U mode (riding the Intr)
+       ; AUX_IRQ_ACTIVE won't have U bit set (since intr in K mode), hence SP
+       ; won't be switched to correct U mode value (from AUX_SP)
+       ; So force AUX_IRQ_ACT.U for such a case
+
+       btst    r0, STATUS_U_BIT                ; Z flag set if K (Z clear for U)
+       bset.nz r11, r11, AUX_IRQ_ACT_BIT_U     ; NZ means U
+       sr      r11, [AUX_IRQ_ACT]
+
+       INTERRUPT_EPILOGUE  irq
+       rtie
+
+;####### Return from Exception / pure kernel mode #######
+
+.Lexcept_ret:  ; Expects r0 has PT_status32
+
+debug_marker_syscall:
+       EXCEPTION_EPILOGUE
+       rtie
+
+;####### Return from Intr to insn in delay slot #######
+
+; Handle special case #2: (Entry via Exception in Delay Slot, Return via IRQ)
+;
+; Intr returning to a Delay Slot (DS) insn
+; (since IRQ NOT allowed in DS in ARCv2, this can only happen if orig
+; entry was via Exception in DS which got preempted in kernel).
+;
+; IRQ RTIE won't reliably restore DE bit and/or BTA, needs handling
+.Lintr_ret_to_delay_slot:
+debug_marker_ds:
+
+       ld      r2, [@intr_to_DE_cnt]
+       add     r2, r2, 1
+       st      r2, [@intr_to_DE_cnt]
+
+       ld      r2, [sp, PT_ret]
+       ld      r3, [sp, PT_status32]
+
+       bic     r0, r3, STATUS_U_MASK|STATUS_DE_MASK|STATUS_IE_MASK|STATUS_L_MASK
+       st      r0, [sp, PT_status32]
+
+       mov     r1, .Lintr_ret_to_delay_slot_2
+       st      r1, [sp, PT_ret]
+
+       st      r2, [sp, 0]
+       st      r3, [sp, 4]
+
+       b       .Lisr_ret_fast_path
+
+.Lintr_ret_to_delay_slot_2:
+       sub     sp, sp, SZ_PT_REGS
+       st      r9, [sp, -4]
+
+       ld      r9, [sp, 0]
+       sr      r9, [eret]
+
+       ld      r9, [sp, 4]
+       sr      r9, [erstatus]
+
+       ld      r9, [sp, 8]
+       sr      r9, [erbta]
+
+       ld      r9, [sp, -4]
+       add     sp, sp, SZ_PT_REGS
+       rtie
+
+END(ret_from_exception)
diff --git a/arch/arc/kernel/entry-compact.S b/arch/arc/kernel/entry-compact.S
new file mode 100644 (file)
index 0000000..15d457b
--- /dev/null
@@ -0,0 +1,393 @@
+/*
+ * Low Level Interrupts/Traps/Exceptions(non-TLB) Handling for ARCompact ISA
+ *
+ * Copyright (C) 2014-15 Synopsys, Inc. (www.synopsys.com)
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * vineetg: May 2011
+ *  -Userspace unaligned access emulation
+ *
+ * vineetg: Feb 2011 (ptrace low level code fixes)
+ *  -traced syscall return code (r0) was not saved into pt_regs for restoring
+ *   into user reg-file when traced task rets to user space.
+ *  -syscalls needing arch-wrappers (mainly for passing sp as pt_regs)
+ *   were not invoking post-syscall trace hook (jumping directly into
+ *   ret_from_system_call)
+ *
+ * vineetg: Nov 2010:
+ *  -Vector table jumps (@8 bytes) converted into branches (@4 bytes)
+ *  -To maintain the slot size of 8 bytes/vector, added nop, which is
+ *   not executed at runtime.
+ *
+ * vineetg: Nov 2009 (Everything needed for TIF_RESTORE_SIGMASK)
+ *  -do_signal() invoked upon TIF_RESTORE_SIGMASK as well
+ *  -Wrappers for sys_{,rt_}sigsuspend() no longer needed as they don't
+ *   need ptregs anymore
+ *
+ * Vineetg: Oct 2009
+ *  -In a rare scenario, Process gets a Priv-V exception and gets scheduled
+ *   out. Since we don't do FAKE RTIE for Priv-V, CPU exception state remains
+ *   active (AE bit enabled).  This causes a double fault for a subseq valid
+ *   exception. Thus FAKE RTIE needed in low level Priv-Violation handler.
+ *   Instr Error could also cause similar scenario, so same there as well.
+ *
+ * Vineetg: March 2009 (Supporting 2 levels of Interrupts)
+ *
+ * Vineetg: Aug 28th 2008: Bug #94984
+ *  -Zero Overhead Loop Context shd be cleared when entering IRQ/EXcp/Trap
+ *   Normally CPU does this automatically, however when doing FAKE rtie,
+ *   we need to explicitly do this. The problem in macros
+ *   FAKE_RET_FROM_EXCPN and FAKE_RET_FROM_EXCPN_LOCK_IRQ was that this bit
+ *   was being "CLEARED" rather than "SET". Since it is Loop INHIBIT Bit,
+ *   setting it and not clearing it clears ZOL context
+ *
+ * Vineetg: May 16th, 2008
+ *  - r25 now contains the Current Task when in kernel
+ *
+ * Vineetg: Dec 22, 2007
+ *    Minor Surgery of Low Level ISR to make it SMP safe
+ *    - MMU_SCRATCH0 Reg used for freeing up r9 in Level 1 ISR
+ *    - _current_task is made an array of NR_CPUS
+ *    - Access of _current_task wrapped inside a macro so that if hardware
+ *       team agrees for a dedicated reg, no other code is touched
+ *
+ * Amit Bhor, Rahul Trivedi, Kanika Nema, Sameer Dhavale : Codito Tech 2004
+ */
+
+#include <linux/errno.h>
+#include <linux/linkage.h>     /* {ENTRY,EXIT} */
+#include <asm/entry.h>
+#include <asm/irqflags.h>
+
+       .cpu A7
+
+;############################ Vector Table #################################
+
+.macro VECTOR  lbl
+#if 1   /* Just in case, build breaks */
+       j   \lbl
+#else
+       b   \lbl
+       nop
+#endif
+.endm
+
+       .section .vector, "ax",@progbits
+       .align 4
+
+/* Each entry in the vector table must occupy 2 words. Since it is a jump
+ * across sections (.vector to .text) we are guaranteed that 'j somewhere'
+ * will use the 'j limm' form of the instruction as long as somewhere is in
+ * a section other than .vector.
+ */
+
+; ********* Critical System Events **********************
+VECTOR   res_service             ; 0x0, Restart Vector  (0x0)
+VECTOR   mem_service             ; 0x8, Mem exception   (0x1)
+VECTOR   instr_service           ; 0x10, Instrn Error   (0x2)
+
+; ******************** Device ISRs **********************
+#ifdef CONFIG_ARC_IRQ3_LV2
+VECTOR   handle_interrupt_level2
+#else
+VECTOR   handle_interrupt_level1
+#endif
+
+VECTOR   handle_interrupt_level1
+
+#ifdef CONFIG_ARC_IRQ5_LV2
+VECTOR   handle_interrupt_level2
+#else
+VECTOR   handle_interrupt_level1
+#endif
+
+#ifdef CONFIG_ARC_IRQ6_LV2
+VECTOR   handle_interrupt_level2
+#else
+VECTOR   handle_interrupt_level1
+#endif
+
+.rept   25
+VECTOR   handle_interrupt_level1 ; Other devices
+.endr
+
+/* FOR ARC600: timer = 0x3, uart = 0x8, emac = 0x10 */
+
+; ******************** Exceptions **********************
+VECTOR   EV_MachineCheck         ; 0x100, Fatal Machine check   (0x20)
+VECTOR   EV_TLBMissI             ; 0x108, Instruction TLB miss  (0x21)
+VECTOR   EV_TLBMissD             ; 0x110, Data TLB miss         (0x22)
+VECTOR   EV_TLBProtV             ; 0x118, Protection Violation  (0x23)
+                                ;         or Misaligned Access
+VECTOR   EV_PrivilegeV           ; 0x120, Privilege Violation   (0x24)
+VECTOR   EV_Trap                 ; 0x128, Trap exception        (0x25)
+VECTOR   EV_Extension            ; 0x130, Extn Instruction Excp (0x26)
+
+.rept   24
+VECTOR   reserved                ; Reserved Exceptions
+.endr
+
+
+;##################### Scratch Mem for IRQ stack switching #############
+
+ARCFP_DATA int1_saved_reg
+       .align 32
+       .type   int1_saved_reg, @object
+       .size   int1_saved_reg, 4
+int1_saved_reg:
+       .zero 4
+
+/* Each Interrupt level needs its own scratch */
+#ifdef CONFIG_ARC_COMPACT_IRQ_LEVELS
+
+ARCFP_DATA int2_saved_reg
+       .type   int2_saved_reg, @object
+       .size   int2_saved_reg, 4
+int2_saved_reg:
+       .zero 4
+
+#endif
+
+; ---------------------------------------------
+       .section .text, "ax",@progbits
+
+res_service:           ; processor restart
+       flag    0x1     ; not implemented
+       nop
+       nop
+
+reserved:              ; processor restart
+       rtie            ; jump to processor initializations
+
+;##################### Interrupt Handling ##############################
+
+#ifdef CONFIG_ARC_COMPACT_IRQ_LEVELS
+; ---------------------------------------------
+;  Level 2 ISR: Can interrupt a Level 1 ISR
+; ---------------------------------------------
+ENTRY(handle_interrupt_level2)
+
+       INTERRUPT_PROLOGUE 2
+
+       ;------------------------------------------------------
+       ; if L2 IRQ interrupted a L1 ISR, disable preemption
+       ;------------------------------------------------------
+
+       ld r9, [sp, PT_status32]        ; get status32_l2 (saved in pt_regs)
+       bbit0 r9, STATUS_A1_BIT, 1f     ; L1 not active when L2 IRQ, so normal
+
+       ; A1 is set in status32_l2
+       ; bump thread_info->preempt_count (Disable preemption)
+       GET_CURR_THR_INFO_FROM_SP   r10
+       ld      r9, [r10, THREAD_INFO_PREEMPT_COUNT]
+       add     r9, r9, 1
+       st      r9, [r10, THREAD_INFO_PREEMPT_COUNT]
+
+1:
+       ;------------------------------------------------------
+       ; setup params for Linux common ISR and invoke it
+       ;------------------------------------------------------
+       lr  r0, [icause2]
+       and r0, r0, 0x1f
+
+       bl.d  @arch_do_IRQ
+       mov r1, sp
+
+       mov r8,0x2
+       sr r8, [AUX_IRQ_LV12]       ; clear bit in Sticky Status Reg
+
+       b   ret_from_exception
+
+END(handle_interrupt_level2)
+
+#endif
+
+; ---------------------------------------------
+;  Level 1 ISR
+; ---------------------------------------------
+ENTRY(handle_interrupt_level1)
+
+       INTERRUPT_PROLOGUE 1
+
+       lr  r0, [icause1]
+       and r0, r0, 0x1f
+
+#ifdef CONFIG_TRACE_IRQFLAGS
+       ; icause1 needs to be read early, before calling tracing, which
+       ; can clobber scratch regs, hence use of stack to stash it
+       push r0
+       TRACE_ASM_IRQ_DISABLE
+       pop  r0
+#endif
+
+       bl.d  @arch_do_IRQ
+       mov r1, sp
+
+       mov r8,0x1
+       sr r8, [AUX_IRQ_LV12]       ; clear bit in Sticky Status Reg
+
+       b   ret_from_exception
+END(handle_interrupt_level1)
+
+;################### Non TLB Exception Handling #############################
+
+; ---------------------------------------------
+; Protection Violation Exception Handler
+; ---------------------------------------------
+
+ENTRY(EV_TLBProtV)
+
+       EXCEPTION_PROLOGUE
+
+       lr  r2, [ecr]
+       lr  r0, [efa]   ; Faulting Data address (not part of pt_regs saved above)
+
+       ; Exception auto-disables further Intr/exceptions.
+       ; Re-enable them by pretending to return from exception
+       ; (so rest of handler executes in pure K mode)
+
+       FAKE_RET_FROM_EXCPN
+
+       mov   r1, sp    ; Handle to pt_regs
+
+       ;------ (5) Type of Protection Violation? ----------
+       ;
+       ; ProtV Hardware Exception is triggered for Access Faults of 2 types
+       ;   -Access Violation   : 00_23_(00|01|02|03)_00
+       ;                                x  r  w  r+w
+       ;   -Unaligned Access   : 00_23_04_00
+       ;
+       bbit1 r2, ECR_C_BIT_PROTV_MISALIG_DATA, 4f
+
+       ;========= (6a) Access Violation Processing ========
+       bl  do_page_fault
+       b   ret_from_exception
+
+       ;========== (6b) Non aligned access ============
+4:
+
+       SAVE_CALLEE_SAVED_USER
+       mov r2, sp              ; callee_regs
+
+       bl  do_misaligned_access
+
+       ; TBD: optimize - do this only if a callee reg was involved
+       ; either a dst of emulated LD/ST or src with address-writeback
+       RESTORE_CALLEE_SAVED_USER
+
+       b   ret_from_exception
+
+END(EV_TLBProtV)
+
+; Wrapper for Linux page fault handler called from EV_TLBMiss*
+; Very similar to ProtV handler case (6a) above, but avoids the extra checks
+; for Misaligned access
+;
+ENTRY(call_do_page_fault)
+
+       EXCEPTION_PROLOGUE
+       lr  r0, [efa]   ; Faulting Data address
+       mov   r1, sp
+       FAKE_RET_FROM_EXCPN
+
+       mov blink, ret_from_exception
+       b  do_page_fault
+
+END(call_do_page_fault)
+
+;############# Common Handlers for ARCompact and ARCv2 ##############
+
+#include "entry.S"
+
+;############# Return from Intr/Excp/Trap (ARC Specifics) ##############
+;
+; Restore the saved sys context (common exit-path for EXCPN/IRQ/Trap)
+; IRQ shd definitely not happen between now and rtie
+; Both entry points to here already disable interrupts
+
+.Lrestore_regs:
+
+       TRACE_ASM_IRQ_ENABLE
+
+       lr      r10, [status32]
+
+       ; Restore REG File. In case multiple Events outstanding,
+       ; use the same priority as rtie: EXCPN, L2 IRQ, L1 IRQ, None
+       ; Note that we use realtime STATUS32 (not pt_regs->status32) to
+       ; decide that.
+
+       ; if Returning from Exception
+       btst   r10, STATUS_AE_BIT
+       bnz    .Lexcep_ret
+
+       ; Not Exception so maybe Interrupts (Level 1 or 2)
+
+#ifdef CONFIG_ARC_COMPACT_IRQ_LEVELS
+
+       ; Level 2 interrupt return Path - from hardware standpoint
+       bbit0  r10, STATUS_A2_BIT, not_level2_interrupt
+
+       ;------------------------------------------------------------------
+       ; However the context returning might not have taken L2 intr itself
+       ; e.g. Task'A' user-code -> L2 intr -> schedule -> 'B' user-code ret
+       ; Special considerations needed for the context which took L2 intr
+
+       ld   r9, [sp, PT_event]        ; Ensure this is L2 intr context
+       brne r9, event_IRQ2, 149f
+
+       ;------------------------------------------------------------------
+       ; if L2 IRQ interrupted an L1 ISR, we'd disabled preemption earlier
+       ; so that sched doesn't move to a new task, causing L1 to be delayed
+       ; non-deterministically. Now that we've achieved that, let's reset
+       ; things to what they were, before returning from L2 context
+       ;----------------------------------------------------------------
+
+       ld r9, [sp, PT_status32]       ; get status32_l2 (saved in pt_regs)
+       bbit0 r9, STATUS_A1_BIT, 149f  ; L1 not active when L2 IRQ, so normal
+
+       ; decrement thread_info->preempt_count (re-enable preemption)
+       GET_CURR_THR_INFO_FROM_SP   r10
+       ld      r9, [r10, THREAD_INFO_PREEMPT_COUNT]
+
+       ; paranoid check, given A1 was active when A2 happened, preempt count
+       ; must not be 0 because we would have incremented it.
+       ; If this does happen we simply HALT as it means a BUG !!!
+       cmp     r9, 0
+       bnz     2f
+       flag 1
+
+2:
+       sub     r9, r9, 1
+       st      r9, [r10, THREAD_INFO_PREEMPT_COUNT]
+
+149:
+       ;return from level 2
+       INTERRUPT_EPILOGUE 2
+debug_marker_l2:
+       rtie
+
+not_level2_interrupt:
+
+#endif
+
+       bbit0  r10, STATUS_A1_BIT, .Lpure_k_mode_ret
+
+       ;return from level 1
+       INTERRUPT_EPILOGUE 1
+debug_marker_l1:
+       rtie
+
+.Lexcep_ret:
+.Lpure_k_mode_ret:
+
+       ;this case is for syscalls or Exceptions or pure kernel mode
+
+       EXCEPTION_EPILOGUE
+debug_marker_syscall:
+       rtie
+
+END(ret_from_exception)
index d868289c5a26f74e780ee2e59ff6174987d68d68..f7a82fd4d6018b29c4e4297af9b839ca1a154fab 100644 (file)
@@ -1,60 +1,13 @@
 /*
- * Low Level Interrupts/Traps/Exceptions(non-TLB) Handling for ARC
+ * Common Low Level Interrupts/Traps/Exceptions(non-TLB) Handling for ARC
+ * (included from entry-<isa>.S)
  *
+ * Copyright (C) 2014-15 Synopsys, Inc. (www.synopsys.com)
  * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
- *
- * vineetg: May 2011
- *  -Userspace unaligned access emulation
- *
- * vineetg: Feb 2011 (ptrace low level code fixes)
- *  -traced syscall return code (r0) was not saved into pt_regs for restoring
- *   into user reg-file when traded task rets to user space.
- *  -syscalls needing arch-wrappers (mainly for passing sp as pt_regs)
- *   were not invoking post-syscall trace hook (jumping directly into
- *   ret_from_system_call)
- *
- * vineetg: Nov 2010:
- *  -Vector table jumps (@8 bytes) converted into branches (@4 bytes)
- *  -To maintain the slot size of 8 bytes/vector, added nop, which is
- *   not executed at runtime.
- *
- * vineetg: Nov 2009 (Everything needed for TIF_RESTORE_SIGMASK)
- *  -do_signal()invoked upon TIF_RESTORE_SIGMASK as well
- *  -Wrappers for sys_{,rt_}sigsuspend() nolonger needed as they don't
- *   need ptregs anymore
- *
- * Vineetg: Oct 2009
- *  -In a rare scenario, Process gets a Priv-V exception and gets scheduled
- *   out. Since we don't do FAKE RTIE for Priv-V, CPU excpetion state remains
- *   active (AE bit enabled).  This causes a double fault for a subseq valid
- *   exception. Thus FAKE RTIE needed in low level Priv-Violation handler.
- *   Instr Error could also cause similar scenario, so same there as well.
- *
- * Vineetg: March 2009 (Supporting 2 levels of Interrupts)
- *
- * Vineetg: Aug 28th 2008: Bug #94984
- *  -Zero Overhead Loop Context shd be cleared when entering IRQ/EXcp/Trap
- *   Normally CPU does this automatically, however when doing FAKE rtie,
- *   we need to explicitly do this. The problem in macros
- *   FAKE_RET_FROM_EXCPN and FAKE_RET_FROM_EXCPN_LOCK_IRQ was that this bit
- *   was being "CLEARED" rather then "SET". Since it is Loop INHIBIT Bit,
- *   setting it and not clearing it clears ZOL context
- *
- * Vineetg: May 16th, 2008
- *  - r25 now contains the Current Task when in kernel
- *
- * Vineetg: Dec 22, 2007
- *    Minor Surgery of Low Level ISR to make it SMP safe
- *    - MMU_SCRATCH0 Reg used for freeing up r9 in Level 1 ISR
- *    - _current_task is made an array of NR_CPUS
- *    - Access of _current_task wrapped inside a macro so that if hardware
- *       team agrees for a dedicated reg, no other code is touched
- *
- * Amit Bhor, Rahul Trivedi, Kanika Nema, Sameer Dhavale : Codito Tech 2004
  */
 
 /*------------------------------------------------------------------
  *  Global Pointer (gp)                 r26
  *  Frame Pointer (fp)                  r27
  *  Stack Pointer (sp)                  r28
- *  Interrupt link register (ilink1)    r29
- *  Interrupt link register (ilink2)    r30
  *  Branch link register (blink)        r31
  *------------------------------------------------------------------
  */
 
-       .cpu A7
-
-;############################ Vector Table #################################
-
-.macro VECTOR  lbl
-#if 1   /* Just in case, build breaks */
-       j   \lbl
-#else
-       b   \lbl
-       nop
-#endif
-.endm
-
-       .section .vector, "ax",@progbits
-       .align 4
-
-/* Each entry in the vector table must occupy 2 words. Since it is a jump
- * across sections (.vector to .text) we are gauranteed that 'j somewhere'
- * will use the 'j limm' form of the intrsuction as long as somewhere is in
- * a section other than .vector.
- */
-
-; ********* Critical System Events **********************
-VECTOR   res_service             ; 0x0, Restart Vector  (0x0)
-VECTOR   mem_service             ; 0x8, Mem exception   (0x1)
-VECTOR   instr_service           ; 0x10, Instrn Error   (0x2)
-
-; ******************** Device ISRs **********************
-#ifdef CONFIG_ARC_IRQ3_LV2
-VECTOR   handle_interrupt_level2
-#else
-VECTOR   handle_interrupt_level1
-#endif
-
-VECTOR   handle_interrupt_level1
-
-#ifdef CONFIG_ARC_IRQ5_LV2
-VECTOR   handle_interrupt_level2
-#else
-VECTOR   handle_interrupt_level1
-#endif
-
-#ifdef CONFIG_ARC_IRQ6_LV2
-VECTOR   handle_interrupt_level2
-#else
-VECTOR   handle_interrupt_level1
-#endif
-
-.rept   25
-VECTOR   handle_interrupt_level1 ; Other devices
-.endr
-
-/* FOR ARC600: timer = 0x3, uart = 0x8, emac = 0x10 */
-
-; ******************** Exceptions **********************
-VECTOR   EV_MachineCheck         ; 0x100, Fatal Machine check   (0x20)
-VECTOR   EV_TLBMissI             ; 0x108, Intruction TLB miss   (0x21)
-VECTOR   EV_TLBMissD             ; 0x110, Data TLB miss         (0x22)
-VECTOR   EV_TLBProtV             ; 0x118, Protection Violation  (0x23)
-                                ;         or Misaligned Access
-VECTOR   EV_PrivilegeV           ; 0x120, Privilege Violation   (0x24)
-VECTOR   EV_Trap                 ; 0x128, Trap exception        (0x25)
-VECTOR   EV_Extension            ; 0x130, Extn Intruction Excp  (0x26)
-
-.rept   24
-VECTOR   reserved                ; Reserved Exceptions
-.endr
-
-#include <linux/linkage.h>   /* {EXTRY,EXIT} */
-#include <asm/entry.h>       /* SAVE_ALL_{INT1,INT2,SYS...} */
-#include <asm/errno.h>
-#include <asm/arcregs.h>
-#include <asm/irqflags.h>
-
-;##################### Scratch Mem for IRQ stack switching #############
-
-ARCFP_DATA int1_saved_reg
-       .align 32
-       .type   int1_saved_reg, @object
-       .size   int1_saved_reg, 4
-int1_saved_reg:
-       .zero 4
-
-/* Each Interrupt level needs its own scratch */
-#ifdef CONFIG_ARC_COMPACT_IRQ_LEVELS
-
-ARCFP_DATA int2_saved_reg
-       .type   int2_saved_reg, @object
-       .size   int2_saved_reg, 4
-int2_saved_reg:
-       .zero 4
-
-#endif
-
-; ---------------------------------------------
-       .section .text, "ax",@progbits
-
-res_service:           ; processor restart
-       flag    0x1     ; not implemented
-       nop
-       nop
-
-reserved:              ; processor restart
-       rtie            ; jump to processor initializations
-
-;##################### Interrupt Handling ##############################
-
-#ifdef CONFIG_ARC_COMPACT_IRQ_LEVELS
-; ---------------------------------------------
-;  Level 2 ISR: Can interrupt a Level 1 ISR
-; ---------------------------------------------
-ENTRY(handle_interrupt_level2)
+;################### Special Sys Call Wrappers ##########################
 
-       ; TODO-vineetg for SMP this wont work
-       ; free up r9 as scratchpad
-       st  r9, [@int2_saved_reg]
+ENTRY(sys_clone_wrapper)
+       SAVE_CALLEE_SAVED_USER
+       bl  @sys_clone
+       DISCARD_CALLEE_SAVED_USER
 
-       ;Which mode (user/kernel) was the system in when intr occured
-       lr  r9, [status32_l2]
+       GET_CURR_THR_INFO_FLAGS   r10
+       btst r10, TIF_SYSCALL_TRACE
+       bnz  tracesys_exit
 
-       SWITCH_TO_KERNEL_STK
-       SAVE_ALL_INT2
+       b ret_from_system_call
+END(sys_clone_wrapper)
 
-       ;------------------------------------------------------
-       ; if L2 IRQ interrupted a L1 ISR, disable preemption
-       ;------------------------------------------------------
+ENTRY(ret_from_fork)
+       ; when the forked child comes here from the __switch_to function
+       ; r0 has the last task pointer.
+       ; put last task in scheduler queue
+       bl   @schedule_tail
 
-       ld r9, [sp, PT_status32]        ; get statu32_l2 (saved in pt_regs)
-       bbit0 r9, STATUS_A1_BIT, 1f     ; L1 not active when L2 IRQ, so normal
+       ld   r9, [sp, PT_status32]
+       brne r9, 0, 1f
 
-       ; A1 is set in status32_l2
-       ; bump thread_info->preempt_count (Disable preemption)
-       GET_CURR_THR_INFO_FROM_SP   r10
-       ld      r9, [r10, THREAD_INFO_PREEMPT_COUNT]
-       add     r9, r9, 1
-       st      r9, [r10, THREAD_INFO_PREEMPT_COUNT]
+       jl.d [r14]              ; kernel thread entry point
+       mov  r0, r13            ; (see PF_KTHREAD block in copy_thread)
 
 1:
-       ;------------------------------------------------------
-       ; setup params for Linux common ISR and invoke it
-       ;------------------------------------------------------
-       lr  r0, [icause2]
-       and r0, r0, 0x1f
-
-       bl.d  @arch_do_IRQ
-       mov r1, sp
-
-       mov r8,0x2
-       sr r8, [AUX_IRQ_LV12]       ; clear bit in Sticky Status Reg
-
-       b   ret_from_exception
-
-END(handle_interrupt_level2)
-
-#endif
-
-; ---------------------------------------------
-;  Level 1 ISR
-; ---------------------------------------------
-ENTRY(handle_interrupt_level1)
-
-       /* free up r9 as scratchpad */
-#ifdef CONFIG_SMP
-       sr  r9, [ARC_REG_SCRATCH_DATA0]
-#else
-       st   r9, [@int1_saved_reg]
-#endif
-
-       ;Which mode (user/kernel) was the system in when intr occured
-       lr  r9, [status32_l1]
-
-       SWITCH_TO_KERNEL_STK
-       SAVE_ALL_INT1
+       ; Return to user space
+       ; 1. Any forked task (Reach here via BRne above)
+       ; 2. First ever init task (Reach here via return from JL above)
+       ;    This is the historic "kernel_execve" use-case, to return to init
+       ;    user mode, in a roundabout way since that is always done from
+       ;    a kernel thread which is executed via JL above but always returns
+       ;    out whenever kernel_execve (now inline do_fork()) is involved
+       b    ret_from_exception
+END(ret_from_fork)
 
-       lr  r0, [icause1]
-       and r0, r0, 0x1f
+#ifdef CONFIG_ARC_DW2_UNWIND
+; Workaround for bug 94179 (STAR ):
+; Despite -fasynchronous-unwind-tables, linker is not making dwarf2 unwinder
+; section (.debug_frame) as loadable. So we force it here.
+; This also fixes STAR 9000487933 where the prev-workaround (objcopy --setflag)
+; would not work after a clean build due to kernel build system dependencies.
+.section .debug_frame, "wa",@progbits
 
-#ifdef CONFIG_TRACE_IRQFLAGS
-       ; icause1 needs to be read early, before calling tracing, which
-       ; can clobber scratch regs, hence use of stack to stash it
-       push r0
-       TRACE_ASM_IRQ_DISABLE
-       pop  r0
+; Reset to .text as this file is included in entry-<isa>.S
+.section .text, "ax",@progbits
 #endif
 
-       bl.d  @arch_do_IRQ
-       mov r1, sp
-
-       mov r8,0x1
-       sr r8, [AUX_IRQ_LV12]       ; clear bit in Sticky Status Reg
-
-       b   ret_from_exception
-END(handle_interrupt_level1)
-
 ;################### Non TLB Exception Handling #############################
 
 ; ---------------------------------------------
@@ -280,7 +86,7 @@ ENTRY(instr_service)
        lr  r0, [efa]
        mov r1, sp
 
-       FAKE_RET_FROM_EXCPN r9
+       FAKE_RET_FROM_EXCPN
 
        bl  do_insterror_or_kprobe
        b   ret_from_exception
@@ -297,7 +103,7 @@ ENTRY(mem_service)
        lr  r0, [efa]
        mov r1, sp
 
-       FAKE_RET_FROM_EXCPN r9
+       FAKE_RET_FROM_EXCPN
 
        bl  do_memory_error
        b   ret_from_exception
@@ -333,60 +139,6 @@ ENTRY(EV_MachineCheck)
 
 END(EV_MachineCheck)
 
-; ---------------------------------------------
-; Protection Violation Exception Handler
-; ---------------------------------------------
-
-ENTRY(EV_TLBProtV)
-
-       EXCEPTION_PROLOGUE
-
-       ;---------(3) Save some more regs-----------------
-       ;  vineetg: Mar 6th: Random Seg Fault issue #1
-       ;  ecr and efa were not saved in case an Intr sneaks in
-       ;  after fake rtie
-
-       lr  r2, [ecr]
-       lr  r0, [efa]   ; Faulting Data address
-
-       ; --------(4) Return from CPU Exception Mode ---------
-       ;  Fake a rtie, but rtie to next label
-       ;  That way, subsequently, do_page_fault ( ) executes in pure kernel
-       ;  mode with further Exceptions enabled
-
-       FAKE_RET_FROM_EXCPN r9
-
-       mov   r1, sp
-
-       ;------ (5) Type of Protection Violation? ----------
-       ;
-       ; ProtV Hardware Exception is triggered for Access Faults of 2 types
-       ;   -Access Violaton    : 00_23_(00|01|02|03)_00
-       ;                                x  r  w  r+w
-       ;   -Unaligned Access   : 00_23_04_00
-       ;
-       bbit1 r2, ECR_C_BIT_PROTV_MISALIG_DATA, 4f
-
-       ;========= (6a) Access Violation Processing ========
-       bl  do_page_fault
-       b   ret_from_exception
-
-       ;========== (6b) Non aligned access ============
-4:
-
-       SAVE_CALLEE_SAVED_USER
-       mov r2, sp              ; callee_regs
-
-       bl  do_misaligned_access
-
-       ; TBD: optimize - do this only if a callee reg was involved
-       ; either a dst of emulated LD/ST or src with address-writeback
-       RESTORE_CALLEE_SAVED_USER
-
-       b   ret_from_exception
-
-END(EV_TLBProtV)
-
 ; ---------------------------------------------
 ; Privilege Violation Exception Handler
 ; ---------------------------------------------
@@ -397,7 +149,7 @@ ENTRY(EV_PrivilegeV)
        lr  r0, [efa]
        mov r1, sp
 
-       FAKE_RET_FROM_EXCPN r9
+       FAKE_RET_FROM_EXCPN
 
        bl  do_privilege_fault
        b   ret_from_exception
@@ -413,14 +165,17 @@ ENTRY(EV_Extension)
        lr  r0, [efa]
        mov r1, sp
 
-       FAKE_RET_FROM_EXCPN r9
+       FAKE_RET_FROM_EXCPN
 
        bl  do_extension_fault
        b   ret_from_exception
 END(EV_Extension)
 
-;######################### System Call Tracing #########################
+;################ Trap Handling (Syscall, Breakpoint) ##################
 
+; ---------------------------------------------
+; syscall Tracing
+; ---------------------------------------------
 tracesys:
        ; save EFA in case tracer wants the PC of traced task
        ; using ERET won't work since next-PC has already committed
@@ -463,10 +218,9 @@ tracesys_exit:
        b   ret_from_exception ; NOT ret_from_system_call as it saves r0 which
        ; we'd done before calling post hook above
 
-;################### Break Point TRAP ##########################
-
-       ; ======= (5b) Trap is due to Break-Point =========
-
+; ---------------------------------------------
+; Breakpoint TRAP
+; ---------------------------------------------
 trap_with_param:
 
        ; stop_pc info by gdb needs this info
@@ -475,7 +229,7 @@ trap_with_param:
 
        ; Now that we have read EFA, it is safe to do "fake" rtie
        ;   and get out of CPU exception mode
-       FAKE_RET_FROM_EXCPN r11
+       FAKE_RET_FROM_EXCPN
 
        ; Save callee regs in case gdb wants to have a look
        ; SP will grow up by size of CALLEE Reg-File
@@ -494,37 +248,33 @@ trap_with_param:
 
        b   ret_from_exception
 
-;##################### Trap Handling ##############################
-;
-; EV_Trap caused by TRAP_S and TRAP0 instructions.
-;------------------------------------------------------------------
-;   (1) System Calls
-;       :parameters in r0-r7.
-;       :r8 has the system call number
-;   (2) Break Points
-;------------------------------------------------------------------
+; ---------------------------------------------
+; syscall TRAP
+; ABI: (r0-r7) up to 8 args, (r8) syscall number
+; ---------------------------------------------
 
 ENTRY(EV_Trap)
 
        EXCEPTION_PROLOGUE
 
-       ;------- (4) What caused the Trap --------------
-       lr     r12, [ecr]
-       bmsk.f 0, r12, 7
+       ;============ TRAP 1   :breakpoints
+       ; Check ECR for trap with arg (PROLOGUE ensures r9 has ECR)
+       bmsk.f 0, r9, 7
        bnz    trap_with_param
 
-       ; ======= (5a) Trap is due to System Call ========
+       ;============ TRAP  (no param): syscall top level
 
-       ; Before doing anything, return from CPU Exception Mode
-       FAKE_RET_FROM_EXCPN r11
+       ; First return from Exception to pure K mode (Exception/IRQs re-enabled)
+       FAKE_RET_FROM_EXCPN
 
-       ; If syscall tracing ongoing, invoke pre-pos-hooks
+       ; If syscall tracing ongoing, invoke pre-post-hooks
        GET_CURR_THR_INFO_FLAGS   r10
        btst r10, TIF_SYSCALL_TRACE
        bnz tracesys  ; this never comes back
 
-       ;============ This is normal System Call case ==========
-       ; Sys-call num shd not exceed the total system calls avail
+       ;============ Normal syscall case
+
+       ; syscall num shd not exceed the total system calls avail
        cmp     r8,  NR_syscalls
        mov.hi  r0, -ENOSYS
        bhi     ret_from_system_call
@@ -565,7 +315,7 @@ resume_user_mode_begin:
        ; Fast Path return to user mode if no pending work
        GET_CURR_THR_INFO_FLAGS   r9
        and.f  0,  r9, _TIF_WORK_MASK
-       bz     restore_regs
+       bz     .Lrestore_regs
 
        ; --- (Slow Path #1) task preemption ---
        bbit0  r9, TIF_NEED_RESCHED, .Lchk_pend_signals
@@ -624,11 +374,11 @@ resume_kernel_mode:
        ; Can't preempt if preemption disabled
        GET_CURR_THR_INFO_FROM_SP   r10
        ld  r8, [r10, THREAD_INFO_PREEMPT_COUNT]
-       brne  r8, 0, restore_regs
+       brne  r8, 0, .Lrestore_regs
 
        ; check if this task's NEED_RESCHED flag set
        ld  r9, [r10, THREAD_INFO_FLAGS]
-       bbit0  r9, TIF_NEED_RESCHED, restore_regs
+       bbit0  r9, TIF_NEED_RESCHED, .Lrestore_regs
 
        ; Invoke PREEMPTION
        bl      preempt_schedule_irq
@@ -636,142 +386,7 @@ resume_kernel_mode:
        ; preempt_schedule_irq() always returns with IRQ disabled
 #endif
 
-       ; fall through
-
-;############# Return from Intr/Excp/Trap (ARC Specifics) ##############
-;
-; Restore the saved sys context (common exit-path for EXCPN/IRQ/Trap)
-; IRQ shd definitely not happen between now and rtie
-; All 2 entry points to here already disable interrupts
-
-restore_regs :
-
-       TRACE_ASM_IRQ_ENABLE
-
-       lr      r10, [status32]
-
-       ; Restore REG File. In case multiple Events outstanding,
-       ; use the same priorty as rtie: EXCPN, L2 IRQ, L1 IRQ, None
-       ; Note that we use realtime STATUS32 (not pt_regs->status32) to
-       ; decide that.
-
-       ; if Returning from Exception
-       bbit0  r10, STATUS_AE_BIT, not_exception
-       RESTORE_ALL_SYS
-       rtie
-
-       ; Not Exception so maybe Interrupts (Level 1 or 2)
-
-not_exception:
-
-#ifdef CONFIG_ARC_COMPACT_IRQ_LEVELS
-
-       ; Level 2 interrupt return Path - from hardware standpoint
-       bbit0  r10, STATUS_A2_BIT, not_level2_interrupt
-
-       ;------------------------------------------------------------------
-       ; However the context returning might not have taken L2 intr itself
-       ; e.g. Task'A' user-code -> L2 intr -> schedule -> 'B' user-code ret
-       ; Special considerations needed for the context which took L2 intr
-
-       ld   r9, [sp, PT_event]        ; Ensure this is L2 intr context
-       brne r9, event_IRQ2, 149f
-
-       ;------------------------------------------------------------------
-       ; if L2 IRQ interrupted an L1 ISR,  we'd disabled preemption earlier
-       ; so that sched doesn't move to new task, causing L1 to be delayed
-       ; undeterministically. Now that we've achieved that, let's reset
-       ; things to what they were, before returning from L2 context
-       ;----------------------------------------------------------------
-
-       ld r9, [sp, PT_status32]       ; get statu32_l2 (saved in pt_regs)
-       bbit0 r9, STATUS_A1_BIT, 149f  ; L1 not active when L2 IRQ, so normal
-
-       ; decrement thread_info->preempt_count (re-enable preemption)
-       GET_CURR_THR_INFO_FROM_SP   r10
-       ld      r9, [r10, THREAD_INFO_PREEMPT_COUNT]
-
-       ; paranoid check, given A1 was active when A2 happened, preempt count
-       ; must not be 0 because we would have incremented it.
-       ; If this does happen we simply HALT as it means a BUG !!!
-       cmp     r9, 0
-       bnz     2f
-       flag 1
-
-2:
-       sub     r9, r9, 1
-       st      r9, [r10, THREAD_INFO_PREEMPT_COUNT]
-
-149:
-       ;return from level 2
-       RESTORE_ALL_INT2
-debug_marker_l2:
-       rtie
-
-not_level2_interrupt:
-
-#endif
-
-       bbit0  r10, STATUS_A1_BIT, not_level1_interrupt
+       b       .Lrestore_regs
 
-       ;return from level 1
+##### DONT ADD CODE HERE - .Lrestore_regs actually follows in entry-<isa>.S
 
-       RESTORE_ALL_INT1
-debug_marker_l1:
-       rtie
-
-not_level1_interrupt:
-
-       ;this case is for syscalls or Exceptions (with fake rtie)
-
-       RESTORE_ALL_SYS
-debug_marker_syscall:
-       rtie
-
-END(ret_from_exception)
-
-ENTRY(ret_from_fork)
-       ; when the forked child comes here from the __switch_to function
-       ; r0 has the last task pointer.
-       ; put last task in scheduler queue
-       bl   @schedule_tail
-
-       ld   r9, [sp, PT_status32]
-       brne r9, 0, 1f
-
-       jl.d [r14]              ; kernel thread entry point
-       mov  r0, r13            ; (see PF_KTHREAD block in copy_thread)
-
-1:
-       ; Return to user space
-       ; 1. Any forked task (Reach here via BRne above)
-       ; 2. First ever init task (Reach here via return from JL above)
-       ;    This is the historic "kernel_execve" use-case, to return to init
-       ;    user mode, in a round about way since that is always done from
-       ;    a kernel thread which is executed via JL above but always returns
-       ;    out whenever kernel_execve (now inline do_fork()) is involved
-       b    ret_from_exception
-END(ret_from_fork)
-
-;################### Special Sys Call Wrappers ##########################
-
-ENTRY(sys_clone_wrapper)
-       SAVE_CALLEE_SAVED_USER
-       bl  @sys_clone
-       DISCARD_CALLEE_SAVED_USER
-
-       GET_CURR_THR_INFO_FLAGS   r10
-       btst r10, TIF_SYSCALL_TRACE
-       bnz  tracesys_exit
-
-       b ret_from_system_call
-END(sys_clone_wrapper)
-
-#ifdef CONFIG_ARC_DW2_UNWIND
-; Workaround for bug 94179 (STAR ):
-; Despite -fasynchronous-unwind-tables, linker is not making dwarf2 unwinder
-; section (.debug_frame) as loadable. So we force it here.
-; This also fixes STAR 9000487933 where the prev-workaround (objcopy --setflag)
-; would not work after a clean build due to kernel build system dependencies.
-.section .debug_frame, "wa",@progbits
-#endif
index b0e8666fdccc755ac11763a3bf0b6594f4ed628a..812f95e6ae6946d56550cbd80a0baaa8a6e94cc0 100644 (file)
@@ -49,8 +49,6 @@
 1:
 .endm
 
-       .cpu A7
-
        .section .init.text, "ax",@progbits
        .type stext, @function
        .globl stext
@@ -83,6 +81,7 @@ stext:
        st.ab   0, [r5, 4]
 1:
 
+#ifdef CONFIG_ARC_UBOOT_SUPPORT
        ; Uboot - kernel ABI
        ;    r0 = [0] No uboot interaction, [1] cmdline in r2, [2] DTB in r2
        ;    r1 = magic number (board identity, unused as of now
@@ -90,6 +89,7 @@ stext:
        ; These are handled later in setup_arch()
        st      r0, [@uboot_tag]
        st      r2, [@uboot_arg]
+#endif
 
        ; setup "current" tsk and optionally cache it in dedicated r25
        mov     r9, @init_task
diff --git a/arch/arc/kernel/intc-arcv2.c b/arch/arc/kernel/intc-arcv2.c
new file mode 100644 (file)
index 0000000..6208c63
--- /dev/null
@@ -0,0 +1,143 @@
+/*
+ * Copyright (C) 2014 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/irqdomain.h>
+#include <linux/irqchip.h>
+#include "../../drivers/irqchip/irqchip.h"
+#include <asm/irq.h>
+
+/*
+ * Early Hardware specific Interrupt setup
+ * -Called very early (start_kernel -> setup_arch -> setup_processor)
+ * -Platform Independent (must for any ARC Core)
+ * -Needed for each CPU (hence not foldable into init_IRQ)
+ */
+void arc_init_IRQ(void)
+{
+       unsigned int tmp;
+
+       struct aux_irq_ctrl {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+               unsigned int res3:18, save_idx_regs:1, res2:1,
+                            save_u_to_u:1, save_lp_regs:1, save_blink:1,
+                            res:4, save_nr_gpr_pairs:5;
+#else
+               unsigned int save_nr_gpr_pairs:5, res:4,
+                            save_blink:1, save_lp_regs:1, save_u_to_u:1,
+                            res2:1, save_idx_regs:1, res3:18;
+#endif
+       } ictrl;
+
+       *(unsigned int *)&ictrl = 0;
+
+       ictrl.save_nr_gpr_pairs = 6;    /* r0 to r11 (r12 saved manually) */
+       ictrl.save_blink = 1;
+       ictrl.save_lp_regs = 1;         /* LP_COUNT, LP_START, LP_END */
+       ictrl.save_u_to_u = 0;          /* user ctxt saved on kernel stack */
+       ictrl.save_idx_regs = 1;        /* JLI, LDI, EI */
+
+       WRITE_AUX(AUX_IRQ_CTRL, ictrl);
+
+       /* setup status32; don't enable interrupts yet as the kernel isn't ready for them */
+       tmp = read_aux_reg(0xa);
+       tmp |= ISA_INIT_STATUS_BITS;
+       tmp &= ~STATUS_IE_MASK;
+       asm volatile("flag %0   \n"::"r"(tmp));
+
+       /*
+        * ARCv2 core intc provides multiple interrupt priorities (up to 16).
+        * Typical builds though have only two levels (0-high, 1-low)
+        * Linux by default uses lower prio 1 for most irqs, reserving 0 for
+        * NMI style interrupts in future (say perf)
+        *
+        * Read the intc BCR to confirm that Linux default priority is avail
+        * in h/w
+        *
+        * Note:
+        *  IRQ_BCR[27..24] contains N-1 (for N priority levels) and prio level
+        *  is 0 based.
+        */
+       tmp = (read_aux_reg(ARC_REG_IRQ_BCR) >> 24 ) & 0xF;
+       if (ARCV2_IRQ_DEF_PRIO > tmp)
+               panic("Linux default irq prio incorrect\n");
+}
+
+static void arcv2_irq_mask(struct irq_data *data)
+{
+       write_aux_reg(AUX_IRQ_SELECT, data->irq);
+       write_aux_reg(AUX_IRQ_ENABLE, 0);
+}
+
+static void arcv2_irq_unmask(struct irq_data *data)
+{
+       write_aux_reg(AUX_IRQ_SELECT, data->irq);
+       write_aux_reg(AUX_IRQ_ENABLE, 1);
+}
+
+void arcv2_irq_enable(struct irq_data *data)
+{
+       /* set default priority */
+       write_aux_reg(AUX_IRQ_SELECT, data->irq);
+       write_aux_reg(AUX_IRQ_PRIORITY, ARCV2_IRQ_DEF_PRIO);
+
+       /*
+        * hw auto enables (linux unmask) all by default
+        * So no need to do IRQ_ENABLE here
+        * XXX: However OSCI LAN needs it
+        */
+       write_aux_reg(AUX_IRQ_ENABLE, 1);
+}
+
+static struct irq_chip arcv2_irq_chip = {
+       .name           = "ARCv2 core Intc",
+       .irq_mask       = arcv2_irq_mask,
+       .irq_unmask     = arcv2_irq_unmask,
+       .irq_enable     = arcv2_irq_enable
+};
+
+static int arcv2_irq_map(struct irq_domain *d, unsigned int irq,
+                        irq_hw_number_t hw)
+{
+       if (irq == TIMER0_IRQ || irq == IPI_IRQ)
+               irq_set_chip_and_handler(irq, &arcv2_irq_chip, handle_percpu_irq);
+       else
+               irq_set_chip_and_handler(irq, &arcv2_irq_chip, handle_level_irq);
+
+       return 0;
+}
+
+static const struct irq_domain_ops arcv2_irq_ops = {
+       .xlate = irq_domain_xlate_onecell,
+       .map = arcv2_irq_map,
+};
+
+static struct irq_domain *root_domain;
+
+static int __init
+init_onchip_IRQ(struct device_node *intc, struct device_node *parent)
+{
+       if (parent)
+               panic("DeviceTree incore intc not a root irq controller\n");
+
+       root_domain = irq_domain_add_legacy(intc, NR_CPU_IRQS, 0, 0,
+                                           &arcv2_irq_ops, NULL);
+
+       if (!root_domain)
+               panic("root irq domain not avail\n");
+
+       /* with this we don't need to export root_domain */
+       irq_set_default_host(root_domain);
+
+       return 0;
+}
+
+IRQCHIP_DECLARE(arc_intc, "snps,archs-intc", init_onchip_IRQ);
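
A minimal, hypothetical sketch of how a driver would claim a line once the root domain above is registered via irq_set_default_host(): the IRQ number, handler, and init function below are made up for illustration; request_irq() and irqreturn_t are the standard kernel API, and the domain/unmask behaviour referenced in the comments is the intc code shown in this hunk.

#include <linux/init.h>
#include <linux/interrupt.h>

/* Hypothetical device line; real numbers come from the device tree / platform. */
#define DEMO_DEV_IRQ	24

static irqreturn_t demo_dev_isr(int irq, void *dev_id)
{
	/* acknowledge and handle the (hypothetical) device here */
	return IRQ_HANDLED;
}

static int __init demo_dev_init(void)
{
	/*
	 * With the legacy 1:1 domain above set as default host, the Linux IRQ
	 * number maps straight onto the core intc: arcv2_irq_map() picks
	 * handle_level_irq and arcv2_irq_unmask() programs AUX_IRQ_SELECT /
	 * AUX_IRQ_ENABLE when the line is enabled.
	 */
	return request_irq(DEMO_DEV_IRQ, demo_dev_isr, 0, "demo-dev", NULL);
}
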
diff --git a/arch/arc/kernel/intc-compact.c b/arch/arc/kernel/intc-compact.c
new file mode 100644 (file)
index 0000000..fcdddb6
--- /dev/null
@@ -0,0 +1,226 @@
+/*
+ * Copyright (C) 2011-12 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/irqdomain.h>
+#include <linux/irqchip.h>
+#include "../../drivers/irqchip/irqchip.h"
+#include <asm/irq.h>
+
+/*
+ * Early Hardware specific Interrupt setup
+ * -Platform independent, needed for each CPU (not foldable into init_IRQ)
+ * -Called very early (start_kernel -> setup_arch -> setup_processor)
+ *
+ * What it does:
+ * -Optionally, setup the High priority Interrupts as Level 2 IRQs
+ */
+void arc_init_IRQ(void)
+{
+       int level_mask = 0;
+
+       /* setup any high priority Interrupts (Level2 in ARCompact jargon) */
+       level_mask |= IS_ENABLED(CONFIG_ARC_IRQ3_LV2) << 3;
+       level_mask |= IS_ENABLED(CONFIG_ARC_IRQ5_LV2) << 5;
+       level_mask |= IS_ENABLED(CONFIG_ARC_IRQ6_LV2) << 6;
+
+       /*
+        * Write to register, even if no LV2 IRQs configured to reset it
+        * in case bootloader had mucked with it
+        */
+       write_aux_reg(AUX_IRQ_LEV, level_mask);
+
+       if (level_mask)
+               pr_info("Level-2 interrupts bitset %x\n", level_mask);
+}
+
+/*
+ * ARC700 core includes a simple on-chip intc supporting
+ * -per IRQ enable/disable
+ * -2 levels of interrupts (high/low)
+ * -all interrupts being level triggered
+ *
+ * To reduce platform code, we assume all IRQs directly hooked-up into intc.
+ * Platforms with external intc, hence cascaded IRQs, are free to over-ride
+ * below, per IRQ.
+ */
+
+static void arc_irq_mask(struct irq_data *data)
+{
+       unsigned int ienb;
+
+       ienb = read_aux_reg(AUX_IENABLE);
+       ienb &= ~(1 << data->irq);
+       write_aux_reg(AUX_IENABLE, ienb);
+}
+
+static void arc_irq_unmask(struct irq_data *data)
+{
+       unsigned int ienb;
+
+       ienb = read_aux_reg(AUX_IENABLE);
+       ienb |= (1 << data->irq);
+       write_aux_reg(AUX_IENABLE, ienb);
+}
+
+static struct irq_chip onchip_intc = {
+       .name           = "ARC In-core Intc",
+       .irq_mask       = arc_irq_mask,
+       .irq_unmask     = arc_irq_unmask,
+};
+
+static int arc_intc_domain_map(struct irq_domain *d, unsigned int irq,
+                              irq_hw_number_t hw)
+{
+       /*
+        * XXX: the IPI IRQ needs to be handled like TIMER too. However ARC core
+        *      code doesn't own it (like TIMER0). ISS IDU / ezchip define it
+        *      in platform header which can't be included here as it goes
+        *      against multi-platform image philosophy
+        */
+       if (irq == TIMER0_IRQ)
+               irq_set_chip_and_handler(irq, &onchip_intc, handle_percpu_irq);
+       else
+               irq_set_chip_and_handler(irq, &onchip_intc, handle_level_irq);
+
+       return 0;
+}
+
+static const struct irq_domain_ops arc_intc_domain_ops = {
+       .xlate = irq_domain_xlate_onecell,
+       .map = arc_intc_domain_map,
+};
+
+static struct irq_domain *root_domain;
+
+static int __init
+init_onchip_IRQ(struct device_node *intc, struct device_node *parent)
+{
+       if (parent)
+               panic("DeviceTree incore intc not a root irq controller\n");
+
+       root_domain = irq_domain_add_legacy(intc, NR_CPU_IRQS, 0, 0,
+                                           &arc_intc_domain_ops, NULL);
+
+       if (!root_domain)
+               panic("root irq domain not avail\n");
+
+       /* with this we don't need to export root_domain */
+       irq_set_default_host(root_domain);
+
+       return 0;
+}
+
+IRQCHIP_DECLARE(arc_intc, "snps,arc700-intc", init_onchip_IRQ);
+
+/*
+ * arch_local_irq_enable - Enable interrupts.
+ *
+ * 1. Explicitly called to re-enable interrupts
+ * 2. Implicitly called from spin_unlock_irq, write_unlock_irq etc
+ *    which maybe in hard ISR itself
+ *
+ * Semantics of this function change depending on where it is called from:
+ *
+ * -If called from hard-ISR, it must not invert interrupt priorities
+ *  e.g. suppose TIMER is high priority (Level 2) IRQ
+ *    In the timer hard-ISR, timer_interrupt() calls spin_unlock_irq several times.
+ *    Here local_irq_enable() shd not re-enable lower priority interrupts
+ * -If called from soft-ISR, it must re-enable all interrupts
+ *    soft ISRs are low priority jobs which can be very slow, thus all IRQs
+ *    must be enabled while they run.
+ *    Now hardware context wise we may still be in L2 ISR (not done rtie)
+ *    still we must re-enable both L1 and L2 IRQs
+ *  Another twist is prev scenario with flow being
+ *     L1 ISR ==> interrupted by L2 ISR  ==> L2 soft ISR
+ *     here we must not re-enable L1 as the prev L1 Interrupt's h/w context will get
+ *     over-written (this is a deficiency in the ARC700 Interrupt mechanism)
+ */
+
+#ifdef CONFIG_ARC_COMPACT_IRQ_LEVELS   /* Complex version for 2 IRQ levels */
+
+void arch_local_irq_enable(void)
+{
+
+       unsigned long flags = arch_local_save_flags();
+
+       /* Allow both L1 and L2 at the onset */
+       flags |= (STATUS_E1_MASK | STATUS_E2_MASK);
+
+       /* Called from hard ISR (between irq_enter and irq_exit) */
+       if (in_irq()) {
+
+               /* If in L2 ISR, don't re-enable any further IRQs as this can
+                * cause IRQ priorities to get upside down. e.g. it could allow
+                * L1 to be taken while in L2 hard ISR, which is wrong not only in
+                * theory, it can also cause the dreaded L1-L2-L1 scenario
+                */
+               if (flags & STATUS_A2_MASK)
+                       flags &= ~(STATUS_E1_MASK | STATUS_E2_MASK);
+
+               /* Even if in L1 ISR, allow higher prio L2 IRQs */
+               else if (flags & STATUS_A1_MASK)
+                       flags &= ~(STATUS_E1_MASK);
+       }
+
+       /* called from soft IRQ, ideally we want to re-enable all levels */
+
+       else if (in_softirq()) {
+
+               /* However if this is a case of L1 interrupted by L2,
+                * re-enabling both may cause a wacko L1-L2-L1 scenario
+                * because ARC700 allows level 1 to interrupt an active L2 ISR
+                * Thus we disable both
+                * However some code, executing in soft ISR wants some IRQs
+                * to be enabled so we re-enable L2 only
+                *
+                * How do we determine L1 intr interrupted by L2:
+                *  -A2 is set (means in L2 ISR)
+                *  -E1 is set in this ISR's pt_regs->status32 which is
+                *      saved copy of status32_l2 when l2 ISR happened
+                */
+               struct pt_regs *pt = get_irq_regs();
+
+               if ((flags & STATUS_A2_MASK) && pt &&
+                   (pt->status32 & STATUS_A1_MASK)) {
+                       /*flags &= ~(STATUS_E1_MASK | STATUS_E2_MASK); */
+                       flags &= ~(STATUS_E1_MASK);
+               }
+       }
+
+       arch_local_irq_restore(flags);
+}
+
+#else /* ! CONFIG_ARC_COMPACT_IRQ_LEVELS */
+
+/*
+ * Simpler version for only 1 level of interrupt
+ * Here we only Worry about Level 1 Bits
+ */
+void arch_local_irq_enable(void)
+{
+       unsigned long flags;
+
+       /*
+        * The ARC IDE driver tries to re-enable interrupts from hard-isr
+        * context which is simply wrong
+        */
+       if (in_irq()) {
+               WARN_ONCE(1, "IRQ enabled from hard-isr");
+               return;
+       }
+
+       flags = arch_local_save_flags();
+       flags |= (STATUS_E1_MASK | STATUS_E2_MASK);
+       arch_local_irq_restore(flags);
+}
+#endif
+EXPORT_SYMBOL(arch_local_irq_enable);
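
The comment block above describes how arch_local_irq_enable() behaves when reached implicitly from locking primitives inside a handler. A hedged sketch of that call pattern follows; the handler and lock names are hypothetical, while spin_lock_irq()/spin_unlock_irq() are the standard kernel API.

#include <linux/interrupt.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(demo_lock);

/* Hypothetical high priority (Level-2) handler, e.g. a timer-style ISR. */
static irqreturn_t demo_l2_isr(int irq, void *dev_id)
{
	spin_lock_irq(&demo_lock);	/* locally disables interrupts */
	/* ... update shared state ... */
	spin_unlock_irq(&demo_lock);	/* ends up in arch_local_irq_enable() */

	/*
	 * Because STATUS32.A2 is set while this handler runs, the in_irq()
	 * branch above clears E1/E2 before restoring flags, so a lower
	 * priority L1 interrupt cannot preempt this still-active L2 ISR.
	 */
	return IRQ_HANDLED;
}
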
index 620ec2fe32a94f855c6e3db0a04fa25c41d5d1c2..2989a7bcf8a863709734d7f5343bb16c789089f2 100644 (file)
@@ -8,115 +8,9 @@
  */
 
 #include <linux/interrupt.h>
-#include <linux/module.h>
-#include <linux/of.h>
-#include <linux/irqdomain.h>
 #include <linux/irqchip.h>
-#include "../../drivers/irqchip/irqchip.h"
-#include <asm/sections.h>
-#include <asm/irq.h>
 #include <asm/mach_desc.h>
 
-/*
- * Early Hardware specific Interrupt setup
- * -Platform independent, needed for each CPU (not foldable into init_IRQ)
- * -Called very early (start_kernel -> setup_arch -> setup_processor)
- *
- * what it does ?
- * -Optionally, setup the High priority Interrupts as Level 2 IRQs
- */
-void arc_init_IRQ(void)
-{
-       int level_mask = 0;
-
-       /* setup any high priority Interrupts (Level2 in ARCompact jargon) */
-       level_mask |= IS_ENABLED(CONFIG_ARC_IRQ3_LV2) << 3;
-       level_mask |= IS_ENABLED(CONFIG_ARC_IRQ5_LV2) << 5;
-       level_mask |= IS_ENABLED(CONFIG_ARC_IRQ6_LV2) << 6;
-
-       /*
-        * Write to register, even if no LV2 IRQs configured to reset it
-        * in case bootloader had mucked with it
-        */
-       write_aux_reg(AUX_IRQ_LEV, level_mask);
-
-       if (level_mask)
-               pr_info("Level-2 interrupts bitset %x\n", level_mask);
-}
-
-/*
- * ARC700 core includes a simple on-chip intc supporting
- * -per IRQ enable/disable
- * -2 levels of interrupts (high/low)
- * -all interrupts being level triggered
- *
- * To reduce platform code, we assume all IRQs directly hooked-up into intc.
- * Platforms with external intc, hence cascaded IRQs, are free to over-ride
- * below, per IRQ.
- */
-
-static void arc_irq_mask(struct irq_data *data)
-{
-       unsigned int ienb;
-
-       ienb = read_aux_reg(AUX_IENABLE);
-       ienb &= ~(1 << data->irq);
-       write_aux_reg(AUX_IENABLE, ienb);
-}
-
-static void arc_irq_unmask(struct irq_data *data)
-{
-       unsigned int ienb;
-
-       ienb = read_aux_reg(AUX_IENABLE);
-       ienb |= (1 << data->irq);
-       write_aux_reg(AUX_IENABLE, ienb);
-}
-
-static struct irq_chip onchip_intc = {
-       .name           = "ARC In-core Intc",
-       .irq_mask       = arc_irq_mask,
-       .irq_unmask     = arc_irq_unmask,
-};
-
-static int arc_intc_domain_map(struct irq_domain *d, unsigned int irq,
-                               irq_hw_number_t hw)
-{
-       if (irq == TIMER0_IRQ)
-               irq_set_chip_and_handler(irq, &onchip_intc, handle_percpu_irq);
-       else
-               irq_set_chip_and_handler(irq, &onchip_intc, handle_level_irq);
-
-       return 0;
-}
-
-static const struct irq_domain_ops arc_intc_domain_ops = {
-       .xlate = irq_domain_xlate_onecell,
-       .map = arc_intc_domain_map,
-};
-
-static struct irq_domain *root_domain;
-
-static int __init
-init_onchip_IRQ(struct device_node *intc, struct device_node *parent)
-{
-       if (parent)
-               panic("DeviceTree incore intc not a root irq controller\n");
-
-       root_domain = irq_domain_add_legacy(intc, NR_CPU_IRQS, 0, 0,
-                                           &arc_intc_domain_ops, NULL);
-
-       if (!root_domain)
-               panic("root irq domain not avail\n");
-
-       /* with this we don't need to export root_domain */
-       irq_set_default_host(root_domain);
-
-       return 0;
-}
-
-IRQCHIP_DECLARE(arc_intc, "snps,arc700-intc", init_onchip_IRQ);
-
 /*
  * Late Interrupt system init called from start_kernel for Boot CPU only
  *
@@ -178,107 +72,3 @@ void arc_request_percpu_irq(int irq, int cpu,
 
        enable_percpu_irq(irq, 0);
 }
-
-/*
- * arch_local_irq_enable - Enable interrupts.
- *
- * 1. Explicitly called to re-enable interrupts
- * 2. Implicitly called from spin_unlock_irq, write_unlock_irq etc
- *    which maybe in hard ISR itself
- *
- * Semantics of this function change depending on where it is called from:
- *
- * -If called from hard-ISR, it must not invert interrupt priorities
- *  e.g. suppose TIMER is high priority (Level 2) IRQ
- *    Time hard-ISR, timer_interrupt( ) calls spin_unlock_irq several times.
- *    Here local_irq_enable( ) shd not re-enable lower priority interrupts
- * -If called from soft-ISR, it must re-enable all interrupts
- *    soft ISR are low prioity jobs which can be very slow, thus all IRQs
- *    must be enabled while they run.
- *    Now hardware context wise we may still be in L2 ISR (not done rtie)
- *    still we must re-enable both L1 and L2 IRQs
- *  Another twist is prev scenario with flow being
- *     L1 ISR ==> interrupted by L2 ISR  ==> L2 soft ISR
- *     here we must not re-enable Ll as prev Ll Interrupt's h/w context will get
- *     over-written (this is deficiency in ARC700 Interrupt mechanism)
- */
-
-#ifdef CONFIG_ARC_COMPACT_IRQ_LEVELS   /* Complex version for 2 IRQ levels */
-
-void arch_local_irq_enable(void)
-{
-
-       unsigned long flags;
-       flags = arch_local_save_flags();
-
-       /* Allow both L1 and L2 at the onset */
-       flags |= (STATUS_E1_MASK | STATUS_E2_MASK);
-
-       /* Called from hard ISR (between irq_enter and irq_exit) */
-       if (in_irq()) {
-
-               /* If in L2 ISR, don't re-enable any further IRQs as this can
-                * cause IRQ priorities to get upside down. e.g. it could allow
-                * L1 be taken while in L2 hard ISR which is wrong not only in
-                * theory, it can also cause the dreaded L1-L2-L1 scenario
-                */
-               if (flags & STATUS_A2_MASK)
-                       flags &= ~(STATUS_E1_MASK | STATUS_E2_MASK);
-
-               /* Even if in L1 ISR, allowe Higher prio L2 IRQs */
-               else if (flags & STATUS_A1_MASK)
-                       flags &= ~(STATUS_E1_MASK);
-       }
-
-       /* called from soft IRQ, ideally we want to re-enable all levels */
-
-       else if (in_softirq()) {
-
-               /* However if this is case of L1 interrupted by L2,
-                * re-enabling both may cause whaco L1-L2-L1 scenario
-                * because ARC700 allows level 1 to interrupt an active L2 ISR
-                * Thus we disable both
-                * However some code, executing in soft ISR wants some IRQs
-                * to be enabled so we re-enable L2 only
-                *
-                * How do we determine L1 intr by L2
-                *  -A2 is set (means in L2 ISR)
-                *  -E1 is set in this ISR's pt_regs->status32 which is
-                *      saved copy of status32_l2 when l2 ISR happened
-                */
-               struct pt_regs *pt = get_irq_regs();
-               if ((flags & STATUS_A2_MASK) && pt &&
-                   (pt->status32 & STATUS_A1_MASK)) {
-                       /*flags &= ~(STATUS_E1_MASK | STATUS_E2_MASK); */
-                       flags &= ~(STATUS_E1_MASK);
-               }
-       }
-
-       arch_local_irq_restore(flags);
-}
-
-#else /* ! CONFIG_ARC_COMPACT_IRQ_LEVELS */
-
-/*
- * Simpler version for only 1 level of interrupt
- * Here we only Worry about Level 1 Bits
- */
-void arch_local_irq_enable(void)
-{
-       unsigned long flags;
-
-       /*
-        * ARC IDE Drivers tries to re-enable interrupts from hard-isr
-        * context which is simply wrong
-        */
-       if (in_irq()) {
-               WARN_ONCE(1, "IRQ enabled from hard-isr");
-               return;
-       }
-
-       flags = arch_local_save_flags();
-       flags |= (STATUS_E1_MASK | STATUS_E2_MASK);
-       arch_local_irq_restore(flags);
-}
-#endif
-EXPORT_SYMBOL(arch_local_irq_enable);
diff --git a/arch/arc/kernel/mcip.c b/arch/arc/kernel/mcip.c
new file mode 100644 (file)
index 0000000..30284e8
--- /dev/null
@@ -0,0 +1,341 @@
+/*
+ * ARC ARConnect (MultiCore IP) support (formerly known as MCIP)
+ *
+ * Copyright (C) 2013 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/smp.h>
+#include <linux/irq.h>
+#include <linux/spinlock.h>
+#include <asm/mcip.h>
+
+static char smp_cpuinfo_buf[128];
+static int idu_detected;
+
+static DEFINE_RAW_SPINLOCK(mcip_lock);
+
+/*
+ * Any SMP specific init any CPU does when it comes up.
+ * Here we setup the CPU to enable Inter-Processor-Interrupts
+ * Called for each CPU
+ * -Master      : init_IRQ()
+ * -Other(s)    : start_kernel_secondary()
+ */
+void mcip_init_smp(unsigned int cpu)
+{
+       smp_ipi_irq_setup(cpu, IPI_IRQ);
+}
+
+static void mcip_ipi_send(int cpu)
+{
+       unsigned long flags;
+       int ipi_was_pending;
+
+       /*
+        * NOTE: We must spin here if the other cpu hasn't yet
+        * serviced a previous message. This can burn lots
+        * of time, but we MUST follow this protocol or
+        * ipi messages can be lost!!!
+        * Also, we must release the lock in this loop because
+        * the other side may get to this same loop and not
+        * be able to ack -- thus causing deadlock.
+        */
+
+       do {
+               raw_spin_lock_irqsave(&mcip_lock, flags);
+               __mcip_cmd(CMD_INTRPT_READ_STATUS, cpu);
+               ipi_was_pending = read_aux_reg(ARC_REG_MCIP_READBACK);
+               if (ipi_was_pending == 0)
+                       break; /* break out but keep lock */
+               raw_spin_unlock_irqrestore(&mcip_lock, flags);
+       } while (1);
+
+       __mcip_cmd(CMD_INTRPT_GENERATE_IRQ, cpu);
+       raw_spin_unlock_irqrestore(&mcip_lock, flags);
+
+#ifdef CONFIG_ARC_IPI_DBG
+       if (ipi_was_pending)
+               pr_info("IPI ACK delayed from cpu %d\n", cpu);
+#endif
+}
+
+static void mcip_ipi_clear(int irq)
+{
+       unsigned int cpu, c;
+       unsigned long flags;
+       unsigned int __maybe_unused copy;
+
+       raw_spin_lock_irqsave(&mcip_lock, flags);
+
+       /* Who sent the IPI */
+       __mcip_cmd(CMD_INTRPT_CHECK_SOURCE, 0);
+
+       copy = cpu = read_aux_reg(ARC_REG_MCIP_READBACK);       /* 1,2,4,8... */
+
+       /*
+        * In rare case, multiple concurrent IPIs sent to same target can
+        * possibly be coalesced by MCIP into 1 asserted IRQ, so @cpu can be
+        * "vectored" (multiple bits set) as opposed to the typical single bit
+        */
+       do {
+               c = __ffs(cpu);                 /* 0,1,2,3 */
+               __mcip_cmd(CMD_INTRPT_GENERATE_ACK, c);
+               cpu &= ~(1U << c);
+       } while (cpu);
+
+       raw_spin_unlock_irqrestore(&mcip_lock, flags);
+
+#ifdef CONFIG_ARC_IPI_DBG
+       if (c != __ffs(copy))
+               pr_info("IPIs from %x coalesced to %x\n",
+                       copy, raw_smp_processor_id());
+#endif
+}
+
+volatile int wake_flag;
+
+static void mcip_wakeup_cpu(int cpu, unsigned long pc)
+{
+       BUG_ON(cpu == 0);
+       wake_flag = cpu;
+}
+
+void arc_platform_smp_wait_to_boot(int cpu)
+{
+       while (wake_flag != cpu)
+               ;
+
+       wake_flag = 0;
+       __asm__ __volatile__("j @first_lines_of_secondary       \n");
+}
+
+struct plat_smp_ops plat_smp_ops = {
+       .info           = smp_cpuinfo_buf,
+       .cpu_kick       = mcip_wakeup_cpu,
+       .ipi_send       = mcip_ipi_send,
+       .ipi_clear      = mcip_ipi_clear,
+};
+
+void mcip_init_early_smp(void)
+{
+#define IS_AVAIL1(var, str)    ((var) ? str : "")
+
+       struct mcip_bcr {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+               unsigned int pad3:8,
+                            idu:1, llm:1, num_cores:6,
+                            iocoh:1,  grtc:1, dbg:1, pad2:1,
+                            msg:1, sem:1, ipi:1, pad:1,
+                            ver:8;
+#else
+               unsigned int ver:8,
+                            pad:1, ipi:1, sem:1, msg:1,
+                            pad2:1, dbg:1, grtc:1, iocoh:1,
+                            num_cores:6, llm:1, idu:1,
+                            pad3:8;
+#endif
+       } mp;
+
+       READ_BCR(ARC_REG_MCIP_BCR, mp);
+
+       sprintf(smp_cpuinfo_buf,
+               "Extn [SMP]\t: ARConnect (v%d): %d cores with %s%s%s%s\n",
+               mp.ver, mp.num_cores,
+               IS_AVAIL1(mp.ipi, "IPI "),
+               IS_AVAIL1(mp.idu, "IDU "),
+               IS_AVAIL1(mp.dbg, "DEBUG "),
+               IS_AVAIL1(mp.grtc, "GRTC"));
+
+       idu_detected = mp.idu;
+
+       if (mp.dbg) {
+               __mcip_cmd_data(CMD_DEBUG_SET_SELECT, 0, 0xf);
+               __mcip_cmd_data(CMD_DEBUG_SET_MASK, 0xf, 0xf);
+       }
+
+       if (IS_ENABLED(CONFIG_ARC_HAS_GRTC) && !mp.grtc)
+               panic("kernel trying to use non-existent GRTC\n");
+}
+
+/***************************************************************************
+ * ARCv2 Interrupt Distribution Unit (IDU)
+ *
+ * Connects external "COMMON" IRQs to core intc, providing:
+ *  -dynamic routing (IRQ affinity)
+ *  -load balancing (Round Robin interrupt distribution)
+ *  -1:N distribution
+ *
+ * It physically resides in the MCIP hw block
+ */
+
+#include <linux/irqchip.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include "../../drivers/irqchip/irqchip.h"
+
+/*
+ * Set the DEST for @cmn_irq to @cpu_mask (1 bit per core)
+ */
+static void idu_set_dest(unsigned int cmn_irq, unsigned int cpu_mask)
+{
+       __mcip_cmd_data(CMD_IDU_SET_DEST, cmn_irq, cpu_mask);
+}
+
+static void idu_set_mode(unsigned int cmn_irq, unsigned int lvl,
+                          unsigned int distr)
+{
+       union {
+               unsigned int word;
+               struct {
+                       unsigned int distr:2, pad:2, lvl:1, pad2:27;
+               };
+       } data;
+
+       data.distr = distr;
+       data.lvl = lvl;
+       __mcip_cmd_data(CMD_IDU_SET_MODE, cmn_irq, data.word);
+}
+
+static void idu_irq_mask(struct irq_data *data)
+{
+       unsigned long flags;
+
+       raw_spin_lock_irqsave(&mcip_lock, flags);
+       __mcip_cmd_data(CMD_IDU_SET_MASK, data->hwirq, 1);
+       raw_spin_unlock_irqrestore(&mcip_lock, flags);
+}
+
+static void idu_irq_unmask(struct irq_data *data)
+{
+       unsigned long flags;
+
+       raw_spin_lock_irqsave(&mcip_lock, flags);
+       __mcip_cmd_data(CMD_IDU_SET_MASK, data->hwirq, 0);
+       raw_spin_unlock_irqrestore(&mcip_lock, flags);
+}
+
+static int
+idu_irq_set_affinity(struct irq_data *d, const struct cpumask *cpumask, bool f)
+{
+       return IRQ_SET_MASK_OK;
+}
+
+static struct irq_chip idu_irq_chip = {
+       .name                   = "MCIP IDU Intc",
+       .irq_mask               = idu_irq_mask,
+       .irq_unmask             = idu_irq_unmask,
+#ifdef CONFIG_SMP
+       .irq_set_affinity       = idu_irq_set_affinity,
+#endif
+
+};
+
+static int idu_first_irq;
+
+static void idu_cascade_isr(unsigned int core_irq, struct irq_desc *desc)
+{
+       struct irq_domain *domain = irq_desc_get_handler_data(desc);
+       unsigned int idu_irq;
+
+       idu_irq = core_irq - idu_first_irq;
+       generic_handle_irq(irq_find_mapping(domain, idu_irq));
+}
+
+static int idu_irq_map(struct irq_domain *d, unsigned int virq, irq_hw_number_t hwirq)
+{
+       irq_set_chip_and_handler(virq, &idu_irq_chip, handle_level_irq);
+       irq_set_status_flags(virq, IRQ_MOVE_PCNTXT);
+
+       return 0;
+}
+
+static int idu_irq_xlate(struct irq_domain *d, struct device_node *n,
+                        const u32 *intspec, unsigned int intsize,
+                        irq_hw_number_t *out_hwirq, unsigned int *out_type)
+{
+       irq_hw_number_t hwirq = *out_hwirq = intspec[0];
+       int distri = intspec[1];
+       unsigned long flags;
+
+       *out_type = IRQ_TYPE_NONE;
+
+       /* XXX: validate distribution scheme against online cpu mask */
+       if (distri == 0) {
+               /* 0 - Round Robin to all cpus, otherwise 1 bit per core */
+               raw_spin_lock_irqsave(&mcip_lock, flags);
+               idu_set_dest(hwirq, BIT(num_online_cpus()) - 1);
+               idu_set_mode(hwirq, IDU_M_TRIG_LEVEL, IDU_M_DISTRI_RR);
+               raw_spin_unlock_irqrestore(&mcip_lock, flags);
+       } else {
+               /*
+                * DEST based distribution for Level Triggered intr can only
+                * have 1 CPU, so generalize it to always contain 1 cpu
+                */
+               int cpu = ffs(distri);
+
+               if (cpu != fls(distri))
+                       pr_warn("IDU irq %lx distri mode set to cpu %x\n",
+                               hwirq, cpu);
+
+               raw_spin_lock_irqsave(&mcip_lock, flags);
+               idu_set_dest(hwirq, cpu);
+               idu_set_mode(hwirq, IDU_M_TRIG_LEVEL, IDU_M_DISTRI_DEST);
+               raw_spin_unlock_irqrestore(&mcip_lock, flags);
+       }
+
+       return 0;
+}
+
+static const struct irq_domain_ops idu_irq_ops = {
+       .xlate  = idu_irq_xlate,
+       .map    = idu_irq_map,
+};
+
+/*
+ * [16, 23]: Statically assigned always private-per-core (Timers, WDT, IPI)
+ * [24, 23+C]: If C > 0 then "C" common IRQs
+ * [24+C, N]: Not statically assigned, private-per-core
+ */
+
+
+static int __init
+idu_of_init(struct device_node *intc, struct device_node *parent)
+{
+       struct irq_domain *domain;
+       /* Read IDU BCR to confirm nr_irqs */
+       int nr_irqs = of_irq_count(intc);
+       int i, irq;
+
+       if (!idu_detected)
+               panic("IDU not detected, but DeviceTree using it");
+
+       pr_info("MCIP: IDU referenced from Devicetree %d irqs\n", nr_irqs);
+
+       domain = irq_domain_add_linear(intc, nr_irqs, &idu_irq_ops, NULL);
+
+       /* Parent interrupts (core-intc) are already mapped */
+
+       for (i = 0; i < nr_irqs; i++) {
+               /*
+                * The parent uplink IRQs (towards core intc), i.e. 24,25,...,
+                * have already been mapped in an earlier step; we parse them
+                * again here only to get the parent virq so the IDU cascade
+                * handler can be installed as the first level ISR
+                */
+               irq = irq_of_parse_and_map(intc, i);
+               if (!i)
+                       idu_first_irq = irq;
+
+               irq_set_handler_data(irq, domain);
+               irq_set_chained_handler(irq, idu_cascade_isr);
+       }
+
+       __mcip_cmd(CMD_IDU_ENABLE, 0);
+
+       return 0;
+}
+IRQCHIP_DECLARE(arcv2_idu_intc, "snps,archs-idu-intc", idu_of_init);
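
Note on the IDU destination encoding used above: DEST takes one bit per core, and a distribution value of 0 in the DT specifier means round-robin over all online CPUs, which is where the BIT(num_online_cpus()) - 1 expression in idu_irq_xlate() comes from; any other value is treated as a single-CPU choice. A minimal C sketch of that mask, assuming contiguous CPU ids starting at 0 (the helper name is illustrative, not from the kernel):

/* Illustrative only: round-robin destination mask covering cpus 0..n-1,
 * i.e. the BIT(num_online_cpus()) - 1 used in idu_irq_xlate(). */
static unsigned int idu_rr_dest_mask(unsigned int nr_online)
{
        return (1U << nr_online) - 1;   /* e.g. 4 online cpus -> 0xF */
}
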
index fd2ec50102f201254b1e5e51b4bbc7fa93fba95a..1287388c258ace8ef57f1030a46acb65f0fbb87e 100644 (file)
@@ -266,10 +266,9 @@ static int arc_pmu_add(struct perf_event *event, int flags)
 
 static int arc_pmu_device_probe(struct platform_device *pdev)
 {
-       struct arc_pmu *arc_pmu;
        struct arc_reg_pct_build pct_bcr;
        struct arc_reg_cc_build cc_bcr;
-       int i, j, ret;
+       int i, j;
 
        union cc_name {
                struct {
@@ -336,9 +335,7 @@ static int arc_pmu_device_probe(struct platform_device *pdev)
        /* ARC 700 PMU does not support sampling events */
        arc_pmu->pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT;
 
-       ret = perf_pmu_register(&arc_pmu->pmu, pdev->name, PERF_TYPE_RAW);
-
-       return ret;
+       return perf_pmu_register(&arc_pmu->pmu, pdev->name, PERF_TYPE_RAW);
 }
 
 #ifdef CONFIG_OF
index e095c557afdddc3aefce744c97d5d7bb20a6f2e6..44092456776f8e9cd929b36e5083fbc4074dac9b 100644 (file)
@@ -44,7 +44,11 @@ SYSCALL_DEFINE0(arc_gettls)
 void arch_cpu_idle(void)
 {
        /* sleep, but enable all interrupts before committing */
-       __asm__("sleep 0x3");
+       if (is_isa_arcompact()) {
+               __asm__("sleep 0x3");
+       } else {
+               __asm__("sleep 0x10");
+       }
 }
 
 asmlinkage void ret_from_fork(void);
@@ -166,8 +170,7 @@ void start_thread(struct pt_regs * regs, unsigned long pc, unsigned long usp)
         * [L] ZOL loop inhibited to begin with - cleared by a LP insn
         * Interrupts enabled
         */
-       regs->status32 = STATUS_U_MASK | STATUS_L_MASK |
-                        STATUS_E1_MASK | STATUS_E2_MASK;
+       regs->status32 = STATUS_U_MASK | STATUS_L_MASK | ISA_INIT_STATUS_BITS;
 
        /* bogus seed values for debugging */
        regs->lp_start = 0x10;
@@ -197,8 +200,11 @@ int elf_check_arch(const struct elf32_hdr *x)
 {
        unsigned int eflags;
 
-       if (x->e_machine != EM_ARCOMPACT)
+       if (x->e_machine != EM_ARC_INUSE) {
+               pr_err("ELF not built for %s ISA\n",
+                       is_isa_arcompact() ? "ARCompact":"ARCv2");
                return 0;
+       }
 
        eflags = x->e_flags;
        if ((eflags & EF_ARC_OSABI_MSK) < EF_ARC_OSABI_CURRENT) {
index 13b3ffb27a384f8c214bfe110943ba020b0c297f..4442204fe238e656886384c5133d4af85492dabb 100644 (file)
@@ -47,10 +47,47 @@ static int genregs_get(struct task_struct *target,
                        offsetof(struct user_regs_struct, LOC) + 4);
 
        REG_O_ZERO(pad);
-       REG_O_CHUNK(scratch, callee, ptregs);
+       REG_O_ONE(scratch.bta, &ptregs->bta);
+       REG_O_ONE(scratch.lp_start, &ptregs->lp_start);
+       REG_O_ONE(scratch.lp_end, &ptregs->lp_end);
+       REG_O_ONE(scratch.lp_count, &ptregs->lp_count);
+       REG_O_ONE(scratch.status32, &ptregs->status32);
+       REG_O_ONE(scratch.ret, &ptregs->ret);
+       REG_O_ONE(scratch.blink, &ptregs->blink);
+       REG_O_ONE(scratch.fp, &ptregs->fp);
+       REG_O_ONE(scratch.gp, &ptregs->r26);
+       REG_O_ONE(scratch.r12, &ptregs->r12);
+       REG_O_ONE(scratch.r11, &ptregs->r11);
+       REG_O_ONE(scratch.r10, &ptregs->r10);
+       REG_O_ONE(scratch.r9, &ptregs->r9);
+       REG_O_ONE(scratch.r8, &ptregs->r8);
+       REG_O_ONE(scratch.r7, &ptregs->r7);
+       REG_O_ONE(scratch.r6, &ptregs->r6);
+       REG_O_ONE(scratch.r5, &ptregs->r5);
+       REG_O_ONE(scratch.r4, &ptregs->r4);
+       REG_O_ONE(scratch.r3, &ptregs->r3);
+       REG_O_ONE(scratch.r2, &ptregs->r2);
+       REG_O_ONE(scratch.r1, &ptregs->r1);
+       REG_O_ONE(scratch.r0, &ptregs->r0);
+       REG_O_ONE(scratch.sp, &ptregs->sp);
+
        REG_O_ZERO(pad2);
-       REG_O_CHUNK(callee, efa, cregs);
-       REG_O_CHUNK(efa, stop_pc, &target->thread.fault_address);
+
+       REG_O_ONE(callee.r25, &cregs->r25);
+       REG_O_ONE(callee.r24, &cregs->r24);
+       REG_O_ONE(callee.r23, &cregs->r23);
+       REG_O_ONE(callee.r22, &cregs->r22);
+       REG_O_ONE(callee.r21, &cregs->r21);
+       REG_O_ONE(callee.r20, &cregs->r20);
+       REG_O_ONE(callee.r19, &cregs->r19);
+       REG_O_ONE(callee.r18, &cregs->r18);
+       REG_O_ONE(callee.r17, &cregs->r17);
+       REG_O_ONE(callee.r16, &cregs->r16);
+       REG_O_ONE(callee.r15, &cregs->r15);
+       REG_O_ONE(callee.r14, &cregs->r14);
+       REG_O_ONE(callee.r13, &cregs->r13);
+
+       REG_O_ONE(efa, &target->thread.fault_address);
 
        if (!ret) {
                if (in_brkpt_trap(ptregs)) {
@@ -97,12 +134,51 @@ static int genregs_set(struct task_struct *target,
                        offsetof(struct user_regs_struct, LOC) + 4);
 
        REG_IGNORE_ONE(pad);
-       /* TBD: disallow updates to STATUS32 etc*/
-       REG_IN_CHUNK(scratch, pad2, ptregs);    /* pt_regs[bta..sp] */
+
+       REG_IN_ONE(scratch.bta, &ptregs->bta);
+       REG_IN_ONE(scratch.lp_start, &ptregs->lp_start);
+       REG_IN_ONE(scratch.lp_end, &ptregs->lp_end);
+       REG_IN_ONE(scratch.lp_count, &ptregs->lp_count);
+
+       REG_IGNORE_ONE(scratch.status32);
+
+       REG_IN_ONE(scratch.ret, &ptregs->ret);
+       REG_IN_ONE(scratch.blink, &ptregs->blink);
+       REG_IN_ONE(scratch.fp, &ptregs->fp);
+       REG_IN_ONE(scratch.gp, &ptregs->r26);
+       REG_IN_ONE(scratch.r12, &ptregs->r12);
+       REG_IN_ONE(scratch.r11, &ptregs->r11);
+       REG_IN_ONE(scratch.r10, &ptregs->r10);
+       REG_IN_ONE(scratch.r9, &ptregs->r9);
+       REG_IN_ONE(scratch.r8, &ptregs->r8);
+       REG_IN_ONE(scratch.r7, &ptregs->r7);
+       REG_IN_ONE(scratch.r6, &ptregs->r6);
+       REG_IN_ONE(scratch.r5, &ptregs->r5);
+       REG_IN_ONE(scratch.r4, &ptregs->r4);
+       REG_IN_ONE(scratch.r3, &ptregs->r3);
+       REG_IN_ONE(scratch.r2, &ptregs->r2);
+       REG_IN_ONE(scratch.r1, &ptregs->r1);
+       REG_IN_ONE(scratch.r0, &ptregs->r0);
+       REG_IN_ONE(scratch.sp, &ptregs->sp);
+
        REG_IGNORE_ONE(pad2);
-       REG_IN_CHUNK(callee, efa, cregs);       /* callee_regs[r25..r13] */
+
+       REG_IN_ONE(callee.r25, &cregs->r25);
+       REG_IN_ONE(callee.r24, &cregs->r24);
+       REG_IN_ONE(callee.r23, &cregs->r23);
+       REG_IN_ONE(callee.r22, &cregs->r22);
+       REG_IN_ONE(callee.r21, &cregs->r21);
+       REG_IN_ONE(callee.r20, &cregs->r20);
+       REG_IN_ONE(callee.r19, &cregs->r19);
+       REG_IN_ONE(callee.r18, &cregs->r18);
+       REG_IN_ONE(callee.r17, &cregs->r17);
+       REG_IN_ONE(callee.r16, &cregs->r16);
+       REG_IN_ONE(callee.r15, &cregs->r15);
+       REG_IN_ONE(callee.r14, &cregs->r14);
+       REG_IN_ONE(callee.r13, &cregs->r13);
+
        REG_IGNORE_ONE(efa);                    /* efa update invalid */
-       REG_IGNORE_ONE(stop_pc);                        /* PC updated via @ret */
+       REG_IGNORE_ONE(stop_pc);                /* PC updated via @ret */
 
        return ret;
 }
@@ -124,7 +200,7 @@ static const struct user_regset arc_regsets[] = {
 
 static const struct user_regset_view user_arc_view = {
        .name           = UTS_MACHINE,
-       .e_machine      = EM_ARCOMPACT,
+       .e_machine      = EM_ARC_INUSE,
        .regsets        = arc_regsets,
        .n              = ARRAY_SIZE(arc_regsets)
 };
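
The genregs hunks above stop copying pt_regs as one contiguous chunk and instead move one field at a time, since the in-kernel pt_regs layout no longer lines up member for member with struct user_regs_struct across ARCompact/ARCv2. A rough sketch of what a per-field copy-out helper can look like on top of the generic regset API; this is an assumption for illustration only, not the kernel's actual REG_O_ONE definition:

/* Hypothetical helper: copy one 32-bit value to the offset that
 * 'field' occupies inside struct user_regs_struct. */
#define COPY_OUT_ONE(field, src)                                          \
        do {                                                              \
                if (!ret)                                                 \
                        ret = user_regset_copyout(&pos, &count, &kbuf,    \
                                &ubuf, (src),                             \
                                offsetof(struct user_regs_struct, field), \
                                offsetof(struct user_regs_struct, field) + 4); \
        } while (0)
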
index 1d167c6df8caae8a48d59b2aa08539e9f20b51d6..a3d186211ed367bcf852718682a62f24b555f3de 100644 (file)
@@ -30,6 +30,8 @@
 
 #define FIX_PTR(x)  __asm__ __volatile__(";" : "+r"(x))
 
+unsigned int intr_to_DE_cnt;
+
 /* Part of U-boot ABI: see head.S */
 int __initdata uboot_tag;
 char __initdata *uboot_arg;
@@ -54,7 +56,7 @@ static void read_arc_build_cfg_regs(void)
        cpu->vec_base = read_aux_reg(AUX_INTR_VEC_BASE);
 
        READ_BCR(ARC_REG_D_UNCACH_BCR, uncached_space);
-       cpu->uncached_base = uncached_space.start << 24;
+       BUG_ON((uncached_space.start << 24) != ARC_UNCACHED_ADDR_SPACE);
 
        READ_BCR(ARC_REG_MUL_BCR, cpu->extn_mpy);
 
@@ -96,7 +98,7 @@ static void read_arc_build_cfg_regs(void)
        read_decode_mmu_bcr();
        read_decode_cache_bcr();
 
-       {
+       if (is_isa_arcompact()) {
                struct bcr_fp_arcompact sp, dp;
                struct bcr_bpu_arcompact bpu;
 
@@ -112,6 +114,19 @@ static void read_arc_build_cfg_regs(void)
                        cpu->bpu.num_cache = 256 << (bpu.ent - 1);
                        cpu->bpu.num_pred = 256 << (bpu.ent - 1);
                }
+       } else {
+               struct bcr_fp_arcv2 spdp;
+               struct bcr_bpu_arcv2 bpu;
+
+               READ_BCR(ARC_REG_FP_V2_BCR, spdp);
+               cpu->extn.fpu_sp = spdp.sp ? 1 : 0;
+               cpu->extn.fpu_dp = spdp.dp ? 1 : 0;
+
+               READ_BCR(ARC_REG_BPU_BCR, bpu);
+               cpu->bpu.ver = bpu.ver;
+               cpu->bpu.full = bpu.ft;
+               cpu->bpu.num_cache = 256 << bpu.bce;
+               cpu->bpu.num_pred = 2048 << bpu.pte;
        }
 
        READ_BCR(ARC_REG_AP_BCR, bcr);
@@ -131,6 +146,7 @@ static const struct cpuinfo_data arc_cpu_tbl[] = {
        { {0x30, "ARC 700"      }, 0x33},
        { {0x34, "ARC 700 R4.10"}, 0x34},
        { {0x35, "ARC 700 R4.11"}, 0x35},
+       { {0x50, "ARC HS38"     }, 0x51},
        { {0x00, NULL           } }
 };
 
@@ -149,13 +165,17 @@ static char *arc_cpu_mumbojumbo(int cpu_id, char *buf, int len)
 
        FIX_PTR(cpu);
 
-       {
+       if (is_isa_arcompact()) {
                isa_nm = "ARCompact";
                be = IS_ENABLED(CONFIG_CPU_BIG_ENDIAN);
 
                atomic = cpu->isa.atomic1;
                if (!cpu->isa.ver)      /* ISA BCR absent, use Kconfig info */
                        atomic = IS_ENABLED(CONFIG_ARC_HAS_LLSC);
+       } else {
+               isa_nm = "ARCv2";
+               be = cpu->isa.be;
+               atomic = cpu->isa.atomic;
        }
 
        n += scnprintf(buf + n, len - n,
@@ -183,16 +203,34 @@ static char *arc_cpu_mumbojumbo(int cpu_id, char *buf, int len)
        n += scnprintf(buf + n, len - n, "Timers\t\t: %s%s%s%s\nISA Extn\t: ",
                       IS_AVAIL1(cpu->timers.t0, "Timer0 "),
                       IS_AVAIL1(cpu->timers.t1, "Timer1 "),
-                      IS_AVAIL2(cpu->timers.rtsc, "64-bit RTSC ", CONFIG_ARC_HAS_RTSC));
+                      IS_AVAIL2(cpu->timers.rtc, "64-bit RTC ",
+                                CONFIG_ARC_HAS_RTC));
 
-       n += i = scnprintf(buf + n, len - n, "%s%s",
-                          IS_AVAIL2(atomic, "atomic ", CONFIG_ARC_HAS_LLSC));
+       n += i = scnprintf(buf + n, len - n, "%s%s%s%s%s",
+                          IS_AVAIL2(atomic, "atomic ", CONFIG_ARC_HAS_LLSC),
+                          IS_AVAIL2(cpu->isa.ldd, "ll64 ", CONFIG_ARC_HAS_LL64),
+                          IS_AVAIL1(cpu->isa.unalign, "unalign (not used)"));
 
        if (i)
                n += scnprintf(buf + n, len - n, "\n\t\t: ");
 
+       if (cpu->extn_mpy.ver) {
+               if (cpu->extn_mpy.ver <= 0x2) { /* ARCompact */
+                       n += scnprintf(buf + n, len - n, "mpy ");
+               } else {
+                       int opt = 2;    /* stock MPY/MPYH */
+
+                       if (cpu->extn_mpy.dsp)  /* OPT 7-9 */
+                               opt = cpu->extn_mpy.dsp + 6;
+
+                       n += scnprintf(buf + n, len - n, "mpy[opt %d] ", opt);
+               }
+               n += scnprintf(buf + n, len - n, "%s",
+                              IS_USED(CONFIG_ARC_HAS_HW_MPY));
+       }
+
        n += scnprintf(buf + n, len - n, "%s%s%s%s%s%s%s%s\n",
-                      IS_AVAIL1(cpu->extn_mpy.ver, "mpy "),
+                      IS_AVAIL1(cpu->isa.div_rem, "div_rem "),
                       IS_AVAIL1(cpu->extn.norm, "norm "),
                       IS_AVAIL1(cpu->extn.barrel, "barrel-shift "),
                       IS_AVAIL1(cpu->extn.swap, "swap "),
@@ -219,7 +257,7 @@ static char *arc_extn_mumbojumbo(int cpu_id, char *buf, int len)
 
        n += scnprintf(buf + n, len - n,
                       "Vector Table\t: %#x\nUncached Base\t: %#x\n",
-                      cpu->vec_base, cpu->uncached_base);
+                      cpu->vec_base, ARC_UNCACHED_ADDR_SPACE);
 
        if (cpu->extn.fpu_sp || cpu->extn.fpu_dp)
                n += scnprintf(buf + n, len - n, "FPU\t\t: %s%s\n",
@@ -254,8 +292,8 @@ static void arc_chk_core_config(void)
        if (!cpu->timers.t1)
                panic("Timer1 is not present!\n");
 
-       if (IS_ENABLED(CONFIG_ARC_HAS_RTSC) && !cpu->timers.rtsc)
-               panic("RTSC is not present\n");
+       if (IS_ENABLED(CONFIG_ARC_HAS_RTC) && !cpu->timers.rtc)
+               panic("RTC is not present\n");
 
 #ifdef CONFIG_ARC_HAS_DCCM
        /*
@@ -323,13 +361,16 @@ static inline int is_kernel(unsigned long addr)
 
 void __init setup_arch(char **cmdline_p)
 {
+#ifdef CONFIG_ARC_UBOOT_SUPPORT
        /* make sure that uboot passed pointer to cmdline/dtb is valid */
        if (uboot_tag && is_kernel((unsigned long)uboot_arg))
                panic("Invalid uboot arg\n");
 
        /* See if u-boot passed an external Device Tree blob */
        machine_desc = setup_machine_fdt(uboot_arg);    /* uboot_tag == 2 */
-       if (!machine_desc) {
+       if (!machine_desc)
+#endif
+       {
                /* No, so try the embedded one */
                machine_desc = setup_machine_fdt(__dtb_start);
                if (!machine_desc)
index 2251fb4bbfd76c4e8ab67477302d1e215017e73d..004b7f0bc76cc58c6988547df1bb8705cd36004d 100644 (file)
@@ -67,7 +67,33 @@ stash_usr_regs(struct rt_sigframe __user *sf, struct pt_regs *regs,
               sigset_t *set)
 {
        int err;
-       err = __copy_to_user(&(sf->uc.uc_mcontext.regs.scratch), regs,
+       struct user_regs_struct uregs;
+
+       uregs.scratch.bta       = regs->bta;
+       uregs.scratch.lp_start  = regs->lp_start;
+       uregs.scratch.lp_end    = regs->lp_end;
+       uregs.scratch.lp_count  = regs->lp_count;
+       uregs.scratch.status32  = regs->status32;
+       uregs.scratch.ret       = regs->ret;
+       uregs.scratch.blink     = regs->blink;
+       uregs.scratch.fp        = regs->fp;
+       uregs.scratch.gp        = regs->r26;
+       uregs.scratch.r12       = regs->r12;
+       uregs.scratch.r11       = regs->r11;
+       uregs.scratch.r10       = regs->r10;
+       uregs.scratch.r9        = regs->r9;
+       uregs.scratch.r8        = regs->r8;
+       uregs.scratch.r7        = regs->r7;
+       uregs.scratch.r6        = regs->r6;
+       uregs.scratch.r5        = regs->r5;
+       uregs.scratch.r4        = regs->r4;
+       uregs.scratch.r3        = regs->r3;
+       uregs.scratch.r2        = regs->r2;
+       uregs.scratch.r1        = regs->r1;
+       uregs.scratch.r0        = regs->r0;
+       uregs.scratch.sp        = regs->sp;
+
+       err = __copy_to_user(&(sf->uc.uc_mcontext.regs.scratch), &uregs.scratch,
                             sizeof(sf->uc.uc_mcontext.regs.scratch));
        err |= __copy_to_user(&sf->uc.uc_sigmask, set, sizeof(sigset_t));
 
@@ -78,14 +104,40 @@ static int restore_usr_regs(struct pt_regs *regs, struct rt_sigframe __user *sf)
 {
        sigset_t set;
        int err;
+       struct user_regs_struct uregs;
 
        err = __copy_from_user(&set, &sf->uc.uc_sigmask, sizeof(set));
        if (!err)
                set_current_blocked(&set);
 
-       err |= __copy_from_user(regs, &(sf->uc.uc_mcontext.regs.scratch),
+       err |= __copy_from_user(&uregs.scratch,
+                               &(sf->uc.uc_mcontext.regs.scratch),
                                sizeof(sf->uc.uc_mcontext.regs.scratch));
 
+       regs->bta       = uregs.scratch.bta;
+       regs->lp_start  = uregs.scratch.lp_start;
+       regs->lp_end    = uregs.scratch.lp_end;
+       regs->lp_count  = uregs.scratch.lp_count;
+       regs->status32  = uregs.scratch.status32;
+       regs->ret       = uregs.scratch.ret;
+       regs->blink     = uregs.scratch.blink;
+       regs->fp        = uregs.scratch.fp;
+       regs->r26       = uregs.scratch.gp;
+       regs->r12       = uregs.scratch.r12;
+       regs->r11       = uregs.scratch.r11;
+       regs->r10       = uregs.scratch.r10;
+       regs->r9        = uregs.scratch.r9;
+       regs->r8        = uregs.scratch.r8;
+       regs->r7        = uregs.scratch.r7;
+       regs->r6        = uregs.scratch.r6;
+       regs->r5        = uregs.scratch.r5;
+       regs->r4        = uregs.scratch.r4;
+       regs->r3        = uregs.scratch.r3;
+       regs->r2        = uregs.scratch.r2;
+       regs->r1        = uregs.scratch.r1;
+       regs->r0        = uregs.scratch.r0;
+       regs->sp        = uregs.scratch.sp;
+
        return err;
 }
 
@@ -284,7 +336,7 @@ static void arc_restart_syscall(struct k_sigaction *ka, struct pt_regs *regs)
                 * their orig user space value when we ret from kernel
                 */
                regs->r0 = regs->orig_r0;
-               regs->ret -= 4;
+               regs->ret -= is_isa_arcv2() ? 2 : 4;
                break;
        }
 }
@@ -325,10 +377,10 @@ void do_signal(struct pt_regs *regs)
                if (regs->r0 == -ERESTARTNOHAND ||
                    regs->r0 == -ERESTARTSYS || regs->r0 == -ERESTARTNOINTR) {
                        regs->r0 = regs->orig_r0;
-                       regs->ret -= 4;
+                       regs->ret -= is_isa_arcv2() ? 2 : 4;
                } else if (regs->r0 == -ERESTART_RESTARTBLOCK) {
                        regs->r8 = __NR_restart_syscall;
-                       regs->ret -= 4;
+                       regs->ret -= is_isa_arcv2() ? 2 : 4;
                }
                syscall_wont_restart(regs);     /* No more restarts */
        }
index 6a400b1b0b62e07b9e0d7b14d549e8a57652d681..be13d12420bad642c5141a58fdc82d5798204b59 100644 (file)
@@ -31,7 +31,7 @@ arch_spinlock_t smp_atomic_ops_lock = __ARCH_SPIN_LOCK_UNLOCKED;
 arch_spinlock_t smp_bitops_lock = __ARCH_SPIN_LOCK_UNLOCKED;
 #endif
 
-struct plat_smp_ops  plat_smp_ops;
+struct plat_smp_ops  __weak plat_smp_ops;
 
 /* XXX: per cpu ? Only needed once in early secondary boot */
 struct task_struct *secondary_idle_tsk;
@@ -182,7 +182,7 @@ int __cpu_up(unsigned int cpu, struct task_struct *idle)
 /*
  * not supported here
  */
-int __init setup_profiling_timer(unsigned int multiplier)
+int setup_profiling_timer(unsigned int multiplier)
 {
        return -EINVAL;
 }
@@ -278,8 +278,10 @@ static void ipi_cpu_stop(void)
        machine_halt();
 }
 
-static inline void __do_IPI(unsigned long msg)
+static inline int __do_IPI(unsigned long msg)
 {
+       int rc = 0;
+
        switch (msg) {
        case IPI_RESCHEDULE:
                scheduler_ipi();
@@ -294,8 +296,10 @@ static inline void __do_IPI(unsigned long msg)
                break;
 
        default:
-               pr_warn("IPI with unexpected msg %ld\n", msg);
+               rc = 1;
        }
+
+       return rc;
 }
 
 /*
@@ -305,6 +309,7 @@ static inline void __do_IPI(unsigned long msg)
 irqreturn_t do_IPI(int irq, void *dev_id)
 {
        unsigned long pending;
+       unsigned long __maybe_unused copy;
 
        pr_debug("IPI [%ld] received on cpu %d\n",
                 *this_cpu_ptr(&ipi_data), smp_processor_id());
@@ -316,11 +321,18 @@ irqreturn_t do_IPI(int irq, void *dev_id)
         * "dequeue" the msg corresponding to this IPI (and possibly other
         * piggybacked msg from elided IPIs: see ipi_send_msg_one() above)
         */
-       pending = xchg(this_cpu_ptr(&ipi_data), 0);
+       copy = pending = xchg(this_cpu_ptr(&ipi_data), 0);
 
        do {
                unsigned long msg = __ffs(pending);
-               __do_IPI(msg);
+               int rc;
+
+               rc = __do_IPI(msg);
+#ifdef CONFIG_ARC_IPI_DBG
+               /* IPI received but no valid @msg */
+               if (rc)
+                       pr_info("IPI with bogus msg %ld in %ld\n", msg, copy);
+#endif
                pending &= ~(1U << msg);
        } while (pending);
 
index 92320d6f737cf5149d0968f5da3cd73a47af9848..001de4ce711eae2b095451a52f68e77db3d81ef4 100644 (file)
@@ -122,19 +122,17 @@ arc_unwind_core(struct task_struct *tsk, struct pt_regs *regs,
        while (1) {
                address = UNW_PC(&frame_info);
 
-               if (address && __kernel_text_address(address)) {
-                       if (consumer_fn(address, arg) == -1)
-                               break;
-               }
+               if (!address || !__kernel_text_address(address))
+                       break;
 
-               ret = arc_unwind(&frame_info);
+               if (consumer_fn(address, arg) == -1)
+                       break;
 
-               if (ret == 0) {
-                       frame_info.regs.r63 = frame_info.regs.r31;
-                       continue;
-               } else {
+               ret = arc_unwind(&frame_info);
+               if (ret)
                        break;
-               }
+
+               frame_info.regs.r63 = frame_info.regs.r31;
        }
 
        return address;         /* return the last address it saw */
index dbe74f418019bf7e498ed95bcdc08149a752ff51..3364d2bbc515471bba6478b8b34a417251ffde56 100644 (file)
@@ -26,6 +26,7 @@
  * while TIMER1 for free running (clocksource)
  *
  * Newer ARC700 cores have 64bit clk fetching RTSC insn, preferred over TIMER1
+ * which however is currently broken
  */
 
 #include <linux/spinlock.h>
@@ -44,6 +45,8 @@
 #include <asm/clk.h>
 #include <asm/mach_desc.h>
 
+#include <asm/mcip.h>
+
 /* Timer related Aux registers */
 #define ARC_REG_TIMER0_LIMIT   0x23    /* timer 0 limit */
 #define ARC_REG_TIMER0_CTRL    0x22    /* timer 0 control */
 
 /********** Clock Source Device *********/
 
-#ifdef CONFIG_ARC_HAS_RTSC
+#ifdef CONFIG_ARC_HAS_GRTC
 
-int arc_counter_setup(void)
+static int arc_counter_setup(void)
 {
-       /*
-        * For SMP this needs to be 0. However Kconfig glue doesn't
-        * enable this option for SMP configs
-        */
        return 1;
 }
 
@@ -75,45 +74,84 @@ static cycle_t arc_counter_read(struct clocksource *cs)
        unsigned long flags;
        union {
 #ifdef CONFIG_CPU_BIG_ENDIAN
-               struct { u32 high, low; };
+               struct { u32 h, l; };
 #else
-               struct { u32 low, high; };
+               struct { u32 l, h; };
 #endif
                cycle_t  full;
        } stamp;
 
-       flags = arch_local_irq_save();
+       local_irq_save(flags);
 
-       __asm__ __volatile(
-       "       .extCoreRegister tsch, 58,  r, cannot_shortcut  \n"
-       "       rtsc %0, 0      \n"
-       "       mov  %1, 0      \n"
-       : "=r" (stamp.low), "=r" (stamp.high));
+       __mcip_cmd(CMD_GRTC_READ_LO, 0);
+       stamp.l = read_aux_reg(ARC_REG_MCIP_READBACK);
+
+       __mcip_cmd(CMD_GRTC_READ_HI, 0);
+       stamp.h = read_aux_reg(ARC_REG_MCIP_READBACK);
 
-       arch_local_irq_restore(flags);
+       local_irq_restore(flags);
 
        return stamp.full;
 }
 
 static struct clocksource arc_counter = {
-       .name   = "ARC RTSC",
-       .rating = 300,
+       .name   = "ARConnect GRTC",
+       .rating = 400,
        .read   = arc_counter_read,
-       .mask   = CLOCKSOURCE_MASK(32),
+       .mask   = CLOCKSOURCE_MASK(64),
        .flags  = CLOCK_SOURCE_IS_CONTINUOUS,
 };
 
-#else /* !CONFIG_ARC_HAS_RTSC */
+#else
+
+#ifdef CONFIG_ARC_HAS_RTC
+
+#define AUX_RTC_CTRL   0x103
+#define AUX_RTC_LOW    0x104
+#define AUX_RTC_HIGH   0x105
 
-static bool is_usable_as_clocksource(void)
+int arc_counter_setup(void)
 {
-#ifdef CONFIG_SMP
-       return 0;
+       write_aux_reg(AUX_RTC_CTRL, 1);
+
+       /* Not usable in SMP */
+       return !IS_ENABLED(CONFIG_SMP);
+}
+
+static cycle_t arc_counter_read(struct clocksource *cs)
+{
+       unsigned long status;
+       union {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+               struct { u32 high, low; };
 #else
-       return 1;
+               struct { u32 low, high; };
 #endif
+               cycle_t  full;
+       } stamp;
+
+
+       __asm__ __volatile(
+       "1:                                             \n"
+       "       lr              %0, [AUX_RTC_LOW]       \n"
+       "       lr              %1, [AUX_RTC_HIGH]      \n"
+       "       lr              %2, [AUX_RTC_CTRL]      \n"
+       "       bbit0.nt        %2, 31, 1b              \n"
+       : "=r" (stamp.low), "=r" (stamp.high), "=r" (status));
+
+       return stamp.full;
 }
 
+static struct clocksource arc_counter = {
+       .name   = "ARCv2 RTC",
+       .rating = 350,
+       .read   = arc_counter_read,
+       .mask   = CLOCKSOURCE_MASK(64),
+       .flags  = CLOCK_SOURCE_IS_CONTINUOUS,
+};
+
+#else /* !CONFIG_ARC_HAS_RTC */
+
 /*
  * set 32bit TIMER1 to keep counting monotonically and wraparound
  */
@@ -123,7 +161,8 @@ int arc_counter_setup(void)
        write_aux_reg(ARC_REG_TIMER1_CNT, 0);
        write_aux_reg(ARC_REG_TIMER1_CTRL, TIMER_CTRL_NH);
 
-       return is_usable_as_clocksource();
+       /* Not usable in SMP */
+       return !IS_ENABLED(CONFIG_SMP);
 }
 
 static cycle_t arc_counter_read(struct clocksource *cs)
@@ -139,6 +178,7 @@ static struct clocksource arc_counter = {
        .flags  = CLOCK_SOURCE_IS_CONTINUOUS,
 };
 
+#endif
 #endif
 
 /********** Clock Event Device *********/
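
The new ARCv2 RTC read path above spins in inline asm until bit 31 of AUX_RTC_CTRL is set, which (as the bbit0 retry implies) flags that the LOW/HIGH halves were sampled as one consistent 64-bit value. Roughly the same loop as a C sketch, for readability only:

/* Sketch of the retry loop behind the inline asm above. */
static u64 rtc_read_consistent(void)
{
        u32 lo, hi, status;

        do {
                lo = read_aux_reg(AUX_RTC_LOW);
                hi = read_aux_reg(AUX_RTC_HIGH);
                status = read_aux_reg(AUX_RTC_CTRL);
        } while (!(status & BIT(31)));  /* retry until the snapshot is coherent */

        return ((u64)hi << 32) | lo;
}
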
index e00a01879025ea4d55be8d404b80cf2d2e53c22b..807f7d61d7a7cf867bca011251729d1164bd3f33 100644 (file)
@@ -14,6 +14,7 @@
 #include <linux/proc_fs.h>
 #include <linux/file.h>
 #include <asm/arcregs.h>
+#include <asm/irqflags.h>
 
 /*
  * Common routine to print scratch regs (r0-r12) or callee regs (r13-r25)
@@ -34,7 +35,10 @@ static noinline void print_reg_file(long *reg_rev, int start_num)
                        n += scnprintf(buf + n, len - n, "\n");
 
                /* because pt_regs has regs reversed: r12..r0, r25..r13 */
-               reg_rev--;
+               if (is_isa_arcv2() && start_num == 0)
+                       reg_rev++;
+               else
+                       reg_rev--;
        }
 
        if (start_num != 0)
@@ -67,15 +71,12 @@ static void print_task_path_n_nm(struct task_struct *tsk, char *buf)
        mmput(mm);
 
        if (exe_file) {
-               path = exe_file->f_path;
-               path_get(&exe_file->f_path);
+               path_nm = file_path(exe_file, buf, 255);
                fput(exe_file);
-               path_nm = d_path(&path, buf, 255);
-               path_put(&path);
        }
 
 done:
-       pr_info("Path: %s\n", path_nm);
+       pr_info("Path: %s\n", !IS_ERR(path_nm) ? path_nm : "?");
 }
 
 static void show_faulting_vma(unsigned long address, char *buf)
@@ -99,8 +100,7 @@ static void show_faulting_vma(unsigned long address, char *buf)
        if (vma && (vma->vm_start <= address)) {
                struct file *file = vma->vm_file;
                if (file) {
-                       struct path *path = &file->f_path;
-                       nm = d_path(path, buf, PAGE_SIZE - 1);
+                       nm = file_path(file, buf, PAGE_SIZE - 1);
                        inode = file_inode(vma->vm_file);
                        dev = inode->i_sb->s_dev;
                        ino = inode->i_ino;
@@ -152,6 +152,15 @@ static void show_ecr_verbose(struct pt_regs *regs)
                                ((cause_code == 0x02) ? "Write" : "EX"));
        } else if (vec == ECR_V_INSN_ERR) {
                pr_cont("Illegal Insn\n");
+#ifdef CONFIG_ISA_ARCV2
+       } else if (vec == ECR_V_MEM_ERR) {
+               if (cause_code == 0x00)
+                       pr_cont("Bus Error from Insn Mem\n");
+               else if (cause_code == 0x10)
+                       pr_cont("Bus Error from Data Mem\n");
+               else
+                       pr_cont("Bus Error, check PRM\n");
+#endif
        } else {
                pr_cont("Check Programmer's Manual\n");
        }
@@ -185,12 +194,20 @@ void show_regs(struct pt_regs *regs)
 
        pr_info("[STAT32]: 0x%08lx", regs->status32);
 
-#define STS_BIT(r, bit)        r->status32 & STATUS_##bit##_MASK ? #bit : ""
-       if (!user_mode(regs))
-               pr_cont(" : %2s %2s %2s %2s %2s\n",
-                       STS_BIT(regs, AE), STS_BIT(regs, A2), STS_BIT(regs, A1),
-                       STS_BIT(regs, E2), STS_BIT(regs, E1));
+#define STS_BIT(r, bit)        r->status32 & STATUS_##bit##_MASK ? #bit" " : ""
 
+#ifdef CONFIG_ISA_ARCOMPACT
+       pr_cont(" : %2s%2s%2s%2s%2s%2s%2s\n",
+                       (regs->status32 & STATUS_U_MASK) ? "U " : "K ",
+                       STS_BIT(regs, DE), STS_BIT(regs, AE),
+                       STS_BIT(regs, A2), STS_BIT(regs, A1),
+                       STS_BIT(regs, E2), STS_BIT(regs, E1));
+#else
+       pr_cont(" : %2s%2s%2s%2s\n",
+                       STS_BIT(regs, IE),
+                       (regs->status32 & STATUS_U_MASK) ? "U " : "K ",
+                       STS_BIT(regs, DE), STS_BIT(regs, AE));
+#endif
        pr_info("BTA: 0x%08lx\t SP: 0x%08lx\t FP: 0x%08lx\n",
                regs->bta, regs->sp, regs->fp);
        pr_info("LPS: 0x%08lx\tLPE: 0x%08lx\tLPC: 0x%08lx\n",
index db46e200baba298cb7d1af1c59f0e26128755395..b1656d15609750910512c9e00799c8d736f665b2 100644 (file)
@@ -5,5 +5,7 @@
 # it under the terms of the GNU General Public License version 2 as
 # published by the Free Software Foundation.
 
-lib-y  := strchr-700.o strcmp.o strcpy-700.o strlen.o
-lib-y  += memcmp.o memcpy-700.o memset.o
+lib-y  := strchr-700.o strcpy-700.o strlen.o memcmp.o
+
+lib-$(CONFIG_ISA_ARCOMPACT)    += memcpy-700.o memset.o strcmp.o
+lib-$(CONFIG_ISA_ARCV2)                += memcpy-archs.o memset-archs.o strcmp-archs.o
index 978bf8314dfb47397b9732235428a5f74fce0447..a4015e7d9ab7aa6862b41ab6138b7d64c715693e 100644 (file)
@@ -24,14 +24,32 @@ ENTRY(memcmp)
        ld      r4,[r0,0]
        ld      r5,[r1,0]
        lsr.f   lp_count,r3,3
+#ifdef CONFIG_ISA_ARCV2
+       /* In ARCv2 a branch can't be the last instruction in a zero overhead
+        * loop.
+        * So we move the branch to the start of the loop, duplicate it
+        * after the end, and set up r12 so that the branch isn't taken
+        *  initially.
+        */
+       mov_s   r12,WORD2
+       lpne    .Loop_end
+       brne    WORD2,r12,.Lodd
+       ld      WORD2,[r0,4]
+#else
        lpne    .Loop_end
        ld_s    WORD2,[r0,4]
+#endif
        ld_s    r12,[r1,4]
        brne    r4,r5,.Leven
        ld.a    r4,[r0,8]
        ld.a    r5,[r1,8]
+#ifdef CONFIG_ISA_ARCV2
+.Loop_end:
+       brne    WORD2,r12,.Lodd
+#else
        brne    WORD2,r12,.Lodd
 .Loop_end:
+#endif
        asl_s   SHIFT,SHIFT,3
        bhs_s   .Last_cmp
        brne    r4,r5,.Leven
@@ -89,7 +107,6 @@ ENTRY(memcmp)
        bset.cs r0,r0,31
 .Lodd:
        cmp_s   WORD2,r12
-
        mov_s   r0,1
        j_s.d   [blink]
        bset.cs r0,r0,31
@@ -100,14 +117,25 @@ ENTRY(memcmp)
        ldb     r4,[r0,0]
        ldb     r5,[r1,0]
        lsr.f   lp_count,r3
+#ifdef CONFIG_ISA_ARCV2
+       mov     r12,r3
        lpne    .Lbyte_end
+       brne    r3,r12,.Lbyte_odd
+#else
+       lpne    .Lbyte_end
+#endif
        ldb_s   r3,[r0,1]
        ldb     r12,[r1,1]
        brne    r4,r5,.Lbyte_even
        ldb.a   r4,[r0,2]
        ldb.a   r5,[r1,2]
+#ifdef CONFIG_ISA_ARCV2
+.Lbyte_end:
+       brne    r3,r12,.Lbyte_odd
+#else
        brne    r3,r12,.Lbyte_odd
 .Lbyte_end:
+#endif
        bcc     .Lbyte_even
        brne    r4,r5,.Lbyte_even
        ldb_s   r3,[r0,1]
diff --git a/arch/arc/lib/memcpy-archs.S b/arch/arc/lib/memcpy-archs.S
new file mode 100644 (file)
index 0000000..1b2b3ac
--- /dev/null
@@ -0,0 +1,236 @@
+/*
+ * Copyright (C) 2014-15 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/linkage.h>
+
+#ifdef __LITTLE_ENDIAN__
+# define SHIFT_1(RX,RY,IMM)    asl     RX, RY, IMM     ; <<
+# define SHIFT_2(RX,RY,IMM)    lsr     RX, RY, IMM     ; >>
+# define MERGE_1(RX,RY,IMM)    asl     RX, RY, IMM
+# define MERGE_2(RX,RY,IMM)
+# define EXTRACT_1(RX,RY,IMM)  and     RX, RY, 0xFFFF
+# define EXTRACT_2(RX,RY,IMM)  lsr     RX, RY, IMM
+#else
+# define SHIFT_1(RX,RY,IMM)    lsr     RX, RY, IMM     ; >>
+# define SHIFT_2(RX,RY,IMM)    asl     RX, RY, IMM     ; <<
+# define MERGE_1(RX,RY,IMM)    asl     RX, RY, IMM     ; <<
+# define MERGE_2(RX,RY,IMM)    asl     RX, RY, IMM     ; <<
+# define EXTRACT_1(RX,RY,IMM)  lsr     RX, RY, IMM
+# define EXTRACT_2(RX,RY,IMM)  lsr     RX, RY, 0x08
+#endif
+
+#ifdef CONFIG_ARC_HAS_LL64
+# define PREFETCH_READ(RX)     prefetch    [RX, 56]
+# define PREFETCH_WRITE(RX)    prefetchw   [RX, 64]
+# define LOADX(DST,RX)         ldd.ab  DST, [RX, 8]
+# define STOREX(SRC,RX)                std.ab  SRC, [RX, 8]
+# define ZOLSHFT               5
+# define ZOLAND                        0x1F
+#else
+# define PREFETCH_READ(RX)     prefetch    [RX, 28]
+# define PREFETCH_WRITE(RX)    prefetchw   [RX, 32]
+# define LOADX(DST,RX)         ld.ab   DST, [RX, 4]
+# define STOREX(SRC,RX)                st.ab   SRC, [RX, 4]
+# define ZOLSHFT               4
+# define ZOLAND                        0xF
+#endif
+
+ENTRY(memcpy)
+       prefetch [r1]           ; Prefetch the read location
+       prefetchw [r0]          ; Prefetch the write location
+       mov.f   0, r2
+;;; if size is zero
+       jz.d    [blink]
+       mov     r3, r0          ; don't clobber ret val
+
+;;; if size <= 8
+       cmp     r2, 8
+       bls.d   @smallchunk
+       mov.f   lp_count, r2
+
+       and.f   r4, r0, 0x03
+       rsub    lp_count, r4, 4
+       lpnz    @aligndestination
+       ;; LOOP BEGIN
+       ldb.ab  r5, [r1,1]
+       sub     r2, r2, 1
+       stb.ab  r5, [r3,1]
+aligndestination:
+
+;;; Check the alignment of the source
+       and.f   r4, r1, 0x03
+       bnz.d   @sourceunaligned
+
+;;; CASE 0: Both source and destination are 32bit aligned
+;;; Convert len to Dwords, unfold x4
+       lsr.f   lp_count, r2, ZOLSHFT
+       lpnz    @copy32_64bytes
+       ;; LOOP START
+       LOADX (r6, r1)
+       PREFETCH_READ (r1)
+       PREFETCH_WRITE (r3)
+       LOADX (r8, r1)
+       LOADX (r10, r1)
+       LOADX (r4, r1)
+       STOREX (r6, r3)
+       STOREX (r8, r3)
+       STOREX (r10, r3)
+       STOREX (r4, r3)
+copy32_64bytes:
+
+       and.f   lp_count, r2, ZOLAND ;Last remaining 31 bytes
+smallchunk:
+       lpnz    @copyremainingbytes
+       ;; LOOP START
+       ldb.ab  r5, [r1,1]
+       stb.ab  r5, [r3,1]
+copyremainingbytes:
+
+       j       [blink]
+;;; END CASE 0
+
+sourceunaligned:
+       cmp     r4, 2
+       beq.d   @unalignedOffby2
+       sub     r2, r2, 1
+
+       bhi.d   @unalignedOffby3
+       ldb.ab  r5, [r1, 1]
+
+;;; CASE 1: The source is unaligned, off by 1
+       ;; Hence I need to read 1 byte for a 16bit alignment
+       ;; and 2 bytes to reach 32bit alignment
+       ldh.ab  r6, [r1, 2]
+       sub     r2, r2, 2
+       ;; Convert to words, unfold x2
+       lsr.f   lp_count, r2, 3
+       MERGE_1 (r6, r6, 8)
+       MERGE_2 (r5, r5, 24)
+       or      r5, r5, r6
+
+       ;; Both src and dst are aligned
+       lpnz    @copy8bytes_1
+       ;; LOOP START
+       ld.ab   r6, [r1, 4]
+       prefetch [r1, 28]       ;Prefetch the next read location
+       ld.ab   r8, [r1,4]
+       prefetchw [r3, 32]      ;Prefetch the next write location
+
+       SHIFT_1 (r7, r6, 24)
+       or      r7, r7, r5
+       SHIFT_2 (r5, r6, 8)
+
+       SHIFT_1 (r9, r8, 24)
+       or      r9, r9, r5
+       SHIFT_2 (r5, r8, 8)
+
+       st.ab   r7, [r3, 4]
+       st.ab   r9, [r3, 4]
+copy8bytes_1:
+
+       ;; Write back the remaining 16bits
+       EXTRACT_1 (r6, r5, 16)
+       sth.ab  r6, [r3, 2]
+       ;; Write back the remaining 8bits
+       EXTRACT_2 (r5, r5, 16)
+       stb.ab  r5, [r3, 1]
+
+       and.f   lp_count, r2, 0x07 ;Last 8bytes
+       lpnz    @copybytewise_1
+       ;; LOOP START
+       ldb.ab  r6, [r1,1]
+       stb.ab  r6, [r3,1]
+copybytewise_1:
+       j       [blink]
+
+unalignedOffby2:
+;;; CASE 2: The source is unaligned, off by 2
+       ldh.ab  r5, [r1, 2]
+       sub     r2, r2, 1
+
+       ;; Both src and dst are aligned
+       ;; Convert to words, unfold x2
+       lsr.f   lp_count, r2, 3
+#ifdef __BIG_ENDIAN__
+       asl.nz  r5, r5, 16
+#endif
+       lpnz    @copy8bytes_2
+       ;; LOOP START
+       ld.ab   r6, [r1, 4]
+       prefetch [r1, 28]       ;Prefetch the next read location
+       ld.ab   r8, [r1,4]
+       prefetchw [r3, 32]      ;Prefetch the next write location
+
+       SHIFT_1 (r7, r6, 16)
+       or      r7, r7, r5
+       SHIFT_2 (r5, r6, 16)
+
+       SHIFT_1 (r9, r8, 16)
+       or      r9, r9, r5
+       SHIFT_2 (r5, r8, 16)
+
+       st.ab   r7, [r3, 4]
+       st.ab   r9, [r3, 4]
+copy8bytes_2:
+
+#ifdef __BIG_ENDIAN__
+       lsr.nz  r5, r5, 16
+#endif
+       sth.ab  r5, [r3, 2]
+
+       and.f   lp_count, r2, 0x07 ;Last 8bytes
+       lpnz    @copybytewise_2
+       ;; LOOP START
+       ldb.ab  r6, [r1,1]
+       stb.ab  r6, [r3,1]
+copybytewise_2:
+       j       [blink]
+
+unalignedOffby3:
+;;; CASE 3: The source is unaligned, off by 3
+;;; Hence, I need to read 1 byte to achieve 32bit alignment
+
+       ;; Both src and dst are aligned
+       ;; Convert to words, unfold x2
+       lsr.f   lp_count, r2, 3
+#ifdef __BIG_ENDIAN__
+       asl.ne  r5, r5, 24
+#endif
+       lpnz    @copy8bytes_3
+       ;; LOOP START
+       ld.ab   r6, [r1, 4]
+       prefetch [r1, 28]       ;Prefetch the next read location
+       ld.ab   r8, [r1,4]
+       prefetch [r3, 32]       ;Prefetch the next write location
+
+       SHIFT_1 (r7, r6, 8)
+       or      r7, r7, r5
+       SHIFT_2 (r5, r6, 24)
+
+       SHIFT_1 (r9, r8, 8)
+       or      r9, r9, r5
+       SHIFT_2 (r5, r8, 24)
+
+       st.ab   r7, [r3, 4]
+       st.ab   r9, [r3, 4]
+copy8bytes_3:
+
+#ifdef __BIG_ENDIAN__
+       lsr.nz  r5, r5, 24
+#endif
+       stb.ab  r5, [r3, 1]
+
+       and.f   lp_count, r2, 0x07 ;Last 8bytes
+       lpnz    @copybytewise_3
+       ;; LOOP START
+       ldb.ab  r6, [r1,1]
+       stb.ab  r6, [r3,1]
+copybytewise_3:
+       j       [blink]
+
+END(memcpy)
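
All three unaligned cases in memcpy above use the same word-merge idea: keep loading aligned words from the source and splice the tail of one with the head of the next (the SHIFT_1/SHIFT_2 pair), so every store to the already-aligned destination is a full word. A little-endian C sketch of the merge step, with an illustrative helper name and the offset restricted to 1..3:

/* Combine two consecutive aligned 32-bit loads into the word that
 * starts 'off' bytes into 'lo' (little-endian, 1 <= off <= 3). */
static unsigned int merge_le(unsigned int lo, unsigned int hi, unsigned int off)
{
        return (lo >> (8 * off)) | (hi << (32 - 8 * off));
}
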
diff --git a/arch/arc/lib/memset-archs.S b/arch/arc/lib/memset-archs.S
new file mode 100644 (file)
index 0000000..92d573c
--- /dev/null
@@ -0,0 +1,93 @@
+/*
+ * Copyright (C) 2014-15 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/linkage.h>
+
+#undef PREALLOC_NOT_AVAIL
+
+#ifdef PREALLOC_NOT_AVAIL
+#define PREWRITE(A,B)  prefetchw [(A),(B)]
+#else
+#define PREWRITE(A,B)  prealloc [(A),(B)]
+#endif
+
+ENTRY(memset)
+       prefetchw [r0]          ; Prefetch the write location
+       mov.f   0, r2
+;;; if size is zero
+       jz.d    [blink]
+       mov     r3, r0          ; don't clobber ret val
+
+;;; if length < 8
+       brls.d.nt       r2, 8, .Lsmallchunk
+       mov.f   lp_count,r2
+
+       and.f   r4, r0, 0x03
+       rsub    lp_count, r4, 4
+       lpnz    @.Laligndestination
+       ;; LOOP BEGIN
+       stb.ab  r1, [r3,1]
+       sub     r2, r2, 1
+.Laligndestination:
+
+;;; Destination is aligned
+       and     r1, r1, 0xFF
+       asl     r4, r1, 8
+       or      r4, r4, r1
+       asl     r5, r4, 16
+       or      r5, r5, r4
+       mov     r4, r5
+
+       sub3    lp_count, r2, 8
+       cmp     r2, 64
+       bmsk.hi r2, r2, 5
+       mov.ls  lp_count, 0
+       add3.hi r2, r2, 8
+
+;;; Convert len to Dwords, unfold x8
+       lsr.f   lp_count, lp_count, 6
+       lpnz    @.Lset64bytes
+       ;; LOOP START
+       PREWRITE(r3, 64)        ;Prefetch the next write location
+       std.ab  r4, [r3, 8]
+       std.ab  r4, [r3, 8]
+       std.ab  r4, [r3, 8]
+       std.ab  r4, [r3, 8]
+       std.ab  r4, [r3, 8]
+       std.ab  r4, [r3, 8]
+       std.ab  r4, [r3, 8]
+       std.ab  r4, [r3, 8]
+.Lset64bytes:
+
+       lsr.f   lp_count, r2, 5 ;Last remaining  max 124 bytes
+       lpnz    .Lset32bytes
+       ;; LOOP START
+       prefetchw   [r3, 32]    ;Prefetch the next write location
+       std.ab  r4, [r3, 8]
+       std.ab  r4, [r3, 8]
+       std.ab  r4, [r3, 8]
+       std.ab  r4, [r3, 8]
+.Lset32bytes:
+
+       and.f   lp_count, r2, 0x1F ;Last remaining 31 bytes
+.Lsmallchunk:
+       lpnz    .Lcopy3bytes
+       ;; LOOP START
+       stb.ab  r1, [r3, 1]
+.Lcopy3bytes:
+
+       j       [blink]
+
+END(memset)
+
+ENTRY(memzero)
+    ; adjust bzero args to memset args
+    mov r2, r1
+    b.d  memset    ;tail call so need to tinker with blink
+    mov r1, 0
+END(memzero)
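
The r4/r5 setup in memset above simply replicates the fill byte across a register so the hot loops can store 32 or 64 bits per iteration instead of single bytes. The same replication as a C sketch:

/* Spread the fill byte c into all four byte lanes of a 32-bit word. */
static unsigned int replicate_byte(unsigned char c)
{
        unsigned int v = c;

        v |= v << 8;            /* 0x000000cc -> 0x0000cccc */
        v |= v << 16;           /* 0x0000cccc -> 0xcccccccc */
        return v;
}
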
diff --git a/arch/arc/lib/strcmp-archs.S b/arch/arc/lib/strcmp-archs.S
new file mode 100644 (file)
index 0000000..4f338ee
--- /dev/null
@@ -0,0 +1,78 @@
+/*
+ * Copyright (C) 2014-15 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/linkage.h>
+
+ENTRY(strcmp)
+       or      r2, r0, r1
+       bmsk_s  r2, r2, 1
+       brne    r2, 0, @.Lcharloop
+
+;;; s1 and s2 are word aligned
+       ld.ab   r2, [r0, 4]
+
+       mov_s   r12, 0x01010101
+       ror     r11, r12
+       .align  4
+.LwordLoop:
+       ld.ab   r3, [r1, 4]
+       ;; Detect NULL char in str1
+       sub     r4, r2, r12
+       ld.ab   r5, [r0, 4]
+       bic     r4, r4, r2
+       and     r4, r4, r11
+       brne.d.nt       r4, 0, .LfoundNULL
+       ;; Check if the read locations are the same
+       cmp     r2, r3
+       beq.d   .LwordLoop
+       mov.eq  r2, r5
+
+       ;; A match is found, spot it out
+#ifdef __LITTLE_ENDIAN__
+       swape   r3, r3
+       mov_s   r0, 1
+       swape   r2, r2
+#else
+       mov_s   r0, 1
+#endif
+       cmp_s   r2, r3
+       j_s.d   [blink]
+       bset.lo r0, r0, 31
+
+       .align 4
+.LfoundNULL:
+#ifdef __BIG_ENDIAN__
+       swape   r4, r4
+       swape   r2, r2
+       swape   r3, r3
+#endif
+       ;; Find null byte
+       ffs     r0, r4
+       bmsk    r2, r2, r0
+       bmsk    r3, r3, r0
+       swape   r2, r2
+       swape   r3, r3
+       ;; make the return value
+       sub.f   r0, r2, r3
+       mov.hi  r0, 1
+       j_s.d   [blink]
+       bset.lo r0, r0, 31
+
+       .align 4
+.Lcharloop:
+       ldb.ab  r2, [r0, 1]
+       ldb.ab  r3, [r1, 1]
+       nop
+       breq    r2, 0, .Lcmpend
+       breq    r2, r3, .Lcharloop
+
+       .align 4
+.Lcmpend:
+       j_s.d   [blink]
+       sub     r0, r2, r3
+END(strcmp)
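
The word loop in strcmp above is the classic zero-byte scan: r12 holds 0x01010101, r11 (its rotate-right by one) holds 0x80808080, and the sub/bic/and sequence is the usual (x - 0x01010101) & ~x & 0x80808080 test for a NUL byte anywhere in the word. As a C sketch, assuming 32-bit words:

/* Nonzero iff some byte of w is 0x00. */
static int word_has_zero_byte(unsigned int w)
{
        return ((w - 0x01010101U) & ~w & 0x80808080U) != 0;
}
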
index ac95cc239c1e47d3d2f5a133b1d66936c6af0b91..7beb941556c3f73567b8174b6dc1cd15c2ef2d49 100644 (file)
@@ -7,4 +7,4 @@
 #
 
 obj-y  := extable.o ioremap.o dma.o fault.o init.o
-obj-y  += tlb.o tlbex.o cache_arc700.o mmap.o
+obj-y  += tlb.o tlbex.o cache.o mmap.o
diff --git a/arch/arc/mm/cache.c b/arch/arc/mm/cache.c
new file mode 100644 (file)
index 0000000..b29d62e
--- /dev/null
@@ -0,0 +1,843 @@
+/*
+ * ARC Cache Management
+ *
+ * Copyright (C) 2014-15 Synopsys, Inc. (www.synopsys.com)
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/mm.h>
+#include <linux/sched.h>
+#include <linux/cache.h>
+#include <linux/mmu_context.h>
+#include <linux/syscalls.h>
+#include <linux/uaccess.h>
+#include <linux/pagemap.h>
+#include <asm/cacheflush.h>
+#include <asm/cachectl.h>
+#include <asm/setup.h>
+
+static int l2_line_sz;
+
+void (*_cache_line_loop_ic_fn)(unsigned long paddr, unsigned long vaddr,
+                              unsigned long sz, const int cacheop);
+
+char *arc_cache_mumbojumbo(int c, char *buf, int len)
+{
+       int n = 0;
+       struct cpuinfo_arc_cache *p;
+
+#define PR_CACHE(p, cfg, str)                                          \
+       if (!(p)->ver)                                                  \
+               n += scnprintf(buf + n, len - n, str"\t\t: N/A\n");     \
+       else                                                            \
+               n += scnprintf(buf + n, len - n,                        \
+                       str"\t\t: %uK, %dway/set, %uB Line, %s%s%s\n",  \
+                       (p)->sz_k, (p)->assoc, (p)->line_len,           \
+                       (p)->vipt ? "VIPT" : "PIPT",                    \
+                       (p)->alias ? " aliasing" : "",                  \
+                       IS_ENABLED(cfg) ? "" : " (not used)");
+
+       PR_CACHE(&cpuinfo_arc700[c].icache, CONFIG_ARC_HAS_ICACHE, "I-Cache");
+       PR_CACHE(&cpuinfo_arc700[c].dcache, CONFIG_ARC_HAS_DCACHE, "D-Cache");
+
+       p = &cpuinfo_arc700[c].slc;
+       if (p->ver)
+               n += scnprintf(buf + n, len - n,
+                       "SLC\t\t: %uK, %uB Line\n", p->sz_k, p->line_len);
+
+       return buf;
+}
+
+/*
+ * Read the Cache Build Configuration Registers, decode them and save into
+ * the cpuinfo structure for later use.
+ * No Validation done here, simply read/convert the BCRs
+ */
+void read_decode_cache_bcr(void)
+{
+       struct cpuinfo_arc_cache *p_ic, *p_dc, *p_slc;
+       unsigned int cpu = smp_processor_id();
+       struct bcr_cache {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+               unsigned int pad:12, line_len:4, sz:4, config:4, ver:8;
+#else
+               unsigned int ver:8, config:4, sz:4, line_len:4, pad:12;
+#endif
+       } ibcr, dbcr;
+
+       struct bcr_generic sbcr;
+
+       struct bcr_slc_cfg {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+               unsigned int pad:24, way:2, lsz:2, sz:4;
+#else
+               unsigned int sz:4, lsz:2, way:2, pad:24;
+#endif
+       } slc_cfg;
+
+       p_ic = &cpuinfo_arc700[cpu].icache;
+       READ_BCR(ARC_REG_IC_BCR, ibcr);
+
+       if (!ibcr.ver)
+               goto dc_chk;
+
+       if (ibcr.ver <= 3) {
+               BUG_ON(ibcr.config != 3);
+               p_ic->assoc = 2;                /* Fixed to 2w set assoc */
+       } else if (ibcr.ver >= 4) {
+               p_ic->assoc = 1 << ibcr.config; /* 1,2,4,8 */
+       }
+
+       p_ic->line_len = 8 << ibcr.line_len;
+       p_ic->sz_k = 1 << (ibcr.sz - 1);
+       p_ic->ver = ibcr.ver;
+       p_ic->vipt = 1;
+       p_ic->alias = p_ic->sz_k/p_ic->assoc/TO_KB(PAGE_SIZE) > 1;
+
+dc_chk:
+       p_dc = &cpuinfo_arc700[cpu].dcache;
+       READ_BCR(ARC_REG_DC_BCR, dbcr);
+
+       if (!dbcr.ver)
+               goto slc_chk;
+
+       if (dbcr.ver <= 3) {
+               BUG_ON(dbcr.config != 2);
+               p_dc->assoc = 4;                /* Fixed to 4w set assoc */
+               p_dc->vipt = 1;
+               p_dc->alias = p_dc->sz_k/p_dc->assoc/TO_KB(PAGE_SIZE) > 1;
+       } else if (dbcr.ver >= 4) {
+               p_dc->assoc = 1 << dbcr.config; /* 1,2,4,8 */
+               p_dc->vipt = 0;
+               p_dc->alias = 0;                /* PIPT so can't VIPT alias */
+       }
+
+       p_dc->line_len = 16 << dbcr.line_len;
+       p_dc->sz_k = 1 << (dbcr.sz - 1);
+       p_dc->ver = dbcr.ver;
+
+slc_chk:
+       if (!is_isa_arcv2())
+               return;
+
+       p_slc = &cpuinfo_arc700[cpu].slc;
+       READ_BCR(ARC_REG_SLC_BCR, sbcr);
+       if (sbcr.ver) {
+               READ_BCR(ARC_REG_SLC_CFG, slc_cfg);
+               p_slc->ver = sbcr.ver;
+               p_slc->sz_k = 128 << slc_cfg.sz;
+               l2_line_sz = p_slc->line_len = (slc_cfg.lsz == 0) ? 128 : 64;
+       }
+}
+
+/*
+ * Line Operation on {I,D}-Cache
+ */
+
+#define OP_INV         0x1
+#define OP_FLUSH       0x2
+#define OP_FLUSH_N_INV 0x3
+#define OP_INV_IC      0x4
+
+/*
+ *             I-Cache Aliasing in ARC700 VIPT caches (MMU v1-v3)
+ *
+ * ARC VIPT I-cache uses vaddr to index into cache and paddr to match the tag.
+ * The orig Cache Management Module "CDU" only required paddr to invalidate a
+ * certain line since it sufficed as index in Non-Aliasing VIPT cache-geometry.
+ * In fact, for distinct V1,V2,P: all of {V1-P},{V2-P},{P-P} would end up fetching
+ * the exact same line.
+ *
+ * However for larger Caches (way-size > page-size) - i.e. in Aliasing config,
+ * paddr alone could not be used to correctly index the cache.
+ *
+ * ------------------
+ * MMU v1/v2 (Fixed Page Size 8k)
+ * ------------------
+ * The solution was to provide the CDU with these additional vaddr bits. These
+ * would be bits [x:13], where x depends on the cache geometry and 13 comes
+ * from the standard page size of 8k.
+ * H/w folks chose [17:13] to be a future-safe range, and more so these 5 bits
+ * of vaddr could easily be "stuffed" into the paddr as bits [4:0] since the
+ * orig 5 bits of paddr were anyway ignored by CDU line ops, as they
+ * represent the offset within a cache line. The advantage of using this
+ * "clumsy" interface for additional info was that no new reg was needed in
+ * the CDU programming model.
+ *
+ * 17:13 represented the max num of bits passable, actual bits needed were
+ * fewer, based on the num-of-aliases possible.
+ * -for 2 alias possibility, only bit 13 needed (32K cache)
+ * -for 4 alias possibility, bits 14:13 needed (64K cache)
+ *
+ * ------------------
+ * MMU v3
+ * ------------------
+ * This ver of MMU supports variable page sizes (1k-16k): although Linux will
+ * only support 8k (default), 16k and 4k.
+ * However from a hardware perspective, smaller page sizes aggravate aliasing,
+ * meaning more vaddr bits are needed to disambiguate the cache-line op;
+ * the existing scheme of piggybacking won't work for certain configurations.
+ * Two new registers, IC_PTAG and DC_PTAG, were introduced.
+ * "tag" bits are provided in PTAG, index bits in existing IVIL/IVDL/FLDL regs
+ */
+
+static inline
+void __cache_line_loop_v2(unsigned long paddr, unsigned long vaddr,
+                         unsigned long sz, const int op)
+{
+       unsigned int aux_cmd;
+       int num_lines;
+       const int full_page = __builtin_constant_p(sz) && sz == PAGE_SIZE;
+
+       if (op == OP_INV_IC) {
+               aux_cmd = ARC_REG_IC_IVIL;
+       } else {
+               /* d$ cmd: INV (discard or wback-n-discard) OR FLUSH (wback) */
+               aux_cmd = op & OP_INV ? ARC_REG_DC_IVDL : ARC_REG_DC_FLDL;
+       }
+
+       /* Ensure we properly floor/ceil the non-line aligned/sized requests
+        * and have @paddr aligned to a cache line and an integral @num_lines.
+        * This however can be avoided for page sized since:
+        *  -@paddr will be cache-line aligned already (being page aligned)
+        *  -@sz will be integral multiple of line size (being page sized).
+        */
+       if (!full_page) {
+               sz += paddr & ~CACHE_LINE_MASK;
+               paddr &= CACHE_LINE_MASK;
+               vaddr &= CACHE_LINE_MASK;
+       }
+
+       num_lines = DIV_ROUND_UP(sz, L1_CACHE_BYTES);
+
+       /* MMUv2 and before: paddr contains stuffed vaddr bits */
+       paddr |= (vaddr >> PAGE_SHIFT) & 0x1F;
+
+       while (num_lines-- > 0) {
+               write_aux_reg(aux_cmd, paddr);
+               paddr += L1_CACHE_BYTES;
+       }
+}
+
+static inline
+void __cache_line_loop_v3(unsigned long paddr, unsigned long vaddr,
+                         unsigned long sz, const int op)
+{
+       unsigned int aux_cmd, aux_tag;
+       int num_lines;
+       const int full_page = __builtin_constant_p(sz) && sz == PAGE_SIZE;
+
+       if (op == OP_INV_IC) {
+               aux_cmd = ARC_REG_IC_IVIL;
+               aux_tag = ARC_REG_IC_PTAG;
+       } else {
+               aux_cmd = op & OP_INV ? ARC_REG_DC_IVDL : ARC_REG_DC_FLDL;
+               aux_tag = ARC_REG_DC_PTAG;
+       }
+
+       /* Ensure we properly floor/ceil the non-line aligned/sized requests
+        * and have @paddr aligned to a cache line and an integral @num_lines.
+        * This however can be avoided for page sized since:
+        *  -@paddr will be cache-line aligned already (being page aligned)
+        *  -@sz will be integral multiple of line size (being page sized).
+        */
+       if (!full_page) {
+               sz += paddr & ~CACHE_LINE_MASK;
+               paddr &= CACHE_LINE_MASK;
+               vaddr &= CACHE_LINE_MASK;
+       }
+       num_lines = DIV_ROUND_UP(sz, L1_CACHE_BYTES);
+
+       /*
+        * MMUv3: cache ops require the paddr in the PTAG reg.
+        * If V->P is constant for the loop, PTAG can be written once outside it.
+        */
+       if (full_page)
+               write_aux_reg(aux_tag, paddr);
+
+       while (num_lines-- > 0) {
+               if (!full_page) {
+                       write_aux_reg(aux_tag, paddr);
+                       paddr += L1_CACHE_BYTES;
+               }
+
+               write_aux_reg(aux_cmd, vaddr);
+               vaddr += L1_CACHE_BYTES;
+       }
+}
+
+/*
+ * In HS38x (MMU v4), although icache is VIPT, only paddr is needed for cache
+ * maintenance ops (in IVIL reg), as long as icache doesn't alias.
+ *
+ * For Aliasing icache, vaddr is also needed (in IVIL), while paddr is
+ * specified in PTAG (similar to MMU v3)
+ */
+static inline
+void __cache_line_loop_v4(unsigned long paddr, unsigned long vaddr,
+                         unsigned long sz, const int cacheop)
+{
+       unsigned int aux_cmd;
+       int num_lines;
+       const int full_page_op = __builtin_constant_p(sz) && sz == PAGE_SIZE;
+
+       if (cacheop == OP_INV_IC) {
+               aux_cmd = ARC_REG_IC_IVIL;
+       } else {
+               /* d$ cmd: INV (discard or wback-n-discard) OR FLUSH (wback) */
+               aux_cmd = cacheop & OP_INV ? ARC_REG_DC_IVDL : ARC_REG_DC_FLDL;
+       }
+
+       /* Ensure we properly floor/ceil the non-line aligned/sized requests
+        * and have @paddr aligned to a cache line and an integral @num_lines.
+        * This however can be avoided for page sized since:
+        *  -@paddr will be cache-line aligned already (being page aligned)
+        *  -@sz will be integral multiple of line size (being page sized).
+        */
+       if (!full_page_op) {
+               sz += paddr & ~CACHE_LINE_MASK;
+               paddr &= CACHE_LINE_MASK;
+       }
+
+       num_lines = DIV_ROUND_UP(sz, L1_CACHE_BYTES);
+
+       while (num_lines-- > 0) {
+               write_aux_reg(aux_cmd, paddr);
+               paddr += L1_CACHE_BYTES;
+       }
+}
+
+#if (CONFIG_ARC_MMU_VER < 3)
+#define __cache_line_loop      __cache_line_loop_v2
+#elif (CONFIG_ARC_MMU_VER == 3)
+#define __cache_line_loop      __cache_line_loop_v3
+#elif (CONFIG_ARC_MMU_VER > 3)
+#define __cache_line_loop      __cache_line_loop_v4
+#endif
+
+#ifdef CONFIG_ARC_HAS_DCACHE
+
+/***************************************************************
+ * Machine specific helpers for Entire D-Cache or Per Line ops
+ */
+
+static inline void __before_dc_op(const int op)
+{
+       if (op == OP_FLUSH_N_INV) {
+               /* Dcache provides 2 cmd: FLUSH or INV
+                * INV in turn has sub-modes: DISCARD or FLUSH-BEFORE
+                * flush-n-inv is achieved by INV cmd but with IM=1
+                * So toggle INV sub-mode depending on op request and default
+                */
+               const unsigned int ctl = ARC_REG_DC_CTRL;
+               write_aux_reg(ctl, read_aux_reg(ctl) | DC_CTRL_INV_MODE_FLUSH);
+       }
+}
+
+static inline void __after_dc_op(const int op)
+{
+       if (op & OP_FLUSH) {
+               const unsigned int ctl = ARC_REG_DC_CTRL;
+               unsigned int reg;
+
+               /* flush / flush-n-inv both wait */
+               while ((reg = read_aux_reg(ctl)) & DC_CTRL_FLUSH_STATUS)
+                       ;
+
+               /* Switch back to default Invalidate mode */
+               if (op == OP_FLUSH_N_INV)
+                       write_aux_reg(ctl, reg & ~DC_CTRL_INV_MODE_FLUSH);
+       }
+}
+
+/*
+ * Operation on Entire D-Cache
+ * @op = {OP_INV, OP_FLUSH, OP_FLUSH_N_INV}
+ * Note that constant propagation ensures all the checks are gone
+ * in generated code
+ */
+static inline void __dc_entire_op(const int op)
+{
+       int aux;
+
+       __before_dc_op(op);
+
+       if (op & OP_INV)        /* Inv or flush-n-inv use same cmd reg */
+               aux = ARC_REG_DC_IVDC;
+       else
+               aux = ARC_REG_DC_FLSH;
+
+       write_aux_reg(aux, 0x1);
+
+       __after_dc_op(op);
+}
+
+/* For kernel mappings cache operation: index is same as paddr */
+#define __dc_line_op_k(p, sz, op)      __dc_line_op(p, p, sz, op)
+
+/*
+ * D-Cache Line ops: Per Line INV (discard or wback+discard) or FLUSH (wback)
+ */
+static inline void __dc_line_op(unsigned long paddr, unsigned long vaddr,
+                               unsigned long sz, const int op)
+{
+       unsigned long flags;
+
+       local_irq_save(flags);
+
+       __before_dc_op(op);
+
+       __cache_line_loop(paddr, vaddr, sz, op);
+
+       __after_dc_op(op);
+
+       local_irq_restore(flags);
+}
+
+#else
+
+#define __dc_entire_op(op)
+#define __dc_line_op(paddr, vaddr, sz, op)
+#define __dc_line_op_k(paddr, sz, op)
+
+#endif /* CONFIG_ARC_HAS_DCACHE */
+
+#ifdef CONFIG_ARC_HAS_ICACHE
+
+static inline void __ic_entire_inv(void)
+{
+       write_aux_reg(ARC_REG_IC_IVIC, 1);
+       read_aux_reg(ARC_REG_IC_CTRL);  /* blocks */
+}
+
+static inline void
+__ic_line_inv_vaddr_local(unsigned long paddr, unsigned long vaddr,
+                         unsigned long sz)
+{
+       unsigned long flags;
+
+       local_irq_save(flags);
+       (*_cache_line_loop_ic_fn)(paddr, vaddr, sz, OP_INV_IC);
+       local_irq_restore(flags);
+}
+
+#ifndef CONFIG_SMP
+
+#define __ic_line_inv_vaddr(p, v, s)   __ic_line_inv_vaddr_local(p, v, s)
+
+#else
+
+struct ic_inv_args {
+       unsigned long paddr, vaddr;
+       int sz;
+};
+
+static void __ic_line_inv_vaddr_helper(void *info)
+{
+        struct ic_inv_args *ic_inv = info;
+
+        __ic_line_inv_vaddr_local(ic_inv->paddr, ic_inv->vaddr, ic_inv->sz);
+}
+
+static void __ic_line_inv_vaddr(unsigned long paddr, unsigned long vaddr,
+                               unsigned long sz)
+{
+       struct ic_inv_args ic_inv = {
+               .paddr = paddr,
+               .vaddr = vaddr,
+               .sz    = sz
+       };
+
+       on_each_cpu(__ic_line_inv_vaddr_helper, &ic_inv, 1);
+}
+
+#endif /* CONFIG_SMP */
+
+#else  /* !CONFIG_ARC_HAS_ICACHE */
+
+#define __ic_entire_inv()
+#define __ic_line_inv_vaddr(pstart, vstart, sz)
+
+#endif /* CONFIG_ARC_HAS_ICACHE */
+
+noinline void slc_op(unsigned long paddr, unsigned long sz, const int op)
+{
+#ifdef CONFIG_ISA_ARCV2
+       unsigned long flags;
+       unsigned int ctrl;
+
+       local_irq_save(flags);
+
+       /*
+        * The Region Flush operation is specified by CTRL.RGN_OP[11..9]
+        *  - b'000 (default) is Flush,
+        *  - b'001 is Invalidate if CTRL.IM == 0
+        *  - b'001 is Flush-n-Invalidate if CTRL.IM == 1
+        */
+       ctrl = read_aux_reg(ARC_REG_SLC_CTRL);
+
+       /* Don't rely on default value of IM bit */
+       if (!(op & OP_FLUSH))           /* i.e. OP_INV */
+               ctrl &= ~SLC_CTRL_IM;   /* clear IM: Disable flush before Inv */
+       else
+               ctrl |= SLC_CTRL_IM;
+
+       if (op & OP_INV)
+               ctrl |= SLC_CTRL_RGN_OP_INV;    /* Inv or flush-n-inv */
+       else
+               ctrl &= ~SLC_CTRL_RGN_OP_INV;
+
+       write_aux_reg(ARC_REG_SLC_CTRL, ctrl);
+
+       /*
+        * Lower bits are ignored, no need to clip
+        * END needs to be set up before START (the latter triggers the operation)
+        * END can't be the same as START, so add (l2_line_sz - 1) to sz
+        */
+       write_aux_reg(ARC_REG_SLC_RGN_END, (paddr + sz + l2_line_sz - 1));
+       write_aux_reg(ARC_REG_SLC_RGN_START, paddr);
+
+       while (read_aux_reg(ARC_REG_SLC_CTRL) & SLC_CTRL_BUSY);
+
+       local_irq_restore(flags);
+#endif
+}
+
+static inline int need_slc_flush(void)
+{
+       return is_isa_arcv2() && l2_line_sz;
+}
+
+/***********************************************************
+ * Exported APIs
+ */
+
+/*
+ * Handle cache congruency of kernel and userspace mappings of page when kernel
+ * writes-to/reads-from
+ *
+ * The idea is to defer flushing of kernel mapping after a WRITE, possible if:
+ *  -dcache is NOT aliasing, hence any U/K-mappings of page are congruent
+ *  -U-mapping doesn't exist yet for page (finalised in update_mmu_cache)
+ *  -In SMP, if hardware caches are coherent
+ *
+ * There's a corollary case, where the kernel READs from a userspace mapped page.
+ * If the U-mapping is not congruent to the K-mapping, the former needs flushing.
+ */
+void flush_dcache_page(struct page *page)
+{
+       struct address_space *mapping;
+
+       if (!cache_is_vipt_aliasing()) {
+               clear_bit(PG_dc_clean, &page->flags);
+               return;
+       }
+
+       /* don't handle anon pages here */
+       mapping = page_mapping(page);
+       if (!mapping)
+               return;
+
+       /*
+        * pagecache page, file not yet mapped to userspace
+        * Make a note that K-mapping is dirty
+        */
+       if (!mapping_mapped(mapping)) {
+               clear_bit(PG_dc_clean, &page->flags);
+       } else if (page_mapped(page)) {
+
+               /* kernel reading from page with U-mapping */
+               unsigned long paddr = (unsigned long)page_address(page);
+               unsigned long vaddr = page->index << PAGE_CACHE_SHIFT;
+
+               if (addr_not_cache_congruent(paddr, vaddr))
+                       __flush_dcache_page(paddr, vaddr);
+       }
+}
+EXPORT_SYMBOL(flush_dcache_page);
+
+void dma_cache_wback_inv(unsigned long start, unsigned long sz)
+{
+       __dc_line_op_k(start, sz, OP_FLUSH_N_INV);
+
+       if (need_slc_flush())
+               slc_op(start, sz, OP_FLUSH_N_INV);
+}
+EXPORT_SYMBOL(dma_cache_wback_inv);
+
+void dma_cache_inv(unsigned long start, unsigned long sz)
+{
+       __dc_line_op_k(start, sz, OP_INV);
+
+       if (need_slc_flush())
+               slc_op(start, sz, OP_INV);
+}
+EXPORT_SYMBOL(dma_cache_inv);
+
+void dma_cache_wback(unsigned long start, unsigned long sz)
+{
+       __dc_line_op_k(start, sz, OP_FLUSH);
+
+       if (need_slc_flush())
+               slc_op(start, sz, OP_FLUSH);
+}
+EXPORT_SYMBOL(dma_cache_wback);
+
+/*
+ * This is the API for making I/D caches consistent when modifying
+ * kernel code (loadable modules, kprobes, kgdb...).
+ * It is called on insmod, with the kernel virtual address for the CODE of
+ * the module. ARC cache maintenance ops require a PHY address, thus we
+ * need to convert the vmalloc addr to a PHY addr
+ */
+void flush_icache_range(unsigned long kstart, unsigned long kend)
+{
+       unsigned int tot_sz;
+
+       WARN(kstart < TASK_SIZE, "%s() can't handle user vaddr", __func__);
+
+       /* Shortcut for bigger flush ranges.
+        * Here we don't care if this was kernel virtual or phy addr
+        */
+       tot_sz = kend - kstart;
+       if (tot_sz > PAGE_SIZE) {
+               flush_cache_all();
+               return;
+       }
+
+       /* Case: Kernel Phy addr (0x8000_0000 onwards) */
+       if (likely(kstart > PAGE_OFFSET)) {
+               /*
+                * The 2nd arg, despite being a paddr, will be used to index the icache.
+                * This is OK since no alternate virtual mappings will exist
+                * given the callers for this case: kprobe/kgdb in built-in
+                * kernel code only.
+                */
+               __sync_icache_dcache(kstart, kstart, kend - kstart);
+               return;
+       }
+
+       /*
+        * Case: Kernel Vaddr (0x7000_0000 to 0x7fff_ffff)
+        * (1) ARC Cache Maintenance ops only take Phy addr, hence special
+        *     handling of kernel vaddr.
+        *
+        * (2) Despite @tot_sz being < PAGE_SIZE (bigger cases handled already),
+        *     it still needs to handle a 2-page scenario, where the range
+        *     straddles 2 virtual pages, hence the need for the loop
+        */
+       while (tot_sz > 0) {
+               unsigned int off, sz;
+               unsigned long phy, pfn;
+
+               off = kstart % PAGE_SIZE;
+               pfn = vmalloc_to_pfn((void *)kstart);
+               phy = (pfn << PAGE_SHIFT) + off;
+               sz = min_t(unsigned int, tot_sz, PAGE_SIZE - off);
+               __sync_icache_dcache(phy, kstart, sz);
+               kstart += sz;
+               tot_sz -= sz;
+       }
+}
+EXPORT_SYMBOL(flush_icache_range);
+
+/*
+ * General purpose helper to make I and D cache lines consistent.
+ * @paddr is phy addr of region
+ * @vaddr is typically user vaddr (breakpoint) or kernel vaddr (vmalloc)
+ *    However in one instance, when called by kprobe (for a breakpt in
+ *    builtin kernel code) @vaddr will be paddr only, meaning CDU operation will
+ *    use a paddr to index the cache (despite VIPT). This is fine since a
+ *    builtin kernel page will not have any virtual mappings.
+ *    kprobe on loadable module will be kernel vaddr.
+ */
+void __sync_icache_dcache(unsigned long paddr, unsigned long vaddr, int len)
+{
+       __dc_line_op(paddr, vaddr, len, OP_FLUSH_N_INV);
+       __ic_line_inv_vaddr(paddr, vaddr, len);
+}
+
+/* wrapper to compile-time eliminate alignment checks in the flush loop */
+void __inv_icache_page(unsigned long paddr, unsigned long vaddr)
+{
+       __ic_line_inv_vaddr(paddr, vaddr, PAGE_SIZE);
+}
+
+/*
+ * wrapper to clear out kernel or userspace mappings of a page
+ * For kernel mappings @vaddr == @paddr
+ */
+void __flush_dcache_page(unsigned long paddr, unsigned long vaddr)
+{
+       __dc_line_op(paddr, vaddr & PAGE_MASK, PAGE_SIZE, OP_FLUSH_N_INV);
+}
+
+noinline void flush_cache_all(void)
+{
+       unsigned long flags;
+
+       local_irq_save(flags);
+
+       __ic_entire_inv();
+       __dc_entire_op(OP_FLUSH_N_INV);
+
+       local_irq_restore(flags);
+
+}
+
+#ifdef CONFIG_ARC_CACHE_VIPT_ALIASING
+
+void flush_cache_mm(struct mm_struct *mm)
+{
+       flush_cache_all();
+}
+
+void flush_cache_page(struct vm_area_struct *vma, unsigned long u_vaddr,
+                     unsigned long pfn)
+{
+       unsigned int paddr = pfn << PAGE_SHIFT;
+
+       u_vaddr &= PAGE_MASK;
+
+       __flush_dcache_page(paddr, u_vaddr);
+
+       if (vma->vm_flags & VM_EXEC)
+               __inv_icache_page(paddr, u_vaddr);
+}
+
+void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
+                      unsigned long end)
+{
+       flush_cache_all();
+}
+
+void flush_anon_page(struct vm_area_struct *vma, struct page *page,
+                    unsigned long u_vaddr)
+{
+       /* TBD: do we really need to clear the kernel mapping? */
+       __flush_dcache_page(page_address(page), u_vaddr);
+       __flush_dcache_page(page_address(page), page_address(page));
+
+}
+
+#endif
+
+void copy_user_highpage(struct page *to, struct page *from,
+       unsigned long u_vaddr, struct vm_area_struct *vma)
+{
+       unsigned long kfrom = (unsigned long)page_address(from);
+       unsigned long kto = (unsigned long)page_address(to);
+       int clean_src_k_mappings = 0;
+
+       /*
+        * If the SRC page was already mapped in userspace AND its U-mapping is
+        * not congruent with the K-mapping, sync the former to the physical page
+        * so that the K-mapping in the memcpy below sees the right data.
+        *
+        * Note that while @u_vaddr refers to DST page's userspace vaddr, it is
+        * equally valid for SRC page as well
+        */
+       if (page_mapped(from) && addr_not_cache_congruent(kfrom, u_vaddr)) {
+               __flush_dcache_page(kfrom, u_vaddr);
+               clean_src_k_mappings = 1;
+       }
+
+       copy_page((void *)kto, (void *)kfrom);
+
+       /*
+        * Mark DST page K-mapping as dirty for a later finalization by
+        * update_mmu_cache(). The finalization could have been done here as
+        * well (given that both vaddr/paddr are available), but
+        * update_mmu_cache() already has code to do that for other
+        * non-copied user pages (e.g. read faults which wire in the
+        * pagecache page directly).
+        */
+       clear_bit(PG_dc_clean, &to->flags);
+
+       /*
+        * if the SRC was already usermapped and non-congruent to the kernel
+        * mapping, sync the kernel mapping back to the physical page
+        */
+       if (clean_src_k_mappings) {
+               __flush_dcache_page(kfrom, kfrom);
+               set_bit(PG_dc_clean, &from->flags);
+       } else {
+               clear_bit(PG_dc_clean, &from->flags);
+       }
+}
+
+void clear_user_page(void *to, unsigned long u_vaddr, struct page *page)
+{
+       clear_page(to);
+       clear_bit(PG_dc_clean, &page->flags);
+}
+
+
+/**********************************************************************
+ * Explicit Cache flush request from user space via syscall
+ * Needed for JITs which generate code on the fly
+ */
+SYSCALL_DEFINE3(cacheflush, uint32_t, start, uint32_t, sz, uint32_t, flags)
+{
+       /* TBD: optimize this */
+       flush_cache_all();
+       return 0;
+}
+
+void arc_cache_init(void)
+{
+       unsigned int __maybe_unused cpu = smp_processor_id();
+       char str[256];
+
+       printk(arc_cache_mumbojumbo(0, str, sizeof(str)));
+
+       if (IS_ENABLED(CONFIG_ARC_HAS_ICACHE)) {
+               struct cpuinfo_arc_cache *ic = &cpuinfo_arc700[cpu].icache;
+
+               if (!ic->ver)
+                       panic("cache support enabled but non-existent cache\n");
+
+               if (ic->line_len != L1_CACHE_BYTES)
+                       panic("ICache line [%d] != kernel Config [%d]",
+                             ic->line_len, L1_CACHE_BYTES);
+
+               if (ic->ver != CONFIG_ARC_MMU_VER)
+                       panic("Cache ver [%d] doesn't match MMU ver [%d]\n",
+                             ic->ver, CONFIG_ARC_MMU_VER);
+
+               /*
+                * In MMU v4 (HS38x) the aliasing icache config uses IVIL/PTAG
+                * pair to provide vaddr/paddr respectively, just as in MMU v3
+                */
+               if (is_isa_arcv2() && ic->alias)
+                       _cache_line_loop_ic_fn = __cache_line_loop_v3;
+               else
+                       _cache_line_loop_ic_fn = __cache_line_loop;
+       }
+
+       if (IS_ENABLED(CONFIG_ARC_HAS_DCACHE)) {
+               struct cpuinfo_arc_cache *dc = &cpuinfo_arc700[cpu].dcache;
+
+               if (!dc->ver)
+                       panic("cache support enabled but non-existent cache\n");
+
+               if (dc->line_len != L1_CACHE_BYTES)
+                       panic("DCache line [%d] != kernel Config [%d]",
+                             dc->line_len, L1_CACHE_BYTES);
+
+               /* check for D-Cache aliasing on ARCompact: ARCv2 has PIPT */
+               if (is_isa_arcompact()) {
+                       int handled = IS_ENABLED(CONFIG_ARC_CACHE_VIPT_ALIASING);
+
+                       if (dc->alias && !handled)
+                               panic("Enable CONFIG_ARC_CACHE_VIPT_ALIASING\n");
+                       else if (!dc->alias && handled)
+                               panic("Disable CONFIG_ARC_CACHE_VIPT_ALIASING\n");
+               }
+       }
+}
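
The aliasing flag computed in read_decode_cache_bcr() above (sz_k/assoc/TO_KB(PAGE_SIZE) > 1) boils down to "the way size spans more than one page", and the alias count is way_size/page_size. A standalone C sketch of that arithmetic (illustration only, not part of the patch), assuming the fixed 2-way I-cache of the older cores and the 8k page size used in the comment block:

#include <stdio.h>

/* Number of VIPT aliases ("page colours") for a given cache geometry:
 * the cache aliases when way_size = size/assoc exceeds the page size,
 * and the alias count is way_size / page_size.
 */
static unsigned int vipt_aliases(unsigned int sz_k, unsigned int assoc,
                                 unsigned int page_k)
{
        unsigned int way_k = sz_k / assoc;

        return (way_k > page_k) ? way_k / page_k : 1;
}

int main(void)
{
        /* 8k pages, 2-way I-cache */
        printf("16K: %u alias(es)\n", vipt_aliases(16, 2, 8));  /* 1: non-aliasing */
        printf("32K: %u alias(es)\n", vipt_aliases(32, 2, 8));  /* 2: needs bit 13 */
        printf("64K: %u alias(es)\n", vipt_aliases(64, 2, 8));  /* 4: needs bits 14:13 */
        return 0;
}

The 32K and 64K rows correspond to the "2 alias" and "4 alias" cases called out in the MMU v1/v2 comment above.
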
diff --git a/arch/arc/mm/cache_arc700.c b/arch/arc/mm/cache_arc700.c
deleted file mode 100644 (file)
index 12b2100..0000000
+++ /dev/null
@@ -1,723 +0,0 @@
-/*
- * ARC700 VIPT Cache Management
- *
- * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- *  vineetg: May 2011: for Non-aliasing VIPT D-cache following can be NOPs
- *   -flush_cache_dup_mm (fork)
- *   -likewise for flush_cache_mm (exit/execve)
- *   -likewise for flush_cache_range,flush_cache_page (munmap, exit, COW-break)
- *
- * vineetg: Apr 2011
- *  -Now that MMU can support larger pg sz (16K), the determiniation of
- *   aliasing shd not be based on assumption of 8k pg
- *
- * vineetg: Mar 2011
- *  -optimised version of flush_icache_range( ) for making I/D coherent
- *   when vaddr is available (agnostic of num of aliases)
- *
- * vineetg: Mar 2011
- *  -Added documentation about I-cache aliasing on ARC700 and the way it
- *   was handled up until MMU V2.
- *  -Spotted a three year old bug when killing the 4 aliases, which needs
- *   bottom 2 bits, so we need to do paddr | {0x00, 0x01, 0x02, 0x03}
- *                        instead of paddr | {0x00, 0x01, 0x10, 0x11}
- *   (Rajesh you owe me one now)
- *
- * vineetg: Dec 2010
- *  -Off-by-one error when computing num_of_lines to flush
- *   This broke signal handling with bionic which uses synthetic sigret stub
- *
- * vineetg: Mar 2010
- *  -GCC can't generate ZOL for core cache flush loops.
- *   Conv them into iterations based as opposed to while (start < end) types
- *
- * Vineetg: July 2009
- *  -In I-cache flush routine we used to chk for aliasing for every line INV.
- *   Instead now we setup routines per cache geometry and invoke them
- *   via function pointers.
- *
- * Vineetg: Jan 2009
- *  -Cache Line flush routines used to flush an extra line beyond end addr
- *   because check was while (end >= start) instead of (end > start)
- *     =Some call sites had to work around by doing -1, -4 etc to end param
- *     =Some callers didnt care. This was spec bad in case of INV routines
- *      which would discard valid data (cause of the horrible ext2 bug
- *      in ARC IDE driver)
- *
- * vineetg: June 11th 2008: Fixed flush_icache_range( )
- *  -Since ARC700 caches are not coherent (I$ doesnt snoop D$) both need
- *   to be flushed, which it was not doing.
- *  -load_module( ) passes vmalloc addr (Kernel Virtual Addr) to the API,
- *   however ARC cache maintenance OPs require PHY addr. Thus need to do
- *   vmalloc_to_phy.
- *  -Also added optimisation there, that for range > PAGE SIZE we flush the
- *   entire cache in one shot rather than line by line. For e.g. a module
- *   with Code sz 600k, old code flushed 600k worth of cache (line-by-line),
- *   while cache is only 16 or 32k.
- */
-
-#include <linux/module.h>
-#include <linux/mm.h>
-#include <linux/sched.h>
-#include <linux/cache.h>
-#include <linux/mmu_context.h>
-#include <linux/syscalls.h>
-#include <linux/uaccess.h>
-#include <linux/pagemap.h>
-#include <asm/cacheflush.h>
-#include <asm/cachectl.h>
-#include <asm/setup.h>
-
-char *arc_cache_mumbojumbo(int c, char *buf, int len)
-{
-       int n = 0;
-
-#define PR_CACHE(p, cfg, str)                                          \
-       if (!(p)->ver)                                                  \
-               n += scnprintf(buf + n, len - n, str"\t\t: N/A\n");     \
-       else                                                            \
-               n += scnprintf(buf + n, len - n,                        \
-                       str"\t\t: %uK, %dway/set, %uB Line, %s%s%s\n",  \
-                       (p)->sz_k, (p)->assoc, (p)->line_len,           \
-                       (p)->vipt ? "VIPT" : "PIPT",                    \
-                       (p)->alias ? " aliasing" : "",                  \
-                       IS_ENABLED(cfg) ? "" : " (not used)");
-
-       PR_CACHE(&cpuinfo_arc700[c].icache, CONFIG_ARC_HAS_ICACHE, "I-Cache");
-       PR_CACHE(&cpuinfo_arc700[c].dcache, CONFIG_ARC_HAS_DCACHE, "D-Cache");
-
-       return buf;
-}
-
-/*
- * Read the Cache Build Confuration Registers, Decode them and save into
- * the cpuinfo structure for later use.
- * No Validation done here, simply read/convert the BCRs
- */
-void read_decode_cache_bcr(void)
-{
-       struct cpuinfo_arc_cache *p_ic, *p_dc;
-       unsigned int cpu = smp_processor_id();
-       struct bcr_cache {
-#ifdef CONFIG_CPU_BIG_ENDIAN
-               unsigned int pad:12, line_len:4, sz:4, config:4, ver:8;
-#else
-               unsigned int ver:8, config:4, sz:4, line_len:4, pad:12;
-#endif
-       } ibcr, dbcr;
-
-       p_ic = &cpuinfo_arc700[cpu].icache;
-       READ_BCR(ARC_REG_IC_BCR, ibcr);
-
-       if (!ibcr.ver)
-               goto dc_chk;
-
-       BUG_ON(ibcr.config != 3);
-       p_ic->assoc = 2;                /* Fixed to 2w set assoc */
-       p_ic->line_len = 8 << ibcr.line_len;
-       p_ic->sz_k = 1 << (ibcr.sz - 1);
-       p_ic->ver = ibcr.ver;
-       p_ic->vipt = 1;
-       p_ic->alias = p_ic->sz_k/p_ic->assoc/TO_KB(PAGE_SIZE) > 1;
-
-dc_chk:
-       p_dc = &cpuinfo_arc700[cpu].dcache;
-       READ_BCR(ARC_REG_DC_BCR, dbcr);
-
-       if (!dbcr.ver)
-               return;
-
-       BUG_ON(dbcr.config != 2);
-       p_dc->assoc = 4;                /* Fixed to 4w set assoc */
-       p_dc->line_len = 16 << dbcr.line_len;
-       p_dc->sz_k = 1 << (dbcr.sz - 1);
-       p_dc->ver = dbcr.ver;
-       p_dc->vipt = 1;
-       p_dc->alias = p_dc->sz_k/p_dc->assoc/TO_KB(PAGE_SIZE) > 1;
-}
-
-/*
- * 1. Validate the Cache Geomtery (compile time config matches hardware)
- * 2. If I-cache suffers from aliasing, setup work arounds (difft flush rtn)
- *    (aliasing D-cache configurations are not supported YET)
- * 3. Enable the Caches, setup default flush mode for D-Cache
- * 3. Calculate the SHMLBA used by user space
- */
-void arc_cache_init(void)
-{
-       unsigned int __maybe_unused cpu = smp_processor_id();
-       char str[256];
-
-       printk(arc_cache_mumbojumbo(0, str, sizeof(str)));
-
-       if (IS_ENABLED(CONFIG_ARC_HAS_ICACHE)) {
-               struct cpuinfo_arc_cache *ic = &cpuinfo_arc700[cpu].icache;
-
-               if (!ic->ver)
-                       panic("cache support enabled but non-existent cache\n");
-
-               if (ic->line_len != L1_CACHE_BYTES)
-                       panic("ICache line [%d] != kernel Config [%d]",
-                             ic->line_len, L1_CACHE_BYTES);
-
-               if (ic->ver != CONFIG_ARC_MMU_VER)
-                       panic("Cache ver [%d] doesn't match MMU ver [%d]\n",
-                             ic->ver, CONFIG_ARC_MMU_VER);
-       }
-
-       if (IS_ENABLED(CONFIG_ARC_HAS_DCACHE)) {
-               struct cpuinfo_arc_cache *dc = &cpuinfo_arc700[cpu].dcache;
-               int handled;
-
-               if (!dc->ver)
-                       panic("cache support enabled but non-existent cache\n");
-
-               if (dc->line_len != L1_CACHE_BYTES)
-                       panic("DCache line [%d] != kernel Config [%d]",
-                             dc->line_len, L1_CACHE_BYTES);
-
-               /* check for D-Cache aliasing */
-               handled = IS_ENABLED(CONFIG_ARC_CACHE_VIPT_ALIASING);
-
-               if (dc->alias && !handled)
-                       panic("Enable CONFIG_ARC_CACHE_VIPT_ALIASING\n");
-               else if (!dc->alias && handled)
-                       panic("Don't need CONFIG_ARC_CACHE_VIPT_ALIASING\n");
-       }
-}
-
-#define OP_INV         0x1
-#define OP_FLUSH       0x2
-#define OP_FLUSH_N_INV 0x3
-#define OP_INV_IC      0x4
-
-/*
- * Common Helper for Line Operations on {I,D}-Cache
- */
-static inline void __cache_line_loop(unsigned long paddr, unsigned long vaddr,
-                                    unsigned long sz, const int cacheop)
-{
-       unsigned int aux_cmd, aux_tag;
-       int num_lines;
-       const int full_page_op = __builtin_constant_p(sz) && sz == PAGE_SIZE;
-
-       if (cacheop == OP_INV_IC) {
-               aux_cmd = ARC_REG_IC_IVIL;
-#if (CONFIG_ARC_MMU_VER > 2)
-               aux_tag = ARC_REG_IC_PTAG;
-#endif
-       }
-       else {
-               /* d$ cmd: INV (discard or wback-n-discard) OR FLUSH (wback) */
-               aux_cmd = cacheop & OP_INV ? ARC_REG_DC_IVDL : ARC_REG_DC_FLDL;
-#if (CONFIG_ARC_MMU_VER > 2)
-               aux_tag = ARC_REG_DC_PTAG;
-#endif
-       }
-
-       /* Ensure we properly floor/ceil the non-line aligned/sized requests
-        * and have @paddr - aligned to cache line and integral @num_lines.
-        * This however can be avoided for page sized since:
-        *  -@paddr will be cache-line aligned already (being page aligned)
-        *  -@sz will be integral multiple of line size (being page sized).
-        */
-       if (!full_page_op) {
-               sz += paddr & ~CACHE_LINE_MASK;
-               paddr &= CACHE_LINE_MASK;
-               vaddr &= CACHE_LINE_MASK;
-       }
-
-       num_lines = DIV_ROUND_UP(sz, L1_CACHE_BYTES);
-
-#if (CONFIG_ARC_MMU_VER <= 2)
-       /* MMUv2 and before: paddr contains stuffed vaddrs bits */
-       paddr |= (vaddr >> PAGE_SHIFT) & 0x1F;
-#else
-       /* if V-P const for loop, PTAG can be written once outside loop */
-       if (full_page_op)
-               write_aux_reg(aux_tag, paddr);
-#endif
-
-       while (num_lines-- > 0) {
-#if (CONFIG_ARC_MMU_VER > 2)
-               /* MMUv3, cache ops require paddr seperately */
-               if (!full_page_op) {
-                       write_aux_reg(aux_tag, paddr);
-                       paddr += L1_CACHE_BYTES;
-               }
-
-               write_aux_reg(aux_cmd, vaddr);
-               vaddr += L1_CACHE_BYTES;
-#else
-               write_aux_reg(aux_cmd, paddr);
-               paddr += L1_CACHE_BYTES;
-#endif
-       }
-}
-
-#ifdef CONFIG_ARC_HAS_DCACHE
-
-/***************************************************************
- * Machine specific helpers for Entire D-Cache or Per Line ops
- */
-
-static inline unsigned int __before_dc_op(const int op)
-{
-       unsigned int reg = reg;
-
-       if (op == OP_FLUSH_N_INV) {
-               /* Dcache provides 2 cmd: FLUSH or INV
-                * INV inturn has sub-modes: DISCARD or FLUSH-BEFORE
-                * flush-n-inv is achieved by INV cmd but with IM=1
-                * So toggle INV sub-mode depending on op request and default
-                */
-               reg = read_aux_reg(ARC_REG_DC_CTRL);
-               write_aux_reg(ARC_REG_DC_CTRL, reg | DC_CTRL_INV_MODE_FLUSH)
-                       ;
-       }
-
-       return reg;
-}
-
-static inline void __after_dc_op(const int op, unsigned int reg)
-{
-       if (op & OP_FLUSH)      /* flush / flush-n-inv both wait */
-               while (read_aux_reg(ARC_REG_DC_CTRL) & DC_CTRL_FLUSH_STATUS);
-
-       /* Switch back to default Invalidate mode */
-       if (op == OP_FLUSH_N_INV)
-               write_aux_reg(ARC_REG_DC_CTRL, reg & ~DC_CTRL_INV_MODE_FLUSH);
-}
-
-/*
- * Operation on Entire D-Cache
- * @cacheop = {OP_INV, OP_FLUSH, OP_FLUSH_N_INV}
- * Note that constant propagation ensures all the checks are gone
- * in generated code
- */
-static inline void __dc_entire_op(const int cacheop)
-{
-       unsigned int ctrl_reg;
-       int aux;
-
-       ctrl_reg = __before_dc_op(cacheop);
-
-       if (cacheop & OP_INV)   /* Inv or flush-n-inv use same cmd reg */
-               aux = ARC_REG_DC_IVDC;
-       else
-               aux = ARC_REG_DC_FLSH;
-
-       write_aux_reg(aux, 0x1);
-
-       __after_dc_op(cacheop, ctrl_reg);
-}
-
-/* For kernel mappings cache operation: index is same as paddr */
-#define __dc_line_op_k(p, sz, op)      __dc_line_op(p, p, sz, op)
-
-/*
- * D-Cache : Per Line INV (discard or wback+discard) or FLUSH (wback)
- */
-static inline void __dc_line_op(unsigned long paddr, unsigned long vaddr,
-                               unsigned long sz, const int cacheop)
-{
-       unsigned long flags;
-       unsigned int ctrl_reg;
-
-       local_irq_save(flags);
-
-       ctrl_reg = __before_dc_op(cacheop);
-
-       __cache_line_loop(paddr, vaddr, sz, cacheop);
-
-       __after_dc_op(cacheop, ctrl_reg);
-
-       local_irq_restore(flags);
-}
-
-#else
-
-#define __dc_entire_op(cacheop)
-#define __dc_line_op(paddr, vaddr, sz, cacheop)
-#define __dc_line_op_k(paddr, sz, cacheop)
-
-#endif /* CONFIG_ARC_HAS_DCACHE */
-
-
-#ifdef CONFIG_ARC_HAS_ICACHE
-
-/*
- *             I-Cache Aliasing in ARC700 VIPT caches
- *
- * ARC VIPT I-cache uses vaddr to index into cache and paddr to match the tag.
- * The orig Cache Management Module "CDU" only required paddr to invalidate a
- * certain line since it sufficed as index in Non-Aliasing VIPT cache-geometry.
- * Infact for distinct V1,V2,P: all of {V1-P},{V2-P},{P-P} would end up fetching
- * the exact same line.
- *
- * However for larger Caches (way-size > page-size) - i.e. in Aliasing config,
- * paddr alone could not be used to correctly index the cache.
- *
- * ------------------
- * MMU v1/v2 (Fixed Page Size 8k)
- * ------------------
- * The solution was to provide CDU with these additonal vaddr bits. These
- * would be bits [x:13], x would depend on cache-geometry, 13 comes from
- * standard page size of 8k.
- * H/w folks chose [17:13] to be a future safe range, and moreso these 5 bits
- * of vaddr could easily be "stuffed" in the paddr as bits [4:0] since the
- * orig 5 bits of paddr were anyways ignored by CDU line ops, as they
- * represent the offset within cache-line. The adv of using this "clumsy"
- * interface for additional info was no new reg was needed in CDU programming
- * model.
- *
- * 17:13 represented the max num of bits passable, actual bits needed were
- * fewer, based on the num-of-aliases possible.
- * -for 2 alias possibility, only bit 13 needed (32K cache)
- * -for 4 alias possibility, bits 14:13 needed (64K cache)
- *
- * ------------------
- * MMU v3
- * ------------------
- * This ver of MMU supports variable page sizes (1k-16k): although Linux will
- * only support 8k (default), 16k and 4k.
- * However from hardware perspective, smaller page sizes aggrevate aliasing
- * meaning more vaddr bits needed to disambiguate the cache-line-op ;
- * the existing scheme of piggybacking won't work for certain configurations.
- * Two new registers IC_PTAG and DC_PTAG inttoduced.
- * "tag" bits are provided in PTAG, index bits in existing IVIL/IVDL/FLDL regs
- */
-
-/***********************************************************
- * Machine specific helper for per line I-Cache invalidate.
- */
-
-static inline void __ic_entire_inv(void)
-{
-       write_aux_reg(ARC_REG_IC_IVIC, 1);
-       read_aux_reg(ARC_REG_IC_CTRL);  /* blocks */
-}
-
-static inline void
-__ic_line_inv_vaddr_local(unsigned long paddr, unsigned long vaddr,
-                         unsigned long sz)
-{
-       unsigned long flags;
-
-       local_irq_save(flags);
-       __cache_line_loop(paddr, vaddr, sz, OP_INV_IC);
-       local_irq_restore(flags);
-}
-
-#ifndef CONFIG_SMP
-
-#define __ic_line_inv_vaddr(p, v, s)   __ic_line_inv_vaddr_local(p, v, s)
-
-#else
-
-struct ic_inv_args {
-       unsigned long paddr, vaddr;
-       int sz;
-};
-
-static void __ic_line_inv_vaddr_helper(void *info)
-{
-        struct ic_inv_args *ic_inv = info;
-
-        __ic_line_inv_vaddr_local(ic_inv->paddr, ic_inv->vaddr, ic_inv->sz);
-}
-
-static void __ic_line_inv_vaddr(unsigned long paddr, unsigned long vaddr,
-                               unsigned long sz)
-{
-       struct ic_inv_args ic_inv = {
-               .paddr = paddr,
-               .vaddr = vaddr,
-               .sz    = sz
-       };
-
-       on_each_cpu(__ic_line_inv_vaddr_helper, &ic_inv, 1);
-}
-
-#endif /* CONFIG_SMP */
-
-#else  /* !CONFIG_ARC_HAS_ICACHE */
-
-#define __ic_entire_inv()
-#define __ic_line_inv_vaddr(pstart, vstart, sz)
-
-#endif /* CONFIG_ARC_HAS_ICACHE */
-
-
-/***********************************************************
- * Exported APIs
- */
-
-/*
- * Handle cache congruency of kernel and userspace mappings of page when kernel
- * writes-to/reads-from
- *
- * The idea is to defer flushing of kernel mapping after a WRITE, possible if:
- *  -dcache is NOT aliasing, hence any U/K-mappings of page are congruent
- *  -U-mapping doesn't exist yet for page (finalised in update_mmu_cache)
- *  -In SMP, if hardware caches are coherent
- *
- * There's a corollary case, where kernel READs from a userspace mapped page.
- * If the U-mapping is not congruent to to K-mapping, former needs flushing.
- */
-void flush_dcache_page(struct page *page)
-{
-       struct address_space *mapping;
-
-       if (!cache_is_vipt_aliasing()) {
-               clear_bit(PG_dc_clean, &page->flags);
-               return;
-       }
-
-       /* don't handle anon pages here */
-       mapping = page_mapping(page);
-       if (!mapping)
-               return;
-
-       /*
-        * pagecache page, file not yet mapped to userspace
-        * Make a note that K-mapping is dirty
-        */
-       if (!mapping_mapped(mapping)) {
-               clear_bit(PG_dc_clean, &page->flags);
-       } else if (page_mapped(page)) {
-
-               /* kernel reading from page with U-mapping */
-               void *paddr = page_address(page);
-               unsigned long vaddr = page->index << PAGE_CACHE_SHIFT;
-
-               if (addr_not_cache_congruent(paddr, vaddr))
-                       __flush_dcache_page(paddr, vaddr);
-       }
-}
-EXPORT_SYMBOL(flush_dcache_page);
-
-
-void dma_cache_wback_inv(unsigned long start, unsigned long sz)
-{
-       __dc_line_op_k(start, sz, OP_FLUSH_N_INV);
-}
-EXPORT_SYMBOL(dma_cache_wback_inv);
-
-void dma_cache_inv(unsigned long start, unsigned long sz)
-{
-       __dc_line_op_k(start, sz, OP_INV);
-}
-EXPORT_SYMBOL(dma_cache_inv);
-
-void dma_cache_wback(unsigned long start, unsigned long sz)
-{
-       __dc_line_op_k(start, sz, OP_FLUSH);
-}
-EXPORT_SYMBOL(dma_cache_wback);
-
-/*
- * This is API for making I/D Caches consistent when modifying
- * kernel code (loadable modules, kprobes, kgdb...)
- * This is called on insmod, with kernel virtual address for CODE of
- * the module. ARC cache maintenance ops require PHY address thus we
- * need to convert vmalloc addr to PHY addr
- */
-void flush_icache_range(unsigned long kstart, unsigned long kend)
-{
-       unsigned int tot_sz;
-
-       WARN(kstart < TASK_SIZE, "%s() can't handle user vaddr", __func__);
-
-       /* Shortcut for bigger flush ranges.
-        * Here we don't care if this was kernel virtual or phy addr
-        */
-       tot_sz = kend - kstart;
-       if (tot_sz > PAGE_SIZE) {
-               flush_cache_all();
-               return;
-       }
-
-       /* Case: Kernel Phy addr (0x8000_0000 onwards) */
-       if (likely(kstart > PAGE_OFFSET)) {
-               /*
-                * The 2nd arg despite being paddr will be used to index icache
-                * This is OK since no alternate virtual mappings will exist
-                * given the callers for this case: kprobe/kgdb in built-in
-                * kernel code only.
-                */
-               __sync_icache_dcache(kstart, kstart, kend - kstart);
-               return;
-       }
-
-       /*
-        * Case: Kernel Vaddr (0x7000_0000 to 0x7fff_ffff)
-        * (1) ARC Cache Maintenance ops only take Phy addr, hence special
-        *     handling of kernel vaddr.
-        *
-        * (2) Despite @tot_sz being < PAGE_SIZE (bigger cases handled already),
-        *     it still needs to handle  a 2 page scenario, where the range
-        *     straddles across 2 virtual pages and hence need for loop
-        */
-       while (tot_sz > 0) {
-               unsigned int off, sz;
-               unsigned long phy, pfn;
-
-               off = kstart % PAGE_SIZE;
-               pfn = vmalloc_to_pfn((void *)kstart);
-               phy = (pfn << PAGE_SHIFT) + off;
-               sz = min_t(unsigned int, tot_sz, PAGE_SIZE - off);
-               __sync_icache_dcache(phy, kstart, sz);
-               kstart += sz;
-               tot_sz -= sz;
-       }
-}
-EXPORT_SYMBOL(flush_icache_range);
-
-/*
- * General purpose helper to make I and D cache lines consistent.
- * @paddr is phy addr of region
- * @vaddr is typically user vaddr (breakpoint) or kernel vaddr (vmalloc)
- *    However in one instance, when called by kprobe (for a breakpt in
- *    builtin kernel code) @vaddr will be paddr only, meaning CDU operation will
- *    use a paddr to index the cache (despite VIPT). This is fine since since a
- *    builtin kernel page will not have any virtual mappings.
- *    kprobe on loadable module will be kernel vaddr.
- */
-void __sync_icache_dcache(unsigned long paddr, unsigned long vaddr, int len)
-{
-       __dc_line_op(paddr, vaddr, len, OP_FLUSH_N_INV);
-       __ic_line_inv_vaddr(paddr, vaddr, len);
-}
-
-/* wrapper to compile time eliminate alignment checks in flush loop */
-void __inv_icache_page(unsigned long paddr, unsigned long vaddr)
-{
-       __ic_line_inv_vaddr(paddr, vaddr, PAGE_SIZE);
-}
-
-/*
- * wrapper to clearout kernel or userspace mappings of a page
- * For kernel mappings @vaddr == @paddr
- */
-void ___flush_dcache_page(unsigned long paddr, unsigned long vaddr)
-{
-       __dc_line_op(paddr, vaddr & PAGE_MASK, PAGE_SIZE, OP_FLUSH_N_INV);
-}
-
-noinline void flush_cache_all(void)
-{
-       unsigned long flags;
-
-       local_irq_save(flags);
-
-       __ic_entire_inv();
-       __dc_entire_op(OP_FLUSH_N_INV);
-
-       local_irq_restore(flags);
-
-}
-
-#ifdef CONFIG_ARC_CACHE_VIPT_ALIASING
-
-void flush_cache_mm(struct mm_struct *mm)
-{
-       flush_cache_all();
-}
-
-void flush_cache_page(struct vm_area_struct *vma, unsigned long u_vaddr,
-                     unsigned long pfn)
-{
-       unsigned int paddr = pfn << PAGE_SHIFT;
-
-       u_vaddr &= PAGE_MASK;
-
-       ___flush_dcache_page(paddr, u_vaddr);
-
-       if (vma->vm_flags & VM_EXEC)
-               __inv_icache_page(paddr, u_vaddr);
-}
-
-void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
-                      unsigned long end)
-{
-       flush_cache_all();
-}
-
-void flush_anon_page(struct vm_area_struct *vma, struct page *page,
-                    unsigned long u_vaddr)
-{
-       /* TBD: do we really need to clear the kernel mapping */
-       __flush_dcache_page(page_address(page), u_vaddr);
-       __flush_dcache_page(page_address(page), page_address(page));
-
-}
-
-#endif
-
-void copy_user_highpage(struct page *to, struct page *from,
-       unsigned long u_vaddr, struct vm_area_struct *vma)
-{
-       void *kfrom = page_address(from);
-       void *kto = page_address(to);
-       int clean_src_k_mappings = 0;
-
-       /*
-        * If SRC page was already mapped in userspace AND it's U-mapping is
-        * not congruent with K-mapping, sync former to physical page so that
-        * K-mapping in memcpy below, sees the right data
-        *
-        * Note that while @u_vaddr refers to DST page's userspace vaddr, it is
-        * equally valid for SRC page as well
-        */
-       if (page_mapped(from) && addr_not_cache_congruent(kfrom, u_vaddr)) {
-               __flush_dcache_page(kfrom, u_vaddr);
-               clean_src_k_mappings = 1;
-       }
-
-       copy_page(kto, kfrom);
-
-       /*
-        * Mark DST page K-mapping as dirty for a later finalization by
-        * update_mmu_cache(). Although the finalization could have been done
-        * here as well (given that both vaddr/paddr are available).
-        * But update_mmu_cache() already has code to do that for other
-        * non copied user pages (e.g. read faults which wire in pagecache page
-        * directly).
-        */
-       clear_bit(PG_dc_clean, &to->flags);
-
-       /*
-        * if SRC was already usermapped and non-congruent to kernel mapping
-        * sync the kernel mapping back to physical page
-        */
-       if (clean_src_k_mappings) {
-               __flush_dcache_page(kfrom, kfrom);
-               set_bit(PG_dc_clean, &from->flags);
-       } else {
-               clear_bit(PG_dc_clean, &from->flags);
-       }
-}
-
-void clear_user_page(void *to, unsigned long u_vaddr, struct page *page)
-{
-       clear_page(to);
-       clear_bit(PG_dc_clean, &page->flags);
-}
-
-
-/**********************************************************************
- * Explicit Cache flush request from user space via syscall
- * Needed for JITs which generate code on the fly
- */
-SYSCALL_DEFINE3(cacheflush, uint32_t, start, uint32_t, sz, uint32_t, flags)
-{
-       /* TBD: optimize this */
-       flush_cache_all();
-       return 0;
-}
index 12cc6485b2185c4eae5399adbda4598da619a1dd..74a637a1cfc48b2c5d4f0047a0b25bc51d238e16 100644 (file)
@@ -14,8 +14,6 @@
  * Cache bit off in the TLB entry.
  *
  * The default DMA address == Phy address which is 0x8000_0000 based.
- * A platform/device can make it zero based, by over-riding
- * plat_{dma,kernel}_addr_to_{kernel,dma}
  */
 
 #include <linux/dma-mapping.h>
@@ -37,7 +35,7 @@ void *dma_alloc_noncoherent(struct device *dev, size_t size,
                return NULL;
 
        /* This is bus address, platform dependent */
-       *dma_handle = plat_kernel_addr_to_dma(dev, paddr);
+       *dma_handle = (dma_addr_t)paddr;
 
        return paddr;
 }
@@ -46,8 +44,7 @@ EXPORT_SYMBOL(dma_alloc_noncoherent);
 void dma_free_noncoherent(struct device *dev, size_t size, void *vaddr,
                          dma_addr_t dma_handle)
 {
-       free_pages_exact((void *)plat_dma_addr_to_kernel(dev, dma_handle),
-                        size);
+       free_pages_exact((void *)dma_handle, size);
 }
 EXPORT_SYMBOL(dma_free_noncoherent);
 
@@ -67,7 +64,19 @@ void *dma_alloc_coherent(struct device *dev, size_t size,
                memset(kvaddr, 0, size);
 
        /* This is bus address, platform dependent */
-       *dma_handle = plat_kernel_addr_to_dma(dev, paddr);
+       *dma_handle = (dma_addr_t)paddr;
+
+       /*
+        * Evict any existing L1 and/or L2 lines for the backing page
+        * in case it was used earlier as a normal "cached" page.
+        * Yeah this bit us - STAR 9000898266
+        *
+        * Although the core does call flush_cache_vmap(), it gets the kvaddr,
+        * hence can't be used to efficiently flush the L1 and/or L2, which
+        * need the paddr. Currently flush_cache_vmap() nukes the L1 cache
+        * completely; that will be optimized in a separate commit.
+        */
+       dma_cache_wback_inv((unsigned long)paddr, size);
 
        return kvaddr;
 }
@@ -78,8 +87,7 @@ void dma_free_coherent(struct device *dev, size_t size, void *kvaddr,
 {
        iounmap((void __force __iomem *)kvaddr);
 
-       free_pages_exact((void *)plat_dma_addr_to_kernel(dev, dma_handle),
-                        size);
+       free_pages_exact((void *)dma_handle, size);
 }
 EXPORT_SYMBOL(dma_free_coherent);
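
With the plat_{dma,kernel}_addr_to_{kernel,dma} hooks dropped above, the DMA handle is simply the 0x8000_0000-based physical address, so both conversions collapse to casts. A trivial userspace model of that identity mapping (illustration only, not part of the patch; the address is made up):

#include <stdint.h>
#include <stdio.h>

typedef uint32_t dma_addr_t;

/* Identity DMA <-> kernel address mapping as used by the hunks above */
static dma_addr_t kernel_addr_to_dma(uintptr_t paddr)
{
        return (dma_addr_t)paddr;
}

static uintptr_t dma_addr_to_kernel(dma_addr_t handle)
{
        return (uintptr_t)handle;
}

int main(void)
{
        uintptr_t paddr = 0x80001000u;          /* hypothetical buffer paddr */
        dma_addr_t handle = kernel_addr_to_dma(paddr);

        printf("dma handle   : 0x%x\n", (unsigned int)handle);
        printf("back to paddr: 0x%lx\n", (unsigned long)dma_addr_to_kernel(handle));
        return 0;
}
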
 
index 7f47d2a56f44374e00939e6742ed6717c452c58a..2c7ce8bb74758c127673582426f214a0ee0d0af7 100644 (file)
@@ -113,6 +113,8 @@ static inline void __tlb_entry_erase(void)
        write_aux_reg(ARC_REG_TLBCOMMAND, TLBWrite);
 }
 
+#if (CONFIG_ARC_MMU_VER < 4)
+
 static inline unsigned int tlb_entry_lkup(unsigned long vaddr_n_asid)
 {
        unsigned int idx;
@@ -210,6 +212,28 @@ static void tlb_entry_insert(unsigned int pd0, unsigned int pd1)
        write_aux_reg(ARC_REG_TLBCOMMAND, TLBWrite);
 }
 
+#else  /* CONFIG_ARC_MMU_VER >= 4 */
+
+static void utlb_invalidate(void)
+{
+       /* No need since uTLB is always in sync with JTLB */
+}
+
+static void tlb_entry_erase(unsigned int vaddr_n_asid)
+{
+       write_aux_reg(ARC_REG_TLBPD0, vaddr_n_asid | _PAGE_PRESENT);
+       write_aux_reg(ARC_REG_TLBCOMMAND, TLBDeleteEntry);
+}
+
+static void tlb_entry_insert(unsigned int pd0, unsigned int pd1)
+{
+       write_aux_reg(ARC_REG_TLBPD0, pd0);
+       write_aux_reg(ARC_REG_TLBPD1, pd1);
+       write_aux_reg(ARC_REG_TLBCOMMAND, TLBInsertEntry);
+}
+
+#endif
+
 /*
  * Un-conditionally (without lookup) erase the entire MMU contents
  */
@@ -582,23 +606,42 @@ void read_decode_mmu_bcr(void)
 #endif
        } *mmu3;
 
+       struct bcr_mmu_4 {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+       unsigned int ver:8, sasid:1, sz1:4, sz0:4, res:2, pae:1,
+                    n_ways:2, n_entry:2, n_super:2, u_itlb:3, u_dtlb:3;
+#else
+       /*           DTLB      ITLB      JES        JE         JA      */
+       unsigned int u_dtlb:3, u_itlb:3, n_super:2, n_entry:2, n_ways:2,
+                    pae:1, res:2, sz0:4, sz1:4, sasid:1, ver:8;
+#endif
+       } *mmu4;
+
        tmp = read_aux_reg(ARC_REG_MMU_BCR);
        mmu->ver = (tmp >> 24);
 
        if (mmu->ver <= 2) {
                mmu2 = (struct bcr_mmu_1_2 *)&tmp;
-               mmu->pg_sz = PAGE_SIZE;
+               mmu->pg_sz_k = TO_KB(PAGE_SIZE);
                mmu->sets = 1 << mmu2->sets;
                mmu->ways = 1 << mmu2->ways;
                mmu->u_dtlb = mmu2->u_dtlb;
                mmu->u_itlb = mmu2->u_itlb;
-       } else {
+       } else if (mmu->ver == 3) {
                mmu3 = (struct bcr_mmu_3 *)&tmp;
-               mmu->pg_sz = 512 << mmu3->pg_sz;
+               mmu->pg_sz_k = 1 << (mmu3->pg_sz - 1);
                mmu->sets = 1 << mmu3->sets;
                mmu->ways = 1 << mmu3->ways;
                mmu->u_dtlb = mmu3->u_dtlb;
                mmu->u_itlb = mmu3->u_itlb;
+       } else {
+               mmu4 = (struct bcr_mmu_4 *)&tmp;
+               mmu->pg_sz_k = 1 << (mmu4->sz0 - 1);
+               mmu->s_pg_sz_m = 1 << (mmu4->sz1 - 11);
+               mmu->sets = 64 << mmu4->n_entry;
+               mmu->ways = mmu4->n_ways * 2;
+               mmu->u_dtlb = mmu4->u_dtlb * 4;
+               mmu->u_itlb = mmu4->u_itlb * 4;
        }
 
        mmu->num_tlb = mmu->sets * mmu->ways;
@@ -608,10 +651,15 @@ char *arc_mmu_mumbojumbo(int cpu_id, char *buf, int len)
 {
        int n = 0;
        struct cpuinfo_arc_mmu *p_mmu = &cpuinfo_arc700[cpu_id].mmu;
+       char super_pg[64] = "";
+
+       if (p_mmu->s_pg_sz_m)
+               scnprintf(super_pg, 64, "%dM Super Page%s, ",
+                         p_mmu->s_pg_sz_m, " (not used)");
 
        n += scnprintf(buf + n, len - n,
-                     "MMU [v%x]\t: %dk PAGE, JTLB %d (%dx%d), uDTLB %d, uITLB %d %s\n",
-                      p_mmu->ver, TO_KB(p_mmu->pg_sz),
+                     "MMU [v%x]\t: %dk PAGE, %sJTLB %d (%dx%d), uDTLB %d, uITLB %d %s\n",
+                      p_mmu->ver, p_mmu->pg_sz_k, super_pg,
                       p_mmu->num_tlb, p_mmu->sets, p_mmu->ways,
                       p_mmu->u_dtlb, p_mmu->u_itlb,
                       IS_ENABLED(CONFIG_ARC_MMU_SASID) ? ",SASID" : "");
@@ -639,7 +687,7 @@ void arc_mmu_init(void)
                      mmu->ver, CONFIG_ARC_MMU_VER);
        }
 
-       if (mmu->pg_sz != PAGE_SIZE)
+       if (mmu->pg_sz_k != TO_KB(PAGE_SIZE))
                panic("MMU pg size != PAGE_SIZE (%luk)\n", TO_KB(PAGE_SIZE));
 
        /* Enable the MMU */
index d572f1c2c72470e4b8f321787a8499e02da518e0..f6f4c3cb505d1341a8c24b1172a6a89f71e6fc26 100644 (file)
@@ -35,8 +35,6 @@
  * Rahul Trivedi, Amit Bhor: Codito Technologies 2004
  */
 
-       .cpu A7
-
 #include <linux/linkage.h>
 #include <asm/entry.h>
 #include <asm/mmu.h>
@@ -46,6 +44,7 @@
 #include <asm/processor.h>
 #include <asm/tlb-mmu1.h>
 
+#ifdef CONFIG_ISA_ARCOMPACT
 ;-----------------------------------------------------------------
 ; ARC700 Exception Handling doesn't auto-switch stack and it only provides
 ; ONE scratch AUX reg "ARC_REG_SCRATCH_DATA0"
@@ -123,6 +122,24 @@ ex_saved_reg1:
 #endif
 .endm
 
+#else  /* ARCv2 */
+
+.macro TLBMISS_FREEUP_REGS
+       PUSH  r0
+       PUSH  r1
+       PUSH  r2
+       PUSH  r3
+.endm
+
+.macro TLBMISS_RESTORE_REGS
+       POP   r3
+       POP   r2
+       POP   r1
+       POP   r0
+.endm
+
+#endif
+
 ;============================================================================
 ;  Troubleshooting Stuff
 ;============================================================================
@@ -241,6 +258,7 @@ ex_saved_reg1:
 ; Commit the TLB entry into MMU
 
 .macro COMMIT_ENTRY_TO_MMU
+#if (CONFIG_ARC_MMU_VER < 4)
 
        /* Get free TLB slot: Set = computed from vaddr, way = random */
        sr  TLBGetIndex, [ARC_REG_TLBCOMMAND]
@@ -251,6 +269,10 @@ ex_saved_reg1:
 #else
        sr TLBWrite, [ARC_REG_TLBCOMMAND]
 #endif
+
+#else
+       sr TLBInsertEntry, [ARC_REG_TLBCOMMAND]
+#endif
 .endm
 
 
@@ -291,6 +313,7 @@ ENTRY(EV_TLBMissI)
        CONV_PTE_TO_TLB
        COMMIT_ENTRY_TO_MMU
        TLBMISS_RESTORE_REGS
+EV_TLBMissI_fast_ret:  ; additional label for VDK OS-kit instrumentation
        rtie
 
 END(EV_TLBMissI)
@@ -356,6 +379,7 @@ ENTRY(EV_TLBMissD)
 
        COMMIT_ENTRY_TO_MMU
        TLBMISS_RESTORE_REGS
+EV_TLBMissD_fast_ret:  ; additional label for VDK OS-kit instrumentation
        rtie
 
 ;-------- Common routine to call Linux Page Fault Handler -----------
@@ -366,19 +390,5 @@ do_slow_path_pf:
 
        ; Slow path TLB Miss handled as a regular ARC Exception
        ; (stack switching / save the complete reg-file).
-       EXCEPTION_PROLOGUE
-
-       ; ------- setup args for Linux Page fault Hanlder ---------
-       mov_s r1, sp
-       lr    r0, [efa]
-
-       ; We don't want exceptions to be disabled while the fault is handled.
-       ; Now that we have saved the context we return from exception hence
-       ; exceptions get re-enable
-
-       FAKE_RET_FROM_EXCPN  r9
-
-       bl  do_page_fault
-       b   ret_from_exception
-
+       b  call_do_page_fault
 END(EV_TLBMissD)
diff --git a/arch/arc/plat-arcfpga/Kconfig b/arch/arc/plat-arcfpga/Kconfig
deleted file mode 100644 (file)
index 217593a..0000000
+++ /dev/null
@@ -1,33 +0,0 @@
-#
-# Copyright (C) 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
-#
-
-menuconfig ARC_PLAT_FPGA_LEGACY
-       bool "\"Legacy\" ARC FPGA dev Boards"
-       select ARC_HAS_COH_CACHES if SMP
-       help
-         Support for ARC development boards, provided by Synopsys.
-         These are based on FPGA or ISS. e.g.
-         - ARCAngel4
-         - ML509
-         - MetaWare ISS
-
-if ARC_PLAT_FPGA_LEGACY
-
-config ISS_SMP_EXTN
-       bool "ARC SMP Extensions (ISS Models only)"
-       default n
-       depends on SMP
-       help
-         SMP Extensions to ARC700, in a "simulation only" Model, supported in
-         ARC ISS (Instruction Set Simulator).
-         The SMP extensions include:
-         -IDU (Interrupt Distribution Unit)
-         -XTL (To enable CPU start/stop/set-PC for another CPU)
-         It doesn't provide coherent Caches and/or Atomic Ops (LLOCK/SCOND)
-
-endif
diff --git a/arch/arc/plat-arcfpga/Makefile b/arch/arc/plat-arcfpga/Makefile
deleted file mode 100644 (file)
index 66fd0ec..0000000
+++ /dev/null
@@ -1,12 +0,0 @@
-#
-# Copyright (C) 2011-2012 Synopsys, Inc. (www.synopsys.com)
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
-#
-
-KBUILD_CFLAGS  += -Iarch/arc/plat-arcfpga/include
-
-obj-y := platform.o
-obj-$(CONFIG_ISS_SMP_EXTN)             += smp.o
diff --git a/arch/arc/plat-arcfpga/include/plat/smp.h b/arch/arc/plat-arcfpga/include/plat/smp.h
deleted file mode 100644 (file)
index c09eb4c..0000000
+++ /dev/null
@@ -1,118 +0,0 @@
-/*
- * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- *  Rajeshwar Ranga: Interrupt Distribution Unit API's
- */
-
-#ifndef __PLAT_ARCFPGA_SMP_H
-#define __PLAT_ARCFPGA_SMP_H
-
-#ifdef CONFIG_SMP
-
-#include <linux/types.h>
-#include <asm/arcregs.h>
-
-#define ARC_AUX_IDU_REG_CMD            0x2000
-#define ARC_AUX_IDU_REG_PARAM          0x2001
-
-#define ARC_AUX_XTL_REG_CMD            0x2002
-#define ARC_AUX_XTL_REG_PARAM          0x2003
-
-#define ARC_REG_MP_BCR                 0x2021
-
-#define ARC_XTL_CMD_WRITE_PC           0x04
-#define ARC_XTL_CMD_CLEAR_HALT         0x02
-
-/*
- * Build Configuration Register which identifies the sub-components
- */
-struct bcr_mp {
-#ifdef CONFIG_CPU_BIG_ENDIAN
-       unsigned int mp_arch:16, pad:5, sdu:1, idu:1, scu:1, ver:8;
-#else
-       unsigned int ver:8, scu:1, idu:1, sdu:1, pad:5, mp_arch:16;
-#endif
-};
-
-/* IDU supports 256 common interrupts */
-#define NR_IDU_IRQS                    256
-
-/*
- * The Aux Regs layout is same bit-by-bit in both BE/LE modes.
- * However when casted as a bitfield encoded "C" struct, gcc treats it as
- * memory, generating different code for BE/LE, requiring strcture adj (see
- * include/asm/arcregs.h)
- *
- * However when manually "carving" the value for a Aux, no special handling
- * of BE is needed because of the property discribed above
- */
-#define IDU_SET_COMMAND(irq, cmd)                      \
-do {                                                   \
-       uint32_t __val;                                 \
-       __val = (((irq & 0xFF) << 8) | (cmd & 0xFF));   \
-       write_aux_reg(ARC_AUX_IDU_REG_CMD, __val);      \
-} while (0)
-
-#define IDU_SET_PARAM(par)  write_aux_reg(ARC_AUX_IDU_REG_PARAM, par)
-#define IDU_GET_PARAM()     read_aux_reg(ARC_AUX_IDU_REG_PARAM)
-
-/* IDU Commands */
-#define IDU_DISABLE                    0x00
-#define IDU_ENABLE                     0x01
-#define IDU_IRQ_CLEAR                  0x02
-#define IDU_IRQ_ASSERT                 0x03
-#define IDU_IRQ_WMODE                  0x04
-#define IDU_IRQ_STATUS                 0x05
-#define IDU_IRQ_ACK                    0x06
-#define IDU_IRQ_PEND                   0x07
-#define IDU_IRQ_RMODE                  0x08
-#define IDU_IRQ_WBITMASK               0x09
-#define IDU_IRQ_RBITMASK               0x0A
-
-#define idu_enable()           IDU_SET_COMMAND(0, IDU_ENABLE)
-#define idu_disable()          IDU_SET_COMMAND(0, IDU_DISABLE)
-
-#define idu_irq_assert(irq)    IDU_SET_COMMAND((irq), IDU_IRQ_ASSERT)
-#define idu_irq_clear(irq)     IDU_SET_COMMAND((irq), IDU_IRQ_CLEAR)
-
-/* IDU Interrupt Mode - Destination Encoding */
-#define IDU_IRQ_MOD_DISABLE            0x00
-#define IDU_IRQ_MOD_ROUND_RECP         0x01
-#define IDU_IRQ_MOD_TCPU_FIRSTRECP     0x02
-#define IDU_IRQ_MOD_TCPU_ALLRECP       0x03
-
-/* IDU Interrupt Mode  - Triggering Mode */
-#define IDU_IRQ_MODE_LEVEL_TRIG                0x00
-#define IDU_IRQ_MODE_PULSE_TRIG                0x01
-
-#define IDU_IRQ_MODE_PARAM(dest_mode, trig_mode)   \
-       (((trig_mode & 0x01) << 15) | (dest_mode & 0xFF))
-
-struct idu_irq_config {
-       uint8_t irq;
-       uint8_t dest_mode;
-       uint8_t trig_mode;
-};
-
-struct idu_irq_status {
-       uint8_t irq;
-       bool enabled;
-       bool status;
-       bool ack;
-       bool pend;
-       uint8_t next_rr;
-};
-
-extern void idu_irq_set_tgtcpu(uint8_t irq, uint32_t mask);
-extern void idu_irq_set_mode(uint8_t irq, uint8_t dest_mode, uint8_t trig_mode);
-
-extern void iss_model_init_smp(unsigned int cpu);
-extern void iss_model_init_early_smp(void);
-
-#endif /* CONFIG_SMP */
-
-#endif
diff --git a/arch/arc/plat-arcfpga/platform.c b/arch/arc/plat-arcfpga/platform.c
deleted file mode 100644 (file)
index afc8825..0000000
+++ /dev/null
@@ -1,45 +0,0 @@
-/*
- * ARC FPGA Platform support code
- *
- * Copyright (C) 2012 Synopsys, Inc. (www.synopsys.com)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/init.h>
-#include <asm/mach_desc.h>
-#include <plat/smp.h>
-
-/*----------------------- Machine Descriptions ------------------------------
- *
- * Machine description is simply a set of platform/board specific callbacks
- * This is not directly related to DeviceTree based dynamic device creation,
- * however as part of early device tree scan, we also select the right
- * callback set, by matching the DT compatible name.
- */
-
-static const char *legacy_fpga_compat[] __initconst = {
-       "snps,arc-angel4",
-       "snps,arc-ml509",
-       NULL,
-};
-
-MACHINE_START(LEGACY_FPGA, "legacy_fpga")
-       .dt_compat      = legacy_fpga_compat,
-#ifdef CONFIG_ISS_SMP_EXTN
-       .init_early     = iss_model_init_early_smp,
-       .init_smp       = iss_model_init_smp,
-#endif
-MACHINE_END
-
-static const char *simulation_compat[] __initconst = {
-       "snps,nsim",
-       "snps,nsimosci",
-       NULL,
-};
-
-MACHINE_START(SIMULATION, "simulation")
-       .dt_compat      = simulation_compat,
-MACHINE_END
diff --git a/arch/arc/plat-arcfpga/smp.c b/arch/arc/plat-arcfpga/smp.c
deleted file mode 100644 (file)
index 64797ba..0000000
+++ /dev/null
@@ -1,186 +0,0 @@
-/*
- * ARC700 Simulation-only Extensions for SMP
- *
- * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- *  Vineet Gupta    - 2012 : split off arch common and plat specific SMP
- *  Rajeshwar Ranga - 2007 : Interrupt Distribution Unit API's
- */
-
-#include <linux/smp.h>
-#include <linux/irq.h>
-#include <plat/smp.h>
-
-#define IDU_INTERRUPT_0 16
-
-static char smp_cpuinfo_buf[128];
-
-/*
- *-------------------------------------------------------------------
- * Platform specific callbacks expected by arch SMP code
- *-------------------------------------------------------------------
- */
-
-/*
- * Master kick starting another CPU
- */
-static void iss_model_smp_wakeup_cpu(int cpu, unsigned long pc)
-{
-       /* setup the start PC */
-       write_aux_reg(ARC_AUX_XTL_REG_PARAM, pc);
-
-       /* Trigger WRITE_PC cmd for this cpu */
-       write_aux_reg(ARC_AUX_XTL_REG_CMD,
-                       (ARC_XTL_CMD_WRITE_PC | (cpu << 8)));
-
-       /* Take the cpu out of Halt */
-       write_aux_reg(ARC_AUX_XTL_REG_CMD,
-                       (ARC_XTL_CMD_CLEAR_HALT | (cpu << 8)));
-
-}
-
-static inline int get_hw_config_num_irq(void)
-{
-       uint32_t val = read_aux_reg(ARC_REG_VECBASE_BCR);
-
-       switch (val & 0x03) {
-       case 0:
-               return 16;
-       case 1:
-               return 32;
-       case 2:
-               return 8;
-       default:
-               return 0;
-       }
-
-       return 0;
-}
-
-/*
- * Any SMP specific init any CPU does when it comes up.
- * Here we setup the CPU to enable Inter-Processor-Interrupts
- * Called for each CPU
- * -Master      : init_IRQ()
- * -Other(s)    : start_kernel_secondary()
- */
-void iss_model_init_smp(unsigned int cpu)
-{
-       /* Check if CPU is configured for more than 16 interrupts */
-       if (NR_IRQS <= 16 || get_hw_config_num_irq() <= 16)
-               panic("[arcfpga] IRQ system can't support IDU IPI\n");
-
-       idu_disable();
-
-       /****************************************************************
-        * IDU provides a set of Common IRQs, each of which can be dynamically
-        * attached to (1|many|all) CPUs.
-        * The Common IRQs [0-15] are mapped as CPU pvt [16-31]
-        *
-        * Here we use a simple 1:1 mapping:
-        * A CPU 'x' is wired to Common IRQ 'x'.
-        * So an IDU ASSERT on IRQ 'x' will trigger Interupt on CPU 'x', which
-        * makes up for our simple IPI plumbing.
-        *
-        * TBD: Have a dedicated multicast IRQ for sending IPIs to all CPUs
-        *      w/o having to do one-at-a-time
-        ******************************************************************/
-
-       /*
-        * Claim an IRQ which would trigger IPI on this CPU.
-        * In IDU parlance it involves setting up a cpu bitmask for the IRQ
-        * The bitmap here contains only 1 CPU (self).
-        */
-       idu_irq_set_tgtcpu(cpu, 0x1 << cpu);
-
-       /* Set the IRQ destination to use the bitmask above */
-       idu_irq_set_mode(cpu, 7, /* XXX: IDU_IRQ_MOD_TCPU_ALLRECP: ISS bug */
-                        IDU_IRQ_MODE_PULSE_TRIG);
-
-       idu_enable();
-
-       /* Attach the arch-common IPI ISR to our IDU IRQ */
-       smp_ipi_irq_setup(cpu, IDU_INTERRUPT_0 + cpu);
-}
-
-static void iss_model_ipi_send(int cpu)
-{
-       idu_irq_assert(cpu);
-}
-
-static void iss_model_ipi_clear(int irq)
-{
-       idu_irq_clear(IDU_INTERRUPT_0 + smp_processor_id());
-}
-
-void iss_model_init_early_smp(void)
-{
-#define IS_AVAIL1(var, str)    ((var) ? str : "")
-
-       struct bcr_mp mp;
-
-       READ_BCR(ARC_REG_MP_BCR, mp);
-
-       sprintf(smp_cpuinfo_buf, "Extn [ISS-SMP]: v%d, arch(%d) %s %s %s\n",
-               mp.ver, mp.mp_arch, IS_AVAIL1(mp.scu, "SCU"),
-               IS_AVAIL1(mp.idu, "IDU"), IS_AVAIL1(mp.sdu, "SDU"));
-
-       plat_smp_ops.info = smp_cpuinfo_buf;
-
-       plat_smp_ops.cpu_kick = iss_model_smp_wakeup_cpu;
-       plat_smp_ops.ipi_send = iss_model_ipi_send;
-       plat_smp_ops.ipi_clear = iss_model_ipi_clear;
-}
-
-/*
- *-------------------------------------------------------------------
- * Low level Platform IPI Providers
- *-------------------------------------------------------------------
- */
-
-/* Set the Mode for the Common IRQ */
-void idu_irq_set_mode(uint8_t irq, uint8_t dest_mode, uint8_t trig_mode)
-{
-       uint32_t par = IDU_IRQ_MODE_PARAM(dest_mode, trig_mode);
-
-       IDU_SET_PARAM(par);
-       IDU_SET_COMMAND(irq, IDU_IRQ_WMODE);
-}
-
-/* Set the target cpu Bitmask for Common IRQ */
-void idu_irq_set_tgtcpu(uint8_t irq, uint32_t mask)
-{
-       IDU_SET_PARAM(mask);
-       IDU_SET_COMMAND(irq, IDU_IRQ_WBITMASK);
-}
-
-/* Get the Interrupt Acknowledged status for IRQ (as CPU Bitmask) */
-bool idu_irq_get_ack(uint8_t irq)
-{
-       uint32_t val;
-
-       IDU_SET_COMMAND(irq, IDU_IRQ_ACK);
-       val = IDU_GET_PARAM();
-
-       return val & (1 << irq);
-}
-
-/*
- * Get the Interrupt Pending status for IRQ (as CPU Bitmask)
- * -Pending means CPU has not yet noticed the IRQ (e.g. disabled)
- * -After Interrupt has been taken, the IPI expcitily needs to be
- *  cleared, to be acknowledged.
- */
-bool idu_irq_get_pend(uint8_t irq)
-{
-       uint32_t val;
-
-       IDU_SET_COMMAND(irq, IDU_IRQ_PEND);
-       val = IDU_GET_PARAM();
-
-       return val & (1 << irq);
-}
diff --git a/arch/arc/plat-axs10x/Kconfig b/arch/arc/plat-axs10x/Kconfig
new file mode 100644 (file)
index 0000000..d475f9d
--- /dev/null
@@ -0,0 +1,46 @@
+#
+# Copyright (C) 2013-15 Synopsys, Inc. (www.synopsys.com)
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+
+menuconfig ARC_PLAT_AXS10X
+       bool "Synopsys ARC AXS10x Software Development Platforms"
+       select DW_APB_ICTL
+       select GPIO_DWAPB
+       select OF_GPIO
+       select GENERIC_IRQ_CHIP
+       select ARCH_REQUIRE_GPIOLIB
+       help
+         Support for the ARC AXS10x Software Development Platforms.
+
+         The AXS10x Platforms consist of a mainboard with peripherals,
+         on which several daughter cards can be placed. The daughter cards
+         typically contain a CPU and memory.
+
+if ARC_PLAT_AXS10X
+
+config AXS101
+       depends on ISA_ARCOMPACT
+       bool "AXS101 with AXC001 CPU Card (ARC 770D/EM6/AS221)"
+       help
+         This adds support for the 770D/EM6/AS221 CPU Card. Only the ARC
+         770D is supported in Linux.
+
+         The AXS101 Platform consists of an AXS10x mainboard with
+         this daughtercard. Please use the axs101.dts device tree
+         with this configuration.
+
+config AXS103
+       bool "AXS103 with AXC003 CPU Card (ARC HS38x)"
+       depends on ISA_ARCV2
+       help
+         This adds support for the HS38x CPU Card.
+
+         The AXS103 Platform consists of an AXS10x mainboard with
+         this daughtercard. Please use the axs103.dts device tree
+         with this configuration.
+
+endif
diff --git a/arch/arc/plat-axs10x/Makefile b/arch/arc/plat-axs10x/Makefile
new file mode 100644 (file)
index 0000000..d4748f2
--- /dev/null
@@ -0,0 +1,9 @@
+#
+# Copyright (C) 2013-15 Synopsys, Inc. (www.synopsys.com)
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+
+obj-$(CONFIG_ARC_PLAT_AXS10X) += axs10x.o
diff --git a/arch/arc/plat-axs10x/axs10x.c b/arch/arc/plat-axs10x/axs10x.c
new file mode 100644 (file)
index 0000000..99f7da5
--- /dev/null
@@ -0,0 +1,484 @@
+/*
+ * AXS101/AXS103 Software Development Platform
+ *
+ * Copyright (C) 2013-15 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/of_platform.h>
+
+#include <asm/asm-offsets.h>
+#include <asm/clk.h>
+#include <asm/io.h>
+#include <asm/mach_desc.h>
+#include <asm/mcip.h>
+
+#define AXS_MB_CGU             0xE0010000
+#define AXS_MB_CREG            0xE0011000
+
+#define CREG_MB_IRQ_MUX                (AXS_MB_CREG + 0x214)
+#define CREG_MB_SW_RESET       (AXS_MB_CREG + 0x220)
+#define CREG_MB_VER            (AXS_MB_CREG + 0x230)
+#define CREG_MB_CONFIG         (AXS_MB_CREG + 0x234)
+
+#define AXC001_CREG            0xF0001000
+#define AXC001_GPIO_INTC       0xF0003000
+
+static void __init axs10x_enable_gpio_intc_wire(void)
+{
+       /*
+        * Peripherals on CPU Card and Mother Board are wired to cpu intc via
+        * intermediate DW APB GPIO blocks (mainly for debouncing)
+        *
+        *         ---------------------
+        *        |  snps,arc700-intc |
+        *        ---------------------
+        *          | #7          | #15
+        * -------------------   -------------------
+        * | snps,dw-apb-gpio |  | snps,dw-apb-gpio |
+        * -------------------   -------------------
+        *        |                         |
+        *        |                 [ Debug UART on cpu card ]
+        *        |
+        * ------------------------
+        * | snps,dw-apb-intc (MB)|
+        * ------------------------
+        *  |      |       |      |
+        * [eth] [uart]        [... other perip on Main Board]
+        *
+        * The current implementation of the "irq-dw-apb-ictl" driver doesn't
+        * work well with stacked INTCs; in particular, problems arise if its
+        * master INTC is not yet instantiated. See the discussion here -
+        * https://lkml.org/lkml/2015/3/4/755
+        *
+        * So set up the first GPIO block as a passive pass-through and hide it
+        * from the DT hardware topology - connect the MB intc directly to the
+        * cpu intc. The GPIO "wire" still needs to be initialized here, though.
+        *
+        * One side advantage is that peripheral interrupt handling avoids one
+        * nested intc ISR hop.
+        */
+#define GPIO_INTEN             (AXC001_GPIO_INTC + 0x30)
+#define GPIO_INTMASK           (AXC001_GPIO_INTC + 0x34)
+#define GPIO_INTTYPE_LEVEL     (AXC001_GPIO_INTC + 0x38)
+#define GPIO_INT_POLARITY      (AXC001_GPIO_INTC + 0x3c)
+#define MB_TO_GPIO_IRQ         12
+
+       iowrite32(~(1 << MB_TO_GPIO_IRQ), (void __iomem *) GPIO_INTMASK);
+       iowrite32(0, (void __iomem *) GPIO_INTTYPE_LEVEL);
+       iowrite32(~0, (void __iomem *) GPIO_INT_POLARITY);
+       iowrite32(1 << MB_TO_GPIO_IRQ, (void __iomem *) GPIO_INTEN);
+}
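Worked out from the four writes above (with MB_TO_GPIO_IRQ = 12), the pass-through programming amounts to the following register values; the "unmasked/enabled" readings assume the usual DW APB GPIO register semantics:

/*
 * GPIO_INTMASK       = ~(1 << 12) = 0xFFFFEFFF  -> only line 12 unmasked
 * GPIO_INTTYPE_LEVEL = 0                        -> level-triggered
 * GPIO_INT_POLARITY  = ~0         = 0xFFFFFFFF  -> active high
 * GPIO_INTEN         = 1 << 12    = 0x00001000  -> only line 12 enabled
 */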
+
+static inline void __init
+write_cgu_reg(uint32_t value, void __iomem *reg, void __iomem *lock_reg)
+{
+       unsigned int loops = 128 * 1024, ctr;
+
+       iowrite32(value, reg);
+
+       ctr = loops;
+       while (((ioread32(lock_reg) & 1) == 1) && ctr--) /* wait for unlock */
+               cpu_relax();
+
+       ctr = loops;
+       while (((ioread32(lock_reg) & 1) == 0) && ctr--) /* wait for re-lock */
+               cpu_relax();
+}
+
+static void __init axs10x_print_board_ver(unsigned int creg, const char *str)
+{
+       union ver {
+               struct {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+                       unsigned int pad:11, y:12, m:4, d:5;
+#else
+                       unsigned int d:5, m:4, y:12, pad:11;
+#endif
+               };
+               unsigned int val;
+       } board;
+
+       board.val = ioread32((void __iomem *)creg);
+       pr_info("AXS: %s FPGA Date: %u-%u-%u\n", str, board.d, board.m,
+               board.y);
+}
+
+static void __init axs10x_early_init(void)
+{
+       int mb_rev;
+       char mb[32];
+
+       /* Determine motherboard version */
+       if (ioread32((void __iomem *) CREG_MB_CONFIG) & (1 << 28))
+               mb_rev = 3;     /* HT-3 (rev3.0) */
+       else
+               mb_rev = 2;     /* HT-2 (rev2.0) */
+
+       axs10x_enable_gpio_intc_wire();
+
+       scnprintf(mb, 32, "MainBoard v%d", mb_rev);
+       axs10x_print_board_ver(CREG_MB_VER, mb);
+}
+
+#ifdef CONFIG_AXS101
+
+#define CREG_CPU_ADDR_770      (AXC001_CREG + 0x20)
+#define CREG_CPU_ADDR_TUNN     (AXC001_CREG + 0x60)
+#define CREG_CPU_ADDR_770_UPD  (AXC001_CREG + 0x34)
+#define CREG_CPU_ADDR_TUNN_UPD (AXC001_CREG + 0x74)
+
+#define CREG_CPU_ARC770_IRQ_MUX        (AXC001_CREG + 0x114)
+#define CREG_CPU_GPIO_UART_MUX (AXC001_CREG + 0x120)
+
+/*
+ * Set up System Memory Map for ARC cpu / peripherals controllers
+ *
+ * Each AXI master has a 4GB memory map specified as 16 apertures of 256MB, each
+ * of which maps to a corresponding 256MB aperture in Target slave memory map.
+ *
+ * e.g. ARC cpu AXI Master's aperture 8 (0x8000_0000) is mapped to aperture 0
+ * (0x0000_0000) of DDR Port 0 (slave #1)
+ *
+ * Access from cpu to MB controllers such as GMAC is setup using AXI Tunnel:
+ * which has master/slaves on both ends.
+ * e.g. aperture 14 (0xE000_0000) of ARC cpu is mapped to aperture 14
+ * (0xE000_0000) of CPU Card AXI Tunnel slave (slave #3) which is mapped to
+ * MB AXI Tunnel Master, which also has a mem map setup
+ *
+ * In the reverse direction, MB AXI Masters (e.g. GMAC) mem map is setup
+ * to map to MB AXI Tunnel slave which connects to CPU Card AXI Tunnel Master
+ */
+struct aperture {
+       unsigned int slave_sel:4, slave_off:4, pad:24;
+};
+
+/* CPU Card target slaves */
+#define AXC001_SLV_NONE                        0
+#define AXC001_SLV_DDR_PORT0           1
+#define AXC001_SLV_SRAM                        2
+#define AXC001_SLV_AXI_TUNNEL          3
+#define AXC001_SLV_AXI2APB             6
+#define AXC001_SLV_DDR_PORT1           7
+
+/* MB AXI Target slaves */
+#define AXS_MB_SLV_NONE                        0
+#define AXS_MB_SLV_AXI_TUNNEL_CPU      1
+#define AXS_MB_SLV_AXI_TUNNEL_HAPS     2
+#define AXS_MB_SLV_SRAM                        3
+#define AXS_MB_SLV_CONTROL             4
+
+/* MB AXI masters */
+#define AXS_MB_MST_TUNNEL_CPU          0
+#define AXS_MB_MST_USB_OHCI            10
+
+/*
+ * memmap for ARC core on CPU Card
+ */
+static const struct aperture axc001_memmap[16] = {
+       {AXC001_SLV_AXI_TUNNEL,         0x0},
+       {AXC001_SLV_AXI_TUNNEL,         0x1},
+       {AXC001_SLV_SRAM,               0x0}, /* 0x2000_0000: Local SRAM */
+       {AXC001_SLV_NONE,               0x0},
+       {AXC001_SLV_NONE,               0x0},
+       {AXC001_SLV_NONE,               0x0},
+       {AXC001_SLV_NONE,               0x0},
+       {AXC001_SLV_NONE,               0x0},
+       {AXC001_SLV_DDR_PORT0,          0x0}, /* 0x8000_0000: DDR   0..256M */
+       {AXC001_SLV_DDR_PORT0,          0x1}, /* 0x9000_0000: DDR 256..512M */
+       {AXC001_SLV_DDR_PORT0,          0x2},
+       {AXC001_SLV_DDR_PORT0,          0x3},
+       {AXC001_SLV_NONE,               0x0},
+       {AXC001_SLV_AXI_TUNNEL,         0xD},
+       {AXC001_SLV_AXI_TUNNEL,         0xE}, /* MB: CREG, CGU... */
+       {AXC001_SLV_AXI2APB,            0x0}, /* CPU Card local CREG, CGU... */
+};
+
+/*
+ * memmap for CPU Card AXI Tunnel Master (for access by MB controllers)
+ * GMAC (MB) -> MB AXI Tunnel slave -> CPU Card AXI Tunnel Master -> DDR
+ */
+static const struct aperture axc001_axi_tunnel_memmap[16] = {
+       {AXC001_SLV_AXI_TUNNEL,         0x0},
+       {AXC001_SLV_AXI_TUNNEL,         0x1},
+       {AXC001_SLV_SRAM,               0x0},
+       {AXC001_SLV_NONE,               0x0},
+       {AXC001_SLV_NONE,               0x0},
+       {AXC001_SLV_NONE,               0x0},
+       {AXC001_SLV_NONE,               0x0},
+       {AXC001_SLV_NONE,               0x0},
+       {AXC001_SLV_DDR_PORT1,          0x0},
+       {AXC001_SLV_DDR_PORT1,          0x1},
+       {AXC001_SLV_DDR_PORT1,          0x2},
+       {AXC001_SLV_DDR_PORT1,          0x3},
+       {AXC001_SLV_NONE,               0x0},
+       {AXC001_SLV_AXI_TUNNEL,         0xD},
+       {AXC001_SLV_AXI_TUNNEL,         0xE},
+       {AXC001_SLV_AXI2APB,            0x0},
+};
+
+/*
+ * memmap for MB AXI Masters
+ * Same mem map for all perip controllers as well as MB AXI Tunnel Master
+ */
+static const struct aperture axs_mb_memmap[16] = {
+       {AXS_MB_SLV_SRAM,               0x0},
+       {AXS_MB_SLV_SRAM,               0x0},
+       {AXS_MB_SLV_NONE,               0x0},
+       {AXS_MB_SLV_NONE,               0x0},
+       {AXS_MB_SLV_NONE,               0x0},
+       {AXS_MB_SLV_NONE,               0x0},
+       {AXS_MB_SLV_NONE,               0x0},
+       {AXS_MB_SLV_NONE,               0x0},
+       {AXS_MB_SLV_AXI_TUNNEL_CPU,     0x8},   /* DDR on CPU Card */
+       {AXS_MB_SLV_AXI_TUNNEL_CPU,     0x9},   /* DDR on CPU Card */
+       {AXS_MB_SLV_AXI_TUNNEL_CPU,     0xA},
+       {AXS_MB_SLV_AXI_TUNNEL_CPU,     0xB},
+       {AXS_MB_SLV_NONE,               0x0},
+       {AXS_MB_SLV_AXI_TUNNEL_HAPS,    0xD},
+       {AXS_MB_SLV_CONTROL,            0x0},   /* MB Local CREG, CGU... */
+       {AXS_MB_SLV_AXI_TUNNEL_CPU,     0xF},
+};
+
+static noinline void __init
+axs101_set_memmap(void __iomem *base, const struct aperture map[16])
+{
+       unsigned int slave_select, slave_offset;
+       int i;
+
+       slave_select = slave_offset = 0;
+       for (i = 0; i < 8; i++) {
+               slave_select |= map[i].slave_sel << (i << 2);
+               slave_offset |= map[i].slave_off << (i << 2);
+       }
+
+       iowrite32(slave_select, base + 0x0);    /* SLV0 */
+       iowrite32(slave_offset, base + 0x8);    /* OFFSET0 */
+
+       slave_select = slave_offset = 0;
+       for (i = 0; i < 8; i++) {
+               slave_select |= map[i+8].slave_sel << (i << 2);
+               slave_offset |= map[i+8].slave_off << (i << 2);
+       }
+
+       iowrite32(slave_select, base + 0x4);    /* SLV1 */
+       iowrite32(slave_offset, base + 0xC);    /* OFFSET1 */
+}
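As a worked example of the nibble packing above, the low half of axc001_memmap produces:

/*
 * axc001_memmap[0..7]: slave_sel = {3, 3, 2, 0, 0, 0, 0, 0}
 *                      slave_off = {0, 1, 0, 0, 0, 0, 0, 0}
 *
 *   SLV0    = 0x3 | (0x3 << 4) | (0x2 << 8) = 0x00000233
 *   OFFSET0 = 0x0 | (0x1 << 4)              = 0x00000010
 *
 * i.e. apertures 0 and 1 (0x0000_0000, 0x1000_0000) go out the AXI tunnel,
 * aperture 2 (0x2000_0000) hits the local SRAM, the rest stay unmapped.
 */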
+
+static void __init axs101_early_init(void)
+{
+       int i;
+
+       /* ARC 770D memory view */
+       axs101_set_memmap((void __iomem *) CREG_CPU_ADDR_770, axc001_memmap);
+       iowrite32(1, (void __iomem *) CREG_CPU_ADDR_770_UPD);
+
+       /* AXI tunnel memory map (incoming traffic from MB into CPU Card) */
+       axs101_set_memmap((void __iomem *) CREG_CPU_ADDR_TUNN,
+                             axc001_axi_tunnel_memmap);
+       iowrite32(1, (void __iomem *) CREG_CPU_ADDR_TUNN_UPD);
+
+       /* MB peripherals memory map */
+       for (i = AXS_MB_MST_TUNNEL_CPU; i <= AXS_MB_MST_USB_OHCI; i++)
+               axs101_set_memmap((void __iomem *) AXS_MB_CREG + (i << 4),
+                                     axs_mb_memmap);
+
+       iowrite32(0x3ff, (void __iomem *) AXS_MB_CREG + 0x100); /* Update */
+
+       /* GPIO pins 18 and 19 are used as UART rx and tx, respectively. */
+       iowrite32(0x01, (void __iomem *) CREG_CPU_GPIO_UART_MUX);
+
+       /* Set up the MB interrupt system (mux interrupts to GPIO7) */
+       iowrite32(0x01, (void __iomem *) CREG_MB_IRQ_MUX);
+
+       /* reset ethernet and ULPI interfaces */
+       iowrite32(0x18, (void __iomem *) CREG_MB_SW_RESET);
+
+       /* map GPIO 14:10 to ARC 9:5 (IRQ mux change for MB v2 onwards) */
+       iowrite32(0x52, (void __iomem *) CREG_CPU_ARC770_IRQ_MUX);
+
+       axs10x_early_init();
+}
+
+#endif /* CONFIG_AXS101 */
+
+#ifdef CONFIG_AXS103
+
+#define AXC003_CGU     0xF0000000
+#define AXC003_CREG    0xF0001000
+#define AXC003_MST_AXI_TUNNEL  0
+#define AXC003_MST_HS38                1
+
+#define CREG_CPU_AXI_M0_IRQ_MUX        (AXC003_CREG + 0x440)
+#define CREG_CPU_GPIO_UART_MUX (AXC003_CREG + 0x480)
+#define CREG_CPU_TUN_IO_CTRL   (AXC003_CREG + 0x494)
+
+
+union pll_reg {
+       struct {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+               unsigned int pad:17, noupd:1, bypass:1, edge:1, high:6, low:6;
+#else
+               unsigned int low:6, high:6, edge:1, bypass:1, noupd:1, pad:17;
+#endif
+       };
+       unsigned int val;
+};
+
+static unsigned int __init axs103_get_freq(void)
+{
+       union pll_reg idiv, fbdiv, odiv;
+       unsigned int f = 33333333;
+
+       idiv.val = ioread32((void __iomem *)AXC003_CGU + 0x80 + 0);
+       fbdiv.val = ioread32((void __iomem *)AXC003_CGU + 0x80 + 4);
+       odiv.val = ioread32((void __iomem *)AXC003_CGU + 0x80 + 8);
+
+       if (idiv.bypass != 1)
+               f = f / (idiv.low + idiv.high);
+
+       if (fbdiv.bypass != 1)
+               f = f * (fbdiv.low + fbdiv.high);
+
+       if (odiv.bypass != 1)
+               f = f / (odiv.low + odiv.high);
+
+       f = (f + 500000) / 1000000; /* Rounding */
+       return f;
+}
+
+static inline unsigned int __init encode_div(unsigned int id, int upd)
+{
+       union pll_reg div;
+
+       div.val = 0;
+
+       div.noupd = !upd;
+       div.bypass = id == 1 ? 1 : 0;
+       div.edge = (id%2 == 0) ? 0 : 1;  /* 0 = rising */
+       div.low = (id%2 == 0) ? id >> 1 : (id >> 1)+1;
+       div.high = id >> 1;
+
+       return div.val;
+}
+
+noinline static void __init
+axs103_set_freq(unsigned int id, unsigned int fd, unsigned int od)
+{
+       write_cgu_reg(encode_div(id, 0),
+                     (void __iomem *)AXC003_CGU + 0x80 + 0,
+                     (void __iomem *)AXC003_CGU + 0x110);
+
+       write_cgu_reg(encode_div(fd, 0),
+                     (void __iomem *)AXC003_CGU + 0x80 + 4,
+                     (void __iomem *)AXC003_CGU + 0x110);
+
+       write_cgu_reg(encode_div(od, 1),
+                     (void __iomem *)AXC003_CGU + 0x80 + 8,
+                     (void __iomem *)AXC003_CGU + 0x110);
+}
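As a worked example, tracing the 50 MHz entry of the table below (axs103_set_freq(1, 30, 20)) through encode_div() and axs103_get_freq():

/*
 * axs103_set_freq(1, 30, 20):
 *   id = 1  -> idiv:  bypass = 1            -> input divider skipped
 *   fd = 30 -> fbdiv: low = 15, high = 15   -> multiply by 30
 *   od = 20 -> odiv:  low = 10, high = 10   -> divide by 20
 *
 *   f = 33333333 * 30 / 20 = 49999999 Hz
 *   (49999999 + 500000) / 1000000 = 50      -> "Freq is 50MHz"
 */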
+
+static void __init axs103_early_init(void)
+{
+       switch (arc_get_core_freq()/1000000) {
+       case 33:
+               axs103_set_freq(1, 1, 1);
+               break;
+       case 50:
+               axs103_set_freq(1, 30, 20);
+               break;
+       case 75:
+               axs103_set_freq(2, 45, 10);
+               break;
+       case 90:
+               axs103_set_freq(2, 54, 10);
+               break;
+       case 100:
+               axs103_set_freq(1, 30, 10);
+               break;
+       case 125:
+               axs103_set_freq(2, 45,  6);
+               break;
+       default:
+               /*
+                * In this case, the core frequency derived from the
+                * DT "clock-frequency" property might not match the
+                * board value, so update it to match.
+                */
+               arc_set_core_freq(axs103_get_freq() * 1000000);
+               break;
+       }
+
+       pr_info("Freq is %dMHz\n", axs103_get_freq());
+
+       /* Memory maps are already configured by the pre-bootloader */
+
+       /* set GPIO mux to UART */
+       iowrite32(0x01, (void __iomem *) CREG_CPU_GPIO_UART_MUX);
+
+       iowrite32((0x00100000U | 0x000C0000U | 0x00003322U),
+                 (void __iomem *) CREG_CPU_TUN_IO_CTRL);
+
+       /* Set up the AXS_MB interrupt system.*/
+       iowrite32(12, (void __iomem *) (CREG_CPU_AXI_M0_IRQ_MUX
+                                        + (AXC003_MST_HS38 << 2)));
+
+       /* connect ICTL - Main Board with GPIO line */
+       iowrite32(0x01, (void __iomem *) CREG_MB_IRQ_MUX);
+
+       axs10x_print_board_ver(AXC003_CREG + 4088, "AXC003 CPU Card");
+
+       axs10x_early_init();
+
+#ifdef CONFIG_ARC_MCIP
+       /* No Hardware init, but filling the smp ops callbacks */
+       mcip_init_early_smp();
+#endif
+}
+#endif
+
+#ifdef CONFIG_AXS101
+
+static const char *axs101_compat[] __initconst = {
+       "snps,axs101",
+       NULL,
+};
+
+MACHINE_START(AXS101, "axs101")
+       .dt_compat      = axs101_compat,
+       .init_early     = axs101_early_init,
+MACHINE_END
+
+#endif /* CONFIG_AXS101 */
+
+#ifdef CONFIG_AXS103
+
+static const char *axs103_compat[] __initconst = {
+       "snps,axs103",
+       NULL,
+};
+
+MACHINE_START(AXS103, "axs103")
+       .dt_compat      = axs103_compat,
+       .init_early     = axs103_early_init,
+#ifdef CONFIG_ARC_MCIP
+       .init_smp       = mcip_init_smp,
+#endif
+MACHINE_END
+
+/*
+ * For the VDK OS-kit, to get the offset to pid and command fields
+ */
+char coware_swa_pid_offset[TASK_PID];
+char coware_swa_comm_offset[TASK_COMM];
+
+#endif /* CONFIG_AXS103 */
diff --git a/arch/arc/plat-sim/Kconfig b/arch/arc/plat-sim/Kconfig
new file mode 100644 (file)
index 0000000..18e39fc
--- /dev/null
@@ -0,0 +1,14 @@
+#
+# Copyright (C) 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+
+menuconfig ARC_PLAT_SIM
+       bool "ARC nSIM based simulation virtual platforms"
+       select ARC_HAS_COH_CACHES if SMP
+       help
+         Support for nSIM based ARC simulation platforms.
+         This includes both the standalone nSIM (UART only) and the SystemC OSCI VP.
diff --git a/arch/arc/plat-sim/Makefile b/arch/arc/plat-sim/Makefile
new file mode 100644 (file)
index 0000000..00b1a95
--- /dev/null
@@ -0,0 +1,9 @@
+#
+# Copyright (C) 2011-2012 Synopsys, Inc. (www.synopsys.com)
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+
+obj-y := platform.o
diff --git a/arch/arc/plat-sim/platform.c b/arch/arc/plat-sim/platform.c
new file mode 100644 (file)
index 0000000..d9e35b4
--- /dev/null
@@ -0,0 +1,37 @@
+/*
+ * ARC simulation Platform support code
+ *
+ * Copyright (C) 2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/init.h>
+#include <asm/mach_desc.h>
+#include <asm/mcip.h>
+
+/*----------------------- Machine Descriptions ------------------------------
+ *
+ * Machine description is simply a set of platform/board specific callbacks
+ * This is not directly related to DeviceTree based dynamic device creation,
+ * however as part of early device tree scan, we also select the right
+ * callback set, by matching the DT compatible name.
+ */
+
+static const char *simulation_compat[] __initconst = {
+       "snps,nsim",
+       "snps,nsim_hs",
+       "snps,nsimosci",
+       "snps,nsimosci_hs",
+       NULL,
+};
+
+MACHINE_START(SIMULATION, "simulation")
+       .dt_compat      = simulation_compat,
+#ifdef CONFIG_ARC_MCIP
+       .init_early     = mcip_init_early_smp,
+       .init_smp       = mcip_init_smp,
+#endif
+MACHINE_END
index 1f4e71876b00a0719c57b40ff9805accbb1f45bd..17ae0f3efac8e71d45043252e32298772ee43237 100644 (file)
@@ -5,6 +5,10 @@
 #include <linux/string.h>
 #include <asm/byteorder.h>
 
+typedef __be16 fdt16_t;
+typedef __be32 fdt32_t;
+typedef __be64 fdt64_t;
+
 #define fdt16_to_cpu(x)                be16_to_cpu(x)
 #define cpu_to_fdt16(x)                cpu_to_be16(x)
 #define fdt32_to_cpu(x)                be32_to_cpu(x)
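A minimal, hypothetical usage sketch for the new typedefs (not part of this commit, and assuming linux/types.h is already in scope): FDT cells are stored big-endian, so pointers into the blob carry the fdt32_t type and are converted on access:

/* hypothetical helper: read one 32-bit cell out of an FDT property */
static inline u32 read_fdt_cell(const void *prop)
{
	const fdt32_t *cell = prop;	/* big-endian, as stored in the blob */

	return fdt32_to_cpu(*cell);	/* native byte order for the CPU */
}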
index 7f0252c580e4bd0b32a2e2eb5de440bacbfd3b98..a718866ba52d8e827653c7cbcb3341778f480b83 100644 (file)
                        };
 
                        eth0: ethernet@70000 {
-                               compatible = "marvell,armada-370-neta";
                                reg = <0x70000 0x4000>;
                                interrupts = <8>;
                                clocks = <&gateclk 4>;
                        };
 
                        eth1: ethernet@74000 {
-                               compatible = "marvell,armada-370-neta";
                                reg = <0x74000 0x4000>;
                                interrupts = <10>;
                                clocks = <&gateclk 3>;
index 3f036bd635f4207ac2ffe87e809155a51debaa64..53a1a5abe14739d5c71a64b9689147fa7f0c37cb 100644 (file)
                                        dmacap,memset;
                                };
                        };
+
+                       ethernet@70000 {
+                               compatible = "marvell,armada-370-neta";
+                       };
+
+                       ethernet@74000 {
+                               compatible = "marvell,armada-370-neta";
+                       };
                };
        };
 };
index 8479fdc9e9c2468e072c3592528a263610c2acc1..c5fdc99f0dbebb47f88e135a4013fdfb9d732772 100644 (file)
                        };
 
                        eth3: ethernet@34000 {
-                               compatible = "marvell,armada-370-neta";
+                               compatible = "marvell,armada-xp-neta";
                                reg = <0x34000 0x4000>;
                                interrupts = <14>;
                                clocks = <&gateclk 1>;
index 661d54c815802d1bb1d2e1fa31cb255d90caf12e..0e24f1a38540e30ccc257972537c8982074f4c75 100644 (file)
                        };
 
                        eth3: ethernet@34000 {
-                               compatible = "marvell,armada-370-neta";
+                               compatible = "marvell,armada-xp-neta";
                                reg = <0x34000 0x4000>;
                                interrupts = <14>;
                                clocks = <&gateclk 1>;
index e78ce4ab6b75b03d4fd0785388b1d4d47c4ef1cd..3de9b761cc1ab0fe7a8d3f0ea9caa7ca72e2a989 100644 (file)
                        };
 
                        eth2: ethernet@30000 {
-                               compatible = "marvell,armada-370-neta";
+                               compatible = "marvell,armada-xp-neta";
                                reg = <0x30000 0x4000>;
                                interrupts = <12>;
                                clocks = <&gateclk 2>;
                                };
                        };
 
+                       ethernet@70000 {
+                               compatible = "marvell,armada-xp-neta";
+                       };
+
+                       ethernet@74000 {
+                               compatible = "marvell,armada-xp-neta";
+                       };
+
                        xor@f0900 {
                                compatible = "marvell,orion-xor";
                                reg = <0xF0900 0x100
        spi0_pins: spi0-pins {
                marvell,pins = "mpp36", "mpp37",
                               "mpp38", "mpp39";
-               marvell,function = "spi";
+               marvell,function = "spi0";
        };
 
        uart2_pins: uart2-pins {
index d260ba779ae53ce671db09142e99c573a23c22dd..18177f5a7464200453c9a82415b08bc73e6a6703 100644 (file)
                        usb2: gadget@fff78000 {
                                #address-cells = <1>;
                                #size-cells = <0>;
-                               compatible = "atmel,at91sam9rl-udc";
+                               compatible = "atmel,at91sam9g45-udc";
                                reg = <0x00600000 0x80000
                                       0xfff78000 0x400>;
                                interrupts = <27 IRQ_TYPE_LEVEL_HIGH 0>;
index 7521bdf17ef25ab133e61f7c7a71fd0a189004d2..b6c8df8d380ea41c9ed9807fb15dd4708d23a913 100644 (file)
                        usb2: gadget@f803c000 {
                                #address-cells = <1>;
                                #size-cells = <0>;
-                               compatible = "atmel,at91sam9rl-udc";
+                               compatible = "atmel,at91sam9g45-udc";
                                reg = <0x00500000 0x80000
                                       0xf803c000 0x400>;
                                interrupts = <23 IRQ_TYPE_LEVEL_HIGH 0>;
index a753178abc854a060a29630535667dcd4f7e7ca8..5dfd3a44bf82b38c614da737cea42c6498ef6a73 100644 (file)
                };
        };
 
+       clocks {
+               xinw {
+                       compatible = "fixed-clock";
+                       #clock-cells = <0>;
+                       clock-frequency = <32768>;
+                       clock-output-names = "xinw";
+               };
+               xin {
+                       compatible = "fixed-clock";
+                       #clock-cells = <0>;
+                       clock-frequency = <26000000>;
+                       clock-output-names = "xin";
+               };
+       };
+
        noc {
                compatible = "simple-bus";
                #address-cells = <1>;
index 5ab7548e04e1f459eb7105e5808bcc3e54f938b1..9e2444b07bceee0153c69a83e22c8fd7a54c06fe 100644 (file)
                usb0: gadget@00500000 {
                        #address-cells = <1>;
                        #size-cells = <0>;
-                       compatible = "atmel,at91sam9rl-udc";
+                       compatible = "atmel,sama5d3-udc";
                        reg = <0x00500000 0x100000
                               0xf8030000 0x4000>;
                        interrupts = <33 IRQ_TYPE_LEVEL_HIGH 2>;
index 653a1f851f2b8f5bf641cb586ede4f0841105c6b..3ee22ee13c5a899fba199321cb55b6a3a15221b1 100644 (file)
                usb0: gadget@00400000 {
                        #address-cells = <1>;
                        #size-cells = <0>;
-                       compatible = "atmel,at91sam9rl-udc";
+                       compatible = "atmel,sama5d3-udc";
                        reg = <0x00400000 0x100000
                               0xfc02c000 0x4000>;
                        interrupts = <47 IRQ_TYPE_LEVEL_HIGH 2>;
index fd6a6d23bc20b0f470c757a1592d88cf84ac5a9c..6d83a1bf0c7494593eb608ddba29f2bfea8d8455 100644 (file)
@@ -169,6 +169,7 @@ CONFIG_MTD_BLOCK=y
 CONFIG_MTD_M25P80=y
 CONFIG_MTD_NAND=y
 CONFIG_MTD_NAND_ATMEL=y
+CONFIG_MTD_NAND_BRCMNAND=y
 CONFIG_MTD_NAND_DAVINCI=y
 CONFIG_MTD_SPI_NOR=y
 CONFIG_MTD_UBI=y
index 1317ee40f4dfd6051daa9efaa77e52dfcee37fe5..04ff8e7b37dfd914e8319b0727aa3e8682efa0dc 100644 (file)
@@ -1,6 +1,8 @@
 #ifndef _ASM_ARM_XEN_HYPERVISOR_H
 #define _ASM_ARM_XEN_HYPERVISOR_H
 
+#include <linux/init.h>
+
 extern struct shared_info *HYPERVISOR_shared_info;
 extern struct start_info *xen_start_info;
 
@@ -18,4 +20,10 @@ static inline enum paravirt_lazy_mode paravirt_get_lazy_mode(void)
 
 extern struct dma_map_ops *xen_dma_ops;
 
+#ifdef CONFIG_XEN
+void __init xen_early_init(void);
+#else
+static inline void xen_early_init(void) { return; }
+#endif
+
 #endif /* _ASM_ARM_XEN_HYPERVISOR_H */
index 0b579b2f4e0e6490bb396c301cd2adf433e3b1ad..1bee8ca124945cdde1226e3cbff5503b9132c526 100644 (file)
@@ -12,7 +12,6 @@
 #include <xen/interface/grant_table.h>
 
 #define phys_to_machine_mapping_valid(pfn) (1)
-#define mfn_to_virt(m)                 (__va(mfn_to_pfn(m) << PAGE_SHIFT))
 
 #define pte_mfn            pte_pfn
 #define mfn_pte            pfn_pte
index 7335fc60d6cd5c054680e5e5d4e37b33fa1c51b9..cb4fb1e69778603d41356f3ed7a98695f4cc0cdb 100644 (file)
@@ -15,6 +15,8 @@
  *  that causes it to save wrong values...  Be aware!
  */
 
+#include <linux/init.h>
+
 #include <asm/assembler.h>
 #include <asm/memory.h>
 #include <asm/glue-df.h>
index e6d8c7658ffda7d4e5a03d600d9d1ed20e6ac57f..36c18b73c1f4631f3882ba492dbeb1f99e9b7242 100644 (file)
@@ -46,6 +46,7 @@
 #include <asm/cacheflush.h>
 #include <asm/cachetype.h>
 #include <asm/tlbflush.h>
+#include <asm/xen/hypervisor.h>
 
 #include <asm/prom.h>
 #include <asm/mach/arch.h>
@@ -972,6 +973,7 @@ void __init setup_arch(char **cmdline_p)
 
        arm_dt_init_cpu_maps();
        psci_init();
+       xen_early_init();
 #ifdef CONFIG_SMP
        if (is_smp()) {
                if (!mdesc->smp_init || !mdesc->smp_init()) {
index 1e184767c3be5d49f207c17438d078c60896a28b..e24df77abd79c91296f90192ba9b6e902c32935a 100644 (file)
@@ -369,7 +369,7 @@ static void __init at91_pm_sram_init(void)
                return;
        }
 
-       sram_pool = dev_get_gen_pool(&pdev->dev);
+       sram_pool = gen_pool_get(&pdev->dev);
        if (!sram_pool) {
                pr_warn("%s: sram pool unavailable!\n", __func__);
                return;
index e9184feffc4e5b55d46008be3f3587d2756ea6e3..0ac9e4b3b26525b1ff466ad60a92f6a79a1204c9 100644 (file)
@@ -19,7 +19,6 @@ config ARCH_BCM_IPROC
        select ARCH_REQUIRE_GPIOLIB
        select ARM_AMBA
        select PINCTRL
-       select MTD_NAND_BRCMNAND
        help
          This enables support for systems based on Broadcom IPROC architected SoCs.
          The IPROC complex contains one or more ARM CPUs along with common
index 03d401d20453eb447f15ab5dca15cd80967d2baf..3f29e6bca058623504e9c87c1b46fc4afefe7aaf 100644 (file)
 /*
  * Dove Low Interrupt Controller
  */
-#define IRQ_DOVE_BRIDGE                0
-#define IRQ_DOVE_H2C           1
-#define IRQ_DOVE_C2H           2
-#define IRQ_DOVE_NAND          3
-#define IRQ_DOVE_PDMA          4
-#define IRQ_DOVE_SPI1          5
-#define IRQ_DOVE_SPI0          6
-#define IRQ_DOVE_UART_0                7
-#define IRQ_DOVE_UART_1                8
-#define IRQ_DOVE_UART_2                9
-#define IRQ_DOVE_UART_3                10
-#define IRQ_DOVE_I2C           11
-#define IRQ_DOVE_GPIO_0_7      12
-#define IRQ_DOVE_GPIO_8_15     13
-#define IRQ_DOVE_GPIO_16_23    14
-#define IRQ_DOVE_PCIE0_ERR     15
-#define IRQ_DOVE_PCIE0         16
-#define IRQ_DOVE_PCIE1_ERR     17
-#define IRQ_DOVE_PCIE1         18
-#define IRQ_DOVE_I2S0          19
-#define IRQ_DOVE_I2S0_ERR      20
-#define IRQ_DOVE_I2S1          21
-#define IRQ_DOVE_I2S1_ERR      22
-#define IRQ_DOVE_USB_ERR       23
-#define IRQ_DOVE_USB0          24
-#define IRQ_DOVE_USB1          25
-#define IRQ_DOVE_GE00_RX       26
-#define IRQ_DOVE_GE00_TX       27
-#define IRQ_DOVE_GE00_MISC     28
-#define IRQ_DOVE_GE00_SUM      29
-#define IRQ_DOVE_GE00_ERR      30
-#define IRQ_DOVE_CRYPTO                31
+#define IRQ_DOVE_BRIDGE                (1 + 0)
+#define IRQ_DOVE_H2C           (1 + 1)
+#define IRQ_DOVE_C2H           (1 + 2)
+#define IRQ_DOVE_NAND          (1 + 3)
+#define IRQ_DOVE_PDMA          (1 + 4)
+#define IRQ_DOVE_SPI1          (1 + 5)
+#define IRQ_DOVE_SPI0          (1 + 6)
+#define IRQ_DOVE_UART_0                (1 + 7)
+#define IRQ_DOVE_UART_1                (1 + 8)
+#define IRQ_DOVE_UART_2                (1 + 9)
+#define IRQ_DOVE_UART_3                (1 + 10)
+#define IRQ_DOVE_I2C           (1 + 11)
+#define IRQ_DOVE_GPIO_0_7      (1 + 12)
+#define IRQ_DOVE_GPIO_8_15     (1 + 13)
+#define IRQ_DOVE_GPIO_16_23    (1 + 14)
+#define IRQ_DOVE_PCIE0_ERR     (1 + 15)
+#define IRQ_DOVE_PCIE0         (1 + 16)
+#define IRQ_DOVE_PCIE1_ERR     (1 + 17)
+#define IRQ_DOVE_PCIE1         (1 + 18)
+#define IRQ_DOVE_I2S0          (1 + 19)
+#define IRQ_DOVE_I2S0_ERR      (1 + 20)
+#define IRQ_DOVE_I2S1          (1 + 21)
+#define IRQ_DOVE_I2S1_ERR      (1 + 22)
+#define IRQ_DOVE_USB_ERR       (1 + 23)
+#define IRQ_DOVE_USB0          (1 + 24)
+#define IRQ_DOVE_USB1          (1 + 25)
+#define IRQ_DOVE_GE00_RX       (1 + 26)
+#define IRQ_DOVE_GE00_TX       (1 + 27)
+#define IRQ_DOVE_GE00_MISC     (1 + 28)
+#define IRQ_DOVE_GE00_SUM      (1 + 29)
+#define IRQ_DOVE_GE00_ERR      (1 + 30)
+#define IRQ_DOVE_CRYPTO                (1 + 31)
 
 /*
  * Dove High Interrupt Controller
  */
-#define IRQ_DOVE_AC97          32
-#define IRQ_DOVE_PMU           33
-#define IRQ_DOVE_CAM           34
-#define IRQ_DOVE_SDIO0         35
-#define IRQ_DOVE_SDIO1         36
-#define IRQ_DOVE_SDIO0_WAKEUP  37
-#define IRQ_DOVE_SDIO1_WAKEUP  38
-#define IRQ_DOVE_XOR_00                39
-#define IRQ_DOVE_XOR_01                40
-#define IRQ_DOVE_XOR0_ERR      41
-#define IRQ_DOVE_XOR_10                42
-#define IRQ_DOVE_XOR_11                43
-#define IRQ_DOVE_XOR1_ERR      44
-#define IRQ_DOVE_LCD_DCON      45
-#define IRQ_DOVE_LCD1          46
-#define IRQ_DOVE_LCD0          47
-#define IRQ_DOVE_GPU           48
-#define IRQ_DOVE_PERFORM_MNTR  49
-#define IRQ_DOVE_VPRO_DMA1     51
-#define IRQ_DOVE_SSP_TIMER     54
-#define IRQ_DOVE_SSP           55
-#define IRQ_DOVE_MC_L2_ERR     56
-#define IRQ_DOVE_CRYPTO_ERR    59
-#define IRQ_DOVE_GPIO_24_31    60
-#define IRQ_DOVE_HIGH_GPIO     61
-#define IRQ_DOVE_SATA          62
+#define IRQ_DOVE_AC97          (1 + 32)
+#define IRQ_DOVE_PMU           (1 + 33)
+#define IRQ_DOVE_CAM           (1 + 34)
+#define IRQ_DOVE_SDIO0         (1 + 35)
+#define IRQ_DOVE_SDIO1         (1 + 36)
+#define IRQ_DOVE_SDIO0_WAKEUP  (1 + 37)
+#define IRQ_DOVE_SDIO1_WAKEUP  (1 + 38)
+#define IRQ_DOVE_XOR_00                (1 + 39)
+#define IRQ_DOVE_XOR_01                (1 + 40)
+#define IRQ_DOVE_XOR0_ERR      (1 + 41)
+#define IRQ_DOVE_XOR_10                (1 + 42)
+#define IRQ_DOVE_XOR_11                (1 + 43)
+#define IRQ_DOVE_XOR1_ERR      (1 + 44)
+#define IRQ_DOVE_LCD_DCON      (1 + 45)
+#define IRQ_DOVE_LCD1          (1 + 46)
+#define IRQ_DOVE_LCD0          (1 + 47)
+#define IRQ_DOVE_GPU           (1 + 48)
+#define IRQ_DOVE_PERFORM_MNTR  (1 + 49)
+#define IRQ_DOVE_VPRO_DMA1     (1 + 51)
+#define IRQ_DOVE_SSP_TIMER     (1 + 54)
+#define IRQ_DOVE_SSP           (1 + 55)
+#define IRQ_DOVE_MC_L2_ERR     (1 + 56)
+#define IRQ_DOVE_CRYPTO_ERR    (1 + 59)
+#define IRQ_DOVE_GPIO_24_31    (1 + 60)
+#define IRQ_DOVE_HIGH_GPIO     (1 + 61)
+#define IRQ_DOVE_SATA          (1 + 62)
 
 /*
  * DOVE General Purpose Pins
  */
-#define IRQ_DOVE_GPIO_START    64
+#define IRQ_DOVE_GPIO_START    65
 #define NR_GPIO_IRQS           64
 
 /*
index 4a5a7aedcb763e9673dd8d51b416c6f1024e3310..df0223f76fa92d8752f31d2ccb1dd40e18d11a4f 100644 (file)
@@ -126,14 +126,14 @@ __exception_irq_entry dove_legacy_handle_irq(struct pt_regs *regs)
        stat = readl_relaxed(dove_irq_base + IRQ_CAUSE_LOW_OFF);
        stat &= readl_relaxed(dove_irq_base + IRQ_MASK_LOW_OFF);
        if (stat) {
-               unsigned int hwirq = __fls(stat);
+               unsigned int hwirq = 1 + __fls(stat);
                handle_IRQ(hwirq, regs);
                return;
        }
        stat = readl_relaxed(dove_irq_base + IRQ_CAUSE_HIGH_OFF);
        stat &= readl_relaxed(dove_irq_base + IRQ_MASK_HIGH_OFF);
        if (stat) {
-               unsigned int hwirq = 32 + __fls(stat);
+               unsigned int hwirq = 33 + __fls(stat);
                handle_IRQ(hwirq, regs);
                return;
        }
@@ -144,8 +144,8 @@ void __init dove_init_irq(void)
 {
        int i;
 
-       orion_irq_init(0, IRQ_VIRT_BASE + IRQ_MASK_LOW_OFF);
-       orion_irq_init(32, IRQ_VIRT_BASE + IRQ_MASK_HIGH_OFF);
+       orion_irq_init(1, IRQ_VIRT_BASE + IRQ_MASK_LOW_OFF);
+       orion_irq_init(33, IRQ_VIRT_BASE + IRQ_MASK_HIGH_OFF);
 
 #ifdef CONFIG_MULTI_IRQ_HANDLER
        set_handle_irq(dove_legacy_handle_irq);
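Worked from the two hunks above (everything shifted up by one, presumably so hwirq 0 is never handed to the IRQ core), a pending cause bit n now maps to hwirq n + 1, which matches the renumbered IRQ_DOVE_* defines:

/*
 * UART0 pending => bit 7 set in IRQ_CAUSE_LOW:
 *   hwirq = 1 + __fls(BIT(7))  = 8  = IRQ_DOVE_UART_0   ( == 1 + 7 )
 * SDIO0 pending => bit 3 set in IRQ_CAUSE_HIGH:
 *   hwirq = 33 + __fls(BIT(3)) = 36 = IRQ_DOVE_SDIO0    ( == 1 + 35 )
 */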
index 4bd8b76538175aa2ba4ab3f58a127b94bda08939..5f8ddcdeeacf1117d92313e6cb34608be136a955 100644 (file)
@@ -224,6 +224,25 @@ static void __init exynos_init_irq(void)
        exynos_map_pmu();
 }
 
+static const struct of_device_id exynos_cpufreq_matches[] = {
+       { .compatible = "samsung,exynos4210", .data = "cpufreq-dt" },
+       { /* sentinel */ }
+};
+
+static void __init exynos_cpufreq_init(void)
+{
+       struct device_node *root = of_find_node_by_path("/");
+       const struct of_device_id *match;
+
+       match = of_match_node(exynos_cpufreq_matches, root);
+       if (!match) {
+               platform_device_register_simple("exynos-cpufreq", -1, NULL, 0);
+               return;
+       }
+
+       platform_device_register_simple(match->data, -1, NULL, 0);
+}
+
 static void __init exynos_dt_machine_init(void)
 {
        /*
@@ -246,7 +265,7 @@ static void __init exynos_dt_machine_init(void)
            of_machine_is_compatible("samsung,exynos5250"))
                platform_device_register(&exynos_cpuidle);
 
-       platform_device_register_simple("exynos-cpufreq", -1, NULL, 0);
+       exynos_cpufreq_init();
 
        of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
 }
index 0309ccda36a91704e3aa4a395cebce66aa9eaa21..1885676c23c08238d3ebb193de6239daf84a3e89 100644 (file)
@@ -297,7 +297,7 @@ static int __init imx_suspend_alloc_ocram(
                goto put_node;
        }
 
-       ocram_pool = dev_get_gen_pool(&pdev->dev);
+       ocram_pool = gen_pool_get(&pdev->dev);
        if (!ocram_pool) {
                pr_warn("%s: ocram pool unavailable!\n", __func__);
                ret = -ENODEV;
index b01650d94f910111d04628dfc87a21cac4b6b9bf..93ecf559d06d64215a6de790e79624e14f59f4f5 100644 (file)
@@ -451,7 +451,7 @@ static int __init imx6q_suspend_init(const struct imx6_pm_socdata *socdata)
                goto put_node;
        }
 
-       ocram_pool = dev_get_gen_pool(&pdev->dev);
+       ocram_pool = gen_pool_get(&pdev->dev);
        if (!ocram_pool) {
                pr_warn("%s: ocram pool unavailable!\n", __func__);
                ret = -ENODEV;
index 9ecb8f9c4ef507788e6187fef5f3371815d3c982..d4f7dc87042b28c565ff47721492d4876f655447 100644 (file)
@@ -283,25 +283,25 @@ static int lpc32xx_set_irq_type(struct irq_data *d, unsigned int type)
        case IRQ_TYPE_EDGE_RISING:
                /* Rising edge sensitive */
                __lpc32xx_set_irq_type(d->hwirq, 1, 1);
-               __irq_set_handler_locked(d->hwirq, handle_edge_irq);
+               __irq_set_handler_locked(d->irq, handle_edge_irq);
                break;
 
        case IRQ_TYPE_EDGE_FALLING:
                /* Falling edge sensitive */
                __lpc32xx_set_irq_type(d->hwirq, 0, 1);
-               __irq_set_handler_locked(d->hwirq, handle_edge_irq);
+               __irq_set_handler_locked(d->irq, handle_edge_irq);
                break;
 
        case IRQ_TYPE_LEVEL_LOW:
                /* Low level sensitive */
                __lpc32xx_set_irq_type(d->hwirq, 0, 0);
-               __irq_set_handler_locked(d->hwirq, handle_level_irq);
+               __irq_set_handler_locked(d->irq, handle_level_irq);
                break;
 
        case IRQ_TYPE_LEVEL_HIGH:
                /* High level sensitive */
                __lpc32xx_set_irq_type(d->hwirq, 1, 0);
-               __irq_set_handler_locked(d->hwirq, handle_level_irq);
+               __irq_set_handler_locked(d->irq, handle_level_irq);
                break;
 
        /* Other modes are not supported */
index 48e4c4b3cd1c9a52f6e5580c531088e10aac8662..b093a196e80176d44cc083ee89d51b6fee1318c4 100644 (file)
  */
 
 #include <linux/linkage.h>
-#include <linux/init.h>
 
 #include <asm/assembler.h>
 
-       __CPUINIT
-
 ENTRY(mvebu_cortex_a9_secondary_startup)
 ARM_BE8(setend be)
        bl      armada_38x_scu_power_up
index df0a9cc5da59ad2ce7a95e7d13dc8e2a82937069..3d5000481c112dda6c0ee32a0ce036f3b08c9b53 100644 (file)
@@ -24,7 +24,7 @@
 
 extern void mvebu_cortex_a9_secondary_startup(void);
 
-static int __cpuinit mvebu_cortex_a9_boot_secondary(unsigned int cpu,
+static int mvebu_cortex_a9_boot_secondary(unsigned int cpu,
                                                    struct task_struct *idle)
 {
        int ret, hw_cpu;
index 6dfd4ab97b2aaf2e6982de227dd17e403dce7d24..301ab38d38ba884e194657d3e1173576b7f43ad0 100644 (file)
@@ -43,6 +43,9 @@ static void mvebu_armada_xp_gp_pm_enter(void __iomem *sdram_reg, u32 srcmd)
        for (i = 0; i < ARMADA_XP_GP_PIC_NR_GPIOS; i++)
                ackcmd |= BIT(pic_raw_gpios[i]);
 
+       srcmd = cpu_to_le32(srcmd);
+       ackcmd = cpu_to_le32(ackcmd);
+
        /*
         * Wait a while, the PIC needs quite a bit of time between the
         * two GPIO commands.
index f1a68c63dc9933c8be7c2bcd2f30fb1973db5f65..903c85be28972e6e7d28aaf9459f5bc87b4ac87c 100644 (file)
@@ -274,8 +274,5 @@ obj-y                                       += $(nand-m) $(nand-y)
 
 smsc911x-$(CONFIG_SMSC911X)            := gpmc-smsc911x.o
 obj-y                                  += $(smsc911x-m) $(smsc911x-y)
-ifneq ($(CONFIG_HWSPINLOCK_OMAP),)
-obj-y                                  += hwspinlock.o
-endif
 
 obj-y                                  += common-board-devices.o twl-common.o dss-common.o
diff --git a/arch/arm/mach-omap2/hwspinlock.c b/arch/arm/mach-omap2/hwspinlock.c
deleted file mode 100644 (file)
index ef175ac..0000000
+++ /dev/null
@@ -1,60 +0,0 @@
-/*
- * OMAP hardware spinlock device initialization
- *
- * Copyright (C) 2010 Texas Instruments Incorporated - http://www.ti.com
- *
- * Contact: Simon Que <sque@ti.com>
- *          Hari Kanigeri <h-kanigeri2@ti.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
- */
-
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/err.h>
-#include <linux/hwspinlock.h>
-
-#include "soc.h"
-#include "omap_hwmod.h"
-#include "omap_device.h"
-
-static struct hwspinlock_pdata omap_hwspinlock_pdata __initdata = {
-       .base_id = 0,
-};
-
-static int __init hwspinlocks_init(void)
-{
-       int retval = 0;
-       struct omap_hwmod *oh;
-       struct platform_device *pdev;
-       const char *oh_name = "spinlock";
-       const char *dev_name = "omap_hwspinlock";
-
-       /*
-        * Hwmod lookup will fail in case our platform doesn't support the
-        * hardware spinlock module, so it is safe to run this initcall
-        * on all omaps
-        */
-       oh = omap_hwmod_lookup(oh_name);
-       if (oh == NULL)
-               return -EINVAL;
-
-       pdev = omap_device_build(dev_name, 0, oh, &omap_hwspinlock_pdata,
-                               sizeof(struct hwspinlock_pdata));
-       if (IS_ERR(pdev)) {
-               pr_err("Can't build omap_device for %s:%s\n", dev_name,
-                                                               oh_name);
-               retval = PTR_ERR(pdev);
-       }
-
-       return retval;
-}
-/* early board code might need to reserve specific hwspinlock instances */
-omap_postcore_initcall(hwspinlocks_init);
index 2e6ab67e2284497f9fc1d8fe323c2daaa4f34809..8fcec1cc101e09be617340f8c7e91b6e004b636f 100644 (file)
@@ -119,8 +119,7 @@ static int pmu_set_power_domain(int pd, bool on)
  * Handling of CPU cores
  */
 
-static int __cpuinit rockchip_boot_secondary(unsigned int cpu,
-                                            struct task_struct *idle)
+static int rockchip_boot_secondary(unsigned int cpu, struct task_struct *idle)
 {
        int ret;
 
index 1ed89fc2b7a8479191bfb6bc6b27b3c068434cc0..6a4199f2bffb8b36248d0993bf995e82cc1bc991 100644 (file)
@@ -56,7 +56,7 @@ static int socfpga_setup_ocram_self_refresh(void)
                goto put_node;
        }
 
-       ocram_pool = dev_get_gen_pool(&pdev->dev);
+       ocram_pool = gen_pool_get(&pdev->dev);
        if (!ocram_pool) {
                pr_warn("%s: ocram pool unavailable!\n", __func__);
                ret = -ENODEV;
index f61158c6ce7185a3b30ef396116a7fc2d74b3c72..5766ce2be32bbd28452c95ea9e43e92de0f4dcb0 100644 (file)
@@ -589,4 +589,4 @@ static int __init ve_spc_clk_init(void)
        platform_device_register_simple("vexpress-spc-cpufreq", -1, NULL, 0);
        return 0;
 }
-module_init(ve_spc_clk_init);
+device_initcall(ve_spc_clk_init);
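
Several conversions in this merge replace module_init() in files that can only be built in. For built-in code module_init() already expands to device_initcall(), so spelling the level out documents when the code runs without changing behaviour; a few hunks pick a different level instead (arch_initcall, subsys_initcall, late_initcall). A trivial sketch of the resulting form:

#include <linux/init.h>

static int __init example_init(void)
{
	/* Runs once at the device_initcall stage of a built-in kernel;
	 * there is no module_exit() counterpart because the code cannot
	 * be unloaded. */
	return 0;
}
device_initcall(example_init);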
index 7d0f07020c809598c8a5ea292093d3676c083548..6c09cc440a2b24c4c0acfbd3027c74f9a1dc9efe 100644 (file)
@@ -24,6 +24,7 @@
 #include <linux/cpuidle.h>
 #include <linux/cpufreq.h>
 #include <linux/cpu.h>
+#include <linux/console.h>
 
 #include <linux/mm.h>
 
@@ -51,7 +52,9 @@ EXPORT_SYMBOL_GPL(xen_have_vector_callback);
 int xen_platform_pci_unplug = XEN_UNPLUG_ALL;
 EXPORT_SYMBOL_GPL(xen_platform_pci_unplug);
 
-static __read_mostly int xen_events_irq = -1;
+static __read_mostly unsigned int xen_events_irq;
+
+static __initdata struct device_node *xen_node;
 
 int xen_remap_domain_mfn_array(struct vm_area_struct *vma,
                               unsigned long addr,
@@ -150,40 +153,28 @@ static irqreturn_t xen_arm_callback(int irq, void *arg)
  * documentation of the Xen Device Tree format.
  */
 #define GRANT_TABLE_PHYSADDR 0
-static int __init xen_guest_init(void)
+void __init xen_early_init(void)
 {
-       struct xen_add_to_physmap xatp;
-       static struct shared_info *shared_info_page = 0;
-       struct device_node *node;
        int len;
        const char *s = NULL;
        const char *version = NULL;
        const char *xen_prefix = "xen,xen-";
-       struct resource res;
-       phys_addr_t grant_frames;
 
-       node = of_find_compatible_node(NULL, NULL, "xen,xen");
-       if (!node) {
+       xen_node = of_find_compatible_node(NULL, NULL, "xen,xen");
+       if (!xen_node) {
                pr_debug("No Xen support\n");
-               return 0;
+               return;
        }
-       s = of_get_property(node, "compatible", &len);
+       s = of_get_property(xen_node, "compatible", &len);
        if (strlen(xen_prefix) + 3  < len &&
                        !strncmp(xen_prefix, s, strlen(xen_prefix)))
                version = s + strlen(xen_prefix);
        if (version == NULL) {
                pr_debug("Xen version not found\n");
-               return 0;
+               return;
        }
-       if (of_address_to_resource(node, GRANT_TABLE_PHYSADDR, &res))
-               return 0;
-       grant_frames = res.start;
-       xen_events_irq = irq_of_parse_and_map(node, 0);
-       pr_info("Xen %s support found, events_irq=%d gnttab_frame=%pa\n",
-                       version, xen_events_irq, &grant_frames);
 
-       if (xen_events_irq < 0)
-               return -ENODEV;
+       pr_info("Xen %s support found\n", version);
 
        xen_domain_type = XEN_HVM_DOMAIN;
 
@@ -194,9 +185,34 @@ static int __init xen_guest_init(void)
        else
                xen_start_info->flags &= ~(SIF_INITDOMAIN|SIF_PRIVILEGED);
 
-       if (!shared_info_page)
-               shared_info_page = (struct shared_info *)
-                       get_zeroed_page(GFP_KERNEL);
+       if (!console_set_on_cmdline && !xen_initial_domain())
+               add_preferred_console("hvc", 0, NULL);
+}
+
+static int __init xen_guest_init(void)
+{
+       struct xen_add_to_physmap xatp;
+       struct shared_info *shared_info_page = NULL;
+       struct resource res;
+       phys_addr_t grant_frames;
+
+       if (!xen_domain())
+               return 0;
+
+       if (of_address_to_resource(xen_node, GRANT_TABLE_PHYSADDR, &res)) {
+               pr_err("Xen grant table base address not found\n");
+               return -ENODEV;
+       }
+       grant_frames = res.start;
+
+       xen_events_irq = irq_of_parse_and_map(xen_node, 0);
+       if (!xen_events_irq) {
+               pr_err("Xen event channel interrupt not found\n");
+               return -ENODEV;
+       }
+
+       shared_info_page = (struct shared_info *)get_zeroed_page(GFP_KERNEL);
+
        if (!shared_info_page) {
                pr_err("not enough memory\n");
                return -ENOMEM;
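
One effect of making xen_events_irq unsigned above is that failure is now detected by checking for 0: irq_of_parse_and_map() returns 0 rather than a negative errno when no mapping can be created, so the old "xen_events_irq < 0" test could not catch it. A minimal sketch of the corrected check, assuming xen_node is a valid, already-located device node:

#include <linux/errno.h>
#include <linux/of_irq.h>

static int map_event_irq(struct device_node *xen_node)
{
	unsigned int irq = irq_of_parse_and_map(xen_node, 0);

	/* 0 means "no mapping could be created"; it is never a usable IRQ here. */
	if (!irq)
		return -ENODEV;

	return irq;
}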
index 498325074a06fa911a5cfa9f37137c7936f8b9c4..03e75fef15b8254483929f1332de033a1eeeac05 100644 (file)
 #include <xen/xen.h>
 #include <xen/interface/grant_table.h>
 #include <xen/interface/memory.h>
+#include <xen/page.h>
 #include <xen/swiotlb-xen.h>
 
 #include <asm/cacheflush.h>
-#include <asm/xen/page.h>
 #include <asm/xen/hypercall.h>
 #include <asm/xen/interface.h>
 
index cb7a14c5cd69e6814df607cea20afe9342e0e8d2..887596c67b129a024ffd2fd0ce2db26d4762f89e 100644 (file)
 
 #include <xen/xen.h>
 #include <xen/interface/memory.h>
+#include <xen/page.h>
 #include <xen/swiotlb-xen.h>
 
 #include <asm/cacheflush.h>
-#include <asm/xen/page.h>
 #include <asm/xen/hypercall.h>
 #include <asm/xen/interface.h>
 
index 0bb287ca0a98e8dcaac6ebd6497d762005b27013..0689c3fb56e3d84fe3ed7790f4a6b25835c86555 100644 (file)
                        phy-names = "sata-phy";
                };
 
+               sbgpio: sbgpio@17001000{
+                       compatible = "apm,xgene-gpio-sb";
+                       reg = <0x0 0x17001000 0x0 0x400>;
+                       #gpio-cells = <2>;
+                       gpio-controller;
+                       interrupts =    <0x0 0x28 0x1>,
+                                       <0x0 0x29 0x1>,
+                                       <0x0 0x2a 0x1>,
+                                       <0x0 0x2b 0x1>,
+                                       <0x0 0x2c 0x1>,
+                                       <0x0 0x2d 0x1>;
+               };
+
                rtc: rtc@10510000 {
                        compatible = "apm,xgene-rtc";
                        reg = <0x0 0x10510000 0x0 0x400>;
index 7ce589ca54a4fbb8c3b9ea3c5c5235737e1947e7..9047cab68fd3494ebf1c634f137fddcfde955932 100644 (file)
@@ -15,7 +15,7 @@
 #include <asm/cpuidle.h>
 #include <asm/cpu_ops.h>
 
-int arm_cpuidle_init(unsigned int cpu)
+int __init arm_cpuidle_init(unsigned int cpu)
 {
        int ret = -EOPNOTSUPP;
 
index e7d934d3afe02817fc53bf437d19c36195025b79..7a1a5da6c8c1b997edbd12e06e1ff90549f50337 100644 (file)
@@ -31,7 +31,6 @@
 #include <asm/current.h>
 #include <asm/debug-monitors.h>
 #include <asm/hw_breakpoint.h>
-#include <asm/kdebug.h>
 #include <asm/traps.h>
 #include <asm/cputype.h>
 #include <asm/system_misc.h>
index 702591f6180a8d50704648977ff48e4f6811880d..b31e9a4b62754bb49aa10bcb7cd3fcead3ffac8d 100644 (file)
@@ -1318,7 +1318,7 @@ static int armpmu_device_probe(struct platform_device *pdev)
        /* Don't bother with PPIs; they're already affine */
        irq = platform_get_irq(pdev, 0);
        if (irq >= 0 && irq_is_percpu(irq))
-               return 0;
+               goto out;
 
        irqs = kcalloc(pdev->num_resources, sizeof(*irqs), GFP_KERNEL);
        if (!irqs)
@@ -1340,12 +1340,13 @@ static int armpmu_device_probe(struct platform_device *pdev)
                        if (arch_find_n_match_cpu_physical_id(dn, cpu, NULL))
                                break;
 
-               of_node_put(dn);
                if (cpu >= nr_cpu_ids) {
                        pr_warn("Failed to find logical CPU for %s\n",
                                dn->name);
+                       of_node_put(dn);
                        break;
                }
+               of_node_put(dn);
 
                irqs[i] = cpu;
        }
@@ -1355,6 +1356,7 @@ static int armpmu_device_probe(struct platform_device *pdev)
        else
                kfree(irqs);
 
+out:
        cpu_pmu->plat_device = pdev;
        return 0;
 }
index ffd3970721bf6497db2f622bcd3ed31fd3007de1..f3067d4d4e35711680376372395667efdc45ad07 100644 (file)
@@ -64,6 +64,7 @@
 #include <asm/psci.h>
 #include <asm/efi.h>
 #include <asm/virt.h>
+#include <asm/xen/hypervisor.h>
 
 unsigned long elf_hwcap __read_mostly;
 EXPORT_SYMBOL_GPL(elf_hwcap);
@@ -401,6 +402,7 @@ void __init setup_arch(char **cmdline_p)
        } else {
                psci_acpi_init();
        }
+       xen_early_init();
 
        cpu_read_bootcpu_ops();
 #ifdef CONFIG_SMP
index 4b2121bd7f9cf9f6244ba4e3ff7acca7d00e80e6..695801a54ca54dac465c2b421a822658e571cc88 100644 (file)
@@ -396,13 +396,13 @@ acpi_map_gic_cpu_interface(struct acpi_madt_generic_interrupt *processor)
 {
        u64 hwid = processor->arm_mpidr;
 
-       if (hwid & ~MPIDR_HWID_BITMASK || hwid == INVALID_HWID) {
-               pr_err("skipping CPU entry with invalid MPIDR 0x%llx\n", hwid);
+       if (!(processor->flags & ACPI_MADT_ENABLED)) {
+               pr_debug("skipping disabled CPU entry with 0x%llx MPIDR\n", hwid);
                return;
        }
 
-       if (!(processor->flags & ACPI_MADT_ENABLED)) {
-               pr_err("skipping disabled CPU entry with 0x%llx MPIDR\n", hwid);
+       if (hwid & ~MPIDR_HWID_BITMASK || hwid == INVALID_HWID) {
+               pr_err("skipping CPU entry with invalid MPIDR 0x%llx\n", hwid);
                return;
        }
 
@@ -693,7 +693,7 @@ void handle_IPI(int ipinr, struct pt_regs *regs)
        struct pt_regs *old_regs = set_irq_regs(regs);
 
        if ((unsigned)ipinr < NR_IPI) {
-               trace_ipi_entry(ipi_types[ipinr]);
+               trace_ipi_entry_rcuidle(ipi_types[ipinr]);
                __inc_irq_stat(cpu, ipi_irqs[ipinr]);
        }
 
@@ -736,7 +736,7 @@ void handle_IPI(int ipinr, struct pt_regs *regs)
        }
 
        if ((unsigned)ipinr < NR_IPI)
-               trace_ipi_exit(ipi_types[ipinr]);
+               trace_ipi_exit_rcuidle(ipi_types[ipinr]);
        set_irq_regs(old_regs);
 }
 
index a12251c074a8298435cc5d10d7361f87a0c0b128..566bc4c350405427f1fb441169b300733aa6ddfd 100644 (file)
@@ -335,7 +335,7 @@ asmlinkage void __exception do_undefinstr(struct pt_regs *regs)
        if (call_undef_hook(regs) == 0)
                return;
 
-       if (show_unhandled_signals_ratelimited() && unhandled_signal(current, SIGILL)) {
+       if (unhandled_signal(current, SIGILL) && show_unhandled_signals_ratelimited()) {
                pr_info("%s[%d]: undefined instruction: pc=%p\n",
                        current->comm, task_pid_nr(current), pc);
                dump_instr(KERN_INFO, regs);
index b1fc69cd14994a0d75b404c810e04814ee298340..94d98cd1aad8a1e98472b95189e87f50f973156f 100644 (file)
@@ -115,7 +115,7 @@ static void __do_user_fault(struct task_struct *tsk, unsigned long addr,
 {
        struct siginfo si;
 
-       if (show_unhandled_signals_ratelimited() && unhandled_signal(tsk, sig)) {
+       if (unhandled_signal(tsk, sig) && show_unhandled_signals_ratelimited()) {
                pr_info("%s[%d]: unhandled %s (%d) at 0x%08lx, esr 0x%03x\n",
                        tsk->comm, task_pid_nr(tsk), fault_name(esr), sig,
                        addr, esr);
index cccc4af87a0372976e25e16667143892562b3457..831ec534d449b0b7d040b41815b78df4c1e6ac4d 100644 (file)
 
 int pmd_huge(pmd_t pmd)
 {
-       return !(pmd_val(pmd) & PMD_TABLE_BIT);
+       return pmd_val(pmd) && !(pmd_val(pmd) & PMD_TABLE_BIT);
 }
 
 int pud_huge(pud_t pud)
 {
 #ifndef __PAGETABLE_PMD_FOLDED
-       return !(pud_val(pud) & PUD_TABLE_BIT);
+       return pud_val(pud) && !(pud_val(pud) & PUD_TABLE_BIT);
 #else
        return 0;
 #endif
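
The hugetlb hunk above adds a non-zero check because a cleared entry also has the table bit clear, so testing the table bit alone misreports empty entries as huge mappings. A toy illustration of the predicate, using an illustrative TABLE_BIT rather than the real arm64 descriptor encoding:

#define TABLE_BIT	(1UL << 1)	/* illustrative bit position only */

static int entry_is_huge(unsigned long entry)
{
	/* A zero entry is "none", not a block mapping, even though its
	 * table bit happens to be clear; require a non-empty entry too. */
	return entry && !(entry & TABLE_BIT);
}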
index 82d3435bf14ffdf91fccb2e4857a1028c22750b6..a4ede4e2ddd1e7dfdca20594741e9e3d5f7f68dd 100644 (file)
@@ -117,7 +117,7 @@ void split_pud(pud_t *old_pud, pmd_t *pmd)
        int i = 0;
 
        do {
-               set_pmd(pmd, __pmd(addr | prot));
+               set_pmd(pmd, __pmd(addr | pgprot_val(prot)));
                addr += PMD_SIZE;
        } while (pmd++, i++, i < PTRS_PER_PMD);
 }
index de0a81a539a01ca450eb53e7919216c2a1e907c2..98a26ce82d266f164021a9a7d93b2694bad7641d 100644 (file)
 /* Rd = Rn >> shift; signed */
 #define A64_ASR(sf, Rd, Rn, shift) A64_SBFM(sf, Rd, Rn, shift, (sf) ? 63 : 31)
 
+/* Zero extend */
+#define A64_UXTH(sf, Rd, Rn) A64_UBFM(sf, Rd, Rn, 0, 15)
+#define A64_UXTW(sf, Rd, Rn) A64_UBFM(sf, Rd, Rn, 0, 31)
+
 /* Move wide (immediate) */
 #define A64_MOVEW(sf, Rd, imm16, shift, type) \
        aarch64_insn_gen_movewide(Rd, imm16, shift, \
index dc6a4842683aa500b9a5fc3af41caec349472902..c047598b09e051cfdad2b7e1f24c21bb6e6153f8 100644 (file)
@@ -113,9 +113,9 @@ static inline void emit_a64_mov_i(const int is64, const int reg,
 static inline int bpf2a64_offset(int bpf_to, int bpf_from,
                                 const struct jit_ctx *ctx)
 {
-       int to = ctx->offset[bpf_to + 1];
+       int to = ctx->offset[bpf_to];
        /* -1 to account for the Branch instruction */
-       int from = ctx->offset[bpf_from + 1] - 1;
+       int from = ctx->offset[bpf_from] - 1;
 
        return to - from;
 }
@@ -289,23 +289,41 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx)
        case BPF_ALU | BPF_END | BPF_FROM_BE:
 #ifdef CONFIG_CPU_BIG_ENDIAN
                if (BPF_SRC(code) == BPF_FROM_BE)
-                       break;
+                       goto emit_bswap_uxt;
 #else /* !CONFIG_CPU_BIG_ENDIAN */
                if (BPF_SRC(code) == BPF_FROM_LE)
-                       break;
+                       goto emit_bswap_uxt;
 #endif
                switch (imm) {
                case 16:
                        emit(A64_REV16(is64, dst, dst), ctx);
+                       /* zero-extend 16 bits into 64 bits */
+                       emit(A64_UXTH(is64, dst, dst), ctx);
                        break;
                case 32:
                        emit(A64_REV32(is64, dst, dst), ctx);
+                       /* upper 32 bits already cleared */
                        break;
                case 64:
                        emit(A64_REV64(dst, dst), ctx);
                        break;
                }
                break;
+emit_bswap_uxt:
+               switch (imm) {
+               case 16:
+                       /* zero-extend 16 bits into 64 bits */
+                       emit(A64_UXTH(is64, dst, dst), ctx);
+                       break;
+               case 32:
+                       /* zero-extend 32 bits into 64 bits */
+                       emit(A64_UXTW(is64, dst, dst), ctx);
+                       break;
+               case 64:
+                       /* nop */
+                       break;
+               }
+               break;
        /* dst = imm */
        case BPF_ALU | BPF_MOV | BPF_K:
        case BPF_ALU64 | BPF_MOV | BPF_K:
@@ -640,10 +658,11 @@ static int build_body(struct jit_ctx *ctx)
                const struct bpf_insn *insn = &prog->insnsi[i];
                int ret;
 
+               ret = build_insn(insn, ctx);
+
                if (ctx->image == NULL)
                        ctx->offset[i] = ctx->idx;
 
-               ret = build_insn(insn, ctx);
                if (ret > 0) {
                        i++;
                        continue;
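
The byte-swap change above exists because eBPF's 16- and 32-bit endian-conversion ops must leave the destination zero-extended, while REV16 only swaps the bytes inside each halfword and leaves the upper bits of the register alone (the 32-bit REV32 result is already zero-extended, hence the "upper 32 bits already cleared" comment). A small C model of the 16-bit case:

#include <stdint.h>

static uint64_t bswap16_zero_extend(uint64_t reg)
{
	uint16_t half = (uint16_t)reg;			/* low halfword only */

	half = (uint16_t)((half >> 8) | (half << 8));	/* byte swap, as REV16 does per halfword */
	return (uint64_t)half;				/* the extra UXTH: bits 63..16 become 0 */
}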
index cfb298d6630582b6924824a6b7bd4aed4663d526..2d48b6a461664e5fd03d775cc4ba922dd56dacf7 100644 (file)
@@ -231,8 +231,7 @@ static int __init eic_probe(struct platform_device *pdev)
                irq_set_chip_data(eic->first_irq + i, eic);
        }
 
-       irq_set_chained_handler(int_irq, demux_eic_irq);
-       irq_set_handler_data(int_irq, eic);
+       irq_set_chained_handler_and_data(int_irq, demux_eic_irq, eic);
 
        if (pdev->id == 0) {
                nmi_eic = eic;
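
This conversion, repeated below for the psc_register_interrupts(), ar2315/ar5312, ralink intc and ar2315 PCI hunks, folds the irq_set_chained_handler()/irq_set_handler_data() pair into a single call, so the chained handler and its data are installed together rather than in two steps between which the handler could already fire. A minimal sketch with a hypothetical demux handler and cookie; note that at this point in the tree the flow handler still takes the IRQ number as its first argument:

#include <linux/irq.h>

static void my_demux_handler(unsigned int irq, struct irq_desc *desc)
{
	/* Hypothetical demultiplexer for a cascaded interrupt controller. */
}

static void my_controller_init(unsigned int parent_irq, void *cookie)
{
	/* Replaces:
	 *	irq_set_chained_handler(parent_irq, my_demux_handler);
	 *	irq_set_handler_data(parent_irq, cookie);
	 */
	irq_set_chained_handler_and_data(parent_irq, my_demux_handler, cookie);
}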
index c36efa0c7163c4dac53515ceb822154e559dd698..719dd796c12cb1cac2ca95de973700ac37942bdd 100644 (file)
@@ -136,7 +136,7 @@ void decode_address(char *buf, unsigned long address)
                                struct file *file = vma->vm_file;
 
                                if (file) {
-                                       char *d_name = d_path(&file->f_path, _tmpbuf,
+                                       char *d_name = file_path(file, _tmpbuf,
                                                      sizeof(_tmpbuf));
                                        if (!IS_ERR(d_name))
                                                name = d_name;
index 5047a33043bdf4bfa4d6ae3226f7385a6c703d37..f679a19dfeb8bbd857d8c577b14baeccf05d3865 100644 (file)
@@ -848,5 +848,4 @@ static void eeprom_disable_write_protect(void)
     /* Write protect disabled */
   }
 }
-
-module_init(eeprom_init);
+device_initcall(eeprom_init);
index 1b17d92cef8ebb98e57b34a63be0f6f8fb6d9508..9ef56092a4c54f9cdd49255ae6683bf33adeb72a 100644 (file)
@@ -145,6 +145,5 @@ unsigned long crisv32_intmem_virt_to_phys(void* addr)
                (unsigned long)intmem_virtual + MEM_INTMEM_START +
                RESERVED_SIZE);
 }
-
-module_init(crisv32_intmem_init);
+device_initcall(crisv32_intmem_init);
 
index c0e3707c2299bae3f4cb5cf1a1c7ded959f361e9..e1cf802d1639bb431cef9f27e8bfa29c3c2505f6 100644 (file)
@@ -9,7 +9,7 @@
  * 2 of the Licence, or (at your option) any later version.
  */
 
-#include <linux/init.h>
+#include <linux/module.h>
 #include <linux/platform_device.h>
 #include <linux/mtd/partitions.h>
 #include <linux/mtd/physmap.h>
index 3a428f19a00116ad963f7cdae2ddf469ec21a054..085047f3a545b283c41ec546cb2b71e623a4c7fa 100644 (file)
@@ -368,13 +368,4 @@ simscsi_init(void)
        scsi_host_put(host);
        return error;
 }
-
-static void __exit
-simscsi_exit(void)
-{
-       scsi_remove_host(host);
-       scsi_host_put(host);
-}
-
-module_init(simscsi_init);
-module_exit(simscsi_exit);
+device_initcall(simscsi_init);
index 7f3028965064b1af467bfe93028e1277006fabc1..97e48b0eefc7c18f54f0d1ee76860eed43a53c4e 100644 (file)
@@ -215,10 +215,6 @@ put_kernel_page (struct page *page, unsigned long address, pgprot_t pgprot)
        pmd_t *pmd;
        pte_t *pte;
 
-       if (!PageReserved(page))
-               printk(KERN_ERR "put_kernel_page: page at 0x%p not in reserved memory\n",
-                      page_address(page));
-
        pgd = pgd_offset_k(address);            /* note: this is NOT pgd_offset()! */
 
        {
index ea21d4cad540eb8207f29d19fb01dd5517843f4a..aa19b7ac8222a2fb604b90fc9edd3d3b86ef483c 100644 (file)
@@ -58,27 +58,22 @@ paddr_to_nid(unsigned long paddr)
  * SPARSEMEM to allocate the SPARSEMEM sectionmap on the NUMA node where
  * the section resides.
  */
-int __meminit __early_pfn_to_nid(unsigned long pfn)
+int __meminit __early_pfn_to_nid(unsigned long pfn,
+                                       struct mminit_pfnnid_cache *state)
 {
        int i, section = pfn >> PFN_SECTION_SHIFT, ssec, esec;
-       /*
-        * NOTE: The following SMP-unsafe globals are only used early in boot
-        * when the kernel is running single-threaded.
-        */
-       static int __meminitdata last_ssec, last_esec;
-       static int __meminitdata last_nid;
 
-       if (section >= last_ssec && section < last_esec)
-               return last_nid;
+       if (section >= state->last_start && section < state->last_end)
+               return state->last_nid;
 
        for (i = 0; i < num_node_memblks; i++) {
                ssec = node_memblk[i].start_paddr >> PA_SECTION_SHIFT;
                esec = (node_memblk[i].start_paddr + node_memblk[i].size +
                        ((1L << PA_SECTION_SHIFT) - 1)) >> PA_SECTION_SHIFT;
                if (section >= ssec && section < esec) {
-                       last_ssec = ssec;
-                       last_esec = esec;
-                       last_nid = node_memblk[i].nid;
+                       state->last_start = ssec;
+                       state->last_end = esec;
+                       state->last_nid = node_memblk[i].nid;
                        return node_memblk[i].nid;
                }
        }
index 27793f7aa99c3c406b5547511a59347b05dd0210..5b799d4deb747c481f3ad7a47d1c75374fa92715 100644 (file)
@@ -142,5 +142,4 @@ static int __init sn_salinfo_init(void)
                salinfo_platform_oemdata = &sn_salinfo_platform_oemdata;
        return 0;
 }
-
-module_init(sn_salinfo_init)
+device_initcall(sn_salinfo_init);
index 835fa04511c85ad0d6e71ba8ee1f3a2dc26fcc1b..272dde481d170e815d740d0880ae613c3567cca8 100644 (file)
@@ -148,14 +148,10 @@ static void psc_irq(unsigned int irq, struct irq_desc *desc)
 
 void __init psc_register_interrupts(void)
 {
-       irq_set_chained_handler(IRQ_AUTO_3, psc_irq);
-       irq_set_handler_data(IRQ_AUTO_3, (void *)0x30);
-       irq_set_chained_handler(IRQ_AUTO_4, psc_irq);
-       irq_set_handler_data(IRQ_AUTO_4, (void *)0x40);
-       irq_set_chained_handler(IRQ_AUTO_5, psc_irq);
-       irq_set_handler_data(IRQ_AUTO_5, (void *)0x50);
-       irq_set_chained_handler(IRQ_AUTO_6, psc_irq);
-       irq_set_handler_data(IRQ_AUTO_6, (void *)0x60);
+       irq_set_chained_handler_and_data(IRQ_AUTO_3, psc_irq, (void *)0x30);
+       irq_set_chained_handler_and_data(IRQ_AUTO_4, psc_irq, (void *)0x40);
+       irq_set_chained_handler_and_data(IRQ_AUTO_5, psc_irq, (void *)0x50);
+       irq_set_chained_handler_and_data(IRQ_AUTO_6, psc_irq, (void *)0x60);
 }
 
 void psc_irq_enable(int irq) {
index 8742e1cee4928f9d522ec329bb60f113ad67a89d..ec9a371f1e62c49b823bdac3b86d101804796da3 100644 (file)
@@ -161,8 +161,8 @@ void __init ar2315_arch_init_irq(void)
        irq = irq_create_mapping(domain, AR2315_MISC_IRQ_AHB);
        setup_irq(irq, &ar2315_ahb_err_interrupt);
 
-       irq_set_chained_handler(AR2315_IRQ_MISC, ar2315_misc_irq_handler);
-       irq_set_handler_data(AR2315_IRQ_MISC, domain);
+       irq_set_chained_handler_and_data(AR2315_IRQ_MISC,
+                                        ar2315_misc_irq_handler, domain);
 
        ar2315_misc_irq_domain = domain;
 }
index 094b938fd603d0da938d4e24aae8533f1413159b..e63e38fa488033499cdf005df028d15e9b616139 100644 (file)
@@ -156,8 +156,8 @@ void __init ar5312_arch_init_irq(void)
        irq = irq_create_mapping(domain, AR5312_MISC_IRQ_AHB_PROC);
        setup_irq(irq, &ar5312_ahb_err_interrupt);
 
-       irq_set_chained_handler(AR5312_IRQ_MISC, ar5312_misc_irq_handler);
-       irq_set_handler_data(AR5312_IRQ_MISC, domain);
+       irq_set_chained_handler_and_data(AR5312_IRQ_MISC,
+                                        ar5312_misc_irq_handler, domain);
 
        ar5312_misc_irq_domain = domain;
 }
index 69a8a8dabc2b215f6c8050498f50613ef1f90552..2a59265788413e3387cb358e35a9c720d1ba4a3b 100644 (file)
@@ -9,9 +9,6 @@
 # Copyright (C) 2005-2009 Cavium Networks
 #
 
-CFLAGS_octeon-platform.o = -I$(src)/../../../scripts/dtc/libfdt
-CFLAGS_setup.o = -I$(src)/../../../scripts/dtc/libfdt
-
 obj-y := cpu.o setup.o octeon-platform.o octeon-irq.o csrc-octeon.o
 obj-y += dma-octeon.o
 obj-y += octeon-memcpy.o
index ecd71db6258b350fb74eb6397c432f8e48e879fa..2e52cbd20cebac71a1dc50482d48983df00f15e8 100644 (file)
@@ -15,5 +15,3 @@ obj-y                         := sead3-lcd.o sead3-display.o sead3-init.o \
 obj-y                          += leds-sead3.o
 
 obj-$(CONFIG_EARLY_PRINTK)     += sead3-console.o
-
-CFLAGS_sead3-setup.o = -I$(src)/../../../scripts/dtc/libfdt
index dadb30306a0a10457782801eaec567544816c7d7..f8d0acb4f973635ff323585ce4e5a2166c081a3a 100644 (file)
@@ -384,8 +384,8 @@ static void ar2315_pci_irq_init(struct ar2315_pci_ctrl *apc)
 
        apc->irq_ext = irq_create_mapping(apc->domain, AR2315_PCI_IRQ_EXT);
 
-       irq_set_chained_handler(apc->irq, ar2315_pci_irq_handler);
-       irq_set_handler_data(apc->irq, apc);
+       irq_set_chained_handler_and_data(apc->irq, ar2315_pci_irq_handler,
+                                        apc);
 
        /* Clear any pending Abort or external Interrupts
         * and enable interrupt processing */
index da301e0a2f1f42825898616f1348e3e429228f93..53707aacc0f86cb13134546de07ccaf71d76321d 100644 (file)
@@ -184,8 +184,7 @@ static int __init intc_of_init(struct device_node *node,
 
        rt_intc_w32(INTC_INT_GLOBAL, INTC_REG_ENABLE);
 
-       irq_set_chained_handler(irq, ralink_intc_irq_handler);
-       irq_set_handler_data(irq, domain);
+       irq_set_chained_handler_and_data(irq, ralink_intc_irq_handler, domain);
 
        /* tell the kernel which irq is used for performance monitoring */
        rt_perfcount_irq = irq_create_mapping(domain, 9);
index 6ab3b73efcf8d4304d0b832d4d398f94a0f332d5..480de70f405980c1828f84b6a0eeba5c75203b73 100644 (file)
@@ -320,11 +320,11 @@ void migrate_irqs(void)
                if (irqd_is_per_cpu(data))
                        continue;
 
-               if (cpumask_test_cpu(self, &data->affinity) &&
+               if (cpumask_test_cpu(self, data->affinity) &&
                    !cpumask_intersects(&irq_affinity[irq], cpu_online_mask)) {
                        int cpu_id;
                        cpu_id = cpumask_first(cpu_online_mask);
-                       cpumask_set_cpu(cpu_id, &data->affinity);
+                       cpumask_set_cpu(cpu_id, data->affinity);
                }
                /* We need to operate irq_affinity_online atomically. */
                arch_local_cli_save(flags);
@@ -335,7 +335,7 @@ void migrate_irqs(void)
                        GxICR(irq) = x & GxICR_LEVEL;
                        tmp = GxICR(irq);
 
-                       new = cpumask_any_and(&data->affinity,
+                       new = cpumask_any_and(data->affinity,
                                              cpu_online_mask);
                        irq_affinity_online[irq] = new;
 
index 17fe083fcb6fa985cec4f108036ad490e11b785d..b03d8738d67cd682ef0a96d8770f8bb9ebc4fbc6 100644 (file)
@@ -96,5 +96,4 @@ static int __init asb2303_mtd_init(void)
        platform_device_register(&asb2303_sysflash);
        return 0;
 }
-
-module_init(asb2303_mtd_init);
+device_initcall(asb2303_mtd_init);
index be186a75f6225f46cb0f18e3bd191257aa20e460..9e3cc8a40ee9feb164842780ef885426681c9466 100644 (file)
@@ -19,7 +19,9 @@
 #include <linux/io.h>
 #include <linux/slab.h>
 
-#define ALTERA_TIMER_STATUS_REG                0
+#define ALTR_TIMER_COMPATIBLE          "altr,timer-1.0"
+
+#define ALTERA_TIMER_STATUS_REG        0
 #define ALTERA_TIMER_CONTROL_REG       4
 #define ALTERA_TIMER_PERIODL_REG       8
 #define ALTERA_TIMER_PERIODH_REG       12
@@ -304,7 +306,16 @@ void read_persistent_clock(struct timespec *ts)
 
 void __init time_init(void)
 {
+       struct device_node *np;
+       int count = 0;
+
+       for_each_compatible_node(np, NULL,  ALTR_TIMER_COMPATIBLE)
+               count++;
+
+       if (count < 2)
+               panic("%d timer is found, it needs 2 timers in system\n", count);
+
        clocksource_of_init();
 }
 
-CLOCKSOURCE_OF_DECLARE(nios2_timer, "altr,timer-1.0", nios2_time_init);
+CLOCKSOURCE_OF_DECLARE(nios2_timer, ALTR_TIMER_COMPATIBLE, nios2_time_init);
index d5cae55195ecfd4f108b1431ecf52193b5a8d282..10a5ae9553fd657211524e4af5257958815e134a 100644 (file)
@@ -207,8 +207,7 @@ static int __init pdc_console_tty_driver_init(void)
 
        return 0;
 }
-
-module_init(pdc_console_tty_driver_init);
+device_initcall(pdc_console_tty_driver_init);
 
 static struct tty_driver * pdc_console_device (struct console *c, int *index)
 {
index ba0c053e25ae9d66cf536ab87ce1cca9eef53dbf..518f4f5f1f43ec6b2dcaceb9b2f5c9536097d59f 100644 (file)
@@ -543,6 +543,7 @@ static int __init perf_init(void)
 
        return 0;
 }
+device_initcall(perf_init);
 
 /*
  * perf_start_counters(void)
@@ -847,5 +848,3 @@ printk("perf_rdr_write\n");
        }
 printk("perf_rdr_write done\n");
 }
-
-module_init(perf_init);
index 8dcd744e5728c86e889bc2a9c4dda71c0c8787f0..7e3789ea396b88103644cf14726337fb98b00392 100644 (file)
@@ -10,6 +10,10 @@ typedef u32 uint32_t;
 typedef u64 uint64_t;
 typedef unsigned long uintptr_t;
 
+typedef __be16 fdt16_t;
+typedef __be32 fdt32_t;
+typedef __be64 fdt64_t;
+
 #define fdt16_to_cpu(x)                be16_to_cpu(x)
 #define cpu_to_fdt16(x)                cpu_to_be16(x)
 #define fdt32_to_cpu(x)                be32_to_cpu(x)
index 5603320dce07dc55b5c84f36dc834617f6db4597..53f8f27f94e4e57d37d0f8bb6ebc9a17e32b506e 100644 (file)
@@ -21,7 +21,9 @@ int of_setprop(const void *phandle, const char *name, const void *buf,
 /* Console functions */
 void of_console_init(void);
 
+typedef u16                    __be16;
 typedef u32                    __be32;
+typedef u64                    __be64;
 
 #ifdef __LITTLE_ENDIAN__
 #define cpu_to_be16(x) swab16(x)
index 87c7d1473488a95fc5956fa2f80bcef543b74b69..12868b1c4e0562613a57ba956f95667f1d37e87c 100644 (file)
@@ -2,7 +2,6 @@
 # Makefile for the linux kernel.
 #
 
-CFLAGS_prom.o          = -I$(src)/../../../scripts/dtc/libfdt
 CFLAGS_ptrace.o                += -DUTS_MACHINE='"$(UTS_MACHINE)"'
 
 subdir-ccflags-$(CONFIG_PPC_WERROR) := -Werror
index 56f44848b044b67c7d3b84caf82a9a9ed6fb493a..43922509a4833e8da175b668e9be588daa123f4f 100644 (file)
@@ -1124,4 +1124,4 @@ static int __init rtc_init(void)
        return PTR_ERR_OR_ZERO(pdev);
 }
 
-module_init(rtc_init);
+device_initcall(rtc_init);
index 1f614d778a8b5ffc2b1cf32465fce3a579839f3a..bb0bd7025cb88f893af04d3f98141860c038ee54 100644 (file)
@@ -928,7 +928,7 @@ static int __init hugetlbpage_init(void)
        return 0;
 }
 #endif
-module_init(hugetlbpage_init);
+arch_initcall(hugetlbpage_init);
 
 void flush_dcache_icache_hugepage(struct page *page)
 {
index c9adbfb65006e153aa8f6caf0d0fd5b396cf2997..fcbea4b51a7821ac2865622de1440a9aaa766449 100644 (file)
@@ -445,5 +445,4 @@ static int pmc_init(void)
 {
        return platform_driver_register(&pmc_driver);
 }
-
-module_init(pmc_init);
+device_initcall(pmc_init);
index 1ba6307be4dba78b24bbb1eff1566a796cd71fde..11634fa7ab3c1dfe5bab4081b5250a8bf83faf54 100644 (file)
@@ -166,7 +166,7 @@ static void spufs_prune_dir(struct dentry *dir)
        mutex_lock(&d_inode(dir)->i_mutex);
        list_for_each_entry_safe(dentry, tmp, &dir->d_subdirs, d_child) {
                spin_lock(&dentry->d_lock);
-               if (!(d_unhashed(dentry)) && d_really_is_positive(dentry)) {
+               if (simple_positive(dentry)) {
                        dget_dlock(dentry);
                        __d_drop(dentry);
                        spin_unlock(&dentry->d_lock);
index ce73ce865613b0521330f8e896f471daf6da9a15..791c6142c4a7bd0185437890350381c529779a54 100644 (file)
@@ -92,5 +92,4 @@ static int __init ps3_rtc_init(void)
 
        return PTR_ERR_OR_ZERO(pdev);
 }
-
-module_init(ps3_rtc_init);
+device_initcall(ps3_rtc_init);
index d631022ffb4b3c77a4fe057c3c6da91457a20c64..38138cf8d33e2213804dad3e903c4037a59db605 100644 (file)
@@ -407,4 +407,4 @@ static int __init fsl_lbc_init(void)
 {
        return platform_driver_register(&fsl_lbc_ctrl_driver);
 }
-module_init(fsl_lbc_init);
+subsys_initcall(fsl_lbc_init);
index d3f896a35b9813080351f1bb7be178f127114c19..b2e5902bd8f4d8e5f4f53cc3e6fad1cb183db7be 100644 (file)
@@ -62,18 +62,13 @@ static void hypfs_add_dentry(struct dentry *dentry)
        hypfs_last_dentry = dentry;
 }
 
-static inline int hypfs_positive(struct dentry *dentry)
-{
-       return d_really_is_positive(dentry) && !d_unhashed(dentry);
-}
-
 static void hypfs_remove(struct dentry *dentry)
 {
        struct dentry *parent;
 
        parent = dentry->d_parent;
        mutex_lock(&d_inode(parent)->i_mutex);
-       if (hypfs_positive(dentry)) {
+       if (simple_positive(dentry)) {
                if (d_is_dir(dentry))
                        simple_rmdir(d_inode(parent), dentry);
                else
@@ -456,8 +451,6 @@ static const struct super_operations hypfs_s_ops = {
        .show_options   = hypfs_show_options,
 };
 
-static struct kobject *s390_kobj;
-
 static int __init hypfs_init(void)
 {
        int rc;
@@ -481,18 +474,16 @@ static int __init hypfs_init(void)
                rc = -ENODATA;
                goto fail_hypfs_sprp_exit;
        }
-       s390_kobj = kobject_create_and_add("s390", hypervisor_kobj);
-       if (!s390_kobj) {
-               rc = -ENOMEM;
+       rc = sysfs_create_mount_point(hypervisor_kobj, "s390");
+       if (rc)
                goto fail_hypfs_diag0c_exit;
-       }
        rc = register_filesystem(&hypfs_type);
        if (rc)
                goto fail_filesystem;
        return 0;
 
 fail_filesystem:
-       kobject_put(s390_kobj);
+       sysfs_remove_mount_point(hypervisor_kobj, "s390");
 fail_hypfs_diag0c_exit:
        hypfs_diag0c_exit();
 fail_hypfs_sprp_exit:
@@ -510,7 +501,7 @@ fail_dbfs_exit:
 static void __exit hypfs_exit(void)
 {
        unregister_filesystem(&hypfs_type);
-       kobject_put(s390_kobj);
+       sysfs_remove_mount_point(hypervisor_kobj, "s390");
        hypfs_diag0c_exit();
        hypfs_sprp_exit();
        hypfs_vm_exit();
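
The hypfs hunks above drop the hand-rolled s390 kobject in favour of the dedicated sysfs mount-point helpers, and the teardown paths mirror the change. A minimal sketch of the pairing (hypervisor_kobj is the kernel's /sys/hypervisor kobject; the "example" name is hypothetical):

#include <linux/kobject.h>
#include <linux/sysfs.h>

static int add_mount_dir(void)
{
	/* Creates an empty directory under /sys/hypervisor meant to be
	 * used purely as a mount point. */
	return sysfs_create_mount_point(hypervisor_kobj, "example");
}

static void remove_mount_dir(void)
{
	sysfs_remove_mount_point(hypervisor_kobj, "example");
}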
index e6a1578fc00095929db02b51d4894fe9ad59802c..afe05bfb7e008a723388ff6c39f2d83d5693ad65 100644 (file)
@@ -1572,7 +1572,7 @@ static int param_set_sfb_size(const char *val, const struct kernel_param *kp)
 }
 
 #define param_check_sfb_size(name, p) __param_check(name, p, void)
-static struct kernel_param_ops param_ops_sfb_size = {
+static const struct kernel_param_ops param_ops_sfb_size = {
        .set = param_set_sfb_size,
        .get = param_get_sfb_size,
 };
index 522786318d36c4f5dbee20c8cbdee94009bbf201..40e2b585d4887b7bc48361395c0b9881ecf64970 100644 (file)
@@ -10,7 +10,7 @@
  * for more details.
  */
 #include <linux/io.h>
-#include <linux/init.h>
+#include <linux/module.h>
 #include <linux/interrupt.h>
 #include <linux/platform_device.h>
 #include <mach/highlander.h>
index bef83522f958c25ed186dc21187963607757ed89..5192b1f43ada5ce884bc4a8abc3f1e0f53e7cc1d 100644 (file)
@@ -140,4 +140,4 @@ static int __init psw_init(void)
 {
        return platform_add_devices(psw_devices, ARRAY_SIZE(psw_devices));
 }
-module_init(psw_init);
+device_initcall(psw_init);
index 35d34635e4f1305473f5cf1990d185216657c59d..402b9c85a894dc4b10982462178dd08529bca49d 100644 (file)
@@ -332,7 +332,7 @@ static void describe_addr(struct KBacktraceIterator *kbt,
        }
 
        if (vma->vm_file) {
-               p = d_path(&vma->vm_file->f_path, buf, bufsize);
+               p = file_path(vma->vm_file, buf, bufsize);
                if (IS_ERR(p))
                        p = "?";
                name = kbasename(p);
index 5af8debc6a71aab44a6cf48e63d44f7343f2fca4..f0da5a237e94077ced050b6f3d746c89d44bd341 100644 (file)
@@ -21,6 +21,7 @@
 #include <linux/dma-mapping.h>
 #include <linux/platform_device.h>
 #include <linux/usb/tilegx.h>
+#include <linux/init.h>
 #include <linux/types.h>
 
 static u64 ehci_dmamask = DMA_BIT_MASK(32);
index f7ddae3725a4aedc686b834efe05237dd1517310..6225cc998db1308b81a4c3abc1bd2edb0ebb2347 100644 (file)
@@ -56,7 +56,7 @@ static int notify_exec(struct mm_struct *mm)
        if (exe_file == NULL)
                goto done_free;
 
-       path = d_path(&exe_file->f_path, buf, PAGE_SIZE);
+       path = file_path(exe_file, buf, PAGE_SIZE);
        if (IS_ERR(path))
                goto done_put;
 
index 9b90fdc4b151d325e9179424b1b4a44878e357ec..f6b911cc3923a1958f9c393a94799a879c4aa8d9 100644 (file)
@@ -185,9 +185,9 @@ static int hostaudio_open(struct inode *inode, struct file *file)
        int ret;
 
 #ifdef DEBUG
-       kparam_block_sysfs_write(dsp);
+       kernel_param_lock(THIS_MODULE);
        printk(KERN_DEBUG "hostaudio: open called (host: %s)\n", dsp);
-       kparam_unblock_sysfs_write(dsp);
+       kernel_param_unlock(THIS_MODULE);
 #endif
 
        state = kmalloc(sizeof(struct hostaudio_state), GFP_KERNEL);
@@ -199,11 +199,11 @@ static int hostaudio_open(struct inode *inode, struct file *file)
        if (file->f_mode & FMODE_WRITE)
                w = 1;
 
-       kparam_block_sysfs_write(dsp);
+       kernel_param_lock(THIS_MODULE);
        mutex_lock(&hostaudio_mutex);
        ret = os_open_file(dsp, of_set_rw(OPENFLAGS(), r, w), 0);
        mutex_unlock(&hostaudio_mutex);
-       kparam_unblock_sysfs_write(dsp);
+       kernel_param_unlock(THIS_MODULE);
 
        if (ret < 0) {
                kfree(state);
@@ -260,17 +260,17 @@ static int hostmixer_open_mixdev(struct inode *inode, struct file *file)
        if (file->f_mode & FMODE_WRITE)
                w = 1;
 
-       kparam_block_sysfs_write(mixer);
+       kernel_param_lock(THIS_MODULE);
        mutex_lock(&hostaudio_mutex);
        ret = os_open_file(mixer, of_set_rw(OPENFLAGS(), r, w), 0);
        mutex_unlock(&hostaudio_mutex);
-       kparam_unblock_sysfs_write(mixer);
+       kernel_param_unlock(THIS_MODULE);
 
        if (ret < 0) {
-               kparam_block_sysfs_write(dsp);
+               kernel_param_lock(THIS_MODULE);
                printk(KERN_ERR "hostaudio_open_mixdev failed to open '%s', "
                       "err = %d\n", dsp, -ret);
-               kparam_unblock_sysfs_write(dsp);
+               kernel_param_unlock(THIS_MODULE);
                kfree(state);
                return ret;
        }
@@ -326,10 +326,10 @@ MODULE_LICENSE("GPL");
 
 static int __init hostaudio_init_module(void)
 {
-       __kernel_param_lock();
+       kernel_param_lock(THIS_MODULE);
        printk(KERN_INFO "UML Audio Relay (host dsp = %s, host mixer = %s)\n",
               dsp, mixer);
-       __kernel_param_unlock();
+       kernel_param_unlock(THIS_MODULE);
 
        module_data.dev_audio = register_sound_dsp(&hostaudio_fops, -1);
        if (module_data.dev_audio < 0) {
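
The hostaudio changes follow the switch from the per-parameter kparam_block_sysfs_write() helpers to the module-wide kernel_param_lock()/kernel_param_unlock() pair. A minimal sketch of guarding a writable charp parameter while it is dereferenced; the dsp default value here is only illustrative:

#include <linux/module.h>
#include <linux/moduleparam.h>

static char *dsp = "/dev/sound/dsp";
module_param(dsp, charp, 0644);

static void report_dsp(void)
{
	/* Hold the module's parameter lock so a concurrent sysfs write
	 * cannot swap the string out from under us. */
	kernel_param_lock(THIS_MODULE);
	pr_info("using host dsp %s\n", dsp);
	kernel_param_unlock(THIS_MODULE);
}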
index 282a60ac82bacc424ee94bfdf1e3cdd9310197f2..a53343a90ca2944ab9c9a9aa4151c1edad93af3e 100644 (file)
@@ -90,8 +90,8 @@ void ucf64_exchandler(u32 inst, u32 fpexc, struct pt_regs *regs)
                        tmp &= ~(FPSCR_CON);
                exc &= ~(FPSCR_CMPINSTR_BIT | FPSCR_CON);
        } else {
-               pr_debug(KERN_ERR "UniCore-F64 Error: unhandled exceptions\n");
-               pr_debug(KERN_ERR "UniCore-F64 FPSCR 0x%08x INST 0x%08x\n",
+               pr_debug("UniCore-F64 Error: unhandled exceptions\n");
+               pr_debug("UniCore-F64 FPSCR 0x%08x INST 0x%08x\n",
                                cff(FPSCR), inst);
 
                ucf64_raise_sigfpe(0, regs);
index d05a42357ef0f5d02e27e630292998d0eba019b2..55bced17dc95a475194f624d0e259fcaee3f8c8a 100644 (file)
@@ -34,6 +34,7 @@ config X86
        select ARCH_MIGHT_HAVE_PC_PARPORT
        select ARCH_MIGHT_HAVE_PC_SERIO
        select ARCH_SUPPORTS_ATOMIC_RMW
+       select ARCH_SUPPORTS_DEFERRED_STRUCT_PAGE_INIT
        select ARCH_SUPPORTS_INT128             if X86_64
        select ARCH_SUPPORTS_NUMA_BALANCING     if X86_64
        select ARCH_USE_BUILTIN_BSWAP
@@ -87,6 +88,7 @@ config X86
        select HAVE_CMPXCHG_DOUBLE
        select HAVE_CMPXCHG_LOCAL
        select HAVE_CONTEXT_TRACKING            if X86_64
+       select HAVE_COPY_THREAD_TLS
        select HAVE_C_RECORDMCOUNT
        select HAVE_DEBUG_KMEMLEAK
        select HAVE_DEBUG_STACKOVERFLOW
diff --git a/arch/x86/configs/xen.config b/arch/x86/configs/xen.config
new file mode 100644 (file)
index 0000000..d9fc713
--- /dev/null
@@ -0,0 +1,28 @@
+# global x86 required specific stuff
+# On 32-bit HIGHMEM4G is not allowed
+CONFIG_HIGHMEM64G=y
+CONFIG_64BIT=y
+
+# These enable us to allow some of the
+# not so generic stuff below
+CONFIG_HYPERVISOR_GUEST=y
+CONFIG_PCI=y
+CONFIG_PCI_MSI=y
+CONFIG_X86_MCE=y
+CONFIG_ACPI_PROCESSOR=y
+CONFIG_CPU_FREQ=y
+
+# x86 xen specific config options
+CONFIG_XEN_PVH=y
+CONFIG_XEN_MAX_DOMAIN_MEMORY=500
+CONFIG_XEN_SAVE_RESTORE=y
+# CONFIG_XEN_DEBUG_FS is not set
+CONFIG_XEN_MCE_LOG=y
+CONFIG_XEN_ACPI_PROCESSOR=m
+# x86 specific backend drivers
+CONFIG_XEN_PCIDEV_BACKEND=m
+# x86 specific frontend drivers
+CONFIG_XEN_PCIDEV_FRONTEND=m
+# depends on MEMORY_HOTPLUG, arm64 doesn't enable this yet,
+# move to generic config if it ever does.
+CONFIG_XEN_BALLOON_MEMORY_HOTPLUG=y
index 2bfc8a7c88c11e1d4f20d4d3c58e62063c76328f..dccad38b59a8d741fe5f442b558b63de2b4d931d 100644 (file)
@@ -1537,7 +1537,7 @@ static void __exit aesni_exit(void)
        crypto_fpu_exit();
 }
 
-module_init(aesni_init);
+late_initcall(aesni_init);
 module_exit(aesni_exit);
 
 MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm, Intel AES-NI instructions optimized");
diff --git a/arch/x86/include/asm/intel_pmc_ipc.h b/arch/x86/include/asm/intel_pmc_ipc.h
new file mode 100644 (file)
index 0000000..200ec2e
--- /dev/null
@@ -0,0 +1,82 @@
+#ifndef _ASM_X86_INTEL_PMC_IPC_H_
+#define  _ASM_X86_INTEL_PMC_IPC_H_
+
+/* Commands */
+#define PMC_IPC_PMIC_ACCESS            0xFF
+#define                PMC_IPC_PMIC_ACCESS_READ        0x0
+#define                PMC_IPC_PMIC_ACCESS_WRITE       0x1
+#define PMC_IPC_USB_PWR_CTRL           0xF0
+#define PMC_IPC_PMIC_BLACKLIST_SEL     0xEF
+#define PMC_IPC_PHY_CONFIG             0xEE
+#define PMC_IPC_NORTHPEAK_CTRL         0xED
+#define PMC_IPC_PM_DEBUG               0xEC
+#define PMC_IPC_PMC_TELEMTRY           0xEB
+#define PMC_IPC_PMC_FW_MSG_CTRL                0xEA
+
+/* IPC return code */
+#define IPC_ERR_NONE                   0
+#define IPC_ERR_CMD_NOT_SUPPORTED      1
+#define IPC_ERR_CMD_NOT_SERVICED       2
+#define IPC_ERR_UNABLE_TO_SERVICE      3
+#define IPC_ERR_CMD_INVALID            4
+#define IPC_ERR_CMD_FAILED             5
+#define IPC_ERR_EMSECURITY             6
+#define IPC_ERR_UNSIGNEDKERNEL         7
+
+#if IS_ENABLED(CONFIG_INTEL_PMC_IPC)
+
+/*
+ * intel_pmc_ipc_simple_command
+ * @cmd: command
+ * @sub: sub type
+ */
+int intel_pmc_ipc_simple_command(int cmd, int sub);
+
+/*
+ * intel_pmc_ipc_raw_cmd
+ * @cmd: command
+ * @sub: sub type
+ * @in: input data
+ * @inlen: input length in bytes
+ * @out: output data
+ * @outlen: output length in dwords
+ * @sptr: data writing to SPTR register
+ * @dptr: data writing to DPTR register
+ */
+int intel_pmc_ipc_raw_cmd(u32 cmd, u32 sub, u8 *in, u32 inlen,
+               u32 *out, u32 outlen, u32 dptr, u32 sptr);
+
+/*
+ * intel_pmc_ipc_command
+ * @cmd: command
+ * @sub: sub type
+ * @in: input data
+ * @inlen: input length in bytes
+ * @out: output data
+ * @outlen: output length in dwords
+ */
+int intel_pmc_ipc_command(u32 cmd, u32 sub, u8 *in, u32 inlen,
+               u32 *out, u32 outlen);
+
+#else
+
+static inline int intel_pmc_ipc_simple_command(int cmd, int sub)
+{
+       return -EINVAL;
+}
+
+static inline int intel_pmc_ipc_raw_cmd(u32 cmd, u32 sub, u8 *in, u32 inlen,
+               u32 *out, u32 outlen, u32 dptr, u32 sptr)
+{
+       return -EINVAL;
+}
+
+static inline int intel_pmc_ipc_command(u32 cmd, u32 sub, u8 *in, u32 inlen,
+               u32 *out, u32 outlen)
+{
+       return -EINVAL;
+}
+
+#endif /*CONFIG_INTEL_PMC_IPC*/
+
+#endif
index c7fa57b529d2972c6e968a05625f65a4fa3ca14b..2a7f5d782c332d1965ac1c5a23a33289cfce7352 100644 (file)
@@ -607,7 +607,7 @@ struct kvm_arch {
        struct kvm_pic *vpic;
        struct kvm_ioapic *vioapic;
        struct kvm_pit *vpit;
-       int vapics_in_nmi_mode;
+       atomic_t vapics_in_nmi_mode;
        struct mutex apic_map_lock;
        struct kvm_apic_map *apic_map;
 
index ce6068dbcfbc6e84c76b43239d80f5d020d431ee..8fba544e9cc4164261f76c6c5d894b80a92f0377 100644 (file)
 #define HV_X64_MSR_STIMER3_CONFIG              0x400000B6
 #define HV_X64_MSR_STIMER3_COUNT               0x400000B7
 
+/* Hyper-V guest crash notification MSR's */
+#define HV_X64_MSR_CRASH_P0                    0x40000100
+#define HV_X64_MSR_CRASH_P1                    0x40000101
+#define HV_X64_MSR_CRASH_P2                    0x40000102
+#define HV_X64_MSR_CRASH_P3                    0x40000103
+#define HV_X64_MSR_CRASH_P4                    0x40000104
+#define HV_X64_MSR_CRASH_CTL                   0x40000105
+#define HV_X64_MSR_CRASH_CTL_NOTIFY            (1ULL << 63)
+#define HV_X64_MSR_CRASH_PARAMS                \
+               (1 + (HV_X64_MSR_CRASH_P4 - HV_X64_MSR_CRASH_P0))
+
 #define HV_X64_MSR_HYPERCALL_ENABLE            0x00000001
 #define HV_X64_MSR_HYPERCALL_PAGE_ADDRESS_SHIFT        12
 #define HV_X64_MSR_HYPERCALL_PAGE_ADDRESS_MASK \
index 5de7f4c5697136e180e4a24e153bde77c7112b8c..52c8e3c7789dc81f6f6bd5ae60a6672f500dc8ca 100644 (file)
@@ -98,4 +98,4 @@ static int __init sbf_init(void)
 
        return 0;
 }
-module_init(sbf_init);
+arch_initcall(sbf_init);
index 9fc5e3d9d9c8390f4c9bb177449979061f20bb14..922c5e0cea4c961b1aa6e7a266ce588de7f7a300 100644 (file)
@@ -742,7 +742,6 @@ static void __init early_identify_cpu(struct cpuinfo_x86 *c)
        cpu_detect(c);
        get_cpu_vendor(c);
        get_cpu_cap(c);
-       fpu__init_system(c);
 
        if (this_cpu->c_early_init)
                this_cpu->c_early_init(c);
@@ -754,6 +753,7 @@ static void __init early_identify_cpu(struct cpuinfo_x86 *c)
                this_cpu->c_bsp_init(c);
 
        setup_force_cpu_cap(X86_FEATURE_ALWAYS);
+       fpu__init_system(c);
 }
 
 void __init early_cpu_init(void)
index 5801a14f7524315a7318fe5a0f60509704fdb756..3658de47900f9a921a0f8373db962e061d42db3a 100644 (file)
@@ -357,34 +357,24 @@ void x86_release_hardware(void)
  */
 int x86_add_exclusive(unsigned int what)
 {
-       int ret = -EBUSY, i;
-
-       if (atomic_inc_not_zero(&x86_pmu.lbr_exclusive[what]))
-               return 0;
+       int i;
 
-       mutex_lock(&pmc_reserve_mutex);
-       for (i = 0; i < ARRAY_SIZE(x86_pmu.lbr_exclusive); i++) {
-               if (i != what && atomic_read(&x86_pmu.lbr_exclusive[i]))
-                       goto out;
+       if (!atomic_inc_not_zero(&x86_pmu.lbr_exclusive[what])) {
+               mutex_lock(&pmc_reserve_mutex);
+               for (i = 0; i < ARRAY_SIZE(x86_pmu.lbr_exclusive); i++) {
+                       if (i != what && atomic_read(&x86_pmu.lbr_exclusive[i]))
+                               goto fail_unlock;
+               }
+               atomic_inc(&x86_pmu.lbr_exclusive[what]);
+               mutex_unlock(&pmc_reserve_mutex);
        }
 
-       atomic_inc(&x86_pmu.lbr_exclusive[what]);
-       ret = 0;
+       atomic_inc(&active_events);
+       return 0;
 
-out:
+fail_unlock:
        mutex_unlock(&pmc_reserve_mutex);
-
-       /*
-        * Assuming that all exclusive events will share the PMI handler
-        * (which checks active_events for whether there is work to do),
-        * we can bump active_events counter right here, except for
-        * x86_lbr_exclusive_lbr events that go through x86_pmu_event_init()
-        * path, which already bumps active_events for them.
-        */
-       if (!ret && what != x86_lbr_exclusive_lbr)
-               atomic_inc(&active_events);
-
-       return ret;
+       return -EBUSY;
 }
 
 void x86_del_exclusive(unsigned int what)
index 7795f3f8b1d57198469ded20ac9a1244035428e8..43dd672d788bcc8a07106f8c84c0aeae786cb49e 100644 (file)
@@ -530,5 +530,4 @@ static __init int bts_init(void)
 
        return perf_pmu_register(&bts_pmu, "intel_bts", -1);
 }
-
-module_init(bts_init);
+arch_initcall(bts_init);
index 159887c3a89d66a4aaad415ddc069b25904b5676..183de719628d2a4740d20a173a6c950f35c322fa 100644 (file)
@@ -1106,5 +1106,4 @@ static __init int pt_init(void)
 
        return ret;
 }
-
-module_init(pt_init);
+arch_initcall(pt_init);
index 5ee771859b6f6e144090efe8f315e0c7707978b3..1f4acd68b98bccb7bf4032bc6ff2bde3f4efecdb 100644 (file)
@@ -65,7 +65,7 @@ static int __init add_bus_probe(void)
 
        return of_platform_bus_probe(NULL, ce4100_ids, NULL);
 }
-module_init(add_bus_probe);
+device_initcall(add_bus_probe);
 
 #ifdef CONFIG_PCI
 struct device_node *pcibios_get_phb_of_node(struct pci_bus *bus)
index fc878fee6a512a4485ac1d37db76da75f2501f25..32826791e6757203b5440dad36f5e80b3fc8fbe9 100644 (file)
@@ -95,11 +95,12 @@ static void __init fpu__init_system_mxcsr(void)
        unsigned int mask = 0;
 
        if (cpu_has_fxsr) {
-               struct fxregs_state fx_tmp __aligned(32) = { };
+               /* Static because GCC does not get 16-byte stack alignment right: */
+               static struct fxregs_state fxregs __initdata;
 
-               asm volatile("fxsave %0" : "+m" (fx_tmp));
+               asm volatile("fxsave %0" : "+m" (fxregs));
 
-               mask = fx_tmp.mxcsr_mask;
+               mask = fxregs.mxcsr_mask;
 
                /*
                 * If zero then use the default features mask,
index ca05f86481aace3a37cd6bf3fbe0e7d7ce3bb33a..ca83f7ac388bec2e8c494e51ac5fdbeeda9525c9 100644 (file)
@@ -72,15 +72,16 @@ static int setup_cmdline(struct kimage *image, struct boot_params *params,
                         unsigned long cmdline_len)
 {
        char *cmdline_ptr = ((char *)params) + cmdline_offset;
-       unsigned long cmdline_ptr_phys, len;
+       unsigned long cmdline_ptr_phys, len = 0;
        uint32_t cmdline_low_32, cmdline_ext_32;
 
-       memcpy(cmdline_ptr, cmdline, cmdline_len);
        if (image->type == KEXEC_TYPE_CRASH) {
-               len = sprintf(cmdline_ptr + cmdline_len - 1,
-                       " elfcorehdr=0x%lx", image->arch.elf_load_addr);
-               cmdline_len += len;
+               len = sprintf(cmdline_ptr,
+                       "elfcorehdr=0x%lx ", image->arch.elf_load_addr);
        }
+       memcpy(cmdline_ptr + len, cmdline, cmdline_len);
+       cmdline_len += len;
+
        cmdline_ptr[cmdline_len - 1] = '\0';
 
        pr_debug("Final command line is: %s\n", cmdline_ptr);
index c09c99ccf3e33fc5afff500b6310e2e6f9150526..f73c962fe636a268851e1c61e97b55c075ebb687 100644 (file)
@@ -128,8 +128,8 @@ void release_thread(struct task_struct *dead_task)
        release_vm86_irqs(dead_task);
 }
 
-int copy_thread(unsigned long clone_flags, unsigned long sp,
-       unsigned long arg, struct task_struct *p)
+int copy_thread_tls(unsigned long clone_flags, unsigned long sp,
+       unsigned long arg, struct task_struct *p, unsigned long tls)
 {
        struct pt_regs *childregs = task_pt_regs(p);
        struct task_struct *tsk;
@@ -184,7 +184,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
         */
        if (clone_flags & CLONE_SETTLS)
                err = do_set_thread_area(p, -1,
-                       (struct user_desc __user *)childregs->si, 0);
+                       (struct user_desc __user *)tls, 0);
 
        if (err && p->thread.io_bitmap_ptr) {
                kfree(p->thread.io_bitmap_ptr);
index 843f92e4c7110cd621fb94dfca8cb980044fa32d..71d7849a07f7ca93b126bf081464012c7a91c9a6 100644 (file)
@@ -150,8 +150,8 @@ static inline u32 read_32bit_tls(struct task_struct *t, int tls)
        return get_desc_base(&t->thread.tls_array[tls]);
 }
 
-int copy_thread(unsigned long clone_flags, unsigned long sp,
-               unsigned long arg, struct task_struct *p)
+int copy_thread_tls(unsigned long clone_flags, unsigned long sp,
+               unsigned long arg, struct task_struct *p, unsigned long tls)
 {
        int err;
        struct pt_regs *childregs;
@@ -207,10 +207,10 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
 #ifdef CONFIG_IA32_EMULATION
                if (is_ia32_task())
                        err = do_set_thread_area(p, -1,
-                               (struct user_desc __user *)childregs->si, 0);
+                               (struct user_desc __user *)tls, 0);
                else
 #endif
-                       err = do_arch_prctl(p, ARCH_SET_FS, childregs->r8);
+                       err = do_arch_prctl(p, ARCH_SET_FS, tls);
                if (err)
                        goto out;
        }
index d3b95b89e9b2974401b48b534bf71604d7db57cc..80f874bf999e3cfbb62eae13b8cf0d1b4a2c3c18 100644 (file)
@@ -461,19 +461,18 @@ static void __init e820_reserve_setup_data(void)
 {
        struct setup_data *data;
        u64 pa_data;
-       int found = 0;
 
        pa_data = boot_params.hdr.setup_data;
+       if (!pa_data)
+               return;
+
        while (pa_data) {
                data = early_memremap(pa_data, sizeof(*data));
                e820_update_range(pa_data, sizeof(*data)+data->len,
                         E820_RAM, E820_RESERVED_KERN);
-               found = 1;
                pa_data = data->next;
                early_memunmap(data, sizeof(*data));
        }
-       if (!found)
-               return;
 
        sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
        memcpy(&e820_saved, &e820, sizeof(struct e820map));
index ee22c1d93ae5c4c5ffe065981d591507a0511a07..b034b1b14b9c66ab77b24d9757ce10de7f0f318e 100644 (file)
@@ -72,7 +72,7 @@ asmlinkage __visible void vsmp_irq_enable(void)
 }
 PV_CALLEE_SAVE_REGS_THUNK(vsmp_irq_enable);
 
-static unsigned __init_or_module vsmp_patch(u8 type, u16 clobbers, void *ibuf,
+static unsigned __init vsmp_patch(u8 type, u16 clobbers, void *ibuf,
                                  unsigned long addr, unsigned len)
 {
        switch (type) {
index 4dce6f8b6129ebea2432840154cc97efd513947b..f90952f64e796eae9049690910a511a7d7ff79dd 100644 (file)
@@ -305,7 +305,7 @@ static void pit_do_work(struct kthread_work *work)
                 * LVT0 to NMI delivery. Other PIC interrupts are just sent to
                 * VCPU0, and only if its LVT0 is in EXTINT mode.
                 */
-               if (kvm->arch.vapics_in_nmi_mode > 0)
+               if (atomic_read(&kvm->arch.vapics_in_nmi_mode) > 0)
                        kvm_for_each_vcpu(i, vcpu, kvm)
                                kvm_apic_nmi_wd_deliver(vcpu);
        }
index 36e9de1b4127c5576ceb292c6ef320eae830df24..954e98a8c2e38bf9861d4d6349eb721264e8cb18 100644 (file)
@@ -1257,16 +1257,17 @@ static void start_apic_timer(struct kvm_lapic *apic)
 
 static void apic_manage_nmi_watchdog(struct kvm_lapic *apic, u32 lvt0_val)
 {
-       int nmi_wd_enabled = apic_lvt_nmi_mode(kvm_apic_get_reg(apic, APIC_LVT0));
+       bool lvt0_in_nmi_mode = apic_lvt_nmi_mode(lvt0_val);
 
-       if (apic_lvt_nmi_mode(lvt0_val)) {
-               if (!nmi_wd_enabled) {
+       if (apic->lvt0_in_nmi_mode != lvt0_in_nmi_mode) {
+               apic->lvt0_in_nmi_mode = lvt0_in_nmi_mode;
+               if (lvt0_in_nmi_mode) {
                        apic_debug("Receive NMI setting on APIC_LVT0 "
                                   "for cpu %d\n", apic->vcpu->vcpu_id);
-                       apic->vcpu->kvm->arch.vapics_in_nmi_mode++;
-               }
-       } else if (nmi_wd_enabled)
-               apic->vcpu->kvm->arch.vapics_in_nmi_mode--;
+                       atomic_inc(&apic->vcpu->kvm->arch.vapics_in_nmi_mode);
+               } else
+                       atomic_dec(&apic->vcpu->kvm->arch.vapics_in_nmi_mode);
+       }
 }
 
 static int apic_reg_write(struct kvm_lapic *apic, u32 reg, u32 val)
@@ -1597,6 +1598,7 @@ void kvm_lapic_reset(struct kvm_vcpu *vcpu, bool init_event)
        if (!(vcpu->kvm->arch.disabled_quirks & KVM_QUIRK_LINT0_REENABLED))
                apic_set_reg(apic, APIC_LVT0,
                             SET_APIC_DELIVERY_MODE(0, APIC_MODE_EXTINT));
+       apic_manage_nmi_watchdog(apic, kvm_apic_get_reg(apic, APIC_LVT0));
 
        apic_set_reg(apic, APIC_DFR, 0xffffffffU);
        apic_set_spiv(apic, 0xff);
@@ -1822,6 +1824,7 @@ void kvm_apic_post_state_restore(struct kvm_vcpu *vcpu,
        apic_update_ppr(apic);
        hrtimer_cancel(&apic->lapic_timer.timer);
        apic_update_lvtt(apic);
+       apic_manage_nmi_watchdog(apic, kvm_apic_get_reg(apic, APIC_LVT0));
        update_divide_count(apic);
        start_apic_timer(apic);
        apic->irr_pending = true;
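
The lapic.c hunk replaces the plain vapics_in_nmi_mode counter with an atomic that is only adjusted on actual LVT0 mode transitions, tracked per-APIC in the new lvt0_in_nmi_mode flag, so rewriting the same LVT0 value cannot unbalance the count. A standalone sketch of that pattern; struct and function names are illustrative:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int vapics_in_nmi_mode;   /* shared across all "vcpus" */

struct lapic_state {
        bool lvt0_in_nmi_mode;          /* per-APIC cached mode */
};

/* Only touch the shared counter when the cached per-APIC state changes. */
static void manage_nmi_watchdog(struct lapic_state *apic, bool lvt0_nmi)
{
        if (apic->lvt0_in_nmi_mode != lvt0_nmi) {
                apic->lvt0_in_nmi_mode = lvt0_nmi;
                if (lvt0_nmi)
                        atomic_fetch_add(&vapics_in_nmi_mode, 1);
                else
                        atomic_fetch_sub(&vapics_in_nmi_mode, 1);
        }
}

int main(void)
{
        struct lapic_state apic = { .lvt0_in_nmi_mode = false };

        manage_nmi_watchdog(&apic, true);
        manage_nmi_watchdog(&apic, true);       /* no double increment */
        manage_nmi_watchdog(&apic, false);
        printf("%d\n", atomic_load(&vapics_in_nmi_mode));      /* 0 */
        return 0;
}
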
index f2f4e10ab7724640fb02ea3b6ea31ec08cfe95a4..71952748222a4a08597711acc21a599c9c499990 100644 (file)
@@ -26,6 +26,7 @@ struct kvm_lapic {
        struct kvm_vcpu *vcpu;
        bool sw_enabled;
        bool irr_pending;
+       bool lvt0_in_nmi_mode;
        /* Number of bits set in ISR. */
        s16 isr_count;
        /* The highest vector set in ISR; if -1 - invalid, must scan ISR. */
index a4f62e6f2db2fe74cb78337ebeab9307b873c738..03d518e499a6d5a6184b19ae86b70adfcb3955ba 100644 (file)
@@ -297,7 +297,7 @@ static int mmu_audit_set(const char *val, const struct kernel_param *kp)
        return 0;
 }
 
-static struct kernel_param_ops audit_param_ops = {
+static const struct kernel_param_ops audit_param_ops = {
        .set = mmu_audit_set,
        .get = param_get_bool,
 };
index ac165c2fb8e54a307cb3f70a5e990635f95978b2..bbaf44e8f0d3cdd7100c40c98ccf2ab40ae1ea2f 100644 (file)
@@ -2379,8 +2379,6 @@ static int get_msr_hyperv(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
 
 int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 {
-       u64 data;
-
        switch (msr_info->index) {
        case MSR_IA32_PLATFORM_ID:
        case MSR_IA32_EBL_CR_POWERON:
@@ -2453,7 +2451,7 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
                /* TSC increment by tick */
                msr_info->data = 1000ULL;
                /* CPU multiplier */
-               data |= (((uint64_t)4ULL) << 40);
+               msr_info->data |= (((uint64_t)4ULL) << 40);
                break;
        case MSR_EFER:
                msr_info->data = vcpu->arch.efer;
index ddf9ecb53cc3e23171ea889dc47339e67ba5d2c8..e342586db6e4b08230ce6a596c91e27bb5fcf2db 100644 (file)
@@ -20,7 +20,7 @@ copy_from_user_nmi(void *to, const void __user *from, unsigned long n)
        unsigned long ret;
 
        if (__range_not_ok(from, n, TASK_SIZE))
-               return 0;
+               return n;
 
        /*
         * Even though this function is typically called from NMI/IRQ context
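
The usercopy hunk changes the failure path to report that nothing was copied: copy_from_user-style helpers conventionally return the number of bytes not copied, so returning 0 on a rejected range would look like complete success to the caller. A toy model of that convention; the function name and the range_ok parameter are illustrative:

#include <stdio.h>
#include <string.h>

/* Toy model of the copy_from_user return convention: the result is the
 * number of bytes NOT copied, so 0 means complete success. */
static unsigned long copy_checked(void *to, const void *from,
                                  unsigned long n, int range_ok)
{
        if (!range_ok)
                return n;       /* nothing copied: report the whole count */
        memcpy(to, from, n);
        return 0;
}

int main(void)
{
        char src[8] = "abcdefg", dst[8];

        printf("%lu\n", copy_checked(dst, src, sizeof(src), 1));       /* 0 */
        printf("%lu\n", copy_checked(dst, src, sizeof(src), 0));       /* 8 */
        return 0;
}
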
index 32947ba0f62dad08eaaf712848bff024ea396a46..ee40fcb6e54dd5f816819bf2f048dcc9177ce95c 100644 (file)
@@ -173,5 +173,4 @@ static int __init intel_mid_device_create(void)
 
        return platform_device_register(&vrtc_device);
 }
-
-module_init(intel_mid_device_create);
+device_initcall(intel_mid_device_create);
index 7488cafab9553de5a11d860b8b57d81bec2f866f..020c101c255fec8386ba36c13c82ac8ddaf715b3 100644 (file)
@@ -104,7 +104,7 @@ static int param_set_local64(const char *val, const struct kernel_param *kp)
        return 0;
 }
 
-static struct kernel_param_ops param_ops_local64 = {
+static const struct kernel_param_ops param_ops_local64 = {
        .get = param_get_local64,
        .set = param_set_local64,
 };
index 17b1ef3232e4833349c2f64ed19e08dede454b52..8ab021b1f14128d49948de51f685a78fd7180fa2 100644 (file)
@@ -681,6 +681,4 @@ static int iss_net_init(void)
 
        return 1;
 }
-
-module_init(iss_net_init);
-
+device_initcall(iss_net_init);
index 751f8fd7335db2203f7257edc8ad680dc7ea2a14..3d13b042da735823b7c6425f93cce9cff82e0abe 100644 (file)
@@ -12,6 +12,7 @@
 #define pr_fmt(fmt) "PKCS7key: "fmt
 #include <linux/key.h>
 #include <linux/err.h>
+#include <linux/module.h>
 #include <linux/key-type.h>
 #include <crypto/pkcs7.h>
 #include <keys/user-type.h>
index f15db002be8ec238dabac8114e862b8ec140255a..114cf48085abd34eacb341fae1f246e2fdfb4b41 100644 (file)
@@ -80,6 +80,26 @@ config ACPI_PROCFS_POWER
 
          Say N to delete power /proc/acpi/ directories that have moved to /sys/
 
+config ACPI_REV_OVERRIDE_POSSIBLE
+       bool "Allow supported ACPI revision to be overridden"
+       depends on X86
+       default y
+       help
+         The platform firmware on some systems expects Linux to return "5" as
+         the supported ACPI revision which makes it expose system configuration
+         information in a special way.
+
+         For example, based on what ACPI exports as the supported revision,
+         Dell XPS 13 (2015) configures its audio device to either work in HDA
+         mode or in I2S mode, where the former is supposed to be used on Linux
+         until the latter is fully supported (in the kernel as well as in user
+         space).
+
+         This option enables a DMI-based quirk for the above Dell machine (so
+         that HDA audio is exposed by the platform firmware to the kernel) and
+         makes it possible to force the kernel to return "5" as the supported
+         ACPI revision via the "acpi_rev_override" command line switch.
+
 config ACPI_EC_DEBUGFS
        tristate "EC read/write access through /sys/kernel/debug/ec"
        default n
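
The help text above describes a DMI-based quirk plus an acpi_rev_override command line switch that together decide which ACPI revision the platform firmware is told about. A minimal sketch of that decision, outside the kernel and with hypothetical names, only to show the shape of the override; ACPICA's default _REV of 2 is assumed here:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for the flag set by the DMI quirk or the switch. */
static bool acpi_rev_override;

static unsigned int reported_acpi_rev(void)
{
        /* ACPICA normally reports 2 for _REV; the quirk/switch forces 5. */
        return acpi_rev_override ? 5 : 2;
}

int main(void)
{
        printf("default _REV: %u\n", reported_acpi_rev());
        acpi_rev_override = true;       /* e.g. DMI match or "acpi_rev_override" */
        printf("overridden _REV: %u\n", reported_acpi_rev());
        return 0;
}
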
index 853aa2dbdb61d203d7d01090895620c1929f1421..a8d8092ee39152920caf04f8e862b414ba43d417 100644 (file)
@@ -59,5 +59,8 @@
 #include "acglobal.h"          /* All global variables */
 #include "achware.h"           /* Hardware defines and interfaces */
 #include "acutils.h"           /* Utility interfaces */
+#ifndef ACPI_USE_SYSTEM_CLIBRARY
+#include "acclib.h"            /* C library interfaces */
+#endif                         /* !ACPI_USE_SYSTEM_CLIBRARY */
 
 #endif                         /* __ACCOMMON_H__ */
index a0c47878431422be01acae9e41c7a5d3a7c8c33e..53f96a3707624416d8ae6773027a1e693a51d623 100644 (file)
@@ -61,6 +61,8 @@ ACPI_GLOBAL(struct acpi_table_header, acpi_gbl_original_dsdt_header);
 
 #if (!ACPI_REDUCED_HARDWARE)
 ACPI_GLOBAL(struct acpi_table_facs *, acpi_gbl_FACS);
+ACPI_GLOBAL(struct acpi_table_facs *, acpi_gbl_facs32);
+ACPI_GLOBAL(struct acpi_table_facs *, acpi_gbl_facs64);
 
 #endif                         /* !ACPI_REDUCED_HARDWARE */
 
index 1886bde54b5d323e0d9543dbac41245d7b8e1075..7ac98000b46b61dd417fe260315000092b2be861 100644 (file)
@@ -468,6 +468,8 @@ void acpi_ex_eisa_id_to_string(char *dest, u64 compressed_id);
 
 void acpi_ex_integer_to_string(char *dest, u64 value);
 
+void acpi_ex_pci_cls_to_string(char *dest, u8 class_code[3]);
+
 u8 acpi_is_valid_space_id(u8 space_id);
 
 /*
index ffdb956391f614786ba2580fede7a7ce529b0897..bc600969c6a1551f81bf4a1a40c522124dd5a39a 100644 (file)
@@ -213,6 +213,7 @@ struct acpi_table_list {
 
 #define ACPI_TABLE_INDEX_DSDT           (0)
 #define ACPI_TABLE_INDEX_FACS           (1)
+#define ACPI_TABLE_INDEX_X_FACS         (2)
 
 struct acpi_find_context {
        char *search_for;
index 952fbe0b7231a79c62f463bd36e8bce596c8c478..0dd088290d80588fa1e4f7545c70d712e8b7c86c 100644 (file)
@@ -66,6 +66,7 @@
 #define ACPI_NS_PREFIX_IS_SCOPE     0x10
 #define ACPI_NS_EXTERNAL            0x20
 #define ACPI_NS_TEMPORARY           0x40
+#define ACPI_NS_OVERRIDE_IF_FOUND   0x80
 
 /* Flags for acpi_ns_walk_namespace */
 
index 3e9720e1f34f79464abbeb0fe7cb6ca7977a7fe0..c81d98d09cace4e747531511f3d95971372713a9 100644 (file)
@@ -335,6 +335,7 @@ struct acpi_object_reference {
        void *object;           /* name_op=>HANDLE to obj, index_op=>union acpi_operand_object */
        struct acpi_namespace_node *node;       /* ref_of or Namepath */
        union acpi_operand_object **where;      /* Target of Index */
+       u8 *index_pointer;      /* Used for Buffers and Strings */
        u32 value;              /* Used for Local/Arg/Index/ddb_handle */
 };
 
index 87c7860b3394b483dd3c0fe6c039e1d589682d65..44997ca02ae26a59f4e2efe0992d6c80dbcc32b7 100644 (file)
@@ -82,6 +82,7 @@ struct acpi_walk_state {
        u8 return_used;
        u8 scope_depth;
        u8 pass_number;         /* Parse pass during table load */
+       u8 namespace_override;  /* Override existing objects */
        u8 result_size;         /* Total elements for the result stack */
        u8 result_count;        /* Current number of occupied elements of result stack */
        u32 aml_offset;
index d49f5c7a20d90197ece8b625dd2fcd22fca4c4b1..6de0d3573037a99651037628d69e02d8bceb7498 100644 (file)
@@ -205,66 +205,6 @@ acpi_status acpi_ut_hardware_initialize(void);
 
 void acpi_ut_subsystem_shutdown(void);
 
-/*
- * utclib - Local implementations of C library functions
- */
-#ifndef ACPI_USE_SYSTEM_CLIBRARY
-
-acpi_size acpi_ut_strlen(const char *string);
-
-char *acpi_ut_strchr(const char *string, int ch);
-
-char *acpi_ut_strcpy(char *dst_string, const char *src_string);
-
-char *acpi_ut_strncpy(char *dst_string,
-                     const char *src_string, acpi_size count);
-
-int acpi_ut_memcmp(const char *buffer1, const char *buffer2, acpi_size count);
-
-int acpi_ut_strncmp(const char *string1, const char *string2, acpi_size count);
-
-int acpi_ut_strcmp(const char *string1, const char *string2);
-
-char *acpi_ut_strcat(char *dst_string, const char *src_string);
-
-char *acpi_ut_strncat(char *dst_string,
-                     const char *src_string, acpi_size count);
-
-u32 acpi_ut_strtoul(const char *string, char **terminator, u32 base);
-
-char *acpi_ut_strstr(char *string1, char *string2);
-
-void *acpi_ut_memcpy(void *dest, const void *src, acpi_size count);
-
-void *acpi_ut_memset(void *dest, u8 value, acpi_size count);
-
-int acpi_ut_to_upper(int c);
-
-int acpi_ut_to_lower(int c);
-
-extern const u8 _acpi_ctype[];
-
-#define _ACPI_XA     0x00      /* extra alphabetic - not supported */
-#define _ACPI_XS     0x40      /* extra space */
-#define _ACPI_BB     0x00      /* BEL, BS, etc. - not supported */
-#define _ACPI_CN     0x20      /* CR, FF, HT, NL, VT */
-#define _ACPI_DI     0x04      /* '0'-'9' */
-#define _ACPI_LO     0x02      /* 'a'-'z' */
-#define _ACPI_PU     0x10      /* punctuation */
-#define _ACPI_SP     0x08      /* space, tab, CR, LF, VT, FF */
-#define _ACPI_UP     0x01      /* 'A'-'Z' */
-#define _ACPI_XD     0x80      /* '0'-'9', 'A'-'F', 'a'-'f' */
-
-#define ACPI_IS_DIGIT(c)  (_acpi_ctype[(unsigned char)(c)] & (_ACPI_DI))
-#define ACPI_IS_SPACE(c)  (_acpi_ctype[(unsigned char)(c)] & (_ACPI_SP))
-#define ACPI_IS_XDIGIT(c) (_acpi_ctype[(unsigned char)(c)] & (_ACPI_XD))
-#define ACPI_IS_UPPER(c)  (_acpi_ctype[(unsigned char)(c)] & (_ACPI_UP))
-#define ACPI_IS_LOWER(c)  (_acpi_ctype[(unsigned char)(c)] & (_ACPI_LO))
-#define ACPI_IS_PRINT(c)  (_acpi_ctype[(unsigned char)(c)] & (_ACPI_LO | _ACPI_UP | _ACPI_DI | _ACPI_XS | _ACPI_PU))
-#define ACPI_IS_ALPHA(c)  (_acpi_ctype[(unsigned char)(c)] & (_ACPI_LO | _ACPI_UP))
-
-#endif                         /* !ACPI_USE_SYSTEM_CLIBRARY */
-
 #define ACPI_IS_ASCII(c)  ((c) < 0x80)
 
 /*
@@ -430,6 +370,10 @@ acpi_status
 acpi_ut_execute_CID(struct acpi_namespace_node *device_node,
                    struct acpi_pnp_device_id_list ** return_cid_list);
 
+acpi_status
+acpi_ut_execute_CLS(struct acpi_namespace_node *device_node,
+                   struct acpi_pnp_device_id **return_id);
+
 /*
  * utlock - reader/writer locks
  */
index 43b40de90484cbb9f44338bb2d74dbb4bf4af291..20de148594fdc0459e1ddd589abe999f71c0003f 100644 (file)
@@ -502,7 +502,7 @@ acpi_ds_create_field(union acpi_parse_object *op,
                }
        }
 
-       ACPI_MEMSET(&info, 0, sizeof(struct acpi_create_field_info));
+       memset(&info, 0, sizeof(struct acpi_create_field_info));
 
        /* Second arg is the field flags */
 
index bbe74bcebbae882f2bb1da3211b4f4aad347534c..95779e8ec3bb2ab16eb69b8d14a729812aef8df4 100644 (file)
@@ -207,7 +207,7 @@ acpi_ds_initialize_objects(u32 table_index,
 
        /* Set all init info to zero */
 
-       ACPI_MEMSET(&info, 0, sizeof(struct acpi_init_walk_info));
+       memset(&info, 0, sizeof(struct acpi_init_walk_info));
 
        info.owner_id = owner_id;
        info.table_index = table_index;
index 8a7b07b6adc81a761cefcb2689c82de187eb82b2..2beb7fd674ae14a20b43031254d874afbd859886 100644 (file)
@@ -339,8 +339,8 @@ acpi_ds_build_internal_buffer_obj(struct acpi_walk_state *walk_state,
                /* Initialize buffer from the byte_list (if present) */
 
                if (byte_list) {
-                       ACPI_MEMCPY(obj_desc->buffer.pointer,
-                                   byte_list->named.data, byte_list_length);
+                       memcpy(obj_desc->buffer.pointer, byte_list->named.data,
+                              byte_list_length);
                }
        }
 
@@ -750,8 +750,7 @@ acpi_ds_init_object_from_op(struct acpi_walk_state *walk_state,
        case ACPI_TYPE_STRING:
 
                obj_desc->string.pointer = op->common.value.string;
-               obj_desc->string.length =
-                   (u32) ACPI_STRLEN(op->common.value.string);
+               obj_desc->string.length = (u32)strlen(op->common.value.string);
 
                /*
                 * The string is contained in the ACPI table, don't ever try
index deeddd6d2f0523fbc5aff635acf63c3794a11b23..ebc577baeaf9fb14e29796a9286d22ca107fa6b5 100644 (file)
@@ -572,8 +572,8 @@ acpi_ds_create_operand(struct acpi_walk_state *walk_state,
                                        obj_desc =
                                            acpi_ut_create_string_object((acpi_size) name_length);
 
-                                       ACPI_STRNCPY(obj_desc->string.pointer,
-                                                    name_string, name_length);
+                                       strncpy(obj_desc->string.pointer,
+                                               name_string, name_length);
                                        status = AE_OK;
                                } else {
                                        /*
index 843942fb4be501c8dcc3d15ced1729a152489011..845ff44919c3713f8725f08daff16d9b9679e338 100644 (file)
@@ -315,10 +315,19 @@ acpi_ds_load1_begin_op(struct acpi_walk_state * walk_state,
                flags = ACPI_NS_NO_UPSEARCH;
                if ((walk_state->opcode != AML_SCOPE_OP) &&
                    (!(walk_state->parse_flags & ACPI_PARSE_DEFERRED_OP))) {
-                       flags |= ACPI_NS_ERROR_IF_FOUND;
-                       ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
-                                         "[%s] Cannot already exist\n",
-                                         acpi_ut_get_type_name(object_type)));
+                       if (walk_state->namespace_override) {
+                               flags |= ACPI_NS_OVERRIDE_IF_FOUND;
+                               ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
+                                                 "[%s] Override allowed\n",
+                                                 acpi_ut_get_type_name
+                                                 (object_type)));
+                       } else {
+                               flags |= ACPI_NS_ERROR_IF_FOUND;
+                               ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
+                                                 "[%s] Cannot already exist\n",
+                                                 acpi_ut_get_type_name
+                                                 (object_type)));
+                       }
                } else {
                        ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
                                          "[%s] Both Find or Create allowed\n",
index 8840296d5b205080045de3fd93a97f046bcb91f5..ea4c0d3fca2d820edfbdb219f33d9718f32cb734 100644 (file)
@@ -377,7 +377,7 @@ acpi_ev_match_gpe_method(acpi_handle obj_handle,
 
        /* 4) The last two characters of the name are the hex GPE Number */
 
-       gpe_number = ACPI_STRTOUL(&name[2], NULL, 16);
+       gpe_number = strtoul(&name[2], NULL, 16);
        if (gpe_number == ACPI_UINT32_MAX) {
 
                /* Conversion failed; invalid method, just ignore it */
index 6e0df2b9d5a475f329f2d40bfda2b592a26b764c..24a4c5c2b124825b5616371882ffa0f9666cdc85 100644 (file)
@@ -470,7 +470,7 @@ acpi_ex_load_op(union acpi_operand_object *obj_desc,
                        return_ACPI_STATUS(AE_NO_MEMORY);
                }
 
-               ACPI_MEMCPY(table, table_header, length);
+               memcpy(table, table_header, length);
                break;
 
        default:
index 89a976b4ccf2ad9f8d01ed702b2288d5080c64a2..075d654c837f27e767ebed874e68140b97fc12ff 100644 (file)
@@ -227,9 +227,8 @@ acpi_ex_convert_to_buffer(union acpi_operand_object *obj_desc,
                /* Copy the integer to the buffer, LSB first */
 
                new_buf = return_desc->buffer.pointer;
-               ACPI_MEMCPY(new_buf,
-                           &obj_desc->integer.value,
-                           acpi_gbl_integer_byte_width);
+               memcpy(new_buf,
+                      &obj_desc->integer.value, acpi_gbl_integer_byte_width);
                break;
 
        case ACPI_TYPE_STRING:
@@ -252,8 +251,8 @@ acpi_ex_convert_to_buffer(union acpi_operand_object *obj_desc,
                /* Copy the string to the buffer */
 
                new_buf = return_desc->buffer.pointer;
-               ACPI_STRNCPY((char *)new_buf, (char *)obj_desc->string.pointer,
-                            obj_desc->string.length);
+               strncpy((char *)new_buf, (char *)obj_desc->string.pointer,
+                       obj_desc->string.length);
                break;
 
        default:
index e67d0aca3fe68b23a9f84284c1a017736a708b8a..815442bbd0518e6d2ee154971966eff3a760c6d5 100644 (file)
@@ -76,6 +76,8 @@ acpi_ex_do_debug_object(union acpi_operand_object *source_desc,
 {
        u32 i;
        u32 timer;
+       union acpi_operand_object *object_desc;
+       u32 value;
 
        ACPI_FUNCTION_TRACE_PTR(ex_do_debug_object, source_desc);
 
@@ -254,8 +256,44 @@ acpi_ex_do_debug_object(union acpi_operand_object *source_desc,
                                                         object)->object,
                                                        level + 4, 0);
                        } else {
-                               acpi_ex_do_debug_object(source_desc->reference.
-                                                       object, level + 4, 0);
+                               object_desc = source_desc->reference.object;
+                               value = source_desc->reference.value;
+
+                               switch (object_desc->common.type) {
+                               case ACPI_TYPE_BUFFER:
+
+                                       acpi_os_printf("Buffer[%u] = 0x%2.2X\n",
+                                                      value,
+                                                      *source_desc->reference.
+                                                      index_pointer);
+                                       break;
+
+                               case ACPI_TYPE_STRING:
+
+                                       acpi_os_printf
+                                           ("String[%u] = \"%c\" (0x%2.2X)\n",
+                                            value,
+                                            *source_desc->reference.
+                                            index_pointer,
+                                            *source_desc->reference.
+                                            index_pointer);
+                                       break;
+
+                               case ACPI_TYPE_PACKAGE:
+
+                                       acpi_os_printf("Package[%u] = ", value);
+                                       acpi_ex_do_debug_object(*source_desc->
+                                                               reference.where,
+                                                               level + 4, 0);
+                                       break;
+
+                               default:
+
+                                       acpi_os_printf
+                                           ("Unknown Reference object type %X\n",
+                                            object_desc->common.type);
+                                       break;
+                               }
                        }
                }
                break;
index 1da52bef632e1a6b28f343f78ae6277c083f7792..401e7edcd419371a1d661a7f57cc21adce5eb134 100644 (file)
@@ -224,7 +224,7 @@ static struct acpi_exdump_info acpi_ex_dump_index_field[5] = {
        {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(index_field.data_obj), "Data Object"}
 };
 
-static struct acpi_exdump_info acpi_ex_dump_reference[8] = {
+static struct acpi_exdump_info acpi_ex_dump_reference[9] = {
        {ACPI_EXD_INIT, ACPI_EXD_TABLE_SIZE(acpi_ex_dump_reference), NULL},
        {ACPI_EXD_UINT8, ACPI_EXD_OFFSET(reference.class), "Class"},
        {ACPI_EXD_UINT8, ACPI_EXD_OFFSET(reference.target_type), "Target Type"},
@@ -232,6 +232,8 @@ static struct acpi_exdump_info acpi_ex_dump_reference[8] = {
        {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(reference.object), "Object Desc"},
        {ACPI_EXD_NODE, ACPI_EXD_OFFSET(reference.node), "Node"},
        {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(reference.where), "Where"},
+       {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(reference.index_pointer),
+        "Index Pointer"},
        {ACPI_EXD_REFERENCE, 0, NULL}
 };
 
@@ -1005,14 +1007,13 @@ static void acpi_ex_dump_reference_obj(union acpi_operand_object *obj_desc)
        } else if (obj_desc->reference.object) {
                if (ACPI_GET_DESCRIPTOR_TYPE(obj_desc) ==
                    ACPI_DESC_TYPE_OPERAND) {
-                       acpi_os_printf(" Target: %p",
+                       acpi_os_printf("%22s %p", "Target :",
                                       obj_desc->reference.object);
                        if (obj_desc->reference.class == ACPI_REFCLASS_TABLE) {
                                acpi_os_printf(" Table Index: %X\n",
                                               obj_desc->reference.value);
                        } else {
-                               acpi_os_printf(" Target: %p [%s]\n",
-                                              obj_desc->reference.object,
+                               acpi_os_printf(" [%s]\n",
                                               acpi_ut_get_type_name(((union
                                                                       acpi_operand_object
                                                                       *)
index c161dd974f741c1700c501fa6576585034c67e43..61fd9c7b88bc508360ab4d96a034b8ac41bcca65 100644 (file)
@@ -428,7 +428,7 @@ acpi_ex_write_data_to_field(union acpi_operand_object *source_desc,
                }
 
                buffer = buffer_desc->buffer.pointer;
-               ACPI_MEMCPY(buffer, source_desc->buffer.pointer, length);
+               memcpy(buffer, source_desc->buffer.pointer, length);
 
                /* Lock entire transaction if requested */
 
index 725a3746a2df095163e5c10ca35077cf8afbb72c..70b7bbbb860b216aaeeb5e520cb5e253fc9b8f6b 100644 (file)
@@ -416,22 +416,22 @@ acpi_ex_field_datum_io(union acpi_operand_object *obj_desc,
                         * Copy the data from the source buffer.
                         * Length is the field width in bytes.
                         */
-                       ACPI_MEMCPY(value,
-                                   (obj_desc->buffer_field.buffer_obj)->buffer.
-                                   pointer +
-                                   obj_desc->buffer_field.base_byte_offset +
-                                   field_datum_byte_offset,
-                                   obj_desc->common_field.access_byte_width);
+                       memcpy(value,
+                              (obj_desc->buffer_field.buffer_obj)->buffer.
+                              pointer +
+                              obj_desc->buffer_field.base_byte_offset +
+                              field_datum_byte_offset,
+                              obj_desc->common_field.access_byte_width);
                } else {
                        /*
                         * Copy the data to the target buffer.
                         * Length is the field width in bytes.
                         */
-                       ACPI_MEMCPY((obj_desc->buffer_field.buffer_obj)->buffer.
-                                   pointer +
-                                   obj_desc->buffer_field.base_byte_offset +
-                                   field_datum_byte_offset, value,
-                                   obj_desc->common_field.access_byte_width);
+                       memcpy((obj_desc->buffer_field.buffer_obj)->buffer.
+                              pointer +
+                              obj_desc->buffer_field.base_byte_offset +
+                              field_datum_byte_offset, value,
+                              obj_desc->common_field.access_byte_width);
                }
 
                status = AE_OK;
@@ -703,7 +703,7 @@ acpi_ex_extract_from_field(union acpi_operand_object *obj_desc,
                return_ACPI_STATUS(AE_BUFFER_OVERFLOW);
        }
 
-       ACPI_MEMSET(buffer, 0, buffer_length);
+       memset(buffer, 0, buffer_length);
        access_bit_width = ACPI_MUL_8(obj_desc->common_field.access_byte_width);
 
        /* Handle the simple case here */
@@ -720,7 +720,7 @@ acpi_ex_extract_from_field(union acpi_operand_object *obj_desc,
                        status =
                            acpi_ex_field_datum_io(obj_desc, 0, &raw_datum,
                                                   ACPI_READ);
-                       ACPI_MEMCPY(buffer, &raw_datum, buffer_length);
+                       memcpy(buffer, &raw_datum, buffer_length);
                }
 
                return_ACPI_STATUS(status);
@@ -793,9 +793,9 @@ acpi_ex_extract_from_field(union acpi_operand_object *obj_desc,
 
                /* Write merged datum to target buffer */
 
-               ACPI_MEMCPY(((char *)buffer) + buffer_offset, &merged_datum,
-                           ACPI_MIN(obj_desc->common_field.access_byte_width,
-                                    buffer_length - buffer_offset));
+               memcpy(((char *)buffer) + buffer_offset, &merged_datum,
+                      ACPI_MIN(obj_desc->common_field.access_byte_width,
+                               buffer_length - buffer_offset));
 
                buffer_offset += obj_desc->common_field.access_byte_width;
                merged_datum =
@@ -811,9 +811,9 @@ acpi_ex_extract_from_field(union acpi_operand_object *obj_desc,
 
        /* Write the last datum to the buffer */
 
-       ACPI_MEMCPY(((char *)buffer) + buffer_offset, &merged_datum,
-                   ACPI_MIN(obj_desc->common_field.access_byte_width,
-                            buffer_length - buffer_offset));
+       memcpy(((char *)buffer) + buffer_offset, &merged_datum,
+              ACPI_MIN(obj_desc->common_field.access_byte_width,
+                       buffer_length - buffer_offset));
 
        return_ACPI_STATUS(AE_OK);
 }
@@ -878,7 +878,7 @@ acpi_ex_insert_into_field(union acpi_operand_object *obj_desc,
                 * at Byte zero. All unused (upper) bytes of the
                 * buffer will be 0.
                 */
-               ACPI_MEMCPY((char *)new_buffer, (char *)buffer, buffer_length);
+               memcpy((char *)new_buffer, (char *)buffer, buffer_length);
                buffer = new_buffer;
                buffer_length = required_length;
        }
@@ -918,9 +918,9 @@ acpi_ex_insert_into_field(union acpi_operand_object *obj_desc,
 
        /* Get initial Datum from the input buffer */
 
-       ACPI_MEMCPY(&raw_datum, buffer,
-                   ACPI_MIN(obj_desc->common_field.access_byte_width,
-                            buffer_length - buffer_offset));
+       memcpy(&raw_datum, buffer,
+              ACPI_MIN(obj_desc->common_field.access_byte_width,
+                       buffer_length - buffer_offset));
 
        merged_datum =
            raw_datum << obj_desc->common_field.start_field_bit_offset;
@@ -970,9 +970,9 @@ acpi_ex_insert_into_field(union acpi_operand_object *obj_desc,
                /* Get the next input datum from the buffer */
 
                buffer_offset += obj_desc->common_field.access_byte_width;
-               ACPI_MEMCPY(&raw_datum, ((char *)buffer) + buffer_offset,
-                           ACPI_MIN(obj_desc->common_field.access_byte_width,
-                                    buffer_length - buffer_offset));
+               memcpy(&raw_datum, ((char *)buffer) + buffer_offset,
+                      ACPI_MIN(obj_desc->common_field.access_byte_width,
+                               buffer_length - buffer_offset));
 
                merged_datum |=
                    raw_datum << obj_desc->common_field.start_field_bit_offset;
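
The extract/insert helpers above walk the region in access-width datums, shifting each datum by start_field_bit_offset and merging neighbours so a field that straddles datum boundaries comes out contiguous. A much-simplified standalone sketch of the same shift-and-merge idea for a small (at most 32 bit) field in a little-endian buffer; function and variable names are illustrative:

#include <stdint.h>
#include <stdio.h>

/* Gather the bytes covering the field, shift down by the offset within the
 * first byte, then mask to the field width. The real code does this per
 * access-width datum and merges adjacent datums. */
static uint32_t extract_bits(const uint8_t *buf, unsigned int bit_offset,
                             unsigned int bit_length)
{
        unsigned int start_byte = bit_offset / 8;
        unsigned int shift = bit_offset % 8;
        unsigned int nbytes = (shift + bit_length + 7) / 8;
        uint64_t raw = 0;

        for (unsigned int i = 0; i < nbytes; i++)
                raw |= (uint64_t)buf[start_byte + i] << (8 * i);

        return (uint32_t)((raw >> shift) & ((1ULL << bit_length) - 1));
}

int main(void)
{
        uint8_t region[] = { 0xAB, 0xCD, 0xEF };

        /* A 12-bit field at bit offset 4 spans two bytes: prints 0xCDA */
        printf("0x%X\n", extract_bits(region, 4, 12));
        return 0;
}
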
index b56fc9d6f48e3a1180fd234d1e9eacf811202454..d02afece0f103ae9034d54657b9ef2a3ca686d9b 100644 (file)
@@ -209,8 +209,8 @@ acpi_ex_concat_template(union acpi_operand_object *operand0,
         * end_tag descriptor is copied from Operand1.
         */
        new_buf = return_desc->buffer.pointer;
-       ACPI_MEMCPY(new_buf, operand0->buffer.pointer, length0);
-       ACPI_MEMCPY(new_buf + length0, operand1->buffer.pointer, length1);
+       memcpy(new_buf, operand0->buffer.pointer, length0);
+       memcpy(new_buf + length0, operand1->buffer.pointer, length1);
 
        /* Insert end_tag and set the checksum to zero, means "ignore checksum" */
 
@@ -318,14 +318,14 @@ acpi_ex_do_concatenate(union acpi_operand_object *operand0,
 
                /* Copy the first integer, LSB first */
 
-               ACPI_MEMCPY(new_buf, &operand0->integer.value,
-                           acpi_gbl_integer_byte_width);
+               memcpy(new_buf, &operand0->integer.value,
+                      acpi_gbl_integer_byte_width);
 
                /* Copy the second integer (LSB first) after the first */
 
-               ACPI_MEMCPY(new_buf + acpi_gbl_integer_byte_width,
-                           &local_operand1->integer.value,
-                           acpi_gbl_integer_byte_width);
+               memcpy(new_buf + acpi_gbl_integer_byte_width,
+                      &local_operand1->integer.value,
+                      acpi_gbl_integer_byte_width);
                break;
 
        case ACPI_TYPE_STRING:
@@ -346,9 +346,9 @@ acpi_ex_do_concatenate(union acpi_operand_object *operand0,
 
                /* Concatenate the strings */
 
-               ACPI_STRCPY(new_buf, operand0->string.pointer);
-               ACPI_STRCPY(new_buf + operand0->string.length,
-                           local_operand1->string.pointer);
+               strcpy(new_buf, operand0->string.pointer);
+               strcpy(new_buf + operand0->string.length,
+                      local_operand1->string.pointer);
                break;
 
        case ACPI_TYPE_BUFFER:
@@ -369,11 +369,11 @@ acpi_ex_do_concatenate(union acpi_operand_object *operand0,
 
                /* Concatenate the buffers */
 
-               ACPI_MEMCPY(new_buf, operand0->buffer.pointer,
-                           operand0->buffer.length);
-               ACPI_MEMCPY(new_buf + operand0->buffer.length,
-                           local_operand1->buffer.pointer,
-                           local_operand1->buffer.length);
+               memcpy(new_buf, operand0->buffer.pointer,
+                      operand0->buffer.length);
+               memcpy(new_buf + operand0->buffer.length,
+                      local_operand1->buffer.pointer,
+                      local_operand1->buffer.length);
                break;
 
        default:
@@ -660,9 +660,9 @@ acpi_ex_do_logical_op(u16 opcode,
 
                /* Lexicographic compare: compare the data bytes */
 
-               compare = ACPI_MEMCMP(operand0->buffer.pointer,
-                                     local_operand1->buffer.pointer,
-                                     (length0 > length1) ? length1 : length0);
+               compare = memcmp(operand0->buffer.pointer,
+                                local_operand1->buffer.pointer,
+                                (length0 > length1) ? length1 : length0);
 
                switch (opcode) {
                case AML_LEQUAL_OP:     /* LEqual (Operand0, Operand1) */
index 453b00c301773fbb9977efc583f5a9163e54462a..20e87813c7d7c6b304c2bc2cbebb6b2ec370eb13 100644 (file)
@@ -192,7 +192,7 @@ static acpi_status acpi_ex_name_segment(u8 ** in_aml_address, char *name_string)
                char_buf[4] = '\0';
 
                if (name_string) {
-                       ACPI_STRCAT(name_string, char_buf);
+                       strcat(name_string, char_buf);
                        ACPI_DEBUG_PRINT((ACPI_DB_NAMES,
                                          "Appended to - %s\n", name_string));
                } else {
index fcc618aa2061496e089c73fb0c8e760e165bd64b..b8944ebb108145aeb817f1b6f62774da4fb1c8ac 100644 (file)
@@ -337,8 +337,8 @@ acpi_status acpi_ex_opcode_2A_1T_1R(struct acpi_walk_state *walk_state)
                 * Copy the raw buffer data with no transform.
                 * (NULL terminated already)
                 */
-               ACPI_MEMCPY(return_desc->string.pointer,
-                           operand[0]->buffer.pointer, length);
+               memcpy(return_desc->string.pointer,
+                      operand[0]->buffer.pointer, length);
                break;
 
        case AML_CONCAT_RES_OP:
@@ -380,6 +380,8 @@ acpi_status acpi_ex_opcode_2A_1T_1R(struct acpi_walk_state *walk_state)
 
                        return_desc->reference.target_type =
                            ACPI_TYPE_BUFFER_FIELD;
+                       return_desc->reference.index_pointer =
+                           &(operand[0]->buffer.pointer[index]);
                        break;
 
                case ACPI_TYPE_BUFFER:
@@ -391,6 +393,8 @@ acpi_status acpi_ex_opcode_2A_1T_1R(struct acpi_walk_state *walk_state)
 
                        return_desc->reference.target_type =
                            ACPI_TYPE_BUFFER_FIELD;
+                       return_desc->reference.index_pointer =
+                           &(operand[0]->buffer.pointer[index]);
                        break;
 
                case ACPI_TYPE_PACKAGE:
index 1c64a988cbee538634ab5eef9be21d7b7c315145..fa100b3b92ee8a4180c5ce8fe052e274ac26bce7 100644 (file)
@@ -237,8 +237,8 @@ acpi_status acpi_ex_opcode_3A_1T_1R(struct acpi_walk_state *walk_state)
 
                        /* We have a buffer, copy the portion requested */
 
-                       ACPI_MEMCPY(buffer, operand[0]->string.pointer + index,
-                                   length);
+                       memcpy(buffer, operand[0]->string.pointer + index,
+                              length);
                }
 
                /* Set the length of the new String/Buffer */
index f6c2f5499935c7dffc1c53655b7f1f45da0a7073..b4a5e44c00dd05df639b9d56d06ee44f04dcaf1a 100644 (file)
@@ -517,15 +517,14 @@ acpi_ex_data_table_space_handler(u32 function,
        switch (function) {
        case ACPI_READ:
 
-               ACPI_MEMCPY(ACPI_CAST_PTR(char, value),
-                           ACPI_PHYSADDR_TO_PTR(address),
-                           ACPI_DIV_8(bit_width));
+               memcpy(ACPI_CAST_PTR(char, value),
+                      ACPI_PHYSADDR_TO_PTR(address), ACPI_DIV_8(bit_width));
                break;
 
        case ACPI_WRITE:
 
-               ACPI_MEMCPY(ACPI_PHYSADDR_TO_PTR(address),
-                           ACPI_CAST_PTR(char, value), ACPI_DIV_8(bit_width));
+               memcpy(ACPI_PHYSADDR_TO_PTR(address),
+                      ACPI_CAST_PTR(char, value), ACPI_DIV_8(bit_width));
                break;
 
        default:
index 6fa3c8d8fc5f9fd08e8df6f37c781134e7dab842..e1d4f4d51b97a41703546fdd71d6efb5770e3544 100644 (file)
@@ -100,9 +100,9 @@ acpi_ex_store_buffer_to_buffer(union acpi_operand_object *source_desc,
 
                /* Clear existing buffer and copy in the new one */
 
-               ACPI_MEMSET(target_desc->buffer.pointer, 0,
-                           target_desc->buffer.length);
-               ACPI_MEMCPY(target_desc->buffer.pointer, buffer, length);
+               memset(target_desc->buffer.pointer, 0,
+                      target_desc->buffer.length);
+               memcpy(target_desc->buffer.pointer, buffer, length);
 
 #ifdef ACPI_OBSOLETE_BEHAVIOR
                /*
@@ -129,8 +129,8 @@ acpi_ex_store_buffer_to_buffer(union acpi_operand_object *source_desc,
        } else {
                /* Truncate the source, copy only what will fit */
 
-               ACPI_MEMCPY(target_desc->buffer.pointer, buffer,
-                           target_desc->buffer.length);
+               memcpy(target_desc->buffer.pointer, buffer,
+                      target_desc->buffer.length);
 
                ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                                  "Truncating source buffer from %X to %X\n",
@@ -187,9 +187,9 @@ acpi_ex_store_string_to_string(union acpi_operand_object *source_desc,
                 * String will fit in existing non-static buffer.
                 * Clear old string and copy in the new one
                 */
-               ACPI_MEMSET(target_desc->string.pointer, 0,
-                           (acpi_size) target_desc->string.length + 1);
-               ACPI_MEMCPY(target_desc->string.pointer, buffer, length);
+               memset(target_desc->string.pointer, 0,
+                      (acpi_size) target_desc->string.length + 1);
+               memcpy(target_desc->string.pointer, buffer, length);
        } else {
                /*
                 * Free the current buffer, then allocate a new buffer
@@ -210,7 +210,7 @@ acpi_ex_store_string_to_string(union acpi_operand_object *source_desc,
                }
 
                target_desc->common.flags &= ~AOPOBJ_STATIC_POINTER;
-               ACPI_MEMCPY(target_desc->string.pointer, buffer, length);
+               memcpy(target_desc->string.pointer, buffer, length);
        }
 
        /* Set the new target length */
index 3f4225e95d9311ffe5a915f5081f686b0619ca4f..30c3f464fda5bcae5e98c7dd323fb963acea217d 100644 (file)
@@ -378,6 +378,38 @@ void acpi_ex_integer_to_string(char *out_string, u64 value)
        }
 }
 
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ex_pci_cls_to_string
+ *
+ * PARAMETERS:  out_string      - Where to put the converted string (7 bytes)
+ *              class_code      - PCI class code to be converted (3 bytes)
+ *
+ * RETURN:      None
+ *
+ * DESCRIPTION: Convert a 3-byte PCI class code to its string representation.
+ *              The return buffer must be large enough to hold the string,
+ *              which is always exactly ACPI_PCICLS_STRING_SIZE bytes long
+ *              (including the null terminator).
+ *
+ ******************************************************************************/
+
+void acpi_ex_pci_cls_to_string(char *out_string, u8 class_code[3])
+{
+
+       ACPI_FUNCTION_ENTRY();
+
+       /* All 3 bytes are hexadecimal */
+
+       out_string[0] = acpi_ut_hex_to_ascii_char((u64)class_code[0], 4);
+       out_string[1] = acpi_ut_hex_to_ascii_char((u64)class_code[0], 0);
+       out_string[2] = acpi_ut_hex_to_ascii_char((u64)class_code[1], 4);
+       out_string[3] = acpi_ut_hex_to_ascii_char((u64)class_code[1], 0);
+       out_string[4] = acpi_ut_hex_to_ascii_char((u64)class_code[2], 4);
+       out_string[5] = acpi_ut_hex_to_ascii_char((u64)class_code[2], 0);
+       out_string[6] = 0;
+}
+
 /*******************************************************************************
  *
  * FUNCTION:    acpi_is_valid_space_id
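
The new acpi_ex_pci_cls_to_string simply hex-formats the three class-code bytes into a fixed 7-byte buffer. An equivalent standalone sketch, using a local hex helper in place of acpi_ut_hex_to_ascii_char:

#include <stdio.h>
#include <stdint.h>

static char hex_digit(uint8_t nibble)
{
        return "0123456789ABCDEF"[nibble & 0x0F];
}

/* Format a 3-byte PCI class code as 6 uppercase hex chars plus NUL. */
static void pci_cls_to_string(char out[7], const uint8_t class_code[3])
{
        for (int i = 0; i < 3; i++) {
                out[2 * i]     = hex_digit(class_code[i] >> 4);
                out[2 * i + 1] = hex_digit(class_code[i]);
        }
        out[6] = '\0';
}

int main(void)
{
        uint8_t cls[3] = { 0x01, 0x06, 0x01 };  /* e.g. SATA AHCI controller */
        char buf[7];

        pci_cls_to_string(buf, cls);
        printf("%s\n", buf);                    /* 010601 */
        return 0;
}
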
index 3b3767698827f9d2553af49a3bdbdbae9f331613..52dfd0d050fa30b09077446e55cd7b27f054452d 100644 (file)
 ACPI_MODULE_NAME("hwxfsleep")
 
 /* Local prototypes */
+#if (!ACPI_REDUCED_HARDWARE)
+static acpi_status
+acpi_hw_set_firmware_waking_vectors(struct acpi_table_facs *facs,
+                                   acpi_physical_address physical_address,
+                                   acpi_physical_address physical_address64);
+#endif
+
 static acpi_status acpi_hw_sleep_dispatch(u8 sleep_state, u32 function_id);
 
 /*
@@ -72,6 +79,7 @@ static struct acpi_sleep_functions acpi_sleep_dispatch[] = {
 
 /*
  * These functions are removed for the ACPI_REDUCED_HARDWARE case:
+ *      acpi_set_firmware_waking_vectors
  *      acpi_set_firmware_waking_vector
  *      acpi_set_firmware_waking_vector64
  *      acpi_enter_sleep_state_s4bios
@@ -80,20 +88,26 @@ static struct acpi_sleep_functions acpi_sleep_dispatch[] = {
 #if (!ACPI_REDUCED_HARDWARE)
 /*******************************************************************************
  *
- * FUNCTION:    acpi_set_firmware_waking_vector
+ * FUNCTION:    acpi_hw_set_firmware_waking_vectors
  *
- * PARAMETERS:  physical_address    - 32-bit physical address of ACPI real mode
+ * PARAMETERS:  facs                - Pointer to FACS table
+ *              physical_address    - 32-bit physical address of ACPI real mode
  *                                    entry point.
+ *              physical_address64  - 64-bit physical address of ACPI protected
+ *                                    mode entry point.
  *
  * RETURN:      Status
  *
- * DESCRIPTION: Sets the 32-bit firmware_waking_vector field of the FACS
+ * DESCRIPTION: Sets the firmware_waking_vector fields of the FACS
  *
  ******************************************************************************/
 
-acpi_status acpi_set_firmware_waking_vector(u32 physical_address)
+static acpi_status
+acpi_hw_set_firmware_waking_vectors(struct acpi_table_facs *facs,
+                                   acpi_physical_address physical_address,
+                                   acpi_physical_address physical_address64)
 {
-       ACPI_FUNCTION_TRACE(acpi_set_firmware_waking_vector);
+       ACPI_FUNCTION_TRACE(acpi_hw_set_firmware_waking_vectors);
 
 
        /*
@@ -106,17 +120,92 @@ acpi_status acpi_set_firmware_waking_vector(u32 physical_address)
 
        /* Set the 32-bit vector */
 
-       acpi_gbl_FACS->firmware_waking_vector = physical_address;
+       facs->firmware_waking_vector = (u32)physical_address;
 
-       /* Clear the 64-bit vector if it exists */
+       if (facs->length > 32) {
+               if (facs->version >= 1) {
 
-       if ((acpi_gbl_FACS->length > 32) && (acpi_gbl_FACS->version >= 1)) {
-               acpi_gbl_FACS->xfirmware_waking_vector = 0;
+                       /* Set the 64-bit vector */
+
+                       facs->xfirmware_waking_vector = physical_address64;
+               } else {
+                       /* Clear the 64-bit vector if it exists */
+
+                       facs->xfirmware_waking_vector = 0;
+               }
        }
 
        return_ACPI_STATUS(AE_OK);
 }
 
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_set_firmware_waking_vectors
+ *
+ * PARAMETERS:  physical_address    - 32-bit physical address of ACPI real mode
+ *                                    entry point.
+ *              physical_address64  - 64-bit physical address of ACPI protected
+ *                                    mode entry point.
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Sets the firmware_waking_vector fields of the FACS
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_set_firmware_waking_vectors(acpi_physical_address physical_address,
+                                acpi_physical_address physical_address64)
+{
+
+       ACPI_FUNCTION_TRACE(acpi_set_firmware_waking_vectors);
+
+       /* If Hardware Reduced flag is set, there is no FACS */
+
+       if (acpi_gbl_reduced_hardware) {
+               return_ACPI_STATUS (AE_OK);
+       }
+
+       if (acpi_gbl_facs32) {
+               (void)acpi_hw_set_firmware_waking_vectors(acpi_gbl_facs32,
+                                                         physical_address,
+                                                         physical_address64);
+       }
+       if (acpi_gbl_facs64) {
+               (void)acpi_hw_set_firmware_waking_vectors(acpi_gbl_facs64,
+                                                         physical_address,
+                                                         physical_address64);
+       }
+
+       return_ACPI_STATUS(AE_OK);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_set_firmware_waking_vectors)
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_set_firmware_waking_vector
+ *
+ * PARAMETERS:  physical_address    - 32-bit physical address of ACPI real mode
+ *                                    entry point.
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Sets the 32-bit firmware_waking_vector field of the FACS
+ *
+ ******************************************************************************/
+acpi_status acpi_set_firmware_waking_vector(u32 physical_address)
+{
+       acpi_status status;
+
+       ACPI_FUNCTION_TRACE(acpi_set_firmware_waking_vector);
+
+       status = acpi_set_firmware_waking_vectors((acpi_physical_address)
+                                                 physical_address, 0);
+
+       return_ACPI_STATUS(status);
+}
+
 ACPI_EXPORT_SYMBOL(acpi_set_firmware_waking_vector)
 
 #if ACPI_MACHINE_WIDTH == 64
@@ -136,25 +225,19 @@ ACPI_EXPORT_SYMBOL(acpi_set_firmware_waking_vector)
  ******************************************************************************/
 acpi_status acpi_set_firmware_waking_vector64(u64 physical_address)
 {
-       ACPI_FUNCTION_TRACE(acpi_set_firmware_waking_vector64);
-
-
-       /* Determine if the 64-bit vector actually exists */
+       acpi_status status;
 
-       if ((acpi_gbl_FACS->length <= 32) || (acpi_gbl_FACS->version < 1)) {
-               return_ACPI_STATUS(AE_NOT_EXIST);
-       }
+       ACPI_FUNCTION_TRACE(acpi_set_firmware_waking_vector64);
 
-       /* Clear 32-bit vector, set the 64-bit X_ vector */
+       status = acpi_set_firmware_waking_vectors(0,
+                                                 (acpi_physical_address)
+                                                 physical_address);
 
-       acpi_gbl_FACS->firmware_waking_vector = 0;
-       acpi_gbl_FACS->xfirmware_waking_vector = physical_address;
-       return_ACPI_STATUS(AE_OK);
+       return_ACPI_STATUS(status);
 }
 
 ACPI_EXPORT_SYMBOL(acpi_set_firmware_waking_vector64)
 #endif
-
 /*******************************************************************************
  *
  * FUNCTION:    acpi_enter_sleep_state_s4bios
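
The reworked hwxfsleep.c routes both the legacy 32-bit entry point and the new dual-vector API through one helper that applies the FACS rules shown above: the 32-bit vector is always written, while the 64-bit X_ vector is only written for FACS version >= 1 with length > 32 and is cleared otherwise. A standalone sketch of those rules over a trimmed-down FACS layout, reduced to the fields touched here:

#include <stdint.h>
#include <stdio.h>

/* Simplified FACS: only the fields the patch touches. */
struct facs {
        uint32_t length;
        uint8_t  version;
        uint32_t firmware_waking_vector;
        uint64_t xfirmware_waking_vector;
};

/* Always set the 32-bit vector; the 64-bit X_ vector is only valid on
 * FACS version >= 1 with length > 32, and is cleared otherwise so the
 * firmware cannot pick up a stale value. */
static void set_waking_vectors(struct facs *facs, uint64_t addr32, uint64_t addr64)
{
        facs->firmware_waking_vector = (uint32_t)addr32;

        if (facs->length > 32) {
                if (facs->version >= 1)
                        facs->xfirmware_waking_vector = addr64;
                else
                        facs->xfirmware_waking_vector = 0;
        }
}

int main(void)
{
        struct facs f = { .length = 64, .version = 2 };

        set_waking_vectors(&f, 0x9f000, 0x100000000ULL);
        printf("32-bit: 0x%x  64-bit: 0x%llx\n",
               f.firmware_waking_vector,
               (unsigned long long)f.xfirmware_waking_vector);
        return 0;
}
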
index 24fa19a76d704102b89ada50aa836dbf95d0171e..c687b9979fb2a29930c822e7b9bf5f3acb359d30 100644 (file)
@@ -102,7 +102,7 @@ acpi_status acpi_ns_root_initialize(void)
 
                /* _OSI is optional for now, will be permanent later */
 
-               if (!ACPI_STRCMP(init_val->name, "_OSI")
+               if (!strcmp(init_val->name, "_OSI")
                    && !acpi_gbl_create_osi_method) {
                        continue;
                }
@@ -180,7 +180,7 @@ acpi_status acpi_ns_root_initialize(void)
 
                                /* Build an object around the static string */
 
-                               obj_desc->string.length = (u32)ACPI_STRLEN(val);
+                               obj_desc->string.length = (u32)strlen(val);
                                obj_desc->string.pointer = val;
                                obj_desc->common.flags |= AOPOBJ_STATIC_POINTER;
                                break;
@@ -203,7 +203,7 @@ acpi_status acpi_ns_root_initialize(void)
 
                                /* Special case for ACPI Global Lock */
 
-                               if (ACPI_STRCMP(init_val->name, "_GL_") == 0) {
+                               if (strcmp(init_val->name, "_GL_") == 0) {
                                        acpi_gbl_global_lock_mutex = obj_desc;
 
                                        /* Create additional counting semaphore for global lock */
@@ -304,7 +304,9 @@ acpi_ns_lookup(union acpi_generic_state *scope_info,
                return_ACPI_STATUS(AE_BAD_PARAMETER);
        }
 
-       local_flags = flags & ~(ACPI_NS_ERROR_IF_FOUND | ACPI_NS_SEARCH_PARENT);
+       local_flags = flags &
+           ~(ACPI_NS_ERROR_IF_FOUND | ACPI_NS_OVERRIDE_IF_FOUND |
+             ACPI_NS_SEARCH_PARENT);
        *return_node = ACPI_ENTRY_NOT_FOUND;
        acpi_gbl_ns_lookup_count++;
 
@@ -547,6 +549,12 @@ acpi_ns_lookup(union acpi_generic_state *scope_info,
                        if (flags & ACPI_NS_ERROR_IF_FOUND) {
                                local_flags |= ACPI_NS_ERROR_IF_FOUND;
                        }
+
+                       /* Set override flag according to caller */
+
+                       if (flags & ACPI_NS_OVERRIDE_IF_FOUND) {
+                               local_flags |= ACPI_NS_OVERRIDE_IF_FOUND;
+                       }
                }
 
                /* Extract one ACPI name from the front of the pathname */
index 1a8b39c8d969d9567654c23cff984285b241f0c1..da55a1c60da180cf4f2aeec66f1dd0f41e22d974 100644 (file)
@@ -187,8 +187,8 @@ acpi_ns_convert_to_string(union acpi_operand_object *original_object,
                 * Copy the raw buffer data with no transform. String is already NULL
                 * terminated at Length+1.
                 */
-               ACPI_MEMCPY(new_object->string.pointer,
-                           original_object->buffer.pointer, length);
+               memcpy(new_object->string.pointer,
+                      original_object->buffer.pointer, length);
                break;
 
        default:
@@ -251,9 +251,9 @@ acpi_ns_convert_to_buffer(union acpi_operand_object *original_object,
                        return (AE_NO_MEMORY);
                }
 
-               ACPI_MEMCPY(new_object->buffer.pointer,
-                           original_object->string.pointer,
-                           original_object->string.length);
+               memcpy(new_object->buffer.pointer,
+                      original_object->string.pointer,
+                      original_object->string.length);
                break;
 
        case ACPI_TYPE_PACKAGE:
index d259393505fa9bb9230ef51a152a01c317c5350c..0f1daba640e7a5f70c5a6f0196398f65551219d8 100644 (file)
@@ -101,7 +101,7 @@ void acpi_ns_print_pathname(u32 num_segments, char *pathname)
 
        while (num_segments) {
                for (i = 0; i < 4; i++) {
-                       ACPI_IS_PRINT(pathname[i]) ?
+                       isprint((int)pathname[i]) ?
                            acpi_os_printf("%c", pathname[i]) :
                            acpi_os_printf("?");
                }
index 7bcc68f57afa61d24d6d170e04bb9c2db8271900..80670cb32b5a3fe9438e735d26ef1b269be1e3f7 100644 (file)
@@ -59,15 +59,14 @@ acpi_ns_exec_module_code(union acpi_operand_object *method_obj,
  *
  * FUNCTION:    acpi_ns_evaluate
  *
- * PARAMETERS:  info            - Evaluation info block, contains:
+ * PARAMETERS:  info            - Evaluation info block, contains these fields
+ *                                and more:
  *                  prefix_node     - Prefix or Method/Object Node to execute
  *                  relative_path   - Name of method to execute, If NULL, the
  *                                    Node is the object to execute
  *                  parameters      - List of parameters to pass to the method,
  *                                    terminated by NULL. Params itself may be
  *                                    NULL if no parameters are being passed.
- *                  return_object   - Where to put method's return value (if
- *                                    any). If NULL, no value is returned.
  *                  parameter_type  - Type of Parameter list
  *                  return_object   - Where to put method's return value (if
  *                                    any). If NULL, no value is returned.
@@ -440,7 +439,7 @@ acpi_ns_exec_module_code(union acpi_operand_object *method_obj,
 
        /* Initialize the evaluation information block */
 
-       ACPI_MEMSET(info, 0, sizeof(struct acpi_evaluate_info));
+       memset(info, 0, sizeof(struct acpi_evaluate_info));
        info->prefix_node = parent_node;
 
        /*
index 4a85c45179883b14421f324d4fcc2b9f923f0309..b744a53618eb3977663ab75a6a31e808e96b5aeb 100644 (file)
@@ -90,7 +90,7 @@ acpi_status acpi_ns_initialize_objects(void)
 
        /* Set all init info to zero */
 
-       ACPI_MEMSET(&info, 0, sizeof(struct acpi_init_walk_info));
+       memset(&info, 0, sizeof(struct acpi_init_walk_info));
 
        /* Walk entire namespace from the supplied root */
 
@@ -566,7 +566,7 @@ acpi_ns_init_one_device(acpi_handle obj_handle,
        ACPI_DEBUG_EXEC(acpi_ut_display_init_pathname
                        (ACPI_TYPE_METHOD, device_node, METHOD_NAME__INI));
 
-       ACPI_MEMSET(info, 0, sizeof(struct acpi_evaluate_info));
+       memset(info, 0, sizeof(struct acpi_evaluate_info));
        info->prefix_node = device_node;
        info->relative_pathname = METHOD_NAME__INI;
        info->parameters = NULL;
index c95a119767b56fad8569b31d904c42edb643361f..57a4cfe547e4921d09cd41c1a2005cd769b22f83 100644 (file)
@@ -117,6 +117,13 @@ acpi_ns_one_complete_parse(u32 pass_number,
                                               (u8) pass_number);
        }
 
+       /* Found OSDT table, enable the namespace override feature */
+
+       if (ACPI_COMPARE_NAME(table->signature, ACPI_SIG_OSDT) &&
+           pass_number == ACPI_IMODE_LOAD_PASS1) {
+               walk_state->namespace_override = TRUE;
+       }
+
        if (ACPI_FAILURE(status)) {
                acpi_ds_delete_walk_state(walk_state);
                goto cleanup;
index c30672d238789668fdfd70ea7ad512838e3e7f55..0515a70f42a4fb6f27a78466b004d34517c8b51c 100644 (file)
@@ -580,7 +580,7 @@ acpi_ns_repair_HID(struct acpi_evaluate_info *info,
         * # is a hex digit.
         */
        for (dest = new_string->string.pointer; *source; dest++, source++) {
-               *dest = (char)ACPI_TOUPPER(*source);
+               *dest = (char)toupper((int)*source);
        }
 
        acpi_ut_remove_reference(return_object);
index 4a9d4a66016e51eefcb851979489bb871edab2a2..d7390401383043d7af82cf51233cdfea4f825566 100644 (file)
@@ -325,8 +325,41 @@ acpi_ns_search_and_enter(u32 target_name,
                 * If we found it AND the request specifies that a find is an error,
                 * return the error
                 */
-               if ((status == AE_OK) && (flags & ACPI_NS_ERROR_IF_FOUND)) {
-                       status = AE_ALREADY_EXISTS;
+               if (status == AE_OK) {
+
+                       /* The node was found in the namespace */
+
+                       /*
+                        * If the namespace override feature is enabled for this node,
+                        * delete any existing attached sub-object and make the node
+                        * look like a new node that is owned by the override table.
+                        */
+                       if (flags & ACPI_NS_OVERRIDE_IF_FOUND) {
+                               ACPI_DEBUG_PRINT((ACPI_DB_NAMES,
+                                                 "Namespace override: %4.4s pass %u type %X Owner %X\n",
+                                                 ACPI_CAST_PTR(char,
+                                                               &target_name),
+                                                 interpreter_mode,
+                                                 (*return_node)->type,
+                                                 walk_state->owner_id));
+
+                               acpi_ns_delete_children(*return_node);
+                               if (acpi_gbl_runtime_namespace_override) {
+                                       acpi_ut_remove_reference((*return_node)->object);
+                                       (*return_node)->object = NULL;
+                                       (*return_node)->owner_id =
+                                           walk_state->owner_id;
+                               } else {
+                                       acpi_ns_remove_node(*return_node);
+                                       *return_node = ACPI_ENTRY_NOT_FOUND;
+                               }
+                       }
+
+                       /* Return an error if we don't expect to find the object */
+
+                       else if (flags & ACPI_NS_ERROR_IF_FOUND) {
+                               status = AE_ALREADY_EXISTS;
+                       }
                }
 #ifdef ACPI_ASL_COMPILER
                if (*return_node && (*return_node)->type == ACPI_TYPE_ANY) {
index 6ad02008c0c23ae9cb7bbd3f4d8fa86b348f2dc6..8d8104b8bd28affdf70a35c38295116c25ec423c 100644 (file)
@@ -292,8 +292,7 @@ acpi_status acpi_ns_build_internal_name(struct acpi_namestring_info *info)
                        } else {
                                /* Convert the character to uppercase and save it */
 
-                               result[i] =
-                                   (char)ACPI_TOUPPER((int)*external_name);
+                               result[i] = (char)toupper((int)*external_name);
                                external_name++;
                        }
                }
index b6030a2deee1248ab9b783bc3d30ce7ac9fbaa75..6ee1e52b903d344d31ed5b140f8385204cb5c94c 100644 (file)
@@ -696,7 +696,7 @@ acpi_ns_get_device_callback(acpi_handle obj_handle,
                        return (AE_CTRL_DEPTH);
                }
 
-               no_match = ACPI_STRCMP(hid->string, info->hid);
+               no_match = strcmp(hid->string, info->hid);
                ACPI_FREE(hid);
 
                if (no_match) {
@@ -715,8 +715,7 @@ acpi_ns_get_device_callback(acpi_handle obj_handle,
 
                        found = FALSE;
                        for (i = 0; i < cid->count; i++) {
-                               if (ACPI_STRCMP(cid->ids[i].string, info->hid)
-                                   == 0) {
+                               if (strcmp(cid->ids[i].string, info->hid) == 0) {
 
                                        /* Found a matching CID */
 
index d66c326485d82e769d5c93d4692ac0b02ca277f1..9ff643b9553fe9a4d97a803b4a0b0e7def2b299f 100644 (file)
@@ -114,7 +114,7 @@ acpi_get_handle(acpi_handle parent,
 
                /* Special case for root-only, since we can't search for it */
 
-               if (!ACPI_STRCMP(pathname, ACPI_NS_ROOT_PATH)) {
+               if (!strcmp(pathname, ACPI_NS_ROOT_PATH)) {
                        *ret_handle =
                            ACPI_CAST_PTR(acpi_handle, acpi_gbl_root_node);
                        return (AE_OK);
@@ -242,7 +242,7 @@ static char *acpi_ns_copy_device_id(struct acpi_pnp_device_id *dest,
 
        /* Copy actual string and return a pointer to the next string area */
 
-       ACPI_MEMCPY(string_area, source->string, source->length);
+       memcpy(string_area, source->string, source->length);
        return (string_area + source->length);
 }
 
@@ -260,7 +260,7 @@ static char *acpi_ns_copy_device_id(struct acpi_pnp_device_id *dest,
  *              control methods (Such as in the case of a device.)
  *
  * For Device and Processor objects, run the Device _HID, _UID, _CID, _SUB,
- * _STA, _ADR, _sx_w, and _sx_d methods.
+ * _CLS, _STA, _ADR, _sx_w, and _sx_d methods.
  *
  * Note: Allocates the return buffer, must be freed by the caller.
  *
@@ -276,11 +276,12 @@ acpi_get_object_info(acpi_handle handle,
        struct acpi_pnp_device_id *hid = NULL;
        struct acpi_pnp_device_id *uid = NULL;
        struct acpi_pnp_device_id *sub = NULL;
+       struct acpi_pnp_device_id *cls = NULL;
        char *next_id_string;
        acpi_object_type type;
        acpi_name name;
        u8 param_count = 0;
-       u8 valid = 0;
+       u16 valid = 0;
        u32 info_size;
        u32 i;
        acpi_status status;
@@ -320,7 +321,7 @@ acpi_get_object_info(acpi_handle handle,
        if ((type == ACPI_TYPE_DEVICE) || (type == ACPI_TYPE_PROCESSOR)) {
                /*
                 * Get extra info for ACPI Device/Processor objects only:
-                * Run the Device _HID, _UID, _SUB, and _CID methods.
+                * Run the Device _HID, _UID, _SUB, _CID, and _CLS methods.
                 *
                 * Note: none of these methods are required, so they may or may
                 * not be present for this device. The Info->Valid bitfield is used
@@ -363,6 +364,14 @@ acpi_get_object_info(acpi_handle handle,
                             sizeof(struct acpi_pnp_device_id_list));
                        valid |= ACPI_VALID_CID;
                }
+
+               /* Execute the Device._CLS method */
+
+               status = acpi_ut_execute_CLS(node, &cls);
+               if (ACPI_SUCCESS(status)) {
+                       info_size += cls->length;
+                       valid |= ACPI_VALID_CLS;
+               }
        }
 
        /*
@@ -486,6 +495,11 @@ acpi_get_object_info(acpi_handle handle,
                }
        }
 
+       if (cls) {
+               next_id_string = acpi_ns_copy_device_id(&info->class_code,
+                                                       cls, next_id_string);
+       }
+
        /* Copy the fixed-length data */
 
        info->info_size = info_size;
@@ -510,6 +524,9 @@ cleanup:
        if (cid_list) {
                ACPI_FREE(cid_list);
        }
+       if (cls) {
+               ACPI_FREE(cls);
+       }
        return (status);
 }
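
Not part of the patch: a minimal, hedged sketch of how the _CLS data added to acpi_get_object_info() above could be consumed. Assumptions are the usual in-kernel caller pattern (a valid acpi_handle, kfree() of the returned info block) and the ACPI_VALID_CLS bit introduced by this series; the helper name is hypothetical.

#include <linux/acpi.h>
#include <linux/slab.h>

/* Hypothetical caller, for illustration only: print the PCI class code
 * string ("BBSSPP") if the device provided a _CLS object. */
static void show_device_class(acpi_handle handle)
{
        struct acpi_device_info *info;
        acpi_status status;

        status = acpi_get_object_info(handle, &info);
        if (ACPI_FAILURE(status))
                return;

        if (info->valid & ACPI_VALID_CLS)
                pr_info("PCI class code: %s\n", info->class_code.string);

        kfree(info);    /* info block is allocated by ACPICA; caller frees it */
}

As the comment in this hunk notes, none of these ID methods are required, so checking info->valid before touching class_code is what keeps such a caller safe on devices without _CLS.
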
 
@@ -620,7 +637,7 @@ acpi_status acpi_install_method(u8 *buffer)
 
        /* Copy the method AML to the local buffer */
 
-       ACPI_MEMCPY(aml_buffer, aml_start, aml_length);
+       memcpy(aml_buffer, aml_start, aml_length);
 
        /* Initialize the method object with the new method's information */
 
index 960505ab409a8b3958336058bb26837b6c9109c0..32440912023a7d4a8ab65bf84de8df7beb37d633 100644 (file)
@@ -93,10 +93,9 @@ void acpi_ps_init_op(union acpi_parse_object *op, u16 opcode)
        op->common.descriptor_type = ACPI_DESC_TYPE_PARSER;
        op->common.aml_opcode = opcode;
 
-       ACPI_DISASM_ONLY_MEMBERS(ACPI_STRNCPY(op->common.aml_op_name,
-                                             (acpi_ps_get_opcode_info
-                                              (opcode))->name,
-                                             sizeof(op->common.aml_op_name)));
+       ACPI_DISASM_ONLY_MEMBERS(strncpy(op->common.aml_op_name,
+                                        (acpi_ps_get_opcode_info(opcode))->
+                                        name, sizeof(op->common.aml_op_name)));
 }
 
 /*******************************************************************************
index 15434e4c9b344411f6b3c0ec8dbb5193f81b9e41..3fa829e96c2a0de77e079fb8d95a3a7f25240a4f 100644 (file)
@@ -353,13 +353,13 @@ acpi_rs_create_pci_routing_table(union acpi_operand_object *package_object,
                                /* +1 to include null terminator */
 
                                user_prt->length +=
-                                   (u32) ACPI_STRLEN(user_prt->source) + 1;
+                                   (u32)strlen(user_prt->source) + 1;
                                break;
 
                        case ACPI_TYPE_STRING:
 
-                               ACPI_STRCPY(user_prt->source,
-                                           obj_desc->string.pointer);
+                               strcpy(user_prt->source,
+                                      obj_desc->string.pointer);
 
                                /*
                                 * Add to the Length field the length of the string
index 1fe49d22366333b2172e82d622bfe7f6289d8f0d..ac37852e082173869fef4cc872e6734451d0419f 100644 (file)
@@ -119,7 +119,7 @@ acpi_rs_convert_aml_to_resource(struct acpi_resource *resource,
                        /*
                         * Get the resource type and the initial (minimum) length
                         */
-                       ACPI_MEMSET(resource, 0, INIT_RESOURCE_LENGTH(info));
+                       memset(resource, 0, INIT_RESOURCE_LENGTH(info));
                        resource->type = INIT_RESOURCE_TYPE(info);
                        resource->length = INIT_RESOURCE_LENGTH(info);
                        break;
@@ -324,13 +324,13 @@ acpi_rs_convert_aml_to_resource(struct acpi_resource *resource,
 
                case ACPI_RSC_SET8:
 
-                       ACPI_MEMSET(destination, info->aml_offset, info->value);
+                       memset(destination, info->aml_offset, info->value);
                        break;
 
                case ACPI_RSC_DATA8:
 
                        target = ACPI_ADD_PTR(char, resource, info->value);
-                       ACPI_MEMCPY(destination, source, ACPI_GET16(target));
+                       memcpy(destination, source, ACPI_GET16(target));
                        break;
 
                case ACPI_RSC_ADDRESS:
@@ -502,7 +502,7 @@ acpi_rs_convert_resource_to_aml(struct acpi_resource *resource,
                switch (info->opcode) {
                case ACPI_RSC_INITSET:
 
-                       ACPI_MEMSET(aml, 0, INIT_RESOURCE_LENGTH(info));
+                       memset(aml, 0, INIT_RESOURCE_LENGTH(info));
                        aml_length = INIT_RESOURCE_LENGTH(info);
                        acpi_rs_set_resource_header(INIT_RESOURCE_TYPE(info),
                                                    aml_length, aml);
index ece3cd60cc6a0664608cbe97b1110845ce26330d..52b024df00524bffae3bb1117cffa0d3dc31eb3b 100644 (file)
@@ -148,7 +148,7 @@ acpi_rs_move_data(void *destination, void *source, u16 item_count, u8 move_type)
                case ACPI_RSC_MOVE_SERIAL_VEN:
                case ACPI_RSC_MOVE_SERIAL_RES:
 
-                       ACPI_MEMCPY(destination, source, item_count);
+                       memcpy(destination, source, item_count);
                        return;
 
                        /*
@@ -364,12 +364,11 @@ acpi_rs_get_resource_source(acpi_rs_length resource_length,
                 * Zero the entire area of the buffer.
                 */
                total_length =
-                   (u32)
-                   ACPI_STRLEN(ACPI_CAST_PTR(char, &aml_resource_source[1])) +
+                   (u32)strlen(ACPI_CAST_PTR(char, &aml_resource_source[1])) +
                    1;
-               total_length = (u32) ACPI_ROUND_UP_TO_NATIVE_WORD(total_length);
+               total_length = (u32)ACPI_ROUND_UP_TO_NATIVE_WORD(total_length);
 
-               ACPI_MEMSET(resource_source->string_ptr, 0, total_length);
+               memset(resource_source->string_ptr, 0, total_length);
 
                /* Copy the resource_source string to the destination */
 
@@ -432,8 +431,8 @@ acpi_rs_set_resource_source(union aml_resource * aml,
 
                /* Copy the resource_source string */
 
-               ACPI_STRCPY(ACPI_CAST_PTR(char, &aml_resource_source[1]),
-                           resource_source->string_ptr);
+               strcpy(ACPI_CAST_PTR(char, &aml_resource_source[1]),
+                      resource_source->string_ptr);
 
                /*
                 * Add the length of the string (+ 1 for null terminator) to the
index 8e6276df0226ef5ff5e5297afa0d5708e471c738..de51f836ef68e23280897bda0aca300b5341db90 100644 (file)
@@ -398,8 +398,8 @@ acpi_resource_to_address64(struct acpi_resource *resource,
 
                /* Simple copy for 64 bit source */
 
-               ACPI_MEMCPY(out, &resource->data,
-                           sizeof(struct acpi_resource_address64));
+               memcpy(out, &resource->data,
+                      sizeof(struct acpi_resource_address64));
                break;
 
        default:
@@ -499,7 +499,7 @@ acpi_rs_match_vendor_resource(struct acpi_resource *resource, void *context)
         */
        if ((vendor->byte_length < (ACPI_UUID_LENGTH + 1)) ||
            (vendor->uuid_subtype != info->uuid->subtype) ||
-           (ACPI_MEMCMP(vendor->uuid, info->uuid->data, ACPI_UUID_LENGTH))) {
+           (memcmp(vendor->uuid, info->uuid->data, ACPI_UUID_LENGTH))) {
                return (AE_OK);
        }
 
@@ -513,7 +513,7 @@ acpi_rs_match_vendor_resource(struct acpi_resource *resource, void *context)
 
        /* Found the correct resource, copy and return it */
 
-       ACPI_MEMCPY(buffer->pointer, resource, resource->length);
+       memcpy(buffer->pointer, resource, resource->length);
        buffer->length = resource->length;
 
        /* Found the desired descriptor, terminate resource walk */
index d7f8386455bdc0e2ce35a36b7f80c188d376b86f..5c9d5abf15887e4e5d42f3e8651eee0876bb1697 100644 (file)
@@ -73,7 +73,7 @@ acpi_tb_init_table_descriptor(struct acpi_table_desc *table_desc,
         * Initialize the table descriptor. Set the pointer to NULL, since the
         * table is not fully mapped at this time.
         */
-       ACPI_MEMSET(table_desc, 0, sizeof(struct acpi_table_desc));
+       memset(table_desc, 0, sizeof(struct acpi_table_desc));
        table_desc->address = address;
        table_desc->length = table->length;
        table_desc->flags = flags;
@@ -465,9 +465,9 @@ acpi_status acpi_tb_resize_root_table_list(void)
        /* Copy and free the previous table array */
 
        if (acpi_gbl_root_table_list.tables) {
-               ACPI_MEMCPY(tables, acpi_gbl_root_table_list.tables,
-                           (acpi_size) table_count *
-                           sizeof(struct acpi_table_desc));
+               memcpy(tables, acpi_gbl_root_table_list.tables,
+                      (acpi_size) table_count *
+                      sizeof(struct acpi_table_desc));
 
                if (acpi_gbl_root_table_list.flags & ACPI_ROOT_ORIGIN_ALLOCATED) {
                        ACPI_FREE(acpi_gbl_root_table_list.tables);
index 7d2486005e3f24fe0ca38d61d8dc36d21362066e..6253001b6375d16629b7066f23377460ecbf0fce 100644 (file)
@@ -350,9 +350,18 @@ void acpi_tb_parse_fadt(u32 table_index)
        /* If Hardware Reduced flag is set, there is no FACS */
 
        if (!acpi_gbl_reduced_hardware) {
-               acpi_tb_install_fixed_table((acpi_physical_address)
-                                           acpi_gbl_FADT.Xfacs, ACPI_SIG_FACS,
-                                           ACPI_TABLE_INDEX_FACS);
+               if (acpi_gbl_FADT.facs) {
+                       acpi_tb_install_fixed_table((acpi_physical_address)
+                                                   acpi_gbl_FADT.facs,
+                                                   ACPI_SIG_FACS,
+                                                   ACPI_TABLE_INDEX_FACS);
+               }
+               if (acpi_gbl_FADT.Xfacs) {
+                       acpi_tb_install_fixed_table((acpi_physical_address)
+                                                   acpi_gbl_FADT.Xfacs,
+                                                   ACPI_SIG_FACS,
+                                                   ACPI_TABLE_INDEX_X_FACS);
+               }
        }
 }
 
@@ -389,12 +398,12 @@ void acpi_tb_create_local_fadt(struct acpi_table_header *table, u32 length)
 
        /* Clear the entire local FADT */
 
-       ACPI_MEMSET(&acpi_gbl_FADT, 0, sizeof(struct acpi_table_fadt));
+       memset(&acpi_gbl_FADT, 0, sizeof(struct acpi_table_fadt));
 
        /* Copy the original FADT, up to sizeof (struct acpi_table_fadt) */
 
-       ACPI_MEMCPY(&acpi_gbl_FADT, table,
-                   ACPI_MIN(length, sizeof(struct acpi_table_fadt)));
+       memcpy(&acpi_gbl_FADT, table,
+              ACPI_MIN(length, sizeof(struct acpi_table_fadt)));
 
        /* Take a copy of the Hardware Reduced flag */
 
@@ -491,13 +500,9 @@ static void acpi_tb_convert_fadt(void)
        acpi_gbl_FADT.header.length = sizeof(struct acpi_table_fadt);
 
        /*
-        * Expand the 32-bit FACS and DSDT addresses to 64-bit as necessary.
+        * Expand the 32-bit DSDT addresses to 64-bit as necessary.
         * Later ACPICA code will always use the X 64-bit field.
         */
-       acpi_gbl_FADT.Xfacs = acpi_tb_select_address("FACS",
-                                                    acpi_gbl_FADT.facs,
-                                                    acpi_gbl_FADT.Xfacs);
-
        acpi_gbl_FADT.Xdsdt = acpi_tb_select_address("DSDT",
                                                     acpi_gbl_FADT.dsdt,
                                                     acpi_gbl_FADT.Xdsdt);
index 0b879fcfef670c535f30ac57cb403ff87dcbf266..119c84ad98334e2404fb72e585c4ec30ee49c684 100644 (file)
@@ -76,16 +76,16 @@ acpi_tb_find_table(char *signature,
 
        /* Normalize the input strings */
 
-       ACPI_MEMSET(&header, 0, sizeof(struct acpi_table_header));
+       memset(&header, 0, sizeof(struct acpi_table_header));
        ACPI_MOVE_NAME(header.signature, signature);
-       ACPI_STRNCPY(header.oem_id, oem_id, ACPI_OEM_ID_SIZE);
-       ACPI_STRNCPY(header.oem_table_id, oem_table_id, ACPI_OEM_TABLE_ID_SIZE);
+       strncpy(header.oem_id, oem_id, ACPI_OEM_ID_SIZE);
+       strncpy(header.oem_table_id, oem_table_id, ACPI_OEM_TABLE_ID_SIZE);
 
        /* Search for the table */
 
        for (i = 0; i < acpi_gbl_root_table_list.current_table_count; ++i) {
-               if (ACPI_MEMCMP(&(acpi_gbl_root_table_list.tables[i].signature),
-                               header.signature, ACPI_NAME_SIZE)) {
+               if (memcmp(&(acpi_gbl_root_table_list.tables[i].signature),
+                          header.signature, ACPI_NAME_SIZE)) {
 
                        /* Not the requested table */
 
@@ -112,21 +112,20 @@ acpi_tb_find_table(char *signature,
 
                /* Check for table match on all IDs */
 
-               if (!ACPI_MEMCMP
+               if (!memcmp
                    (acpi_gbl_root_table_list.tables[i].pointer->signature,
                     header.signature, ACPI_NAME_SIZE) && (!oem_id[0]
                                                           ||
-                                                          !ACPI_MEMCMP
+                                                          !memcmp
                                                           (acpi_gbl_root_table_list.
                                                            tables[i].pointer->
                                                            oem_id,
                                                            header.oem_id,
                                                            ACPI_OEM_ID_SIZE))
                    && (!oem_table_id[0]
-                       || !ACPI_MEMCMP(acpi_gbl_root_table_list.tables[i].
-                                       pointer->oem_table_id,
-                                       header.oem_table_id,
-                                       ACPI_OEM_TABLE_ID_SIZE))) {
+                       || !memcmp(acpi_gbl_root_table_list.tables[i].pointer->
+                                  oem_table_id, header.oem_table_id,
+                                  ACPI_OEM_TABLE_ID_SIZE))) {
                        *table_index = i;
 
                        ACPI_DEBUG_PRINT((ACPI_DB_TABLES,
index 008a251780f4c955194c7193dd943bf955028b34..15ea98e0068d80971834a5d15bd5c7ef30356f41 100644 (file)
@@ -87,8 +87,8 @@ acpi_tb_compare_tables(struct acpi_table_desc *table_desc, u32 table_index)
         * not just the header.
         */
        is_identical = (u8)((table_desc->length != table_length ||
-                            ACPI_MEMCMP(table_desc->pointer, table,
-                                        table_length)) ? FALSE : TRUE);
+                            memcmp(table_desc->pointer, table, table_length)) ?
+                           FALSE : TRUE);
 
        /* Release the acquired table */
 
@@ -289,8 +289,7 @@ acpi_tb_install_standard_table(acpi_physical_address address,
                if ((new_table_desc.signature.ascii[0] != 0x00) &&
                    (!ACPI_COMPARE_NAME
                     (&new_table_desc.signature, ACPI_SIG_SSDT))
-                   && (ACPI_STRNCMP(new_table_desc.signature.ascii, "OEM", 3)))
-               {
+                   && (strncmp(new_table_desc.signature.ascii, "OEM", 3))) {
                        ACPI_BIOS_ERROR((AE_INFO,
                                         "Table has invalid signature [%4.4s] (0x%8.8X), "
                                         "must be SSDT or OEMx",
index 77ba5c71c6e787e88a7f7923e44e1bc3adfeb7ca..709d5112fc1679db4a6ff28412a27bbe5c334d81 100644 (file)
@@ -73,7 +73,7 @@ static void acpi_tb_fix_string(char *string, acpi_size length)
 {
 
        while (length && *string) {
-               if (!ACPI_IS_PRINT(*string)) {
+               if (!isprint((int)*string)) {
                        *string = '?';
                }
                string++;
@@ -100,7 +100,7 @@ acpi_tb_cleanup_table_header(struct acpi_table_header *out_header,
                             struct acpi_table_header *header)
 {
 
-       ACPI_MEMCPY(out_header, header, sizeof(struct acpi_table_header));
+       memcpy(out_header, header, sizeof(struct acpi_table_header));
 
        acpi_tb_fix_string(out_header->signature, ACPI_NAME_SIZE);
        acpi_tb_fix_string(out_header->oem_id, ACPI_OEM_ID_SIZE);
@@ -138,9 +138,9 @@ acpi_tb_print_table_header(acpi_physical_address address,
 
                /* RSDP has no common fields */
 
-               ACPI_MEMCPY(local_header.oem_id,
-                           ACPI_CAST_PTR(struct acpi_table_rsdp,
-                                         header)->oem_id, ACPI_OEM_ID_SIZE);
+               memcpy(local_header.oem_id,
+                      ACPI_CAST_PTR(struct acpi_table_rsdp, header)->oem_id,
+                      ACPI_OEM_ID_SIZE);
                acpi_tb_fix_string(local_header.oem_id, ACPI_OEM_ID_SIZE);
 
                ACPI_INFO((AE_INFO, "RSDP 0x%8.8X%8.8X %06X (v%.2d %-6.6s)",
index 6559a58439c5dfadb0adbb19ac8c016d4197573b..568ac0e4a3c6a784efe38349213c26146888f6d5 100644 (file)
@@ -68,7 +68,6 @@ acpi_tb_get_root_table_entry(u8 *table_entry, u32 table_entry_size);
 
 acpi_status acpi_tb_initialize_facs(void)
 {
-       acpi_status status;
 
        /* If Hardware Reduced flag is set, there is no FACS */
 
@@ -77,11 +76,25 @@ acpi_status acpi_tb_initialize_facs(void)
                return (AE_OK);
        }
 
-       status = acpi_get_table_by_index(ACPI_TABLE_INDEX_FACS,
-                                        ACPI_CAST_INDIRECT_PTR(struct
-                                                               acpi_table_header,
-                                                               &acpi_gbl_FACS));
-       return (status);
+       (void)acpi_get_table_by_index(ACPI_TABLE_INDEX_FACS,
+                                     ACPI_CAST_INDIRECT_PTR(struct
+                                                            acpi_table_header,
+                                                            &acpi_gbl_facs32));
+       (void)acpi_get_table_by_index(ACPI_TABLE_INDEX_X_FACS,
+                                     ACPI_CAST_INDIRECT_PTR(struct
+                                                            acpi_table_header,
+                                                            &acpi_gbl_facs64));
+
+       if (acpi_gbl_facs64
+           && (!acpi_gbl_facs32 || !acpi_gbl_use32_bit_facs_addresses)) {
+               acpi_gbl_FACS = acpi_gbl_facs64;
+       } else if (acpi_gbl_facs32) {
+               acpi_gbl_FACS = acpi_gbl_facs32;
+       }
+
+       /* If there is no FACS, just continue. There was already an error msg */
+
+       return (AE_OK);
 }
 #endif                         /* !ACPI_REDUCED_HARDWARE */
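
The FACS precedence introduced above (prefer the 64-bit X_FACS unless the host asked for 32-bit FACS addresses and a 32-bit FACS is present) is easier to see in isolation. A hedged sketch of that rule using a hypothetical standalone helper, not an ACPICA symbol:

/* Illustration only: mirrors the precedence of the new
 * acpi_tb_initialize_facs() logic above; not an ACPICA function. */
const void *choose_facs(const void *facs32, const void *facs64,
                        int use_32bit_facs_addresses)
{
        if (facs64 && (!facs32 || !use_32bit_facs_addresses))
                return facs64;          /* default: 64-bit X_FACS wins */
        if (facs32)
                return facs32;          /* 32-bit FACS requested or only option */
        return NULL;                    /* no FACS; callers simply continue */
}
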
 
@@ -101,7 +114,7 @@ acpi_status acpi_tb_initialize_facs(void)
 u8 acpi_tb_tables_loaded(void)
 {
 
-       if (acpi_gbl_root_table_list.current_table_count >= 3) {
+       if (acpi_gbl_root_table_list.current_table_count >= 4) {
                return (TRUE);
        }
 
@@ -175,7 +188,7 @@ struct acpi_table_header *acpi_tb_copy_dsdt(u32 table_index)
                return (NULL);
        }
 
-       ACPI_MEMCPY(new_table, table_desc->pointer, table_desc->length);
+       memcpy(new_table, table_desc->pointer, table_desc->length);
        acpi_tb_uninstall_table(table_desc);
 
        acpi_tb_init_table_descriptor(&acpi_gbl_root_table_list.
@@ -357,11 +370,11 @@ acpi_status __init acpi_tb_parse_root_table(acpi_physical_address rsdp_address)
        table_entry = ACPI_ADD_PTR(u8, table, sizeof(struct acpi_table_header));
 
        /*
-        * First two entries in the table array are reserved for the DSDT
-        * and FACS, which are not actually present in the RSDT/XSDT - they
-        * come from the FADT
+        * First three entries in the table array are reserved for the DSDT
+        * and 32bit/64bit FACS, which are not actually present in the
+        * RSDT/XSDT - they come from the FADT
         */
-       acpi_gbl_root_table_list.current_table_count = 2;
+       acpi_gbl_root_table_list.current_table_count = 3;
 
        /* Initialize the root table array from the RSDT/XSDT */
 
index 60e94f87f27aeea917c9705358ea4675666363d9..5559e2c70b15634384fa907b1934d1df4c97bdf3 100644 (file)
@@ -119,9 +119,9 @@ acpi_initialize_tables(struct acpi_table_desc * initial_table_array,
        } else {
                /* Root Table Array has been statically allocated by the host */
 
-               ACPI_MEMSET(initial_table_array, 0,
-                           (acpi_size) initial_table_count *
-                           sizeof(struct acpi_table_desc));
+               memset(initial_table_array, 0,
+                      (acpi_size) initial_table_count *
+                      sizeof(struct acpi_table_desc));
 
                acpi_gbl_root_table_list.tables = initial_table_array;
                acpi_gbl_root_table_list.max_table_count = initial_table_count;
@@ -242,8 +242,9 @@ acpi_get_table_header(char *signature,
                                if (!header) {
                                        return (AE_NO_MEMORY);
                                }
-                               ACPI_MEMCPY(out_table_header, header,
-                                           sizeof(struct acpi_table_header));
+
+                               memcpy(out_table_header, header,
+                                      sizeof(struct acpi_table_header));
                                acpi_os_unmap_memory(header,
                                                     sizeof(struct
                                                            acpi_table_header));
@@ -251,9 +252,9 @@ acpi_get_table_header(char *signature,
                                return (AE_NOT_FOUND);
                        }
                } else {
-                       ACPI_MEMCPY(out_table_header,
-                                   acpi_gbl_root_table_list.tables[i].pointer,
-                                   sizeof(struct acpi_table_header));
+                       memcpy(out_table_header,
+                              acpi_gbl_root_table_list.tables[i].pointer,
+                              sizeof(struct acpi_table_header));
                }
                return (AE_OK);
        }
index aadb3002a2ddd9cfb020d2cbcf49919313c1c589..9682d40ca6ffe70f8c50078fc4f89172b7b37702 100644 (file)
@@ -150,8 +150,8 @@ static acpi_status acpi_tb_load_namespace(void)
         * Save the original DSDT header for detection of table corruption
         * and/or replacement of the DSDT from outside the OS.
         */
-       ACPI_MEMCPY(&acpi_gbl_original_dsdt_header, acpi_gbl_DSDT,
-                   sizeof(struct acpi_table_header));
+       memcpy(&acpi_gbl_original_dsdt_header, acpi_gbl_DSDT,
+              sizeof(struct acpi_table_header));
 
        (void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
 
@@ -166,13 +166,18 @@ static acpi_status acpi_tb_load_namespace(void)
 
        (void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
        for (i = 0; i < acpi_gbl_root_table_list.current_table_count; ++i) {
-               if ((!ACPI_COMPARE_NAME
+               if (!acpi_gbl_root_table_list.tables[i].address ||
+                   (!ACPI_COMPARE_NAME
                     (&(acpi_gbl_root_table_list.tables[i].signature),
                      ACPI_SIG_SSDT)
                     &&
                     !ACPI_COMPARE_NAME(&
                                        (acpi_gbl_root_table_list.tables[i].
-                                        signature), ACPI_SIG_PSDT))
+                                        signature), ACPI_SIG_PSDT)
+                    &&
+                    !ACPI_COMPARE_NAME(&
+                                       (acpi_gbl_root_table_list.tables[i].
+                                        signature), ACPI_SIG_OSDT))
                    ||
                    ACPI_FAILURE(acpi_tb_validate_table
                                 (&acpi_gbl_root_table_list.tables[i]))) {
@@ -219,9 +224,9 @@ acpi_install_table(acpi_physical_address address, u8 physical)
        ACPI_FUNCTION_TRACE(acpi_install_table);
 
        if (physical) {
-               flags = ACPI_TABLE_ORIGIN_EXTERNAL_VIRTUAL;
-       } else {
                flags = ACPI_TABLE_ORIGIN_INTERNAL_PHYSICAL;
+       } else {
+               flags = ACPI_TABLE_ORIGIN_EXTERNAL_VIRTUAL;
        }
 
        status = acpi_tb_install_standard_table(address, flags,
index 61d8f6d186d11c15c0f6f6c9489cf2b6d7fdae32..7a4101f0685e90d748fc8910255abf2df517331c 100644 (file)
@@ -73,7 +73,7 @@ void *acpi_os_allocate_zeroed(acpi_size size)
 
                /* Clear the memory block */
 
-               ACPI_MEMSET(allocation, 0, size);
+               memset(allocation, 0, size);
        }
 
        return (allocation);
@@ -181,7 +181,7 @@ acpi_status acpi_ut_delete_caches(void)
        char buffer[7];
 
        if (acpi_gbl_display_final_mem_stats) {
-               ACPI_STRCPY(buffer, "MEMORY");
+               strcpy(buffer, "MEMORY");
                (void)acpi_db_display_statistics(buffer);
        }
 #endif
@@ -337,6 +337,6 @@ acpi_ut_initialize_buffer(struct acpi_buffer * buffer,
 
        /* Have a valid buffer, clear it */
 
-       ACPI_MEMSET(buffer->pointer, 0, required_length);
+       memset(buffer->pointer, 0, required_length);
        return (AE_OK);
 }
index a8c39643e6181fbebff0aa047b1391551720082a..01c8709ca58694a17c4e766fd852a14898ac19d8 100644 (file)
@@ -159,7 +159,7 @@ void acpi_ut_dump_buffer(u8 *buffer, u32 count, u32 display, u32 base_offset)
                        }
 
                        buf_char = buffer[(acpi_size) i + j];
-                       if (ACPI_IS_PRINT(buf_char)) {
+                       if (isprint(buf_char)) {
                                acpi_os_printf("%c", buf_char);
                        } else {
                                acpi_os_printf(".");
@@ -319,7 +319,7 @@ acpi_ut_dump_buffer_to_file(ACPI_FILE file,
                        }
 
                        buf_char = buffer[(acpi_size) i + j];
-                       if (ACPI_IS_PRINT(buf_char)) {
+                       if (isprint(buf_char)) {
                                acpi_ut_file_printf(file, "%c", buf_char);
                        } else {
                                acpi_ut_file_printf(file, ".");
index eacc5eee362ebd9e7c5290ff6054ee5009ca633f..0d21fbd993633f774d78ca93f7bef2a33f2ff72f 100644 (file)
@@ -84,7 +84,7 @@ acpi_os_create_cache(char *cache_name,
 
        /* Populate the cache object and return it */
 
-       ACPI_MEMSET(cache, 0, sizeof(struct acpi_memory_list));
+       memset(cache, 0, sizeof(struct acpi_memory_list));
        cache->list_name = cache_name;
        cache->object_size = object_size;
        cache->max_depth = max_depth;
@@ -212,7 +212,7 @@ acpi_os_release_object(struct acpi_memory_list * cache, void *object)
 
                /* Mark the object as cached */
 
-               ACPI_MEMSET(object, 0xCA, cache->object_size);
+               memset(object, 0xCA, cache->object_size);
                ACPI_SET_DESCRIPTOR_TYPE(object, ACPI_DESC_TYPE_CACHED);
 
                /* Put the object at the head of the cache list */
@@ -281,7 +281,7 @@ void *acpi_os_acquire_object(struct acpi_memory_list *cache)
 
                /* Clear (zero) the previously used Object */
 
-               ACPI_MEMSET(object, 0, cache->object_size);
+               memset(object, 0, cache->object_size);
        } else {
                /* The cache is empty, create a new object */
 
index c37ec5035f4c5f48e207cbd5082db3f55780550d..257221d452c8839262faf80465969b0ec75d8a54 100644 (file)
@@ -129,7 +129,7 @@ acpi_ut_copy_isimple_to_esimple(union acpi_operand_object *internal_object,
 
        /* Always clear the external object */
 
-       ACPI_MEMSET(external_object, 0, sizeof(union acpi_object));
+       memset(external_object, 0, sizeof(union acpi_object));
 
        /*
         * In general, the external object will be the same type as
@@ -149,9 +149,9 @@ acpi_ut_copy_isimple_to_esimple(union acpi_operand_object *internal_object,
                                                                  string.
                                                                  length + 1);
 
-               ACPI_MEMCPY((void *)data_space,
-                           (void *)internal_object->string.pointer,
-                           (acpi_size) internal_object->string.length + 1);
+               memcpy((void *)data_space,
+                      (void *)internal_object->string.pointer,
+                      (acpi_size) internal_object->string.length + 1);
                break;
 
        case ACPI_TYPE_BUFFER:
@@ -162,9 +162,9 @@ acpi_ut_copy_isimple_to_esimple(union acpi_operand_object *internal_object,
                    ACPI_ROUND_UP_TO_NATIVE_WORD(internal_object->string.
                                                 length);
 
-               ACPI_MEMCPY((void *)data_space,
-                           (void *)internal_object->buffer.pointer,
-                           internal_object->buffer.length);
+               memcpy((void *)data_space,
+                      (void *)internal_object->buffer.pointer,
+                      internal_object->buffer.length);
                break;
 
        case ACPI_TYPE_INTEGER:
@@ -502,9 +502,9 @@ acpi_ut_copy_esimple_to_isimple(union acpi_object *external_object,
                        goto error_exit;
                }
 
-               ACPI_MEMCPY(internal_object->string.pointer,
-                           external_object->string.pointer,
-                           external_object->string.length);
+               memcpy(internal_object->string.pointer,
+                      external_object->string.pointer,
+                      external_object->string.length);
 
                internal_object->string.length = external_object->string.length;
                break;
@@ -517,9 +517,9 @@ acpi_ut_copy_esimple_to_isimple(union acpi_object *external_object,
                        goto error_exit;
                }
 
-               ACPI_MEMCPY(internal_object->buffer.pointer,
-                           external_object->buffer.pointer,
-                           external_object->buffer.length);
+               memcpy(internal_object->buffer.pointer,
+                      external_object->buffer.pointer,
+                      external_object->buffer.length);
 
                internal_object->buffer.length = external_object->buffer.length;
 
@@ -694,8 +694,8 @@ acpi_ut_copy_simple_object(union acpi_operand_object *source_desc,
                copy_size = sizeof(struct acpi_namespace_node);
        }
 
-       ACPI_MEMCPY(ACPI_CAST_PTR(char, dest_desc),
-                   ACPI_CAST_PTR(char, source_desc), copy_size);
+       memcpy(ACPI_CAST_PTR(char, dest_desc),
+              ACPI_CAST_PTR(char, source_desc), copy_size);
 
        /* Restore the saved fields */
 
@@ -725,9 +725,9 @@ acpi_ut_copy_simple_object(union acpi_operand_object *source_desc,
 
                        /* Copy the actual buffer data */
 
-                       ACPI_MEMCPY(dest_desc->buffer.pointer,
-                                   source_desc->buffer.pointer,
-                                   source_desc->buffer.length);
+                       memcpy(dest_desc->buffer.pointer,
+                              source_desc->buffer.pointer,
+                              source_desc->buffer.length);
                }
                break;
 
@@ -747,9 +747,9 @@ acpi_ut_copy_simple_object(union acpi_operand_object *source_desc,
 
                        /* Copy the actual string data */
 
-                       ACPI_MEMCPY(dest_desc->string.pointer,
-                                   source_desc->string.pointer,
-                                   (acpi_size) source_desc->string.length + 1);
+                       memcpy(dest_desc->string.pointer,
+                              source_desc->string.pointer,
+                              (acpi_size) source_desc->string.length + 1);
                }
                break;
 
index 4f3f888d33bb189793adc849a9c3217f981927d0..cd02693841db0bdeaa51dcf747c5f06fbf1d1e2f 100644 (file)
@@ -111,8 +111,8 @@ void acpi_ut_track_stack_ptr(void)
  * RETURN:      Updated pointer to the function name
  *
  * DESCRIPTION: Remove the "Acpi" prefix from the function name, if present.
- *              This allows compiler macros such as __func__ to be used with no
- *              change to the debug output.
+ *              This allows compiler macros such as __func__ to be used
+ *              with no change to the debug output.
  *
  ******************************************************************************/
 
index 5e8df9177da44781ac07942d146e47b97e954cda..a72685c1e819660768933e84abe9d1076c953bff 100644 (file)
@@ -102,12 +102,19 @@ const struct acpi_predefined_names acpi_gbl_pre_defined_names[] = {
        {"_SB_", ACPI_TYPE_DEVICE, NULL},
        {"_SI_", ACPI_TYPE_LOCAL_SCOPE, NULL},
        {"_TZ_", ACPI_TYPE_DEVICE, NULL},
-       {"_REV", ACPI_TYPE_INTEGER, (char *)ACPI_CA_SUPPORT_LEVEL},
+       /*
+        * March, 2015:
+        * The _REV object is in the process of being deprecated, because
+        * other ACPI implementations permanently return 2. Thus, it
+        * has little or no value. Return 2 for compatibility with
+        * other ACPI implementations.
+        */
+       {"_REV", ACPI_TYPE_INTEGER, ACPI_CAST_PTR(char, 2)},
        {"_OS_", ACPI_TYPE_STRING, ACPI_OS_NAME},
-       {"_GL_", ACPI_TYPE_MUTEX, (char *)1},
+       {"_GL_", ACPI_TYPE_MUTEX, ACPI_CAST_PTR(char, 1)},
 
 #if !defined (ACPI_NO_METHOD_EXECUTION) || defined (ACPI_CONSTANT_EVAL_ONLY)
-       {"_OSI", ACPI_TYPE_METHOD, (char *)1},
+       {"_OSI", ACPI_TYPE_METHOD, ACPI_CAST_PTR(char, 1)},
 #endif
 
        /* Table terminator */
index 27431cfc1c4476c6128c90f240bb8d090831486c..7956df1e263c1cb1271614cdba965d1592d9a18a 100644 (file)
@@ -1,6 +1,6 @@
 /******************************************************************************
  *
- * Module Name: utids - support for device Ids - HID, UID, CID
+ * Module Name: utids - support for device Ids - HID, UID, CID, SUB, CLS
  *
  *****************************************************************************/
 
@@ -111,7 +111,7 @@ acpi_ut_execute_HID(struct acpi_namespace_node *device_node,
        if (obj_desc->common.type == ACPI_TYPE_INTEGER) {
                acpi_ex_eisa_id_to_string(hid->string, obj_desc->integer.value);
        } else {
-               ACPI_STRCPY(hid->string, obj_desc->string.pointer);
+               strcpy(hid->string, obj_desc->string.pointer);
        }
 
        hid->length = length;
@@ -180,7 +180,7 @@ acpi_ut_execute_SUB(struct acpi_namespace_node *device_node,
 
        /* Simply copy existing string */
 
-       ACPI_STRCPY(sub->string, obj_desc->string.pointer);
+       strcpy(sub->string, obj_desc->string.pointer);
        sub->length = length;
        *return_id = sub;
 
@@ -256,7 +256,7 @@ acpi_ut_execute_UID(struct acpi_namespace_node *device_node,
        if (obj_desc->common.type == ACPI_TYPE_INTEGER) {
                acpi_ex_integer_to_string(uid->string, obj_desc->integer.value);
        } else {
-               ACPI_STRCPY(uid->string, obj_desc->string.pointer);
+               strcpy(uid->string, obj_desc->string.pointer);
        }
 
        uid->length = length;
@@ -393,8 +393,7 @@ acpi_ut_execute_CID(struct acpi_namespace_node *device_node,
 
                        /* Copy the String CID from the returned object */
 
-                       ACPI_STRCPY(next_id_string,
-                                   cid_objects[i]->string.pointer);
+                       strcpy(next_id_string, cid_objects[i]->string.pointer);
                        length = cid_objects[i]->string.length + 1;
                }
 
@@ -416,3 +415,92 @@ cleanup:
        acpi_ut_remove_reference(obj_desc);
        return_ACPI_STATUS(status);
 }
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ut_execute_CLS
+ *
+ * PARAMETERS:  device_node         - Node for the device
+ *              return_id           - Where the _CLS is returned
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Executes the _CLS control method that returns PCI-defined
+ *              class code of the device. The _CLS value is always a package
+ *              containing PCI class information as a list of integers.
+ *              The returned string has format "BBSSPP", where:
+ *                BB = Base-class code
+ *                SS = Sub-class code
+ *                PP = Programming Interface code
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ut_execute_CLS(struct acpi_namespace_node *device_node,
+                   struct acpi_pnp_device_id **return_id)
+{
+       union acpi_operand_object *obj_desc;
+       union acpi_operand_object **cls_objects;
+       u32 count;
+       struct acpi_pnp_device_id *cls;
+       u32 length;
+       acpi_status status;
+       u8 class_code[3] = { 0, 0, 0 };
+
+       ACPI_FUNCTION_TRACE(ut_execute_CLS);
+
+       status = acpi_ut_evaluate_object(device_node, METHOD_NAME__CLS,
+                                        ACPI_BTYPE_PACKAGE, &obj_desc);
+       if (ACPI_FAILURE(status)) {
+               return_ACPI_STATUS(status);
+       }
+
+       /* Get the size of the String to be returned, includes null terminator */
+
+       length = ACPI_PCICLS_STRING_SIZE;
+       cls_objects = obj_desc->package.elements;
+       count = obj_desc->package.count;
+
+       if (obj_desc->common.type == ACPI_TYPE_PACKAGE) {
+               if (count > 0
+                   && cls_objects[0]->common.type == ACPI_TYPE_INTEGER) {
+                       class_code[0] = (u8)cls_objects[0]->integer.value;
+               }
+               if (count > 1
+                   && cls_objects[1]->common.type == ACPI_TYPE_INTEGER) {
+                       class_code[1] = (u8)cls_objects[1]->integer.value;
+               }
+               if (count > 2
+                   && cls_objects[2]->common.type == ACPI_TYPE_INTEGER) {
+                       class_code[2] = (u8)cls_objects[2]->integer.value;
+               }
+       }
+
+       /* Allocate a buffer for the CLS */
+
+       cls =
+           ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_pnp_device_id) +
+                                (acpi_size) length);
+       if (!cls) {
+               status = AE_NO_MEMORY;
+               goto cleanup;
+       }
+
+       /* Area for the string starts after PNP_DEVICE_ID struct */
+
+       cls->string =
+           ACPI_ADD_PTR(char, cls, sizeof(struct acpi_pnp_device_id));
+
+       /* Simply copy existing string */
+
+       acpi_ex_pci_cls_to_string(cls->string, class_code);
+       cls->length = length;
+       *return_id = cls;
+
+cleanup:
+
+       /* On exit, we must delete the return object */
+
+       acpi_ut_remove_reference(obj_desc);
+       return_ACPI_STATUS(status);
+}
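
The comment block above describes the _CLS result as a "BBSSPP" string built from the three package integers. A self-contained sketch of that formatting (a stand-in for the conversion step, not the ACPICA helper itself; the SATA/AHCI class code is only an example value):

#include <stdio.h>

/* Illustration only: format base class, sub-class and programming
 * interface as the six-character "BBSSPP" string plus NUL terminator. */
static void pci_cls_to_string(char *dest, const unsigned char class_code[3])
{
        /* 7 bytes: "BBSSPP" + NUL, matching the documented string size */
        snprintf(dest, 7, "%02X%02X%02X",
                 class_code[0], class_code[1], class_code[2]);
}

int main(void)
{
        unsigned char cls[3] = { 0x01, 0x06, 0x01 };    /* e.g. SATA AHCI */
        char buf[7];

        pci_cls_to_string(buf, cls);
        printf("_CLS -> \"%s\"\n", buf);        /* prints: _CLS -> "010601" */
        return 0;
}
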
index cbb7034d28d89d10944096ed8359caa5b6274b94..71b66537f8260daf7e09ceae9747790bcf71bd8c 100644 (file)
@@ -66,9 +66,9 @@ u8 acpi_ut_is_pci_root_bridge(char *id)
         * Check if this is a PCI root bridge.
         * ACPI 3.0+: check for a PCI Express root also.
         */
-       if (!(ACPI_STRCMP(id,
-                         PCI_ROOT_HID_STRING)) ||
-           !(ACPI_STRCMP(id, PCI_EXPRESS_ROOT_HID_STRING))) {
+       if (!(strcmp(id,
+                    PCI_ROOT_HID_STRING)) ||
+           !(strcmp(id, PCI_EXPRESS_ROOT_HID_STRING))) {
                return (TRUE);
        }
 
@@ -97,7 +97,8 @@ u8 acpi_ut_is_aml_table(struct acpi_table_header *table)
 
        if (ACPI_COMPARE_NAME(table->signature, ACPI_SIG_DSDT) ||
            ACPI_COMPARE_NAME(table->signature, ACPI_SIG_PSDT) ||
-           ACPI_COMPARE_NAME(table->signature, ACPI_SIG_SSDT)) {
+           ACPI_COMPARE_NAME(table->signature, ACPI_SIG_SSDT) ||
+           ACPI_COMPARE_NAME(table->signature, ACPI_SIG_OSDT)) {
                return (TRUE);
        }
 
index 44035abdbf2948000ae2ffd3861d07ff08746b6e..8f3d203aed79844ee78859ae954492f09e9fce95 100644 (file)
@@ -232,8 +232,7 @@ acpi_status acpi_ut_install_interface(acpi_string interface_name)
                return (AE_NO_MEMORY);
        }
 
-       interface_info->name =
-           ACPI_ALLOCATE_ZEROED(ACPI_STRLEN(interface_name) + 1);
+       interface_info->name = ACPI_ALLOCATE_ZEROED(strlen(interface_name) + 1);
        if (!interface_info->name) {
                ACPI_FREE(interface_info);
                return (AE_NO_MEMORY);
@@ -241,7 +240,7 @@ acpi_status acpi_ut_install_interface(acpi_string interface_name)
 
        /* Initialize new info and insert at the head of the global list */
 
-       ACPI_STRCPY(interface_info->name, interface_name);
+       strcpy(interface_info->name, interface_name);
        interface_info->flags = ACPI_OSI_DYNAMIC;
        interface_info->next = acpi_gbl_supported_interfaces;
 
@@ -269,7 +268,7 @@ acpi_status acpi_ut_remove_interface(acpi_string interface_name)
 
        previous_interface = next_interface = acpi_gbl_supported_interfaces;
        while (next_interface) {
-               if (!ACPI_STRCMP(interface_name, next_interface->name)) {
+               if (!strcmp(interface_name, next_interface->name)) {
 
                        /* Found: name is in either the static list or was added at runtime */
 
@@ -373,7 +372,7 @@ struct acpi_interface_info *acpi_ut_get_interface(acpi_string interface_name)
 
        next_interface = acpi_gbl_supported_interfaces;
        while (next_interface) {
-               if (!ACPI_STRCMP(interface_name, next_interface->name)) {
+               if (!strcmp(interface_name, next_interface->name)) {
                        return (next_interface);
                }
 
index 29e449935a82e5f802a96f8e3c09d257a2bb2529..97898ed71b4b3a9a9a9e8d510b83fdbe52546a71 100644 (file)
@@ -148,7 +148,7 @@ void acpi_ut_get_expected_return_types(char *buffer, u32 expected_btypes)
        u32 j;
 
        if (!expected_btypes) {
-               ACPI_STRCPY(buffer, "NONE");
+               strcpy(buffer, "NONE");
                return;
        }
 
@@ -161,7 +161,7 @@ void acpi_ut_get_expected_return_types(char *buffer, u32 expected_btypes)
                /* If one of the expected types, concatenate the name of this type */
 
                if (expected_btypes & this_rtype) {
-                       ACPI_STRCAT(buffer, &ut_rtype_names[i][j]);
+                       strcat(buffer, &ut_rtype_names[i][j]);
                        j = 0;  /* Use name separator from now on */
                }
 
index 2be6bd4bdc09d7662c15a9ee3ab733310be3d077..b26297c5de49d528f19e8d0aacba119f94c69a71 100644 (file)
@@ -180,7 +180,7 @@ const char *acpi_ut_scan_number(const char *string, u64 *number_ptr)
 {
        u64 number = 0;
 
-       while (ACPI_IS_DIGIT(*string)) {
+       while (isdigit((int)*string)) {
                number *= 10;
                number += *(string++) - '0';
        }
@@ -405,7 +405,7 @@ acpi_ut_vsnprintf(char *string,
                /* Process width */
 
                width = -1;
-               if (ACPI_IS_DIGIT(*format)) {
+               if (isdigit((int)*format)) {
                        format = acpi_ut_scan_number(format, &number);
                        width = (s32) number;
                } else if (*format == '*') {
@@ -422,7 +422,7 @@ acpi_ut_vsnprintf(char *string,
                precision = -1;
                if (*format == '.') {
                        ++format;
-                       if (ACPI_IS_DIGIT(*format)) {
+                       if (isdigit((int)*format)) {
                                format = acpi_ut_scan_number(format, &number);
                                precision = (s32) number;
                        } else if (*format == '*') {
index 83b6c52490dc06097d945fb0245189543adb3835..8f3c883dfe0ec305aa167bf0db0970d7cc60cf82 100644 (file)
@@ -79,7 +79,7 @@ void acpi_ut_strlwr(char *src_string)
        /* Walk entire string, lowercasing the letters */
 
        for (string = src_string; *string; string++) {
-               *string = (char)ACPI_TOLOWER(*string);
+               *string = (char)tolower((int)*string);
        }
 
        return;
@@ -145,7 +145,7 @@ void acpi_ut_strupr(char *src_string)
        /* Walk entire string, uppercasing the letters */
 
        for (string = src_string; *string; string++) {
-               *string = (char)ACPI_TOUPPER(*string);
+               *string = (char)toupper((int)*string);
        }
 
        return;
@@ -202,7 +202,7 @@ acpi_status acpi_ut_strtoul64(char *string, u32 base, u64 *ret_integer)
 
        /* Skip over any white space in the buffer */
 
-       while ((*string) && (ACPI_IS_SPACE(*string) || *string == '\t')) {
+       while ((*string) && (isspace((int)*string) || *string == '\t')) {
                string++;
        }
 
@@ -211,7 +211,7 @@ acpi_status acpi_ut_strtoul64(char *string, u32 base, u64 *ret_integer)
                 * Base equal to ACPI_ANY_BASE means 'ToInteger operation case'.
                 * We need to determine if it is decimal or hexadecimal.
                 */
-               if ((*string == '0') && (ACPI_TOLOWER(*(string + 1)) == 'x')) {
+               if ((*string == '0') && (tolower((int)*(string + 1)) == 'x')) {
                        sign_of0x = 1;
                        base = 16;
 
@@ -224,7 +224,7 @@ acpi_status acpi_ut_strtoul64(char *string, u32 base, u64 *ret_integer)
 
        /* Any string left? Check that '0x' is not followed by white space. */
 
-       if (!(*string) || ACPI_IS_SPACE(*string) || *string == '\t') {
+       if (!(*string) || isspace((int)*string) || *string == '\t') {
                if (to_integer_op) {
                        goto error_exit;
                } else {
@@ -241,7 +241,7 @@ acpi_status acpi_ut_strtoul64(char *string, u32 base, u64 *ret_integer)
        /* Main loop: convert the string to a 32- or 64-bit integer */
 
        while (*string) {
-               if (ACPI_IS_DIGIT(*string)) {
+               if (isdigit((int)*string)) {
 
                        /* Convert ASCII 0-9 to Decimal value */
 
@@ -252,8 +252,8 @@ acpi_status acpi_ut_strtoul64(char *string, u32 base, u64 *ret_integer)
 
                        term = 1;
                } else {
-                       this_digit = (u8)ACPI_TOUPPER(*string);
-                       if (ACPI_IS_XDIGIT((char)this_digit)) {
+                       this_digit = (u8)toupper((int)*string);
+                       if (isxdigit((int)this_digit)) {
 
                                /* Convert ASCII Hex char to value */
 
@@ -404,7 +404,7 @@ void acpi_ut_print_string(char *string, u16 max_length)
 
                        /* Check for printable character or hex escape */
 
-                       if (ACPI_IS_PRINT(string[i])) {
+                       if (isprint((int)string[i])) {
                                /* This is a normal character */
 
                                acpi_os_printf("%c", (int)string[i]);
@@ -609,22 +609,22 @@ void ut_convert_backslashes(char *pathname)
 u8 acpi_ut_safe_strcpy(char *dest, acpi_size dest_size, char *source)
 {
 
-       if (ACPI_STRLEN(source) >= dest_size) {
+       if (strlen(source) >= dest_size) {
                return (TRUE);
        }
 
-       ACPI_STRCPY(dest, source);
+       strcpy(dest, source);
        return (FALSE);
 }
 
 u8 acpi_ut_safe_strcat(char *dest, acpi_size dest_size, char *source)
 {
 
-       if ((ACPI_STRLEN(dest) + ACPI_STRLEN(source)) >= dest_size) {
+       if ((strlen(dest) + strlen(source)) >= dest_size) {
                return (TRUE);
        }
 
-       ACPI_STRCAT(dest, source);
+       strcat(dest, source);
        return (FALSE);
 }
 
@@ -635,14 +635,13 @@ acpi_ut_safe_strncat(char *dest,
 {
        acpi_size actual_transfer_length;
 
-       actual_transfer_length =
-           ACPI_MIN(max_transfer_length, ACPI_STRLEN(source));
+       actual_transfer_length = ACPI_MIN(max_transfer_length, strlen(source));
 
-       if ((ACPI_STRLEN(dest) + actual_transfer_length) >= dest_size) {
+       if ((strlen(dest) + actual_transfer_length) >= dest_size) {
                return (TRUE);
        }
 
-       ACPI_STRNCAT(dest, source, max_transfer_length);
+       strncat(dest, source, max_transfer_length);
        return (FALSE);
 }
 #endif
index 130dd9f96f0fe72ee03f79f6fa3631d70e608086..9a7dc8196a5da76779f10c855d17fc0e5f4430c8 100644 (file)
@@ -100,7 +100,7 @@ acpi_ut_create_list(char *list_name,
                return (AE_NO_MEMORY);
        }
 
-       ACPI_MEMSET(cache, 0, sizeof(struct acpi_memory_list));
+       memset(cache, 0, sizeof(struct acpi_memory_list));
 
        cache->list_name = list_name;
        cache->object_size = object_size;
@@ -402,7 +402,7 @@ acpi_ut_track_allocation(struct acpi_debug_mem_block *allocation,
        allocation->component = component;
        allocation->line = line;
 
-       ACPI_STRNCPY(allocation->module, module, ACPI_MAX_MODULE_NAME);
+       strncpy(allocation->module, module, ACPI_MAX_MODULE_NAME);
        allocation->module[ACPI_MAX_MODULE_NAME - 1] = 0;
 
        if (!element) {
@@ -497,7 +497,7 @@ acpi_ut_remove_allocation(struct acpi_debug_mem_block *allocation,
 
        /* Mark the segment as deleted */
 
-       ACPI_MEMSET(&allocation->user_space, 0xEA, allocation->size);
+       memset(&allocation->user_space, 0xEA, allocation->size);
 
        status = acpi_ut_release_mutex(ACPI_MTX_MEMORY);
        return (status);
@@ -595,7 +595,7 @@ void acpi_ut_dump_allocations(u32 component, const char *module)
        while (element) {
                if ((element->component & component) &&
                    ((module == NULL)
-                    || (0 == ACPI_STRCMP(module, element->module)))) {
+                    || (0 == strcmp(module, element->module)))) {
                        descriptor =
                            ACPI_CAST_PTR(union acpi_descriptor,
                                          &element->user_space);
index 0929187bdce09c74f07e8d1a68f48eed4f2880e6..51cf52d52243c99233728a2d00d73e6171b38a28 100644 (file)
@@ -234,8 +234,8 @@ acpi_status acpi_get_statistics(struct acpi_statistics *stats)
        stats->sci_count = acpi_sci_count;
        stats->gpe_count = acpi_gpe_count;
 
-       ACPI_MEMCPY(stats->fixed_event_count, acpi_fixed_event_count,
-                   sizeof(acpi_fixed_event_count));
+       memcpy(stats->fixed_event_count, acpi_fixed_event_count,
+              sizeof(acpi_fixed_event_count));
 
        /* Other counters */
 
@@ -322,7 +322,7 @@ acpi_status acpi_install_interface(acpi_string interface_name)
 
        /* Parameter validation */
 
-       if (!interface_name || (ACPI_STRLEN(interface_name) == 0)) {
+       if (!interface_name || (strlen(interface_name) == 0)) {
                return (AE_BAD_PARAMETER);
        }
 
@@ -374,7 +374,7 @@ acpi_status acpi_remove_interface(acpi_string interface_name)
 
        /* Parameter validation */
 
-       if (!interface_name || (ACPI_STRLEN(interface_name) == 0)) {
+       if (!interface_name || (strlen(interface_name) == 0)) {
                return (AE_BAD_PARAMETER);
        }
 
index 083a76891889244596453059885ef3007aab8320..42a32a66ef22a9fc87558aa1e8901259146519be 100644 (file)
@@ -179,10 +179,12 @@ acpi_status __init acpi_enable_subsystem(u32 flags)
         * Obtain a permanent mapping for the FACS. This is required for the
         * Global Lock and the Firmware Waking Vector
         */
-       status = acpi_tb_initialize_facs();
-       if (ACPI_FAILURE(status)) {
-               ACPI_WARNING((AE_INFO, "Could not map the FACS table"));
-               return_ACPI_STATUS(status);
+       if (!(flags & ACPI_NO_FACS_INIT)) {
+               status = acpi_tb_initialize_facs();
+               if (ACPI_FAILURE(status)) {
+                       ACPI_WARNING((AE_INFO, "Could not map the FACS table"));
+                       return_ACPI_STATUS(status);
+               }
        }
 #endif                         /* !ACPI_REDUCED_HARDWARE */
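
This hunk makes the FACS mapping conditional: a host that does not need the Global Lock or the Firmware Waking Vector can pass ACPI_NO_FACS_INIT in the flags argument and skip acpi_tb_initialize_facs() entirely. A hedged sketch of such a caller (a real caller would normally OR in its other initialization flags as well):

        acpi_status status;

        /* skip the FACS mapping during subsystem enable */
        status = acpi_enable_subsystem(ACPI_NO_FACS_INIT);
        if (ACPI_FAILURE(status))
                return status;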
 
index 1d1791935c318c71148a5da573effe51f9031a15..278dc4be992a49b7663223a5c6bf2215699f4c20 100644 (file)
@@ -162,6 +162,15 @@ static int __init dmi_disable_osi_win8(const struct dmi_system_id *d)
        acpi_osi_setup("!Windows 2012");
        return 0;
 }
+#ifdef CONFIG_ACPI_REV_OVERRIDE_POSSIBLE
+static int __init dmi_enable_rev_override(const struct dmi_system_id *d)
+{
+       printk(KERN_NOTICE PREFIX "DMI detected: %s (force ACPI _REV to 5)\n",
+              d->ident);
+       acpi_rev_override_setup(NULL);
+       return 0;
+}
+#endif
 
 static struct dmi_system_id acpi_osi_dmi_table[] __initdata = {
        {
@@ -325,6 +334,23 @@ static struct dmi_system_id acpi_osi_dmi_table[] __initdata = {
                     DMI_MATCH(DMI_PRODUCT_NAME, "1015PX"),
                },
        },
+
+#ifdef CONFIG_ACPI_REV_OVERRIDE_POSSIBLE
+       /*
+        * DELL XPS 13 (2015) switches sound between HDA and I2S
+        * depending on the ACPI _REV callback. If userspace supports
+        * I2S sufficiently (or if you do not care about sound), you
+        * can safely disable this quirk.
+        */
+       {
+        .callback = dmi_enable_rev_override,
+        .ident = "DELL XPS 13 (2015)",
+        .matches = {
+                     DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+                     DMI_MATCH(DMI_PRODUCT_NAME, "XPS 13 9343"),
+               },
+       },
+#endif
        {}
 };
 
index 787c629bc9b41e83ac11496f76bb646437d3b077..4683a96932b917fcc5f3efc04e290eb07012efad 100644 (file)
@@ -58,6 +58,7 @@ void acpi_cmos_rtc_init(void);
 #else
 static inline void acpi_cmos_rtc_init(void) {}
 #endif
+int acpi_rev_override_setup(char *str);
 
 extern bool acpi_force_hot_remove;
 
index a5dc9034efeeda6f06044521600518f70bd322d3..c262e4acd68d827cba1273d79e28515c6ebe95fa 100644 (file)
@@ -530,6 +530,19 @@ acpi_os_get_physical_address(void *virt, acpi_physical_address * phys)
 }
 #endif
 
+#ifdef CONFIG_ACPI_REV_OVERRIDE_POSSIBLE
+static bool acpi_rev_override;
+
+int __init acpi_rev_override_setup(char *str)
+{
+       acpi_rev_override = true;
+       return 1;
+}
+__setup("acpi_rev_override", acpi_rev_override_setup);
+#else
+#define acpi_rev_override      false
+#endif
+
 #define ACPI_MAX_OVERRIDE_LEN 100
 
 static char acpi_os_name[ACPI_MAX_OVERRIDE_LEN];
@@ -548,6 +561,11 @@ acpi_os_predefined_override(const struct acpi_predefined_names *init_val,
                *new_val = acpi_os_name;
        }
 
+       if (!memcmp(init_val->name, "_REV", 4) && acpi_rev_override) {
+               printk(KERN_INFO PREFIX "Overriding _REV return value to 5\n");
+               *new_val = (char *)5;
+       }
+
        return AE_OK;
 }
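
Besides the DMI quirk table entry above, the override can be requested by hand: the __setup("acpi_rev_override", ...) hook means booting with the kernel parameter

        acpi_rev_override

on the command line sets acpi_rev_override to true, after which acpi_os_predefined_override() reports _REV as 5 (the (char *)5 cast passes the integer 5 back through the interface's string-pointer return value).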
 
index fcb7807ea8b73de79163bb99c20091f4b202da0d..10561ce16ed135165cdbc8e558cb2aaea263d13b 100644 (file)
@@ -660,8 +660,10 @@ static int add_region_before(u64 start, u64 end, u8 space_id,
                return -ENOMEM;
 
        error = request_range(start, end, space_id, flags, desc);
-       if (error)
+       if (error) {
+               kfree(reg);
                return error;
+       }
 
        reg->start = start;
        reg->end = end;
index a2aa65b4215d390925c8470d6790b129c16ac88a..31df474d72f4a275ba0c87fb79a26c538fc661ca 100644 (file)
@@ -359,12 +359,16 @@ int unregister_cpu_under_node(unsigned int cpu, unsigned int nid)
 #ifdef CONFIG_MEMORY_HOTPLUG_SPARSE
 #define page_initialized(page)  (page->lru.next)
 
-static int get_nid_for_pfn(unsigned long pfn)
+static int __init_refok get_nid_for_pfn(unsigned long pfn)
 {
        struct page *page;
 
        if (!pfn_valid_within(pfn))
                return -1;
+#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
+       if (system_state == SYSTEM_BOOTING)
+               return early_pfn_to_nid(pfn);
+#endif
        page = pfn_to_page(pfn);
        if (!page_initialized(page))
                return -1;
index e645852396ba44430d77273b0a513cb5e8deb78a..f3f6d167f3f1f015fec8e36ede66b7e8e274338f 100644 (file)
@@ -129,9 +129,9 @@ EXPORT_SYMBOL_GPL(device_property_present);
 bool fwnode_property_present(struct fwnode_handle *fwnode, const char *propname)
 {
        if (is_of_node(fwnode))
-               return of_property_read_bool(of_node(fwnode), propname);
+               return of_property_read_bool(to_of_node(fwnode), propname);
        else if (is_acpi_node(fwnode))
-               return !acpi_dev_prop_get(acpi_node(fwnode), propname, NULL);
+               return !acpi_dev_prop_get(to_acpi_node(fwnode), propname, NULL);
 
        return !!pset_prop_get(to_pset(fwnode), propname);
 }
@@ -286,10 +286,10 @@ EXPORT_SYMBOL_GPL(device_property_read_string);
 ({ \
        int _ret_; \
        if (is_of_node(_fwnode_)) \
-               _ret_ = OF_DEV_PROP_READ_ARRAY(of_node(_fwnode_), _propname_, \
+               _ret_ = OF_DEV_PROP_READ_ARRAY(to_of_node(_fwnode_), _propname_, \
                                               _type_, _val_, _nval_); \
        else if (is_acpi_node(_fwnode_)) \
-               _ret_ = acpi_dev_prop_read(acpi_node(_fwnode_), _propname_, \
+               _ret_ = acpi_dev_prop_read(to_acpi_node(_fwnode_), _propname_, \
                                           _proptype_, _val_, _nval_); \
        else \
                _ret_ = pset_prop_read_array(to_pset(_fwnode_), _propname_, \
@@ -425,11 +425,11 @@ int fwnode_property_read_string_array(struct fwnode_handle *fwnode,
 {
        if (is_of_node(fwnode))
                return val ?
-                       of_property_read_string_array(of_node(fwnode), propname,
-                                                     val, nval) :
-                       of_property_count_strings(of_node(fwnode), propname);
+                       of_property_read_string_array(to_of_node(fwnode),
+                                                     propname, val, nval) :
+                       of_property_count_strings(to_of_node(fwnode), propname);
        else if (is_acpi_node(fwnode))
-               return acpi_dev_prop_read(acpi_node(fwnode), propname,
+               return acpi_dev_prop_read(to_acpi_node(fwnode), propname,
                                          DEV_PROP_STRING, val, nval);
 
        return pset_prop_read_array(to_pset(fwnode), propname,
@@ -456,9 +456,9 @@ int fwnode_property_read_string(struct fwnode_handle *fwnode,
                                const char *propname, const char **val)
 {
        if (is_of_node(fwnode))
-               return of_property_read_string(of_node(fwnode), propname, val);
+               return of_property_read_string(to_of_node(fwnode), propname, val);
        else if (is_acpi_node(fwnode))
-               return acpi_dev_prop_read(acpi_node(fwnode), propname,
+               return acpi_dev_prop_read(to_acpi_node(fwnode), propname,
                                          DEV_PROP_STRING, val, 1);
 
        return -ENXIO;
@@ -476,13 +476,13 @@ struct fwnode_handle *device_get_next_child_node(struct device *dev,
        if (IS_ENABLED(CONFIG_OF) && dev->of_node) {
                struct device_node *node;
 
-               node = of_get_next_available_child(dev->of_node, of_node(child));
+               node = of_get_next_available_child(dev->of_node, to_of_node(child));
                if (node)
                        return &node->fwnode;
        } else if (IS_ENABLED(CONFIG_ACPI)) {
                struct acpi_device *node;
 
-               node = acpi_get_next_child(dev, acpi_node(child));
+               node = acpi_get_next_child(dev, to_acpi_node(child));
                if (node)
                        return acpi_fwnode_handle(node);
        }
@@ -501,7 +501,7 @@ EXPORT_SYMBOL_GPL(device_get_next_child_node);
 void fwnode_handle_put(struct fwnode_handle *fwnode)
 {
        if (is_of_node(fwnode))
-               of_node_put(of_node(fwnode));
+               of_node_put(to_of_node(fwnode));
 }
 EXPORT_SYMBOL_GPL(fwnode_handle_put);
 
index a6ee3d750c302b435a41533d83c9fa83efd90d06..6b88a35fb048ed8f302f6025b743d30d7646ec41 100644 (file)
@@ -419,14 +419,6 @@ static int in_flight_summary_show(struct seq_file *m, void *pos)
        return 0;
 }
 
-/* simple_positive(file->f_path.dentry) respectively debugfs_positive(),
- * but neither is "reachable" from here.
- * So we have our own inline version of it above.  :-( */
-static inline int debugfs_positive(struct dentry *dentry)
-{
-        return d_really_is_positive(dentry) && !d_unhashed(dentry);
-}
-
 /* make sure at *open* time that the respective object won't go away. */
 static int drbd_single_open(struct file *file, int (*show)(struct seq_file *, void *),
                                void *data, struct kref *kref,
@@ -444,7 +436,7 @@ static int drbd_single_open(struct file *file, int (*show)(struct seq_file *, vo
        /* serialize with d_delete() */
        mutex_lock(&d_inode(parent)->i_mutex);
        /* Make sure the object is still alive */
-       if (debugfs_positive(file->f_path.dentry)
+       if (simple_positive(file->f_path.dentry)
        && kref_get_unless_zero(kref))
                ret = 0;
        mutex_unlock(&d_inode(parent)->i_mutex);
index 40580dc7f41cacef42eedafeebe725e96943c91e..f7a4c9d7f721816666a76e2d667adf9153e162e9 100644 (file)
@@ -588,7 +588,7 @@ static ssize_t loop_attr_backing_file_show(struct loop_device *lo, char *buf)
 
        spin_lock_irq(&lo->lo_lock);
        if (lo->lo_backing_file)
-               p = d_path(&lo->lo_backing_file->f_path, buf, PAGE_SIZE - 1);
+               p = file_path(lo->lo_backing_file, buf, PAGE_SIZE - 1);
        spin_unlock_irq(&lo->lo_lock);
 
        if (IS_ERR_OR_NULL(p))
index 6f9b7534928e2a8db2d35732d6e3511a1c4455bc..69de41a87b74311b2b7478fb0226b8bc253c6ebc 100644 (file)
@@ -99,7 +99,7 @@ static int null_set_queue_mode(const char *str, const struct kernel_param *kp)
        return null_param_store_val(str, &queue_mode, NULL_Q_BIO, NULL_Q_MQ);
 }
 
-static struct kernel_param_ops null_queue_mode_param_ops = {
+static const struct kernel_param_ops null_queue_mode_param_ops = {
        .set    = null_set_queue_mode,
        .get    = param_get_int,
 };
@@ -127,7 +127,7 @@ static int null_set_irqmode(const char *str, const struct kernel_param *kp)
                                        NULL_IRQ_TIMER);
 }
 
-static struct kernel_param_ops null_irqmode_param_ops = {
+static const struct kernel_param_ops null_irqmode_param_ops = {
        .set    = null_set_irqmode,
        .get    = param_get_int,
 };
index 34338d7438f56895f76ac8b110d8e92a120b3202..d1d6141920d3ced742d850b051d273d65aa3728e 100644 (file)
@@ -1474,6 +1474,7 @@ static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev, int qid,
        nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride];
        nvmeq->q_depth = depth;
        nvmeq->qid = qid;
+       nvmeq->cq_vector = -1;
        dev->queues[qid] = nvmeq;
 
        /* make sure queue descriptor is set before queue count, for kthread */
@@ -1726,8 +1727,10 @@ static int nvme_configure_admin_queue(struct nvme_dev *dev)
 
        nvmeq->cq_vector = 0;
        result = queue_request_irq(dev, nvmeq, nvmeq->irqname);
-       if (result)
+       if (result) {
+               nvmeq->cq_vector = -1;
                goto free_nvmeq;
+       }
 
        return result;
 
@@ -2213,8 +2216,10 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
        dev->max_qid = nr_io_queues;
 
        result = queue_request_irq(dev, adminq, adminq->irqname);
-       if (result)
+       if (result) {
+               adminq->cq_vector = -1;
                goto free_queues;
+       }
 
        /* Free previously allocated queues that are no longer usable */
        nvme_free_queues(dev, nr_io_queues + 1);
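
The cq_vector = -1 initialisation above, together with resetting it when queue_request_irq() fails, gives the rest of the driver an unambiguous "no IRQ vector assigned" marker. Illustrative only (the suspend path itself is not part of this excerpt), the kind of check this enables looks like:

        if (nvmeq->cq_vector == -1)
                return 1;       /* no IRQ was ever requested for this queue */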
index ec6c5c6e1ac94b2bcbe0619a7fe62b9e7d0ce4a5..d94529d5c8e951378eaf62d74b708edf271a550f 100644 (file)
@@ -346,6 +346,7 @@ struct rbd_device {
        struct rbd_image_header header;
        unsigned long           flags;          /* possibly lock protected */
        struct rbd_spec         *spec;
+       struct rbd_options      *opts;
 
        char                    *header_name;
 
@@ -724,34 +725,36 @@ static struct rbd_client *rbd_client_find(struct ceph_options *ceph_opts)
 }
 
 /*
- * mount options
+ * (Per device) rbd map options
  */
 enum {
+       Opt_queue_depth,
        Opt_last_int,
        /* int args above */
        Opt_last_string,
        /* string args above */
        Opt_read_only,
        Opt_read_write,
-       /* Boolean args above */
-       Opt_last_bool,
+       Opt_err
 };
 
 static match_table_t rbd_opts_tokens = {
+       {Opt_queue_depth, "queue_depth=%d"},
        /* int args above */
        /* string args above */
        {Opt_read_only, "read_only"},
        {Opt_read_only, "ro"},          /* Alternate spelling */
        {Opt_read_write, "read_write"},
        {Opt_read_write, "rw"},         /* Alternate spelling */
-       /* Boolean args above */
-       {-1, NULL}
+       {Opt_err, NULL}
 };
 
 struct rbd_options {
+       int     queue_depth;
        bool    read_only;
 };
 
+#define RBD_QUEUE_DEPTH_DEFAULT        BLKDEV_MAX_RQ
 #define RBD_READ_ONLY_DEFAULT  false
 
 static int parse_rbd_opts_token(char *c, void *private)
@@ -761,27 +764,27 @@ static int parse_rbd_opts_token(char *c, void *private)
        int token, intval, ret;
 
        token = match_token(c, rbd_opts_tokens, argstr);
-       if (token < 0)
-               return -EINVAL;
-
        if (token < Opt_last_int) {
                ret = match_int(&argstr[0], &intval);
                if (ret < 0) {
-                       pr_err("bad mount option arg (not int) "
-                              "at '%s'\n", c);
+                       pr_err("bad mount option arg (not int) at '%s'\n", c);
                        return ret;
                }
                dout("got int token %d val %d\n", token, intval);
        } else if (token > Opt_last_int && token < Opt_last_string) {
-               dout("got string token %d val %s\n", token,
-                    argstr[0].from);
-       } else if (token > Opt_last_string && token < Opt_last_bool) {
-               dout("got Boolean token %d\n", token);
+               dout("got string token %d val %s\n", token, argstr[0].from);
        } else {
                dout("got token %d\n", token);
        }
 
        switch (token) {
+       case Opt_queue_depth:
+               if (intval < 1) {
+                       pr_err("queue_depth out of range\n");
+                       return -EINVAL;
+               }
+               rbd_opts->queue_depth = intval;
+               break;
        case Opt_read_only:
                rbd_opts->read_only = true;
                break;
@@ -789,9 +792,10 @@ static int parse_rbd_opts_token(char *c, void *private)
                rbd_opts->read_only = false;
                break;
        default:
-               rbd_assert(false);
-               break;
+               /* libceph prints "bad option" msg */
+               return -EINVAL;
        }
+
        return 0;
 }
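
With Opt_queue_depth parsed here, the blk-mq queue depth becomes a per-map option instead of the compiled-in BLKDEV_MAX_RQ default. Illustrative usage (pool and image names are hypothetical):

        rbd map mypool/myimage -o queue_depth=128

The value lands in rbd_opts->queue_depth and, later in this patch, in rbd_dev->tag_set.queue_depth; values below 1 are rejected by the parser above.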
 
@@ -1563,22 +1567,39 @@ static void rbd_obj_request_end(struct rbd_obj_request *obj_request)
 /*
  * Wait for an object request to complete.  If interrupted, cancel the
  * underlying osd request.
+ *
+ * @timeout: in jiffies, 0 means "wait forever"
  */
-static int rbd_obj_request_wait(struct rbd_obj_request *obj_request)
+static int __rbd_obj_request_wait(struct rbd_obj_request *obj_request,
+                                 unsigned long timeout)
 {
-       int ret;
+       long ret;
 
        dout("%s %p\n", __func__, obj_request);
-
-       ret = wait_for_completion_interruptible(&obj_request->completion);
-       if (ret < 0) {
-               dout("%s %p interrupted\n", __func__, obj_request);
+       ret = wait_for_completion_interruptible_timeout(
+                                       &obj_request->completion,
+                                       ceph_timeout_jiffies(timeout));
+       if (ret <= 0) {
+               if (ret == 0)
+                       ret = -ETIMEDOUT;
                rbd_obj_request_end(obj_request);
-               return ret;
+       } else {
+               ret = 0;
        }
 
-       dout("%s %p done\n", __func__, obj_request);
-       return 0;
+       dout("%s %p ret %d\n", __func__, obj_request, (int)ret);
+       return ret;
+}
+
+static int rbd_obj_request_wait(struct rbd_obj_request *obj_request)
+{
+       return __rbd_obj_request_wait(obj_request, 0);
+}
+
+static int rbd_obj_request_wait_timeout(struct rbd_obj_request *obj_request,
+                                       unsigned long timeout)
+{
+       return __rbd_obj_request_wait(obj_request, timeout);
 }
 
 static void rbd_img_request_complete(struct rbd_img_request *img_request)
@@ -2001,11 +2022,11 @@ static struct rbd_obj_request *rbd_obj_request_create(const char *object_name,
        rbd_assert(obj_request_type_valid(type));
 
        size = strlen(object_name) + 1;
-       name = kmalloc(size, GFP_KERNEL);
+       name = kmalloc(size, GFP_NOIO);
        if (!name)
                return NULL;
 
-       obj_request = kmem_cache_zalloc(rbd_obj_request_cache, GFP_KERNEL);
+       obj_request = kmem_cache_zalloc(rbd_obj_request_cache, GFP_NOIO);
        if (!obj_request) {
                kfree(name);
                return NULL;
@@ -2376,7 +2397,7 @@ static void rbd_img_obj_request_fill(struct rbd_obj_request *obj_request,
        }
 
        if (opcode == CEPH_OSD_OP_DELETE)
-               osd_req_op_init(osd_request, num_ops, opcode);
+               osd_req_op_init(osd_request, num_ops, opcode, 0);
        else
                osd_req_op_extent_init(osd_request, num_ops, opcode,
                                       offset, length, 0, 0);
@@ -2848,7 +2869,7 @@ static int rbd_img_obj_exists_submit(struct rbd_obj_request *obj_request)
                goto out;
        stat_request->callback = rbd_img_obj_exists_callback;
 
-       osd_req_op_init(stat_request->osd_req, 0, CEPH_OSD_OP_STAT);
+       osd_req_op_init(stat_request->osd_req, 0, CEPH_OSD_OP_STAT, 0);
        osd_req_op_raw_data_in_pages(stat_request->osd_req, 0, pages, size, 0,
                                        false, false);
        rbd_osd_req_format_read(stat_request);
@@ -3122,6 +3143,7 @@ static struct rbd_obj_request *rbd_obj_watch_request_helper(
                                                bool watch)
 {
        struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
+       struct ceph_options *opts = osdc->client->options;
        struct rbd_obj_request *obj_request;
        int ret;
 
@@ -3148,7 +3170,7 @@ static struct rbd_obj_request *rbd_obj_watch_request_helper(
        if (ret)
                goto out;
 
-       ret = rbd_obj_request_wait(obj_request);
+       ret = rbd_obj_request_wait_timeout(obj_request, opts->mount_timeout);
        if (ret)
                goto out;
 
@@ -3750,10 +3772,9 @@ static int rbd_init_disk(struct rbd_device *rbd_dev)
 
        memset(&rbd_dev->tag_set, 0, sizeof(rbd_dev->tag_set));
        rbd_dev->tag_set.ops = &rbd_mq_ops;
-       rbd_dev->tag_set.queue_depth = BLKDEV_MAX_RQ;
+       rbd_dev->tag_set.queue_depth = rbd_dev->opts->queue_depth;
        rbd_dev->tag_set.numa_node = NUMA_NO_NODE;
-       rbd_dev->tag_set.flags =
-               BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_SG_MERGE;
+       rbd_dev->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_SG_MERGE;
        rbd_dev->tag_set.nr_hw_queues = 1;
        rbd_dev->tag_set.cmd_size = sizeof(struct work_struct);
 
@@ -3773,6 +3794,7 @@ static int rbd_init_disk(struct rbd_device *rbd_dev)
        /* set io sizes to object size */
        segment_size = rbd_obj_bytes(&rbd_dev->header);
        blk_queue_max_hw_sectors(q, segment_size / SECTOR_SIZE);
+       blk_queue_max_segments(q, segment_size / SECTOR_SIZE);
        blk_queue_max_segment_size(q, segment_size);
        blk_queue_io_min(q, segment_size);
        blk_queue_io_opt(q, segment_size);
@@ -4044,7 +4066,8 @@ static void rbd_spec_free(struct kref *kref)
 }
 
 static struct rbd_device *rbd_dev_create(struct rbd_client *rbdc,
-                               struct rbd_spec *spec)
+                                        struct rbd_spec *spec,
+                                        struct rbd_options *opts)
 {
        struct rbd_device *rbd_dev;
 
@@ -4058,8 +4081,9 @@ static struct rbd_device *rbd_dev_create(struct rbd_client *rbdc,
        INIT_LIST_HEAD(&rbd_dev->node);
        init_rwsem(&rbd_dev->header_rwsem);
 
-       rbd_dev->spec = spec;
        rbd_dev->rbd_client = rbdc;
+       rbd_dev->spec = spec;
+       rbd_dev->opts = opts;
 
        /* Initialize the layout used for all rbd requests */
 
@@ -4075,6 +4099,7 @@ static void rbd_dev_destroy(struct rbd_device *rbd_dev)
 {
        rbd_put_client(rbd_dev->rbd_client);
        rbd_spec_put(rbd_dev->spec);
+       kfree(rbd_dev->opts);
        kfree(rbd_dev);
 }
 
@@ -4933,6 +4958,7 @@ static int rbd_add_parse_args(const char *buf,
                goto out_mem;
 
        rbd_opts->read_only = RBD_READ_ONLY_DEFAULT;
+       rbd_opts->queue_depth = RBD_QUEUE_DEPTH_DEFAULT;
 
        copts = ceph_parse_options(options, mon_addrs,
                                        mon_addrs + mon_addrs_size - 1,
@@ -4963,8 +4989,8 @@ out_err:
  */
 static int rbd_add_get_pool_id(struct rbd_client *rbdc, const char *pool_name)
 {
+       struct ceph_options *opts = rbdc->client->options;
        u64 newest_epoch;
-       unsigned long timeout = rbdc->client->options->mount_timeout * HZ;
        int tries = 0;
        int ret;
 
@@ -4979,7 +5005,8 @@ again:
                if (rbdc->client->osdc.osdmap->epoch < newest_epoch) {
                        ceph_monc_request_next_osdmap(&rbdc->client->monc);
                        (void) ceph_monc_wait_osdmap(&rbdc->client->monc,
-                                                    newest_epoch, timeout);
+                                                    newest_epoch,
+                                                    opts->mount_timeout);
                        goto again;
                } else {
                        /* the osdmap we have is new enough */
@@ -5148,7 +5175,7 @@ static int rbd_dev_probe_parent(struct rbd_device *rbd_dev)
        rbdc = __rbd_get_client(rbd_dev->rbd_client);
 
        ret = -ENOMEM;
-       parent = rbd_dev_create(rbdc, parent_spec);
+       parent = rbd_dev_create(rbdc, parent_spec, NULL);
        if (!parent)
                goto out_err;
 
@@ -5394,9 +5421,6 @@ static ssize_t do_rbd_add(struct bus_type *bus,
        rc = rbd_add_parse_args(buf, &ceph_opts, &rbd_opts, &spec);
        if (rc < 0)
                goto err_out_module;
-       read_only = rbd_opts->read_only;
-       kfree(rbd_opts);
-       rbd_opts = NULL;        /* done with this */
 
        rbdc = rbd_get_client(ceph_opts);
        if (IS_ERR(rbdc)) {
@@ -5422,11 +5446,12 @@ static ssize_t do_rbd_add(struct bus_type *bus,
                goto err_out_client;
        }
 
-       rbd_dev = rbd_dev_create(rbdc, spec);
+       rbd_dev = rbd_dev_create(rbdc, spec, rbd_opts);
        if (!rbd_dev)
                goto err_out_client;
        rbdc = NULL;            /* rbd_dev now owns this */
        spec = NULL;            /* rbd_dev now owns this */
+       rbd_opts = NULL;        /* rbd_dev now owns this */
 
        rc = rbd_dev_image_probe(rbd_dev, true);
        if (rc < 0)
@@ -5434,6 +5459,7 @@ static ssize_t do_rbd_add(struct bus_type *bus,
 
        /* If we are mapping a snapshot it must be marked read-only */
 
+       read_only = rbd_dev->opts->read_only;
        if (rbd_dev->spec->snap_id != CEPH_NOSNAP)
                read_only = true;
        rbd_dev->mapping.read_only = read_only;
@@ -5458,6 +5484,7 @@ err_out_client:
        rbd_put_client(rbdc);
 err_out_args:
        rbd_spec_put(spec);
+       kfree(rbd_opts);
 err_out_module:
        module_put(THIS_MODULE);
 
index 2126842fb6e8a862a36b733b8eb709cf785b0591..ced96777b677b9bcddd65bae004a7a51b5cf0dc3 100644 (file)
@@ -736,7 +736,7 @@ static void xen_blkbk_unmap_and_respond(struct pending_req *req)
        struct grant_page **pages = req->segments;
        unsigned int invcount;
 
-       invcount = xen_blkbk_unmap_prepare(blkif, pages, req->nr_pages,
+       invcount = xen_blkbk_unmap_prepare(blkif, pages, req->nr_segs,
                                           req->unmap, req->unmap_pages);
 
        work->data = req;
@@ -922,7 +922,7 @@ static int xen_blkbk_map_seg(struct pending_req *pending_req)
        int rc;
 
        rc = xen_blkbk_map(pending_req->blkif, pending_req->segments,
-                          pending_req->nr_pages,
+                          pending_req->nr_segs,
                           (pending_req->operation != BLKIF_OP_READ));
 
        return rc;
@@ -938,7 +938,7 @@ static int xen_blkbk_parse_indirect(struct blkif_request *req,
        int indirect_grefs, rc, n, nseg, i;
        struct blkif_request_segment *segments = NULL;
 
-       nseg = pending_req->nr_pages;
+       nseg = pending_req->nr_segs;
        indirect_grefs = INDIRECT_PAGES(nseg);
        BUG_ON(indirect_grefs > BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST);
 
@@ -1258,7 +1258,7 @@ static int dispatch_rw_block_io(struct xen_blkif *blkif,
        pending_req->id        = req->u.rw.id;
        pending_req->operation = req_operation;
        pending_req->status    = BLKIF_RSP_OKAY;
-       pending_req->nr_pages  = nseg;
+       pending_req->nr_segs   = nseg;
 
        if (req->operation != BLKIF_OP_INDIRECT) {
                preq.dev               = req->u.rw.handle;
@@ -1379,7 +1379,7 @@ static int dispatch_rw_block_io(struct xen_blkif *blkif,
 
  fail_flush:
        xen_blkbk_unmap(blkif, pending_req->segments,
-                       pending_req->nr_pages);
+                       pending_req->nr_segs);
  fail_response:
        /* Haven't submitted any bio's yet. */
        make_response(blkif, req->u.rw.id, req_operation, BLKIF_RSP_ERROR);
index 8ccc49d01c8eb7c5fd821df8909bf8fbeeaa35a7..45a044a53d1e562db4e606623840d0c667aa56e3 100644 (file)
@@ -345,7 +345,7 @@ struct grant_page {
 struct pending_req {
        struct xen_blkif        *blkif;
        u64                     id;
-       int                     nr_pages;
+       int                     nr_segs;
        atomic_t                pendcnt;
        unsigned short          operation;
        int                     status;
index fc770b7d3beb1951e80c2f16956f3dd0897efe48..6d89ed35d80c0caaf8bf57ba82c7e9f3a9194bb9 100644 (file)
@@ -1074,12 +1074,6 @@ static void blkif_completion(struct blk_shadow *s, struct blkfront_info *info,
                s->req.u.indirect.nr_segments : s->req.u.rw.nr_segments;
 
        if (bret->operation == BLKIF_OP_READ && info->feature_persistent) {
-               /*
-                * Copy the data received from the backend into the bvec.
-                * Since bv_offset can be different than 0, and bv_len different
-                * than PAGE_SIZE, we have to keep track of the current offset,
-                * to be sure we are copying the data from the right shared page.
-                */
                for_each_sg(s->sg, sg, nseg, i) {
                        BUG_ON(sg->offset + sg->length > PAGE_SIZE);
                        shared_data = kmap_atomic(
index 0b4188b9af7cd055571851d9f0e0e25e7d03033c..c6dea3f6917bdcfc144fc70540cbbd26ea1918ee 100644 (file)
@@ -581,7 +581,7 @@ static inline int needs_ilk_vtd_wa(void)
        /* Query intel_iommu to see if we need the workaround. Presumably that
         * was loaded first.
         */
-       if ((gpu_devid == PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB ||
+       if ((gpu_devid == PCI_DEVICE_ID_INTEL_IRONLAKE_D_IG ||
             gpu_devid == PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG) &&
             intel_iommu_gfx_mapped)
                return 1;
index 37b8be7cba95f61a8f8788fda52cd8ecd3291c42..0ac3bd1a5497c5bae41f1cc51ac8cdf667ed73b4 100644 (file)
@@ -208,7 +208,7 @@ static int set_param_timeout(const char *val, const struct kernel_param *kp)
        return rv;
 }
 
-static struct kernel_param_ops param_ops_timeout = {
+static const struct kernel_param_ops param_ops_timeout = {
        .set = set_param_timeout,
        .get = param_get_int,
 };
@@ -270,14 +270,14 @@ static int set_param_wdog_ifnum(const char *val, const struct kernel_param *kp)
        return 0;
 }
 
-static struct kernel_param_ops param_ops_wdog_ifnum = {
+static const struct kernel_param_ops param_ops_wdog_ifnum = {
        .set = set_param_wdog_ifnum,
        .get = param_get_int,
 };
 
 #define param_check_wdog_ifnum param_check_int
 
-static struct kernel_param_ops param_ops_str = {
+static const struct kernel_param_ops param_ops_str = {
        .set = set_param_str,
        .get = get_param_str,
 };
index 9897f353bf1a63f24a0192aeff7ea9785a7c32bf..42f7120ca9ceaa1b900e737efeeb67f0913ae4e4 100644 (file)
@@ -78,6 +78,23 @@ config COMMON_CLK_SI570
          This driver supports Silicon Labs 570/571/598/599 programmable
          clock generators.
 
+config COMMON_CLK_CDCE925
+       tristate "Clock driver for TI CDCE925 devices"
+       depends on I2C
+       depends on OF
+       select REGMAP_I2C
+       help
+       ---help---
+         This driver supports the TI CDCE925 programmable clock synthesizer.
+         The chip contains two PLLs with spread-spectrum clocking support and
+         five output dividers. The driver only supports the following setup,
+         and uses a fixed setting for the output muxes.
+         Y1 is derived from the input clock
+         Y2 and Y3 derive from PLL1
+         Y4 and Y5 derive from PLL2
+         Given a target output frequency, the driver will set the PLL and
+         divider to best approximate the desired output.
+
 config COMMON_CLK_S2MPS11
        tristate "Clock driver for S2MPS1X/S5M8767 MFD"
        depends on MFD_SEC_CORE
@@ -150,11 +167,13 @@ config COMMON_CLK_CDCE706
        ---help---
          This driver supports TI CDCE706 programmable 3-PLL clock synthesizer.
 
+source "drivers/clk/bcm/Kconfig"
+source "drivers/clk/hisilicon/Kconfig"
 source "drivers/clk/qcom/Kconfig"
 
 endmenu
 
-source "drivers/clk/bcm/Kconfig"
 source "drivers/clk/mvebu/Kconfig"
 
 source "drivers/clk/samsung/Kconfig"
+source "drivers/clk/tegra/Kconfig"
index 8732e4c5bf3c131678ec7ee97f9b6e98c2663a39..c4cf075a2320f7ae33327fd79e9d9525c15921c0 100644 (file)
@@ -38,6 +38,8 @@ obj-$(CONFIG_COMMON_CLK_RK808)                += clk-rk808.o
 obj-$(CONFIG_COMMON_CLK_S2MPS11)       += clk-s2mps11.o
 obj-$(CONFIG_COMMON_CLK_SI5351)                += clk-si5351.o
 obj-$(CONFIG_COMMON_CLK_SI570)         += clk-si570.o
+obj-$(CONFIG_COMMON_CLK_CDCE925)       += clk-cdce925.o
+obj-$(CONFIG_ARCH_STM32)               += clk-stm32f4.o
 obj-$(CONFIG_CLK_TWL6040)              += clk-twl6040.o
 obj-$(CONFIG_ARCH_U300)                        += clk-u300.o
 obj-$(CONFIG_ARCH_VT8500)              += clk-vt8500.o
@@ -45,19 +47,20 @@ obj-$(CONFIG_COMMON_CLK_WM831X)             += clk-wm831x.o
 obj-$(CONFIG_COMMON_CLK_XGENE)         += clk-xgene.o
 obj-$(CONFIG_COMMON_CLK_PWM)           += clk-pwm.o
 obj-$(CONFIG_COMMON_CLK_AT91)          += at91/
-obj-$(CONFIG_ARCH_BCM_MOBILE)          += bcm/
+obj-$(CONFIG_ARCH_BCM)                 += bcm/
 obj-$(CONFIG_ARCH_BERLIN)              += berlin/
-obj-$(CONFIG_ARCH_HI3xxx)              += hisilicon/
-obj-$(CONFIG_ARCH_HIP04)               += hisilicon/
-obj-$(CONFIG_ARCH_HIX5HD2)             += hisilicon/
+obj-$(CONFIG_ARCH_HISI)                        += hisilicon/
 obj-$(CONFIG_ARCH_MXC)                 += imx/
 obj-$(CONFIG_MACH_INGENIC)             += ingenic/
 obj-$(CONFIG_COMMON_CLK_KEYSTONE)      += keystone/
+obj-$(CONFIG_ARCH_MEDIATEK)            += mediatek/
 ifeq ($(CONFIG_COMMON_CLK), y)
 obj-$(CONFIG_ARCH_MMP)                 += mmp/
 endif
 obj-$(CONFIG_PLAT_ORION)               += mvebu/
+obj-$(CONFIG_ARCH_MESON)               += meson/
 obj-$(CONFIG_ARCH_MXS)                 += mxs/
+obj-$(CONFIG_ARCH_LPC18XX)             += nxp/
 obj-$(CONFIG_MACH_PISTACHIO)           += pistachio/
 obj-$(CONFIG_COMMON_CLK_PXA)           += pxa/
 obj-$(CONFIG_COMMON_CLK_QCOM)          += qcom/
index 59fa3cc96c9eb3411691970c63799996613dc8fe..c2400456a04471186ec210d8236951d9e4780901 100644 (file)
@@ -614,7 +614,7 @@ void __init of_at91sam9x5_clk_main_setup(struct device_node *np,
        const char *name = np->name;
        int i;
 
-       num_parents = of_count_phandle_with_args(np, "clocks", "#clock-cells");
+       num_parents = of_clk_get_parent_count(np);
        if (num_parents <= 0 || num_parents > 2)
                return;
 
index c1af80bcdf2082c8d0d7b32e28eb9122c59ac53f..f98eafe9b12dc92d1c68f99cd5d9cc86addfb75f 100644 (file)
@@ -224,7 +224,7 @@ of_at91_clk_master_setup(struct device_node *np, struct at91_pmc *pmc,
        const char *name = np->name;
        struct clk_master_characteristics *characteristics;
 
-       num_parents = of_count_phandle_with_args(np, "clocks", "#clock-cells");
+       num_parents = of_clk_get_parent_count(np);
        if (num_parents <= 0 || num_parents > MASTER_SOURCE_MAX)
                return;
 
index 86c8a073dcc32a20b98f4c862d5622b11ffe1d57..8c86c0f7847a55a0cdc257289c49c67777216d73 100644 (file)
@@ -237,7 +237,7 @@ of_at91_clk_prog_setup(struct device_node *np, struct at91_pmc *pmc,
        const char *name;
        struct device_node *progclknp;
 
-       num_parents = of_count_phandle_with_args(np, "clocks", "#clock-cells");
+       num_parents = of_clk_get_parent_count(np);
        if (num_parents <= 0 || num_parents > PROG_SOURCE_MAX)
                return;
 
index 2f13bd5246b5563ec399058029fcd0740c58c3d2..98a84a865fe1cf1b13a437741631b83804e32bf6 100644 (file)
@@ -373,7 +373,7 @@ void __init of_at91sam9x5_clk_slow_setup(struct device_node *np,
        const char *name = np->name;
        int i;
 
-       num_parents = of_count_phandle_with_args(np, "clocks", "#clock-cells");
+       num_parents = of_clk_get_parent_count(np);
        if (num_parents <= 0 || num_parents > 2)
                return;
 
@@ -451,7 +451,7 @@ void __init of_at91sam9260_clk_slow_setup(struct device_node *np,
        const char *name = np->name;
        int i;
 
-       num_parents = of_count_phandle_with_args(np, "clocks", "#clock-cells");
+       num_parents = of_clk_get_parent_count(np);
        if (num_parents != 2)
                return;
 
index 144d47ecfe63c6eb5dd383e2b396e4c59471244f..3817ea865ca258ecb8675f6e35c39be4f53fd424 100644 (file)
@@ -150,7 +150,7 @@ void __init of_at91sam9x5_clk_smd_setup(struct device_node *np,
        const char *parent_names[SMD_SOURCE_MAX];
        const char *name = np->name;
 
-       num_parents = of_count_phandle_with_args(np, "clocks", "#clock-cells");
+       num_parents = of_clk_get_parent_count(np);
        if (num_parents <= 0 || num_parents > SMD_SOURCE_MAX)
                return;
 
index 0b7c3e8840bae0ebf2c97cf276e44e8ab7116f6e..b0cbd2b1ff5957027a5643963d45038435e8b1ce 100644 (file)
@@ -378,7 +378,7 @@ void __init of_at91sam9x5_clk_usb_setup(struct device_node *np,
        const char *parent_names[USB_SOURCE_MAX];
        const char *name = np->name;
 
-       num_parents = of_count_phandle_with_args(np, "clocks", "#clock-cells");
+       num_parents = of_clk_get_parent_count(np);
        if (num_parents <= 0 || num_parents > USB_SOURCE_MAX)
                return;
 
index 3f27d21fb7297e70494bf35743f027f8c8005d73..39be2be82b0a04b730258b4a7154aa5be103cc73 100644 (file)
@@ -153,7 +153,7 @@ static int pmc_irq_domain_xlate(struct irq_domain *d,
        return 0;
 }
 
-static struct irq_domain_ops pmc_irq_ops = {
+static const struct irq_domain_ops pmc_irq_ops = {
        .map    = pmc_irq_map,
        .xlate  = pmc_irq_domain_xlate,
 };
index 75506e53075b95697727763ae155dcc8d7613402..88febf53b276a9253bfaf4045799197901180687 100644 (file)
@@ -7,3 +7,12 @@ config CLK_BCM_KONA
          Enable common clock framework support for Broadcom SoCs
          using "Kona" style clock control units, including those
          in the BCM281xx and BCM21664 families.
+
+config COMMON_CLK_IPROC
+       bool "Broadcom iProc clock support"
+       depends on ARCH_BCM_IPROC
+       depends on COMMON_CLK
+       default ARCH_BCM_IPROC
+       help
+         Enable common clock framework support for Broadcom SoCs
+         based on the iProc architecture
index 6297d05a9a1040a924fcf58914cf8ee2284c11ec..8a7a477862c7037d4d0ff1e9f3cfe0e736df6e51 100644 (file)
@@ -2,3 +2,5 @@ obj-$(CONFIG_CLK_BCM_KONA)      += clk-kona.o
 obj-$(CONFIG_CLK_BCM_KONA)     += clk-kona-setup.o
 obj-$(CONFIG_CLK_BCM_KONA)     += clk-bcm281xx.o
 obj-$(CONFIG_CLK_BCM_KONA)     += clk-bcm21664.o
+obj-$(CONFIG_COMMON_CLK_IPROC) += clk-iproc-armpll.o clk-iproc-pll.o clk-iproc-asiu.o
+obj-$(CONFIG_ARCH_BCM_CYGNUS)  += clk-cygnus.o
diff --git a/drivers/clk/bcm/clk-cygnus.c b/drivers/clk/bcm/clk-cygnus.c
new file mode 100644 (file)
index 0000000..316c603
--- /dev/null
@@ -0,0 +1,265 @@
+/*
+ * Copyright (C) 2014 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/clk-provider.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/clkdev.h>
+#include <linux/of_address.h>
+#include <linux/delay.h>
+
+#include <dt-bindings/clock/bcm-cygnus.h>
+#include "clk-iproc.h"
+
+#define reg_val(o, s, w) { .offset = o, .shift = s, .width = w, }
+
+#define aon_val(o, pw, ps, is) { .offset = o, .pwr_width = pw, \
+       .pwr_shift = ps, .iso_shift = is }
+
+#define sw_ctrl_val(o, s) { .offset = o, .shift = s, }
+
+#define asiu_div_val(o, es, hs, hw, ls, lw) \
+               { .offset = o, .en_shift = es, .high_shift = hs, \
+               .high_width = hw, .low_shift = ls, .low_width = lw }
+
+#define reset_val(o, rs, prs, kis, kiw, kps, kpw, kas, kaw) { .offset = o, \
+       .reset_shift = rs, .p_reset_shift = prs, .ki_shift = kis, \
+       .ki_width = kiw, .kp_shift = kps, .kp_width = kpw, .ka_shift = kas, \
+       .ka_width = kaw }
+
+#define vco_ctrl_val(uo, lo) { .u_offset = uo, .l_offset = lo }
+
+#define enable_val(o, es, hs, bs) { .offset = o, .enable_shift = es, \
+       .hold_shift = hs, .bypass_shift = bs }
+
+#define asiu_gate_val(o, es) { .offset = o, .en_shift = es }
+
+static void __init cygnus_armpll_init(struct device_node *node)
+{
+       iproc_armpll_setup(node);
+}
+CLK_OF_DECLARE(cygnus_armpll, "brcm,cygnus-armpll", cygnus_armpll_init);
+
+static const struct iproc_pll_ctrl genpll = {
+       .flags = IPROC_CLK_AON | IPROC_CLK_PLL_HAS_NDIV_FRAC |
+               IPROC_CLK_PLL_NEEDS_SW_CFG,
+       .aon = aon_val(0x0, 2, 1, 0),
+       .reset = reset_val(0x0, 11, 10, 4, 3, 0, 4, 7, 3),
+       .sw_ctrl = sw_ctrl_val(0x10, 31),
+       .ndiv_int = reg_val(0x10, 20, 10),
+       .ndiv_frac = reg_val(0x10, 0, 20),
+       .pdiv = reg_val(0x14, 0, 4),
+       .vco_ctrl = vco_ctrl_val(0x18, 0x1c),
+       .status = reg_val(0x28, 12, 1),
+};
+
+static const struct iproc_clk_ctrl genpll_clk[] = {
+       [BCM_CYGNUS_GENPLL_AXI21_CLK] = {
+               .channel = BCM_CYGNUS_GENPLL_AXI21_CLK,
+               .flags = IPROC_CLK_AON,
+               .enable = enable_val(0x4, 6, 0, 12),
+               .mdiv = reg_val(0x20, 0, 8),
+       },
+       [BCM_CYGNUS_GENPLL_250MHZ_CLK] = {
+               .channel = BCM_CYGNUS_GENPLL_250MHZ_CLK,
+               .flags = IPROC_CLK_AON,
+               .enable = enable_val(0x4, 7, 1, 13),
+               .mdiv = reg_val(0x20, 10, 8),
+       },
+       [BCM_CYGNUS_GENPLL_IHOST_SYS_CLK] = {
+               .channel = BCM_CYGNUS_GENPLL_IHOST_SYS_CLK,
+               .flags = IPROC_CLK_AON,
+               .enable = enable_val(0x4, 8, 2, 14),
+               .mdiv = reg_val(0x20, 20, 8),
+       },
+       [BCM_CYGNUS_GENPLL_ENET_SW_CLK] = {
+               .channel = BCM_CYGNUS_GENPLL_ENET_SW_CLK,
+               .flags = IPROC_CLK_AON,
+               .enable = enable_val(0x4, 9, 3, 15),
+               .mdiv = reg_val(0x24, 0, 8),
+       },
+       [BCM_CYGNUS_GENPLL_AUDIO_125_CLK] = {
+               .channel = BCM_CYGNUS_GENPLL_AUDIO_125_CLK,
+               .flags = IPROC_CLK_AON,
+               .enable = enable_val(0x4, 10, 4, 16),
+               .mdiv = reg_val(0x24, 10, 8),
+       },
+       [BCM_CYGNUS_GENPLL_CAN_CLK] = {
+               .channel = BCM_CYGNUS_GENPLL_CAN_CLK,
+               .flags = IPROC_CLK_AON,
+               .enable = enable_val(0x4, 11, 5, 17),
+               .mdiv = reg_val(0x24, 20, 8),
+       },
+};
+
+static void __init cygnus_genpll_clk_init(struct device_node *node)
+{
+       iproc_pll_clk_setup(node, &genpll, NULL, 0, genpll_clk,
+                           ARRAY_SIZE(genpll_clk));
+}
+CLK_OF_DECLARE(cygnus_genpll, "brcm,cygnus-genpll", cygnus_genpll_clk_init);
+
+static const struct iproc_pll_ctrl lcpll0 = {
+       .flags = IPROC_CLK_AON | IPROC_CLK_PLL_NEEDS_SW_CFG,
+       .aon = aon_val(0x0, 2, 5, 4),
+       .reset = reset_val(0x0, 31, 30, 27, 3, 23, 4, 19, 4),
+       .sw_ctrl = sw_ctrl_val(0x4, 31),
+       .ndiv_int = reg_val(0x4, 16, 10),
+       .pdiv = reg_val(0x4, 26, 4),
+       .vco_ctrl = vco_ctrl_val(0x10, 0x14),
+       .status = reg_val(0x18, 12, 1),
+};
+
+static const struct iproc_clk_ctrl lcpll0_clk[] = {
+       [BCM_CYGNUS_LCPLL0_PCIE_PHY_REF_CLK] = {
+               .channel = BCM_CYGNUS_LCPLL0_PCIE_PHY_REF_CLK,
+               .flags = IPROC_CLK_AON,
+               .enable = enable_val(0x0, 7, 1, 13),
+               .mdiv = reg_val(0x8, 0, 8),
+       },
+       [BCM_CYGNUS_LCPLL0_DDR_PHY_CLK] = {
+               .channel = BCM_CYGNUS_LCPLL0_DDR_PHY_CLK,
+               .flags = IPROC_CLK_AON,
+               .enable = enable_val(0x0, 8, 2, 14),
+               .mdiv = reg_val(0x8, 10, 8),
+       },
+       [BCM_CYGNUS_LCPLL0_SDIO_CLK] = {
+               .channel = BCM_CYGNUS_LCPLL0_SDIO_CLK,
+               .flags = IPROC_CLK_AON,
+               .enable = enable_val(0x0, 9, 3, 15),
+               .mdiv = reg_val(0x8, 20, 8),
+       },
+       [BCM_CYGNUS_LCPLL0_USB_PHY_REF_CLK] = {
+               .channel = BCM_CYGNUS_LCPLL0_USB_PHY_REF_CLK,
+               .flags = IPROC_CLK_AON,
+               .enable = enable_val(0x0, 10, 4, 16),
+               .mdiv = reg_val(0xc, 0, 8),
+       },
+       [BCM_CYGNUS_LCPLL0_SMART_CARD_CLK] = {
+               .channel = BCM_CYGNUS_LCPLL0_SMART_CARD_CLK,
+               .flags = IPROC_CLK_AON,
+               .enable = enable_val(0x0, 11, 5, 17),
+               .mdiv = reg_val(0xc, 10, 8),
+       },
+       [BCM_CYGNUS_LCPLL0_CH5_UNUSED] = {
+               .channel = BCM_CYGNUS_LCPLL0_CH5_UNUSED,
+               .flags = IPROC_CLK_AON,
+               .enable = enable_val(0x0, 12, 6, 18),
+               .mdiv = reg_val(0xc, 20, 8),
+       },
+};
+
+static void __init cygnus_lcpll0_clk_init(struct device_node *node)
+{
+       iproc_pll_clk_setup(node, &lcpll0, NULL, 0, lcpll0_clk,
+                           ARRAY_SIZE(lcpll0_clk));
+}
+CLK_OF_DECLARE(cygnus_lcpll0, "brcm,cygnus-lcpll0", cygnus_lcpll0_clk_init);
+
+/*
+ * MIPI PLL VCO frequency parameter table
+ */
+static const struct iproc_pll_vco_param mipipll_vco_params[] = {
+       /* rate (Hz) ndiv_int ndiv_frac pdiv */
+       { 750000000UL,   30,     0,        1 },
+       { 1000000000UL,  40,     0,        1 },
+       { 1350000000ul,  54,     0,        1 },
+       { 2000000000UL,  80,     0,        1 },
+       { 2100000000UL,  84,     0,        1 },
+       { 2250000000UL,  90,     0,        1 },
+       { 2500000000UL,  100,    0,        1 },
+       { 2700000000UL,  54,     0,        0 },
+       { 2975000000UL,  119,    0,        1 },
+       { 3100000000UL,  124,    0,        1 },
+       { 3150000000UL,  126,    0,        1 },
+};
+
+static const struct iproc_pll_ctrl mipipll = {
+       .flags = IPROC_CLK_PLL_ASIU | IPROC_CLK_PLL_HAS_NDIV_FRAC |
+                IPROC_CLK_NEEDS_READ_BACK,
+       .aon = aon_val(0x0, 4, 17, 16),
+       .asiu = asiu_gate_val(0x0, 3),
+       .reset = reset_val(0x0, 11, 10, 4, 3, 0, 4, 7, 4),
+       .ndiv_int = reg_val(0x10, 20, 10),
+       .ndiv_frac = reg_val(0x10, 0, 20),
+       .pdiv = reg_val(0x14, 0, 4),
+       .vco_ctrl = vco_ctrl_val(0x18, 0x1c),
+       .status = reg_val(0x28, 12, 1),
+};
+
+static const struct iproc_clk_ctrl mipipll_clk[] = {
+       [BCM_CYGNUS_MIPIPLL_CH0_UNUSED] = {
+               .channel = BCM_CYGNUS_MIPIPLL_CH0_UNUSED,
+               .flags = IPROC_CLK_NEEDS_READ_BACK,
+               .enable = enable_val(0x4, 12, 6, 18),
+               .mdiv = reg_val(0x20, 0, 8),
+       },
+       [BCM_CYGNUS_MIPIPLL_CH1_LCD] = {
+               .channel = BCM_CYGNUS_MIPIPLL_CH1_LCD,
+               .flags = IPROC_CLK_NEEDS_READ_BACK,
+               .enable = enable_val(0x4, 13, 7, 19),
+               .mdiv = reg_val(0x20, 10, 8),
+       },
+       [BCM_CYGNUS_MIPIPLL_CH2_V3D] = {
+               .channel = BCM_CYGNUS_MIPIPLL_CH2_V3D,
+               .flags = IPROC_CLK_NEEDS_READ_BACK,
+               .enable = enable_val(0x4, 14, 8, 20),
+               .mdiv = reg_val(0x20, 20, 8),
+       },
+       [BCM_CYGNUS_MIPIPLL_CH3_UNUSED] = {
+               .channel = BCM_CYGNUS_MIPIPLL_CH3_UNUSED,
+               .flags = IPROC_CLK_NEEDS_READ_BACK,
+               .enable = enable_val(0x4, 15, 9, 21),
+               .mdiv = reg_val(0x24, 0, 8),
+       },
+       [BCM_CYGNUS_MIPIPLL_CH4_UNUSED] = {
+               .channel = BCM_CYGNUS_MIPIPLL_CH4_UNUSED,
+               .flags = IPROC_CLK_NEEDS_READ_BACK,
+               .enable = enable_val(0x4, 16, 10, 22),
+               .mdiv = reg_val(0x24, 10, 8),
+       },
+       [BCM_CYGNUS_MIPIPLL_CH5_UNUSED] = {
+               .channel = BCM_CYGNUS_MIPIPLL_CH5_UNUSED,
+               .flags = IPROC_CLK_NEEDS_READ_BACK,
+               .enable = enable_val(0x4, 17, 11, 23),
+               .mdiv = reg_val(0x24, 20, 8),
+       },
+};
+
+static void __init cygnus_mipipll_clk_init(struct device_node *node)
+{
+       iproc_pll_clk_setup(node, &mipipll, mipipll_vco_params,
+                           ARRAY_SIZE(mipipll_vco_params), mipipll_clk,
+                           ARRAY_SIZE(mipipll_clk));
+}
+CLK_OF_DECLARE(cygnus_mipipll, "brcm,cygnus-mipipll", cygnus_mipipll_clk_init);
+
+static const struct iproc_asiu_div asiu_div[] = {
+       [BCM_CYGNUS_ASIU_KEYPAD_CLK] = asiu_div_val(0x0, 31, 16, 10, 0, 10),
+       [BCM_CYGNUS_ASIU_ADC_CLK] = asiu_div_val(0x4, 31, 16, 10, 0, 10),
+       [BCM_CYGNUS_ASIU_PWM_CLK] = asiu_div_val(0x8, 31, 16, 10, 0, 10),
+};
+
+static const struct iproc_asiu_gate asiu_gate[] = {
+       [BCM_CYGNUS_ASIU_KEYPAD_CLK] = asiu_gate_val(0x0, 7),
+       [BCM_CYGNUS_ASIU_ADC_CLK] = asiu_gate_val(0x0, 9),
+       [BCM_CYGNUS_ASIU_PWM_CLK] = asiu_gate_val(IPROC_CLK_INVALID_OFFSET, 0),
+};
+
+static void __init cygnus_asiu_init(struct device_node *node)
+{
+       iproc_asiu_setup(node, asiu_div, asiu_gate, ARRAY_SIZE(asiu_div));
+}
+CLK_OF_DECLARE(cygnus_asiu_clk, "brcm,cygnus-asiu-clk", cygnus_asiu_init);
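
The reg_val()/enable_val()/aon_val() helpers at the top of this new file are simply designated-initializer shorthands for small register-field descriptors (offset, shift, width and so on). For example, an entry such as

        .mdiv = reg_val(0x20, 0, 8),

describes an 8-bit post-divider field starting at bit 0 of register offset 0x20; the common iProc PLL code added elsewhere in this series (clk-iproc-pll.c, not shown in this excerpt) uses those three values to read and program the field.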
diff --git a/drivers/clk/bcm/clk-iproc-armpll.c b/drivers/clk/bcm/clk-iproc-armpll.c
new file mode 100644 (file)
index 0000000..a196ee2
--- /dev/null
@@ -0,0 +1,282 @@
+/*
+ * Copyright (C) 2014 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/clk-provider.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/clkdev.h>
+#include <linux/of_address.h>
+
+#define IPROC_CLK_MAX_FREQ_POLICY                    0x3
+#define IPROC_CLK_POLICY_FREQ_OFFSET                 0x008
+#define IPROC_CLK_POLICY_FREQ_POLICY_FREQ_SHIFT      8
+#define IPROC_CLK_POLICY_FREQ_POLICY_FREQ_MASK       0x7
+
+#define IPROC_CLK_PLLARMA_OFFSET                     0xc00
+#define IPROC_CLK_PLLARMA_LOCK_SHIFT                 28
+#define IPROC_CLK_PLLARMA_PDIV_SHIFT                 24
+#define IPROC_CLK_PLLARMA_PDIV_MASK                  0xf
+#define IPROC_CLK_PLLARMA_NDIV_INT_SHIFT             8
+#define IPROC_CLK_PLLARMA_NDIV_INT_MASK              0x3ff
+
+#define IPROC_CLK_PLLARMB_OFFSET                     0xc04
+#define IPROC_CLK_PLLARMB_NDIV_FRAC_MASK             0xfffff
+
+#define IPROC_CLK_PLLARMC_OFFSET                     0xc08
+#define IPROC_CLK_PLLARMC_BYPCLK_EN_SHIFT            8
+#define IPROC_CLK_PLLARMC_MDIV_MASK                  0xff
+
+#define IPROC_CLK_PLLARMCTL5_OFFSET                  0xc20
+#define IPROC_CLK_PLLARMCTL5_H_MDIV_MASK             0xff
+
+#define IPROC_CLK_PLLARM_OFFSET_OFFSET               0xc24
+#define IPROC_CLK_PLLARM_SW_CTL_SHIFT                29
+#define IPROC_CLK_PLLARM_NDIV_INT_OFFSET_SHIFT       20
+#define IPROC_CLK_PLLARM_NDIV_INT_OFFSET_MASK        0xff
+#define IPROC_CLK_PLLARM_NDIV_FRAC_OFFSET_MASK       0xfffff
+
+#define IPROC_CLK_ARM_DIV_OFFSET                     0xe00
+#define IPROC_CLK_ARM_DIV_PLL_SELECT_OVERRIDE_SHIFT  4
+#define IPROC_CLK_ARM_DIV_ARM_PLL_SELECT_MASK        0xf
+
+#define IPROC_CLK_POLICY_DBG_OFFSET                  0xec0
+#define IPROC_CLK_POLICY_DBG_ACT_FREQ_SHIFT          12
+#define IPROC_CLK_POLICY_DBG_ACT_FREQ_MASK           0x7
+
+enum iproc_arm_pll_fid {
+       ARM_PLL_FID_CRYSTAL_CLK   = 0,
+       ARM_PLL_FID_SYS_CLK       = 2,
+       ARM_PLL_FID_CH0_SLOW_CLK  = 6,
+       ARM_PLL_FID_CH1_FAST_CLK  = 7
+};
+
+struct iproc_arm_pll {
+       struct clk_hw hw;
+       void __iomem *base;
+       unsigned long rate;
+};
+
+#define to_iproc_arm_pll(hw) container_of(hw, struct iproc_arm_pll, hw)
+
+static unsigned int __get_fid(struct iproc_arm_pll *pll)
+{
+       u32 val;
+       unsigned int policy, fid, active_fid;
+
+       val = readl(pll->base + IPROC_CLK_ARM_DIV_OFFSET);
+       if (val & (1 << IPROC_CLK_ARM_DIV_PLL_SELECT_OVERRIDE_SHIFT))
+               policy = val & IPROC_CLK_ARM_DIV_ARM_PLL_SELECT_MASK;
+       else
+               policy = 0;
+
+       /* something is seriously wrong */
+       BUG_ON(policy > IPROC_CLK_MAX_FREQ_POLICY);
+
+       val = readl(pll->base + IPROC_CLK_POLICY_FREQ_OFFSET);
+       fid = (val >> (IPROC_CLK_POLICY_FREQ_POLICY_FREQ_SHIFT * policy)) &
+               IPROC_CLK_POLICY_FREQ_POLICY_FREQ_MASK;
+
+       val = readl(pll->base + IPROC_CLK_POLICY_DBG_OFFSET);
+       active_fid = IPROC_CLK_POLICY_DBG_ACT_FREQ_MASK &
+               (val >> IPROC_CLK_POLICY_DBG_ACT_FREQ_SHIFT);
+       if (fid != active_fid) {
+               pr_debug("%s: fid override %u->%u\n", __func__, fid,
+                               active_fid);
+               fid = active_fid;
+       }
+
+       pr_debug("%s: active fid: %u\n", __func__, fid);
+
+       return fid;
+}
+
+/*
+ * Determine the mdiv (post divider) based on the frequency ID being used.
+ * There are 4 sources that can be used to derive the output clock rate:
+ *    - 25 MHz Crystal
+ *    - System clock
+ *    - PLL channel 0 (slow clock)
+ *    - PLL channel 1 (fast clock)
+ */
+static int __get_mdiv(struct iproc_arm_pll *pll)
+{
+       unsigned int fid;
+       int mdiv;
+       u32 val;
+
+       fid = __get_fid(pll);
+
+       switch (fid) {
+       case ARM_PLL_FID_CRYSTAL_CLK:
+       case ARM_PLL_FID_SYS_CLK:
+               mdiv = 1;
+               break;
+
+       case ARM_PLL_FID_CH0_SLOW_CLK:
+               val = readl(pll->base + IPROC_CLK_PLLARMC_OFFSET);
+               mdiv = val & IPROC_CLK_PLLARMC_MDIV_MASK;
+               if (mdiv == 0)
+                       mdiv = 256;
+               break;
+
+       case ARM_PLL_FID_CH1_FAST_CLK:
+               val = readl(pll->base + IPROC_CLK_PLLARMCTL5_OFFSET);
+               mdiv = val & IPROC_CLK_PLLARMCTL5_H_MDIV_MASK;
+               if (mdiv == 0)
+                       mdiv = 256;
+               break;
+
+       default:
+               mdiv = -EFAULT;
+       }
+
+       return mdiv;
+}
+
+static unsigned int __get_ndiv(struct iproc_arm_pll *pll)
+{
+       u32 val;
+       unsigned int ndiv_int, ndiv_frac, ndiv;
+
+       val = readl(pll->base + IPROC_CLK_PLLARM_OFFSET_OFFSET);
+       if (val & (1 << IPROC_CLK_PLLARM_SW_CTL_SHIFT)) {
+               /*
+                * offset mode is active. Read the ndiv from the PLLARM OFFSET
+                * register
+                */
+               ndiv_int = (val >> IPROC_CLK_PLLARM_NDIV_INT_OFFSET_SHIFT) &
+                       IPROC_CLK_PLLARM_NDIV_INT_OFFSET_MASK;
+               if (ndiv_int == 0)
+                       ndiv_int = 256;
+
+               ndiv_frac = val & IPROC_CLK_PLLARM_NDIV_FRAC_OFFSET_MASK;
+       } else {
+               /* offset mode not active */
+               val = readl(pll->base + IPROC_CLK_PLLARMA_OFFSET);
+               ndiv_int = (val >> IPROC_CLK_PLLARMA_NDIV_INT_SHIFT) &
+                       IPROC_CLK_PLLARMA_NDIV_INT_MASK;
+               if (ndiv_int == 0)
+                       ndiv_int = 1024;
+
+               val = readl(pll->base + IPROC_CLK_PLLARMB_OFFSET);
+               ndiv_frac = val & IPROC_CLK_PLLARMB_NDIV_FRAC_MASK;
+       }
+
+       ndiv = (ndiv_int << 20) | ndiv_frac;
+
+       return ndiv;
+}
+
+/*
+ * The output frequency of the ARM PLL is calculated based on the ARM PLL
+ * divider values:
+ *   pdiv = ARM PLL pre-divider
+ *   ndiv = ARM PLL multiplier
+ *   mdiv = ARM PLL post divider
+ *
+ * The frequency is calculated by:
+ *   ((ndiv * parent clock rate) / pdiv) / mdiv
+ */
+static unsigned long iproc_arm_pll_recalc_rate(struct clk_hw *hw,
+               unsigned long parent_rate)
+{
+       struct iproc_arm_pll *pll = to_iproc_arm_pll(hw);
+       u32 val;
+       int mdiv;
+       u64 ndiv;
+       unsigned int pdiv;
+
+       /* in bypass mode, use parent rate */
+       val = readl(pll->base + IPROC_CLK_PLLARMC_OFFSET);
+       if (val & (1 << IPROC_CLK_PLLARMC_BYPCLK_EN_SHIFT)) {
+               pll->rate = parent_rate;
+               return pll->rate;
+       }
+
+       /* PLL needs to be locked */
+       val = readl(pll->base + IPROC_CLK_PLLARMA_OFFSET);
+       if (!(val & (1 << IPROC_CLK_PLLARMA_LOCK_SHIFT))) {
+               pll->rate = 0;
+               return 0;
+       }
+
+       pdiv = (val >> IPROC_CLK_PLLARMA_PDIV_SHIFT) &
+               IPROC_CLK_PLLARMA_PDIV_MASK;
+       if (pdiv == 0)
+               pdiv = 16;
+
+       ndiv = __get_ndiv(pll);
+       mdiv = __get_mdiv(pll);
+       if (mdiv <= 0) {
+               pll->rate = 0;
+               return 0;
+       }
+       pll->rate = (ndiv * parent_rate) >> 20;
+       pll->rate = (pll->rate / pdiv) / mdiv;
+
+       pr_debug("%s: ARM PLL rate: %lu. parent rate: %lu\n", __func__,
+                pll->rate, parent_rate);
+       pr_debug("%s: ndiv_int: %u, pdiv: %u, mdiv: %d\n", __func__,
+                (unsigned int)(ndiv >> 20), pdiv, mdiv);
+
+       return pll->rate;
+}
+
+static const struct clk_ops iproc_arm_pll_ops = {
+       .recalc_rate = iproc_arm_pll_recalc_rate,
+};
+
+void __init iproc_armpll_setup(struct device_node *node)
+{
+       int ret;
+       struct clk *clk;
+       struct iproc_arm_pll *pll;
+       struct clk_init_data init;
+       const char *parent_name;
+
+       pll = kzalloc(sizeof(*pll), GFP_KERNEL);
+       if (WARN_ON(!pll))
+               return;
+
+       pll->base = of_iomap(node, 0);
+       if (WARN_ON(!pll->base))
+               goto err_free_pll;
+
+       init.name = node->name;
+       init.ops = &iproc_arm_pll_ops;
+       init.flags = 0;
+       parent_name = of_clk_get_parent_name(node, 0);
+       init.parent_names = (parent_name ? &parent_name : NULL);
+       init.num_parents = (parent_name ? 1 : 0);
+       pll->hw.init = &init;
+
+       clk = clk_register(NULL, &pll->hw);
+       if (WARN_ON(IS_ERR(clk)))
+               goto err_iounmap;
+
+       ret = of_clk_add_provider(node, of_clk_src_simple_get, clk);
+       if (WARN_ON(ret))
+               goto err_clk_unregister;
+
+       return;
+
+err_clk_unregister:
+       clk_unregister(clk);
+err_iounmap:
+       iounmap(pll->base);
+err_free_pll:
+       kfree(pll);
+}
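
For reference, the 20-bit fixed-point arithmetic in iproc_arm_pll_recalc_rate() can be exercised outside the kernel. The stand-alone sketch below is illustrative only and is not part of the patch; the ndiv_int = 80, ndiv_frac = 0, pdiv = 1, mdiv = 2 values and the 25 MHz crystal are assumed examples rather than values read from real hardware.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t parent_rate = 25000000;		/* 25 MHz crystal */
	unsigned int ndiv_int = 80, ndiv_frac = 0;	/* assumed example values */
	unsigned int pdiv = 1, mdiv = 2;
	/* integer part in the upper bits, 20 fractional bits below */
	uint64_t ndiv = ((uint64_t)ndiv_int << 20) | ndiv_frac;
	uint64_t rate = (ndiv * parent_rate) >> 20;	/* 80 * 25 MHz = 2 GHz VCO */

	rate = (rate / pdiv) / mdiv;			/* 2 GHz / 1 / 2 = 1 GHz */
	printf("ARM PLL output: %llu Hz\n", (unsigned long long)rate);
	return 0;
}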
diff --git a/drivers/clk/bcm/clk-iproc-asiu.c b/drivers/clk/bcm/clk-iproc-asiu.c
new file mode 100644 (file)
index 0000000..e19c09c
--- /dev/null
@@ -0,0 +1,276 @@
+/*
+ * Copyright (C) 2014 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/clk-provider.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/clkdev.h>
+#include <linux/of_address.h>
+#include <linux/delay.h>
+
+#include "clk-iproc.h"
+
+struct iproc_asiu;
+
+struct iproc_asiu_clk {
+       struct clk_hw hw;
+       const char *name;
+       struct iproc_asiu *asiu;
+       unsigned long rate;
+       struct iproc_asiu_div div;
+       struct iproc_asiu_gate gate;
+};
+
+struct iproc_asiu {
+       void __iomem *div_base;
+       void __iomem *gate_base;
+
+       struct clk_onecell_data clk_data;
+       struct iproc_asiu_clk *clks;
+};
+
+#define to_asiu_clk(hw) container_of(hw, struct iproc_asiu_clk, hw)
+
+static int iproc_asiu_clk_enable(struct clk_hw *hw)
+{
+       struct iproc_asiu_clk *clk = to_asiu_clk(hw);
+       struct iproc_asiu *asiu = clk->asiu;
+       u32 val;
+
+       /* some clocks at the ASIU level are always enabled */
+       if (clk->gate.offset == IPROC_CLK_INVALID_OFFSET)
+               return 0;
+
+       val = readl(asiu->gate_base + clk->gate.offset);
+       val |= (1 << clk->gate.en_shift);
+       writel(val, asiu->gate_base + clk->gate.offset);
+
+       return 0;
+}
+
+static void iproc_asiu_clk_disable(struct clk_hw *hw)
+{
+       struct iproc_asiu_clk *clk = to_asiu_clk(hw);
+       struct iproc_asiu *asiu = clk->asiu;
+       u32 val;
+
+       /* some clocks at the ASIU level are always enabled */
+       if (clk->gate.offset == IPROC_CLK_INVALID_OFFSET)
+               return;
+
+       val = readl(asiu->gate_base + clk->gate.offset);
+       val &= ~(1 << clk->gate.en_shift);
+       writel(val, asiu->gate_base + clk->gate.offset);
+}
+
+static unsigned long iproc_asiu_clk_recalc_rate(struct clk_hw *hw,
+                                               unsigned long parent_rate)
+{
+       struct iproc_asiu_clk *clk = to_asiu_clk(hw);
+       struct iproc_asiu *asiu = clk->asiu;
+       u32 val;
+       unsigned int div_h, div_l;
+
+       if (parent_rate == 0) {
+               clk->rate = 0;
+               return 0;
+       }
+
+       /* if clock divisor is not enabled, simply return parent rate */
+       val = readl(asiu->div_base + clk->div.offset);
+       if ((val & (1 << clk->div.en_shift)) == 0) {
+               clk->rate = parent_rate;
+               return parent_rate;
+       }
+
+       /* clock rate = parent rate / ((high_div + 1) + (low_div + 1)) */
+       div_h = (val >> clk->div.high_shift) & bit_mask(clk->div.high_width);
+       div_h++;
+       div_l = (val >> clk->div.low_shift) & bit_mask(clk->div.low_width);
+       div_l++;
+
+       clk->rate = parent_rate / (div_h + div_l);
+       pr_debug("%s: rate: %lu. parent rate: %lu div_h: %u div_l: %u\n",
+                __func__, clk->rate, parent_rate, div_h, div_l);
+
+       return clk->rate;
+}
+
+static long iproc_asiu_clk_round_rate(struct clk_hw *hw, unsigned long rate,
+                                     unsigned long *parent_rate)
+{
+       unsigned int div;
+
+       if (rate == 0 || *parent_rate == 0)
+               return -EINVAL;
+
+       if (rate == *parent_rate)
+               return *parent_rate;
+
+       div = DIV_ROUND_UP(*parent_rate, rate);
+       if (div < 2)
+               return *parent_rate;
+
+       return *parent_rate / div;
+}
+
+static int iproc_asiu_clk_set_rate(struct clk_hw *hw, unsigned long rate,
+                                  unsigned long parent_rate)
+{
+       struct iproc_asiu_clk *clk = to_asiu_clk(hw);
+       struct iproc_asiu *asiu = clk->asiu;
+       unsigned int div, div_h, div_l;
+       u32 val;
+
+       if (rate == 0 || parent_rate == 0)
+               return -EINVAL;
+
+       /* simply disable the divisor if one wants the same rate as parent */
+       if (rate == parent_rate) {
+               val = readl(asiu->div_base + clk->div.offset);
+               val &= ~(1 << clk->div.en_shift);
+               writel(val, asiu->div_base + clk->div.offset);
+               return 0;
+       }
+
+       div = DIV_ROUND_UP(parent_rate, rate);
+       if (div < 2)
+               return -EINVAL;
+
+       div_h = div_l = div >> 1;
+       div_h--;
+       div_l--;
+
+       val = readl(asiu->div_base + clk->div.offset);
+       val |= 1 << clk->div.en_shift;
+       if (div_h) {
+               val &= ~(bit_mask(clk->div.high_width)
+                        << clk->div.high_shift);
+               val |= div_h << clk->div.high_shift;
+       } else {
+               val &= ~(bit_mask(clk->div.high_width)
+                        << clk->div.high_shift);
+       }
+       if (div_l) {
+               val &= ~(bit_mask(clk->div.low_width) << clk->div.low_shift);
+               val |= div_l << clk->div.low_shift;
+       } else {
+               val &= ~(bit_mask(clk->div.low_width) << clk->div.low_shift);
+       }
+       writel(val, asiu->div_base + clk->div.offset);
+
+       return 0;
+}
+
+static const struct clk_ops iproc_asiu_ops = {
+       .enable = iproc_asiu_clk_enable,
+       .disable = iproc_asiu_clk_disable,
+       .recalc_rate = iproc_asiu_clk_recalc_rate,
+       .round_rate = iproc_asiu_clk_round_rate,
+       .set_rate = iproc_asiu_clk_set_rate,
+};
+
+void __init iproc_asiu_setup(struct device_node *node,
+                            const struct iproc_asiu_div *div,
+                            const struct iproc_asiu_gate *gate,
+                            unsigned int num_clks)
+{
+       int i, ret;
+       struct iproc_asiu *asiu;
+
+       if (WARN_ON(!gate || !div))
+               return;
+
+       asiu = kzalloc(sizeof(*asiu), GFP_KERNEL);
+       if (WARN_ON(!asiu))
+               return;
+
+       asiu->clk_data.clk_num = num_clks;
+       asiu->clk_data.clks = kcalloc(num_clks, sizeof(*asiu->clk_data.clks),
+                                     GFP_KERNEL);
+       if (WARN_ON(!asiu->clk_data.clks))
+               goto err_clks;
+
+       asiu->clks = kcalloc(num_clks, sizeof(*asiu->clks), GFP_KERNEL);
+       if (WARN_ON(!asiu->clks))
+               goto err_asiu_clks;
+
+       asiu->div_base = of_iomap(node, 0);
+       if (WARN_ON(!asiu->div_base))
+               goto err_iomap_div;
+
+       asiu->gate_base = of_iomap(node, 1);
+       if (WARN_ON(!asiu->gate_base))
+               goto err_iomap_gate;
+
+       for (i = 0; i < num_clks; i++) {
+               struct clk_init_data init;
+               struct clk *clk;
+               const char *parent_name;
+               struct iproc_asiu_clk *asiu_clk;
+               const char *clk_name;
+
+               clk_name = kzalloc(IPROC_CLK_NAME_LEN, GFP_KERNEL);
+               if (WARN_ON(!clk_name))
+                       goto err_clk_register;
+
+               ret = of_property_read_string_index(node, "clock-output-names",
+                                                   i, &clk_name);
+               if (WARN_ON(ret))
+                       goto err_clk_register;
+
+               asiu_clk = &asiu->clks[i];
+               asiu_clk->name = clk_name;
+               asiu_clk->asiu = asiu;
+               asiu_clk->div = div[i];
+               asiu_clk->gate = gate[i];
+               init.name = clk_name;
+               init.ops = &iproc_asiu_ops;
+               init.flags = 0;
+               parent_name = of_clk_get_parent_name(node, 0);
+               init.parent_names = (parent_name ? &parent_name : NULL);
+               init.num_parents = (parent_name ? 1 : 0);
+               asiu_clk->hw.init = &init;
+
+               clk = clk_register(NULL, &asiu_clk->hw);
+               if (WARN_ON(IS_ERR(clk)))
+                       goto err_clk_register;
+               asiu->clk_data.clks[i] = clk;
+       }
+
+       ret = of_clk_add_provider(node, of_clk_src_onecell_get,
+                                 &asiu->clk_data);
+       if (WARN_ON(ret))
+               goto err_clk_register;
+
+       return;
+
+err_clk_register:
+       for (i = 0; i < num_clks; i++)
+               kfree(asiu->clks[i].name);
+       iounmap(asiu->gate_base);
+
+err_iomap_gate:
+       iounmap(asiu->div_base);
+
+err_iomap_div:
+       kfree(asiu->clks);
+
+err_asiu_clks:
+       kfree(asiu->clk_data.clks);
+
+err_clks:
+       kfree(asiu);
+}
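
The high/low divider split performed by iproc_asiu_clk_set_rate() and the recalc formula above can be checked with a small stand-alone sketch (not part of the patch); the 100 MHz parent and 25 MHz target below are assumed example numbers only.

#include <stdio.h>

int main(void)
{
	unsigned long parent_rate = 100000000, rate = 25000000;
	/* DIV_ROUND_UP(parent, rate), as in iproc_asiu_clk_set_rate() */
	unsigned int div = (parent_rate + rate - 1) / rate;
	/* the divider is split evenly between the two fields; odd values lose their low bit */
	unsigned int div_h = (div >> 1) - 1;	/* value programmed in the high field */
	unsigned int div_l = (div >> 1) - 1;	/* value programmed in the low field */
	/* recalc: parent / ((high + 1) + (low + 1)) */
	unsigned long recalc = parent_rate / ((div_h + 1) + (div_l + 1));

	/* prints: div=4 high=1 low=1 -> 25000000 Hz */
	printf("div=%u high=%u low=%u -> %lu Hz\n", div, div_h, div_l, recalc);
	return 0;
}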
diff --git a/drivers/clk/bcm/clk-iproc-pll.c b/drivers/clk/bcm/clk-iproc-pll.c
new file mode 100644 (file)
index 0000000..46fb84b
--- /dev/null
@@ -0,0 +1,716 @@
+/*
+ * Copyright (C) 2014 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/clk-provider.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/clkdev.h>
+#include <linux/of_address.h>
+#include <linux/delay.h>
+
+#include "clk-iproc.h"
+
+#define PLL_VCO_HIGH_SHIFT 19
+#define PLL_VCO_LOW_SHIFT  30
+
+/* number of delay loops waiting for PLL to lock */
+#define LOCK_DELAY 100
+
+/* number of VCO frequency bands */
+#define NUM_FREQ_BANDS 8
+
+#define NUM_KP_BANDS 3
+enum kp_band {
+       KP_BAND_MID = 0,
+       KP_BAND_HIGH,
+       KP_BAND_HIGH_HIGH
+};
+
+static const unsigned int kp_table[NUM_KP_BANDS][NUM_FREQ_BANDS] = {
+       { 5, 6, 6, 7, 7, 8, 9, 10 },
+       { 4, 4, 5, 5, 6, 7, 8, 9  },
+       { 4, 5, 5, 6, 7, 8, 9, 10 },
+};
+
+static const unsigned long ref_freq_table[NUM_FREQ_BANDS][2] = {
+       { 10000000,  12500000  },
+       { 12500000,  15000000  },
+       { 15000000,  20000000  },
+       { 20000000,  25000000  },
+       { 25000000,  50000000  },
+       { 50000000,  75000000  },
+       { 75000000,  100000000 },
+       { 100000000, 125000000 },
+};
+
+enum vco_freq_range {
+       VCO_LOW       = 700000000U,
+       VCO_MID       = 1200000000U,
+       VCO_HIGH      = 2200000000U,
+       VCO_HIGH_HIGH = 3100000000U,
+       VCO_MAX       = 4000000000U,
+};
+
+struct iproc_pll;
+
+struct iproc_clk {
+       struct clk_hw hw;
+       const char *name;
+       struct iproc_pll *pll;
+       unsigned long rate;
+       const struct iproc_clk_ctrl *ctrl;
+};
+
+struct iproc_pll {
+       void __iomem *pll_base;
+       void __iomem *pwr_base;
+       void __iomem *asiu_base;
+
+       const struct iproc_pll_ctrl *ctrl;
+       const struct iproc_pll_vco_param *vco_param;
+       unsigned int num_vco_entries;
+
+       struct clk_onecell_data clk_data;
+       struct iproc_clk *clks;
+};
+
+#define to_iproc_clk(hw) container_of(hw, struct iproc_clk, hw)
+
+/*
+ * Based on the target frequency, find a match from the VCO frequency parameter
+ * table and return its index
+ */
+static int pll_get_rate_index(struct iproc_pll *pll, unsigned int target_rate)
+{
+       int i;
+
+       for (i = 0; i < pll->num_vco_entries; i++)
+               if (target_rate == pll->vco_param[i].rate)
+                       break;
+
+       if (i >= pll->num_vco_entries)
+               return -EINVAL;
+
+       return i;
+}
+
+static int get_kp(unsigned long ref_freq, enum kp_band kp_index)
+{
+       int i;
+
+       if (ref_freq < ref_freq_table[0][0])
+               return -EINVAL;
+
+       for (i = 0; i < NUM_FREQ_BANDS; i++) {
+               if (ref_freq >= ref_freq_table[i][0] &&
+                   ref_freq < ref_freq_table[i][1])
+                       return kp_table[kp_index][i];
+       }
+       return -EINVAL;
+}
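
For reference, get_kp() simply indexes the tables above by reference-frequency band. A minimal stand-alone check (illustrative only), assuming a 25 MHz reference and the mid Kp band; the table values are copied from this file:

#include <stdio.h>

#define NUM_FREQ_BANDS 8

/* copies of kp_table[KP_BAND_MID] and ref_freq_table from above */
static const unsigned int kp_mid[NUM_FREQ_BANDS] = { 5, 6, 6, 7, 7, 8, 9, 10 };
static const unsigned long bands[NUM_FREQ_BANDS][2] = {
	{ 10000000, 12500000 },  { 12500000, 15000000 },
	{ 15000000, 20000000 },  { 20000000, 25000000 },
	{ 25000000, 50000000 },  { 50000000, 75000000 },
	{ 75000000, 100000000 }, { 100000000, 125000000 },
};

int main(void)
{
	unsigned long ref_freq = 25000000;	/* assumed example reference */
	int i;

	for (i = 0; i < NUM_FREQ_BANDS; i++)
		if (ref_freq >= bands[i][0] && ref_freq < bands[i][1])
			/* prints: band 4 -> Kp 7 */
			printf("band %d -> Kp %u\n", i, kp_mid[i]);
	return 0;
}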
+
+static int pll_wait_for_lock(struct iproc_pll *pll)
+{
+       int i;
+       const struct iproc_pll_ctrl *ctrl = pll->ctrl;
+
+       for (i = 0; i < LOCK_DELAY; i++) {
+               u32 val = readl(pll->pll_base + ctrl->status.offset);
+
+               if (val & (1 << ctrl->status.shift))
+                       return 0;
+               udelay(10);
+       }
+
+       return -EIO;
+}
+
+static void __pll_disable(struct iproc_pll *pll)
+{
+       const struct iproc_pll_ctrl *ctrl = pll->ctrl;
+       u32 val;
+
+       if (ctrl->flags & IPROC_CLK_PLL_ASIU) {
+               val = readl(pll->asiu_base + ctrl->asiu.offset);
+               val &= ~(1 << ctrl->asiu.en_shift);
+               writel(val, pll->asiu_base + ctrl->asiu.offset);
+       }
+
+       /* latch input value so core power can be shut down */
+       val = readl(pll->pwr_base + ctrl->aon.offset);
+       val |= (1 << ctrl->aon.iso_shift);
+       writel(val, pll->pwr_base + ctrl->aon.offset);
+
+       /* power down the core */
+       val &= ~(bit_mask(ctrl->aon.pwr_width) << ctrl->aon.pwr_shift);
+       writel(val, pll->pwr_base + ctrl->aon.offset);
+}
+
+static int __pll_enable(struct iproc_pll *pll)
+{
+       const struct iproc_pll_ctrl *ctrl = pll->ctrl;
+       u32 val;
+
+       /* power up the PLL and make sure it's not latched */
+       val = readl(pll->pwr_base + ctrl->aon.offset);
+       val |= bit_mask(ctrl->aon.pwr_width) << ctrl->aon.pwr_shift;
+       val &= ~(1 << ctrl->aon.iso_shift);
+       writel(val, pll->pwr_base + ctrl->aon.offset);
+
+       /* certain PLLs also need to be ungated from the ASIU top level */
+       if (ctrl->flags & IPROC_CLK_PLL_ASIU) {
+               val = readl(pll->asiu_base + ctrl->asiu.offset);
+               val |= (1 << ctrl->asiu.en_shift);
+               writel(val, pll->asiu_base + ctrl->asiu.offset);
+       }
+
+       return 0;
+}
+
+static void __pll_put_in_reset(struct iproc_pll *pll)
+{
+       u32 val;
+       const struct iproc_pll_ctrl *ctrl = pll->ctrl;
+       const struct iproc_pll_reset_ctrl *reset = &ctrl->reset;
+
+       val = readl(pll->pll_base + reset->offset);
+       val &= ~(1 << reset->reset_shift | 1 << reset->p_reset_shift);
+       writel(val, pll->pll_base + reset->offset);
+       if (unlikely(ctrl->flags & IPROC_CLK_NEEDS_READ_BACK))
+               readl(pll->pll_base + reset->offset);
+}
+
+static void __pll_bring_out_reset(struct iproc_pll *pll, unsigned int kp,
+                                 unsigned int ka, unsigned int ki)
+{
+       u32 val;
+       const struct iproc_pll_ctrl *ctrl = pll->ctrl;
+       const struct iproc_pll_reset_ctrl *reset = &ctrl->reset;
+
+       val = readl(pll->pll_base + reset->offset);
+       val &= ~(bit_mask(reset->ki_width) << reset->ki_shift |
+                bit_mask(reset->kp_width) << reset->kp_shift |
+                bit_mask(reset->ka_width) << reset->ka_shift);
+       val |=  ki << reset->ki_shift | kp << reset->kp_shift |
+               ka << reset->ka_shift;
+       val |= 1 << reset->reset_shift | 1 << reset->p_reset_shift;
+       writel(val, pll->pll_base + reset->offset);
+       if (unlikely(ctrl->flags & IPROC_CLK_NEEDS_READ_BACK))
+               readl(pll->pll_base + reset->offset);
+}
+
+static int pll_set_rate(struct iproc_clk *clk, unsigned int rate_index,
+                       unsigned long parent_rate)
+{
+       struct iproc_pll *pll = clk->pll;
+       const struct iproc_pll_vco_param *vco = &pll->vco_param[rate_index];
+       const struct iproc_pll_ctrl *ctrl = pll->ctrl;
+       int ka = 0, ki, kp, ret;
+       unsigned long rate = vco->rate;
+       u32 val;
+       enum kp_band kp_index;
+       unsigned long ref_freq;
+
+       /*
+        * reference frequency = parent frequency / PDIV
+        * If PDIV = 0, then it becomes a multiplier (x2)
+        */
+       if (vco->pdiv == 0)
+               ref_freq = parent_rate * 2;
+       else
+               ref_freq = parent_rate / vco->pdiv;
+
+       /* determine Ki and Kp index based on target VCO frequency */
+       if (rate >= VCO_LOW && rate < VCO_HIGH) {
+               ki = 4;
+               kp_index = KP_BAND_MID;
+       } else if (rate >= VCO_HIGH && rate < VCO_HIGH_HIGH) {
+               ki = 3;
+               kp_index = KP_BAND_HIGH;
+       } else if (rate >= VCO_HIGH_HIGH && rate < VCO_MAX) {
+               ki = 3;
+               kp_index = KP_BAND_HIGH_HIGH;
+       } else {
+               pr_err("%s: pll: %s has invalid rate: %lu\n", __func__,
+                               clk->name, rate);
+               return -EINVAL;
+       }
+
+       kp = get_kp(ref_freq, kp_index);
+       if (kp < 0) {
+               pr_err("%s: pll: %s has invalid kp\n", __func__, clk->name);
+               return kp;
+       }
+
+       ret = __pll_enable(pll);
+       if (ret) {
+               pr_err("%s: pll: %s fails to enable\n", __func__, clk->name);
+               return ret;
+       }
+
+       /* put PLL in reset */
+       __pll_put_in_reset(pll);
+
+       writel(0, pll->pll_base + ctrl->vco_ctrl.u_offset);
+       if (unlikely(ctrl->flags & IPROC_CLK_NEEDS_READ_BACK))
+               readl(pll->pll_base + ctrl->vco_ctrl.u_offset);
+       val = readl(pll->pll_base + ctrl->vco_ctrl.l_offset);
+
+       if (rate >= VCO_LOW && rate < VCO_MID)
+               val |= (1 << PLL_VCO_LOW_SHIFT);
+
+       if (rate < VCO_HIGH)
+               val &= ~(1 << PLL_VCO_HIGH_SHIFT);
+       else
+               val |= (1 << PLL_VCO_HIGH_SHIFT);
+
+       writel(val, pll->pll_base + ctrl->vco_ctrl.l_offset);
+       if (unlikely(ctrl->flags & IPROC_CLK_NEEDS_READ_BACK))
+               readl(pll->pll_base + ctrl->vco_ctrl.l_offset);
+
+       /* program integer part of NDIV */
+       val = readl(pll->pll_base + ctrl->ndiv_int.offset);
+       val &= ~(bit_mask(ctrl->ndiv_int.width) << ctrl->ndiv_int.shift);
+       val |= vco->ndiv_int << ctrl->ndiv_int.shift;
+       writel(val, pll->pll_base + ctrl->ndiv_int.offset);
+       if (unlikely(ctrl->flags & IPROC_CLK_NEEDS_READ_BACK))
+               readl(pll->pll_base + ctrl->ndiv_int.offset);
+
+       /* program fractional part of NDIV */
+       if (ctrl->flags & IPROC_CLK_PLL_HAS_NDIV_FRAC) {
+               val = readl(pll->pll_base + ctrl->ndiv_frac.offset);
+               val &= ~(bit_mask(ctrl->ndiv_frac.width) <<
+                        ctrl->ndiv_frac.shift);
+               val |= vco->ndiv_frac << ctrl->ndiv_frac.shift;
+               writel(val, pll->pll_base + ctrl->ndiv_frac.offset);
+               if (unlikely(ctrl->flags & IPROC_CLK_NEEDS_READ_BACK))
+                       readl(pll->pll_base + ctrl->ndiv_frac.offset);
+       }
+
+       /* program PDIV */
+       val = readl(pll->pll_base + ctrl->pdiv.offset);
+       val &= ~(bit_mask(ctrl->pdiv.width) << ctrl->pdiv.shift);
+       val |= vco->pdiv << ctrl->pdiv.shift;
+       writel(val, pll->pll_base + ctrl->pdiv.offset);
+       if (unlikely(ctrl->flags & IPROC_CLK_NEEDS_READ_BACK))
+               readl(pll->pll_base + ctrl->pdiv.offset);
+
+       __pll_bring_out_reset(pll, kp, ka, ki);
+
+       ret = pll_wait_for_lock(pll);
+       if (ret < 0) {
+               pr_err("%s: pll: %s failed to lock\n", __func__, clk->name);
+               return ret;
+       }
+
+       return 0;
+}
+
+static int iproc_pll_enable(struct clk_hw *hw)
+{
+       struct iproc_clk *clk = to_iproc_clk(hw);
+       struct iproc_pll *pll = clk->pll;
+
+       return __pll_enable(pll);
+}
+
+static void iproc_pll_disable(struct clk_hw *hw)
+{
+       struct iproc_clk *clk = to_iproc_clk(hw);
+       struct iproc_pll *pll = clk->pll;
+       const struct iproc_pll_ctrl *ctrl = pll->ctrl;
+
+       if (ctrl->flags & IPROC_CLK_AON)
+               return;
+
+       __pll_disable(pll);
+}
+
+static unsigned long iproc_pll_recalc_rate(struct clk_hw *hw,
+                                          unsigned long parent_rate)
+{
+       struct iproc_clk *clk = to_iproc_clk(hw);
+       struct iproc_pll *pll = clk->pll;
+       const struct iproc_pll_ctrl *ctrl = pll->ctrl;
+       u32 val;
+       u64 ndiv;
+       unsigned int ndiv_int, ndiv_frac, pdiv;
+
+       if (parent_rate == 0)
+               return 0;
+
+       /* PLL needs to be locked */
+       val = readl(pll->pll_base + ctrl->status.offset);
+       if ((val & (1 << ctrl->status.shift)) == 0) {
+               clk->rate = 0;
+               return 0;
+       }
+
+       /*
+        * PLL output frequency =
+        *
+        * (ndiv_int + ndiv_frac / 2^20) * (parent clock rate / pdiv)
+        */
+       val = readl(pll->pll_base + ctrl->ndiv_int.offset);
+       ndiv_int = (val >> ctrl->ndiv_int.shift) &
+               bit_mask(ctrl->ndiv_int.width);
+       ndiv = ndiv_int << ctrl->ndiv_int.shift;
+
+       if (ctrl->flags & IPROC_CLK_PLL_HAS_NDIV_FRAC) {
+               val = readl(pll->pll_base + ctrl->ndiv_frac.offset);
+               ndiv_frac = (val >> ctrl->ndiv_frac.shift) &
+                       bit_mask(ctrl->ndiv_frac.width);
+
+               if (ndiv_frac != 0)
+                       ndiv = (ndiv_int << ctrl->ndiv_int.shift) | ndiv_frac;
+       }
+
+       val = readl(pll->pll_base + ctrl->pdiv.offset);
+       pdiv = (val >> ctrl->pdiv.shift) & bit_mask(ctrl->pdiv.width);
+
+       clk->rate = (ndiv * parent_rate) >> ctrl->ndiv_int.shift;
+
+       if (pdiv == 0)
+               clk->rate *= 2;
+       else
+               clk->rate /= pdiv;
+
+       return clk->rate;
+}
+
+static long iproc_pll_round_rate(struct clk_hw *hw, unsigned long rate,
+                                unsigned long *parent_rate)
+{
+       unsigned i;
+       struct iproc_clk *clk = to_iproc_clk(hw);
+       struct iproc_pll *pll = clk->pll;
+
+       if (rate == 0 || *parent_rate == 0 || !pll->vco_param)
+               return -EINVAL;
+
+       for (i = 0; i < pll->num_vco_entries; i++) {
+               if (rate <= pll->vco_param[i].rate)
+                       break;
+       }
+
+       if (i == pll->num_vco_entries)
+               i--;
+
+       return pll->vco_param[i].rate;
+}
+
+static int iproc_pll_set_rate(struct clk_hw *hw, unsigned long rate,
+               unsigned long parent_rate)
+{
+       struct iproc_clk *clk = to_iproc_clk(hw);
+       struct iproc_pll *pll = clk->pll;
+       int rate_index, ret;
+
+       rate_index = pll_get_rate_index(pll, rate);
+       if (rate_index < 0)
+               return rate_index;
+
+       ret = pll_set_rate(clk, rate_index, parent_rate);
+       return ret;
+}
+
+static const struct clk_ops iproc_pll_ops = {
+       .enable = iproc_pll_enable,
+       .disable = iproc_pll_disable,
+       .recalc_rate = iproc_pll_recalc_rate,
+       .round_rate = iproc_pll_round_rate,
+       .set_rate = iproc_pll_set_rate,
+};
+
+static int iproc_clk_enable(struct clk_hw *hw)
+{
+       struct iproc_clk *clk = to_iproc_clk(hw);
+       const struct iproc_clk_ctrl *ctrl = clk->ctrl;
+       struct iproc_pll *pll = clk->pll;
+       u32 val;
+
+       /* channel enable is active low */
+       val = readl(pll->pll_base + ctrl->enable.offset);
+       val &= ~(1 << ctrl->enable.enable_shift);
+       writel(val, pll->pll_base + ctrl->enable.offset);
+
+       /* also make sure channel is not held */
+       val = readl(pll->pll_base + ctrl->enable.offset);
+       val &= ~(1 << ctrl->enable.hold_shift);
+       writel(val, pll->pll_base + ctrl->enable.offset);
+       if (unlikely(ctrl->flags & IPROC_CLK_NEEDS_READ_BACK))
+               readl(pll->pll_base + ctrl->enable.offset);
+
+       return 0;
+}
+
+static void iproc_clk_disable(struct clk_hw *hw)
+{
+       struct iproc_clk *clk = to_iproc_clk(hw);
+       const struct iproc_clk_ctrl *ctrl = clk->ctrl;
+       struct iproc_pll *pll = clk->pll;
+       u32 val;
+
+       if (ctrl->flags & IPROC_CLK_AON)
+               return;
+
+       val = readl(pll->pll_base + ctrl->enable.offset);
+       val |= 1 << ctrl->enable.enable_shift;
+       writel(val, pll->pll_base + ctrl->enable.offset);
+       if (unlikely(ctrl->flags & IPROC_CLK_NEEDS_READ_BACK))
+               readl(pll->pll_base + ctrl->enable.offset);
+}
+
+static unsigned long iproc_clk_recalc_rate(struct clk_hw *hw,
+               unsigned long parent_rate)
+{
+       struct iproc_clk *clk = to_iproc_clk(hw);
+       const struct iproc_clk_ctrl *ctrl = clk->ctrl;
+       struct iproc_pll *pll = clk->pll;
+       u32 val;
+       unsigned int mdiv;
+
+       if (parent_rate == 0)
+               return 0;
+
+       val = readl(pll->pll_base + ctrl->mdiv.offset);
+       mdiv = (val >> ctrl->mdiv.shift) & bit_mask(ctrl->mdiv.width);
+       if (mdiv == 0)
+               mdiv = 256;
+
+       clk->rate = parent_rate / mdiv;
+
+       return clk->rate;
+}
+
+static long iproc_clk_round_rate(struct clk_hw *hw, unsigned long rate,
+               unsigned long *parent_rate)
+{
+       unsigned int div;
+
+       if (rate == 0 || *parent_rate == 0)
+               return -EINVAL;
+
+       if (rate == *parent_rate)
+               return *parent_rate;
+
+       div = DIV_ROUND_UP(*parent_rate, rate);
+       if (div < 2)
+               return *parent_rate;
+
+       if (div > 256)
+               div = 256;
+
+       return *parent_rate / div;
+}
+
+static int iproc_clk_set_rate(struct clk_hw *hw, unsigned long rate,
+               unsigned long parent_rate)
+{
+       struct iproc_clk *clk = to_iproc_clk(hw);
+       const struct iproc_clk_ctrl *ctrl = clk->ctrl;
+       struct iproc_pll *pll = clk->pll;
+       u32 val;
+       unsigned int div;
+
+       if (rate == 0 || parent_rate == 0)
+               return -EINVAL;
+
+       div = DIV_ROUND_UP(parent_rate, rate);
+       if (div > 256)
+               return -EINVAL;
+
+       val = readl(pll->pll_base + ctrl->mdiv.offset);
+       if (div == 256) {
+               val &= ~(bit_mask(ctrl->mdiv.width) << ctrl->mdiv.shift);
+       } else {
+               val &= ~(bit_mask(ctrl->mdiv.width) << ctrl->mdiv.shift);
+               val |= div << ctrl->mdiv.shift;
+       }
+       writel(val, pll->pll_base + ctrl->mdiv.offset);
+       if (unlikely(ctrl->flags & IPROC_CLK_NEEDS_READ_BACK))
+               readl(pll->pll_base + ctrl->mdiv.offset);
+       clk->rate = parent_rate / div;
+
+       return 0;
+}
+
+static const struct clk_ops iproc_clk_ops = {
+       .enable = iproc_clk_enable,
+       .disable = iproc_clk_disable,
+       .recalc_rate = iproc_clk_recalc_rate,
+       .round_rate = iproc_clk_round_rate,
+       .set_rate = iproc_clk_set_rate,
+};
+
+/*
+ * Some PLLs require the PLL SW override bit to be set before changes can be
+ * applied to the PLL
+ */
+static void iproc_pll_sw_cfg(struct iproc_pll *pll)
+{
+       const struct iproc_pll_ctrl *ctrl = pll->ctrl;
+
+       if (ctrl->flags & IPROC_CLK_PLL_NEEDS_SW_CFG) {
+               u32 val;
+
+               val = readl(pll->pll_base + ctrl->sw_ctrl.offset);
+               val |= BIT(ctrl->sw_ctrl.shift);
+               writel(val, pll->pll_base + ctrl->sw_ctrl.offset);
+               if (unlikely(ctrl->flags & IPROC_CLK_NEEDS_READ_BACK))
+                       readl(pll->pll_base + ctrl->sw_ctrl.offset);
+       }
+}
+
+void __init iproc_pll_clk_setup(struct device_node *node,
+                               const struct iproc_pll_ctrl *pll_ctrl,
+                               const struct iproc_pll_vco_param *vco,
+                               unsigned int num_vco_entries,
+                               const struct iproc_clk_ctrl *clk_ctrl,
+                               unsigned int num_clks)
+{
+       int i, ret;
+       struct clk *clk;
+       struct iproc_pll *pll;
+       struct iproc_clk *iclk;
+       struct clk_init_data init;
+       const char *parent_name;
+
+       if (WARN_ON(!pll_ctrl) || WARN_ON(!clk_ctrl))
+               return;
+
+       pll = kzalloc(sizeof(*pll), GFP_KERNEL);
+       if (WARN_ON(!pll))
+               return;
+
+       pll->clk_data.clk_num = num_clks;
+       pll->clk_data.clks = kcalloc(num_clks, sizeof(*pll->clk_data.clks),
+                                    GFP_KERNEL);
+       if (WARN_ON(!pll->clk_data.clks))
+               goto err_clk_data;
+
+       pll->clks = kcalloc(num_clks, sizeof(*pll->clks), GFP_KERNEL);
+       if (WARN_ON(!pll->clks))
+               goto err_clks;
+
+       pll->pll_base = of_iomap(node, 0);
+       if (WARN_ON(!pll->pll_base))
+               goto err_pll_iomap;
+
+       pll->pwr_base = of_iomap(node, 1);
+       if (WARN_ON(!pll->pwr_base))
+               goto err_pwr_iomap;
+
+       /* some PLLs require gating control at the top ASIU level */
+       if (pll_ctrl->flags & IPROC_CLK_PLL_ASIU) {
+               pll->asiu_base = of_iomap(node, 2);
+               if (WARN_ON(!pll->asiu_base))
+                       goto err_asiu_iomap;
+       }
+
+       /* initialize and register the PLL itself */
+       pll->ctrl = pll_ctrl;
+
+       iclk = &pll->clks[0];
+       iclk->pll = pll;
+       iclk->name = node->name;
+
+       init.name = node->name;
+       init.ops = &iproc_pll_ops;
+       init.flags = 0;
+       parent_name = of_clk_get_parent_name(node, 0);
+       init.parent_names = (parent_name ? &parent_name : NULL);
+       init.num_parents = (parent_name ? 1 : 0);
+       iclk->hw.init = &init;
+
+       if (vco) {
+               pll->num_vco_entries = num_vco_entries;
+               pll->vco_param = vco;
+       }
+
+       iproc_pll_sw_cfg(pll);
+
+       clk = clk_register(NULL, &iclk->hw);
+       if (WARN_ON(IS_ERR(clk)))
+               goto err_pll_register;
+
+       pll->clk_data.clks[0] = clk;
+
+       /* now initialize and register all leaf clocks */
+       for (i = 1; i < num_clks; i++) {
+               const char *clk_name;
+
+               memset(&init, 0, sizeof(init));
+               parent_name = node->name;
+
+               clk_name = kzalloc(IPROC_CLK_NAME_LEN, GFP_KERNEL);
+               if (WARN_ON(!clk_name))
+                       goto err_clk_register;
+
+               ret = of_property_read_string_index(node, "clock-output-names",
+                                                   i, &clk_name);
+               if (WARN_ON(ret))
+                       goto err_clk_register;
+
+               iclk = &pll->clks[i];
+               iclk->name = clk_name;
+               iclk->pll = pll;
+               iclk->ctrl = &clk_ctrl[i];
+
+               init.name = clk_name;
+               init.ops = &iproc_clk_ops;
+               init.flags = 0;
+               init.parent_names = (parent_name ? &parent_name : NULL);
+               init.num_parents = (parent_name ? 1 : 0);
+               iclk->hw.init = &init;
+
+               clk = clk_register(NULL, &iclk->hw);
+               if (WARN_ON(IS_ERR(clk)))
+                       goto err_clk_register;
+
+               pll->clk_data.clks[i] = clk;
+       }
+
+       ret = of_clk_add_provider(node, of_clk_src_onecell_get, &pll->clk_data);
+       if (WARN_ON(ret))
+               goto err_clk_register;
+
+       return;
+
+err_clk_register:
+       for (i = 0; i < num_clks; i++) {
+               kfree(pll->clks[i].name);
+               clk_unregister(pll->clk_data.clks[i]);
+       }
+
+err_pll_register:
+       if (pll->asiu_base)
+               iounmap(pll->asiu_base);
+
+err_asiu_iomap:
+       iounmap(pll->pwr_base);
+
+err_pwr_iomap:
+       iounmap(pll->pll_base);
+
+err_pll_iomap:
+       kfree(pll->clks);
+
+err_clks:
+       kfree(pll->clk_data.clks);
+
+err_clk_data:
+       kfree(pll);
+}
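
Outside the driver, the same 20-bit fractional NDIV arithmetic used by iproc_pll_recalc_rate() can be tried in user space. The sketch below is illustrative only; ndiv_int = 56, ndiv_frac = 0x80000 and pdiv = 1 are assumed examples, not values from any iProc datasheet.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t parent_rate = 25000000;		/* 25 MHz reference */
	unsigned int ndiv_int = 56, ndiv_frac = 0x80000, pdiv = 1;
	/* ndiv_frac = 0x80000 is 0.5 in 20-bit fixed point */
	uint64_t ndiv = ((uint64_t)ndiv_int << 20) | ndiv_frac;
	/* rate = (ndiv_int + ndiv_frac / 2^20) * parent_rate / pdiv */
	uint64_t rate = (ndiv * parent_rate) >> 20;

	if (pdiv == 0)
		rate *= 2;	/* pdiv == 0 doubles, as in recalc_rate() */
	else
		rate /= pdiv;

	/* 56.5 * 25 MHz = 1412.5 MHz */
	printf("PLL output: %llu Hz\n", (unsigned long long)rate);
	return 0;
}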
diff --git a/drivers/clk/bcm/clk-iproc.h b/drivers/clk/bcm/clk-iproc.h
new file mode 100644 (file)
index 0000000..d834b7a
--- /dev/null
@@ -0,0 +1,178 @@
+/*
+ * Copyright (C) 2014 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CLK_IPROC_H
+#define _CLK_IPROC_H
+
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/spinlock.h>
+#include <linux/slab.h>
+#include <linux/device.h>
+#include <linux/of.h>
+#include <linux/clk-provider.h>
+
+#define IPROC_CLK_NAME_LEN 25
+#define IPROC_CLK_INVALID_OFFSET 0xffffffff
+#define bit_mask(width) ((1 << (width)) - 1)
+
+/* clocks that should not be disabled at runtime */
+#define IPROC_CLK_AON BIT(0)
+
+/* PLL that requires gating through ASIU */
+#define IPROC_CLK_PLL_ASIU BIT(1)
+
+/* PLL that has fractional part of the NDIV */
+#define IPROC_CLK_PLL_HAS_NDIV_FRAC BIT(2)
+
+/*
+ * Some of the iProc PLL/clocks may have an ASIC bug that requires read back
+ * of the same register following the write to flush the write transaction into
+ * the intended register
+ */
+#define IPROC_CLK_NEEDS_READ_BACK BIT(3)
+
+/*
+ * Some PLLs require the PLL SW override bit to be set before changes can be
+ * applied to the PLL
+ */
+#define IPROC_CLK_PLL_NEEDS_SW_CFG BIT(4)
+
+/*
+ * Parameters for VCO frequency configuration
+ *
+ * VCO frequency =
+ * (ndiv_int + ndiv_frac / 2^20) * (ref frequency / pdiv)
+ */
+struct iproc_pll_vco_param {
+       unsigned long rate;
+       unsigned int ndiv_int;
+       unsigned int ndiv_frac;
+       unsigned int pdiv;
+};
+
+struct iproc_clk_reg_op {
+       unsigned int offset;
+       unsigned int shift;
+       unsigned int width;
+};
+
+/*
+ * Clock gating control at the top ASIU level
+ */
+struct iproc_asiu_gate {
+       unsigned int offset;
+       unsigned int en_shift;
+};
+
+/*
+ * Control of powering on/off of a PLL
+ *
+ * Before powering off a PLL, input isolation (ISO) needs to be enabled
+ */
+struct iproc_pll_aon_pwr_ctrl {
+       unsigned int offset;
+       unsigned int pwr_width;
+       unsigned int pwr_shift;
+       unsigned int iso_shift;
+};
+
+/*
+ * Control of the PLL reset, with Ki, Kp, and Ka parameters
+ */
+struct iproc_pll_reset_ctrl {
+       unsigned int offset;
+       unsigned int reset_shift;
+       unsigned int p_reset_shift;
+       unsigned int ki_shift;
+       unsigned int ki_width;
+       unsigned int kp_shift;
+       unsigned int kp_width;
+       unsigned int ka_shift;
+       unsigned int ka_width;
+};
+
+/*
+ * To enable SW control of the PLL
+ */
+struct iproc_pll_sw_ctrl {
+       unsigned int offset;
+       unsigned int shift;
+};
+
+struct iproc_pll_vco_ctrl {
+       unsigned int u_offset;
+       unsigned int l_offset;
+};
+
+/*
+ * Main PLL control parameters
+ */
+struct iproc_pll_ctrl {
+       unsigned long flags;
+       struct iproc_pll_aon_pwr_ctrl aon;
+       struct iproc_asiu_gate asiu;
+       struct iproc_pll_reset_ctrl reset;
+       struct iproc_pll_sw_ctrl sw_ctrl;
+       struct iproc_clk_reg_op ndiv_int;
+       struct iproc_clk_reg_op ndiv_frac;
+       struct iproc_clk_reg_op pdiv;
+       struct iproc_pll_vco_ctrl vco_ctrl;
+       struct iproc_clk_reg_op status;
+};
+
+/*
+ * Controls enabling/disabling a PLL derived clock
+ */
+struct iproc_clk_enable_ctrl {
+       unsigned int offset;
+       unsigned int enable_shift;
+       unsigned int hold_shift;
+       unsigned int bypass_shift;
+};
+
+/*
+ * Main clock control parameters for clocks derived from the PLLs
+ */
+struct iproc_clk_ctrl {
+       unsigned int channel;
+       unsigned long flags;
+       struct iproc_clk_enable_ctrl enable;
+       struct iproc_clk_reg_op mdiv;
+};
+
+/*
+ * Divisor of the ASIU clocks
+ */
+struct iproc_asiu_div {
+       unsigned int offset;
+       unsigned int en_shift;
+       unsigned int high_shift;
+       unsigned int high_width;
+       unsigned int low_shift;
+       unsigned int low_width;
+};
+
+void __init iproc_armpll_setup(struct device_node *node);
+void __init iproc_pll_clk_setup(struct device_node *node,
+                               const struct iproc_pll_ctrl *pll_ctrl,
+                               const struct iproc_pll_vco_param *vco,
+                               unsigned int num_vco_entries,
+                               const struct iproc_clk_ctrl *clk_ctrl,
+                               unsigned int num_clks);
+void __init iproc_asiu_setup(struct device_node *node,
+                            const struct iproc_asiu_div *div,
+                            const struct iproc_asiu_gate *gate,
+                            unsigned int num_clks);
+
+#endif /* _CLK_IPROC_H */
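
To show how an SoC-specific clock file might consume these declarations, here is a hedged sketch of a hypothetical platform binding. Every register offset, shift, width and the "brcm,example-genpll" compatible string are made-up placeholders, not values from a real iProc datasheet; the VCO table is omitted, so rate changes on the PLL itself are left unsupported.

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/of.h>
#include <linux/clk-provider.h>

#include "clk-iproc.h"

/* hypothetical example register layout -- all numbers are placeholders */
static const struct iproc_pll_ctrl example_genpll = {
	.flags = IPROC_CLK_AON | IPROC_CLK_PLL_HAS_NDIV_FRAC,
	.aon = { .offset = 0x0, .pwr_width = 4, .pwr_shift = 4, .iso_shift = 3 },
	.reset = { .offset = 0x0, .reset_shift = 31, .p_reset_shift = 30,
		   .ki_shift = 16, .ki_width = 3, .kp_shift = 20, .kp_width = 4,
		   .ka_shift = 24, .ka_width = 3 },
	.ndiv_int = { .offset = 0x10, .shift = 20, .width = 10 },
	.ndiv_frac = { .offset = 0x10, .shift = 0, .width = 20 },
	.pdiv = { .offset = 0x14, .shift = 0, .width = 4 },
	.vco_ctrl = { .u_offset = 0x18, .l_offset = 0x1c },
	.status = { .offset = 0x28, .shift = 12, .width = 1 },
};

static const struct iproc_clk_ctrl example_leaf_clks[2] = {
	/* index 0 is the PLL itself and is not consulted by the setup code */
	[1] = {
		.channel = 1,
		.flags = IPROC_CLK_AON,
		.enable = { .offset = 0x4, .enable_shift = 6, .hold_shift = 7,
			    .bypass_shift = 8 },
		.mdiv = { .offset = 0x20, .shift = 0, .width = 8 },
	},
};

static void __init example_genpll_init(struct device_node *node)
{
	/* no VCO table: the PLL rate is treated as fixed in this sketch */
	iproc_pll_clk_setup(node, &example_genpll, NULL, 0, example_leaf_clks,
			    ARRAY_SIZE(example_leaf_clks));
}
CLK_OF_DECLARE(example_genpll, "brcm,example-genpll", example_genpll_init);

The matching device tree node would need two "reg" regions (PLL and power control, mapped by of_iomap() indices 0 and 1) and a "clock-output-names" list naming the PLL and its leaf clock.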
index e5aededdd3221cf34313228767eb72e755215b72..deaa7f962b84ac814eb2445d698d343d45018aec 100644 (file)
@@ -21,8 +21,6 @@
 #define selector_clear_exists(sel)     ((sel)->width = 0)
 #define trigger_clear_exists(trig)     FLAG_CLEAR(trig, TRIG, EXISTS)
 
-LIST_HEAD(ccu_list);   /* The list of set up CCUs */
-
 /* Validity checking */
 
 static bool ccu_data_offsets_valid(struct ccu_data *ccu)
@@ -773,7 +771,6 @@ static void kona_ccu_teardown(struct ccu_data *ccu)
 
        of_clk_del_provider(ccu->node); /* safe if never added */
        ccu_clks_teardown(ccu);
-       list_del(&ccu->links);
        of_node_put(ccu->node);
        ccu->node = NULL;
        iounmap(ccu->base);
@@ -847,7 +844,6 @@ void __init kona_dt_ccu_setup(struct ccu_data *ccu,
                goto out_err;
        }
        ccu->node = of_node_get(node);
-       list_add_tail(&ccu->links, &ccu_list);
 
        /*
         * Set up each defined kona clock and save the result in
index a0ef4f75d4573b2db17ec1435003c50bdd7ef350..79a98506c433bbbbc12bd50d0f7cf82a029d3b8b 100644 (file)
@@ -1240,7 +1240,7 @@ static bool __kona_clk_init(struct kona_clk *bcm_clk)
        default:
                BUG();
        }
-       return -EINVAL;
+       return false;
 }
 
 /* Set a CCU and all its clocks into their desired initial state */
index 6849a64baf6db9969a94203228fb0240bc7df09d..906576ec97b6f9176fa6e0cfe6390e2856dcfc06 100644 (file)
@@ -480,7 +480,6 @@ struct ccu_data {
        spinlock_t lock;        /* serialization lock */
        bool write_enabled;     /* write access is currently enabled */
        struct ccu_policy policy;
-       struct list_head links; /* for ccu_list */
        struct device_node *node;
        struct clk_onecell_data clk_data;
        const char *name;
@@ -492,7 +491,6 @@ struct ccu_data {
 #define KONA_CCU_COMMON(_prefix, _name, _ccuname)                          \
        .name           = #_name "_ccu",                                    \
        .lock           = __SPIN_LOCK_UNLOCKED(_name ## _ccu_data.lock),    \
-       .links          = LIST_HEAD_INIT(_name ## _ccu_data.links),         \
        .clk_data       = {                                                 \
                .clk_num = _prefix ## _ ## _ccuname ## _CCU_CLOCK_COUNT,    \
        }
index bdc506b03824f4f56c68f864ee1092f4ce648170..f4b8d324b083dfda89d3766cc6958b533b7802d4 100644 (file)
 #include <asm/div64.h>
 
 #include "berlin2-div.h"
-
-struct berlin2_pll_map {
-       const u8 vcodiv[16];
-       u8 mult;
-       u8 fbdiv_shift;
-       u8 rfdiv_shift;
-       u8 divsel_shift;
-};
+#include "berlin2-pll.h"
 
 struct berlin2_pll {
        struct clk_hw hw;
index 88f4ff6916fe72ff840dae2906f7a3082beee722..90897af8d9f74b6eb1ec7b4ee309c04f500aea4c 100644 (file)
@@ -274,7 +274,7 @@ static void __init asm9260_acc_init(struct device_node *np)
        u32 accuracy = 0;
 
        base = of_io_request_and_map(np, 0, np->name);
-       if (!base)
+       if (IS_ERR(base))
                panic("%s: unable to map resource", np->name);
 
        /* register pll */
index 0f6368ceec4c970a670cbe63297dcbfbef125869..c7c91a5ecf8be8a2ceab3b143d762f1daabd8810 100644 (file)
@@ -556,7 +556,7 @@ static int axmclk_probe(struct platform_device *pdev)
                return PTR_ERR(regmap);
 
        num_clks = ARRAY_SIZE(axmclk_clocks);
-       pr_info("axmclk: supporting %u clocks\n", num_clks);
+       pr_info("axmclk: supporting %zu clocks\n", num_clks);
        priv = devm_kzalloc(dev, sizeof(*priv) + sizeof(*priv->clks) * num_clks,
                            GFP_KERNEL);
        if (!priv)
index b8e4f8a822e9f334d6db9071136903e88699a280..f01164fada5dafd4e80d0eac8f5f0242847399dd 100644 (file)
@@ -94,7 +94,7 @@ static const char * const cdce706_source_name[] = {
        "clk_in0", "clk_in1",
 };
 
-static const char *cdce706_clkin_name[] = {
+static const char * const cdce706_clkin_name[] = {
        "clk_in",
 };
 
@@ -102,7 +102,7 @@ static const char * const cdce706_pll_name[] = {
        "pll1", "pll2", "pll3",
 };
 
-static const char *cdce706_divider_parent_name[] = {
+static const char * const cdce706_divider_parent_name[] = {
        "clk_in", "pll1", "pll2", "pll2", "pll3",
 };
 
@@ -666,6 +666,7 @@ static int cdce706_probe(struct i2c_client *client,
 
 static int cdce706_remove(struct i2c_client *client)
 {
+       of_clk_del_provider(client->dev.of_node);
        return 0;
 }
 
diff --git a/drivers/clk/clk-cdce925.c b/drivers/clk/clk-cdce925.c
new file mode 100644 (file)
index 0000000..85fafb4
--- /dev/null
@@ -0,0 +1,749 @@
+/*
+ * Driver for TI Dual PLL CDCE925 clock synthesizer
+ *
+ * This driver always connects Y1 to the input clock, Y2/Y3 to PLL1
+ * and Y4/Y5 to PLL2. PLL frequency is set on a first-come, first-served
+ * basis. Clients can directly request any frequency that the chip can
+ * deliver using the standard clk framework. In addition, the device can
+ * be configured and activated via the devicetree.
+ *
+ * Copyright (C) 2014, Topic Embedded Products
+ * Licensed under GPL
+ */
+#include <linux/clk-provider.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+#include <linux/gcd.h>
+
+/* The chip has 2 PLLs which can be routed through dividers to 5 outputs.
+ * Model this as 2 PLL clocks which are parents to the outputs.
+ */
+#define NUMBER_OF_PLLS 2
+#define NUMBER_OF_OUTPUTS      5
+
+#define CDCE925_REG_GLOBAL1    0x01
+#define CDCE925_REG_Y1SPIPDIVH 0x02
+#define CDCE925_REG_PDIVL      0x03
+#define CDCE925_REG_XCSEL      0x05
+/* PLL parameters start at 0x10, steps of 0x10 */
+#define CDCE925_OFFSET_PLL     0x10
+/* Add CDCE925_OFFSET_PLL * (pll) to these registers before sending */
+#define CDCE925_PLL_MUX_OUTPUTS        0x14
+#define CDCE925_PLL_MULDIV     0x18
+
+#define CDCE925_PLL_FREQUENCY_MIN       80000000ul
+#define CDCE925_PLL_FREQUENCY_MAX      230000000ul
+struct clk_cdce925_chip;
+
+struct clk_cdce925_output {
+       struct clk_hw hw;
+       struct clk_cdce925_chip *chip;
+       u8 index;
+       u16 pdiv; /* 1..127 for Y2-Y5; 1..1023 for Y1 */
+};
+#define to_clk_cdce925_output(_hw) \
+       container_of(_hw, struct clk_cdce925_output, hw)
+
+struct clk_cdce925_pll {
+       struct clk_hw hw;
+       struct clk_cdce925_chip *chip;
+       u8 index;
+       u16 m;   /* 1..511 */
+       u16 n;   /* 1..4095 */
+};
+#define to_clk_cdce925_pll(_hw)        container_of(_hw, struct clk_cdce925_pll, hw)
+
+struct clk_cdce925_chip {
+       struct regmap *regmap;
+       struct i2c_client *i2c_client;
+       struct clk_cdce925_pll pll[NUMBER_OF_PLLS];
+       struct clk_cdce925_output clk[NUMBER_OF_OUTPUTS];
+       struct clk *dt_clk[NUMBER_OF_OUTPUTS];
+       struct clk_onecell_data onecell;
+};
+
+/* ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** */
+
+static unsigned long cdce925_pll_calculate_rate(unsigned long parent_rate,
+       u16 n, u16 m)
+{
+       if ((!m || !n) || (m == n))
+               return parent_rate; /* In bypass mode runs at same frequency */
+       return mult_frac(parent_rate, (unsigned long)n, (unsigned long)m);
+}
+
+static unsigned long cdce925_pll_recalc_rate(struct clk_hw *hw,
+               unsigned long parent_rate)
+{
+       /* Output frequency of PLL is Fout = (Fin/Pdiv)*(N/M) */
+       struct clk_cdce925_pll *data = to_clk_cdce925_pll(hw);
+
+       return cdce925_pll_calculate_rate(parent_rate, data->n, data->m);
+}
+
+static void cdce925_pll_find_rate(unsigned long rate,
+               unsigned long parent_rate, u16 *n, u16 *m)
+{
+       unsigned long un;
+       unsigned long um;
+       unsigned long g;
+
+       if (rate <= parent_rate) {
+               /* Can always deliver parent_rate in bypass mode */
+               rate = parent_rate;
+               *n = 0;
+               *m = 0;
+       } else {
+               /* In PLL mode, need to apply min/max range */
+               if (rate < CDCE925_PLL_FREQUENCY_MIN)
+                       rate = CDCE925_PLL_FREQUENCY_MIN;
+               else if (rate > CDCE925_PLL_FREQUENCY_MAX)
+                       rate = CDCE925_PLL_FREQUENCY_MAX;
+
+               g = gcd(rate, parent_rate);
+               um = parent_rate / g;
+               un = rate / g;
+               /* When outside hw range, reduce to fit (rounding errors) */
+               while ((un > 4095) || (um > 511)) {
+                       un >>= 1;
+                       um >>= 1;
+               }
+               if (un == 0)
+                       un = 1;
+               if (um == 0)
+                       um = 1;
+
+               *n = un;
+               *m = um;
+       }
+}
+
+static long cdce925_pll_round_rate(struct clk_hw *hw, unsigned long rate,
+               unsigned long *parent_rate)
+{
+       u16 n, m;
+
+       cdce925_pll_find_rate(rate, *parent_rate, &n, &m);
+       return (long)cdce925_pll_calculate_rate(*parent_rate, n, m);
+}
+
+static int cdce925_pll_set_rate(struct clk_hw *hw, unsigned long rate,
+               unsigned long parent_rate)
+{
+       struct clk_cdce925_pll *data = to_clk_cdce925_pll(hw);
+
+       if (!rate || (rate == parent_rate)) {
+               data->m = 0; /* Bypass mode */
+               data->n = 0;
+               return 0;
+       }
+
+       if ((rate < CDCE925_PLL_FREQUENCY_MIN) ||
+               (rate > CDCE925_PLL_FREQUENCY_MAX)) {
+               pr_debug("%s: rate %lu outside PLL range.\n", __func__, rate);
+               return -EINVAL;
+       }
+
+       if (rate < parent_rate) {
+               pr_debug("%s: rate %lu less than parent rate %lu.\n", __func__,
+                       rate, parent_rate);
+               return -EINVAL;
+       }
+
+       cdce925_pll_find_rate(rate, parent_rate, &data->n, &data->m);
+       return 0;
+}
+
+
+/* calculate p = max(0, 4 - int(log2 (n/m))) */
+static u8 cdce925_pll_calc_p(u16 n, u16 m)
+{
+       u8 p;
+       u16 r = n / m;
+
+       if (r >= 16)
+               return 0;
+       p = 4;
+       while (r > 1) {
+               r >>= 1;
+               --p;
+       }
+       return p;
+}
+
+/* Returns VCO range bits for VCO1_0_RANGE */
+static u8 cdce925_pll_calc_range_bits(struct clk_hw *hw, u16 n, u16 m)
+{
+       struct clk *parent = clk_get_parent(hw->clk);
+       unsigned long rate = clk_get_rate(parent);
+
+       rate = mult_frac(rate, (unsigned long)n, (unsigned long)m);
+       if (rate >= 175000000)
+               return 0x3;
+       if (rate >= 150000000)
+               return 0x02;
+       if (rate >= 125000000)
+               return 0x01;
+       return 0x00;
+}
+
+/* The chip is controlled over I2C, so register access may sleep; hence
+ * everything must happen in (un)prepare rather than enable/disable. */
+static int cdce925_pll_prepare(struct clk_hw *hw)
+{
+       struct clk_cdce925_pll *data = to_clk_cdce925_pll(hw);
+       u16 n = data->n;
+       u16 m = data->m;
+       u16 r;
+       u8 q;
+       u8 p;
+       u16 nn;
+       u8 pll[4]; /* Bits are spread out over 4 byte registers */
+       u8 reg_ofs = data->index * CDCE925_OFFSET_PLL;
+       unsigned i;
+
+       if ((!m || !n) || (m == n)) {
+               /* Set PLL mux to bypass mode, leave the rest as is */
+               regmap_update_bits(data->chip->regmap,
+                       reg_ofs + CDCE925_PLL_MUX_OUTPUTS, 0x80, 0x80);
+       } else {
+               /* According to data sheet: */
+               /* p = max(0, 4 - int(log2 (n/m))) */
+               p = cdce925_pll_calc_p(n, m);
+               /* nn = n * 2^p */
+               nn = n * BIT(p);
+               /* q = int(nn/m) */
+               q = nn / m;
+               if ((q < 16) || (q > 63)) {
+                       pr_debug("%s invalid q=%d\n", __func__, q);
+                       return -EINVAL;
+               }
+               r = nn - (m*q);
+               if (r > 511) {
+                       pr_debug("%s invalid r=%d\n", __func__, r);
+                       return -EINVAL;
+               }
+               pr_debug("%s n=%d m=%d p=%d q=%d r=%d\n", __func__,
+                       n, m, p, q, r);
+               /* encode into register bits */
+               pll[0] = n >> 4;
+               pll[1] = ((n & 0x0F) << 4) | ((r >> 5) & 0x0F);
+               pll[2] = ((r & 0x1F) << 3) | ((q >> 3) & 0x07);
+               pll[3] = ((q & 0x07) << 5) | (p << 2) |
+                               cdce925_pll_calc_range_bits(hw, n, m);
+               /* Write to registers */
+               for (i = 0; i < ARRAY_SIZE(pll); ++i)
+                       regmap_write(data->chip->regmap,
+                               reg_ofs + CDCE925_PLL_MULDIV + i, pll[i]);
+               /* Enable PLL */
+               regmap_update_bits(data->chip->regmap,
+                       reg_ofs + CDCE925_PLL_MUX_OUTPUTS, 0x80, 0x00);
+       }
+
+       return 0;
+}
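
The p/q/r encoding steps spelled out in the comments of cdce925_pll_prepare() can be reproduced stand-alone. The sketch below is illustrative only; n = 20 and m = 3 (roughly a 27 MHz input multiplied to 180 MHz) are assumed example PLL settings, not values for any particular board.

#include <stdio.h>

/* same computation as cdce925_pll_calc_p(): p = max(0, 4 - int(log2(n/m))) */
static unsigned int calc_p(unsigned int n, unsigned int m)
{
	unsigned int p = 4, r = n / m;

	if (r >= 16)
		return 0;
	while (r > 1) {
		r >>= 1;
		--p;
	}
	return p;
}

int main(void)
{
	unsigned int n = 20, m = 3;	/* e.g. 27 MHz * 20/3 = 180 MHz */
	unsigned int p = calc_p(n, m);
	unsigned int nn = n << p;	/* nn = n * 2^p */
	unsigned int q = nn / m;	/* q = int(nn / m), must stay in 16..63 */
	unsigned int r = nn - m * q;	/* remainder, must fit in 9 bits */

	/* prints: n=20 m=3 -> p=2 q=26 r=2 */
	printf("n=%u m=%u -> p=%u q=%u r=%u\n", n, m, p, q, r);
	return 0;
}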
+
+static void cdce925_pll_unprepare(struct clk_hw *hw)
+{
+       struct clk_cdce925_pll *data = to_clk_cdce925_pll(hw);
+       u8 reg_ofs = data->index * CDCE925_OFFSET_PLL;
+
+       regmap_update_bits(data->chip->regmap,
+                       reg_ofs + CDCE925_PLL_MUX_OUTPUTS, 0x80, 0x80);
+}
+
+static const struct clk_ops cdce925_pll_ops = {
+       .prepare = cdce925_pll_prepare,
+       .unprepare = cdce925_pll_unprepare,
+       .recalc_rate = cdce925_pll_recalc_rate,
+       .round_rate = cdce925_pll_round_rate,
+       .set_rate = cdce925_pll_set_rate,
+};
+
+
+static void cdce925_clk_set_pdiv(struct clk_cdce925_output *data, u16 pdiv)
+{
+       switch (data->index) {
+       case 0:
+               regmap_update_bits(data->chip->regmap,
+                       CDCE925_REG_Y1SPIPDIVH,
+                       0x03, (pdiv >> 8) & 0x03);
+               regmap_write(data->chip->regmap, 0x03, pdiv & 0xFF);
+               break;
+       case 1:
+               regmap_update_bits(data->chip->regmap, 0x16, 0x7F, pdiv);
+               break;
+       case 2:
+               regmap_update_bits(data->chip->regmap, 0x17, 0x7F, pdiv);
+               break;
+       case 3:
+               regmap_update_bits(data->chip->regmap, 0x26, 0x7F, pdiv);
+               break;
+       case 4:
+               regmap_update_bits(data->chip->regmap, 0x27, 0x7F, pdiv);
+               break;
+       }
+}
+
+static void cdce925_clk_activate(struct clk_cdce925_output *data)
+{
+       switch (data->index) {
+       case 0:
+               regmap_update_bits(data->chip->regmap,
+                       CDCE925_REG_Y1SPIPDIVH, 0x0c, 0x0c);
+               break;
+       case 1:
+       case 2:
+               regmap_update_bits(data->chip->regmap, 0x14, 0x03, 0x03);
+               break;
+       case 3:
+       case 4:
+               regmap_update_bits(data->chip->regmap, 0x24, 0x03, 0x03);
+               break;
+       }
+}
+
+static int cdce925_clk_prepare(struct clk_hw *hw)
+{
+       struct clk_cdce925_output *data = to_clk_cdce925_output(hw);
+
+       cdce925_clk_set_pdiv(data, data->pdiv);
+       cdce925_clk_activate(data);
+       return 0;
+}
+
+static void cdce925_clk_unprepare(struct clk_hw *hw)
+{
+       struct clk_cdce925_output *data = to_clk_cdce925_output(hw);
+
+       /* Disable clock by setting divider to "0" */
+       cdce925_clk_set_pdiv(data, 0);
+}
+
+static unsigned long cdce925_clk_recalc_rate(struct clk_hw *hw,
+               unsigned long parent_rate)
+{
+       struct clk_cdce925_output *data = to_clk_cdce925_output(hw);
+
+       if (data->pdiv)
+               return parent_rate / data->pdiv;
+       return 0;
+}
+
+static u16 cdce925_calc_divider(unsigned long rate,
+               unsigned long parent_rate)
+{
+       unsigned long divider;
+
+       if (!rate)
+               return 0;
+       if (rate >= parent_rate)
+               return 1;
+
+       divider = DIV_ROUND_CLOSEST(parent_rate, rate);
+       if (divider > 0x7F)
+               divider = 0x7F;
+
+       return (u16)divider;
+}
+
+static unsigned long cdce925_clk_best_parent_rate(
+       struct clk_hw *hw, unsigned long rate)
+{
+       struct clk *pll = clk_get_parent(hw->clk);
+       struct clk *root = clk_get_parent(pll);
+       unsigned long root_rate = clk_get_rate(root);
+       unsigned long best_rate_error = rate;
+       u16 pdiv_min;
+       u16 pdiv_max;
+       u16 pdiv_best;
+       u16 pdiv_now;
+
+       if (root_rate % rate == 0)
+               return root_rate; /* Don't need the PLL, use bypass */
+
+       pdiv_min = (u16)max(1ul, DIV_ROUND_UP(CDCE925_PLL_FREQUENCY_MIN, rate));
+       pdiv_max = (u16)min(127ul, CDCE925_PLL_FREQUENCY_MAX / rate);
+
+       if (pdiv_min > pdiv_max)
+               return 0; /* No can do? */
+
+       pdiv_best = pdiv_min;
+       for (pdiv_now = pdiv_min; pdiv_now < pdiv_max; ++pdiv_now) {
+               unsigned long target_rate = rate * pdiv_now;
+               long pll_rate = clk_round_rate(pll, target_rate);
+               unsigned long actual_rate;
+               unsigned long rate_error;
+
+               if (pll_rate <= 0)
+                       continue;
+               actual_rate = pll_rate / pdiv_now;
+               rate_error = abs((long)actual_rate - (long)rate);
+               if (rate_error < best_rate_error) {
+                       pdiv_best = pdiv_now;
+                       best_rate_error = rate_error;
+               }
+               /* TODO: Consider PLL frequency based on smaller n/m values
+                * and pick the better one if the error is equal */
+       }
+
+       return rate * pdiv_best;
+}
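
The divider bounds used by the search in cdce925_clk_best_parent_rate() follow directly from the 80-230 MHz PLL limits and the 7-bit output divider. A small stand-alone sketch, assuming a 4 MHz target output (an example value only):

#include <stdio.h>

#define PLL_MIN  80000000ul
#define PLL_MAX 230000000ul

int main(void)
{
	unsigned long rate = 4000000;	/* assumed 4 MHz target output */
	/* smallest divider whose PLL rate is still >= 80 MHz */
	unsigned long pdiv_min = (PLL_MIN + rate - 1) / rate;
	/* largest divider (capped at 127) whose PLL rate is still <= 230 MHz */
	unsigned long pdiv_max = PLL_MAX / rate;

	if (pdiv_max > 127)
		pdiv_max = 127;
	/* prints: pdiv range: 20..57 */
	printf("pdiv range: %lu..%lu\n", pdiv_min, pdiv_max);
	return 0;
}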
+
+static long cdce925_clk_round_rate(struct clk_hw *hw, unsigned long rate,
+               unsigned long *parent_rate)
+{
+       unsigned long l_parent_rate = *parent_rate;
+       u16 divider = cdce925_calc_divider(rate, l_parent_rate);
+
+       if (l_parent_rate / divider != rate) {
+               l_parent_rate = cdce925_clk_best_parent_rate(hw, rate);
+               divider = cdce925_calc_divider(rate, l_parent_rate);
+               *parent_rate = l_parent_rate;
+       }
+
+       if (divider)
+               return (long)(l_parent_rate / divider);
+       return 0;
+}
+
+static int cdce925_clk_set_rate(struct clk_hw *hw, unsigned long rate,
+               unsigned long parent_rate)
+{
+       struct clk_cdce925_output *data = to_clk_cdce925_output(hw);
+
+       data->pdiv = cdce925_calc_divider(rate, parent_rate);
+
+       return 0;
+}
+
+static const struct clk_ops cdce925_clk_ops = {
+       .prepare = cdce925_clk_prepare,
+       .unprepare = cdce925_clk_unprepare,
+       .recalc_rate = cdce925_clk_recalc_rate,
+       .round_rate = cdce925_clk_round_rate,
+       .set_rate = cdce925_clk_set_rate,
+};
+
+
+static u16 cdce925_y1_calc_divider(unsigned long rate,
+               unsigned long parent_rate)
+{
+       unsigned long divider;
+
+       if (!rate)
+               return 0;
+       if (rate >= parent_rate)
+               return 1;
+
+       divider = DIV_ROUND_CLOSEST(parent_rate, rate);
+       if (divider > 0x3FF) /* Y1 has 10-bit divider */
+               divider = 0x3FF;
+
+       return (u16)divider;
+}
+
+static long cdce925_clk_y1_round_rate(struct clk_hw *hw, unsigned long rate,
+               unsigned long *parent_rate)
+{
+       unsigned long l_parent_rate = *parent_rate;
+       u16 divider = cdce925_y1_calc_divider(rate, l_parent_rate);
+
+       if (divider)
+               return (long)(l_parent_rate / divider);
+       return 0;
+}
+
+static int cdce925_clk_y1_set_rate(struct clk_hw *hw, unsigned long rate,
+               unsigned long parent_rate)
+{
+       struct clk_cdce925_output *data = to_clk_cdce925_output(hw);
+
+       data->pdiv = cdce925_y1_calc_divider(rate, parent_rate);
+
+       return 0;
+}
+
+static const struct clk_ops cdce925_clk_y1_ops = {
+       .prepare = cdce925_clk_prepare,
+       .unprepare = cdce925_clk_unprepare,
+       .recalc_rate = cdce925_clk_recalc_rate,
+       .round_rate = cdce925_clk_y1_round_rate,
+       .set_rate = cdce925_clk_y1_set_rate,
+};
+
+
+static struct regmap_config cdce925_regmap_config = {
+       .name = "configuration0",
+       .reg_bits = 8,
+       .val_bits = 8,
+       .cache_type = REGCACHE_RBTREE,
+       .max_register = 0x2F,
+};
+
+#define CDCE925_I2C_COMMAND_BLOCK_TRANSFER     0x00
+#define CDCE925_I2C_COMMAND_BYTE_TRANSFER      0x80
+
+static int cdce925_regmap_i2c_write(
+       void *context, const void *data, size_t count)
+{
+       struct device *dev = context;
+       struct i2c_client *i2c = to_i2c_client(dev);
+       int ret;
+       u8 reg_data[2];
+
+       if (count != 2)
+               return -ENOTSUPP;
+
+       /* First byte is command code */
+       reg_data[0] = CDCE925_I2C_COMMAND_BYTE_TRANSFER | ((u8 *)data)[0];
+       reg_data[1] = ((u8 *)data)[1];
+
+       dev_dbg(&i2c->dev, "%s(%zu) %#x %#x\n", __func__, count,
+                       reg_data[0], reg_data[1]);
+
+       ret = i2c_master_send(i2c, reg_data, count);
+       if (likely(ret == count))
+               return 0;
+       else if (ret < 0)
+               return ret;
+       else
+               return -EIO;
+}
+
+static int cdce925_regmap_i2c_read(void *context,
+          const void *reg, size_t reg_size, void *val, size_t val_size)
+{
+       struct device *dev = context;
+       struct i2c_client *i2c = to_i2c_client(dev);
+       struct i2c_msg xfer[2];
+       int ret;
+       u8 reg_data[2];
+
+       if (reg_size != 1)
+               return -ENOTSUPP;
+
+       xfer[0].addr = i2c->addr;
+       xfer[0].flags = 0;
+       xfer[0].buf = reg_data;
+       if (val_size == 1) {
+               reg_data[0] =
+                       CDCE925_I2C_COMMAND_BYTE_TRANSFER | ((u8 *)reg)[0];
+               xfer[0].len = 1;
+       } else {
+               reg_data[0] =
+                       CDCE925_I2C_COMMAND_BLOCK_TRANSFER | ((u8 *)reg)[0];
+               reg_data[1] = val_size;
+               xfer[0].len = 2;
+       }
+
+       xfer[1].addr = i2c->addr;
+       xfer[1].flags = I2C_M_RD;
+       xfer[1].len = val_size;
+       xfer[1].buf = val;
+
+       ret = i2c_transfer(i2c->adapter, xfer, 2);
+       if (likely(ret == 2)) {
+               dev_dbg(&i2c->dev, "%s(%zu, %zu) %#x %#x\n", __func__,
+                               reg_size, val_size, reg_data[0], *((u8 *)val));
+               return 0;
+       } else if (ret < 0)
+               return ret;
+       else
+               return -EIO;
+}
+
+/* The CDCE925 uses an unusual register access protocol. Bulk mode is
+ * awkward to drive, so use the single-byte mode exclusively. */
+static struct regmap_bus regmap_cdce925_bus = {
+       .write = cdce925_regmap_i2c_write,
+       .read = cdce925_regmap_i2c_read,
+};
+
+static int cdce925_probe(struct i2c_client *client,
+               const struct i2c_device_id *id)
+{
+       struct clk_cdce925_chip *data;
+       struct device_node *node = client->dev.of_node;
+       const char *parent_name;
+       const char *pll_clk_name[NUMBER_OF_PLLS] = {NULL,};
+       struct clk_init_data init;
+       struct clk *clk;
+       u32 value;
+       int i;
+       int err;
+       struct device_node *np_output;
+       char child_name[6];
+
+       dev_dbg(&client->dev, "%s\n", __func__);
+       data = devm_kzalloc(&client->dev, sizeof(*data), GFP_KERNEL);
+       if (!data)
+               return -ENOMEM;
+
+       data->i2c_client = client;
+       data->regmap = devm_regmap_init(&client->dev, &regmap_cdce925_bus,
+                       &client->dev, &cdce925_regmap_config);
+       if (IS_ERR(data->regmap)) {
+               dev_err(&client->dev, "failed to allocate register map\n");
+               return PTR_ERR(data->regmap);
+       }
+       i2c_set_clientdata(client, data);
+
+       parent_name = of_clk_get_parent_name(node, 0);
+       if (!parent_name) {
+               dev_err(&client->dev, "missing parent clock\n");
+               return -ENODEV;
+       }
+       dev_dbg(&client->dev, "parent is: %s\n", parent_name);
+
+       if (of_property_read_u32(node, "xtal-load-pf", &value) == 0)
+               regmap_write(data->regmap,
+                       CDCE925_REG_XCSEL, (value << 3) & 0xF8);
+       /* PWDN bit */
+       regmap_update_bits(data->regmap, CDCE925_REG_GLOBAL1, BIT(4), 0);
+
+       /* Set input source for Y1 to be the XTAL */
+       regmap_update_bits(data->regmap, 0x02, BIT(7), 0);
+
+       init.ops = &cdce925_pll_ops;
+       init.flags = 0;
+       init.parent_names = &parent_name;
+       init.num_parents = parent_name ? 1 : 0;
+
+       /* Register PLL clocks */
+       for (i = 0; i < NUMBER_OF_PLLS; ++i) {
+               pll_clk_name[i] = kasprintf(GFP_KERNEL, "%s.pll%d",
+                       client->dev.of_node->name, i);
+               init.name = pll_clk_name[i];
+               data->pll[i].chip = data;
+               data->pll[i].hw.init = &init;
+               data->pll[i].index = i;
+               clk = devm_clk_register(&client->dev, &data->pll[i].hw);
+               if (IS_ERR(clk)) {
+                       dev_err(&client->dev, "Failed to register PLL %d\n", i);
+                       err = PTR_ERR(clk);
+                       goto error;
+               }
+               sprintf(child_name, "PLL%d", i+1);
+               np_output = of_get_child_by_name(node, child_name);
+               if (!np_output)
+                       continue;
+               if (!of_property_read_u32(np_output,
+                       "clock-frequency", &value)) {
+                       err = clk_set_rate(clk, value);
+                       if (err)
+                               dev_err(&client->dev,
+                                       "unable to set PLL frequency %u\n",
+                                       value);
+               }
+               if (!of_property_read_u32(np_output,
+                       "spread-spectrum", &value)) {
+                       u8 flag = of_property_read_bool(np_output,
+                               "spread-spectrum-center") ? 0x80 : 0x00;
+                       regmap_update_bits(data->regmap,
+                               0x16 + (i*CDCE925_OFFSET_PLL),
+                               0x80, flag);
+                       regmap_update_bits(data->regmap,
+                               0x12 + (i*CDCE925_OFFSET_PLL),
+                               0x07, value & 0x07);
+               }
+       }
+
+       /* Register output clock Y1 */
+       init.ops = &cdce925_clk_y1_ops;
+       init.flags = 0;
+       init.num_parents = 1;
+       init.parent_names = &parent_name; /* Mux Y1 to input */
+       init.name = kasprintf(GFP_KERNEL, "%s.Y1", client->dev.of_node->name);
+       data->clk[0].chip = data;
+       data->clk[0].hw.init = &init;
+       data->clk[0].index = 0;
+       data->clk[0].pdiv = 1;
+       clk = devm_clk_register(&client->dev, &data->clk[0].hw);
+       kfree(init.name); /* clock framework made a copy of the name */
+       if (IS_ERR(clk)) {
+               dev_err(&client->dev, "clock registration Y1 failed\n");
+               err = PTR_ERR(clk);
+               goto error;
+       }
+       data->dt_clk[0] = clk;
+
+       /* Register output clocks Y2 .. Y5*/
+       init.ops = &cdce925_clk_ops;
+       init.flags = CLK_SET_RATE_PARENT;
+       init.num_parents = 1;
+       for (i = 1; i < NUMBER_OF_OUTPUTS; ++i) {
+               init.name = kasprintf(GFP_KERNEL, "%s.Y%d",
+                       client->dev.of_node->name, i+1);
+               data->clk[i].chip = data;
+               data->clk[i].hw.init = &init;
+               data->clk[i].index = i;
+               data->clk[i].pdiv = 1;
+               switch (i) {
+               case 1:
+               case 2:
+                       /* Mux Y2/3 to PLL1 */
+                       init.parent_names = &pll_clk_name[0];
+                       break;
+               case 3:
+               case 4:
+                       /* Mux Y4/5 to PLL2 */
+                       init.parent_names = &pll_clk_name[1];
+                       break;
+               }
+               clk = devm_clk_register(&client->dev, &data->clk[i].hw);
+               kfree(init.name); /* clock framework made a copy of the name */
+               if (IS_ERR(clk)) {
+                       dev_err(&client->dev, "clock registration failed\n");
+                       err = PTR_ERR(clk);
+                       goto error;
+               }
+               data->dt_clk[i] = clk;
+       }
+
+       /* Register the output clocks */
+       data->onecell.clk_num = NUMBER_OF_OUTPUTS;
+       data->onecell.clks = data->dt_clk;
+       err = of_clk_add_provider(client->dev.of_node, of_clk_src_onecell_get,
+               &data->onecell);
+       if (err)
+               dev_err(&client->dev, "unable to add OF clock provider\n");
+
+       err = 0;
+
+error:
+       for (i = 0; i < NUMBER_OF_PLLS; ++i)
+               /* clock framework made a copy of the name */
+               kfree(pll_clk_name[i]);
+
+       return err;
+}
+
+static const struct i2c_device_id cdce925_id[] = {
+       { "cdce925", 0 },
+       { }
+};
+MODULE_DEVICE_TABLE(i2c, cdce925_id);
+
+static const struct of_device_id clk_cdce925_of_match[] = {
+       { .compatible = "ti,cdce925" },
+       { },
+};
+MODULE_DEVICE_TABLE(of, clk_cdce925_of_match);
+
+static struct i2c_driver cdce925_driver = {
+       .driver = {
+               .name = "cdce925",
+               .of_match_table = of_match_ptr(clk_cdce925_of_match),
+       },
+       .probe          = cdce925_probe,
+       .id_table       = cdce925_id,
+};
+module_i2c_driver(cdce925_driver);
+
+MODULE_AUTHOR("Mike Looijmans <mike.looijmans@topic.nl>");
+MODULE_DESCRIPTION("cdce925 driver");
+MODULE_LICENSE("GPL");
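
For reference, the Y2..Y5 rate selection above boils down to a closest-integer division clamped to the 7-bit PDIV field, with 0 meaning the output is gated off; cdce925_clk_best_parent_rate() then scans PLL multiples of the target when no exact divider exists. The sketch below is not part of the patch: it is a plain userspace C rendering of the cdce925_calc_divider() arithmetic, with DIV_ROUND_CLOSEST() reimplemented locally and purely illustrative rates.

/*
 * Editorial sketch, not kernel code: shows the rounding and 7-bit
 * clamping performed by cdce925_calc_divider().
 */
#include <stdio.h>

#define DIV_ROUND_CLOSEST(x, d) (((x) + (d) / 2) / (d))

static unsigned int calc_divider(unsigned long rate, unsigned long parent_rate)
{
        unsigned long divider;

        if (!rate)
                return 0;               /* divider 0 == output gated off */
        if (rate >= parent_rate)
                return 1;

        divider = DIV_ROUND_CLOSEST(parent_rate, rate);
        if (divider > 0x7F)             /* Y2..Y5 dividers are 7 bits wide */
                divider = 0x7F;
        return (unsigned int)divider;
}

int main(void)
{
        unsigned long parent = 100000000UL;     /* e.g. a 100 MHz PLL output */
        unsigned long rates[] = { 33000000UL, 12288000UL, 500000UL };

        for (unsigned int i = 0; i < sizeof(rates) / sizeof(rates[0]); i++) {
                unsigned int div = calc_divider(rates[i], parent);
                printf("want %lu Hz -> pdiv %u -> get %lu Hz\n",
                       rates[i], div, div ? parent / div : 0UL);
        }
        return 0;
}
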
index 956b7e54fa1c5f4f3583ac642c3f3f2ae255fee2..616f5aef3c26c7ad24334b1291d7d8178ef76ede 100644 (file)
@@ -188,7 +188,7 @@ static void clk_composite_disable(struct clk_hw *hw)
 }
 
 struct clk *clk_register_composite(struct device *dev, const char *name,
-                       const char **parent_names, int num_parents,
+                       const char * const *parent_names, int num_parents,
                        struct clk_hw *mux_hw, const struct clk_ops *mux_ops,
                        struct clk_hw *rate_hw, const struct clk_ops *rate_ops,
                        struct clk_hw *gate_hw, const struct clk_ops *gate_ops,
@@ -200,10 +200,8 @@ struct clk *clk_register_composite(struct device *dev, const char *name,
        struct clk_ops *clk_composite_ops;
 
        composite = kzalloc(sizeof(*composite), GFP_KERNEL);
-       if (!composite) {
-               pr_err("%s: could not allocate composite clk\n", __func__);
+       if (!composite)
                return ERR_PTR(-ENOMEM);
-       }
 
        init.name = name;
        init.flags = flags | CLK_IS_BASIC;
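
The clk_register_composite() signature change above is a const-correctness fix: with const char * const * the function promises not to replace the parent-name pointers, so callers can pass fully read-only name tables. A minimal illustration, not taken from the patch and using hypothetical names:

/*
 * Editorial sketch: a read-only table decays to "const char * const *",
 * which the const-correct parameter accepts without a cast.
 */
#include <stdio.h>

static const char * const demo_parents[] = { "parent_a", "parent_b" };

static void list_parents(const char * const *names, int n)
{
        for (int i = 0; i < n; i++)
                printf("%s\n", names[i]);       /* may read, must not overwrite names[i] */
}

int main(void)
{
        /* a plain "const char **" parameter would warn about discarding const here */
        list_parents(demo_parents, 2);
        return 0;
}
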
index 48a65b2b402785659d25120f6e18d70b478df4fe..43a218f35b190a21e62bc520cdac38f275f5da82 100644 (file)
@@ -106,8 +106,9 @@ static int __set_clk_rates(struct device_node *node, bool clk_supplier)
 
                        rc = clk_set_rate(clk, rate);
                        if (rc < 0)
-                               pr_err("clk: couldn't set %s clock rate: %d\n",
-                                      __clk_get_name(clk), rc);
+                               pr_err("clk: couldn't set %s clk rate to %d (%d), current rate: %ld\n",
+                                      __clk_get_name(clk), rate, rc,
+                                      clk_get_rate(clk));
                        clk_put(clk);
                }
                index++;
@@ -124,7 +125,7 @@ static int __set_clk_rates(struct device_node *node, bool clk_supplier)
  * and sets any specified clock parents and rates. The @clk_supplier argument
  * should be set to true if @node may be also a clock supplier of any clock
  * listed in its 'assigned-clocks' or 'assigned-clock-parents' properties.
- * If @clk_supplier is false the function exits returnning 0 as soon as it
+ * If @clk_supplier is false the function exits returning 0 as soon as it
  * determines the @node is also a supplier of any of the clocks.
  */
 int of_clk_set_defaults(struct device_node *node, bool clk_supplier)
index 25006a8bb8e6d5af8d145472fc76e2ef45281f09..706b5783c360dfc5ba60c6f7054374f5f06ff4f8 100644 (file)
@@ -430,11 +430,9 @@ static struct clk *_register_divider(struct device *dev, const char *name,
        }
 
        /* allocate the divider */
-       div = kzalloc(sizeof(struct clk_divider), GFP_KERNEL);
-       if (!div) {
-               pr_err("%s: could not allocate divider clk\n", __func__);
+       div = kzalloc(sizeof(*div), GFP_KERNEL);
+       if (!div)
                return ERR_PTR(-ENOMEM);
-       }
 
        init.name = name;
        init.ops = &clk_divider_ops;
index d9e3f671c2ea634012982c2228493cba8eb71903..fccabe497f6e5db42f6585acd9ecb1c4440bcb4e 100644 (file)
@@ -55,10 +55,16 @@ static long clk_factor_round_rate(struct clk_hw *hw, unsigned long rate,
 static int clk_factor_set_rate(struct clk_hw *hw, unsigned long rate,
                                unsigned long parent_rate)
 {
+       /*
+        * We must report success but we can do so unconditionally because
+        * clk_factor_round_rate returns values that ensure this call is a
+        * nop.
+        */
+
        return 0;
 }
 
-struct clk_ops clk_fixed_factor_ops = {
+const struct clk_ops clk_fixed_factor_ops = {
        .round_rate = clk_factor_round_rate,
        .set_rate = clk_factor_set_rate,
        .recalc_rate = clk_factor_recalc_rate,
@@ -74,10 +80,8 @@ struct clk *clk_register_fixed_factor(struct device *dev, const char *name,
        struct clk *clk;
 
        fix = kmalloc(sizeof(*fix), GFP_KERNEL);
-       if (!fix) {
-               pr_err("%s: could not allocate fixed factor clk\n", __func__);
+       if (!fix)
                return ERR_PTR(-ENOMEM);
-       }
 
        /* struct clk_fixed_factor assignments */
        fix->mult = mult;
index 0fc56ab6e844c88493e68f86b6a1fed8884a516e..f85ec8d1711fb7f36ac2e724308745dbf7b1ef47 100644 (file)
@@ -65,11 +65,9 @@ struct clk *clk_register_fixed_rate_with_accuracy(struct device *dev,
        struct clk_init_data init;
 
        /* allocate fixed-rate clock */
-       fixed = kzalloc(sizeof(struct clk_fixed_rate), GFP_KERNEL);
-       if (!fixed) {
-               pr_err("%s: could not allocate fixed clk\n", __func__);
+       fixed = kzalloc(sizeof(*fixed), GFP_KERNEL);
+       if (!fixed)
                return ERR_PTR(-ENOMEM);
-       }
 
        init.name = name;
        init.ops = &clk_fixed_rate_ops;
index 6aa72d9d79bad43d5c8f79f47fa30134d710a624..140eb5844dc4b86d1fb6b7034b5c375117dee0f8 100644 (file)
@@ -109,10 +109,8 @@ struct clk *clk_register_fractional_divider(struct device *dev,
        struct clk *clk;
 
        fd = kzalloc(sizeof(*fd), GFP_KERNEL);
-       if (!fd) {
-               dev_err(dev, "could not allocate fractional divider clk\n");
+       if (!fd)
                return ERR_PTR(-ENOMEM);
-       }
 
        init.name = name;
        init.ops = &clk_fractional_divider_ops;
index 3f0e4200cb5d4ca4a680c78479ac86ed116766a5..551dd067279402abbd45685315ee9df1c5f7c5d5 100644 (file)
@@ -135,11 +135,9 @@ struct clk *clk_register_gate(struct device *dev, const char *name,
        }
 
        /* allocate the gate */
-       gate = kzalloc(sizeof(struct clk_gate), GFP_KERNEL);
-       if (!gate) {
-               pr_err("%s: could not allocate gated clk\n", __func__);
+       gate = kzalloc(sizeof(*gate), GFP_KERNEL);
+       if (!gate)
                return ERR_PTR(-ENOMEM);
-       }
 
        init.name = name;
        init.ops = &clk_gate_ops;
index a71cabedda93c88d87230c038398413bb48e9c7b..f564e624fb935f3b2e4b77a768b6fae4b7d19563 100644 (file)
@@ -189,7 +189,7 @@ static struct clk *of_clk_gpio_gate_delayed_register_get(
 /**
  * of_gpio_gate_clk_setup() - Setup function for gpio controlled clock
  */
-void __init of_gpio_gate_clk_setup(struct device_node *node)
+static void __init of_gpio_gate_clk_setup(struct device_node *node)
 {
        struct clk_gpio_gate_delayed_register_data *data;
 
@@ -203,6 +203,5 @@ void __init of_gpio_gate_clk_setup(struct device_node *node)
 
        of_clk_add_provider(node, of_clk_gpio_gate_delayed_register_get, data);
 }
-EXPORT_SYMBOL_GPL(of_gpio_gate_clk_setup);
 CLK_OF_DECLARE(gpio_gate_clk, "gpio-gate-clock", of_gpio_gate_clk_setup);
 #endif
index ca80103ac1888f734fc25de4dd3ee6f3792ea1c9..d4c61985f4488ac1fac3ebc2f6061d98e922d2e3 100644 (file)
@@ -80,9 +80,9 @@ static struct clk *__init clk_register_pll(struct device *dev,
        return clk;
 }
 
-static const char const *cpu_parents[] = { "cpu_clk_div", "osc_33m_clk", };
-static const char const *ahb_parents[] = { "ahb_clk_div", "osc_33m_clk", };
-static const char const *dc_parents[] = { "dc_clk_div", "osc_33m_clk", };
+static const char * const cpu_parents[] = { "cpu_clk_div", "osc_33m_clk", };
+static const char * const ahb_parents[] = { "ahb_clk_div", "osc_33m_clk", };
+static const char * const dc_parents[] = { "dc_clk_div", "osc_33m_clk", };
 
 void __init ls1x_clk_init(void)
 {
index 6505049d50f1e54720864b58694ef0be26e314b9..35af9cb6da4ff3c06601139c7db544a5bf88578f 100644 (file)
@@ -31,6 +31,8 @@
 #include <linux/of.h>
 #include <linux/export.h>
 
+#include "clk-max-gen.h"
+
 struct max_gen_clk {
        struct regmap *regmap;
        u32 mask;
index 86cdb3a28629ab09394292e2b210eb5b689b4504..446c2fe76dc28635b28b0fb116aaf5cd7e79e30e 100644 (file)
@@ -23,6 +23,7 @@
 #include <linux/kernel.h>
 #include <linux/slab.h>
 #include <linux/err.h>
+#include <linux/module.h>
 #include <linux/platform_device.h>
 #include <linux/mfd/max77686.h>
 #include <linux/mfd/max77686-private.h>
index 0729dc723a8ff81099bc9e0071a9784f6fd9e4d3..74c49b93a6eba5c40f28342f7d4ab4225c477795 100644 (file)
@@ -22,6 +22,7 @@
 #include <linux/kernel.h>
 #include <linux/slab.h>
 #include <linux/err.h>
+#include <linux/module.h>
 #include <linux/platform_device.h>
 #include <linux/mfd/max77686-private.h>
 #include <linux/clk-provider.h>
index 30a3b6999e10e16581bdac42911e0d9ca2c24989..5181b89c3cb2f18de2047a70a0bc43c562be4326 100644 (file)
@@ -15,7 +15,7 @@
 #include <linux/of_address.h>
 #include <linux/clkdev.h>
 
-void __init moxart_of_pll_clk_init(struct device_node *node)
+static void __init moxart_of_pll_clk_init(struct device_node *node)
 {
        static void __iomem *base;
        struct clk *clk, *ref_clk;
@@ -53,7 +53,7 @@ void __init moxart_of_pll_clk_init(struct device_node *node)
 CLK_OF_DECLARE(moxart_pll_clock, "moxa,moxart-pll-clock",
               moxart_of_pll_clk_init);
 
-void __init moxart_of_apb_clk_init(struct device_node *node)
+static void __init moxart_of_apb_clk_init(struct device_node *node)
 {
        static void __iomem *base;
        struct clk *clk, *pll_clk;
index 69a094c3783d8eb2a2c0d3624f3a641f97a5d484..6066a01b20ea8d0e304dca2ff27bc34da9e0bf1c 100644 (file)
@@ -114,7 +114,8 @@ const struct clk_ops clk_mux_ro_ops = {
 EXPORT_SYMBOL_GPL(clk_mux_ro_ops);
 
 struct clk *clk_register_mux_table(struct device *dev, const char *name,
-               const char **parent_names, u8 num_parents, unsigned long flags,
+               const char * const *parent_names, u8 num_parents,
+               unsigned long flags,
                void __iomem *reg, u8 shift, u32 mask,
                u8 clk_mux_flags, u32 *table, spinlock_t *lock)
 {
@@ -166,7 +167,8 @@ struct clk *clk_register_mux_table(struct device *dev, const char *name,
 EXPORT_SYMBOL_GPL(clk_register_mux_table);
 
 struct clk *clk_register_mux(struct device *dev, const char *name,
-               const char **parent_names, u8 num_parents, unsigned long flags,
+               const char * const *parent_names, u8 num_parents,
+               unsigned long flags,
                void __iomem *reg, u8 shift, u8 width,
                u8 clk_mux_flags, spinlock_t *lock)
 {
index 05e04ce0f1488f7301ad5153f687860bf872f8bc..c9487179f25f46d79e511ace153e3a3e7078e49e 100644 (file)
@@ -503,8 +503,7 @@ static int __init nomadik_src_clk_init_debugfs(void)
                            NULL, NULL, &nomadik_src_clk_debugfs_ops);
        return 0;
 }
-
-module_init(nomadik_src_clk_init_debugfs);
+device_initcall(nomadik_src_clk_init_debugfs);
 
 #endif
 
index 30335d3b99afb197332505d5be045f2d5d75be4c..e39e1e680b3c6c09b117e7a32a35b7d1a808bf37 100644 (file)
@@ -552,7 +552,8 @@ static const struct clk_ops si5351_pll_ops = {
  * MSx_P2[19:0] = 128 * b - c * floor(128 * b/c) = (128*b) mod c
  * MSx_P3[19:0] = c
  *
- * MS[6,7] are integer (P1) divide only, P2 = 0, P3 = 0
+ * MS[6,7] are integer (P1) divide only, P1 = divide value,
+ * P2 and P3 are not applicable
  *
  * for 150MHz < fOUT <= 160MHz:
  *
@@ -606,9 +607,6 @@ static unsigned long si5351_msynth_recalc_rate(struct clk_hw *hw,
        if (!hwdata->params.valid)
                si5351_read_parameters(hwdata->drvdata, reg, &hwdata->params);
 
-       if (hwdata->params.p3 == 0)
-               return parent_rate;
-
        /*
         * multisync0-5: fOUT = (128 * P3 * fIN) / (P1*P3 + P2 + 512*P3)
         * multisync6-7: fOUT = fIN / P1
@@ -616,6 +614,8 @@ static unsigned long si5351_msynth_recalc_rate(struct clk_hw *hw,
        rate = parent_rate;
        if (hwdata->num > 5) {
                m = hwdata->params.p1;
+       } else if (hwdata->params.p3 == 0) {
+               return parent_rate;
        } else if ((si5351_reg_read(hwdata->drvdata, reg + 2) &
                    SI5351_OUTPUT_CLK_DIVBY4) == SI5351_OUTPUT_CLK_DIVBY4) {
                m = 4;
@@ -679,6 +679,16 @@ static long si5351_msynth_round_rate(struct clk_hw *hw, unsigned long rate,
                c = 1;
 
                *parent_rate = a * rate;
+       } else if (hwdata->num >= 6) {
+               /* determine the closest integer divider */
+               a = DIV_ROUND_CLOSEST(*parent_rate, rate);
+               if (a < SI5351_MULTISYNTH_A_MIN)
+                       a = SI5351_MULTISYNTH_A_MIN;
+               if (a > SI5351_MULTISYNTH67_A_MAX)
+                       a = SI5351_MULTISYNTH67_A_MAX;
+
+               b = 0;
+               c = 1;
        } else {
                unsigned long rfrac, denom;
 
@@ -692,9 +702,7 @@ static long si5351_msynth_round_rate(struct clk_hw *hw, unsigned long rate,
                a = *parent_rate / rate;
                if (a < SI5351_MULTISYNTH_A_MIN)
                        a = SI5351_MULTISYNTH_A_MIN;
-               if (hwdata->num >= 6 && a > SI5351_MULTISYNTH67_A_MAX)
-                       a = SI5351_MULTISYNTH67_A_MAX;
-               else if (a > SI5351_MULTISYNTH_A_MAX)
+               if (a > SI5351_MULTISYNTH_A_MAX)
                        a = SI5351_MULTISYNTH_A_MAX;
 
                /* find best approximation for b/c = fVCO mod fOUT */
@@ -723,6 +731,10 @@ static long si5351_msynth_round_rate(struct clk_hw *hw, unsigned long rate,
                hwdata->params.p3 = 1;
                hwdata->params.p2 = 0;
                hwdata->params.p1 = 0;
+       } else if (hwdata->num >= 6) {
+               hwdata->params.p3 = 0;
+               hwdata->params.p2 = 0;
+               hwdata->params.p1 = a;
        } else {
                hwdata->params.p3  = c;
                hwdata->params.p2  = (128 * b) % c;
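
To see what the new MS6/MS7 branch above stores, the sketch below (not part of the patch) encodes an a + b/c divider into P1/P2/P3 and evaluates both fOUT formulas quoted in the recalc comment. The P1 encoding used here, 128 * a + (128 * b) / c - 512, is the driver's usual parameter formula and is assumed rather than visible in this hunk.

/*
 * Editorial sketch: multisynth parameter encoding and the resulting
 * output rates, checked against fOUT = (128*P3*fIN)/(P1*P3 + P2 + 512*P3)
 * for MS0-5 and fOUT = fIN/P1 for MS6/7.
 */
#include <stdio.h>

int main(void)
{
        unsigned long fin = 700000000UL;        /* example VCO rate */
        unsigned long a = 23, b = 1, c = 3;     /* target divider 23 + 1/3 */

        unsigned long p1 = 128 * a + (128 * b) / c - 512;   /* assumed encoding */
        unsigned long p2 = (128 * b) % c;
        unsigned long p3 = c;

        unsigned long long num = 128ULL * p3 * fin;
        unsigned long long den = (unsigned long long)p1 * p3 + p2 + 512ULL * p3;
        printf("fractional: P1=%lu P2=%lu P3=%lu -> fOUT=%llu Hz\n",
               p1, p2, p3, num / den);

        /* MS6/7: integer divide only, P1 holds the divider, P2 = P3 = 0 */
        unsigned long p1_int = 14;
        printf("integer:    P1=%lu -> fOUT=%lu Hz\n", p1_int, fin / p1_int);
        return 0;
}
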
diff --git a/drivers/clk/clk-stm32f4.c b/drivers/clk/clk-stm32f4.c
new file mode 100644 (file)
index 0000000..b9b12a7
--- /dev/null
@@ -0,0 +1,380 @@
+/*
+ * Author: Daniel Thompson <daniel.thompson@linaro.org>
+ *
+ * Inspired by clk-asm9260.c .
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+
+#define STM32F4_RCC_PLLCFGR            0x04
+#define STM32F4_RCC_CFGR               0x08
+#define STM32F4_RCC_AHB1ENR            0x30
+#define STM32F4_RCC_AHB2ENR            0x34
+#define STM32F4_RCC_AHB3ENR            0x38
+#define STM32F4_RCC_APB1ENR            0x40
+#define STM32F4_RCC_APB2ENR            0x44
+
+struct stm32f4_gate_data {
+       u8      offset;
+       u8      bit_idx;
+       const char *name;
+       const char *parent_name;
+       unsigned long flags;
+};
+
+static const struct stm32f4_gate_data stm32f4_gates[] __initconst = {
+       { STM32F4_RCC_AHB1ENR,  0,      "gpioa",        "ahb_div" },
+       { STM32F4_RCC_AHB1ENR,  1,      "gpiob",        "ahb_div" },
+       { STM32F4_RCC_AHB1ENR,  2,      "gpioc",        "ahb_div" },
+       { STM32F4_RCC_AHB1ENR,  3,      "gpiod",        "ahb_div" },
+       { STM32F4_RCC_AHB1ENR,  4,      "gpioe",        "ahb_div" },
+       { STM32F4_RCC_AHB1ENR,  5,      "gpiof",        "ahb_div" },
+       { STM32F4_RCC_AHB1ENR,  6,      "gpiog",        "ahb_div" },
+       { STM32F4_RCC_AHB1ENR,  7,      "gpioh",        "ahb_div" },
+       { STM32F4_RCC_AHB1ENR,  8,      "gpioi",        "ahb_div" },
+       { STM32F4_RCC_AHB1ENR,  9,      "gpioj",        "ahb_div" },
+       { STM32F4_RCC_AHB1ENR, 10,      "gpiok",        "ahb_div" },
+       { STM32F4_RCC_AHB1ENR, 12,      "crc",          "ahb_div" },
+       { STM32F4_RCC_AHB1ENR, 18,      "bkpsra",       "ahb_div" },
+       { STM32F4_RCC_AHB1ENR, 20,      "ccmdatam",     "ahb_div" },
+       { STM32F4_RCC_AHB1ENR, 21,      "dma1",         "ahb_div" },
+       { STM32F4_RCC_AHB1ENR, 22,      "dma2",         "ahb_div" },
+       { STM32F4_RCC_AHB1ENR, 23,      "dma2d",        "ahb_div" },
+       { STM32F4_RCC_AHB1ENR, 25,      "ethmac",       "ahb_div" },
+       { STM32F4_RCC_AHB1ENR, 26,      "ethmactx",     "ahb_div" },
+       { STM32F4_RCC_AHB1ENR, 27,      "ethmacrx",     "ahb_div" },
+       { STM32F4_RCC_AHB1ENR, 28,      "ethmacptp",    "ahb_div" },
+       { STM32F4_RCC_AHB1ENR, 29,      "otghs",        "ahb_div" },
+       { STM32F4_RCC_AHB1ENR, 30,      "otghsulpi",    "ahb_div" },
+
+       { STM32F4_RCC_AHB2ENR,  0,      "dcmi",         "ahb_div" },
+       { STM32F4_RCC_AHB2ENR,  4,      "cryp",         "ahb_div" },
+       { STM32F4_RCC_AHB2ENR,  5,      "hash",         "ahb_div" },
+       { STM32F4_RCC_AHB2ENR,  6,      "rng",          "pll48" },
+       { STM32F4_RCC_AHB2ENR,  7,      "otgfs",        "pll48" },
+
+       { STM32F4_RCC_AHB3ENR,  0,      "fmc",          "ahb_div",
+               CLK_IGNORE_UNUSED },
+
+       { STM32F4_RCC_APB1ENR,  0,      "tim2",         "apb1_mul" },
+       { STM32F4_RCC_APB1ENR,  1,      "tim3",         "apb1_mul" },
+       { STM32F4_RCC_APB1ENR,  2,      "tim4",         "apb1_mul" },
+       { STM32F4_RCC_APB1ENR,  3,      "tim5",         "apb1_mul" },
+       { STM32F4_RCC_APB1ENR,  4,      "tim6",         "apb1_mul" },
+       { STM32F4_RCC_APB1ENR,  5,      "tim7",         "apb1_mul" },
+       { STM32F4_RCC_APB1ENR,  6,      "tim12",        "apb1_mul" },
+       { STM32F4_RCC_APB1ENR,  7,      "tim13",        "apb1_mul" },
+       { STM32F4_RCC_APB1ENR,  8,      "tim14",        "apb1_mul" },
+       { STM32F4_RCC_APB1ENR, 11,      "wwdg",         "apb1_div" },
+       { STM32F4_RCC_APB1ENR, 14,      "spi2",         "apb1_div" },
+       { STM32F4_RCC_APB1ENR, 15,      "spi3",         "apb1_div" },
+       { STM32F4_RCC_APB1ENR, 17,      "uart2",        "apb1_div" },
+       { STM32F4_RCC_APB1ENR, 18,      "uart3",        "apb1_div" },
+       { STM32F4_RCC_APB1ENR, 19,      "uart4",        "apb1_div" },
+       { STM32F4_RCC_APB1ENR, 20,      "uart5",        "apb1_div" },
+       { STM32F4_RCC_APB1ENR, 21,      "i2c1",         "apb1_div" },
+       { STM32F4_RCC_APB1ENR, 22,      "i2c2",         "apb1_div" },
+       { STM32F4_RCC_APB1ENR, 23,      "i2c3",         "apb1_div" },
+       { STM32F4_RCC_APB1ENR, 25,      "can1",         "apb1_div" },
+       { STM32F4_RCC_APB1ENR, 26,      "can2",         "apb1_div" },
+       { STM32F4_RCC_APB1ENR, 28,      "pwr",          "apb1_div" },
+       { STM32F4_RCC_APB1ENR, 29,      "dac",          "apb1_div" },
+       { STM32F4_RCC_APB1ENR, 30,      "uart7",        "apb1_div" },
+       { STM32F4_RCC_APB1ENR, 31,      "uart8",        "apb1_div" },
+
+       { STM32F4_RCC_APB2ENR,  0,      "tim1",         "apb2_mul" },
+       { STM32F4_RCC_APB2ENR,  1,      "tim8",         "apb2_mul" },
+       { STM32F4_RCC_APB2ENR,  4,      "usart1",       "apb2_div" },
+       { STM32F4_RCC_APB2ENR,  5,      "usart6",       "apb2_div" },
+       { STM32F4_RCC_APB2ENR,  8,      "adc1",         "apb2_div" },
+       { STM32F4_RCC_APB2ENR,  9,      "adc2",         "apb2_div" },
+       { STM32F4_RCC_APB2ENR, 10,      "adc3",         "apb2_div" },
+       { STM32F4_RCC_APB2ENR, 11,      "sdio",         "pll48" },
+       { STM32F4_RCC_APB2ENR, 12,      "spi1",         "apb2_div" },
+       { STM32F4_RCC_APB2ENR, 13,      "spi4",         "apb2_div" },
+       { STM32F4_RCC_APB2ENR, 14,      "syscfg",       "apb2_div" },
+       { STM32F4_RCC_APB2ENR, 16,      "tim9",         "apb2_mul" },
+       { STM32F4_RCC_APB2ENR, 17,      "tim10",        "apb2_mul" },
+       { STM32F4_RCC_APB2ENR, 18,      "tim11",        "apb2_mul" },
+       { STM32F4_RCC_APB2ENR, 20,      "spi5",         "apb2_div" },
+       { STM32F4_RCC_APB2ENR, 21,      "spi6",         "apb2_div" },
+       { STM32F4_RCC_APB2ENR, 22,      "sai1",         "apb2_div" },
+       { STM32F4_RCC_APB2ENR, 26,      "ltdc",         "apb2_div" },
+};
+
+/*
+ * MAX_CLKS is the maximum value in the enumeration below plus the combined
+ * hweight of stm32f42xx_gate_map (plus one).
+ */
+#define MAX_CLKS 74
+
+enum { SYSTICK, FCLK };
+
+/*
+ * This bitmask tells us which bit offsets (0..192) on STM32F4[23]xxx
+ * have gate bits associated with them. Its combined hweight is 71.
+ */
+static const u64 stm32f42xx_gate_map[] = { 0x000000f17ef417ffull,
+                                          0x0000000000000001ull,
+                                          0x04777f33f6fec9ffull };
+
+static struct clk *clks[MAX_CLKS];
+static DEFINE_SPINLOCK(stm32f4_clk_lock);
+static void __iomem *base;
+
+/*
+ * "Multiplier" device for APBx clocks.
+ *
+ * The APBx dividers are power-of-two dividers and, if *not* running in 1:1
+ * mode, they also tap out one of the low-order state bits to run the
+ * timers. ST datasheets represent this feature as a (conditional) clock
+ * multiplier.
+ */
+struct clk_apb_mul {
+       struct clk_hw hw;
+       u8 bit_idx;
+};
+
+#define to_clk_apb_mul(_hw) container_of(_hw, struct clk_apb_mul, hw)
+
+static unsigned long clk_apb_mul_recalc_rate(struct clk_hw *hw,
+                                            unsigned long parent_rate)
+{
+       struct clk_apb_mul *am = to_clk_apb_mul(hw);
+
+       if (readl(base + STM32F4_RCC_CFGR) & BIT(am->bit_idx))
+               return parent_rate * 2;
+
+       return parent_rate;
+}
+
+static long clk_apb_mul_round_rate(struct clk_hw *hw, unsigned long rate,
+                                  unsigned long *prate)
+{
+       struct clk_apb_mul *am = to_clk_apb_mul(hw);
+       unsigned long mult = 1;
+
+       if (readl(base + STM32F4_RCC_CFGR) & BIT(am->bit_idx))
+               mult = 2;
+
+       if (__clk_get_flags(hw->clk) & CLK_SET_RATE_PARENT) {
+               unsigned long best_parent = rate / mult;
+
+               *prate =
+                   __clk_round_rate(__clk_get_parent(hw->clk), best_parent);
+       }
+
+       return *prate * mult;
+}
+
+static int clk_apb_mul_set_rate(struct clk_hw *hw, unsigned long rate,
+                               unsigned long parent_rate)
+{
+       /*
+        * We must report success but we can do so unconditionally because
+        * clk_apb_mul_round_rate returns values that ensure this call is a
+        * nop.
+        */
+
+       return 0;
+}
+
+static const struct clk_ops clk_apb_mul_factor_ops = {
+       .round_rate = clk_apb_mul_round_rate,
+       .set_rate = clk_apb_mul_set_rate,
+       .recalc_rate = clk_apb_mul_recalc_rate,
+};
+
+static struct clk *clk_register_apb_mul(struct device *dev, const char *name,
+                                       const char *parent_name,
+                                       unsigned long flags, u8 bit_idx)
+{
+       struct clk_apb_mul *am;
+       struct clk_init_data init;
+       struct clk *clk;
+
+       am = kzalloc(sizeof(*am), GFP_KERNEL);
+       if (!am)
+               return ERR_PTR(-ENOMEM);
+
+       am->bit_idx = bit_idx;
+       am->hw.init = &init;
+
+       init.name = name;
+       init.ops = &clk_apb_mul_factor_ops;
+       init.flags = flags;
+       init.parent_names = &parent_name;
+       init.num_parents = 1;
+
+       clk = clk_register(dev, &am->hw);
+
+       if (IS_ERR(clk))
+               kfree(am);
+
+       return clk;
+}
+
+/*
+ * Decode current PLL state and (statically) model the state we inherit from
+ * the bootloader.
+ */
+static void stm32f4_rcc_register_pll(const char *hse_clk, const char *hsi_clk)
+{
+       unsigned long pllcfgr = readl(base + STM32F4_RCC_PLLCFGR);
+
+       unsigned long pllm   = pllcfgr & 0x3f;
+       unsigned long plln   = (pllcfgr >> 6) & 0x1ff;
+       unsigned long pllp   = BIT(((pllcfgr >> 16) & 3) + 1);
+       const char   *pllsrc = pllcfgr & BIT(22) ? hse_clk : hsi_clk;
+       unsigned long pllq   = (pllcfgr >> 24) & 0xf;
+
+       clk_register_fixed_factor(NULL, "vco", pllsrc, 0, plln, pllm);
+       clk_register_fixed_factor(NULL, "pll", "vco", 0, 1, pllp);
+       clk_register_fixed_factor(NULL, "pll48", "vco", 0, 1, pllq);
+}
+
+/*
+ * Converts the primary and secondary indices (as they appear in DT) to an
+ * offset into our struct clock array.
+ */
+static int stm32f4_rcc_lookup_clk_idx(u8 primary, u8 secondary)
+{
+       u64 table[ARRAY_SIZE(stm32f42xx_gate_map)];
+
+       if (primary == 1) {
+               if (WARN_ON(secondary > FCLK))
+                       return -EINVAL;
+               return secondary;
+       }
+
+       memcpy(table, stm32f42xx_gate_map, sizeof(table));
+
+       /* only bits set in table can be used as indices */
+       if (WARN_ON(secondary > 8 * sizeof(table) ||
+                   0 == (table[BIT_ULL_WORD(secondary)] &
+                         BIT_ULL_MASK(secondary))))
+               return -EINVAL;
+
+       /* mask out bits above our current index */
+       table[BIT_ULL_WORD(secondary)] &=
+           GENMASK_ULL(secondary % BITS_PER_LONG_LONG, 0);
+
+       return FCLK + hweight64(table[0]) +
+              (BIT_ULL_WORD(secondary) >= 1 ? hweight64(table[1]) : 0) +
+              (BIT_ULL_WORD(secondary) >= 2 ? hweight64(table[2]) : 0);
+}
+
+static struct clk *
+stm32f4_rcc_lookup_clk(struct of_phandle_args *clkspec, void *data)
+{
+       int i = stm32f4_rcc_lookup_clk_idx(clkspec->args[0], clkspec->args[1]);
+
+       if (i < 0)
+               return ERR_PTR(-EINVAL);
+
+       return clks[i];
+}
+
+static const char *sys_parents[] __initdata =   { "hsi", NULL, "pll" };
+
+static const struct clk_div_table ahb_div_table[] = {
+       { 0x0,   1 }, { 0x1,   1 }, { 0x2,   1 }, { 0x3,   1 },
+       { 0x4,   1 }, { 0x5,   1 }, { 0x6,   1 }, { 0x7,   1 },
+       { 0x8,   2 }, { 0x9,   4 }, { 0xa,   8 }, { 0xb,  16 },
+       { 0xc,  64 }, { 0xd, 128 }, { 0xe, 256 }, { 0xf, 512 },
+       { 0 },
+};
+
+static const struct clk_div_table apb_div_table[] = {
+       { 0,  1 }, { 0,  1 }, { 0,  1 }, { 0,  1 },
+       { 4,  2 }, { 5,  4 }, { 6,  8 }, { 7, 16 },
+       { 0 },
+};
+
+static void __init stm32f4_rcc_init(struct device_node *np)
+{
+       const char *hse_clk;
+       int n;
+
+       base = of_iomap(np, 0);
+       if (!base) {
+               pr_err("%s: unable to map resource\n", np->name);
+               return;
+       }
+
+       hse_clk = of_clk_get_parent_name(np, 0);
+
+       clk_register_fixed_rate_with_accuracy(NULL, "hsi", NULL, 0,
+                       16000000, 160000);
+       stm32f4_rcc_register_pll(hse_clk, "hsi");
+
+       sys_parents[1] = hse_clk;
+       clk_register_mux_table(
+           NULL, "sys", sys_parents, ARRAY_SIZE(sys_parents), 0,
+           base + STM32F4_RCC_CFGR, 0, 3, 0, NULL, &stm32f4_clk_lock);
+
+       clk_register_divider_table(NULL, "ahb_div", "sys",
+                                  CLK_SET_RATE_PARENT, base + STM32F4_RCC_CFGR,
+                                  4, 4, 0, ahb_div_table, &stm32f4_clk_lock);
+
+       clk_register_divider_table(NULL, "apb1_div", "ahb_div",
+                                  CLK_SET_RATE_PARENT, base + STM32F4_RCC_CFGR,
+                                  10, 3, 0, apb_div_table, &stm32f4_clk_lock);
+       clk_register_apb_mul(NULL, "apb1_mul", "apb1_div",
+                            CLK_SET_RATE_PARENT, 12);
+
+       clk_register_divider_table(NULL, "apb2_div", "ahb_div",
+                                  CLK_SET_RATE_PARENT, base + STM32F4_RCC_CFGR,
+                                  13, 3, 0, apb_div_table, &stm32f4_clk_lock);
+       clk_register_apb_mul(NULL, "apb2_mul", "apb2_div",
+                            CLK_SET_RATE_PARENT, 15);
+
+       clks[SYSTICK] = clk_register_fixed_factor(NULL, "systick", "ahb_div",
+                                                 0, 1, 8);
+       clks[FCLK] = clk_register_fixed_factor(NULL, "fclk", "ahb_div",
+                                              0, 1, 1);
+
+       for (n = 0; n < ARRAY_SIZE(stm32f4_gates); n++) {
+               const struct stm32f4_gate_data *gd = &stm32f4_gates[n];
+               unsigned int secondary =
+                   8 * (gd->offset - STM32F4_RCC_AHB1ENR) + gd->bit_idx;
+               int idx = stm32f4_rcc_lookup_clk_idx(0, secondary);
+
+               if (idx < 0)
+                       goto fail;
+
+               clks[idx] = clk_register_gate(
+                   NULL, gd->name, gd->parent_name, gd->flags,
+                   base + gd->offset, gd->bit_idx, 0, &stm32f4_clk_lock);
+
+               if (IS_ERR(clks[idx])) {
+                       pr_err("%s: Unable to register leaf clock %s\n",
+                              np->full_name, gd->name);
+                       goto fail;
+               }
+       }
+
+       of_clk_add_provider(np, stm32f4_rcc_lookup_clk, NULL);
+       return;
+fail:
+       iounmap(base);
+}
+CLK_OF_DECLARE(stm32f4_rcc, "st,stm32f42xx-rcc", stm32f4_rcc_init);
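
The clock specifier decoding above is terse, so here is a standalone walk-through, not part of the patch, that mirrors stm32f4_rcc_lookup_clk_idx() with __builtin_popcountll() standing in for hweight64(). Each gate clock is numbered 8 * (its enable register's offset from AHB1ENR) + bit index, and its slot in clks[] is FCLK plus the count of gate-map bits at or below that position.

/*
 * Editorial sketch of the gate-map index arithmetic. The bitmap values
 * are copied from stm32f42xx_gate_map above; everything else is plain
 * userspace C.
 */
#include <stdio.h>
#include <stdint.h>

#define FCLK 1  /* matches enum { SYSTICK, FCLK } in the driver */

static const uint64_t gate_map[3] = { 0x000000f17ef417ffULL,
                                      0x0000000000000001ULL,
                                      0x04777f33f6fec9ffULL };

static int lookup_idx(unsigned int secondary)
{
        unsigned int word = secondary / 64, bit = secondary % 64;
        uint64_t table[3] = { gate_map[0], gate_map[1], gate_map[2] };
        int idx;

        if (word >= 3 || !(table[word] & (1ULL << bit)))
                return -1;      /* no gate bit at this position */

        /* keep only the set bits at or below our position in this word */
        table[word] &= (bit == 63) ? ~0ULL : ((1ULL << (bit + 1)) - 1);

        idx = FCLK + __builtin_popcountll(table[0]);
        if (word >= 1)
                idx += __builtin_popcountll(table[1]);
        if (word >= 2)
                idx += __builtin_popcountll(table[2]);
        return idx;
}

int main(void)
{
        /* gpioa: AHB1ENR bit 0; ltdc: APB2ENR bit 26 -> 8 * (0x44 - 0x30) + 26 */
        printf("gpioa -> clks[%d]\n", lookup_idx(0));
        printf("ltdc  -> clks[%d]\n", lookup_idx(8 * (0x44 - 0x30) + 26));
        return 0;
}
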
index 406bfc1375b2d4a44e9e4993786cca2c48c713f3..18bf5e576b9390c680b4e9a08637104cb07cdde4 100644 (file)
@@ -12,6 +12,7 @@
 #include <linux/clk-provider.h>
 #include <linux/spinlock.h>
 #include <linux/of.h>
+#include <linux/platform_data/clk-u300.h>
 
 /* APP side SYSCON registers */
 /* CLK Control Register 16bit (R/W) */
index dd8a62d8f11f12da7aa51b6bace5287bf4532b7d..f26b3ac36b27a1aa06fd7db2b3352420f96c7768 100644 (file)
 
 static DEFINE_SPINLOCK(clk_lock);
 
-static inline u32 xgene_clk_read(void *csr)
+static inline u32 xgene_clk_read(void __iomem *csr)
 {
        return readl_relaxed(csr);
 }
 
-static inline void xgene_clk_write(u32 data, void *csr)
+static inline void xgene_clk_write(u32 data, void __iomem *csr)
 {
        return writel_relaxed(data, csr);
 }
@@ -119,7 +119,7 @@ static unsigned long xgene_clk_pll_recalc_rate(struct clk_hw *hw,
        return fvco / nout;
 }
 
-const struct clk_ops xgene_clk_pll_ops = {
+static const struct clk_ops xgene_clk_pll_ops = {
        .is_enabled = xgene_clk_pll_is_enabled,
        .recalc_rate = xgene_clk_pll_recalc_rate,
 };
@@ -167,7 +167,7 @@ static void xgene_pllclk_init(struct device_node *np, enum xgene_pll_type pll_ty
 {
         const char *clk_name = np->full_name;
         struct clk *clk;
-        void *reg;
+        void __iomem *reg;
 
         reg = of_iomap(np, 0);
         if (reg == NULL) {
@@ -222,20 +222,22 @@ static int xgene_clk_enable(struct clk_hw *hw)
        struct xgene_clk *pclk = to_xgene_clk(hw);
        unsigned long flags = 0;
        u32 data;
+       phys_addr_t reg;
 
        if (pclk->lock)
                spin_lock_irqsave(pclk->lock, flags);
 
        if (pclk->param.csr_reg != NULL) {
                pr_debug("%s clock enabled\n", pclk->name);
+               reg = __pa(pclk->param.csr_reg);
                /* First enable the clock */
                data = xgene_clk_read(pclk->param.csr_reg +
                                        pclk->param.reg_clk_offset);
                data |= pclk->param.reg_clk_mask;
                xgene_clk_write(data, pclk->param.csr_reg +
                                        pclk->param.reg_clk_offset);
-               pr_debug("%s clock PADDR base 0x%016LX clk offset 0x%08X mask 0x%08X value 0x%08X\n",
-                       pclk->name, __pa(pclk->param.csr_reg),
+               pr_debug("%s clock PADDR base %pa clk offset 0x%08X mask 0x%08X value 0x%08X\n",
+                       pclk->name, &reg,
                        pclk->param.reg_clk_offset, pclk->param.reg_clk_mask,
                        data);
 
@@ -245,8 +247,8 @@ static int xgene_clk_enable(struct clk_hw *hw)
                data &= ~pclk->param.reg_csr_mask;
                xgene_clk_write(data, pclk->param.csr_reg +
                                        pclk->param.reg_csr_offset);
-               pr_debug("%s CSR RESET PADDR base 0x%016LX csr offset 0x%08X mask 0x%08X value 0x%08X\n",
-                       pclk->name, __pa(pclk->param.csr_reg),
+               pr_debug("%s CSR RESET PADDR base %pa csr offset 0x%08X mask 0x%08X value 0x%08X\n",
+                       pclk->name, &reg,
                        pclk->param.reg_csr_offset, pclk->param.reg_csr_mask,
                        data);
        }
@@ -386,7 +388,7 @@ static long xgene_clk_round_rate(struct clk_hw *hw, unsigned long rate,
        return parent_rate / divider;
 }
 
-const struct clk_ops xgene_clk_ops = {
+static const struct clk_ops xgene_clk_ops = {
        .enable = xgene_clk_enable,
        .disable = xgene_clk_disable,
        .is_enabled = xgene_clk_is_enabled,
@@ -456,7 +458,7 @@ static void __init xgene_devclk_init(struct device_node *np)
        parameters.csr_reg = NULL;
        parameters.divider_reg = NULL;
        for (i = 0; i < 2; i++) {
-               void *map_res;
+               void __iomem *map_res;
                rc = of_address_to_resource(np, i, &res);
                if (rc != 0) {
                        if (i == 0) {
index 5b0f41868b425672e6295ac6b30a52e43cf5730c..ddb4b541016fe986860a95a4a482d2f6c6713a3e 100644 (file)
@@ -21,6 +21,7 @@
 #include <linux/device.h>
 #include <linux/init.h>
 #include <linux/sched.h>
+#include <linux/clkdev.h>
 
 #include "clk.h"
 
@@ -37,13 +38,6 @@ static HLIST_HEAD(clk_root_list);
 static HLIST_HEAD(clk_orphan_list);
 static LIST_HEAD(clk_notifier_list);
 
-static long clk_core_get_accuracy(struct clk_core *clk);
-static unsigned long clk_core_get_rate(struct clk_core *clk);
-static int clk_core_get_phase(struct clk_core *clk);
-static bool clk_core_is_prepared(struct clk_core *clk);
-static bool clk_core_is_enabled(struct clk_core *clk);
-static struct clk_core *clk_core_lookup(const char *name);
-
 /***    private data structures    ***/
 
 struct clk_core {
@@ -68,11 +62,11 @@ struct clk_core {
        int                     phase;
        struct hlist_head       children;
        struct hlist_node       child_node;
-       struct hlist_node       debug_node;
        struct hlist_head       clks;
        unsigned int            notifier_count;
 #ifdef CONFIG_DEBUG_FS
        struct dentry           *dentry;
+       struct hlist_node       debug_node;
 #endif
        struct kref             ref;
 };
@@ -145,516 +139,248 @@ static void clk_enable_unlock(unsigned long flags)
        spin_unlock_irqrestore(&enable_lock, flags);
 }
 
-/***        debugfs support        ***/
-
-#ifdef CONFIG_DEBUG_FS
-#include <linux/debugfs.h>
-
-static struct dentry *rootdir;
-static int inited = 0;
-static DEFINE_MUTEX(clk_debug_lock);
-static HLIST_HEAD(clk_debug_list);
-
-static struct hlist_head *all_lists[] = {
-       &clk_root_list,
-       &clk_orphan_list,
-       NULL,
-};
+static bool clk_core_is_prepared(struct clk_core *core)
+{
+       /*
+        * .is_prepared is optional for clocks that can prepare
+        * fall back to software usage counter if it is missing
+        */
+       if (!core->ops->is_prepared)
+               return core->prepare_count;
 
-static struct hlist_head *orphan_list[] = {
-       &clk_orphan_list,
-       NULL,
-};
+       return core->ops->is_prepared(core->hw);
+}
 
-static void clk_summary_show_one(struct seq_file *s, struct clk_core *c,
-                                int level)
+static bool clk_core_is_enabled(struct clk_core *core)
 {
-       if (!c)
-               return;
+       /*
+        * .is_enabled is only mandatory for clocks that gate
+        * fall back to software usage counter if .is_enabled is missing
+        */
+       if (!core->ops->is_enabled)
+               return core->enable_count;
 
-       seq_printf(s, "%*s%-*s %11d %12d %11lu %10lu %-3d\n",
-                  level * 3 + 1, "",
-                  30 - level * 3, c->name,
-                  c->enable_count, c->prepare_count, clk_core_get_rate(c),
-                  clk_core_get_accuracy(c), clk_core_get_phase(c));
+       return core->ops->is_enabled(core->hw);
 }
 
-static void clk_summary_show_subtree(struct seq_file *s, struct clk_core *c,
-                                    int level)
+static void clk_unprepare_unused_subtree(struct clk_core *core)
 {
        struct clk_core *child;
 
-       if (!c)
+       lockdep_assert_held(&prepare_lock);
+
+       hlist_for_each_entry(child, &core->children, child_node)
+               clk_unprepare_unused_subtree(child);
+
+       if (core->prepare_count)
                return;
 
-       clk_summary_show_one(s, c, level);
+       if (core->flags & CLK_IGNORE_UNUSED)
+               return;
 
-       hlist_for_each_entry(child, &c->children, child_node)
-               clk_summary_show_subtree(s, child, level + 1);
+       if (clk_core_is_prepared(core)) {
+               trace_clk_unprepare(core);
+               if (core->ops->unprepare_unused)
+                       core->ops->unprepare_unused(core->hw);
+               else if (core->ops->unprepare)
+                       core->ops->unprepare(core->hw);
+               trace_clk_unprepare_complete(core);
+       }
 }
 
-static int clk_summary_show(struct seq_file *s, void *data)
+static void clk_disable_unused_subtree(struct clk_core *core)
 {
-       struct clk_core *c;
-       struct hlist_head **lists = (struct hlist_head **)s->private;
+       struct clk_core *child;
+       unsigned long flags;
 
-       seq_puts(s, "   clock                         enable_cnt  prepare_cnt        rate   accuracy   phase\n");
-       seq_puts(s, "----------------------------------------------------------------------------------------\n");
+       lockdep_assert_held(&prepare_lock);
 
-       clk_prepare_lock();
+       hlist_for_each_entry(child, &core->children, child_node)
+               clk_disable_unused_subtree(child);
 
-       for (; *lists; lists++)
-               hlist_for_each_entry(c, *lists, child_node)
-                       clk_summary_show_subtree(s, c, 0);
+       flags = clk_enable_lock();
 
-       clk_prepare_unlock();
+       if (core->enable_count)
+               goto unlock_out;
 
-       return 0;
-}
+       if (core->flags & CLK_IGNORE_UNUSED)
+               goto unlock_out;
 
+       /*
+        * some gate clocks have special needs during the disable-unused
+        * sequence.  call .disable_unused if available, otherwise fall
+        * back to .disable
+        */
+       if (clk_core_is_enabled(core)) {
+               trace_clk_disable(core);
+               if (core->ops->disable_unused)
+                       core->ops->disable_unused(core->hw);
+               else if (core->ops->disable)
+                       core->ops->disable(core->hw);
+               trace_clk_disable_complete(core);
+       }
 
-static int clk_summary_open(struct inode *inode, struct file *file)
-{
-       return single_open(file, clk_summary_show, inode->i_private);
+unlock_out:
+       clk_enable_unlock(flags);
 }
 
-static const struct file_operations clk_summary_fops = {
-       .open           = clk_summary_open,
-       .read           = seq_read,
-       .llseek         = seq_lseek,
-       .release        = single_release,
-};
-
-static void clk_dump_one(struct seq_file *s, struct clk_core *c, int level)
+static bool clk_ignore_unused;
+static int __init clk_ignore_unused_setup(char *__unused)
 {
-       if (!c)
-               return;
-
-       seq_printf(s, "\"%s\": { ", c->name);
-       seq_printf(s, "\"enable_count\": %d,", c->enable_count);
-       seq_printf(s, "\"prepare_count\": %d,", c->prepare_count);
-       seq_printf(s, "\"rate\": %lu", clk_core_get_rate(c));
-       seq_printf(s, "\"accuracy\": %lu", clk_core_get_accuracy(c));
-       seq_printf(s, "\"phase\": %d", clk_core_get_phase(c));
+       clk_ignore_unused = true;
+       return 1;
 }
+__setup("clk_ignore_unused", clk_ignore_unused_setup);
 
-static void clk_dump_subtree(struct seq_file *s, struct clk_core *c, int level)
+static int clk_disable_unused(void)
 {
-       struct clk_core *child;
-
-       if (!c)
-               return;
-
-       clk_dump_one(s, c, level);
+       struct clk_core *core;
 
-       hlist_for_each_entry(child, &c->children, child_node) {
-               seq_printf(s, ",");
-               clk_dump_subtree(s, child, level + 1);
+       if (clk_ignore_unused) {
+               pr_warn("clk: Not disabling unused clocks\n");
+               return 0;
        }
 
-       seq_printf(s, "}");
-}
+       clk_prepare_lock();
 
-static int clk_dump(struct seq_file *s, void *data)
-{
-       struct clk_core *c;
-       bool first_node = true;
-       struct hlist_head **lists = (struct hlist_head **)s->private;
+       hlist_for_each_entry(core, &clk_root_list, child_node)
+               clk_disable_unused_subtree(core);
 
-       seq_printf(s, "{");
+       hlist_for_each_entry(core, &clk_orphan_list, child_node)
+               clk_disable_unused_subtree(core);
 
-       clk_prepare_lock();
+       hlist_for_each_entry(core, &clk_root_list, child_node)
+               clk_unprepare_unused_subtree(core);
 
-       for (; *lists; lists++) {
-               hlist_for_each_entry(c, *lists, child_node) {
-                       if (!first_node)
-                               seq_puts(s, ",");
-                       first_node = false;
-                       clk_dump_subtree(s, c, 0);
-               }
-       }
+       hlist_for_each_entry(core, &clk_orphan_list, child_node)
+               clk_unprepare_unused_subtree(core);
 
        clk_prepare_unlock();
 
-       seq_printf(s, "}");
        return 0;
 }
+late_initcall_sync(clk_disable_unused);
 
+/***    helper functions   ***/
 
-static int clk_dump_open(struct inode *inode, struct file *file)
+const char *__clk_get_name(struct clk *clk)
 {
-       return single_open(file, clk_dump, inode->i_private);
+       return !clk ? NULL : clk->core->name;
 }
+EXPORT_SYMBOL_GPL(__clk_get_name);
 
-static const struct file_operations clk_dump_fops = {
-       .open           = clk_dump_open,
-       .read           = seq_read,
-       .llseek         = seq_lseek,
-       .release        = single_release,
-};
-
-static int clk_debug_create_one(struct clk_core *clk, struct dentry *pdentry)
+struct clk_hw *__clk_get_hw(struct clk *clk)
 {
-       struct dentry *d;
-       int ret = -ENOMEM;
-
-       if (!clk || !pdentry) {
-               ret = -EINVAL;
-               goto out;
-       }
+       return !clk ? NULL : clk->core->hw;
+}
+EXPORT_SYMBOL_GPL(__clk_get_hw);
 
-       d = debugfs_create_dir(clk->name, pdentry);
-       if (!d)
-               goto out;
+u8 __clk_get_num_parents(struct clk *clk)
+{
+       return !clk ? 0 : clk->core->num_parents;
+}
+EXPORT_SYMBOL_GPL(__clk_get_num_parents);
 
-       clk->dentry = d;
+struct clk *__clk_get_parent(struct clk *clk)
+{
+       if (!clk)
+               return NULL;
 
-       d = debugfs_create_u32("clk_rate", S_IRUGO, clk->dentry,
-                       (u32 *)&clk->rate);
-       if (!d)
-               goto err_out;
+       /* TODO: Create a per-user clk and change callers to call clk_put */
+       return !clk->core->parent ? NULL : clk->core->parent->hw->clk;
+}
+EXPORT_SYMBOL_GPL(__clk_get_parent);
 
-       d = debugfs_create_u32("clk_accuracy", S_IRUGO, clk->dentry,
-                       (u32 *)&clk->accuracy);
-       if (!d)
-               goto err_out;
+static struct clk_core *__clk_lookup_subtree(const char *name,
+                                            struct clk_core *core)
+{
+       struct clk_core *child;
+       struct clk_core *ret;
 
-       d = debugfs_create_u32("clk_phase", S_IRUGO, clk->dentry,
-                       (u32 *)&clk->phase);
-       if (!d)
-               goto err_out;
+       if (!strcmp(core->name, name))
+               return core;
 
-       d = debugfs_create_x32("clk_flags", S_IRUGO, clk->dentry,
-                       (u32 *)&clk->flags);
-       if (!d)
-               goto err_out;
+       hlist_for_each_entry(child, &core->children, child_node) {
+               ret = __clk_lookup_subtree(name, child);
+               if (ret)
+                       return ret;
+       }
 
-       d = debugfs_create_u32("clk_prepare_count", S_IRUGO, clk->dentry,
-                       (u32 *)&clk->prepare_count);
-       if (!d)
-               goto err_out;
+       return NULL;
+}
 
-       d = debugfs_create_u32("clk_enable_count", S_IRUGO, clk->dentry,
-                       (u32 *)&clk->enable_count);
-       if (!d)
-               goto err_out;
+static struct clk_core *clk_core_lookup(const char *name)
+{
+       struct clk_core *root_clk;
+       struct clk_core *ret;
 
-       d = debugfs_create_u32("clk_notifier_count", S_IRUGO, clk->dentry,
-                       (u32 *)&clk->notifier_count);
-       if (!d)
-               goto err_out;
+       if (!name)
+               return NULL;
 
-       if (clk->ops->debug_init) {
-               ret = clk->ops->debug_init(clk->hw, clk->dentry);
+       /* search the 'proper' clk tree first */
+       hlist_for_each_entry(root_clk, &clk_root_list, child_node) {
+               ret = __clk_lookup_subtree(name, root_clk);
                if (ret)
-                       goto err_out;
+                       return ret;
        }
 
-       ret = 0;
-       goto out;
+       /* if not found, then search the orphan tree */
+       hlist_for_each_entry(root_clk, &clk_orphan_list, child_node) {
+               ret = __clk_lookup_subtree(name, root_clk);
+               if (ret)
+                       return ret;
+       }
 
-err_out:
-       debugfs_remove_recursive(clk->dentry);
-       clk->dentry = NULL;
-out:
-       return ret;
+       return NULL;
 }
 
-/**
- * clk_debug_register - add a clk node to the debugfs clk tree
- * @clk: the clk being added to the debugfs clk tree
- *
- * Dynamically adds a clk to the debugfs clk tree if debugfs has been
- * initialized.  Otherwise it bails out early since the debugfs clk tree
- * will be created lazily by clk_debug_init as part of a late_initcall.
- */
-static int clk_debug_register(struct clk_core *clk)
+static struct clk_core *clk_core_get_parent_by_index(struct clk_core *core,
+                                                        u8 index)
 {
-       int ret = 0;
+       if (!core || index >= core->num_parents)
+               return NULL;
+       else if (!core->parents)
+               return clk_core_lookup(core->parent_names[index]);
+       else if (!core->parents[index])
+               return core->parents[index] =
+                       clk_core_lookup(core->parent_names[index]);
+       else
+               return core->parents[index];
+}
 
-       mutex_lock(&clk_debug_lock);
-       hlist_add_head(&clk->debug_node, &clk_debug_list);
+struct clk *clk_get_parent_by_index(struct clk *clk, u8 index)
+{
+       struct clk_core *parent;
 
-       if (!inited)
-               goto unlock;
+       if (!clk)
+               return NULL;
 
-       ret = clk_debug_create_one(clk, rootdir);
-unlock:
-       mutex_unlock(&clk_debug_lock);
+       parent = clk_core_get_parent_by_index(clk->core, index);
 
-       return ret;
+       return !parent ? NULL : parent->hw->clk;
 }
+EXPORT_SYMBOL_GPL(clk_get_parent_by_index);
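A quick illustration of how these helpers compose: a provider-side debug routine (hypothetical name, assuming linux/clk-provider.h) can walk every potential parent of a mux with __clk_get_num_parents() and clk_get_parent_by_index(), tolerating NULL for parents that are not registered yet. A minimal sketch, not taken from this commit:

#include <linux/types.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/printk.h>

/* Hypothetical debug helper: print every potential parent of a mux clk. */
static void example_print_mux_parents(struct clk *mux)
{
        u8 i, num = __clk_get_num_parents(mux);

        for (i = 0; i < num; i++) {
                /* may be NULL if that parent is not registered yet */
                struct clk *p = clk_get_parent_by_index(mux, i);

                pr_info("parent %u of %s: %s\n", i, __clk_get_name(mux),
                        p ? __clk_get_name(p) : "<orphan>");
        }
}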
 
- /**
- * clk_debug_unregister - remove a clk node from the debugfs clk tree
- * @clk: the clk being removed from the debugfs clk tree
- *
- * Dynamically removes a clk and all it's children clk nodes from the
- * debugfs clk tree if clk->dentry points to debugfs created by
- * clk_debug_register in __clk_init.
- */
-static void clk_debug_unregister(struct clk_core *clk)
+unsigned int __clk_get_enable_count(struct clk *clk)
 {
-       mutex_lock(&clk_debug_lock);
-       hlist_del_init(&clk->debug_node);
-       debugfs_remove_recursive(clk->dentry);
-       clk->dentry = NULL;
-       mutex_unlock(&clk_debug_lock);
+       return !clk ? 0 : clk->core->enable_count;
 }
 
-struct dentry *clk_debugfs_add_file(struct clk_hw *hw, char *name, umode_t mode,
-                               void *data, const struct file_operations *fops)
-{
-       struct dentry *d = NULL;
-
-       if (hw->core->dentry)
-               d = debugfs_create_file(name, mode, hw->core->dentry, data,
-                                       fops);
-
-       return d;
-}
-EXPORT_SYMBOL_GPL(clk_debugfs_add_file);
-
-/**
- * clk_debug_init - lazily create the debugfs clk tree visualization
- *
- * clks are often initialized very early during boot before memory can
- * be dynamically allocated and well before debugfs is setup.
- * clk_debug_init walks the clk tree hierarchy while holding
- * prepare_lock and creates the topology as part of a late_initcall,
- * thus insuring that clks initialized very early will still be
- * represented in the debugfs clk tree.  This function should only be
- * called once at boot-time, and all other clks added dynamically will
- * be done so with clk_debug_register.
- */
-static int __init clk_debug_init(void)
-{
-       struct clk_core *clk;
-       struct dentry *d;
-
-       rootdir = debugfs_create_dir("clk", NULL);
-
-       if (!rootdir)
-               return -ENOMEM;
-
-       d = debugfs_create_file("clk_summary", S_IRUGO, rootdir, &all_lists,
-                               &clk_summary_fops);
-       if (!d)
-               return -ENOMEM;
-
-       d = debugfs_create_file("clk_dump", S_IRUGO, rootdir, &all_lists,
-                               &clk_dump_fops);
-       if (!d)
-               return -ENOMEM;
-
-       d = debugfs_create_file("clk_orphan_summary", S_IRUGO, rootdir,
-                               &orphan_list, &clk_summary_fops);
-       if (!d)
-               return -ENOMEM;
-
-       d = debugfs_create_file("clk_orphan_dump", S_IRUGO, rootdir,
-                               &orphan_list, &clk_dump_fops);
-       if (!d)
-               return -ENOMEM;
-
-       mutex_lock(&clk_debug_lock);
-       hlist_for_each_entry(clk, &clk_debug_list, debug_node)
-               clk_debug_create_one(clk, rootdir);
-
-       inited = 1;
-       mutex_unlock(&clk_debug_lock);
-
-       return 0;
-}
-late_initcall(clk_debug_init);
-#else
-static inline int clk_debug_register(struct clk_core *clk) { return 0; }
-static inline void clk_debug_reparent(struct clk_core *clk,
-                                     struct clk_core *new_parent)
-{
-}
-static inline void clk_debug_unregister(struct clk_core *clk)
-{
-}
-#endif
-
-/* caller must hold prepare_lock */
-static void clk_unprepare_unused_subtree(struct clk_core *clk)
-{
-       struct clk_core *child;
-
-       lockdep_assert_held(&prepare_lock);
-
-       hlist_for_each_entry(child, &clk->children, child_node)
-               clk_unprepare_unused_subtree(child);
-
-       if (clk->prepare_count)
-               return;
-
-       if (clk->flags & CLK_IGNORE_UNUSED)
-               return;
-
-       if (clk_core_is_prepared(clk)) {
-               trace_clk_unprepare(clk);
-               if (clk->ops->unprepare_unused)
-                       clk->ops->unprepare_unused(clk->hw);
-               else if (clk->ops->unprepare)
-                       clk->ops->unprepare(clk->hw);
-               trace_clk_unprepare_complete(clk);
-       }
-}
-
-/* caller must hold prepare_lock */
-static void clk_disable_unused_subtree(struct clk_core *clk)
-{
-       struct clk_core *child;
-       unsigned long flags;
-
-       lockdep_assert_held(&prepare_lock);
-
-       hlist_for_each_entry(child, &clk->children, child_node)
-               clk_disable_unused_subtree(child);
-
-       flags = clk_enable_lock();
-
-       if (clk->enable_count)
-               goto unlock_out;
-
-       if (clk->flags & CLK_IGNORE_UNUSED)
-               goto unlock_out;
-
-       /*
-        * some gate clocks have special needs during the disable-unused
-        * sequence.  call .disable_unused if available, otherwise fall
-        * back to .disable
-        */
-       if (clk_core_is_enabled(clk)) {
-               trace_clk_disable(clk);
-               if (clk->ops->disable_unused)
-                       clk->ops->disable_unused(clk->hw);
-               else if (clk->ops->disable)
-                       clk->ops->disable(clk->hw);
-               trace_clk_disable_complete(clk);
-       }
-
-unlock_out:
-       clk_enable_unlock(flags);
-}
-
-static bool clk_ignore_unused;
-static int __init clk_ignore_unused_setup(char *__unused)
-{
-       clk_ignore_unused = true;
-       return 1;
-}
-__setup("clk_ignore_unused", clk_ignore_unused_setup);
-
-static int clk_disable_unused(void)
-{
-       struct clk_core *clk;
-
-       if (clk_ignore_unused) {
-               pr_warn("clk: Not disabling unused clocks\n");
-               return 0;
-       }
-
-       clk_prepare_lock();
-
-       hlist_for_each_entry(clk, &clk_root_list, child_node)
-               clk_disable_unused_subtree(clk);
-
-       hlist_for_each_entry(clk, &clk_orphan_list, child_node)
-               clk_disable_unused_subtree(clk);
-
-       hlist_for_each_entry(clk, &clk_root_list, child_node)
-               clk_unprepare_unused_subtree(clk);
-
-       hlist_for_each_entry(clk, &clk_orphan_list, child_node)
-               clk_unprepare_unused_subtree(clk);
-
-       clk_prepare_unlock();
-
-       return 0;
-}
-late_initcall_sync(clk_disable_unused);
-
-/***    helper functions   ***/
-
-const char *__clk_get_name(struct clk *clk)
-{
-       return !clk ? NULL : clk->core->name;
-}
-EXPORT_SYMBOL_GPL(__clk_get_name);
-
-struct clk_hw *__clk_get_hw(struct clk *clk)
-{
-       return !clk ? NULL : clk->core->hw;
-}
-EXPORT_SYMBOL_GPL(__clk_get_hw);
-
-u8 __clk_get_num_parents(struct clk *clk)
-{
-       return !clk ? 0 : clk->core->num_parents;
-}
-EXPORT_SYMBOL_GPL(__clk_get_num_parents);
-
-struct clk *__clk_get_parent(struct clk *clk)
-{
-       if (!clk)
-               return NULL;
-
-       /* TODO: Create a per-user clk and change callers to call clk_put */
-       return !clk->core->parent ? NULL : clk->core->parent->hw->clk;
-}
-EXPORT_SYMBOL_GPL(__clk_get_parent);
-
-static struct clk_core *clk_core_get_parent_by_index(struct clk_core *clk,
-                                                        u8 index)
-{
-       if (!clk || index >= clk->num_parents)
-               return NULL;
-       else if (!clk->parents)
-               return clk_core_lookup(clk->parent_names[index]);
-       else if (!clk->parents[index])
-               return clk->parents[index] =
-                       clk_core_lookup(clk->parent_names[index]);
-       else
-               return clk->parents[index];
-}
-
-struct clk *clk_get_parent_by_index(struct clk *clk, u8 index)
-{
-       struct clk_core *parent;
-
-       if (!clk)
-               return NULL;
-
-       parent = clk_core_get_parent_by_index(clk->core, index);
-
-       return !parent ? NULL : parent->hw->clk;
-}
-EXPORT_SYMBOL_GPL(clk_get_parent_by_index);
-
-unsigned int __clk_get_enable_count(struct clk *clk)
-{
-       return !clk ? 0 : clk->core->enable_count;
-}
-
-static unsigned long clk_core_get_rate_nolock(struct clk_core *clk)
+static unsigned long clk_core_get_rate_nolock(struct clk_core *core)
 {
        unsigned long ret;
 
-       if (!clk) {
+       if (!core) {
                ret = 0;
                goto out;
        }
 
-       ret = clk->rate;
+       ret = core->rate;
 
-       if (clk->flags & CLK_IS_ROOT)
+       if (core->flags & CLK_IS_ROOT)
                goto out;
 
-       if (!clk->parent)
+       if (!core->parent)
                ret = 0;
 
 out:
@@ -670,12 +396,12 @@ unsigned long __clk_get_rate(struct clk *clk)
 }
 EXPORT_SYMBOL_GPL(__clk_get_rate);
 
-static unsigned long __clk_get_accuracy(struct clk_core *clk)
+static unsigned long __clk_get_accuracy(struct clk_core *core)
 {
-       if (!clk)
+       if (!core)
                return 0;
 
-       return clk->accuracy;
+       return core->accuracy;
 }
 
 unsigned long __clk_get_flags(struct clk *clk)
@@ -684,27 +410,6 @@ unsigned long __clk_get_flags(struct clk *clk)
 }
 EXPORT_SYMBOL_GPL(__clk_get_flags);
 
-static bool clk_core_is_prepared(struct clk_core *clk)
-{
-       int ret;
-
-       if (!clk)
-               return false;
-
-       /*
-        * .is_prepared is optional for clocks that can prepare
-        * fall back to software usage counter if it is missing
-        */
-       if (!clk->ops->is_prepared) {
-               ret = clk->prepare_count ? 1 : 0;
-               goto out;
-       }
-
-       ret = clk->ops->is_prepared(clk->hw);
-out:
-       return !!ret;
-}
-
 bool __clk_is_prepared(struct clk *clk)
 {
        if (!clk)
@@ -713,27 +418,6 @@ bool __clk_is_prepared(struct clk *clk)
        return clk_core_is_prepared(clk->core);
 }
 
-static bool clk_core_is_enabled(struct clk_core *clk)
-{
-       int ret;
-
-       if (!clk)
-               return false;
-
-       /*
-        * .is_enabled is only mandatory for clocks that gate
-        * fall back to software usage counter if .is_enabled is missing
-        */
-       if (!clk->ops->is_enabled) {
-               ret = clk->enable_count ? 1 : 0;
-               goto out;
-       }
-
-       ret = clk->ops->is_enabled(clk->hw);
-out:
-       return !!ret;
-}
-
 bool __clk_is_enabled(struct clk *clk)
 {
        if (!clk)
@@ -743,49 +427,6 @@ bool __clk_is_enabled(struct clk *clk)
 }
 EXPORT_SYMBOL_GPL(__clk_is_enabled);
 
-static struct clk_core *__clk_lookup_subtree(const char *name,
-                                            struct clk_core *clk)
-{
-       struct clk_core *child;
-       struct clk_core *ret;
-
-       if (!strcmp(clk->name, name))
-               return clk;
-
-       hlist_for_each_entry(child, &clk->children, child_node) {
-               ret = __clk_lookup_subtree(name, child);
-               if (ret)
-                       return ret;
-       }
-
-       return NULL;
-}
-
-static struct clk_core *clk_core_lookup(const char *name)
-{
-       struct clk_core *root_clk;
-       struct clk_core *ret;
-
-       if (!name)
-               return NULL;
-
-       /* search the 'proper' clk tree first */
-       hlist_for_each_entry(root_clk, &clk_root_list, child_node) {
-               ret = __clk_lookup_subtree(name, root_clk);
-               if (ret)
-                       return ret;
-       }
-
-       /* if not found, then search the orphan tree */
-       hlist_for_each_entry(root_clk, &clk_orphan_list, child_node) {
-               ret = __clk_lookup_subtree(name, root_clk);
-               if (ret)
-                       return ret;
-       }
-
-       return NULL;
-}
-
 static bool mux_is_better_rate(unsigned long rate, unsigned long now,
                           unsigned long best, unsigned long flags)
 {
@@ -853,7 +494,7 @@ struct clk *__clk_lookup(const char *name)
        return !core ? NULL : core->hw->clk;
 }
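Since clk_core_lookup() walks the root list first and only then the orphan list, a name that resolves may still belong to an orphaned clk. Providers can probe for registration by name through the __clk_lookup() wrapper shown above; a minimal, hypothetical sketch:

#include <linux/types.h>
#include <linux/clk-provider.h>

/* Hypothetical provider-side check: NULL means no clock with this name
 * has been registered (in either the root or the orphan list). */
static bool example_clk_is_registered(const char *name)
{
        return __clk_lookup(name) != NULL;
}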
 
-static void clk_core_get_boundaries(struct clk_core *clk,
+static void clk_core_get_boundaries(struct clk_core *core,
                                    unsigned long *min_rate,
                                    unsigned long *max_rate)
 {
@@ -862,10 +503,10 @@ static void clk_core_get_boundaries(struct clk_core *clk,
        *min_rate = 0;
        *max_rate = ULONG_MAX;
 
-       hlist_for_each_entry(clk_user, &clk->clks, clks_node)
+       hlist_for_each_entry(clk_user, &core->clks, clks_node)
                *min_rate = max(*min_rate, clk_user->min_rate);
 
-       hlist_for_each_entry(clk_user, &clk->clks, clks_node)
+       hlist_for_each_entry(clk_user, &core->clks, clks_node)
                *max_rate = min(*max_rate, clk_user->max_rate);
 }
 
@@ -901,26 +542,28 @@ EXPORT_SYMBOL_GPL(__clk_mux_determine_rate_closest);
 
 /***        clk api        ***/
 
-static void clk_core_unprepare(struct clk_core *clk)
+static void clk_core_unprepare(struct clk_core *core)
 {
-       if (!clk)
+       lockdep_assert_held(&prepare_lock);
+
+       if (!core)
                return;
 
-       if (WARN_ON(clk->prepare_count == 0))
+       if (WARN_ON(core->prepare_count == 0))
                return;
 
-       if (--clk->prepare_count > 0)
+       if (--core->prepare_count > 0)
                return;
 
-       WARN_ON(clk->enable_count > 0);
+       WARN_ON(core->enable_count > 0);
 
-       trace_clk_unprepare(clk);
+       trace_clk_unprepare(core);
 
-       if (clk->ops->unprepare)
-               clk->ops->unprepare(clk->hw);
+       if (core->ops->unprepare)
+               core->ops->unprepare(core->hw);
 
-       trace_clk_unprepare_complete(clk);
-       clk_core_unprepare(clk->parent);
+       trace_clk_unprepare_complete(core);
+       clk_core_unprepare(core->parent);
 }
 
 /**
@@ -945,32 +588,34 @@ void clk_unprepare(struct clk *clk)
 }
 EXPORT_SYMBOL_GPL(clk_unprepare);
 
-static int clk_core_prepare(struct clk_core *clk)
+static int clk_core_prepare(struct clk_core *core)
 {
        int ret = 0;
 
-       if (!clk)
-               return 0;
+       lockdep_assert_held(&prepare_lock);
+
+       if (!core)
+               return 0;
 
-       if (clk->prepare_count == 0) {
-               ret = clk_core_prepare(clk->parent);
+       if (core->prepare_count == 0) {
+               ret = clk_core_prepare(core->parent);
                if (ret)
                        return ret;
 
-               trace_clk_prepare(clk);
+               trace_clk_prepare(core);
 
-               if (clk->ops->prepare)
-                       ret = clk->ops->prepare(clk->hw);
+               if (core->ops->prepare)
+                       ret = core->ops->prepare(core->hw);
 
-               trace_clk_prepare_complete(clk);
+               trace_clk_prepare_complete(core);
 
                if (ret) {
-                       clk_core_unprepare(clk->parent);
+                       clk_core_unprepare(core->parent);
                        return ret;
                }
        }
 
-       clk->prepare_count++;
+       core->prepare_count++;
 
        return 0;
 }
@@ -1002,33 +647,27 @@ int clk_prepare(struct clk *clk)
 }
 EXPORT_SYMBOL_GPL(clk_prepare);
 
-static void clk_core_disable(struct clk_core *clk)
+static void clk_core_disable(struct clk_core *core)
 {
-       if (!clk)
-               return;
+       lockdep_assert_held(&enable_lock);
 
-       if (WARN_ON(clk->enable_count == 0))
+       if (!core)
                return;
 
-       if (--clk->enable_count > 0)
+       if (WARN_ON(core->enable_count == 0))
                return;
 
-       trace_clk_disable(clk);
-
-       if (clk->ops->disable)
-               clk->ops->disable(clk->hw);
+       if (--core->enable_count > 0)
+               return;
 
-       trace_clk_disable_complete(clk);
+       trace_clk_disable(core);
 
-       clk_core_disable(clk->parent);
-}
+       if (core->ops->disable)
+               core->ops->disable(core->hw);
 
-static void __clk_disable(struct clk *clk)
-{
-       if (!clk)
-               return;
+       trace_clk_disable_complete(core);
 
-       clk_core_disable(clk->core);
+       clk_core_disable(core->parent);
 }
 
 /**
@@ -1051,52 +690,46 @@ void clk_disable(struct clk *clk)
                return;
 
        flags = clk_enable_lock();
-       __clk_disable(clk);
+       clk_core_disable(clk->core);
        clk_enable_unlock(flags);
 }
 EXPORT_SYMBOL_GPL(clk_disable);
 
-static int clk_core_enable(struct clk_core *clk)
+static int clk_core_enable(struct clk_core *core)
 {
        int ret = 0;
 
-       if (!clk)
+       lockdep_assert_held(&enable_lock);
+
+       if (!core)
                return 0;
 
-       if (WARN_ON(clk->prepare_count == 0))
+       if (WARN_ON(core->prepare_count == 0))
                return -ESHUTDOWN;
 
-       if (clk->enable_count == 0) {
-               ret = clk_core_enable(clk->parent);
+       if (core->enable_count == 0) {
+               ret = clk_core_enable(core->parent);
 
                if (ret)
                        return ret;
 
-               trace_clk_enable(clk);
+               trace_clk_enable(core);
 
-               if (clk->ops->enable)
-                       ret = clk->ops->enable(clk->hw);
+               if (core->ops->enable)
+                       ret = core->ops->enable(core->hw);
 
-               trace_clk_enable_complete(clk);
+               trace_clk_enable_complete(core);
 
                if (ret) {
-                       clk_core_disable(clk->parent);
+                       clk_core_disable(core->parent);
                        return ret;
                }
        }
 
-       clk->enable_count++;
+       core->enable_count++;
        return 0;
 }
 
-static int __clk_enable(struct clk *clk)
-{
-       if (!clk)
-               return 0;
-
-       return clk_core_enable(clk->core);
-}
-
 /**
  * clk_enable - ungate a clock
  * @clk: the clk being ungated
@@ -1115,15 +748,18 @@ int clk_enable(struct clk *clk)
        unsigned long flags;
        int ret;
 
+       if (!clk)
+               return 0;
+
        flags = clk_enable_lock();
-       ret = __clk_enable(clk);
+       ret = clk_core_enable(clk->core);
        clk_enable_unlock(flags);
 
        return ret;
 }
 EXPORT_SYMBOL_GPL(clk_enable);
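For consumers, prepare and enable remain a pair: clk_prepare()/clk_unprepare() may sleep, while clk_enable()/clk_disable() run under the enable spinlock and must not. A hedged sketch of the usual pattern, with made-up device and con_id names, using the clk_prepare_enable()/clk_disable_unprepare() helpers from linux/clk.h:

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

/* Hypothetical consumer: the usual prepare/enable pairing for callers
 * running in process context. */
static int example_start_device_clock(struct device *dev, struct clk **out)
{
        struct clk *clk = devm_clk_get(dev, "bus"); /* "bus" is a made-up con_id */
        int ret;

        if (IS_ERR(clk))
                return PTR_ERR(clk);

        ret = clk_prepare_enable(clk);
        if (ret)
                return ret;

        *out = clk;
        return 0;
}

static void example_stop_device_clock(struct clk *clk)
{
        clk_disable_unprepare(clk);
}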
 
-static unsigned long clk_core_round_rate_nolock(struct clk_core *clk,
+static unsigned long clk_core_round_rate_nolock(struct clk_core *core,
                                                unsigned long rate,
                                                unsigned long min_rate,
                                                unsigned long max_rate)
@@ -1134,25 +770,25 @@ static unsigned long clk_core_round_rate_nolock(struct clk_core *clk,
 
        lockdep_assert_held(&prepare_lock);
 
-       if (!clk)
+       if (!core)
                return 0;
 
-       parent = clk->parent;
+       parent = core->parent;
        if (parent)
                parent_rate = parent->rate;
 
-       if (clk->ops->determine_rate) {
+       if (core->ops->determine_rate) {
                parent_hw = parent ? parent->hw : NULL;
-               return clk->ops->determine_rate(clk->hw, rate,
+               return core->ops->determine_rate(core->hw, rate,
                                                min_rate, max_rate,
                                                &parent_rate, &parent_hw);
-       } else if (clk->ops->round_rate)
-               return clk->ops->round_rate(clk->hw, rate, &parent_rate);
-       else if (clk->flags & CLK_SET_RATE_PARENT)
-               return clk_core_round_rate_nolock(clk->parent, rate, min_rate,
+       } else if (core->ops->round_rate)
+               return core->ops->round_rate(core->hw, rate, &parent_rate);
+       else if (core->flags & CLK_SET_RATE_PARENT)
+               return clk_core_round_rate_nolock(core->parent, rate, min_rate,
                                                  max_rate);
        else
-               return clk->rate;
+               return core->rate;
 }
 
 /**
@@ -1162,8 +798,7 @@ static unsigned long clk_core_round_rate_nolock(struct clk_core *clk,
  * @min_rate: returned rate must be greater than this rate
  * @max_rate: returned rate must be less than this rate
  *
- * Caller must hold prepare_lock.  Useful for clk_ops such as .set_rate and
- * .determine_rate.
+ * Useful for clk_ops such as .set_rate and .determine_rate.
  */
 unsigned long __clk_determine_rate(struct clk_hw *hw,
                                   unsigned long rate,
@@ -1182,7 +817,7 @@ EXPORT_SYMBOL_GPL(__clk_determine_rate);
  * @clk: round the rate of this clock
  * @rate: the rate which is to be rounded
  *
- * Caller must hold prepare_lock.  Useful for clk_ops such as .set_rate
+ * Useful for clk_ops such as .set_rate
  */
 unsigned long __clk_round_rate(struct clk *clk, unsigned long rate)
 {
@@ -1224,7 +859,7 @@ EXPORT_SYMBOL_GPL(clk_round_rate);
 
 /**
  * __clk_notify - call clk notifier chain
- * @clk: struct clk * that is changing rate
+ * @core: clk that is changing rate
  * @msg: clk notifier type (see include/linux/clk.h)
  * @old_rate: old clk rate
  * @new_rate: new clk rate
@@ -1236,7 +871,7 @@ EXPORT_SYMBOL_GPL(clk_round_rate);
  * called if all went well, or NOTIFY_STOP or NOTIFY_BAD immediately if
  * a driver returns that.
  */
-static int __clk_notify(struct clk_core *clk, unsigned long msg,
+static int __clk_notify(struct clk_core *core, unsigned long msg,
                unsigned long old_rate, unsigned long new_rate)
 {
        struct clk_notifier *cn;
@@ -1247,7 +882,7 @@ static int __clk_notify(struct clk_core *clk, unsigned long msg,
        cnd.new_rate = new_rate;
 
        list_for_each_entry(cn, &clk_notifier_list, node) {
-               if (cn->clk->core == clk) {
+               if (cn->clk->core == core) {
                        cnd.clk = cn->clk;
                        ret = srcu_notifier_call_chain(&cn->notifier_head, msg,
                                        &cnd);
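__clk_notify() is what ultimately invokes the per-clk SRCU notifier chain. On the consumer side the chain is populated with clk_notifier_register(); a hedged sketch of a callback that vetoes rates it cannot tolerate (the 200 MHz limit and the names are invented):

#include <linux/clk.h>
#include <linux/notifier.h>

/* Hypothetical consumer callback invoked from the chain that
 * __clk_notify() walks above. */
static int example_rate_notifier(struct notifier_block *nb,
                                 unsigned long event, void *data)
{
        struct clk_notifier_data *cnd = data;

        if (event == PRE_RATE_CHANGE && cnd->new_rate > 200000000)
                return NOTIFY_BAD;      /* veto the rate change */

        return NOTIFY_OK;
}

static struct notifier_block example_nb = {
        .notifier_call = example_rate_notifier,
};

/* somewhere in probe():  clk_notifier_register(clk, &example_nb);  */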
@@ -1259,44 +894,42 @@ static int __clk_notify(struct clk_core *clk, unsigned long msg,
 
 /**
  * __clk_recalc_accuracies
- * @clk: first clk in the subtree
+ * @core: first clk in the subtree
  *
  * Walks the subtree of clks starting with clk and recalculates accuracies as
  * it goes.  Note that if a clk does not implement the .recalc_accuracy
- * callback then it is assumed that the clock will take on the accuracy of it's
+ * callback then it is assumed that the clock will take on the accuracy of its
  * parent.
- *
- * Caller must hold prepare_lock.
  */
-static void __clk_recalc_accuracies(struct clk_core *clk)
+static void __clk_recalc_accuracies(struct clk_core *core)
 {
        unsigned long parent_accuracy = 0;
        struct clk_core *child;
 
        lockdep_assert_held(&prepare_lock);
 
-       if (clk->parent)
-               parent_accuracy = clk->parent->accuracy;
+       if (core->parent)
+               parent_accuracy = core->parent->accuracy;
 
-       if (clk->ops->recalc_accuracy)
-               clk->accuracy = clk->ops->recalc_accuracy(clk->hw,
+       if (core->ops->recalc_accuracy)
+               core->accuracy = core->ops->recalc_accuracy(core->hw,
                                                          parent_accuracy);
        else
-               clk->accuracy = parent_accuracy;
+               core->accuracy = parent_accuracy;
 
-       hlist_for_each_entry(child, &clk->children, child_node)
+       hlist_for_each_entry(child, &core->children, child_node)
                __clk_recalc_accuracies(child);
 }
 
-static long clk_core_get_accuracy(struct clk_core *clk)
+static long clk_core_get_accuracy(struct clk_core *core)
 {
        unsigned long accuracy;
 
        clk_prepare_lock();
-       if (clk && (clk->flags & CLK_GET_ACCURACY_NOCACHE))
-               __clk_recalc_accuracies(clk);
+       if (core && (core->flags & CLK_GET_ACCURACY_NOCACHE))
+               __clk_recalc_accuracies(core);
 
-       accuracy = __clk_get_accuracy(clk);
+       accuracy = __clk_get_accuracy(core);
        clk_prepare_unlock();
 
        return accuracy;
@@ -1320,17 +953,17 @@ long clk_get_accuracy(struct clk *clk)
 }
 EXPORT_SYMBOL_GPL(clk_get_accuracy);
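clk_get_accuracy() returns the cached accuracy in parts per billion; clks without a .recalc_accuracy callback inherit their parent's value, as __clk_recalc_accuracies() shows. A trivial, hypothetical consumer sketch:

#include <linux/clk.h>
#include <linux/printk.h>

/* Hypothetical consumer helper around clk_get_accuracy(). */
static void example_report_accuracy(struct clk *clk)
{
        pr_info("clock accuracy: %ld ppb\n", clk_get_accuracy(clk));
}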
 
-static unsigned long clk_recalc(struct clk_core *clk,
+static unsigned long clk_recalc(struct clk_core *core,
                                unsigned long parent_rate)
 {
-       if (clk->ops->recalc_rate)
-               return clk->ops->recalc_rate(clk->hw, parent_rate);
+       if (core->ops->recalc_rate)
+               return core->ops->recalc_rate(core->hw, parent_rate);
        return parent_rate;
 }
 
 /**
  * __clk_recalc_rates
- * @clk: first clk in the subtree
+ * @core: first clk in the subtree
  * @msg: notification type (see include/linux/clk.h)
  *
  * Walks the subtree of clks starting with clk and recalculates rates as it
@@ -1339,10 +972,8 @@ static unsigned long clk_recalc(struct clk_core *clk,
  *
  * clk_recalc_rates also propagates the POST_RATE_CHANGE notification,
  * if necessary.
- *
- * Caller must hold prepare_lock.
  */
-static void __clk_recalc_rates(struct clk_core *clk, unsigned long msg)
+static void __clk_recalc_rates(struct clk_core *core, unsigned long msg)
 {
        unsigned long old_rate;
        unsigned long parent_rate = 0;
@@ -1350,34 +981,34 @@ static void __clk_recalc_rates(struct clk_core *clk, unsigned long msg)
 
        lockdep_assert_held(&prepare_lock);
 
-       old_rate = clk->rate;
+       old_rate = core->rate;
 
-       if (clk->parent)
-               parent_rate = clk->parent->rate;
+       if (core->parent)
+               parent_rate = core->parent->rate;
 
-       clk->rate = clk_recalc(clk, parent_rate);
+       core->rate = clk_recalc(core, parent_rate);
 
        /*
         * ignore NOTIFY_STOP and NOTIFY_BAD return values for POST_RATE_CHANGE
         * & ABORT_RATE_CHANGE notifiers
         */
-       if (clk->notifier_count && msg)
-               __clk_notify(clk, msg, old_rate, clk->rate);
+       if (core->notifier_count && msg)
+               __clk_notify(core, msg, old_rate, core->rate);
 
-       hlist_for_each_entry(child, &clk->children, child_node)
+       hlist_for_each_entry(child, &core->children, child_node)
                __clk_recalc_rates(child, msg);
 }
 
-static unsigned long clk_core_get_rate(struct clk_core *clk)
+static unsigned long clk_core_get_rate(struct clk_core *core)
 {
        unsigned long rate;
 
        clk_prepare_lock();
 
-       if (clk && (clk->flags & CLK_GET_RATE_NOCACHE))
-               __clk_recalc_rates(clk, 0);
+       if (core && (core->flags & CLK_GET_RATE_NOCACHE))
+               __clk_recalc_rates(core, 0);
 
-       rate = clk_core_get_rate_nolock(clk);
+       rate = clk_core_get_rate_nolock(core);
        clk_prepare_unlock();
 
        return rate;
@@ -1400,15 +1031,15 @@ unsigned long clk_get_rate(struct clk *clk)
 }
 EXPORT_SYMBOL_GPL(clk_get_rate);
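clk_get_rate() normally returns the cached rate (0 for an orphan); only clks flagged CLK_GET_RATE_NOCACHE force the recalculation shown above. A minimal consumer sketch with an invented helper name:

#include <linux/clk.h>
#include <linux/printk.h>

/* Hypothetical consumer helper around clk_get_rate(). */
static unsigned long example_report_rate(struct clk *clk)
{
        unsigned long hz = clk_get_rate(clk);

        pr_info("clock rate: %lu Hz\n", hz);
        return hz;
}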
 
-static int clk_fetch_parent_index(struct clk_core *clk,
+static int clk_fetch_parent_index(struct clk_core *core,
                                  struct clk_core *parent)
 {
        int i;
 
-       if (!clk->parents) {
-               clk->parents = kcalloc(clk->num_parents,
+       if (!core->parents) {
+               core->parents = kcalloc(core->num_parents,
                                        sizeof(struct clk *), GFP_KERNEL);
-               if (!clk->parents)
+               if (!core->parents)
                        return -ENOMEM;
        }
 
@@ -1417,15 +1048,15 @@ static int clk_fetch_parent_index(struct clk_core *clk,
         * or if not yet cached, use string name comparison and cache
         * them now to avoid future calls to clk_core_lookup.
         */
-       for (i = 0; i < clk->num_parents; i++) {
-               if (clk->parents[i] == parent)
+       for (i = 0; i < core->num_parents; i++) {
+               if (core->parents[i] == parent)
                        return i;
 
-               if (clk->parents[i])
+               if (core->parents[i])
                        continue;
 
-               if (!strcmp(clk->parent_names[i], parent->name)) {
-                       clk->parents[i] = clk_core_lookup(parent->name);
+               if (!strcmp(core->parent_names[i], parent->name)) {
+                       core->parents[i] = clk_core_lookup(parent->name);
                        return i;
                }
        }
@@ -1433,28 +1064,28 @@ static int clk_fetch_parent_index(struct clk_core *clk,
        return -EINVAL;
 }
 
-static void clk_reparent(struct clk_core *clk, struct clk_core *new_parent)
+static void clk_reparent(struct clk_core *core, struct clk_core *new_parent)
 {
-       hlist_del(&clk->child_node);
+       hlist_del(&core->child_node);
 
        if (new_parent) {
                /* avoid duplicate POST_RATE_CHANGE notifications */
-               if (new_parent->new_child == clk)
+               if (new_parent->new_child == core)
                        new_parent->new_child = NULL;
 
-               hlist_add_head(&clk->child_node, &new_parent->children);
+               hlist_add_head(&core->child_node, &new_parent->children);
        } else {
-               hlist_add_head(&clk->child_node, &clk_orphan_list);
+               hlist_add_head(&core->child_node, &clk_orphan_list);
        }
 
-       clk->parent = new_parent;
+       core->parent = new_parent;
 }
 
-static struct clk_core *__clk_set_parent_before(struct clk_core *clk,
+static struct clk_core *__clk_set_parent_before(struct clk_core *core,
                                           struct clk_core *parent)
 {
        unsigned long flags;
-       struct clk_core *old_parent = clk->parent;
+       struct clk_core *old_parent = core->parent;
 
        /*
         * Migrate prepare state between parents and prevent race with
@@ -1473,17 +1104,17 @@ static struct clk_core *__clk_set_parent_before(struct clk_core *clk,
         *
         * See also: Comment for clk_set_parent() below.
         */
-       if (clk->prepare_count) {
+       if (core->prepare_count) {
                clk_core_prepare(parent);
                flags = clk_enable_lock();
                clk_core_enable(parent);
-               clk_core_enable(clk);
+               clk_core_enable(core);
                clk_enable_unlock(flags);
        }
 
        /* update the clk tree topology */
        flags = clk_enable_lock();
-       clk_reparent(clk, parent);
+       clk_reparent(core, parent);
        clk_enable_unlock(flags);
 
        return old_parent;
@@ -1508,31 +1139,31 @@ static void __clk_set_parent_after(struct clk_core *core,
        }
 }
 
-static int __clk_set_parent(struct clk_core *clk, struct clk_core *parent,
+static int __clk_set_parent(struct clk_core *core, struct clk_core *parent,
                            u8 p_index)
 {
        unsigned long flags;
        int ret = 0;
        struct clk_core *old_parent;
 
-       old_parent = __clk_set_parent_before(clk, parent);
+       old_parent = __clk_set_parent_before(core, parent);
 
-       trace_clk_set_parent(clk, parent);
+       trace_clk_set_parent(core, parent);
 
        /* change clock input source */
-       if (parent && clk->ops->set_parent)
-               ret = clk->ops->set_parent(clk->hw, p_index);
+       if (parent && core->ops->set_parent)
+               ret = core->ops->set_parent(core->hw, p_index);
 
-       trace_clk_set_parent_complete(clk, parent);
+       trace_clk_set_parent_complete(core, parent);
 
        if (ret) {
                flags = clk_enable_lock();
-               clk_reparent(clk, old_parent);
+               clk_reparent(core, old_parent);
                clk_enable_unlock(flags);
 
-               if (clk->prepare_count) {
+               if (core->prepare_count) {
                        flags = clk_enable_lock();
-                       clk_core_disable(clk);
+                       clk_core_disable(core);
                        clk_core_disable(parent);
                        clk_enable_unlock(flags);
                        clk_core_unprepare(parent);
@@ -1540,14 +1171,14 @@ static int __clk_set_parent(struct clk_core *clk, struct clk_core *parent,
                return ret;
        }
 
-       __clk_set_parent_after(clk, parent, old_parent);
+       __clk_set_parent_after(core, parent, old_parent);
 
        return 0;
 }
 
 /**
  * __clk_speculate_rates
- * @clk: first clk in the subtree
+ * @core: first clk in the subtree
  * @parent_rate: the "future" rate of clk's parent
  *
  * Walks the subtree of clks starting with clk, speculating rates as it
@@ -1558,10 +1189,8 @@ static int __clk_set_parent(struct clk_core *clk, struct clk_core *parent,
  * subtree have subscribed to the notifications.  Note that if a clk does not
  * implement the .recalc_rate callback then it is assumed that the clock will
  * take on the rate of its parent.
- *
- * Caller must hold prepare_lock.
  */
-static int __clk_speculate_rates(struct clk_core *clk,
+static int __clk_speculate_rates(struct clk_core *core,
                                 unsigned long parent_rate)
 {
        struct clk_core *child;
@@ -1570,19 +1199,19 @@ static int __clk_speculate_rates(struct clk_core *clk,
 
        lockdep_assert_held(&prepare_lock);
 
-       new_rate = clk_recalc(clk, parent_rate);
+       new_rate = clk_recalc(core, parent_rate);
 
        /* abort rate change if a driver returns NOTIFY_BAD or NOTIFY_STOP */
-       if (clk->notifier_count)
-               ret = __clk_notify(clk, PRE_RATE_CHANGE, clk->rate, new_rate);
+       if (core->notifier_count)
+               ret = __clk_notify(core, PRE_RATE_CHANGE, core->rate, new_rate);
 
        if (ret & NOTIFY_STOP_MASK) {
                pr_debug("%s: clk notifier callback for clock %s aborted with error %d\n",
-                               __func__, clk->name, ret);
+                               __func__, core->name, ret);
                goto out;
        }
 
-       hlist_for_each_entry(child, &clk->children, child_node) {
+       hlist_for_each_entry(child, &core->children, child_node) {
                ret = __clk_speculate_rates(child, new_rate);
                if (ret & NOTIFY_STOP_MASK)
                        break;
@@ -1592,20 +1221,20 @@ out:
        return ret;
 }
 
-static void clk_calc_subtree(struct clk_core *clk, unsigned long new_rate,
+static void clk_calc_subtree(struct clk_core *core, unsigned long new_rate,
                             struct clk_core *new_parent, u8 p_index)
 {
        struct clk_core *child;
 
-       clk->new_rate = new_rate;
-       clk->new_parent = new_parent;
-       clk->new_parent_index = p_index;
+       core->new_rate = new_rate;
+       core->new_parent = new_parent;
+       core->new_parent_index = p_index;
        /* include clk in new parent's PRE_RATE_CHANGE notifications */
-       clk->new_child = NULL;
-       if (new_parent && new_parent != clk->parent)
-               new_parent->new_child = clk;
+       core->new_child = NULL;
+       if (new_parent && new_parent != core->parent)
+               new_parent->new_child = core;
 
-       hlist_for_each_entry(child, &clk->children, child_node) {
+       hlist_for_each_entry(child, &core->children, child_node) {
                child->new_rate = clk_recalc(child, new_rate);
                clk_calc_subtree(child, child->new_rate, NULL, 0);
        }
@@ -1615,10 +1244,10 @@ static void clk_calc_subtree(struct clk_core *clk, unsigned long new_rate,
  * calculate the new rates returning the topmost clock that has to be
  * changed.
  */
-static struct clk_core *clk_calc_new_rates(struct clk_core *clk,
+static struct clk_core *clk_calc_new_rates(struct clk_core *core,
                                           unsigned long rate)
 {
-       struct clk_core *top = clk;
+       struct clk_core *top = core;
        struct clk_core *old_parent, *parent;
        struct clk_hw *parent_hw;
        unsigned long best_parent_rate = 0;
@@ -1629,20 +1258,20 @@ static struct clk_core *clk_calc_new_rates(struct clk_core *clk,
        long ret;
 
        /* sanity */
-       if (IS_ERR_OR_NULL(clk))
+       if (IS_ERR_OR_NULL(core))
                return NULL;
 
        /* save parent rate, if it exists */
-       parent = old_parent = clk->parent;
+       parent = old_parent = core->parent;
        if (parent)
                best_parent_rate = parent->rate;
 
-       clk_core_get_boundaries(clk, &min_rate, &max_rate);
+       clk_core_get_boundaries(core, &min_rate, &max_rate);
 
        /* find the closest rate and parent clk/rate */
-       if (clk->ops->determine_rate) {
+       if (core->ops->determine_rate) {
                parent_hw = parent ? parent->hw : NULL;
-               ret = clk->ops->determine_rate(clk->hw, rate,
+               ret = core->ops->determine_rate(core->hw, rate,
                                               min_rate,
                                               max_rate,
                                               &best_parent_rate,
@@ -1652,8 +1281,8 @@ static struct clk_core *clk_calc_new_rates(struct clk_core *clk,
 
                new_rate = ret;
                parent = parent_hw ? parent_hw->core : NULL;
-       } else if (clk->ops->round_rate) {
-               ret = clk->ops->round_rate(clk->hw, rate,
+       } else if (core->ops->round_rate) {
+               ret = core->ops->round_rate(core->hw, rate,
                                           &best_parent_rate);
                if (ret < 0)
                        return NULL;
@@ -1661,9 +1290,9 @@ static struct clk_core *clk_calc_new_rates(struct clk_core *clk,
                new_rate = ret;
                if (new_rate < min_rate || new_rate > max_rate)
                        return NULL;
-       } else if (!parent || !(clk->flags & CLK_SET_RATE_PARENT)) {
+       } else if (!parent || !(core->flags & CLK_SET_RATE_PARENT)) {
                /* pass-through clock without adjustable parent */
-               clk->new_rate = clk->rate;
+               core->new_rate = core->rate;
                return NULL;
        } else {
                /* pass-through clock with adjustable parent */
@@ -1674,28 +1303,28 @@ static struct clk_core *clk_calc_new_rates(struct clk_core *clk,
 
        /* some clocks must be gated to change parent */
        if (parent != old_parent &&
-           (clk->flags & CLK_SET_PARENT_GATE) && clk->prepare_count) {
+           (core->flags & CLK_SET_PARENT_GATE) && core->prepare_count) {
                pr_debug("%s: %s not gated but wants to reparent\n",
-                        __func__, clk->name);
+                        __func__, core->name);
                return NULL;
        }
 
        /* try finding the new parent index */
-       if (parent && clk->num_parents > 1) {
-               p_index = clk_fetch_parent_index(clk, parent);
+       if (parent && core->num_parents > 1) {
+               p_index = clk_fetch_parent_index(core, parent);
                if (p_index < 0) {
                        pr_debug("%s: clk %s can not be parent of clk %s\n",
-                                __func__, parent->name, clk->name);
+                                __func__, parent->name, core->name);
                        return NULL;
                }
        }
 
-       if ((clk->flags & CLK_SET_RATE_PARENT) && parent &&
+       if ((core->flags & CLK_SET_RATE_PARENT) && parent &&
            best_parent_rate != parent->rate)
                top = clk_calc_new_rates(parent, best_parent_rate);
 
 out:
-       clk_calc_subtree(clk, new_rate, parent, p_index);
+       clk_calc_subtree(core, new_rate, parent, p_index);
 
        return top;
 }
@@ -1705,33 +1334,33 @@ out:
  * so that in case of an error we can walk down the whole tree again and
  * abort the change.
  */
-static struct clk_core *clk_propagate_rate_change(struct clk_core *clk,
+static struct clk_core *clk_propagate_rate_change(struct clk_core *core,
                                                  unsigned long event)
 {
        struct clk_core *child, *tmp_clk, *fail_clk = NULL;
        int ret = NOTIFY_DONE;
 
-       if (clk->rate == clk->new_rate)
+       if (core->rate == core->new_rate)
                return NULL;
 
-       if (clk->notifier_count) {
-               ret = __clk_notify(clk, event, clk->rate, clk->new_rate);
+       if (core->notifier_count) {
+               ret = __clk_notify(core, event, core->rate, core->new_rate);
                if (ret & NOTIFY_STOP_MASK)
-                       fail_clk = clk;
+                       fail_clk = core;
        }
 
-       hlist_for_each_entry(child, &clk->children, child_node) {
+       hlist_for_each_entry(child, &core->children, child_node) {
                /* Skip children who will be reparented to another clock */
-               if (child->new_parent && child->new_parent != clk)
+               if (child->new_parent && child->new_parent != core)
                        continue;
                tmp_clk = clk_propagate_rate_change(child, event);
                if (tmp_clk)
                        fail_clk = tmp_clk;
        }
 
-       /* handle the new child who might not be in clk->children yet */
-       if (clk->new_child) {
-               tmp_clk = clk_propagate_rate_change(clk->new_child, event);
+       /* handle the new child who might not be in core->children yet */
+       if (core->new_child) {
+               tmp_clk = clk_propagate_rate_change(core->new_child, event);
                if (tmp_clk)
                        fail_clk = tmp_clk;
        }
@@ -1743,7 +1372,7 @@ static struct clk_core *clk_propagate_rate_change(struct clk_core *clk,
  * walk down a subtree and set the new rates notifying the rate
  * change on the way
  */
-static void clk_change_rate(struct clk_core *clk)
+static void clk_change_rate(struct clk_core *core)
 {
        struct clk_core *child;
        struct hlist_node *tmp;
@@ -1752,77 +1381,80 @@ static void clk_change_rate(struct clk_core *clk)
        bool skip_set_rate = false;
        struct clk_core *old_parent;
 
-       old_rate = clk->rate;
+       old_rate = core->rate;
 
-       if (clk->new_parent)
-               best_parent_rate = clk->new_parent->rate;
-       else if (clk->parent)
-               best_parent_rate = clk->parent->rate;
+       if (core->new_parent)
+               best_parent_rate = core->new_parent->rate;
+       else if (core->parent)
+               best_parent_rate = core->parent->rate;
 
-       if (clk->new_parent && clk->new_parent != clk->parent) {
-               old_parent = __clk_set_parent_before(clk, clk->new_parent);
-               trace_clk_set_parent(clk, clk->new_parent);
+       if (core->new_parent && core->new_parent != core->parent) {
+               old_parent = __clk_set_parent_before(core, core->new_parent);
+               trace_clk_set_parent(core, core->new_parent);
 
-               if (clk->ops->set_rate_and_parent) {
+               if (core->ops->set_rate_and_parent) {
                        skip_set_rate = true;
-                       clk->ops->set_rate_and_parent(clk->hw, clk->new_rate,
+                       core->ops->set_rate_and_parent(core->hw, core->new_rate,
                                        best_parent_rate,
-                                       clk->new_parent_index);
-               } else if (clk->ops->set_parent) {
-                       clk->ops->set_parent(clk->hw, clk->new_parent_index);
+                                       core->new_parent_index);
+               } else if (core->ops->set_parent) {
+                       core->ops->set_parent(core->hw, core->new_parent_index);
                }
 
-               trace_clk_set_parent_complete(clk, clk->new_parent);
-               __clk_set_parent_after(clk, clk->new_parent, old_parent);
+               trace_clk_set_parent_complete(core, core->new_parent);
+               __clk_set_parent_after(core, core->new_parent, old_parent);
        }
 
-       trace_clk_set_rate(clk, clk->new_rate);
+       trace_clk_set_rate(core, core->new_rate);
+
+       if (!skip_set_rate && core->ops->set_rate)
+               core->ops->set_rate(core->hw, core->new_rate, best_parent_rate);
 
-       if (!skip_set_rate && clk->ops->set_rate)
-               clk->ops->set_rate(clk->hw, clk->new_rate, best_parent_rate);
+       trace_clk_set_rate_complete(core, core->new_rate);
 
-       trace_clk_set_rate_complete(clk, clk->new_rate);
+       core->rate = clk_recalc(core, best_parent_rate);
 
-       clk->rate = clk_recalc(clk, best_parent_rate);
+       if (core->notifier_count && old_rate != core->rate)
+               __clk_notify(core, POST_RATE_CHANGE, old_rate, core->rate);
 
-       if (clk->notifier_count && old_rate != clk->rate)
-               __clk_notify(clk, POST_RATE_CHANGE, old_rate, clk->rate);
+       if (core->flags & CLK_RECALC_NEW_RATES)
+               (void)clk_calc_new_rates(core, core->new_rate);
 
        /*
         * Use safe iteration, as change_rate can actually swap parents
         * for certain clock types.
         */
-       hlist_for_each_entry_safe(child, tmp, &clk->children, child_node) {
+       hlist_for_each_entry_safe(child, tmp, &core->children, child_node) {
                /* Skip children who will be reparented to another clock */
-               if (child->new_parent && child->new_parent != clk)
+               if (child->new_parent && child->new_parent != core)
                        continue;
                clk_change_rate(child);
        }
 
-       /* handle the new child who might not be in clk->children yet */
-       if (clk->new_child)
-               clk_change_rate(clk->new_child);
+       /* handle the new child who might not be in core->children yet */
+       if (core->new_child)
+               clk_change_rate(core->new_child);
 }
 
-static int clk_core_set_rate_nolock(struct clk_core *clk,
+static int clk_core_set_rate_nolock(struct clk_core *core,
                                    unsigned long req_rate)
 {
        struct clk_core *top, *fail_clk;
        unsigned long rate = req_rate;
        int ret = 0;
 
-       if (!clk)
+       if (!core)
                return 0;
 
        /* bail early if nothing to do */
-       if (rate == clk_core_get_rate_nolock(clk))
+       if (rate == clk_core_get_rate_nolock(core))
                return 0;
 
-       if ((clk->flags & CLK_SET_RATE_GATE) && clk->prepare_count)
+       if ((core->flags & CLK_SET_RATE_GATE) && core->prepare_count)
                return -EBUSY;
 
        /* calculate new rates and get the topmost changed clock */
-       top = clk_calc_new_rates(clk, rate);
+       top = clk_calc_new_rates(core, rate);
        if (!top)
                return -EINVAL;
 
@@ -1838,7 +1470,7 @@ static int clk_core_set_rate_nolock(struct clk_core *clk,
        /* change the rates */
        clk_change_rate(top);
 
-       clk->req_rate = req_rate;
+       core->req_rate = req_rate;
 
        return ret;
 }
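This nolock helper backs the public clk_set_rate(). From a consumer's point of view the usual idiom is to round first and then commit, remembering that a clk flagged CLK_SET_RATE_GATE returns -EBUSY while it is prepared; a hedged sketch with invented names:

#include <linux/clk.h>
#include <linux/errno.h>

/* Hypothetical consumer: round first to see what the hardware can do,
 * then commit the rate. */
static int example_set_pixel_clock(struct clk *clk, unsigned long target_hz)
{
        long rounded = clk_round_rate(clk, target_hz);

        if (rounded < 0)
                return (int)rounded;
        if (rounded == 0)
                return -EINVAL;

        return clk_set_rate(clk, rounded);
}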
@@ -1977,55 +1609,63 @@ EXPORT_SYMBOL_GPL(clk_get_parent);
  * .parents array exists, and if so use it to avoid an expensive tree
  * traversal.  If .parents does not exist then walk the tree.
  */
-static struct clk_core *__clk_init_parent(struct clk_core *clk)
+static struct clk_core *__clk_init_parent(struct clk_core *core)
 {
        struct clk_core *ret = NULL;
        u8 index;
 
        /* handle the trivial cases */
 
-       if (!clk->num_parents)
+       if (!core->num_parents)
                goto out;
 
-       if (clk->num_parents == 1) {
-               if (IS_ERR_OR_NULL(clk->parent))
-                       clk->parent = clk_core_lookup(clk->parent_names[0]);
-               ret = clk->parent;
+       if (core->num_parents == 1) {
+               if (IS_ERR_OR_NULL(core->parent))
+                       core->parent = clk_core_lookup(core->parent_names[0]);
+               ret = core->parent;
                goto out;
        }
 
-       if (!clk->ops->get_parent) {
-               WARN(!clk->ops->get_parent,
+       if (!core->ops->get_parent) {
+               WARN(!core->ops->get_parent,
                        "%s: multi-parent clocks must implement .get_parent\n",
                        __func__);
                goto out;
        };
 
        /*
-        * Do our best to cache parent clocks in clk->parents.  This prevents
-        * unnecessary and expensive lookups.  We don't set clk->parent here;
+        * Do our best to cache parent clocks in core->parents.  This prevents
+        * unnecessary and expensive lookups.  We don't set core->parent here;
         * that is done by the calling function.
         */
 
-       index = clk->ops->get_parent(clk->hw);
+       index = core->ops->get_parent(core->hw);
 
-       if (!clk->parents)
-               clk->parents =
-                       kcalloc(clk->num_parents, sizeof(struct clk *),
+       if (!core->parents)
+               core->parents =
+                       kcalloc(core->num_parents, sizeof(struct clk *),
                                        GFP_KERNEL);
 
-       ret = clk_core_get_parent_by_index(clk, index);
+       ret = clk_core_get_parent_by_index(core, index);
 
 out:
        return ret;
 }
 
-static void clk_core_reparent(struct clk_core *clk,
+static void clk_core_reparent(struct clk_core *core,
                                  struct clk_core *new_parent)
 {
-       clk_reparent(clk, new_parent);
-       __clk_recalc_accuracies(clk);
-       __clk_recalc_rates(clk, POST_RATE_CHANGE);
+       clk_reparent(core, new_parent);
+       __clk_recalc_accuracies(core);
+       __clk_recalc_rates(core, POST_RATE_CHANGE);
+}
+
+void clk_hw_reparent(struct clk_hw *hw, struct clk_hw *new_parent)
+{
+       if (!hw)
+               return;
+
+       clk_core_reparent(hw->core, !new_parent ? NULL : new_parent->core);
 }
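clk_hw_reparent() is the new provider hook for hardware that re-parents itself; note that clk_core_reparent() ends up in __clk_recalc_rates(), which expects prepare_lock to be held, so the call is typically made from within a clk_ops callback. A hypothetical sketch:

#include <linux/clk-provider.h>

/* Hypothetical provider hook: the hardware switched parents on its own
 * (e.g. as a PLL relock fallback), so tell the framework.  Expected to be
 * called from a clk_ops callback, i.e. with prepare_lock already held. */
static void example_hw_switched_parent(struct clk_hw *mux_hw,
                                       struct clk_hw *new_parent_hw)
{
        clk_hw_reparent(mux_hw, new_parent_hw);
}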
 
 /**
@@ -2054,209 +1694,536 @@ bool clk_has_parent(struct clk *clk, struct clk *parent)
        if (core->parent == parent_core)
                return true;
 
-       for (i = 0; i < core->num_parents; i++)
-               if (strcmp(core->parent_names[i], parent_core->name) == 0)
-                       return true;
+       for (i = 0; i < core->num_parents; i++)
+               if (strcmp(core->parent_names[i], parent_core->name) == 0)
+                       return true;
+
+       return false;
+}
+EXPORT_SYMBOL_GPL(clk_has_parent);
+
+static int clk_core_set_parent(struct clk_core *core, struct clk_core *parent)
+{
+       int ret = 0;
+       int p_index = 0;
+       unsigned long p_rate = 0;
+
+       if (!core)
+               return 0;
+
+       /* prevent racing with updates to the clock topology */
+       clk_prepare_lock();
+
+       if (core->parent == parent)
+               goto out;
+
+       /* verify ops for multi-parent clks */
+       if ((core->num_parents > 1) && (!core->ops->set_parent)) {
+               ret = -ENOSYS;
+               goto out;
+       }
+
+       /* check that we are allowed to re-parent if the clock is in use */
+       if ((core->flags & CLK_SET_PARENT_GATE) && core->prepare_count) {
+               ret = -EBUSY;
+               goto out;
+       }
+
+       /* try finding the new parent index */
+       if (parent) {
+               p_index = clk_fetch_parent_index(core, parent);
+               p_rate = parent->rate;
+               if (p_index < 0) {
+                       pr_debug("%s: clk %s can not be parent of clk %s\n",
+                                       __func__, parent->name, core->name);
+                       ret = p_index;
+                       goto out;
+               }
+       }
+
+       /* propagate PRE_RATE_CHANGE notifications */
+       ret = __clk_speculate_rates(core, p_rate);
+
+       /* abort if a driver objects */
+       if (ret & NOTIFY_STOP_MASK)
+               goto out;
+
+       /* do the re-parent */
+       ret = __clk_set_parent(core, parent, p_index);
+
+       /* propagate rate and accuracy recalculation accordingly */
+       if (ret) {
+               __clk_recalc_rates(core, ABORT_RATE_CHANGE);
+       } else {
+               __clk_recalc_rates(core, POST_RATE_CHANGE);
+               __clk_recalc_accuracies(core);
+       }
+
+out:
+       clk_prepare_unlock();
+
+       return ret;
+}
+
+/**
+ * clk_set_parent - switch the parent of a mux clk
+ * @clk: the mux clk whose input we are switching
+ * @parent: the new input to clk
+ *
+ * Re-parent clk to use parent as its new input source.  If clk is in
+ * prepared state, the clk will get enabled for the duration of this call. If
+ * that's not acceptable for a specific clk (e.g. the consumer can't handle
+ * that, the reparenting is glitchy in hardware, etc), use the
+ * CLK_SET_PARENT_GATE flag to allow reparenting only when clk is unprepared.
+ *
+ * After successfully changing clk's parent clk_set_parent will update the
+ * clk topology, sysfs topology and propagate rate recalculation via
+ * __clk_recalc_rates.
+ *
+ * Returns 0 on success, -EERROR otherwise.
+ */
+int clk_set_parent(struct clk *clk, struct clk *parent)
+{
+       if (!clk)
+               return 0;
+
+       return clk_core_set_parent(clk->core, parent ? parent->core : NULL);
+}
+EXPORT_SYMBOL_GPL(clk_set_parent);
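Putting clk_has_parent() and clk_set_parent() together, a consumer can validate a candidate input before switching; a minimal sketch with an invented helper name:

#include <linux/clk.h>
#include <linux/errno.h>

/* Hypothetical consumer: only re-parent if the framework agrees the
 * candidate is a valid input for this mux. */
static int example_switch_to_parent(struct clk *mux, struct clk *parent)
{
        if (!clk_has_parent(mux, parent))
                return -EINVAL;

        return clk_set_parent(mux, parent);
}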
+
+/**
+ * clk_set_phase - adjust the phase shift of a clock signal
+ * @clk: clock signal source
+ * @degrees: number of degrees the signal is shifted
+ *
+ * Shifts the phase of a clock signal by the specified
+ * degrees. Returns 0 on success, -EERROR otherwise.
+ *
+ * This function makes no distinction about the input or reference
+ * signal that we adjust the clock signal phase against. For example
+ * phase locked-loop clock signal generators we may shift phase with
+ * respect to feedback clock signal input, but for other cases the
+ * clock phase may be shifted with respect to some other, unspecified
+ * signal.
+ *
+ * Additionally the concept of phase shift does not propagate through
+ * the clock tree hierarchy, which sets it apart from clock rates and
+ * clock accuracy. A parent clock phase attribute does not have an
+ * impact on the phase attribute of a child clock.
+ */
+int clk_set_phase(struct clk *clk, int degrees)
+{
+       int ret = -EINVAL;
+
+       if (!clk)
+               return 0;
+
+       /* sanity check degrees */
+       degrees %= 360;
+       if (degrees < 0)
+               degrees += 360;
+
+       clk_prepare_lock();
+
+       trace_clk_set_phase(clk->core, degrees);
+
+       if (clk->core->ops->set_phase)
+               ret = clk->core->ops->set_phase(clk->core->hw, degrees);
+
+       trace_clk_set_phase_complete(clk->core, degrees);
+
+       if (!ret)
+               clk->core->phase = degrees;
+
+       clk_prepare_unlock();
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(clk_set_phase);
+
+static int clk_core_get_phase(struct clk_core *core)
+{
+       int ret;
+
+       clk_prepare_lock();
+       ret = core->phase;
+       clk_prepare_unlock();
+
+       return ret;
+}
+
+/**
+ * clk_get_phase - return the phase shift of a clock signal
+ * @clk: clock signal source
+ *
+ * Returns the phase shift of a clock node in degrees, otherwise returns
+ * -EERROR.
+ */
+int clk_get_phase(struct clk *clk)
+{
+       if (!clk)
+               return 0;
+
+       return clk_core_get_phase(clk->core);
+}
+EXPORT_SYMBOL_GPL(clk_get_phase);
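A short usage sketch for the phase API; the clock handle and the 90 degree requirement are hypothetical, and whether a given clock supports .set_phase depends entirely on its provider:

#include <linux/clk.h>
#include <linux/errno.h>

static int example_set_quadrature(struct clk *sclk)
{
        int ret;

        /* request a 90 degree shift relative to the provider's reference */
        ret = clk_set_phase(sclk, 90);
        if (ret)
                return ret;

        /* clk_get_phase() returns the cached value set above */
        if (clk_get_phase(sclk) != 90)
                return -EIO;

        return 0;
}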
+
+/**
+ * clk_is_match - check if two clks point to the same hardware clock
+ * @p: clk compared against q
+ * @q: clk compared against p
+ *
+ * Returns true if the two struct clk pointers both point to the same hardware
+ * clock node. Put differently, returns true if struct clk *p and struct clk *q
+ * share the same struct clk_core object.
+ *
+ * Returns false otherwise. Note that two NULL clks are treated as matching.
+ */
+bool clk_is_match(const struct clk *p, const struct clk *q)
+{
+       /* trivial case: identical struct clks or both NULL */
+       if (p == q)
+               return true;
+
+       /* true if clk->core pointers match. Avoid dereferencing garbage */
+       if (!IS_ERR_OR_NULL(p) && !IS_ERR_OR_NULL(q))
+               if (p->core == q->core)
+                       return true;
+
+       return false;
+}
+EXPORT_SYMBOL_GPL(clk_is_match);
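Where this helps, sketched with hypothetical consumer names: two clk_get() calls return distinct per-user cookies even when they resolve to the same hardware clock, so pointer comparison is not sufficient and clk_is_match() is used instead.

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

static bool example_same_source(struct device *dev)
{
        struct clk *a, *b;

        a = devm_clk_get(dev, "baud");
        b = devm_clk_get(dev, "ref");
        if (IS_ERR(a) || IS_ERR(b))
                return false;

        /* compares the underlying struct clk_core, not the cookies */
        return clk_is_match(a, b);
}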
+
+/***        debugfs support        ***/
+
+#ifdef CONFIG_DEBUG_FS
+#include <linux/debugfs.h>
+
+static struct dentry *rootdir;
+static int inited = 0;
+static DEFINE_MUTEX(clk_debug_lock);
+static HLIST_HEAD(clk_debug_list);
+
+static struct hlist_head *all_lists[] = {
+       &clk_root_list,
+       &clk_orphan_list,
+       NULL,
+};
+
+static struct hlist_head *orphan_list[] = {
+       &clk_orphan_list,
+       NULL,
+};
+
+static void clk_summary_show_one(struct seq_file *s, struct clk_core *c,
+                                int level)
+{
+       if (!c)
+               return;
+
+       seq_printf(s, "%*s%-*s %11d %12d %11lu %10lu %-3d\n",
+                  level * 3 + 1, "",
+                  30 - level * 3, c->name,
+                  c->enable_count, c->prepare_count, clk_core_get_rate(c),
+                  clk_core_get_accuracy(c), clk_core_get_phase(c));
+}
+
+static void clk_summary_show_subtree(struct seq_file *s, struct clk_core *c,
+                                    int level)
+{
+       struct clk_core *child;
+
+       if (!c)
+               return;
+
+       clk_summary_show_one(s, c, level);
+
+       hlist_for_each_entry(child, &c->children, child_node)
+               clk_summary_show_subtree(s, child, level + 1);
+}
+
+static int clk_summary_show(struct seq_file *s, void *data)
+{
+       struct clk_core *c;
+       struct hlist_head **lists = (struct hlist_head **)s->private;
+
+       seq_puts(s, "   clock                         enable_cnt  prepare_cnt        rate   accuracy   phase\n");
+       seq_puts(s, "----------------------------------------------------------------------------------------\n");
+
+       clk_prepare_lock();
+
+       for (; *lists; lists++)
+               hlist_for_each_entry(c, *lists, child_node)
+                       clk_summary_show_subtree(s, c, 0);
+
+       clk_prepare_unlock();
+
+       return 0;
+}
+
+
+static int clk_summary_open(struct inode *inode, struct file *file)
+{
+       return single_open(file, clk_summary_show, inode->i_private);
+}
+
+static const struct file_operations clk_summary_fops = {
+       .open           = clk_summary_open,
+       .read           = seq_read,
+       .llseek         = seq_lseek,
+       .release        = single_release,
+};
+
+static void clk_dump_one(struct seq_file *s, struct clk_core *c, int level)
+{
+       if (!c)
+               return;
+
+       /* This should be JSON format, i.e. elements separated with a comma */
+       seq_printf(s, "\"%s\": { ", c->name);
+       seq_printf(s, "\"enable_count\": %d,", c->enable_count);
+       seq_printf(s, "\"prepare_count\": %d,", c->prepare_count);
+       seq_printf(s, "\"rate\": %lu,", clk_core_get_rate(c));
+       seq_printf(s, "\"accuracy\": %lu,", clk_core_get_accuracy(c));
+       seq_printf(s, "\"phase\": %d", clk_core_get_phase(c));
+}
+
+static void clk_dump_subtree(struct seq_file *s, struct clk_core *c, int level)
+{
+       struct clk_core *child;
+
+       if (!c)
+               return;
+
+       clk_dump_one(s, c, level);
+
+       hlist_for_each_entry(child, &c->children, child_node) {
+               seq_printf(s, ",");
+               clk_dump_subtree(s, child, level + 1);
+       }
+
+       seq_printf(s, "}");
+}
+
+static int clk_dump(struct seq_file *s, void *data)
+{
+       struct clk_core *c;
+       bool first_node = true;
+       struct hlist_head **lists = (struct hlist_head **)s->private;
+
+       seq_printf(s, "{");
+
+       clk_prepare_lock();
+
+       for (; *lists; lists++) {
+               hlist_for_each_entry(c, *lists, child_node) {
+                       if (!first_node)
+                               seq_puts(s, ",");
+                       first_node = false;
+                       clk_dump_subtree(s, c, 0);
+               }
+       }
+
+       clk_prepare_unlock();
 
-       return false;
+       seq_puts(s, "}\n");
+       return 0;
 }
-EXPORT_SYMBOL_GPL(clk_has_parent);
 
-static int clk_core_set_parent(struct clk_core *clk, struct clk_core *parent)
-{
-       int ret = 0;
-       int p_index = 0;
-       unsigned long p_rate = 0;
 
-       if (!clk)
-               return 0;
+static int clk_dump_open(struct inode *inode, struct file *file)
+{
+       return single_open(file, clk_dump, inode->i_private);
+}
 
-       /* prevent racing with updates to the clock topology */
-       clk_prepare_lock();
+static const struct file_operations clk_dump_fops = {
+       .open           = clk_dump_open,
+       .read           = seq_read,
+       .llseek         = seq_lseek,
+       .release        = single_release,
+};
 
-       if (clk->parent == parent)
-               goto out;
+static int clk_debug_create_one(struct clk_core *core, struct dentry *pdentry)
+{
+       struct dentry *d;
+       int ret = -ENOMEM;
 
-       /* verify ops for for multi-parent clks */
-       if ((clk->num_parents > 1) && (!clk->ops->set_parent)) {
-               ret = -ENOSYS;
+       if (!core || !pdentry) {
+               ret = -EINVAL;
                goto out;
        }
 
-       /* check that we are allowed to re-parent if the clock is in use */
-       if ((clk->flags & CLK_SET_PARENT_GATE) && clk->prepare_count) {
-               ret = -EBUSY;
+       d = debugfs_create_dir(core->name, pdentry);
+       if (!d)
                goto out;
-       }
 
-       /* try finding the new parent index */
-       if (parent) {
-               p_index = clk_fetch_parent_index(clk, parent);
-               p_rate = parent->rate;
-               if (p_index < 0) {
-                       pr_debug("%s: clk %s can not be parent of clk %s\n",
-                                       __func__, parent->name, clk->name);
-                       ret = p_index;
-                       goto out;
-               }
-       }
+       core->dentry = d;
 
-       /* propagate PRE_RATE_CHANGE notifications */
-       ret = __clk_speculate_rates(clk, p_rate);
+       d = debugfs_create_u32("clk_rate", S_IRUGO, core->dentry,
+                       (u32 *)&core->rate);
+       if (!d)
+               goto err_out;
 
-       /* abort if a driver objects */
-       if (ret & NOTIFY_STOP_MASK)
-               goto out;
+       d = debugfs_create_u32("clk_accuracy", S_IRUGO, core->dentry,
+                       (u32 *)&core->accuracy);
+       if (!d)
+               goto err_out;
 
-       /* do the re-parent */
-       ret = __clk_set_parent(clk, parent, p_index);
+       d = debugfs_create_u32("clk_phase", S_IRUGO, core->dentry,
+                       (u32 *)&core->phase);
+       if (!d)
+               goto err_out;
 
-       /* propagate rate an accuracy recalculation accordingly */
-       if (ret) {
-               __clk_recalc_rates(clk, ABORT_RATE_CHANGE);
-       } else {
-               __clk_recalc_rates(clk, POST_RATE_CHANGE);
-               __clk_recalc_accuracies(clk);
+       d = debugfs_create_x32("clk_flags", S_IRUGO, core->dentry,
+                       (u32 *)&core->flags);
+       if (!d)
+               goto err_out;
+
+       d = debugfs_create_u32("clk_prepare_count", S_IRUGO, core->dentry,
+                       (u32 *)&core->prepare_count);
+       if (!d)
+               goto err_out;
+
+       d = debugfs_create_u32("clk_enable_count", S_IRUGO, core->dentry,
+                       (u32 *)&core->enable_count);
+       if (!d)
+               goto err_out;
+
+       d = debugfs_create_u32("clk_notifier_count", S_IRUGO, core->dentry,
+                       (u32 *)&core->notifier_count);
+       if (!d)
+               goto err_out;
+
+       if (core->ops->debug_init) {
+               ret = core->ops->debug_init(core->hw, core->dentry);
+               if (ret)
+                       goto err_out;
        }
 
-out:
-       clk_prepare_unlock();
+       ret = 0;
+       goto out;
 
+err_out:
+       debugfs_remove_recursive(core->dentry);
+       core->dentry = NULL;
+out:
        return ret;
 }
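For reference, a provider can extend the per-clock directory created above through the .debug_init op; the sketch below is illustrative only, and the example_clk structure, its status field and the file name are made up:

#include <linux/clk-provider.h>
#include <linux/debugfs.h>

struct example_clk {
        struct clk_hw hw;
        u32 status;                     /* hypothetical cached status word */
};

static int example_debug_init(struct clk_hw *hw, struct dentry *dentry)
{
        struct example_clk *eclk = container_of(hw, struct example_clk, hw);

        /* appears next to clk_rate, clk_flags, ... in debugfs */
        if (!debugfs_create_x32("example_status", S_IRUGO, dentry,
                                &eclk->status))
                return -ENOMEM;

        return 0;
}

static const struct clk_ops example_clk_ops = {
        /* .enable/.disable/.recalc_rate etc. omitted */
        .debug_init     = example_debug_init,
};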
 
 /**
- * clk_set_parent - switch the parent of a mux clk
- * @clk: the mux clk whose input we are switching
- * @parent: the new input to clk
- *
- * Re-parent clk to use parent as its new input source.  If clk is in
- * prepared state, the clk will get enabled for the duration of this call. If
- * that's not acceptable for a specific clk (Eg: the consumer can't handle
- * that, the reparenting is glitchy in hardware, etc), use the
- * CLK_SET_PARENT_GATE flag to allow reparenting only when clk is unprepared.
- *
- * After successfully changing clk's parent clk_set_parent will update the
- * clk topology, sysfs topology and propagate rate recalculation via
- * __clk_recalc_rates.
+ * clk_debug_register - add a clk node to the debugfs clk directory
+ * @core: the clk being added to the debugfs clk directory
  *
- * Returns 0 on success, -EERROR otherwise.
+ * Dynamically adds a clk to the debugfs clk directory if debugfs has been
+ * initialized.  Otherwise it bails out early since the debugfs clk directory
+ * will be created lazily by clk_debug_init as part of a late_initcall.
  */
-int clk_set_parent(struct clk *clk, struct clk *parent)
+static int clk_debug_register(struct clk_core *core)
 {
-       if (!clk)
-               return 0;
+       int ret = 0;
 
-       return clk_core_set_parent(clk->core, parent ? parent->core : NULL);
+       mutex_lock(&clk_debug_lock);
+       hlist_add_head(&core->debug_node, &clk_debug_list);
+
+       if (!inited)
+               goto unlock;
+
+       ret = clk_debug_create_one(core, rootdir);
+unlock:
+       mutex_unlock(&clk_debug_lock);
+
+       return ret;
 }
-EXPORT_SYMBOL_GPL(clk_set_parent);
 
-/**
- * clk_set_phase - adjust the phase shift of a clock signal
- * @clk: clock signal source
- * @degrees: number of degrees the signal is shifted
- *
- * Shifts the phase of a clock signal by the specified
- * degrees. Returns 0 on success, -EERROR otherwise.
- *
- * This function makes no distinction about the input or reference
- * signal that we adjust the clock signal phase against. For example
- * phase locked-loop clock signal generators we may shift phase with
- * respect to feedback clock signal input, but for other cases the
- * clock phase may be shifted with respect to some other, unspecified
- * signal.
+/**
+ * clk_debug_unregister - remove a clk node from the debugfs clk directory
+ * @core: the clk being removed from the debugfs clk directory
  *
- * Additionally the concept of phase shift does not propagate through
- * the clock tree hierarchy, which sets it apart from clock rates and
- * clock accuracy. A parent clock phase attribute does not have an
- * impact on the phase attribute of a child clock.
+ * Dynamically removes a clk and all of its child nodes from the
+ * debugfs clk directory if clk->dentry points to the debugfs directory
+ * created by clk_debug_register in __clk_init.
  */
-int clk_set_phase(struct clk *clk, int degrees)
+static void clk_debug_unregister(struct clk_core *core)
 {
-       int ret = -EINVAL;
+       mutex_lock(&clk_debug_lock);
+       hlist_del_init(&core->debug_node);
+       debugfs_remove_recursive(core->dentry);
+       core->dentry = NULL;
+       mutex_unlock(&clk_debug_lock);
+}
 
-       if (!clk)
-               return 0;
+struct dentry *clk_debugfs_add_file(struct clk_hw *hw, char *name, umode_t mode,
+                               void *data, const struct file_operations *fops)
+{
+       struct dentry *d = NULL;
 
-       /* sanity check degrees */
-       degrees %= 360;
-       if (degrees < 0)
-               degrees += 360;
+       if (hw->core->dentry)
+               d = debugfs_create_file(name, mode, hw->core->dentry, data,
+                                       fops);
 
-       clk_prepare_lock();
+       return d;
+}
+EXPORT_SYMBOL_GPL(clk_debugfs_add_file);
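Alternatively, code that already holds the clk_hw can add a file after registration with clk_debugfs_add_file(); a hedged sketch, with a made-up file name and trivial seq_file contents:

#include <linux/clk-provider.h>
#include <linux/fs.h>
#include <linux/seq_file.h>

static int example_state_show(struct seq_file *s, void *unused)
{
        seq_puts(s, "example state\n");
        return 0;
}

static int example_state_open(struct inode *inode, struct file *file)
{
        return single_open(file, example_state_show, inode->i_private);
}

static const struct file_operations example_state_fops = {
        .open           = example_state_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static void example_add_debugfs(struct clk_hw *hw)
{
        /* quietly does nothing if the clk has no debugfs directory yet */
        clk_debugfs_add_file(hw, "state", S_IRUGO, hw, &example_state_fops);
}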
 
-       trace_clk_set_phase(clk->core, degrees);
+/**
+ * clk_debug_init - lazily populate the debugfs clk directory
+ *
+ * clks are often initialized very early during boot, before memory can be
+ * dynamically allocated and well before debugfs is set up. This function
+ * populates the debugfs clk directory once at boot-time when we know that
+ * debugfs is set up. It should only be called once at boot time; all clks
+ * added dynamically after that are registered with clk_debug_register.
+ */
+static int __init clk_debug_init(void)
+{
+       struct clk_core *core;
+       struct dentry *d;
 
-       if (clk->core->ops->set_phase)
-               ret = clk->core->ops->set_phase(clk->core->hw, degrees);
+       rootdir = debugfs_create_dir("clk", NULL);
 
-       trace_clk_set_phase_complete(clk->core, degrees);
+       if (!rootdir)
+               return -ENOMEM;
 
-       if (!ret)
-               clk->core->phase = degrees;
+       d = debugfs_create_file("clk_summary", S_IRUGO, rootdir, &all_lists,
+                               &clk_summary_fops);
+       if (!d)
+               return -ENOMEM;
 
-       clk_prepare_unlock();
+       d = debugfs_create_file("clk_dump", S_IRUGO, rootdir, &all_lists,
+                               &clk_dump_fops);
+       if (!d)
+               return -ENOMEM;
 
-       return ret;
-}
-EXPORT_SYMBOL_GPL(clk_set_phase);
+       d = debugfs_create_file("clk_orphan_summary", S_IRUGO, rootdir,
+                               &orphan_list, &clk_summary_fops);
+       if (!d)
+               return -ENOMEM;
 
-static int clk_core_get_phase(struct clk_core *clk)
-{
-       int ret = 0;
+       d = debugfs_create_file("clk_orphan_dump", S_IRUGO, rootdir,
+                               &orphan_list, &clk_dump_fops);
+       if (!d)
+               return -ENOMEM;
 
-       if (!clk)
-               goto out;
+       mutex_lock(&clk_debug_lock);
+       hlist_for_each_entry(core, &clk_debug_list, debug_node)
+               clk_debug_create_one(core, rootdir);
 
-       clk_prepare_lock();
-       ret = clk->phase;
-       clk_prepare_unlock();
+       inited = 1;
+       mutex_unlock(&clk_debug_lock);
 
-out:
-       return ret;
+       return 0;
 }
-EXPORT_SYMBOL_GPL(clk_get_phase);
-
-/**
- * clk_get_phase - return the phase shift of a clock signal
- * @clk: clock signal source
- *
- * Returns the phase shift of a clock node in degrees, otherwise returns
- * -EERROR.
- */
-int clk_get_phase(struct clk *clk)
+late_initcall(clk_debug_init);
+#else
+static inline int clk_debug_register(struct clk_core *core) { return 0; }
+static inline void clk_debug_reparent(struct clk_core *core,
+                                     struct clk_core *new_parent)
 {
-       if (!clk)
-               return 0;
-
-       return clk_core_get_phase(clk->core);
 }
-
-/**
- * clk_is_match - check if two clk's point to the same hardware clock
- * @p: clk compared against q
- * @q: clk compared against p
- *
- * Returns true if the two struct clk pointers both point to the same hardware
- * clock node. Put differently, returns true if struct clk *p and struct clk *q
- * share the same struct clk_core object.
- *
- * Returns false otherwise. Note that two NULL clks are treated as matching.
- */
-bool clk_is_match(const struct clk *p, const struct clk *q)
+static inline void clk_debug_unregister(struct clk_core *core)
 {
-       /* trivial case: identical struct clk's or both NULL */
-       if (p == q)
-               return true;
-
-       /* true if clk->core pointers match. Avoid derefing garbage */
-       if (!IS_ERR_OR_NULL(p) && !IS_ERR_OR_NULL(q))
-               if (p->core == q->core)
-                       return true;
-
-       return false;
 }
-EXPORT_SYMBOL_GPL(clk_is_match);
+#endif
 
 /**
  * __clk_init - initialize the data structures in a struct clk
@@ -2271,67 +2238,67 @@ static int __clk_init(struct device *dev, struct clk *clk_user)
        int i, ret = 0;
        struct clk_core *orphan;
        struct hlist_node *tmp2;
-       struct clk_core *clk;
+       struct clk_core *core;
        unsigned long rate;
 
        if (!clk_user)
                return -EINVAL;
 
-       clk = clk_user->core;
+       core = clk_user->core;
 
        clk_prepare_lock();
 
        /* check to see if a clock with this name is already registered */
-       if (clk_core_lookup(clk->name)) {
+       if (clk_core_lookup(core->name)) {
                pr_debug("%s: clk %s already initialized\n",
-                               __func__, clk->name);
+                               __func__, core->name);
                ret = -EEXIST;
                goto out;
        }
 
        /* check that clk_ops are sane.  See Documentation/clk.txt */
-       if (clk->ops->set_rate &&
-           !((clk->ops->round_rate || clk->ops->determine_rate) &&
-             clk->ops->recalc_rate)) {
+       if (core->ops->set_rate &&
+           !((core->ops->round_rate || core->ops->determine_rate) &&
+             core->ops->recalc_rate)) {
                pr_warning("%s: %s must implement .round_rate or .determine_rate in addition to .recalc_rate\n",
-                               __func__, clk->name);
+                               __func__, core->name);
                ret = -EINVAL;
                goto out;
        }
 
-       if (clk->ops->set_parent && !clk->ops->get_parent) {
+       if (core->ops->set_parent && !core->ops->get_parent) {
                pr_warning("%s: %s must implement .get_parent & .set_parent\n",
-                               __func__, clk->name);
+                               __func__, core->name);
                ret = -EINVAL;
                goto out;
        }
 
-       if (clk->ops->set_rate_and_parent &&
-                       !(clk->ops->set_parent && clk->ops->set_rate)) {
+       if (core->ops->set_rate_and_parent &&
+                       !(core->ops->set_parent && core->ops->set_rate)) {
                pr_warn("%s: %s must implement .set_parent & .set_rate\n",
-                               __func__, clk->name);
+                               __func__, core->name);
                ret = -EINVAL;
                goto out;
        }
 
        /* throw a WARN if any entries in parent_names are NULL */
-       for (i = 0; i < clk->num_parents; i++)
-               WARN(!clk->parent_names[i],
+       for (i = 0; i < core->num_parents; i++)
+               WARN(!core->parent_names[i],
                                "%s: invalid NULL in %s's .parent_names\n",
-                               __func__, clk->name);
+                               __func__, core->name);
 
        /*
         * Allocate an array of struct clk *'s to avoid unnecessary string
         * look-ups of clk's possible parents.  This can fail for clocks passed
-        * in to clk_init during early boot; thus any access to clk->parents[]
+        * in to clk_init during early boot; thus any access to core->parents[]
         * must always check for a NULL pointer and try to populate it if
         * necessary.
         *
-        * If clk->parents is not NULL we skip this entire block.  This allows
-        * for clock drivers to statically initialize clk->parents.
+        * If core->parents is not NULL we skip this entire block.  This allows
+        * for clock drivers to statically initialize core->parents.
         */
-       if (clk->num_parents > 1 && !clk->parents) {
-               clk->parents = kcalloc(clk->num_parents, sizeof(struct clk *),
+       if (core->num_parents > 1 && !core->parents) {
+               core->parents = kcalloc(core->num_parents, sizeof(struct clk *),
                                        GFP_KERNEL);
                /*
                 * clk_core_lookup returns NULL for parents that have not been
@@ -2339,16 +2306,16 @@ static int __clk_init(struct device *dev, struct clk *clk_user)
                 * for a NULL pointer.  We can always perform lazy lookups for
                 * missing parents later on.
                 */
-               if (clk->parents)
-                       for (i = 0; i < clk->num_parents; i++)
-                               clk->parents[i] =
-                                       clk_core_lookup(clk->parent_names[i]);
+               if (core->parents)
+                       for (i = 0; i < core->num_parents; i++)
+                               core->parents[i] =
+                                       clk_core_lookup(core->parent_names[i]);
        }
 
-       clk->parent = __clk_init_parent(clk);
+       core->parent = __clk_init_parent(core);
 
        /*
-        * Populate clk->parent if parent has already been __clk_init'd.  If
+        * Populate core->parent if parent has already been __clk_init'd.  If
         * parent has not yet been __clk_init'd then place clk in the orphan
         * list.  If clk has set the CLK_IS_ROOT flag then place it in the root
         * clk list.
@@ -2357,13 +2324,13 @@ static int __clk_init(struct device *dev, struct clk *clk_user)
         * clocks and re-parent any that are children of the clock currently
         * being clk_init'd.
         */
-       if (clk->parent)
-               hlist_add_head(&clk->child_node,
-                               &clk->parent->children);
-       else if (clk->flags & CLK_IS_ROOT)
-               hlist_add_head(&clk->child_node, &clk_root_list);
+       if (core->parent)
+               hlist_add_head(&core->child_node,
+                               &core->parent->children);
+       else if (core->flags & CLK_IS_ROOT)
+               hlist_add_head(&core->child_node, &clk_root_list);
        else
-               hlist_add_head(&clk->child_node, &clk_orphan_list);
+               hlist_add_head(&core->child_node, &clk_orphan_list);
 
        /*
         * Set clk's accuracy.  The preferred method is to use
@@ -2372,23 +2339,23 @@ static int __clk_init(struct device *dev, struct clk *clk_user)
         * parent (or is orphaned) then accuracy is set to zero (perfect
         * clock).
         */
-       if (clk->ops->recalc_accuracy)
-               clk->accuracy = clk->ops->recalc_accuracy(clk->hw,
-                                       __clk_get_accuracy(clk->parent));
-       else if (clk->parent)
-               clk->accuracy = clk->parent->accuracy;
+       if (core->ops->recalc_accuracy)
+               core->accuracy = core->ops->recalc_accuracy(core->hw,
+                                       __clk_get_accuracy(core->parent));
+       else if (core->parent)
+               core->accuracy = core->parent->accuracy;
        else
-               clk->accuracy = 0;
+               core->accuracy = 0;
 
        /*
         * Set clk's phase.
         * Since a phase is by definition relative to its parent, just
         * query the current clock phase, or just assume it's in phase.
         */
-       if (clk->ops->get_phase)
-               clk->phase = clk->ops->get_phase(clk->hw);
+       if (core->ops->get_phase)
+               core->phase = core->ops->get_phase(core->hw);
        else
-               clk->phase = 0;
+               core->phase = 0;
 
        /*
         * Set clk's rate.  The preferred method is to use .recalc_rate.  For
@@ -2396,14 +2363,14 @@ static int __clk_init(struct device *dev, struct clk *clk_user)
         * parent's rate.  If a clock doesn't have a parent (or is orphaned)
         * then rate is set to zero.
         */
-       if (clk->ops->recalc_rate)
-               rate = clk->ops->recalc_rate(clk->hw,
-                               clk_core_get_rate_nolock(clk->parent));
-       else if (clk->parent)
-               rate = clk->parent->rate;
+       if (core->ops->recalc_rate)
+               rate = core->ops->recalc_rate(core->hw,
+                               clk_core_get_rate_nolock(core->parent));
+       else if (core->parent)
+               rate = core->parent->rate;
        else
                rate = 0;
-       clk->rate = clk->req_rate = rate;
+       core->rate = core->req_rate = rate;
 
        /*
         * walk the list of orphan clocks and reparent any that are children of
@@ -2412,14 +2379,14 @@ static int __clk_init(struct device *dev, struct clk *clk_user)
        hlist_for_each_entry_safe(orphan, tmp2, &clk_orphan_list, child_node) {
                if (orphan->num_parents && orphan->ops->get_parent) {
                        i = orphan->ops->get_parent(orphan->hw);
-                       if (!strcmp(clk->name, orphan->parent_names[i]))
-                               clk_core_reparent(orphan, clk);
+                       if (!strcmp(core->name, orphan->parent_names[i]))
+                               clk_core_reparent(orphan, core);
                        continue;
                }
 
                for (i = 0; i < orphan->num_parents; i++)
-                       if (!strcmp(clk->name, orphan->parent_names[i])) {
-                               clk_core_reparent(orphan, clk);
+                       if (!strcmp(core->name, orphan->parent_names[i])) {
+                               clk_core_reparent(orphan, core);
                                break;
                        }
         }
@@ -2432,15 +2399,15 @@ static int __clk_init(struct device *dev, struct clk *clk_user)
         * Please consider other ways of solving initialization problems before
         * using this callback, as its use is discouraged.
         */
-       if (clk->ops->init)
-               clk->ops->init(clk->hw);
+       if (core->ops->init)
+               core->ops->init(core->hw);
 
-       kref_init(&clk->ref);
+       kref_init(&core->ref);
 out:
        clk_prepare_unlock();
 
        if (!ret)
-               clk_debug_register(clk);
+               clk_debug_register(core);
 
        return ret;
 }
@@ -2486,63 +2453,58 @@ void __clk_free_clk(struct clk *clk)
  *
  * clk_register is the primary interface for populating the clock tree with new
  * clock nodes.  It returns a pointer to the newly allocated struct clk which
- * cannot be dereferenced by driver code but may be used in conjuction with the
+ * cannot be dereferenced by driver code but may be used in conjunction with the
  * rest of the clock API.  In the event of an error clk_register will return an
  * error code; drivers must test for an error code after calling clk_register.
  */
 struct clk *clk_register(struct device *dev, struct clk_hw *hw)
 {
        int i, ret;
-       struct clk_core *clk;
+       struct clk_core *core;
 
-       clk = kzalloc(sizeof(*clk), GFP_KERNEL);
-       if (!clk) {
-               pr_err("%s: could not allocate clk\n", __func__);
+       core = kzalloc(sizeof(*core), GFP_KERNEL);
+       if (!core) {
                ret = -ENOMEM;
                goto fail_out;
        }
 
-       clk->name = kstrdup_const(hw->init->name, GFP_KERNEL);
-       if (!clk->name) {
-               pr_err("%s: could not allocate clk->name\n", __func__);
+       core->name = kstrdup_const(hw->init->name, GFP_KERNEL);
+       if (!core->name) {
                ret = -ENOMEM;
                goto fail_name;
        }
-       clk->ops = hw->init->ops;
+       core->ops = hw->init->ops;
        if (dev && dev->driver)
-               clk->owner = dev->driver->owner;
-       clk->hw = hw;
-       clk->flags = hw->init->flags;
-       clk->num_parents = hw->init->num_parents;
-       hw->core = clk;
+               core->owner = dev->driver->owner;
+       core->hw = hw;
+       core->flags = hw->init->flags;
+       core->num_parents = hw->init->num_parents;
+       hw->core = core;
 
        /* allocate local copy in case parent_names is __initdata */
-       clk->parent_names = kcalloc(clk->num_parents, sizeof(char *),
+       core->parent_names = kcalloc(core->num_parents, sizeof(char *),
                                        GFP_KERNEL);
 
-       if (!clk->parent_names) {
-               pr_err("%s: could not allocate clk->parent_names\n", __func__);
+       if (!core->parent_names) {
                ret = -ENOMEM;
                goto fail_parent_names;
        }
 
 
        /* copy each string name in case parent_names is __initdata */
-       for (i = 0; i < clk->num_parents; i++) {
-               clk->parent_names[i] = kstrdup_const(hw->init->parent_names[i],
+       for (i = 0; i < core->num_parents; i++) {
+               core->parent_names[i] = kstrdup_const(hw->init->parent_names[i],
                                                GFP_KERNEL);
-               if (!clk->parent_names[i]) {
-                       pr_err("%s: could not copy parent_names\n", __func__);
+               if (!core->parent_names[i]) {
                        ret = -ENOMEM;
                        goto fail_parent_names_copy;
                }
        }
 
-       INIT_HLIST_HEAD(&clk->clks);
+       INIT_HLIST_HEAD(&core->clks);
 
        hw->clk = __clk_create_clk(hw, NULL, NULL);
        if (IS_ERR(hw->clk)) {
-               pr_err("%s: could not allocate per-user clk\n", __func__);
                ret = PTR_ERR(hw->clk);
                goto fail_parent_names_copy;
        }
@@ -2556,35 +2518,32 @@ struct clk *clk_register(struct device *dev, struct clk_hw *hw)
 
 fail_parent_names_copy:
        while (--i >= 0)
-               kfree_const(clk->parent_names[i]);
-       kfree(clk->parent_names);
+               kfree_const(core->parent_names[i]);
+       kfree(core->parent_names);
 fail_parent_names:
-       kfree_const(clk->name);
+       kfree_const(core->name);
 fail_name:
-       kfree(clk);
+       kfree(core);
 fail_out:
        return ERR_PTR(ret);
 }
 EXPORT_SYMBOL_GPL(clk_register);
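For context, the usual caller of clk_register() is a provider that fills a struct clk_init_data and hands over a clk_hw. The sketch below is illustrative only: the example_* names and the pass-through ops are invented, and real drivers embed the clk_hw in a hardware-specific structure. Since clk_register() copies the name and parent_names strings, the init data may live on the stack.

#include <linux/clk-provider.h>
#include <linux/err.h>
#include <linux/slab.h>

static unsigned long example_recalc_rate(struct clk_hw *hw,
                                         unsigned long parent_rate)
{
        return parent_rate;             /* pass-through, for illustration */
}

static const struct clk_ops example_ops = {
        .recalc_rate    = example_recalc_rate,
};

static struct clk *example_register_clk(struct device *dev, const char *name,
                                        const char *parent)
{
        struct clk_init_data init = { };
        struct clk_hw *hw;
        struct clk *clk;

        hw = kzalloc(sizeof(*hw), GFP_KERNEL);
        if (!hw)
                return ERR_PTR(-ENOMEM);

        init.name = name;
        init.ops = &example_ops;
        init.parent_names = &parent;
        init.num_parents = 1;
        hw->init = &init;

        clk = clk_register(dev, hw);
        if (IS_ERR(clk))
                kfree(hw);
        return clk;
}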
 
-/*
- * Free memory allocated for a clock.
- * Caller must hold prepare_lock.
- */
+/* Free memory allocated for a clock. */
 static void __clk_release(struct kref *ref)
 {
-       struct clk_core *clk = container_of(ref, struct clk_core, ref);
-       int i = clk->num_parents;
+       struct clk_core *core = container_of(ref, struct clk_core, ref);
+       int i = core->num_parents;
 
        lockdep_assert_held(&prepare_lock);
 
-       kfree(clk->parents);
+       kfree(core->parents);
        while (--i >= 0)
-               kfree_const(clk->parent_names[i]);
+               kfree_const(core->parent_names[i]);
 
-       kfree(clk->parent_names);
-       kfree_const(clk->name);
-       kfree(clk);
+       kfree(core->parent_names);
+       kfree_const(core->name);
+       kfree(core);
 }
 
 /*
@@ -3068,6 +3027,27 @@ const char *of_clk_get_parent_name(struct device_node *np, int index)
 }
 EXPORT_SYMBOL_GPL(of_clk_get_parent_name);
 
+/**
+ * of_clk_parent_fill() - Fill @parents with names of @np's parents and return
+ * number of parents
+ * @np: Device node pointer associated with clock provider
+ * @parents: pointer to a char array that holds the parents' names
+ * @size: size of the @parents array
+ *
+ * Return: number of parents for the clock node.
+ */
+int of_clk_parent_fill(struct device_node *np, const char **parents,
+                      unsigned int size)
+{
+       unsigned int i = 0;
+
+       while (i < size && (parents[i] = of_clk_get_parent_name(np, i)) != NULL)
+               i++;
+
+       return i;
+}
+EXPORT_SYMBOL_GPL(of_clk_parent_fill);
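A sketch of how a provider's OF init code might use the new helper; the node layout, the array size and the mux registration step are hypothetical:

#include <linux/clk-provider.h>
#include <linux/init.h>
#include <linux/of.h>

#define EXAMPLE_MAX_PARENTS     4

static void __init example_mux_setup(struct device_node *np)
{
        const char *parents[EXAMPLE_MAX_PARENTS];
        int num_parents;

        /* collect the parent names referenced by the node's "clocks" property */
        num_parents = of_clk_parent_fill(np, parents, EXAMPLE_MAX_PARENTS);
        if (num_parents < 2)
                return;

        /* parents[] and num_parents would now feed clk_register_mux() */
}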
+
 struct clock_provider {
        of_clk_init_cb_t clk_init_cb;
        struct device_node *np;
diff --git a/drivers/clk/hisilicon/Kconfig b/drivers/clk/hisilicon/Kconfig
new file mode 100644 (file)
index 0000000..b4165ba
--- /dev/null
@@ -0,0 +1,6 @@
+config COMMON_CLK_HI6220
+       bool "Hi6220 Clock Driver"
+       depends on ARCH_HISI || COMPILE_TEST
+       default ARCH_HISI
+       help
+         Build the Hisilicon Hi6220 clock driver based on the common clock framework.
index 038c02f4d0e740df187b769764bfb28c3f891615..48f0116a032a4e0b9606107e4dec92c90ff11bd0 100644 (file)
@@ -2,8 +2,9 @@
 # Hisilicon Clock specific Makefile
 #
 
-obj-y  += clk.o clkgate-separated.o
+obj-y  += clk.o clkgate-separated.o clkdivider-hi6220.o
 
 obj-$(CONFIG_ARCH_HI3xxx)      += clk-hi3620.o
 obj-$(CONFIG_ARCH_HIP04)       += clk-hip04.o
 obj-$(CONFIG_ARCH_HIX5HD2)     += clk-hix5hd2.o
+obj-$(CONFIG_COMMON_CLK_HI6220)        += clk-hi6220.o
index 472dd2cb10b3bfe71a2e811c6e82db75f2d5df66..715d34a5ef9bc766b330726493445ffb02cfbb4d 100644 (file)
 #include "clk.h"
 
 /* clock parent list */
-static const char *timer0_mux_p[] __initdata = { "osc32k", "timerclk01", };
-static const char *timer1_mux_p[] __initdata = { "osc32k", "timerclk01", };
-static const char *timer2_mux_p[] __initdata = { "osc32k", "timerclk23", };
-static const char *timer3_mux_p[] __initdata = { "osc32k", "timerclk23", };
-static const char *timer4_mux_p[] __initdata = { "osc32k", "timerclk45", };
-static const char *timer5_mux_p[] __initdata = { "osc32k", "timerclk45", };
-static const char *timer6_mux_p[] __initdata = { "osc32k", "timerclk67", };
-static const char *timer7_mux_p[] __initdata = { "osc32k", "timerclk67", };
-static const char *timer8_mux_p[] __initdata = { "osc32k", "timerclk89", };
-static const char *timer9_mux_p[] __initdata = { "osc32k", "timerclk89", };
-static const char *uart0_mux_p[] __initdata = { "osc26m", "pclk", };
-static const char *uart1_mux_p[] __initdata = { "osc26m", "pclk", };
-static const char *uart2_mux_p[] __initdata = { "osc26m", "pclk", };
-static const char *uart3_mux_p[] __initdata = { "osc26m", "pclk", };
-static const char *uart4_mux_p[] __initdata = { "osc26m", "pclk", };
-static const char *spi0_mux_p[] __initdata = { "osc26m", "rclk_cfgaxi", };
-static const char *spi1_mux_p[] __initdata = { "osc26m", "rclk_cfgaxi", };
-static const char *spi2_mux_p[] __initdata = { "osc26m", "rclk_cfgaxi", };
+static const char *const timer0_mux_p[] __initconst = { "osc32k", "timerclk01", };
+static const char *const timer1_mux_p[] __initconst = { "osc32k", "timerclk01", };
+static const char *const timer2_mux_p[] __initconst = { "osc32k", "timerclk23", };
+static const char *const timer3_mux_p[] __initconst = { "osc32k", "timerclk23", };
+static const char *const timer4_mux_p[] __initconst = { "osc32k", "timerclk45", };
+static const char *const timer5_mux_p[] __initconst = { "osc32k", "timerclk45", };
+static const char *const timer6_mux_p[] __initconst = { "osc32k", "timerclk67", };
+static const char *const timer7_mux_p[] __initconst = { "osc32k", "timerclk67", };
+static const char *const timer8_mux_p[] __initconst = { "osc32k", "timerclk89", };
+static const char *const timer9_mux_p[] __initconst = { "osc32k", "timerclk89", };
+static const char *const uart0_mux_p[] __initconst = { "osc26m", "pclk", };
+static const char *const uart1_mux_p[] __initconst = { "osc26m", "pclk", };
+static const char *const uart2_mux_p[] __initconst = { "osc26m", "pclk", };
+static const char *const uart3_mux_p[] __initconst = { "osc26m", "pclk", };
+static const char *const uart4_mux_p[] __initconst = { "osc26m", "pclk", };
+static const char *const spi0_mux_p[] __initconst = { "osc26m", "rclk_cfgaxi", };
+static const char *const spi1_mux_p[] __initconst = { "osc26m", "rclk_cfgaxi", };
+static const char *const spi2_mux_p[] __initconst = { "osc26m", "rclk_cfgaxi", };
 /* share axi parent */
-static const char *saxi_mux_p[] __initdata = { "armpll3", "armpll2", };
-static const char *pwm0_mux_p[] __initdata = { "osc32k", "osc26m", };
-static const char *pwm1_mux_p[] __initdata = { "osc32k", "osc26m", };
-static const char *sd_mux_p[] __initdata = { "armpll2", "armpll3", };
-static const char *mmc1_mux_p[] __initdata = { "armpll2", "armpll3", };
-static const char *mmc1_mux2_p[] __initdata = { "osc26m", "mmc1_div", };
-static const char *g2d_mux_p[] __initdata = { "armpll2", "armpll3", };
-static const char *venc_mux_p[] __initdata = { "armpll2", "armpll3", };
-static const char *vdec_mux_p[] __initdata = { "armpll2", "armpll3", };
-static const char *vpp_mux_p[] __initdata = { "armpll2", "armpll3", };
-static const char *edc0_mux_p[] __initdata = { "armpll2", "armpll3", };
-static const char *ldi0_mux_p[] __initdata = { "armpll2", "armpll4",
+static const char *const saxi_mux_p[] __initconst = { "armpll3", "armpll2", };
+static const char *const pwm0_mux_p[] __initconst = { "osc32k", "osc26m", };
+static const char *const pwm1_mux_p[] __initconst = { "osc32k", "osc26m", };
+static const char *const sd_mux_p[] __initconst = { "armpll2", "armpll3", };
+static const char *const mmc1_mux_p[] __initconst = { "armpll2", "armpll3", };
+static const char *const mmc1_mux2_p[] __initconst = { "osc26m", "mmc1_div", };
+static const char *const g2d_mux_p[] __initconst = { "armpll2", "armpll3", };
+static const char *const venc_mux_p[] __initconst = { "armpll2", "armpll3", };
+static const char *const vdec_mux_p[] __initconst = { "armpll2", "armpll3", };
+static const char *const vpp_mux_p[] __initconst = { "armpll2", "armpll3", };
+static const char *const edc0_mux_p[] __initconst = { "armpll2", "armpll3", };
+static const char *const ldi0_mux_p[] __initconst = { "armpll2", "armpll4",
                                             "armpll3", "armpll5", };
-static const char *edc1_mux_p[] __initdata = { "armpll2", "armpll3", };
-static const char *ldi1_mux_p[] __initdata = { "armpll2", "armpll4",
+static const char *const edc1_mux_p[] __initconst = { "armpll2", "armpll3", };
+static const char *const ldi1_mux_p[] __initconst = { "armpll2", "armpll4",
                                             "armpll3", "armpll5", };
-static const char *rclk_hsic_p[] __initdata = { "armpll3", "armpll2", };
-static const char *mmc2_mux_p[] __initdata = { "armpll2", "armpll3", };
-static const char *mmc3_mux_p[] __initdata = { "armpll2", "armpll3", };
+static const char *const rclk_hsic_p[] __initconst = { "armpll3", "armpll2", };
+static const char *const mmc2_mux_p[] __initconst = { "armpll2", "armpll3", };
+static const char *const mmc3_mux_p[] __initconst = { "armpll2", "armpll3", };
 
 
 /* fixed rate clocks */
diff --git a/drivers/clk/hisilicon/clk-hi6220.c b/drivers/clk/hisilicon/clk-hi6220.c
new file mode 100644 (file)
index 0000000..4563343
--- /dev/null
@@ -0,0 +1,284 @@
+/*
+ * Hisilicon Hi6220 clock driver
+ *
+ * Copyright (c) 2015 Hisilicon Limited.
+ *
+ * Author: Bintian Wang <bintian.wang@huawei.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/clk-provider.h>
+#include <linux/clkdev.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/slab.h>
+
+#include <dt-bindings/clock/hi6220-clock.h>
+
+#include "clk.h"
+
+
+/* clocks in AO (always on) controller */
+static struct hisi_fixed_rate_clock hi6220_fixed_rate_clks[] __initdata = {
+       { HI6220_REF32K,        "ref32k",       NULL, CLK_IS_ROOT, 32764,     },
+       { HI6220_CLK_TCXO,      "clk_tcxo",     NULL, CLK_IS_ROOT, 19200000,  },
+       { HI6220_MMC1_PAD,      "mmc1_pad",     NULL, CLK_IS_ROOT, 100000000, },
+       { HI6220_MMC2_PAD,      "mmc2_pad",     NULL, CLK_IS_ROOT, 100000000, },
+       { HI6220_MMC0_PAD,      "mmc0_pad",     NULL, CLK_IS_ROOT, 200000000, },
+       { HI6220_PLL_BBP,       "bbppll0",      NULL, CLK_IS_ROOT, 245760000, },
+       { HI6220_PLL_GPU,       "gpupll",       NULL, CLK_IS_ROOT, 1000000000,},
+       { HI6220_PLL1_DDR,      "ddrpll1",      NULL, CLK_IS_ROOT, 1066000000,},
+       { HI6220_PLL_SYS,       "syspll",       NULL, CLK_IS_ROOT, 1200000000,},
+       { HI6220_PLL_SYS_MEDIA, "media_syspll", NULL, CLK_IS_ROOT, 1200000000,},
+       { HI6220_DDR_SRC,       "ddr_sel_src",  NULL, CLK_IS_ROOT, 1200000000,},
+       { HI6220_PLL_MEDIA,     "media_pll",    NULL, CLK_IS_ROOT, 1440000000,},
+       { HI6220_PLL_DDR,       "ddrpll0",      NULL, CLK_IS_ROOT, 1600000000,},
+};
+
+static struct hisi_fixed_factor_clock hi6220_fixed_factor_clks[] __initdata = {
+       { HI6220_300M,         "clk_300m",    "syspll",          1, 4, 0, },
+       { HI6220_150M,         "clk_150m",    "clk_300m",        1, 2, 0, },
+       { HI6220_PICOPHY_SRC,  "picophy_src", "clk_150m",        1, 4, 0, },
+       { HI6220_MMC0_SRC_SEL, "mmc0srcsel",  "mmc0_sel",        1, 8, 0, },
+       { HI6220_MMC1_SRC_SEL, "mmc1srcsel",  "mmc1_sel",        1, 8, 0, },
+       { HI6220_MMC2_SRC_SEL, "mmc2srcsel",  "mmc2_sel",        1, 8, 0, },
+       { HI6220_VPU_CODEC,    "vpucodec",    "codec_jpeg_aclk", 1, 2, 0, },
+       { HI6220_MMC0_SMP,     "mmc0_sample", "mmc0_sel",        1, 8, 0, },
+       { HI6220_MMC1_SMP,     "mmc1_sample", "mmc1_sel",        1, 8, 0, },
+       { HI6220_MMC2_SMP,     "mmc2_sample", "mmc2_sel",        1, 8, 0, },
+};
+
+static struct hisi_gate_clock hi6220_separated_gate_clks_ao[] __initdata = {
+       { HI6220_WDT0_PCLK,   "wdt0_pclk",   "clk_tcxo", CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x630, 12, 0, },
+       { HI6220_WDT1_PCLK,   "wdt1_pclk",   "clk_tcxo", CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x630, 13, 0, },
+       { HI6220_WDT2_PCLK,   "wdt2_pclk",   "clk_tcxo", CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x630, 14, 0, },
+       { HI6220_TIMER0_PCLK, "timer0_pclk", "clk_tcxo", CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x630, 15, 0, },
+       { HI6220_TIMER1_PCLK, "timer1_pclk", "clk_tcxo", CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x630, 16, 0, },
+       { HI6220_TIMER2_PCLK, "timer2_pclk", "clk_tcxo", CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x630, 17, 0, },
+       { HI6220_TIMER3_PCLK, "timer3_pclk", "clk_tcxo", CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x630, 18, 0, },
+       { HI6220_TIMER4_PCLK, "timer4_pclk", "clk_tcxo", CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x630, 19, 0, },
+       { HI6220_TIMER5_PCLK, "timer5_pclk", "clk_tcxo", CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x630, 20, 0, },
+       { HI6220_TIMER6_PCLK, "timer6_pclk", "clk_tcxo", CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x630, 21, 0, },
+       { HI6220_TIMER7_PCLK, "timer7_pclk", "clk_tcxo", CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x630, 22, 0, },
+       { HI6220_TIMER8_PCLK, "timer8_pclk", "clk_tcxo", CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x630, 23, 0, },
+       { HI6220_UART0_PCLK,  "uart0_pclk",  "clk_tcxo", CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x630, 24, 0, },
+};
+
+static void __init hi6220_clk_ao_init(struct device_node *np)
+{
+       struct hisi_clock_data *clk_data_ao;
+
+       clk_data_ao = hisi_clk_init(np, HI6220_AO_NR_CLKS);
+       if (!clk_data_ao)
+               return;
+
+       hisi_clk_register_fixed_rate(hi6220_fixed_rate_clks,
+                               ARRAY_SIZE(hi6220_fixed_rate_clks), clk_data_ao);
+
+       hisi_clk_register_fixed_factor(hi6220_fixed_factor_clks,
+                               ARRAY_SIZE(hi6220_fixed_factor_clks), clk_data_ao);
+
+       hisi_clk_register_gate_sep(hi6220_separated_gate_clks_ao,
+                               ARRAY_SIZE(hi6220_separated_gate_clks_ao), clk_data_ao);
+}
+CLK_OF_DECLARE(hi6220_clk_ao, "hisilicon,hi6220-aoctrl", hi6220_clk_ao_init);
+
+
+/* clocks in sysctrl */
+static const char *mmc0_mux0_p[] __initdata = { "pll_ddr_gate", "syspll", };
+static const char *mmc0_mux1_p[] __initdata = { "mmc0_mux0", "pll_media_gate", };
+static const char *mmc0_src_p[] __initdata = { "mmc0srcsel", "mmc0_div", };
+static const char *mmc1_mux0_p[] __initdata = { "pll_ddr_gate", "syspll", };
+static const char *mmc1_mux1_p[] __initdata = { "mmc1_mux0", "pll_media_gate", };
+static const char *mmc1_src_p[]  __initdata = { "mmc1srcsel", "mmc1_div", };
+static const char *mmc2_mux0_p[] __initdata = { "pll_ddr_gate", "syspll", };
+static const char *mmc2_mux1_p[] __initdata = { "mmc2_mux0", "pll_media_gate", };
+static const char *mmc2_src_p[]  __initdata = { "mmc2srcsel", "mmc2_div", };
+static const char *mmc0_sample_in[] __initdata = { "mmc0_sample", "mmc0_pad", };
+static const char *mmc1_sample_in[] __initdata = { "mmc1_sample", "mmc1_pad", };
+static const char *mmc2_sample_in[] __initdata = { "mmc2_sample", "mmc2_pad", };
+static const char *uart1_src[] __initdata = { "clk_tcxo", "clk_150m", };
+static const char *uart2_src[] __initdata = { "clk_tcxo", "clk_150m", };
+static const char *uart3_src[] __initdata = { "clk_tcxo", "clk_150m", };
+static const char *uart4_src[] __initdata = { "clk_tcxo", "clk_150m", };
+static const char *hifi_src[] __initdata = { "syspll", "pll_media_gate", };
+
+static struct hisi_gate_clock hi6220_separated_gate_clks_sys[] __initdata = {
+       { HI6220_MMC0_CLK,      "mmc0_clk",      "mmc0_src",       CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x200, 0,  0, },
+       { HI6220_MMC0_CIUCLK,   "mmc0_ciuclk",   "mmc0_smp_in",    CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x200, 0,  0, },
+       { HI6220_MMC1_CLK,      "mmc1_clk",      "mmc1_src",       CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x200, 1,  0, },
+       { HI6220_MMC1_CIUCLK,   "mmc1_ciuclk",   "mmc1_smp_in",    CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x200, 1,  0, },
+       { HI6220_MMC2_CLK,      "mmc2_clk",      "mmc2_src",       CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x200, 2,  0, },
+       { HI6220_MMC2_CIUCLK,   "mmc2_ciuclk",   "mmc2_smp_in",    CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x200, 2,  0, },
+       { HI6220_USBOTG_HCLK,   "usbotg_hclk",   "clk_bus",        CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x200, 4,  0, },
+       { HI6220_CLK_PICOPHY,   "clk_picophy",   "cs_dapb",        CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x200, 5,  0, },
+       { HI6220_HIFI,          "hifi_clk",      "hifi_div",       CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x210, 0,  0, },
+       { HI6220_DACODEC_PCLK,  "dacodec_pclk",  "clk_bus",        CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x210, 5,  0, },
+       { HI6220_EDMAC_ACLK,    "edmac_aclk",    "clk_bus",        CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x220, 2,  0, },
+       { HI6220_CS_ATB,        "cs_atb",        "cs_atb_div",     CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x230, 0,  0, },
+       { HI6220_I2C0_CLK,      "i2c0_clk",      "clk_150m",       CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x230, 1,  0, },
+       { HI6220_I2C1_CLK,      "i2c1_clk",      "clk_150m",       CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x230, 2,  0, },
+       { HI6220_I2C2_CLK,      "i2c2_clk",      "clk_150m",       CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x230, 3,  0, },
+       { HI6220_I2C3_CLK,      "i2c3_clk",      "clk_150m",       CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x230, 4,  0, },
+       { HI6220_UART1_PCLK,    "uart1_pclk",    "uart1_src",      CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x230, 5,  0, },
+       { HI6220_UART2_PCLK,    "uart2_pclk",    "uart2_src",      CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x230, 6,  0, },
+       { HI6220_UART3_PCLK,    "uart3_pclk",    "uart3_src",      CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x230, 7,  0, },
+       { HI6220_UART4_PCLK,    "uart4_pclk",    "uart4_src",      CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x230, 8,  0, },
+       { HI6220_SPI_CLK,       "spi_clk",       "clk_150m",       CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x230, 9,  0, },
+       { HI6220_TSENSOR_CLK,   "tsensor_clk",   "clk_bus",        CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x230, 12, 0, },
+       { HI6220_MMU_CLK,       "mmu_clk",       "ddrc_axi1",      CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x240, 11, 0, },
+       { HI6220_HIFI_SEL,      "hifi_sel",      "hifi_src",       CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x270, 0,  0, },
+       { HI6220_MMC0_SYSPLL,   "mmc0_syspll",   "syspll",         CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x270, 1,  0, },
+       { HI6220_MMC1_SYSPLL,   "mmc1_syspll",   "syspll",         CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x270, 2,  0, },
+       { HI6220_MMC2_SYSPLL,   "mmc2_syspll",   "syspll",         CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x270, 3,  0, },
+       { HI6220_MMC0_SEL,      "mmc0_sel",      "mmc0_mux1",      CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x270, 6,  0, },
+       { HI6220_MMC1_SEL,      "mmc1_sel",      "mmc1_mux1",      CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x270, 7,  0, },
+       { HI6220_BBPPLL_SEL,    "bbppll_sel",    "pll0_bbp_gate",  CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x270, 9,  0, },
+       { HI6220_MEDIA_PLL_SRC, "media_pll_src", "pll_media_gate", CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x270, 10, 0, },
+       { HI6220_MMC2_SEL,      "mmc2_sel",      "mmc2_mux1",      CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x270, 11, 0, },
+       { HI6220_CS_ATB_SYSPLL, "cs_atb_syspll", "syspll",         CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x270, 12, 0, },
+};
+
+static struct hisi_mux_clock hi6220_mux_clks_sys[] __initdata = {
+       { HI6220_MMC0_SRC,    "mmc0_src",    mmc0_src_p,     ARRAY_SIZE(mmc0_src_p),     CLK_SET_RATE_PARENT, 0x4,   0,  1, 0, },
+       { HI6220_MMC0_SMP_IN, "mmc0_smp_in", mmc0_sample_in, ARRAY_SIZE(mmc0_sample_in), CLK_SET_RATE_PARENT, 0x4,   0,  1, 0, },
+       { HI6220_MMC1_SRC,    "mmc1_src",    mmc1_src_p,     ARRAY_SIZE(mmc1_src_p),     CLK_SET_RATE_PARENT, 0x4,   2,  1, 0, },
+       { HI6220_MMC1_SMP_IN, "mmc1_smp_in", mmc1_sample_in, ARRAY_SIZE(mmc1_sample_in), CLK_SET_RATE_PARENT, 0x4,   2,  1, 0, },
+       { HI6220_MMC2_SRC,    "mmc2_src",    mmc2_src_p,     ARRAY_SIZE(mmc2_src_p),     CLK_SET_RATE_PARENT, 0x4,   4,  1, 0, },
+       { HI6220_MMC2_SMP_IN, "mmc2_smp_in", mmc2_sample_in, ARRAY_SIZE(mmc2_sample_in), CLK_SET_RATE_PARENT, 0x4,   4,  1, 0, },
+       { HI6220_HIFI_SRC,    "hifi_src",    hifi_src,       ARRAY_SIZE(hifi_src),       CLK_SET_RATE_PARENT, 0x400, 0,  1, CLK_MUX_HIWORD_MASK,},
+       { HI6220_UART1_SRC,   "uart1_src",   uart1_src,      ARRAY_SIZE(uart1_src),      CLK_SET_RATE_PARENT, 0x400, 1,  1, CLK_MUX_HIWORD_MASK,},
+       { HI6220_UART2_SRC,   "uart2_src",   uart2_src,      ARRAY_SIZE(uart2_src),      CLK_SET_RATE_PARENT, 0x400, 2,  1, CLK_MUX_HIWORD_MASK,},
+       { HI6220_UART3_SRC,   "uart3_src",   uart3_src,      ARRAY_SIZE(uart3_src),      CLK_SET_RATE_PARENT, 0x400, 3,  1, CLK_MUX_HIWORD_MASK,},
+       { HI6220_UART4_SRC,   "uart4_src",   uart4_src,      ARRAY_SIZE(uart4_src),      CLK_SET_RATE_PARENT, 0x400, 4,  1, CLK_MUX_HIWORD_MASK,},
+       { HI6220_MMC0_MUX0,   "mmc0_mux0",   mmc0_mux0_p,    ARRAY_SIZE(mmc0_mux0_p),    CLK_SET_RATE_PARENT, 0x400, 5,  1, CLK_MUX_HIWORD_MASK,},
+       { HI6220_MMC1_MUX0,   "mmc1_mux0",   mmc1_mux0_p,    ARRAY_SIZE(mmc1_mux0_p),    CLK_SET_RATE_PARENT, 0x400, 11, 1, CLK_MUX_HIWORD_MASK,},
+       { HI6220_MMC2_MUX0,   "mmc2_mux0",   mmc2_mux0_p,    ARRAY_SIZE(mmc2_mux0_p),    CLK_SET_RATE_PARENT, 0x400, 12, 1, CLK_MUX_HIWORD_MASK,},
+       { HI6220_MMC0_MUX1,   "mmc0_mux1",   mmc0_mux1_p,    ARRAY_SIZE(mmc0_mux1_p),    CLK_SET_RATE_PARENT, 0x400, 13, 1, CLK_MUX_HIWORD_MASK,},
+       { HI6220_MMC1_MUX1,   "mmc1_mux1",   mmc1_mux1_p,    ARRAY_SIZE(mmc1_mux1_p),    CLK_SET_RATE_PARENT, 0x400, 14, 1, CLK_MUX_HIWORD_MASK,},
+       { HI6220_MMC2_MUX1,   "mmc2_mux1",   mmc2_mux1_p,    ARRAY_SIZE(mmc2_mux1_p),    CLK_SET_RATE_PARENT, 0x400, 15, 1, CLK_MUX_HIWORD_MASK,},
+};
+
+static struct hi6220_divider_clock hi6220_div_clks_sys[] __initdata = {
+       { HI6220_CLK_BUS,     "clk_bus",     "clk_300m",      CLK_SET_RATE_PARENT, 0x490, 0,  4, 7, },
+       { HI6220_MMC0_DIV,    "mmc0_div",    "mmc0_syspll",   CLK_SET_RATE_PARENT, 0x494, 0,  6, 7, },
+       { HI6220_MMC1_DIV,    "mmc1_div",    "mmc1_syspll",   CLK_SET_RATE_PARENT, 0x498, 0,  6, 7, },
+       { HI6220_MMC2_DIV,    "mmc2_div",    "mmc2_syspll",   CLK_SET_RATE_PARENT, 0x49c, 0,  6, 7, },
+       { HI6220_HIFI_DIV,    "hifi_div",    "hifi_sel",      CLK_SET_RATE_PARENT, 0x4a0, 0,  4, 7, },
+       { HI6220_BBPPLL0_DIV, "bbppll0_div", "bbppll_sel",    CLK_SET_RATE_PARENT, 0x4a0, 8,  6, 15,},
+       { HI6220_CS_DAPB,     "cs_dapb",     "picophy_src",   CLK_SET_RATE_PARENT, 0x4a0, 24, 2, 31,},
+       { HI6220_CS_ATB_DIV,  "cs_atb_div",  "cs_atb_syspll", CLK_SET_RATE_PARENT, 0x4a4, 0,  4, 7, },
+};
+
+static void __init hi6220_clk_sys_init(struct device_node *np)
+{
+       struct hisi_clock_data *clk_data;
+
+       clk_data = hisi_clk_init(np, HI6220_SYS_NR_CLKS);
+       if (!clk_data)
+               return;
+
+       hisi_clk_register_gate_sep(hi6220_separated_gate_clks_sys,
+                       ARRAY_SIZE(hi6220_separated_gate_clks_sys), clk_data);
+
+       hisi_clk_register_mux(hi6220_mux_clks_sys,
+                       ARRAY_SIZE(hi6220_mux_clks_sys), clk_data);
+
+       hi6220_clk_register_divider(hi6220_div_clks_sys,
+                       ARRAY_SIZE(hi6220_div_clks_sys), clk_data);
+}
+CLK_OF_DECLARE(hi6220_clk_sys, "hisilicon,hi6220-sysctrl", hi6220_clk_sys_init);
+
+
+/* clocks in media controller */
+static const char *clk_1000_1200_src[] __initdata = { "pll_gpu_gate", "media_syspll_src", };
+static const char *clk_1440_1200_src[] __initdata = { "media_syspll_src", "media_pll_src", };
+static const char *clk_1000_1440_src[] __initdata = { "pll_gpu_gate", "media_pll_src", };
+
+static struct hisi_gate_clock hi6220_separated_gate_clks_media[] __initdata = {
+       { HI6220_DSI_PCLK,       "dsi_pclk",         "vpucodec",      CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x520, 0,  0, },
+       { HI6220_G3D_PCLK,       "g3d_pclk",         "vpucodec",      CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x520, 1,  0, },
+       { HI6220_ACLK_CODEC_VPU, "aclk_codec_vpu",   "ade_core_src",  CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x520, 3,  0, },
+       { HI6220_ISP_SCLK,       "isp_sclk",         "isp_sclk_src",  CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x520, 5,  0, },
+       { HI6220_ADE_CORE,       "ade_core",         "ade_core_src",  CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x520, 6,  0, },
+       { HI6220_MED_MMU,        "media_mmu",        "mmu_clk",       CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x520, 8,  0, },
+       { HI6220_CFG_CSI4PHY,    "cfg_csi4phy",      "clk_tcxo",      CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x520, 9,  0, },
+       { HI6220_CFG_CSI2PHY,    "cfg_csi2phy",      "clk_tcxo",      CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x520, 10, 0, },
+       { HI6220_ISP_SCLK_GATE,  "isp_sclk_gate",    "media_pll_src", CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x520, 11, 0, },
+       { HI6220_ISP_SCLK_GATE1, "isp_sclk_gate1",   "media_pll_src", CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x520, 12, 0, },
+       { HI6220_ADE_CORE_GATE,  "ade_core_gate",    "media_pll_src", CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x520, 14, 0, },
+       { HI6220_CODEC_VPU_GATE, "codec_vpu_gate",   "clk_1000_1440", CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x520, 15, 0, },
+       { HI6220_MED_SYSPLL,     "media_syspll_src", "media_syspll",  CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x520, 17, 0, },
+};
+
+static struct hisi_mux_clock hi6220_mux_clks_media[] __initdata = {
+       { HI6220_1440_1200, "clk_1440_1200", clk_1440_1200_src, ARRAY_SIZE(clk_1440_1200_src), CLK_SET_RATE_PARENT, 0x51c, 0, 1, 0, },
+       { HI6220_1000_1200, "clk_1000_1200", clk_1000_1200_src, ARRAY_SIZE(clk_1000_1200_src), CLK_SET_RATE_PARENT, 0x51c, 1, 1, 0, },
+       { HI6220_1000_1440, "clk_1000_1440", clk_1000_1440_src, ARRAY_SIZE(clk_1000_1440_src), CLK_SET_RATE_PARENT, 0x51c, 6, 1, 0, },
+};
+
+static struct hi6220_divider_clock hi6220_div_clks_media[] __initdata = {
+       { HI6220_CODEC_JPEG,    "codec_jpeg_aclk", "media_pll_src",  CLK_SET_RATE_PARENT, 0xcbc, 0,  4, 23, },
+       { HI6220_ISP_SCLK_SRC,  "isp_sclk_src",    "isp_sclk_gate",  CLK_SET_RATE_PARENT, 0xcbc, 8,  4, 15, },
+       { HI6220_ISP_SCLK1,     "isp_sclk1",       "isp_sclk_gate1", CLK_SET_RATE_PARENT, 0xcbc, 24, 4, 31, },
+       { HI6220_ADE_CORE_SRC,  "ade_core_src",    "ade_core_gate",  CLK_SET_RATE_PARENT, 0xcc0, 16, 3, 23, },
+       { HI6220_ADE_PIX_SRC,   "ade_pix_src",     "clk_1440_1200",  CLK_SET_RATE_PARENT, 0xcc0, 24, 6, 31, },
+       { HI6220_G3D_CLK,       "g3d_clk",         "clk_1000_1200",  CLK_SET_RATE_PARENT, 0xcc4, 8,  4, 15, },
+       { HI6220_CODEC_VPU_SRC, "codec_vpu_src",   "codec_vpu_gate", CLK_SET_RATE_PARENT, 0xcc4, 24, 6, 31, },
+};
+
+static void __init hi6220_clk_media_init(struct device_node *np)
+{
+       struct hisi_clock_data *clk_data;
+
+       clk_data = hisi_clk_init(np, HI6220_MEDIA_NR_CLKS);
+       if (!clk_data)
+               return;
+
+       hisi_clk_register_gate_sep(hi6220_separated_gate_clks_media,
+                               ARRAY_SIZE(hi6220_separated_gate_clks_media), clk_data);
+
+       hisi_clk_register_mux(hi6220_mux_clks_media,
+                               ARRAY_SIZE(hi6220_mux_clks_media), clk_data);
+
+       hi6220_clk_register_divider(hi6220_div_clks_media,
+                               ARRAY_SIZE(hi6220_div_clks_media), clk_data);
+}
+CLK_OF_DECLARE(hi6220_clk_media, "hisilicon,hi6220-mediactrl", hi6220_clk_media_init);
+
+/* clocks in pmctrl */
+static struct hisi_gate_clock hi6220_gate_clks_power[] __initdata = {
+       { HI6220_PLL_GPU_GATE,   "pll_gpu_gate",   "gpupll",    CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x8,  0,  0, },
+       { HI6220_PLL1_DDR_GATE,  "pll1_ddr_gate",  "ddrpll1",   CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x10, 0,  0, },
+       { HI6220_PLL_DDR_GATE,   "pll_ddr_gate",   "ddrpll0",   CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x18, 0,  0, },
+       { HI6220_PLL_MEDIA_GATE, "pll_media_gate", "media_pll", CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x38, 0,  0, },
+       { HI6220_PLL0_BBP_GATE,  "pll0_bbp_gate",  "bbppll0",   CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x48, 0,  0, },
+};
+
+static struct hi6220_divider_clock hi6220_div_clks_power[] __initdata = {
+       { HI6220_DDRC_SRC,  "ddrc_src",  "ddr_sel_src", CLK_SET_RATE_PARENT, 0x5a8, 0, 4, 0, },
+       { HI6220_DDRC_AXI1, "ddrc_axi1", "ddrc_src",    CLK_SET_RATE_PARENT, 0x5a8, 8, 2, 0, },
+};
+
+static void __init hi6220_clk_power_init(struct device_node *np)
+{
+       struct hisi_clock_data *clk_data;
+
+       clk_data = hisi_clk_init(np, HI6220_POWER_NR_CLKS);
+       if (!clk_data)
+               return;
+
+       hisi_clk_register_gate(hi6220_gate_clks_power,
+                               ARRAY_SIZE(hi6220_gate_clks_power), clk_data);
+
+       hi6220_clk_register_divider(hi6220_div_clks_power,
+                               ARRAY_SIZE(hi6220_div_clks_power), clk_data);
+}
+CLK_OF_DECLARE(hi6220_clk_power, "hisilicon,hi6220-pmctrl", hi6220_clk_power_init);
index f1d239435826ca6e7a873d08c9dcaa0c8590e630..0aaf29da84918337bb89dd83c1aec8c5dcb1a75a 100644 (file)
@@ -46,15 +46,15 @@ static struct hisi_fixed_rate_clock hix5hd2_fixed_rate_clks[] __initdata = {
        { HIX5HD2_FIXED_83M, "83m", NULL, CLK_IS_ROOT, 83333333, },
 };
 
-static const char *sfc_mux_p[] __initdata = {
+static const char *const sfc_mux_p[] __initconst = {
                "24m", "150m", "200m", "100m", "75m", };
 static u32 sfc_mux_table[] = {0, 4, 5, 6, 7};
 
-static const char *sdio_mux_p[] __initdata = {
+static const char *const sdio_mux_p[] __initconst = {
                "75m", "100m", "50m", "15m", };
 static u32 sdio_mux_table[] = {0, 1, 2, 3};
 
-static const char *fephy_mux_p[] __initdata = { "25m", "125m"};
+static const char *const fephy_mux_p[] __initconst = { "25m", "125m"};
 static u32 fephy_mux_table[] = {0, 1};
 
 
@@ -252,8 +252,9 @@ static struct clk_ops clk_complex_ops = {
        .disable = clk_complex_disable,
 };
 
-void __init hix5hd2_clk_register_complex(struct hix5hd2_complex_clock *clks,
-                                        int nums, struct hisi_clock_data *data)
+static void __init
+hix5hd2_clk_register_complex(struct hix5hd2_complex_clock *clks, int nums,
+                            struct hisi_clock_data *data)
 {
        void __iomem *base = data->base;
        int i;
index a078e84f7b05629a4d27bb3a6cc5a945a1beac8e..c90a89739b033480253641db138c99afde08be6c 100644 (file)
@@ -232,3 +232,32 @@ void __init hisi_clk_register_gate_sep(struct hisi_gate_clock *clks,
                data->clk_data.clks[clks[i].id] = clk;
        }
 }
+
+void __init hi6220_clk_register_divider(struct hi6220_divider_clock *clks,
+                                       int nums, struct hisi_clock_data *data)
+{
+       struct clk *clk;
+       void __iomem *base = data->base;
+       int i;
+
+       for (i = 0; i < nums; i++) {
+               clk = hi6220_register_clkdiv(NULL, clks[i].name,
+                                               clks[i].parent_name,
+                                               clks[i].flags,
+                                               base + clks[i].offset,
+                                               clks[i].shift,
+                                               clks[i].width,
+                                               clks[i].mask_bit,
+                                               &hisi_clk_lock);
+               if (IS_ERR(clk)) {
+                       pr_err("%s: failed to register clock %s\n",
+                              __func__, clks[i].name);
+                       continue;
+               }
+
+               if (clks[i].alias)
+                       clk_register_clkdev(clk, clks[i].alias, NULL);
+
+               data->clk_data.clks[clks[i].id] = clk;
+       }
+}
index 31083ffc0650b326f2232dead75672b2df6400e8..b56fbc1c5f27c4e0a8886fdeb4721891553113a1 100644 (file)
@@ -55,7 +55,7 @@ struct hisi_fixed_factor_clock {
 struct hisi_mux_clock {
        unsigned int            id;
        const char              *name;
-       const char              **parent_names;
+       const char              *const *parent_names;
        u8                      num_parents;
        unsigned long           flags;
        unsigned long           offset;
@@ -79,6 +79,18 @@ struct hisi_divider_clock {
        const char              *alias;
 };
 
+struct hi6220_divider_clock {
+       unsigned int            id;
+       const char              *name;
+       const char              *parent_name;
+       unsigned long           flags;
+       unsigned long           offset;
+       u8                      shift;
+       u8                      width;
+       u32                     mask_bit;
+       const char              *alias;
+};
+
 struct hisi_gate_clock {
        unsigned int            id;
        const char              *name;
@@ -94,18 +106,23 @@ struct clk *hisi_register_clkgate_sep(struct device *, const char *,
                                const char *, unsigned long,
                                void __iomem *, u8,
                                u8, spinlock_t *);
+struct clk *hi6220_register_clkdiv(struct device *dev, const char *name,
+       const char *parent_name, unsigned long flags, void __iomem *reg,
+       u8 shift, u8 width, u32 mask_bit, spinlock_t *lock);
 
-struct hisi_clock_data __init *hisi_clk_init(struct device_node *, int);
-void __init hisi_clk_register_fixed_rate(struct hisi_fixed_rate_clock *,
-                                       int, struct hisi_clock_data *);
-void __init hisi_clk_register_fixed_factor(struct hisi_fixed_factor_clock *,
-                                       int, struct hisi_clock_data *);
-void __init hisi_clk_register_mux(struct hisi_mux_clock *, int,
+struct hisi_clock_data *hisi_clk_init(struct device_node *, int);
+void hisi_clk_register_fixed_rate(struct hisi_fixed_rate_clock *,
+                               int, struct hisi_clock_data *);
+void hisi_clk_register_fixed_factor(struct hisi_fixed_factor_clock *,
+                               int, struct hisi_clock_data *);
+void hisi_clk_register_mux(struct hisi_mux_clock *, int,
                                struct hisi_clock_data *);
-void __init hisi_clk_register_divider(struct hisi_divider_clock *,
+void hisi_clk_register_divider(struct hisi_divider_clock *,
+                               int, struct hisi_clock_data *);
+void hisi_clk_register_gate(struct hisi_gate_clock *,
+                               int, struct hisi_clock_data *);
+void hisi_clk_register_gate_sep(struct hisi_gate_clock *,
+                               int, struct hisi_clock_data *);
+void hi6220_clk_register_divider(struct hi6220_divider_clock *,
                                int, struct hisi_clock_data *);
-void __init hisi_clk_register_gate(struct hisi_gate_clock *,
-                                       int, struct hisi_clock_data *);
-void __init hisi_clk_register_gate_sep(struct hisi_gate_clock *,
-                                       int, struct hisi_clock_data *);
 #endif /* __HISI_CLK_H */
diff --git a/drivers/clk/hisilicon/clkdivider-hi6220.c b/drivers/clk/hisilicon/clkdivider-hi6220.c
new file mode 100644 (file)
index 0000000..113eee8
--- /dev/null
@@ -0,0 +1,156 @@
+/*
+ * Hisilicon hi6220 SoC divider clock driver
+ *
+ * Copyright (c) 2015 Hisilicon Limited.
+ *
+ * Author: Bintian Wang <bintian.wang@huawei.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/clk-provider.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/err.h>
+#include <linux/spinlock.h>
+
+#define div_mask(width)        ((1 << (width)) - 1)
+
+/**
+ * struct hi6220_clk_divider - divider clock for hi6220
+ *
+ * @hw:                handle between common and hardware-specific interfaces
+ * @reg:       register containing divider
+ * @shift:     shift to the divider bit field
+ * @width:     width of the divider bit field
+ * @mask:      mask for setting divider rate
+ * @table:     the div table that the divider supports
+ * @lock:      register lock
+ */
+struct hi6220_clk_divider {
+       struct clk_hw   hw;
+       void __iomem    *reg;
+       u8              shift;
+       u8              width;
+       u32             mask;
+       const struct clk_div_table *table;
+       spinlock_t      *lock;
+};
+
+#define to_hi6220_clk_divider(_hw)     \
+       container_of(_hw, struct hi6220_clk_divider, hw)
+
+static unsigned long hi6220_clkdiv_recalc_rate(struct clk_hw *hw,
+                                       unsigned long parent_rate)
+{
+       unsigned int val;
+       struct hi6220_clk_divider *dclk = to_hi6220_clk_divider(hw);
+
+       val = readl_relaxed(dclk->reg) >> dclk->shift;
+       val &= div_mask(dclk->width);
+
+       return divider_recalc_rate(hw, parent_rate, val, dclk->table,
+                                  CLK_DIVIDER_ROUND_CLOSEST);
+}
+
+static long hi6220_clkdiv_round_rate(struct clk_hw *hw, unsigned long rate,
+                                       unsigned long *prate)
+{
+       struct hi6220_clk_divider *dclk = to_hi6220_clk_divider(hw);
+
+       return divider_round_rate(hw, rate, prate, dclk->table,
+                                 dclk->width, CLK_DIVIDER_ROUND_CLOSEST);
+}
+
+static int hi6220_clkdiv_set_rate(struct clk_hw *hw, unsigned long rate,
+                                       unsigned long parent_rate)
+{
+       int value;
+       unsigned long flags = 0;
+       u32 data;
+       struct hi6220_clk_divider *dclk = to_hi6220_clk_divider(hw);
+
+       value = divider_get_val(rate, parent_rate, dclk->table,
+                               dclk->width, CLK_DIVIDER_ROUND_CLOSEST);
+
+       if (dclk->lock)
+               spin_lock_irqsave(dclk->lock, flags);
+
+       data = readl_relaxed(dclk->reg);
+       data &= ~(div_mask(dclk->width) << dclk->shift);
+       data |= value << dclk->shift;
+       data |= dclk->mask;
+
+       writel_relaxed(data, dclk->reg);
+
+       if (dclk->lock)
+               spin_unlock_irqrestore(dclk->lock, flags);
+
+       return 0;
+}
+
+static const struct clk_ops hi6220_clkdiv_ops = {
+       .recalc_rate = hi6220_clkdiv_recalc_rate,
+       .round_rate = hi6220_clkdiv_round_rate,
+       .set_rate = hi6220_clkdiv_set_rate,
+};
+
+struct clk *hi6220_register_clkdiv(struct device *dev, const char *name,
+       const char *parent_name, unsigned long flags, void __iomem *reg,
+       u8 shift, u8 width, u32 mask_bit, spinlock_t *lock)
+{
+       struct hi6220_clk_divider *div;
+       struct clk *clk;
+       struct clk_init_data init;
+       struct clk_div_table *table;
+       u32 max_div, min_div;
+       int i;
+
+       /* allocate the divider */
+       div = kzalloc(sizeof(*div), GFP_KERNEL);
+       if (!div)
+               return ERR_PTR(-ENOMEM);
+
+       /* Init the divider table */
+       max_div = div_mask(width) + 1;
+       min_div = 1;
+
+       table = kcalloc(max_div + 1, sizeof(*table), GFP_KERNEL);
+       if (!table) {
+               kfree(div);
+               return ERR_PTR(-ENOMEM);
+       }
+
+       for (i = 0; i < max_div; i++) {
+               table[i].div = min_div + i;
+               table[i].val = table[i].div - 1;
+       }
+
+       init.name = name;
+       init.ops = &hi6220_clkdiv_ops;
+       init.flags = flags;
+       init.parent_names = parent_name ? &parent_name : NULL;
+       init.num_parents = parent_name ? 1 : 0;
+
+       /* struct hi6220_clk_divider assignments */
+       div->reg = reg;
+       div->shift = shift;
+       div->width = width;
+       div->mask = mask_bit ? BIT(mask_bit) : 0;
+       div->lock = lock;
+       div->hw.init = &init;
+       div->table = table;
+
+       /* register the clock */
+       clk = clk_register(dev, &div->hw);
+       if (IS_ERR(clk)) {
+               kfree(table);
+               kfree(div);
+       }
+
+       return clk;
+}
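
A minimal standalone sketch, for reference only: hi6220_register_clkdiv() above derives its clk_div_table purely from the bit-field width, so each register value val selects a divide ratio of val + 1. The snippet below reproduces that construction in plain C; the names build_div_table and div_entry, and the 4-bit field width, are invented for this example and are not part of the driver.

#include <stdio.h>
#include <stdlib.h>

struct div_entry {
	unsigned int val;	/* value programmed into the bit field */
	unsigned int div;	/* resulting divide ratio */
};

/* Build the same value<->ratio mapping as hi6220_register_clkdiv(). */
static struct div_entry *build_div_table(unsigned int width, unsigned int *nr)
{
	unsigned int max_div = 1U << width;	/* div_mask(width) + 1 */
	struct div_entry *table;
	unsigned int i;

	table = calloc(max_div, sizeof(*table));
	if (!table)
		return NULL;

	for (i = 0; i < max_div; i++) {
		table[i].div = i + 1;	/* min_div is 1 */
		table[i].val = i;	/* val = div - 1 */
	}

	*nr = max_div;
	return table;
}

int main(void)
{
	unsigned int nr, i;
	struct div_entry *table = build_div_table(4, &nr);	/* assumed 4-bit field */

	if (!table)
		return 1;
	for (i = 0; i < nr; i++)
		printf("val %2u -> /%u\n", table[i].val, table[i].div);
	free(table);
	return 0;
}

Running it prints val 0 -> /1 up to val 15 -> /16, matching the table the driver hands to the common divider helpers.
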
index 0dd8a4b12747b6df0ce1c88f34aaf28781fe85f8..4a375ead70e9f2c58c1eb6aaced07ea39e579b7b 100644 (file)
@@ -37,7 +37,8 @@
  *     Main PLL or any other PLLs in the device such as ARM PLL, DDR PLL
  *     or PA PLL available on keystone2. These PLLs are controlled by
  *     this register. Main PLL is controlled by a PLL controller.
- * @pllm: PLL register map address
+ * @pllm: PLL register map address for multiplier bits
+ * @pllod: PLL register map address for post divider bits
  * @pll_ctl0: PLL controller map address
  * @pllm_lower_mask: multiplier lower mask
  * @pllm_upper_mask: multiplier upper mask
@@ -53,6 +54,7 @@ struct clk_pll_data {
        u32 phy_pllm;
        u32 phy_pll_ctl0;
        void __iomem *pllm;
+       void __iomem *pllod;
        void __iomem *pll_ctl0;
        u32 pllm_lower_mask;
        u32 pllm_upper_mask;
@@ -102,7 +104,11 @@ static unsigned long clk_pllclk_recalc(struct clk_hw *hw,
                /* read post divider from od bits*/
                postdiv = ((val & pll_data->clkod_mask) >>
                                 pll_data->clkod_shift) + 1;
-       else
+       else if (pll_data->pllod) {
+               postdiv = readl(pll_data->pllod);
+               postdiv = ((postdiv & pll_data->clkod_mask) >>
+                               pll_data->clkod_shift) + 1;
+       } else
                postdiv = pll_data->postdiv;
 
        rate /= (prediv + 1);
@@ -172,12 +178,21 @@ static void __init _of_pll_clk_init(struct device_node *node, bool pllctrl)
                /* assume the PLL has output divider register bits */
                pll_data->clkod_mask = CLKOD_MASK;
                pll_data->clkod_shift = CLKOD_SHIFT;
+
+               /*
+                * Check if there is a post-divider register. If not,
+                * assume the od bits are part of the control register.
+                */
+               i = of_property_match_string(node, "reg-names",
+                                            "post-divider");
+               pll_data->pllod = of_iomap(node, i);
        }
 
        i = of_property_match_string(node, "reg-names", "control");
        pll_data->pll_ctl0 = of_iomap(node, i);
        if (!pll_data->pll_ctl0) {
                pr_err("%s: ioremap failed\n", __func__);
+               iounmap(pll_data->pllod);
                goto out;
        }
 
@@ -193,6 +208,7 @@ static void __init _of_pll_clk_init(struct device_node *node, bool pllctrl)
                pll_data->pllm = of_iomap(node, i);
                if (!pll_data->pllm) {
                        iounmap(pll_data->pll_ctl0);
+                       iounmap(pll_data->pllod);
                        goto out;
                }
        }
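
A minimal sketch of the post-divider selection this hunk adds to clk_pllclk_recalc(): when the device tree maps a dedicated "post-divider" register (pll_data->pllod), the od field is read from it; otherwise the fixed pll_data->postdiv is used. The condition of the first branch is not visible in the hunk, so a placeholder flag stands in for it here; pll_regs, pll_postdiv and od_in_ctl_word are invented names for this example only.

#include <stdint.h>
#include <stdio.h>

/* Invented stand-in for the fields clk_pllclk_recalc() consults. */
struct pll_regs {
	int od_in_ctl_word;		/* placeholder for the condition above the hunk */
	uint32_t ctl_val;		/* word already read when od bits live in it */
	int has_pllod;			/* a "post-divider" register was mapped */
	uint32_t pllod_val;		/* value read from that register */
	uint32_t clkod_mask;
	unsigned int clkod_shift;
	unsigned int fixed_postdiv;	/* pll_data->postdiv fallback */
};

static unsigned int pll_postdiv(const struct pll_regs *p)
{
	if (p->od_in_ctl_word)		/* od bits inside the already-read word */
		return ((p->ctl_val & p->clkod_mask) >> p->clkod_shift) + 1;
	if (p->has_pllod)		/* new: dedicated post-divider register */
		return ((p->pllod_val & p->clkod_mask) >> p->clkod_shift) + 1;
	return p->fixed_postdiv;
}

int main(void)
{
	struct pll_regs p = {
		.has_pllod = 1, .pllod_val = 0x3,
		.clkod_mask = 0xf, .clkod_shift = 0, .fixed_postdiv = 1,
	};

	printf("postdiv = %u\n", pll_postdiv(&p));	/* (0x3 & 0xf) + 1 = 4 */
	return 0;
}

The PLL rate is then divided by (prediv + 1) and by this postdiv, as in the unchanged lines that follow the hunk.
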
diff --git a/drivers/clk/mediatek/Makefile b/drivers/clk/mediatek/Makefile
new file mode 100644 (file)
index 0000000..8e4b2a4
--- /dev/null
@@ -0,0 +1,4 @@
+obj-y += clk-mtk.o clk-pll.o clk-gate.o
+obj-$(CONFIG_RESET_CONTROLLER) += reset.o
+obj-y += clk-mt8135.o
+obj-y += clk-mt8173.o
diff --git a/drivers/clk/mediatek/clk-gate.c b/drivers/clk/mediatek/clk-gate.c
new file mode 100644 (file)
index 0000000..5702036
--- /dev/null
@@ -0,0 +1,137 @@
+/*
+ * Copyright (c) 2014 MediaTek Inc.
+ * Author: James Liao <jamesjj.liao@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/of.h>
+#include <linux/of_address.h>
+
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/clkdev.h>
+
+#include "clk-mtk.h"
+#include "clk-gate.h"
+
+static int mtk_cg_bit_is_cleared(struct clk_hw *hw)
+{
+       struct mtk_clk_gate *cg = to_clk_gate(hw);
+       u32 val;
+
+       regmap_read(cg->regmap, cg->sta_ofs, &val);
+
+       val &= BIT(cg->bit);
+
+       return val == 0;
+}
+
+static int mtk_cg_bit_is_set(struct clk_hw *hw)
+{
+       struct mtk_clk_gate *cg = to_clk_gate(hw);
+       u32 val;
+
+       regmap_read(cg->regmap, cg->sta_ofs, &val);
+
+       val &= BIT(cg->bit);
+
+       return val != 0;
+}
+
+static void mtk_cg_set_bit(struct clk_hw *hw)
+{
+       struct mtk_clk_gate *cg = to_clk_gate(hw);
+
+       regmap_write(cg->regmap, cg->set_ofs, BIT(cg->bit));
+}
+
+static void mtk_cg_clr_bit(struct clk_hw *hw)
+{
+       struct mtk_clk_gate *cg = to_clk_gate(hw);
+
+       regmap_write(cg->regmap, cg->clr_ofs, BIT(cg->bit));
+}
+
+static int mtk_cg_enable(struct clk_hw *hw)
+{
+       mtk_cg_clr_bit(hw);
+
+       return 0;
+}
+
+static void mtk_cg_disable(struct clk_hw *hw)
+{
+       mtk_cg_set_bit(hw);
+}
+
+static int mtk_cg_enable_inv(struct clk_hw *hw)
+{
+       mtk_cg_set_bit(hw);
+
+       return 0;
+}
+
+static void mtk_cg_disable_inv(struct clk_hw *hw)
+{
+       mtk_cg_clr_bit(hw);
+}
+
+const struct clk_ops mtk_clk_gate_ops_setclr = {
+       .is_enabled     = mtk_cg_bit_is_cleared,
+       .enable         = mtk_cg_enable,
+       .disable        = mtk_cg_disable,
+};
+
+const struct clk_ops mtk_clk_gate_ops_setclr_inv = {
+       .is_enabled     = mtk_cg_bit_is_set,
+       .enable         = mtk_cg_enable_inv,
+       .disable        = mtk_cg_disable_inv,
+};
+
+struct clk *mtk_clk_register_gate(
+               const char *name,
+               const char *parent_name,
+               struct regmap *regmap,
+               int set_ofs,
+               int clr_ofs,
+               int sta_ofs,
+               u8 bit,
+               const struct clk_ops *ops)
+{
+       struct mtk_clk_gate *cg;
+       struct clk *clk;
+       struct clk_init_data init = {};
+
+       cg = kzalloc(sizeof(*cg), GFP_KERNEL);
+       if (!cg)
+               return ERR_PTR(-ENOMEM);
+
+       init.name = name;
+       init.flags = CLK_SET_RATE_PARENT;
+       init.parent_names = parent_name ? &parent_name : NULL;
+       init.num_parents = parent_name ? 1 : 0;
+       init.ops = ops;
+
+       cg->regmap = regmap;
+       cg->set_ofs = set_ofs;
+       cg->clr_ofs = clr_ofs;
+       cg->sta_ofs = sta_ofs;
+       cg->bit = bit;
+
+       cg->hw.init = &init;
+
+       clk = clk_register(NULL, &cg->hw);
+       if (IS_ERR(clk))
+               kfree(cg);
+
+       return clk;
+}
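
A toy model of the set/clear/status convention used by the gate above, for reference: the driver never read-modify-writes the enable bit. Writing BIT(bit) to set_ofs sets the bit, writing it to clr_ofs clears it, and sta_ofs reports the current state; with the default ops a set bit means the clock is gated (so enable clears it), while the _inv ops treat a set bit as enabled. The fake_cg structure and its helpers below are invented names that only mimic this behaviour against a plain status word.

#include <stdio.h>
#include <stdint.h>

/* A toy "hardware" block: set/clr writes land in one status word. */
struct fake_cg {
	uint32_t sta;
};

static void cg_write_set(struct fake_cg *cg, unsigned int bit)
{
	cg->sta |= 1U << bit;		/* write to set_ofs: sets the bit */
}

static void cg_write_clr(struct fake_cg *cg, unsigned int bit)
{
	cg->sta &= ~(1U << bit);	/* write to clr_ofs: clears the bit */
}

/* Default ops: a set bit in the status register means "gated". */
static int cg_is_enabled(const struct fake_cg *cg, unsigned int bit)
{
	return !(cg->sta & (1U << bit));
}

int main(void)
{
	struct fake_cg cg = { .sta = 0xffffffff };	/* everything gated */
	unsigned int bit = 5;

	cg_write_clr(&cg, bit);		/* like mtk_cg_enable(): clear the gate bit */
	printf("enabled: %d\n", cg_is_enabled(&cg, bit));
	cg_write_set(&cg, bit);		/* like mtk_cg_disable(): set the gate bit */
	printf("enabled: %d\n", cg_is_enabled(&cg, bit));
	return 0;
}

This is why mtk_cg_enable() calls mtk_cg_clr_bit() while mtk_cg_enable_inv() calls mtk_cg_set_bit().
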
diff --git a/drivers/clk/mediatek/clk-gate.h b/drivers/clk/mediatek/clk-gate.h
new file mode 100644 (file)
index 0000000..6b6780b
--- /dev/null
@@ -0,0 +1,49 @@
+/*
+ * Copyright (c) 2014 MediaTek Inc.
+ * Author: James Liao <jamesjj.liao@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __DRV_CLK_GATE_H
+#define __DRV_CLK_GATE_H
+
+#include <linux/regmap.h>
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+
+struct mtk_clk_gate {
+       struct clk_hw   hw;
+       struct regmap   *regmap;
+       int             set_ofs;
+       int             clr_ofs;
+       int             sta_ofs;
+       u8              bit;
+};
+
+static inline struct mtk_clk_gate *to_clk_gate(struct clk_hw *hw)
+{
+       return container_of(hw, struct mtk_clk_gate, hw);
+}
+
+extern const struct clk_ops mtk_clk_gate_ops_setclr;
+extern const struct clk_ops mtk_clk_gate_ops_setclr_inv;
+
+struct clk *mtk_clk_register_gate(
+               const char *name,
+               const char *parent_name,
+               struct regmap *regmap,
+               int set_ofs,
+               int clr_ofs,
+               int sta_ofs,
+               u8 bit,
+               const struct clk_ops *ops);
+
+#endif /* __DRV_CLK_GATE_H */
diff --git a/drivers/clk/mediatek/clk-mt8135.c b/drivers/clk/mediatek/clk-mt8135.c
new file mode 100644 (file)
index 0000000..08b4b84
--- /dev/null
@@ -0,0 +1,644 @@
+/*
+ * Copyright (c) 2014 MediaTek Inc.
+ * Author: James Liao <jamesjj.liao@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/slab.h>
+#include <linux/mfd/syscon.h>
+#include <dt-bindings/clock/mt8135-clk.h>
+
+#include "clk-mtk.h"
+#include "clk-gate.h"
+
+static DEFINE_SPINLOCK(mt8135_clk_lock);
+
+static const struct mtk_fixed_factor root_clk_alias[] __initconst = {
+       FACTOR(CLK_TOP_DSI0_LNTC_DSICLK, "dsi0_lntc_dsiclk", "clk_null", 1, 1),
+       FACTOR(CLK_TOP_HDMITX_CLKDIG_CTS, "hdmitx_clkdig_cts", "clk_null", 1, 1),
+       FACTOR(CLK_TOP_CLKPH_MCK, "clkph_mck", "clk_null", 1, 1),
+       FACTOR(CLK_TOP_CPUM_TCK_IN, "cpum_tck_in", "clk_null", 1, 1),
+};
+
+static const struct mtk_fixed_factor top_divs[] __initconst = {
+       FACTOR(CLK_TOP_MAINPLL_806M, "mainpll_806m", "mainpll", 1, 2),
+       FACTOR(CLK_TOP_MAINPLL_537P3M, "mainpll_537p3m", "mainpll", 1, 3),
+       FACTOR(CLK_TOP_MAINPLL_322P4M, "mainpll_322p4m", "mainpll", 1, 5),
+       FACTOR(CLK_TOP_MAINPLL_230P3M, "mainpll_230p3m", "mainpll", 1, 7),
+
+       FACTOR(CLK_TOP_UNIVPLL_624M, "univpll_624m", "univpll", 1, 2),
+       FACTOR(CLK_TOP_UNIVPLL_416M, "univpll_416m", "univpll", 1, 3),
+       FACTOR(CLK_TOP_UNIVPLL_249P6M, "univpll_249p6m", "univpll", 1, 5),
+       FACTOR(CLK_TOP_UNIVPLL_178P3M, "univpll_178p3m", "univpll", 1, 7),
+       FACTOR(CLK_TOP_UNIVPLL_48M, "univpll_48m", "univpll", 1, 26),
+
+       FACTOR(CLK_TOP_MMPLL_D2, "mmpll_d2", "mmpll", 1, 2),
+       FACTOR(CLK_TOP_MMPLL_D3, "mmpll_d3", "mmpll", 1, 3),
+       FACTOR(CLK_TOP_MMPLL_D5, "mmpll_d5", "mmpll", 1, 5),
+       FACTOR(CLK_TOP_MMPLL_D7, "mmpll_d7", "mmpll", 1, 7),
+       FACTOR(CLK_TOP_MMPLL_D4, "mmpll_d4", "mmpll_d2", 1, 2),
+       FACTOR(CLK_TOP_MMPLL_D6, "mmpll_d6", "mmpll_d3", 1, 2),
+
+       FACTOR(CLK_TOP_SYSPLL_D2, "syspll_d2", "mainpll_806m", 1, 1),
+       FACTOR(CLK_TOP_SYSPLL_D4, "syspll_d4", "mainpll_806m", 1, 2),
+       FACTOR(CLK_TOP_SYSPLL_D6, "syspll_d6", "mainpll_806m", 1, 3),
+       FACTOR(CLK_TOP_SYSPLL_D8, "syspll_d8", "mainpll_806m", 1, 4),
+       FACTOR(CLK_TOP_SYSPLL_D10, "syspll_d10", "mainpll_806m", 1, 5),
+       FACTOR(CLK_TOP_SYSPLL_D12, "syspll_d12", "mainpll_806m", 1, 6),
+       FACTOR(CLK_TOP_SYSPLL_D16, "syspll_d16", "mainpll_806m", 1, 8),
+       FACTOR(CLK_TOP_SYSPLL_D24, "syspll_d24", "mainpll_806m", 1, 12),
+
+       FACTOR(CLK_TOP_SYSPLL_D3, "syspll_d3", "mainpll_537p3m", 1, 1),
+
+       FACTOR(CLK_TOP_SYSPLL_D2P5, "syspll_d2p5", "mainpll_322p4m", 2, 1),
+       FACTOR(CLK_TOP_SYSPLL_D5, "syspll_d5", "mainpll_322p4m", 1, 1),
+
+       FACTOR(CLK_TOP_SYSPLL_D3P5, "syspll_d3p5", "mainpll_230p3m", 2, 1),
+
+       FACTOR(CLK_TOP_UNIVPLL1_D2, "univpll1_d2", "univpll_624m", 1, 2),
+       FACTOR(CLK_TOP_UNIVPLL1_D4, "univpll1_d4", "univpll_624m", 1, 4),
+       FACTOR(CLK_TOP_UNIVPLL1_D6, "univpll1_d6", "univpll_624m", 1, 6),
+       FACTOR(CLK_TOP_UNIVPLL1_D8, "univpll1_d8", "univpll_624m", 1, 8),
+       FACTOR(CLK_TOP_UNIVPLL1_D10, "univpll1_d10", "univpll_624m", 1, 10),
+
+       FACTOR(CLK_TOP_UNIVPLL2_D2, "univpll2_d2", "univpll_416m", 1, 2),
+       FACTOR(CLK_TOP_UNIVPLL2_D4, "univpll2_d4", "univpll_416m", 1, 4),
+       FACTOR(CLK_TOP_UNIVPLL2_D6, "univpll2_d6", "univpll_416m", 1, 6),
+       FACTOR(CLK_TOP_UNIVPLL2_D8, "univpll2_d8", "univpll_416m", 1, 8),
+
+       FACTOR(CLK_TOP_UNIVPLL_D3, "univpll_d3", "univpll_416m", 1, 1),
+       FACTOR(CLK_TOP_UNIVPLL_D5, "univpll_d5", "univpll_249p6m", 1, 1),
+       FACTOR(CLK_TOP_UNIVPLL_D7, "univpll_d7", "univpll_178p3m", 1, 1),
+       FACTOR(CLK_TOP_UNIVPLL_D10, "univpll_d10", "univpll_249p6m", 1, 2),
+       FACTOR(CLK_TOP_UNIVPLL_D26, "univpll_d26", "univpll_48m", 1, 1),
+
+       FACTOR(CLK_TOP_APLL, "apll_ck", "audpll", 1, 1),
+       FACTOR(CLK_TOP_APLL_D4, "apll_d4", "audpll", 1, 4),
+       FACTOR(CLK_TOP_APLL_D8, "apll_d8", "audpll", 1, 8),
+       FACTOR(CLK_TOP_APLL_D16, "apll_d16", "audpll", 1, 16),
+       FACTOR(CLK_TOP_APLL_D24, "apll_d24", "audpll", 1, 24),
+
+       FACTOR(CLK_TOP_LVDSPLL_D2, "lvdspll_d2", "lvdspll", 1, 2),
+       FACTOR(CLK_TOP_LVDSPLL_D4, "lvdspll_d4", "lvdspll", 1, 4),
+       FACTOR(CLK_TOP_LVDSPLL_D8, "lvdspll_d8", "lvdspll", 1, 8),
+
+       FACTOR(CLK_TOP_LVDSTX_CLKDIG_CT, "lvdstx_clkdig_cts", "lvdspll", 1, 1),
+       FACTOR(CLK_TOP_VPLL_DPIX, "vpll_dpix_ck", "lvdspll", 1, 1),
+
+       FACTOR(CLK_TOP_TVHDMI_H, "tvhdmi_h_ck", "tvdpll", 1, 1),
+
+       FACTOR(CLK_TOP_HDMITX_CLKDIG_D2, "hdmitx_clkdig_d2", "hdmitx_clkdig_cts", 1, 2),
+       FACTOR(CLK_TOP_HDMITX_CLKDIG_D3, "hdmitx_clkdig_d3", "hdmitx_clkdig_cts", 1, 3),
+
+       FACTOR(CLK_TOP_TVHDMI_D2, "tvhdmi_d2", "tvhdmi_h_ck", 1, 2),
+       FACTOR(CLK_TOP_TVHDMI_D4, "tvhdmi_d4", "tvhdmi_h_ck", 1, 4),
+
+       FACTOR(CLK_TOP_MEMPLL_MCK_D4, "mempll_mck_d4", "clkph_mck", 1, 4),
+};
+
+static const char * const axi_parents[] __initconst = {
+       "clk26m",
+       "syspll_d3",
+       "syspll_d4",
+       "syspll_d6",
+       "univpll_d5",
+       "univpll2_d2",
+       "syspll_d3p5"
+};
+
+static const char * const smi_parents[] __initconst = {
+       "clk26m",
+       "clkph_mck",
+       "syspll_d2p5",
+       "syspll_d3",
+       "syspll_d8",
+       "univpll_d5",
+       "univpll1_d2",
+       "univpll1_d6",
+       "mmpll_d3",
+       "mmpll_d4",
+       "mmpll_d5",
+       "mmpll_d6",
+       "mmpll_d7",
+       "vdecpll",
+       "lvdspll"
+};
+
+static const char * const mfg_parents[] __initconst = {
+       "clk26m",
+       "univpll1_d4",
+       "syspll_d2",
+       "syspll_d2p5",
+       "syspll_d3",
+       "univpll_d5",
+       "univpll1_d2",
+       "mmpll_d2",
+       "mmpll_d3",
+       "mmpll_d4",
+       "mmpll_d5",
+       "mmpll_d6",
+       "mmpll_d7"
+};
+
+static const char * const irda_parents[] __initconst = {
+       "clk26m",
+       "univpll2_d8",
+       "univpll1_d6"
+};
+
+static const char * const cam_parents[] __initconst = {
+       "clk26m",
+       "syspll_d3",
+       "syspll_d3p5",
+       "syspll_d4",
+       "univpll_d5",
+       "univpll2_d2",
+       "univpll_d7",
+       "univpll1_d4"
+};
+
+static const char * const aud_intbus_parents[] __initconst = {
+       "clk26m",
+       "syspll_d6",
+       "univpll_d10"
+};
+
+static const char * const jpg_parents[] __initconst = {
+       "clk26m",
+       "syspll_d5",
+       "syspll_d4",
+       "syspll_d3",
+       "univpll_d7",
+       "univpll2_d2",
+       "univpll_d5"
+};
+
+static const char * const disp_parents[] __initconst = {
+       "clk26m",
+       "syspll_d3p5",
+       "syspll_d3",
+       "univpll2_d2",
+       "univpll_d5",
+       "univpll1_d2",
+       "lvdspll",
+       "vdecpll"
+};
+
+static const char * const msdc30_parents[] __initconst = {
+       "clk26m",
+       "syspll_d6",
+       "syspll_d5",
+       "univpll1_d4",
+       "univpll2_d4",
+       "msdcpll"
+};
+
+static const char * const usb20_parents[] __initconst = {
+       "clk26m",
+       "univpll2_d6",
+       "univpll1_d10"
+};
+
+static const char * const venc_parents[] __initconst = {
+       "clk26m",
+       "syspll_d3",
+       "syspll_d8",
+       "univpll_d5",
+       "univpll1_d6",
+       "mmpll_d4",
+       "mmpll_d5",
+       "mmpll_d6"
+};
+
+static const char * const spi_parents[] __initconst = {
+       "clk26m",
+       "syspll_d6",
+       "syspll_d8",
+       "syspll_d10",
+       "univpll1_d6",
+       "univpll1_d8"
+};
+
+static const char * const uart_parents[] __initconst = {
+       "clk26m",
+       "univpll2_d8"
+};
+
+static const char * const mem_parents[] __initconst = {
+       "clk26m",
+       "clkph_mck"
+};
+
+static const char * const camtg_parents[] __initconst = {
+       "clk26m",
+       "univpll_d26",
+       "univpll1_d6",
+       "syspll_d16",
+       "syspll_d8"
+};
+
+static const char * const audio_parents[] __initconst = {
+       "clk26m",
+       "syspll_d24"
+};
+
+static const char * const fix_parents[] __initconst = {
+       "rtc32k",
+       "clk26m",
+       "univpll_d5",
+       "univpll_d7",
+       "univpll1_d2",
+       "univpll1_d4",
+       "univpll1_d6",
+       "univpll1_d8"
+};
+
+static const char * const vdec_parents[] __initconst = {
+       "clk26m",
+       "vdecpll",
+       "clkph_mck",
+       "syspll_d2p5",
+       "syspll_d3",
+       "syspll_d3p5",
+       "syspll_d4",
+       "syspll_d5",
+       "syspll_d6",
+       "syspll_d8",
+       "univpll1_d2",
+       "univpll2_d2",
+       "univpll_d7",
+       "univpll_d10",
+       "univpll2_d4",
+       "lvdspll"
+};
+
+static const char * const ddrphycfg_parents[] __initconst = {
+       "clk26m",
+       "axi_sel",
+       "syspll_d12"
+};
+
+static const char * const dpilvds_parents[] __initconst = {
+       "clk26m",
+       "lvdspll",
+       "lvdspll_d2",
+       "lvdspll_d4",
+       "lvdspll_d8"
+};
+
+static const char * const pmicspi_parents[] __initconst = {
+       "clk26m",
+       "univpll2_d6",
+       "syspll_d8",
+       "syspll_d10",
+       "univpll1_d10",
+       "mempll_mck_d4",
+       "univpll_d26",
+       "syspll_d24"
+};
+
+static const char * const smi_mfg_as_parents[] __initconst = {
+       "clk26m",
+       "smi_sel",
+       "mfg_sel",
+       "mem_sel"
+};
+
+static const char * const gcpu_parents[] __initconst = {
+       "clk26m",
+       "syspll_d4",
+       "univpll_d7",
+       "syspll_d5",
+       "syspll_d6"
+};
+
+static const char * const dpi1_parents[] __initconst = {
+       "clk26m",
+       "tvhdmi_h_ck",
+       "tvhdmi_d2",
+       "tvhdmi_d4"
+};
+
+static const char * const cci_parents[] __initconst = {
+       "clk26m",
+       "mainpll_537p3m",
+       "univpll_d3",
+       "syspll_d2p5",
+       "syspll_d3",
+       "syspll_d5"
+};
+
+static const char * const apll_parents[] __initconst = {
+       "clk26m",
+       "apll_ck",
+       "apll_d4",
+       "apll_d8",
+       "apll_d16",
+       "apll_d24"
+};
+
+static const char * const hdmipll_parents[] __initconst = {
+       "clk26m",
+       "hdmitx_clkdig_cts",
+       "hdmitx_clkdig_d2",
+       "hdmitx_clkdig_d3"
+};
+
+static const struct mtk_composite top_muxes[] __initconst = {
+       /* CLK_CFG_0 */
+       MUX_GATE(CLK_TOP_AXI_SEL, "axi_sel", axi_parents,
+               0x0140, 0, 3, INVALID_MUX_GATE_BIT),
+       MUX_GATE(CLK_TOP_SMI_SEL, "smi_sel", smi_parents, 0x0140, 8, 4, 15),
+       MUX_GATE(CLK_TOP_MFG_SEL, "mfg_sel", mfg_parents, 0x0140, 16, 4, 23),
+       MUX_GATE(CLK_TOP_IRDA_SEL, "irda_sel", irda_parents, 0x0140, 24, 2, 31),
+       /* CLK_CFG_1 */
+       MUX_GATE(CLK_TOP_CAM_SEL, "cam_sel", cam_parents, 0x0144, 0, 3, 7),
+       MUX_GATE(CLK_TOP_AUD_INTBUS_SEL, "aud_intbus_sel", aud_intbus_parents,
+               0x0144, 8, 2, 15),
+       MUX_GATE(CLK_TOP_JPG_SEL, "jpg_sel", jpg_parents, 0x0144, 16, 3, 23),
+       MUX_GATE(CLK_TOP_DISP_SEL, "disp_sel", disp_parents, 0x0144, 24, 3, 31),
+       /* CLK_CFG_2 */
+       MUX_GATE(CLK_TOP_MSDC30_1_SEL, "msdc30_1_sel", msdc30_parents, 0x0148, 0, 3, 7),
+       MUX_GATE(CLK_TOP_MSDC30_2_SEL, "msdc30_2_sel", msdc30_parents, 0x0148, 8, 3, 15),
+       MUX_GATE(CLK_TOP_MSDC30_3_SEL, "msdc30_3_sel", msdc30_parents, 0x0148, 16, 3, 23),
+       MUX_GATE(CLK_TOP_MSDC30_4_SEL, "msdc30_4_sel", msdc30_parents, 0x0148, 24, 3, 31),
+       /* CLK_CFG_3 */
+       MUX_GATE(CLK_TOP_USB20_SEL, "usb20_sel", usb20_parents, 0x014c, 0, 2, 7),
+       /* CLK_CFG_4 */
+       MUX_GATE(CLK_TOP_VENC_SEL, "venc_sel", venc_parents, 0x0150, 8, 3, 15),
+       MUX_GATE(CLK_TOP_SPI_SEL, "spi_sel", spi_parents, 0x0150, 16, 3, 23),
+       MUX_GATE(CLK_TOP_UART_SEL, "uart_sel", uart_parents, 0x0150, 24, 2, 31),
+       /* CLK_CFG_6 */
+       MUX_GATE(CLK_TOP_MEM_SEL, "mem_sel", mem_parents, 0x0158, 0, 2, 7),
+       MUX_GATE(CLK_TOP_CAMTG_SEL, "camtg_sel", camtg_parents, 0x0158, 8, 3, 15),
+       MUX_GATE(CLK_TOP_AUDIO_SEL, "audio_sel", audio_parents, 0x0158, 24, 2, 31),
+       /* CLK_CFG_7 */
+       MUX_GATE(CLK_TOP_FIX_SEL, "fix_sel", fix_parents, 0x015c, 0, 3, 7),
+       MUX_GATE(CLK_TOP_VDEC_SEL, "vdec_sel", vdec_parents, 0x015c, 8, 4, 15),
+       MUX_GATE(CLK_TOP_DDRPHYCFG_SEL, "ddrphycfg_sel", ddrphycfg_parents,
+               0x015c, 16, 2, 23),
+       MUX_GATE(CLK_TOP_DPILVDS_SEL, "dpilvds_sel", dpilvds_parents, 0x015c, 24, 3, 31),
+       /* CLK_CFG_8 */
+       MUX_GATE(CLK_TOP_PMICSPI_SEL, "pmicspi_sel", pmicspi_parents, 0x0164, 0, 3, 7),
+       MUX_GATE(CLK_TOP_MSDC30_0_SEL, "msdc30_0_sel", msdc30_parents, 0x0164, 8, 3, 15),
+       MUX_GATE(CLK_TOP_SMI_MFG_AS_SEL, "smi_mfg_as_sel", smi_mfg_as_parents,
+               0x0164, 16, 2, 23),
+       MUX_GATE(CLK_TOP_GCPU_SEL, "gcpu_sel", gcpu_parents, 0x0164, 24, 3, 31),
+       /* CLK_CFG_9 */
+       MUX_GATE(CLK_TOP_DPI1_SEL, "dpi1_sel", dpi1_parents, 0x0168, 0, 2, 7),
+       MUX_GATE(CLK_TOP_CCI_SEL, "cci_sel", cci_parents, 0x0168, 8, 3, 15),
+       MUX_GATE(CLK_TOP_APLL_SEL, "apll_sel", apll_parents, 0x0168, 16, 3, 23),
+       MUX_GATE(CLK_TOP_HDMIPLL_SEL, "hdmipll_sel", hdmipll_parents, 0x0168, 24, 2, 31),
+};
+
+static const struct mtk_gate_regs infra_cg_regs = {
+       .set_ofs = 0x0040,
+       .clr_ofs = 0x0044,
+       .sta_ofs = 0x0048,
+};
+
+#define GATE_ICG(_id, _name, _parent, _shift) {        \
+               .id = _id,                                      \
+               .name = _name,                                  \
+               .parent_name = _parent,                         \
+               .regs = &infra_cg_regs,                         \
+               .shift = _shift,                                \
+               .ops = &mtk_clk_gate_ops_setclr,                \
+       }
+
+static const struct mtk_gate infra_clks[] __initconst = {
+       GATE_ICG(CLK_INFRA_PMIC_WRAP, "pmic_wrap_ck", "axi_sel", 23),
+       GATE_ICG(CLK_INFRA_PMICSPI, "pmicspi_ck", "pmicspi_sel", 22),
+       GATE_ICG(CLK_INFRA_CCIF1_AP_CTRL, "ccif1_ap_ctrl", "axi_sel", 21),
+       GATE_ICG(CLK_INFRA_CCIF0_AP_CTRL, "ccif0_ap_ctrl", "axi_sel", 20),
+       GATE_ICG(CLK_INFRA_KP, "kp_ck", "axi_sel", 16),
+       GATE_ICG(CLK_INFRA_CPUM, "cpum_ck", "cpum_tck_in", 15),
+       GATE_ICG(CLK_INFRA_M4U, "m4u_ck", "mem_sel", 8),
+       GATE_ICG(CLK_INFRA_MFGAXI, "mfgaxi_ck", "axi_sel", 7),
+       GATE_ICG(CLK_INFRA_DEVAPC, "devapc_ck", "axi_sel", 6),
+       GATE_ICG(CLK_INFRA_AUDIO, "audio_ck", "aud_intbus_sel", 5),
+       GATE_ICG(CLK_INFRA_MFG_BUS, "mfg_bus_ck", "axi_sel", 2),
+       GATE_ICG(CLK_INFRA_SMI, "smi_ck", "smi_sel", 1),
+       GATE_ICG(CLK_INFRA_DBGCLK, "dbgclk_ck", "axi_sel", 0),
+};
+
+static const struct mtk_gate_regs peri0_cg_regs = {
+       .set_ofs = 0x0008,
+       .clr_ofs = 0x0010,
+       .sta_ofs = 0x0018,
+};
+
+static const struct mtk_gate_regs peri1_cg_regs = {
+       .set_ofs = 0x000c,
+       .clr_ofs = 0x0014,
+       .sta_ofs = 0x001c,
+};
+
+#define GATE_PERI0(_id, _name, _parent, _shift) {      \
+               .id = _id,                                      \
+               .name = _name,                                  \
+               .parent_name = _parent,                         \
+               .regs = &peri0_cg_regs,                         \
+               .shift = _shift,                                \
+               .ops = &mtk_clk_gate_ops_setclr,                \
+       }
+
+#define GATE_PERI1(_id, _name, _parent, _shift) {      \
+               .id = _id,                                      \
+               .name = _name,                                  \
+               .parent_name = _parent,                         \
+               .regs = &peri1_cg_regs,                         \
+               .shift = _shift,                                \
+               .ops = &mtk_clk_gate_ops_setclr,                \
+       }
+
+static const struct mtk_gate peri_gates[] __initconst = {
+       /* PERI0 */
+       GATE_PERI0(CLK_PERI_I2C5, "i2c5_ck", "axi_sel", 31),
+       GATE_PERI0(CLK_PERI_I2C4, "i2c4_ck", "axi_sel", 30),
+       GATE_PERI0(CLK_PERI_I2C3, "i2c3_ck", "axi_sel", 29),
+       GATE_PERI0(CLK_PERI_I2C2, "i2c2_ck", "axi_sel", 28),
+       GATE_PERI0(CLK_PERI_I2C1, "i2c1_ck", "axi_sel", 27),
+       GATE_PERI0(CLK_PERI_I2C0, "i2c0_ck", "axi_sel", 26),
+       GATE_PERI0(CLK_PERI_UART3, "uart3_ck", "axi_sel", 25),
+       GATE_PERI0(CLK_PERI_UART2, "uart2_ck", "axi_sel", 24),
+       GATE_PERI0(CLK_PERI_UART1, "uart1_ck", "axi_sel", 23),
+       GATE_PERI0(CLK_PERI_UART0, "uart0_ck", "axi_sel", 22),
+       GATE_PERI0(CLK_PERI_IRDA, "irda_ck", "irda_sel", 21),
+       GATE_PERI0(CLK_PERI_NLI, "nli_ck", "axi_sel", 20),
+       GATE_PERI0(CLK_PERI_MD_HIF, "md_hif_ck", "axi_sel", 19),
+       GATE_PERI0(CLK_PERI_AP_HIF, "ap_hif_ck", "axi_sel", 18),
+       GATE_PERI0(CLK_PERI_MSDC30_3, "msdc30_3_ck", "msdc30_4_sel", 17),
+       GATE_PERI0(CLK_PERI_MSDC30_2, "msdc30_2_ck", "msdc30_3_sel", 16),
+       GATE_PERI0(CLK_PERI_MSDC30_1, "msdc30_1_ck", "msdc30_2_sel", 15),
+       GATE_PERI0(CLK_PERI_MSDC20_2, "msdc20_2_ck", "msdc30_1_sel", 14),
+       GATE_PERI0(CLK_PERI_MSDC20_1, "msdc20_1_ck", "msdc30_0_sel", 13),
+       GATE_PERI0(CLK_PERI_AP_DMA, "ap_dma_ck", "axi_sel", 12),
+       GATE_PERI0(CLK_PERI_USB1, "usb1_ck", "usb20_sel", 11),
+       GATE_PERI0(CLK_PERI_USB0, "usb0_ck", "usb20_sel", 10),
+       GATE_PERI0(CLK_PERI_PWM, "pwm_ck", "axi_sel", 9),
+       GATE_PERI0(CLK_PERI_PWM7, "pwm7_ck", "axi_sel", 8),
+       GATE_PERI0(CLK_PERI_PWM6, "pwm6_ck", "axi_sel", 7),
+       GATE_PERI0(CLK_PERI_PWM5, "pwm5_ck", "axi_sel", 6),
+       GATE_PERI0(CLK_PERI_PWM4, "pwm4_ck", "axi_sel", 5),
+       GATE_PERI0(CLK_PERI_PWM3, "pwm3_ck", "axi_sel", 4),
+       GATE_PERI0(CLK_PERI_PWM2, "pwm2_ck", "axi_sel", 3),
+       GATE_PERI0(CLK_PERI_PWM1, "pwm1_ck", "axi_sel", 2),
+       GATE_PERI0(CLK_PERI_THERM, "therm_ck", "axi_sel", 1),
+       GATE_PERI0(CLK_PERI_NFI, "nfi_ck", "axi_sel", 0),
+       /* PERI1 */
+       GATE_PERI1(CLK_PERI_USBSLV, "usbslv_ck", "axi_sel", 8),
+       GATE_PERI1(CLK_PERI_USB1_MCU, "usb1_mcu_ck", "axi_sel", 7),
+       GATE_PERI1(CLK_PERI_USB0_MCU, "usb0_mcu_ck", "axi_sel", 6),
+       GATE_PERI1(CLK_PERI_GCPU, "gcpu_ck", "gcpu_sel", 5),
+       GATE_PERI1(CLK_PERI_FHCTL, "fhctl_ck", "clk26m", 4),
+       GATE_PERI1(CLK_PERI_SPI1, "spi1_ck", "spi_sel", 3),
+       GATE_PERI1(CLK_PERI_AUXADC, "auxadc_ck", "clk26m", 2),
+       GATE_PERI1(CLK_PERI_PERI_PWRAP, "peri_pwrap_ck", "axi_sel", 1),
+       GATE_PERI1(CLK_PERI_I2C6, "i2c6_ck", "axi_sel", 0),
+};
+
+static const char * const uart_ck_sel_parents[] __initconst = {
+       "clk26m",
+       "uart_sel",
+};
+
+static const struct mtk_composite peri_clks[] __initconst = {
+       MUX(CLK_PERI_UART0_SEL, "uart0_ck_sel", uart_ck_sel_parents, 0x40c, 0, 1),
+       MUX(CLK_PERI_UART1_SEL, "uart1_ck_sel", uart_ck_sel_parents, 0x40c, 1, 1),
+       MUX(CLK_PERI_UART2_SEL, "uart2_ck_sel", uart_ck_sel_parents, 0x40c, 2, 1),
+       MUX(CLK_PERI_UART3_SEL, "uart3_ck_sel", uart_ck_sel_parents, 0x40c, 3, 1),
+};
+
+static void __init mtk_topckgen_init(struct device_node *node)
+{
+       struct clk_onecell_data *clk_data;
+       void __iomem *base;
+       int r;
+
+       base = of_iomap(node, 0);
+       if (!base) {
+               pr_err("%s(): ioremap failed\n", __func__);
+               return;
+       }
+
+       clk_data = mtk_alloc_clk_data(CLK_TOP_NR_CLK);
+
+       mtk_clk_register_factors(root_clk_alias, ARRAY_SIZE(root_clk_alias), clk_data);
+       mtk_clk_register_factors(top_divs, ARRAY_SIZE(top_divs), clk_data);
+       mtk_clk_register_composites(top_muxes, ARRAY_SIZE(top_muxes), base,
+                       &mt8135_clk_lock, clk_data);
+
+       clk_prepare_enable(clk_data->clks[CLK_TOP_CCI_SEL]);
+
+       r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+       if (r)
+               pr_err("%s(): could not register clock provider: %d\n",
+                       __func__, r);
+}
+CLK_OF_DECLARE(mtk_topckgen, "mediatek,mt8135-topckgen", mtk_topckgen_init);
+
+static void __init mtk_infrasys_init(struct device_node *node)
+{
+       struct clk_onecell_data *clk_data;
+       int r;
+
+       clk_data = mtk_alloc_clk_data(CLK_INFRA_NR_CLK);
+
+       mtk_clk_register_gates(node, infra_clks, ARRAY_SIZE(infra_clks),
+                                               clk_data);
+
+       clk_prepare_enable(clk_data->clks[CLK_INFRA_M4U]);
+
+       r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+       if (r)
+               pr_err("%s(): could not register clock provider: %d\n",
+                       __func__, r);
+
+       mtk_register_reset_controller(node, 2, 0x30);
+}
+CLK_OF_DECLARE(mtk_infrasys, "mediatek,mt8135-infracfg", mtk_infrasys_init);
+
+static void __init mtk_pericfg_init(struct device_node *node)
+{
+       struct clk_onecell_data *clk_data;
+       int r;
+       void __iomem *base;
+
+       base = of_iomap(node, 0);
+       if (!base) {
+               pr_err("%s(): ioremap failed\n", __func__);
+               return;
+       }
+
+       clk_data = mtk_alloc_clk_data(CLK_PERI_NR_CLK);
+
+       mtk_clk_register_gates(node, peri_gates, ARRAY_SIZE(peri_gates),
+                                               clk_data);
+       mtk_clk_register_composites(peri_clks, ARRAY_SIZE(peri_clks), base,
+                       &mt8135_clk_lock, clk_data);
+
+       r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+       if (r)
+               pr_err("%s(): could not register clock provider: %d\n",
+                       __func__, r);
+
+       mtk_register_reset_controller(node, 2, 0);
+}
+CLK_OF_DECLARE(mtk_pericfg, "mediatek,mt8135-pericfg", mtk_pericfg_init);
+
+#define MT8135_PLL_FMAX                (2000 * MHZ)
+#define CON0_MT8135_RST_BAR    BIT(27)
+
+#define PLL(_id, _name, _reg, _pwr_reg, _en_mask, _flags, _pcwbits, _pd_reg, _pd_shift, _tuner_reg, _pcw_reg, _pcw_shift) { \
+               .id = _id,                                              \
+               .name = _name,                                          \
+               .reg = _reg,                                            \
+               .pwr_reg = _pwr_reg,                                    \
+               .en_mask = _en_mask,                                    \
+               .flags = _flags,                                        \
+               .rst_bar_mask = CON0_MT8135_RST_BAR,                    \
+               .fmax = MT8135_PLL_FMAX,                                \
+               .pcwbits = _pcwbits,                                    \
+               .pd_reg = _pd_reg,                                      \
+               .pd_shift = _pd_shift,                                  \
+               .tuner_reg = _tuner_reg,                                \
+               .pcw_reg = _pcw_reg,                                    \
+               .pcw_shift = _pcw_shift,                                \
+       }
+
+static const struct mtk_pll_data plls[] = {
+       PLL(CLK_APMIXED_ARMPLL1, "armpll1", 0x200, 0x218, 0x80000001, 0, 21, 0x204, 24, 0x0, 0x204, 0),
+       PLL(CLK_APMIXED_ARMPLL2, "armpll2", 0x2cc, 0x2e4, 0x80000001, 0, 21, 0x2d0, 24, 0x0, 0x2d0, 0),
+       PLL(CLK_APMIXED_MAINPLL, "mainpll", 0x21c, 0x234, 0xf0000001, HAVE_RST_BAR, 21, 0x21c, 6, 0x0, 0x220, 0),
+       PLL(CLK_APMIXED_UNIVPLL, "univpll", 0x238, 0x250, 0xf3000001, HAVE_RST_BAR, 7, 0x238, 6, 0x0, 0x238, 9),
+       PLL(CLK_APMIXED_MMPLL, "mmpll", 0x254, 0x26c, 0xf0000001, HAVE_RST_BAR, 21, 0x254, 6, 0x0, 0x258, 0),
+       PLL(CLK_APMIXED_MSDCPLL, "msdcpll", 0x278, 0x290, 0x80000001, 0, 21, 0x278, 6, 0x0, 0x27c, 0),
+       PLL(CLK_APMIXED_TVDPLL, "tvdpll", 0x294, 0x2ac, 0x80000001, 0, 31, 0x294, 6, 0x0, 0x298, 0),
+       PLL(CLK_APMIXED_LVDSPLL, "lvdspll", 0x2b0, 0x2c8, 0x80000001, 0, 21, 0x2b0, 6, 0x0, 0x2b4, 0),
+       PLL(CLK_APMIXED_AUDPLL, "audpll", 0x2e8, 0x300, 0x80000001, 0, 31, 0x2e8, 6, 0x2f8, 0x2ec, 0),
+       PLL(CLK_APMIXED_VDECPLL, "vdecpll", 0x304, 0x31c, 0x80000001, 0, 21, 0x2b0, 6, 0x0, 0x308, 0),
+};
+
+static void __init mtk_apmixedsys_init(struct device_node *node)
+{
+       struct clk_onecell_data *clk_data;
+
+       clk_data = mtk_alloc_clk_data(CLK_APMIXED_NR_CLK);
+       if (!clk_data)
+               return;
+
+       mtk_clk_register_plls(node, plls, ARRAY_SIZE(plls), clk_data);
+}
+CLK_OF_DECLARE(mtk_apmixedsys, "mediatek,mt8135-apmixedsys",
+               mtk_apmixedsys_init);
diff --git a/drivers/clk/mediatek/clk-mt8173.c b/drivers/clk/mediatek/clk-mt8173.c
new file mode 100644 (file)
index 0000000..4b9e04c
--- /dev/null
@@ -0,0 +1,830 @@
+/*
+ * Copyright (c) 2014 MediaTek Inc.
+ * Author: James Liao <jamesjj.liao@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/slab.h>
+#include <linux/mfd/syscon.h>
+
+#include "clk-mtk.h"
+#include "clk-gate.h"
+
+#include <dt-bindings/clock/mt8173-clk.h>
+
+static DEFINE_SPINLOCK(mt8173_clk_lock);
+
+static const struct mtk_fixed_factor root_clk_alias[] __initconst = {
+       FACTOR(CLK_TOP_CLKPH_MCK_O, "clkph_mck_o", "clk_null", 1, 1),
+       FACTOR(CLK_TOP_DPI, "dpi_ck", "clk_null", 1, 1),
+       FACTOR(CLK_TOP_USB_SYSPLL_125M, "usb_syspll_125m", "clk_null", 1, 1),
+       FACTOR(CLK_TOP_HDMITX_DIG_CTS, "hdmitx_dig_cts", "clk_null", 1, 1),
+};
+
+static const struct mtk_fixed_factor top_divs[] __initconst = {
+       FACTOR(CLK_TOP_ARMCA7PLL_754M, "armca7pll_754m", "armca7pll", 1, 2),
+       FACTOR(CLK_TOP_ARMCA7PLL_502M, "armca7pll_502m", "armca7pll", 1, 3),
+
+       FACTOR(CLK_TOP_MAIN_H546M, "main_h546m", "mainpll", 1, 2),
+       FACTOR(CLK_TOP_MAIN_H364M, "main_h364m", "mainpll", 1, 3),
+       FACTOR(CLK_TOP_MAIN_H218P4M, "main_h218p4m", "mainpll", 1, 5),
+       FACTOR(CLK_TOP_MAIN_H156M, "main_h156m", "mainpll", 1, 7),
+
+       FACTOR(CLK_TOP_TVDPLL_445P5M, "tvdpll_445p5m", "tvdpll", 1, 4),
+       FACTOR(CLK_TOP_TVDPLL_594M, "tvdpll_594m", "tvdpll", 1, 3),
+
+       FACTOR(CLK_TOP_UNIV_624M, "univ_624m", "univpll", 1, 2),
+       FACTOR(CLK_TOP_UNIV_416M, "univ_416m", "univpll", 1, 3),
+       FACTOR(CLK_TOP_UNIV_249P6M, "univ_249p6m", "univpll", 1, 5),
+       FACTOR(CLK_TOP_UNIV_178P3M, "univ_178p3m", "univpll", 1, 7),
+       FACTOR(CLK_TOP_UNIV_48M, "univ_48m", "univpll", 1, 26),
+
+       FACTOR(CLK_TOP_CLKRTC_EXT, "clkrtc_ext", "clk32k", 1, 1),
+       FACTOR(CLK_TOP_CLKRTC_INT, "clkrtc_int", "clk26m", 1, 793),
+       FACTOR(CLK_TOP_FPC, "fpc_ck", "clk26m", 1, 1),
+
+       FACTOR(CLK_TOP_HDMITXPLL_D2, "hdmitxpll_d2", "hdmitx_dig_cts", 1, 2),
+       FACTOR(CLK_TOP_HDMITXPLL_D3, "hdmitxpll_d3", "hdmitx_dig_cts", 1, 3),
+
+       FACTOR(CLK_TOP_ARMCA7PLL_D2, "armca7pll_d2", "armca7pll_754m", 1, 1),
+       FACTOR(CLK_TOP_ARMCA7PLL_D3, "armca7pll_d3", "armca7pll_502m", 1, 1),
+
+       FACTOR(CLK_TOP_APLL1, "apll1_ck", "apll1", 1, 1),
+       FACTOR(CLK_TOP_APLL2, "apll2_ck", "apll2", 1, 1),
+
+       FACTOR(CLK_TOP_DMPLL, "dmpll_ck", "clkph_mck_o", 1, 1),
+       FACTOR(CLK_TOP_DMPLL_D2, "dmpll_d2", "clkph_mck_o", 1, 2),
+       FACTOR(CLK_TOP_DMPLL_D4, "dmpll_d4", "clkph_mck_o", 1, 4),
+       FACTOR(CLK_TOP_DMPLL_D8, "dmpll_d8", "clkph_mck_o", 1, 8),
+       FACTOR(CLK_TOP_DMPLL_D16, "dmpll_d16", "clkph_mck_o", 1, 16),
+
+       FACTOR(CLK_TOP_LVDSPLL_D2, "lvdspll_d2", "lvdspll", 1, 2),
+       FACTOR(CLK_TOP_LVDSPLL_D4, "lvdspll_d4", "lvdspll", 1, 4),
+       FACTOR(CLK_TOP_LVDSPLL_D8, "lvdspll_d8", "lvdspll", 1, 8),
+
+       FACTOR(CLK_TOP_MMPLL, "mmpll_ck", "mmpll", 1, 1),
+       FACTOR(CLK_TOP_MMPLL_D2, "mmpll_d2", "mmpll", 1, 2),
+
+       FACTOR(CLK_TOP_MSDCPLL, "msdcpll_ck", "msdcpll", 1, 1),
+       FACTOR(CLK_TOP_MSDCPLL_D2, "msdcpll_d2", "msdcpll", 1, 2),
+       FACTOR(CLK_TOP_MSDCPLL_D4, "msdcpll_d4", "msdcpll", 1, 4),
+       FACTOR(CLK_TOP_MSDCPLL2, "msdcpll2_ck", "msdcpll2", 1, 1),
+       FACTOR(CLK_TOP_MSDCPLL2_D2, "msdcpll2_d2", "msdcpll2", 1, 2),
+       FACTOR(CLK_TOP_MSDCPLL2_D4, "msdcpll2_d4", "msdcpll2", 1, 4),
+
+       FACTOR(CLK_TOP_SYSPLL_D2, "syspll_d2", "main_h546m", 1, 1),
+       FACTOR(CLK_TOP_SYSPLL1_D2, "syspll1_d2", "main_h546m", 1, 2),
+       FACTOR(CLK_TOP_SYSPLL1_D4, "syspll1_d4", "main_h546m", 1, 4),
+       FACTOR(CLK_TOP_SYSPLL1_D8, "syspll1_d8", "main_h546m", 1, 8),
+       FACTOR(CLK_TOP_SYSPLL1_D16, "syspll1_d16", "main_h546m", 1, 16),
+       FACTOR(CLK_TOP_SYSPLL_D3, "syspll_d3", "main_h364m", 1, 1),
+       FACTOR(CLK_TOP_SYSPLL2_D2, "syspll2_d2", "main_h364m", 1, 2),
+       FACTOR(CLK_TOP_SYSPLL2_D4, "syspll2_d4", "main_h364m", 1, 4),
+       FACTOR(CLK_TOP_SYSPLL_D5, "syspll_d5", "main_h218p4m", 1, 1),
+       FACTOR(CLK_TOP_SYSPLL3_D2, "syspll3_d2", "main_h218p4m", 1, 2),
+       FACTOR(CLK_TOP_SYSPLL3_D4, "syspll3_d4", "main_h218p4m", 1, 4),
+       FACTOR(CLK_TOP_SYSPLL_D7, "syspll_d7", "main_h156m", 1, 1),
+       FACTOR(CLK_TOP_SYSPLL4_D2, "syspll4_d2", "main_h156m", 1, 2),
+       FACTOR(CLK_TOP_SYSPLL4_D4, "syspll4_d4", "main_h156m", 1, 4),
+
+       FACTOR(CLK_TOP_TVDPLL, "tvdpll_ck", "tvdpll_594m", 1, 1),
+       FACTOR(CLK_TOP_TVDPLL_D2, "tvdpll_d2", "tvdpll_594m", 1, 2),
+       FACTOR(CLK_TOP_TVDPLL_D4, "tvdpll_d4", "tvdpll_594m", 1, 4),
+       FACTOR(CLK_TOP_TVDPLL_D8, "tvdpll_d8", "tvdpll_594m", 1, 8),
+       FACTOR(CLK_TOP_TVDPLL_D16, "tvdpll_d16", "tvdpll_594m", 1, 16),
+
+       FACTOR(CLK_TOP_UNIVPLL_D2, "univpll_d2", "univ_624m", 1, 1),
+       FACTOR(CLK_TOP_UNIVPLL1_D2, "univpll1_d2", "univ_624m", 1, 2),
+       FACTOR(CLK_TOP_UNIVPLL1_D4, "univpll1_d4", "univ_624m", 1, 4),
+       FACTOR(CLK_TOP_UNIVPLL1_D8, "univpll1_d8", "univ_624m", 1, 8),
+       FACTOR(CLK_TOP_UNIVPLL_D3, "univpll_d3", "univ_416m", 1, 1),
+       FACTOR(CLK_TOP_UNIVPLL2_D2, "univpll2_d2", "univ_416m", 1, 2),
+       FACTOR(CLK_TOP_UNIVPLL2_D4, "univpll2_d4", "univ_416m", 1, 4),
+       FACTOR(CLK_TOP_UNIVPLL2_D8, "univpll2_d8", "univ_416m", 1, 8),
+       FACTOR(CLK_TOP_UNIVPLL_D5, "univpll_d5", "univ_249p6m", 1, 1),
+       FACTOR(CLK_TOP_UNIVPLL3_D2, "univpll3_d2", "univ_249p6m", 1, 2),
+       FACTOR(CLK_TOP_UNIVPLL3_D4, "univpll3_d4", "univ_249p6m", 1, 4),
+       FACTOR(CLK_TOP_UNIVPLL3_D8, "univpll3_d8", "univ_249p6m", 1, 8),
+       FACTOR(CLK_TOP_UNIVPLL_D7, "univpll_d7", "univ_178p3m", 1, 1),
+       FACTOR(CLK_TOP_UNIVPLL_D26, "univpll_d26", "univ_48m", 1, 1),
+       FACTOR(CLK_TOP_UNIVPLL_D52, "univpll_d52", "univ_48m", 1, 2),
+
+       FACTOR(CLK_TOP_VCODECPLL, "vcodecpll_ck", "vcodecpll", 1, 3),
+       FACTOR(CLK_TOP_VCODECPLL_370P5, "vcodecpll_370p5", "vcodecpll", 1, 4),
+
+       FACTOR(CLK_TOP_VENCPLL, "vencpll_ck", "vencpll", 1, 1),
+       FACTOR(CLK_TOP_VENCPLL_D2, "vencpll_d2", "vencpll", 1, 2),
+       FACTOR(CLK_TOP_VENCPLL_D4, "vencpll_d4", "vencpll", 1, 4),
+};
+
+static const char * const axi_parents[] __initconst = {
+       "clk26m",
+       "syspll1_d2",
+       "syspll_d5",
+       "syspll1_d4",
+       "univpll_d5",
+       "univpll2_d2",
+       "dmpll_d2",
+       "dmpll_d4"
+};
+
+static const char * const mem_parents[] __initconst = {
+       "clk26m",
+       "dmpll_ck"
+};
+
+static const char * const ddrphycfg_parents[] __initconst = {
+       "clk26m",
+       "syspll1_d8"
+};
+
+static const char * const mm_parents[] __initconst = {
+       "clk26m",
+       "vencpll_d2",
+       "main_h364m",
+       "syspll1_d2",
+       "syspll_d5",
+       "syspll1_d4",
+       "univpll1_d2",
+       "univpll2_d2",
+       "dmpll_d2"
+};
+
+static const char * const pwm_parents[] __initconst = {
+       "clk26m",
+       "univpll2_d4",
+       "univpll3_d2",
+       "univpll1_d4"
+};
+
+static const char * const vdec_parents[] __initconst = {
+       "clk26m",
+       "vcodecpll_ck",
+       "tvdpll_445p5m",
+       "univpll_d3",
+       "vencpll_d2",
+       "syspll_d3",
+       "univpll1_d2",
+       "mmpll_d2",
+       "dmpll_d2",
+       "dmpll_d4"
+};
+
+static const char * const venc_parents[] __initconst = {
+       "clk26m",
+       "vcodecpll_ck",
+       "tvdpll_445p5m",
+       "univpll_d3",
+       "vencpll_d2",
+       "syspll_d3",
+       "univpll1_d2",
+       "univpll2_d2",
+       "dmpll_d2",
+       "dmpll_d4"
+};
+
+static const char * const mfg_parents[] __initconst = {
+       "clk26m",
+       "mmpll_ck",
+       "dmpll_ck",
+       "clk26m",
+       "clk26m",
+       "clk26m",
+       "clk26m",
+       "clk26m",
+       "clk26m",
+       "syspll_d3",
+       "syspll1_d2",
+       "syspll_d5",
+       "univpll_d3",
+       "univpll1_d2",
+       "univpll_d5",
+       "univpll2_d2"
+};
+
+static const char * const camtg_parents[] __initconst = {
+       "clk26m",
+       "univpll_d26",
+       "univpll2_d2",
+       "syspll3_d2",
+       "syspll3_d4",
+       "univpll1_d4"
+};
+
+static const char * const uart_parents[] __initconst = {
+       "clk26m",
+       "univpll2_d8"
+};
+
+static const char * const spi_parents[] __initconst = {
+       "clk26m",
+       "syspll3_d2",
+       "syspll1_d4",
+       "syspll4_d2",
+       "univpll3_d2",
+       "univpll2_d4",
+       "univpll1_d8"
+};
+
+static const char * const usb20_parents[] __initconst = {
+       "clk26m",
+       "univpll1_d8",
+       "univpll3_d4"
+};
+
+static const char * const usb30_parents[] __initconst = {
+       "clk26m",
+       "univpll3_d2",
+       "usb_syspll_125m",
+       "univpll2_d4"
+};
+
+static const char * const msdc50_0_h_parents[] __initconst = {
+       "clk26m",
+       "syspll1_d2",
+       "syspll2_d2",
+       "syspll4_d2",
+       "univpll_d5",
+       "univpll1_d4"
+};
+
+static const char * const msdc50_0_parents[] __initconst = {
+       "clk26m",
+       "msdcpll_ck",
+       "msdcpll_d2",
+       "univpll1_d4",
+       "syspll2_d2",
+       "syspll_d7",
+       "msdcpll_d4",
+       "vencpll_d4",
+       "tvdpll_ck",
+       "univpll_d2",
+       "univpll1_d2",
+       "mmpll_ck",
+       "msdcpll2_ck",
+       "msdcpll2_d2",
+       "msdcpll2_d4"
+};
+
+static const char * const msdc30_1_parents[] __initconst = {
+       "clk26m",
+       "univpll2_d2",
+       "msdcpll_d4",
+       "univpll1_d4",
+       "syspll2_d2",
+       "syspll_d7",
+       "univpll_d7",
+       "vencpll_d4"
+};
+
+static const char * const msdc30_2_parents[] __initconst = {
+       "clk26m",
+       "univpll2_d2",
+       "msdcpll_d4",
+       "univpll1_d4",
+       "syspll2_d2",
+       "syspll_d7",
+       "univpll_d7",
+       "vencpll_d2"
+};
+
+static const char * const msdc30_3_parents[] __initconst = {
+       "clk26m",
+       "msdcpll2_ck",
+       "msdcpll2_d2",
+       "univpll2_d2",
+       "msdcpll2_d4",
+       "msdcpll_d4",
+       "univpll1_d4",
+       "syspll2_d2",
+       "syspll_d7",
+       "univpll_d7",
+       "vencpll_d4",
+       "msdcpll_ck",
+       "msdcpll_d2",
+       "msdcpll_d4"
+};
+
+static const char * const audio_parents[] __initconst = {
+       "clk26m",
+       "syspll3_d4",
+       "syspll4_d4",
+       "syspll1_d16"
+};
+
+static const char * const aud_intbus_parents[] __initconst = {
+       "clk26m",
+       "syspll1_d4",
+       "syspll4_d2",
+       "univpll3_d2",
+       "univpll2_d8",
+       "dmpll_d4",
+       "dmpll_d8"
+};
+
+static const char * const pmicspi_parents[] __initconst = {
+       "clk26m",
+       "syspll1_d8",
+       "syspll3_d4",
+       "syspll1_d16",
+       "univpll3_d4",
+       "univpll_d26",
+       "dmpll_d8",
+       "dmpll_d16"
+};
+
+static const char * const scp_parents[] __initconst = {
+       "clk26m",
+       "syspll1_d2",
+       "univpll_d5",
+       "syspll_d5",
+       "dmpll_d2",
+       "dmpll_d4"
+};
+
+static const char * const atb_parents[] __initconst = {
+       "clk26m",
+       "syspll1_d2",
+       "univpll_d5",
+       "dmpll_d2"
+};
+
+static const char * const venc_lt_parents[] __initconst = {
+       "clk26m",
+       "univpll_d3",
+       "vcodecpll_ck",
+       "tvdpll_445p5m",
+       "vencpll_d2",
+       "syspll_d3",
+       "univpll1_d2",
+       "univpll2_d2",
+       "syspll1_d2",
+       "univpll_d5",
+       "vcodecpll_370p5",
+       "dmpll_ck"
+};
+
+static const char * const dpi0_parents[] __initconst = {
+       "clk26m",
+       "tvdpll_d2",
+       "tvdpll_d4",
+       "clk26m",
+       "clk26m",
+       "tvdpll_d8",
+       "tvdpll_d16"
+};
+
+static const char * const irda_parents[] __initconst = {
+       "clk26m",
+       "univpll2_d4",
+       "syspll2_d4"
+};
+
+static const char * const cci400_parents[] __initconst = {
+       "clk26m",
+       "vencpll_ck",
+       "armca7pll_754m",
+       "armca7pll_502m",
+       "univpll_d2",
+       "syspll_d2",
+       "msdcpll_ck",
+       "dmpll_ck"
+};
+
+static const char * const aud_1_parents[] __initconst = {
+       "clk26m",
+       "apll1_ck",
+       "univpll2_d4",
+       "univpll2_d8"
+};
+
+static const char * const aud_2_parents[] __initconst = {
+       "clk26m",
+       "apll2_ck",
+       "univpll2_d4",
+       "univpll2_d8"
+};
+
+static const char * const mem_mfg_in_parents[] __initconst = {
+       "clk26m",
+       "mmpll_ck",
+       "dmpll_ck",
+       "clk26m"
+};
+
+static const char * const axi_mfg_in_parents[] __initconst = {
+       "clk26m",
+       "axi_sel",
+       "dmpll_d2"
+};
+
+static const char * const scam_parents[] __initconst = {
+       "clk26m",
+       "syspll3_d2",
+       "univpll2_d4",
+       "dmpll_d4"
+};
+
+static const char * const spinfi_ifr_parents[] __initconst = {
+       "clk26m",
+       "univpll2_d8",
+       "univpll3_d4",
+       "syspll4_d2",
+       "univpll2_d4",
+       "univpll3_d2",
+       "syspll1_d4",
+       "univpll1_d4"
+};
+
+static const char * const hdmi_parents[] __initconst = {
+       "clk26m",
+       "hdmitx_dig_cts",
+       "hdmitxpll_d2",
+       "hdmitxpll_d3"
+};
+
+static const char * const dpilvds_parents[] __initconst = {
+       "clk26m",
+       "lvdspll",
+       "lvdspll_d2",
+       "lvdspll_d4",
+       "lvdspll_d8",
+       "fpc_ck"
+};
+
+static const char * const msdc50_2_h_parents[] __initconst = {
+       "clk26m",
+       "syspll1_d2",
+       "syspll2_d2",
+       "syspll4_d2",
+       "univpll_d5",
+       "univpll1_d4"
+};
+
+static const char * const hdcp_parents[] __initconst = {
+       "clk26m",
+       "syspll4_d2",
+       "syspll3_d4",
+       "univpll2_d4"
+};
+
+static const char * const hdcp_24m_parents[] __initconst = {
+       "clk26m",
+       "univpll_d26",
+       "univpll_d52",
+       "univpll2_d8"
+};
+
+static const char * const rtc_parents[] __initconst = {
+       "clkrtc_int",
+       "clkrtc_ext",
+       "clk26m",
+       "univpll3_d8"
+};
+
+static const char * const i2s0_m_ck_parents[] __initconst = {
+       "apll1_div1",
+       "apll2_div1"
+};
+
+static const char * const i2s1_m_ck_parents[] __initconst = {
+       "apll1_div2",
+       "apll2_div2"
+};
+
+static const char * const i2s2_m_ck_parents[] __initconst = {
+       "apll1_div3",
+       "apll2_div3"
+};
+
+static const char * const i2s3_m_ck_parents[] __initconst = {
+       "apll1_div4",
+       "apll2_div4"
+};
+
+static const char * const i2s3_b_ck_parents[] __initconst = {
+       "apll1_div5",
+       "apll2_div5"
+};
+
+static const struct mtk_composite top_muxes[] __initconst = {
+       /* CLK_CFG_0 */
+       MUX(CLK_TOP_AXI_SEL, "axi_sel", axi_parents, 0x0040, 0, 3),
+       MUX(CLK_TOP_MEM_SEL, "mem_sel", mem_parents, 0x0040, 8, 1),
+       MUX_GATE(CLK_TOP_DDRPHYCFG_SEL, "ddrphycfg_sel", ddrphycfg_parents, 0x0040, 16, 1, 23),
+       MUX_GATE(CLK_TOP_MM_SEL, "mm_sel", mm_parents, 0x0040, 24, 4, 31),
+       /* CLK_CFG_1 */
+       MUX_GATE(CLK_TOP_PWM_SEL, "pwm_sel", pwm_parents, 0x0050, 0, 2, 7),
+       MUX_GATE(CLK_TOP_VDEC_SEL, "vdec_sel", vdec_parents, 0x0050, 8, 4, 15),
+       MUX_GATE(CLK_TOP_VENC_SEL, "venc_sel", venc_parents, 0x0050, 16, 4, 23),
+       MUX_GATE(CLK_TOP_MFG_SEL, "mfg_sel", mfg_parents, 0x0050, 24, 4, 31),
+       /* CLK_CFG_2 */
+       MUX_GATE(CLK_TOP_CAMTG_SEL, "camtg_sel", camtg_parents, 0x0060, 0, 3, 7),
+       MUX_GATE(CLK_TOP_UART_SEL, "uart_sel", uart_parents, 0x0060, 8, 1, 15),
+       MUX_GATE(CLK_TOP_SPI_SEL, "spi_sel", spi_parents, 0x0060, 16, 3, 23),
+       MUX_GATE(CLK_TOP_USB20_SEL, "usb20_sel", usb20_parents, 0x0060, 24, 2, 31),
+       /* CLK_CFG_3 */
+       MUX_GATE(CLK_TOP_USB30_SEL, "usb30_sel", usb30_parents, 0x0070, 0, 2, 7),
+       MUX_GATE(CLK_TOP_MSDC50_0_H_SEL, "msdc50_0_h_sel", msdc50_0_h_parents, 0x0070, 8, 3, 15),
+       MUX_GATE(CLK_TOP_MSDC50_0_SEL, "msdc50_0_sel", msdc50_0_parents, 0x0070, 16, 4, 23),
+       MUX_GATE(CLK_TOP_MSDC30_1_SEL, "msdc30_1_sel", msdc30_1_parents, 0x0070, 24, 3, 31),
+       /* CLK_CFG_4 */
+       MUX_GATE(CLK_TOP_MSDC30_2_SEL, "msdc30_2_sel", msdc30_2_parents, 0x0080, 0, 3, 7),
+       MUX_GATE(CLK_TOP_MSDC30_3_SEL, "msdc30_3_sel", msdc30_3_parents, 0x0080, 8, 4, 15),
+       MUX_GATE(CLK_TOP_AUDIO_SEL, "audio_sel", audio_parents, 0x0080, 16, 2, 23),
+       MUX_GATE(CLK_TOP_AUD_INTBUS_SEL, "aud_intbus_sel", aud_intbus_parents, 0x0080, 24, 3, 31),
+       /* CLK_CFG_5 */
+       MUX_GATE(CLK_TOP_PMICSPI_SEL, "pmicspi_sel", pmicspi_parents, 0x0090, 0, 3, 7 /* 7:5 */),
+       MUX_GATE(CLK_TOP_SCP_SEL, "scp_sel", scp_parents, 0x0090, 8, 3, 15),
+       MUX_GATE(CLK_TOP_ATB_SEL, "atb_sel", atb_parents, 0x0090, 16, 2, 23),
+       MUX_GATE(CLK_TOP_VENC_LT_SEL, "venclt_sel", venc_lt_parents, 0x0090, 24, 4, 31),
+       /* CLK_CFG_6 */
+       MUX_GATE(CLK_TOP_DPI0_SEL, "dpi0_sel", dpi0_parents, 0x00a0, 0, 3, 7),
+       MUX_GATE(CLK_TOP_IRDA_SEL, "irda_sel", irda_parents, 0x00a0, 8, 2, 15),
+       MUX_GATE(CLK_TOP_CCI400_SEL, "cci400_sel", cci400_parents, 0x00a0, 16, 3, 23),
+       MUX_GATE(CLK_TOP_AUD_1_SEL, "aud_1_sel", aud_1_parents, 0x00a0, 24, 2, 31),
+       /* CLK_CFG_7 */
+       MUX_GATE(CLK_TOP_AUD_2_SEL, "aud_2_sel", aud_2_parents, 0x00b0, 0, 2, 7),
+       MUX_GATE(CLK_TOP_MEM_MFG_IN_SEL, "mem_mfg_in_sel", mem_mfg_in_parents, 0x00b0, 8, 2, 15),
+       MUX_GATE(CLK_TOP_AXI_MFG_IN_SEL, "axi_mfg_in_sel", axi_mfg_in_parents, 0x00b0, 16, 2, 23),
+       MUX_GATE(CLK_TOP_SCAM_SEL, "scam_sel", scam_parents, 0x00b0, 24, 2, 31),
+       /* CLK_CFG_12 */
+       MUX_GATE(CLK_TOP_SPINFI_IFR_SEL, "spinfi_ifr_sel", spinfi_ifr_parents, 0x00c0, 0, 3, 7),
+       MUX_GATE(CLK_TOP_HDMI_SEL, "hdmi_sel", hdmi_parents, 0x00c0, 8, 2, 15),
+       MUX_GATE(CLK_TOP_DPILVDS_SEL, "dpilvds_sel", dpilvds_parents, 0x00c0, 24, 3, 31),
+       /* CLK_CFG_13 */
+       MUX_GATE(CLK_TOP_MSDC50_2_H_SEL, "msdc50_2_h_sel", msdc50_2_h_parents, 0x00d0, 0, 3, 7),
+       MUX_GATE(CLK_TOP_HDCP_SEL, "hdcp_sel", hdcp_parents, 0x00d0, 8, 2, 15),
+       MUX_GATE(CLK_TOP_HDCP_24M_SEL, "hdcp_24m_sel", hdcp_24m_parents, 0x00d0, 16, 2, 23),
+       MUX(CLK_TOP_RTC_SEL, "rtc_sel", rtc_parents, 0x00d0, 24, 2),
+
+       DIV_GATE(CLK_TOP_APLL1_DIV0, "apll1_div0", "aud_1_sel", 0x12c, 8, 0x120, 4, 24),
+       DIV_GATE(CLK_TOP_APLL1_DIV1, "apll1_div1", "aud_1_sel", 0x12c, 9, 0x124, 8, 0),
+       DIV_GATE(CLK_TOP_APLL1_DIV2, "apll1_div2", "aud_1_sel", 0x12c, 10, 0x124, 8, 8),
+       DIV_GATE(CLK_TOP_APLL1_DIV3, "apll1_div3", "aud_1_sel", 0x12c, 11, 0x124, 8, 16),
+       DIV_GATE(CLK_TOP_APLL1_DIV4, "apll1_div4", "aud_1_sel", 0x12c, 12, 0x124, 8, 24),
+       DIV_GATE(CLK_TOP_APLL1_DIV5, "apll1_div5", "apll1_div4", 0x12c, 13, 0x12c, 4, 0),
+
+       DIV_GATE(CLK_TOP_APLL2_DIV0, "apll2_div0", "aud_2_sel", 0x12c, 16, 0x120, 4, 28),
+       DIV_GATE(CLK_TOP_APLL2_DIV1, "apll2_div1", "aud_2_sel", 0x12c, 17, 0x128, 8, 0),
+       DIV_GATE(CLK_TOP_APLL2_DIV2, "apll2_div2", "aud_2_sel", 0x12c, 18, 0x128, 8, 8),
+       DIV_GATE(CLK_TOP_APLL2_DIV3, "apll2_div3", "aud_2_sel", 0x12c, 19, 0x128, 8, 16),
+       DIV_GATE(CLK_TOP_APLL2_DIV4, "apll2_div4", "aud_2_sel", 0x12c, 20, 0x128, 8, 24),
+       DIV_GATE(CLK_TOP_APLL2_DIV5, "apll2_div5", "apll2_div4", 0x12c, 21, 0x12c, 4, 4),
+
+       MUX(CLK_TOP_I2S0_M_SEL, "i2s0_m_ck_sel", i2s0_m_ck_parents, 0x120, 4, 1),
+       MUX(CLK_TOP_I2S1_M_SEL, "i2s1_m_ck_sel", i2s1_m_ck_parents, 0x120, 5, 1),
+       MUX(CLK_TOP_I2S2_M_SEL, "i2s2_m_ck_sel", i2s2_m_ck_parents, 0x120, 6, 1),
+       MUX(CLK_TOP_I2S3_M_SEL, "i2s3_m_ck_sel", i2s3_m_ck_parents, 0x120, 7, 1),
+       MUX(CLK_TOP_I2S3_B_SEL, "i2s3_b_ck_sel", i2s3_b_ck_parents, 0x120, 8, 1),
+};
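+
+/*
+ * Reading the table above: each MUX_GATE() entry packs a mux and its gate
+ * into one CLK_CFG register.  "mm_sel", for example, selects one of the
+ * nine mm_parents through bits [27:24] of CLK_CFG_0 (0x0040) and is gated
+ * by bit 31; the gate is registered with CLK_GATE_SET_TO_DISABLE, so
+ * setting that bit switches the clock off.
+ */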
+
+static const struct mtk_gate_regs infra_cg_regs = {
+       .set_ofs = 0x0040,
+       .clr_ofs = 0x0044,
+       .sta_ofs = 0x0048,
+};
+
+#define GATE_ICG(_id, _name, _parent, _shift) {        \
+               .id = _id,                                      \
+               .name = _name,                                  \
+               .parent_name = _parent,                         \
+               .regs = &infra_cg_regs,                         \
+               .shift = _shift,                                \
+               .ops = &mtk_clk_gate_ops_setclr,                \
+       }
+
+static const struct mtk_gate infra_clks[] __initconst = {
+       GATE_ICG(CLK_INFRA_DBGCLK, "infra_dbgclk", "axi_sel", 0),
+       GATE_ICG(CLK_INFRA_SMI, "infra_smi", "mm_sel", 1),
+       GATE_ICG(CLK_INFRA_AUDIO, "infra_audio", "aud_intbus_sel", 5),
+       GATE_ICG(CLK_INFRA_GCE, "infra_gce", "axi_sel", 6),
+       GATE_ICG(CLK_INFRA_L2C_SRAM, "infra_l2c_sram", "axi_sel", 7),
+       GATE_ICG(CLK_INFRA_M4U, "infra_m4u", "mem_sel", 8),
+       GATE_ICG(CLK_INFRA_CPUM, "infra_cpum", "clk_null", 15),
+       GATE_ICG(CLK_INFRA_KP, "infra_kp", "axi_sel", 16),
+       GATE_ICG(CLK_INFRA_CEC, "infra_cec", "clk26m", 18),
+       GATE_ICG(CLK_INFRA_PMICSPI, "infra_pmicspi", "pmicspi_sel", 22),
+       GATE_ICG(CLK_INFRA_PMICWRAP, "infra_pmicwrap", "axi_sel", 23),
+};
+
+static const struct mtk_gate_regs peri0_cg_regs = {
+       .set_ofs = 0x0008,
+       .clr_ofs = 0x0010,
+       .sta_ofs = 0x0018,
+};
+
+static const struct mtk_gate_regs peri1_cg_regs = {
+       .set_ofs = 0x000c,
+       .clr_ofs = 0x0014,
+       .sta_ofs = 0x001c,
+};
+
+#define GATE_PERI0(_id, _name, _parent, _shift) {      \
+               .id = _id,                                      \
+               .name = _name,                                  \
+               .parent_name = _parent,                         \
+               .regs = &peri0_cg_regs,                         \
+               .shift = _shift,                                \
+               .ops = &mtk_clk_gate_ops_setclr,                \
+       }
+
+#define GATE_PERI1(_id, _name, _parent, _shift) {      \
+               .id = _id,                                      \
+               .name = _name,                                  \
+               .parent_name = _parent,                         \
+               .regs = &peri1_cg_regs,                         \
+               .shift = _shift,                                \
+               .ops = &mtk_clk_gate_ops_setclr,                \
+       }
+
+static const struct mtk_gate peri_gates[] __initconst = {
+       /* PERI0 */
+       GATE_PERI0(CLK_PERI_NFI, "peri_nfi", "axi_sel", 0),
+       GATE_PERI0(CLK_PERI_THERM, "peri_therm", "axi_sel", 1),
+       GATE_PERI0(CLK_PERI_PWM1, "peri_pwm1", "axi_sel", 2),
+       GATE_PERI0(CLK_PERI_PWM2, "peri_pwm2", "axi_sel", 3),
+       GATE_PERI0(CLK_PERI_PWM3, "peri_pwm3", "axi_sel", 4),
+       GATE_PERI0(CLK_PERI_PWM4, "peri_pwm4", "axi_sel", 5),
+       GATE_PERI0(CLK_PERI_PWM5, "peri_pwm5", "axi_sel", 6),
+       GATE_PERI0(CLK_PERI_PWM6, "peri_pwm6", "axi_sel", 7),
+       GATE_PERI0(CLK_PERI_PWM7, "peri_pwm7", "axi_sel", 8),
+       GATE_PERI0(CLK_PERI_PWM, "peri_pwm", "axi_sel", 9),
+       GATE_PERI0(CLK_PERI_USB0, "peri_usb0", "usb20_sel", 10),
+       GATE_PERI0(CLK_PERI_USB1, "peri_usb1", "usb20_sel", 11),
+       GATE_PERI0(CLK_PERI_AP_DMA, "peri_ap_dma", "axi_sel", 12),
+       GATE_PERI0(CLK_PERI_MSDC30_0, "peri_msdc30_0", "msdc50_0_sel", 13),
+       GATE_PERI0(CLK_PERI_MSDC30_1, "peri_msdc30_1", "msdc30_1_sel", 14),
+       GATE_PERI0(CLK_PERI_MSDC30_2, "peri_msdc30_2", "msdc30_2_sel", 15),
+       GATE_PERI0(CLK_PERI_MSDC30_3, "peri_msdc30_3", "msdc30_3_sel", 16),
+       GATE_PERI0(CLK_PERI_NLI_ARB, "peri_nli_arb", "axi_sel", 17),
+       GATE_PERI0(CLK_PERI_IRDA, "peri_irda", "irda_sel", 18),
+       GATE_PERI0(CLK_PERI_UART0, "peri_uart0", "axi_sel", 19),
+       GATE_PERI0(CLK_PERI_UART1, "peri_uart1", "axi_sel", 20),
+       GATE_PERI0(CLK_PERI_UART2, "peri_uart2", "axi_sel", 21),
+       GATE_PERI0(CLK_PERI_UART3, "peri_uart3", "axi_sel", 22),
+       GATE_PERI0(CLK_PERI_I2C0, "peri_i2c0", "axi_sel", 23),
+       GATE_PERI0(CLK_PERI_I2C1, "peri_i2c1", "axi_sel", 24),
+       GATE_PERI0(CLK_PERI_I2C2, "peri_i2c2", "axi_sel", 25),
+       GATE_PERI0(CLK_PERI_I2C3, "peri_i2c3", "axi_sel", 26),
+       GATE_PERI0(CLK_PERI_I2C4, "peri_i2c4", "axi_sel", 27),
+       GATE_PERI0(CLK_PERI_AUXADC, "peri_auxadc", "clk26m", 28),
+       GATE_PERI0(CLK_PERI_SPI0, "peri_spi0", "spi_sel", 29),
+       GATE_PERI0(CLK_PERI_I2C5, "peri_i2c5", "axi_sel", 30),
+       GATE_PERI0(CLK_PERI_NFIECC, "peri_nfiecc", "axi_sel", 31),
+       /* PERI1 */
+       GATE_PERI1(CLK_PERI_SPI, "peri_spi", "spi_sel", 0),
+       GATE_PERI1(CLK_PERI_IRRX, "peri_irrx", "spi_sel", 1),
+       GATE_PERI1(CLK_PERI_I2C6, "peri_i2c6", "axi_sel", 2),
+};
+
+static const char * const uart_ck_sel_parents[] __initconst = {
+       "clk26m",
+       "uart_sel",
+};
+
+static const struct mtk_composite peri_clks[] __initconst = {
+       MUX(CLK_PERI_UART0_SEL, "uart0_ck_sel", uart_ck_sel_parents, 0x40c, 0, 1),
+       MUX(CLK_PERI_UART1_SEL, "uart1_ck_sel", uart_ck_sel_parents, 0x40c, 1, 1),
+       MUX(CLK_PERI_UART2_SEL, "uart2_ck_sel", uart_ck_sel_parents, 0x40c, 2, 1),
+       MUX(CLK_PERI_UART3_SEL, "uart3_ck_sel", uart_ck_sel_parents, 0x40c, 3, 1),
+};
+
+static void __init mtk_topckgen_init(struct device_node *node)
+{
+       struct clk_onecell_data *clk_data;
+       void __iomem *base;
+       int r;
+
+       base = of_iomap(node, 0);
+       if (!base) {
+               pr_err("%s(): ioremap failed\n", __func__);
+               return;
+       }
+
+       clk_data = mtk_alloc_clk_data(CLK_TOP_NR_CLK);
+
+       mtk_clk_register_factors(root_clk_alias, ARRAY_SIZE(root_clk_alias), clk_data);
+       mtk_clk_register_factors(top_divs, ARRAY_SIZE(top_divs), clk_data);
+       mtk_clk_register_composites(top_muxes, ARRAY_SIZE(top_muxes), base,
+                       &mt8173_clk_lock, clk_data);
+
+       clk_prepare_enable(clk_data->clks[CLK_TOP_CCI400_SEL]);
+
+       r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+       if (r)
+               pr_err("%s(): could not register clock provider: %d\n",
+                       __func__, r);
+}
+CLK_OF_DECLARE(mtk_topckgen, "mediatek,mt8173-topckgen", mtk_topckgen_init);
+
+static void __init mtk_infrasys_init(struct device_node *node)
+{
+       struct clk_onecell_data *clk_data;
+       int r;
+
+       clk_data = mtk_alloc_clk_data(CLK_INFRA_NR_CLK);
+
+       mtk_clk_register_gates(node, infra_clks, ARRAY_SIZE(infra_clks),
+                                               clk_data);
+
+       r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+       if (r)
+               pr_err("%s(): could not register clock provider: %d\n",
+                       __func__, r);
+
+       mtk_register_reset_controller(node, 2, 0x30);
+}
+CLK_OF_DECLARE(mtk_infrasys, "mediatek,mt8173-infracfg", mtk_infrasys_init);
+
+static void __init mtk_pericfg_init(struct device_node *node)
+{
+       struct clk_onecell_data *clk_data;
+       int r;
+       void __iomem *base;
+
+       base = of_iomap(node, 0);
+       if (!base) {
+               pr_err("%s(): ioremap failed\n", __func__);
+               return;
+       }
+
+       clk_data = mtk_alloc_clk_data(CLK_PERI_NR_CLK);
+
+       mtk_clk_register_gates(node, peri_gates, ARRAY_SIZE(peri_gates),
+                                               clk_data);
+       mtk_clk_register_composites(peri_clks, ARRAY_SIZE(peri_clks), base,
+                       &mt8173_clk_lock, clk_data);
+
+       r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+       if (r)
+               pr_err("%s(): could not register clock provider: %d\n",
+                       __func__, r);
+
+       mtk_register_reset_controller(node, 2, 0);
+}
+CLK_OF_DECLARE(mtk_pericfg, "mediatek,mt8173-pericfg", mtk_pericfg_init);
+
+#define MT8173_PLL_FMAX                (3000UL * MHZ)
+
+#define CON0_MT8173_RST_BAR    BIT(24)
+
+#define PLL(_id, _name, _reg, _pwr_reg, _en_mask, _flags, _pcwbits, _pd_reg, _pd_shift, \
+                       _tuner_reg, _pcw_reg, _pcw_shift) { \
+               .id = _id,                                              \
+               .name = _name,                                          \
+               .reg = _reg,                                            \
+               .pwr_reg = _pwr_reg,                                    \
+               .en_mask = _en_mask,                                    \
+               .flags = _flags,                                        \
+               .rst_bar_mask = CON0_MT8173_RST_BAR,                    \
+               .fmax = MT8173_PLL_FMAX,                                \
+               .pcwbits = _pcwbits,                                    \
+               .pd_reg = _pd_reg,                                      \
+               .pd_shift = _pd_shift,                                  \
+               .tuner_reg = _tuner_reg,                                \
+               .pcw_reg = _pcw_reg,                                    \
+               .pcw_shift = _pcw_shift,                                \
+       }
+
+static const struct mtk_pll_data plls[] = {
+       PLL(CLK_APMIXED_ARMCA15PLL, "armca15pll", 0x200, 0x20c, 0x00000001, 0, 21, 0x204, 24, 0x0, 0x204, 0),
+       PLL(CLK_APMIXED_ARMCA7PLL, "armca7pll", 0x210, 0x21c, 0x00000001, 0, 21, 0x214, 24, 0x0, 0x214, 0),
+       PLL(CLK_APMIXED_MAINPLL, "mainpll", 0x220, 0x22c, 0xf0000101, HAVE_RST_BAR, 21, 0x220, 4, 0x0, 0x224, 0),
+       PLL(CLK_APMIXED_UNIVPLL, "univpll", 0x230, 0x23c, 0xfe000001, HAVE_RST_BAR, 7, 0x230, 4, 0x0, 0x234, 14),
+       PLL(CLK_APMIXED_MMPLL, "mmpll", 0x240, 0x24c, 0x00000001, 0, 21, 0x244, 24, 0x0, 0x244, 0),
+       PLL(CLK_APMIXED_MSDCPLL, "msdcpll", 0x250, 0x25c, 0x00000001, 0, 21, 0x250, 4, 0x0, 0x254, 0),
+       PLL(CLK_APMIXED_VENCPLL, "vencpll", 0x260, 0x26c, 0x00000001, 0, 21, 0x260, 4, 0x0, 0x264, 0),
+       PLL(CLK_APMIXED_TVDPLL, "tvdpll", 0x270, 0x27c, 0x00000001, 0, 21, 0x270, 4, 0x0, 0x274, 0),
+       PLL(CLK_APMIXED_MPLL, "mpll", 0x280, 0x28c, 0x00000001, 0, 21, 0x280, 4, 0x0, 0x284, 0),
+       PLL(CLK_APMIXED_VCODECPLL, "vcodecpll", 0x290, 0x29c, 0x00000001, 0, 21, 0x290, 4, 0x0, 0x294, 0),
+       PLL(CLK_APMIXED_APLL1, "apll1", 0x2a0, 0x2b0, 0x00000001, 0, 31, 0x2a0, 4, 0x2a4, 0x2a4, 0),
+       PLL(CLK_APMIXED_APLL2, "apll2", 0x2b4, 0x2c4, 0x00000001, 0, 31, 0x2b4, 4, 0x2b8, 0x2b8, 0),
+       PLL(CLK_APMIXED_LVDSPLL, "lvdspll", 0x2d0, 0x2dc, 0x00000001, 0, 21, 0x2d0, 4, 0x0, 0x2d4, 0),
+       PLL(CLK_APMIXED_MSDCPLL2, "msdcpll2", 0x2f0, 0x2fc, 0x00000001, 0, 21, 0x2f0, 4, 0x0, 0x2f4, 0),
+};
+
+static void __init mtk_apmixedsys_init(struct device_node *node)
+{
+       struct clk_onecell_data *clk_data;
+
+       clk_data = mtk_alloc_clk_data(CLK_APMIXED_NR_CLK);
+       if (!clk_data)
+               return;
+
+       mtk_clk_register_plls(node, plls, ARRAY_SIZE(plls), clk_data);
+
+       clk_prepare_enable(clk_data->clks[CLK_APMIXED_ARMCA15PLL]);
+}
+CLK_OF_DECLARE(mtk_apmixedsys, "mediatek,mt8173-apmixedsys",
+               mtk_apmixedsys_init);
diff --git a/drivers/clk/mediatek/clk-mtk.c b/drivers/clk/mediatek/clk-mtk.c
new file mode 100644 (file)
index 0000000..18444ae
--- /dev/null
@@ -0,0 +1,220 @@
+/*
+ * Copyright (c) 2014 MediaTek Inc.
+ * Author: James Liao <jamesjj.liao@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/clkdev.h>
+#include <linux/mfd/syscon.h>
+
+#include "clk-mtk.h"
+#include "clk-gate.h"
+
+struct clk_onecell_data *mtk_alloc_clk_data(unsigned int clk_num)
+{
+       int i;
+       struct clk_onecell_data *clk_data;
+
+       clk_data = kzalloc(sizeof(*clk_data), GFP_KERNEL);
+       if (!clk_data)
+               return NULL;
+
+       clk_data->clks = kcalloc(clk_num, sizeof(*clk_data->clks), GFP_KERNEL);
+       if (!clk_data->clks)
+               goto err_out;
+
+       clk_data->clk_num = clk_num;
+
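+       /* Ids that never get a clock registered will read back as -ENOENT. */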
+       for (i = 0; i < clk_num; i++)
+               clk_data->clks[i] = ERR_PTR(-ENOENT);
+
+       return clk_data;
+err_out:
+       kfree(clk_data);
+
+       return NULL;
+}
+
+void mtk_clk_register_factors(const struct mtk_fixed_factor *clks, int num,
+               struct clk_onecell_data *clk_data)
+{
+       int i;
+       struct clk *clk;
+
+       for (i = 0; i < num; i++) {
+               const struct mtk_fixed_factor *ff = &clks[i];
+
+               clk = clk_register_fixed_factor(NULL, ff->name, ff->parent_name,
+                               CLK_SET_RATE_PARENT, ff->mult, ff->div);
+
+               if (IS_ERR(clk)) {
+                       pr_err("Failed to register clk %s: %ld\n",
+                                       ff->name, PTR_ERR(clk));
+                       continue;
+               }
+
+               if (clk_data)
+                       clk_data->clks[ff->id] = clk;
+       }
+}
+
+int mtk_clk_register_gates(struct device_node *node, const struct mtk_gate *clks,
+               int num, struct clk_onecell_data *clk_data)
+{
+       int i;
+       struct clk *clk;
+       struct regmap *regmap;
+
+       if (!clk_data)
+               return -ENOMEM;
+
+       regmap = syscon_node_to_regmap(node);
+       if (IS_ERR(regmap)) {
+               pr_err("Cannot find regmap for %s: %ld\n", node->full_name,
+                               PTR_ERR(regmap));
+               return PTR_ERR(regmap);
+       }
+
+       for (i = 0; i < num; i++) {
+               const struct mtk_gate *gate = &clks[i];
+
+               clk = mtk_clk_register_gate(gate->name, gate->parent_name,
+                               regmap,
+                               gate->regs->set_ofs,
+                               gate->regs->clr_ofs,
+                               gate->regs->sta_ofs,
+                               gate->shift, gate->ops);
+
+               if (IS_ERR(clk)) {
+                       pr_err("Failed to register clk %s: %ld\n",
+                                       gate->name, PTR_ERR(clk));
+                       continue;
+               }
+
+               clk_data->clks[gate->id] = clk;
+       }
+
+       return 0;
+}
+
+struct clk *mtk_clk_register_composite(const struct mtk_composite *mc,
+               void __iomem *base, spinlock_t *lock)
+{
+       struct clk *clk;
+       struct clk_mux *mux = NULL;
+       struct clk_gate *gate = NULL;
+       struct clk_divider *div = NULL;
+       struct clk_hw *mux_hw = NULL, *gate_hw = NULL, *div_hw = NULL;
+       const struct clk_ops *mux_ops = NULL, *gate_ops = NULL, *div_ops = NULL;
+       const char * const *parent_names;
+       const char *parent;
+       int num_parents;
+       int ret;
+
+       if (mc->mux_shift >= 0) {
+               mux = kzalloc(sizeof(*mux), GFP_KERNEL);
+               if (!mux)
+                       return ERR_PTR(-ENOMEM);
+
+               mux->reg = base + mc->mux_reg;
+               mux->mask = BIT(mc->mux_width) - 1;
+               mux->shift = mc->mux_shift;
+               mux->lock = lock;
+
+               mux_hw = &mux->hw;
+               mux_ops = &clk_mux_ops;
+
+               parent_names = mc->parent_names;
+               num_parents = mc->num_parents;
+       } else {
+               parent = mc->parent;
+               parent_names = &parent;
+               num_parents = 1;
+       }
+
+       if (mc->gate_shift >= 0) {
+               gate = kzalloc(sizeof(*gate), GFP_KERNEL);
+               if (!gate) {
+                       ret = -ENOMEM;
+                       goto err_out;
+               }
+
+               gate->reg = base + mc->gate_reg;
+               gate->bit_idx = mc->gate_shift;
+               gate->flags = CLK_GATE_SET_TO_DISABLE;
+               gate->lock = lock;
+
+               gate_hw = &gate->hw;
+               gate_ops = &clk_gate_ops;
+       }
+
+       if (mc->divider_shift >= 0) {
+               div = kzalloc(sizeof(*div), GFP_KERNEL);
+               if (!div) {
+                       ret = -ENOMEM;
+                       goto err_out;
+               }
+
+               div->reg = base + mc->divider_reg;
+               div->shift = mc->divider_shift;
+               div->width = mc->divider_width;
+               div->lock = lock;
+
+               div_hw = &div->hw;
+               div_ops = &clk_divider_ops;
+       }
+
+       clk = clk_register_composite(NULL, mc->name, parent_names, num_parents,
+               mux_hw, mux_ops,
+               div_hw, div_ops,
+               gate_hw, gate_ops,
+               mc->flags);
+
+       if (IS_ERR(clk)) {
+               kfree(gate);
+               kfree(mux);
+       }
+
+       return clk;
+err_out:
+       kfree(mux);
+
+       return ERR_PTR(ret);
+}
+
+void mtk_clk_register_composites(const struct mtk_composite *mcs,
+               int num, void __iomem *base, spinlock_t *lock,
+               struct clk_onecell_data *clk_data)
+{
+       struct clk *clk;
+       int i;
+
+       for (i = 0; i < num; i++) {
+               const struct mtk_composite *mc = &mcs[i];
+
+               clk = mtk_clk_register_composite(mc, base, lock);
+
+               if (IS_ERR(clk)) {
+                       pr_err("Failed to register clk %s: %ld\n",
+                                       mc->name, PTR_ERR(clk));
+                       continue;
+               }
+
+               if (clk_data)
+                       clk_data->clks[mc->id] = clk;
+       }
+}
diff --git a/drivers/clk/mediatek/clk-mtk.h b/drivers/clk/mediatek/clk-mtk.h
new file mode 100644 (file)
index 0000000..9dda9d8
--- /dev/null
@@ -0,0 +1,169 @@
+/*
+ * Copyright (c) 2014 MediaTek Inc.
+ * Author: James Liao <jamesjj.liao@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __DRV_CLK_MTK_H
+#define __DRV_CLK_MTK_H
+
+#include <linux/regmap.h>
+#include <linux/bitops.h>
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+
+#define MAX_MUX_GATE_BIT       31
+#define INVALID_MUX_GATE_BIT   (MAX_MUX_GATE_BIT + 1)
+
+#define MHZ (1000 * 1000)
+
+struct mtk_fixed_factor {
+       int id;
+       const char *name;
+       const char *parent_name;
+       int mult;
+       int div;
+};
+
+#define FACTOR(_id, _name, _parent, _mult, _div) {     \
+               .id = _id,                              \
+               .name = _name,                          \
+               .parent_name = _parent,                 \
+               .mult = _mult,                          \
+               .div = _div,                            \
+       }
+
+extern void mtk_clk_register_factors(const struct mtk_fixed_factor *clks,
+               int num, struct clk_onecell_data *clk_data);
+
+struct mtk_composite {
+       int id;
+       const char *name;
+       const char * const *parent_names;
+       const char *parent;
+       unsigned flags;
+
+       uint32_t mux_reg;
+       uint32_t divider_reg;
+       uint32_t gate_reg;
+
+       signed char mux_shift;
+       signed char mux_width;
+       signed char gate_shift;
+
+       signed char divider_shift;
+       signed char divider_width;
+
+       signed char num_parents;
+};
+
+#define MUX_GATE(_id, _name, _parents, _reg, _shift, _width, _gate) {  \
+               .id = _id,                                              \
+               .name = _name,                                          \
+               .mux_reg = _reg,                                        \
+               .mux_shift = _shift,                                    \
+               .mux_width = _width,                                    \
+               .gate_reg = _reg,                                       \
+               .gate_shift = _gate,                                    \
+               .divider_shift = -1,                                    \
+               .parent_names = _parents,                               \
+               .num_parents = ARRAY_SIZE(_parents),                    \
+               .flags = CLK_SET_RATE_PARENT,                           \
+       }
+
+#define MUX(_id, _name, _parents, _reg, _shift, _width) {              \
+               .id = _id,                                              \
+               .name = _name,                                          \
+               .mux_reg = _reg,                                        \
+               .mux_shift = _shift,                                    \
+               .mux_width = _width,                                    \
+               .gate_shift = -1,                                       \
+               .divider_shift = -1,                                    \
+               .parent_names = _parents,                               \
+               .num_parents = ARRAY_SIZE(_parents),                    \
+               .flags = CLK_SET_RATE_PARENT,                           \
+       }
+
+#define DIV_GATE(_id, _name, _parent, _gate_reg, _gate_shift, _div_reg, _div_width, _div_shift) {      \
+               .id = _id,                                              \
+               .parent = _parent,                                      \
+               .name = _name,                                          \
+               .divider_reg = _div_reg,                                \
+               .divider_shift = _div_shift,                            \
+               .divider_width = _div_width,                            \
+               .gate_reg = _gate_reg,                                  \
+               .gate_shift = _gate_shift,                              \
+               .mux_shift = -1,                                        \
+               .flags = 0,                                             \
+       }
+
+struct clk *mtk_clk_register_composite(const struct mtk_composite *mc,
+               void __iomem *base, spinlock_t *lock);
+
+void mtk_clk_register_composites(const struct mtk_composite *mcs,
+               int num, void __iomem *base, spinlock_t *lock,
+               struct clk_onecell_data *clk_data);
+
+struct mtk_gate_regs {
+       u32 sta_ofs;
+       u32 clr_ofs;
+       u32 set_ofs;
+};
+
+struct mtk_gate {
+       int id;
+       const char *name;
+       const char *parent_name;
+       const struct mtk_gate_regs *regs;
+       int shift;
+       const struct clk_ops *ops;
+};
+
+int mtk_clk_register_gates(struct device_node *node, const struct mtk_gate *clks,
+               int num, struct clk_onecell_data *clk_data);
+
+struct clk_onecell_data *mtk_alloc_clk_data(unsigned int clk_num);
+
+#define HAVE_RST_BAR   BIT(0)
+
+struct mtk_pll_data {
+       int id;
+       const char *name;
+       uint32_t reg;
+       uint32_t pwr_reg;
+       uint32_t en_mask;
+       uint32_t pd_reg;
+       uint32_t tuner_reg;
+       int pd_shift;
+       unsigned int flags;
+       const struct clk_ops *ops;
+       u32 rst_bar_mask;
+       unsigned long fmax;
+       int pcwbits;
+       uint32_t pcw_reg;
+       int pcw_shift;
+};
+
+void __init mtk_clk_register_plls(struct device_node *node,
+               const struct mtk_pll_data *plls, int num_plls,
+               struct clk_onecell_data *clk_data);
+
+#ifdef CONFIG_RESET_CONTROLLER
+void mtk_register_reset_controller(struct device_node *np,
+                       unsigned int num_regs, int regofs);
+#else
+static inline void mtk_register_reset_controller(struct device_node *np,
+                       unsigned int num_regs, int regofs)
+{
+}
+#endif
+
+#endif /* __DRV_CLK_MTK_H */
diff --git a/drivers/clk/mediatek/clk-pll.c b/drivers/clk/mediatek/clk-pll.c
new file mode 100644 (file)
index 0000000..44409e9
--- /dev/null
@@ -0,0 +1,332 @@
+/*
+ * Copyright (c) 2014 MediaTek Inc.
+ * Author: James Liao <jamesjj.liao@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/clkdev.h>
+#include <linux/delay.h>
+
+#include "clk-mtk.h"
+
+#define REG_CON0               0
+#define REG_CON1               4
+
+#define CON0_BASE_EN           BIT(0)
+#define CON0_PWR_ON            BIT(0)
+#define CON0_ISO_EN            BIT(1)
+#define CON0_PCW_CHG           BIT(31)
+
+#define AUDPLL_TUNER_EN                BIT(31)
+
+#define POSTDIV_MASK           0x7
+#define INTEGER_BITS           7
+
+/*
+ * MediaTek PLLs are configured through their pcw value. The pcw value sets
+ * the divider in the PLL feedback loop: 7 bits for the integer part and the
+ * remaining bits (if present) for the fractional part. The PLLs also have a
+ * 3-bit power-of-two post divider.
+ */
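+
+/*
+ * Worked example with assumed register values: for pcwbits = 21 the
+ * fractional part is pcwfbits = 21 - 7 = 14 bits, so with fin = 26 MHz,
+ * pcw = 42 << 14 = 0xa8000 and postdiv = 1 this yields
+ * vco = (26 MHz * 0xa8000) >> 14 = 1092 MHz and rate = 1092 MHz / 1.
+ */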
+
+struct mtk_clk_pll {
+       struct clk_hw   hw;
+       void __iomem    *base_addr;
+       void __iomem    *pd_addr;
+       void __iomem    *pwr_addr;
+       void __iomem    *tuner_addr;
+       void __iomem    *pcw_addr;
+       const struct mtk_pll_data *data;
+};
+
+static inline struct mtk_clk_pll *to_mtk_clk_pll(struct clk_hw *hw)
+{
+       return container_of(hw, struct mtk_clk_pll, hw);
+}
+
+static int mtk_pll_is_prepared(struct clk_hw *hw)
+{
+       struct mtk_clk_pll *pll = to_mtk_clk_pll(hw);
+
+       return (readl(pll->base_addr + REG_CON0) & CON0_BASE_EN) != 0;
+}
+
+static unsigned long __mtk_pll_recalc_rate(struct mtk_clk_pll *pll, u32 fin,
+               u32 pcw, int postdiv)
+{
+       int pcwbits = pll->data->pcwbits;
+       int pcwfbits;
+       u64 vco;
+       u8 c = 0;
+
+       /* The fractional part of the PLL divider. */
+       pcwfbits = pcwbits > INTEGER_BITS ? pcwbits - INTEGER_BITS : 0;
+
+       vco = (u64)fin * pcw;
+
+       if (pcwfbits && (vco & GENMASK(pcwfbits - 1, 0)))
+               c = 1;
+
+       vco >>= pcwfbits;
+
+       if (c)
+               vco++;
+
+       return ((unsigned long)vco + postdiv - 1) / postdiv;
+}
+
+static void mtk_pll_set_rate_regs(struct mtk_clk_pll *pll, u32 pcw,
+               int postdiv)
+{
+       u32 con1, pd, val;
+       int pll_en;
+
+       /* set postdiv */
+       pd = readl(pll->pd_addr);
+       pd &= ~(POSTDIV_MASK << pll->data->pd_shift);
+       pd |= (ffs(postdiv) - 1) << pll->data->pd_shift;
+       writel(pd, pll->pd_addr);
+
+       pll_en = readl(pll->base_addr + REG_CON0) & CON0_BASE_EN;
+
+       /* set pcw */
+       val = readl(pll->pcw_addr);
+
+       val &= ~GENMASK(pll->data->pcw_shift + pll->data->pcwbits - 1,
+                       pll->data->pcw_shift);
+       val |= pcw << pll->data->pcw_shift;
+       writel(val, pll->pcw_addr);
+
+       con1 = readl(pll->base_addr + REG_CON1);
+
+       if (pll_en)
+               con1 |= CON0_PCW_CHG;
+
+       writel(con1, pll->base_addr + REG_CON1);
+       if (pll->tuner_addr)
+               writel(con1 + 1, pll->tuner_addr);
+
+       if (pll_en)
+               udelay(20);
+}
+
+/*
+ * mtk_pll_calc_values - calculate good pcw and postdiv values for a desired frequency.
+ * @pll:       The pll
+ * @pcw:       The pcw value (output)
+ * @postdiv:   The post divider (output)
+ * @freq:      The desired target frequency
+ * @fin:       The input frequency
+ *
+ */
+static void mtk_pll_calc_values(struct mtk_clk_pll *pll, u32 *pcw, u32 *postdiv,
+               u32 freq, u32 fin)
+{
+       unsigned long fmin = 1000 * MHZ;
+       u64 _pcw;
+       u32 val;
+
+       if (freq > pll->data->fmax)
+               freq = pll->data->fmax;
+
+       for (val = 0; val < 4; val++) {
+               *postdiv = 1 << val;
+               if (freq * *postdiv >= fmin)
+                       break;
+       }
+
+       /* _pcw = freq * postdiv / fin * 2^pcwfbits */
+       _pcw = ((u64)freq << val) << (pll->data->pcwbits - INTEGER_BITS);
+       do_div(_pcw, fin);
+
+       *pcw = (u32)_pcw;
+}
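+
+/*
+ * Example (assumed request, 21-bit pcw, fin = 26 MHz): for freq = 364 MHz
+ * the loop settles on postdiv = 4 (364 MHz * 4 >= fmin), and
+ * _pcw = 364 MHz * 4 / 26 MHz * 2^14 = 56 * 2^14 = 0xe0000, which
+ * __mtk_pll_recalc_rate() turns back into (26 MHz * 56) / 4 = 364 MHz.
+ */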
+
+static int mtk_pll_set_rate(struct clk_hw *hw, unsigned long rate,
+               unsigned long parent_rate)
+{
+       struct mtk_clk_pll *pll = to_mtk_clk_pll(hw);
+       u32 pcw = 0;
+       u32 postdiv;
+
+       mtk_pll_calc_values(pll, &pcw, &postdiv, rate, parent_rate);
+       mtk_pll_set_rate_regs(pll, pcw, postdiv);
+
+       return 0;
+}
+
+static unsigned long mtk_pll_recalc_rate(struct clk_hw *hw,
+               unsigned long parent_rate)
+{
+       struct mtk_clk_pll *pll = to_mtk_clk_pll(hw);
+       u32 postdiv;
+       u32 pcw;
+
+       postdiv = (readl(pll->pd_addr) >> pll->data->pd_shift) & POSTDIV_MASK;
+       postdiv = 1 << postdiv;
+
+       pcw = readl(pll->pcw_addr) >> pll->data->pcw_shift;
+       pcw &= GENMASK(pll->data->pcwbits - 1, 0);
+
+       return __mtk_pll_recalc_rate(pll, parent_rate, pcw, postdiv);
+}
+
+static long mtk_pll_round_rate(struct clk_hw *hw, unsigned long rate,
+               unsigned long *prate)
+{
+       struct mtk_clk_pll *pll = to_mtk_clk_pll(hw);
+       u32 pcw = 0;
+       int postdiv;
+
+       mtk_pll_calc_values(pll, &pcw, &postdiv, rate, *prate);
+
+       return __mtk_pll_recalc_rate(pll, *prate, pcw, postdiv);
+}
+
+static int mtk_pll_prepare(struct clk_hw *hw)
+{
+       struct mtk_clk_pll *pll = to_mtk_clk_pll(hw);
+       u32 r;
+
+       r = readl(pll->pwr_addr) | CON0_PWR_ON;
+       writel(r, pll->pwr_addr);
+       udelay(1);
+
+       r = readl(pll->pwr_addr) & ~CON0_ISO_EN;
+       writel(r, pll->pwr_addr);
+       udelay(1);
+
+       r = readl(pll->base_addr + REG_CON0);
+       r |= pll->data->en_mask;
+       writel(r, pll->base_addr + REG_CON0);
+
+       if (pll->tuner_addr) {
+               r = readl(pll->tuner_addr) | AUDPLL_TUNER_EN;
+               writel(r, pll->tuner_addr);
+       }
+
+       udelay(20);
+
+       if (pll->data->flags & HAVE_RST_BAR) {
+               r = readl(pll->base_addr + REG_CON0);
+               r |= pll->data->rst_bar_mask;
+               writel(r, pll->base_addr + REG_CON0);
+       }
+
+       return 0;
+}
+
+static void mtk_pll_unprepare(struct clk_hw *hw)
+{
+       struct mtk_clk_pll *pll = to_mtk_clk_pll(hw);
+       u32 r;
+
+       if (pll->data->flags & HAVE_RST_BAR) {
+               r = readl(pll->base_addr + REG_CON0);
+               r &= ~pll->data->rst_bar_mask;
+               writel(r, pll->base_addr + REG_CON0);
+       }
+
+       if (pll->tuner_addr) {
+               r = readl(pll->tuner_addr) & ~AUDPLL_TUNER_EN;
+               writel(r, pll->tuner_addr);
+       }
+
+       r = readl(pll->base_addr + REG_CON0);
+       r &= ~CON0_BASE_EN;
+       writel(r, pll->base_addr + REG_CON0);
+
+       r = readl(pll->pwr_addr) | CON0_ISO_EN;
+       writel(r, pll->pwr_addr);
+
+       r = readl(pll->pwr_addr) & ~CON0_PWR_ON;
+       writel(r, pll->pwr_addr);
+}
+
+static const struct clk_ops mtk_pll_ops = {
+       .is_prepared    = mtk_pll_is_prepared,
+       .prepare        = mtk_pll_prepare,
+       .unprepare      = mtk_pll_unprepare,
+       .recalc_rate    = mtk_pll_recalc_rate,
+       .round_rate     = mtk_pll_round_rate,
+       .set_rate       = mtk_pll_set_rate,
+};
+
+static struct clk *mtk_clk_register_pll(const struct mtk_pll_data *data,
+               void __iomem *base)
+{
+       struct mtk_clk_pll *pll;
+       struct clk_init_data init = {};
+       struct clk *clk;
+       const char *parent_name = "clk26m";
+
+       pll = kzalloc(sizeof(*pll), GFP_KERNEL);
+       if (!pll)
+               return ERR_PTR(-ENOMEM);
+
+       pll->base_addr = base + data->reg;
+       pll->pwr_addr = base + data->pwr_reg;
+       pll->pd_addr = base + data->pd_reg;
+       pll->pcw_addr = base + data->pcw_reg;
+       if (data->tuner_reg)
+               pll->tuner_addr = base + data->tuner_reg;
+       pll->hw.init = &init;
+       pll->data = data;
+
+       init.name = data->name;
+       init.ops = &mtk_pll_ops;
+       init.parent_names = &parent_name;
+       init.num_parents = 1;
+
+       clk = clk_register(NULL, &pll->hw);
+
+       if (IS_ERR(clk))
+               kfree(pll);
+
+       return clk;
+}
+
+void __init mtk_clk_register_plls(struct device_node *node,
+               const struct mtk_pll_data *plls, int num_plls, struct clk_onecell_data *clk_data)
+{
+       void __iomem *base;
+       int r, i;
+       struct clk *clk;
+
+       base = of_iomap(node, 0);
+       if (!base) {
+               pr_err("%s(): ioremap failed\n", __func__);
+               return;
+       }
+
+       for (i = 0; i < num_plls; i++) {
+               const struct mtk_pll_data *pll = &plls[i];
+
+               clk = mtk_clk_register_pll(pll, base);
+
+               if (IS_ERR(clk)) {
+                       pr_err("Failed to register clk %s: %ld\n",
+                                       pll->name, PTR_ERR(clk));
+                       continue;
+               }
+
+               clk_data->clks[pll->id] = clk;
+       }
+
+       r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+       if (r)
+               pr_err("%s(): could not register clock provider: %d\n",
+                       __func__, r);
+}
diff --git a/drivers/clk/mediatek/reset.c b/drivers/clk/mediatek/reset.c
new file mode 100644 (file)
index 0000000..9e9fe4b
--- /dev/null
@@ -0,0 +1,97 @@
+/*
+ * Copyright (c) 2014 MediaTek Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/reset-controller.h>
+#include <linux/slab.h>
+
+#include "clk-mtk.h"
+
+struct mtk_reset {
+       struct regmap *regmap;
+       int regofs;
+       struct reset_controller_dev rcdev;
+};
+
+static int mtk_reset_assert(struct reset_controller_dev *rcdev,
+                             unsigned long id)
+{
+       struct mtk_reset *data = container_of(rcdev, struct mtk_reset, rcdev);
+
+       return regmap_update_bits(data->regmap, data->regofs + ((id / 32) << 2),
+                       BIT(id % 32), ~0);
+}
+
+static int mtk_reset_deassert(struct reset_controller_dev *rcdev,
+                               unsigned long id)
+{
+       struct mtk_reset *data = container_of(rcdev, struct mtk_reset, rcdev);
+
+       return regmap_update_bits(data->regmap, data->regofs + ((id / 32) << 2),
+                       BIT(id % 32), 0);
+}
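+
+/*
+ * A reset id maps onto a register/bit pair: with the 0x30 regofs used for
+ * infracfg, id 34 for instance targets bit 34 % 32 = 2 of the register at
+ * 0x30 + (34 / 32) * 4 = 0x34.
+ */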
+
+static int mtk_reset(struct reset_controller_dev *rcdev,
+                             unsigned long id)
+{
+       int ret;
+
+       ret = mtk_reset_assert(rcdev, id);
+       if (ret)
+               return ret;
+
+       return mtk_reset_deassert(rcdev, id);
+}
+
+static struct reset_control_ops mtk_reset_ops = {
+       .assert = mtk_reset_assert,
+       .deassert = mtk_reset_deassert,
+       .reset = mtk_reset,
+};
+
+void mtk_register_reset_controller(struct device_node *np,
+                       unsigned int num_regs, int regofs)
+{
+       struct mtk_reset *data;
+       int ret;
+       struct regmap *regmap;
+
+       regmap = syscon_node_to_regmap(np);
+       if (IS_ERR(regmap)) {
+               pr_err("Cannot find regmap for %s: %ld\n", np->full_name,
+                               PTR_ERR(regmap));
+               return;
+       }
+
+       data = kzalloc(sizeof(*data), GFP_KERNEL);
+       if (!data)
+               return;
+
+       data->regmap = regmap;
+       data->regofs = regofs;
+       data->rcdev.owner = THIS_MODULE;
+       data->rcdev.nr_resets = num_regs * 32;
+       data->rcdev.ops = &mtk_reset_ops;
+       data->rcdev.of_node = np;
+
+       ret = reset_controller_register(&data->rcdev);
+       if (ret) {
+               pr_err("could not register reset controller: %d\n", ret);
+               kfree(data);
+               return;
+       }
+}
diff --git a/drivers/clk/meson/Makefile b/drivers/clk/meson/Makefile
new file mode 100644 (file)
index 0000000..6d45531
--- /dev/null
@@ -0,0 +1,6 @@
+#
+# Makefile for Meson specific clk
+#
+
+obj-y += clkc.o clk-pll.o clk-cpu.o
+obj-y += meson8b-clkc.o
diff --git a/drivers/clk/meson/clk-cpu.c b/drivers/clk/meson/clk-cpu.c
new file mode 100644 (file)
index 0000000..71ad493
--- /dev/null
@@ -0,0 +1,242 @@
+/*
+ * Copyright (c) 2015 Endless Mobile, Inc.
+ * Author: Carlo Caione <carlo@endlessm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+/*
+ * CPU clock path:
+ *
+ *                           +-[/N]-----|3|
+ *             MUX2  +--[/3]-+----------|2| MUX1
+ * [sys_pll]---|1|   |--[/2]------------|1|-|1|
+ *             | |---+------------------|0| | |----- [a5_clk]
+ *          +--|0|                          | |
+ * [xtal]---+-------------------------------|0|
+ *
+ *
+ *
+ */
+
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/slab.h>
+#include <linux/clk-provider.h>
+
+#define MESON_CPU_CLK_CNTL1            0x00
+#define MESON_CPU_CLK_CNTL             0x40
+
+#define MESON_CPU_CLK_MUX1             BIT(7)
+#define MESON_CPU_CLK_MUX2             BIT(0)
+
+#define MESON_N_WIDTH                  9
+#define MESON_N_SHIFT                  20
+#define MESON_SEL_WIDTH                        2
+#define MESON_SEL_SHIFT                        2
+
+#include "clkc.h"
+
+struct meson_clk_cpu {
+       struct notifier_block           clk_nb;
+       const struct clk_div_table      *div_table;
+       struct clk_hw                   hw;
+       void __iomem                    *base;
+       u16                             reg_off;
+};
+#define to_meson_clk_cpu_hw(_hw) container_of(_hw, struct meson_clk_cpu, hw)
+#define to_meson_clk_cpu_nb(_nb) container_of(_nb, struct meson_clk_cpu, clk_nb)
+
+static long meson_clk_cpu_round_rate(struct clk_hw *hw, unsigned long rate,
+                                    unsigned long *prate)
+{
+       struct meson_clk_cpu *clk_cpu = to_meson_clk_cpu_hw(hw);
+
+       return divider_round_rate(hw, rate, prate, clk_cpu->div_table,
+                                 MESON_N_WIDTH, CLK_DIVIDER_ROUND_CLOSEST);
+}
+
+static int meson_clk_cpu_set_rate(struct clk_hw *hw, unsigned long rate,
+                                 unsigned long parent_rate)
+{
+       struct meson_clk_cpu *clk_cpu = to_meson_clk_cpu_hw(hw);
+       unsigned int div, sel, N = 0;
+       u32 reg;
+
+       div = DIV_ROUND_UP(parent_rate, rate);
+
+       if (div <= 3) {
+               sel = div - 1;
+       } else {
+               sel = 3;
+               N = div / 2;
+       }
+
+       reg = readl(clk_cpu->base + clk_cpu->reg_off + MESON_CPU_CLK_CNTL1);
+       reg = PARM_SET(MESON_N_WIDTH, MESON_N_SHIFT, reg, N);
+       writel(reg, clk_cpu->base + clk_cpu->reg_off + MESON_CPU_CLK_CNTL1);
+
+       reg = readl(clk_cpu->base + clk_cpu->reg_off + MESON_CPU_CLK_CNTL);
+       reg = PARM_SET(MESON_SEL_WIDTH, MESON_SEL_SHIFT, reg, sel);
+       writel(reg, clk_cpu->base + clk_cpu->reg_off + MESON_CPU_CLK_CNTL);
+
+       return 0;
+}
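+
+/*
+ * Example (assumed rates): with a 1536 MHz parent, a request for 768 MHz
+ * gives div = 2 and therefore sel = 1, while a request for 192 MHz gives
+ * div = 8, so sel = 3 and N = 4, which meson_clk_cpu_recalc_rate() reads
+ * back as an effective divider of 2 * N = 8.
+ */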
+
+static unsigned long meson_clk_cpu_recalc_rate(struct clk_hw *hw,
+                                              unsigned long parent_rate)
+{
+       struct meson_clk_cpu *clk_cpu = to_meson_clk_cpu_hw(hw);
+       unsigned int N, sel;
+       unsigned int div = 1;
+       u32 reg;
+
+       reg = readl(clk_cpu->base + clk_cpu->reg_off + MESON_CPU_CLK_CNTL1);
+       N = PARM_GET(MESON_N_WIDTH, MESON_N_SHIFT, reg);
+
+       reg = readl(clk_cpu->base + clk_cpu->reg_off + MESON_CPU_CLK_CNTL);
+       sel = PARM_GET(MESON_SEL_WIDTH, MESON_SEL_SHIFT, reg);
+
+       if (sel < 3)
+               div = sel + 1;
+       else
+               div = 2 * N;
+
+       return parent_rate / div;
+}
+
+static int meson_clk_cpu_pre_rate_change(struct meson_clk_cpu *clk_cpu,
+                                        struct clk_notifier_data *ndata)
+{
+       u32 cpu_clk_cntl;
+
+       /* switch MUX1 to xtal */
+       cpu_clk_cntl = readl(clk_cpu->base + clk_cpu->reg_off
+                               + MESON_CPU_CLK_CNTL);
+       cpu_clk_cntl &= ~MESON_CPU_CLK_MUX1;
+       writel(cpu_clk_cntl, clk_cpu->base + clk_cpu->reg_off
+                               + MESON_CPU_CLK_CNTL);
+       udelay(100);
+
+       /* switch MUX2 to sys-pll */
+       cpu_clk_cntl |= MESON_CPU_CLK_MUX2;
+       writel(cpu_clk_cntl, clk_cpu->base + clk_cpu->reg_off
+                               + MESON_CPU_CLK_CNTL);
+
+       return 0;
+}
+
+static int meson_clk_cpu_post_rate_change(struct meson_clk_cpu *clk_cpu,
+                                         struct clk_notifier_data *ndata)
+{
+       u32 cpu_clk_cntl;
+
+       /* switch MUX1 to divisors' output */
+       cpu_clk_cntl = readl(clk_cpu->base + clk_cpu->reg_off
+                               + MESON_CPU_CLK_CNTL);
+       cpu_clk_cntl |= MESON_CPU_CLK_MUX1;
+       writel(cpu_clk_cntl, clk_cpu->base + clk_cpu->reg_off
+                               + MESON_CPU_CLK_CNTL);
+       udelay(100);
+
+       return 0;
+}
+
+/*
+ * This clock notifier is called when the frequency of the parent
+ * PLL clock is to be changed. We use the xtal input as temporary parent
+ * while the PLL frequency is stabilized.
+ */
+static int meson_clk_cpu_notifier_cb(struct notifier_block *nb,
+                                    unsigned long event, void *data)
+{
+       struct clk_notifier_data *ndata = data;
+       struct meson_clk_cpu *clk_cpu = to_meson_clk_cpu_nb(nb);
+       int ret = 0;
+
+       if (event == PRE_RATE_CHANGE)
+               ret = meson_clk_cpu_pre_rate_change(clk_cpu, ndata);
+       else if (event == POST_RATE_CHANGE)
+               ret = meson_clk_cpu_post_rate_change(clk_cpu, ndata);
+
+       return notifier_from_errno(ret);
+}
+
+static const struct clk_ops meson_clk_cpu_ops = {
+       .recalc_rate    = meson_clk_cpu_recalc_rate,
+       .round_rate     = meson_clk_cpu_round_rate,
+       .set_rate       = meson_clk_cpu_set_rate,
+};
+
+struct clk *meson_clk_register_cpu(const struct clk_conf *clk_conf,
+                                  void __iomem *reg_base,
+                                  spinlock_t *lock)
+{
+       struct clk *clk;
+       struct clk *pclk;
+       struct meson_clk_cpu *clk_cpu;
+       struct clk_init_data init;
+       int ret;
+
+       clk_cpu = kzalloc(sizeof(*clk_cpu), GFP_KERNEL);
+       if (!clk_cpu)
+               return ERR_PTR(-ENOMEM);
+
+       clk_cpu->base = reg_base;
+       clk_cpu->reg_off = clk_conf->reg_off;
+       clk_cpu->div_table = clk_conf->conf.div_table;
+       clk_cpu->clk_nb.notifier_call = meson_clk_cpu_notifier_cb;
+
+       init.name = clk_conf->clk_name;
+       init.ops = &meson_clk_cpu_ops;
+       init.flags = clk_conf->flags | CLK_GET_RATE_NOCACHE;
+       init.flags |= CLK_SET_RATE_PARENT;
+       init.parent_names = clk_conf->clks_parent;
+       init.num_parents = 1;
+
+       clk_cpu->hw.init = &init;
+
+       pclk = __clk_lookup(clk_conf->clks_parent[0]);
+       if (!pclk) {
+               pr_err("%s: could not lookup parent clock %s\n",
+                               __func__, clk_conf->clks_parent[0]);
+               ret = -EINVAL;
+               goto free_clk;
+       }
+
+       ret = clk_notifier_register(pclk, &clk_cpu->clk_nb);
+       if (ret) {
+               pr_err("%s: failed to register clock notifier for %s\n",
+                               __func__, clk_conf->clk_name);
+               goto free_clk;
+       }
+
+       clk = clk_register(NULL, &clk_cpu->hw);
+       if (IS_ERR(clk)) {
+               ret = PTR_ERR(clk);
+               goto unregister_clk_nb;
+       }
+
+       return clk;
+
+unregister_clk_nb:
+       clk_notifier_unregister(pclk, &clk_cpu->clk_nb);
+free_clk:
+       kfree(clk_cpu);
+
+       return ERR_PTR(ret);
+}
+
diff --git a/drivers/clk/meson/clk-pll.c b/drivers/clk/meson/clk-pll.c
new file mode 100644 (file)
index 0000000..664edf0
--- /dev/null
@@ -0,0 +1,227 @@
+/*
+ * Copyright (c) 2015 Endless Mobile, Inc.
+ * Author: Carlo Caione <carlo@endlessm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+/*
+ * In the most basic form, a Meson PLL is composed as follows:
+ *
+ *                     PLL
+ *      +------------------------------+
+ *      |                              |
+ * in -----[ /N ]---[ *M ]---[ >>OD ]----->> out
+ *      |         ^        ^           |
+ *      +------------------------------+
+ *                |        |
+ *               FREF     VCO
+ *
+ * out = (in * M / N) >> OD
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+
+#include "clkc.h"
+
+#define MESON_PLL_RESET                                BIT(29)
+#define MESON_PLL_LOCK                         BIT(31)
+
+struct meson_clk_pll {
+       struct clk_hw   hw;
+       void __iomem    *base;
+       struct pll_conf *conf;
+       unsigned int    rate_count;
+       spinlock_t      *lock;
+};
+#define to_meson_clk_pll(_hw) container_of(_hw, struct meson_clk_pll, hw)
+
+static unsigned long meson_clk_pll_recalc_rate(struct clk_hw *hw,
+                                               unsigned long parent_rate)
+{
+       struct meson_clk_pll *pll = to_meson_clk_pll(hw);
+       struct parm *p;
+       unsigned long parent_rate_mhz = parent_rate / 1000000;
+       unsigned long rate_mhz;
+       u16 n, m, od;
+       u32 reg;
+
+       p = &pll->conf->n;
+       reg = readl(pll->base + p->reg_off);
+       n = PARM_GET(p->width, p->shift, reg);
+
+       p = &pll->conf->m;
+       reg = readl(pll->base + p->reg_off);
+       m = PARM_GET(p->width, p->shift, reg);
+
+       p = &pll->conf->od;
+       reg = readl(pll->base + p->reg_off);
+       od = PARM_GET(p->width, p->shift, reg);
+
+       rate_mhz = (parent_rate_mhz * m / n) >> od;
+
+       return rate_mhz * 1000000;
+}
+
+static long meson_clk_pll_round_rate(struct clk_hw *hw, unsigned long rate,
+                                    unsigned long *parent_rate)
+{
+       struct meson_clk_pll *pll = to_meson_clk_pll(hw);
+       const struct pll_rate_table *rate_table = pll->conf->rate_table;
+       int i;
+
+       for (i = 0; i < pll->rate_count; i++) {
+               if (rate <= rate_table[i].rate)
+                       return rate_table[i].rate;
+       }
+
+       /* else return the smallest value */
+       return rate_table[0].rate;
+}
+
+static const struct pll_rate_table *meson_clk_get_pll_settings(struct meson_clk_pll *pll,
+                                                              unsigned long rate)
+{
+       const struct pll_rate_table *rate_table = pll->conf->rate_table;
+       int i;
+
+       for (i = 0; i < pll->rate_count; i++) {
+               if (rate == rate_table[i].rate)
+                       return &rate_table[i];
+       }
+       return NULL;
+}
+
+static int meson_clk_pll_wait_lock(struct meson_clk_pll *pll,
+                                  struct parm *p_n)
+{
+       int delay = 24000000;
+       u32 reg;
+
+       while (delay > 0) {
+               reg = readl(pll->base + p_n->reg_off);
+
+               if (reg & MESON_PLL_LOCK)
+                       return 0;
+               delay--;
+       }
+       return -ETIMEDOUT;
+}
+
+static int meson_clk_pll_set_rate(struct clk_hw *hw, unsigned long rate,
+                                 unsigned long parent_rate)
+{
+       struct meson_clk_pll *pll = to_meson_clk_pll(hw);
+       struct parm *p;
+       const struct pll_rate_table *rate_set;
+       unsigned long old_rate;
+       int ret = 0;
+       u32 reg;
+
+       if (parent_rate == 0 || rate == 0)
+               return -EINVAL;
+
+       old_rate = rate;
+
+       rate_set = meson_clk_get_pll_settings(pll, rate);
+       if (!rate_set)
+               return -EINVAL;
+
+       /* PLL reset */
+       p = &pll->conf->n;
+       reg = readl(pll->base + p->reg_off);
+       writel(reg | MESON_PLL_RESET, pll->base + p->reg_off);
+
+       reg = PARM_SET(p->width, p->shift, reg, rate_set->n);
+       writel(reg, pll->base + p->reg_off);
+
+       p = &pll->conf->m;
+       reg = readl(pll->base + p->reg_off);
+       reg = PARM_SET(p->width, p->shift, reg, rate_set->m);
+       writel(reg, pll->base + p->reg_off);
+
+       p = &pll->conf->od;
+       reg = readl(pll->base + p->reg_off);
+       reg = PARM_SET(p->width, p->shift, reg, rate_set->od);
+       writel(reg, pll->base + p->reg_off);
+
+       p = &pll->conf->n;
+       ret = meson_clk_pll_wait_lock(pll, p);
+       if (ret) {
+               pr_warn("%s: pll did not lock, trying to restore old rate %lu\n",
+                       __func__, old_rate);
+               meson_clk_pll_set_rate(hw, old_rate, parent_rate);
+       }
+
+       return ret;
+}
+
+static const struct clk_ops meson_clk_pll_ops = {
+       .recalc_rate    = meson_clk_pll_recalc_rate,
+       .round_rate     = meson_clk_pll_round_rate,
+       .set_rate       = meson_clk_pll_set_rate,
+};
+
+static const struct clk_ops meson_clk_pll_ro_ops = {
+       .recalc_rate    = meson_clk_pll_recalc_rate,
+};
+
+struct clk *meson_clk_register_pll(const struct clk_conf *clk_conf,
+                                  void __iomem *reg_base,
+                                  spinlock_t *lock)
+{
+       struct clk *clk;
+       struct meson_clk_pll *clk_pll;
+       struct clk_init_data init;
+
+       clk_pll = kzalloc(sizeof(*clk_pll), GFP_KERNEL);
+       if (!clk_pll)
+               return ERR_PTR(-ENOMEM);
+
+       clk_pll->base = reg_base + clk_conf->reg_off;
+       clk_pll->lock = lock;
+       clk_pll->conf = clk_conf->conf.pll;
+
+       init.name = clk_conf->clk_name;
+       init.flags = clk_conf->flags | CLK_GET_RATE_NOCACHE;
+
+       init.parent_names = &clk_conf->clks_parent[0];
+       init.num_parents = 1;
+       init.ops = &meson_clk_pll_ro_ops;
+
+       /* If no rate_table is specified we assume the PLL is read-only */
+       if (clk_pll->conf->rate_table) {
+               int len;
+
+               for (len = 0; clk_pll->conf->rate_table[len].rate != 0; )
+                       len++;
+
+               clk_pll->rate_count = len;
+               init.ops = &meson_clk_pll_ops;
+       }
+
+       clk_pll->hw.init = &init;
+
+       clk = clk_register(NULL, &clk_pll->hw);
+       if (IS_ERR(clk))
+               kfree(clk_pll);
+
+       return clk;
+}
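As a quick sanity check of the out = (in * M / N) >> OD relation implemented by
meson_clk_pll_recalc_rate() above, the stand-alone user-space sketch below replays a
few (m, n, od) tuples from the sys_pll_rate_table added later in this patch. It is
illustrative only: the 24 MHz crystal is an assumption (the driver reads the actual
crystal rate from a register at runtime), and the whole-MHz arithmetic mirrors the
driver code.

#include <stdio.h>

/* out = (in * M / N) >> OD, computed in whole MHz as in the driver */
static unsigned long pll_rate(unsigned long parent_rate,
                              unsigned int m, unsigned int n, unsigned int od)
{
        unsigned long parent_rate_mhz = parent_rate / 1000000;

        return ((parent_rate_mhz * m / n) >> od) * 1000000;
}

int main(void)
{
        /* (m, n, od) triplets taken from sys_pll_rate_table */
        printf("%lu\n", pll_rate(24000000, 52, 1, 2));  /* 312000000 */
        printf("%lu\n", pll_rate(24000000, 84, 1, 1));  /* 1008000000 */
        printf("%lu\n", pll_rate(24000000, 64, 1, 0));  /* 1536000000 */
        return 0;
}

Each printed value matches the first column of the corresponding PLL_RATE() entry in
the table, which is presumably how those rows were derived.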
diff --git a/drivers/clk/meson/clkc.c b/drivers/clk/meson/clkc.c
new file mode 100644 (file)
index 0000000..b8c511c
--- /dev/null
@@ -0,0 +1,250 @@
+/*
+ * Copyright (c) 2015 Endless Mobile, Inc.
+ * Author: Carlo Caione <carlo@endlessm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/mfd/syscon.h>
+#include <linux/slab.h>
+
+#include "clkc.h"
+
+static DEFINE_SPINLOCK(clk_lock);
+
+static struct clk **clks;
+static struct clk_onecell_data clk_data;
+
+struct clk ** __init meson_clk_init(struct device_node *np,
+                                  unsigned long nr_clks)
+{
+       clks = kcalloc(nr_clks, sizeof(*clks), GFP_KERNEL);
+       if (!clks)
+               return ERR_PTR(-ENOMEM);
+
+       clk_data.clks = clks;
+       clk_data.clk_num = nr_clks;
+       of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data);
+
+       return clks;
+}
+
+static void meson_clk_add_lookup(struct clk *clk, unsigned int id)
+{
+       if (clks && id)
+               clks[id] = clk;
+}
+
+static struct clk * __init
+meson_clk_register_composite(const struct clk_conf *clk_conf,
+                            void __iomem *clk_base)
+{
+       struct clk *clk;
+       struct clk_mux *mux = NULL;
+       struct clk_divider *div = NULL;
+       struct clk_gate *gate = NULL;
+       const struct clk_ops *mux_ops = NULL;
+       const struct composite_conf *composite_conf;
+
+       composite_conf = clk_conf->conf.composite;
+
+       if (clk_conf->num_parents > 1) {
+               mux = kzalloc(sizeof(*mux), GFP_KERNEL);
+               if (!mux)
+                       return ERR_PTR(-ENOMEM);
+
+               mux->reg = clk_base + clk_conf->reg_off
+                               + composite_conf->mux_parm.reg_off;
+               mux->shift = composite_conf->mux_parm.shift;
+               mux->mask = BIT(composite_conf->mux_parm.width) - 1;
+               mux->flags = composite_conf->mux_flags;
+               mux->lock = &clk_lock;
+               mux->table = composite_conf->mux_table;
+               mux_ops = (composite_conf->mux_flags & CLK_MUX_READ_ONLY) ?
+                         &clk_mux_ro_ops : &clk_mux_ops;
+       }
+
+       if (MESON_PARM_APPLICABLE(&composite_conf->div_parm)) {
+               div = kzalloc(sizeof(*div), GFP_KERNEL);
+               if (!div) {
+                       clk = ERR_PTR(-ENOMEM);
+                       goto error;
+               }
+
+               div->reg = clk_base + clk_conf->reg_off
+                               + composite_conf->div_parm.reg_off;
+               div->shift = composite_conf->div_parm.shift;
+               div->width = composite_conf->div_parm.width;
+               div->lock = &clk_lock;
+               div->flags = composite_conf->div_flags;
+               div->table = composite_conf->div_table;
+       }
+
+       if (MESON_PARM_APPLICABLE(&composite_conf->gate_parm)) {
+               gate = kzalloc(sizeof(*gate), GFP_KERNEL);
+               if (!gate) {
+                       clk = ERR_PTR(-ENOMEM);
+                       goto error;
+               }
+
+               gate->reg = clk_base + clk_conf->reg_off
+                               + composite_conf->gate_parm.reg_off;
+               gate->bit_idx = composite_conf->gate_parm.shift;
+               gate->flags = composite_conf->gate_flags;
+               gate->lock = &clk_lock;
+       }
+
+       clk = clk_register_composite(NULL, clk_conf->clk_name,
+                                   clk_conf->clks_parent,
+                                   clk_conf->num_parents,
+                                   mux ? &mux->hw : NULL, mux_ops,
+                                   div ? &div->hw : NULL, &clk_divider_ops,
+                                   gate ? &gate->hw : NULL, &clk_gate_ops,
+                                   clk_conf->flags);
+       if (IS_ERR(clk))
+               goto error;
+
+       return clk;
+
+error:
+       kfree(gate);
+       kfree(div);
+       kfree(mux);
+
+       return clk;
+}
+
+static struct clk * __init
+meson_clk_register_fixed_factor(const struct clk_conf *clk_conf,
+                               void __iomem *clk_base)
+{
+       struct clk *clk;
+       const struct fixed_fact_conf *fixed_fact_conf;
+       const struct parm *p;
+       unsigned int mult, div;
+       u32 reg;
+
+       fixed_fact_conf = &clk_conf->conf.fixed_fact;
+
+       mult = clk_conf->conf.fixed_fact.mult;
+       div = clk_conf->conf.fixed_fact.div;
+
+       if (!mult) {
+               mult = 1;
+               p = &fixed_fact_conf->mult_parm;
+               if (MESON_PARM_APPLICABLE(p)) {
+                       reg = readl(clk_base + clk_conf->reg_off + p->reg_off);
+                       mult = PARM_GET(p->width, p->shift, reg);
+               }
+       }
+
+       if (!div) {
+               div = 1;
+               p = &fixed_fact_conf->div_parm;
+               if (MESON_PARM_APPLICABLE(p)) {
+                       reg = readl(clk_base + clk_conf->reg_off + p->reg_off);
+                       div = PARM_GET(p->width, p->shift, reg);
+               }
+       }
+
+       clk = clk_register_fixed_factor(NULL,
+                       clk_conf->clk_name,
+                       clk_conf->clks_parent[0],
+                       clk_conf->flags,
+                       mult, div);
+
+       return clk;
+}
+
+static struct clk * __init
+meson_clk_register_fixed_rate(const struct clk_conf *clk_conf,
+                             void __iomem *clk_base)
+{
+       struct clk *clk;
+       const struct fixed_rate_conf *fixed_rate_conf;
+       const struct parm *r;
+       unsigned long rate;
+       u32 reg;
+
+       fixed_rate_conf = &clk_conf->conf.fixed_rate;
+       rate = fixed_rate_conf->rate;
+
+       if (!rate) {
+               r = &fixed_rate_conf->rate_parm;
+               reg = readl(clk_base + clk_conf->reg_off + r->reg_off);
+               rate = PARM_GET(r->width, r->shift, reg);
+       }
+
+       rate *= 1000000;
+
+       clk = clk_register_fixed_rate(NULL,
+                       clk_conf->clk_name,
+                       clk_conf->num_parents
+                               ? clk_conf->clks_parent[0] : NULL,
+                       clk_conf->flags, rate);
+
+       return clk;
+}
+
+void __init meson_clk_register_clks(const struct clk_conf *clk_confs,
+                                   size_t nr_confs,
+                                   void __iomem *clk_base)
+{
+       unsigned int i;
+       struct clk *clk = NULL;
+
+       for (i = 0; i < nr_confs; i++) {
+               const struct clk_conf *clk_conf = &clk_confs[i];
+
+               switch (clk_conf->clk_type) {
+               case CLK_FIXED_RATE:
+                       clk = meson_clk_register_fixed_rate(clk_conf,
+                                                           clk_base);
+                       break;
+               case CLK_FIXED_FACTOR:
+                       clk = meson_clk_register_fixed_factor(clk_conf,
+                                                             clk_base);
+                       break;
+               case CLK_COMPOSITE:
+                       clk = meson_clk_register_composite(clk_conf,
+                                                          clk_base);
+                       break;
+               case CLK_CPU:
+                       clk = meson_clk_register_cpu(clk_conf, clk_base,
+                                                    &clk_lock);
+                       break;
+               case CLK_PLL:
+                       clk = meson_clk_register_pll(clk_conf, clk_base,
+                                                    &clk_lock);
+                       break;
+               default:
+                       clk = NULL;
+               }
+
+               if (!clk) {
+                       pr_err("%s: unknown clock type %d\n", __func__,
+                              clk_conf->clk_type);
+                       continue;
+               }
+
+               if (IS_ERR(clk)) {
+                       pr_warn("%s: Unable to create %s clock\n", __func__,
+                               clk_conf->clk_name);
+                       continue;
+               }
+
+               meson_clk_add_lookup(clk, clk_conf->clk_id);
+       }
+}
diff --git a/drivers/clk/meson/clkc.h b/drivers/clk/meson/clkc.h
new file mode 100644 (file)
index 0000000..609ae92
--- /dev/null
@@ -0,0 +1,187 @@
+/*
+ * Copyright (c) 2015 Endless Mobile, Inc.
+ * Author: Carlo Caione <carlo@endlessm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __CLKC_H
+#define __CLKC_H
+
+#define PMASK(width)                   GENMASK(width - 1, 0)
+#define SETPMASK(width, shift)         GENMASK(shift + width - 1, shift)
+#define CLRPMASK(width, shift)         (~SETPMASK(width, shift))
+
+#define PARM_GET(width, shift, reg)                                    \
+       (((reg) & SETPMASK(width, shift)) >> (shift))
+#define PARM_SET(width, shift, reg, val)                               \
+       (((reg) & CLRPMASK(width, shift)) | ((val) << (shift)))
+
+#define MESON_PARM_APPLICABLE(p)               (!!((p)->width))
+
+struct parm {
+       u16     reg_off;
+       u8      shift;
+       u8      width;
+};
+#define PARM(_r, _s, _w)                                               \
+       {                                                               \
+               .reg_off        = (_r),                                 \
+               .shift          = (_s),                                 \
+               .width          = (_w),                                 \
+       }                                                               \
+
+struct pll_rate_table {
+       unsigned long   rate;
+       u16             m;
+       u16             n;
+       u16             od;
+};
+#define PLL_RATE(_r, _m, _n, _od)                                      \
+       {                                                               \
+               .rate           = (_r),                                 \
+               .m              = (_m),                                 \
+               .n              = (_n),                                 \
+               .od             = (_od),                                \
+       }                                                               \
+
+struct pll_conf {
+       const struct pll_rate_table     *rate_table;
+       struct parm                     m;
+       struct parm                     n;
+       struct parm                     od;
+};
+
+struct fixed_fact_conf {
+       unsigned int    div;
+       unsigned int    mult;
+       struct parm     div_parm;
+       struct parm     mult_parm;
+};
+
+struct fixed_rate_conf {
+       unsigned long   rate;
+       struct parm     rate_parm;
+};
+
+struct composite_conf {
+       struct parm             mux_parm;
+       struct parm             div_parm;
+       struct parm             gate_parm;
+       struct clk_div_table    *div_table;
+       u32                     *mux_table;
+       u8                      mux_flags;
+       u8                      div_flags;
+       u8                      gate_flags;
+};
+
+#define PNAME(x) static const char *x[]
+
+enum clk_type {
+       CLK_FIXED_FACTOR,
+       CLK_FIXED_RATE,
+       CLK_COMPOSITE,
+       CLK_CPU,
+       CLK_PLL,
+};
+
+struct clk_conf {
+       u16                             reg_off;
+       enum clk_type                   clk_type;
+       unsigned int                    clk_id;
+       const char                      *clk_name;
+       const char                      **clks_parent;
+       int                             num_parents;
+       unsigned long                   flags;
+       union {
+               struct fixed_fact_conf          fixed_fact;
+               struct fixed_rate_conf          fixed_rate;
+               const struct composite_conf     *composite;
+               struct pll_conf                 *pll;
+               const struct clk_div_table      *div_table;
+       } conf;
+};
+
+#define FIXED_RATE_P(_ro, _ci, _cn, _f, _c)                            \
+       {                                                               \
+               .reg_off                        = (_ro),                \
+               .clk_type                       = CLK_FIXED_RATE,       \
+               .clk_id                         = (_ci),                \
+               .clk_name                       = (_cn),                \
+               .flags                          = (_f),                 \
+               .conf.fixed_rate.rate_parm      = _c,                   \
+       }                                                               \
+
+#define FIXED_RATE(_ci, _cn, _f, _r)                                   \
+       {                                                               \
+               .clk_type                       = CLK_FIXED_RATE,       \
+               .clk_id                         = (_ci),                \
+               .clk_name                       = (_cn),                \
+               .flags                          = (_f),                 \
+               .conf.fixed_rate.rate           = (_r),                 \
+       }                                                               \
+
+#define PLL(_ro, _ci, _cn, _cp, _f, _c)                                        \
+       {                                                               \
+               .reg_off                        = (_ro),                \
+               .clk_type                       = CLK_PLL,              \
+               .clk_id                         = (_ci),                \
+               .clk_name                       = (_cn),                \
+               .clks_parent                    = (_cp),                \
+               .num_parents                    = ARRAY_SIZE(_cp),      \
+               .flags                          = (_f),                 \
+               .conf.pll                       = (_c),                 \
+       }                                                               \
+
+#define FIXED_FACTOR_DIV(_ci, _cn, _cp, _f, _d)                                \
+       {                                                               \
+               .clk_type                       = CLK_FIXED_FACTOR,     \
+               .clk_id                         = (_ci),                \
+               .clk_name                       = (_cn),                \
+               .clks_parent                    = (_cp),                \
+               .num_parents                    = ARRAY_SIZE(_cp),      \
+               .conf.fixed_fact.div            = (_d),                 \
+       }                                                               \
+
+#define CPU(_ro, _ci, _cn, _cp, _dt)                                   \
+       {                                                               \
+               .reg_off                        = (_ro),                \
+               .clk_type                       = CLK_CPU,              \
+               .clk_id                         = (_ci),                \
+               .clk_name                       = (_cn),                \
+               .clks_parent                    = (_cp),                \
+               .num_parents                    = ARRAY_SIZE(_cp),      \
+               .conf.div_table                 = (_dt),                \
+       }                                                               \
+
+#define COMPOSITE(_ro, _ci, _cn, _cp, _f, _c)                          \
+       {                                                               \
+               .reg_off                        = (_ro),                \
+               .clk_type                       = CLK_COMPOSITE,        \
+               .clk_id                         = (_ci),                \
+               .clk_name                       = (_cn),                \
+               .clks_parent                    = (_cp),                \
+               .num_parents                    = ARRAY_SIZE(_cp),      \
+               .flags                          = (_f),                 \
+               .conf.composite                 = (_c),                 \
+       }                                                               \
+
+struct clk **meson_clk_init(struct device_node *np, unsigned long nr_clks);
+void meson_clk_register_clks(const struct clk_conf *clk_confs,
+                            size_t nr_confs, void __iomem *clk_base);
+struct clk *meson_clk_register_cpu(const struct clk_conf *clk_conf,
+                                  void __iomem *reg_base, spinlock_t *lock);
+struct clk *meson_clk_register_pll(const struct clk_conf *clk_conf,
+                                  void __iomem *reg_base, spinlock_t *lock);
+
+#endif /* __CLKC_H */
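The PARM_GET()/PARM_SET() helpers above are plain mask-and-shift field accessors. The
stand-alone sketch below rebuilds them in user space (GENMASK() is redefined locally so
the snippet compiles outside the kernel) and packs/unpacks the M/N/OD register layout
used by the pll_conf tables later in this patch; the field positions come from those
tables, everything else is illustrative.

#include <stdio.h>

#define GENMASK(h, l)           (((~0U) << (l)) & (~0U >> (31 - (h))))
#define SETPMASK(width, shift)  GENMASK((shift) + (width) - 1, (shift))
#define CLRPMASK(width, shift)  (~SETPMASK(width, shift))
#define PARM_GET(width, shift, reg) \
        (((reg) & SETPMASK(width, shift)) >> (shift))
#define PARM_SET(width, shift, reg, val) \
        (((reg) & CLRPMASK(width, shift)) | ((val) << (shift)))

int main(void)
{
        unsigned int reg = 0;

        reg = PARM_SET(9, 0, reg, 64);          /* M  at bits [8:0]   */
        reg = PARM_SET(5, 9, reg, 1);           /* N  at bits [13:9]  */
        reg = PARM_SET(2, 16, reg, 0);          /* OD at bits [17:16] */

        printf("reg=0x%08x M=%u N=%u OD=%u\n", reg,
               PARM_GET(9, 0, reg), PARM_GET(5, 9, reg), PARM_GET(2, 16, reg));
        return 0;
}

Running it prints reg=0x00000240 M=64 N=1 OD=0, i.e. the field values for the
1536 MHz row of sys_pll_rate_table.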
diff --git a/drivers/clk/meson/meson8b-clkc.c b/drivers/clk/meson/meson8b-clkc.c
new file mode 100644 (file)
index 0000000..61f6d55
--- /dev/null
@@ -0,0 +1,196 @@
+/*
+ * Copyright (c) 2015 Endless Mobile, Inc.
+ * Author: Carlo Caione <carlo@endlessm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/kernel.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/slab.h>
+#include <dt-bindings/clock/meson8b-clkc.h>
+
+#include "clkc.h"
+
+#define MESON8B_REG_CTL0_ADDR          0x0000
+#define MESON8B_REG_SYS_CPU_CNTL1      0x015c
+#define MESON8B_REG_HHI_MPEG           0x0174
+#define MESON8B_REG_MALI               0x01b0
+#define MESON8B_REG_PLL_FIXED          0x0280
+#define MESON8B_REG_PLL_SYS            0x0300
+#define MESON8B_REG_PLL_VID            0x0320
+
+static const struct pll_rate_table sys_pll_rate_table[] = {
+       PLL_RATE(312000000, 52, 1, 2),
+       PLL_RATE(336000000, 56, 1, 2),
+       PLL_RATE(360000000, 60, 1, 2),
+       PLL_RATE(384000000, 64, 1, 2),
+       PLL_RATE(408000000, 68, 1, 2),
+       PLL_RATE(432000000, 72, 1, 2),
+       PLL_RATE(456000000, 76, 1, 2),
+       PLL_RATE(480000000, 80, 1, 2),
+       PLL_RATE(504000000, 84, 1, 2),
+       PLL_RATE(528000000, 88, 1, 2),
+       PLL_RATE(552000000, 92, 1, 2),
+       PLL_RATE(576000000, 96, 1, 2),
+       PLL_RATE(600000000, 50, 1, 1),
+       PLL_RATE(624000000, 52, 1, 1),
+       PLL_RATE(648000000, 54, 1, 1),
+       PLL_RATE(672000000, 56, 1, 1),
+       PLL_RATE(696000000, 58, 1, 1),
+       PLL_RATE(720000000, 60, 1, 1),
+       PLL_RATE(744000000, 62, 1, 1),
+       PLL_RATE(768000000, 64, 1, 1),
+       PLL_RATE(792000000, 66, 1, 1),
+       PLL_RATE(816000000, 68, 1, 1),
+       PLL_RATE(840000000, 70, 1, 1),
+       PLL_RATE(864000000, 72, 1, 1),
+       PLL_RATE(888000000, 74, 1, 1),
+       PLL_RATE(912000000, 76, 1, 1),
+       PLL_RATE(936000000, 78, 1, 1),
+       PLL_RATE(960000000, 80, 1, 1),
+       PLL_RATE(984000000, 82, 1, 1),
+       PLL_RATE(1008000000, 84, 1, 1),
+       PLL_RATE(1032000000, 86, 1, 1),
+       PLL_RATE(1056000000, 88, 1, 1),
+       PLL_RATE(1080000000, 90, 1, 1),
+       PLL_RATE(1104000000, 92, 1, 1),
+       PLL_RATE(1128000000, 94, 1, 1),
+       PLL_RATE(1152000000, 96, 1, 1),
+       PLL_RATE(1176000000, 98, 1, 1),
+       PLL_RATE(1200000000, 50, 1, 0),
+       PLL_RATE(1224000000, 51, 1, 0),
+       PLL_RATE(1248000000, 52, 1, 0),
+       PLL_RATE(1272000000, 53, 1, 0),
+       PLL_RATE(1296000000, 54, 1, 0),
+       PLL_RATE(1320000000, 55, 1, 0),
+       PLL_RATE(1344000000, 56, 1, 0),
+       PLL_RATE(1368000000, 57, 1, 0),
+       PLL_RATE(1392000000, 58, 1, 0),
+       PLL_RATE(1416000000, 59, 1, 0),
+       PLL_RATE(1440000000, 60, 1, 0),
+       PLL_RATE(1464000000, 61, 1, 0),
+       PLL_RATE(1488000000, 62, 1, 0),
+       PLL_RATE(1512000000, 63, 1, 0),
+       PLL_RATE(1536000000, 64, 1, 0),
+       { /* sentinel */ },
+};
+
+static const struct clk_div_table cpu_div_table[] = {
+       { .val = 1, .div = 1 },
+       { .val = 2, .div = 2 },
+       { .val = 3, .div = 3 },
+       { .val = 2, .div = 4 },
+       { .val = 3, .div = 6 },
+       { .val = 4, .div = 8 },
+       { .val = 5, .div = 10 },
+       { .val = 6, .div = 12 },
+       { .val = 7, .div = 14 },
+       { .val = 8, .div = 16 },
+       { /* sentinel */ },
+};
+
+PNAME(p_xtal)          = { "xtal" };
+PNAME(p_fclk_div)      = { "fixed_pll" };
+PNAME(p_cpu_clk)       = { "sys_pll" };
+PNAME(p_clk81)         = { "fclk_div3", "fclk_div4", "fclk_div5" };
+PNAME(p_mali)          = { "fclk_div3", "fclk_div4", "fclk_div5",
+                           "fclk_div7", "zero" };
+
+static u32 mux_table_clk81[]   = { 6, 5, 7 };
+static u32 mux_table_mali[]    = { 6, 5, 7, 4, 0 };
+
+static struct pll_conf pll_confs = {
+       .m              = PARM(0x00, 0,  9),
+       .n              = PARM(0x00, 9,  5),
+       .od             = PARM(0x00, 16, 2),
+};
+
+static struct pll_conf sys_pll_conf = {
+       .m              = PARM(0x00, 0,  9),
+       .n              = PARM(0x00, 9,  5),
+       .od             = PARM(0x00, 16, 2),
+       .rate_table     = sys_pll_rate_table,
+};
+
+static const struct composite_conf clk81_conf __initconst = {
+       .mux_table              = mux_table_clk81,
+       .mux_flags              = CLK_MUX_READ_ONLY,
+       .mux_parm               = PARM(0x00, 12, 3),
+       .div_parm               = PARM(0x00, 0, 7),
+       .gate_parm              = PARM(0x00, 7, 1),
+};
+
+static const struct composite_conf mali_conf __initconst = {
+       .mux_table              = mux_table_mali,
+       .mux_parm               = PARM(0x00, 9, 3),
+       .div_parm               = PARM(0x00, 0, 7),
+       .gate_parm              = PARM(0x00, 8, 1),
+};
+
+static const struct clk_conf meson8b_xtal_conf __initconst =
+       FIXED_RATE_P(MESON8B_REG_CTL0_ADDR, CLKID_XTAL, "xtal",
+                    CLK_IS_ROOT, PARM(0x00, 4, 7));
+
+static const struct clk_conf meson8b_clk_confs[] __initconst = {
+       FIXED_RATE(CLKID_ZERO, "zero", CLK_IS_ROOT, 0),
+       PLL(MESON8B_REG_PLL_FIXED, CLKID_PLL_FIXED, "fixed_pll",
+           p_xtal, 0, &pll_confs),
+       PLL(MESON8B_REG_PLL_VID, CLKID_PLL_VID, "vid_pll",
+           p_xtal, 0, &pll_confs),
+       PLL(MESON8B_REG_PLL_SYS, CLKID_PLL_SYS, "sys_pll",
+           p_xtal, 0, &sys_pll_conf),
+       FIXED_FACTOR_DIV(CLKID_FCLK_DIV2, "fclk_div2", p_fclk_div, 0, 2),
+       FIXED_FACTOR_DIV(CLKID_FCLK_DIV3, "fclk_div3", p_fclk_div, 0, 3),
+       FIXED_FACTOR_DIV(CLKID_FCLK_DIV4, "fclk_div4", p_fclk_div, 0, 4),
+       FIXED_FACTOR_DIV(CLKID_FCLK_DIV5, "fclk_div5", p_fclk_div, 0, 5),
+       FIXED_FACTOR_DIV(CLKID_FCLK_DIV7, "fclk_div7", p_fclk_div, 0, 7),
+       CPU(MESON8B_REG_SYS_CPU_CNTL1, CLKID_CPUCLK, "a5_clk", p_cpu_clk,
+           cpu_div_table),
+       COMPOSITE(MESON8B_REG_HHI_MPEG, CLKID_CLK81, "clk81", p_clk81,
+                 CLK_SET_RATE_NO_REPARENT | CLK_IGNORE_UNUSED, &clk81_conf),
+       COMPOSITE(MESON8B_REG_MALI, CLKID_MALI, "mali", p_mali,
+                 CLK_IGNORE_UNUSED, &mali_conf),
+};
+
+static void __init meson8b_clkc_init(struct device_node *np)
+{
+       void __iomem *clk_base;
+
+       if (IS_ERR(meson_clk_init(np, CLK_NR_CLKS)))
+               return;
+
+       /* XTAL */
+       clk_base = of_iomap(np, 0);
+       if (!clk_base) {
+               pr_err("%s: Unable to map xtal base\n", __func__);
+               return;
+       }
+
+       meson_clk_register_clks(&meson8b_xtal_conf, 1, clk_base);
+       iounmap(clk_base);
+
+       /*  Generic clocks and PLLs */
+       clk_base = of_iomap(np, 1);
+       if (!clk_base) {
+               pr_err("%s: Unable to map clk base\n", __func__);
+               return;
+       }
+
+       meson_clk_register_clks(meson8b_clk_confs,
+                               ARRAY_SIZE(meson8b_clk_confs),
+                               clk_base);
+}
+CLK_OF_DECLARE(meson8b_clock, "amlogic,meson8b-clkc", meson8b_clkc_init);
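meson_clk_pll_round_rate() earlier in this patch walks the rate table in ascending
order and returns the first entry that is at least the requested rate, falling back to
the first (lowest) entry when the request is above every supported rate. The sketch
below reproduces that selection in user space against a handful of sys_pll_rate_table
rates; it uses no kernel APIs and is for illustration only.

#include <stdio.h>

static const unsigned long sys_pll_rates[] = {
        312000000, 600000000, 1008000000, 1536000000,
};

/* same selection logic as meson_clk_pll_round_rate() */
static unsigned long round_rate(unsigned long rate)
{
        unsigned int i;

        for (i = 0; i < sizeof(sys_pll_rates) / sizeof(sys_pll_rates[0]); i++)
                if (rate <= sys_pll_rates[i])
                        return sys_pll_rates[i];

        return sys_pll_rates[0];        /* above the table: smallest rate */
}

int main(void)
{
        printf("%lu\n", round_rate(1000000000));        /* 1008000000 */
        printf("%lu\n", round_rate(2000000000));        /* 312000000  */
        return 0;
}

Asking for 1 GHz rounds up to 1008 MHz, while asking for more than the table maximum
falls back to 312 MHz, matching the "else return the smallest value" comment in the
driver.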
index 3caaf7cc169c684559973f105317dea76bbd1d54..9d4bc41e4239506b7061fc183d6756cead5b2dad 100644 (file)
@@ -12,3 +12,5 @@ obj-$(CONFIG_MACH_MMP2_DT) += clk-of-mmp2.o
 obj-$(CONFIG_CPU_PXA168) += clk-pxa168.o
 obj-$(CONFIG_CPU_PXA910) += clk-pxa910.o
 obj-$(CONFIG_CPU_MMP2) += clk-mmp2.o
+
+obj-y += clk-of-pxa1928.o
index d14120eaa71f4c5d5498814aeee91a46d9012283..09d41c717c52830efd22849100deb01e9f9252fb 100644 (file)
@@ -115,7 +115,7 @@ static void clk_apbc_unprepare(struct clk_hw *hw)
                spin_unlock_irqrestore(apbc->lock, flags);
 }
 
-struct clk_ops clk_apbc_ops = {
+static struct clk_ops clk_apbc_ops = {
        .prepare = clk_apbc_prepare,
        .unprepare = clk_apbc_unprepare,
 };
index abe182b2377f04889cbfd85f3ca3ef332b786a79..cdcf2d7f321e0d5becfaa90b199d9839bcc9d81a 100644 (file)
@@ -61,7 +61,7 @@ static void clk_apmu_disable(struct clk_hw *hw)
                spin_unlock_irqrestore(apmu->lock, flags);
 }
 
-struct clk_ops clk_apmu_ops = {
+static struct clk_ops clk_apmu_ops = {
        .enable = clk_apmu_enable,
        .disable = clk_apmu_disable,
 };
index 5c90a4230fa3d3f2718c7ed6452c32c51df70b24..09d2832fbd7821a56e53fe8dc22ab16548fa3f1c 100644 (file)
@@ -63,10 +63,8 @@ static struct mmp_clk_factor_masks uart_factor_masks = {
 };
 
 static struct mmp_clk_factor_tbl uart_factor_tbl[] = {
-       {.num = 14634, .den = 2165},    /*14.745MHZ */
+       {.num = 8125, .den = 1536},     /*14.745MHZ */
        {.num = 3521, .den = 689},      /*19.23MHZ */
-       {.num = 9679, .den = 5728},     /*58.9824MHZ */
-       {.num = 15850, .den = 9451},    /*59.429MHZ */
 };
 
 static const char *uart_parent[] = {"uart_pll", "vctcxo"};
index 2cbc2b43ae527265c89f8a9deabfbf26a2f2bd28..251533d87c6538f6ff2cb46bee025a2589a1df2f 100644 (file)
@@ -30,6 +30,7 @@
 #define APBC_TWSI4     0x7c
 #define APBC_TWSI5     0x80
 #define APBC_KPC       0x18
+#define APBC_TIMER     0x24
 #define APBC_UART0     0x2c
 #define APBC_UART1     0x30
 #define APBC_UART2     0x34
@@ -98,10 +99,8 @@ static struct mmp_clk_factor_masks uart_factor_masks = {
 };
 
 static struct mmp_clk_factor_tbl uart_factor_tbl[] = {
-       {.num = 14634, .den = 2165},    /*14.745MHZ */
+       {.num = 8125, .den = 1536},     /*14.745MHZ */
        {.num = 3521, .den = 689},      /*19.23MHZ */
-       {.num = 9679, .den = 5728},     /*58.9824MHZ */
-       {.num = 15850, .den = 9451},    /*59.429MHZ */
 };
 
 static void mmp2_pll_init(struct mmp2_clk_unit *pxa_unit)
@@ -134,6 +133,9 @@ static DEFINE_SPINLOCK(ssp2_lock);
 static DEFINE_SPINLOCK(ssp3_lock);
 static const char *ssp_parent_names[] = {"vctcxo_4", "vctcxo_2", "vctcxo", "pll1_16"};
 
+static DEFINE_SPINLOCK(timer_lock);
+static const char *timer_parent_names[] = {"clk32", "vctcxo_2", "vctcxo_4", "vctcxo"};
+
 static DEFINE_SPINLOCK(reset_lock);
 
 static struct mmp_param_mux_clk apbc_mux_clks[] = {
@@ -145,6 +147,7 @@ static struct mmp_param_mux_clk apbc_mux_clks[] = {
        {0, "ssp1_mux", ssp_parent_names, ARRAY_SIZE(ssp_parent_names), CLK_SET_RATE_PARENT, APBC_SSP1, 4, 3, 0, &ssp1_lock},
        {0, "ssp2_mux", ssp_parent_names, ARRAY_SIZE(ssp_parent_names), CLK_SET_RATE_PARENT, APBC_SSP2, 4, 3, 0, &ssp2_lock},
        {0, "ssp3_mux", ssp_parent_names, ARRAY_SIZE(ssp_parent_names), CLK_SET_RATE_PARENT, APBC_SSP3, 4, 3, 0, &ssp3_lock},
+       {0, "timer_mux", timer_parent_names, ARRAY_SIZE(timer_parent_names), CLK_SET_RATE_PARENT, APBC_TIMER, 4, 3, 0, &timer_lock},
 };
 
 static struct mmp_param_gate_clk apbc_gate_clks[] = {
@@ -170,6 +173,7 @@ static struct mmp_param_gate_clk apbc_gate_clks[] = {
        {MMP2_CLK_SSP1, "ssp1_clk", "ssp1_mux", CLK_SET_RATE_PARENT, APBC_SSP1, 0x7, 0x3, 0x0, 0, &ssp1_lock},
        {MMP2_CLK_SSP2, "ssp2_clk", "ssp2_mux", CLK_SET_RATE_PARENT, APBC_SSP2, 0x7, 0x3, 0x0, 0, &ssp2_lock},
        {MMP2_CLK_SSP3, "ssp3_clk", "ssp3_mux", CLK_SET_RATE_PARENT, APBC_SSP3, 0x7, 0x3, 0x0, 0, &ssp3_lock},
+       {MMP2_CLK_TIMER, "timer_clk", "timer_mux", CLK_SET_RATE_PARENT, APBC_TIMER, 0x7, 0x3, 0x0, 0, &timer_lock},
 };
 
 static void mmp2_apb_periph_clk_init(struct mmp2_clk_unit *pxa_unit)
index 5b1810dc4bd20839d90531c6a47208a7f73a829f..64eaf4141c69842a766f49a79a1b2698b6974e8a 100644 (file)
@@ -32,6 +32,7 @@
 #define APBC_PWM1      0x10
 #define APBC_PWM2      0x14
 #define APBC_PWM3      0x18
+#define APBC_TIMER     0x34
 #define APBC_SSP0      0x81c
 #define APBC_SSP1      0x820
 #define APBC_SSP2      0x84c
@@ -58,6 +59,7 @@ static struct mmp_param_fixed_rate_clk fixed_rate_clks[] = {
        {PXA168_CLK_CLK32, "clk32", NULL, CLK_IS_ROOT, 32768},
        {PXA168_CLK_VCTCXO, "vctcxo", NULL, CLK_IS_ROOT, 26000000},
        {PXA168_CLK_PLL1, "pll1", NULL, CLK_IS_ROOT, 624000000},
+       {PXA168_CLK_USB_PLL, "usb_pll", NULL, CLK_IS_ROOT, 480000000},
 };
 
 static struct mmp_param_fixed_factor_clk fixed_factor_clks[] = {
@@ -70,6 +72,7 @@ static struct mmp_param_fixed_factor_clk fixed_factor_clks[] = {
        {PXA168_CLK_PLL1_24, "pll1_24", "pll1_12", 1, 2, 0},
        {PXA168_CLK_PLL1_48, "pll1_48", "pll1_24", 1, 2, 0},
        {PXA168_CLK_PLL1_96, "pll1_96", "pll1_48", 1, 2, 0},
+       {PXA168_CLK_PLL1_192, "pll1_192", "pll1_96", 1, 2, 0},
        {PXA168_CLK_PLL1_13, "pll1_13", "pll1", 1, 13, 0},
        {PXA168_CLK_PLL1_13_1_5, "pll1_13_1_5", "pll1_13", 2, 3, 0},
        {PXA168_CLK_PLL1_2_1_5, "pll1_2_1_5", "pll1_2", 2, 3, 0},
@@ -119,6 +122,9 @@ static DEFINE_SPINLOCK(ssp3_lock);
 static DEFINE_SPINLOCK(ssp4_lock);
 static const char *ssp_parent_names[] = {"pll1_96", "pll1_48", "pll1_24", "pll1_12"};
 
+static DEFINE_SPINLOCK(timer_lock);
+static const char *timer_parent_names[] = {"pll1_48", "clk32", "pll1_96", "pll1_192"};
+
 static DEFINE_SPINLOCK(reset_lock);
 
 static struct mmp_param_mux_clk apbc_mux_clks[] = {
@@ -130,6 +136,7 @@ static struct mmp_param_mux_clk apbc_mux_clks[] = {
        {0, "ssp2_mux", ssp_parent_names, ARRAY_SIZE(ssp_parent_names), CLK_SET_RATE_PARENT, APBC_SSP2, 4, 3, 0, &ssp2_lock},
        {0, "ssp3_mux", ssp_parent_names, ARRAY_SIZE(ssp_parent_names), CLK_SET_RATE_PARENT, APBC_SSP3, 4, 3, 0, &ssp3_lock},
        {0, "ssp4_mux", ssp_parent_names, ARRAY_SIZE(ssp_parent_names), CLK_SET_RATE_PARENT, APBC_SSP4, 4, 3, 0, &ssp4_lock},
+       {0, "timer_mux", timer_parent_names, ARRAY_SIZE(timer_parent_names), CLK_SET_RATE_PARENT, APBC_TIMER, 4, 3, 0, &timer_lock},
 };
 
 static struct mmp_param_gate_clk apbc_gate_clks[] = {
@@ -151,6 +158,7 @@ static struct mmp_param_gate_clk apbc_gate_clks[] = {
        {PXA168_CLK_SSP2, "ssp2_clk", "ssp2_mux", CLK_SET_RATE_PARENT, APBC_SSP2, 0x3, 0x3, 0x0, 0, &ssp2_lock},
        {PXA168_CLK_SSP3, "ssp3_clk", "ssp3_mux", CLK_SET_RATE_PARENT, APBC_SSP3, 0x3, 0x3, 0x0, 0, &ssp3_lock},
        {PXA168_CLK_SSP4, "ssp4_clk", "ssp4_mux", CLK_SET_RATE_PARENT, APBC_SSP4, 0x3, 0x3, 0x0, 0, &ssp4_lock},
+       {PXA168_CLK_TIMER, "timer_clk", "timer_mux", CLK_SET_RATE_PARENT, APBC_TIMER, 0x3, 0x3, 0x0, 0, &timer_lock},
 };
 
 static void pxa168_apb_periph_clk_init(struct pxa168_clk_unit *pxa_unit)
diff --git a/drivers/clk/mmp/clk-of-pxa1928.c b/drivers/clk/mmp/clk-of-pxa1928.c
new file mode 100644 (file)
index 0000000..433a5ae
--- /dev/null
@@ -0,0 +1,265 @@
+/*
+ * pxa1928 clock framework source file
+ *
+ * Copyright (C) 2015 Linaro, Ltd.
+ * Rob Herring <robh@kernel.org>
+ *
+ * Based on drivers/clk/mmp/clk-of-mmp2.c:
+ * Copyright (C) 2012 Marvell
+ * Chao Xie <xiechao.mail@gmail.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+#include <linux/kernel.h>
+#include <linux/io.h>
+#include <linux/of_address.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+
+#include <dt-bindings/clock/marvell,pxa1928.h>
+
+#include "clk.h"
+#include "reset.h"
+
+#define MPMU_UART_PLL  0x14
+
+struct pxa1928_clk_unit {
+       struct mmp_clk_unit unit;
+       void __iomem *mpmu_base;
+       void __iomem *apmu_base;
+       void __iomem *apbc_base;
+       void __iomem *apbcp_base;
+};
+
+static struct mmp_param_fixed_rate_clk fixed_rate_clks[] = {
+       {0, "clk32", NULL, CLK_IS_ROOT, 32768},
+       {0, "vctcxo", NULL, CLK_IS_ROOT, 26000000},
+       {0, "pll1_624", NULL, CLK_IS_ROOT, 624000000},
+       {0, "pll5p", NULL, CLK_IS_ROOT, 832000000},
+       {0, "pll5", NULL, CLK_IS_ROOT, 1248000000},
+       {0, "usb_pll", NULL, CLK_IS_ROOT, 480000000},
+};
+
+static struct mmp_param_fixed_factor_clk fixed_factor_clks[] = {
+       {0, "pll1_d2", "pll1_624", 1, 2, 0},
+       {0, "pll1_d9", "pll1_624", 1, 9, 0},
+       {0, "pll1_d12", "pll1_624", 1, 12, 0},
+       {0, "pll1_d16", "pll1_624", 1, 16, 0},
+       {0, "pll1_d20", "pll1_624", 1, 20, 0},
+       {0, "pll1_416", "pll1_624", 2, 3, 0},
+       {0, "vctcxo_d2", "vctcxo", 1, 2, 0},
+       {0, "vctcxo_d4", "vctcxo", 1, 4, 0},
+};
+
+static struct mmp_clk_factor_masks uart_factor_masks = {
+       .factor = 2,
+       .num_mask = 0x1fff,
+       .den_mask = 0x1fff,
+       .num_shift = 16,
+       .den_shift = 0,
+};
+
+static struct mmp_clk_factor_tbl uart_factor_tbl[] = {
+       {.num = 832, .den = 234},       /*58.5MHZ */
+       {.num = 1, .den = 1},           /*26MHZ */
+};
+
+static void pxa1928_pll_init(struct pxa1928_clk_unit *pxa_unit)
+{
+       struct clk *clk;
+       struct mmp_clk_unit *unit = &pxa_unit->unit;
+
+       mmp_register_fixed_rate_clks(unit, fixed_rate_clks,
+                                       ARRAY_SIZE(fixed_rate_clks));
+
+       mmp_register_fixed_factor_clks(unit, fixed_factor_clks,
+                                       ARRAY_SIZE(fixed_factor_clks));
+
+       clk = mmp_clk_register_factor("uart_pll", "pll1_416",
+                               CLK_SET_RATE_PARENT,
+                               pxa_unit->mpmu_base + MPMU_UART_PLL,
+                               &uart_factor_masks, uart_factor_tbl,
+                               ARRAY_SIZE(uart_factor_tbl), NULL);
+}
+
+static DEFINE_SPINLOCK(uart0_lock);
+static DEFINE_SPINLOCK(uart1_lock);
+static DEFINE_SPINLOCK(uart2_lock);
+static DEFINE_SPINLOCK(uart3_lock);
+static const char *uart_parent_names[] = {"uart_pll", "vctcxo"};
+
+static DEFINE_SPINLOCK(ssp0_lock);
+static DEFINE_SPINLOCK(ssp1_lock);
+static const char *ssp_parent_names[] = {"vctcxo_d4", "vctcxo_d2", "vctcxo", "pll1_d12"};
+
+static DEFINE_SPINLOCK(reset_lock);
+
+static struct mmp_param_mux_clk apbc_mux_clks[] = {
+       {0, "uart0_mux", uart_parent_names, ARRAY_SIZE(uart_parent_names), CLK_SET_RATE_PARENT, PXA1928_CLK_UART0 * 4, 4, 3, 0, &uart0_lock},
+       {0, "uart1_mux", uart_parent_names, ARRAY_SIZE(uart_parent_names), CLK_SET_RATE_PARENT, PXA1928_CLK_UART1 * 4, 4, 3, 0, &uart1_lock},
+       {0, "uart2_mux", uart_parent_names, ARRAY_SIZE(uart_parent_names), CLK_SET_RATE_PARENT, PXA1928_CLK_UART2 * 4, 4, 3, 0, &uart2_lock},
+       {0, "uart3_mux", uart_parent_names, ARRAY_SIZE(uart_parent_names), CLK_SET_RATE_PARENT, PXA1928_CLK_UART3 * 4, 4, 3, 0, &uart3_lock},
+       {0, "ssp0_mux", ssp_parent_names, ARRAY_SIZE(ssp_parent_names), CLK_SET_RATE_PARENT, PXA1928_CLK_SSP0 * 4, 4, 3, 0, &ssp0_lock},
+       {0, "ssp1_mux", ssp_parent_names, ARRAY_SIZE(ssp_parent_names), CLK_SET_RATE_PARENT, PXA1928_CLK_SSP1 * 4, 4, 3, 0, &ssp1_lock},
+};
+
+static struct mmp_param_gate_clk apbc_gate_clks[] = {
+       {PXA1928_CLK_TWSI0, "twsi0_clk", "vctcxo", CLK_SET_RATE_PARENT, PXA1928_CLK_TWSI0 * 4, 0x3, 0x3, 0x0, 0, &reset_lock},
+       {PXA1928_CLK_TWSI1, "twsi1_clk", "vctcxo", CLK_SET_RATE_PARENT, PXA1928_CLK_TWSI1 * 4, 0x3, 0x3, 0x0, 0, &reset_lock},
+       {PXA1928_CLK_TWSI2, "twsi2_clk", "vctcxo", CLK_SET_RATE_PARENT, PXA1928_CLK_TWSI2 * 4, 0x3, 0x3, 0x0, 0, &reset_lock},
+       {PXA1928_CLK_TWSI3, "twsi3_clk", "vctcxo", CLK_SET_RATE_PARENT, PXA1928_CLK_TWSI3 * 4, 0x3, 0x3, 0x0, 0, &reset_lock},
+       {PXA1928_CLK_TWSI4, "twsi4_clk", "vctcxo", CLK_SET_RATE_PARENT, PXA1928_CLK_TWSI4 * 4, 0x3, 0x3, 0x0, 0, &reset_lock},
+       {PXA1928_CLK_TWSI5, "twsi5_clk", "vctcxo", CLK_SET_RATE_PARENT, PXA1928_CLK_TWSI5 * 4, 0x3, 0x3, 0x0, 0, &reset_lock},
+       {PXA1928_CLK_GPIO, "gpio_clk", "vctcxo", CLK_SET_RATE_PARENT, PXA1928_CLK_GPIO * 4, 0x3, 0x3, 0x0, 0, &reset_lock},
+       {PXA1928_CLK_KPC, "kpc_clk", "clk32", CLK_SET_RATE_PARENT, PXA1928_CLK_KPC * 4, 0x3, 0x3, 0x0, MMP_CLK_GATE_NEED_DELAY, NULL},
+       {PXA1928_CLK_RTC, "rtc_clk", "clk32", CLK_SET_RATE_PARENT, PXA1928_CLK_RTC * 4, 0x83, 0x83, 0x0, MMP_CLK_GATE_NEED_DELAY, NULL},
+       {PXA1928_CLK_PWM0, "pwm0_clk", "vctcxo", CLK_SET_RATE_PARENT, PXA1928_CLK_PWM0 * 4, 0x3, 0x3, 0x0, 0, &reset_lock},
+       {PXA1928_CLK_PWM1, "pwm1_clk", "vctcxo", CLK_SET_RATE_PARENT, PXA1928_CLK_PWM1 * 4, 0x3, 0x3, 0x0, 0, &reset_lock},
+       {PXA1928_CLK_PWM2, "pwm2_clk", "vctcxo", CLK_SET_RATE_PARENT, PXA1928_CLK_PWM2 * 4, 0x3, 0x3, 0x0, 0, &reset_lock},
+       {PXA1928_CLK_PWM3, "pwm3_clk", "vctcxo", CLK_SET_RATE_PARENT, PXA1928_CLK_PWM3 * 4, 0x3, 0x3, 0x0, 0, &reset_lock},
+       /* These gate clocks have mux parents. */
+       {PXA1928_CLK_UART0, "uart0_clk", "uart0_mux", CLK_SET_RATE_PARENT, PXA1928_CLK_UART0 * 4, 0x3, 0x3, 0x0, 0, &uart0_lock},
+       {PXA1928_CLK_UART1, "uart1_clk", "uart1_mux", CLK_SET_RATE_PARENT, PXA1928_CLK_UART1 * 4, 0x3, 0x3, 0x0, 0, &uart1_lock},
+       {PXA1928_CLK_UART2, "uart2_clk", "uart2_mux", CLK_SET_RATE_PARENT, PXA1928_CLK_UART2 * 4, 0x3, 0x3, 0x0, 0, &uart2_lock},
+       {PXA1928_CLK_UART3, "uart3_clk", "uart3_mux", CLK_SET_RATE_PARENT, PXA1928_CLK_UART3 * 4, 0x3, 0x3, 0x0, 0, &uart3_lock},
+       {PXA1928_CLK_SSP0, "ssp0_clk", "ssp0_mux", CLK_SET_RATE_PARENT, PXA1928_CLK_SSP0 * 4, 0x3, 0x3, 0x0, 0, &ssp0_lock},
+       {PXA1928_CLK_SSP1, "ssp1_clk", "ssp1_mux", CLK_SET_RATE_PARENT, PXA1928_CLK_SSP1 * 4, 0x3, 0x3, 0x0, 0, &ssp1_lock},
+};
+
+static void pxa1928_apb_periph_clk_init(struct pxa1928_clk_unit *pxa_unit)
+{
+       struct mmp_clk_unit *unit = &pxa_unit->unit;
+
+       mmp_register_mux_clks(unit, apbc_mux_clks, pxa_unit->apbc_base,
+                               ARRAY_SIZE(apbc_mux_clks));
+
+       mmp_register_gate_clks(unit, apbc_gate_clks, pxa_unit->apbc_base,
+                               ARRAY_SIZE(apbc_gate_clks));
+}
+
+static DEFINE_SPINLOCK(sdh0_lock);
+static DEFINE_SPINLOCK(sdh1_lock);
+static DEFINE_SPINLOCK(sdh2_lock);
+static DEFINE_SPINLOCK(sdh3_lock);
+static DEFINE_SPINLOCK(sdh4_lock);
+static const char *sdh_parent_names[] = {"pll1_624", "pll5p", "pll5", "pll1_416"};
+
+static DEFINE_SPINLOCK(usb_lock);
+
+static struct mmp_param_mux_clk apmu_mux_clks[] = {
+       {0, "sdh_mux", sdh_parent_names, ARRAY_SIZE(sdh_parent_names), CLK_SET_RATE_PARENT, PXA1928_CLK_SDH0 * 4, 8, 2, 0, &sdh0_lock},
+};
+
+static struct mmp_param_div_clk apmu_div_clks[] = {
+       {0, "sdh_div", "sdh_mux", 0, PXA1928_CLK_SDH0 * 4, 10, 4, CLK_DIVIDER_ONE_BASED, &sdh0_lock},
+};
+
+static struct mmp_param_gate_clk apmu_gate_clks[] = {
+       {PXA1928_CLK_USB, "usb_clk", "usb_pll", 0, PXA1928_CLK_USB * 4, 0x9, 0x9, 0x0, 0, &usb_lock},
+       {PXA1928_CLK_HSIC, "hsic_clk", "usb_pll", 0, PXA1928_CLK_HSIC * 4, 0x9, 0x9, 0x0, 0, &usb_lock},
+       /* These gate clocks have mux parents. */
+       {PXA1928_CLK_SDH0, "sdh0_clk", "sdh_div", CLK_SET_RATE_PARENT, PXA1928_CLK_SDH0 * 4, 0x1b, 0x1b, 0x0, 0, &sdh0_lock},
+       {PXA1928_CLK_SDH1, "sdh1_clk", "sdh_div", CLK_SET_RATE_PARENT, PXA1928_CLK_SDH1 * 4, 0x1b, 0x1b, 0x0, 0, &sdh1_lock},
+       {PXA1928_CLK_SDH2, "sdh2_clk", "sdh_div", CLK_SET_RATE_PARENT, PXA1928_CLK_SDH2 * 4, 0x1b, 0x1b, 0x0, 0, &sdh2_lock},
+       {PXA1928_CLK_SDH3, "sdh3_clk", "sdh_div", CLK_SET_RATE_PARENT, PXA1928_CLK_SDH3 * 4, 0x1b, 0x1b, 0x0, 0, &sdh3_lock},
+       {PXA1928_CLK_SDH4, "sdh4_clk", "sdh_div", CLK_SET_RATE_PARENT, PXA1928_CLK_SDH4 * 4, 0x1b, 0x1b, 0x0, 0, &sdh4_lock},
+};
+
+static void pxa1928_axi_periph_clk_init(struct pxa1928_clk_unit *pxa_unit)
+{
+       struct mmp_clk_unit *unit = &pxa_unit->unit;
+
+       mmp_register_mux_clks(unit, apmu_mux_clks, pxa_unit->apmu_base,
+                               ARRAY_SIZE(apmu_mux_clks));
+
+       mmp_register_div_clks(unit, apmu_div_clks, pxa_unit->apmu_base,
+                               ARRAY_SIZE(apmu_div_clks));
+
+       mmp_register_gate_clks(unit, apmu_gate_clks, pxa_unit->apmu_base,
+                               ARRAY_SIZE(apmu_gate_clks));
+}
+
+static void pxa1928_clk_reset_init(struct device_node *np,
+                               struct pxa1928_clk_unit *pxa_unit)
+{
+       struct mmp_clk_reset_cell *cells;
+       int i, base, nr_resets;
+
+       nr_resets = ARRAY_SIZE(apbc_gate_clks);
+       cells = kcalloc(nr_resets, sizeof(*cells), GFP_KERNEL);
+       if (!cells)
+               return;
+
+       base = 0;
+       for (i = 0; i < nr_resets; i++) {
+               cells[base + i].clk_id = apbc_gate_clks[i].id;
+               cells[base + i].reg =
+                       pxa_unit->apbc_base + apbc_gate_clks[i].offset;
+               cells[base + i].flags = 0;
+               cells[base + i].lock = apbc_gate_clks[i].lock;
+               cells[base + i].bits = 0x4;
+       }
+
+       mmp_clk_reset_register(np, cells, nr_resets);
+}
+
+static void __init pxa1928_mpmu_clk_init(struct device_node *np)
+{
+       struct pxa1928_clk_unit *pxa_unit;
+
+       pxa_unit = kzalloc(sizeof(*pxa_unit), GFP_KERNEL);
+       if (!pxa_unit)
+               return;
+
+       pxa_unit->mpmu_base = of_iomap(np, 0);
+       if (!pxa_unit->mpmu_base) {
+               pr_err("failed to map mpmu registers\n");
+               return;
+       }
+
+       pxa1928_pll_init(pxa_unit);
+}
+CLK_OF_DECLARE(pxa1928_mpmu_clk, "marvell,pxa1928-mpmu", pxa1928_mpmu_clk_init);
+
+static void __init pxa1928_apmu_clk_init(struct device_node *np)
+{
+       struct pxa1928_clk_unit *pxa_unit;
+
+       pxa_unit = kzalloc(sizeof(*pxa_unit), GFP_KERNEL);
+       if (!pxa_unit)
+               return;
+
+       pxa_unit->apmu_base = of_iomap(np, 0);
+       if (!pxa_unit->apmu_base) {
+               pr_err("failed to map apmu registers\n");
+               return;
+       }
+
+       mmp_clk_init(np, &pxa_unit->unit, PXA1928_APMU_NR_CLKS);
+
+       pxa1928_axi_periph_clk_init(pxa_unit);
+}
+CLK_OF_DECLARE(pxa1928_apmu_clk, "marvell,pxa1928-apmu", pxa1928_apmu_clk_init);
+
+static void __init pxa1928_apbc_clk_init(struct device_node *np)
+{
+       struct pxa1928_clk_unit *pxa_unit;
+
+       pxa_unit = kzalloc(sizeof(*pxa_unit), GFP_KERNEL);
+       if (!pxa_unit)
+               return;
+
+       pxa_unit->apbc_base = of_iomap(np, 0);
+       if (!pxa_unit->apbc_base) {
+               pr_err("failed to map apbc registers\n");
+               return;
+       }
+
+       mmp_clk_init(np, &pxa_unit->unit, PXA1928_APBC_NR_CLKS);
+
+       pxa1928_apb_periph_clk_init(pxa_unit);
+       pxa1928_clk_reset_init(np, pxa_unit);
+}
+CLK_OF_DECLARE(pxa1928_apbc_clk, "marvell,pxa1928-apbc", pxa1928_apbc_clk_init);
index 5e3c80dad336e0ea1857f39be3ccccadf7b947f8..13d6173326a435ad46af652f27bfc20e32302e04 100644 (file)
@@ -35,6 +35,8 @@
 #define APBC_SSP0      0x1c
 #define APBC_SSP1      0x20
 #define APBC_SSP2      0x4c
+#define APBC_TIMER0    0x30
+#define APBC_TIMER1    0x44
 #define APBCP_TWSI1    0x28
 #define APBCP_UART2    0x1c
 #define APMU_SDH0      0x54
@@ -57,6 +59,7 @@ static struct mmp_param_fixed_rate_clk fixed_rate_clks[] = {
        {PXA910_CLK_CLK32, "clk32", NULL, CLK_IS_ROOT, 32768},
        {PXA910_CLK_VCTCXO, "vctcxo", NULL, CLK_IS_ROOT, 26000000},
        {PXA910_CLK_PLL1, "pll1", NULL, CLK_IS_ROOT, 624000000},
+       {PXA910_CLK_USB_PLL, "usb_pll", NULL, CLK_IS_ROOT, 480000000},
 };
 
 static struct mmp_param_fixed_factor_clk fixed_factor_clks[] = {
@@ -69,6 +72,7 @@ static struct mmp_param_fixed_factor_clk fixed_factor_clks[] = {
        {PXA910_CLK_PLL1_24, "pll1_24", "pll1_12", 1, 2, 0},
        {PXA910_CLK_PLL1_48, "pll1_48", "pll1_24", 1, 2, 0},
        {PXA910_CLK_PLL1_96, "pll1_96", "pll1_48", 1, 2, 0},
+       {PXA910_CLK_PLL1_192, "pll1_192", "pll1_96", 1, 2, 0},
        {PXA910_CLK_PLL1_13, "pll1_13", "pll1", 1, 13, 0},
        {PXA910_CLK_PLL1_13_1_5, "pll1_13_1_5", "pll1_13", 2, 3, 0},
        {PXA910_CLK_PLL1_2_1_5, "pll1_2_1_5", "pll1_2", 2, 3, 0},
@@ -115,6 +119,10 @@ static DEFINE_SPINLOCK(ssp0_lock);
 static DEFINE_SPINLOCK(ssp1_lock);
 static const char *ssp_parent_names[] = {"pll1_96", "pll1_48", "pll1_24", "pll1_12"};
 
+static DEFINE_SPINLOCK(timer0_lock);
+static DEFINE_SPINLOCK(timer1_lock);
+static const char *timer_parent_names[] = {"pll1_48", "clk32", "pll1_96"};
+
 static DEFINE_SPINLOCK(reset_lock);
 
 static struct mmp_param_mux_clk apbc_mux_clks[] = {
@@ -122,6 +130,8 @@ static struct mmp_param_mux_clk apbc_mux_clks[] = {
        {0, "uart1_mux", uart_parent_names, ARRAY_SIZE(uart_parent_names), CLK_SET_RATE_PARENT, APBC_UART1, 4, 3, 0, &uart1_lock},
        {0, "ssp0_mux", ssp_parent_names, ARRAY_SIZE(ssp_parent_names), CLK_SET_RATE_PARENT, APBC_SSP0, 4, 3, 0, &ssp0_lock},
        {0, "ssp1_mux", ssp_parent_names, ARRAY_SIZE(ssp_parent_names), CLK_SET_RATE_PARENT, APBC_SSP1, 4, 3, 0, &ssp1_lock},
+       {0, "timer0_mux", timer_parent_names, ARRAY_SIZE(timer_parent_names), CLK_SET_RATE_PARENT, APBC_TIMER0, 4, 3, 0, &timer0_lock},
+       {0, "timer1_mux", timer_parent_names, ARRAY_SIZE(timer_parent_names), CLK_SET_RATE_PARENT, APBC_TIMER1, 4, 3, 0, &timer1_lock},
 };
 
 static struct mmp_param_mux_clk apbcp_mux_clks[] = {
@@ -142,6 +152,8 @@ static struct mmp_param_gate_clk apbc_gate_clks[] = {
        {PXA910_CLK_UART1, "uart1_clk", "uart1_mux", CLK_SET_RATE_PARENT, APBC_UART1, 0x3, 0x3, 0x0, 0, &uart1_lock},
        {PXA910_CLK_SSP0, "ssp0_clk", "ssp0_mux", CLK_SET_RATE_PARENT, APBC_SSP0, 0x3, 0x3, 0x0, 0, &ssp0_lock},
        {PXA910_CLK_SSP1, "ssp1_clk", "ssp1_mux", CLK_SET_RATE_PARENT, APBC_SSP1, 0x3, 0x3, 0x0, 0, &ssp1_lock},
+       {PXA910_CLK_TIMER0, "timer0_clk", "timer0_mux", CLK_SET_RATE_PARENT, APBC_TIMER0, 0x3, 0x3, 0x0, 0, &timer0_lock},
+       {PXA910_CLK_TIMER1, "timer1_clk", "timer1_mux", CLK_SET_RATE_PARENT, APBC_TIMER1, 0x3, 0x3, 0x0, 0, &timer1_lock},
 };
 
 static struct mmp_param_gate_clk apbcp_gate_clks[] = {
index 756f0f39d6a3d9f4d3ffcd19a9eae6166c7ba3e3..2c7c1085f88300ca84ac529831876af25376a047 100644 (file)
@@ -163,6 +163,7 @@ static const struct clk_gating_soc_desc a370_gating_desc[] __initconst = {
        { "pex1", "pex1_en", 9, 0 },
        { "sata0", NULL, 15, 0 },
        { "sdio", NULL, 17, 0 },
+       { "crypto", NULL, 23, CLK_IGNORE_UNUSED },
        { "tdm", NULL, 25, 0 },
        { "ddr", NULL, 28, CLK_IGNORE_UNUSED },
        { "sata1", NULL, 30, 0 },
index 22d136aa699ff1b15a73247e183d4a81d98726c7..32216f9b7f03e9b5540106d29c842cd68cf9b60a 100644 (file)
@@ -77,12 +77,12 @@ static void __init clk_misc_init(void)
        writel_relaxed(30 << BP_FRAC_IOFRAC, FRAC + SET);
 }
 
-static const char *sel_pll[]  __initdata = { "pll", "ref_xtal", };
-static const char *sel_cpu[]  __initdata = { "ref_cpu", "ref_xtal", };
-static const char *sel_pix[]  __initdata = { "ref_pix", "ref_xtal", };
-static const char *sel_io[]   __initdata = { "ref_io", "ref_xtal", };
-static const char *cpu_sels[] __initdata = { "cpu_pll", "cpu_xtal", };
-static const char *emi_sels[] __initdata = { "emi_pll", "emi_xtal", };
+static const char *const sel_pll[]  __initconst = { "pll", "ref_xtal", };
+static const char *const sel_cpu[]  __initconst = { "ref_cpu", "ref_xtal", };
+static const char *const sel_pix[]  __initconst = { "ref_pix", "ref_xtal", };
+static const char *const sel_io[]   __initconst = { "ref_io", "ref_xtal", };
+static const char *const cpu_sels[] __initconst = { "cpu_pll", "cpu_xtal", };
+static const char *const emi_sels[] __initconst = { "emi_pll", "emi_xtal", };
 
 enum imx23_clk {
        ref_xtal, pll, ref_cpu, ref_emi, ref_pix, ref_io, saif_sel,
index b1be3746ce958dad95fe396c0621b3f4ae248b29..a68670868baacd42686b4ce61471838b02ee4905 100644 (file)
@@ -125,15 +125,15 @@ static void __init clk_misc_init(void)
        writel_relaxed(val, FRAC0);
 }
 
-static const char *sel_cpu[]  __initdata = { "ref_cpu", "ref_xtal", };
-static const char *sel_io0[]  __initdata = { "ref_io0", "ref_xtal", };
-static const char *sel_io1[]  __initdata = { "ref_io1", "ref_xtal", };
-static const char *sel_pix[]  __initdata = { "ref_pix", "ref_xtal", };
-static const char *sel_gpmi[] __initdata = { "ref_gpmi", "ref_xtal", };
-static const char *sel_pll0[] __initdata = { "pll0", "ref_xtal", };
-static const char *cpu_sels[] __initdata = { "cpu_pll", "cpu_xtal", };
-static const char *emi_sels[] __initdata = { "emi_pll", "emi_xtal", };
-static const char *ptp_sels[] __initdata = { "ref_xtal", "pll0", };
+static const char *const sel_cpu[]  __initconst = { "ref_cpu", "ref_xtal", };
+static const char *const sel_io0[]  __initconst = { "ref_io0", "ref_xtal", };
+static const char *const sel_io1[]  __initconst = { "ref_io1", "ref_xtal", };
+static const char *const sel_pix[]  __initconst = { "ref_pix", "ref_xtal", };
+static const char *const sel_gpmi[] __initconst = { "ref_gpmi", "ref_xtal", };
+static const char *const sel_pll0[] __initconst = { "pll0", "ref_xtal", };
+static const char *const cpu_sels[] __initconst = { "cpu_pll", "cpu_xtal", };
+static const char *const emi_sels[] __initconst = { "emi_pll", "emi_xtal", };
+static const char *const ptp_sels[] __initconst = { "ref_xtal", "pll0", };
 
 enum imx28_clk {
        ref_xtal, pll0, pll1, pll2, ref_cpu, ref_emi, ref_io0, ref_io1,
index ef10ad9b5daa0b48601670d0e40fe3d01da60dfa..f07d821dd75d5d4a9318d5976c6c84938e3420f9 100644 (file)
@@ -49,7 +49,7 @@ static inline struct clk *mxs_clk_gate(const char *name,
 }
 
 static inline struct clk *mxs_clk_mux(const char *name, void __iomem *reg,
-               u8 shift, u8 width, const char **parent_names, int num_parents)
+               u8 shift, u8 width, const char *const *parent_names, int num_parents)
 {
        return clk_register_mux(NULL, name, parent_names, num_parents,
                                CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
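
The change above tightens mxs_clk_mux() to take "const char *const *". A minimal
sketch of the reason, with made-up names (demo_sels, demo_register and demo_init
are not in the patch): once a parent-name table is marked __initconst it lives in
a read-only init section and is fully const, so only a const-qualified parameter
can accept it without a cast.

#include <linux/init.h>
#include <linux/kernel.h>

/* Illustrative only; not part of the patch. */
static const char *const demo_sels[] __initconst = { "pll", "ref_xtal" };

static void __init demo_register(const char *const *parents, int num)
{
	/* e.g. forward to clk_register_mux(), which accepts this type here */
}

static void __init demo_init(void)
{
	demo_register(demo_sels, ARRAY_SIZE(demo_sels));
}
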
diff --git a/drivers/clk/nxp/Makefile b/drivers/clk/nxp/Makefile
new file mode 100644 (file)
index 0000000..7f608b0
--- /dev/null
@@ -0,0 +1,2 @@
+obj-$(CONFIG_ARCH_LPC18XX)     += clk-lpc18xx-cgu.o
+obj-$(CONFIG_ARCH_LPC18XX)     += clk-lpc18xx-ccu.o
diff --git a/drivers/clk/nxp/clk-lpc18xx-ccu.c b/drivers/clk/nxp/clk-lpc18xx-ccu.c
new file mode 100644 (file)
index 0000000..eeaee97
--- /dev/null
@@ -0,0 +1,293 @@
+/*
+ * Clk driver for NXP LPC18xx/LPC43xx Clock Control Unit (CCU)
+ *
+ * Copyright (C) 2015 Joachim Eastwood <manabian@gmail.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/kernel.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+
+#include <dt-bindings/clock/lpc18xx-ccu.h>
+
+/* Bit defines for CCU branch configuration register */
+#define LPC18XX_CCU_RUN                BIT(0)
+#define LPC18XX_CCU_AUTO       BIT(1)
+#define LPC18XX_CCU_DIV                BIT(5)
+#define LPC18XX_CCU_DIVSTAT    BIT(27)
+
+/* CCU branch feature bits */
+#define CCU_BRANCH_IS_BUS      BIT(0)
+#define CCU_BRANCH_HAVE_DIV2   BIT(1)
+
+#define to_clk_gate(_hw) container_of(_hw, struct clk_gate, hw)
+
+struct lpc18xx_branch_clk_data {
+       const char **name;
+       int num;
+};
+
+struct lpc18xx_clk_branch {
+       const char *base_name;
+       const char *name;
+       u16 offset;
+       u16 flags;
+       struct clk *clk;
+       struct clk_gate gate;
+};
+
+static struct lpc18xx_clk_branch clk_branches[] = {
+       {"base_apb3_clk", "apb3_bus",           CLK_APB3_BUS,           CCU_BRANCH_IS_BUS},
+       {"base_apb3_clk", "apb3_i2c1",          CLK_APB3_I2C1,          0},
+       {"base_apb3_clk", "apb3_dac",           CLK_APB3_DAC,           0},
+       {"base_apb3_clk", "apb3_adc0",          CLK_APB3_ADC0,          0},
+       {"base_apb3_clk", "apb3_adc1",          CLK_APB3_ADC1,          0},
+       {"base_apb3_clk", "apb3_can0",          CLK_APB3_CAN0,          0},
+
+       {"base_apb1_clk", "apb1_bus",           CLK_APB1_BUS,           CCU_BRANCH_IS_BUS},
+       {"base_apb1_clk", "apb1_mc_pwm",        CLK_APB1_MOTOCON_PWM,   0},
+       {"base_apb1_clk", "apb1_i2c0",          CLK_APB1_I2C0,          0},
+       {"base_apb1_clk", "apb1_i2s",           CLK_APB1_I2S,           0},
+       {"base_apb1_clk", "apb1_can1",          CLK_APB1_CAN1,          0},
+
+       {"base_spifi_clk", "spifi",             CLK_SPIFI,              0},
+
+       {"base_cpu_clk", "cpu_bus",             CLK_CPU_BUS,            CCU_BRANCH_IS_BUS},
+       {"base_cpu_clk", "cpu_spifi",           CLK_CPU_SPIFI,          0},
+       {"base_cpu_clk", "cpu_gpio",            CLK_CPU_GPIO,           0},
+       {"base_cpu_clk", "cpu_lcd",             CLK_CPU_LCD,            0},
+       {"base_cpu_clk", "cpu_ethernet",        CLK_CPU_ETHERNET,       0},
+       {"base_cpu_clk", "cpu_usb0",            CLK_CPU_USB0,           0},
+       {"base_cpu_clk", "cpu_emc",             CLK_CPU_EMC,            0},
+       {"base_cpu_clk", "cpu_sdio",            CLK_CPU_SDIO,           0},
+       {"base_cpu_clk", "cpu_dma",             CLK_CPU_DMA,            0},
+       {"base_cpu_clk", "cpu_core",            CLK_CPU_CORE,           0},
+       {"base_cpu_clk", "cpu_sct",             CLK_CPU_SCT,            0},
+       {"base_cpu_clk", "cpu_usb1",            CLK_CPU_USB1,           0},
+       {"base_cpu_clk", "cpu_emcdiv",          CLK_CPU_EMCDIV,         CCU_BRANCH_HAVE_DIV2},
+       {"base_cpu_clk", "cpu_flasha",          CLK_CPU_FLASHA,         CCU_BRANCH_HAVE_DIV2},
+       {"base_cpu_clk", "cpu_flashb",          CLK_CPU_FLASHB,         CCU_BRANCH_HAVE_DIV2},
+       {"base_cpu_clk", "cpu_m0app",           CLK_CPU_M0APP,          CCU_BRANCH_HAVE_DIV2},
+       {"base_cpu_clk", "cpu_adchs",           CLK_CPU_ADCHS,          CCU_BRANCH_HAVE_DIV2},
+       {"base_cpu_clk", "cpu_eeprom",          CLK_CPU_EEPROM,         CCU_BRANCH_HAVE_DIV2},
+       {"base_cpu_clk", "cpu_wwdt",            CLK_CPU_WWDT,           0},
+       {"base_cpu_clk", "cpu_uart0",           CLK_CPU_UART0,          0},
+       {"base_cpu_clk", "cpu_uart1",           CLK_CPU_UART1,          0},
+       {"base_cpu_clk", "cpu_ssp0",            CLK_CPU_SSP0,           0},
+       {"base_cpu_clk", "cpu_timer0",          CLK_CPU_TIMER0,         0},
+       {"base_cpu_clk", "cpu_timer1",          CLK_CPU_TIMER1,         0},
+       {"base_cpu_clk", "cpu_scu",             CLK_CPU_SCU,            0},
+       {"base_cpu_clk", "cpu_creg",            CLK_CPU_CREG,           0},
+       {"base_cpu_clk", "cpu_ritimer",         CLK_CPU_RITIMER,        0},
+       {"base_cpu_clk", "cpu_uart2",           CLK_CPU_UART2,          0},
+       {"base_cpu_clk", "cpu_uart3",           CLK_CPU_UART3,          0},
+       {"base_cpu_clk", "cpu_timer2",          CLK_CPU_TIMER2,         0},
+       {"base_cpu_clk", "cpu_timer3",          CLK_CPU_TIMER3,         0},
+       {"base_cpu_clk", "cpu_ssp1",            CLK_CPU_SSP1,           0},
+       {"base_cpu_clk", "cpu_qei",             CLK_CPU_QEI,            0},
+
+       {"base_periph_clk", "periph_bus",       CLK_PERIPH_BUS,         CCU_BRANCH_IS_BUS},
+       {"base_periph_clk", "periph_core",      CLK_PERIPH_CORE,        0},
+       {"base_periph_clk", "periph_sgpio",     CLK_PERIPH_SGPIO,       0},
+
+       {"base_usb0_clk",  "usb0",              CLK_USB0,               0},
+       {"base_usb1_clk",  "usb1",              CLK_USB1,               0},
+       {"base_spi_clk",   "spi",               CLK_SPI,                0},
+       {"base_adchs_clk", "adchs",             CLK_ADCHS,              0},
+
+       {"base_audio_clk", "audio",             CLK_AUDIO,              0},
+       {"base_uart3_clk", "apb2_uart3",        CLK_APB2_UART3,         0},
+       {"base_uart2_clk", "apb2_uart2",        CLK_APB2_UART2,         0},
+       {"base_uart1_clk", "apb0_uart1",        CLK_APB0_UART1,         0},
+       {"base_uart0_clk", "apb0_uart0",        CLK_APB0_UART0,         0},
+       {"base_ssp1_clk",  "apb2_ssp1",         CLK_APB2_SSP1,          0},
+       {"base_ssp0_clk",  "apb0_ssp0",         CLK_APB0_SSP0,          0},
+       {"base_sdio_clk",  "sdio",              CLK_SDIO,               0},
+};
+
+static struct clk *lpc18xx_ccu_branch_clk_get(struct of_phandle_args *clkspec,
+                                             void *data)
+{
+       struct lpc18xx_branch_clk_data *clk_data = data;
+       unsigned int offset = clkspec->args[0];
+       int i, j;
+
+       for (i = 0; i < ARRAY_SIZE(clk_branches); i++) {
+               if (clk_branches[i].offset != offset)
+                       continue;
+
+               for (j = 0; j < clk_data->num; j++) {
+                       if (!strcmp(clk_branches[i].base_name, clk_data->name[j]))
+                               return clk_branches[i].clk;
+               }
+       }
+
+       pr_err("%s: invalid clock offset %d\n", __func__, offset);
+
+       return ERR_PTR(-EINVAL);
+}
+
+static int lpc18xx_ccu_gate_endisable(struct clk_hw *hw, bool enable)
+{
+       struct clk_gate *gate = to_clk_gate(hw);
+       u32 val;
+
+       /*
+        * The divider field is write-only, so the divider status field must
+        * be read first so the current divider setting can be preserved.
+        */
+       val = clk_readl(gate->reg);
+       if (val & LPC18XX_CCU_DIVSTAT)
+               val |= LPC18XX_CCU_DIV;
+
+       if (enable) {
+               val |= LPC18XX_CCU_RUN;
+       } else {
+               /*
+                * To safely disable a branch clock, a sequence of two separate
+                * writes must be used: the first write sets the AUTO bit and
+                * the second write clears the RUN bit.
+                */
+               val |= LPC18XX_CCU_AUTO;
+               clk_writel(val, gate->reg);
+
+               val &= ~LPC18XX_CCU_RUN;
+       }
+
+       clk_writel(val, gate->reg);
+
+       return 0;
+}
+
+static int lpc18xx_ccu_gate_enable(struct clk_hw *hw)
+{
+       return lpc18xx_ccu_gate_endisable(hw, true);
+}
+
+static void lpc18xx_ccu_gate_disable(struct clk_hw *hw)
+{
+       lpc18xx_ccu_gate_endisable(hw, false);
+}
+
+static int lpc18xx_ccu_gate_is_enabled(struct clk_hw *hw)
+{
+       struct clk_gate *gate = to_clk_gate(hw);
+
+       return clk_readl(gate->reg) & LPC18XX_CCU_RUN;
+}
+
+static const struct clk_ops lpc18xx_ccu_gate_ops = {
+       .enable         = lpc18xx_ccu_gate_enable,
+       .disable        = lpc18xx_ccu_gate_disable,
+       .is_enabled     = lpc18xx_ccu_gate_is_enabled,
+};
+
+static void lpc18xx_ccu_register_branch_gate_div(struct lpc18xx_clk_branch *branch,
+                                                void __iomem *reg_base,
+                                                const char *parent)
+{
+       const struct clk_ops *div_ops = NULL;
+       struct clk_divider *div = NULL;
+       struct clk_hw *div_hw = NULL;
+
+       if (branch->flags & CCU_BRANCH_HAVE_DIV2) {
+               div = kzalloc(sizeof(*div), GFP_KERNEL);
+               if (!div)
+                       return;
+
+               div->reg = branch->offset + reg_base;
+               div->flags = CLK_DIVIDER_READ_ONLY;
+               div->shift = 27;
+               div->width = 1;
+
+               div_hw = &div->hw;
+               div_ops = &clk_divider_ops;
+       }
+
+       branch->gate.reg = branch->offset + reg_base;
+       branch->gate.bit_idx = 0;
+
+       branch->clk = clk_register_composite(NULL, branch->name, &parent, 1,
+                                            NULL, NULL,
+                                            div_hw, div_ops,
+                                            &branch->gate.hw, &lpc18xx_ccu_gate_ops, 0);
+       if (IS_ERR(branch->clk)) {
+               kfree(div);
+               pr_warn("%s: failed to register %s\n", __func__, branch->name);
+               return;
+       }
+
+       /* Grab essential branch clocks for CPU and SDRAM */
+       switch (branch->offset) {
+       case CLK_CPU_EMC:
+       case CLK_CPU_CORE:
+       case CLK_CPU_CREG:
+       case CLK_CPU_EMCDIV:
+               clk_prepare_enable(branch->clk);
+       }
+}
+
+static void lpc18xx_ccu_register_branch_clks(void __iomem *reg_base,
+                                            const char *base_name)
+{
+       const char *parent = base_name;
+       int i;
+
+       for (i = 0; i < ARRAY_SIZE(clk_branches); i++) {
+               if (strcmp(clk_branches[i].base_name, base_name))
+                       continue;
+
+               lpc18xx_ccu_register_branch_gate_div(&clk_branches[i], reg_base,
+                                                    parent);
+
+               if (clk_branches[i].flags & CCU_BRANCH_IS_BUS)
+                       parent = clk_branches[i].name;
+       }
+}
+
+static void __init lpc18xx_ccu_init(struct device_node *np)
+{
+       struct lpc18xx_branch_clk_data *clk_data;
+       void __iomem *reg_base;
+       int i, ret;
+
+       reg_base = of_iomap(np, 0);
+       if (!reg_base) {
+               pr_warn("%s: failed to map address range\n", __func__);
+               return;
+       }
+
+       clk_data = kzalloc(sizeof(*clk_data), GFP_KERNEL);
+       if (!clk_data)
+               return;
+
+       clk_data->num = of_property_count_strings(np, "clock-names");
+       clk_data->name = kcalloc(clk_data->num, sizeof(char *), GFP_KERNEL);
+       if (!clk_data->name) {
+               kfree(clk_data);
+               return;
+       }
+
+       for (i = 0; i < clk_data->num; i++) {
+               ret = of_property_read_string_index(np, "clock-names", i,
+                                                   &clk_data->name[i]);
+               if (ret) {
+                       pr_warn("%s: failed to get clock name at idx %d\n",
+                               __func__, i);
+                       continue;
+               }
+
+               lpc18xx_ccu_register_branch_clks(reg_base, clk_data->name[i]);
+       }
+
+       of_clk_add_provider(np, lpc18xx_ccu_branch_clk_get, clk_data);
+}
+CLK_OF_DECLARE(lpc18xx_ccu, "nxp,lpc1850-ccu", lpc18xx_ccu_init);
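
For context, a consumer-side sketch (demo_probe() is a made-up example, not part
of the patch): an LPC18xx peripheral driver picks up one of the branch clocks
registered above through the regular clk API, and the provider callback
lpc18xx_ccu_branch_clk_get() resolves the phandle argument (the branch register
offset) to the matching struct clk.

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/platform_device.h>

static int demo_probe(struct platform_device *pdev)
{
	struct clk *clk;

	/* First clock listed in the consumer's "clocks" property */
	clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	/* Ends up in lpc18xx_ccu_gate_enable(), which sets the RUN bit */
	return clk_prepare_enable(clk);
}
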
diff --git a/drivers/clk/nxp/clk-lpc18xx-cgu.c b/drivers/clk/nxp/clk-lpc18xx-cgu.c
new file mode 100644 (file)
index 0000000..81e9e1c
--- /dev/null
@@ -0,0 +1,635 @@
+/*
+ * Clk driver for NXP LPC18xx/LPC43xx Clock Generation Unit (CGU)
+ *
+ * Copyright (C) 2015 Joachim Eastwood <manabian@gmail.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+
+#include <dt-bindings/clock/lpc18xx-cgu.h>
+
+/* Clock Generation Unit (CGU) registers */
+#define LPC18XX_CGU_XTAL_OSC_CTRL      0x018
+#define LPC18XX_CGU_PLL0USB_STAT       0x01c
+#define LPC18XX_CGU_PLL0USB_CTRL       0x020
+#define LPC18XX_CGU_PLL0USB_MDIV       0x024
+#define LPC18XX_CGU_PLL0USB_NP_DIV     0x028
+#define LPC18XX_CGU_PLL0AUDIO_STAT     0x02c
+#define LPC18XX_CGU_PLL0AUDIO_CTRL     0x030
+#define LPC18XX_CGU_PLL0AUDIO_MDIV     0x034
+#define LPC18XX_CGU_PLL0AUDIO_NP_DIV   0x038
+#define LPC18XX_CGU_PLL0AUDIO_FRAC     0x03c
+#define LPC18XX_CGU_PLL1_STAT          0x040
+#define LPC18XX_CGU_PLL1_CTRL          0x044
+#define  LPC18XX_PLL1_CTRL_FBSEL       BIT(6)
+#define  LPC18XX_PLL1_CTRL_DIRECT      BIT(7)
+#define LPC18XX_CGU_IDIV_CTRL(n)       (0x048 + (n) * sizeof(u32))
+#define LPC18XX_CGU_BASE_CLK(id)       (0x05c + (id) * sizeof(u32))
+#define LPC18XX_CGU_PLL_CTRL_OFFSET    0x4
+
+/* PLL0 bits common to both audio and USB PLL */
+#define LPC18XX_PLL0_STAT_LOCK         BIT(0)
+#define LPC18XX_PLL0_CTRL_PD           BIT(0)
+#define LPC18XX_PLL0_CTRL_BYPASS       BIT(1)
+#define LPC18XX_PLL0_CTRL_DIRECTI      BIT(2)
+#define LPC18XX_PLL0_CTRL_DIRECTO      BIT(3)
+#define LPC18XX_PLL0_CTRL_CLKEN                BIT(4)
+#define LPC18XX_PLL0_MDIV_MDEC_MASK    0x1ffff
+#define LPC18XX_PLL0_MDIV_SELP_SHIFT   17
+#define LPC18XX_PLL0_MDIV_SELI_SHIFT   22
+#define LPC18XX_PLL0_MSEL_MAX          BIT(15)
+
+/* Register value that gives PLL0 post/pre dividers equal to 1 */
+#define LPC18XX_PLL0_NP_DIVS_1         0x00302062
+
+enum {
+       CLK_SRC_OSC32,
+       CLK_SRC_IRC,
+       CLK_SRC_ENET_RX_CLK,
+       CLK_SRC_ENET_TX_CLK,
+       CLK_SRC_GP_CLKIN,
+       CLK_SRC_RESERVED1,
+       CLK_SRC_OSC,
+       CLK_SRC_PLL0USB,
+       CLK_SRC_PLL0AUDIO,
+       CLK_SRC_PLL1,
+       CLK_SRC_RESERVED2,
+       CLK_SRC_RESERVED3,
+       CLK_SRC_IDIVA,
+       CLK_SRC_IDIVB,
+       CLK_SRC_IDIVC,
+       CLK_SRC_IDIVD,
+       CLK_SRC_IDIVE,
+       CLK_SRC_MAX
+};
+
+static const char *clk_src_names[CLK_SRC_MAX] = {
+       [CLK_SRC_OSC32]         = "osc32",
+       [CLK_SRC_IRC]           = "irc",
+       [CLK_SRC_ENET_RX_CLK]   = "enet_rx_clk",
+       [CLK_SRC_ENET_TX_CLK]   = "enet_tx_clk",
+       [CLK_SRC_GP_CLKIN]      = "gp_clkin",
+       [CLK_SRC_OSC]           = "osc",
+       [CLK_SRC_PLL0USB]       = "pll0usb",
+       [CLK_SRC_PLL0AUDIO]     = "pll0audio",
+       [CLK_SRC_PLL1]          = "pll1",
+       [CLK_SRC_IDIVA]         = "idiva",
+       [CLK_SRC_IDIVB]         = "idivb",
+       [CLK_SRC_IDIVC]         = "idivc",
+       [CLK_SRC_IDIVD]         = "idivd",
+       [CLK_SRC_IDIVE]         = "idive",
+};
+
+static const char *clk_base_names[BASE_CLK_MAX] = {
+       [BASE_SAFE_CLK]         = "base_safe_clk",
+       [BASE_USB0_CLK]         = "base_usb0_clk",
+       [BASE_PERIPH_CLK]       = "base_periph_clk",
+       [BASE_USB1_CLK]         = "base_usb1_clk",
+       [BASE_CPU_CLK]          = "base_cpu_clk",
+       [BASE_SPIFI_CLK]        = "base_spifi_clk",
+       [BASE_SPI_CLK]          = "base_spi_clk",
+       [BASE_PHY_RX_CLK]       = "base_phy_rx_clk",
+       [BASE_PHY_TX_CLK]       = "base_phy_tx_clk",
+       [BASE_APB1_CLK]         = "base_apb1_clk",
+       [BASE_APB3_CLK]         = "base_apb3_clk",
+       [BASE_LCD_CLK]          = "base_lcd_clk",
+       [BASE_ADCHS_CLK]        = "base_adchs_clk",
+       [BASE_SDIO_CLK]         = "base_sdio_clk",
+       [BASE_SSP0_CLK]         = "base_ssp0_clk",
+       [BASE_SSP1_CLK]         = "base_ssp1_clk",
+       [BASE_UART0_CLK]        = "base_uart0_clk",
+       [BASE_UART1_CLK]        = "base_uart1_clk",
+       [BASE_UART2_CLK]        = "base_uart2_clk",
+       [BASE_UART3_CLK]        = "base_uart3_clk",
+       [BASE_OUT_CLK]          = "base_out_clk",
+       [BASE_AUDIO_CLK]        = "base_audio_clk",
+       [BASE_CGU_OUT0_CLK]     = "base_cgu_out0_clk",
+       [BASE_CGU_OUT1_CLK]     = "base_cgu_out1_clk",
+};
+
+static u32 lpc18xx_cgu_pll0_src_ids[] = {
+       CLK_SRC_OSC32, CLK_SRC_IRC, CLK_SRC_ENET_RX_CLK,
+       CLK_SRC_ENET_TX_CLK, CLK_SRC_GP_CLKIN, CLK_SRC_OSC,
+       CLK_SRC_PLL1, CLK_SRC_IDIVA, CLK_SRC_IDIVB, CLK_SRC_IDIVC,
+       CLK_SRC_IDIVD, CLK_SRC_IDIVE,
+};
+
+static u32 lpc18xx_cgu_pll1_src_ids[] = {
+       CLK_SRC_OSC32, CLK_SRC_IRC, CLK_SRC_ENET_RX_CLK,
+       CLK_SRC_ENET_TX_CLK, CLK_SRC_GP_CLKIN, CLK_SRC_OSC,
+       CLK_SRC_PLL0USB, CLK_SRC_PLL0AUDIO, CLK_SRC_IDIVA,
+       CLK_SRC_IDIVB, CLK_SRC_IDIVC, CLK_SRC_IDIVD, CLK_SRC_IDIVE,
+};
+
+static u32 lpc18xx_cgu_idiva_src_ids[] = {
+       CLK_SRC_OSC32, CLK_SRC_IRC, CLK_SRC_ENET_RX_CLK,
+       CLK_SRC_ENET_TX_CLK, CLK_SRC_GP_CLKIN, CLK_SRC_OSC,
+       CLK_SRC_PLL0USB, CLK_SRC_PLL0AUDIO, CLK_SRC_PLL1
+};
+
+static u32 lpc18xx_cgu_idivbcde_src_ids[] = {
+       CLK_SRC_OSC32, CLK_SRC_IRC, CLK_SRC_ENET_RX_CLK,
+       CLK_SRC_ENET_TX_CLK, CLK_SRC_GP_CLKIN, CLK_SRC_OSC,
+       CLK_SRC_PLL0AUDIO, CLK_SRC_PLL1, CLK_SRC_IDIVA,
+};
+
+static u32 lpc18xx_cgu_base_irc_src_ids[] = {CLK_SRC_IRC};
+
+static u32 lpc18xx_cgu_base_usb0_src_ids[] = {CLK_SRC_PLL0USB};
+
+static u32 lpc18xx_cgu_base_common_src_ids[] = {
+       CLK_SRC_OSC32, CLK_SRC_IRC, CLK_SRC_ENET_RX_CLK,
+       CLK_SRC_ENET_TX_CLK, CLK_SRC_GP_CLKIN, CLK_SRC_OSC,
+       CLK_SRC_PLL0AUDIO, CLK_SRC_PLL1, CLK_SRC_IDIVA,
+       CLK_SRC_IDIVB, CLK_SRC_IDIVC, CLK_SRC_IDIVD, CLK_SRC_IDIVE,
+};
+
+static u32 lpc18xx_cgu_base_all_src_ids[] = {
+       CLK_SRC_OSC32, CLK_SRC_IRC, CLK_SRC_ENET_RX_CLK,
+       CLK_SRC_ENET_TX_CLK, CLK_SRC_GP_CLKIN, CLK_SRC_OSC,
+       CLK_SRC_PLL0USB, CLK_SRC_PLL0AUDIO, CLK_SRC_PLL1,
+       CLK_SRC_IDIVA, CLK_SRC_IDIVB, CLK_SRC_IDIVC,
+       CLK_SRC_IDIVD, CLK_SRC_IDIVE,
+};
+
+struct lpc18xx_cgu_src_clk_div {
+       u8 clk_id;
+       u8 n_parents;
+       struct clk_divider      div;
+       struct clk_mux          mux;
+       struct clk_gate         gate;
+};
+
+#define LPC1XX_CGU_SRC_CLK_DIV(_id, _width, _table)    \
+{                                                      \
+       .clk_id = CLK_SRC_ ##_id,                       \
+       .n_parents = ARRAY_SIZE(lpc18xx_cgu_ ##_table), \
+       .div = {                                        \
+               .shift = 2,                             \
+               .width = _width,                        \
+       },                                              \
+       .mux = {                                        \
+               .mask = 0x1f,                           \
+               .shift = 24,                            \
+               .table = lpc18xx_cgu_ ##_table,         \
+       },                                              \
+       .gate = {                                       \
+               .bit_idx = 0,                           \
+               .flags = CLK_GATE_SET_TO_DISABLE,       \
+       },                                              \
+}
+
+static struct lpc18xx_cgu_src_clk_div lpc18xx_cgu_src_clk_divs[] = {
+       LPC1XX_CGU_SRC_CLK_DIV(IDIVA, 2, idiva_src_ids),
+       LPC1XX_CGU_SRC_CLK_DIV(IDIVB, 4, idivbcde_src_ids),
+       LPC1XX_CGU_SRC_CLK_DIV(IDIVC, 4, idivbcde_src_ids),
+       LPC1XX_CGU_SRC_CLK_DIV(IDIVD, 4, idivbcde_src_ids),
+       LPC1XX_CGU_SRC_CLK_DIV(IDIVE, 8, idivbcde_src_ids),
+};
+
+struct lpc18xx_cgu_base_clk {
+       u8 clk_id;
+       u8 n_parents;
+       struct clk_mux mux;
+       struct clk_gate gate;
+};
+
+#define LPC1XX_CGU_BASE_CLK(_id, _table, _flags)       \
+{                                                      \
+       .clk_id = BASE_ ##_id ##_CLK,                   \
+       .n_parents = ARRAY_SIZE(lpc18xx_cgu_ ##_table), \
+       .mux = {                                        \
+               .mask = 0x1f,                           \
+               .shift = 24,                            \
+               .table = lpc18xx_cgu_ ##_table,         \
+               .flags = _flags,                        \
+       },                                              \
+       .gate = {                                       \
+               .bit_idx = 0,                           \
+               .flags = CLK_GATE_SET_TO_DISABLE,       \
+       },                                              \
+}
+
+static struct lpc18xx_cgu_base_clk lpc18xx_cgu_base_clks[] = {
+       LPC1XX_CGU_BASE_CLK(SAFE,       base_irc_src_ids, CLK_MUX_READ_ONLY),
+       LPC1XX_CGU_BASE_CLK(USB0,       base_usb0_src_ids,   0),
+       LPC1XX_CGU_BASE_CLK(PERIPH,     base_common_src_ids, 0),
+       LPC1XX_CGU_BASE_CLK(USB1,       base_all_src_ids,    0),
+       LPC1XX_CGU_BASE_CLK(CPU,        base_common_src_ids, 0),
+       LPC1XX_CGU_BASE_CLK(SPIFI,      base_common_src_ids, 0),
+       LPC1XX_CGU_BASE_CLK(SPI,        base_common_src_ids, 0),
+       LPC1XX_CGU_BASE_CLK(PHY_RX,     base_common_src_ids, 0),
+       LPC1XX_CGU_BASE_CLK(PHY_TX,     base_common_src_ids, 0),
+       LPC1XX_CGU_BASE_CLK(APB1,       base_common_src_ids, 0),
+       LPC1XX_CGU_BASE_CLK(APB3,       base_common_src_ids, 0),
+       LPC1XX_CGU_BASE_CLK(LCD,        base_common_src_ids, 0),
+       LPC1XX_CGU_BASE_CLK(ADCHS,      base_common_src_ids, 0),
+       LPC1XX_CGU_BASE_CLK(SDIO,       base_common_src_ids, 0),
+       LPC1XX_CGU_BASE_CLK(SSP0,       base_common_src_ids, 0),
+       LPC1XX_CGU_BASE_CLK(SSP1,       base_common_src_ids, 0),
+       LPC1XX_CGU_BASE_CLK(UART0,      base_common_src_ids, 0),
+       LPC1XX_CGU_BASE_CLK(UART1,      base_common_src_ids, 0),
+       LPC1XX_CGU_BASE_CLK(UART2,      base_common_src_ids, 0),
+       LPC1XX_CGU_BASE_CLK(UART3,      base_common_src_ids, 0),
+       LPC1XX_CGU_BASE_CLK(OUT,        base_all_src_ids,    0),
+       { /* 21 reserved */ },
+       { /* 22 reserved */ },
+       { /* 23 reserved */ },
+       { /* 24 reserved */ },
+       LPC1XX_CGU_BASE_CLK(AUDIO,      base_common_src_ids, 0),
+       LPC1XX_CGU_BASE_CLK(CGU_OUT0,   base_all_src_ids,    0),
+       LPC1XX_CGU_BASE_CLK(CGU_OUT1,   base_all_src_ids,    0),
+};
+
+struct lpc18xx_pll {
+       struct          clk_hw hw;
+       void __iomem    *reg;
+       spinlock_t      *lock;
+       u8              flags;
+};
+
+#define to_lpc_pll(hw) container_of(hw, struct lpc18xx_pll, hw)
+
+struct lpc18xx_cgu_pll_clk {
+       u8 clk_id;
+       u8 n_parents;
+       u8 reg_offset;
+       struct clk_mux mux;
+       struct clk_gate gate;
+       struct lpc18xx_pll pll;
+       const struct clk_ops *pll_ops;
+};
+
+#define LPC1XX_CGU_CLK_PLL(_id, _table, _pll_ops)      \
+{                                                      \
+       .clk_id = CLK_SRC_ ##_id,                       \
+       .n_parents = ARRAY_SIZE(lpc18xx_cgu_ ##_table), \
+       .reg_offset = LPC18XX_CGU_ ##_id ##_STAT,       \
+       .mux = {                                        \
+               .mask = 0x1f,                           \
+               .shift = 24,                            \
+               .table = lpc18xx_cgu_ ##_table,         \
+       },                                              \
+       .gate = {                                       \
+               .bit_idx = 0,                           \
+               .flags = CLK_GATE_SET_TO_DISABLE,       \
+       },                                              \
+       .pll_ops = &lpc18xx_ ##_pll_ops,                \
+}
+
+/*
+ * PLL0 uses a special register value encoding. The compute functions below
+ * are taken or derived from the LPC1850 user manual (section 12.6.3.3).
+ */
+
+/* Compute PLL0 multiplier from decoded version */
+static u32 lpc18xx_pll0_mdec2msel(u32 x)
+{
+       int i;
+
+       switch (x) {
+       case 0x18003: return 1;
+       case 0x10003: return 2;
+       default:
+               for (i = LPC18XX_PLL0_MSEL_MAX + 1; x != 0x4000 && i > 0; i--)
+                       x = ((x ^ x >> 14) & 1) | (x << 1 & 0x7fff);
+               return i;
+       }
+}
+/* Compute PLL0 decoded multiplier from binary version */
+static u32 lpc18xx_pll0_msel2mdec(u32 msel)
+{
+       u32 i, x = 0x4000;
+
+       switch (msel) {
+       case 0: return 0;
+       case 1: return 0x18003;
+       case 2: return 0x10003;
+       default:
+               for (i = msel; i <= LPC18XX_PLL0_MSEL_MAX; i++)
+                       x = ((x ^ x >> 1) & 1) << 14 | (x >> 1 & 0xffff);
+               return x;
+       }
+}
+
+/* Compute PLL0 bandwidth SELI reg from multiplier */
+static u32 lpc18xx_pll0_msel2seli(u32 msel)
+{
+       u32 tmp;
+
+       if (msel > 16384) return 1;
+       if (msel >  8192) return 2;
+       if (msel >  2048) return 4;
+       if (msel >=  501) return 8;
+       if (msel >=   60) {
+               tmp = 1024 / (msel + 9);
+               return ((1024 == (tmp * (msel + 9))) == 0) ? tmp * 4 : (tmp + 1) * 4;
+       }
+
+       return (msel & 0x3c) + 4;
+}
+
+/* Compute PLL0 bandwidth SELP reg from multiplier */
+static u32 lpc18xx_pll0_msel2selp(u32 msel)
+{
+       if (msel < 60)
+               return (msel >> 1) + 1;
+
+       return 31;
+}
+
+static unsigned long lpc18xx_pll0_recalc_rate(struct clk_hw *hw,
+                                             unsigned long parent_rate)
+{
+       struct lpc18xx_pll *pll = to_lpc_pll(hw);
+       u32 ctrl, mdiv, msel, npdiv;
+
+       ctrl = clk_readl(pll->reg + LPC18XX_CGU_PLL0USB_CTRL);
+       mdiv = clk_readl(pll->reg + LPC18XX_CGU_PLL0USB_MDIV);
+       npdiv = clk_readl(pll->reg + LPC18XX_CGU_PLL0USB_NP_DIV);
+
+       if (ctrl & LPC18XX_PLL0_CTRL_BYPASS)
+               return parent_rate;
+
+       if (npdiv != LPC18XX_PLL0_NP_DIVS_1) {
+               pr_warn("%s: pre/post dividers not supported\n", __func__);
+               return 0;
+       }
+
+       msel = lpc18xx_pll0_mdec2msel(mdiv & LPC18XX_PLL0_MDIV_MDEC_MASK);
+       if (msel)
+               return 2 * msel * parent_rate;
+
+       pr_warn("%s: unable to calculate rate\n", __func__);
+
+       return 0;
+}
+
+static long lpc18xx_pll0_round_rate(struct clk_hw *hw, unsigned long rate,
+                                   unsigned long *prate)
+{
+       unsigned long m;
+
+       if (*prate < rate) {
+               pr_warn("%s: pll dividers not supported\n", __func__);
+               return -EINVAL;
+       }
+
+       m = DIV_ROUND_UP_ULL(*prate, rate * 2);
+       if (m == 0 || m > LPC18XX_PLL0_MSEL_MAX) {
+               pr_warn("%s: unable to support rate %lu\n", __func__, rate);
+               return -EINVAL;
+       }
+
+       return 2 * *prate * m;
+}
+
+static int lpc18xx_pll0_set_rate(struct clk_hw *hw, unsigned long rate,
+                                unsigned long parent_rate)
+{
+       struct lpc18xx_pll *pll = to_lpc_pll(hw);
+       u32 ctrl, stat, m;
+       int retry = 3;
+
+       if (parent_rate < rate) {
+               pr_warn("%s: pll dividers not supported\n", __func__);
+               return -EINVAL;
+       }
+
+       m = DIV_ROUND_UP_ULL(parent_rate, rate * 2);
+       if (m == 0 || m > LPC18XX_PLL0_MSEL_MAX) {
+               pr_warn("%s: unable to support rate %lu\n", __func__, rate);
+               return -EINVAL;
+       }
+
+       m  = lpc18xx_pll0_msel2mdec(m);
+       m |= lpc18xx_pll0_msel2selp(m) << LPC18XX_PLL0_MDIV_SELP_SHIFT;
+       m |= lpc18xx_pll0_msel2seli(m) << LPC18XX_PLL0_MDIV_SELI_SHIFT;
+
+       /* Power down PLL, disable clk output and dividers */
+       ctrl = clk_readl(pll->reg + LPC18XX_CGU_PLL0USB_CTRL);
+       ctrl |= LPC18XX_PLL0_CTRL_PD;
+       ctrl &= ~(LPC18XX_PLL0_CTRL_BYPASS | LPC18XX_PLL0_CTRL_DIRECTI |
+                 LPC18XX_PLL0_CTRL_DIRECTO | LPC18XX_PLL0_CTRL_CLKEN);
+       clk_writel(ctrl, pll->reg + LPC18XX_CGU_PLL0USB_CTRL);
+
+       /* Configure new PLL settings */
+       clk_writel(m, pll->reg + LPC18XX_CGU_PLL0USB_MDIV);
+       clk_writel(LPC18XX_PLL0_NP_DIVS_1, pll->reg + LPC18XX_CGU_PLL0USB_NP_DIV);
+
+       /* Power up PLL and wait for lock */
+       ctrl &= ~LPC18XX_PLL0_CTRL_PD;
+       clk_writel(ctrl, pll->reg + LPC18XX_CGU_PLL0USB_CTRL);
+       do {
+               udelay(10);
+               stat = clk_readl(pll->reg + LPC18XX_CGU_PLL0USB_STAT);
+               if (stat & LPC18XX_PLL0_STAT_LOCK) {
+                       ctrl |= LPC18XX_PLL0_CTRL_CLKEN;
+                       clk_writel(ctrl, pll->reg + LPC18XX_CGU_PLL0USB_CTRL);
+
+                       return 0;
+               }
+       } while (retry--);
+
+       pr_warn("%s: unable to lock pll\n", __func__);
+
+       return -EINVAL;
+}
+
+static const struct clk_ops lpc18xx_pll0_ops = {
+       .recalc_rate    = lpc18xx_pll0_recalc_rate,
+       .round_rate     = lpc18xx_pll0_round_rate,
+       .set_rate       = lpc18xx_pll0_set_rate,
+};
+
+static unsigned long lpc18xx_pll1_recalc_rate(struct clk_hw *hw,
+                                             unsigned long parent_rate)
+{
+       struct lpc18xx_pll *pll = to_lpc_pll(hw);
+       u16 msel, nsel, psel;
+       bool direct, fbsel;
+       u32 stat, ctrl;
+
+       stat = clk_readl(pll->reg + LPC18XX_CGU_PLL1_STAT);
+       ctrl = clk_readl(pll->reg + LPC18XX_CGU_PLL1_CTRL);
+
+       direct = (ctrl & LPC18XX_PLL1_CTRL_DIRECT) ? true : false;
+       fbsel = (ctrl & LPC18XX_PLL1_CTRL_FBSEL) ? true : false;
+
+       msel = ((ctrl >> 16) & 0xff) + 1;
+       nsel = ((ctrl >> 12) & 0x3) + 1;
+
+       if (direct || fbsel)
+               return msel * (parent_rate / nsel);
+
+       psel = (ctrl >>  8) & 0x3;
+       psel = 1 << psel;
+
+       return (msel / (2 * psel)) * (parent_rate / nsel);
+}
+
+static const struct clk_ops lpc18xx_pll1_ops = {
+       .recalc_rate = lpc18xx_pll1_recalc_rate,
+};
+
+static struct lpc18xx_cgu_pll_clk lpc18xx_cgu_src_clk_plls[] = {
+       LPC1XX_CGU_CLK_PLL(PLL0USB,     pll0_src_ids, pll0_ops),
+       LPC1XX_CGU_CLK_PLL(PLL0AUDIO,   pll0_src_ids, pll0_ops),
+       LPC1XX_CGU_CLK_PLL(PLL1,        pll1_src_ids, pll1_ops),
+};
+
+static void lpc18xx_fill_parent_names(const char **parent, u32 *id, int size)
+{
+       int i;
+
+       for (i = 0; i < size; i++)
+               parent[i] = clk_src_names[id[i]];
+}
+
+static struct clk *lpc18xx_cgu_register_div(struct lpc18xx_cgu_src_clk_div *clk,
+                                           void __iomem *base, int n)
+{
+       void __iomem *reg = base + LPC18XX_CGU_IDIV_CTRL(n);
+       const char *name = clk_src_names[clk->clk_id];
+       const char *parents[CLK_SRC_MAX];
+
+       clk->div.reg = reg;
+       clk->mux.reg = reg;
+       clk->gate.reg = reg;
+
+       lpc18xx_fill_parent_names(parents, clk->mux.table, clk->n_parents);
+
+       return clk_register_composite(NULL, name, parents, clk->n_parents,
+                                     &clk->mux.hw, &clk_mux_ops,
+                                     &clk->div.hw, &clk_divider_ops,
+                                     &clk->gate.hw, &clk_gate_ops, 0);
+}
+
+
+static struct clk *lpc18xx_register_base_clk(struct lpc18xx_cgu_base_clk *clk,
+                                            void __iomem *reg_base, int n)
+{
+       void __iomem *reg = reg_base + LPC18XX_CGU_BASE_CLK(n);
+       const char *name = clk_base_names[clk->clk_id];
+       const char *parents[CLK_SRC_MAX];
+
+       if (clk->n_parents == 0)
+               return ERR_PTR(-ENOENT);
+
+       clk->mux.reg = reg;
+       clk->gate.reg = reg;
+
+       lpc18xx_fill_parent_names(parents, clk->mux.table, clk->n_parents);
+
+       /* SAFE_CLK can not be turned off */
+       if (n == BASE_SAFE_CLK)
+               return clk_register_composite(NULL, name, parents, clk->n_parents,
+                                             &clk->mux.hw, &clk_mux_ops,
+                                             NULL, NULL, NULL, NULL, 0);
+
+       return clk_register_composite(NULL, name, parents, clk->n_parents,
+                                     &clk->mux.hw, &clk_mux_ops,
+                                     NULL,  NULL,
+                                     &clk->gate.hw, &clk_gate_ops, 0);
+}
+
+
+static struct clk *lpc18xx_cgu_register_pll(struct lpc18xx_cgu_pll_clk *clk,
+                                           void __iomem *base)
+{
+       const char *name = clk_src_names[clk->clk_id];
+       const char *parents[CLK_SRC_MAX];
+
+       clk->pll.reg  = base;
+       clk->mux.reg  = base + clk->reg_offset + LPC18XX_CGU_PLL_CTRL_OFFSET;
+       clk->gate.reg = base + clk->reg_offset + LPC18XX_CGU_PLL_CTRL_OFFSET;
+
+       lpc18xx_fill_parent_names(parents, clk->mux.table, clk->n_parents);
+
+       return clk_register_composite(NULL, name, parents, clk->n_parents,
+                                     &clk->mux.hw, &clk_mux_ops,
+                                     &clk->pll.hw, clk->pll_ops,
+                                     &clk->gate.hw, &clk_gate_ops, 0);
+}
+
+static void __init lpc18xx_cgu_register_source_clks(struct device_node *np,
+                                                   void __iomem *base)
+{
+       const char *parents[CLK_SRC_MAX];
+       struct clk *clk;
+       int i;
+
+       /* Register the internal 12 MHz RC oscillator (IRC) */
+       clk = clk_register_fixed_rate(NULL, clk_src_names[CLK_SRC_IRC],
+                                     NULL, CLK_IS_ROOT, 12000000);
+       if (IS_ERR(clk))
+               pr_warn("%s: failed to register irc clk\n", __func__);
+
+       /* Register crystal oscillator controller */
+       parents[0] = of_clk_get_parent_name(np, 0);
+       clk = clk_register_gate(NULL, clk_src_names[CLK_SRC_OSC], parents[0],
+                               0, base + LPC18XX_CGU_XTAL_OSC_CTRL,
+                               0, CLK_GATE_SET_TO_DISABLE, NULL);
+       if (IS_ERR(clk))
+               pr_warn("%s: failed to register osc clk\n", __func__);
+
+       /* Register all PLLs */
+       for (i = 0; i < ARRAY_SIZE(lpc18xx_cgu_src_clk_plls); i++) {
+               clk = lpc18xx_cgu_register_pll(&lpc18xx_cgu_src_clk_plls[i],
+                                                  base);
+               if (IS_ERR(clk))
+                       pr_warn("%s: failed to register pll (%d)\n", __func__, i);
+       }
+
+       /* Register all clock dividers A-E */
+       for (i = 0; i < ARRAY_SIZE(lpc18xx_cgu_src_clk_divs); i++) {
+               clk = lpc18xx_cgu_register_div(&lpc18xx_cgu_src_clk_divs[i],
+                                              base, i);
+               if (IS_ERR(clk))
+                       pr_warn("%s: failed to register div %d\n", __func__, i);
+       }
+}
+
+static struct clk *clk_base[BASE_CLK_MAX];
+static struct clk_onecell_data clk_base_data = {
+       .clks = clk_base,
+       .clk_num = BASE_CLK_MAX,
+};
+
+static void __init lpc18xx_cgu_register_base_clks(void __iomem *reg_base)
+{
+       int i;
+
+       for (i = BASE_SAFE_CLK; i < BASE_CLK_MAX; i++) {
+               clk_base[i] = lpc18xx_register_base_clk(&lpc18xx_cgu_base_clks[i],
+                                                       reg_base, i);
+               if (IS_ERR(clk_base[i]) && PTR_ERR(clk_base[i]) != -ENOENT)
+                       pr_warn("%s: register base clk %d failed\n", __func__, i);
+       }
+}
+
+static void __init lpc18xx_cgu_init(struct device_node *np)
+{
+       void __iomem *reg_base;
+
+       reg_base = of_iomap(np, 0);
+       if (!reg_base) {
+               pr_warn("%s: failed to map address range\n", __func__);
+               return;
+       }
+
+       lpc18xx_cgu_register_source_clks(np, reg_base);
+       lpc18xx_cgu_register_base_clks(reg_base);
+
+       of_clk_add_provider(np, of_clk_src_onecell_get, &clk_base_data);
+}
+CLK_OF_DECLARE(lpc18xx_cgu, "nxp,lpc1850-cgu", lpc18xx_cgu_init);
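
The MSEL helpers above implement the special MDEC register encoding noted in the
driver comment. A standalone sketch (not part of the patch) that copies the two
helpers verbatim and checks on the host that they round-trip for small
multipliers; the file name and the 1..64 range are arbitrary choices:

/* pll0demo.c - build with: cc -o pll0demo pll0demo.c */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

typedef uint32_t u32;
#define BIT(n)			(1u << (n))
#define LPC18XX_PLL0_MSEL_MAX	BIT(15)

/* Copied from clk-lpc18xx-cgu.c above */
static u32 lpc18xx_pll0_mdec2msel(u32 x)
{
	int i;

	switch (x) {
	case 0x18003: return 1;
	case 0x10003: return 2;
	default:
		for (i = LPC18XX_PLL0_MSEL_MAX + 1; x != 0x4000 && i > 0; i--)
			x = ((x ^ x >> 14) & 1) | (x << 1 & 0x7fff);
		return i;
	}
}

/* Copied from clk-lpc18xx-cgu.c above */
static u32 lpc18xx_pll0_msel2mdec(u32 msel)
{
	u32 i, x = 0x4000;

	switch (msel) {
	case 0: return 0;
	case 1: return 0x18003;
	case 2: return 0x10003;
	default:
		for (i = msel; i <= LPC18XX_PLL0_MSEL_MAX; i++)
			x = ((x ^ x >> 1) & 1) << 14 | (x >> 1 & 0xffff);
		return x;
	}
}

int main(void)
{
	for (u32 msel = 1; msel <= 64; msel++)
		assert(lpc18xx_pll0_mdec2msel(lpc18xx_pll0_msel2mdec(msel)) == msel);
	printf("PLL0 MSEL encode/decode round-trips for 1..64\n");
	return 0;
}
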
index de537560bf709444601405f42650c22b91c4334d..e17dada0dd21ab88fe1282b6dcc662e5e775988a 100644 (file)
@@ -6,9 +6,12 @@
  * version 2, as published by the Free Software Foundation.
  */
 
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
 #include <linux/clk-provider.h>
 #include <linux/io.h>
 #include <linux/kernel.h>
+#include <linux/printk.h>
 #include <linux/slab.h>
 
 #include "clk.h"
 #define PLL_CTRL4                      0x10
 #define PLL_FRAC_CTRL4_BYPASS          BIT(28)
 
+#define MIN_PFD                                9600000UL
+#define MIN_VCO_LA                     400000000UL
+#define MAX_VCO_LA                     1600000000UL
+#define MIN_VCO_FRAC_INT               600000000UL
+#define MAX_VCO_FRAC_INT               1600000000UL
+#define MIN_VCO_FRAC_FRAC              600000000UL
+#define MAX_VCO_FRAC_FRAC              2400000000UL
+#define MIN_OUTPUT_LA                  8000000UL
+#define MAX_OUTPUT_LA                  1600000000UL
+#define MIN_OUTPUT_FRAC                        12000000UL
+#define MAX_OUTPUT_FRAC                        1600000000UL
+
 struct pistachio_clk_pll {
        struct clk_hw hw;
        void __iomem *base;
@@ -67,6 +82,12 @@ static inline void pll_writel(struct pistachio_clk_pll *pll, u32 val, u32 reg)
        writel(val, pll->base + reg);
 }
 
+static inline void pll_lock(struct pistachio_clk_pll *pll)
+{
+       while (!(pll_readl(pll, PLL_STATUS) & PLL_STATUS_LOCK))
+               cpu_relax();
+}
+
 static inline u32 do_div_round_closest(u64 dividend, u32 divisor)
 {
        dividend += divisor / 2;
@@ -124,6 +145,8 @@ static int pll_gf40lp_frac_enable(struct clk_hw *hw)
        val &= ~PLL_FRAC_CTRL4_BYPASS;
        pll_writel(pll, val, PLL_CTRL4);
 
+       pll_lock(pll);
+
        return 0;
 }
 
@@ -149,16 +172,29 @@ static int pll_gf40lp_frac_set_rate(struct clk_hw *hw, unsigned long rate,
 {
        struct pistachio_clk_pll *pll = to_pistachio_pll(hw);
        struct pistachio_pll_rate_table *params;
-       bool was_enabled;
-       u32 val;
+       int enabled = pll_gf40lp_frac_is_enabled(hw);
+       u32 val, vco, old_postdiv1, old_postdiv2;
+       const char *name = __clk_get_name(hw->clk);
+
+       if (rate < MIN_OUTPUT_FRAC || rate > MAX_OUTPUT_FRAC)
+               return -EINVAL;
 
        params = pll_get_params(pll, parent_rate, rate);
-       if (!params)
+       if (!params || !params->refdiv)
                return -EINVAL;
 
-       was_enabled = pll_gf40lp_frac_is_enabled(hw);
-       if (!was_enabled)
-               pll_gf40lp_frac_enable(hw);
+       vco = params->fref * params->fbdiv / params->refdiv;
+       if (vco < MIN_VCO_FRAC_FRAC || vco > MAX_VCO_FRAC_FRAC)
+               pr_warn("%s: VCO %u is out of range %lu..%lu\n", name, vco,
+                       MIN_VCO_FRAC_FRAC, MAX_VCO_FRAC_FRAC);
+
+       val = params->fref / params->refdiv;
+       if (val < MIN_PFD)
+               pr_warn("%s: PFD %u is too low (min %lu)\n",
+                       name, val, MIN_PFD);
+       if (val > vco / 16)
+               pr_warn("%s: PFD %u is too high (max %u)\n",
+                       name, val, vco / 16);
 
        val = pll_readl(pll, PLL_CTRL1);
        val &= ~((PLL_CTRL1_REFDIV_MASK << PLL_CTRL1_REFDIV_SHIFT) |
@@ -168,6 +204,19 @@ static int pll_gf40lp_frac_set_rate(struct clk_hw *hw, unsigned long rate,
        pll_writel(pll, val, PLL_CTRL1);
 
        val = pll_readl(pll, PLL_CTRL2);
+
+       old_postdiv1 = (val >> PLL_FRAC_CTRL2_POSTDIV1_SHIFT) &
+                      PLL_FRAC_CTRL2_POSTDIV1_MASK;
+       old_postdiv2 = (val >> PLL_FRAC_CTRL2_POSTDIV2_SHIFT) &
+                      PLL_FRAC_CTRL2_POSTDIV2_MASK;
+       if (enabled &&
+           (params->postdiv1 != old_postdiv1 ||
+            params->postdiv2 != old_postdiv2))
+               pr_warn("%s: changing postdiv while PLL is enabled\n", name);
+
+       if (params->postdiv2 > params->postdiv1)
+               pr_warn("%s: postdiv2 should not exceed postdiv1\n", name);
+
        val &= ~((PLL_FRAC_CTRL2_FRAC_MASK << PLL_FRAC_CTRL2_FRAC_SHIFT) |
                 (PLL_FRAC_CTRL2_POSTDIV1_MASK <<
                  PLL_FRAC_CTRL2_POSTDIV1_SHIFT) |
@@ -178,11 +227,8 @@ static int pll_gf40lp_frac_set_rate(struct clk_hw *hw, unsigned long rate,
                (params->postdiv2 << PLL_FRAC_CTRL2_POSTDIV2_SHIFT);
        pll_writel(pll, val, PLL_CTRL2);
 
-       while (!(pll_readl(pll, PLL_STATUS) & PLL_STATUS_LOCK))
-               cpu_relax();
-
-       if (!was_enabled)
-               pll_gf40lp_frac_disable(hw);
+       if (enabled)
+               pll_lock(pll);
 
        return 0;
 }
@@ -241,6 +287,8 @@ static int pll_gf40lp_laint_enable(struct clk_hw *hw)
        val &= ~PLL_INT_CTRL2_BYPASS;
        pll_writel(pll, val, PLL_CTRL2);
 
+       pll_lock(pll);
+
        return 0;
 }
 
@@ -266,18 +314,44 @@ static int pll_gf40lp_laint_set_rate(struct clk_hw *hw, unsigned long rate,
 {
        struct pistachio_clk_pll *pll = to_pistachio_pll(hw);
        struct pistachio_pll_rate_table *params;
-       bool was_enabled;
-       u32 val;
+       int enabled = pll_gf40lp_laint_is_enabled(hw);
+       u32 val, vco, old_postdiv1, old_postdiv2;
+       const char *name = __clk_get_name(hw->clk);
+
+       if (rate < MIN_OUTPUT_LA || rate > MAX_OUTPUT_LA)
+               return -EINVAL;
 
        params = pll_get_params(pll, parent_rate, rate);
-       if (!params)
+       if (!params || !params->refdiv)
                return -EINVAL;
 
-       was_enabled = pll_gf40lp_laint_is_enabled(hw);
-       if (!was_enabled)
-               pll_gf40lp_laint_enable(hw);
+       vco = params->fref * params->fbdiv / params->refdiv;
+       if (vco < MIN_VCO_LA || vco > MAX_VCO_LA)
+               pr_warn("%s: VCO %u is out of range %lu..%lu\n", name, vco,
+                       MIN_VCO_LA, MAX_VCO_LA);
+
+       val = params->fref / params->refdiv;
+       if (val < MIN_PFD)
+               pr_warn("%s: PFD %u is too low (min %lu)\n",
+                       name, val, MIN_PFD);
+       if (val > vco / 16)
+               pr_warn("%s: PFD %u is too high (max %u)\n",
+                       name, val, vco / 16);
 
        val = pll_readl(pll, PLL_CTRL1);
+
+       old_postdiv1 = (val >> PLL_INT_CTRL1_POSTDIV1_SHIFT) &
+                      PLL_INT_CTRL1_POSTDIV1_MASK;
+       old_postdiv2 = (val >> PLL_INT_CTRL1_POSTDIV2_SHIFT) &
+                      PLL_INT_CTRL1_POSTDIV2_MASK;
+       if (enabled &&
+           (params->postdiv1 != old_postdiv1 ||
+            params->postdiv2 != old_postdiv2))
+               pr_warn("%s: changing postdiv while PLL is enabled\n", name);
+
+       if (params->postdiv2 > params->postdiv1)
+               pr_warn("%s: postdiv2 should not exceed postdiv1\n", name);
+
        val &= ~((PLL_CTRL1_REFDIV_MASK << PLL_CTRL1_REFDIV_SHIFT) |
                 (PLL_CTRL1_FBDIV_MASK << PLL_CTRL1_FBDIV_SHIFT) |
                 (PLL_INT_CTRL1_POSTDIV1_MASK << PLL_INT_CTRL1_POSTDIV1_SHIFT) |
@@ -288,11 +362,8 @@ static int pll_gf40lp_laint_set_rate(struct clk_hw *hw, unsigned long rate,
                (params->postdiv2 << PLL_INT_CTRL1_POSTDIV2_SHIFT);
        pll_writel(pll, val, PLL_CTRL1);
 
-       while (!(pll_readl(pll, PLL_STATUS) & PLL_STATUS_LOCK))
-               cpu_relax();
-
-       if (!was_enabled)
-               pll_gf40lp_laint_disable(hw);
+       if (enabled)
+               pll_lock(pll);
 
        return 0;
 }
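
The warnings added above check the PLL operating point against the datasheet
limits: VCO = fref * fbdiv / refdiv and PFD = fref / refdiv. A small host-side
sketch (not part of the patch; frac_params_ok() and the 52 MHz example are
illustrative assumptions) reproducing the same arithmetic for the fractional
PLL limits:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Limits copied from the patch above */
#define MIN_PFD			9600000UL
#define MIN_VCO_FRAC_FRAC	600000000UL
#define MAX_VCO_FRAC_FRAC	2400000000UL

static bool frac_params_ok(unsigned long fref, uint32_t fbdiv, uint32_t refdiv)
{
	unsigned long vco, pfd;

	if (!refdiv)
		return false;

	vco = fref * fbdiv / refdiv;	/* VCO frequency */
	pfd = fref / refdiv;		/* phase/frequency detector input */

	return vco >= MIN_VCO_FRAC_FRAC && vco <= MAX_VCO_FRAC_FRAC &&
	       pfd >= MIN_PFD && pfd <= vco / 16;
}

int main(void)
{
	/* e.g. 52 MHz reference, fbdiv 24, refdiv 1 -> 1248 MHz VCO: in range */
	printf("ok = %d\n", frac_params_ok(52000000UL, 24, 1));
	return 0;
}
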
index b04c5b9c0ea816e4e900b83268126cecd051e3f5..d1de805df86733bb2a4b69505f070bae72f23f3f 100644 (file)
@@ -14,7 +14,7 @@
 #define _CLK_PXA_
 
 #define PARENTS(name) \
-       static const char *name ## _parents[] __initdata
+       static const char *const name ## _parents[] __initconst
 #define MUX_RO_RATE_RO_OPS(name, clk_name)                     \
        static struct clk_hw name ## _mux_hw;                   \
        static struct clk_hw name ## _rate_hw;                  \
@@ -72,7 +72,7 @@ struct desc_clk_cken {
        const char *name;
        const char *dev_id;
        const char *con_id;
-       const char **parent_names;
+       const char * const *parent_names;
        struct clk_fixed_factor lp;
        struct clk_fixed_factor hp;
        struct clk_gate gate;
index 8539c4fd34cc37bd28810b93d6ffb815b0bd48d6..fb7721bd37e6a9595f70cbd8125fdab52fd2265e 100644 (file)
@@ -231,7 +231,7 @@ static int rockchip_cpuclk_notifier_cb(struct notifier_block *nb,
 }
 
 struct clk *rockchip_clk_register_cpuclk(const char *name,
-                       const char **parent_names, u8 num_parents,
+                       const char *const *parent_names, u8 num_parents,
                        const struct rockchip_cpuclk_reg_data *reg_data,
                        const struct rockchip_cpuclk_rate_table *rates,
                        int nrates, void __iomem *reg_base, spinlock_t *lock)
index c842e3b60f21af12a80f74ee43e7c65d1172a249..e9f8df324e7ccecad7730998d8210c8ea11a2276 100644 (file)
@@ -120,7 +120,7 @@ static const struct clk_ops rockchip_mmc_clk_ops = {
 };
 
 struct clk *rockchip_clk_register_mmc(const char *name,
-                               const char **parent_names, u8 num_parents,
+                               const char *const *parent_names, u8 num_parents,
                                void __iomem *reg, int shift)
 {
        struct clk_init_data init;
index f8d3baf275b211623efd553d7b7fdf23c609d7f0..76027261f7ed999971f0b46ca10a4b4774d55bc3 100644 (file)
@@ -329,10 +329,10 @@ static const struct clk_ops rockchip_rk3066_pll_clk_ops = {
  */
 
 struct clk *rockchip_clk_register_pll(enum rockchip_pll_type pll_type,
-               const char *name, const char **parent_names, u8 num_parents,
-               void __iomem *base, int con_offset, int grf_lock_offset,
-               int lock_shift, int mode_offset, int mode_shift,
-               struct rockchip_pll_rate_table *rate_table,
+               const char *name, const char *const *parent_names,
+               u8 num_parents, void __iomem *base, int con_offset,
+               int grf_lock_offset, int lock_shift, int mode_offset,
+               int mode_shift, struct rockchip_pll_rate_table *rate_table,
                u8 clk_pll_flags, spinlock_t *lock)
 {
        const char *pll_parents[3];
index 556ce041d371478b43a9549a258f368b57f3468d..e4f9d472f1ffb52f7ffb5f5061b39d56d519fb89 100644 (file)
@@ -26,7 +26,7 @@ enum rk3188_plls {
        apll, cpll, dpll, gpll,
 };
 
-struct rockchip_pll_rate_table rk3188_pll_rates[] = {
+static struct rockchip_pll_rate_table rk3188_pll_rates[] = {
        RK3066_PLL_RATE(2208000000, 1, 92, 1),
        RK3066_PLL_RATE(2184000000, 1, 91, 1),
        RK3066_PLL_RATE(2160000000, 1, 90, 1),
index d17eb4528a283ee089603876ac8daa7da6b66054..4f817ed9e6eedc432d0e6fd05adcb365e03e0f6b 100644 (file)
@@ -27,7 +27,7 @@ enum rk3288_plls {
        apll, dpll, cpll, gpll, npll,
 };
 
-struct rockchip_pll_rate_table rk3288_pll_rates[] = {
+static struct rockchip_pll_rate_table rk3288_pll_rates[] = {
        RK3066_PLL_RATE(2208000000, 1, 92, 1),
        RK3066_PLL_RATE(2184000000, 1, 91, 1),
        RK3066_PLL_RATE(2160000000, 1, 90, 1),
index edb5d489ae61859c5853d2ece634bd5d3e397f73..052b94db0ff93a690dda50339b81ad673c00fadd 100644 (file)
@@ -39,7 +39,7 @@
  * sometimes without one of those components.
  */
 static struct clk *rockchip_clk_register_branch(const char *name,
-               const char **parent_names, u8 num_parents, void __iomem *base,
+               const char *const *parent_names, u8 num_parents, void __iomem *base,
                int muxdiv_offset, u8 mux_shift, u8 mux_width, u8 mux_flags,
                u8 div_shift, u8 div_width, u8 div_flags,
                struct clk_div_table *div_table, int gate_offset,
@@ -103,8 +103,8 @@ static struct clk *rockchip_clk_register_branch(const char *name,
 }
 
 static struct clk *rockchip_clk_register_frac_branch(const char *name,
-               const char **parent_names, u8 num_parents, void __iomem *base,
-               int muxdiv_offset, u8 div_flags,
+               const char *const *parent_names, u8 num_parents,
+               void __iomem *base, int muxdiv_offset, u8 div_flags,
                int gate_offset, u8 gate_shift, u8 gate_flags,
                unsigned long flags, spinlock_t *lock)
 {
@@ -297,7 +297,7 @@ void __init rockchip_clk_register_branches(
 }
 
 void __init rockchip_clk_register_armclk(unsigned int lookup_id,
-                       const char *name, const char **parent_names,
+                       const char *name, const char *const *parent_names,
                        u8 num_parents,
                        const struct rockchip_cpuclk_reg_data *reg_data,
                        const struct rockchip_cpuclk_rate_table *rates,
index e63cafe893e19f82f2d200a6c1a391aff6112a12..6b092673048a2514399bd07cf156aefb081fb56a 100644 (file)
@@ -108,7 +108,7 @@ struct rockchip_pll_rate_table {
 struct rockchip_pll_clock {
        unsigned int            id;
        const char              *name;
-       const char              **parent_names;
+       const char              *const *parent_names;
        u8                      num_parents;
        unsigned long           flags;
        int                     con_offset;
@@ -140,10 +140,10 @@ struct rockchip_pll_clock {
        }
 
 struct clk *rockchip_clk_register_pll(enum rockchip_pll_type pll_type,
-               const char *name, const char **parent_names, u8 num_parents,
-               void __iomem *base, int con_offset, int grf_lock_offset,
-               int lock_shift, int reg_mode, int mode_shift,
-               struct rockchip_pll_rate_table *rate_table,
+               const char *name, const char *const *parent_names,
+               u8 num_parents, void __iomem *base, int con_offset,
+               int grf_lock_offset, int lock_shift, int reg_mode,
+               int mode_shift, struct rockchip_pll_rate_table *rate_table,
                u8 clk_pll_flags, spinlock_t *lock);
 
 struct rockchip_cpuclk_clksel {
@@ -173,16 +173,16 @@ struct rockchip_cpuclk_reg_data {
 };
 
 struct clk *rockchip_clk_register_cpuclk(const char *name,
-                       const char **parent_names, u8 num_parents,
+                       const char *const *parent_names, u8 num_parents,
                        const struct rockchip_cpuclk_reg_data *reg_data,
                        const struct rockchip_cpuclk_rate_table *rates,
                        int nrates, void __iomem *reg_base, spinlock_t *lock);
 
 struct clk *rockchip_clk_register_mmc(const char *name,
-                               const char **parent_names, u8 num_parents,
+                               const char *const *parent_names, u8 num_parents,
                                void __iomem *reg, int shift);
 
-#define PNAME(x) static const char *x[] __initdata
+#define PNAME(x) static const char *const x[] __initconst
 
 enum rockchip_clk_branch_type {
        branch_composite,
@@ -197,7 +197,7 @@ struct rockchip_clk_branch {
        unsigned int                    id;
        enum rockchip_clk_branch_type   branch_type;
        const char                      *name;
-       const char                      **parent_names;
+       const char                      *const *parent_names;
        u8                              num_parents;
        unsigned long                   flags;
        int                             muxdiv_offset;
@@ -403,7 +403,7 @@ void rockchip_clk_register_branches(struct rockchip_clk_branch *clk_list,
 void rockchip_clk_register_plls(struct rockchip_pll_clock *pll_list,
                                unsigned int nr_pll, int grf_lock_offset);
 void rockchip_clk_register_armclk(unsigned int lookup_id, const char *name,
-                       const char **parent_names, u8 num_parents,
+                       const char *const *parent_names, u8 num_parents,
                        const struct rockchip_cpuclk_reg_data *reg_data,
                        const struct rockchip_cpuclk_rate_table *rates,
                        int nrates);
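
With the PNAME() change above, every parent table declared through the macro is
fully const and placed in the init-only rodata section. A minimal usage sketch
(demo_pll_p and its parent names are made up, not from the patch):

PNAME(demo_pll_p) = { "xin24m", "xin32k" };

/* ...which now expands to:
 *
 * static const char *const demo_pll_p[] __initconst = { "xin24m", "xin32k" };
 *
 * and can therefore only be passed to the const-qualified registration
 * helpers declared in this header.
 */
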
index a17683b2cf276b03e287bd6959f217e7aacb7c5f..5f6833ea355d686a5d1429133715cf1ba1f48795 100644 (file)
@@ -2,7 +2,7 @@
 # Samsung Clock specific Makefile
 #
 
-obj-$(CONFIG_COMMON_CLK)       += clk.o clk-pll.o
+obj-$(CONFIG_COMMON_CLK)       += clk.o clk-pll.o clk-cpu.o
 obj-$(CONFIG_SOC_EXYNOS3250)   += clk-exynos3250.o
 obj-$(CONFIG_ARCH_EXYNOS4)     += clk-exynos4.o
 obj-$(CONFIG_SOC_EXYNOS4415)   += clk-exynos4415.o
diff --git a/drivers/clk/samsung/clk-cpu.c b/drivers/clk/samsung/clk-cpu.c
new file mode 100644 (file)
index 0000000..3a1fe07
--- /dev/null
@@ -0,0 +1,349 @@
+/*
+ * Copyright (c) 2014 Samsung Electronics Co., Ltd.
+ * Author: Thomas Abraham <thomas.ab@samsung.com>
+ *
+ * Copyright (c) 2015 Samsung Electronics Co., Ltd.
+ * Bartlomiej Zolnierkiewicz <b.zolnierkie@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This file contains the utility function to register a CPU clock for Samsung
+ * Exynos platforms. A CPU clock is defined as a clock supplied to a CPU or a
+ * group of CPUs. The CPU clock is typically derived from a hierarchy of clock
+ * blocks which includes mux and divider blocks. There are a number of other
+ * auxiliary clocks supplied to the CPU domain such as the debug blocks and AXI
+ * clock for CPU domain. The rates of these auxiliary clocks are related to the
+ * CPU clock rate and this relation is usually specified in the hardware manual
+ * of the SoC or supplied after the SoC characterization.
+ *
+ * The below implementation of the CPU clock allows the rate changes of the CPU
+ * clock and the corresponding rate changes of the auxiliary clocks of the CPU
+ * domain. The platform clock driver provides a clock register configuration
+ * for each configurable rate which is then used to program the clock hardware
+ * registers to achieve a fast coordinated rate change for all the CPU domain
+ * clocks.
+ *
+ * On a rate change request for the CPU clock, the rate change is propagated
+ * up to the PLL supplying the clock to the CPU domain clock blocks. While the
+ * CPU domain PLL is reconfigured, the CPU domain clocks are driven using an
+ * alternate clock source. If required, the alternate clock source is divided
+ * down in order to keep the output clock rate within the previous OPP limits.
+ */
+
+#include <linux/errno.h>
+#include "clk-cpu.h"
+
+#define E4210_SRC_CPU          0x0
+#define E4210_STAT_CPU         0x200
+#define E4210_DIV_CPU0         0x300
+#define E4210_DIV_CPU1         0x304
+#define E4210_DIV_STAT_CPU0    0x400
+#define E4210_DIV_STAT_CPU1    0x404
+
+#define E4210_DIV0_RATIO0_MASK 0x7
+#define E4210_DIV1_HPM_MASK    (0x7 << 4)
+#define E4210_DIV1_COPY_MASK   (0x7 << 0)
+#define E4210_MUX_HPM_MASK     (1 << 20)
+#define E4210_DIV0_ATB_SHIFT   16
+#define E4210_DIV0_ATB_MASK    (DIV_MASK << E4210_DIV0_ATB_SHIFT)
+
+#define MAX_DIV                        8
+#define DIV_MASK               7
+#define DIV_MASK_ALL           0xffffffff
+#define MUX_MASK               7
+
+/*
+ * Helper function to wait until divider(s) have stabilized after the divider
+ * value has changed.
+ */
+static void wait_until_divider_stable(void __iomem *div_reg, unsigned long mask)
+{
+       unsigned long timeout = jiffies + msecs_to_jiffies(10);
+
+       do {
+               if (!(readl(div_reg) & mask))
+                       return;
+       } while (time_before(jiffies, timeout));
+
+       if (!(readl(div_reg) & mask))
+               return;
+
+       pr_err("%s: timeout in divider stabilization\n", __func__);
+}
+
+/*
+ * Helper function to wait until the mux has stabilized after the mux
+ * selection value has changed.
+ */
+static void wait_until_mux_stable(void __iomem *mux_reg, u32 mux_pos,
+                                       unsigned long mux_value)
+{
+       unsigned long timeout = jiffies + msecs_to_jiffies(10);
+
+       do {
+               if (((readl(mux_reg) >> mux_pos) & MUX_MASK) == mux_value)
+                       return;
+       } while (time_before(jiffies, timeout));
+
+       if (((readl(mux_reg) >> mux_pos) & MUX_MASK) == mux_value)
+               return;
+
+       pr_err("%s: re-parenting mux timed-out\n", __func__);
+}
+
+/* common round rate callback usable for all types of CPU clocks */
+static long exynos_cpuclk_round_rate(struct clk_hw *hw,
+                       unsigned long drate, unsigned long *prate)
+{
+       struct clk *parent = __clk_get_parent(hw->clk);
+       *prate = __clk_round_rate(parent, drate);
+       return *prate;
+}
+
+/* common recalc rate callback usable for all types of CPU clocks */
+static unsigned long exynos_cpuclk_recalc_rate(struct clk_hw *hw,
+                       unsigned long parent_rate)
+{
+       /*
+        * The CPU clock output (armclk) rate is the same as its parent
+        * rate. Although there exist certain dividers inside the CPU
+        * clock block that could be used to divide the parent clock,
+        * the driver does not make use of them currently, except during
+        * frequency transitions.
+        */
+       return parent_rate;
+}
+
+static const struct clk_ops exynos_cpuclk_clk_ops = {
+       .recalc_rate = exynos_cpuclk_recalc_rate,
+       .round_rate = exynos_cpuclk_round_rate,
+};
+
+/*
+ * Helper function to set the 'safe' dividers for the CPU clock. The parameters
+ * div and mask contain the divider value and the register bit mask of the
+ * dividers to be programmed.
+ */
+static void exynos_set_safe_div(void __iomem *base, unsigned long div,
+                                       unsigned long mask)
+{
+       unsigned long div0;
+
+       div0 = readl(base + E4210_DIV_CPU0);
+       div0 = (div0 & ~mask) | (div & mask);
+       writel(div0, base + E4210_DIV_CPU0);
+       wait_until_divider_stable(base + E4210_DIV_STAT_CPU0, mask);
+}
+
+/* handler for pre-rate change notification from parent clock */
+static int exynos_cpuclk_pre_rate_change(struct clk_notifier_data *ndata,
+                       struct exynos_cpuclk *cpuclk, void __iomem *base)
+{
+       const struct exynos_cpuclk_cfg_data *cfg_data = cpuclk->cfg;
+       unsigned long alt_prate = clk_get_rate(cpuclk->alt_parent);
+       unsigned long alt_div = 0, alt_div_mask = DIV_MASK;
+       unsigned long div0, div1 = 0, mux_reg;
+
+       /* find out the divider values to use for clock data */
+       while ((cfg_data->prate * 1000) != ndata->new_rate) {
+               if (cfg_data->prate == 0)
+                       return -EINVAL;
+               cfg_data++;
+       }
+
+       spin_lock(cpuclk->lock);
+
+       /*
+        * For the selected PLL clock frequency, get the pre-defined divider
+        * values. If the clock for sclk_hpm is not sourced from apll, then
+        * the values for DIV_COPY and DIV_HPM dividers need not be set.
+        */
+       div0 = cfg_data->div0;
+       if (test_bit(CLK_CPU_HAS_DIV1, &cpuclk->flags)) {
+               div1 = cfg_data->div1;
+               if (readl(base + E4210_SRC_CPU) & E4210_MUX_HPM_MASK)
+                       div1 = readl(base + E4210_DIV_CPU1) &
+                               (E4210_DIV1_HPM_MASK | E4210_DIV1_COPY_MASK);
+       }
+
+       /*
+        * If the old parent clock rate is lower than the rate of the
+        * alternate parent, ensure that armclk never runs faster than the
+        * old rate until the dividers are programmed.  Also work around the
+        * case where the dividers would be set to lower values before the
+        * parent clock is switched to the new, lower rate (which could
+        * momentarily drive the armclk output clocks too fast).
+        */
+       if (alt_prate > ndata->old_rate || ndata->old_rate > ndata->new_rate) {
+               unsigned long tmp_rate = min(ndata->old_rate, ndata->new_rate);
+
+               alt_div = DIV_ROUND_UP(alt_prate, tmp_rate) - 1;
+               WARN_ON(alt_div >= MAX_DIV);
+
+               if (test_bit(CLK_CPU_NEEDS_DEBUG_ALT_DIV, &cpuclk->flags)) {
+                       /*
+                        * On Exynos4210, the ATB clock parent is also mout_core, so
+                        * the ATB clock must also be maintained at a safe speed.
+                        */
+                       alt_div |= E4210_DIV0_ATB_MASK;
+                       alt_div_mask |= E4210_DIV0_ATB_MASK;
+               }
+               exynos_set_safe_div(base, alt_div, alt_div_mask);
+               div0 |= alt_div;
+       }
+
+       /* select sclk_mpll as the alternate parent */
+       mux_reg = readl(base + E4210_SRC_CPU);
+       writel(mux_reg | (1 << 16), base + E4210_SRC_CPU);
+       wait_until_mux_stable(base + E4210_STAT_CPU, 16, 2);
+
+       /* alternate parent is active now. set the dividers */
+       writel(div0, base + E4210_DIV_CPU0);
+       wait_until_divider_stable(base + E4210_DIV_STAT_CPU0, DIV_MASK_ALL);
+
+       if (test_bit(CLK_CPU_HAS_DIV1, &cpuclk->flags)) {
+               writel(div1, base + E4210_DIV_CPU1);
+               wait_until_divider_stable(base + E4210_DIV_STAT_CPU1,
+                               DIV_MASK_ALL);
+       }
+
+       spin_unlock(cpuclk->lock);
+       return 0;
+}
+
+/* handler for post-rate change notification from parent clock */
+static int exynos_cpuclk_post_rate_change(struct clk_notifier_data *ndata,
+                       struct exynos_cpuclk *cpuclk, void __iomem *base)
+{
+       const struct exynos_cpuclk_cfg_data *cfg_data = cpuclk->cfg;
+       unsigned long div = 0, div_mask = DIV_MASK;
+       unsigned long mux_reg;
+
+       /* find out the divider values to use for clock data */
+       if (test_bit(CLK_CPU_NEEDS_DEBUG_ALT_DIV, &cpuclk->flags)) {
+               while ((cfg_data->prate * 1000) != ndata->new_rate) {
+                       if (cfg_data->prate == 0)
+                               return -EINVAL;
+                       cfg_data++;
+               }
+       }
+
+       spin_lock(cpuclk->lock);
+
+       /* select mout_apll as the alternate parent */
+       mux_reg = readl(base + E4210_SRC_CPU);
+       writel(mux_reg & ~(1 << 16), base + E4210_SRC_CPU);
+       wait_until_mux_stable(base + E4210_STAT_CPU, 16, 1);
+
+       if (test_bit(CLK_CPU_NEEDS_DEBUG_ALT_DIV, &cpuclk->flags)) {
+               div |= (cfg_data->div0 & E4210_DIV0_ATB_MASK);
+               div_mask |= E4210_DIV0_ATB_MASK;
+       }
+
+       exynos_set_safe_div(base, div, div_mask);
+       spin_unlock(cpuclk->lock);
+       return 0;
+}
+
+/*
+ * This notifier function is called for the pre-rate and post-rate change
+ * notifications of the parent clock of cpuclk.
+ */
+static int exynos_cpuclk_notifier_cb(struct notifier_block *nb,
+                               unsigned long event, void *data)
+{
+       struct clk_notifier_data *ndata = data;
+       struct exynos_cpuclk *cpuclk;
+       void __iomem *base;
+       int err = 0;
+
+       cpuclk = container_of(nb, struct exynos_cpuclk, clk_nb);
+       base = cpuclk->ctrl_base;
+
+       if (event == PRE_RATE_CHANGE)
+               err = exynos_cpuclk_pre_rate_change(ndata, cpuclk, base);
+       else if (event == POST_RATE_CHANGE)
+               err = exynos_cpuclk_post_rate_change(ndata, cpuclk, base);
+
+       return notifier_from_errno(err);
+}
+
+/* helper function to register a CPU clock */
+int __init exynos_register_cpu_clock(struct samsung_clk_provider *ctx,
+               unsigned int lookup_id, const char *name, const char *parent,
+               const char *alt_parent, unsigned long offset,
+               const struct exynos_cpuclk_cfg_data *cfg,
+               unsigned long num_cfgs, unsigned long flags)
+{
+       struct exynos_cpuclk *cpuclk;
+       struct clk_init_data init;
+       struct clk *clk;
+       int ret = 0;
+
+       cpuclk = kzalloc(sizeof(*cpuclk), GFP_KERNEL);
+       if (!cpuclk)
+               return -ENOMEM;
+
+       init.name = name;
+       init.flags = CLK_SET_RATE_PARENT;
+       init.parent_names = &parent;
+       init.num_parents = 1;
+       init.ops = &exynos_cpuclk_clk_ops;
+
+       cpuclk->hw.init = &init;
+       cpuclk->ctrl_base = ctx->reg_base + offset;
+       cpuclk->lock = &ctx->lock;
+       cpuclk->flags = flags;
+       cpuclk->clk_nb.notifier_call = exynos_cpuclk_notifier_cb;
+
+       cpuclk->alt_parent = __clk_lookup(alt_parent);
+       if (!cpuclk->alt_parent) {
+               pr_err("%s: could not lookup alternate parent %s\n",
+                               __func__, alt_parent);
+               ret = -EINVAL;
+               goto free_cpuclk;
+       }
+
+       clk = __clk_lookup(parent);
+       if (!clk) {
+               pr_err("%s: could not lookup parent clock %s\n",
+                               __func__, parent);
+               ret = -EINVAL;
+               goto free_cpuclk;
+       }
+
+       ret = clk_notifier_register(clk, &cpuclk->clk_nb);
+       if (ret) {
+               pr_err("%s: failed to register clock notifier for %s\n",
+                               __func__, name);
+               goto free_cpuclk;
+       }
+
+       cpuclk->cfg = kmemdup(cfg, sizeof(*cfg) * num_cfgs, GFP_KERNEL);
+       if (!cpuclk->cfg) {
+               pr_err("%s: could not allocate memory for cpuclk data\n",
+                               __func__);
+               ret = -ENOMEM;
+               goto unregister_clk_nb;
+       }
+
+       clk = clk_register(NULL, &cpuclk->hw);
+       if (IS_ERR(clk)) {
+               pr_err("%s: could not register cpuclk %s\n", __func__,  name);
+               ret = PTR_ERR(clk);
+               goto free_cpuclk_data;
+       }
+
+       samsung_clk_add_lookup(ctx, clk, lookup_id);
+       return 0;
+
+free_cpuclk_data:
+       kfree(cpuclk->cfg);
+unregister_clk_nb:
+       clk_notifier_unregister(__clk_lookup(parent), &cpuclk->clk_nb);
+free_cpuclk:
+       kfree(cpuclk);
+       return ret;
+}
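
A quick worked example of the safe-divider arithmetic used in exynos_cpuclk_pre_rate_change() above. The rates are hypothetical (not taken from this patch) and the snippet is a standalone userspace sketch, not kernel code:

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	/* Hypothetical rates in Hz: 800 MHz alternate parent, 1 GHz -> 200 MHz transition. */
	unsigned long alt_prate = 800000000UL;
	unsigned long old_rate = 1000000000UL;
	unsigned long new_rate = 200000000UL;

	/* Keep armclk at or below the lower of the two OPPs while the PLL relocks. */
	unsigned long tmp_rate = new_rate < old_rate ? new_rate : old_rate;
	unsigned long alt_div = DIV_ROUND_UP(alt_prate, tmp_rate) - 1;

	/* Divider fields are programmed as (divisor - 1): 3 here, i.e. divide by 4. */
	printf("alt_div = %lu, alternate parent output = %lu Hz\n",
	       alt_div, alt_prate / (alt_div + 1));
	return 0;
}

With these numbers the alternate parent is divided down to 200 MHz, so armclk never exceeds the lower of the two operating points while the PLL is being reprogrammed.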
diff --git a/drivers/clk/samsung/clk-cpu.h b/drivers/clk/samsung/clk-cpu.h
new file mode 100644 (file)
index 0000000..37874d3
--- /dev/null
@@ -0,0 +1,73 @@
+/*
+ * Copyright (c) 2014 Samsung Electronics Co., Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Common Clock Framework support for the CPU clocks in Samsung platforms
+*/
+
+#ifndef __SAMSUNG_CLK_CPU_H
+#define __SAMSUNG_CLK_CPU_H
+
+#include "clk.h"
+
+/**
+ * struct exynos_cpuclk_cfg_data: config data to set up CPU clocks.
+ * @prate: frequency of the primary parent clock (in kHz).
+ * @div0: value to be programmed in the div_cpu0 register.
+ * @div1: value to be programmed in the div_cpu1 register.
+ *
+ * This structure holds the divider configuration data for the dividers in the
+ * CPU clock domain. @prate specifies the primary parent clock frequency at
+ * which these divider values are valid. For CPU clock domains that do not
+ * have a DIV1 register, the @div1 member value is not used.
+ */
+struct exynos_cpuclk_cfg_data {
+       unsigned long   prate;
+       unsigned long   div0;
+       unsigned long   div1;
+};
+
+/**
+ * struct exynos_cpuclk: information about clock supplied to a CPU core.
+ * @hw:        handle between CCF and CPU clock.
+ * @alt_parent: alternate parent clock to use when switching the speed
+ *     of the primary parent clock.
+ * @ctrl_base: base address of the clock controller.
+ * @lock: cpu clock domain register access lock.
+ * @cfg: cpu clock rate configuration data.
+ * @num_cfgs: number of array elements in @cfg array.
+ * @clk_nb: clock notifier registered for changes in clock speed of the
+ *     primary parent clock.
+ * @flags: configuration flags for the CPU clock.
+ *
+ * This structure holds information required for programming the CPU clock for
+ * various clock speeds.
+ */
+struct exynos_cpuclk {
+       struct clk_hw                           hw;
+       struct clk                              *alt_parent;
+       void __iomem                            *ctrl_base;
+       spinlock_t                              *lock;
+       const struct exynos_cpuclk_cfg_data     *cfg;
+       const unsigned long                     num_cfgs;
+       struct notifier_block                   clk_nb;
+       unsigned long                           flags;
+
+/* The CPU clock block has a DIV1 configuration register */
+#define CLK_CPU_HAS_DIV1               (1 << 0)
+/* When the alternate parent is active, debug clocks need safe divider values */
+#define CLK_CPU_NEEDS_DEBUG_ALT_DIV    (1 << 1)
+};
+
+extern int __init exynos_register_cpu_clock(struct samsung_clk_provider *ctx,
+                       unsigned int lookup_id, const char *name,
+                       const char *parent, const char *alt_parent,
+                       unsigned long offset,
+                       const struct exynos_cpuclk_cfg_data *cfg,
+                       unsigned long num_cfgs, unsigned long flags);
+
+#endif /* __SAMSUNG_CLK_CPU_H */
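
For orientation, a minimal sketch of how a platform clock driver might use this header. The init function, divider values and the 0x14200 offset are illustrative only (the real Exynos4210 registration appears in the clk-exynos4.c hunk further down), CLK_ARM_CLK is taken from the Exynos4 clock binding, and the snippet assumes the normal Samsung clk driver build context:

#include "clk-cpu.h"

/* Hypothetical rate table: { parent rate in kHz, DIV_CPU0 value, DIV_CPU1 value } */
static const struct exynos_cpuclk_cfg_data hypo_armclk_d[] __initconst = {
	{ 1000000, 0x01143730, 0x05 },
	{  500000, 0x01133730, 0x03 },
	{ 0 },
};

static void __init hypo_soc_clk_init(struct samsung_clk_provider *ctx)
{
	/* lookup id, name, parent, alternate parent, CPU clock block offset, cfg, flags */
	exynos_register_cpu_clock(ctx, CLK_ARM_CLK, "armclk",
			"mout_apll", "mout_mpll", 0x14200,
			hypo_armclk_d, ARRAY_SIZE(hypo_armclk_d),
			CLK_CPU_HAS_DIV1);
}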
index 714d6ba782c81b2420c7770db183a3b6b1ffd785..cae2c048488db3e7a4c3545c81faa50f450797d7 100644 (file)
@@ -19,6 +19,7 @@
 #include <linux/syscore_ops.h>
 
 #include "clk.h"
+#include "clk-cpu.h"
 
 /* Exynos4 clock controller register offsets */
 #define SRC_LEFTBUS            0x4200
@@ -534,7 +535,8 @@ static struct samsung_fixed_factor_clock exynos4x12_fixed_factor_clks[] __initda
 /* list of mux clocks supported in all exynos4 soc's */
 static struct samsung_mux_clock exynos4_mux_clks[] __initdata = {
        MUX_FA(CLK_MOUT_APLL, "mout_apll", mout_apll_p, SRC_CPU, 0, 1,
-                       CLK_SET_RATE_PARENT, 0, "mout_apll"),
+                       CLK_SET_RATE_PARENT | CLK_RECALC_NEW_RATES, 0,
+                       "mout_apll"),
        MUX(CLK_MOUT_HDMI, "mout_hdmi", mout_hdmi_p, SRC_TV, 0, 1),
        MUX(0, "mout_mfc1", sclk_evpll_p, SRC_MFC, 4, 1),
        MUX(0, "mout_mfc", mout_mfc_p, SRC_MFC, 8, 1),
@@ -1378,6 +1380,22 @@ static void __init exynos4x12_core_down_clock(void)
        __raw_writel(0x0, reg_base + E4X12_PWR_CTRL2);
 }
 
+#define E4210_CPU_DIV0(apll, pclk_dbg, atb, periph, corem1, corem0)    \
+               (((apll) << 24) | ((pclk_dbg) << 20) | ((atb) << 16) |  \
+               ((periph) << 12) | ((corem1) << 8) | ((corem0) <<  4))
+#define E4210_CPU_DIV1(hpm, copy)                                      \
+               (((hpm) << 4) | ((copy) << 0))
+
+static const struct exynos_cpuclk_cfg_data e4210_armclk_d[] __initconst = {
+       { 1200000, E4210_CPU_DIV0(7, 1, 4, 3, 7, 3), E4210_CPU_DIV1(0, 5), },
+       { 1000000, E4210_CPU_DIV0(7, 1, 4, 3, 7, 3), E4210_CPU_DIV1(0, 4), },
+       {  800000, E4210_CPU_DIV0(7, 1, 3, 3, 7, 3), E4210_CPU_DIV1(0, 3), },
+       {  500000, E4210_CPU_DIV0(7, 1, 3, 3, 7, 3), E4210_CPU_DIV1(0, 3), },
+       {  400000, E4210_CPU_DIV0(7, 1, 3, 3, 7, 3), E4210_CPU_DIV1(0, 3), },
+       {  200000, E4210_CPU_DIV0(0, 1, 1, 1, 3, 1), E4210_CPU_DIV1(0, 3), },
+       {  0 },
+};
+
 /* register exynos4 clocks */
 static void __init exynos4_clk_init(struct device_node *np,
                                    enum exynos4_soc soc)
@@ -1455,6 +1473,10 @@ static void __init exynos4_clk_init(struct device_node *np,
                samsung_clk_register_fixed_factor(ctx,
                        exynos4210_fixed_factor_clks,
                        ARRAY_SIZE(exynos4210_fixed_factor_clks));
+               exynos_register_cpu_clock(ctx, CLK_ARM_CLK, "armclk",
+                       mout_core_p4210[0], mout_core_p4210[1], 0x14200,
+                       e4210_armclk_d, ARRAY_SIZE(e4210_armclk_d),
+                       CLK_CPU_NEEDS_DEBUG_ALT_DIV | CLK_CPU_HAS_DIV1);
        } else {
                samsung_clk_register_mux(ctx, exynos4x12_mux_clks,
                        ARRAY_SIZE(exynos4x12_mux_clks));
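
To make the e4210_armclk_d table above easier to read, here is the bit packing that the E4210_CPU_DIV0()/E4210_CPU_DIV1() macros perform for the 1.2 GHz entry, checked with a small standalone userspace program (illustration only):

#include <stdio.h>

#define E4210_CPU_DIV0(apll, pclk_dbg, atb, periph, corem1, corem0)	\
		(((apll) << 24) | ((pclk_dbg) << 20) | ((atb) << 16) |	\
		((periph) << 12) | ((corem1) << 8) | ((corem0) << 4))
#define E4210_CPU_DIV1(hpm, copy)	(((hpm) << 4) | ((copy) << 0))

int main(void)
{
	/* 1200000 kHz entry: DIV0(7, 1, 4, 3, 7, 3) and DIV1(0, 5) */
	printf("DIV_CPU0 = 0x%08x\n", E4210_CPU_DIV0(7, 1, 4, 3, 7, 3));	/* 0x07143730 */
	printf("DIV_CPU1 = 0x%08x\n", E4210_CPU_DIV1(0, 5));			/* 0x00000005 */
	return 0;
}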
index e2e5193d10490b0295805023c448c1c0b685d02d..06f96eb7cf93c5b4c7d946d41b57b30078042db2 100644 (file)
@@ -94,7 +94,7 @@ PNAME(mout_aud_pll_user_p) = {"fin_pll", "fout_aud_pll"};
 PNAME(mout_sclk_aud_i2s_p) = {"mout_aud_pll_user", "ioclk_i2s_cdclk"};
 PNAME(mout_sclk_aud_pcm_p) = {"mout_aud_pll_user", "ioclk_pcm_extclk"};
 
-struct samsung_mux_clock aud_mux_clks[] __initdata = {
+static struct samsung_mux_clock aud_mux_clks[] __initdata = {
        MUX(AUD_MOUT_AUD_PLL_USER, "mout_aud_pll_user", mout_aud_pll_user_p,
                        MUX_SEL_AUD, 0, 1),
        MUX(AUD_MOUT_SCLK_AUD_I2S, "mout_sclk_aud_i2s", mout_sclk_aud_i2s_p,
@@ -103,7 +103,7 @@ struct samsung_mux_clock aud_mux_clks[] __initdata = {
                        MUX_SEL_AUD, 8, 1),
 };
 
-struct samsung_div_clock aud_div_clks[] __initdata = {
+static struct samsung_div_clock aud_div_clks[] __initdata = {
        DIV(AUD_DOUT_ACLK_AUD_131, "dout_aclk_aud_131", "mout_aud_pll_user",
                        DIV_AUD0, 0, 4),
 
@@ -115,7 +115,7 @@ struct samsung_div_clock aud_div_clks[] __initdata = {
                        DIV_AUD1, 12, 4),
 };
 
-struct samsung_gate_clock aud_gate_clks[] __initdata = {
+static struct samsung_gate_clock aud_gate_clks[] __initdata = {
        GATE(AUD_SCLK_I2S, "sclk_aud_i2s", "dout_sclk_aud_i2s",
                        EN_SCLK_AUD, 0, CLK_SET_RATE_PARENT, 0),
        GATE(AUD_SCLK_PCM, "sclk_aud_pcm", "dout_sclk_aud_pcm",
@@ -135,7 +135,7 @@ struct samsung_gate_clock aud_gate_clks[] __initdata = {
 
 static void __init exynos5260_clk_aud_init(struct device_node *np)
 {
-       struct samsung_cmu_info cmu = {0};
+       struct samsung_cmu_info cmu = { NULL };
 
        cmu.mux_clks = aud_mux_clks;
        cmu.nr_mux_clks = ARRAY_SIZE(aud_mux_clks);
@@ -203,7 +203,7 @@ PNAME(mout_phyclk_mipi_dphy_4lmrxclk_esc0_user_p) = {"fin_pll",
 PNAME(mout_sclk_hdmi_spdif_p) = {"fin_pll", "ioclk_spdif_extclk",
                        "dout_aclk_peri_aud", "phyclk_hdmi_phy_ref_cko"};
 
-struct samsung_mux_clock disp_mux_clks[] __initdata = {
+static struct samsung_mux_clock disp_mux_clks[] __initdata = {
        MUX(DISP_MOUT_ACLK_DISP_333_USER, "mout_aclk_disp_333_user",
                        mout_aclk_disp_333_user_p,
                        MUX_SEL_DISP0, 0, 1),
@@ -272,7 +272,7 @@ struct samsung_mux_clock disp_mux_clks[] __initdata = {
                        MUX_SEL_DISP4, 4, 2),
 };
 
-struct samsung_div_clock disp_div_clks[] __initdata = {
+static struct samsung_div_clock disp_div_clks[] __initdata = {
        DIV(DISP_DOUT_PCLK_DISP_111, "dout_pclk_disp_111",
                        "mout_aclk_disp_222_user",
                        DIV_DISP, 8, 4),
@@ -285,7 +285,7 @@ struct samsung_div_clock disp_div_clks[] __initdata = {
                        DIV_DISP, 16, 4),
 };
 
-struct samsung_gate_clock disp_gate_clks[] __initdata = {
+static struct samsung_gate_clock disp_gate_clks[] __initdata = {
        GATE(DISP_MOUT_HDMI_PHY_PIXEL_USER, "sclk_hdmi_link_i_pixel",
                        "mout_phyclk_hdmi_phy_pixel_clko_user",
                        EN_SCLK_DISP0, 26, CLK_SET_RATE_PARENT, 0),
@@ -325,7 +325,7 @@ struct samsung_gate_clock disp_gate_clks[] __initdata = {
 
 static void __init exynos5260_clk_disp_init(struct device_node *np)
 {
-       struct samsung_cmu_info cmu = {0};
+       struct samsung_cmu_info cmu = { NULL };
 
        cmu.mux_clks = disp_mux_clks;
        cmu.nr_mux_clks = ARRAY_SIZE(disp_mux_clks);
@@ -363,13 +363,13 @@ static unsigned long egl_clk_regs[] __initdata = {
 PNAME(mout_egl_b_p) = {"mout_egl_pll", "dout_bus_pll"};
 PNAME(mout_egl_pll_p) = {"fin_pll", "fout_egl_pll"};
 
-struct samsung_mux_clock egl_mux_clks[] __initdata = {
+static struct samsung_mux_clock egl_mux_clks[] __initdata = {
        MUX(EGL_MOUT_EGL_PLL, "mout_egl_pll", mout_egl_pll_p,
                        MUX_SEL_EGL, 4, 1),
        MUX(EGL_MOUT_EGL_B, "mout_egl_b", mout_egl_b_p, MUX_SEL_EGL, 16, 1),
 };
 
-struct samsung_div_clock egl_div_clks[] __initdata = {
+static struct samsung_div_clock egl_div_clks[] __initdata = {
        DIV(EGL_DOUT_EGL1, "dout_egl1", "mout_egl_b", DIV_EGL, 0, 3),
        DIV(EGL_DOUT_EGL2, "dout_egl2", "dout_egl1", DIV_EGL, 4, 3),
        DIV(EGL_DOUT_ACLK_EGL, "dout_aclk_egl", "dout_egl2", DIV_EGL, 8, 3),
@@ -389,7 +389,7 @@ static struct samsung_pll_clock egl_pll_clks[] __initdata = {
 
 static void __init exynos5260_clk_egl_init(struct device_node *np)
 {
-       struct samsung_cmu_info cmu = {0};
+       struct samsung_cmu_info cmu = { NULL };
 
        cmu.pll_clks = egl_pll_clks;
        cmu.nr_pll_clks =  ARRAY_SIZE(egl_pll_clks);
@@ -433,7 +433,7 @@ PNAME(mout_phyclk_usbdrd30_pipe_pclk_user_p) = {"fin_pll",
 PNAME(mout_phyclk_usbdrd30_phyclock_user_p) = {"fin_pll",
                        "phyclk_usbdrd30_udrd30_phyclock"};
 
-struct samsung_mux_clock fsys_mux_clks[] __initdata = {
+static struct samsung_mux_clock fsys_mux_clks[] __initdata = {
        MUX(FSYS_MOUT_PHYCLK_USBDRD30_PHYCLOCK_USER,
                        "mout_phyclk_usbdrd30_phyclock_user",
                        mout_phyclk_usbdrd30_phyclock_user_p,
@@ -456,7 +456,7 @@ struct samsung_mux_clock fsys_mux_clks[] __initdata = {
                        MUX_SEL_FSYS1, 16, 1),
 };
 
-struct samsung_gate_clock fsys_gate_clks[] __initdata = {
+static struct samsung_gate_clock fsys_gate_clks[] __initdata = {
        GATE(FSYS_PHYCLK_USBHOST20, "phyclk_usbhost20_phyclock",
                        "mout_phyclk_usbdrd30_phyclock_user",
                        EN_SCLK_FSYS, 1, 0, 0),
@@ -491,7 +491,7 @@ struct samsung_gate_clock fsys_gate_clks[] __initdata = {
 
 static void __init exynos5260_clk_fsys_init(struct device_node *np)
 {
-       struct samsung_cmu_info cmu = {0};
+       struct samsung_cmu_info cmu = { NULL };
 
        cmu.mux_clks = fsys_mux_clks;
        cmu.nr_mux_clks = ARRAY_SIZE(fsys_mux_clks);
@@ -537,18 +537,18 @@ static unsigned long g2d_clk_regs[] __initdata = {
 
 PNAME(mout_aclk_g2d_333_user_p) = {"fin_pll", "dout_aclk_g2d_333"};
 
-struct samsung_mux_clock g2d_mux_clks[] __initdata = {
+static struct samsung_mux_clock g2d_mux_clks[] __initdata = {
        MUX(G2D_MOUT_ACLK_G2D_333_USER, "mout_aclk_g2d_333_user",
                        mout_aclk_g2d_333_user_p,
                        MUX_SEL_G2D, 0, 1),
 };
 
-struct samsung_div_clock g2d_div_clks[] __initdata = {
+static struct samsung_div_clock g2d_div_clks[] __initdata = {
        DIV(G2D_DOUT_PCLK_G2D_83, "dout_pclk_g2d_83", "mout_aclk_g2d_333_user",
                        DIV_G2D, 0, 3),
 };
 
-struct samsung_gate_clock g2d_gate_clks[] __initdata = {
+static struct samsung_gate_clock g2d_gate_clks[] __initdata = {
        GATE(G2D_CLK_G2D, "clk_g2d", "mout_aclk_g2d_333_user",
                        EN_IP_G2D, 4, 0, 0),
        GATE(G2D_CLK_JPEG, "clk_jpeg", "mout_aclk_g2d_333_user",
@@ -580,7 +580,7 @@ struct samsung_gate_clock g2d_gate_clks[] __initdata = {
 
 static void __init exynos5260_clk_g2d_init(struct device_node *np)
 {
-       struct samsung_cmu_info cmu = {0};
+       struct samsung_cmu_info cmu = { NULL };
 
        cmu.mux_clks = g2d_mux_clks;
        cmu.nr_mux_clks = ARRAY_SIZE(g2d_mux_clks);
@@ -617,17 +617,17 @@ static unsigned long g3d_clk_regs[] __initdata = {
 
 PNAME(mout_g3d_pll_p) = {"fin_pll", "fout_g3d_pll"};
 
-struct samsung_mux_clock g3d_mux_clks[] __initdata = {
+static struct samsung_mux_clock g3d_mux_clks[] __initdata = {
        MUX(G3D_MOUT_G3D_PLL, "mout_g3d_pll", mout_g3d_pll_p,
                        MUX_SEL_G3D, 0, 1),
 };
 
-struct samsung_div_clock g3d_div_clks[] __initdata = {
+static struct samsung_div_clock g3d_div_clks[] __initdata = {
        DIV(G3D_DOUT_PCLK_G3D, "dout_pclk_g3d", "dout_aclk_g3d", DIV_G3D, 0, 3),
        DIV(G3D_DOUT_ACLK_G3D, "dout_aclk_g3d", "mout_g3d_pll", DIV_G3D, 4, 3),
 };
 
-struct samsung_gate_clock g3d_gate_clks[] __initdata = {
+static struct samsung_gate_clock g3d_gate_clks[] __initdata = {
        GATE(G3D_CLK_G3D, "clk_g3d", "dout_aclk_g3d", EN_IP_G3D, 2, 0, 0),
        GATE(G3D_CLK_G3D_HPM, "clk_g3d_hpm", "dout_aclk_g3d",
                        EN_IP_G3D, 3, 0, 0),
@@ -641,7 +641,7 @@ static struct samsung_pll_clock g3d_pll_clks[] __initdata = {
 
 static void __init exynos5260_clk_g3d_init(struct device_node *np)
 {
-       struct samsung_cmu_info cmu = {0};
+       struct samsung_cmu_info cmu = { NULL };
 
        cmu.pll_clks = g3d_pll_clks;
        cmu.nr_pll_clks =  ARRAY_SIZE(g3d_pll_clks);
@@ -694,7 +694,7 @@ PNAME(mout_aclk_m2m_400_user_p) = {"fin_pll", "dout_aclk_gscl_400"};
 PNAME(mout_aclk_gscl_fimc_user_p) = {"fin_pll", "dout_aclk_gscl_400"};
 PNAME(mout_aclk_csis_p) = {"dout_aclk_csis_200", "mout_aclk_gscl_fimc_user"};
 
-struct samsung_mux_clock gscl_mux_clks[] __initdata = {
+static struct samsung_mux_clock gscl_mux_clks[] __initdata = {
        MUX(GSCL_MOUT_ACLK_GSCL_333_USER, "mout_aclk_gscl_333_user",
                        mout_aclk_gscl_333_user_p,
                        MUX_SEL_GSCL, 0, 1),
@@ -708,7 +708,7 @@ struct samsung_mux_clock gscl_mux_clks[] __initdata = {
                        MUX_SEL_GSCL, 24, 1),
 };
 
-struct samsung_div_clock gscl_div_clks[] __initdata = {
+static struct samsung_div_clock gscl_div_clks[] __initdata = {
        DIV(GSCL_DOUT_PCLK_M2M_100, "dout_pclk_m2m_100",
                        "mout_aclk_m2m_400_user",
                        DIV_GSCL, 0, 3),
@@ -717,7 +717,7 @@ struct samsung_div_clock gscl_div_clks[] __initdata = {
                        DIV_GSCL, 4, 3),
 };
 
-struct samsung_gate_clock gscl_gate_clks[] __initdata = {
+static struct samsung_gate_clock gscl_gate_clks[] __initdata = {
        GATE(GSCL_SCLK_CSIS0_WRAP, "sclk_csis0_wrap", "dout_aclk_csis_200",
                        EN_SCLK_GSCL_FIMC, 0, CLK_SET_RATE_PARENT, 0),
        GATE(GSCL_SCLK_CSIS1_WRAP, "sclk_csis1_wrap", "dout_aclk_csis_200",
@@ -776,7 +776,7 @@ struct samsung_gate_clock gscl_gate_clks[] __initdata = {
 
 static void __init exynos5260_clk_gscl_init(struct device_node *np)
 {
-       struct samsung_cmu_info cmu = {0};
+       struct samsung_cmu_info cmu = { NULL };
 
        cmu.mux_clks = gscl_mux_clks;
        cmu.nr_mux_clks = ARRAY_SIZE(gscl_mux_clks);
@@ -813,14 +813,14 @@ static unsigned long isp_clk_regs[] __initdata = {
 PNAME(mout_isp_400_user_p) = {"fin_pll", "dout_aclk_isp1_400"};
 PNAME(mout_isp_266_user_p)      = {"fin_pll", "dout_aclk_isp1_266"};
 
-struct samsung_mux_clock isp_mux_clks[] __initdata = {
+static struct samsung_mux_clock isp_mux_clks[] __initdata = {
        MUX(ISP_MOUT_ISP_266_USER, "mout_isp_266_user", mout_isp_266_user_p,
                        MUX_SEL_ISP0, 0, 1),
        MUX(ISP_MOUT_ISP_400_USER, "mout_isp_400_user", mout_isp_400_user_p,
                        MUX_SEL_ISP0, 4, 1),
 };
 
-struct samsung_div_clock isp_div_clks[] __initdata = {
+static struct samsung_div_clock isp_div_clks[] __initdata = {
        DIV(ISP_DOUT_PCLK_ISP_66, "dout_pclk_isp_66", "mout_kfc",
                        DIV_ISP, 0, 3),
        DIV(ISP_DOUT_PCLK_ISP_133, "dout_pclk_isp_133", "mout_kfc",
@@ -832,7 +832,7 @@ struct samsung_div_clock isp_div_clks[] __initdata = {
        DIV(ISP_DOUT_SCLK_MPWM, "dout_sclk_mpwm", "mout_kfc", DIV_ISP, 20, 2),
 };
 
-struct samsung_gate_clock isp_gate_clks[] __initdata = {
+static struct samsung_gate_clock isp_gate_clks[] __initdata = {
        GATE(ISP_CLK_GIC, "clk_isp_gic", "mout_aclk_isp1_266",
                        EN_IP_ISP0, 15, 0, 0),
 
@@ -895,7 +895,7 @@ struct samsung_gate_clock isp_gate_clks[] __initdata = {
 
 static void __init exynos5260_clk_isp_init(struct device_node *np)
 {
-       struct samsung_cmu_info cmu = {0};
+       struct samsung_cmu_info cmu = { NULL };
 
        cmu.mux_clks = isp_mux_clks;
        cmu.nr_mux_clks = ARRAY_SIZE(isp_mux_clks);
@@ -934,13 +934,13 @@ static unsigned long kfc_clk_regs[] __initdata = {
 PNAME(mout_kfc_pll_p) = {"fin_pll", "fout_kfc_pll"};
 PNAME(mout_kfc_p)       = {"mout_kfc_pll", "dout_media_pll"};
 
-struct samsung_mux_clock kfc_mux_clks[] __initdata = {
+static struct samsung_mux_clock kfc_mux_clks[] __initdata = {
        MUX(KFC_MOUT_KFC_PLL, "mout_kfc_pll", mout_kfc_pll_p,
                        MUX_SEL_KFC0, 0, 1),
        MUX(KFC_MOUT_KFC, "mout_kfc", mout_kfc_p, MUX_SEL_KFC2, 0, 1),
 };
 
-struct samsung_div_clock kfc_div_clks[] __initdata = {
+static struct samsung_div_clock kfc_div_clks[] __initdata = {
        DIV(KFC_DOUT_KFC1, "dout_kfc1", "mout_kfc", DIV_KFC, 0, 3),
        DIV(KFC_DOUT_KFC2, "dout_kfc2", "dout_kfc1", DIV_KFC, 4, 3),
        DIV(KFC_DOUT_KFC_ATCLK, "dout_kfc_atclk", "dout_kfc2", DIV_KFC, 8, 3),
@@ -959,7 +959,7 @@ static struct samsung_pll_clock kfc_pll_clks[] __initdata = {
 
 static void __init exynos5260_clk_kfc_init(struct device_node *np)
 {
-       struct samsung_cmu_info cmu = {0};
+       struct samsung_cmu_info cmu = { NULL };
 
        cmu.pll_clks = kfc_pll_clks;
        cmu.nr_pll_clks =  ARRAY_SIZE(kfc_pll_clks);
@@ -993,18 +993,18 @@ static unsigned long mfc_clk_regs[] __initdata = {
 
 PNAME(mout_aclk_mfc_333_user_p) = {"fin_pll", "dout_aclk_mfc_333"};
 
-struct samsung_mux_clock mfc_mux_clks[] __initdata = {
+static struct samsung_mux_clock mfc_mux_clks[] __initdata = {
        MUX(MFC_MOUT_ACLK_MFC_333_USER, "mout_aclk_mfc_333_user",
                        mout_aclk_mfc_333_user_p,
                        MUX_SEL_MFC, 0, 1),
 };
 
-struct samsung_div_clock mfc_div_clks[] __initdata = {
+static struct samsung_div_clock mfc_div_clks[] __initdata = {
        DIV(MFC_DOUT_PCLK_MFC_83, "dout_pclk_mfc_83", "mout_aclk_mfc_333_user",
                        DIV_MFC, 0, 3),
 };
 
-struct samsung_gate_clock mfc_gate_clks[] __initdata = {
+static struct samsung_gate_clock mfc_gate_clks[] __initdata = {
        GATE(MFC_CLK_MFC, "clk_mfc", "mout_aclk_mfc_333_user",
                        EN_IP_MFC, 1, 0, 0),
        GATE(MFC_CLK_SMMU2_MFCM0, "clk_smmu2_mfcm0", "mout_aclk_mfc_333_user",
@@ -1015,7 +1015,7 @@ struct samsung_gate_clock mfc_gate_clks[] __initdata = {
 
 static void __init exynos5260_clk_mfc_init(struct device_node *np)
 {
-       struct samsung_cmu_info cmu = {0};
+       struct samsung_cmu_info cmu = { NULL };
 
        cmu.mux_clks = mfc_mux_clks;
        cmu.nr_mux_clks = ARRAY_SIZE(mfc_mux_clks);
@@ -1078,7 +1078,7 @@ PNAME(mout_mif_drex2x_p) = {"dout_mem_pll", "dout_bus_pll"};
 PNAME(mout_clkm_phy_p) = {"mout_mif_drex", "dout_media_pll"};
 PNAME(mout_clk2x_phy_p) = {"mout_mif_drex2x", "dout_media_pll"};
 
-struct samsung_mux_clock mif_mux_clks[] __initdata = {
+static struct samsung_mux_clock mif_mux_clks[] __initdata = {
        MUX(MIF_MOUT_MEM_PLL, "mout_mem_pll", mout_mem_pll_p,
                        MUX_SEL_MIF, 0, 1),
        MUX(MIF_MOUT_BUS_PLL, "mout_bus_pll", mout_bus_pll_p,
@@ -1095,7 +1095,7 @@ struct samsung_mux_clock mif_mux_clks[] __initdata = {
                        MUX_SEL_MIF, 24, 1),
 };
 
-struct samsung_div_clock mif_div_clks[] __initdata = {
+static struct samsung_div_clock mif_div_clks[] __initdata = {
        DIV(MIF_DOUT_MEDIA_PLL, "dout_media_pll", "mout_media_pll",
                        DIV_MIF, 0, 3),
        DIV(MIF_DOUT_MEM_PLL, "dout_mem_pll", "mout_mem_pll",
@@ -1114,7 +1114,7 @@ struct samsung_div_clock mif_div_clks[] __initdata = {
                        DIV_MIF, 28, 4),
 };
 
-struct samsung_gate_clock mif_gate_clks[] __initdata = {
+static struct samsung_gate_clock mif_gate_clks[] __initdata = {
        GATE(MIF_CLK_LPDDR3PHY_WRAP0, "clk_lpddr3phy_wrap0", "dout_clk2x_phy",
                        EN_IP_MIF, 12, CLK_IGNORE_UNUSED, 0),
        GATE(MIF_CLK_LPDDR3PHY_WRAP1, "clk_lpddr3phy_wrap1", "dout_clk2x_phy",
@@ -1162,7 +1162,7 @@ static struct samsung_pll_clock mif_pll_clks[] __initdata = {
 
 static void __init exynos5260_clk_mif_init(struct device_node *np)
 {
-       struct samsung_cmu_info cmu = {0};
+       struct samsung_cmu_info cmu = { NULL };
 
        cmu.pll_clks = mif_pll_clks;
        cmu.nr_pll_clks =  ARRAY_SIZE(mif_pll_clks);
@@ -1221,7 +1221,7 @@ PNAME(mout_sclk_i2scod_p) = {"ioclk_i2s_cdclk", "fin_pll", "dout_aclk_peri_aud",
 PNAME(mout_sclk_spdif_p) = {"ioclk_spdif_extclk", "fin_pll",
                        "dout_aclk_peri_aud", "phyclk_hdmi_phy_ref_cko"};
 
-struct samsung_mux_clock peri_mux_clks[] __initdata = {
+static struct samsung_mux_clock peri_mux_clks[] __initdata = {
        MUX(PERI_MOUT_SCLK_PCM, "mout_sclk_pcm", mout_sclk_pcm_p,
                        MUX_SEL_PERI1, 4, 2),
        MUX(PERI_MOUT_SCLK_I2SCOD, "mout_sclk_i2scod", mout_sclk_i2scod_p,
@@ -1230,12 +1230,12 @@ struct samsung_mux_clock peri_mux_clks[] __initdata = {
                        MUX_SEL_PERI1, 20, 2),
 };
 
-struct samsung_div_clock peri_div_clks[] __initdata = {
+static struct samsung_div_clock peri_div_clks[] __initdata = {
        DIV(PERI_DOUT_PCM, "dout_pcm", "mout_sclk_pcm", DIV_PERI, 0, 8),
        DIV(PERI_DOUT_I2S, "dout_i2s", "mout_sclk_i2scod", DIV_PERI, 8, 6),
 };
 
-struct samsung_gate_clock peri_gate_clks[] __initdata = {
+static struct samsung_gate_clock peri_gate_clks[] __initdata = {
        GATE(PERI_SCLK_PCM1, "sclk_pcm1", "dout_pcm", EN_SCLK_PERI, 0,
                        CLK_SET_RATE_PARENT, 0),
        GATE(PERI_SCLK_I2S, "sclk_i2s", "dout_i2s", EN_SCLK_PERI, 1,
@@ -1370,7 +1370,7 @@ struct samsung_gate_clock peri_gate_clks[] __initdata = {
 
 static void __init exynos5260_clk_peri_init(struct device_node *np)
 {
-       struct samsung_cmu_info cmu = {0};
+       struct samsung_cmu_info cmu = { NULL };
 
        cmu.mux_clks = peri_mux_clks;
        cmu.nr_mux_clks = ARRAY_SIZE(peri_mux_clks);
@@ -1432,7 +1432,7 @@ static unsigned long top_clk_regs[] __initdata = {
 };
 
 /* fixed rate clocks generated inside the soc */
-struct samsung_fixed_rate_clock fixed_rate_clks[] __initdata = {
+static struct samsung_fixed_rate_clock fixed_rate_clks[] __initdata = {
        FRATE(PHYCLK_DPTX_PHY_CH3_TXD_CLK, "phyclk_dptx_phy_ch3_txd_clk", NULL,
                        CLK_IS_ROOT, 270000000),
        FRATE(PHYCLK_DPTX_PHY_CH2_TXD_CLK, "phyclk_dptx_phy_ch2_txd_clk", NULL,
@@ -1519,7 +1519,7 @@ PNAME(mout_sclk_fsys_mmc1_sdclkin_b_p) = {"mout_sclk_fsys_mmc1_sdclkin_a",
 PNAME(mout_sclk_fsys_mmc2_sdclkin_b_p) = {"mout_sclk_fsys_mmc2_sdclkin_a",
                        "mout_mediatop_pll_user"};
 
-struct samsung_mux_clock top_mux_clks[] __initdata = {
+static struct samsung_mux_clock top_mux_clks[] __initdata = {
        MUX(TOP_MOUT_MEDIATOP_PLL_USER, "mout_mediatop_pll_user",
                        mout_mediatop_pll_user_p,
                        MUX_SEL_TOP_PLL0, 0, 1),
@@ -1679,7 +1679,7 @@ struct samsung_mux_clock top_mux_clks[] __initdata = {
                        MUX_SEL_TOP_GSCL, 20, 1),
 };
 
-struct samsung_div_clock top_div_clks[] __initdata = {
+static struct samsung_div_clock top_div_clks[] __initdata = {
        DIV(TOP_DOUT_ACLK_G2D_333, "dout_aclk_g2d_333", "mout_aclk_g2d_333",
                        DIV_TOP_G2D_MFC, 0, 3),
        DIV(TOP_DOUT_ACLK_MFC_333, "dout_aclk_mfc_333", "mout_aclk_mfc_333",
@@ -1800,7 +1800,7 @@ struct samsung_div_clock top_div_clks[] __initdata = {
 
 };
 
-struct samsung_gate_clock top_gate_clks[] __initdata = {
+static struct samsung_gate_clock top_gate_clks[] __initdata = {
        GATE(TOP_SCLK_MMC0, "sclk_fsys_mmc0_sdclkin",
                        "dout_sclk_fsys_mmc0_sdclkin_b",
                        EN_SCLK_TOP, 7, CLK_SET_RATE_PARENT, 0),
@@ -1826,7 +1826,7 @@ static struct samsung_pll_clock top_pll_clks[] __initdata = {
 
 static void __init exynos5260_clk_top_init(struct device_node *np)
 {
-       struct samsung_cmu_info cmu = {0};
+       struct samsung_cmu_info cmu = { NULL };
 
        cmu.pll_clks = top_pll_clks;
        cmu.nr_pll_clks =  ARRAY_SIZE(top_pll_clks);
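
The repeated "= {0}" to "= { NULL }" change in this file only affects the initializer's spelling, not its meaning: both zero-initialize the whole structure, and the new form simply matches the pointer type of the first member (presumably to quiet missing-braces/sparse-style warnings). A tiny standalone illustration:

#include <stddef.h>
#include <stdio.h>

struct cmu_like {
	const int *first;	/* first member is a pointer, as in samsung_cmu_info */
	unsigned int count;
};

int main(void)
{
	struct cmu_like a = {0};	/* old spelling: zero-initializes everything */
	struct cmu_like b = { NULL };	/* new spelling: same effect, matches member type */

	printf("%p %u / %p %u\n", (void *)a.first, a.count, (void *)b.first, b.count);
	return 0;
}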
index bea4a173eef5e40e12a4a05f8f6ccb3310700814..a1d731ca8f48587870753182a1ef76db82072686 100644 (file)
@@ -504,7 +504,7 @@ static struct samsung_fixed_factor_clock
        FFACTOR(0, "ff_dout_spll2", "mout_sclk_spll", 1, 2, 0),
 };
 
-struct samsung_mux_clock exynos5800_mux_clks[] __initdata = {
+static struct samsung_mux_clock exynos5800_mux_clks[] __initdata = {
        MUX(0, "mout_aclk400_isp", mout_group3_5800_p, SRC_TOP0, 0, 3),
        MUX(0, "mout_aclk400_mscl", mout_group3_5800_p, SRC_TOP0, 4, 3),
        MUX(0, "mout_aclk400_wcore", mout_group2_5800_p, SRC_TOP0, 16, 3),
@@ -553,7 +553,7 @@ struct samsung_mux_clock exynos5800_mux_clks[] __initdata = {
        MUX(0, "mout_fimd1", mout_group2_p, SRC_DISP10, 4, 3),
 };
 
-struct samsung_div_clock exynos5800_div_clks[] __initdata = {
+static struct samsung_div_clock exynos5800_div_clks[] __initdata = {
        DIV(0, "dout_aclk400_wcore", "mout_aclk400_wcore", DIV_TOP0, 16, 3),
 
        DIV(0, "dout_aclk550_cam", "mout_aclk550_cam",
@@ -569,14 +569,14 @@ struct samsung_div_clock exynos5800_div_clks[] __initdata = {
        DIV(0, "dout_sclk_sw", "sclk_spll", DIV_TOP9, 24, 6),
 };
 
-struct samsung_gate_clock exynos5800_gate_clks[] __initdata = {
+static struct samsung_gate_clock exynos5800_gate_clks[] __initdata = {
        GATE(CLK_ACLK550_CAM, "aclk550_cam", "mout_user_aclk550_cam",
                                GATE_BUS_TOP, 24, 0, 0),
        GATE(CLK_ACLK432_SCALER, "aclk432_scaler", "mout_user_aclk432_scaler",
                                GATE_BUS_TOP, 27, 0, 0),
 };
 
-struct samsung_mux_clock exynos5420_mux_clks[] __initdata = {
+static struct samsung_mux_clock exynos5420_mux_clks[] __initdata = {
        MUX(0, "sclk_bpll", mout_bpll_p, TOP_SPARE2, 0, 1),
        MUX(0, "mout_aclk400_wcore_bpll", mout_aclk400_wcore_bpll_p,
                                TOP_SPARE2, 4, 1),
@@ -606,7 +606,7 @@ struct samsung_mux_clock exynos5420_mux_clks[] __initdata = {
        MUX(0, "mout_fimd1", mout_group3_p, SRC_DISP10, 4, 1),
 };
 
-struct samsung_div_clock exynos5420_div_clks[] __initdata = {
+static struct samsung_div_clock exynos5420_div_clks[] __initdata = {
        DIV(0, "dout_aclk400_wcore", "mout_aclk400_wcore_bpll",
                        DIV_TOP0, 16, 3),
 };
index 9e04ae2bb4d74912f18976c305a1738756b19837..39c95649d3d016d14da7500e5b5ea188860252c9 100644 (file)
@@ -835,6 +835,7 @@ static unsigned long cpif_clk_regs[] __initdata = {
        MPHY_PLL_CON1,
        MPHY_PLL_FREQ_DET,
        MUX_SEL_CPIF0,
+       DIV_CPIF,
        ENABLE_SCLK_CPIF,
 };
 
@@ -1389,7 +1390,7 @@ static struct samsung_gate_clock mif_gate_clks[] __initdata = {
 
        /* ENABLE_ACLK_MIF2 */
        GATE(CLK_ACLK_MIFND_266, "aclk_mifnd_266", "div_aclk_mif_266",
-                       ENABLE_ACLK_MIF2, 20, 0, 0),
+                       ENABLE_ACLK_MIF2, 20, CLK_IGNORE_UNUSED, 0),
        GATE(CLK_ACLK_PPMU_DREX1S3, "aclk_ppmu_drex1s3", "div_aclk_drex1",
                        ENABLE_ACLK_MIF2, 17, CLK_IGNORE_UNUSED, 0),
        GATE(CLK_ACLK_PPMU_DREX1S1, "aclk_ppmu_drex1s1", "div_aclk_drex1",
@@ -1832,39 +1833,39 @@ static struct samsung_gate_clock peris_gate_clks[] __initdata = {
 
        /* ENABLE_PCLK_PERIS_SECURE_TZPC */
        GATE(CLK_PCLK_TZPC12, "pclk_tzpc12", "aclk_peris_66",
-                       ENABLE_PCLK_PERIS_SECURE_TZPC, 12, 0, 0),
+                       ENABLE_PCLK_PERIS_SECURE_TZPC, 12, CLK_IGNORE_UNUSED, 0),
        GATE(CLK_PCLK_TZPC11, "pclk_tzpc11", "aclk_peris_66",
-                       ENABLE_PCLK_PERIS_SECURE_TZPC, 11, 0, 0),
+                       ENABLE_PCLK_PERIS_SECURE_TZPC, 11, CLK_IGNORE_UNUSED, 0),
        GATE(CLK_PCLK_TZPC10, "pclk_tzpc10", "aclk_peris_66",
-                       ENABLE_PCLK_PERIS_SECURE_TZPC, 10, 0, 0),
+                       ENABLE_PCLK_PERIS_SECURE_TZPC, 10, CLK_IGNORE_UNUSED, 0),
        GATE(CLK_PCLK_TZPC9, "pclk_tzpc9", "aclk_peris_66",
-                       ENABLE_PCLK_PERIS_SECURE_TZPC, 9, 0, 0),
+                       ENABLE_PCLK_PERIS_SECURE_TZPC, 9, CLK_IGNORE_UNUSED, 0),
        GATE(CLK_PCLK_TZPC8, "pclk_tzpc8", "aclk_peris_66",
-                       ENABLE_PCLK_PERIS_SECURE_TZPC, 8, 0, 0),
+                       ENABLE_PCLK_PERIS_SECURE_TZPC, 8, CLK_IGNORE_UNUSED, 0),
        GATE(CLK_PCLK_TZPC7, "pclk_tzpc7", "aclk_peris_66",
-                       ENABLE_PCLK_PERIS_SECURE_TZPC, 7, 0, 0),
+                       ENABLE_PCLK_PERIS_SECURE_TZPC, 7, CLK_IGNORE_UNUSED, 0),
        GATE(CLK_PCLK_TZPC6, "pclk_tzpc6", "aclk_peris_66",
-                       ENABLE_PCLK_PERIS_SECURE_TZPC, 6, 0, 0),
+                       ENABLE_PCLK_PERIS_SECURE_TZPC, 6, CLK_IGNORE_UNUSED, 0),
        GATE(CLK_PCLK_TZPC5, "pclk_tzpc5", "aclk_peris_66",
-                       ENABLE_PCLK_PERIS_SECURE_TZPC, 5, 0, 0),
+                       ENABLE_PCLK_PERIS_SECURE_TZPC, 5, CLK_IGNORE_UNUSED, 0),
        GATE(CLK_PCLK_TZPC4, "pclk_tzpc4", "aclk_peris_66",
-                       ENABLE_PCLK_PERIS_SECURE_TZPC, 4, 0, 0),
+                       ENABLE_PCLK_PERIS_SECURE_TZPC, 4, CLK_IGNORE_UNUSED, 0),
        GATE(CLK_PCLK_TZPC3, "pclk_tzpc3", "aclk_peris_66",
-                       ENABLE_PCLK_PERIS_SECURE_TZPC, 3, 0, 0),
+                       ENABLE_PCLK_PERIS_SECURE_TZPC, 3, CLK_IGNORE_UNUSED, 0),
        GATE(CLK_PCLK_TZPC2, "pclk_tzpc2", "aclk_peris_66",
-                       ENABLE_PCLK_PERIS_SECURE_TZPC, 2, 0, 0),
+                       ENABLE_PCLK_PERIS_SECURE_TZPC, 2, CLK_IGNORE_UNUSED, 0),
        GATE(CLK_PCLK_TZPC1, "pclk_tzpc1", "aclk_peris_66",
-                       ENABLE_PCLK_PERIS_SECURE_TZPC, 1, 0, 0),
+                       ENABLE_PCLK_PERIS_SECURE_TZPC, 1, CLK_IGNORE_UNUSED, 0),
        GATE(CLK_PCLK_TZPC0, "pclk_tzpc0", "aclk_peris_66",
-                       ENABLE_PCLK_PERIS_SECURE_TZPC, 0, 0, 0),
+                       ENABLE_PCLK_PERIS_SECURE_TZPC, 0, CLK_IGNORE_UNUSED, 0),
 
        /* ENABLE_PCLK_PERIS_SECURE_SECKEY_APBIF */
        GATE(CLK_PCLK_SECKEY_APBIF, "pclk_seckey_apbif", "aclk_peris_66",
-                       ENABLE_PCLK_PERIS_SECURE_SECKEY_APBIF, 0, 0, 0),
+                       ENABLE_PCLK_PERIS_SECURE_SECKEY_APBIF, 0, CLK_IGNORE_UNUSED, 0),
 
        /* ENABLE_PCLK_PERIS_SECURE_CHIPID_APBIF */
        GATE(CLK_PCLK_CHIPID_APBIF, "pclk_chipid_apbif", "aclk_peris_66",
-                       ENABLE_PCLK_PERIS_SECURE_CHIPID_APBIF, 0, 0, 0),
+                       ENABLE_PCLK_PERIS_SECURE_CHIPID_APBIF, 0, CLK_IGNORE_UNUSED, 0),
 
        /* ENABLE_PCLK_PERIS_SECURE_TOPRTC */
        GATE(CLK_PCLK_TOPRTC, "pclk_toprtc", "aclk_peris_66",
@@ -1895,11 +1896,11 @@ static struct samsung_gate_clock peris_gate_clks[] __initdata = {
 
        /* ENABLE_SCLK_PERIS_SECURE_SECKEY */
        GATE(CLK_SCLK_SECKEY, "sclk_seckey", "oscclk_efuse_common",
-                       ENABLE_SCLK_PERIS_SECURE_SECKEY, 0, 0, 0),
+                       ENABLE_SCLK_PERIS_SECURE_SECKEY, 0, CLK_IGNORE_UNUSED, 0),
 
        /* ENABLE_SCLK_PERIS_SECURE_CHIPID */
        GATE(CLK_SCLK_CHIPID, "sclk_chipid", "oscclk_efuse_common",
-                       ENABLE_SCLK_PERIS_SECURE_CHIPID, 0, 0, 0),
+                       ENABLE_SCLK_PERIS_SECURE_CHIPID, 0, CLK_IGNORE_UNUSED, 0),
 
        /* ENABLE_SCLK_PERIS_SECURE_TOPRTC */
        GATE(CLK_SCLK_TOPRTC, "sclk_toprtc", "oscclk_efuse_common",
@@ -3286,10 +3287,10 @@ static struct samsung_pll_clock g3d_pll_clks[] __initdata = {
 
 static struct samsung_mux_clock g3d_mux_clks[] __initdata = {
        /* MUX_SEL_G3D */
-       MUX(CLK_MOUT_ACLK_G3D_400, "mout_aclk_g3d_400", mout_aclk_g3d_400_p,
-                       MUX_SEL_G3D, 8, 1),
-       MUX(CLK_MOUT_G3D_PLL, "mout_g3d_pll", mout_g3d_pll_p,
-                       MUX_SEL_G3D, 0, 1),
+       MUX_F(CLK_MOUT_ACLK_G3D_400, "mout_aclk_g3d_400", mout_aclk_g3d_400_p,
+                       MUX_SEL_G3D, 8, 1, CLK_SET_RATE_PARENT, 0),
+       MUX_F(CLK_MOUT_G3D_PLL, "mout_g3d_pll", mout_g3d_pll_p,
+                       MUX_SEL_G3D, 0, 1, CLK_SET_RATE_PARENT, 0),
 };
 
 static struct samsung_div_clock g3d_div_clks[] __initdata = {
@@ -3298,8 +3299,8 @@ static struct samsung_div_clock g3d_div_clks[] __initdata = {
                        8, 2),
        DIV(CLK_DIV_PCLK_G3D, "div_pclk_g3d", "div_aclk_g3d", DIV_G3D,
                        4, 3),
-       DIV(CLK_DIV_ACLK_G3D, "div_aclk_g3d", "mout_aclk_g3d_400", DIV_G3D,
-                       0, 3),
+       DIV_F(CLK_DIV_ACLK_G3D, "div_aclk_g3d", "mout_aclk_g3d_400", DIV_G3D,
+                       0, 3, CLK_SET_RATE_PARENT, 0),
 };
 
 static struct samsung_gate_clock g3d_gate_clks[] __initdata = {
@@ -3309,9 +3310,9 @@ static struct samsung_gate_clock g3d_gate_clks[] __initdata = {
        GATE(CLK_ACLK_BTS_G3D0, "aclk_bts_g3d0", "div_aclk_g3d",
                        ENABLE_ACLK_G3D, 6, 0, 0),
        GATE(CLK_ACLK_ASYNCAPBS_G3D, "aclk_asyncapbs_g3d", "div_pclk_g3d",
-                       ENABLE_ACLK_G3D, 5, 0, 0),
+                       ENABLE_ACLK_G3D, 5, CLK_IGNORE_UNUSED, 0),
        GATE(CLK_ACLK_ASYNCAPBM_G3D, "aclk_asyncapbm_g3d", "div_aclk_g3d",
-                       ENABLE_ACLK_G3D, 4, 0, 0),
+                       ENABLE_ACLK_G3D, 4, CLK_IGNORE_UNUSED, 0),
        GATE(CLK_ACLK_AHB2APB_G3DP, "aclk_ahb2apb_g3dp", "div_pclk_g3d",
                        ENABLE_ACLK_G3D, 3, CLK_IGNORE_UNUSED, 0),
        GATE(CLK_ACLK_G3DNP_150, "aclk_g3dnp_150", "div_pclk_g3d",
@@ -3319,7 +3320,7 @@ static struct samsung_gate_clock g3d_gate_clks[] __initdata = {
        GATE(CLK_ACLK_G3DND_600, "aclk_g3dnd_600", "div_aclk_g3d",
                        ENABLE_ACLK_G3D, 1, CLK_IGNORE_UNUSED, 0),
        GATE(CLK_ACLK_G3D, "aclk_g3d", "div_aclk_g3d",
-                       ENABLE_ACLK_G3D, 0, 0, 0),
+                       ENABLE_ACLK_G3D, 0, CLK_SET_RATE_PARENT, 0),
 
        /* ENABLE_PCLK_G3D */
        GATE(CLK_PCLK_BTS_G3D1, "pclk_bts_g3d1", "div_pclk_g3d",
@@ -3582,7 +3583,7 @@ static struct samsung_pll_clock apollo_pll_clks[] __initdata = {
 static struct samsung_mux_clock apollo_mux_clks[] __initdata = {
        /* MUX_SEL_APOLLO0 */
        MUX_F(CLK_MOUT_APOLLO_PLL, "mout_apollo_pll", mout_apollo_pll_p,
-                       MUX_SEL_APOLLO0, 0, 1, 0, CLK_MUX_READ_ONLY),
+                       MUX_SEL_APOLLO0, 0, 1, CLK_SET_RATE_PARENT, 0),
 
        /* MUX_SEL_APOLLO1 */
        MUX(CLK_MOUT_BUS_PLL_APOLLO_USER, "mout_bus_pll_apollo_user",
@@ -3590,7 +3591,7 @@ static struct samsung_mux_clock apollo_mux_clks[] __initdata = {
 
        /* MUX_SEL_APOLLO2 */
        MUX_F(CLK_MOUT_APOLLO, "mout_apollo", mout_apollo_p, MUX_SEL_APOLLO2,
-                       0, 1, 0, CLK_MUX_READ_ONLY),
+                       0, 1, CLK_SET_RATE_PARENT, 0),
 };
 
 static struct samsung_div_clock apollo_div_clks[] __initdata = {
@@ -3611,11 +3612,9 @@ static struct samsung_div_clock apollo_div_clks[] __initdata = {
                        DIV_APOLLO0, 8, 3, CLK_GET_RATE_NOCACHE,
                        CLK_DIVIDER_READ_ONLY),
        DIV_F(CLK_DIV_APOLLO2, "div_apollo2", "div_apollo1",
-                       DIV_APOLLO0, 4, 3, CLK_GET_RATE_NOCACHE,
-                       CLK_DIVIDER_READ_ONLY),
+                       DIV_APOLLO0, 4, 3, CLK_SET_RATE_PARENT, 0),
        DIV_F(CLK_DIV_APOLLO1, "div_apollo1", "mout_apollo",
-                       DIV_APOLLO0, 0, 3, CLK_GET_RATE_NOCACHE,
-                       CLK_DIVIDER_READ_ONLY),
+                       DIV_APOLLO0, 0, 3, CLK_SET_RATE_PARENT, 0),
 
        /* DIV_APOLLO1 */
        DIV_F(CLK_DIV_SCLK_HPM_APOLLO, "div_sclk_hpm_apollo", "mout_apollo",
@@ -3666,7 +3665,8 @@ static struct samsung_gate_clock apollo_gate_clks[] __initdata = {
        GATE(CLK_SCLK_HPM_APOLLO, "sclk_hpm_apollo", "div_sclk_hpm_apollo",
                        ENABLE_SCLK_APOLLO, 1, CLK_IGNORE_UNUSED, 0),
        GATE(CLK_SCLK_APOLLO, "sclk_apollo", "div_apollo2",
-                       ENABLE_SCLK_APOLLO, 0, CLK_IGNORE_UNUSED, 0),
+                       ENABLE_SCLK_APOLLO, 0,
+                       CLK_IGNORE_UNUSED | CLK_SET_RATE_PARENT, 0),
 };
 
 static struct samsung_cmu_info apollo_cmu_info __initdata = {
@@ -3775,7 +3775,7 @@ static struct samsung_pll_clock atlas_pll_clks[] __initdata = {
 static struct samsung_mux_clock atlas_mux_clks[] __initdata = {
        /* MUX_SEL_ATLAS0 */
        MUX_F(CLK_MOUT_ATLAS_PLL, "mout_atlas_pll", mout_atlas_pll_p,
-                       MUX_SEL_ATLAS0, 0, 1, 0, CLK_MUX_READ_ONLY),
+                       MUX_SEL_ATLAS0, 0, 1, CLK_SET_RATE_PARENT, 0),
 
        /* MUX_SEL_ATLAS1 */
        MUX(CLK_MOUT_BUS_PLL_ATLAS_USER, "mout_bus_pll_atlas_user",
@@ -3783,7 +3783,7 @@ static struct samsung_mux_clock atlas_mux_clks[] __initdata = {
 
        /* MUX_SEL_ATLAS2 */
        MUX_F(CLK_MOUT_ATLAS, "mout_atlas", mout_atlas_p, MUX_SEL_ATLAS2,
-                       0, 1, 0, CLK_MUX_READ_ONLY),
+                       0, 1, CLK_SET_RATE_PARENT, 0),
 };
 
 static struct samsung_div_clock atlas_div_clks[] __initdata = {
@@ -3804,11 +3804,9 @@ static struct samsung_div_clock atlas_div_clks[] __initdata = {
                        DIV_ATLAS0, 8, 3, CLK_GET_RATE_NOCACHE,
                        CLK_DIVIDER_READ_ONLY),
        DIV_F(CLK_DIV_ATLAS2, "div_atlas2", "div_atlas1",
-                       DIV_ATLAS0, 4, 3, CLK_GET_RATE_NOCACHE,
-                       CLK_DIVIDER_READ_ONLY),
+                       DIV_ATLAS0, 4, 3, CLK_SET_RATE_PARENT, 0),
        DIV_F(CLK_DIV_ATLAS1, "div_atlas1", "mout_atlas",
-                       DIV_ATLAS0, 0, 3, CLK_GET_RATE_NOCACHE,
-                       CLK_DIVIDER_READ_ONLY),
+                       DIV_ATLAS0, 0, 3, CLK_SET_RATE_PARENT, 0),
 
        /* DIV_ATLAS1 */
        DIV_F(CLK_DIV_SCLK_HPM_ATLAS, "div_sclk_hpm_atlas", "mout_atlas",
@@ -3885,7 +3883,8 @@ static struct samsung_gate_clock atlas_gate_clks[] __initdata = {
        GATE(CLK_ATCLK, "atclk", "div_atclk_atlas",
                        ENABLE_SCLK_ATLAS, 1, CLK_IGNORE_UNUSED, 0),
        GATE(CLK_SCLK_ATLAS, "sclk_atlas", "div_atlas2",
-                       ENABLE_SCLK_ATLAS, 0, CLK_IGNORE_UNUSED, 0),
+                       ENABLE_SCLK_ATLAS, 0,
+                       CLK_IGNORE_UNUSED | CLK_SET_RATE_PARENT, 0),
 };
 
 static struct samsung_cmu_info atlas_cmu_info __initdata = {
index 9d70e5c03804cee247ee54ff4faed549bbf7ea12..bebc61b5fce1f3741200bf146e6dd059a222f5b9 100644 (file)
@@ -1156,7 +1156,7 @@ static const struct clk_ops samsung_pll2650xx_clk_min_ops = {
 };
 
 static void __init _samsung_clk_register_pll(struct samsung_clk_provider *ctx,
-                               struct samsung_pll_clock *pll_clk,
+                               const struct samsung_pll_clock *pll_clk,
                                void __iomem *base)
 {
        struct samsung_clk_pll *pll;
@@ -1303,7 +1303,7 @@ static void __init _samsung_clk_register_pll(struct samsung_clk_provider *ctx,
 }
 
 void __init samsung_clk_register_pll(struct samsung_clk_provider *ctx,
-                       struct samsung_pll_clock *pll_list,
+                       const struct samsung_pll_clock *pll_list,
                        unsigned int nr_pll, void __iomem *base)
 {
        int cnt;
index f4f29ed6bd25a4d6d4cd0174738aaa93d6541353..e56df5064889e00530ddf8c531e29539debecc76 100644 (file)
@@ -81,13 +81,13 @@ static int s3c24xx_clkout_set_parent(struct clk_hw *hw, u8 index)
        return ret;
 }
 
-const struct clk_ops s3c24xx_clkout_ops = {
+static const struct clk_ops s3c24xx_clkout_ops = {
        .get_parent = s3c24xx_clkout_get_parent,
        .set_parent = s3c24xx_clkout_set_parent,
        .determine_rate = __clk_mux_determine_rate,
 };
 
-struct clk *s3c24xx_register_clkout(struct device *dev, const char *name,
+static struct clk *s3c24xx_register_clkout(struct device *dev, const char *name,
                const char **parent_names, u8 num_parents,
                u8 shift, u32 mask)
 {
@@ -404,7 +404,7 @@ static struct s3c24xx_dclk_drv_data dclk_variants[] = {
        },
 };
 
-static struct platform_device_id s3c24xx_dclk_driver_ids[] = {
+static const struct platform_device_id s3c24xx_dclk_driver_ids[] = {
        {
                .name           = "s3c2410-dclk",
                .driver_data    = (kernel_ulong_t)&dclk_variants[S3C2410],
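
The clk-s5pv210.c hunks that follow convert the parent-name tables from "const char *name[]" in __initdata to "const char *const name[]" in __initconst: the extra const makes the array of pointers itself read-only (previously only the strings were const), which is what lets it be placed in a read-only init section. A minimal sketch of the difference, with the kernel section attributes left out:

#include <stdio.h>

/* Before: the strings are const, but the pointer array itself is writable. */
static const char *mout_apll_p_old[] = { "fin_pll", "fout_apll" };

/* After: both the strings and the array of pointers are const. */
static const char *const mout_apll_p_new[] = { "fin_pll", "fout_apll" };

int main(void)
{
	mout_apll_p_old[0] = "something_else";	/* legal: the array is mutable */
	/* mout_apll_p_new[0] = "something_else";   would not compile */

	printf("%s %s\n", mout_apll_p_old[0], mout_apll_p_new[0]);
	return 0;
}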
index e668e479a6970301c9512711bb6b4f044ff670e5..cf7e8fa7b624c701511d7cefadb4c721221ec1bd 100644 (file)
@@ -169,44 +169,44 @@ static inline void s5pv210_clk_sleep_init(void) { }
 #endif
 
 /* Mux parent lists. */
-static const char *fin_pll_p[] __initdata = {
+static const char *const fin_pll_p[] __initconst = {
        "xxti",
        "xusbxti"
 };
 
-static const char *mout_apll_p[] __initdata = {
+static const char *const mout_apll_p[] __initconst = {
        "fin_pll",
        "fout_apll"
 };
 
-static const char *mout_mpll_p[] __initdata = {
+static const char *const mout_mpll_p[] __initconst = {
        "fin_pll",
        "fout_mpll"
 };
 
-static const char *mout_epll_p[] __initdata = {
+static const char *const mout_epll_p[] __initconst = {
        "fin_pll",
        "fout_epll"
 };
 
-static const char *mout_vpllsrc_p[] __initdata = {
+static const char *const mout_vpllsrc_p[] __initconst = {
        "fin_pll",
        "sclk_hdmi27m"
 };
 
-static const char *mout_vpll_p[] __initdata = {
+static const char *const mout_vpll_p[] __initconst = {
        "mout_vpllsrc",
        "fout_vpll"
 };
 
-static const char *mout_group1_p[] __initdata = {
+static const char *const mout_group1_p[] __initconst = {
        "dout_a2m",
        "mout_mpll",
        "mout_epll",
        "mout_vpll"
 };
 
-static const char *mout_group2_p[] __initdata = {
+static const char *const mout_group2_p[] __initconst = {
        "xxti",
        "xusbxti",
        "sclk_hdmi27m",
@@ -218,7 +218,7 @@ static const char *mout_group2_p[] __initdata = {
        "mout_vpll",
 };
 
-static const char *mout_audio0_p[] __initdata = {
+static const char *const mout_audio0_p[] __initconst = {
        "xxti",
        "pcmcdclk0",
        "sclk_hdmi27m",
@@ -230,7 +230,7 @@ static const char *mout_audio0_p[] __initdata = {
        "mout_vpll",
 };
 
-static const char *mout_audio1_p[] __initdata = {
+static const char *const mout_audio1_p[] __initconst = {
        "i2scdclk1",
        "pcmcdclk1",
        "sclk_hdmi27m",
@@ -242,7 +242,7 @@ static const char *mout_audio1_p[] __initdata = {
        "mout_vpll",
 };
 
-static const char *mout_audio2_p[] __initdata = {
+static const char *const mout_audio2_p[] __initconst = {
        "i2scdclk2",
        "pcmcdclk2",
        "sclk_hdmi27m",
@@ -254,63 +254,63 @@ static const char *mout_audio2_p[] __initdata = {
        "mout_vpll",
 };
 
-static const char *mout_spdif_p[] __initdata = {
+static const char *const mout_spdif_p[] __initconst = {
        "dout_audio0",
        "dout_audio1",
        "dout_audio3",
 };
 
-static const char *mout_group3_p[] __initdata = {
+static const char *const mout_group3_p[] __initconst = {
        "mout_apll",
        "mout_mpll"
 };
 
-static const char *mout_group4_p[] __initdata = {
+static const char *const mout_group4_p[] __initconst = {
        "mout_mpll",
        "dout_a2m"
 };
 
-static const char *mout_flash_p[] __initdata = {
+static const char *const mout_flash_p[] __initconst = {
        "dout_hclkd",
        "dout_hclkp"
 };
 
-static const char *mout_dac_p[] __initdata = {
+static const char *const mout_dac_p[] __initconst = {
        "mout_vpll",
        "sclk_hdmiphy"
 };
 
-static const char *mout_hdmi_p[] __initdata = {
+static const char *const mout_hdmi_p[] __initconst = {
        "sclk_hdmiphy",
        "dout_tblk"
 };
 
-static const char *mout_mixer_p[] __initdata = {
+static const char *const mout_mixer_p[] __initconst = {
        "mout_dac",
        "mout_hdmi"
 };
 
-static const char *mout_vpll_6442_p[] __initdata = {
+static const char *const mout_vpll_6442_p[] __initconst = {
        "fin_pll",
        "fout_vpll"
 };
 
-static const char *mout_mixer_6442_p[] __initdata = {
+static const char *const mout_mixer_6442_p[] __initconst = {
        "mout_vpll",
        "dout_mixer"
 };
 
-static const char *mout_d0sync_6442_p[] __initdata = {
+static const char *const mout_d0sync_6442_p[] __initconst = {
        "mout_dsys",
        "div_apll"
 };
 
-static const char *mout_d1sync_6442_p[] __initdata = {
+static const char *const mout_d1sync_6442_p[] __initconst = {
        "mout_psys",
        "div_apll"
 };
 
-static const char *mout_group2_6442_p[] __initdata = {
+static const char *const mout_group2_6442_p[] __initconst = {
        "fin_pll",
        "none",
        "none",
@@ -322,7 +322,7 @@ static const char *mout_group2_6442_p[] __initdata = {
        "mout_vpll",
 };
 
-static const char *mout_audio0_6442_p[] __initdata = {
+static const char *const mout_audio0_6442_p[] __initconst = {
        "fin_pll",
        "pcmcdclk0",
        "none",
@@ -334,7 +334,7 @@ static const char *mout_audio0_6442_p[] __initdata = {
        "mout_vpll",
 };
 
-static const char *mout_audio1_6442_p[] __initdata = {
+static const char *const mout_audio1_6442_p[] __initconst = {
        "i2scdclk1",
        "pcmcdclk1",
        "none",
@@ -347,7 +347,7 @@ static const char *mout_audio1_6442_p[] __initdata = {
        "fin_pll",
 };
 
-static const char *mout_clksel_p[] __initdata = {
+static const char *const mout_clksel_p[] __initconst = {
        "fout_apll_clkout",
        "fout_mpll_clkout",
        "fout_epll",
@@ -370,7 +370,7 @@ static const char *mout_clksel_p[] __initdata = {
        "div_dclk"
 };
 
-static const char *mout_clksel_6442_p[] __initdata = {
+static const char *const mout_clksel_6442_p[] __initconst = {
        "fout_apll_clkout",
        "fout_mpll_clkout",
        "fout_epll",
@@ -393,7 +393,7 @@ static const char *mout_clksel_6442_p[] __initdata = {
        "div_dclk"
 };
 
-static const char *mout_clkout_p[] __initdata = {
+static const char *const mout_clkout_p[] __initconst = {
        "dout_clkout",
        "none",
        "xxti",
@@ -401,20 +401,20 @@ static const char *mout_clkout_p[] __initdata = {
 };
 
 /* Common fixed factor clocks. */
-static struct samsung_fixed_factor_clock ffactor_clks[] __initdata = {
+static const struct samsung_fixed_factor_clock ffactor_clks[] __initconst = {
        FFACTOR(FOUT_APLL_CLKOUT, "fout_apll_clkout", "fout_apll", 1, 4, 0),
        FFACTOR(FOUT_MPLL_CLKOUT, "fout_mpll_clkout", "fout_mpll", 1, 2, 0),
        FFACTOR(DOUT_APLL_CLKOUT, "dout_apll_clkout", "dout_apll", 1, 4, 0),
 };
 
 /* PLL input mux (fin_pll), which needs to be registered before PLLs. */
-static struct samsung_mux_clock early_mux_clks[] __initdata = {
+static const struct samsung_mux_clock early_mux_clks[] __initconst = {
        MUX_F(FIN_PLL, "fin_pll", fin_pll_p, OM_STAT, 0, 1,
                                        CLK_MUX_READ_ONLY, 0),
 };
 
 /* Common clock muxes. */
-static struct samsung_mux_clock mux_clks[] __initdata = {
+static const struct samsung_mux_clock mux_clks[] __initconst = {
        MUX(MOUT_FLASH, "mout_flash", mout_flash_p, CLK_SRC0, 28, 1),
        MUX(MOUT_PSYS, "mout_psys", mout_group4_p, CLK_SRC0, 24, 1),
        MUX(MOUT_DSYS, "mout_dsys", mout_group4_p, CLK_SRC0, 20, 1),
@@ -427,7 +427,7 @@ static struct samsung_mux_clock mux_clks[] __initdata = {
 };
 
 /* S5PV210-specific clock muxes. */
-static struct samsung_mux_clock s5pv210_mux_clks[] __initdata = {
+static const struct samsung_mux_clock s5pv210_mux_clks[] __initconst = {
        MUX(MOUT_VPLL, "mout_vpll", mout_vpll_p, CLK_SRC0, 12, 1),
 
        MUX(MOUT_VPLLSRC, "mout_vpllsrc", mout_vpllsrc_p, CLK_SRC1, 28, 1),
@@ -472,7 +472,7 @@ static struct samsung_mux_clock s5pv210_mux_clks[] __initdata = {
 };
 
 /* S5P6442-specific clock muxes. */
-static struct samsung_mux_clock s5p6442_mux_clks[] __initdata = {
+static const struct samsung_mux_clock s5p6442_mux_clks[] __initconst = {
        MUX(MOUT_VPLL, "mout_vpll", mout_vpll_6442_p, CLK_SRC0, 12, 1),
 
        MUX(MOUT_FIMD, "mout_fimd", mout_group2_6442_p, CLK_SRC1, 20, 4),
@@ -504,7 +504,7 @@ static struct samsung_mux_clock s5p6442_mux_clks[] __initdata = {
 };
 
 /* S5PV210-specific fixed rate clocks generated inside the SoC. */
-static struct samsung_fixed_rate_clock s5pv210_frate_clks[] __initdata = {
+static const struct samsung_fixed_rate_clock s5pv210_frate_clks[] __initconst = {
        FRATE(SCLK_HDMI27M, "sclk_hdmi27m", NULL, CLK_IS_ROOT, 27000000),
        FRATE(SCLK_HDMIPHY, "sclk_hdmiphy", NULL, CLK_IS_ROOT, 27000000),
        FRATE(SCLK_USBPHY0, "sclk_usbphy0", NULL, CLK_IS_ROOT, 48000000),
@@ -512,12 +512,12 @@ static struct samsung_fixed_rate_clock s5pv210_frate_clks[] __initdata = {
 };
 
 /* S5P6442-specific fixed rate clocks generated inside the SoC. */
-static struct samsung_fixed_rate_clock s5p6442_frate_clks[] __initdata = {
+static const struct samsung_fixed_rate_clock s5p6442_frate_clks[] __initconst = {
        FRATE(SCLK_USBPHY0, "sclk_usbphy0", NULL, CLK_IS_ROOT, 30000000),
 };
 
 /* Common clock dividers. */
-static struct samsung_div_clock div_clks[] __initdata = {
+static const struct samsung_div_clock div_clks[] __initconst = {
        DIV(DOUT_PCLKP, "dout_pclkp", "dout_hclkp", CLK_DIV0, 28, 3),
        DIV(DOUT_PCLKD, "dout_pclkd", "dout_hclkd", CLK_DIV0, 20, 3),
        DIV(DOUT_A2M, "dout_a2m", "mout_apll", CLK_DIV0, 4, 3),
@@ -549,7 +549,7 @@ static struct samsung_div_clock div_clks[] __initdata = {
 };
 
 /* S5PV210-specific clock dividers. */
-static struct samsung_div_clock s5pv210_div_clks[] __initdata = {
+static const struct samsung_div_clock s5pv210_div_clks[] __initconst = {
        DIV(DOUT_HCLKP, "dout_hclkp", "mout_psys", CLK_DIV0, 24, 4),
        DIV(DOUT_HCLKD, "dout_hclkd", "mout_dsys", CLK_DIV0, 16, 4),
        DIV(DOUT_PCLKM, "dout_pclkm", "dout_hclkm", CLK_DIV0, 12, 3),
@@ -578,7 +578,7 @@ static struct samsung_div_clock s5pv210_div_clks[] __initdata = {
 };
 
 /* S5P6442-specific clock dividers. */
-static struct samsung_div_clock s5p6442_div_clks[] __initdata = {
+static const struct samsung_div_clock s5p6442_div_clks[] __initconst = {
        DIV(DOUT_HCLKP, "dout_hclkp", "mout_d1sync", CLK_DIV0, 24, 4),
        DIV(DOUT_HCLKD, "dout_hclkd", "mout_d0sync", CLK_DIV0, 16, 4),
 
@@ -586,7 +586,7 @@ static struct samsung_div_clock s5p6442_div_clks[] __initdata = {
 };
 
 /* Common clock gates. */
-static struct samsung_gate_clock gate_clks[] __initdata = {
+static const struct samsung_gate_clock gate_clks[] __initconst = {
        GATE(CLK_ROTATOR, "rotator", "dout_hclkd", CLK_GATE_IP0, 29, 0, 0),
        GATE(CLK_FIMC2, "fimc2", "dout_hclkd", CLK_GATE_IP0, 26, 0, 0),
        GATE(CLK_FIMC1, "fimc1", "dout_hclkd", CLK_GATE_IP0, 25, 0, 0),
@@ -666,7 +666,7 @@ static struct samsung_gate_clock gate_clks[] __initdata = {
 };
 
 /* S5PV210-specific clock gates. */
-static struct samsung_gate_clock s5pv210_gate_clks[] __initdata = {
+static const struct samsung_gate_clock s5pv210_gate_clks[] __initconst = {
        GATE(CLK_CSIS, "clk_csis", "dout_hclkd", CLK_GATE_IP0, 31, 0, 0),
        GATE(CLK_MFC, "mfc", "dout_hclkm", CLK_GATE_IP0, 16, 0, 0),
        GATE(CLK_G2D, "g2d", "dout_hclkd", CLK_GATE_IP0, 12, 0, 0),
@@ -728,7 +728,7 @@ static struct samsung_gate_clock s5pv210_gate_clks[] __initdata = {
 };
 
 /* S5P6442-specific clock gates. */
-static struct samsung_gate_clock s5p6442_gate_clks[] __initdata = {
+static const struct samsung_gate_clock s5p6442_gate_clks[] __initconst = {
        GATE(CLK_JPEG, "jpeg", "dout_hclkd", CLK_GATE_IP0, 28, 0, 0),
        GATE(CLK_MFC, "mfc", "dout_hclkd", CLK_GATE_IP0, 16, 0, 0),
        GATE(CLK_G2D, "g2d", "dout_hclkd", CLK_GATE_IP0, 12, 0, 0),
@@ -748,14 +748,14 @@ static struct samsung_gate_clock s5p6442_gate_clks[] __initdata = {
  * Clock aliases for legacy clkdev look-up.
  * NOTE: Needed only to support legacy board files.
  */
-static struct samsung_clock_alias s5pv210_aliases[] = {
+static const struct samsung_clock_alias s5pv210_aliases[] __initconst = {
        ALIAS(DOUT_APLL, NULL, "armclk"),
        ALIAS(DOUT_HCLKM, NULL, "hclk_msys"),
        ALIAS(MOUT_DMC0, NULL, "sclk_dmc0"),
 };
 
 /* S5PV210-specific PLLs. */
-static struct samsung_pll_clock s5pv210_pll_clks[] __initdata = {
+static const struct samsung_pll_clock s5pv210_pll_clks[] __initconst = {
        [apll] = PLL(pll_4508, FOUT_APLL, "fout_apll", "fin_pll",
                                                APLL_LOCK, APLL_CON0, NULL),
        [mpll] = PLL(pll_4502, FOUT_MPLL, "fout_mpll", "fin_pll",
@@ -767,7 +767,7 @@ static struct samsung_pll_clock s5pv210_pll_clks[] __initdata = {
 };
 
 /* S5P6442-specific PLLs. */
-static struct samsung_pll_clock s5p6442_pll_clks[] __initdata = {
+static const struct samsung_pll_clock s5p6442_pll_clks[] __initconst = {
        [apll] = PLL(pll_4502, FOUT_APLL, "fout_apll", "fin_pll",
                                                APLL_LOCK, APLL_CON0, NULL),
        [mpll] = PLL(pll_4502, FOUT_MPLL, "fout_mpll", "fin_pll",
index 9e1f88c04fd46dd583e1b2ccd90fc96c8cae06e8..0117238391d6532b5b332a0ba09d319b693c03aa 100644 (file)
@@ -98,7 +98,7 @@ void samsung_clk_add_lookup(struct samsung_clk_provider *ctx, struct clk *clk,
 
 /* register a list of aliases */
 void __init samsung_clk_register_alias(struct samsung_clk_provider *ctx,
-                               struct samsung_clock_alias *list,
+                               const struct samsung_clock_alias *list,
                                unsigned int nr_clk)
 {
        struct clk *clk;
@@ -132,7 +132,8 @@ void __init samsung_clk_register_alias(struct samsung_clk_provider *ctx,
 
 /* register a list of fixed clocks */
 void __init samsung_clk_register_fixed_rate(struct samsung_clk_provider *ctx,
-               struct samsung_fixed_rate_clock *list, unsigned int nr_clk)
+               const struct samsung_fixed_rate_clock *list,
+               unsigned int nr_clk)
 {
        struct clk *clk;
        unsigned int idx, ret;
@@ -161,7 +162,7 @@ void __init samsung_clk_register_fixed_rate(struct samsung_clk_provider *ctx,
 
 /* register a list of fixed factor clocks */
 void __init samsung_clk_register_fixed_factor(struct samsung_clk_provider *ctx,
-               struct samsung_fixed_factor_clock *list, unsigned int nr_clk)
+               const struct samsung_fixed_factor_clock *list, unsigned int nr_clk)
 {
        struct clk *clk;
        unsigned int idx;
@@ -181,7 +182,7 @@ void __init samsung_clk_register_fixed_factor(struct samsung_clk_provider *ctx,
 
 /* register a list of mux clocks */
 void __init samsung_clk_register_mux(struct samsung_clk_provider *ctx,
-                               struct samsung_mux_clock *list,
+                               const struct samsung_mux_clock *list,
                                unsigned int nr_clk)
 {
        struct clk *clk;
@@ -213,7 +214,7 @@ void __init samsung_clk_register_mux(struct samsung_clk_provider *ctx,
 
 /* register a list of div clocks */
 void __init samsung_clk_register_div(struct samsung_clk_provider *ctx,
-                               struct samsung_div_clock *list,
+                               const struct samsung_div_clock *list,
                                unsigned int nr_clk)
 {
        struct clk *clk;
@@ -252,7 +253,7 @@ void __init samsung_clk_register_div(struct samsung_clk_provider *ctx,
 
 /* register a list of gate clocks */
 void __init samsung_clk_register_gate(struct samsung_clk_provider *ctx,
-                               struct samsung_gate_clock *list,
+                               const struct samsung_gate_clock *list,
                                unsigned int nr_clk)
 {
        struct clk *clk;
@@ -389,7 +390,7 @@ struct samsung_clk_provider * __init samsung_cmu_register_one(
 
        ctx = samsung_clk_init(np, reg_base, cmu->nr_clk_ids);
        if (!ctx) {
-               panic("%s: unable to alllocate ctx\n", __func__);
+               panic("%s: unable to allocate ctx\n", __func__);
                return ctx;
        }
 
index e4c75383cea718c7fb563db70fbdd8452b1f9bd8..b775fc29caa507dcaccfa1e54d78fef2f7d4a45b 100644 (file)
@@ -121,7 +121,7 @@ struct samsung_mux_clock {
        unsigned int            id;
        const char              *dev_name;
        const char              *name;
-       const char              **parent_names;
+       const char              *const *parent_names;
        u8                      num_parents;
        unsigned long           flags;
        unsigned long           offset;
@@ -368,28 +368,28 @@ extern void __init samsung_clk_of_register_fixed_ext(
 extern void samsung_clk_add_lookup(struct samsung_clk_provider *ctx,
                        struct clk *clk, unsigned int id);
 
-extern void samsung_clk_register_alias(struct samsung_clk_provider *ctx,
-                       struct samsung_clock_alias *list,
+extern void __init samsung_clk_register_alias(struct samsung_clk_provider *ctx,
+                       const struct samsung_clock_alias *list,
                        unsigned int nr_clk);
 extern void __init samsung_clk_register_fixed_rate(
                        struct samsung_clk_provider *ctx,
-                       struct samsung_fixed_rate_clock *clk_list,
+                       const struct samsung_fixed_rate_clock *clk_list,
                        unsigned int nr_clk);
 extern void __init samsung_clk_register_fixed_factor(
                        struct samsung_clk_provider *ctx,
-                       struct samsung_fixed_factor_clock *list,
+                       const struct samsung_fixed_factor_clock *list,
                        unsigned int nr_clk);
 extern void __init samsung_clk_register_mux(struct samsung_clk_provider *ctx,
-                       struct samsung_mux_clock *clk_list,
+                       const struct samsung_mux_clock *clk_list,
                        unsigned int nr_clk);
 extern void __init samsung_clk_register_div(struct samsung_clk_provider *ctx,
-                       struct samsung_div_clock *clk_list,
+                       const struct samsung_div_clock *clk_list,
                        unsigned int nr_clk);
 extern void __init samsung_clk_register_gate(struct samsung_clk_provider *ctx,
-                       struct samsung_gate_clock *clk_list,
+                       const struct samsung_gate_clock *clk_list,
                        unsigned int nr_clk);
 extern void __init samsung_clk_register_pll(struct samsung_clk_provider *ctx,
-                       struct samsung_pll_clock *pll_list,
+                       const struct samsung_pll_clock *pll_list,
                        unsigned int nr_clk, void __iomem *base);
 
 extern struct samsung_clk_provider __init *samsung_cmu_register_one(
index 6c7c929c77655771999c59d0d9c85fc998429944..5b60beb7d0ebce09f7f0f34b97620dd7025ba762 100644 (file)
@@ -34,7 +34,7 @@
 static DEFINE_SPINLOCK(lock);
 
 /* not pretty, but hey */
-void __iomem *smu_base;
+static void __iomem *smu_base;
 
 static void __init emev2_smu_write(unsigned long value, int offs)
 {
index 36b8e203f6e7cdfe92c39ed502e245e736608fee..09b4210d91240b032f5c97a72c2f2da8b59f17f6 100644 (file)
@@ -2,4 +2,4 @@
 # Makefile for sirf specific clk
 #
 
-obj-$(CONFIG_ARCH_SIRF) += clk-prima2.o clk-atlas6.o
+obj-$(CONFIG_ARCH_SIRF) += clk-prima2.o clk-atlas6.o clk-atlas7.o
diff --git a/drivers/clk/sirf/clk-atlas7.c b/drivers/clk/sirf/clk-atlas7.c
new file mode 100644 (file)
index 0000000..db8ab69
--- /dev/null
@@ -0,0 +1,1632 @@
+/*
+ * Clock tree for CSR SiRFAtlas7
+ *
+ * Copyright (c) 2014 Cambridge Silicon Radio Limited, a CSR plc group company.
+ *
+ * Licensed under GPLv2 or later.
+ */
+
+#include <linux/bitops.h>
+#include <linux/io.h>
+#include <linux/clk-provider.h>
+#include <linux/delay.h>
+#include <linux/of_address.h>
+#include <linux/reset-controller.h>
+#include <linux/slab.h>
+
+#define SIRFSOC_CLKC_MEMPLL_AB_FREQ          0x0000
+#define SIRFSOC_CLKC_MEMPLL_AB_SSC           0x0004
+#define SIRFSOC_CLKC_MEMPLL_AB_CTRL0         0x0008
+#define SIRFSOC_CLKC_MEMPLL_AB_CTRL1         0x000c
+#define SIRFSOC_CLKC_MEMPLL_AB_STATUS        0x0010
+#define SIRFSOC_CLKC_MEMPLL_AB_SSRAM_ADDR    0x0014
+#define SIRFSOC_CLKC_MEMPLL_AB_SSRAM_DATA    0x0018
+
+#define SIRFSOC_CLKC_CPUPLL_AB_FREQ          0x001c
+#define SIRFSOC_CLKC_CPUPLL_AB_SSC           0x0020
+#define SIRFSOC_CLKC_CPUPLL_AB_CTRL0         0x0024
+#define SIRFSOC_CLKC_CPUPLL_AB_CTRL1         0x0028
+#define SIRFSOC_CLKC_CPUPLL_AB_STATUS        0x002c
+
+#define SIRFSOC_CLKC_SYS0PLL_AB_FREQ         0x0030
+#define SIRFSOC_CLKC_SYS0PLL_AB_SSC          0x0034
+#define SIRFSOC_CLKC_SYS0PLL_AB_CTRL0        0x0038
+#define SIRFSOC_CLKC_SYS0PLL_AB_CTRL1        0x003c
+#define SIRFSOC_CLKC_SYS0PLL_AB_STATUS       0x0040
+
+#define SIRFSOC_CLKC_SYS1PLL_AB_FREQ         0x0044
+#define SIRFSOC_CLKC_SYS1PLL_AB_SSC          0x0048
+#define SIRFSOC_CLKC_SYS1PLL_AB_CTRL0        0x004c
+#define SIRFSOC_CLKC_SYS1PLL_AB_CTRL1        0x0050
+#define SIRFSOC_CLKC_SYS1PLL_AB_STATUS       0x0054
+
+#define SIRFSOC_CLKC_SYS2PLL_AB_FREQ         0x0058
+#define SIRFSOC_CLKC_SYS2PLL_AB_SSC          0x005c
+#define SIRFSOC_CLKC_SYS2PLL_AB_CTRL0        0x0060
+#define SIRFSOC_CLKC_SYS2PLL_AB_CTRL1        0x0064
+#define SIRFSOC_CLKC_SYS2PLL_AB_STATUS       0x0068
+
+#define SIRFSOC_CLKC_SYS3PLL_AB_FREQ         0x006c
+#define SIRFSOC_CLKC_SYS3PLL_AB_SSC          0x0070
+#define SIRFSOC_CLKC_SYS3PLL_AB_CTRL0        0x0074
+#define SIRFSOC_CLKC_SYS3PLL_AB_CTRL1        0x0078
+#define SIRFSOC_CLKC_SYS3PLL_AB_STATUS       0x007c
+
+#define SIRFSOC_ABPLL_CTRL0_SSEN     0x00001000
+#define SIRFSOC_ABPLL_CTRL0_BYPASS   0x00000010
+#define SIRFSOC_ABPLL_CTRL0_RESET    0x00000001
+
+#define SIRFSOC_CLKC_AUDIO_DTO_INC           0x0088
+#define SIRFSOC_CLKC_DISP0_DTO_INC           0x008c
+#define SIRFSOC_CLKC_DISP1_DTO_INC           0x0090
+
+#define SIRFSOC_CLKC_AUDIO_DTO_SRC           0x0094
+#define SIRFSOC_CLKC_AUDIO_DTO_ENA           0x0098
+#define SIRFSOC_CLKC_AUDIO_DTO_DROFF         0x009c
+
+#define SIRFSOC_CLKC_DISP0_DTO_SRC           0x00a0
+#define SIRFSOC_CLKC_DISP0_DTO_ENA           0x00a4
+#define SIRFSOC_CLKC_DISP0_DTO_DROFF         0x00a8
+
+#define SIRFSOC_CLKC_DISP1_DTO_SRC           0x00ac
+#define SIRFSOC_CLKC_DISP1_DTO_ENA           0x00b0
+#define SIRFSOC_CLKC_DISP1_DTO_DROFF         0x00b4
+
+#define SIRFSOC_CLKC_I2S_CLK_SEL             0x00b8
+#define SIRFSOC_CLKC_I2S_SEL_STAT            0x00bc
+
+#define SIRFSOC_CLKC_USBPHY_CLKDIV_CFG       0x00c0
+#define SIRFSOC_CLKC_USBPHY_CLKDIV_ENA       0x00c4
+#define SIRFSOC_CLKC_USBPHY_CLK_SEL          0x00c8
+#define SIRFSOC_CLKC_USBPHY_CLK_SEL_STAT     0x00cc
+
+#define SIRFSOC_CLKC_BTSS_CLKDIV_CFG         0x00d0
+#define SIRFSOC_CLKC_BTSS_CLKDIV_ENA         0x00d4
+#define SIRFSOC_CLKC_BTSS_CLK_SEL            0x00d8
+#define SIRFSOC_CLKC_BTSS_CLK_SEL_STAT       0x00dc
+
+#define SIRFSOC_CLKC_RGMII_CLKDIV_CFG        0x00e0
+#define SIRFSOC_CLKC_RGMII_CLKDIV_ENA        0x00e4
+#define SIRFSOC_CLKC_RGMII_CLK_SEL           0x00e8
+#define SIRFSOC_CLKC_RGMII_CLK_SEL_STAT      0x00ec
+
+#define SIRFSOC_CLKC_CPU_CLKDIV_CFG          0x00f0
+#define SIRFSOC_CLKC_CPU_CLKDIV_ENA          0x00f4
+#define SIRFSOC_CLKC_CPU_CLK_SEL             0x00f8
+#define SIRFSOC_CLKC_CPU_CLK_SEL_STAT        0x00fc
+
+#define SIRFSOC_CLKC_SDPHY01_CLKDIV_CFG      0x0100
+#define SIRFSOC_CLKC_SDPHY01_CLKDIV_ENA      0x0104
+#define SIRFSOC_CLKC_SDPHY01_CLK_SEL         0x0108
+#define SIRFSOC_CLKC_SDPHY01_CLK_SEL_STAT    0x010c
+
+#define SIRFSOC_CLKC_SDPHY23_CLKDIV_CFG      0x0110
+#define SIRFSOC_CLKC_SDPHY23_CLKDIV_ENA      0x0114
+#define SIRFSOC_CLKC_SDPHY23_CLK_SEL         0x0118
+#define SIRFSOC_CLKC_SDPHY23_CLK_SEL_STAT    0x011c
+
+#define SIRFSOC_CLKC_SDPHY45_CLKDIV_CFG      0x0120
+#define SIRFSOC_CLKC_SDPHY45_CLKDIV_ENA      0x0124
+#define SIRFSOC_CLKC_SDPHY45_CLK_SEL         0x0128
+#define SIRFSOC_CLKC_SDPHY45_CLK_SEL_STAT    0x012c
+
+#define SIRFSOC_CLKC_SDPHY67_CLKDIV_CFG      0x0130
+#define SIRFSOC_CLKC_SDPHY67_CLKDIV_ENA      0x0134
+#define SIRFSOC_CLKC_SDPHY67_CLK_SEL         0x0138
+#define SIRFSOC_CLKC_SDPHY67_CLK_SEL_STAT    0x013c
+
+#define SIRFSOC_CLKC_CAN_CLKDIV_CFG          0x0140
+#define SIRFSOC_CLKC_CAN_CLKDIV_ENA          0x0144
+#define SIRFSOC_CLKC_CAN_CLK_SEL             0x0148
+#define SIRFSOC_CLKC_CAN_CLK_SEL_STAT        0x014c
+
+#define SIRFSOC_CLKC_DEINT_CLKDIV_CFG        0x0150
+#define SIRFSOC_CLKC_DEINT_CLKDIV_ENA        0x0154
+#define SIRFSOC_CLKC_DEINT_CLK_SEL           0x0158
+#define SIRFSOC_CLKC_DEINT_CLK_SEL_STAT      0x015c
+
+#define SIRFSOC_CLKC_NAND_CLKDIV_CFG         0x0160
+#define SIRFSOC_CLKC_NAND_CLKDIV_ENA         0x0164
+#define SIRFSOC_CLKC_NAND_CLK_SEL            0x0168
+#define SIRFSOC_CLKC_NAND_CLK_SEL_STAT       0x016c
+
+#define SIRFSOC_CLKC_DISP0_CLKDIV_CFG        0x0170
+#define SIRFSOC_CLKC_DISP0_CLKDIV_ENA        0x0174
+#define SIRFSOC_CLKC_DISP0_CLK_SEL           0x0178
+#define SIRFSOC_CLKC_DISP0_CLK_SEL_STAT      0x017c
+
+#define SIRFSOC_CLKC_DISP1_CLKDIV_CFG        0x0180
+#define SIRFSOC_CLKC_DISP1_CLKDIV_ENA        0x0184
+#define SIRFSOC_CLKC_DISP1_CLK_SEL           0x0188
+#define SIRFSOC_CLKC_DISP1_CLK_SEL_STAT      0x018c
+
+#define SIRFSOC_CLKC_GPU_CLKDIV_CFG          0x0190
+#define SIRFSOC_CLKC_GPU_CLKDIV_ENA          0x0194
+#define SIRFSOC_CLKC_GPU_CLK_SEL             0x0198
+#define SIRFSOC_CLKC_GPU_CLK_SEL_STAT        0x019c
+
+#define SIRFSOC_CLKC_GNSS_CLKDIV_CFG         0x01a0
+#define SIRFSOC_CLKC_GNSS_CLKDIV_ENA         0x01a4
+#define SIRFSOC_CLKC_GNSS_CLK_SEL            0x01a8
+#define SIRFSOC_CLKC_GNSS_CLK_SEL_STAT       0x01ac
+
+#define SIRFSOC_CLKC_SHARED_DIVIDER_CFG0     0x01b0
+#define SIRFSOC_CLKC_SHARED_DIVIDER_CFG1     0x01b4
+#define SIRFSOC_CLKC_SHARED_DIVIDER_ENA      0x01b8
+
+#define SIRFSOC_CLKC_SYS_CLK_SEL             0x01bc
+#define SIRFSOC_CLKC_SYS_CLK_SEL_STAT        0x01c0
+#define SIRFSOC_CLKC_IO_CLK_SEL              0x01c4
+#define SIRFSOC_CLKC_IO_CLK_SEL_STAT         0x01c8
+#define SIRFSOC_CLKC_G2D_CLK_SEL             0x01cc
+#define SIRFSOC_CLKC_G2D_CLK_SEL_STAT        0x01d0
+#define SIRFSOC_CLKC_JPENC_CLK_SEL           0x01d4
+#define SIRFSOC_CLKC_JPENC_CLK_SEL_STAT      0x01d8
+#define SIRFSOC_CLKC_VDEC_CLK_SEL            0x01dc
+#define SIRFSOC_CLKC_VDEC_CLK_SEL_STAT       0x01e0
+#define SIRFSOC_CLKC_GMAC_CLK_SEL            0x01e4
+#define SIRFSOC_CLKC_GMAC_CLK_SEL_STAT       0x01e8
+#define SIRFSOC_CLKC_USB_CLK_SEL             0x01ec
+#define SIRFSOC_CLKC_USB_CLK_SEL_STAT        0x01f0
+#define SIRFSOC_CLKC_KAS_CLK_SEL             0x01f4
+#define SIRFSOC_CLKC_KAS_CLK_SEL_STAT        0x01f8
+#define SIRFSOC_CLKC_SEC_CLK_SEL             0x01fc
+#define SIRFSOC_CLKC_SEC_CLK_SEL_STAT        0x0200
+#define SIRFSOC_CLKC_SDR_CLK_SEL             0x0204
+#define SIRFSOC_CLKC_SDR_CLK_SEL_STAT        0x0208
+#define SIRFSOC_CLKC_VIP_CLK_SEL             0x020c
+#define SIRFSOC_CLKC_VIP_CLK_SEL_STAT        0x0210
+#define SIRFSOC_CLKC_NOCD_CLK_SEL            0x0214
+#define SIRFSOC_CLKC_NOCD_CLK_SEL_STAT       0x0218
+#define SIRFSOC_CLKC_NOCR_CLK_SEL            0x021c
+#define SIRFSOC_CLKC_NOCR_CLK_SEL_STAT       0x0220
+#define SIRFSOC_CLKC_TPIU_CLK_SEL            0x0224
+#define SIRFSOC_CLKC_TPIU_CLK_SEL_STAT       0x0228
+
+#define SIRFSOC_CLKC_ROOT_CLK_EN0_SET        0x022c
+#define SIRFSOC_CLKC_ROOT_CLK_EN0_CLR        0x0230
+#define SIRFSOC_CLKC_ROOT_CLK_EN0_STAT       0x0234
+#define SIRFSOC_CLKC_ROOT_CLK_EN1_SET        0x0238
+#define SIRFSOC_CLKC_ROOT_CLK_EN1_CLR        0x023c
+#define SIRFSOC_CLKC_ROOT_CLK_EN1_STAT       0x0240
+
+#define SIRFSOC_CLKC_LEAF_CLK_EN0_SET        0x0244
+#define SIRFSOC_CLKC_LEAF_CLK_EN0_CLR        0x0248
+#define SIRFSOC_CLKC_LEAF_CLK_EN0_STAT       0x024c
+
+#define SIRFSOC_CLKC_RSTC_A7_SW_RST          0x0308
+
+#define SIRFSOC_CLKC_LEAF_CLK_EN1_SET        0x04a0
+#define SIRFSOC_CLKC_LEAF_CLK_EN2_SET        0x04b8
+#define SIRFSOC_CLKC_LEAF_CLK_EN3_SET        0x04d0
+#define SIRFSOC_CLKC_LEAF_CLK_EN4_SET        0x04e8
+#define SIRFSOC_CLKC_LEAF_CLK_EN5_SET        0x0500
+#define SIRFSOC_CLKC_LEAF_CLK_EN6_SET        0x0518
+#define SIRFSOC_CLKC_LEAF_CLK_EN7_SET        0x0530
+#define SIRFSOC_CLKC_LEAF_CLK_EN8_SET        0x0548
+
+static void __iomem *sirfsoc_clk_vbase;
+static struct clk_onecell_data clk_data;
+
+static const struct clk_div_table pll_div_table[] = {
+       { .val = 0, .div = 1 },
+       { .val = 1, .div = 2 },
+       { .val = 2, .div = 4 },
+       { .val = 3, .div = 8 },
+       { .val = 4, .div = 16 },
+       { .val = 5, .div = 32 },
+};
+
+struct clk_pll {
+       struct clk_hw hw;
+       u16 regofs;  /* register offset */
+};
+#define to_pllclk(_hw) container_of(_hw, struct clk_pll, hw)
+
+struct clk_dto {
+       struct clk_hw hw;
+       u16 inc_offset;  /* dto increment offset */
+       u16 src_offset;  /* dto src offset */
+};
+#define to_dtoclk(_hw) container_of(_hw, struct clk_dto, hw)
+
+struct clk_unit {
+       struct clk_hw hw;
+       u16 regofs;
+       u16 bit;
+       spinlock_t *lock;
+};
+#define to_unitclk(_hw) container_of(_hw, struct clk_unit, hw)
+
+struct atlas7_div_init_data {
+       const char *div_name;
+       const char *parent_name;
+       const char *gate_name;
+       unsigned long flags;
+       u8 divider_flags;
+       u8 gate_flags;
+       u32 div_offset;
+       u8 shift;
+       u8 width;
+       u32 gate_offset;
+       u8 gate_bit;
+       spinlock_t *lock;
+};
+
+struct atlas7_mux_init_data {
+       const char *mux_name;
+       const char * const *parent_names;
+       u8 parent_num;
+       unsigned long flags;
+       u8 mux_flags;
+       u32 mux_offset;
+       u8 shift;
+       u8 width;
+};
+
+struct atlas7_unit_init_data {
+       u32 index;
+       const char *unit_name;
+       const char *parent_name;
+       unsigned long flags;
+       u32 regofs;
+       u8 bit;
+       spinlock_t *lock;
+};
+
+struct atlas7_reset_desc {
+       const char *name;
+       u32 clk_ofs;
+       u8  clk_bit;
+       u32 rst_ofs;
+       u8  rst_bit;
+       spinlock_t *lock;
+};
+
+static DEFINE_SPINLOCK(cpupll_ctrl1_lock);
+static DEFINE_SPINLOCK(mempll_ctrl1_lock);
+static DEFINE_SPINLOCK(sys0pll_ctrl1_lock);
+static DEFINE_SPINLOCK(sys1pll_ctrl1_lock);
+static DEFINE_SPINLOCK(sys2pll_ctrl1_lock);
+static DEFINE_SPINLOCK(sys3pll_ctrl1_lock);
+static DEFINE_SPINLOCK(usbphy_div_lock);
+static DEFINE_SPINLOCK(btss_div_lock);
+static DEFINE_SPINLOCK(rgmii_div_lock);
+static DEFINE_SPINLOCK(cpu_div_lock);
+static DEFINE_SPINLOCK(sdphy01_div_lock);
+static DEFINE_SPINLOCK(sdphy23_div_lock);
+static DEFINE_SPINLOCK(sdphy45_div_lock);
+static DEFINE_SPINLOCK(sdphy67_div_lock);
+static DEFINE_SPINLOCK(can_div_lock);
+static DEFINE_SPINLOCK(deint_div_lock);
+static DEFINE_SPINLOCK(nand_div_lock);
+static DEFINE_SPINLOCK(disp0_div_lock);
+static DEFINE_SPINLOCK(disp1_div_lock);
+static DEFINE_SPINLOCK(gpu_div_lock);
+static DEFINE_SPINLOCK(gnss_div_lock);
+/* gate register shared */
+static DEFINE_SPINLOCK(share_div_lock);
+static DEFINE_SPINLOCK(root0_gate_lock);
+static DEFINE_SPINLOCK(root1_gate_lock);
+static DEFINE_SPINLOCK(leaf0_gate_lock);
+static DEFINE_SPINLOCK(leaf1_gate_lock);
+static DEFINE_SPINLOCK(leaf2_gate_lock);
+static DEFINE_SPINLOCK(leaf3_gate_lock);
+static DEFINE_SPINLOCK(leaf4_gate_lock);
+static DEFINE_SPINLOCK(leaf5_gate_lock);
+static DEFINE_SPINLOCK(leaf6_gate_lock);
+static DEFINE_SPINLOCK(leaf7_gate_lock);
+static DEFINE_SPINLOCK(leaf8_gate_lock);
+
+static inline unsigned long clkc_readl(unsigned reg)
+{
+       return readl(sirfsoc_clk_vbase + reg);
+}
+
+static inline void clkc_writel(u32 val, unsigned reg)
+{
+       writel(val, sirfsoc_clk_vbase + reg);
+}
+
+/*
+ *  ABPLL
+ *  integer mode: Fvco = Fin * 2 * NF / NR
+ *  Spread Spectrum mode: Fvco = Fin * SSN / NR
+ *  SSN = 2^24 / (256 * ((ssdiv >> ssdepth) << ssdepth) + (ssmod << ssdepth))
+ */
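+/*
+ * Illustrative example (register values are hypothetical, not from a
+ * datasheet): with Fin = 26 MHz in integer mode, an NR field of 0
+ * (NR = 1) and an NF field of 37 (NF = 38) give
+ *   Fvco = 26 MHz * 2 * 38 / 1 = 1976 MHz.
+ * pll_clk_recalc_rate() below decodes NR/NF and the SSC parameters from
+ * the FREQ and SSC registers and applies these formulas.
+ */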
+static unsigned long pll_clk_recalc_rate(struct clk_hw *hw,
+       unsigned long parent_rate)
+{
+       unsigned long fin = parent_rate;
+       struct clk_pll *clk = to_pllclk(hw);
+       u64 rate;
+       u32 regctrl0 = clkc_readl(clk->regofs + SIRFSOC_CLKC_MEMPLL_AB_CTRL0 -
+                       SIRFSOC_CLKC_MEMPLL_AB_FREQ);
+       u32 regfreq = clkc_readl(clk->regofs);
+       u32 regssc = clkc_readl(clk->regofs + SIRFSOC_CLKC_MEMPLL_AB_SSC -
+                       SIRFSOC_CLKC_MEMPLL_AB_FREQ);
+       u32 nr = (regfreq  >> 16 & (BIT(3) - 1)) + 1;
+       u32 nf = (regfreq & (BIT(9) - 1)) + 1;
+       u32 ssdiv = regssc >> 8 & (BIT(12) - 1);
+       u32 ssdepth = regssc >> 20 & (BIT(2) - 1);
+       u32 ssmod = regssc & (BIT(8) - 1);
+
+       if (regctrl0 & SIRFSOC_ABPLL_CTRL0_BYPASS)
+               return fin;
+
+       if (regctrl0 & SIRFSOC_ABPLL_CTRL0_SSEN) {
+               rate = fin;
+               rate *= 1 << 24;
+               do_div(rate, (256 * ((ssdiv >> ssdepth) << ssdepth)
+                       + (ssmod << ssdepth)));
+       } else {
+               rate = 2 * fin;
+               rate *= nf;
+               do_div(rate, nr);
+       }
+       return rate;
+}
+
+static const struct clk_ops ab_pll_ops = {
+       .recalc_rate = pll_clk_recalc_rate,
+};
+
+static const char * const pll_clk_parents[] = {
+       "xin",
+};
+
+static struct clk_init_data clk_cpupll_init = {
+       .name = "cpupll_vco",
+       .ops = &ab_pll_ops,
+       .parent_names = pll_clk_parents,
+       .num_parents = ARRAY_SIZE(pll_clk_parents),
+};
+
+static struct clk_pll clk_cpupll = {
+       .regofs = SIRFSOC_CLKC_CPUPLL_AB_FREQ,
+       .hw = {
+               .init = &clk_cpupll_init,
+       },
+};
+
+static struct clk_init_data clk_mempll_init = {
+       .name = "mempll_vco",
+       .ops = &ab_pll_ops,
+       .parent_names = pll_clk_parents,
+       .num_parents = ARRAY_SIZE(pll_clk_parents),
+};
+
+static struct clk_pll clk_mempll = {
+       .regofs = SIRFSOC_CLKC_MEMPLL_AB_FREQ,
+       .hw = {
+               .init = &clk_mempll_init,
+       },
+};
+
+static struct clk_init_data clk_sys0pll_init = {
+       .name = "sys0pll_vco",
+       .ops = &ab_pll_ops,
+       .parent_names = pll_clk_parents,
+       .num_parents = ARRAY_SIZE(pll_clk_parents),
+};
+
+static struct clk_pll clk_sys0pll = {
+       .regofs = SIRFSOC_CLKC_SYS0PLL_AB_FREQ,
+       .hw = {
+               .init = &clk_sys0pll_init,
+       },
+};
+
+static struct clk_init_data clk_sys1pll_init = {
+       .name = "sys1pll_vco",
+       .ops = &ab_pll_ops,
+       .parent_names = pll_clk_parents,
+       .num_parents = ARRAY_SIZE(pll_clk_parents),
+};
+
+static struct clk_pll clk_sys1pll = {
+       .regofs = SIRFSOC_CLKC_SYS1PLL_AB_FREQ,
+       .hw = {
+               .init = &clk_sys1pll_init,
+       },
+};
+
+static struct clk_init_data clk_sys2pll_init = {
+       .name = "sys2pll_vco",
+       .ops = &ab_pll_ops,
+       .parent_names = pll_clk_parents,
+       .num_parents = ARRAY_SIZE(pll_clk_parents),
+};
+
+static struct clk_pll clk_sys2pll = {
+       .regofs = SIRFSOC_CLKC_SYS2PLL_AB_FREQ,
+       .hw = {
+               .init = &clk_sys2pll_init,
+       },
+};
+
+static struct clk_init_data clk_sys3pll_init = {
+       .name = "sys3pll_vco",
+       .ops = &ab_pll_ops,
+       .parent_names = pll_clk_parents,
+       .num_parents = ARRAY_SIZE(pll_clk_parents),
+};
+
+static struct clk_pll clk_sys3pll = {
+       .regofs = SIRFSOC_CLKC_SYS3PLL_AB_FREQ,
+       .hw = {
+               .init = &clk_sys3pll_init,
+       },
+};
+
+/*
+ *  DTOs in the clkc; double resolution mode is enabled by default.
+ *  double resolution mode: fout = fin * finc / 2^29
+ *  normal mode:            fout = fin * finc / 2^28
+ */
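+/*
+ * Worked example (the parent rate is hypothetical): to generate a
+ * 24.576 MHz audio clock from a 196.608 MHz parent in double resolution
+ * mode, finc = 24.576 MHz * 2^29 / 196.608 MHz = 2^29 / 8 = 67108864.
+ *
+ * Each DTO's ENA and DROFF registers sit at fixed offsets from its SRC
+ * register, which is why the helpers below reach them via e.g.
+ * src_offset + (SIRFSOC_CLKC_AUDIO_DTO_ENA - SIRFSOC_CLKC_AUDIO_DTO_SRC).
+ */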
+static int dto_clk_is_enabled(struct clk_hw *hw)
+{
+       struct clk_dto *clk = to_dtoclk(hw);
+       int reg;
+
+       reg = clk->src_offset + SIRFSOC_CLKC_AUDIO_DTO_ENA - SIRFSOC_CLKC_AUDIO_DTO_SRC;
+
+       return !!(clkc_readl(reg) & BIT(0));
+}
+
+static int dto_clk_enable(struct clk_hw *hw)
+{
+       u32 val, reg;
+       struct clk_dto *clk = to_dtoclk(hw);
+
+       reg = clk->src_offset + SIRFSOC_CLKC_AUDIO_DTO_ENA - SIRFSOC_CLKC_AUDIO_DTO_SRC;
+
+       val = clkc_readl(reg) | BIT(0);
+       clkc_writel(val, reg);
+       return 0;
+}
+
+static void dto_clk_disable(struct clk_hw *hw)
+{
+       u32 val, reg;
+       struct clk_dto *clk = to_dtoclk(hw);
+
+       reg = clk->src_offset + SIRFSOC_CLKC_AUDIO_DTO_ENA - SIRFSOC_CLKC_AUDIO_DTO_SRC;
+
+       val = clkc_readl(reg) & ~BIT(0);
+       clkc_writel(val, reg);
+}
+
+static unsigned long dto_clk_recalc_rate(struct clk_hw *hw,
+       unsigned long parent_rate)
+{
+       u64 rate = parent_rate;
+       struct clk_dto *clk = to_dtoclk(hw);
+       u32 finc = clkc_readl(clk->inc_offset);
+       u32 droff = clkc_readl(clk->src_offset + SIRFSOC_CLKC_AUDIO_DTO_DROFF - SIRFSOC_CLKC_AUDIO_DTO_SRC);
+
+       rate *= finc;
+       if (droff & BIT(0))
+               /* Double resolution off */
+               do_div(rate, 1 << 28);
+       else
+               do_div(rate, 1 << 29);
+
+       return rate;
+}
+
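+/*
+ * Round by quantizing to the DTO resolution: compute the integer
+ * increment finc = rate * 2^29 / parent_rate, then convert that
+ * increment back into the rate it actually produces.
+ */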
+static long dto_clk_round_rate(struct clk_hw *hw, unsigned long rate,
+       unsigned long *parent_rate)
+{
+       u64 dividend = rate * (1 << 29);
+
+       do_div(dividend, *parent_rate);
+       dividend *= *parent_rate;
+       do_div(dividend, 1 << 29);
+
+       return dividend;
+}
+
+static int dto_clk_set_rate(struct clk_hw *hw, unsigned long rate,
+       unsigned long parent_rate)
+{
+       u64 dividend = rate * (1 << 29);
+       struct clk_dto *clk = to_dtoclk(hw);
+
+       do_div(dividend, parent_rate);
+       clkc_writel(0, clk->src_offset + SIRFSOC_CLKC_AUDIO_DTO_DROFF - SIRFSOC_CLKC_AUDIO_DTO_SRC);
+       clkc_writel(dividend, clk->inc_offset);
+
+       return 0;
+}
+
+static u8 dto_clk_get_parent(struct clk_hw *hw)
+{
+       struct clk_dto *clk = to_dtoclk(hw);
+
+       return clkc_readl(clk->src_offset);
+}
+
+/*
+ *   The DTO requires CLK_SET_PARENT_GATE: its parent may only be
+ *   switched while the clock is gated.
+ */
+static int dto_clk_set_parent(struct clk_hw *hw, u8 index)
+{
+       struct clk_dto *clk = to_dtoclk(hw);
+
+       clkc_writel(index, clk->src_offset);
+       return 0;
+}
+
+static const struct clk_ops dto_ops = {
+       .is_enabled = dto_clk_is_enabled,
+       .enable = dto_clk_enable,
+       .disable = dto_clk_disable,
+       .recalc_rate = dto_clk_recalc_rate,
+       .round_rate = dto_clk_round_rate,
+       .set_rate = dto_clk_set_rate,
+       .get_parent = dto_clk_get_parent,
+       .set_parent = dto_clk_set_parent,
+};
+
+/* DTO parent clocks: the sys{0,1,3}pll clk1 outputs */
+static const char * const audiodto_clk_parents[] = {
+       "sys0pll_clk1",
+       "sys1pll_clk1",
+       "sys3pll_clk1",
+};
+
+static struct clk_init_data clk_audiodto_init = {
+       .name = "audio_dto",
+       .ops = &dto_ops,
+       .parent_names = audiodto_clk_parents,
+       .num_parents = ARRAY_SIZE(audiodto_clk_parents),
+};
+
+static struct clk_dto clk_audio_dto = {
+       .inc_offset = SIRFSOC_CLKC_AUDIO_DTO_INC,
+       .src_offset = SIRFSOC_CLKC_AUDIO_DTO_SRC,
+       .hw = {
+               .init = &clk_audiodto_init,
+       },
+};
+
+static const char * const disp0dto_clk_parents[] = {
+       "sys0pll_clk1",
+       "sys1pll_clk1",
+       "sys3pll_clk1",
+};
+
+static struct clk_init_data clk_disp0dto_init = {
+       .name = "disp0_dto",
+       .ops = &dto_ops,
+       .parent_names = disp0dto_clk_parents,
+       .num_parents = ARRAY_SIZE(disp0dto_clk_parents),
+};
+
+static struct clk_dto clk_disp0_dto = {
+       .inc_offset = SIRFSOC_CLKC_DISP0_DTO_INC,
+       .src_offset = SIRFSOC_CLKC_DISP0_DTO_SRC,
+       .hw = {
+               .init = &clk_disp0dto_init,
+       },
+};
+
+static const char * const disp1dto_clk_parents[] = {
+       "sys0pll_clk1",
+       "sys1pll_clk1",
+       "sys3pll_clk1",
+};
+
+static struct clk_init_data clk_disp1dto_init = {
+       .name = "disp1_dto",
+       .ops = &dto_ops,
+       .parent_names = disp1dto_clk_parents,
+       .num_parents = ARRAY_SIZE(disp1dto_clk_parents),
+};
+
+static struct clk_dto clk_disp1_dto = {
+       .inc_offset = SIRFSOC_CLKC_DISP1_DTO_INC,
+       .src_offset = SIRFSOC_CLKC_DISP1_DTO_SRC,
+       .hw = {
+               .init = &clk_disp1dto_init,
+       },
+};
+
+static struct atlas7_div_init_data divider_list[] __initdata = {
+       /* div_name, parent_name, gate_name, clk_flag, divider_flag, gate_flag, div_offset, shift, width, gate_offset, bit_enable, lock */
+       { "sys0pll_qa1", "sys0pll_fixdiv", "sys0pll_a1", 0, 0, 0, SIRFSOC_CLKC_USBPHY_CLKDIV_CFG, 0, 6, SIRFSOC_CLKC_USBPHY_CLKDIV_ENA, 0, &usbphy_div_lock },
+       { "sys1pll_qa1", "sys1pll_fixdiv", "sys1pll_a1", 0, 0, 0, SIRFSOC_CLKC_USBPHY_CLKDIV_CFG, 8, 6, SIRFSOC_CLKC_USBPHY_CLKDIV_ENA, 4, &usbphy_div_lock },
+       { "sys2pll_qa1", "sys2pll_fixdiv", "sys2pll_a1", 0, 0, 0, SIRFSOC_CLKC_USBPHY_CLKDIV_CFG, 16, 6, SIRFSOC_CLKC_USBPHY_CLKDIV_ENA, 8, &usbphy_div_lock },
+       { "sys3pll_qa1", "sys3pll_fixdiv", "sys3pll_a1", 0, 0, 0, SIRFSOC_CLKC_USBPHY_CLKDIV_CFG, 24, 6, SIRFSOC_CLKC_USBPHY_CLKDIV_ENA, 12, &usbphy_div_lock },
+       { "sys0pll_qa2", "sys0pll_fixdiv", "sys0pll_a2", 0, 0, 0, SIRFSOC_CLKC_BTSS_CLKDIV_CFG, 0, 6, SIRFSOC_CLKC_BTSS_CLKDIV_ENA, 0, &btss_div_lock },
+       { "sys1pll_qa2", "sys1pll_fixdiv", "sys1pll_a2", 0, 0, 0, SIRFSOC_CLKC_BTSS_CLKDIV_CFG, 8, 6, SIRFSOC_CLKC_BTSS_CLKDIV_ENA, 4, &btss_div_lock },
+       { "sys2pll_qa2", "sys2pll_fixdiv", "sys2pll_a2", 0, 0, 0, SIRFSOC_CLKC_BTSS_CLKDIV_CFG, 16, 6, SIRFSOC_CLKC_BTSS_CLKDIV_ENA, 8, &btss_div_lock },
+       { "sys3pll_qa2", "sys3pll_fixdiv", "sys3pll_a2", 0, 0, 0, SIRFSOC_CLKC_BTSS_CLKDIV_CFG, 24, 6, SIRFSOC_CLKC_BTSS_CLKDIV_ENA, 12, &btss_div_lock },
+       { "sys0pll_qa3", "sys0pll_fixdiv", "sys0pll_a3", 0, 0, 0, SIRFSOC_CLKC_RGMII_CLKDIV_CFG, 0, 6, SIRFSOC_CLKC_RGMII_CLKDIV_ENA, 0, &rgmii_div_lock },
+       { "sys1pll_qa3", "sys1pll_fixdiv", "sys1pll_a3", 0, 0, 0, SIRFSOC_CLKC_RGMII_CLKDIV_CFG, 8, 6, SIRFSOC_CLKC_RGMII_CLKDIV_ENA, 4, &rgmii_div_lock },
+       { "sys2pll_qa3", "sys2pll_fixdiv", "sys2pll_a3", 0, 0, 0, SIRFSOC_CLKC_RGMII_CLKDIV_CFG, 16, 6, SIRFSOC_CLKC_RGMII_CLKDIV_ENA, 8, &rgmii_div_lock },
+       { "sys3pll_qa3", "sys3pll_fixdiv", "sys3pll_a3", 0, 0, 0, SIRFSOC_CLKC_RGMII_CLKDIV_CFG, 24, 6, SIRFSOC_CLKC_RGMII_CLKDIV_ENA, 12, &rgmii_div_lock },
+       { "sys0pll_qa4", "sys0pll_fixdiv", "sys0pll_a4", 0, 0, 0, SIRFSOC_CLKC_CPU_CLKDIV_CFG, 0, 6, SIRFSOC_CLKC_CPU_CLKDIV_ENA, 0, &cpu_div_lock },
+       { "sys1pll_qa4", "sys1pll_fixdiv", "sys1pll_a4", 0, 0, CLK_IGNORE_UNUSED, SIRFSOC_CLKC_CPU_CLKDIV_CFG, 8, 6, SIRFSOC_CLKC_CPU_CLKDIV_ENA, 4, &cpu_div_lock },
+       { "sys0pll_qa5", "sys0pll_fixdiv", "sys0pll_a5", 0, 0, 0, SIRFSOC_CLKC_SDPHY01_CLKDIV_CFG, 0, 6, SIRFSOC_CLKC_SDPHY01_CLKDIV_ENA, 0, &sdphy01_div_lock },
+       { "sys1pll_qa5", "sys1pll_fixdiv", "sys1pll_a5", 0, 0, 0, SIRFSOC_CLKC_SDPHY01_CLKDIV_CFG, 8, 6, SIRFSOC_CLKC_SDPHY01_CLKDIV_ENA, 4, &sdphy01_div_lock },
+       { "sys2pll_qa5", "sys2pll_fixdiv", "sys2pll_a5", 0, 0, 0, SIRFSOC_CLKC_SDPHY01_CLKDIV_CFG, 16, 6, SIRFSOC_CLKC_SDPHY01_CLKDIV_ENA, 8, &sdphy01_div_lock },
+       { "sys3pll_qa5", "sys3pll_fixdiv", "sys3pll_a5", 0, 0, 0, SIRFSOC_CLKC_SDPHY01_CLKDIV_CFG, 24, 6, SIRFSOC_CLKC_SDPHY01_CLKDIV_ENA, 12, &sdphy01_div_lock },
+       { "sys0pll_qa6", "sys0pll_fixdiv", "sys0pll_a6", 0, 0, 0, SIRFSOC_CLKC_SDPHY23_CLKDIV_CFG, 0, 6, SIRFSOC_CLKC_SDPHY23_CLKDIV_ENA, 0, &sdphy23_div_lock },
+       { "sys1pll_qa6", "sys1pll_fixdiv", "sys1pll_a6", 0, 0, 0, SIRFSOC_CLKC_SDPHY23_CLKDIV_CFG, 8, 6, SIRFSOC_CLKC_SDPHY23_CLKDIV_ENA, 4, &sdphy23_div_lock },
+       { "sys2pll_qa6", "sys2pll_fixdiv", "sys2pll_a6", 0, 0, 0, SIRFSOC_CLKC_SDPHY23_CLKDIV_CFG, 16, 6, SIRFSOC_CLKC_SDPHY23_CLKDIV_ENA, 8, &sdphy23_div_lock },
+       { "sys3pll_qa6", "sys3pll_fixdiv", "sys3pll_a6", 0, 0, 0, SIRFSOC_CLKC_SDPHY23_CLKDIV_CFG, 24, 6, SIRFSOC_CLKC_SDPHY23_CLKDIV_ENA, 12, &sdphy23_div_lock },
+       { "sys0pll_qa7", "sys0pll_fixdiv", "sys0pll_a7", 0, 0, 0, SIRFSOC_CLKC_SDPHY45_CLKDIV_CFG, 0, 6, SIRFSOC_CLKC_SDPHY45_CLKDIV_ENA, 0, &sdphy45_div_lock },
+       { "sys1pll_qa7", "sys1pll_fixdiv", "sys1pll_a7", 0, 0, 0, SIRFSOC_CLKC_SDPHY45_CLKDIV_CFG, 8, 6, SIRFSOC_CLKC_SDPHY45_CLKDIV_ENA, 4, &sdphy45_div_lock },
+       { "sys2pll_qa7", "sys2pll_fixdiv", "sys2pll_a7", 0, 0, 0, SIRFSOC_CLKC_SDPHY45_CLKDIV_CFG, 16, 6, SIRFSOC_CLKC_SDPHY45_CLKDIV_ENA, 8, &sdphy45_div_lock },
+       { "sys3pll_qa7", "sys3pll_fixdiv", "sys3pll_a7", 0, 0, 0, SIRFSOC_CLKC_SDPHY45_CLKDIV_CFG, 24, 6, SIRFSOC_CLKC_SDPHY45_CLKDIV_ENA, 12, &sdphy45_div_lock },
+       { "sys0pll_qa8", "sys0pll_fixdiv", "sys0pll_a8", 0, 0, 0, SIRFSOC_CLKC_SDPHY67_CLKDIV_CFG, 0, 6, SIRFSOC_CLKC_SDPHY67_CLKDIV_ENA, 0, &sdphy67_div_lock },
+       { "sys1pll_qa8", "sys1pll_fixdiv", "sys1pll_a8", 0, 0, 0, SIRFSOC_CLKC_SDPHY67_CLKDIV_CFG, 8, 6, SIRFSOC_CLKC_SDPHY67_CLKDIV_ENA, 4, &sdphy67_div_lock },
+       { "sys2pll_qa8", "sys2pll_fixdiv", "sys2pll_a8", 0, 0, 0, SIRFSOC_CLKC_SDPHY67_CLKDIV_CFG, 16, 6, SIRFSOC_CLKC_SDPHY67_CLKDIV_ENA, 8, &sdphy67_div_lock },
+       { "sys3pll_qa8", "sys3pll_fixdiv", "sys3pll_a8", 0, 0, 0, SIRFSOC_CLKC_SDPHY67_CLKDIV_CFG, 24, 6, SIRFSOC_CLKC_SDPHY67_CLKDIV_ENA, 12, &sdphy67_div_lock },
+       { "sys0pll_qa9", "sys0pll_fixdiv", "sys0pll_a9", 0, 0, 0, SIRFSOC_CLKC_CAN_CLKDIV_CFG, 0, 6, SIRFSOC_CLKC_CAN_CLKDIV_ENA, 0, &can_div_lock },
+       { "sys1pll_qa9", "sys1pll_fixdiv", "sys1pll_a9", 0, 0, 0, SIRFSOC_CLKC_CAN_CLKDIV_CFG, 8, 6, SIRFSOC_CLKC_CAN_CLKDIV_ENA, 4, &can_div_lock },
+       { "sys2pll_qa9", "sys2pll_fixdiv", "sys2pll_a9", 0, 0, 0, SIRFSOC_CLKC_CAN_CLKDIV_CFG, 16, 6, SIRFSOC_CLKC_CAN_CLKDIV_ENA, 8, &can_div_lock },
+       { "sys3pll_qa9", "sys3pll_fixdiv", "sys3pll_a9", 0, 0, 0, SIRFSOC_CLKC_CAN_CLKDIV_CFG, 24, 6, SIRFSOC_CLKC_CAN_CLKDIV_ENA, 12, &can_div_lock },
+       { "sys0pll_qa10", "sys0pll_fixdiv", "sys0pll_a10", 0, 0, 0, SIRFSOC_CLKC_DEINT_CLKDIV_CFG, 0, 6, SIRFSOC_CLKC_DEINT_CLKDIV_ENA, 0, &deint_div_lock },
+       { "sys1pll_qa10", "sys1pll_fixdiv", "sys1pll_a10", 0, 0, 0, SIRFSOC_CLKC_DEINT_CLKDIV_CFG, 8, 6, SIRFSOC_CLKC_DEINT_CLKDIV_ENA, 4, &deint_div_lock },
+       { "sys2pll_qa10", "sys2pll_fixdiv", "sys2pll_a10", 0, 0, 0, SIRFSOC_CLKC_DEINT_CLKDIV_CFG, 16, 6, SIRFSOC_CLKC_DEINT_CLKDIV_ENA, 8, &deint_div_lock },
+       { "sys3pll_qa10", "sys3pll_fixdiv", "sys3pll_a10", 0, 0, 0, SIRFSOC_CLKC_DEINT_CLKDIV_CFG, 24, 6, SIRFSOC_CLKC_DEINT_CLKDIV_ENA, 12, &deint_div_lock },
+       { "sys0pll_qa11", "sys0pll_fixdiv", "sys0pll_a11", 0, 0, 0, SIRFSOC_CLKC_NAND_CLKDIV_CFG, 0, 6, SIRFSOC_CLKC_NAND_CLKDIV_ENA, 0, &nand_div_lock },
+       { "sys1pll_qa11", "sys1pll_fixdiv", "sys1pll_a11", 0, 0, 0, SIRFSOC_CLKC_NAND_CLKDIV_CFG, 8, 6, SIRFSOC_CLKC_NAND_CLKDIV_ENA, 4, &nand_div_lock },
+       { "sys2pll_qa11", "sys2pll_fixdiv", "sys2pll_a11", 0, 0, 0, SIRFSOC_CLKC_NAND_CLKDIV_CFG, 16, 6, SIRFSOC_CLKC_NAND_CLKDIV_ENA, 8, &nand_div_lock },
+       { "sys3pll_qa11", "sys3pll_fixdiv", "sys3pll_a11", 0, 0, 0, SIRFSOC_CLKC_NAND_CLKDIV_CFG, 24, 6, SIRFSOC_CLKC_NAND_CLKDIV_ENA, 12, &nand_div_lock },
+       { "sys0pll_qa12", "sys0pll_fixdiv", "sys0pll_a12", 0, 0, 0, SIRFSOC_CLKC_DISP0_CLKDIV_CFG, 0, 6, SIRFSOC_CLKC_DISP0_CLKDIV_ENA, 0, &disp0_div_lock },
+       { "sys1pll_qa12", "sys1pll_fixdiv", "sys1pll_a12", 0, 0, 0, SIRFSOC_CLKC_DISP0_CLKDIV_CFG, 8, 6, SIRFSOC_CLKC_DISP0_CLKDIV_ENA, 4, &disp0_div_lock },
+       { "sys2pll_qa12", "sys2pll_fixdiv", "sys2pll_a12", 0, 0, 0, SIRFSOC_CLKC_DISP0_CLKDIV_CFG, 16, 6, SIRFSOC_CLKC_DISP0_CLKDIV_ENA, 8, &disp0_div_lock },
+       { "sys3pll_qa12", "sys3pll_fixdiv", "sys3pll_a12", 0, 0, 0, SIRFSOC_CLKC_DISP0_CLKDIV_CFG, 24, 6, SIRFSOC_CLKC_DISP0_CLKDIV_ENA, 12, &disp0_div_lock },
+       { "sys0pll_qa13", "sys0pll_fixdiv", "sys0pll_a13", 0, 0, 0, SIRFSOC_CLKC_DISP1_CLKDIV_CFG, 0, 6, SIRFSOC_CLKC_DISP1_CLKDIV_ENA, 0, &disp1_div_lock },
+       { "sys1pll_qa13", "sys1pll_fixdiv", "sys1pll_a13", 0, 0, 0, SIRFSOC_CLKC_DISP1_CLKDIV_CFG, 8, 6, SIRFSOC_CLKC_DISP1_CLKDIV_ENA, 4, &disp1_div_lock },
+       { "sys2pll_qa13", "sys2pll_fixdiv", "sys2pll_a13", 0, 0, 0, SIRFSOC_CLKC_DISP1_CLKDIV_CFG, 16, 6, SIRFSOC_CLKC_DISP1_CLKDIV_ENA, 8, &disp1_div_lock },
+       { "sys3pll_qa13", "sys3pll_fixdiv", "sys3pll_a13", 0, 0, 0, SIRFSOC_CLKC_DISP1_CLKDIV_CFG, 24, 6, SIRFSOC_CLKC_DISP1_CLKDIV_ENA, 12, &disp1_div_lock },
+       { "sys0pll_qa14", "sys0pll_fixdiv", "sys0pll_a14", 0, 0, 0, SIRFSOC_CLKC_GPU_CLKDIV_CFG, 0, 6, SIRFSOC_CLKC_GPU_CLKDIV_ENA, 0, &gpu_div_lock },
+       { "sys1pll_qa14", "sys1pll_fixdiv", "sys1pll_a14", 0, 0, 0, SIRFSOC_CLKC_GPU_CLKDIV_CFG, 8, 6, SIRFSOC_CLKC_GPU_CLKDIV_ENA, 4, &gpu_div_lock },
+       { "sys2pll_qa14", "sys2pll_fixdiv", "sys2pll_a14", 0, 0, 0, SIRFSOC_CLKC_GPU_CLKDIV_CFG, 16, 6, SIRFSOC_CLKC_GPU_CLKDIV_ENA, 8, &gpu_div_lock },
+       { "sys3pll_qa14", "sys3pll_fixdiv", "sys3pll_a14", 0, 0, 0, SIRFSOC_CLKC_GPU_CLKDIV_CFG, 24, 6, SIRFSOC_CLKC_GPU_CLKDIV_ENA, 12, &gpu_div_lock },
+       { "sys0pll_qa15", "sys0pll_fixdiv", "sys0pll_a15", 0, 0, 0, SIRFSOC_CLKC_GNSS_CLKDIV_CFG, 0, 6, SIRFSOC_CLKC_GNSS_CLKDIV_ENA, 0, &gnss_div_lock },
+       { "sys1pll_qa15", "sys1pll_fixdiv", "sys1pll_a15", 0, 0, 0, SIRFSOC_CLKC_GNSS_CLKDIV_CFG, 8, 6, SIRFSOC_CLKC_GNSS_CLKDIV_ENA, 4, &gnss_div_lock },
+       { "sys2pll_qa15", "sys2pll_fixdiv", "sys2pll_a15", 0, 0, 0, SIRFSOC_CLKC_GNSS_CLKDIV_CFG, 16, 6, SIRFSOC_CLKC_GNSS_CLKDIV_ENA, 8, &gnss_div_lock },
+       { "sys3pll_qa15", "sys3pll_fixdiv", "sys3pll_a15", 0, 0, 0, SIRFSOC_CLKC_GNSS_CLKDIV_CFG, 24, 6, SIRFSOC_CLKC_GNSS_CLKDIV_ENA, 12, &gnss_div_lock },
+       { "sys1pll_qa18", "sys1pll_fixdiv", "sys1pll_a18", 0, 0, 0, SIRFSOC_CLKC_SHARED_DIVIDER_CFG0, 24, 6, SIRFSOC_CLKC_SHARED_DIVIDER_ENA, 12, &share_div_lock },
+       { "sys1pll_qa19", "sys1pll_fixdiv", "sys1pll_a19", 0, 0, CLK_IGNORE_UNUSED, SIRFSOC_CLKC_SHARED_DIVIDER_CFG0, 16, 6, SIRFSOC_CLKC_SHARED_DIVIDER_ENA, 8, &share_div_lock },
+       { "sys1pll_qa20", "sys1pll_fixdiv", "sys1pll_a20", 0, 0, 0, SIRFSOC_CLKC_SHARED_DIVIDER_CFG0, 8, 6, SIRFSOC_CLKC_SHARED_DIVIDER_ENA, 4, &share_div_lock },
+       { "sys2pll_qa20", "sys2pll_fixdiv", "sys2pll_a20", 0, 0, 0, SIRFSOC_CLKC_SHARED_DIVIDER_CFG0, 0, 6, SIRFSOC_CLKC_SHARED_DIVIDER_ENA, 0, &share_div_lock },
+       { "sys1pll_qa17", "sys1pll_fixdiv", "sys1pll_a17", 0, 0, CLK_IGNORE_UNUSED, SIRFSOC_CLKC_SHARED_DIVIDER_CFG1, 8, 6, SIRFSOC_CLKC_SHARED_DIVIDER_ENA, 20, &share_div_lock },
+       { "sys0pll_qa20", "sys0pll_fixdiv", "sys0pll_a20", 0, 0, 0, SIRFSOC_CLKC_SHARED_DIVIDER_CFG1, 0, 6, SIRFSOC_CLKC_SHARED_DIVIDER_ENA, 16, &share_div_lock },
+};
+
+static const char * const i2s_clk_parents[] = {
+       "xin",
+       "xinw",
+       "audio_dto",
+       /* "pwm_i2s01" */
+};
+
+static const char * const usbphy_clk_parents[] = {
+       "xin",
+       "xinw",
+       "sys0pll_a1",
+       "sys1pll_a1",
+       "sys2pll_a1",
+       "sys3pll_a1",
+};
+
+static const char * const btss_clk_parents[] = {
+       "xin",
+       "xinw",
+       "sys0pll_a2",
+       "sys1pll_a2",
+       "sys2pll_a2",
+       "sys3pll_a2",
+};
+
+static const char * const rgmii_clk_parents[] = {
+       "xin",
+       "xinw",
+       "sys0pll_a3",
+       "sys1pll_a3",
+       "sys2pll_a3",
+       "sys3pll_a3",
+};
+
+static const char * const cpu_clk_parents[] = {
+       "xin",
+       "xinw",
+       "sys0pll_a4",
+       "sys1pll_a4",
+       "cpupll_clk1",
+};
+
+static const char * const sdphy01_clk_parents[] = {
+       "xin",
+       "xinw",
+       "sys0pll_a5",
+       "sys1pll_a5",
+       "sys2pll_a5",
+       "sys3pll_a5",
+};
+
+static const char * const sdphy23_clk_parents[] = {
+       "xin",
+       "xinw",
+       "sys0pll_a6",
+       "sys1pll_a6",
+       "sys2pll_a6",
+       "sys3pll_a6",
+};
+
+static const char * const sdphy45_clk_parents[] = {
+       "xin",
+       "xinw",
+       "sys0pll_a7",
+       "sys1pll_a7",
+       "sys2pll_a7",
+       "sys3pll_a7",
+};
+
+static const char * const sdphy67_clk_parents[] = {
+       "xin",
+       "xinw",
+       "sys0pll_a8",
+       "sys1pll_a8",
+       "sys2pll_a8",
+       "sys3pll_a8",
+};
+
+static const char * const can_clk_parents[] = {
+       "xin",
+       "xinw",
+       "sys0pll_a9",
+       "sys1pll_a9",
+       "sys2pll_a9",
+       "sys3pll_a9",
+};
+
+static const char * const deint_clk_parents[] = {
+       "xin",
+       "xinw",
+       "sys0pll_a10",
+       "sys1pll_a10",
+       "sys2pll_a10",
+       "sys3pll_a10",
+};
+
+static const char * const nand_clk_parents[] = {
+       "xin",
+       "xinw",
+       "sys0pll_a11",
+       "sys1pll_a11",
+       "sys2pll_a11",
+       "sys3pll_a11",
+};
+
+static const char * const disp0_clk_parents[] = {
+       "xin",
+       "xinw",
+       "sys0pll_a12",
+       "sys1pll_a12",
+       "sys2pll_a12",
+       "sys3pll_a12",
+       "disp0_dto",
+};
+
+static const char * const disp1_clk_parents[] = {
+       "xin",
+       "xinw",
+       "sys0pll_a13",
+       "sys1pll_a13",
+       "sys2pll_a13",
+       "sys3pll_a13",
+       "disp1_dto",
+};
+
+static const char * const gpu_clk_parents[] = {
+       "xin",
+       "xinw",
+       "sys0pll_a14",
+       "sys1pll_a14",
+       "sys2pll_a14",
+       "sys3pll_a14",
+};
+
+static const char * const gnss_clk_parents[] = {
+       "xin",
+       "xinw",
+       "sys0pll_a15",
+       "sys1pll_a15",
+       "sys2pll_a15",
+       "sys3pll_a15",
+};
+
+static const char * const sys_clk_parents[] = {
+       "xin",
+       "xinw",
+       "sys2pll_a20",
+       "sys1pll_a20",
+       "sys1pll_a19",
+       "sys1pll_a18",
+       "sys0pll_a20",
+       "sys1pll_a17",
+};
+
+static const char * const io_clk_parents[] = {
+       "xin",
+       "xinw",
+       "sys2pll_a20",
+       "sys1pll_a20",
+       "sys1pll_a19",
+       "sys1pll_a18",
+       "sys0pll_a20",
+       "sys1pll_a17",
+};
+
+static const char * const g2d_clk_parents[] = {
+       "xin",
+       "xinw",
+       "sys2pll_a20",
+       "sys1pll_a20",
+       "sys1pll_a19",
+       "sys1pll_a18",
+       "sys0pll_a20",
+       "sys1pll_a17",
+};
+
+static const char * const jpenc_clk_parents[] = {
+       "xin",
+       "xinw",
+       "sys2pll_a20",
+       "sys1pll_a20",
+       "sys1pll_a19",
+       "sys1pll_a18",
+       "sys0pll_a20",
+       "sys1pll_a17",
+};
+
+static const char * const vdec_clk_parents[] = {
+       "xin",
+       "xinw",
+       "sys2pll_a20",
+       "sys1pll_a20",
+       "sys1pll_a19",
+       "sys1pll_a18",
+       "sys0pll_a20",
+       "sys1pll_a17",
+};
+
+static const char * const gmac_clk_parents[] = {
+       "xin",
+       "xinw",
+       "sys2pll_a20",
+       "sys1pll_a20",
+       "sys1pll_a19",
+       "sys1pll_a18",
+       "sys0pll_a20",
+       "sys1pll_a17",
+};
+
+static const char * const usb_clk_parents[] = {
+       "xin",
+       "xinw",
+       "sys2pll_a20",
+       "sys1pll_a20",
+       "sys1pll_a19",
+       "sys1pll_a18",
+       "sys0pll_a20",
+       "sys1pll_a17",
+};
+
+static const char * const kas_clk_parents[] = {
+       "xin",
+       "xinw",
+       "sys2pll_a20",
+       "sys1pll_a20",
+       "sys1pll_a19",
+       "sys1pll_a18",
+       "sys0pll_a20",
+       "sys1pll_a17",
+};
+
+static const char * const sec_clk_parents[] = {
+       "xin",
+       "xinw",
+       "sys2pll_a20",
+       "sys1pll_a20",
+       "sys1pll_a19",
+       "sys1pll_a18",
+       "sys0pll_a20",
+       "sys1pll_a17",
+};
+
+static const char * const sdr_clk_parents[] = {
+       "xin",
+       "xinw",
+       "sys2pll_a20",
+       "sys1pll_a20",
+       "sys1pll_a19",
+       "sys1pll_a18",
+       "sys0pll_a20",
+       "sys1pll_a17",
+};
+
+static const char * const vip_clk_parents[] = {
+       "xin",
+       "xinw",
+       "sys2pll_a20",
+       "sys1pll_a20",
+       "sys1pll_a19",
+       "sys1pll_a18",
+       "sys0pll_a20",
+       "sys1pll_a17",
+};
+
+static const char * const nocd_clk_parents[] = {
+       "xin",
+       "xinw",
+       "sys2pll_a20",
+       "sys1pll_a20",
+       "sys1pll_a19",
+       "sys1pll_a18",
+       "sys0pll_a20",
+       "sys1pll_a17",
+};
+
+static const char * const nocr_clk_parents[] = {
+       "xin",
+       "xinw",
+       "sys2pll_a20",
+       "sys1pll_a20",
+       "sys1pll_a19",
+       "sys1pll_a18",
+       "sys0pll_a20",
+       "sys1pll_a17",
+};
+
+static const char * const tpiu_clk_parents[] = {
+       "xin",
+       "xinw",
+       "sys2pll_a20",
+       "sys1pll_a20",
+       "sys1pll_a19",
+       "sys1pll_a18",
+       "sys0pll_a20",
+       "sys1pll_a17",
+};
+
+static struct atlas7_mux_init_data mux_list[] __initdata = {
+       /* mux_name, parent_names, parent_num, flags, mux_flags, mux_offset, shift, width */
+       { "i2s_mux", i2s_clk_parents, ARRAY_SIZE(i2s_clk_parents), 0, 0, SIRFSOC_CLKC_I2S_CLK_SEL, 0, 2 },
+       { "usbphy_mux", usbphy_clk_parents, ARRAY_SIZE(usbphy_clk_parents), 0, 0, SIRFSOC_CLKC_USBPHY_CLK_SEL, 0, 3 },
+       { "btss_mux", btss_clk_parents, ARRAY_SIZE(btss_clk_parents), 0, 0, SIRFSOC_CLKC_BTSS_CLK_SEL, 0, 3 },
+       { "rgmii_mux", rgmii_clk_parents, ARRAY_SIZE(rgmii_clk_parents), 0, 0, SIRFSOC_CLKC_RGMII_CLK_SEL, 0, 3 },
+       { "cpu_mux", cpu_clk_parents, ARRAY_SIZE(cpu_clk_parents), 0, 0, SIRFSOC_CLKC_CPU_CLK_SEL, 0, 3 },
+       { "sdphy01_mux", sdphy01_clk_parents, ARRAY_SIZE(sdphy01_clk_parents), 0, 0, SIRFSOC_CLKC_SDPHY01_CLK_SEL, 0, 3 },
+       { "sdphy23_mux", sdphy23_clk_parents, ARRAY_SIZE(sdphy23_clk_parents), 0, 0, SIRFSOC_CLKC_SDPHY23_CLK_SEL, 0, 3 },
+       { "sdphy45_mux", sdphy45_clk_parents, ARRAY_SIZE(sdphy45_clk_parents), 0, 0, SIRFSOC_CLKC_SDPHY45_CLK_SEL, 0, 3 },
+       { "sdphy67_mux", sdphy67_clk_parents, ARRAY_SIZE(sdphy67_clk_parents), 0, 0, SIRFSOC_CLKC_SDPHY67_CLK_SEL, 0, 3 },
+       { "can_mux", can_clk_parents, ARRAY_SIZE(can_clk_parents), 0, 0, SIRFSOC_CLKC_CAN_CLK_SEL, 0, 3 },
+       { "deint_mux", deint_clk_parents, ARRAY_SIZE(deint_clk_parents), 0, 0, SIRFSOC_CLKC_DEINT_CLK_SEL, 0, 3 },
+       { "nand_mux", nand_clk_parents, ARRAY_SIZE(nand_clk_parents), 0, 0, SIRFSOC_CLKC_NAND_CLK_SEL, 0, 3 },
+       { "disp0_mux", disp0_clk_parents, ARRAY_SIZE(disp0_clk_parents), 0, 0, SIRFSOC_CLKC_DISP0_CLK_SEL, 0, 3 },
+       { "disp1_mux", disp1_clk_parents, ARRAY_SIZE(disp1_clk_parents), 0, 0, SIRFSOC_CLKC_DISP1_CLK_SEL, 0, 3 },
+       { "gpu_mux", gpu_clk_parents, ARRAY_SIZE(gpu_clk_parents), 0, 0, SIRFSOC_CLKC_GPU_CLK_SEL, 0, 3 },
+       { "gnss_mux", gnss_clk_parents, ARRAY_SIZE(gnss_clk_parents), 0, 0, SIRFSOC_CLKC_GNSS_CLK_SEL, 0, 3 },
+       { "sys_mux", sys_clk_parents, ARRAY_SIZE(sys_clk_parents), 0, 0, SIRFSOC_CLKC_SYS_CLK_SEL, 0, 3 },
+       { "io_mux", io_clk_parents, ARRAY_SIZE(io_clk_parents), 0, 0, SIRFSOC_CLKC_IO_CLK_SEL, 0, 3 },
+       { "g2d_mux", g2d_clk_parents, ARRAY_SIZE(g2d_clk_parents), 0, 0, SIRFSOC_CLKC_G2D_CLK_SEL, 0, 3 },
+       { "jpenc_mux", jpenc_clk_parents, ARRAY_SIZE(jpenc_clk_parents), 0, 0, SIRFSOC_CLKC_JPENC_CLK_SEL, 0, 3 },
+       { "vdec_mux", vdec_clk_parents, ARRAY_SIZE(vdec_clk_parents), 0, 0, SIRFSOC_CLKC_VDEC_CLK_SEL, 0, 3 },
+       { "gmac_mux", gmac_clk_parents, ARRAY_SIZE(gmac_clk_parents), 0, 0, SIRFSOC_CLKC_GMAC_CLK_SEL, 0, 3 },
+       { "usb_mux", usb_clk_parents, ARRAY_SIZE(usb_clk_parents), 0, 0, SIRFSOC_CLKC_USB_CLK_SEL, 0, 3 },
+       { "kas_mux", kas_clk_parents, ARRAY_SIZE(kas_clk_parents), 0, 0, SIRFSOC_CLKC_KAS_CLK_SEL, 0, 3 },
+       { "sec_mux", sec_clk_parents, ARRAY_SIZE(sec_clk_parents), 0, 0, SIRFSOC_CLKC_SEC_CLK_SEL, 0, 3 },
+       { "sdr_mux", sdr_clk_parents, ARRAY_SIZE(sdr_clk_parents), 0, 0, SIRFSOC_CLKC_SDR_CLK_SEL, 0, 3 },
+       { "vip_mux", vip_clk_parents, ARRAY_SIZE(vip_clk_parents), 0, 0, SIRFSOC_CLKC_VIP_CLK_SEL, 0, 3 },
+       { "nocd_mux", nocd_clk_parents, ARRAY_SIZE(nocd_clk_parents), 0, 0, SIRFSOC_CLKC_NOCD_CLK_SEL, 0, 3 },
+       { "nocr_mux", nocr_clk_parents, ARRAY_SIZE(nocr_clk_parents), 0, 0, SIRFSOC_CLKC_NOCR_CLK_SEL, 0, 3 },
+       { "tpiu_mux", tpiu_clk_parents, ARRAY_SIZE(tpiu_clk_parents), 0, 0, SIRFSOC_CLKC_TPIU_CLK_SEL, 0, 3 },
+};
+
+/* New units must be added at the tail of the list so existing indices stay stable. */
+static struct atlas7_unit_init_data unit_list[] __initdata = {
+       /* index, unit_name, parent_name, flags, regofs, bit, lock */
+       { 0, "audmscm_kas", "kas_mux", 0, SIRFSOC_CLKC_ROOT_CLK_EN0_SET, 0, &root0_gate_lock },
+       { 1, "gnssm_gnss", "gnss_mux", 0, SIRFSOC_CLKC_ROOT_CLK_EN0_SET, 1, &root0_gate_lock },
+       { 2, "gpum_gpu", "gpu_mux", 0, SIRFSOC_CLKC_ROOT_CLK_EN0_SET, 2, &root0_gate_lock },
+       { 3, "mediam_g2d", "g2d_mux", 0, SIRFSOC_CLKC_ROOT_CLK_EN0_SET, 3, &root0_gate_lock },
+       { 4, "mediam_jpenc", "jpenc_mux", 0, SIRFSOC_CLKC_ROOT_CLK_EN0_SET, 4, &root0_gate_lock },
+       { 5, "vdifm_disp0", "disp0_mux", 0, SIRFSOC_CLKC_ROOT_CLK_EN0_SET, 5, &root0_gate_lock },
+       { 6, "vdifm_disp1", "disp1_mux", 0, SIRFSOC_CLKC_ROOT_CLK_EN0_SET, 6, &root0_gate_lock },
+       { 7, "audmscm_i2s", "i2s_mux", 0, SIRFSOC_CLKC_ROOT_CLK_EN0_SET, 8, &root0_gate_lock },
+       { 8, "audmscm_io", "io_mux", 0, SIRFSOC_CLKC_ROOT_CLK_EN0_SET, 11, &root0_gate_lock },
+       { 9, "vdifm_io", "io_mux", 0, SIRFSOC_CLKC_ROOT_CLK_EN0_SET, 12, &root0_gate_lock },
+       { 10, "gnssm_io", "io_mux", 0, SIRFSOC_CLKC_ROOT_CLK_EN0_SET, 13, &root0_gate_lock },
+       { 11, "mediam_io", "io_mux", 0, SIRFSOC_CLKC_ROOT_CLK_EN0_SET, 14, &root0_gate_lock },
+       { 12, "btm_io", "io_mux", 0, SIRFSOC_CLKC_ROOT_CLK_EN0_SET, 17, &root0_gate_lock },
+       { 13, "mediam_sdphy01", "sdphy01_mux", 0, SIRFSOC_CLKC_ROOT_CLK_EN0_SET, 18, &root0_gate_lock },
+       { 14, "vdifm_sdphy23", "sdphy23_mux", 0, SIRFSOC_CLKC_ROOT_CLK_EN0_SET, 19, &root0_gate_lock },
+       { 15, "vdifm_sdphy45", "sdphy45_mux", 0, SIRFSOC_CLKC_ROOT_CLK_EN0_SET, 20, &root0_gate_lock },
+       { 16, "vdifm_sdphy67", "sdphy67_mux", 0, SIRFSOC_CLKC_ROOT_CLK_EN0_SET, 21, &root0_gate_lock },
+       { 17, "audmscm_xin", "xin", 0, SIRFSOC_CLKC_ROOT_CLK_EN0_SET, 22, &root0_gate_lock },
+       { 18, "mediam_nand", "nand_mux", 0, SIRFSOC_CLKC_ROOT_CLK_EN0_SET, 27, &root0_gate_lock },
+       { 19, "gnssm_sec", "sec_mux", 0, SIRFSOC_CLKC_ROOT_CLK_EN0_SET, 28, &root0_gate_lock },
+       { 20, "cpum_cpu", "cpu_mux", 0, SIRFSOC_CLKC_ROOT_CLK_EN0_SET, 29, &root0_gate_lock },
+       { 21, "gnssm_xin", "xin", 0, SIRFSOC_CLKC_ROOT_CLK_EN0_SET, 30, &root0_gate_lock },
+       { 22, "vdifm_vip", "vip_mux", 0, SIRFSOC_CLKC_ROOT_CLK_EN0_SET, 31, &root0_gate_lock },
+       { 23, "btm_btss", "btss_mux", 0, SIRFSOC_CLKC_ROOT_CLK_EN1_SET, 0, &root1_gate_lock },
+       { 24, "mediam_usbphy", "usbphy_mux", 0, SIRFSOC_CLKC_ROOT_CLK_EN1_SET, 1, &root1_gate_lock },
+       { 25, "rtcm_kas", "kas_mux", 0, SIRFSOC_CLKC_ROOT_CLK_EN1_SET, 2, &root1_gate_lock },
+       { 26, "audmscm_nocd", "nocd_mux", 0, SIRFSOC_CLKC_ROOT_CLK_EN1_SET, 3, &root1_gate_lock },
+       { 27, "vdifm_nocd", "nocd_mux", 0, SIRFSOC_CLKC_ROOT_CLK_EN1_SET, 4, &root1_gate_lock },
+       { 28, "gnssm_nocd", "nocd_mux", 0, SIRFSOC_CLKC_ROOT_CLK_EN1_SET, 5, &root1_gate_lock },
+       { 29, "mediam_nocd", "nocd_mux", 0, SIRFSOC_CLKC_ROOT_CLK_EN1_SET, 6, &root1_gate_lock },
+       { 30, "cpum_nocd", "nocd_mux", 0, SIRFSOC_CLKC_ROOT_CLK_EN1_SET, 8, &root1_gate_lock },
+       { 31, "gpum_nocd", "nocd_mux", 0, SIRFSOC_CLKC_ROOT_CLK_EN1_SET, 9, &root1_gate_lock },
+       { 32, "audmscm_nocr", "nocr_mux", 0, SIRFSOC_CLKC_ROOT_CLK_EN1_SET, 11, &root1_gate_lock },
+       { 33, "vdifm_nocr", "nocr_mux", 0, SIRFSOC_CLKC_ROOT_CLK_EN1_SET, 12, &root1_gate_lock },
+       { 34, "gnssm_nocr", "nocr_mux", CLK_IGNORE_UNUSED, SIRFSOC_CLKC_ROOT_CLK_EN1_SET, 13, &root1_gate_lock },
+       { 35, "mediam_nocr", "nocr_mux", CLK_IGNORE_UNUSED, SIRFSOC_CLKC_ROOT_CLK_EN1_SET, 14, &root1_gate_lock },
+       { 36, "ddrm_nocr", "nocr_mux", CLK_IGNORE_UNUSED, SIRFSOC_CLKC_ROOT_CLK_EN1_SET, 15, &root1_gate_lock },
+       { 37, "cpum_tpiu", "tpiu_mux", 0, SIRFSOC_CLKC_ROOT_CLK_EN1_SET, 16, &root1_gate_lock },
+       { 38, "gpum_nocr", "nocr_mux", 0, SIRFSOC_CLKC_ROOT_CLK_EN1_SET, 17, &root1_gate_lock },
+       { 39, "gnssm_rgmii", "rgmii_mux", 0, SIRFSOC_CLKC_ROOT_CLK_EN1_SET, 20, &root1_gate_lock },
+       { 40, "mediam_vdec", "vdec_mux", 0, SIRFSOC_CLKC_ROOT_CLK_EN1_SET, 21, &root1_gate_lock },
+       { 41, "gpum_sdr", "sdr_mux", 0, SIRFSOC_CLKC_ROOT_CLK_EN1_SET, 22, &root1_gate_lock },
+       { 42, "vdifm_deint", "deint_mux", 0, SIRFSOC_CLKC_ROOT_CLK_EN1_SET, 23, &root1_gate_lock },
+       { 43, "gnssm_can", "can_mux", 0, SIRFSOC_CLKC_ROOT_CLK_EN1_SET, 26, &root1_gate_lock },
+       { 44, "mediam_usb", "usb_mux", 0, SIRFSOC_CLKC_ROOT_CLK_EN1_SET, 28, &root1_gate_lock },
+       { 45, "gnssm_gmac", "gmac_mux", 0, SIRFSOC_CLKC_ROOT_CLK_EN1_SET, 29, &root1_gate_lock },
+       { 46, "cvd_io", "audmscm_io", 0, SIRFSOC_CLKC_LEAF_CLK_EN1_SET, 0, &leaf1_gate_lock },
+       { 47, "timer_io", "audmscm_io", 0, SIRFSOC_CLKC_LEAF_CLK_EN1_SET, 1, &leaf1_gate_lock },
+       { 48, "pulse_io", "audmscm_io", 0, SIRFSOC_CLKC_LEAF_CLK_EN1_SET, 2, &leaf1_gate_lock },
+       { 49, "tsc_io", "audmscm_io", 0, SIRFSOC_CLKC_LEAF_CLK_EN1_SET, 3, &leaf1_gate_lock },
+       { 50, "tsc_xin", "audmscm_xin", 0, SIRFSOC_CLKC_LEAF_CLK_EN1_SET, 21, &leaf1_gate_lock },
+       { 51, "ioctop_io", "audmscm_io", 0, SIRFSOC_CLKC_LEAF_CLK_EN1_SET, 4, &leaf1_gate_lock },
+       { 52, "rsc_io", "audmscm_io", 0, SIRFSOC_CLKC_LEAF_CLK_EN1_SET, 5, &leaf1_gate_lock },
+       { 53, "dvm_io", "audmscm_io", 0, SIRFSOC_CLKC_LEAF_CLK_EN1_SET, 6, &leaf1_gate_lock },
+       { 54, "lvds_xin", "audmscm_xin", 0, SIRFSOC_CLKC_LEAF_CLK_EN1_SET, 7, &leaf1_gate_lock },
+       { 55, "kas_kas", "audmscm_kas", 0, SIRFSOC_CLKC_LEAF_CLK_EN1_SET, 8, &leaf1_gate_lock },
+       { 56, "ac97_kas", "audmscm_kas", 0, SIRFSOC_CLKC_LEAF_CLK_EN1_SET, 9, &leaf1_gate_lock },
+       { 57, "usp0_kas", "audmscm_kas", 0, SIRFSOC_CLKC_LEAF_CLK_EN1_SET, 10, &leaf1_gate_lock },
+       { 58, "usp1_kas", "audmscm_kas", 0, SIRFSOC_CLKC_LEAF_CLK_EN1_SET, 11, &leaf1_gate_lock },
+       { 59, "usp2_kas", "audmscm_kas", 0, SIRFSOC_CLKC_LEAF_CLK_EN1_SET, 12, &leaf1_gate_lock },
+       { 60, "dmac2_kas", "audmscm_kas", 0, SIRFSOC_CLKC_LEAF_CLK_EN1_SET, 13, &leaf1_gate_lock },
+       { 61, "dmac3_kas", "audmscm_kas", 0, SIRFSOC_CLKC_LEAF_CLK_EN1_SET, 14, &leaf1_gate_lock },
+       { 62, "audioif_kas", "audmscm_kas", 0, SIRFSOC_CLKC_LEAF_CLK_EN1_SET, 15, &leaf1_gate_lock },
+       { 63, "i2s1_kas", "audmscm_kas", 0, SIRFSOC_CLKC_LEAF_CLK_EN1_SET, 17, &leaf1_gate_lock },
+       { 64, "thaudmscm_io", "audmscm_io", 0, SIRFSOC_CLKC_LEAF_CLK_EN1_SET, 22, &leaf1_gate_lock },
+       { 65, "analogtest_xin", "audmscm_xin", 0, SIRFSOC_CLKC_LEAF_CLK_EN1_SET, 23, &leaf1_gate_lock },
+       { 66, "sys2pci_io", "vdifm_io", 0, SIRFSOC_CLKC_LEAF_CLK_EN2_SET, 0, &leaf2_gate_lock },
+       { 67, "pciarb_io", "vdifm_io", 0, SIRFSOC_CLKC_LEAF_CLK_EN2_SET, 1, &leaf2_gate_lock },
+       { 68, "pcicopy_io", "vdifm_io", 0, SIRFSOC_CLKC_LEAF_CLK_EN2_SET, 2, &leaf2_gate_lock },
+       { 69, "rom_io", "vdifm_io", 0, SIRFSOC_CLKC_LEAF_CLK_EN2_SET, 3, &leaf2_gate_lock },
+       { 70, "sdio23_io", "vdifm_io", 0, SIRFSOC_CLKC_LEAF_CLK_EN2_SET, 4, &leaf2_gate_lock },
+       { 71, "sdio45_io", "vdifm_io", 0, SIRFSOC_CLKC_LEAF_CLK_EN2_SET, 5, &leaf2_gate_lock },
+       { 72, "sdio67_io", "vdifm_io", 0, SIRFSOC_CLKC_LEAF_CLK_EN2_SET, 6, &leaf2_gate_lock },
+       { 73, "vip1_io", "vdifm_io", 0, SIRFSOC_CLKC_LEAF_CLK_EN2_SET, 7, &leaf2_gate_lock },
+       { 74, "vip1_vip", "vdifm_vip", 0, SIRFSOC_CLKC_LEAF_CLK_EN2_SET, 16, &leaf2_gate_lock },
+       { 75, "sdio23_sdphy23", "vdifm_sdphy23", 0, SIRFSOC_CLKC_LEAF_CLK_EN2_SET, 8, &leaf2_gate_lock },
+       { 76, "sdio45_sdphy45", "vdifm_sdphy45", 0, SIRFSOC_CLKC_LEAF_CLK_EN2_SET, 9, &leaf2_gate_lock },
+       { 77, "sdio67_sdphy67", "vdifm_sdphy67", 0, SIRFSOC_CLKC_LEAF_CLK_EN2_SET, 10, &leaf2_gate_lock },
+       { 78, "vpp0_disp0", "vdifm_disp0", 0, SIRFSOC_CLKC_LEAF_CLK_EN2_SET, 11, &leaf2_gate_lock },
+       { 79, "lcd0_disp0", "vdifm_disp0", 0, SIRFSOC_CLKC_LEAF_CLK_EN2_SET, 12, &leaf2_gate_lock },
+       { 80, "vpp1_disp1", "vdifm_disp1", 0, SIRFSOC_CLKC_LEAF_CLK_EN2_SET, 13, &leaf2_gate_lock },
+       { 81, "lcd1_disp1", "vdifm_disp1", 0, SIRFSOC_CLKC_LEAF_CLK_EN2_SET, 14, &leaf2_gate_lock },
+       { 82, "dcu_deint", "vdifm_deint", 0, SIRFSOC_CLKC_LEAF_CLK_EN2_SET, 15, &leaf2_gate_lock },
+       { 83, "vdifm_dapa_r_nocr", "vdifm_nocr", 0, SIRFSOC_CLKC_LEAF_CLK_EN2_SET, 17, &leaf2_gate_lock },
+       { 84, "gpio1_io", "vdifm_io", 0, SIRFSOC_CLKC_LEAF_CLK_EN2_SET, 18, &leaf2_gate_lock },
+       { 85, "thvdifm_io", "vdifm_io", 0, SIRFSOC_CLKC_LEAF_CLK_EN2_SET, 19, &leaf2_gate_lock },
+       { 86, "gmac_rgmii", "gnssm_rgmii", 0, SIRFSOC_CLKC_LEAF_CLK_EN3_SET, 0, &leaf3_gate_lock },
+       { 87, "gmac_gmac", "gnssm_gmac", 0, SIRFSOC_CLKC_LEAF_CLK_EN3_SET, 1, &leaf3_gate_lock },
+       { 88, "uart1_io", "gnssm_io", 0, SIRFSOC_CLKC_LEAF_CLK_EN3_SET, 2, &leaf3_gate_lock },
+       { 89, "dmac0_io", "gnssm_io", 0, SIRFSOC_CLKC_LEAF_CLK_EN3_SET, 3, &leaf3_gate_lock },
+       { 90, "uart0_io", "gnssm_io", 0, SIRFSOC_CLKC_LEAF_CLK_EN3_SET, 4, &leaf3_gate_lock },
+       { 91, "uart2_io", "gnssm_io", 0, SIRFSOC_CLKC_LEAF_CLK_EN3_SET, 5, &leaf3_gate_lock },
+       { 92, "uart3_io", "gnssm_io", 0, SIRFSOC_CLKC_LEAF_CLK_EN3_SET, 6, &leaf3_gate_lock },
+       { 93, "uart4_io", "gnssm_io", 0, SIRFSOC_CLKC_LEAF_CLK_EN3_SET, 7, &leaf3_gate_lock },
+       { 94, "uart5_io", "gnssm_io", 0, SIRFSOC_CLKC_LEAF_CLK_EN3_SET, 8, &leaf3_gate_lock },
+       { 95, "spi1_io", "gnssm_io", 0, SIRFSOC_CLKC_LEAF_CLK_EN3_SET, 9, &leaf3_gate_lock },
+       { 96, "gnss_gnss", "gnssm_gnss", 0, SIRFSOC_CLKC_LEAF_CLK_EN3_SET, 10, &leaf3_gate_lock },
+       { 97, "canbus1_can", "gnssm_can", 0, SIRFSOC_CLKC_LEAF_CLK_EN3_SET, 12, &leaf3_gate_lock },
+       { 98, "ccsec_sec", "gnssm_sec", 0, SIRFSOC_CLKC_LEAF_CLK_EN3_SET, 15, &leaf3_gate_lock },
+       { 99,  "ccpub_sec", "gnssm_sec", 0, SIRFSOC_CLKC_LEAF_CLK_EN3_SET, 16, &leaf3_gate_lock },
+       { 100, "gnssm_dapa_r_nocr", "gnssm_nocr", 0, SIRFSOC_CLKC_LEAF_CLK_EN3_SET, 13, &leaf3_gate_lock },
+       { 101, "thgnssm_io", "gnssm_io", 0, SIRFSOC_CLKC_LEAF_CLK_EN3_SET, 14, &leaf3_gate_lock },
+       { 102, "media_vdec", "mediam_vdec", 0, SIRFSOC_CLKC_LEAF_CLK_EN4_SET, 0, &leaf4_gate_lock },
+       { 103, "media_jpenc", "mediam_jpenc", 0, SIRFSOC_CLKC_LEAF_CLK_EN4_SET, 1, &leaf4_gate_lock },
+       { 104, "g2d_g2d", "mediam_g2d", 0, SIRFSOC_CLKC_LEAF_CLK_EN4_SET, 2, &leaf4_gate_lock },
+       { 105, "i2c0_io", "mediam_io", 0, SIRFSOC_CLKC_LEAF_CLK_EN4_SET, 3, &leaf4_gate_lock },
+       { 106, "i2c1_io", "mediam_io", 0, SIRFSOC_CLKC_LEAF_CLK_EN4_SET, 4, &leaf4_gate_lock },
+       { 107, "gpio0_io", "mediam_io", 0, SIRFSOC_CLKC_LEAF_CLK_EN4_SET, 5, &leaf4_gate_lock },
+       { 108, "nand_io", "mediam_io", 0, SIRFSOC_CLKC_LEAF_CLK_EN4_SET, 6, &leaf4_gate_lock },
+       { 109, "sdio01_io", "mediam_io", 0, SIRFSOC_CLKC_LEAF_CLK_EN4_SET, 7, &leaf4_gate_lock },
+       { 110, "sys2pci2_io", "mediam_io", 0, SIRFSOC_CLKC_LEAF_CLK_EN4_SET, 8, &leaf4_gate_lock },
+       { 111, "sdio01_sdphy01", "mediam_sdphy01", 0, SIRFSOC_CLKC_LEAF_CLK_EN4_SET, 9, &leaf4_gate_lock },
+       { 112, "nand_nand", "mediam_nand", 0, SIRFSOC_CLKC_LEAF_CLK_EN4_SET, 10, &leaf4_gate_lock },
+       { 113, "usb0_usb", "mediam_usb", 0, SIRFSOC_CLKC_LEAF_CLK_EN4_SET, 11, &leaf4_gate_lock },
+       { 114, "usb1_usb", "mediam_usb", 0, SIRFSOC_CLKC_LEAF_CLK_EN4_SET, 12, &leaf4_gate_lock },
+       { 115, "usbphy0_usbphy", "mediam_usbphy", 0, SIRFSOC_CLKC_LEAF_CLK_EN4_SET, 13, &leaf4_gate_lock },
+       { 116, "usbphy1_usbphy", "mediam_usbphy", 0, SIRFSOC_CLKC_LEAF_CLK_EN4_SET, 14, &leaf4_gate_lock },
+       { 117, "thmediam_io", "mediam_io", 0, SIRFSOC_CLKC_LEAF_CLK_EN4_SET, 15, &leaf4_gate_lock },
+       { 118, "memc_mem", "mempll_clk1", CLK_IGNORE_UNUSED, SIRFSOC_CLKC_LEAF_CLK_EN5_SET, 0, &leaf5_gate_lock },
+       { 119, "dapa_mem", "mempll_clk1", 0, SIRFSOC_CLKC_LEAF_CLK_EN5_SET, 1, &leaf5_gate_lock },
+       { 120, "nocddrm_nocr", "ddrm_nocr", 0, SIRFSOC_CLKC_LEAF_CLK_EN5_SET, 2, &leaf5_gate_lock },
+       { 121, "thddrm_nocr", "ddrm_nocr", 0, SIRFSOC_CLKC_LEAF_CLK_EN5_SET, 3, &leaf5_gate_lock },
+       { 122, "spram1_cpudiv2", "cpum_cpu", 0, SIRFSOC_CLKC_LEAF_CLK_EN6_SET, 0, &leaf6_gate_lock },
+       { 123, "spram2_cpudiv2", "cpum_cpu", 0, SIRFSOC_CLKC_LEAF_CLK_EN6_SET, 1, &leaf6_gate_lock },
+       { 124, "coresight_cpudiv2", "cpum_cpu", 0, SIRFSOC_CLKC_LEAF_CLK_EN6_SET, 2, &leaf6_gate_lock },
+       { 125, "thcpum_cpudiv4", "cpum_cpu", 0, SIRFSOC_CLKC_LEAF_CLK_EN6_SET, 3, &leaf6_gate_lock },
+       { 126, "graphic_gpu", "gpum_gpu", 0, SIRFSOC_CLKC_LEAF_CLK_EN7_SET, 0, &leaf7_gate_lock },
+       { 127, "vss_sdr", "gpum_sdr", 0, SIRFSOC_CLKC_LEAF_CLK_EN7_SET, 1, &leaf7_gate_lock },
+       { 128, "thgpum_nocr", "gpum_nocr", 0, SIRFSOC_CLKC_LEAF_CLK_EN7_SET, 2, &leaf7_gate_lock },
+       { 129, "a7ca_btss", "btm_btss", 0, SIRFSOC_CLKC_LEAF_CLK_EN8_SET, 1, &leaf8_gate_lock },
+       { 130, "dmac4_io", "btm_io", 0, SIRFSOC_CLKC_LEAF_CLK_EN8_SET, 2, &leaf8_gate_lock },
+       { 131, "uart6_io", "btm_io", 0, SIRFSOC_CLKC_LEAF_CLK_EN8_SET, 3, &leaf8_gate_lock },
+       { 132, "usp3_io", "btm_io", 0, SIRFSOC_CLKC_LEAF_CLK_EN8_SET, 4, &leaf8_gate_lock },
+       { 133, "a7ca_io", "btm_io", 0, SIRFSOC_CLKC_LEAF_CLK_EN8_SET, 5, &leaf8_gate_lock },
+       { 134, "noc_btm_io", "btm_io", 0, SIRFSOC_CLKC_LEAF_CLK_EN8_SET, 6, &leaf8_gate_lock },
+       { 135, "thbtm_io", "btm_io", 0, SIRFSOC_CLKC_LEAF_CLK_EN8_SET, 7, &leaf8_gate_lock },
+       { 136, "btslow", "xinw_fixdiv_btslow", 0, SIRFSOC_CLKC_ROOT_CLK_EN1_SET, 25, &root1_gate_lock },
+       { 137, "a7ca_btslow", "btslow", 0, SIRFSOC_CLKC_LEAF_CLK_EN8_SET, 0, &leaf8_gate_lock },
+};
+
+static struct clk *atlas7_clks[ARRAY_SIZE(unit_list)];
+
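+/*
+ * Unit clocks are gated through write-one-to-set/clear registers: regofs
+ * points at an EN*_SET register, and the matching CLR and STAT registers
+ * sit at fixed offsets from it, so the ops below derive their addresses
+ * from the EN0_SET/EN0_CLR/EN0_STAT deltas.
+ */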
+static int unit_clk_is_enabled(struct clk_hw *hw)
+{
+       struct clk_unit *clk = to_unitclk(hw);
+       u32 reg;
+
+       reg = clk->regofs + SIRFSOC_CLKC_ROOT_CLK_EN0_STAT - SIRFSOC_CLKC_ROOT_CLK_EN0_SET;
+
+       return !!(clkc_readl(reg) & BIT(clk->bit));
+}
+
+static int unit_clk_enable(struct clk_hw *hw)
+{
+       u32 reg;
+       struct clk_unit *clk = to_unitclk(hw);
+       unsigned long flags;
+
+       reg = clk->regofs;
+
+       spin_lock_irqsave(clk->lock, flags);
+       clkc_writel(BIT(clk->bit), reg);
+       spin_unlock_irqrestore(clk->lock, flags);
+       return 0;
+}
+
+static void unit_clk_disable(struct clk_hw *hw)
+{
+       u32  reg;
+       struct clk_unit *clk = to_unitclk(hw);
+       unsigned long flags;
+
+       reg = clk->regofs + SIRFSOC_CLKC_ROOT_CLK_EN0_CLR - SIRFSOC_CLKC_ROOT_CLK_EN0_SET;
+
+       spin_lock_irqsave(clk->lock, flags);
+       clkc_writel(BIT(clk->bit), reg);
+       spin_unlock_irqrestore(clk->lock, flags);
+}
+
+static const struct clk_ops unit_clk_ops = {
+       .is_enabled = unit_clk_is_enabled,
+       .enable = unit_clk_enable,
+       .disable = unit_clk_disable,
+};
+
+static struct clk * __init
+atlas7_unit_clk_register(struct device *dev, const char *name,
+                const char * const parent_name, unsigned long flags,
+                u32 regofs, u8 bit, spinlock_t *lock)
+{
+       struct clk *clk;
+       struct clk_unit *unit;
+       struct clk_init_data init;
+
+       unit = kzalloc(sizeof(*unit), GFP_KERNEL);
+       if (!unit)
+               return ERR_PTR(-ENOMEM);
+
+       init.name = name;
+       init.parent_names = &parent_name;
+       init.num_parents = 1;
+       init.ops = &unit_clk_ops;
+       init.flags = flags;
+
+       unit->hw.init = &init;
+       unit->regofs = regofs;
+       unit->bit = bit;
+       unit->lock = lock;
+
+       clk = clk_register(dev, &unit->hw);
+       if (IS_ERR(clk))
+               kfree(unit);
+
+       return clk;
+}
+
+static struct atlas7_reset_desc atlas7_reset_unit[] = {
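+       /* name, clk_ofs, clk_bit, rst_ofs, rst_bit, lock */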
+       { "PWM", 0x0244, 0, 0x0320, 0, &leaf0_gate_lock }, /* 0-5 */
+       { "THCGUM", 0x0244, 3, 0x0320, 1, &leaf0_gate_lock },
+       { "CVD", 0x04A0, 0, 0x032C, 0, &leaf1_gate_lock },
+       { "TIMER", 0x04A0, 1, 0x032C, 1, &leaf1_gate_lock },
+       { "PULSEC", 0x04A0, 2, 0x032C, 2, &leaf1_gate_lock },
+       { "TSC", 0x04A0, 3, 0x032C, 3, &leaf1_gate_lock },
+       { "IOCTOP", 0x04A0, 4, 0x032C, 4, &leaf1_gate_lock }, /* 6-10 */
+       { "RSC", 0x04A0, 5, 0x032C, 5, &leaf1_gate_lock },
+       { "DVM", 0x04A0, 6, 0x032C, 6, &leaf1_gate_lock },
+       { "LVDS", 0x04A0, 7, 0x032C, 7, &leaf1_gate_lock },
+       { "KAS", 0x04A0, 8, 0x032C, 8, &leaf1_gate_lock },
+       { "AC97", 0x04A0, 9, 0x032C, 9, &leaf1_gate_lock }, /* 11-15 */
+       { "USP0", 0x04A0, 10, 0x032C, 10, &leaf1_gate_lock },
+       { "USP1", 0x04A0, 11, 0x032C, 11, &leaf1_gate_lock },
+       { "USP2", 0x04A0, 12, 0x032C, 12, &leaf1_gate_lock },
+       { "DMAC2", 0x04A0, 13, 0x032C, 13, &leaf1_gate_lock },
+       { "DMAC3", 0x04A0, 14, 0x032C, 14, &leaf1_gate_lock }, /* 16-20 */
+       { "AUDIO", 0x04A0, 15, 0x032C, 15, &leaf1_gate_lock },
+       { "I2S1", 0x04A0, 17, 0x032C, 16, &leaf1_gate_lock },
+       { "PMU_AUDIO", 0x04A0, 22, 0x032C, 17, &leaf1_gate_lock },
+       { "THAUDMSCM", 0x04A0, 23, 0x032C, 18, &leaf1_gate_lock },
+       { "SYS2PCI", 0x04B8, 0, 0x0338, 0, &leaf2_gate_lock }, /* 21-25 */
+       { "PCIARB", 0x04B8, 1, 0x0338, 1, &leaf2_gate_lock },
+       { "PCICOPY", 0x04B8, 2, 0x0338, 2, &leaf2_gate_lock },
+       { "ROM", 0x04B8, 3, 0x0338, 3, &leaf2_gate_lock },
+       { "SDIO23", 0x04B8, 4, 0x0338, 4, &leaf2_gate_lock },
+       { "SDIO45", 0x04B8, 5, 0x0338, 5, &leaf2_gate_lock }, /* 26-30 */
+       { "SDIO67", 0x04B8, 6, 0x0338, 6, &leaf2_gate_lock },
+       { "VIP1", 0x04B8, 7, 0x0338, 7, &leaf2_gate_lock },
+       { "VPP0", 0x04B8, 11, 0x0338, 8, &leaf2_gate_lock },
+       { "LCD0", 0x04B8, 12, 0x0338, 9, &leaf2_gate_lock },
+       { "VPP1", 0x04B8, 13, 0x0338, 10, &leaf2_gate_lock }, /* 31-35 */
+       { "LCD1", 0x04B8, 14, 0x0338, 11, &leaf2_gate_lock },
+       { "DCU", 0x04B8, 15, 0x0338, 12, &leaf2_gate_lock },
+       { "GPIO", 0x04B8, 18, 0x0338, 13, &leaf2_gate_lock },
+       { "DAPA_VDIFM", 0x04B8, 17, 0x0338, 15, &leaf2_gate_lock },
+       { "THVDIFM", 0x04B8, 19, 0x0338, 16, &leaf2_gate_lock }, /* 36-40 */
+       { "RGMII", 0x04D0, 0, 0x0344, 0, &leaf3_gate_lock },
+       { "GMAC", 0x04D0, 1, 0x0344, 1, &leaf3_gate_lock },
+       { "UART1", 0x04D0, 2, 0x0344, 2, &leaf3_gate_lock },
+       { "DMAC0", 0x04D0, 3, 0x0344, 3, &leaf3_gate_lock },
+       { "UART0", 0x04D0, 4, 0x0344, 4, &leaf3_gate_lock }, /* 41-45 */
+       { "UART2", 0x04D0, 5, 0x0344, 5, &leaf3_gate_lock },
+       { "UART3", 0x04D0, 6, 0x0344, 6, &leaf3_gate_lock },
+       { "UART4", 0x04D0, 7, 0x0344, 7, &leaf3_gate_lock },
+       { "UART5", 0x04D0, 8, 0x0344, 8, &leaf3_gate_lock },
+       { "SPI1", 0x04D0, 9, 0x0344, 9, &leaf3_gate_lock }, /* 46-50 */
+       { "GNSS_SYS_M0", 0x04D0, 10, 0x0344, 10, &leaf3_gate_lock },
+       { "CANBUS1", 0x04D0, 12, 0x0344, 11, &leaf3_gate_lock },
+       { "CCSEC", 0x04D0, 15, 0x0344, 12, &leaf3_gate_lock },
+       { "CCPUB", 0x04D0, 16, 0x0344, 13, &leaf3_gate_lock },
+       { "DAPA_GNSSM", 0x04D0, 13, 0x0344, 14, &leaf3_gate_lock }, /* 51-55 */
+       { "THGNSSM", 0x04D0, 14, 0x0344, 15, &leaf3_gate_lock },
+       { "VDEC", 0x04E8, 0, 0x0350, 0, &leaf4_gate_lock },
+       { "JPENC", 0x04E8, 1, 0x0350, 1, &leaf4_gate_lock },
+       { "G2D", 0x04E8, 2, 0x0350, 2, &leaf4_gate_lock },
+       { "I2C0", 0x04E8, 3, 0x0350, 3, &leaf4_gate_lock }, /* 56-60 */
+       { "I2C1", 0x04E8, 4, 0x0350, 4, &leaf4_gate_lock },
+       { "GPIO0", 0x04E8, 5, 0x0350, 5, &leaf4_gate_lock },
+       { "NAND", 0x04E8, 6, 0x0350, 6, &leaf4_gate_lock },
+       { "SDIO01", 0x04E8, 7, 0x0350, 7, &leaf4_gate_lock },
+       { "SYS2PCI2", 0x04E8, 8, 0x0350, 8, &leaf4_gate_lock }, /* 61-65 */
+       { "USB0", 0x04E8, 11, 0x0350, 9, &leaf4_gate_lock },
+       { "USB1", 0x04E8, 12, 0x0350, 10, &leaf4_gate_lock },
+       { "THMEDIAM", 0x04E8, 15, 0x0350, 11, &leaf4_gate_lock },
+       { "MEMC_DDRPHY", 0x0500, 0, 0x035C, 0, &leaf5_gate_lock },
+       { "MEMC_UPCTL", 0x0500, 0, 0x035C, 1, &leaf5_gate_lock }, /* 66-70 */
+       { "DAPA_MEM", 0x0500, 1, 0x035C, 2, &leaf5_gate_lock },
+       { "MEMC_MEMDIV", 0x0500, 0, 0x035C, 3, &leaf5_gate_lock },
+       { "THDDRM", 0x0500, 3, 0x035C, 4, &leaf5_gate_lock },
+       { "CORESIGHT", 0x0518, 3, 0x0368, 13, &leaf6_gate_lock },
+       { "THCPUM", 0x0518, 4, 0x0368, 17, &leaf6_gate_lock }, /* 71-75 */
+       { "GRAPHIC", 0x0530, 0, 0x0374, 0, &leaf7_gate_lock },
+       { "VSS_SDR", 0x0530, 1, 0x0374, 1, &leaf7_gate_lock },
+       { "THGPUM", 0x0530, 2, 0x0374, 2, &leaf7_gate_lock },
+       { "DMAC4", 0x0548, 2, 0x0380, 1, &leaf8_gate_lock },
+       { "UART6", 0x0548, 3, 0x0380, 2, &leaf8_gate_lock }, /* 76- */
+       { "USP3", 0x0548, 4, 0x0380, 3, &leaf8_gate_lock },
+       { "THBTM", 0x0548, 5, 0x0380, 5, &leaf8_gate_lock },
+       { "A7CA", 0x0548, 1, 0x0380, 0, &leaf8_gate_lock },
+       { "A7CA_APB", 0x0548, 5, 0x0380, 4, &leaf8_gate_lock },
+};
+
+static int atlas7_reset_module(struct reset_controller_dev *rcdev,
+                                       unsigned long reset_idx)
+{
+       struct atlas7_reset_desc *reset = &atlas7_reset_unit[reset_idx];
+       unsigned long flags;
+
+       /*
+        * HW-suggested unit reset sequence:
+        * assert sw reset (0)
+        * set sw clk_en to 1 if the clock was disabled before the reset
+        * delay 16 clocks
+        * disable the clock (sw clk_en = 0)
+        * de-assert the reset (1)
+        * after this sequence, SW decides whether to restore the clock
+        */
+
+       spin_lock_irqsave(reset->lock, flags);
+       /* check whether the clock is currently enabled */
+       if (clkc_readl(reset->clk_ofs + 8) & (1 << reset->clk_bit)) {
+               clkc_writel(1 << reset->rst_bit, reset->rst_ofs + 4);
+               udelay(2);
+               clkc_writel(1 << reset->clk_bit, reset->clk_ofs + 4);
+               clkc_writel(1 << reset->rst_bit, reset->rst_ofs);
+               /* restore clock enable */
+               clkc_writel(1 << reset->clk_bit, reset->clk_ofs);
+       } else {
+               clkc_writel(1 << reset->rst_bit, reset->rst_ofs + 4);
+               clkc_writel(1 << reset->clk_bit, reset->clk_ofs);
+               udelay(2);
+               clkc_writel(1 << reset->clk_bit, reset->clk_ofs + 4);
+               clkc_writel(1 << reset->rst_bit, reset->rst_ofs);
+       }
+       spin_unlock_irqrestore(reset->lock, flags);
+
+       return 0;
+}
+
+static struct reset_control_ops atlas7_rst_ops = {
+       .reset = atlas7_reset_module,
+};
+
+static struct reset_controller_dev atlas7_rst_ctlr = {
+       .ops = &atlas7_rst_ops,
+       .owner = THIS_MODULE,
+       .of_reset_n_cells = 1,
+};
+
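+/*
+ * Register the PLLs, their dividers and gates, the divider/mux/unit clock
+ * tables and finally the reset controller, all backed by the single
+ * memory-mapped clkc register block.
+ */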
+static void __init atlas7_clk_init(struct device_node *np)
+{
+       struct clk *clk;
+       struct atlas7_div_init_data *div;
+       struct atlas7_mux_init_data *mux;
+       struct atlas7_unit_init_data *unit;
+       int i;
+       int ret;
+
+       sirfsoc_clk_vbase = of_iomap(np, 0);
+       if (!sirfsoc_clk_vbase)
+               panic("unable to map clkc registers\n");
+
+       of_node_put(np);
+
+       clk = clk_register(NULL, &clk_cpupll.hw);
+       BUG_ON(!clk);
+       clk = clk_register(NULL, &clk_mempll.hw);
+       BUG_ON(!clk);
+       clk = clk_register(NULL, &clk_sys0pll.hw);
+       BUG_ON(!clk);
+       clk = clk_register(NULL, &clk_sys1pll.hw);
+       BUG_ON(!clk);
+       clk = clk_register(NULL, &clk_sys2pll.hw);
+       BUG_ON(!clk);
+       clk = clk_register(NULL, &clk_sys3pll.hw);
+       BUG_ON(!clk);
+
+       clk = clk_register_divider_table(NULL, "cpupll_div1", "cpupll_vco", 0,
+                        sirfsoc_clk_vbase + SIRFSOC_CLKC_CPUPLL_AB_CTRL1, 0, 3, 0,
+                        pll_div_table, &cpupll_ctrl1_lock);
+       BUG_ON(!clk);
+       clk = clk_register_divider_table(NULL, "cpupll_div2", "cpupll_vco", 0,
+                        sirfsoc_clk_vbase + SIRFSOC_CLKC_CPUPLL_AB_CTRL1, 4, 3, 0,
+                        pll_div_table, &cpupll_ctrl1_lock);
+       BUG_ON(!clk);
+       clk = clk_register_divider_table(NULL, "cpupll_div3", "cpupll_vco", 0,
+                        sirfsoc_clk_vbase + SIRFSOC_CLKC_CPUPLL_AB_CTRL1, 8, 3, 0,
+                        pll_div_table, &cpupll_ctrl1_lock);
+       BUG_ON(!clk);
+
+       clk = clk_register_divider_table(NULL, "mempll_div1", "mempll_vco", 0,
+                        sirfsoc_clk_vbase + SIRFSOC_CLKC_MEMPLL_AB_CTRL1, 0, 3, 0,
+                        pll_div_table, &mempll_ctrl1_lock);
+       BUG_ON(!clk);
+       clk = clk_register_divider_table(NULL, "mempll_div2", "mempll_vco", 0,
+                        sirfsoc_clk_vbase + SIRFSOC_CLKC_MEMPLL_AB_CTRL1, 4, 3, 0,
+                        pll_div_table, &mempll_ctrl1_lock);
+       BUG_ON(!clk);
+       clk = clk_register_divider_table(NULL, "mempll_div3", "mempll_vco", 0,
+                        sirfsoc_clk_vbase + SIRFSOC_CLKC_MEMPLL_AB_CTRL1, 8, 3, 0,
+                        pll_div_table, &mempll_ctrl1_lock);
+       BUG_ON(!clk);
+
+       clk = clk_register_divider_table(NULL, "sys0pll_div1", "sys0pll_vco", 0,
+                        sirfsoc_clk_vbase + SIRFSOC_CLKC_SYS0PLL_AB_CTRL1, 0, 3, 0,
+                        pll_div_table, &sys0pll_ctrl1_lock);
+       BUG_ON(!clk);
+       clk = clk_register_divider_table(NULL, "sys0pll_div2", "sys0pll_vco", 0,
+                        sirfsoc_clk_vbase + SIRFSOC_CLKC_SYS0PLL_AB_CTRL1, 4, 3, 0,
+                        pll_div_table, &sys0pll_ctrl1_lock);
+       BUG_ON(!clk);
+       clk = clk_register_divider_table(NULL, "sys0pll_div3", "sys0pll_vco", 0,
+                        sirfsoc_clk_vbase + SIRFSOC_CLKC_SYS0PLL_AB_CTRL1, 8, 3, 0,
+                        pll_div_table, &sys0pll_ctrl1_lock);
+       BUG_ON(!clk);
+       clk = clk_register_fixed_factor(NULL, "sys0pll_fixdiv", "sys0pll_vco",
+                                       CLK_SET_RATE_PARENT, 1, 2);
+
+       clk = clk_register_divider_table(NULL, "sys1pll_div1", "sys1pll_vco", 0,
+                        sirfsoc_clk_vbase + SIRFSOC_CLKC_SYS1PLL_AB_CTRL1, 0, 3, 0,
+                        pll_div_table, &sys1pll_ctrl1_lock);
+       BUG_ON(!clk);
+       clk = clk_register_divider_table(NULL, "sys1pll_div2", "sys1pll_vco", 0,
+                        sirfsoc_clk_vbase + SIRFSOC_CLKC_SYS1PLL_AB_CTRL1, 4, 3, 0,
+                        pll_div_table, &sys1pll_ctrl1_lock);
+       BUG_ON(!clk);
+       clk = clk_register_divider_table(NULL, "sys1pll_div3", "sys1pll_vco", 0,
+                        sirfsoc_clk_vbase + SIRFSOC_CLKC_SYS1PLL_AB_CTRL1, 8, 3, 0,
+                        pll_div_table, &sys1pll_ctrl1_lock);
+       BUG_ON(!clk);
+       clk = clk_register_fixed_factor(NULL, "sys1pll_fixdiv", "sys1pll_vco",
+                                       CLK_SET_RATE_PARENT, 1, 2);
+
+       clk = clk_register_divider_table(NULL, "sys2pll_div1", "sys2pll_vco", 0,
+                        sirfsoc_clk_vbase + SIRFSOC_CLKC_SYS2PLL_AB_CTRL1, 0, 3, 0,
+                        pll_div_table, &sys2pll_ctrl1_lock);
+       BUG_ON(!clk);
+       clk = clk_register_divider_table(NULL, "sys2pll_div2", "sys2pll_vco", 0,
+                        sirfsoc_clk_vbase + SIRFSOC_CLKC_SYS2PLL_AB_CTRL1, 4, 3, 0,
+                        pll_div_table, &sys2pll_ctrl1_lock);
+       BUG_ON(!clk);
+       clk = clk_register_divider_table(NULL, "sys2pll_div3", "sys2pll_vco", 0,
+                        sirfsoc_clk_vbase + SIRFSOC_CLKC_SYS2PLL_AB_CTRL1, 8, 3, 0,
+                        pll_div_table, &sys2pll_ctrl1_lock);
+       BUG_ON(!clk);
+       clk = clk_register_fixed_factor(NULL, "sys2pll_fixdiv", "sys2pll_vco",
+                                       CLK_SET_RATE_PARENT, 1, 2);
+
+       clk = clk_register_divider_table(NULL, "sys3pll_div1", "sys3pll_vco", 0,
+                        sirfsoc_clk_vbase + SIRFSOC_CLKC_SYS3PLL_AB_CTRL1, 0, 3, 0,
+                        pll_div_table, &sys3pll_ctrl1_lock);
+       BUG_ON(!clk);
+       clk = clk_register_divider_table(NULL, "sys3pll_div2", "sys3pll_vco", 0,
+                        sirfsoc_clk_vbase + SIRFSOC_CLKC_SYS3PLL_AB_CTRL1, 4, 3, 0,
+                        pll_div_table, &sys3pll_ctrl1_lock);
+       BUG_ON(!clk);
+       clk = clk_register_divider_table(NULL, "sys3pll_div3", "sys3pll_vco", 0,
+                        sirfsoc_clk_vbase + SIRFSOC_CLKC_SYS3PLL_AB_CTRL1, 8, 3, 0,
+                        pll_div_table, &sys3pll_ctrl1_lock);
+       BUG_ON(!clk);
+       clk = clk_register_fixed_factor(NULL, "sys3pll_fixdiv", "sys3pll_vco",
+                                       CLK_SET_RATE_PARENT, 1, 2);
+
+       BUG_ON(!clk);
+       clk = clk_register_fixed_factor(NULL, "xinw_fixdiv_btslow", "xinw",
+                                       CLK_SET_RATE_PARENT, 1, 4);
+
+       BUG_ON(!clk);
+       clk = clk_register_gate(NULL, "cpupll_clk1", "cpupll_div1",
+               CLK_SET_RATE_PARENT, sirfsoc_clk_vbase + SIRFSOC_CLKC_CPUPLL_AB_CTRL1,
+                               12, 0, &cpupll_ctrl1_lock);
+       BUG_ON(!clk);
+       clk = clk_register_gate(NULL, "cpupll_clk2", "cpupll_div2",
+               CLK_SET_RATE_PARENT, sirfsoc_clk_vbase + SIRFSOC_CLKC_CPUPLL_AB_CTRL1,
+                               13, 0, &cpupll_ctrl1_lock);
+       BUG_ON(!clk);
+       clk = clk_register_gate(NULL, "cpupll_clk3", "cpupll_div3",
+               CLK_SET_RATE_PARENT, sirfsoc_clk_vbase + SIRFSOC_CLKC_CPUPLL_AB_CTRL1,
+                               14, 0, &cpupll_ctrl1_lock);
+       BUG_ON(!clk);
+
+       clk = clk_register_gate(NULL, "mempll_clk1", "mempll_div1",
+               CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
+               sirfsoc_clk_vbase + SIRFSOC_CLKC_MEMPLL_AB_CTRL1,
+               12, 0, &mempll_ctrl1_lock);
+       BUG_ON(!clk);
+       clk = clk_register_gate(NULL, "mempll_clk2", "mempll_div2",
+               CLK_SET_RATE_PARENT, sirfsoc_clk_vbase + SIRFSOC_CLKC_MEMPLL_AB_CTRL1,
+                               13, 0, &mempll_ctrl1_lock);
+       BUG_ON(!clk);
+       clk = clk_register_gate(NULL, "mempll_clk3", "mempll_div3",
+               CLK_SET_RATE_PARENT, sirfsoc_clk_vbase + SIRFSOC_CLKC_MEMPLL_AB_CTRL1,
+                               14, 0, &mempll_ctrl1_lock);
+       BUG_ON(!clk);
+
+       clk = clk_register_gate(NULL, "sys0pll_clk1", "sys0pll_div1",
+               CLK_SET_RATE_PARENT, sirfsoc_clk_vbase + SIRFSOC_CLKC_SYS0PLL_AB_CTRL1,
+                               12, 0, &sys0pll_ctrl1_lock);
+       BUG_ON(!clk);
+       clk = clk_register_gate(NULL, "sys0pll_clk2", "sys0pll_div2",
+               CLK_SET_RATE_PARENT, sirfsoc_clk_vbase + SIRFSOC_CLKC_SYS0PLL_AB_CTRL1,
+                               13, 0, &sys0pll_ctrl1_lock);
+       BUG_ON(!clk);
+       clk = clk_register_gate(NULL, "sys0pll_clk3", "sys0pll_div3",
+               CLK_SET_RATE_PARENT, sirfsoc_clk_vbase + SIRFSOC_CLKC_SYS0PLL_AB_CTRL1,
+                               14, 0, &sys0pll_ctrl1_lock);
+       BUG_ON(!clk);
+
+       clk = clk_register_gate(NULL, "sys1pll_clk1", "sys1pll_div1",
+               CLK_SET_RATE_PARENT, sirfsoc_clk_vbase + SIRFSOC_CLKC_SYS1PLL_AB_CTRL1,
+                               12, 0, &sys1pll_ctrl1_lock);
+       BUG_ON(!clk);
+       clk = clk_register_gate(NULL, "sys1pll_clk2", "sys1pll_div2",
+               CLK_SET_RATE_PARENT, sirfsoc_clk_vbase + SIRFSOC_CLKC_SYS1PLL_AB_CTRL1,
+                               13, 0, &sys1pll_ctrl1_lock);
+       BUG_ON(!clk);
+       clk = clk_register_gate(NULL, "sys1pll_clk3", "sys1pll_div3",
+               CLK_SET_RATE_PARENT, sirfsoc_clk_vbase + SIRFSOC_CLKC_SYS1PLL_AB_CTRL1,
+                               14, 0, &sys1pll_ctrl1_lock);
+       BUG_ON(!clk);
+
+       clk = clk_register_gate(NULL, "sys2pll_clk1", "sys2pll_div1",
+               CLK_SET_RATE_PARENT, sirfsoc_clk_vbase + SIRFSOC_CLKC_SYS2PLL_AB_CTRL1,
+                               12, 0, &sys2pll_ctrl1_lock);
+       BUG_ON(!clk);
+       clk = clk_register_gate(NULL, "sys2pll_clk2", "sys2pll_div2",
+               CLK_SET_RATE_PARENT, sirfsoc_clk_vbase + SIRFSOC_CLKC_SYS2PLL_AB_CTRL1,
+                               13, 0, &sys2pll_ctrl1_lock);
+       BUG_ON(!clk);
+       clk = clk_register_gate(NULL, "sys2pll_clk3", "sys2pll_div3",
+               CLK_SET_RATE_PARENT, sirfsoc_clk_vbase + SIRFSOC_CLKC_SYS2PLL_AB_CTRL1,
+                               14, 0, &sys2pll_ctrl1_lock);
+       BUG_ON(!clk);
+
+       clk = clk_register_gate(NULL, "sys3pll_clk1", "sys3pll_div1",
+               CLK_SET_RATE_PARENT, sirfsoc_clk_vbase + SIRFSOC_CLKC_SYS3PLL_AB_CTRL1,
+                               12, 0, &sys3pll_ctrl1_lock);
+       BUG_ON(!clk);
+       clk = clk_register_gate(NULL, "sys3pll_clk2", "sys3pll_div2",
+               CLK_SET_RATE_PARENT, sirfsoc_clk_vbase + SIRFSOC_CLKC_SYS3PLL_AB_CTRL1,
+                               13, 0, &sys3pll_ctrl1_lock);
+       BUG_ON(!clk);
+       clk = clk_register_gate(NULL, "sys3pll_clk3", "sys3pll_div3",
+               CLK_SET_RATE_PARENT, sirfsoc_clk_vbase + SIRFSOC_CLKC_SYS3PLL_AB_CTRL1,
+                               14, 0, &sys3pll_ctrl1_lock);
+       BUG_ON(!clk);
+
+       clk = clk_register(NULL, &clk_audio_dto.hw);
+       BUG_ON(!clk);
+
+       clk = clk_register(NULL, &clk_disp0_dto.hw);
+       BUG_ON(!clk);
+
+       clk = clk_register(NULL, &clk_disp1_dto.hw);
+       BUG_ON(!clk);
+
+       for (i = 0; i < ARRAY_SIZE(divider_list); i++) {
+               div = &divider_list[i];
+               clk = clk_register_divider(NULL, div->div_name,
+                       div->parent_name, div->divider_flags, sirfsoc_clk_vbase + div->div_offset,
+                       div->shift, div->width, 0, div->lock);
+               BUG_ON(!clk);
+               clk = clk_register_gate(NULL, div->gate_name, div->div_name,
+                       div->gate_flags, sirfsoc_clk_vbase + div->gate_offset,
+                               div->gate_bit, 0, div->lock);
+               BUG_ON(!clk);
+       }
+       /* the selector status register check is intentionally skipped */
+       for (i = 0; i < ARRAY_SIZE(mux_list); i++) {
+               mux = &mux_list[i];
+               clk = clk_register_mux(NULL, mux->mux_name, mux->parent_names,
+                              mux->parent_num, mux->flags,
+                              sirfsoc_clk_vbase + mux->mux_offset,
+                              mux->shift, mux->width,
+                              mux->mux_flags, NULL);
+               BUG_ON(!clk);
+       }
+
+       for (i = 0; i < ARRAY_SIZE(unit_list); i++) {
+               unit = &unit_list[i];
+               atlas7_clks[i] = atlas7_unit_clk_register(NULL, unit->unit_name, unit->parent_name,
+                               unit->flags, unit->regofs, unit->bit, unit->lock);
+               BUG_ON(!atlas7_clks[i]);
+       }
+
+       clk_data.clks = atlas7_clks;
+       clk_data.clk_num = ARRAY_SIZE(unit_list);
+
+       ret = of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data);
+       BUG_ON(ret);
+
+       atlas7_rst_ctlr.of_node = np;
+       atlas7_rst_ctlr.nr_resets = ARRAY_SIZE(atlas7_reset_unit);
+       reset_controller_register(&atlas7_rst_ctlr);
+}
+CLK_OF_DECLARE(atlas7_clk, "sirf,atlas7-car", atlas7_clk_init);
index 37af51c5f213bb496c2dce74ba975cbc4227a22f..9fc285d784d3faa4b78eac774cc466b989b8d667 100644 (file)
@@ -10,8 +10,8 @@
 #define KHZ     1000
 #define MHZ     (KHZ * KHZ)
 
-static void *sirfsoc_clk_vbase;
-static void *sirfsoc_rsc_vbase;
+static void __iomem *sirfsoc_clk_vbase;
+static void __iomem *sirfsoc_rsc_vbase;
 static struct clk_onecell_data clk_data;
 
 /*
@@ -188,7 +188,7 @@ static struct clk_ops std_pll_ops = {
        .set_rate = pll_clk_set_rate,
 };
 
-static const char *pll_clk_parents[] = {
+static const char * const pll_clk_parents[] = {
        "osc",
 };
 
@@ -284,7 +284,7 @@ static struct clk_hw usb_pll_clk_hw = {
  * clock domains - cpu, mem, sys/io, dsp, gfx
  */
 
-static const char *dmn_clk_parents[] = {
+static const char * const dmn_clk_parents[] = {
        "rtc",
        "osc",
        "pll1",
@@ -673,7 +673,7 @@ static void std_clk_disable(struct clk_hw *hw)
        clkc_writel(val, reg);
 }
 
-static const char *std_clk_io_parents[] = {
+static const char * const std_clk_io_parents[] = {
        "io",
 };
 
@@ -949,7 +949,7 @@ static struct clk_std clk_pulse = {
        },
 };
 
-static const char *std_clk_dsp_parents[] = {
+static const char * const std_clk_dsp_parents[] = {
        "dsp",
 };
 
@@ -981,7 +981,7 @@ static struct clk_std clk_mf = {
        },
 };
 
-static const char *std_clk_sys_parents[] = {
+static const char * const std_clk_sys_parents[] = {
        "sys",
 };
 
@@ -999,7 +999,7 @@ static struct clk_std clk_security = {
        },
 };
 
-static const char *std_clk_usb_parents[] = {
+static const char * const std_clk_usb_parents[] = {
        "usb_pll",
 };
 
index 7e2d15a0c7b8aec7c28e9074727c76406abc6918..d8bb239753a4747261c9ead8f8eb57cf68459e5e 100644 (file)
@@ -2,3 +2,4 @@ obj-y += clk.o
 obj-y += clk-gate.o
 obj-y += clk-pll.o
 obj-y += clk-periph.o
+obj-y += clk-pll-a10.o clk-periph-a10.o clk-gate-a10.o
diff --git a/drivers/clk/socfpga/clk-gate-a10.c b/drivers/clk/socfpga/clk-gate-a10.c
new file mode 100644 (file)
index 0000000..83c6780
--- /dev/null
@@ -0,0 +1,190 @@
+/*
+ * Copyright (C) 2015 Altera Corporation. All rights reserved
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+#include <linux/clk-provider.h>
+#include <linux/io.h>
+#include <linux/mfd/syscon.h>
+#include <linux/of.h>
+#include <linux/regmap.h>
+
+#include "clk.h"
+
+#define streq(a, b) (strcmp((a), (b)) == 0)
+
+#define to_socfpga_gate_clk(p) container_of(p, struct socfpga_gate_clk, hw.hw)
+
+/* SDMMC Group for System Manager defines */
+#define SYSMGR_SDMMCGRP_CTRL_OFFSET    0x28
+
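+/*
+ * The gated clock runs at the parent rate divided by either the fixed
+ * divider or 2^val, where val is read from the optional divider register.
+ */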
+static unsigned long socfpga_gate_clk_recalc_rate(struct clk_hw *hwclk,
+       unsigned long parent_rate)
+{
+       struct socfpga_gate_clk *socfpgaclk = to_socfpga_gate_clk(hwclk);
+       u32 div = 1, val;
+
+       if (socfpgaclk->fixed_div)
+               div = socfpgaclk->fixed_div;
+       else if (socfpgaclk->div_reg) {
+               val = readl(socfpgaclk->div_reg) >> socfpgaclk->shift;
+               val &= div_mask(socfpgaclk->width);
+               div = (1 << val);
+       }
+
+       return parent_rate / div;
+}
+
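+/*
+ * Translate the optional clk-phase values (degrees, in 45-degree steps)
+ * into the smplsel/drvsel fields of the System Manager SDMMC control
+ * register before the clock is used.
+ */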
+static int socfpga_clk_prepare(struct clk_hw *hwclk)
+{
+       struct socfpga_gate_clk *socfpgaclk = to_socfpga_gate_clk(hwclk);
+       int i;
+       u32 hs_timing;
+       u32 clk_phase[2];
+
+       if (socfpgaclk->clk_phase[0] || socfpgaclk->clk_phase[1]) {
+               for (i = 0; i < ARRAY_SIZE(clk_phase); i++) {
+                       switch (socfpgaclk->clk_phase[i]) {
+                       case 0:
+                               clk_phase[i] = 0;
+                               break;
+                       case 45:
+                               clk_phase[i] = 1;
+                               break;
+                       case 90:
+                               clk_phase[i] = 2;
+                               break;
+                       case 135:
+                               clk_phase[i] = 3;
+                               break;
+                       case 180:
+                               clk_phase[i] = 4;
+                               break;
+                       case 225:
+                               clk_phase[i] = 5;
+                               break;
+                       case 270:
+                               clk_phase[i] = 6;
+                               break;
+                       case 315:
+                               clk_phase[i] = 7;
+                               break;
+                       default:
+                               clk_phase[i] = 0;
+                               break;
+                       }
+               }
+
+               hs_timing = SYSMGR_SDMMC_CTRL_SET(clk_phase[0], clk_phase[1]);
+               if (!IS_ERR(socfpgaclk->sys_mgr_base_addr))
+                       regmap_write(socfpgaclk->sys_mgr_base_addr,
+                                    SYSMGR_SDMMCGRP_CTRL_OFFSET, hs_timing);
+               else
+                       pr_err("%s: cannot set clk_phase because sys_mgr_base_addr is not available!\n",
+                                       __func__);
+       }
+       return 0;
+}
+
+static struct clk_ops gateclk_ops = {
+       .prepare = socfpga_clk_prepare,
+       .recalc_rate = socfpga_gate_clk_recalc_rate,
+};
+
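+/*
+ * Parse the optional clk-gate, fixed-divider, div-reg and clk-phase
+ * properties of the device tree node and register the gate clock.
+ */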
+static void __init __socfpga_gate_init(struct device_node *node,
+       const struct clk_ops *ops)
+{
+       u32 clk_gate[2];
+       u32 div_reg[3];
+       u32 clk_phase[2];
+       u32 fixed_div;
+       struct clk *clk;
+       struct socfpga_gate_clk *socfpga_clk;
+       const char *clk_name = node->name;
+       const char *parent_name[SOCFPGA_MAX_PARENTS];
+       struct clk_init_data init;
+       int rc;
+       int i = 0;
+
+       socfpga_clk = kzalloc(sizeof(*socfpga_clk), GFP_KERNEL);
+       if (WARN_ON(!socfpga_clk))
+               return;
+
+       rc = of_property_read_u32_array(node, "clk-gate", clk_gate, 2);
+       if (rc)
+               clk_gate[0] = 0;
+
+       if (clk_gate[0]) {
+               socfpga_clk->hw.reg = clk_mgr_a10_base_addr + clk_gate[0];
+               socfpga_clk->hw.bit_idx = clk_gate[1];
+
+               gateclk_ops.enable = clk_gate_ops.enable;
+               gateclk_ops.disable = clk_gate_ops.disable;
+       }
+
+       rc = of_property_read_u32(node, "fixed-divider", &fixed_div);
+       if (rc)
+               socfpga_clk->fixed_div = 0;
+       else
+               socfpga_clk->fixed_div = fixed_div;
+
+       rc = of_property_read_u32_array(node, "div-reg", div_reg, 3);
+       if (!rc) {
+               socfpga_clk->div_reg = clk_mgr_a10_base_addr + div_reg[0];
+               socfpga_clk->shift = div_reg[1];
+               socfpga_clk->width = div_reg[2];
+       } else {
+               socfpga_clk->div_reg = NULL;
+       }
+
+       rc = of_property_read_u32_array(node, "clk-phase", clk_phase, 2);
+       if (!rc) {
+               socfpga_clk->clk_phase[0] = clk_phase[0];
+               socfpga_clk->clk_phase[1] = clk_phase[1];
+
+               socfpga_clk->sys_mgr_base_addr =
+                       syscon_regmap_lookup_by_compatible("altr,sys-mgr");
+               if (IS_ERR(socfpga_clk->sys_mgr_base_addr)) {
+                       pr_err("%s: failed to find altr,sys-mgr regmap!\n",
+                                       __func__);
+                       return;
+               }
+       }
+
+       of_property_read_string(node, "clock-output-names", &clk_name);
+
+       init.name = clk_name;
+       init.ops = ops;
+       init.flags = 0;
+       while (i < SOCFPGA_MAX_PARENTS && (parent_name[i] =
+                       of_clk_get_parent_name(node, i)) != NULL)
+               i++;
+
+       init.parent_names = parent_name;
+       init.num_parents = i;
+       socfpga_clk->hw.hw.init = &init;
+
+       clk = clk_register(NULL, &socfpga_clk->hw.hw);
+       if (WARN_ON(IS_ERR(clk))) {
+               kfree(socfpga_clk);
+               return;
+       }
+       rc = of_clk_add_provider(node, of_clk_src_simple_get, clk);
+       if (WARN_ON(rc))
+               return;
+}
+
+void __init socfpga_a10_gate_init(struct device_node *node)
+{
+       __socfpga_gate_init(node, &gateclk_ops);
+}
index dd3a78c64795f27c7e171f004f9bb0462f44579a..82449cd76fd7e3ebb9b98b39ce81682eb1105173 100644 (file)
 #define SOCFPGA_MMC_CLK                        "sdmmc_clk"
 #define SOCFPGA_GPIO_DB_CLK_OFFSET     0xA8
 
-#define streq(a, b) (strcmp((a), (b)) == 0)
-
 #define to_socfpga_gate_clk(p) container_of(p, struct socfpga_gate_clk, hw.hw)
 
 /* SDMMC Group for System Manager defines */
 #define SYSMGR_SDMMCGRP_CTRL_OFFSET    0x108
-#define SYSMGR_SDMMC_CTRL_SET(smplsel, drvsel) \
-       ((((smplsel) & 0x7) << 3) | (((drvsel) & 0x7) << 0))
 
 static u8 socfpga_clk_get_parent(struct clk_hw *hwclk)
 {
@@ -194,7 +190,6 @@ static void __init __socfpga_gate_init(struct device_node *node,
        const char *parent_name[SOCFPGA_MAX_PARENTS];
        struct clk_init_data init;
        int rc;
-       int i = 0;
 
        socfpga_clk = kzalloc(sizeof(*socfpga_clk), GFP_KERNEL);
        if (WARN_ON(!socfpga_clk))
@@ -224,7 +219,7 @@ static void __init __socfpga_gate_init(struct device_node *node,
                socfpga_clk->shift = div_reg[1];
                socfpga_clk->width = div_reg[2];
        } else {
-               socfpga_clk->div_reg = 0;
+               socfpga_clk->div_reg = NULL;
        }
 
        rc = of_property_read_u32_array(node, "clk-phase", clk_phase, 2);
@@ -238,12 +233,9 @@ static void __init __socfpga_gate_init(struct device_node *node,
        init.name = clk_name;
        init.ops = ops;
        init.flags = 0;
-       while (i < SOCFPGA_MAX_PARENTS && (parent_name[i] =
-                       of_clk_get_parent_name(node, i)) != NULL)
-               i++;
 
+       init.num_parents = of_clk_parent_fill(node, parent_name, SOCFPGA_MAX_PARENTS);
        init.parent_names = parent_name;
-       init.num_parents = i;
        socfpga_clk->hw.hw.init = &init;
 
        clk = clk_register(NULL, &socfpga_clk->hw.hw);
diff --git a/drivers/clk/socfpga/clk-periph-a10.c b/drivers/clk/socfpga/clk-periph-a10.c
new file mode 100644 (file)
index 0000000..9d0181b
--- /dev/null
@@ -0,0 +1,138 @@
+/*
+ * Copyright (C) 2015 Altera Corporation. All rights reserved
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+#include <linux/clk-provider.h>
+#include <linux/io.h>
+#include <linux/of.h>
+
+#include "clk.h"
+
+#define CLK_MGR_FREE_SHIFT             16
+#define CLK_MGR_FREE_MASK              0x7
+
+#define SOCFPGA_MPU_FREE_CLK           "mpu_free_clk"
+#define SOCFPGA_NOC_FREE_CLK           "noc_free_clk"
+#define SOCFPGA_SDMMC_FREE_CLK         "sdmmc_free_clk"
+#define to_socfpga_periph_clk(p) container_of(p, struct socfpga_periph_clk, hw.hw)
+
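+/*
+ * The peripheral clock rate is the parent rate divided by the fixed
+ * divider, by the value programmed in the optional divider register plus
+ * one, or by the low 11 bits of the clock's own register plus one.
+ */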
+static unsigned long clk_periclk_recalc_rate(struct clk_hw *hwclk,
+                                            unsigned long parent_rate)
+{
+       struct socfpga_periph_clk *socfpgaclk = to_socfpga_periph_clk(hwclk);
+       u32 div;
+
+       if (socfpgaclk->fixed_div) {
+               div = socfpgaclk->fixed_div;
+       } else if (socfpgaclk->div_reg) {
+               div = readl(socfpgaclk->div_reg) >> socfpgaclk->shift;
+               div &= div_mask(socfpgaclk->width);
+               div += 1;
+       } else {
+               div = ((readl(socfpgaclk->hw.reg) & 0x7ff) + 1);
+       }
+
+       return parent_rate / div;
+}
+
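+/*
+ * Only the mpu/noc/sdmmc "free" clocks have a selectable parent, encoded
+ * in the CLK_MGR_FREE field of their register; all other peripheral
+ * clocks report parent 0.
+ */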
+static u8 clk_periclk_get_parent(struct clk_hw *hwclk)
+{
+       struct socfpga_periph_clk *socfpgaclk = to_socfpga_periph_clk(hwclk);
+       u32 clk_src;
+
+       clk_src = readl(socfpgaclk->hw.reg);
+       if (streq(hwclk->init->name, SOCFPGA_MPU_FREE_CLK) ||
+           streq(hwclk->init->name, SOCFPGA_NOC_FREE_CLK) ||
+           streq(hwclk->init->name, SOCFPGA_SDMMC_FREE_CLK))
+               return (clk_src >> CLK_MGR_FREE_SHIFT) &
+                       CLK_MGR_FREE_MASK;
+       else
+               return 0;
+}
+
+static const struct clk_ops periclk_ops = {
+       .recalc_rate = clk_periclk_recalc_rate,
+       .get_parent = clk_periclk_get_parent,
+};
+
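+/*
+ * Map the clock's control register via the "reg" property, pick up the
+ * optional div-reg and fixed-divider properties, and register the clock
+ * with its single parent.
+ */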
+static __init void __socfpga_periph_init(struct device_node *node,
+       const struct clk_ops *ops)
+{
+       u32 reg;
+       struct clk *clk;
+       struct socfpga_periph_clk *periph_clk;
+       const char *clk_name = node->name;
+       const char *parent_name;
+       struct clk_init_data init;
+       int rc;
+       u32 fixed_div;
+       u32 div_reg[3];
+
+       of_property_read_u32(node, "reg", &reg);
+
+       periph_clk = kzalloc(sizeof(*periph_clk), GFP_KERNEL);
+       if (WARN_ON(!periph_clk))
+               return;
+
+       periph_clk->hw.reg = clk_mgr_a10_base_addr + reg;
+
+       rc = of_property_read_u32_array(node, "div-reg", div_reg, 3);
+       if (!rc) {
+               periph_clk->div_reg = clk_mgr_a10_base_addr + div_reg[0];
+               periph_clk->shift = div_reg[1];
+               periph_clk->width = div_reg[2];
+       } else {
+               periph_clk->div_reg = NULL;
+       }
+
+       rc = of_property_read_u32(node, "fixed-divider", &fixed_div);
+       if (rc)
+               periph_clk->fixed_div = 0;
+       else
+               periph_clk->fixed_div = fixed_div;
+
+       of_property_read_string(node, "clock-output-names", &clk_name);
+
+       init.name = clk_name;
+       init.ops = ops;
+       init.flags = 0;
+
+       parent_name = of_clk_get_parent_name(node, 0);
+       init.num_parents = 1;
+       init.parent_names = &parent_name;
+
+       periph_clk->hw.hw.init = &init;
+
+       clk = clk_register(NULL, &periph_clk->hw.hw);
+       if (WARN_ON(IS_ERR(clk))) {
+               kfree(periph_clk);
+               return;
+       }
+       rc = of_clk_add_provider(node, of_clk_src_simple_get, clk);
+       if (rc < 0) {
+               pr_err("Could not register clock provider for node:%s\n",
+                      clk_name);
+               goto err_clk;
+       }
+
+       return;
+
+err_clk:
+       clk_unregister(clk);
+}
+
+void __init socfpga_a10_periph_init(struct device_node *node)
+{
+       __socfpga_periph_init(node, &periclk_ops);
+}
index 46531c34ec9b5b58799c1c538c3f3ef103158502..83aeaa219d14e82800976e476fc24a72a0f032a9 100644 (file)
@@ -76,7 +76,7 @@ static __init void __socfpga_periph_init(struct device_node *node,
                periph_clk->shift = div_reg[1];
                periph_clk->width = div_reg[2];
        } else {
-               periph_clk->div_reg = 0;
+               periph_clk->div_reg = NULL;
        }
 
        rc = of_property_read_u32(node, "fixed-divider", &fixed_div);
diff --git a/drivers/clk/socfpga/clk-pll-a10.c b/drivers/clk/socfpga/clk-pll-a10.c
new file mode 100644 (file)
index 0000000..1178b11
--- /dev/null
@@ -0,0 +1,129 @@
+/*
+ * Copyright (C) 2015 Altera Corporation. All rights reserved
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+#include <linux/clk-provider.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+
+#include "clk.h"
+
+/* Clock Manager offsets */
+#define CLK_MGR_PLL_CLK_SRC_SHIFT      8
+#define CLK_MGR_PLL_CLK_SRC_MASK       0x3
+
+/* Clock bypass bits */
+#define SOCFPGA_PLL_BG_PWRDWN          0
+#define SOCFPGA_PLL_PWR_DOWN           1
+#define SOCFPGA_PLL_EXT_ENA            2
+#define SOCFPGA_PLL_DIVF_MASK          0x00001FFF
+#define SOCFPGA_PLL_DIVF_SHIFT 0
+#define SOCFPGA_PLL_DIVQ_MASK          0x003F0000
+#define SOCFPGA_PLL_DIVQ_SHIFT 16
+#define SOCFGPA_MAX_PARENTS    5
+
+#define SOCFPGA_MAIN_PLL_CLK           "main_pll"
+#define SOCFPGA_PERIP_PLL_CLK          "periph_pll"
+
+#define to_socfpga_clk(p) container_of(p, struct socfpga_pll, hw.hw)
+
+void __iomem *clk_mgr_a10_base_addr;
+
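+/*
+ * VCO frequency = parent_rate * (divf + 1) / (divq + 1), with divf and
+ * divq taken from the PLL's VCO1 register.
+ */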
+static unsigned long clk_pll_recalc_rate(struct clk_hw *hwclk,
+                                        unsigned long parent_rate)
+{
+       struct socfpga_pll *socfpgaclk = to_socfpga_clk(hwclk);
+       unsigned long divf, divq, reg;
+       unsigned long long vco_freq;
+
+       /* read VCO1 reg for numerator and denominator */
+       reg = readl(socfpgaclk->hw.reg + 0x4);
+       divf = (reg & SOCFPGA_PLL_DIVF_MASK) >> SOCFPGA_PLL_DIVF_SHIFT;
+       divq = (reg & SOCFPGA_PLL_DIVQ_MASK) >> SOCFPGA_PLL_DIVQ_SHIFT;
+       vco_freq = (unsigned long long)parent_rate * (divf + 1);
+       do_div(vco_freq, (1 + divq));
+       return (unsigned long)vco_freq;
+}
+
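+/* The PLL reference clock select lives in bits [9:8] of the PLL register. */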
+static u8 clk_pll_get_parent(struct clk_hw *hwclk)
+{
+       struct socfpga_pll *socfpgaclk = to_socfpga_clk(hwclk);
+       u32 pll_src;
+
+       pll_src = readl(socfpgaclk->hw.reg);
+
+       return (pll_src >> CLK_MGR_PLL_CLK_SRC_SHIFT) &
+               CLK_MGR_PLL_CLK_SRC_MASK;
+}
+
+static struct clk_ops clk_pll_ops = {
+       .recalc_rate = clk_pll_recalc_rate,
+       .get_parent = clk_pll_get_parent,
+};
+
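+/*
+ * Registering a PLL also maps the Arria10 clock manager (looked up via the
+ * "altr,clk-mgr" compatible) and borrows the generic gate ops to drive the
+ * PLL's external-enable bit.
+ */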
+static struct clk * __init __socfpga_pll_init(struct device_node *node,
+       const struct clk_ops *ops)
+{
+       u32 reg;
+       struct clk *clk;
+       struct socfpga_pll *pll_clk;
+       const char *clk_name = node->name;
+       const char *parent_name[SOCFGPA_MAX_PARENTS];
+       struct clk_init_data init;
+       struct device_node *clkmgr_np;
+       int rc;
+       int i = 0;
+
+       of_property_read_u32(node, "reg", &reg);
+
+       pll_clk = kzalloc(sizeof(*pll_clk), GFP_KERNEL);
+       if (WARN_ON(!pll_clk))
+               return NULL;
+
+       clkmgr_np = of_find_compatible_node(NULL, NULL, "altr,clk-mgr");
+       clk_mgr_a10_base_addr = of_iomap(clkmgr_np, 0);
+       BUG_ON(!clk_mgr_a10_base_addr);
+       pll_clk->hw.reg = clk_mgr_a10_base_addr + reg;
+
+       of_property_read_string(node, "clock-output-names", &clk_name);
+
+       init.name = clk_name;
+       init.ops = ops;
+       init.flags = 0;
+
+       while (i < SOCFGPA_MAX_PARENTS && (parent_name[i] =
+                       of_clk_get_parent_name(node, i)) != NULL)
+               i++;
+       init.num_parents = i;
+       init.parent_names = parent_name;
+       pll_clk->hw.hw.init = &init;
+
+       pll_clk->hw.bit_idx = SOCFPGA_PLL_EXT_ENA;
+       clk_pll_ops.enable = clk_gate_ops.enable;
+       clk_pll_ops.disable = clk_gate_ops.disable;
+
+       clk = clk_register(NULL, &pll_clk->hw.hw);
+       if (WARN_ON(IS_ERR(clk))) {
+               kfree(pll_clk);
+               return NULL;
+       }
+       rc = of_clk_add_provider(node, of_clk_src_simple_get, clk);
+       return clk;
+}
+
+void __init socfpga_a10_pll_init(struct device_node *node)
+{
+       __socfpga_pll_init(node, &clk_pll_ops);
+}
index de6da957a09d6ebe82f416370c84a7dc50acea8e..8f26b5234947eafca2649fdbce3262db4351f8bd 100644 (file)
@@ -92,7 +92,6 @@ static __init struct clk *__socfpga_pll_init(struct device_node *node,
        struct clk_init_data init;
        struct device_node *clkmgr_np;
        int rc;
-       int i = 0;
 
        of_property_read_u32(node, "reg", &reg);
 
@@ -111,11 +110,7 @@ static __init struct clk *__socfpga_pll_init(struct device_node *node,
        init.ops = ops;
        init.flags = 0;
 
-       while (i < SOCFPGA_MAX_PARENTS && (parent_name[i] =
-                       of_clk_get_parent_name(node, i)) != NULL)
-               i++;
-
-       init.num_parents = i;
+       init.num_parents = of_clk_parent_fill(node, parent_name, SOCFPGA_MAX_PARENTS);
        init.parent_names = parent_name;
        pll_clk->hw.hw.init = &init;
 
index 43db947e5f0e51e60c232832d379faca9ca386d3..7564d2e35f3207cf2d9ee399827d461cdae90194 100644 (file)
@@ -24,4 +24,9 @@
 CLK_OF_DECLARE(socfpga_pll_clk, "altr,socfpga-pll-clock", socfpga_pll_init);
 CLK_OF_DECLARE(socfpga_perip_clk, "altr,socfpga-perip-clk", socfpga_periph_init);
 CLK_OF_DECLARE(socfpga_gate_clk, "altr,socfpga-gate-clk", socfpga_gate_init);
-
+CLK_OF_DECLARE(socfpga_a10_pll_clk, "altr,socfpga-a10-pll-clock",
+              socfpga_a10_pll_init);
+CLK_OF_DECLARE(socfpga_a10_perip_clk, "altr,socfpga-a10-perip-clk",
+              socfpga_a10_periph_init);
+CLK_OF_DECLARE(socfpga_a10_gate_clk, "altr,socfpga-a10-gate-clk",
+              socfpga_a10_gate_init);
index d291f60c46e1adbbef48733388f1a1f0d40e31c5..603973ab7e2914d0da3c39e56d7613dd8263ee19 100644 (file)
 #define CLKMGR_L4SRC           0x70
 #define CLKMGR_PERPLL_SRC      0xAC
 
-#define SOCFPGA_MAX_PARENTS            3
+#define SOCFPGA_MAX_PARENTS            5
 #define div_mask(width) ((1 << (width)) - 1)
 
+#define streq(a, b) (strcmp((a), (b)) == 0)
+#define SYSMGR_SDMMC_CTRL_SET(smplsel, drvsel) \
+       ((((smplsel) & 0x7) << 3) | (((drvsel) & 0x7) << 0))
+
 extern void __iomem *clk_mgr_base_addr;
+extern void __iomem *clk_mgr_a10_base_addr;
 
 void __init socfpga_pll_init(struct device_node *node);
 void __init socfpga_periph_init(struct device_node *node);
 void __init socfpga_gate_init(struct device_node *node);
+void socfpga_a10_pll_init(struct device_node *node);
+void socfpga_a10_periph_init(struct device_node *node);
+void socfpga_a10_gate_init(struct device_node *node);
 
 struct socfpga_pll {
        struct clk_gate hw;
@@ -44,6 +52,7 @@ struct socfpga_gate_clk {
        char *parent_name;
        u32 fixed_div;
        void __iomem *div_reg;
+       struct regmap *sys_mgr_base_addr;
        u32 width;      /* only valid if div_reg != 0 */
        u32 shift;      /* only valid if div_reg != 0 */
        u32 clk_phase[2];
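
The new SYSMGR_SDMMC_CTRL_SET() macro above only packs two 3-bit fields into one register value: drvsel into bits [2:0] and smplsel into bits [5:3]. A short worked example with hypothetical values:

/* SYSMGR_SDMMC_CTRL_SET(smplsel, drvsel) with smplsel = 3, drvsel = 2:
 *
 *   ((3 & 0x7) << 3) | ((2 & 0x7) << 0) = 0x18 | 0x02 = 0x1a
 *
 * Values outside 0..7 are silently truncated by the & 0x7 masks.
 */
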
index bf12a25eb3a22aab048f04d5280195d21bd7dd01..657ca14ba709a64b97e297e74f15f8f5d751efdf 100644 (file)
@@ -116,7 +116,7 @@ static long flexgen_round_rate(struct clk_hw *hw, unsigned long rate,
        return *prate / div;
 }
 
-unsigned long flexgen_recalc_rate(struct clk_hw *hw,
+static unsigned long flexgen_recalc_rate(struct clk_hw *hw,
                unsigned long parent_rate)
 {
        struct flexgen *flexgen = to_flexgen(hw);
@@ -174,7 +174,7 @@ static const struct clk_ops flexgen_ops = {
        .set_rate = flexgen_set_rate,
 };
 
-struct clk *clk_register_flexgen(const char *name,
+static struct clk *clk_register_flexgen(const char *name,
                                const char **parent_names, u8 num_parents,
                                void __iomem *reg, spinlock_t *lock, u32 idx,
                                unsigned long flexgen_flags) {
@@ -245,7 +245,7 @@ static const char ** __init flexgen_get_parents(struct device_node *np,
        const char **parents;
        int nparents, i;
 
-       nparents = of_count_phandle_with_args(np, "clocks", "#clock-cells");
+       nparents = of_clk_get_parent_count(np);
        if (WARN_ON(nparents <= 0))
                return NULL;
 
@@ -260,7 +260,7 @@ static const char ** __init flexgen_get_parents(struct device_node *np,
        return parents;
 }
 
-void __init st_of_flexgen_setup(struct device_node *np)
+static void __init st_of_flexgen_setup(struct device_node *np)
 {
        struct device_node *pnode;
        void __iomem *reg;
index a917c4c7eaa9c2f6709985c544a4370494e29b38..e94197f04b0b3bae5a811c70e7d91393a2a263c6 100644 (file)
@@ -492,7 +492,7 @@ static int quadfs_pll_is_enabled(struct clk_hw *hw)
        return !!npda;
 }
 
-int clk_fs660c32_vco_get_rate(unsigned long input, struct stm_fs *fs,
+static int clk_fs660c32_vco_get_rate(unsigned long input, struct stm_fs *fs,
                           unsigned long *rate)
 {
        unsigned long nd = fs->ndiv + 16; /* ndiv value */
@@ -519,7 +519,7 @@ static unsigned long quadfs_pll_fs660c32_recalc_rate(struct clk_hw *hw,
        return rate;
 }
 
-int clk_fs660c32_vco_get_params(unsigned long input,
+static int clk_fs660c32_vco_get_params(unsigned long input,
                                unsigned long output, struct stm_fs *fs)
 {
 /* Formula
index fdcff10f6d3089ec389b21522e178db32eaaa911..4fbe6e099587c0541ce9a22783fb2dc4e4becf8c 100644 (file)
@@ -26,7 +26,7 @@ static const char ** __init clkgen_mux_get_parents(struct device_node *np,
        const char **parents;
        int nparents, i;
 
-       nparents = of_count_phandle_with_args(np, "clocks", "#clock-cells");
+       nparents = of_clk_get_parent_count(np);
        if (WARN_ON(nparents <= 0))
                return ERR_PTR(-EINVAL);
 
@@ -131,7 +131,7 @@ static int clkgena_divmux_is_enabled(struct clk_hw *hw)
        return (s8)clk_mux_ops.get_parent(mux_hw) > 0;
 }
 
-u8 clkgena_divmux_get_parent(struct clk_hw *hw)
+static u8 clkgena_divmux_get_parent(struct clk_hw *hw)
 {
        struct clkgena_divmux *genamux = to_clkgena_divmux(hw);
        struct clk_hw *mux_hw = &genamux->mux.hw;
@@ -168,7 +168,7 @@ static int clkgena_divmux_set_parent(struct clk_hw *hw, u8 index)
        return 0;
 }
 
-unsigned long clkgena_divmux_recalc_rate(struct clk_hw *hw,
+static unsigned long clkgena_divmux_recalc_rate(struct clk_hw *hw,
                unsigned long parent_rate)
 {
        struct clkgena_divmux *genamux = to_clkgena_divmux(hw);
@@ -215,7 +215,7 @@ static const struct clk_ops clkgena_divmux_ops = {
 /**
  * clk_register_genamux - register a genamux clock with the clock framework
  */
-struct clk *clk_register_genamux(const char *name,
+static struct clk *clk_register_genamux(const char *name,
                                const char **parent_names, u8 num_parents,
                                void __iomem *reg,
                                const struct clkgena_divmux_data *muxdata,
@@ -385,7 +385,7 @@ static void __iomem * __init clkgen_get_register_base(
        return reg;
 }
 
-void __init st_of_clkgena_divmux_setup(struct device_node *np)
+static void __init st_of_clkgena_divmux_setup(struct device_node *np)
 {
        const struct of_device_id *match;
        const struct clkgena_divmux_data *data;
@@ -485,7 +485,7 @@ static const struct of_device_id clkgena_prediv_of_match[] = {
        {}
 };
 
-void __init st_of_clkgena_prediv_setup(struct device_node *np)
+static void __init st_of_clkgena_prediv_setup(struct device_node *np)
 {
        const struct of_device_id *match;
        void __iomem *reg;
@@ -622,7 +622,7 @@ static const struct of_device_id mux_of_match[] = {
        {}
 };
 
-void __init st_of_clkgen_mux_setup(struct device_node *np)
+static void __init st_of_clkgen_mux_setup(struct device_node *np)
 {
        const struct of_device_id *match;
        struct clk *clk;
@@ -699,7 +699,7 @@ static const struct of_device_id vcc_of_match[] = {
        {}
 };
 
-void __init st_of_clkgen_vcc_setup(struct device_node *np)
+static void __init st_of_clkgen_vcc_setup(struct device_node *np)
 {
        const struct of_device_id *match;
        void __iomem *reg;
index d204ba85db3a423d6b404cb825c873c2815850e2..1065322072136bd90c868f9d6a2fd4c56104f303 100644 (file)
@@ -270,7 +270,7 @@ static int clkgen_pll_is_enabled(struct clk_hw *hw)
        return !poweroff;
 }
 
-unsigned long recalc_stm_pll800c65(struct clk_hw *hw,
+static unsigned long recalc_stm_pll800c65(struct clk_hw *hw,
                unsigned long parent_rate)
 {
        struct clkgen_pll *pll = to_clkgen_pll(hw);
@@ -297,7 +297,7 @@ unsigned long recalc_stm_pll800c65(struct clk_hw *hw,
 
 }
 
-unsigned long recalc_stm_pll1600c65(struct clk_hw *hw,
+static unsigned long recalc_stm_pll1600c65(struct clk_hw *hw,
                unsigned long parent_rate)
 {
        struct clkgen_pll *pll = to_clkgen_pll(hw);
@@ -321,7 +321,7 @@ unsigned long recalc_stm_pll1600c65(struct clk_hw *hw,
        return rate;
 }
 
-unsigned long recalc_stm_pll3200c32(struct clk_hw *hw,
+static unsigned long recalc_stm_pll3200c32(struct clk_hw *hw,
                unsigned long parent_rate)
 {
        struct clkgen_pll *pll = to_clkgen_pll(hw);
@@ -343,7 +343,7 @@ unsigned long recalc_stm_pll3200c32(struct clk_hw *hw,
        return rate;
 }
 
-unsigned long recalc_stm_pll1200c32(struct clk_hw *hw,
+static unsigned long recalc_stm_pll1200c32(struct clk_hw *hw,
                unsigned long parent_rate)
 {
        struct clkgen_pll *pll = to_clkgen_pll(hw);
@@ -544,7 +544,7 @@ CLK_OF_DECLARE(clkgena_c65_plls,
               "st,clkgena-plls-c65", clkgena_c65_pll_setup);
 
 static struct clk * __init clkgen_odf_register(const char *parent_name,
-                                              void * __iomem reg,
+                                              void __iomem *reg,
                                               struct clkgen_pll_data *pll_data,
                                               int odf,
                                               spinlock_t *odf_lock,
index ec8f5a1fca09f4240c433a1de2ea8207e6e369e5..9d028aec58e5d8addc3a080ced48594db57c1657 100644 (file)
@@ -128,7 +128,7 @@ static struct platform_driver sun4i_a10_mod0_clk_driver = {
        },
        .probe = sun4i_a10_mod0_clk_probe,
 };
-module_platform_driver(sun4i_a10_mod0_clk_driver);
+builtin_platform_driver(sun4i_a10_mod0_clk_driver);
 
 static const struct factors_data sun9i_a80_mod0_data __initconst = {
        .enable = 31,
index d8da77d72861b29f0867d47b43b7917da31037f7..887f4ea161bb4eb79700c7778aecbec2c977e2b4 100644 (file)
@@ -93,7 +93,7 @@ static void __init sun9i_a80_pll4_setup(struct device_node *node)
        void __iomem *reg;
 
        reg = of_io_request_and_map(node, 0, of_node_full_name(node));
-       if (!reg) {
+       if (IS_ERR(reg)) {
                pr_err("Could not get registers for a80-pll4-clk: %s\n",
                       node->name);
                return;
@@ -154,7 +154,7 @@ static void __init sun9i_a80_gt_setup(struct device_node *node)
        struct clk *gt;
 
        reg = of_io_request_and_map(node, 0, of_node_full_name(node));
-       if (!reg) {
+       if (IS_ERR(reg)) {
                pr_err("Could not get registers for a80-gt-clk: %s\n",
                       node->name);
                return;
@@ -218,7 +218,7 @@ static void __init sun9i_a80_ahb_setup(struct device_node *node)
        void __iomem *reg;
 
        reg = of_io_request_and_map(node, 0, of_node_full_name(node));
-       if (!reg) {
+       if (IS_ERR(reg)) {
                pr_err("Could not get registers for a80-ahb-clk: %s\n",
                       node->name);
                return;
@@ -244,7 +244,7 @@ static void __init sun9i_a80_apb0_setup(struct device_node *node)
        void __iomem *reg;
 
        reg = of_io_request_and_map(node, 0, of_node_full_name(node));
-       if (!reg) {
+       if (IS_ERR(reg)) {
                pr_err("Could not get registers for a80-apb0-clk: %s\n",
                       node->name);
                return;
@@ -310,7 +310,7 @@ static void __init sun9i_a80_apb1_setup(struct device_node *node)
        void __iomem *reg;
 
        reg = of_io_request_and_map(node, 0, of_node_full_name(node));
-       if (!reg) {
+       if (IS_ERR(reg)) {
                pr_err("Could not get registers for a80-apb1-clk: %s\n",
                       node->name);
                return;
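
The checks above change because of_io_request_and_map() reports failure with an ERR_PTR() value, never NULL, so a NULL test silently accepts an error cookie as a valid mapping. A minimal sketch of the corrected idiom, using a hypothetical setup function:

#include <linux/err.h>
#include <linux/of_address.h>

static void __init example_clk_setup(struct device_node *node)
{
        void __iomem *reg;

        reg = of_io_request_and_map(node, 0, of_node_full_name(node));
        if (IS_ERR(reg)) {
                pr_err("Could not map registers for %s: %ld\n",
                       node->name, PTR_ERR(reg));
                return;
        }

        /* ... register clocks using reg ... */
}
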
index 7e1e2bd189b6a6f6f3c0047d6b3f4d6b5899c3c6..9a82f17d2d73daf611ac5e8651e35c2101ea6245 100644 (file)
@@ -198,6 +198,8 @@ static void __init sun6i_ahb1_clk_setup(struct device_node *node)
        int i = 0;
 
        reg = of_io_request_and_map(node, 0, of_node_full_name(node));
+       if (IS_ERR(reg))
+               return;
 
        /* we have a mux, we will have >1 parents */
        while (i < SUN6I_AHB1_MAX_PARENTS &&
index a86ed2f8d7af85faa09c0363a3d288b2af228fab..3a25f9588e67b58da122a82056854e5add44fd1c 100644 (file)
@@ -204,6 +204,17 @@ static void __init sun6i_a31_usb_setup(struct device_node *node)
 }
 CLK_OF_DECLARE(sun6i_a31_usb, "allwinner,sun6i-a31-usb-clk", sun6i_a31_usb_setup);
 
+static const struct usb_clk_data sun8i_a23_usb_clk_data __initconst = {
+       .clk_mask = BIT(16) | BIT(11) | BIT(10) | BIT(9) | BIT(8),
+       .reset_mask = BIT(2) | BIT(1) | BIT(0),
+};
+
+static void __init sun8i_a23_usb_setup(struct device_node *node)
+{
+       sunxi_usb_clk_setup(node, &sun8i_a23_usb_clk_data, &sun4i_a10_usb_lock);
+}
+CLK_OF_DECLARE(sun8i_a23_usb, "allwinner,sun8i-a23-usb-clk", sun8i_a23_usb_setup);
+
 static const struct usb_clk_data sun9i_a80_usb_mod_data __initconst = {
        .clk_mask = BIT(6) | BIT(5) | BIT(4) | BIT(3) | BIT(2) | BIT(1),
        .reset_mask = BIT(19) | BIT(18) | BIT(17),
diff --git a/drivers/clk/tegra/Kconfig b/drivers/clk/tegra/Kconfig
new file mode 100644 (file)
index 0000000..1ba30d1
--- /dev/null
@@ -0,0 +1,3 @@
+config TEGRA_CLK_EMC
+       def_bool y
+       depends on TEGRA124_EMC
index edb8358fa6cebab596e7f39c022736a4b6c31457..aec862ba7a17547b870a175ef436927b2c7307d3 100644 (file)
@@ -11,6 +11,7 @@ obj-y                                 += clk-tegra-periph.o
 obj-y                                  += clk-tegra-pmc.o
 obj-y                                  += clk-tegra-fixed.o
 obj-y                                  += clk-tegra-super-gen4.o
+obj-$(CONFIG_TEGRA_CLK_EMC)            += clk-emc.o
 obj-$(CONFIG_ARCH_TEGRA_2x_SOC)         += clk-tegra20.o
 obj-$(CONFIG_ARCH_TEGRA_3x_SOC)         += clk-tegra30.o
 obj-$(CONFIG_ARCH_TEGRA_114_SOC)       += clk-tegra114.o
diff --git a/drivers/clk/tegra/clk-emc.c b/drivers/clk/tegra/clk-emc.c
new file mode 100644 (file)
index 0000000..7649685
--- /dev/null
@@ -0,0 +1,538 @@
+/*
+ * drivers/clk/tegra/clk-emc.c
+ *
+ * Copyright (c) 2014, NVIDIA CORPORATION.  All rights reserved.
+ *
+ * Author:
+ *     Mikko Perttunen <mperttunen@nvidia.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/clk.h>
+#include <linux/clkdev.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/sort.h>
+#include <linux/string.h>
+
+#include <soc/tegra/fuse.h>
+#include <soc/tegra/emc.h>
+
+#include "clk.h"
+
+#define CLK_SOURCE_EMC 0x19c
+
+#define CLK_SOURCE_EMC_EMC_2X_CLK_DIVISOR_SHIFT 0
+#define CLK_SOURCE_EMC_EMC_2X_CLK_DIVISOR_MASK 0xff
+#define CLK_SOURCE_EMC_EMC_2X_CLK_DIVISOR(x) (((x) & CLK_SOURCE_EMC_EMC_2X_CLK_DIVISOR_MASK) << \
+                                             CLK_SOURCE_EMC_EMC_2X_CLK_DIVISOR_SHIFT)
+
+#define CLK_SOURCE_EMC_EMC_2X_CLK_SRC_SHIFT 29
+#define CLK_SOURCE_EMC_EMC_2X_CLK_SRC_MASK 0x7
+#define CLK_SOURCE_EMC_EMC_2X_CLK_SRC(x) (((x) & CLK_SOURCE_EMC_EMC_2X_CLK_SRC_MASK) << \
+                                         CLK_SOURCE_EMC_EMC_2X_CLK_SRC_SHIFT)
+
+static const char * const emc_parent_clk_names[] = {
+       "pll_m", "pll_c", "pll_p", "clk_m", "pll_m_ud",
+       "pll_c2", "pll_c3", "pll_c_ud"
+};
+
+/*
+ * List of clock sources for various parents the EMC clock can have.
+ * When we change the timing to a timing with a parent that has the same
+ * clock source as the current parent, we must first change to a backup
+ * timing that has a different clock source.
+ */
+
+#define EMC_SRC_PLL_M 0
+#define EMC_SRC_PLL_C 1
+#define EMC_SRC_PLL_P 2
+#define EMC_SRC_CLK_M 3
+#define EMC_SRC_PLL_C2 4
+#define EMC_SRC_PLL_C3 5
+
+static const char emc_parent_clk_sources[] = {
+       EMC_SRC_PLL_M, EMC_SRC_PLL_C, EMC_SRC_PLL_P, EMC_SRC_CLK_M,
+       EMC_SRC_PLL_M, EMC_SRC_PLL_C2, EMC_SRC_PLL_C3, EMC_SRC_PLL_C
+};
+
+struct emc_timing {
+       unsigned long rate, parent_rate;
+       u8 parent_index;
+       struct clk *parent;
+       u32 ram_code;
+};
+
+struct tegra_clk_emc {
+       struct clk_hw hw;
+       void __iomem *clk_regs;
+       struct clk *prev_parent;
+       bool changing_timing;
+
+       struct device_node *emc_node;
+       struct tegra_emc *emc;
+
+       int num_timings;
+       struct emc_timing *timings;
+       spinlock_t *lock;
+};
+
+/* Common clock framework callback implementations */
+
+static unsigned long emc_recalc_rate(struct clk_hw *hw,
+                                    unsigned long parent_rate)
+{
+       struct tegra_clk_emc *tegra;
+       u32 val, div;
+
+       tegra = container_of(hw, struct tegra_clk_emc, hw);
+
+       /*
+        * CCF wrongly assumes that the parent won't change during set_rate,
+        * so get the parent rate explicitly.
+        */
+       parent_rate = __clk_get_rate(__clk_get_parent(hw->clk));
+
+       val = readl(tegra->clk_regs + CLK_SOURCE_EMC);
+       div = val & CLK_SOURCE_EMC_EMC_2X_CLK_DIVISOR_MASK;
+
+       return parent_rate / (div + 2) * 2;
+}
+
+/*
+ * Rounds up unless no higher rate exists, in which case down. This way is
+ * safer since things have EMC rate floors. Also don't touch parent_rate
+ * since we don't want the CCF to play with our parent clocks.
+ */
+static long emc_determine_rate(struct clk_hw *hw, unsigned long rate,
+                              unsigned long min_rate,
+                              unsigned long max_rate,
+                              unsigned long *best_parent_rate,
+                              struct clk_hw **best_parent_hw)
+{
+       struct tegra_clk_emc *tegra;
+       u8 ram_code = tegra_read_ram_code();
+       struct emc_timing *timing = NULL;
+       int i;
+
+       tegra = container_of(hw, struct tegra_clk_emc, hw);
+
+       for (i = 0; i < tegra->num_timings; i++) {
+               if (tegra->timings[i].ram_code != ram_code)
+                       continue;
+
+               timing = tegra->timings + i;
+
+               if (timing->rate > max_rate) {
+                       i = min(i, 1);
+                       return tegra->timings[i - 1].rate;
+               }
+
+               if (timing->rate < min_rate)
+                       continue;
+
+               if (timing->rate >= rate)
+                       return timing->rate;
+       }
+
+       if (timing)
+               return timing->rate;
+
+       return __clk_get_rate(hw->clk);
+}
+
+static u8 emc_get_parent(struct clk_hw *hw)
+{
+       struct tegra_clk_emc *tegra;
+       u32 val;
+
+       tegra = container_of(hw, struct tegra_clk_emc, hw);
+
+       val = readl(tegra->clk_regs + CLK_SOURCE_EMC);
+
+       return (val >> CLK_SOURCE_EMC_EMC_2X_CLK_SRC_SHIFT)
+               & CLK_SOURCE_EMC_EMC_2X_CLK_SRC_MASK;
+}
+
+static struct tegra_emc *emc_ensure_emc_driver(struct tegra_clk_emc *tegra)
+{
+       struct platform_device *pdev;
+
+       if (tegra->emc)
+               return tegra->emc;
+
+       if (!tegra->emc_node)
+               return NULL;
+
+       pdev = of_find_device_by_node(tegra->emc_node);
+       if (!pdev) {
+               pr_err("%s: could not get external memory controller\n",
+                      __func__);
+               return NULL;
+       }
+
+       of_node_put(tegra->emc_node);
+       tegra->emc_node = NULL;
+
+       tegra->emc = platform_get_drvdata(pdev);
+       if (!tegra->emc) {
+               pr_err("%s: cannot find EMC driver\n", __func__);
+               return NULL;
+       }
+
+       return tegra->emc;
+}
+
+static int emc_set_timing(struct tegra_clk_emc *tegra,
+                         struct emc_timing *timing)
+{
+       int err;
+       u8 div;
+       u32 car_value;
+       unsigned long flags = 0;
+       struct tegra_emc *emc = emc_ensure_emc_driver(tegra);
+
+       if (!emc)
+               return -ENOENT;
+
+       pr_debug("going to rate %ld prate %ld p %s\n", timing->rate,
+                timing->parent_rate, __clk_get_name(timing->parent));
+
+       if (emc_get_parent(&tegra->hw) == timing->parent_index &&
+           clk_get_rate(timing->parent) != timing->parent_rate) {
+               BUG();
+               return -EINVAL;
+       }
+
+       tegra->changing_timing = true;
+
+       err = clk_set_rate(timing->parent, timing->parent_rate);
+       if (err) {
+               pr_err("cannot change parent %s rate to %ld: %d\n",
+                      __clk_get_name(timing->parent), timing->parent_rate,
+                      err);
+
+               return err;
+       }
+
+       err = clk_prepare_enable(timing->parent);
+       if (err) {
+               pr_err("cannot enable parent clock: %d\n", err);
+               return err;
+       }
+
+       div = timing->parent_rate / (timing->rate / 2) - 2;
+
+       err = tegra_emc_prepare_timing_change(emc, timing->rate);
+       if (err)
+               return err;
+
+       spin_lock_irqsave(tegra->lock, flags);
+
+       car_value = readl(tegra->clk_regs + CLK_SOURCE_EMC);
+
+       car_value &= ~CLK_SOURCE_EMC_EMC_2X_CLK_SRC(~0);
+       car_value |= CLK_SOURCE_EMC_EMC_2X_CLK_SRC(timing->parent_index);
+
+       car_value &= ~CLK_SOURCE_EMC_EMC_2X_CLK_DIVISOR(~0);
+       car_value |= CLK_SOURCE_EMC_EMC_2X_CLK_DIVISOR(div);
+
+       writel(car_value, tegra->clk_regs + CLK_SOURCE_EMC);
+
+       spin_unlock_irqrestore(tegra->lock, flags);
+
+       tegra_emc_complete_timing_change(emc, timing->rate);
+
+       clk_hw_reparent(&tegra->hw, __clk_get_hw(timing->parent));
+       clk_disable_unprepare(tegra->prev_parent);
+
+       tegra->prev_parent = timing->parent;
+       tegra->changing_timing = false;
+
+       return 0;
+}
+
+/*
+ * Get backup timing to use as an intermediate step when a change between
+ * two timings with the same clock source has been requested. First try to
+ * find a timing with a higher clock rate to avoid a rate below any set rate
+ * floors. If that is not possible, find a lower rate.
+ */
+static struct emc_timing *get_backup_timing(struct tegra_clk_emc *tegra,
+                                           int timing_index)
+{
+       int i;
+       u32 ram_code = tegra_read_ram_code();
+       struct emc_timing *timing;
+
+       for (i = timing_index+1; i < tegra->num_timings; i++) {
+               timing = tegra->timings + i;
+               if (timing->ram_code != ram_code)
+                       continue;
+
+               if (emc_parent_clk_sources[timing->parent_index] !=
+                   emc_parent_clk_sources[
+                     tegra->timings[timing_index].parent_index])
+                       return timing;
+       }
+
+       for (i = timing_index-1; i >= 0; --i) {
+               timing = tegra->timings + i;
+               if (timing->ram_code != ram_code)
+                       continue;
+
+               if (emc_parent_clk_sources[timing->parent_index] !=
+                   emc_parent_clk_sources[
+                     tegra->timings[timing_index].parent_index])
+                       return timing;
+       }
+
+       return NULL;
+}
+
+static int emc_set_rate(struct clk_hw *hw, unsigned long rate,
+                       unsigned long parent_rate)
+{
+       struct tegra_clk_emc *tegra;
+       struct emc_timing *timing = NULL;
+       int i, err;
+       u32 ram_code = tegra_read_ram_code();
+
+       tegra = container_of(hw, struct tegra_clk_emc, hw);
+
+       if (__clk_get_rate(hw->clk) == rate)
+               return 0;
+
+       /*
+        * When emc_set_timing changes the parent rate, CCF will propagate
+        * that downward to us, so ignore any set_rate calls while a rate
+        * change is already going on.
+        */
+       if (tegra->changing_timing)
+               return 0;
+
+       for (i = 0; i < tegra->num_timings; i++) {
+               if (tegra->timings[i].rate == rate &&
+                   tegra->timings[i].ram_code == ram_code) {
+                       timing = tegra->timings + i;
+                       break;
+               }
+       }
+
+       if (!timing) {
+               pr_err("cannot switch to rate %ld without emc table\n", rate);
+               return -EINVAL;
+       }
+
+       if (emc_parent_clk_sources[emc_get_parent(hw)] ==
+           emc_parent_clk_sources[timing->parent_index] &&
+           clk_get_rate(timing->parent) != timing->parent_rate) {
+               /*
+                * Parent clock source not changed but parent rate has changed,
+                * need to temporarily switch to another parent
+                */
+
+               struct emc_timing *backup_timing;
+
+               backup_timing = get_backup_timing(tegra, i);
+               if (!backup_timing) {
+                       pr_err("cannot find backup timing\n");
+                       return -EINVAL;
+               }
+
+               pr_debug("using %ld as backup rate when going to %ld\n",
+                        backup_timing->rate, rate);
+
+               err = emc_set_timing(tegra, backup_timing);
+               if (err) {
+                       pr_err("cannot set backup timing: %d\n", err);
+                       return err;
+               }
+       }
+
+       return emc_set_timing(tegra, timing);
+}
+
+/* Initialization and deinitialization */
+
+static int load_one_timing_from_dt(struct tegra_clk_emc *tegra,
+                                  struct emc_timing *timing,
+                                  struct device_node *node)
+{
+       int err, i;
+       u32 tmp;
+
+       err = of_property_read_u32(node, "clock-frequency", &tmp);
+       if (err) {
+               pr_err("timing %s: failed to read rate\n", node->full_name);
+               return err;
+       }
+
+       timing->rate = tmp;
+
+       err = of_property_read_u32(node, "nvidia,parent-clock-frequency", &tmp);
+       if (err) {
+               pr_err("timing %s: failed to read parent rate\n",
+                      node->full_name);
+               return err;
+       }
+
+       timing->parent_rate = tmp;
+
+       timing->parent = of_clk_get_by_name(node, "emc-parent");
+       if (IS_ERR(timing->parent)) {
+               pr_err("timing %s: failed to get parent clock\n",
+                      node->full_name);
+               return PTR_ERR(timing->parent);
+       }
+
+       timing->parent_index = 0xff;
+       for (i = 0; i < ARRAY_SIZE(emc_parent_clk_names); i++) {
+               if (!strcmp(emc_parent_clk_names[i],
+                           __clk_get_name(timing->parent))) {
+                       timing->parent_index = i;
+                       break;
+               }
+       }
+       if (timing->parent_index == 0xff) {
+               pr_err("timing %s: %s is not a valid parent\n",
+                      node->full_name, __clk_get_name(timing->parent));
+               clk_put(timing->parent);
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+static int cmp_timings(const void *_a, const void *_b)
+{
+       const struct emc_timing *a = _a;
+       const struct emc_timing *b = _b;
+
+       if (a->rate < b->rate)
+               return -1;
+       else if (a->rate == b->rate)
+               return 0;
+       else
+               return 1;
+}
+
+static int load_timings_from_dt(struct tegra_clk_emc *tegra,
+                               struct device_node *node,
+                               u32 ram_code)
+{
+       struct device_node *child;
+       int child_count = of_get_child_count(node);
+       int i = 0, err;
+
+       tegra->timings = kcalloc(child_count, sizeof(struct emc_timing),
+                                GFP_KERNEL);
+       if (!tegra->timings)
+               return -ENOMEM;
+
+       tegra->num_timings = child_count;
+
+       for_each_child_of_node(node, child) {
+               struct emc_timing *timing = tegra->timings + (i++);
+
+               err = load_one_timing_from_dt(tegra, timing, child);
+               if (err)
+                       return err;
+
+               timing->ram_code = ram_code;
+       }
+
+       sort(tegra->timings, tegra->num_timings, sizeof(struct emc_timing),
+            cmp_timings, NULL);
+
+       return 0;
+}
+
+static const struct clk_ops tegra_clk_emc_ops = {
+       .recalc_rate = emc_recalc_rate,
+       .determine_rate = emc_determine_rate,
+       .set_rate = emc_set_rate,
+       .get_parent = emc_get_parent,
+};
+
+struct clk *tegra_clk_register_emc(void __iomem *base, struct device_node *np,
+                                  spinlock_t *lock)
+{
+       struct tegra_clk_emc *tegra;
+       struct clk_init_data init;
+       struct device_node *node;
+       u32 node_ram_code;
+       struct clk *clk;
+       int err;
+
+       tegra = kcalloc(1, sizeof(*tegra), GFP_KERNEL);
+       if (!tegra)
+               return ERR_PTR(-ENOMEM);
+
+       tegra->clk_regs = base;
+       tegra->lock = lock;
+
+       tegra->num_timings = 0;
+
+       for_each_child_of_node(np, node) {
+               err = of_property_read_u32(node, "nvidia,ram-code",
+                                          &node_ram_code);
+               if (err) {
+                       of_node_put(node);
+                       continue;
+               }
+
+               /*
+                * Store timings for all ram codes as we cannot read the
+                * fuses until the apbmisc driver is loaded.
+                */
+               err = load_timings_from_dt(tegra, node, node_ram_code);
+               if (err)
+                       return ERR_PTR(err);
+               of_node_put(node);
+               break;
+       }
+
+       if (tegra->num_timings == 0)
+               pr_warn("%s: no memory timings registered\n", __func__);
+
+       tegra->emc_node = of_parse_phandle(np,
+                       "nvidia,external-memory-controller", 0);
+       if (!tegra->emc_node)
+               pr_warn("%s: couldn't find node for EMC driver\n", __func__);
+
+       init.name = "emc";
+       init.ops = &tegra_clk_emc_ops;
+       init.flags = 0;
+       init.parent_names = emc_parent_clk_names;
+       init.num_parents = ARRAY_SIZE(emc_parent_clk_names);
+
+       tegra->hw.init = &init;
+
+       clk = clk_register(NULL, &tegra->hw);
+       if (IS_ERR(clk))
+               return clk;
+
+       tegra->prev_parent = clk_get_parent_by_index(
+               tegra->hw.clk, emc_get_parent(&tegra->hw));
+       tegra->changing_timing = false;
+
+       /* Allow debugging tools to see the EMC clock */
+       clk_register_clkdev(clk, "emc", "tegra-clk-debug");
+
+       clk_prepare_enable(clk);
+
+       return clk;
+};
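
The divisor handling in emc_recalc_rate() and emc_set_timing() above stores parent_rate / (rate / 2) - 2 in the CLK_SOURCE_EMC divisor field and recovers the rate as parent_rate / (div + 2) * 2. A small worked example with a hypothetical 800 MHz parent:

/*   target rate 800 MHz: div  = 800000000 / 400000000 - 2 = 0
 *                        back = 800000000 / (0 + 2) * 2   = 800000000
 *   target rate 400 MHz: div  = 800000000 / 200000000 - 2 = 2
 *                        back = 800000000 / (2 + 2) * 2   = 400000000
 */
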
index 11f857cd5f6a2bf01d86dc2b95497905d971a85e..e8cca3eac00742713cc8fe39f9a63c8fe9b62def 100644 (file)
@@ -152,11 +152,6 @@ static unsigned long tegra124_input_freq[] = {
        [12] = 260000000,
 };
 
-static const char *mux_pllmcp_clkm[] = {
-       "pll_m", "pll_c", "pll_p", "clk_m", "pll_m_ud", "pll_c2", "pll_c3",
-};
-#define mux_pllmcp_clkm_idx NULL
-
 static struct div_nmp pllxc_nmp = {
        .divm_shift = 0,
        .divm_width = 8,
@@ -791,7 +786,6 @@ static struct tegra_clk tegra124_clks[tegra_clk_max] __initdata = {
        [tegra_clk_i2c2] = { .dt_id = TEGRA124_CLK_I2C2, .present = true },
        [tegra_clk_uartc] = { .dt_id = TEGRA124_CLK_UARTC, .present = true },
        [tegra_clk_mipi_cal] = { .dt_id = TEGRA124_CLK_MIPI_CAL, .present = true },
-       [tegra_clk_emc] = { .dt_id = TEGRA124_CLK_EMC, .present = true },
        [tegra_clk_usb2] = { .dt_id = TEGRA124_CLK_USB2, .present = true },
        [tegra_clk_usb3] = { .dt_id = TEGRA124_CLK_USB3, .present = true },
        [tegra_clk_vde_8] = { .dt_id = TEGRA124_CLK_VDE, .present = true },
@@ -1127,13 +1121,7 @@ static __init void tegra124_periph_clk_init(void __iomem *clk_base,
                                             periph_clk_enb_refcnt);
        clks[TEGRA124_CLK_DSIB] = clk;
 
-       /* emc mux */
-       clk = clk_register_mux(NULL, "emc_mux", mux_pllmcp_clkm,
-                              ARRAY_SIZE(mux_pllmcp_clkm), 0,
-                              clk_base + CLK_SOURCE_EMC,
-                              29, 3, 0, &emc_lock);
-
-       clk = tegra_clk_register_mc("mc", "emc_mux", clk_base + CLK_SOURCE_EMC,
+       clk = tegra_clk_register_mc("mc", "emc", clk_base + CLK_SOURCE_EMC,
                                    &emc_lock);
        clks[TEGRA124_CLK_MC] = clk;
 
@@ -1389,7 +1377,6 @@ static struct tegra_clk_init_table common_init_table[] __initdata = {
        {TEGRA124_CLK_XUSB_HOST_SRC, TEGRA124_CLK_PLL_RE_OUT, 112000000, 0},
        {TEGRA124_CLK_SATA, TEGRA124_CLK_PLL_P, 104000000, 0},
        {TEGRA124_CLK_SATA_OOB, TEGRA124_CLK_PLL_P, 204000000, 0},
-       {TEGRA124_CLK_EMC, TEGRA124_CLK_CLK_MAX, 0, 1},
        {TEGRA124_CLK_MSELECT, TEGRA124_CLK_CLK_MAX, 0, 1},
        {TEGRA124_CLK_CSITE, TEGRA124_CLK_CLK_MAX, 0, 1},
        {TEGRA124_CLK_TSENSOR, TEGRA124_CLK_CLK_M, 400000, 0},
@@ -1513,6 +1500,10 @@ static void __init tegra124_132_clock_init_post(struct device_node *np)
        tegra_super_clk_gen4_init(clk_base, pmc_base, tegra124_clks,
                                  &pll_x_params);
        tegra_add_of_provider(np);
+
+       clks[TEGRA124_CLK_EMC] = tegra_clk_register_emc(clk_base, np,
+                                                       &emc_lock);
+
        tegra_register_devclks(devclks, ARRAY_SIZE(devclks));
 
        tegra_cpu_car_ops = &tegra124_cpu_car_ops;
index 4b26509fc21857ede7ea3c41ed79d4fad3175c8f..0af3e834dd24a25d3e3e0cbfa020c596d57bc908 100644 (file)
@@ -679,7 +679,7 @@ static struct tegra_devclk devclks[] __initdata = {
        { .dev_id = "tegra30-dam.1", .dt_id = TEGRA30_CLK_DAM1 },
        { .dev_id = "tegra30-dam.2", .dt_id = TEGRA30_CLK_DAM2 },
        { .con_id = "hda", .dev_id = "tegra30-hda", .dt_id = TEGRA30_CLK_HDA },
-       { .con_id = "hda2codec", .dev_id = "tegra30-hda", .dt_id = TEGRA30_CLK_HDA2CODEC_2X },
+       { .con_id = "hda2codec_2x", .dev_id = "tegra30-hda", .dt_id = TEGRA30_CLK_HDA2CODEC_2X },
        { .dev_id = "spi_tegra.0", .dt_id = TEGRA30_CLK_SBC1 },
        { .dev_id = "spi_tegra.1", .dt_id = TEGRA30_CLK_SBC2 },
        { .dev_id = "spi_tegra.2", .dt_id = TEGRA30_CLK_SBC3 },
index d6ac00647fafb05311b646f208f7175fa0e618a4..75ddc8ff8bd4aff97d61ff9ae27095aa2ba0cf33 100644 (file)
@@ -623,6 +623,18 @@ void tegra_super_clk_gen4_init(void __iomem *clk_base,
                        void __iomem *pmc_base, struct tegra_clk *tegra_clks,
                        struct tegra_clk_pll_params *pll_params);
 
+#ifdef CONFIG_TEGRA_CLK_EMC
+struct clk *tegra_clk_register_emc(void __iomem *base, struct device_node *np,
+                                  spinlock_t *lock);
+#else
+static inline struct clk *tegra_clk_register_emc(void __iomem *base,
+                                                struct device_node *np,
+                                                spinlock_t *lock)
+{
+       return NULL;
+}
+#endif
+
 void tegra114_clock_tune_cpu_trimmers_high(void);
 void tegra114_clock_tune_cpu_trimmers_low(void);
 void tegra114_clock_tune_cpu_trimmers_init(void);
index d86bc46b93bdfeae630f94a55745315a0d8c1ec8..19e543a32e2bad1e4591fb3106258d9f6769551b 100644 (file)
@@ -155,7 +155,7 @@ static int atl_clk_set_rate(struct clk_hw *hw, unsigned long rate,
        return 0;
 }
 
-const struct clk_ops atl_clk_ops = {
+static const struct clk_ops atl_clk_ops = {
        .enable         = atl_clk_enable,
        .disable        = atl_clk_disable,
        .is_enabled     = atl_clk_is_enabled,
@@ -167,7 +167,7 @@ const struct clk_ops atl_clk_ops = {
 static void __init of_dra7_atl_clock_setup(struct device_node *node)
 {
        struct dra7_atl_desc *clk_hw = NULL;
-       struct clk_init_data init = { 0 };
+       struct clk_init_data init = { NULL };
        const char **parent_names = NULL;
        struct clk *clk;
 
@@ -252,6 +252,11 @@ static int of_dra7_atl_clk_probe(struct platform_device *pdev)
                }
 
                clk = of_clk_get_from_provider(&clkspec);
+               if (IS_ERR(clk)) {
+                       pr_err("%s: failed to get atl clock %d from provider\n",
+                              __func__, i);
+                       return PTR_ERR(clk);
+               }
 
                cdesc = to_atl_desc(__clk_get_hw(clk));
                cdesc->cinfo = cinfo;
index 0ebe5c51062b9ee2c2aa8e3fbe5dd84fcb14f753..64bb5e8a3b8cd845952bb4e97ffdead89f366d97 100644 (file)
@@ -122,14 +122,14 @@ void __iomem *ti_clk_get_reg_addr(struct device_node *node, int index)
 
        if (i == CLK_MAX_MEMMAPS) {
                pr_err("clk-provider not found for %s!\n", node->name);
-               return ERR_PTR(-ENOENT);
+               return IOMEM_ERR_PTR(-ENOENT);
        }
 
        reg->index = i;
 
        if (of_property_read_u32_index(node, "reg", index, &val)) {
                pr_err("%s must have reg[%d]!\n", node->name, index);
-               return ERR_PTR(-EINVAL);
+               return IOMEM_ERR_PTR(-EINVAL);
        }
 
        reg->offset = val;
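
IOMEM_ERR_PTR() keeps the error-pointer convention while carrying the __iomem annotation, so sparse stays quiet and callers of ti_clk_get_reg_addr() can keep testing the returned cookie with IS_ERR(). As defined in <linux/io.h> at the time of this merge it is essentially:

/* #define IOMEM_ERR_PTR(err) (__force void __iomem *)ERR_PTR(err) */
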
index 35fe1085480cf33da68547e76a7e126d5b95738b..b82ef07f340341052fa8a92ddacd8f65845ba843 100644 (file)
@@ -32,7 +32,7 @@ static void __init of_ti_clockdomain_setup(struct device_node *node)
        int i;
        int num_clks;
 
-       num_clks = of_count_phandle_with_args(node, "clocks", "#clock-cells");
+       num_clks = of_clk_get_parent_count(node);
 
        for (i = 0; i < num_clks; i++) {
                clk = of_clk_get(node, i);
index 11478a501c3074c53071bf9afb206b375fbda14c..2aacf7a3bcaeb137e4e7bb973cea2874ff1aa6a7 100644 (file)
@@ -177,7 +177,7 @@ cleanup:
 }
 
 #if defined(CONFIG_ARCH_OMAP3) && defined(CONFIG_ATAGS)
-void __iomem *_get_reg(u8 module, u16 offset)
+static void __iomem *_get_reg(u8 module, u16 offset)
 {
        u32 reg;
        struct clk_omap_reg *reg_setup;
index ffcd8e09e85b9eae1a1ffea5dda006b976a93fc5..730aa62454a2454f6a5311adbd16bfff57e366d2 100644 (file)
@@ -621,13 +621,13 @@ static void __init ti_fapll_setup(struct device_node *node)
 
                /* Check for hardwired audio_pll_clk1 */
                if (is_audio_pll_clk1(freq)) {
-                       freq = 0;
-                       div = 0;
+                       freq = NULL;
+                       div = NULL;
                } else {
                        /* Does the synthesizer have a FREQ register? */
                        v = readl_relaxed(freq);
                        if (!v)
-                               freq = 0;
+                               freq = NULL;
                }
                synth_clk = ti_fapll_synth_setup(fd, freq, div, output_instance,
                                                 output_name, node->name,
index 80069c370a47a1c560f05929ce7bd2aedb651514..4626b97b7d83295b06d0259a5f55574572dc6a09 100644 (file)
@@ -116,11 +116,12 @@ void u8500_clk_init(u32 clkrst1_base, u32 clkrst2_base, u32 clkrst3_base,
        clk_register_clkdev(clk, NULL, "hdmi");
        clk_register_clkdev(clk, "hdmi", "mcde");
 
-       clk = clk_reg_prcmu_gate("apeatclk", NULL, PRCMU_APEATCLK, CLK_IS_ROOT);
+       clk = clk_reg_prcmu_scalable("apeatclk", NULL, PRCMU_APEATCLK, 0,
+                                    CLK_IS_ROOT|CLK_SET_RATE_GATE);
        clk_register_clkdev(clk, NULL, "apeat");
 
-       clk = clk_reg_prcmu_gate("apetraceclk", NULL, PRCMU_APETRACECLK,
-                               CLK_IS_ROOT);
+       clk = clk_reg_prcmu_scalable("apetraceclk", NULL, PRCMU_APETRACECLK, 0,
+                               CLK_IS_ROOT|CLK_SET_RATE_GATE);
        clk_register_clkdev(clk, NULL, "apetrace");
 
        clk = clk_reg_prcmu_gate("mcdeclk", NULL, PRCMU_MCDECLK, CLK_IS_ROOT);
index 7b55ef89baa53c7ef6d59c19dcf8c9287b773199..e319ef912dc6c27bbaba940a42a4ef97a5bf07a0 100644 (file)
@@ -166,8 +166,8 @@ void u8500_of_clk_init(u32 clkrst1_base, u32 clkrst2_base, u32 clkrst3_base,
        clk = clk_reg_prcmu_gate("apeatclk", NULL, PRCMU_APEATCLK, CLK_IS_ROOT);
        prcmu_clk[PRCMU_APEATCLK] = clk;
 
-       clk = clk_reg_prcmu_gate("apetraceclk", NULL, PRCMU_APETRACECLK,
-                               CLK_IS_ROOT);
+       clk = clk_reg_prcmu_scalable("apetraceclk", NULL, PRCMU_APETRACECLK, 0,
+                               CLK_IS_ROOT|CLK_SET_RATE_GATE);
        prcmu_clk[PRCMU_APETRACECLK] = clk;
 
        clk = clk_reg_prcmu_gate("mcdeclk", NULL, PRCMU_MCDECLK, CLK_IS_ROOT);
index c6e86a9a2aa3da2313711ac0c1bc6a46032cde6c..a96dd8e53fdb2d9537a363c480418db859c8c0b1 100644 (file)
@@ -135,7 +135,7 @@ static struct clk *clk_sp810_timerclken_of_get(struct of_phandle_args *clkspec,
        return sp810->timerclken[clkspec->args[0]].clk;
 }
 
-void __init clk_sp810_of_setup(struct device_node *node)
+static void __init clk_sp810_of_setup(struct device_node *node)
 {
        struct clk_sp810 *sp810 = kzalloc(sizeof(*sp810), GFP_KERNEL);
        const char *parent_names[2];
@@ -156,7 +156,7 @@ void __init clk_sp810_of_setup(struct device_node *node)
                        "timclk");
        parent_names[1] = of_clk_get_parent_name(node, sp810->timclk_index);
 
-       if (parent_names[0] <= 0 || parent_names[1] <= 0) {
+       if (!parent_names[0] || !parent_names[1]) {
                pr_warn("Failed to obtain parent clocks for SP810!\n");
                return;
        }
index 40cb113be6af110139bf533d142b3ea304cb6010..de614384bb447512344e089a9ed56ea657f731f3 100644 (file)
@@ -85,22 +85,29 @@ static DEFINE_SPINLOCK(canmioclk_lock);
 static DEFINE_SPINLOCK(dbgclk_lock);
 static DEFINE_SPINLOCK(aperclk_lock);
 
-static const char *armpll_parents[] __initdata = {"armpll_int", "ps_clk"};
-static const char *ddrpll_parents[] __initdata = {"ddrpll_int", "ps_clk"};
-static const char *iopll_parents[] __initdata = {"iopll_int", "ps_clk"};
+static const char *const armpll_parents[] __initconst = {"armpll_int",
+       "ps_clk"};
+static const char *const ddrpll_parents[] __initconst = {"ddrpll_int",
+       "ps_clk"};
+static const char *const iopll_parents[] __initconst = {"iopll_int",
+       "ps_clk"};
 static const char *gem0_mux_parents[] __initdata = {"gem0_div1", "dummy_name"};
 static const char *gem1_mux_parents[] __initdata = {"gem1_div1", "dummy_name"};
-static const char *can0_mio_mux2_parents[] __initdata = {"can0_gate",
+static const char *const can0_mio_mux2_parents[] __initconst = {"can0_gate",
        "can0_mio_mux"};
-static const char *can1_mio_mux2_parents[] __initdata = {"can1_gate",
+static const char *const can1_mio_mux2_parents[] __initconst = {"can1_gate",
        "can1_mio_mux"};
 static const char *dbg_emio_mux_parents[] __initdata = {"dbg_div",
        "dummy_name"};
 
-static const char *dbgtrc_emio_input_names[] __initdata = {"trace_emio_clk"};
-static const char *gem0_emio_input_names[] __initdata = {"gem0_emio_clk"};
-static const char *gem1_emio_input_names[] __initdata = {"gem1_emio_clk"};
-static const char *swdt_ext_clk_input_names[] __initdata = {"swdt_ext_clk"};
+static const char *const dbgtrc_emio_input_names[] __initconst = {
+       "trace_emio_clk"};
+static const char *const gem0_emio_input_names[] __initconst = {
+       "gem0_emio_clk"};
+static const char *const gem1_emio_input_names[] __initconst = {
+       "gem1_emio_clk"};
+static const char *const swdt_ext_clk_input_names[] __initconst = {
+       "swdt_ext_clk"};
 
 static void __init zynq_clk_register_fclk(enum zynq_clk fclk,
                const char *clk_name, void __iomem *fclk_ctrl_reg,
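
The extra const in the hunk above matters for the section annotations: __initconst may only be applied to objects that are themselves read-only. A two-line illustration of the distinction, with hypothetical array names:

/* An array of pointers to const char is still a writable array, so it can
 * only live in __initdata; making the array itself const allows __initconst.
 */
static const char *writable_array_of_ptrs[] __initdata = { "a", "b" };
static const char *const ro_array_of_ptrs[] __initconst = { "a", "b" };
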
index 935b05936dbdd9588764b0c04bf84db32b425af0..9064ff743598a42515333f073cbd3d86193a2524 100644 (file)
@@ -462,15 +462,12 @@ static int exynos4_local_timer_setup(struct clock_event_device *evt)
        exynos4_mct_write(TICK_BASE_CNT, mevt->base + MCT_L_TCNTB_OFFSET);
 
        if (mct_int_type == MCT_INT_SPI) {
-               evt->irq = mct_irqs[MCT_L0_IRQ + cpu];
-               if (request_irq(evt->irq, exynos4_mct_tick_isr,
-                               IRQF_TIMER | IRQF_NOBALANCING,
-                               evt->name, mevt)) {
-                       pr_err("exynos-mct: cannot register IRQ %d\n",
-                               evt->irq);
+
+               if (evt->irq == -1)
                        return -EIO;
-               }
-               irq_force_affinity(mct_irqs[MCT_L0_IRQ + cpu], cpumask_of(cpu));
+
+               irq_force_affinity(evt->irq, cpumask_of(cpu));
+               enable_irq(evt->irq);
        } else {
                enable_percpu_irq(mct_irqs[MCT_L0_IRQ], 0);
        }
@@ -483,10 +480,12 @@ static int exynos4_local_timer_setup(struct clock_event_device *evt)
 static void exynos4_local_timer_stop(struct clock_event_device *evt)
 {
        evt->set_mode(CLOCK_EVT_MODE_UNUSED, evt);
-       if (mct_int_type == MCT_INT_SPI)
-               free_irq(evt->irq, this_cpu_ptr(&percpu_mct_tick));
-       else
+       if (mct_int_type == MCT_INT_SPI) {
+               if (evt->irq != -1)
+                       disable_irq_nosync(evt->irq);
+       } else {
                disable_percpu_irq(mct_irqs[MCT_L0_IRQ]);
+       }
 }
 
 static int exynos4_mct_cpu_notify(struct notifier_block *self,
@@ -518,7 +517,7 @@ static struct notifier_block exynos4_mct_cpu_nb = {
 
 static void __init exynos4_timer_resources(struct device_node *np, void __iomem *base)
 {
-       int err;
+       int err, cpu;
        struct mct_clock_event_device *mevt = this_cpu_ptr(&percpu_mct_tick);
        struct clk *mct_clk, *tick_clk;
 
@@ -545,7 +544,25 @@ static void __init exynos4_timer_resources(struct device_node *np, void __iomem
                WARN(err, "MCT: can't request IRQ %d (%d)\n",
                     mct_irqs[MCT_L0_IRQ], err);
        } else {
-               irq_set_affinity(mct_irqs[MCT_L0_IRQ], cpumask_of(0));
+               for_each_possible_cpu(cpu) {
+                       int mct_irq = mct_irqs[MCT_L0_IRQ + cpu];
+                       struct mct_clock_event_device *pcpu_mevt =
+                               per_cpu_ptr(&percpu_mct_tick, cpu);
+
+                       pcpu_mevt->evt.irq = -1;
+
+                       irq_set_status_flags(mct_irq, IRQ_NOAUTOEN);
+                       if (request_irq(mct_irq,
+                                       exynos4_mct_tick_isr,
+                                       IRQF_TIMER | IRQF_NOBALANCING,
+                                       pcpu_mevt->name, pcpu_mevt)) {
+                               pr_err("exynos-mct: cannot register IRQ (cpu%d)\n",
+                                                                       cpu);
+
+                               continue;
+                       }
+                       pcpu_mevt->evt.irq = mct_irq;
+               }
        }
 
        err = register_cpu_notifier(&exynos4_mct_cpu_nb);
index 611cb09239ebe1214837c1e5048e479e5c50d71e..cc8a71c267b88132496efc6bc083ed5107a7e46b 100644 (file)
@@ -36,17 +36,6 @@ config ARM_EXYNOS_CPUFREQ
 
          If in doubt, say N.
 
-config ARM_EXYNOS4210_CPUFREQ
-       bool "SAMSUNG EXYNOS4210"
-       depends on CPU_EXYNOS4210
-       depends on ARM_EXYNOS_CPUFREQ
-       default y
-       help
-         This adds the CPUFreq driver for Samsung EXYNOS4210
-         SoC (S5PV310 or S5PC210).
-
-         If in doubt, say N.
-
 config ARM_EXYNOS4X12_CPUFREQ
        bool "SAMSUNG EXYNOS4x12"
        depends on SOC_EXYNOS4212 || SOC_EXYNOS4412
index cdce92ae2e8b7fdb81e4d25e3ab03ced14e63aac..2169bf792db76cbcea69f8b5d57670004853e47b 100644 (file)
@@ -54,7 +54,6 @@ obj-$(CONFIG_ARCH_DAVINCI)            += davinci-cpufreq.o
 obj-$(CONFIG_UX500_SOC_DB8500)         += dbx500-cpufreq.o
 obj-$(CONFIG_ARM_EXYNOS_CPUFREQ)       += arm-exynos-cpufreq.o
 arm-exynos-cpufreq-y                                   := exynos-cpufreq.o
-arm-exynos-cpufreq-$(CONFIG_ARM_EXYNOS4210_CPUFREQ)    += exynos4210-cpufreq.o
 arm-exynos-cpufreq-$(CONFIG_ARM_EXYNOS4X12_CPUFREQ)    += exynos4x12-cpufreq.o
 arm-exynos-cpufreq-$(CONFIG_ARM_EXYNOS5250_CPUFREQ)    += exynos5250-cpufreq.o
 obj-$(CONFIG_ARM_EXYNOS5440_CPUFREQ)   += exynos5440-cpufreq.o
index 82d2fbb20f7eb70d94a9c9c6e1d6ec3708f53ee0..ae5b2bd3a9785c63646e3e922fbe17330678b481 100644 (file)
@@ -10,6 +10,7 @@
 */
 
 #include <linux/kernel.h>
+#include <linux/module.h>
 #include <linux/err.h>
 #include <linux/clk.h>
 #include <linux/io.h>
@@ -168,10 +169,7 @@ static int exynos_cpufreq_probe(struct platform_device *pdev)
 
        exynos_info->dev = &pdev->dev;
 
-       if (of_machine_is_compatible("samsung,exynos4210")) {
-               exynos_info->type = EXYNOS_SOC_4210;
-               ret = exynos4210_cpufreq_init(exynos_info);
-       } else if (of_machine_is_compatible("samsung,exynos4212")) {
+       if (of_machine_is_compatible("samsung,exynos4212")) {
                exynos_info->type = EXYNOS_SOC_4212;
                ret = exynos4x12_cpufreq_init(exynos_info);
        } else if (of_machine_is_compatible("samsung,exynos4412")) {
index 9f2062a7cc02ea2500be4f1d54218f0b78d6c127..a3855e4d913d6bf34c429c019fa5d9ec07e660cb 100644 (file)
@@ -18,7 +18,6 @@ enum cpufreq_level_index {
 };
 
 enum exynos_soc_type {
-       EXYNOS_SOC_4210,
        EXYNOS_SOC_4212,
        EXYNOS_SOC_4412,
        EXYNOS_SOC_5250,
@@ -53,14 +52,6 @@ struct exynos_dvfs_info {
        void __iomem    *cmu_regs;
 };
 
-#ifdef CONFIG_ARM_EXYNOS4210_CPUFREQ
-extern int exynos4210_cpufreq_init(struct exynos_dvfs_info *);
-#else
-static inline int exynos4210_cpufreq_init(struct exynos_dvfs_info *info)
-{
-       return -EOPNOTSUPP;
-}
-#endif
 #ifdef CONFIG_ARM_EXYNOS4X12_CPUFREQ
 extern int exynos4x12_cpufreq_init(struct exynos_dvfs_info *);
 #else
diff --git a/drivers/cpufreq/exynos4210-cpufreq.c b/drivers/cpufreq/exynos4210-cpufreq.c
deleted file mode 100644 (file)
index 843ec82..0000000
+++ /dev/null
@@ -1,184 +0,0 @@
-/*
- * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
- *             http://www.samsung.com
- *
- * EXYNOS4210 - CPU frequency scaling support
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/err.h>
-#include <linux/clk.h>
-#include <linux/io.h>
-#include <linux/slab.h>
-#include <linux/cpufreq.h>
-#include <linux/of.h>
-#include <linux/of_address.h>
-
-#include "exynos-cpufreq.h"
-
-static struct clk *cpu_clk;
-static struct clk *moutcore;
-static struct clk *mout_mpll;
-static struct clk *mout_apll;
-static struct exynos_dvfs_info *cpufreq;
-
-static unsigned int exynos4210_volt_table[] = {
-       1250000, 1150000, 1050000, 975000, 950000,
-};
-
-static struct cpufreq_frequency_table exynos4210_freq_table[] = {
-       {0, L0, 1200 * 1000},
-       {0, L1, 1000 * 1000},
-       {0, L2,  800 * 1000},
-       {0, L3,  500 * 1000},
-       {0, L4,  200 * 1000},
-       {0, 0, CPUFREQ_TABLE_END},
-};
-
-static struct apll_freq apll_freq_4210[] = {
-       /*
-        * values:
-        * freq
-        * clock divider for CORE, COREM0, COREM1, PERIPH, ATB, PCLK_DBG, APLL, RESERVED
-        * clock divider for COPY, HPM, RESERVED
-        * PLL M, P, S
-        */
-       APLL_FREQ(1200, 0, 3, 7, 3, 4, 1, 7, 0, 5, 0, 0, 150, 3, 1),
-       APLL_FREQ(1000, 0, 3, 7, 3, 4, 1, 7, 0, 4, 0, 0, 250, 6, 1),
-       APLL_FREQ(800,  0, 3, 7, 3, 3, 1, 7, 0, 3, 0, 0, 200, 6, 1),
-       APLL_FREQ(500,  0, 3, 7, 3, 3, 1, 7, 0, 3, 0, 0, 250, 6, 2),
-       APLL_FREQ(200,  0, 1, 3, 1, 3, 1, 0, 0, 3, 0, 0, 200, 6, 3),
-};
-
-static void exynos4210_set_clkdiv(unsigned int div_index)
-{
-       unsigned int tmp;
-
-       /* Change Divider - CPU0 */
-
-       tmp = apll_freq_4210[div_index].clk_div_cpu0;
-
-       __raw_writel(tmp, cpufreq->cmu_regs + EXYNOS4_CLKDIV_CPU);
-
-       do {
-               tmp = __raw_readl(cpufreq->cmu_regs + EXYNOS4_CLKDIV_STATCPU);
-       } while (tmp & 0x1111111);
-
-       /* Change Divider - CPU1 */
-
-       tmp = apll_freq_4210[div_index].clk_div_cpu1;
-
-       __raw_writel(tmp, cpufreq->cmu_regs + EXYNOS4_CLKDIV_CPU1);
-
-       do {
-               tmp = __raw_readl(cpufreq->cmu_regs + EXYNOS4_CLKDIV_STATCPU1);
-       } while (tmp & 0x11);
-}
-
-static void exynos4210_set_apll(unsigned int index)
-{
-       unsigned int tmp, freq = apll_freq_4210[index].freq;
-
-       /* MUX_CORE_SEL = MPLL, ARMCLK uses MPLL for lock time */
-       clk_set_parent(moutcore, mout_mpll);
-
-       do {
-               tmp = (__raw_readl(cpufreq->cmu_regs + EXYNOS4_CLKMUX_STATCPU)
-                       >> EXYNOS4_CLKSRC_CPU_MUXCORE_SHIFT);
-               tmp &= 0x7;
-       } while (tmp != 0x2);
-
-       clk_set_rate(mout_apll, freq * 1000);
-
-       /* MUX_CORE_SEL = APLL */
-       clk_set_parent(moutcore, mout_apll);
-
-       do {
-               tmp = __raw_readl(cpufreq->cmu_regs + EXYNOS4_CLKMUX_STATCPU);
-               tmp &= EXYNOS4_CLKMUX_STATCPU_MUXCORE_MASK;
-       } while (tmp != (0x1 << EXYNOS4_CLKSRC_CPU_MUXCORE_SHIFT));
-}
-
-static void exynos4210_set_frequency(unsigned int old_index,
-                                    unsigned int new_index)
-{
-       if (old_index > new_index) {
-               exynos4210_set_clkdiv(new_index);
-               exynos4210_set_apll(new_index);
-       } else if (old_index < new_index) {
-               exynos4210_set_apll(new_index);
-               exynos4210_set_clkdiv(new_index);
-       }
-}
-
-int exynos4210_cpufreq_init(struct exynos_dvfs_info *info)
-{
-       struct device_node *np;
-       unsigned long rate;
-
-       /*
-        * HACK: This is a temporary workaround to get access to clock
-        * controller registers directly and remove static mappings and
-        * dependencies on platform headers. It is necessary to enable
-        * Exynos multi-platform support and will be removed together with
-        * this whole driver as soon as Exynos gets migrated to use
-        * cpufreq-dt driver.
-        */
-       np = of_find_compatible_node(NULL, NULL, "samsung,exynos4210-clock");
-       if (!np) {
-               pr_err("%s: failed to find clock controller DT node\n",
-                       __func__);
-               return -ENODEV;
-       }
-
-       info->cmu_regs = of_iomap(np, 0);
-       if (!info->cmu_regs) {
-               pr_err("%s: failed to map CMU registers\n", __func__);
-               return -EFAULT;
-       }
-
-       cpu_clk = clk_get(NULL, "armclk");
-       if (IS_ERR(cpu_clk))
-               return PTR_ERR(cpu_clk);
-
-       moutcore = clk_get(NULL, "moutcore");
-       if (IS_ERR(moutcore))
-               goto err_moutcore;
-
-       mout_mpll = clk_get(NULL, "mout_mpll");
-       if (IS_ERR(mout_mpll))
-               goto err_mout_mpll;
-
-       rate = clk_get_rate(mout_mpll) / 1000;
-
-       mout_apll = clk_get(NULL, "mout_apll");
-       if (IS_ERR(mout_apll))
-               goto err_mout_apll;
-
-       info->mpll_freq_khz = rate;
-       /* 800Mhz */
-       info->pll_safe_idx = L2;
-       info->cpu_clk = cpu_clk;
-       info->volt_table = exynos4210_volt_table;
-       info->freq_table = exynos4210_freq_table;
-       info->set_freq = exynos4210_set_frequency;
-
-       cpufreq = info;
-
-       return 0;
-
-err_mout_apll:
-       clk_put(mout_mpll);
-err_mout_mpll:
-       clk_put(moutcore);
-err_moutcore:
-       clk_put(cpu_clk);
-
-       pr_debug("%s: failed initialization\n", __func__);
-       return -EINVAL;
-}
index b0dac7d6ba31475b9902eefaff8c9dab557f34d2..9e231f52150c404ebd92e6d74ea6a24b5642576a 100644 (file)
@@ -659,4 +659,4 @@ static struct platform_driver s5pv210_cpufreq_platdrv = {
        },
        .probe = s5pv210_cpufreq_probe,
 };
-module_platform_driver(s5pv210_cpufreq_platdrv);
+builtin_platform_driver(s5pv210_cpufreq_platdrv);
index f2446c78d87cf14eda9cec13ab05d9bd0b9198a9..9c5853b6ca4a2f47039dce98ab13e722dfdc05f4 100644 (file)
@@ -62,5 +62,4 @@ static struct platform_driver at91_cpuidle_driver = {
        },
        .probe = at91_cpuidle_probe,
 };
-
-module_platform_driver(at91_cpuidle_driver);
+builtin_platform_driver(at91_cpuidle_driver);
index 9445e6cc02be4336b396d8125d95db5577b0ad13..c13feec89ea1d68c9734553110c56aeee2b812c2 100644 (file)
@@ -75,5 +75,4 @@ static struct platform_driver calxeda_cpuidle_plat_driver = {
         },
         .probe = calxeda_cpuidle_probe,
 };
-
-module_platform_driver(calxeda_cpuidle_plat_driver);
+builtin_platform_driver(calxeda_cpuidle_plat_driver);
index 1e3ef5ec4784dcfc3b758a0d67cac25c0d33e4ba..845bafcfa7929fd66cbb5eb966b16e0f3320de7f 100644 (file)
@@ -67,6 +67,8 @@ static int nap_loop(struct cpuidle_device *dev,
        return index;
 }
 
+/* Register for fastsleep only in oneshot mode of broadcast */
+#ifdef CONFIG_TICK_ONESHOT
 static int fastsleep_loop(struct cpuidle_device *dev,
                                struct cpuidle_driver *drv,
                                int index)
@@ -90,7 +92,7 @@ static int fastsleep_loop(struct cpuidle_device *dev,
 
        return index;
 }
-
+#endif
 /*
  * States for dedicated partition case.
  */
@@ -216,7 +218,14 @@ static int powernv_add_idle_states(void)
                        powernv_states[nr_idle_states].flags = 0;
                        powernv_states[nr_idle_states].target_residency = 100;
                        powernv_states[nr_idle_states].enter = &nap_loop;
-               } else if (flags[i] & OPAL_PM_SLEEP_ENABLED ||
+               }
+
+               /*
+                * All cpuidle states with CPUIDLE_FLAG_TIMER_STOP set must come
+                * within this config dependency check.
+                */
+#ifdef CONFIG_TICK_ONESHOT
+               if (flags[i] & OPAL_PM_SLEEP_ENABLED ||
                        flags[i] & OPAL_PM_SLEEP_ENABLED_ER1) {
                        /* Add FASTSLEEP state */
                        strcpy(powernv_states[nr_idle_states].name, "FastSleep");
@@ -225,7 +234,7 @@ static int powernv_add_idle_states(void)
                        powernv_states[nr_idle_states].target_residency = 300000;
                        powernv_states[nr_idle_states].enter = &fastsleep_loop;
                }
-
+#endif
                powernv_states[nr_idle_states].exit_latency =
                                ((unsigned int)latency_ns[i]) / 1000;
 
index 543292b1d38ea045e9d9504c59d75ba4678a0ce2..6f4257fc56e5192f0b743a00d7bafcbb3b5566c3 100644 (file)
@@ -73,5 +73,4 @@ static struct platform_driver zynq_cpuidle_driver = {
        },
        .probe = zynq_cpuidle_probe,
 };
-
-module_platform_driver(zynq_cpuidle_driver);
+builtin_platform_driver(zynq_cpuidle_driver);
index a432633bced43243780cf3318aa1f223a5ac9746..1c6f98dd88f4958003d6da53ce43933e9e016d45 100644 (file)
@@ -321,9 +321,8 @@ static int mv_cesa_get_sram(struct platform_device *pdev, int idx)
        const char *res_name = "sram";
        struct resource *res;
 
-       engine->pool = of_get_named_gen_pool(cesa->dev->of_node,
-                                            "marvell,crypto-srams",
-                                            idx);
+       engine->pool = of_gen_pool_get(cesa->dev->of_node,
+                                      "marvell,crypto-srams", idx);
        if (engine->pool) {
                engine->sram = gen_pool_dma_alloc(engine->pool,
                                                  cesa->sram_size,
index 5bcd575fa96f1b80d3e9a15cabc2898c79d50fdf..e6b658faef63a37df57e497b45637f7815e7dbf5 100644 (file)
@@ -1034,8 +1034,8 @@ static int mv_cesa_get_sram(struct platform_device *pdev,
                             &sram_size);
 
        cp->sram_size = sram_size;
-       cp->sram_pool = of_get_named_gen_pool(pdev->dev.of_node,
-                                             "marvell,crypto-srams", 0);
+       cp->sram_pool = of_gen_pool_get(pdev->dev.of_node,
+                                       "marvell,crypto-srams", 0);
        if (cp->sram_pool) {
                cp->sram = gen_pool_dma_alloc(cp->sram_pool, sram_size,
                                              &cp->sram_dma);
index 7f8b66c915ed4bd7341961df49470e55bb767278..fdda8e7ae302511bec5c0e1c18d2c5c4b2b3d351 100644 (file)
@@ -88,10 +88,7 @@ void adf_ae_fw_release(struct adf_accel_dev *accel_dev)
 
        qat_uclo_del_uof_obj(loader_data->fw_loader);
        qat_hal_deinit(loader_data->fw_loader);
-
-       if (loader_data->uof_fw)
-               release_firmware(loader_data->uof_fw);
-
+       release_firmware(loader_data->uof_fw);
        loader_data->uof_fw = NULL;
        loader_data->fw_loader = NULL;
 }
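The NULL check removed above is redundant because release_firmware() already bails out when handed a NULL pointer. A small illustrative teardown helper, with hypothetical foo_* names:

#include <linux/firmware.h>

struct foo_loader {
	const struct firmware *uof_fw;
};

static void foo_fw_release(struct foo_loader *ld)
{
	/* Safe even if request_firmware() never succeeded:
	 * release_firmware() is a no-op for a NULL pointer. */
	release_firmware(ld->uof_fw);
	ld->uof_fw = NULL;
}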
index ccec327489daa5e6a64942b3d7d754da7eb36bed..db2926bff8a5bc843741b7cf56facb114ab853b7 100644 (file)
@@ -449,7 +449,7 @@ static int adf_init_bank(struct adf_accel_dev *accel_dev,
 err:
        for (i = 0; i < ADF_ETR_MAX_RINGS_PER_BANK; i++) {
                ring = &bank->rings[i];
-               if (hw_data->tx_rings_mask & (1 << i) && ring->inflights)
+               if (hw_data->tx_rings_mask & (1 << i))
                        kfree(ring->inflights);
        }
        return -ENOMEM;
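Likewise, kfree(NULL) is defined to be a no-op, so the extra ring->inflights test added nothing; the same reasoning appears again later in this diff when amdgpu_device_fini() drops its ip_block_enabled check. A trivial sketch of the idiom, with a hypothetical structure:

#include <linux/slab.h>

struct foo_ctx {
	void *buf;
};

static void foo_cleanup(struct foo_ctx *ctx)
{
	/* kfree(NULL) is a no-op, so there is no need to test the pointer */
	kfree(ctx->buf);
	ctx->buf = NULL;
}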
index 220ee49633e49e88c041c8796a88a83370087e0f..b8576fd6bd0e544730bfc07a19ce92b21ab1aba6 100644 (file)
@@ -120,7 +120,7 @@ static struct dmatest_info {
 
 static int dmatest_run_set(const char *val, const struct kernel_param *kp);
 static int dmatest_run_get(char *val, const struct kernel_param *kp);
-static struct kernel_param_ops run_ops = {
+static const struct kernel_param_ops run_ops = {
        .set = dmatest_run_set,
        .get = dmatest_run_get,
 };
@@ -195,7 +195,7 @@ static int dmatest_wait_get(char *val, const struct kernel_param *kp)
        return param_get_bool(val, kp);
 }
 
-static struct kernel_param_ops wait_ops = {
+static const struct kernel_param_ops wait_ops = {
        .get = dmatest_wait_get,
        .set = param_set_bool,
 };
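Making the kernel_param_ops tables const is possible because the module-param core only reads them, and it lets the compiler place them in read-only data. A sketch of the pattern for a hypothetical boolean parameter:

#include <linux/module.h>
#include <linux/moduleparam.h>

static bool foo_enabled;

static int foo_enabled_set(const char *val, const struct kernel_param *kp)
{
	return param_set_bool(val, kp);
}

static int foo_enabled_get(char *val, const struct kernel_param *kp)
{
	return param_get_bool(val, kp);
}

static const struct kernel_param_ops foo_enabled_ops = {
	.set = foo_enabled_set,
	.get = foo_enabled_get,
};
module_param_cb(enabled, &foo_enabled_ops, &foo_enabled, 0644);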
index 449e785def17d2af759a4b82e1a0b5723b85d4ca..e683761e0f8f899c0d693efc7714ed1194fabc0c 100644 (file)
@@ -657,7 +657,7 @@ static int mmp_tdma_probe(struct platform_device *pdev)
        INIT_LIST_HEAD(&tdev->device.channels);
 
        if (pdev->dev.of_node)
-               pool = of_get_named_gen_pool(pdev->dev.of_node, "asram", 0);
+               pool = of_gen_pool_get(pdev->dev.of_node, "asram", 0);
        else
                pool = sram_get_gpool("asram");
        if (!pool) {
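The of_get_named_gen_pool() to of_gen_pool_get() changes here and in the two crypto drivers before this one are a plain rename from this cycle's genalloc API cleanup; the arguments (device node, phandle property name, index) are unchanged. A hedged usage sketch, assuming a hypothetical "foo,sram" property:

#include <linux/genalloc.h>
#include <linux/of.h>

static void *foo_map_sram(struct device_node *np, size_t size,
			  dma_addr_t *dma)
{
	struct gen_pool *pool;

	/* Look up the gen_pool behind the first phandle in "foo,sram" */
	pool = of_gen_pool_get(np, "foo,sram", 0);
	if (!pool)
		return NULL;

	return gen_pool_dma_alloc(pool, size, dma);
}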
index 7e98084d36451efe894f5f33c1a3b77748183af3..afea7fc625ccb30bec2c3f69e1dadf1b74dadac2 100644 (file)
@@ -151,7 +151,7 @@ static int octeon_l2c_probe(struct platform_device *pdev)
        l2c->ctl_name = "octeon_l2c_err";
 
 
-       if (OCTEON_IS_MODEL(OCTEON_FAM_1_PLUS)) {
+       if (OCTEON_IS_OCTEON1PLUS()) {
                union cvmx_l2t_err l2t_err;
                union cvmx_l2d_err l2d_err;
 
index bb19e0732681ce6a4af6c2e980c0c4bb382889f9..cda6dab5067a57709393fad7913a2e76a531a23b 100644 (file)
@@ -234,7 +234,7 @@ static int octeon_lmc_edac_probe(struct platform_device *pdev)
        layers[0].size = 1;
        layers[0].is_virt_csrow = false;
 
-       if (OCTEON_IS_MODEL(OCTEON_FAM_1_PLUS)) {
+       if (OCTEON_IS_OCTEON1PLUS()) {
                union cvmx_lmcx_mem_cfg0 cfg0;
 
                cfg0.u64 = cvmx_read_csr(CVMX_LMCX_MEM_CFG0(0));
index 0f83c33a7d1fcbb08180d60609aafc92e76a6425..2ab6cf24c9598f0be36dba02cb6f024495e667da 100644 (file)
@@ -73,7 +73,7 @@ static int  co_cache_error_event(struct notifier_block *this,
                        edac_device_handle_ce(p->ed, cpu, 0, "dcache");
 
                /* Clear the error indication */
-               if (OCTEON_IS_MODEL(OCTEON_FAM_2))
+               if (OCTEON_IS_OCTEON2())
                        write_octeon_c0_dcacheerr(1);
                else
                        write_octeon_c0_dcacheerr(0);
index ca617f40574ac2bb8e5697b858a1c52f93c8fad9..9fa8084a7c8d7d9e5aff23bdd372af512e39047a 100644 (file)
@@ -66,7 +66,6 @@ static int __init parse_efi_cmdline(char *str)
 early_param("efi", parse_efi_cmdline);
 
 struct kobject *efi_kobj;
-static struct kobject *efivars_kobj;
 
 /*
  * Let's not leave out systab information that snuck into
@@ -218,10 +217,9 @@ static int __init efisubsys_init(void)
                goto err_remove_group;
 
        /* and the standard mountpoint for efivarfs */
-       efivars_kobj = kobject_create_and_add("efivars", efi_kobj);
-       if (!efivars_kobj) {
+       error = sysfs_create_mount_point(efi_kobj, "efivars");
+       if (error) {
                pr_err("efivars: Subsystem registration failed.\n");
-               error = -ENOMEM;
                goto err_remove_group;
        }
 
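Replacing the kobject with sysfs_create_mount_point() gives efivarfs a sealed, empty sysfs directory intended purely as a mount point, and turns the error handling into a plain errno return. A sketch of the call, assuming a hypothetical parent kobject:

#include <linux/kobject.h>
#include <linux/sysfs.h>

static int foo_register_mount_point(struct kobject *parent)
{
	int error;

	/* Creates an empty directory under the parent, suitable only for
	 * mounting a filesystem on top of it. */
	error = sysfs_create_mount_point(parent, "foo_mnt");
	if (error)
		return error;

	return 0;
}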
index 280bc0a6336519392fabfd4cb4ffd0f7173d416f..816dbe9f4b82e68314f5c5408b52649e4b9b4383 100644 (file)
@@ -24,8 +24,6 @@ KASAN_SANITIZE                        := n
 lib-y                          := efi-stub-helper.o
 lib-$(CONFIG_EFI_ARMSTUB)      += arm-stub.o fdt.o
 
-CFLAGS_fdt.o                   += -I$(srctree)/scripts/dtc/libfdt/
-
 #
 # arm64 puts the stub in the kernel proper, which will unnecessarily retain all
 # code indefinitely unless it is annotated as __init/__initdata/__initconst etc.
index 8333f878919c4de8e22c86e81f6fc5d2b67ad2e3..40343fa92c7b9c344f1f86a824bdb779ab8dd91e 100644 (file)
@@ -657,8 +657,9 @@ static int bcm_kona_gpio_probe(struct platform_device *pdev)
        }
        for (i = 0; i < kona_gpio->num_bank; i++) {
                bank = &kona_gpio->banks[i];
-               irq_set_chained_handler(bank->irq, bcm_kona_gpio_irq_handler);
-               irq_set_handler_data(bank->irq, bank);
+               irq_set_chained_handler_and_data(bank->irq,
+                                                bcm_kona_gpio_irq_handler,
+                                                bank);
        }
 
        spin_lock_init(&kona_gpio->lock);
index 58faf04fce5da02067d6706378ed6c1ca1feef33..55fa9853a7f2207984c0e99d3e1cee2c49ad788d 100644 (file)
@@ -348,8 +348,8 @@ static void dwapb_configure_irqs(struct dwapb_gpio *gpio,
        irq_gc->chip_types[1].handler = handle_edge_irq;
 
        if (!pp->irq_shared) {
-               irq_set_chained_handler(pp->irq, dwapb_irq_handler);
-               irq_set_handler_data(pp->irq, gpio);
+               irq_set_chained_handler_and_data(pp->irq, dwapb_irq_handler,
+                                                gpio);
        } else {
                /*
                 * Request a shared IRQ since where MFD would have devices
index 01acf0a8cdb1963c3d01f1ca591aaab76b55158e..7bcfb87a5fa6812a51465d5c510a4c8a61c5d7d4 100644 (file)
@@ -309,8 +309,7 @@ static int platform_msic_gpio_probe(struct platform_device *pdev)
                                         &msic_irqchip,
                                         handle_simple_irq);
        }
-       irq_set_chained_handler(mg->irq, msic_gpio_irq_handler);
-       irq_set_handler_data(mg->irq, mg);
+       irq_set_chained_handler_and_data(mg->irq, msic_gpio_irq_handler, mg);
 
        return 0;
 err:
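The GPIO hunks above fold irq_set_chained_handler() plus irq_set_handler_data() into irq_set_chained_handler_and_data(), which installs the flow handler and its data in one step so the handler can never run against stale handler data. A sketch of the pattern with hypothetical names (flow-handler prototype as in this kernel series, which still takes the irq number):

#include <linux/irq.h>
#include <linux/irqdesc.h>

struct foo_gpio_bank {
	int irq;
};

static void foo_gpio_irq_handler(unsigned int irq, struct irq_desc *desc)
{
	struct foo_gpio_bank *bank = irq_desc_get_handler_data(desc);

	/* demultiplex the bank's GPIO interrupts here */
	(void)bank;
}

static void foo_gpio_setup_irq(struct foo_gpio_bank *bank)
{
	/* One call instead of irq_set_chained_handler() + irq_set_handler_data() */
	irq_set_chained_handler_and_data(bank->irq, foo_gpio_irq_handler, bank);
}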
index be42ab368a801ff0a3c5508b9bd53c3fbf6cf78d..bf4bd1d120c38a94fb997521c1712224a7712f8d 100644 (file)
@@ -2052,14 +2052,14 @@ struct gpio_desc *fwnode_get_named_gpiod(struct fwnode_handle *fwnode,
        if (is_of_node(fwnode)) {
                enum of_gpio_flags flags;
 
-               desc = of_get_named_gpiod_flags(of_node(fwnode), propname, 0,
+               desc = of_get_named_gpiod_flags(to_of_node(fwnode), propname, 0,
                                                &flags);
                if (!IS_ERR(desc))
                        active_low = flags & OF_GPIO_ACTIVE_LOW;
        } else if (is_acpi_node(fwnode)) {
                struct acpi_gpio_info info;
 
-               desc = acpi_get_gpiod_by_index(acpi_node(fwnode), propname, 0,
+               desc = acpi_get_gpiod_by_index(to_acpi_node(fwnode), propname, 0,
                                               &info);
                if (!IS_ERR(desc))
                        active_low = info.active_low;
index 22866d1c3d69c196bdb332f7c056d9e4a2a5005f..01657830b470a49e8209fd39fa829d4a1fbb3610 100644 (file)
@@ -425,6 +425,8 @@ int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
                                   unsigned irq_type);
 int amdgpu_fence_emit(struct amdgpu_ring *ring, void *owner,
                      struct amdgpu_fence **fence);
+int amdgpu_fence_recreate(struct amdgpu_ring *ring, void *owner,
+                         uint64_t seq, struct amdgpu_fence **fence);
 void amdgpu_fence_process(struct amdgpu_ring *ring);
 int amdgpu_fence_wait_next(struct amdgpu_ring *ring);
 int amdgpu_fence_wait_empty(struct amdgpu_ring *ring);
@@ -435,9 +437,6 @@ int amdgpu_fence_wait(struct amdgpu_fence *fence, bool interruptible);
 int amdgpu_fence_wait_any(struct amdgpu_device *adev,
                          struct amdgpu_fence **fences,
                          bool intr);
-long amdgpu_fence_wait_seq_timeout(struct amdgpu_device *adev,
-                                  u64 *target_seq, bool intr,
-                                  long timeout);
 struct amdgpu_fence *amdgpu_fence_ref(struct amdgpu_fence *fence);
 void amdgpu_fence_unref(struct amdgpu_fence **fence);
 
@@ -1622,6 +1621,7 @@ struct amdgpu_vce {
        unsigned                fb_version;
        atomic_t                handles[AMDGPU_MAX_VCE_HANDLES];
        struct drm_file         *filp[AMDGPU_MAX_VCE_HANDLES];
+       uint32_t                img_size[AMDGPU_MAX_VCE_HANDLES];
        struct delayed_work     idle_work;
        const struct firmware   *fw;    /* VCE firmware */
        struct amdgpu_ring      ring[AMDGPU_MAX_VCE_RINGS];
index 36d34e0afbc3a5cacb51d590af6506c62a7d58ee..f82a2dd83874dea20c7e7b2a6ddf8aee74e0fe1d 100644 (file)
@@ -30,6 +30,7 @@
 
 #include <drm/drmP.h>
 #include "amdgpu.h"
+#include "amdgpu_trace.h"
 
 static int amdgpu_bo_list_create(struct amdgpu_fpriv *fpriv,
                                 struct amdgpu_bo_list **result,
@@ -124,6 +125,8 @@ static int amdgpu_bo_list_set(struct amdgpu_device *adev,
                        gws_obj = entry->robj;
                if (entry->prefered_domains == AMDGPU_GEM_DOMAIN_OA)
                        oa_obj = entry->robj;
+
+               trace_amdgpu_bo_list_set(list, entry->robj);
        }
 
        for (i = 0; i < list->num_entries; ++i)
index f09b2cba40ca505649decf23a27b60850b62407a..d63135bf29c0c258f72025fa6f41f34677576ec4 100644 (file)
@@ -181,8 +181,6 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
                }
                p->chunks[i].chunk_id = user_chunk.chunk_id;
                p->chunks[i].length_dw = user_chunk.length_dw;
-               if (p->chunks[i].chunk_id == AMDGPU_CHUNK_ID_IB)
-                       p->num_ibs++;
 
                size = p->chunks[i].length_dw;
                cdata = (void __user *)(unsigned long)user_chunk.chunk_data;
@@ -199,7 +197,12 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
                        goto out;
                }
 
-               if (p->chunks[i].chunk_id == AMDGPU_CHUNK_ID_FENCE) {
+               switch (p->chunks[i].chunk_id) {
+               case AMDGPU_CHUNK_ID_IB:
+                       p->num_ibs++;
+                       break;
+
+               case AMDGPU_CHUNK_ID_FENCE:
                        size = sizeof(struct drm_amdgpu_cs_chunk_fence);
                        if (p->chunks[i].length_dw * sizeof(uint32_t) >= size) {
                                uint32_t handle;
@@ -221,6 +224,14 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
                                r = -EINVAL;
                                goto out;
                        }
+                       break;
+
+               case AMDGPU_CHUNK_ID_DEPENDENCIES:
+                       break;
+
+               default:
+                       r = -EINVAL;
+                       goto out;
                }
        }
 
@@ -445,8 +456,9 @@ static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error, bo
        for (i = 0; i < parser->nchunks; i++)
                drm_free_large(parser->chunks[i].kdata);
        kfree(parser->chunks);
-       for (i = 0; i < parser->num_ibs; i++)
-               amdgpu_ib_free(parser->adev, &parser->ibs[i]);
+       if (parser->ibs)
+               for (i = 0; i < parser->num_ibs; i++)
+                       amdgpu_ib_free(parser->adev, &parser->ibs[i]);
        kfree(parser->ibs);
        if (parser->uf.bo)
                drm_gem_object_unreference_unlocked(&parser->uf.bo->gem_base);
@@ -654,6 +666,55 @@ static int amdgpu_cs_ib_fill(struct amdgpu_device *adev,
        return 0;
 }
 
+static int amdgpu_cs_dependencies(struct amdgpu_device *adev,
+                                 struct amdgpu_cs_parser *p)
+{
+       struct amdgpu_ib *ib;
+       int i, j, r;
+
+       if (!p->num_ibs)
+               return 0;
+
+       /* Add dependencies to first IB */
+       ib = &p->ibs[0];
+       for (i = 0; i < p->nchunks; ++i) {
+               struct drm_amdgpu_cs_chunk_dep *deps;
+               struct amdgpu_cs_chunk *chunk;
+               unsigned num_deps;
+
+               chunk = &p->chunks[i];
+
+               if (chunk->chunk_id != AMDGPU_CHUNK_ID_DEPENDENCIES)
+                       continue;
+
+               deps = (struct drm_amdgpu_cs_chunk_dep *)chunk->kdata;
+               num_deps = chunk->length_dw * 4 /
+                       sizeof(struct drm_amdgpu_cs_chunk_dep);
+
+               for (j = 0; j < num_deps; ++j) {
+                       struct amdgpu_fence *fence;
+                       struct amdgpu_ring *ring;
+
+                       r = amdgpu_cs_get_ring(adev, deps[j].ip_type,
+                                              deps[j].ip_instance,
+                                              deps[j].ring, &ring);
+                       if (r)
+                               return r;
+
+                       r = amdgpu_fence_recreate(ring, p->filp,
+                                                 deps[j].handle,
+                                                 &fence);
+                       if (r)
+                               return r;
+
+                       amdgpu_sync_fence(&ib->sync, fence);
+                       amdgpu_fence_unref(&fence);
+               }
+       }
+
+       return 0;
+}
+
 int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
 {
        struct amdgpu_device *adev = dev->dev_private;
@@ -688,11 +749,16 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
                        else
                                DRM_ERROR("Failed to process the buffer list %d!\n", r);
                }
-       } else {
+       }
+
+       if (!r) {
                reserved_buffers = true;
                r = amdgpu_cs_ib_fill(adev, &parser);
        }
 
+       if (!r)
+               r = amdgpu_cs_dependencies(adev, &parser);
+
        if (r) {
                amdgpu_cs_parser_fini(&parser, r, reserved_buffers);
                up_read(&adev->exclusive_lock);
@@ -730,9 +796,9 @@ int amdgpu_cs_wait_ioctl(struct drm_device *dev, void *data,
 {
        union drm_amdgpu_wait_cs *wait = data;
        struct amdgpu_device *adev = dev->dev_private;
-       uint64_t seq[AMDGPU_MAX_RINGS] = {0};
-       struct amdgpu_ring *ring = NULL;
        unsigned long timeout = amdgpu_gem_timeout(wait->in.timeout);
+       struct amdgpu_fence *fence = NULL;
+       struct amdgpu_ring *ring = NULL;
        struct amdgpu_ctx *ctx;
        long r;
 
@@ -745,9 +811,12 @@ int amdgpu_cs_wait_ioctl(struct drm_device *dev, void *data,
        if (r)
                return r;
 
-       seq[ring->idx] = wait->in.handle;
+       r = amdgpu_fence_recreate(ring, filp, wait->in.handle, &fence);
+       if (r)
+               return r;
 
-       r = amdgpu_fence_wait_seq_timeout(adev, seq, true, timeout);
+       r = fence_wait_timeout(&fence->base, true, timeout);
+       amdgpu_fence_unref(&fence);
        amdgpu_ctx_put(ctx);
        if (r < 0)
                return r;
index fec487d1c870ae27ea586ddda2def8b6c45272e4..ba46be361c9b2c9f40bf0acb751e65b156d4b171 100644 (file)
@@ -1191,7 +1191,9 @@ static int amdgpu_early_init(struct amdgpu_device *adev)
                return -EINVAL;
        }
 
-
+       adev->ip_block_enabled = kcalloc(adev->num_ip_blocks, sizeof(bool), GFP_KERNEL);
+       if (adev->ip_block_enabled == NULL)
+               return -ENOMEM;
 
        if (adev->ip_blocks == NULL) {
                DRM_ERROR("No IP blocks found!\n");
@@ -1575,8 +1577,7 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
        amdgpu_fence_driver_fini(adev);
        amdgpu_fbdev_fini(adev);
        r = amdgpu_fini(adev);
-       if (adev->ip_block_enabled)
-               kfree(adev->ip_block_enabled);
+       kfree(adev->ip_block_enabled);
        adev->ip_block_enabled = NULL;
        adev->accel_working = false;
        /* free i2c buses */
@@ -2000,4 +2001,10 @@ int amdgpu_debugfs_init(struct drm_minor *minor)
 void amdgpu_debugfs_cleanup(struct drm_minor *minor)
 {
 }
+#else
+static int amdgpu_debugfs_regs_init(struct amdgpu_device *adev)
+{
+       return 0;
+}
+static void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev) { }
 #endif
index 5c9918d01bf984b75e2fae95daaabf7d46cf6414..a7189a1fa6a17dc308075535c6ddc0fcf3403270 100644 (file)
@@ -135,6 +135,38 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, void *owner,
        return 0;
 }
 
+/**
+ * amdgpu_fence_recreate - recreate a fence from an user fence
+ *
+ * @ring: ring the fence is associated with
+ * @owner: creator of the fence
+ * @seq: user fence sequence number
+ * @fence: resulting amdgpu fence object
+ *
+ * Recreates a fence command from the user fence sequence number (all asics).
+ * Returns 0 on success, -ENOMEM on failure.
+ */
+int amdgpu_fence_recreate(struct amdgpu_ring *ring, void *owner,
+                         uint64_t seq, struct amdgpu_fence **fence)
+{
+       struct amdgpu_device *adev = ring->adev;
+
+       if (seq > ring->fence_drv.sync_seq[ring->idx])
+               return -EINVAL;
+
+       *fence = kmalloc(sizeof(struct amdgpu_fence), GFP_KERNEL);
+       if ((*fence) == NULL)
+               return -ENOMEM;
+
+       (*fence)->seq = seq;
+       (*fence)->ring = ring;
+       (*fence)->owner = owner;
+       fence_init(&(*fence)->base, &amdgpu_fence_ops,
+               &adev->fence_queue.lock, adev->fence_context + ring->idx,
+               (*fence)->seq);
+       return 0;
+}
+
 /**
  * amdgpu_fence_check_signaled - callback from fence_queue
  *
@@ -517,12 +549,14 @@ static bool amdgpu_fence_any_seq_signaled(struct amdgpu_device *adev, u64 *seq)
  * the wait timeout, or an error for all other cases.
  * -EDEADLK is returned when a GPU lockup has been detected.
  */
-long amdgpu_fence_wait_seq_timeout(struct amdgpu_device *adev, u64 *target_seq,
-                                  bool intr, long timeout)
+static long amdgpu_fence_wait_seq_timeout(struct amdgpu_device *adev,
+                                         u64 *target_seq, bool intr,
+                                         long timeout)
 {
        uint64_t last_seq[AMDGPU_MAX_RINGS];
        bool signaled;
-       int i, r;
+       int i;
+       long r;
 
        if (timeout == 0) {
                return amdgpu_fence_any_seq_signaled(adev, target_seq);
@@ -1023,7 +1057,7 @@ static int amdgpu_debugfs_fence_info(struct seq_file *m, void *data)
 
                amdgpu_fence_process(ring);
 
-               seq_printf(m, "--- ring %d ---\n", i);
+               seq_printf(m, "--- ring %d (%s) ---\n", i, ring->name);
                seq_printf(m, "Last signaled fence 0x%016llx\n",
                           (unsigned long long)atomic64_read(&ring->fence_drv.last_seq));
                seq_printf(m, "Last emitted        0x%016llx\n",
@@ -1031,7 +1065,8 @@ static int amdgpu_debugfs_fence_info(struct seq_file *m, void *data)
 
                for (j = 0; j < AMDGPU_MAX_RINGS; ++j) {
                        struct amdgpu_ring *other = adev->rings[j];
-                       if (i != j && other && other->fence_drv.initialized)
+                       if (i != j && other && other->fence_drv.initialized &&
+                           ring->fence_drv.sync_seq[j])
                                seq_printf(m, "Last sync to ring %d 0x%016llx\n",
                                           j, ring->fence_drv.sync_seq[j]);
                }
index 0ec222295feeb50e2a0798d7c091067ff2c3975b..975edb1000a202e3dcd2b7cbe2cc2c8916aa855f 100644 (file)
@@ -496,7 +496,7 @@ error_unreserve:
 error_free:
        drm_free_large(vm_bos);
 
-       if (r)
+       if (r && r != -ERESTARTSYS)
                DRM_ERROR("Couldn't update BO_VA (%d)\n", r);
 }
 
@@ -525,8 +525,8 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
                return -EINVAL;
        }
 
-       invalid_flags = ~(AMDGPU_VM_PAGE_READABLE | AMDGPU_VM_PAGE_WRITEABLE |
-                       AMDGPU_VM_PAGE_EXECUTABLE);
+       invalid_flags = ~(AMDGPU_VM_DELAY_UPDATE | AMDGPU_VM_PAGE_READABLE |
+                       AMDGPU_VM_PAGE_WRITEABLE | AMDGPU_VM_PAGE_EXECUTABLE);
        if ((args->flags & invalid_flags)) {
                dev_err(&dev->pdev->dev, "invalid flags 0x%08X vs 0x%08X\n",
                        args->flags, invalid_flags);
@@ -579,7 +579,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
                break;
        }
 
-       if (!r)
+       if (!r && !(args->flags & AMDGPU_VM_DELAY_UPDATE))
                amdgpu_gem_va_update_vm(adev, bo_va);
 
        drm_gem_object_unreference_unlocked(gobj);
index b56dd64bd4ea78fa4f10dec8ddeb5bdbde0ff3a8..961d7265c286524956e1100b14f084eb6e5341b0 100644 (file)
@@ -30,19 +30,21 @@ TRACE_EVENT(amdgpu_cs,
            TP_PROTO(struct amdgpu_cs_parser *p, int i),
            TP_ARGS(p, i),
            TP_STRUCT__entry(
+                            __field(struct amdgpu_bo_list *, bo_list)
                             __field(u32, ring)
                             __field(u32, dw)
                             __field(u32, fences)
                             ),
 
            TP_fast_assign(
+                          __entry->bo_list = p->bo_list;
                           __entry->ring = p->ibs[i].ring->idx;
                           __entry->dw = p->ibs[i].length_dw;
                           __entry->fences = amdgpu_fence_count_emitted(
                                p->ibs[i].ring);
                           ),
-           TP_printk("ring=%u, dw=%u, fences=%u",
-                     __entry->ring, __entry->dw,
+           TP_printk("bo_list=%p, ring=%u, dw=%u, fences=%u",
+                     __entry->bo_list, __entry->ring, __entry->dw,
                      __entry->fences)
 );
 
@@ -61,6 +63,54 @@ TRACE_EVENT(amdgpu_vm_grab_id,
            TP_printk("vmid=%u, ring=%u", __entry->vmid, __entry->ring)
 );
 
+TRACE_EVENT(amdgpu_vm_bo_map,
+           TP_PROTO(struct amdgpu_bo_va *bo_va,
+                    struct amdgpu_bo_va_mapping *mapping),
+           TP_ARGS(bo_va, mapping),
+           TP_STRUCT__entry(
+                            __field(struct amdgpu_bo *, bo)
+                            __field(long, start)
+                            __field(long, last)
+                            __field(u64, offset)
+                            __field(u32, flags)
+                            ),
+
+           TP_fast_assign(
+                          __entry->bo = bo_va->bo;
+                          __entry->start = mapping->it.start;
+                          __entry->last = mapping->it.last;
+                          __entry->offset = mapping->offset;
+                          __entry->flags = mapping->flags;
+                          ),
+           TP_printk("bo=%p, start=%lx, last=%lx, offset=%010llx, flags=%08x",
+                     __entry->bo, __entry->start, __entry->last,
+                     __entry->offset, __entry->flags)
+);
+
+TRACE_EVENT(amdgpu_vm_bo_unmap,
+           TP_PROTO(struct amdgpu_bo_va *bo_va,
+                    struct amdgpu_bo_va_mapping *mapping),
+           TP_ARGS(bo_va, mapping),
+           TP_STRUCT__entry(
+                            __field(struct amdgpu_bo *, bo)
+                            __field(long, start)
+                            __field(long, last)
+                            __field(u64, offset)
+                            __field(u32, flags)
+                            ),
+
+           TP_fast_assign(
+                          __entry->bo = bo_va->bo;
+                          __entry->start = mapping->it.start;
+                          __entry->last = mapping->it.last;
+                          __entry->offset = mapping->offset;
+                          __entry->flags = mapping->flags;
+                          ),
+           TP_printk("bo=%p, start=%lx, last=%lx, offset=%010llx, flags=%08x",
+                     __entry->bo, __entry->start, __entry->last,
+                     __entry->offset, __entry->flags)
+);
+
 TRACE_EVENT(amdgpu_vm_bo_update,
            TP_PROTO(struct amdgpu_bo_va_mapping *mapping),
            TP_ARGS(mapping),
@@ -121,6 +171,21 @@ TRACE_EVENT(amdgpu_vm_flush,
                      __entry->pd_addr, __entry->ring, __entry->id)
 );
 
+TRACE_EVENT(amdgpu_bo_list_set,
+           TP_PROTO(struct amdgpu_bo_list *list, struct amdgpu_bo *bo),
+           TP_ARGS(list, bo),
+           TP_STRUCT__entry(
+                            __field(struct amdgpu_bo_list *, list)
+                            __field(struct amdgpu_bo *, bo)
+                            ),
+
+           TP_fast_assign(
+                          __entry->list = list;
+                          __entry->bo = bo;
+                          ),
+           TP_printk("list=%p, bo=%p", __entry->list, __entry->bo)
+);
+
 DECLARE_EVENT_CLASS(amdgpu_fence_request,
 
            TP_PROTO(struct drm_device *dev, int ring, u32 seqno),
index d3706a4982933a35d09e36ec946f3bed959430fc..dd3415d2e45dcbb2f3cba5fa1ca6688ef779cfd5 100644 (file)
@@ -674,7 +674,7 @@ static int amdgpu_ttm_tt_populate(struct ttm_tt *ttm)
                return 0;
 
        if (gtt && gtt->userptr) {
-               ttm->sg = kcalloc(1, sizeof(struct sg_table), GFP_KERNEL);
+               ttm->sg = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
                if (!ttm->sg)
                        return -ENOMEM;
 
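kcalloc(1, size, flags) and kzalloc(size, flags) return the same zeroed allocation; kzalloc is simply the idiomatic spelling for a single object, while kcalloc's overflow checking only matters for real arrays. A trivial sketch:

#include <linux/scatterlist.h>
#include <linux/slab.h>

static struct sg_table *foo_alloc_sg_table(gfp_t gfp)
{
	/* Single zeroed object: kzalloc(), not kcalloc(1, ...) */
	return kzalloc(sizeof(struct sg_table), gfp);
}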
index 1127a504f11854f421ee2e202a96472094745ad4..d3ca73090e39d94f8eaf0762dcb22b4209a07712 100644 (file)
@@ -464,28 +464,42 @@ int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
  * @p: parser context
  * @lo: address of lower dword
  * @hi: address of higher dword
+ * @size: minimum size
  *
  * Patch relocation inside command stream with real buffer address
  */
-int amdgpu_vce_cs_reloc(struct amdgpu_cs_parser *p, uint32_t ib_idx, int lo, int hi)
+static int amdgpu_vce_cs_reloc(struct amdgpu_cs_parser *p, uint32_t ib_idx,
+                              int lo, int hi, unsigned size, uint32_t index)
 {
        struct amdgpu_bo_va_mapping *mapping;
        struct amdgpu_ib *ib = &p->ibs[ib_idx];
        struct amdgpu_bo *bo;
        uint64_t addr;
 
+       if (index == 0xffffffff)
+               index = 0;
+
        addr = ((uint64_t)amdgpu_get_ib_value(p, ib_idx, lo)) |
               ((uint64_t)amdgpu_get_ib_value(p, ib_idx, hi)) << 32;
+       addr += ((uint64_t)size) * ((uint64_t)index);
 
        mapping = amdgpu_cs_find_mapping(p, addr, &bo);
        if (mapping == NULL) {
-               DRM_ERROR("Can't find BO for addr 0x%010Lx %d %d\n",
+               DRM_ERROR("Can't find BO for addr 0x%010Lx %d %d %d %d\n",
+                         addr, lo, hi, size, index);
+               return -EINVAL;
+       }
+
+       if ((addr + (uint64_t)size) >
+           ((uint64_t)mapping->it.last + 1) * AMDGPU_GPU_PAGE_SIZE) {
+               DRM_ERROR("BO to small for addr 0x%010Lx %d %d\n",
                          addr, lo, hi);
                return -EINVAL;
        }
 
        addr -= ((uint64_t)mapping->it.start) * AMDGPU_GPU_PAGE_SIZE;
        addr += amdgpu_bo_gpu_offset(bo);
+       addr -= ((uint64_t)size) * ((uint64_t)index);
 
        ib->ptr[lo] = addr & 0xFFFFFFFF;
        ib->ptr[hi] = addr >> 32;
@@ -493,6 +507,48 @@ int amdgpu_vce_cs_reloc(struct amdgpu_cs_parser *p, uint32_t ib_idx, int lo, int
        return 0;
 }
 
+/**
+ * amdgpu_vce_validate_handle - validate stream handle
+ *
+ * @p: parser context
+ * @handle: handle to validate
+ * @allocated: allocated a new handle?
+ *
+ * Validates the handle and return the found session index or -EINVAL
+ * Validates the handle and returns the found session index, or -EINVAL
+ * if we don't have another free session index.
+ * we we don't have another free session index.
+ */
+static int amdgpu_vce_validate_handle(struct amdgpu_cs_parser *p,
+                                     uint32_t handle, bool *allocated)
+{
+       unsigned i;
+
+       *allocated = false;
+
+       /* validate the handle */
+       for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i) {
+               if (atomic_read(&p->adev->vce.handles[i]) == handle) {
+                       if (p->adev->vce.filp[i] != p->filp) {
+                               DRM_ERROR("VCE handle collision detected!\n");
+                               return -EINVAL;
+                       }
+                       return i;
+               }
+       }
+
+       /* handle not found try to alloc a new one */
+       for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i) {
+               if (!atomic_cmpxchg(&p->adev->vce.handles[i], 0, handle)) {
+                       p->adev->vce.filp[i] = p->filp;
+                       p->adev->vce.img_size[i] = 0;
+                       *allocated = true;
+                       return i;
+               }
+       }
+
+       DRM_ERROR("No more free VCE handles!\n");
+       return -EINVAL;
+}
+
 /**
  * amdgpu_vce_cs_parse - parse and validate the command stream
  *
@@ -501,10 +557,15 @@ int amdgpu_vce_cs_reloc(struct amdgpu_cs_parser *p, uint32_t ib_idx, int lo, int
  */
 int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx)
 {
-       uint32_t handle = 0;
-       bool destroy = false;
-       int i, r, idx = 0;
        struct amdgpu_ib *ib = &p->ibs[ib_idx];
+       unsigned fb_idx = 0, bs_idx = 0;
+       int session_idx = -1;
+       bool destroyed = false;
+       bool created = false;
+       bool allocated = false;
+       uint32_t tmp, handle = 0;
+       uint32_t *size = &tmp;
+       int i, r = 0, idx = 0;
 
        amdgpu_vce_note_usage(p->adev);
 
@@ -514,16 +575,44 @@ int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx)
 
                if ((len < 8) || (len & 3)) {
                        DRM_ERROR("invalid VCE command length (%d)!\n", len);
-                       return -EINVAL;
+                       r = -EINVAL;
+                       goto out;
+               }
+
+               if (destroyed) {
+                       DRM_ERROR("No other command allowed after destroy!\n");
+                       r = -EINVAL;
+                       goto out;
                }
 
                switch (cmd) {
                case 0x00000001: // session
                        handle = amdgpu_get_ib_value(p, ib_idx, idx + 2);
+                       session_idx = amdgpu_vce_validate_handle(p, handle,
+                                                                &allocated);
+                       if (session_idx < 0)
+                               return session_idx;
+                       size = &p->adev->vce.img_size[session_idx];
                        break;
 
                case 0x00000002: // task info
+                       fb_idx = amdgpu_get_ib_value(p, ib_idx, idx + 6);
+                       bs_idx = amdgpu_get_ib_value(p, ib_idx, idx + 7);
+                       break;
+
                case 0x01000001: // create
+                       created = true;
+                       if (!allocated) {
+                               DRM_ERROR("Handle already in use!\n");
+                               r = -EINVAL;
+                               goto out;
+                       }
+
+                       *size = amdgpu_get_ib_value(p, ib_idx, idx + 8) *
+                               amdgpu_get_ib_value(p, ib_idx, idx + 10) *
+                               8 * 3 / 2;
+                       break;
+
                case 0x04000001: // config extension
                case 0x04000002: // pic control
                case 0x04000005: // rate control
@@ -534,60 +623,74 @@ int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx)
                        break;
 
                case 0x03000001: // encode
-                       r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 10, idx + 9);
+                       r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 10, idx + 9,
+                                               *size, 0);
                        if (r)
-                               return r;
+                               goto out;
 
-                       r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 12, idx + 11);
+                       r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 12, idx + 11,
+                                               *size / 3, 0);
                        if (r)
-                               return r;
+                               goto out;
                        break;
 
                case 0x02000001: // destroy
-                       destroy = true;
+                       destroyed = true;
                        break;
 
                case 0x05000001: // context buffer
+                       r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 3, idx + 2,
+                                               *size * 2, 0);
+                       if (r)
+                               goto out;
+                       break;
+
                case 0x05000004: // video bitstream buffer
+                       tmp = amdgpu_get_ib_value(p, ib_idx, idx + 4);
+                       r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 3, idx + 2,
+                                               tmp, bs_idx);
+                       if (r)
+                               goto out;
+                       break;
+
                case 0x05000005: // feedback buffer
-                       r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 3, idx + 2);
+                       r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 3, idx + 2,
+                                               4096, fb_idx);
                        if (r)
-                               return r;
+                               goto out;
                        break;
 
                default:
                        DRM_ERROR("invalid VCE command (0x%x)!\n", cmd);
-                       return -EINVAL;
+                       r = -EINVAL;
+                       goto out;
                }
 
-               idx += len / 4;
-       }
-
-       if (destroy) {
-               /* IB contains a destroy msg, free the handle */
-               for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i)
-                       atomic_cmpxchg(&p->adev->vce.handles[i], handle, 0);
+               if (session_idx == -1) {
+                       DRM_ERROR("no session command at start of IB\n");
+                       r = -EINVAL;
+                       goto out;
+               }
 
-               return 0;
+               idx += len / 4;
        }
 
-       /* create or encode, validate the handle */
-       for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i) {
-               if (atomic_read(&p->adev->vce.handles[i]) == handle)
-                       return 0;
+       if (allocated && !created) {
+               DRM_ERROR("New session without create command!\n");
+               r = -ENOENT;
        }
 
-       /* handle not found try to alloc a new one */
-       for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i) {
-               if (!atomic_cmpxchg(&p->adev->vce.handles[i], 0, handle)) {
-                       p->adev->vce.filp[i] = p->filp;
-                       return 0;
-               }
+out:
+       if ((!r && destroyed) || (r && allocated)) {
+               /*
+                * IB contains a destroy msg or we have allocated a
+                * handle and got an error; either way, free the handle
+                */
+               for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i)
+                       atomic_cmpxchg(&p->adev->vce.handles[i], handle, 0);
        }
 
-       DRM_ERROR("No more free VCE handles!\n");
-
-       return -EINVAL;
+       return r;
 }
 
 /**
index b6a9d0956c6060befa3bd0b8a9b7dc69db5568c2..7ccdb5927da5ce4bcc7f1db1009c5b5297bca29b 100644 (file)
@@ -33,7 +33,6 @@ int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
 int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
                               struct amdgpu_fence **fence);
 void amdgpu_vce_free_handles(struct amdgpu_device *adev, struct drm_file *filp);
-int amdgpu_vce_cs_reloc(struct amdgpu_cs_parser *p, uint32_t ib_idx, int lo, int hi);
 int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx);
 bool amdgpu_vce_ring_emit_semaphore(struct amdgpu_ring *ring,
                                    struct amdgpu_semaphore *semaphore,
index 407882b233c7952c99ed3128fbd9c7aa2adcab58..9a4e3b63f1cb4bf7ca9c73e813a0568f320c6574 100644 (file)
@@ -1001,6 +1001,7 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
 
        list_add(&mapping->list, &bo_va->mappings);
        interval_tree_insert(&mapping->it, &vm->va);
+       trace_amdgpu_vm_bo_map(bo_va, mapping);
 
        bo_va->addr = 0;
 
@@ -1058,6 +1059,7 @@ error_free:
        mutex_lock(&vm->mutex);
        list_del(&mapping->list);
        interval_tree_remove(&mapping->it, &vm->va);
+       trace_amdgpu_vm_bo_unmap(bo_va, mapping);
        kfree(mapping);
 
 error_unlock:
@@ -1099,6 +1101,7 @@ int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
        mutex_lock(&vm->mutex);
        list_del(&mapping->list);
        interval_tree_remove(&mapping->it, &vm->va);
+       trace_amdgpu_vm_bo_unmap(bo_va, mapping);
 
        if (bo_va->addr) {
                /* clear the old address */
@@ -1139,6 +1142,7 @@ void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
        list_for_each_entry_safe(mapping, next, &bo_va->mappings, list) {
                list_del(&mapping->list);
                interval_tree_remove(&mapping->it, &vm->va);
+               trace_amdgpu_vm_bo_unmap(bo_va, mapping);
                if (bo_va->addr)
                        list_add(&mapping->list, &vm->freed);
                else
index 5dab578d6462ab2949e005b9996612e09f5f1fa5..341c566818419317a0c3d16a3d5b738840e30b46 100644 (file)
@@ -2256,10 +2256,6 @@ int cik_set_ip_blocks(struct amdgpu_device *adev)
                return -EINVAL;
        }
 
-       adev->ip_block_enabled = kcalloc(adev->num_ip_blocks, sizeof(bool), GFP_KERNEL);
-       if (adev->ip_block_enabled == NULL)
-               return -ENOMEM;
-
        return 0;
 }
 
index 220865a44814a59a1934b4de16a87d1d797a3541..d19085a9706489a00a0e13306af0d6275587b88c 100644 (file)
 #define VCE_CMD_IB_AUTO                0x00000005
 #define VCE_CMD_SEMAPHORE      0x00000006
 
+/* valid for both DEFAULT_MTYPE and APE1_MTYPE */
+enum {
+       MTYPE_CACHED = 0,
+       MTYPE_NONCACHED = 3
+};
+
 #endif
index e4936a452bc6981e91dfc4bfa1c8dc4202992a13..f75a31df30bdb704f93e5dd465a3a74d93b524d8 100644 (file)
@@ -425,7 +425,7 @@ static int cz_dpm_init(struct amdgpu_device *adev)
        pi->mgcg_cgtt_local1 = 0x0;
        pi->clock_slow_down_step = 25000;
        pi->skip_clock_slow_down = 1;
-       pi->enable_nb_ps_policy = 1;
+       pi->enable_nb_ps_policy = 0;
        pi->caps_power_containment = true;
        pi->caps_cac = true;
        pi->didt_enabled = false;
index 782a74107664df05a7d7abff6e503d80389fc275..99e1afc896294c90c13f7e3ed0cc2ceff2aae1d7 100644 (file)
@@ -46,7 +46,7 @@
 
 /* Do not change the following, it is also defined in SMU8.h */
 #define SMU_EnabledFeatureScoreboard_AcpDpmOn          0x00000001
-#define SMU_EnabledFeatureScoreboard_SclkDpmOn         0x00100000
+#define SMU_EnabledFeatureScoreboard_SclkDpmOn         0x00200000
 #define SMU_EnabledFeatureScoreboard_UvdDpmOn          0x00800000
 #define SMU_EnabledFeatureScoreboard_VceDpmOn          0x01000000
 
index 72c27ac915f2a8681f35db4d15e232918405eb95..aaca8d663f2c60e97921e0c06a69a1c7a4549322 100644 (file)
@@ -3379,7 +3379,7 @@ static int dce_v8_0_hpd_irq(struct amdgpu_device *adev,
        uint32_t disp_int, mask, int_control, tmp;
        unsigned hpd;
 
-       if (entry->src_data > 6) {
+       if (entry->src_data >= adev->mode_info.num_hpd) {
                DRM_DEBUG("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data);
                return 0;
        }
index cb7907447b81dd3696312ce65dc58606c52a9ab4..2c188fb9fd22ff1a3528673beb8866639d5ef631 100644 (file)
@@ -2009,6 +2009,46 @@ static void gfx_v7_0_setup_rb(struct amdgpu_device *adev,
        mutex_unlock(&adev->grbm_idx_mutex);
 }
 
+/**
+ * gmc_v7_0_init_compute_vmid - initialize compute vmid registers
+ *
+ * @rdev: amdgpu_device pointer
+ *
+ * Initialize compute vmid sh_mem registers
+ *
+ */
+#define DEFAULT_SH_MEM_BASES   (0x6000)
+#define FIRST_COMPUTE_VMID     (8)
+#define LAST_COMPUTE_VMID      (16)
+static void gmc_v7_0_init_compute_vmid(struct amdgpu_device *adev)
+{
+       int i;
+       uint32_t sh_mem_config;
+       uint32_t sh_mem_bases;
+
+       /*
+        * Configure apertures:
+        * LDS:         0x60000000'00000000 - 0x60000001'00000000 (4GB)
+        * Scratch:     0x60000001'00000000 - 0x60000002'00000000 (4GB)
+        * GPUVM:       0x60010000'00000000 - 0x60020000'00000000 (1TB)
+       */
+       sh_mem_bases = DEFAULT_SH_MEM_BASES | (DEFAULT_SH_MEM_BASES << 16);
+       sh_mem_config = SH_MEM_ALIGNMENT_MODE_UNALIGNED <<
+                       SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT;
+       sh_mem_config |= MTYPE_NONCACHED << SH_MEM_CONFIG__DEFAULT_MTYPE__SHIFT;
+       mutex_lock(&adev->srbm_mutex);
+       for (i = FIRST_COMPUTE_VMID; i < LAST_COMPUTE_VMID; i++) {
+               cik_srbm_select(adev, 0, 0, 0, i);
+               /* CP and shaders */
+               WREG32(mmSH_MEM_CONFIG, sh_mem_config);
+               WREG32(mmSH_MEM_APE1_BASE, 1);
+               WREG32(mmSH_MEM_APE1_LIMIT, 0);
+               WREG32(mmSH_MEM_BASES, sh_mem_bases);
+       }
+       cik_srbm_select(adev, 0, 0, 0, 0);
+       mutex_unlock(&adev->srbm_mutex);
+}
+
 /**
  * gfx_v7_0_gpu_init - setup the 3D engine
  *
@@ -2230,6 +2270,8 @@ static void gfx_v7_0_gpu_init(struct amdgpu_device *adev)
        cik_srbm_select(adev, 0, 0, 0, 0);
        mutex_unlock(&adev->srbm_mutex);
 
+       gmc_v7_0_init_compute_vmid(adev);
+
        WREG32(mmSX_DEBUG_1, 0x20);
 
        WREG32(mmTA_CNTL_AUX, 0x00010000);
index 14242bd33363d3e26b368a1913bb73a2bf931ba9..7b683fb2173c728fff760c926f1204b3897b4eae 100644 (file)
@@ -1894,6 +1894,51 @@ static void gfx_v8_0_setup_rb(struct amdgpu_device *adev,
        mutex_unlock(&adev->grbm_idx_mutex);
 }
 
+/**
+ * gmc_v8_0_init_compute_vmid - initialize compute vmid registers
+ *
+ * @rdev: amdgpu_device pointer
+ *
+ * Initialize compute vmid sh_mem registers
+ *
+ */
+#define DEFAULT_SH_MEM_BASES   (0x6000)
+#define FIRST_COMPUTE_VMID     (8)
+#define LAST_COMPUTE_VMID      (16)
+static void gmc_v8_0_init_compute_vmid(struct amdgpu_device *adev)
+{
+       int i;
+       uint32_t sh_mem_config;
+       uint32_t sh_mem_bases;
+
+       /*
+        * Configure apertures:
+        * LDS:         0x60000000'00000000 - 0x60000001'00000000 (4GB)
+        * Scratch:     0x60000001'00000000 - 0x60000002'00000000 (4GB)
+        * GPUVM:       0x60010000'00000000 - 0x60020000'00000000 (1TB)
+        */
+       sh_mem_bases = DEFAULT_SH_MEM_BASES | (DEFAULT_SH_MEM_BASES << 16);
+
+       sh_mem_config = SH_MEM_ADDRESS_MODE_HSA64 <<
+                       SH_MEM_CONFIG__ADDRESS_MODE__SHIFT |
+                       SH_MEM_ALIGNMENT_MODE_UNALIGNED <<
+                       SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT |
+                       MTYPE_CC << SH_MEM_CONFIG__DEFAULT_MTYPE__SHIFT |
+                       SH_MEM_CONFIG__PRIVATE_ATC_MASK;
+
+       mutex_lock(&adev->srbm_mutex);
+       for (i = FIRST_COMPUTE_VMID; i < LAST_COMPUTE_VMID; i++) {
+               vi_srbm_select(adev, 0, 0, 0, i);
+               /* CP and shaders */
+               WREG32(mmSH_MEM_CONFIG, sh_mem_config);
+               WREG32(mmSH_MEM_APE1_BASE, 1);
+               WREG32(mmSH_MEM_APE1_LIMIT, 0);
+               WREG32(mmSH_MEM_BASES, sh_mem_bases);
+       }
+       vi_srbm_select(adev, 0, 0, 0, 0);
+       mutex_unlock(&adev->srbm_mutex);
+}
+
 static void gfx_v8_0_gpu_init(struct amdgpu_device *adev)
 {
        u32 gb_addr_config;
@@ -2113,6 +2158,8 @@ static void gfx_v8_0_gpu_init(struct amdgpu_device *adev)
        vi_srbm_select(adev, 0, 0, 0, 0);
        mutex_unlock(&adev->srbm_mutex);
 
+       gmc_v8_0_init_compute_vmid(adev);
+
        mutex_lock(&adev->grbm_idx_mutex);
        /*
         * making sure that the following register writes will be broadcasted
@@ -3081,7 +3128,7 @@ static int gfx_v8_0_cp_compute_resume(struct amdgpu_device *adev)
                                WREG32(mmCP_MEC_DOORBELL_RANGE_LOWER,
                                       AMDGPU_DOORBELL_KIQ << 2);
                                WREG32(mmCP_MEC_DOORBELL_RANGE_UPPER,
-                                      AMDGPU_DOORBELL_MEC_RING7 << 2);
+                                               0x7FFFF << 2);
                        }
                        tmp = RREG32(mmCP_HQD_PQ_DOORBELL_CONTROL);
                        tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
@@ -3097,6 +3144,12 @@ static int gfx_v8_0_cp_compute_resume(struct amdgpu_device *adev)
                WREG32(mmCP_HQD_PQ_DOORBELL_CONTROL,
                       mqd->cp_hqd_pq_doorbell_control);
 
+               /* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */
+               ring->wptr = 0;
+               mqd->cp_hqd_pq_wptr = ring->wptr;
+               WREG32(mmCP_HQD_PQ_WPTR, mqd->cp_hqd_pq_wptr);
+               mqd->cp_hqd_pq_rptr = RREG32(mmCP_HQD_PQ_RPTR);
+
                /* set the vmid for the queue */
                mqd->cp_hqd_vmid = 0;
                WREG32(mmCP_HQD_VMID, mqd->cp_hqd_vmid);
index e3c1fde753638de09e9465464bb47339165aa62e..7bb37b93993fb5312eb2d46189bf09bf789c3989 100644 (file)
@@ -438,6 +438,31 @@ static void sdma_v3_0_rlc_stop(struct amdgpu_device *adev)
        /* XXX todo */
 }
 
+/**
+ * sdma_v3_0_ctx_switch_enable - enable/disable the async dma engines context switch
+ *
+ * @adev: amdgpu_device pointer
+ * @enable: enable/disable the DMA MEs context switch.
+ *
+ * Halt or unhalt the async dma engines context switch (VI).
+ */
+static void sdma_v3_0_ctx_switch_enable(struct amdgpu_device *adev, bool enable)
+{
+       u32 f32_cntl;
+       int i;
+
+       for (i = 0; i < SDMA_MAX_INSTANCE; i++) {
+               f32_cntl = RREG32(mmSDMA0_CNTL + sdma_offsets[i]);
+               if (enable)
+                       f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_CNTL,
+                                       AUTO_CTXSW_ENABLE, 1);
+               else
+                       f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_CNTL,
+                                       AUTO_CTXSW_ENABLE, 0);
+               WREG32(mmSDMA0_CNTL + sdma_offsets[i], f32_cntl);
+       }
+}
+
 /**
  * sdma_v3_0_enable - stop the async dma engines
  *
@@ -648,6 +673,8 @@ static int sdma_v3_0_start(struct amdgpu_device *adev)
 
        /* unhalt the MEs */
        sdma_v3_0_enable(adev, true);
+       /* enable sdma ring preemption */
+       sdma_v3_0_ctx_switch_enable(adev, true);
 
        /* start the gfx rings and rlc compute queues */
        r = sdma_v3_0_gfx_resume(adev);
@@ -1079,6 +1106,7 @@ static int sdma_v3_0_hw_fini(void *handle)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
+       sdma_v3_0_ctx_switch_enable(adev, false);
        sdma_v3_0_enable(adev, false);
 
        return 0;
index 90fc93c2c1d04571e4a694af23e5a7af51267a0a..fa5a4448531dfe9dd307d88b55e051761821a28d 100644 (file)
@@ -1189,10 +1189,6 @@ int vi_set_ip_blocks(struct amdgpu_device *adev)
                return -EINVAL;
        }
 
-       adev->ip_block_enabled = kcalloc(adev->num_ip_blocks, sizeof(bool), GFP_KERNEL);
-       if (adev->ip_block_enabled == NULL)
-               return -ENOMEM;
-
        return 0;
 }
 
index b69ed97d447c9346d4c90b615c7943f06e18bae9..b9ba06176eb1e10c6e4074a7a935a08a50313819 100644 (file)
@@ -4732,7 +4732,7 @@ int drm_mode_connector_update_edid_property(struct drm_connector *connector,
                return 0;
 
        if (edid)
-               size = EDID_LENGTH + (1 + edid->extensions);
+               size = EDID_LENGTH * (1 + edid->extensions);
 
        ret = drm_property_replace_global_blob(dev,
                                               &connector->edid_blob_ptr,
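The operator fix above matters because an EDID is a 128-byte base block plus 128 bytes per extension block, so the blob size must be EDID_LENGTH * (1 + edid->extensions); with one extension that is 128 * 2 = 256 bytes, whereas the old '+' yielded 130 and undersized the property. A hypothetical helper spelling out the arithmetic:

#include <drm/drm_edid.h>

/* Size in bytes of an EDID including its extension blocks (hypothetical helper) */
static size_t foo_edid_size(const struct edid *edid)
{
	return EDID_LENGTH * (1 + edid->extensions);
}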
index 619dad1b23863716972a23b12226b16af533a068..9daa2883ac186f73c64baa40ba61aadfd0a94c9a 100644 (file)
@@ -516,17 +516,17 @@ static void gen8_ppgtt_clear_range(struct i915_address_space *vm,
                struct page *page_table;
 
                if (WARN_ON(!ppgtt->pdp.page_directory[pdpe]))
-                       continue;
+                       break;
 
                pd = ppgtt->pdp.page_directory[pdpe];
 
                if (WARN_ON(!pd->page_table[pde]))
-                       continue;
+                       break;
 
                pt = pd->page_table[pde];
 
                if (WARN_ON(!pt->page))
-                       continue;
+                       break;
 
                page_table = pt->page;
 
index f5edb3504167ec09b5165d6f54aa760a85f864d1..2030f602cbf8b74366bcb78f9f2ddc4a5f0dd9c6 100644 (file)
@@ -3491,6 +3491,7 @@ enum skl_disp_power_wells {
 #define   BLM_POLARITY_PNV                     (1 << 0) /* pnv only */
 
 #define BLC_HIST_CTL   (dev_priv->info.display_mmio_offset + 0x61260)
+#define  BLM_HISTOGRAM_ENABLE                  (1 << 31)
 
 /* New registers for PCH-split platforms. Safe where new bits show up, the
  * register layout machtes with gen4 BLC_PWM_CTL[12]. */
index dcb1d25d6f051ee88ae84ba7eee7e5fe4f4a2c76..1b61f98103870171e75338595b32e5dd473523e0 100644 (file)
@@ -13303,6 +13303,16 @@ intel_check_primary_plane(struct drm_plane *plane,
                                intel_crtc->atomic.wait_vblank = true;
                }
 
+               /*
+                * FIXME: If any other plane is still enabled on the pipe we
+                * could leave IPS enabled, but for now assume that when the
+                * primary plane is made invisible (DSPCNTR set to 0 in
+                * update_primary_plane) IPS needs to be disabled.
+                */
+               if (!state->visible || !fb)
+                       intel_crtc->atomic.disable_ips = true;
+
                intel_crtc->atomic.fb_bits |=
                        INTEL_FRONTBUFFER_PRIMARY(intel_crtc->pipe);
 
@@ -13400,6 +13410,9 @@ static void intel_begin_crtc_commit(struct drm_crtc *crtc)
        if (intel_crtc->atomic.disable_fbc)
                intel_fbc_disable(dev);
 
+       if (intel_crtc->atomic.disable_ips)
+               hsw_disable_ips(intel_crtc);
+
        if (intel_crtc->atomic.pre_disable_primary)
                intel_pre_disable_primary(crtc);
 
index 76afc62373d75bc8c6d9349acd42a38cc71a5a02..6e8faa25379240cab60f57631adf42612f28df33 100644 (file)
@@ -1140,6 +1140,9 @@ skl_edp_set_pll_config(struct intel_crtc_state *pipe_config, int link_clock)
 static void
 hsw_dp_set_ddi_pll_sel(struct intel_crtc_state *pipe_config, int link_bw)
 {
+       memset(&pipe_config->dpll_hw_state, 0,
+              sizeof(pipe_config->dpll_hw_state));
+
        switch (link_bw) {
        case DP_LINK_BW_1_62:
                pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_810;
index 2afb31a4627573a3f97d0a4530231f27451e0793..105928382e216239043faf4e651d10520c92b678 100644 (file)
@@ -485,6 +485,7 @@ struct intel_crtc_atomic_commit {
        /* Sleepable operations to perform before commit */
        bool wait_for_flips;
        bool disable_fbc;
+       bool disable_ips;
        bool pre_disable_primary;
        bool update_wm;
        unsigned disabled_planes;
index 7d83527f95f797fd3e020227753923c6b029d41b..55aad2322e10ec8e7ea168aa84e0b7d98dcf9b91 100644 (file)
@@ -907,6 +907,14 @@ static void i9xx_enable_backlight(struct intel_connector *connector)
 
        /* XXX: combine this into above write? */
        intel_panel_actually_set_backlight(connector, panel->backlight.level);
+
+       /*
+        * Needed to enable backlight on some 855gm models. BLC_HIST_CTL is
+        * 855gm only, but checking for gen2 is safe, as 855gm is the only gen2
+        * that has backlight.
+        */
+       if (IS_GEN2(dev))
+               I915_WRITE(BLC_HIST_CTL, BLM_HISTOGRAM_ENABLE);
 }
 
 static void i965_enable_backlight(struct intel_connector *connector)
index 0e690bf19fc9c5d04d2fdab02df59a4eeddd3a2b..af1ee517f372212af7b432e13fa39d0490075b35 100644 (file)
@@ -555,10 +555,7 @@ nouveau_gem_pushbuf_validate(struct nouveau_channel *chan,
 static inline void
 u_free(void *addr)
 {
-       if (!is_vmalloc_addr(addr))
-               kfree(addr);
-       else
-               vfree(addr);
+       kvfree(addr);
 }
 
 static inline void *
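u_free() collapses to kvfree(), which checks is_vmalloc_addr() internally and hands the pointer to vfree() or kfree() accordingly, so callers no longer need to track how a buffer was allocated. A sketch of the usual allocate/free pairing, with hypothetical helpers (not nouveau's actual u_memcpya):

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

/* Try kmalloc first, fall back to vmalloc for large sizes;
 * either result may be handed to kvfree(). */
static void *foo_alloc_big(size_t size)
{
	void *p = kmalloc(size, GFP_KERNEL | __GFP_NOWARN);

	if (!p)
		p = vmalloc(size);
	return p;
}

static void foo_free_big(void *p)
{
	kvfree(p);
}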
index b0688b0c8908f5ba0704657f6aa39d1b47508a54..4ecf5caa8c6d9745f9421710179aa2f81ef3587f 100644 (file)
@@ -4604,6 +4604,31 @@ void cik_compute_set_wptr(struct radeon_device *rdev,
        WDOORBELL32(ring->doorbell_index, ring->wptr);
 }
 
+static void cik_compute_stop(struct radeon_device *rdev,
+                            struct radeon_ring *ring)
+{
+       u32 j, tmp;
+
+       cik_srbm_select(rdev, ring->me, ring->pipe, ring->queue, 0);
+       /* Disable wptr polling. */
+       tmp = RREG32(CP_PQ_WPTR_POLL_CNTL);
+       tmp &= ~WPTR_POLL_EN;
+       WREG32(CP_PQ_WPTR_POLL_CNTL, tmp);
+       /* Disable HQD. */
+       if (RREG32(CP_HQD_ACTIVE) & 1) {
+               WREG32(CP_HQD_DEQUEUE_REQUEST, 1);
+               for (j = 0; j < rdev->usec_timeout; j++) {
+                       if (!(RREG32(CP_HQD_ACTIVE) & 1))
+                               break;
+                       udelay(1);
+               }
+               WREG32(CP_HQD_DEQUEUE_REQUEST, 0);
+               WREG32(CP_HQD_PQ_RPTR, 0);
+               WREG32(CP_HQD_PQ_WPTR, 0);
+       }
+       cik_srbm_select(rdev, 0, 0, 0, 0);
+}
+
 /**
  * cik_cp_compute_enable - enable/disable the compute CP MEs
  *
@@ -4617,6 +4642,15 @@ static void cik_cp_compute_enable(struct radeon_device *rdev, bool enable)
        if (enable)
                WREG32(CP_MEC_CNTL, 0);
        else {
+               /*
+                * To make hibernation reliable we need to clear compute ring
+                * configuration before halting the compute ring.
+                */
+               mutex_lock(&rdev->srbm_mutex);
+               cik_compute_stop(rdev,&rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX]);
+               cik_compute_stop(rdev,&rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX]);
+               mutex_unlock(&rdev->srbm_mutex);
+
                WREG32(CP_MEC_CNTL, (MEC_ME1_HALT | MEC_ME2_HALT));
                rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX].ready = false;
                rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX].ready = false;
index f86eb54e7763d65341006c4ff25f4e8f8076760c..d16f2eebd95e6b2df5412d072023a89d43d32ae2 100644 (file)
@@ -268,6 +268,17 @@ static void cik_sdma_gfx_stop(struct radeon_device *rdev)
        }
        rdev->ring[R600_RING_TYPE_DMA_INDEX].ready = false;
        rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX].ready = false;
+
+       /* FIXME: use something lighter than this big hammer. After a few days we
+        * could not find a better combination, so reset the SDMA blocks, as it
+        * seems we do not shut them down properly. This fixes hibernation and
+        * does not affect suspend to RAM.
+        */
+       WREG32(SRBM_SOFT_RESET, SOFT_RESET_SDMA | SOFT_RESET_SDMA1);
+       (void)RREG32(SRBM_SOFT_RESET);
+       udelay(50);
+       WREG32(SRBM_SOFT_RESET, 0);
+       (void)RREG32(SRBM_SOFT_RESET);
 }
 
 /**
index c89215275053d3168e6deba468b32fc05ba89bf6..fa719c53449bcd90e009e1b59d1b3e1ed5bff6f3 100644 (file)
@@ -469,22 +469,22 @@ void radeon_audio_detect(struct drm_connector *connector,
        dig = radeon_encoder->enc_priv;
 
        if (status == connector_status_connected) {
-               struct radeon_connector *radeon_connector;
-               int sink_type;
-
                if (!drm_detect_monitor_audio(radeon_connector_edid(connector))) {
                        radeon_encoder->audio = NULL;
                        return;
                }
 
-               radeon_connector = to_radeon_connector(connector);
-               sink_type = radeon_dp_getsinktype(radeon_connector);
+               if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) {
+                       struct radeon_connector *radeon_connector = to_radeon_connector(connector);
 
-               if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort &&
-                       sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT)
-                       radeon_encoder->audio = rdev->audio.dp_funcs;
-               else
+                       if (radeon_dp_getsinktype(radeon_connector) ==
+                           CONNECTOR_OBJECT_ID_DISPLAYPORT)
+                               radeon_encoder->audio = rdev->audio.dp_funcs;
+                       else
+                               radeon_encoder->audio = rdev->audio.hdmi_funcs;
+               } else {
                        radeon_encoder->audio = rdev->audio.hdmi_funcs;
+               }
 
                dig->afmt->pin = radeon_audio_get_pin(connector->encoder);
                radeon_audio_enable(rdev, dig->afmt->pin, 0xf);
index aeb676708e60cfb1871326bfc5a689631bb98741..634793ea841889847ac090c32470548a1cae418d 100644 (file)
@@ -257,7 +257,6 @@ static int radeonfb_create(struct drm_fb_helper *helper,
        }
 
        info->par = rfbdev;
-       info->skip_vt_switch = true;
 
        ret = radeon_framebuffer_init(rdev->ddev, &rfbdev->rfb, &mode_cmd, gobj);
        if (ret) {
index edafd3c2b17028a73ff5128568c73adfaff0f85b..06ac59fe332ab089d21b279aba092f615710c7ca 100644 (file)
@@ -719,7 +719,7 @@ static int radeon_ttm_tt_populate(struct ttm_tt *ttm)
                return 0;
 
        if (gtt && gtt->userptr) {
-               ttm->sg = kcalloc(1, sizeof(struct sg_table), GFP_KERNEL);
+               ttm->sg = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
                if (!ttm->sg)
                        return -ENOMEM;
 
index 3662157c2b1582b54291b607be1667579eef368b..ec10533a49b87a905aa82eda96e48bddae32d87f 100644 (file)
@@ -1129,12 +1129,12 @@ void radeon_vm_bo_rmv(struct radeon_device *rdev,
                interval_tree_remove(&bo_va->it, &vm->va);
 
        spin_lock(&vm->status_lock);
-       if (list_empty(&bo_va->vm_status)) {
+       list_del(&bo_va->vm_status);
+       if (bo_va->it.start || bo_va->it.last) {
                bo_va->bo = radeon_bo_ref(bo_va->bo);
                list_add(&bo_va->vm_status, &vm->freed);
        } else {
                radeon_fence_unref(&bo_va->last_pt_update);
-               list_del(&bo_va->vm_status);
                kfree(bo_va);
        }
        spin_unlock(&vm->status_lock);
index 3962176ee71325ca4434fa34643f117c58b2d217..01b558fe369539f447d36493f3bcd7e1bc4f3ded 100644 (file)
@@ -21,6 +21,7 @@
 #include <drm/drm_fb_helper.h>
 #include <linux/dma-mapping.h>
 #include <linux/pm_runtime.h>
+#include <linux/module.h>
 #include <linux/of_graph.h>
 #include <linux/component.h>
 
index 4557f335a8a56f243ad4aa326a1826572116d3a5..dc65161d7cad20acb2f079bdb6b229cf779b3613 100644 (file)
@@ -19,6 +19,7 @@
 #include <drm/drm_plane_helper.h>
 
 #include <linux/kernel.h>
+#include <linux/module.h>
 #include <linux/platform_device.h>
 #include <linux/clk.h>
 #include <linux/of.h>
index 9d056417d88c5ee9d70d018c7341f10d7e92fb96..f9aaf37262be4cb120201313c30b167896f66aaf 100644 (file)
@@ -24,6 +24,7 @@
 #define __LINUX_HSI_OMAP_SSI_H__
 
 #include <linux/device.h>
+#include <linux/module.h>
 #include <linux/platform_device.h>
 #include <linux/hsi/hsi.h>
 #include <linux/gpio.h>
index 2a808822af2163d89d83fb02ce1e67eb5f1474d7..37c16afe007a0524eaacb5edcae9399bebfae897 100644 (file)
@@ -777,7 +777,7 @@ static int __init i8k_init_hwmon(void)
        if (err >= 0)
                i8k_hwmon_flags |= I8K_HWMON_HAVE_FAN2;
 
-       i8k_hwmon_dev = hwmon_device_register_with_groups(NULL, "dell-smm",
+       i8k_hwmon_dev = hwmon_device_register_with_groups(NULL, "dell_smm",
                                                          NULL, i8k_groups);
        if (IS_ERR(i8k_hwmon_dev)) {
                err = PTR_ERR(i8k_hwmon_dev);
index d219c06a857bb5795a176bb88d9e186fd6a4b286..972444a14cca5feb333c5244fd438fbb435f1e7e 100644 (file)
 /* output format */
 #define MCP3021_SAR_SHIFT      2
 #define MCP3021_SAR_MASK       0x3ff
-
 #define MCP3021_OUTPUT_RES     10      /* 10-bit resolution */
-#define MCP3021_OUTPUT_SCALE   4
 
 #define MCP3221_SAR_SHIFT      0
 #define MCP3221_SAR_MASK       0xfff
 #define MCP3221_OUTPUT_RES     12      /* 12-bit resolution */
-#define MCP3221_OUTPUT_SCALE   1
 
 enum chips {
        mcp3021,
@@ -54,7 +51,6 @@ struct mcp3021_data {
        u16 sar_shift;
        u16 sar_mask;
        u8 output_res;
-       u8 output_scale;
 };
 
 static int mcp3021_read16(struct i2c_client *client)
@@ -84,13 +80,7 @@ static int mcp3021_read16(struct i2c_client *client)
 
 static inline u16 volts_from_reg(struct mcp3021_data *data, u16 val)
 {
-       if (val == 0)
-               return 0;
-
-       val = val * data->output_scale - data->output_scale / 2;
-
-       return val * DIV_ROUND_CLOSEST(data->vdd,
-                       (1 << data->output_res) * data->output_scale);
+       return DIV_ROUND_CLOSEST(data->vdd * val, 1 << data->output_res);
 }
 
 static ssize_t show_in_input(struct device *dev, struct device_attribute *attr,
@@ -132,14 +122,12 @@ static int mcp3021_probe(struct i2c_client *client,
                data->sar_shift = MCP3021_SAR_SHIFT;
                data->sar_mask = MCP3021_SAR_MASK;
                data->output_res = MCP3021_OUTPUT_RES;
-               data->output_scale = MCP3021_OUTPUT_SCALE;
                break;
 
        case mcp3221:
                data->sar_shift = MCP3221_SAR_SHIFT;
                data->sar_mask = MCP3221_SAR_MASK;
                data->output_res = MCP3221_OUTPUT_RES;
-               data->output_scale = MCP3221_OUTPUT_SCALE;
                break;
        }
 
index 55765790907b3768eb1c4b23e2e3bd77d4eaf294..28fcb2e246d55a7acc52703e434b98de3e22c45b 100644 (file)
@@ -547,7 +547,7 @@ static umode_t nct7802_temp_is_visible(struct kobject *kobj,
        if (index >= 9 && index < 18 &&
            (reg & 0x0c) != 0x04 && (reg & 0x0c) != 0x08)       /* RD2 */
                return 0;
-       if (index >= 18 && index < 27 && (reg & 0x30) != 0x10)  /* RD3 */
+       if (index >= 18 && index < 27 && (reg & 0x30) != 0x20)  /* RD3 */
                return 0;
        if (index >= 27 && index < 35)                          /* local */
                return attr->mode;
index b10353b31806be160dffac0d65be632c1f840eb9..697007afb99c2221e931da3617704dd0da3a1033 100644 (file)
@@ -1937,27 +1937,11 @@ static inline void w83627ehf_init_device(struct w83627ehf_data *data,
 static void w82627ehf_swap_tempreg(struct w83627ehf_data *data,
                                   int r1, int r2)
 {
-       u16 tmp;
-
-       tmp = data->temp_src[r1];
-       data->temp_src[r1] = data->temp_src[r2];
-       data->temp_src[r2] = tmp;
-
-       tmp = data->reg_temp[r1];
-       data->reg_temp[r1] = data->reg_temp[r2];
-       data->reg_temp[r2] = tmp;
-
-       tmp = data->reg_temp_over[r1];
-       data->reg_temp_over[r1] = data->reg_temp_over[r2];
-       data->reg_temp_over[r2] = tmp;
-
-       tmp = data->reg_temp_hyst[r1];
-       data->reg_temp_hyst[r1] = data->reg_temp_hyst[r2];
-       data->reg_temp_hyst[r2] = tmp;
-
-       tmp = data->reg_temp_config[r1];
-       data->reg_temp_config[r1] = data->reg_temp_config[r2];
-       data->reg_temp_config[r2] = tmp;
+       swap(data->temp_src[r1], data->temp_src[r2]);
+       swap(data->reg_temp[r1], data->reg_temp[r2]);
+       swap(data->reg_temp_over[r1], data->reg_temp_over[r2]);
+       swap(data->reg_temp_hyst[r1], data->reg_temp_hyst[r2]);
+       swap(data->reg_temp_config[r1], data->reg_temp_config[r2]);
 }
 
 static void
index 4068db4d9580b538c70ffccc7db71d29c5515967..0a8bce726b4b037797126ea8b8b5cf8d229bb253 100644 (file)
@@ -289,10 +289,7 @@ struct w83792d_data {
        u8 temp1[3];            /* current, over, thyst */
        u8 temp_add[2][6];      /* Register value */
        u8 fan_div[7];          /* Register encoding, shifted right */
-       u8 pwm[7];              /*
-                                * We only consider the first 3 set of pwm,
-                                * although 792 chip has 7 set of pwm.
-                                */
+       u8 pwm[7];              /* The 7 PWM outputs */
        u8 pwmenable[3];
        u32 alarms;             /* realtime status register encoding,combined */
        u8 chassis;             /* Chassis status */
@@ -1075,6 +1072,10 @@ static DEVICE_ATTR(intrusion0_alarm, S_IRUGO | S_IWUSR,
 static SENSOR_DEVICE_ATTR(pwm1, S_IWUSR | S_IRUGO, show_pwm, store_pwm, 0);
 static SENSOR_DEVICE_ATTR(pwm2, S_IWUSR | S_IRUGO, show_pwm, store_pwm, 1);
 static SENSOR_DEVICE_ATTR(pwm3, S_IWUSR | S_IRUGO, show_pwm, store_pwm, 2);
+static SENSOR_DEVICE_ATTR(pwm4, S_IWUSR | S_IRUGO, show_pwm, store_pwm, 3);
+static SENSOR_DEVICE_ATTR(pwm5, S_IWUSR | S_IRUGO, show_pwm, store_pwm, 4);
+static SENSOR_DEVICE_ATTR(pwm6, S_IWUSR | S_IRUGO, show_pwm, store_pwm, 5);
+static SENSOR_DEVICE_ATTR(pwm7, S_IWUSR | S_IRUGO, show_pwm, store_pwm, 6);
 static SENSOR_DEVICE_ATTR(pwm1_enable, S_IWUSR | S_IRUGO,
                        show_pwmenable, store_pwmenable, 1);
 static SENSOR_DEVICE_ATTR(pwm2_enable, S_IWUSR | S_IRUGO,
@@ -1087,6 +1088,14 @@ static SENSOR_DEVICE_ATTR(pwm2_mode, S_IWUSR | S_IRUGO,
                        show_pwm_mode, store_pwm_mode, 1);
 static SENSOR_DEVICE_ATTR(pwm3_mode, S_IWUSR | S_IRUGO,
                        show_pwm_mode, store_pwm_mode, 2);
+static SENSOR_DEVICE_ATTR(pwm4_mode, S_IWUSR | S_IRUGO,
+                       show_pwm_mode, store_pwm_mode, 3);
+static SENSOR_DEVICE_ATTR(pwm5_mode, S_IWUSR | S_IRUGO,
+                       show_pwm_mode, store_pwm_mode, 4);
+static SENSOR_DEVICE_ATTR(pwm6_mode, S_IWUSR | S_IRUGO,
+                       show_pwm_mode, store_pwm_mode, 5);
+static SENSOR_DEVICE_ATTR(pwm7_mode, S_IWUSR | S_IRUGO,
+                       show_pwm_mode, store_pwm_mode, 6);
 static SENSOR_DEVICE_ATTR(tolerance1, S_IWUSR | S_IRUGO,
                        show_tolerance, store_tolerance, 1);
 static SENSOR_DEVICE_ATTR(tolerance2, S_IWUSR | S_IRUGO,
@@ -1177,30 +1186,38 @@ static SENSOR_DEVICE_ATTR(fan6_div, S_IWUSR | S_IRUGO,
 static SENSOR_DEVICE_ATTR(fan7_div, S_IWUSR | S_IRUGO,
                        show_fan_div, store_fan_div, 7);
 
-static struct attribute *w83792d_attributes_fan[4][5] = {
+static struct attribute *w83792d_attributes_fan[4][7] = {
        {
                &sensor_dev_attr_fan4_input.dev_attr.attr,
                &sensor_dev_attr_fan4_min.dev_attr.attr,
                &sensor_dev_attr_fan4_div.dev_attr.attr,
                &sensor_dev_attr_fan4_alarm.dev_attr.attr,
+               &sensor_dev_attr_pwm4.dev_attr.attr,
+               &sensor_dev_attr_pwm4_mode.dev_attr.attr,
                NULL
        }, {
                &sensor_dev_attr_fan5_input.dev_attr.attr,
                &sensor_dev_attr_fan5_min.dev_attr.attr,
                &sensor_dev_attr_fan5_div.dev_attr.attr,
                &sensor_dev_attr_fan5_alarm.dev_attr.attr,
+               &sensor_dev_attr_pwm5.dev_attr.attr,
+               &sensor_dev_attr_pwm5_mode.dev_attr.attr,
                NULL
        }, {
                &sensor_dev_attr_fan6_input.dev_attr.attr,
                &sensor_dev_attr_fan6_min.dev_attr.attr,
                &sensor_dev_attr_fan6_div.dev_attr.attr,
                &sensor_dev_attr_fan6_alarm.dev_attr.attr,
+               &sensor_dev_attr_pwm6.dev_attr.attr,
+               &sensor_dev_attr_pwm6_mode.dev_attr.attr,
                NULL
        }, {
                &sensor_dev_attr_fan7_input.dev_attr.attr,
                &sensor_dev_attr_fan7_min.dev_attr.attr,
                &sensor_dev_attr_fan7_div.dev_attr.attr,
                &sensor_dev_attr_fan7_alarm.dev_attr.attr,
+               &sensor_dev_attr_pwm7.dev_attr.attr,
+               &sensor_dev_attr_pwm7_mode.dev_attr.attr,
                NULL
        }
 };
index 3612cb5b30b206a066c75d835402654cacf7d002..73a40166285362f49326f3c2069721e2d061e3aa 100644 (file)
@@ -18,6 +18,30 @@ config HWSPINLOCK_OMAP
 
          If unsure, say N.
 
+config HWSPINLOCK_QCOM
+       tristate "Qualcomm Hardware Spinlock device"
+       depends on ARCH_QCOM
+       select HWSPINLOCK
+       select MFD_SYSCON
+       help
+         Say y here to support the Qualcomm Hardware Mutex functionality, which
+         provides a synchronisation mechanism for the various processors on
+         the SoC.
+
+         If unsure, say N.
+
+config HWSPINLOCK_SIRF
+       tristate "SIRF Hardware Spinlock device"
+       depends on ARCH_SIRF
+       select HWSPINLOCK
+       help
+         Say y here to support the SIRF Hardware Spinlock device, which
+         provides a synchronisation mechanism for the various processors
+         on the SoC.
+
+         It's safe to say n here if you're not interested in the SIRF
+         hardware spinlock or just want a bare minimum kernel.
+
 config HSEM_U8500
        tristate "STE Hardware Semaphore functionality"
        depends on ARCH_U8500
index 93eb64b664863b58b3cb1fb0c32c5c04ead0dad1..6b59cb5a4f3a857cf26d4dc7eebfd014f338414f 100644 (file)
@@ -4,4 +4,6 @@
 
 obj-$(CONFIG_HWSPINLOCK)               += hwspinlock_core.o
 obj-$(CONFIG_HWSPINLOCK_OMAP)          += omap_hwspinlock.o
+obj-$(CONFIG_HWSPINLOCK_QCOM)          += qcom_hwspinlock.o
+obj-$(CONFIG_HWSPINLOCK_SIRF)          += sirf_hwspinlock.o
 obj-$(CONFIG_HSEM_U8500)               += u8500_hsem.o
index 461a0d739d75b0d646d9197a89ed6852f932be07..52f708bcf77f397952ea0f278ce5b161780e076a 100644 (file)
@@ -27,6 +27,7 @@
 #include <linux/hwspinlock.h>
 #include <linux/pm_runtime.h>
 #include <linux/mutex.h>
+#include <linux/of.h>
 
 #include "hwspinlock_internal.h"
 
@@ -257,6 +258,84 @@ void __hwspin_unlock(struct hwspinlock *hwlock, int mode, unsigned long *flags)
 }
 EXPORT_SYMBOL_GPL(__hwspin_unlock);
 
+/**
+ * of_hwspin_lock_simple_xlate - translate hwlock_spec to return a lock id
+ * @hwlock_spec: hwlock specifier as found in the device tree
+ *
+ * This is a simple translation function, suitable for hwspinlock platform
+ * drivers that only have a lock specifier length of 1.
+ *
+ * Returns a relative index of the lock within a specified bank on success,
+ * or -EINVAL on invalid specifier cell count.
+ */
+static inline int
+of_hwspin_lock_simple_xlate(const struct of_phandle_args *hwlock_spec)
+{
+       if (WARN_ON(hwlock_spec->args_count != 1))
+               return -EINVAL;
+
+       return hwlock_spec->args[0];
+}
+
+/**
+ * of_hwspin_lock_get_id() - get lock id for an OF phandle-based specific lock
+ * @np: device node from which to request the specific hwlock
+ * @index: index of the hwlock in the list of values
+ *
+ * This function provides a means for DT users of the hwspinlock module to
+ * get the global lock id of a specific hwspinlock using the phandle of the
+ * hwspinlock device, so that it can be requested using the normal
+ * hwspin_lock_request_specific() API.
+ *
+ * Returns the global lock id number on success, -EPROBE_DEFER if the hwspinlock
+ * device is not yet registered, -EINVAL on an invalid specifier value, or an
+ * appropriate error as returned from OF parsing of the DT client node.
+ */
+int of_hwspin_lock_get_id(struct device_node *np, int index)
+{
+       struct of_phandle_args args;
+       struct hwspinlock *hwlock;
+       struct radix_tree_iter iter;
+       void **slot;
+       int id;
+       int ret;
+
+       ret = of_parse_phandle_with_args(np, "hwlocks", "#hwlock-cells", index,
+                                        &args);
+       if (ret)
+               return ret;
+
+       /* Find the hwspinlock device: we need its base_id */
+       ret = -EPROBE_DEFER;
+       rcu_read_lock();
+       radix_tree_for_each_slot(slot, &hwspinlock_tree, &iter, 0) {
+               hwlock = radix_tree_deref_slot(slot);
+               if (unlikely(!hwlock))
+                       continue;
+
+               if (hwlock->bank->dev->of_node == args.np) {
+                       ret = 0;
+                       break;
+               }
+       }
+       rcu_read_unlock();
+       if (ret < 0)
+               goto out;
+
+       id = of_hwspin_lock_simple_xlate(&args);
+       if (id < 0 || id >= hwlock->bank->num_locks) {
+               ret = -EINVAL;
+               goto out;
+       }
+       id += hwlock->bank->base_id;
+
+out:
+       of_node_put(args.np);
+       return ret ? ret : id;
+}
+EXPORT_SYMBOL_GPL(of_hwspin_lock_get_id);
+
 static int hwspin_lock_register_single(struct hwspinlock *hwlock, int id)
 {
        struct hwspinlock *tmp;
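The two helpers added above are the device-tree plumbing that lets a client driver resolve a "hwlocks" phandle into a global lock id and then claim the lock through the existing API. A minimal consumer sketch, assuming a client node carrying something like hwlocks = <&hwlock 0> (node name, index, and timeout are illustrative):

#include <linux/hwspinlock.h>
#include <linux/of.h>

static int demo_use_dt_hwlock(struct device_node *np)
{
        struct hwspinlock *lock;
        int id, ret;

        id = of_hwspin_lock_get_id(np, 0);      /* first entry of "hwlocks" */
        if (id < 0)
                return id;                      /* may be -EPROBE_DEFER */

        lock = hwspin_lock_request_specific(id);
        if (!lock)
                return -EBUSY;

        ret = hwspin_lock_timeout(lock, 10);    /* wait up to 10 ms */
        if (!ret) {
                /* ... touch the shared resource ... */
                hwspin_unlock(lock);
        }

        hwspin_lock_free(lock);
        return ret;
}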
index 47a275c6ece1d24487ca0f832f2913a597b8a87b..ad2f8cac8487c0a6edde5d6847dc57f97409210d 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * OMAP hardware spinlock driver
  *
- * Copyright (C) 2010 Texas Instruments Incorporated - http://www.ti.com
+ * Copyright (C) 2010-2015 Texas Instruments Incorporated - http://www.ti.com
  *
  * Contact: Simon Que <sque@ti.com>
  *          Hari Kanigeri <h-kanigeri2@ti.com>
@@ -27,6 +27,7 @@
 #include <linux/slab.h>
 #include <linux/spinlock.h>
 #include <linux/hwspinlock.h>
+#include <linux/of.h>
 #include <linux/platform_device.h>
 
 #include "hwspinlock_internal.h"
@@ -80,14 +81,16 @@ static const struct hwspinlock_ops omap_hwspinlock_ops = {
 
 static int omap_hwspinlock_probe(struct platform_device *pdev)
 {
-       struct hwspinlock_pdata *pdata = pdev->dev.platform_data;
+       struct device_node *node = pdev->dev.of_node;
        struct hwspinlock_device *bank;
        struct hwspinlock *hwlock;
        struct resource *res;
        void __iomem *io_base;
        int num_locks, i, ret;
+       /* Only a single hwspinlock block device is supported */
+       int base_id = 0;
 
-       if (!pdata)
+       if (!node)
                return -ENODEV;
 
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -141,7 +144,7 @@ static int omap_hwspinlock_probe(struct platform_device *pdev)
                hwlock->priv = io_base + LOCK_BASE_OFFSET + sizeof(u32) * i;
 
        ret = hwspin_lock_register(bank, &pdev->dev, &omap_hwspinlock_ops,
-                                               pdata->base_id, num_locks);
+                                               base_id, num_locks);
        if (ret)
                goto reg_fail;
 
@@ -174,11 +177,18 @@ static int omap_hwspinlock_remove(struct platform_device *pdev)
        return 0;
 }
 
+static const struct of_device_id omap_hwspinlock_of_match[] = {
+       { .compatible = "ti,omap4-hwspinlock", },
+       { /* end */ },
+};
+MODULE_DEVICE_TABLE(of, omap_hwspinlock_of_match);
+
 static struct platform_driver omap_hwspinlock_driver = {
        .probe          = omap_hwspinlock_probe,
        .remove         = omap_hwspinlock_remove,
        .driver         = {
                .name   = "omap_hwspinlock",
+               .of_match_table = of_match_ptr(omap_hwspinlock_of_match),
        },
 };
 
diff --git a/drivers/hwspinlock/qcom_hwspinlock.c b/drivers/hwspinlock/qcom_hwspinlock.c
new file mode 100644 (file)
index 0000000..c752447
--- /dev/null
@@ -0,0 +1,181 @@
+/*
+ * Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015, Sony Mobile Communications AB
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/hwspinlock.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+
+#include "hwspinlock_internal.h"
+
+#define QCOM_MUTEX_APPS_PROC_ID        1
+#define QCOM_MUTEX_NUM_LOCKS   32
+
+static int qcom_hwspinlock_trylock(struct hwspinlock *lock)
+{
+       struct regmap_field *field = lock->priv;
+       u32 lock_owner;
+       int ret;
+
+       ret = regmap_field_write(field, QCOM_MUTEX_APPS_PROC_ID);
+       if (ret)
+               return ret;
+
+       ret = regmap_field_read(field, &lock_owner);
+       if (ret)
+               return ret;
+
+       return lock_owner == QCOM_MUTEX_APPS_PROC_ID;
+}
+
+static void qcom_hwspinlock_unlock(struct hwspinlock *lock)
+{
+       struct regmap_field *field = lock->priv;
+       u32 lock_owner;
+       int ret;
+
+       ret = regmap_field_read(field, &lock_owner);
+       if (ret) {
+               pr_err("%s: unable to query spinlock owner\n", __func__);
+               return;
+       }
+
+       if (lock_owner != QCOM_MUTEX_APPS_PROC_ID) {
+               pr_err("%s: spinlock not owned by us (actual owner is %d)\n",
+                               __func__, lock_owner);
+       }
+
+       ret = regmap_field_write(field, 0);
+       if (ret)
+               pr_err("%s: failed to unlock spinlock\n", __func__);
+}
+
+static const struct hwspinlock_ops qcom_hwspinlock_ops = {
+       .trylock        = qcom_hwspinlock_trylock,
+       .unlock         = qcom_hwspinlock_unlock,
+};
+
+static const struct of_device_id qcom_hwspinlock_of_match[] = {
+       { .compatible = "qcom,sfpb-mutex" },
+       { .compatible = "qcom,tcsr-mutex" },
+       { }
+};
+MODULE_DEVICE_TABLE(of, qcom_hwspinlock_of_match);
+
+static int qcom_hwspinlock_probe(struct platform_device *pdev)
+{
+       struct hwspinlock_device *bank;
+       struct device_node *syscon;
+       struct reg_field field;
+       struct regmap *regmap;
+       size_t array_size;
+       u32 stride;
+       u32 base;
+       int ret;
+       int i;
+
+       syscon = of_parse_phandle(pdev->dev.of_node, "syscon", 0);
+       if (!syscon) {
+               dev_err(&pdev->dev, "no syscon property\n");
+               return -ENODEV;
+       }
+
+       regmap = syscon_node_to_regmap(syscon);
+       if (IS_ERR(regmap))
+               return PTR_ERR(regmap);
+
+       ret = of_property_read_u32_index(pdev->dev.of_node, "syscon", 1, &base);
+       if (ret < 0) {
+               dev_err(&pdev->dev, "no offset in syscon\n");
+               return -EINVAL;
+       }
+
+       ret = of_property_read_u32_index(pdev->dev.of_node, "syscon", 2, &stride);
+       if (ret < 0) {
+               dev_err(&pdev->dev, "no stride syscon\n");
+               return -EINVAL;
+       }
+
+       array_size = QCOM_MUTEX_NUM_LOCKS * sizeof(struct hwspinlock);
+       bank = devm_kzalloc(&pdev->dev, sizeof(*bank) + array_size, GFP_KERNEL);
+       if (!bank)
+               return -ENOMEM;
+
+       platform_set_drvdata(pdev, bank);
+
+       for (i = 0; i < QCOM_MUTEX_NUM_LOCKS; i++) {
+               field.reg = base + i * stride;
+               field.lsb = 0;
+               field.msb = 31;
+
+               bank->lock[i].priv = devm_regmap_field_alloc(&pdev->dev,
+                                                            regmap, field);
+       }
+
+       pm_runtime_enable(&pdev->dev);
+
+       ret = hwspin_lock_register(bank, &pdev->dev, &qcom_hwspinlock_ops,
+                                  0, QCOM_MUTEX_NUM_LOCKS);
+       if (ret)
+               pm_runtime_disable(&pdev->dev);
+
+       return ret;
+}
+
+static int qcom_hwspinlock_remove(struct platform_device *pdev)
+{
+       struct hwspinlock_device *bank = platform_get_drvdata(pdev);
+       int ret;
+
+       ret = hwspin_lock_unregister(bank);
+       if (ret) {
+               dev_err(&pdev->dev, "%s failed: %d\n", __func__, ret);
+               return ret;
+       }
+
+       pm_runtime_disable(&pdev->dev);
+
+       return 0;
+}
+
+static struct platform_driver qcom_hwspinlock_driver = {
+       .probe          = qcom_hwspinlock_probe,
+       .remove         = qcom_hwspinlock_remove,
+       .driver         = {
+               .name   = "qcom_hwspinlock",
+               .of_match_table = qcom_hwspinlock_of_match,
+       },
+};
+
+static int __init qcom_hwspinlock_init(void)
+{
+       return platform_driver_register(&qcom_hwspinlock_driver);
+}
+/* board init code might need to reserve hwspinlocks for predefined purposes */
+postcore_initcall(qcom_hwspinlock_init);
+
+static void __exit qcom_hwspinlock_exit(void)
+{
+       platform_driver_unregister(&qcom_hwspinlock_driver);
+}
+module_exit(qcom_hwspinlock_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Hardware spinlock driver for Qualcomm SoCs");
diff --git a/drivers/hwspinlock/sirf_hwspinlock.c b/drivers/hwspinlock/sirf_hwspinlock.c
new file mode 100644 (file)
index 0000000..1601854
--- /dev/null
@@ -0,0 +1,136 @@
+/*
+ * SIRF hardware spinlock driver
+ *
+ * Copyright (c) 2015 Cambridge Silicon Radio Limited, a CSR plc group company.
+ *
+ * Licensed under GPLv2.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/pm_runtime.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/hwspinlock.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+
+#include "hwspinlock_internal.h"
+
+struct sirf_hwspinlock {
+       void __iomem *io_base;
+       struct hwspinlock_device bank;
+};
+
+/* Number of hardware spinlocks */
+#define        HW_SPINLOCK_NUMBER      30
+
+/* Hardware spinlock register offsets */
+#define HW_SPINLOCK_BASE       0x404
+#define HW_SPINLOCK_OFFSET(x)  (HW_SPINLOCK_BASE + 0x4 * (x))
+
+static int sirf_hwspinlock_trylock(struct hwspinlock *lock)
+{
+       void __iomem *lock_addr = lock->priv;
+
+       /* attempt to acquire the lock by reading value == 1 from it */
+       return !!readl(lock_addr);
+}
+
+static void sirf_hwspinlock_unlock(struct hwspinlock *lock)
+{
+       void __iomem *lock_addr = lock->priv;
+
+       /* release the lock by writing 0 to it */
+       writel(0, lock_addr);
+}
+
+static const struct hwspinlock_ops sirf_hwspinlock_ops = {
+       .trylock = sirf_hwspinlock_trylock,
+       .unlock = sirf_hwspinlock_unlock,
+};
+
+static int sirf_hwspinlock_probe(struct platform_device *pdev)
+{
+       struct sirf_hwspinlock *hwspin;
+       struct hwspinlock *hwlock;
+       int idx, ret;
+
+       if (!pdev->dev.of_node)
+               return -ENODEV;
+
+       hwspin = devm_kzalloc(&pdev->dev, sizeof(*hwspin) +
+                       sizeof(*hwlock) * HW_SPINLOCK_NUMBER, GFP_KERNEL);
+       if (!hwspin)
+               return -ENOMEM;
+
+       /* retrieve io base */
+       hwspin->io_base = of_iomap(pdev->dev.of_node, 0);
+       if (!hwspin->io_base)
+               return -ENOMEM;
+
+       for (idx = 0; idx < HW_SPINLOCK_NUMBER; idx++) {
+               hwlock = &hwspin->bank.lock[idx];
+               hwlock->priv = hwspin->io_base + HW_SPINLOCK_OFFSET(idx);
+       }
+
+       platform_set_drvdata(pdev, hwspin);
+
+       pm_runtime_enable(&pdev->dev);
+
+       ret = hwspin_lock_register(&hwspin->bank, &pdev->dev,
+                                  &sirf_hwspinlock_ops, 0,
+                                  HW_SPINLOCK_NUMBER);
+       if (ret)
+               goto reg_failed;
+
+       return 0;
+
+reg_failed:
+       pm_runtime_disable(&pdev->dev);
+       iounmap(hwspin->io_base);
+
+       return ret;
+}
+
+static int sirf_hwspinlock_remove(struct platform_device *pdev)
+{
+       struct sirf_hwspinlock *hwspin = platform_get_drvdata(pdev);
+       int ret;
+
+       ret = hwspin_lock_unregister(&hwspin->bank);
+       if (ret) {
+               dev_err(&pdev->dev, "%s failed: %d\n", __func__, ret);
+               return ret;
+       }
+
+       pm_runtime_disable(&pdev->dev);
+
+       iounmap(hwspin->io_base);
+
+       return 0;
+}
+
+static const struct of_device_id sirf_hwpinlock_ids[] = {
+       { .compatible = "sirf,hwspinlock", },
+       {},
+};
+MODULE_DEVICE_TABLE(of, sirf_hwpinlock_ids);
+
+static struct platform_driver sirf_hwspinlock_driver = {
+       .probe = sirf_hwspinlock_probe,
+       .remove = sirf_hwspinlock_remove,
+       .driver = {
+               .name = "atlas7_hwspinlock",
+               .of_match_table = of_match_ptr(sirf_hwpinlock_ids),
+       },
+};
+
+module_platform_driver(sirf_hwspinlock_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("SIRF Hardware spinlock driver");
+MODULE_AUTHOR("Wei Chen <wei.chen@csr.com>");
index e29b02ca9e915d31735d35d9cf46438d38182e22..f086ef387475989867df3ab2b34f6b8232ba7fa5 100644 (file)
@@ -199,7 +199,7 @@ static int ide_set_dev_param_mask(const char *s, const struct kernel_param *kp)
        return 0;
 }
 
-static struct kernel_param_ops param_ops_ide_dev_mask = {
+static const struct kernel_param_ops param_ops_ide_dev_mask = {
        .set = ide_set_dev_param_mask
 };
 
index 8d594517cd292821182e0dc363a32ea0f6d98c9f..7ffc748cb97357fa3aeb4d6f8e2f20cbff5d2ecf 100644 (file)
@@ -245,10 +245,7 @@ int ipz_queue_ctor(struct ehca_pd *pd, struct ipz_queue *queue,
 ipz_queue_ctor_exit0:
        ehca_gen_err("Couldn't alloc pages queue=%p "
                 "nr_of_pages=%x",  queue, nr_of_pages);
-       if (is_vmalloc_addr(queue->queue_pages))
-               vfree(queue->queue_pages);
-       else
-               kfree(queue->queue_pages);
+       kvfree(queue->queue_pages);
 
        return 0;
 }
@@ -270,10 +267,7 @@ int ipz_queue_dtor(struct ehca_pd *pd, struct ipz_queue *queue)
                        free_page((unsigned long)queue->queue_pages[i]);
        }
 
-       if (is_vmalloc_addr(queue->queue_pages))
-               vfree(queue->queue_pages);
-       else
-               kfree(queue->queue_pages);
+       kvfree(queue->queue_pages);
 
        return 1;
 }
index 1ca8e32a95927961a326f33bae480c11a02a226f..25422a3a72387eb39bd5bac638bcf62a198373bd 100644 (file)
@@ -277,7 +277,7 @@ static int remove_file(struct dentry *parent, char *name)
        }
 
        spin_lock(&tmp->d_lock);
-       if (!d_unhashed(tmp) && d_really_is_positive(tmp)) {
+       if (simple_positive(tmp)) {
                dget_dlock(tmp);
                __d_drop(tmp);
                spin_unlock(&tmp->d_lock);
index bdd5d3857203198bca5c8f04f701b14c9c53dcb1..13ef22bd9459200309f2ab9fccb248eccbef9027 100644 (file)
@@ -455,7 +455,7 @@ static int remove_file(struct dentry *parent, char *name)
        }
 
        spin_lock(&tmp->d_lock);
-       if (!d_unhashed(tmp) && d_really_is_positive(tmp)) {
+       if (simple_positive(tmp)) {
                __d_drop(tmp);
                spin_unlock(&tmp->d_lock);
                simple_unlink(d_inode(parent), tmp);
index f3b7a34e10d81c019217917ac6ebce0462278b50..7717009631271ea6965a7d60b9dcf3f74acdd1d6 100644 (file)
@@ -1356,7 +1356,7 @@ sequence_cmd:
        if (!rc && dump_payload == false && unsol_data)
                iscsit_set_unsoliticed_dataout(cmd);
        else if (dump_payload && imm_data)
-               target_put_sess_cmd(conn->sess->se_sess, &cmd->se_cmd);
+               target_put_sess_cmd(&cmd->se_cmd);
 
        return 0;
 }
@@ -1781,7 +1781,7 @@ isert_put_cmd(struct isert_cmd *isert_cmd, bool comp_err)
                            cmd->se_cmd.t_state == TRANSPORT_WRITE_PENDING) {
                                struct se_cmd *se_cmd = &cmd->se_cmd;
 
-                               target_put_sess_cmd(se_cmd->se_sess, se_cmd);
+                               target_put_sess_cmd(se_cmd);
                        }
                }
 
@@ -1954,7 +1954,7 @@ isert_completion_rdma_read(struct iser_tx_desc *tx_desc,
        spin_unlock_bh(&cmd->istate_lock);
 
        if (ret) {
-               target_put_sess_cmd(se_cmd->se_sess, se_cmd);
+               target_put_sess_cmd(se_cmd);
                transport_send_check_condition_and_sense(se_cmd,
                                                         se_cmd->pi_err, 0);
        } else {
index eada8f758ad4089ec0e15a7469ccc50cb626a98f..267dc4f7550236e89fae58ff80c4a3953cff877f 100644 (file)
@@ -99,7 +99,7 @@ module_param(register_always, bool, 0444);
 MODULE_PARM_DESC(register_always,
                 "Use memory registration even for contiguous memory regions");
 
-static struct kernel_param_ops srp_tmo_ops;
+static const struct kernel_param_ops srp_tmo_ops;
 
 static int srp_reconnect_delay = 10;
 module_param_cb(reconnect_delay, &srp_tmo_ops, &srp_reconnect_delay,
@@ -184,7 +184,7 @@ out:
        return res;
 }
 
-static struct kernel_param_ops srp_tmo_ops = {
+static const struct kernel_param_ops srp_tmo_ops = {
        .get = srp_tmo_get,
        .set = srp_tmo_set,
 };
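The constification above fits the usual module_param_cb() pattern: a const struct kernel_param_ops supplies validated get/set handlers for a module parameter. A minimal sketch of that pattern (parameter name and validation are illustrative):

#include <linux/kernel.h>
#include <linux/moduleparam.h>

static int demo_delay = 10;

static int demo_delay_set(const char *val, const struct kernel_param *kp)
{
        int tmp, ret;

        ret = kstrtoint(val, 0, &tmp);
        if (ret)
                return ret;
        if (tmp < 0)
                return -EINVAL;

        *(int *)kp->arg = tmp;
        return 0;
}

static const struct kernel_param_ops demo_delay_ops = {
        .set = demo_delay_set,
        .get = param_get_int,
};

module_param_cb(demo_delay, &demo_delay_ops, &demo_delay, 0644);
MODULE_PARM_DESC(demo_delay, "Demo delay in seconds (illustrative)");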
index 4556cd11288e755a348cc8553cdf28af38f08880..82897ca17f32349df3e3cc332b9b0204bbc524a3 100644 (file)
@@ -47,7 +47,6 @@
 #include <target/target_core_base.h>
 #include <target/target_core_fabric_configfs.h>
 #include <target/target_core_fabric.h>
-#include <target/target_core_configfs.h>
 #include "ib_srpt.h"
 
 /* Name of this kernel module. */
@@ -94,7 +93,6 @@ MODULE_PARM_DESC(srpt_service_guid,
                 " instead of using the node_guid of the first HCA.");
 
 static struct ib_client srpt_client;
-static const struct target_core_fabric_ops srpt_template;
 static void srpt_release_channel(struct srpt_rdma_ch *ch);
 static int srpt_queue_status(struct se_cmd *cmd);
 
@@ -1336,12 +1334,12 @@ static int srpt_abort_cmd(struct srpt_send_ioctx *ioctx)
 
                BUG_ON(ch->sess == NULL);
 
-               target_put_sess_cmd(ch->sess, &ioctx->cmd);
+               target_put_sess_cmd(&ioctx->cmd);
                goto out;
        }
 
        pr_debug("Aborting cmd with state %d and tag %lld\n", state,
-                ioctx->tag);
+                ioctx->cmd.tag);
 
        switch (state) {
        case SRPT_STATE_NEW:
@@ -1367,11 +1365,11 @@ static int srpt_abort_cmd(struct srpt_send_ioctx *ioctx)
                 * not been received in time.
                 */
                srpt_unmap_sg_to_ib_sge(ioctx->ch, ioctx);
-               target_put_sess_cmd(ioctx->ch->sess, &ioctx->cmd);
+               target_put_sess_cmd(&ioctx->cmd);
                break;
        case SRPT_STATE_MGMT_RSP_SENT:
                srpt_set_cmd_state(ioctx, SRPT_STATE_DONE);
-               target_put_sess_cmd(ioctx->ch->sess, &ioctx->cmd);
+               target_put_sess_cmd(&ioctx->cmd);
                break;
        default:
                WARN(1, "Unexpected command state (%d)", state);
@@ -1389,7 +1387,6 @@ static void srpt_handle_send_err_comp(struct srpt_rdma_ch *ch, u64 wr_id)
 {
        struct srpt_send_ioctx *ioctx;
        enum srpt_command_state state;
-       struct se_cmd *cmd;
        u32 index;
 
        atomic_inc(&ch->sq_wr_avail);
@@ -1397,7 +1394,6 @@ static void srpt_handle_send_err_comp(struct srpt_rdma_ch *ch, u64 wr_id)
        index = idx_from_wr_id(wr_id);
        ioctx = ch->ioctx_ring[index];
        state = srpt_get_cmd_state(ioctx);
-       cmd = &ioctx->cmd;
 
        WARN_ON(state != SRPT_STATE_CMD_RSP_SENT
                && state != SRPT_STATE_MGMT_RSP_SENT
@@ -1474,10 +1470,8 @@ static void srpt_handle_rdma_err_comp(struct srpt_rdma_ch *ch,
                                      struct srpt_send_ioctx *ioctx,
                                      enum srpt_opcode opcode)
 {
-       struct se_cmd *cmd;
        enum srpt_command_state state;
 
-       cmd = &ioctx->cmd;
        state = srpt_get_cmd_state(ioctx);
        switch (opcode) {
        case SRPT_RDMA_READ_LAST:
@@ -1681,7 +1675,7 @@ static int srpt_check_stop_free(struct se_cmd *cmd)
        struct srpt_send_ioctx *ioctx = container_of(cmd,
                                struct srpt_send_ioctx, cmd);
 
-       return target_put_sess_cmd(ioctx->ch->sess, &ioctx->cmd);
+       return target_put_sess_cmd(&ioctx->cmd);
 }
 
 /**
@@ -1703,7 +1697,7 @@ static int srpt_handle_cmd(struct srpt_rdma_ch *ch,
 
        srp_cmd = recv_ioctx->ioctx.buf;
        cmd = &send_ioctx->cmd;
-       send_ioctx->tag = srp_cmd->tag;
+       cmd->tag = srp_cmd->tag;
 
        switch (srp_cmd->task_attr) {
        case SRP_CMD_SIMPLE_Q:
@@ -1774,7 +1768,7 @@ static int srpt_rx_mgmt_fn_tag(struct srpt_send_ioctx *ioctx, u64 tag)
        for (i = 0; i < ch->rq_size; ++i) {
                target = ch->ioctx_ring[i];
                if (target->cmd.se_lun == ioctx->cmd.se_lun &&
-                   target->tag == tag &&
+                   target->cmd.tag == tag &&
                    srpt_get_cmd_state(target) != SRPT_STATE_DONE) {
                        ret = 0;
                        /* now let the target core abort &target->cmd; */
@@ -1833,7 +1827,7 @@ static void srpt_handle_tsk_mgmt(struct srpt_rdma_ch *ch,
                 srp_tsk->task_tag, srp_tsk->tag, ch->cm_id, ch->sess);
 
        srpt_set_cmd_state(send_ioctx, SRPT_STATE_MGMT);
-       send_ioctx->tag = srp_tsk->tag;
+       send_ioctx->cmd.tag = srp_tsk->tag;
        tcm_tmr = srp_tmr_to_tcm(srp_tsk->tsk_mgmt_func);
        if (tcm_tmr < 0) {
                send_ioctx->cmd.se_tmr_req->response =
@@ -2180,12 +2174,9 @@ static void srpt_destroy_ch_ib(struct srpt_rdma_ch *ch)
  */
 static void __srpt_close_ch(struct srpt_rdma_ch *ch)
 {
-       struct srpt_device *sdev;
        enum rdma_ch_state prev_state;
        unsigned long flags;
 
-       sdev = ch->sport->sdev;
-
        spin_lock_irqsave(&ch->spinlock, flags);
        prev_state = ch->state;
        switch (prev_state) {
@@ -2983,7 +2974,7 @@ static int srpt_write_pending(struct se_cmd *se_cmd)
        case CH_DRAINING:
        case CH_RELEASING:
                pr_debug("cmd with tag %lld: channel disconnecting\n",
-                        ioctx->tag);
+                        ioctx->cmd.tag);
                srpt_set_cmd_state(ioctx, SRPT_STATE_DATA_IN);
                ret = -EINVAL;
                goto out;
@@ -3058,27 +3049,27 @@ static void srpt_queue_response(struct se_cmd *cmd)
                ret = srpt_xfer_data(ch, ioctx);
                if (ret) {
                        pr_err("xfer_data failed for tag %llu\n",
-                              ioctx->tag);
+                              ioctx->cmd.tag);
                        return;
                }
        }
 
        if (state != SRPT_STATE_MGMT)
-               resp_len = srpt_build_cmd_rsp(ch, ioctx, ioctx->tag,
+               resp_len = srpt_build_cmd_rsp(ch, ioctx, ioctx->cmd.tag,
                                              cmd->scsi_status);
        else {
                srp_tm_status
                        = tcm_to_srp_tsk_mgmt_status(cmd->se_tmr_req->response);
                resp_len = srpt_build_tskmgmt_rsp(ch, ioctx, srp_tm_status,
-                                                ioctx->tag);
+                                                ioctx->cmd.tag);
        }
        ret = srpt_post_send(ch, ioctx, resp_len);
        if (ret) {
                pr_err("sending cmd response failed for tag %llu\n",
-                      ioctx->tag);
+                      ioctx->cmd.tag);
                srpt_unmap_sg_to_ib_sge(ch, ioctx);
                srpt_set_cmd_state(ioctx, SRPT_STATE_DONE);
-               target_put_sess_cmd(ioctx->ch->sess, &ioctx->cmd);
+               target_put_sess_cmd(&ioctx->cmd);
        }
 }
 
@@ -3398,11 +3389,6 @@ static char *srpt_get_fabric_name(void)
        return "srpt";
 }
 
-static u8 srpt_get_fabric_proto_ident(struct se_portal_group *se_tpg)
-{
-       return SCSI_TRANSPORTID_PROTOCOLID_SRP;
-}
-
 static char *srpt_get_fabric_wwn(struct se_portal_group *tpg)
 {
        struct srpt_port *sport = container_of(tpg, struct srpt_port, port_tpg_1);
@@ -3415,69 +3401,6 @@ static u16 srpt_get_tag(struct se_portal_group *tpg)
        return 1;
 }
 
-static u32 srpt_get_default_depth(struct se_portal_group *se_tpg)
-{
-       return 1;
-}
-
-static u32 srpt_get_pr_transport_id(struct se_portal_group *se_tpg,
-                                   struct se_node_acl *se_nacl,
-                                   struct t10_pr_registration *pr_reg,
-                                   int *format_code, unsigned char *buf)
-{
-       struct srpt_node_acl *nacl;
-       struct spc_rdma_transport_id *tr_id;
-
-       nacl = container_of(se_nacl, struct srpt_node_acl, nacl);
-       tr_id = (void *)buf;
-       tr_id->protocol_identifier = SCSI_TRANSPORTID_PROTOCOLID_SRP;
-       memcpy(tr_id->i_port_id, nacl->i_port_id, sizeof(tr_id->i_port_id));
-       return sizeof(*tr_id);
-}
-
-static u32 srpt_get_pr_transport_id_len(struct se_portal_group *se_tpg,
-                                       struct se_node_acl *se_nacl,
-                                       struct t10_pr_registration *pr_reg,
-                                       int *format_code)
-{
-       *format_code = 0;
-       return sizeof(struct spc_rdma_transport_id);
-}
-
-static char *srpt_parse_pr_out_transport_id(struct se_portal_group *se_tpg,
-                                           const char *buf, u32 *out_tid_len,
-                                           char **port_nexus_ptr)
-{
-       struct spc_rdma_transport_id *tr_id;
-
-       *port_nexus_ptr = NULL;
-       *out_tid_len = sizeof(struct spc_rdma_transport_id);
-       tr_id = (void *)buf;
-       return (char *)tr_id->i_port_id;
-}
-
-static struct se_node_acl *srpt_alloc_fabric_acl(struct se_portal_group *se_tpg)
-{
-       struct srpt_node_acl *nacl;
-
-       nacl = kzalloc(sizeof(struct srpt_node_acl), GFP_KERNEL);
-       if (!nacl) {
-               pr_err("Unable to allocate struct srpt_node_acl\n");
-               return NULL;
-       }
-
-       return &nacl->nacl;
-}
-
-static void srpt_release_fabric_acl(struct se_portal_group *se_tpg,
-                                   struct se_node_acl *se_nacl)
-{
-       struct srpt_node_acl *nacl;
-
-       nacl = container_of(se_nacl, struct srpt_node_acl, nacl);
-       kfree(nacl);
-}
-
 static u32 srpt_tpg_get_inst_index(struct se_portal_group *se_tpg)
 {
        return 1;
@@ -3551,14 +3474,6 @@ static void srpt_set_default_node_attrs(struct se_node_acl *nacl)
 {
 }
 
-static u32 srpt_get_task_tag(struct se_cmd *se_cmd)
-{
-       struct srpt_send_ioctx *ioctx;
-
-       ioctx = container_of(se_cmd, struct srpt_send_ioctx, cmd);
-       return ioctx->tag;
-}
-
 /* Note: only used from inside debug printk's by the TCM core. */
 static int srpt_get_tcm_cmd_state(struct se_cmd *se_cmd)
 {
@@ -3601,40 +3516,19 @@ out:
  * configfs callback function invoked for
  * mkdir /sys/kernel/config/target/$driver/$port/$tpg/acls/$i_port_id
  */
-static struct se_node_acl *srpt_make_nodeacl(struct se_portal_group *tpg,
-                                            struct config_group *group,
-                                            const char *name)
+static int srpt_init_nodeacl(struct se_node_acl *se_nacl, const char *name)
 {
-       struct srpt_port *sport = container_of(tpg, struct srpt_port, port_tpg_1);
-       struct se_node_acl *se_nacl, *se_nacl_new;
-       struct srpt_node_acl *nacl;
-       int ret = 0;
-       u32 nexus_depth = 1;
+       struct srpt_port *sport =
+               container_of(se_nacl->se_tpg, struct srpt_port, port_tpg_1);
+       struct srpt_node_acl *nacl =
+               container_of(se_nacl, struct srpt_node_acl, nacl);
        u8 i_port_id[16];
 
        if (srpt_parse_i_port_id(i_port_id, name) < 0) {
                pr_err("invalid initiator port ID %s\n", name);
-               ret = -EINVAL;
-               goto err;
+               return -EINVAL;
        }
 
-       se_nacl_new = srpt_alloc_fabric_acl(tpg);
-       if (!se_nacl_new) {
-               ret = -ENOMEM;
-               goto err;
-       }
-       /*
-        * nacl_new may be released by core_tpg_add_initiator_node_acl()
-        * when converting a node ACL from demo mode to explict
-        */
-       se_nacl = core_tpg_add_initiator_node_acl(tpg, se_nacl_new, name,
-                                                 nexus_depth);
-       if (IS_ERR(se_nacl)) {
-               ret = PTR_ERR(se_nacl);
-               goto err;
-       }
-       /* Locate our struct srpt_node_acl and set sdev and i_port_id. */
-       nacl = container_of(se_nacl, struct srpt_node_acl, nacl);
        memcpy(&nacl->i_port_id[0], &i_port_id[0], 16);
        nacl->sport = sport;
 
@@ -3642,29 +3536,22 @@ static struct se_node_acl *srpt_make_nodeacl(struct se_portal_group *tpg,
        list_add_tail(&nacl->list, &sport->port_acl_list);
        spin_unlock_irq(&sport->port_acl_lock);
 
-       return se_nacl;
-err:
-       return ERR_PTR(ret);
+       return 0;
 }
 
 /*
  * configfs callback function invoked for
  * rmdir /sys/kernel/config/target/$driver/$port/$tpg/acls/$i_port_id
  */
-static void srpt_drop_nodeacl(struct se_node_acl *se_nacl)
+static void srpt_cleanup_nodeacl(struct se_node_acl *se_nacl)
 {
-       struct srpt_node_acl *nacl;
-       struct srpt_device *sdev;
-       struct srpt_port *sport;
+       struct srpt_node_acl *nacl =
+               container_of(se_nacl, struct srpt_node_acl, nacl);
+       struct srpt_port *sport = nacl->sport;
 
-       nacl = container_of(se_nacl, struct srpt_node_acl, nacl);
-       sport = nacl->sport;
-       sdev = sport->sdev;
        spin_lock_irq(&sport->port_acl_lock);
        list_del(&nacl->list);
        spin_unlock_irq(&sport->port_acl_lock);
-       core_tpg_del_initiator_node_acl(&sport->port_tpg_1, se_nacl, 1);
-       srpt_release_fabric_acl(NULL, se_nacl);
 }
 
 static ssize_t srpt_tpg_attrib_show_srp_max_rdma_size(
@@ -3849,8 +3736,7 @@ static struct se_portal_group *srpt_make_tpg(struct se_wwn *wwn,
        int res;
 
        /* Initialize sport->port_wwn and sport->port_tpg_1 */
-       res = core_tpg_register(&srpt_template, &sport->port_wwn,
-                       &sport->port_tpg_1, sport, TRANSPORT_TPG_TYPE_NORMAL);
+       res = core_tpg_register(&sport->port_wwn, &sport->port_tpg_1, SCSI_PROTOCOL_SRP);
        if (res)
                return ERR_PTR(res);
 
@@ -3920,20 +3806,14 @@ static struct configfs_attribute *srpt_wwn_attrs[] = {
 static const struct target_core_fabric_ops srpt_template = {
        .module                         = THIS_MODULE,
        .name                           = "srpt",
+       .node_acl_size                  = sizeof(struct srpt_node_acl),
        .get_fabric_name                = srpt_get_fabric_name,
-       .get_fabric_proto_ident         = srpt_get_fabric_proto_ident,
        .tpg_get_wwn                    = srpt_get_fabric_wwn,
        .tpg_get_tag                    = srpt_get_tag,
-       .tpg_get_default_depth          = srpt_get_default_depth,
-       .tpg_get_pr_transport_id        = srpt_get_pr_transport_id,
-       .tpg_get_pr_transport_id_len    = srpt_get_pr_transport_id_len,
-       .tpg_parse_pr_out_transport_id  = srpt_parse_pr_out_transport_id,
        .tpg_check_demo_mode            = srpt_check_false,
        .tpg_check_demo_mode_cache      = srpt_check_true,
        .tpg_check_demo_mode_write_protect = srpt_check_true,
        .tpg_check_prod_mode_write_protect = srpt_check_false,
-       .tpg_alloc_fabric_acl           = srpt_alloc_fabric_acl,
-       .tpg_release_fabric_acl         = srpt_release_fabric_acl,
        .tpg_get_inst_index             = srpt_tpg_get_inst_index,
        .release_cmd                    = srpt_release_cmd,
        .check_stop_free                = srpt_check_stop_free,
@@ -3944,7 +3824,6 @@ static const struct target_core_fabric_ops srpt_template = {
        .write_pending                  = srpt_write_pending,
        .write_pending_status           = srpt_write_pending_status,
        .set_default_node_attributes    = srpt_set_default_node_attrs,
-       .get_task_tag                   = srpt_get_task_tag,
        .get_cmd_state                  = srpt_get_tcm_cmd_state,
        .queue_data_in                  = srpt_queue_data_in,
        .queue_status                   = srpt_queue_status,
@@ -3958,12 +3837,8 @@ static const struct target_core_fabric_ops srpt_template = {
        .fabric_drop_wwn                = srpt_drop_tport,
        .fabric_make_tpg                = srpt_make_tpg,
        .fabric_drop_tpg                = srpt_drop_tpg,
-       .fabric_post_link               = NULL,
-       .fabric_pre_unlink              = NULL,
-       .fabric_make_np                 = NULL,
-       .fabric_drop_np                 = NULL,
-       .fabric_make_nodeacl            = srpt_make_nodeacl,
-       .fabric_drop_nodeacl            = srpt_drop_nodeacl,
+       .fabric_init_nodeacl            = srpt_init_nodeacl,
+       .fabric_cleanup_nodeacl         = srpt_cleanup_nodeacl,
 
        .tfc_wwn_attrs                  = srpt_wwn_attrs,
        .tfc_tpg_base_attrs             = srpt_tpg_attrs,
index d85c0c2056257b0ffa9a19e49aa96dba0e33399e..21f8df67522ab5131c43c5447ed4e3b57002badb 100644 (file)
@@ -238,7 +238,6 @@ struct srpt_send_ioctx {
        bool                    rdma_aborted;
        struct se_cmd           cmd;
        struct completion       tx_done;
-       u64                     tag;
        int                     sg_cnt;
        int                     mapped_sg_count;
        u16                     n_rdma_ius;
@@ -410,34 +409,16 @@ struct srpt_device {
 
 /**
  * struct srpt_node_acl - Per-initiator ACL data (managed via configfs).
+ * @nacl:      Target core node ACL information.
  * @i_port_id: 128-bit SRP initiator port ID.
  * @sport:     port information.
- * @nacl:      Target core node ACL information.
  * @list:      Element of the per-HCA ACL list.
  */
 struct srpt_node_acl {
+       struct se_node_acl      nacl;
        u8                      i_port_id[16];
        struct srpt_port        *sport;
-       struct se_node_acl      nacl;
        struct list_head        list;
 };
 
-/*
- * SRP-releated SCSI persistent reservation definitions.
- *
- * See also SPC4r28, section 7.6.1 (Protocol specific parameters introduction).
- * See also SPC4r28, section 7.6.4.5 (TransportID for initiator ports using
- * SCSI over an RDMA interface).
- */
-
-enum {
-       SCSI_TRANSPORTID_PROTOCOLID_SRP = 4,
-};
-
-struct spc_rdma_transport_id {
-       uint8_t protocol_identifier;
-       uint8_t reserved[7];
-       uint8_t i_port_id[16];
-};
-
 #endif                         /* IB_SRPT_H */
index f315784236361b186249212db7d118ec31ef298f..78d24990a816c10e6312108a316e61ddb8e164b9 100644 (file)
@@ -677,12 +677,9 @@ static void input_dev_release_keys(struct input_dev *dev)
        int code;
 
        if (is_event_supported(EV_KEY, dev->evbit, EV_MAX)) {
-               for (code = 0; code <= KEY_MAX; code++) {
-                       if (is_event_supported(code, dev->keybit, KEY_MAX) &&
-                           __test_and_clear_bit(code, dev->key)) {
-                               input_pass_event(dev, EV_KEY, code, 0);
-                       }
-               }
+               for_each_set_bit(code, dev->key, KEY_CNT)
+                       input_pass_event(dev, EV_KEY, code, 0);
+               memset(dev->key, 0, sizeof(dev->key));
                input_pass_event(dev, EV_SYN, SYN_REPORT, 1);
        }
 }
@@ -1626,10 +1623,7 @@ static int input_dev_uevent(struct device *device, struct kobj_uevent_env *env)
                if (!test_bit(EV_##type, dev->evbit))                   \
                        break;                                          \
                                                                        \
-               for (i = 0; i < type##_MAX; i++) {                      \
-                       if (!test_bit(i, dev->bits##bit))               \
-                               continue;                               \
-                                                                       \
+               for_each_set_bit(i, dev->bits##bit, type##_CNT) {       \
                        active = test_bit(i, dev->bits);                \
                        if (!active && !on)                             \
                                continue;                               \
@@ -1980,22 +1974,12 @@ static unsigned int input_estimate_events_per_packet(struct input_dev *dev)
 
        events = mt_slots + 1; /* count SYN_MT_REPORT and SYN_REPORT */
 
-       if (test_bit(EV_ABS, dev->evbit)) {
-               for (i = 0; i < ABS_CNT; i++) {
-                       if (test_bit(i, dev->absbit)) {
-                               if (input_is_mt_axis(i))
-                                       events += mt_slots;
-                               else
-                                       events++;
-                       }
-               }
-       }
+       if (test_bit(EV_ABS, dev->evbit))
+               for_each_set_bit(i, dev->absbit, ABS_CNT)
+                       events += input_is_mt_axis(i) ? mt_slots : 1;
 
-       if (test_bit(EV_REL, dev->evbit)) {
-               for (i = 0; i < REL_CNT; i++)
-                       if (test_bit(i, dev->relbit))
-                               events++;
-       }
+       if (test_bit(EV_REL, dev->evbit))
+               events += bitmap_weight(dev->relbit, REL_CNT);
 
        /* Make room for KEY and MSC events */
        events += 7;
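The input core conversions above rely on the generic bitmap helpers: for_each_set_bit() iterates only over set bits and bitmap_weight() counts them, replacing the open-coded test_bit() loops. A minimal sketch of both helpers (the bitmap size is illustrative):

#include <linux/bitmap.h>
#include <linux/bitops.h>

#define DEMO_NBITS      64

static unsigned int demo_release_all(unsigned long *bits)
{
        unsigned int bit, handled = 0;

        for_each_set_bit(bit, bits, DEMO_NBITS) {
                /* act on each set bit, e.g. emit a release event for it */
                handled++;
        }

        /* handled now equals bitmap_weight(bits, DEMO_NBITS) */
        bitmap_zero(bits, DEMO_NBITS);

        return handled;
}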
index 61c7611563712d9cfc829217701061af4bcdd3fa..f8850f9cb33103ccfef4d696389f2b6ddad1249d 100644 (file)
@@ -344,6 +344,7 @@ struct usb_xpad {
 
        int mapping;                    /* map d-pad to buttons or to axes */
        int xtype;                      /* type of xbox device */
+       unsigned long led_no;           /* led to light on xbox360 controllers */
 };
 
 /*
@@ -488,6 +489,8 @@ static void xpad360_process_packet(struct usb_xpad *xpad,
        input_sync(dev);
 }
 
+static void xpad_identify_controller(struct usb_xpad *xpad);
+
 /*
  * xpad360w_process_packet
  *
@@ -510,6 +513,11 @@ static void xpad360w_process_packet(struct usb_xpad *xpad, u16 cmd, unsigned cha
                if (data[1] & 0x80) {
                        xpad->pad_present = 1;
                        usb_submit_urb(xpad->bulk_out, GFP_ATOMIC);
+                       /*
+                        * Light up the segment corresponding to
+                        * controller number.
+                        */
+                       xpad_identify_controller(xpad);
                } else
                        xpad->pad_present = 0;
        }
@@ -881,17 +889,63 @@ struct xpad_led {
        struct usb_xpad *xpad;
 };
 
+/**
+ * @param command
+ *  0: off
+ *  1: all blink, then previous setting
+ *  2: 1/top-left blink, then on
+ *  3: 2/top-right blink, then on
+ *  4: 3/bottom-left blink, then on
+ *  5: 4/bottom-right blink, then on
+ *  6: 1/top-left on
+ *  7: 2/top-right on
+ *  8: 3/bottom-left on
+ *  9: 4/bottom-right on
+ * 10: rotate
+ * 11: blink, based on previous setting
+ * 12: slow blink, based on previous setting
+ * 13: rotate with two lights
+ * 14: persistent slow all blink
+ * 15: blink once, then previous setting
+ */
 static void xpad_send_led_command(struct usb_xpad *xpad, int command)
 {
-       if (command >= 0 && command < 14) {
-               mutex_lock(&xpad->odata_mutex);
+       command %= 16;
+
+       mutex_lock(&xpad->odata_mutex);
+
+       switch (xpad->xtype) {
+       case XTYPE_XBOX360:
                xpad->odata[0] = 0x01;
                xpad->odata[1] = 0x03;
                xpad->odata[2] = command;
                xpad->irq_out->transfer_buffer_length = 3;
-               usb_submit_urb(xpad->irq_out, GFP_KERNEL);
-               mutex_unlock(&xpad->odata_mutex);
+               break;
+       case XTYPE_XBOX360W:
+               xpad->odata[0] = 0x00;
+               xpad->odata[1] = 0x00;
+               xpad->odata[2] = 0x08;
+               xpad->odata[3] = 0x40 + command;
+               xpad->odata[4] = 0x00;
+               xpad->odata[5] = 0x00;
+               xpad->odata[6] = 0x00;
+               xpad->odata[7] = 0x00;
+               xpad->odata[8] = 0x00;
+               xpad->odata[9] = 0x00;
+               xpad->odata[10] = 0x00;
+               xpad->odata[11] = 0x00;
+               xpad->irq_out->transfer_buffer_length = 12;
+               break;
        }
+
+       usb_submit_urb(xpad->irq_out, GFP_KERNEL);
+       mutex_unlock(&xpad->odata_mutex);
+}
+
+static void xpad_identify_controller(struct usb_xpad *xpad)
+{
+       /* Light up the segment corresponding to controller number */
+       xpad_send_led_command(xpad, (xpad->led_no % 4) + 2);
 }
 
 static void xpad_led_set(struct led_classdev *led_cdev,
@@ -905,22 +959,21 @@ static void xpad_led_set(struct led_classdev *led_cdev,
 
 static int xpad_led_probe(struct usb_xpad *xpad)
 {
-       static atomic_t led_seq = ATOMIC_INIT(-1);
-       unsigned long led_no;
+       static atomic_t led_seq = ATOMIC_INIT(-1);
        struct xpad_led *led;
        struct led_classdev *led_cdev;
        int error;
 
-       if (xpad->xtype != XTYPE_XBOX360)
+       if (xpad->xtype != XTYPE_XBOX360 && xpad->xtype != XTYPE_XBOX360W)
                return 0;
 
        xpad->led = led = kzalloc(sizeof(struct xpad_led), GFP_KERNEL);
        if (!led)
                return -ENOMEM;
 
-       led_no = atomic_inc_return(&led_seq);
+       xpad->led_no = atomic_inc_return(&led_seq);
 
-       snprintf(led->name, sizeof(led->name), "xpad%lu", led_no);
+       snprintf(led->name, sizeof(led->name), "xpad%lu", xpad->led_no);
        led->xpad = xpad;
 
        led_cdev = &led->led_cdev;
@@ -934,10 +987,8 @@ static int xpad_led_probe(struct usb_xpad *xpad)
                return error;
        }
 
-       /*
-        * Light up the segment corresponding to controller number
-        */
-       xpad_send_led_command(xpad, (led_no % 4) + 2);
+       /* Light up the segment corresponding to controller number */
+       xpad_identify_controller(xpad);
 
        return 0;
 }
@@ -954,6 +1005,7 @@ static void xpad_led_disconnect(struct usb_xpad *xpad)
 #else
 static int xpad_led_probe(struct usb_xpad *xpad) { return 0; }
 static void xpad_led_disconnect(struct usb_xpad *xpad) { }
+static void xpad_identify_controller(struct usb_xpad *xpad) { }
 #endif
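
For reference (not part of the patch), the sketch below mirrors the mapping used by xpad_identify_controller() above: a zero-based controller number is turned into one of the documented commands 2..5, which blink and then leave the matching LED quadrant lit. It is a standalone userspace illustration and does not talk to hardware.

/*
 * Standalone sketch: maps a controller number to the LED command values
 * documented above (2..5 = blink, then leave segment 1..4 on).
 */
#include <stdio.h>

static int led_command_for_controller(unsigned long led_no)
{
	return (int)(led_no % 4) + 2;
}

int main(void)
{
	unsigned long n;

	for (n = 0; n < 6; n++)
		printf("controller %lu -> LED command %d\n",
		       n, led_command_for_controller(n));
	return 0;
}
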
 
 
index 2e855e6f3565cffe0644922445581dec92c00914..d2ea863d6a45fed60d118f9242d434f259504546 100644 (file)
@@ -506,7 +506,9 @@ static int imx_keypad_probe(struct platform_device *pdev)
        input_set_drvdata(input_dev, keypad);
 
        /* Ensure that the keypad will stay dormant until opened */
-       clk_prepare_enable(keypad->clk);
+       error = clk_prepare_enable(keypad->clk);
+       if (error)
+               return error;
        imx_keypad_inhibit(keypad);
        clk_disable_unprepare(keypad->clk);
 
index f63341f20b91aed210208d62bf50c17b9da261c5..cfd58e87da2620061f2d93e01244c9d052319991 100644 (file)
@@ -94,7 +94,7 @@ static int ati_remote2_get_mode_mask(char *buffer,
 
 static unsigned int channel_mask = ATI_REMOTE2_MAX_CHANNEL_MASK;
 #define param_check_channel_mask(name, p) __param_check(name, p, unsigned int)
-static struct kernel_param_ops param_ops_channel_mask = {
+static const struct kernel_param_ops param_ops_channel_mask = {
        .set = ati_remote2_set_channel_mask,
        .get = ati_remote2_get_channel_mask,
 };
@@ -103,7 +103,7 @@ MODULE_PARM_DESC(channel_mask, "Bitmask of channels to accept <15:Channel16>...<
 
 static unsigned int mode_mask = ATI_REMOTE2_MAX_MODE_MASK;
 #define param_check_mode_mask(name, p) __param_check(name, p, unsigned int)
-static struct kernel_param_ops param_ops_mode_mask = {
+static const struct kernel_param_ops param_ops_mode_mask = {
        .set = ati_remote2_set_mode_mask,
        .get = ati_remote2_get_mode_mask,
 };
index f1c844739cd7691d66630efcfb5456fa74842c90..10e140af5aac1a9ea309d2b237af065cc7abf684 100644 (file)
@@ -167,9 +167,13 @@ static irqreturn_t axp20x_pek_irq(int irq, void *pwr)
        struct input_dev *idev = pwr;
        struct axp20x_pek *axp20x_pek = input_get_drvdata(idev);
 
-       if (irq == axp20x_pek->irq_dbr)
+       /*
+        * The power button is connected to ground, so a falling edge (dbf)
+        * means it is pressed.
+        */
+       if (irq == axp20x_pek->irq_dbf)
                input_report_key(idev, KEY_POWER, true);
-       else if (irq == axp20x_pek->irq_dbf)
+       else if (irq == axp20x_pek->irq_dbr)
                input_report_key(idev, KEY_POWER, false);
 
        input_sync(idev);
index 7c4ba43d253e7661088c8e241fca6ab02e1f68d3..ec34770361501254e8e2c417315f74e3e00e5a97 100644 (file)
@@ -47,7 +47,7 @@ MODULE_LICENSE("GPL");
 static unsigned int psmouse_max_proto = PSMOUSE_AUTO;
 static int psmouse_set_maxproto(const char *val, const struct kernel_param *);
 static int psmouse_get_maxproto(char *buffer, const struct kernel_param *kp);
-static struct kernel_param_ops param_ops_proto_abbrev = {
+static const struct kernel_param_ops param_ops_proto_abbrev = {
        .set = psmouse_set_maxproto,
        .get = psmouse_get_maxproto,
 };
index 77833d7a004bfa4f6af1b9a7cd0aaa66ef0f8b22..200841b77edb85b22caf110f4693443957354915 100644 (file)
@@ -244,6 +244,7 @@ config SERIO_PS2MULT
 
 config SERIO_ARC_PS2
        tristate "ARC PS/2 support"
+       depends on HAS_IOMEM
        help
          Say Y here if you have an ARC FPGA platform with a PS/2
          controller in it.
index d20fe1dff403ed78cdf33f6c7113629d2beef4f9..a854c6e5f09eca6d01515ae09065eaa31bc364e8 100644 (file)
@@ -658,6 +658,18 @@ config TOUCHSCREEN_PIXCIR
          To compile this driver as a module, choose M here: the
          module will be called pixcir_i2c_ts.
 
+config TOUCHSCREEN_WDT87XX_I2C
+       tristate "Weida HiTech I2C touchscreen"
+       depends on I2C
+       help
+         Say Y here if you have a Weida WDT87XX I2C touchscreen
+         connected to your system.
+
+         If unsure, say N.
+
+         To compile this driver as a module, choose M here: the
+         module will be called wdt87xx_i2c.
+
 config TOUCHSCREEN_WM831X
        tristate "Support for WM831x touchscreen controllers"
        depends on MFD_WM831X
index 44deea743d022539a08a44d49c7611f7bfbd4908..fa3d33bac7fc0755e9fbeec0e5983ad4d3b6f8ca 100644 (file)
@@ -72,6 +72,7 @@ obj-$(CONFIG_TOUCHSCREEN_TSC2007)     += tsc2007.o
 obj-$(CONFIG_TOUCHSCREEN_UCB1400)      += ucb1400_ts.o
 obj-$(CONFIG_TOUCHSCREEN_WACOM_W8001)  += wacom_w8001.o
 obj-$(CONFIG_TOUCHSCREEN_WACOM_I2C)    += wacom_i2c.o
+obj-$(CONFIG_TOUCHSCREEN_WDT87XX_I2C)  += wdt87xx_i2c.o
 obj-$(CONFIG_TOUCHSCREEN_WM831X)       += wm831x-ts.o
 obj-$(CONFIG_TOUCHSCREEN_WM97XX)       += wm97xx-ts.o
 wm97xx-ts-$(CONFIG_TOUCHSCREEN_WM9705) += wm9705.o
index e6aef3e48bd9207f982a73da37e60537c491835c..394b1de9a2a3fc7d955861ce6666093c8c8dcb4d 100644 (file)
@@ -1035,20 +1035,15 @@ static int edt_ft5x06_ts_probe(struct i2c_client *client,
        input->id.bustype = BUS_I2C;
        input->dev.parent = &client->dev;
 
-       __set_bit(EV_KEY, input->evbit);
-       __set_bit(EV_ABS, input->evbit);
-       __set_bit(BTN_TOUCH, input->keybit);
-       input_set_abs_params(input, ABS_X, 0, tsdata->num_x * 64 - 1, 0, 0);
-       input_set_abs_params(input, ABS_Y, 0, tsdata->num_y * 64 - 1, 0, 0);
        input_set_abs_params(input, ABS_MT_POSITION_X,
                             0, tsdata->num_x * 64 - 1, 0, 0);
        input_set_abs_params(input, ABS_MT_POSITION_Y,
                             0, tsdata->num_y * 64 - 1, 0, 0);
 
        if (!pdata)
-               touchscreen_parse_of_params(input);
+               touchscreen_parse_of_params(input, true);
 
-       error = input_mt_init_slots(input, MAX_SUPPORT_POINTS, 0);
+       error = input_mt_init_slots(input, MAX_SUPPORT_POINTS, INPUT_MT_DIRECT);
        if (error) {
                dev_err(&client->dev, "Unable to init MT slots.\n");
                return error;
index b82b5207c78bdd3597af9f3abd11215abbaf9c2e..806cd0ad160f95c9b08e0bc4f9e9a33e2655f8bc 100644 (file)
 #include <linux/input/mt.h>
 #include <linux/input/touchscreen.h>
 
-static u32 of_get_optional_u32(struct device_node *np,
-                              const char *property)
+static bool touchscreen_get_prop_u32(struct device_node *np,
+                                    const char *property,
+                                    unsigned int default_value,
+                                    unsigned int *value)
 {
-       u32 val = 0;
+       u32 val;
+       int error;
 
-       of_property_read_u32(np, property, &val);
+       error = of_property_read_u32(np, property, &val);
+       if (error) {
+               *value = default_value;
+               return false;
+       }
 
-       return val;
+       *value = val;
+       return true;
 }
 
 static void touchscreen_set_params(struct input_dev *dev,
@@ -54,34 +62,45 @@ static void touchscreen_set_params(struct input_dev *dev,
  * input device accordingly. The function keeps previously set up default
  * values if no value is specified via DT.
  */
-void touchscreen_parse_of_params(struct input_dev *dev)
+void touchscreen_parse_of_params(struct input_dev *dev, bool multitouch)
 {
        struct device_node *np = dev->dev.parent->of_node;
-       u32 maximum, fuzz;
+       unsigned int axis;
+       unsigned int maximum, fuzz;
+       bool data_present;
 
        input_alloc_absinfo(dev);
        if (!dev->absinfo)
                return;
 
-       maximum = of_get_optional_u32(np, "touchscreen-size-x");
-       fuzz = of_get_optional_u32(np, "touchscreen-fuzz-x");
-       if (maximum || fuzz) {
-               touchscreen_set_params(dev, ABS_X, maximum, fuzz);
-               touchscreen_set_params(dev, ABS_MT_POSITION_X, maximum, fuzz);
-       }
+       axis = multitouch ? ABS_MT_POSITION_X : ABS_X;
+       data_present = touchscreen_get_prop_u32(np, "touchscreen-size-x",
+                                               input_abs_get_max(dev, axis),
+                                               &maximum) |
+                      touchscreen_get_prop_u32(np, "touchscreen-fuzz-x",
+                                               input_abs_get_fuzz(dev, axis),
+                                               &fuzz);
+       if (data_present)
+               touchscreen_set_params(dev, axis, maximum, fuzz);
 
-       maximum = of_get_optional_u32(np, "touchscreen-size-y");
-       fuzz = of_get_optional_u32(np, "touchscreen-fuzz-y");
-       if (maximum || fuzz) {
-               touchscreen_set_params(dev, ABS_Y, maximum, fuzz);
-               touchscreen_set_params(dev, ABS_MT_POSITION_Y, maximum, fuzz);
-       }
+       axis = multitouch ? ABS_MT_POSITION_Y : ABS_Y;
+       data_present = touchscreen_get_prop_u32(np, "touchscreen-size-y",
+                                               input_abs_get_max(dev, axis),
+                                               &maximum) |
+                      touchscreen_get_prop_u32(np, "touchscreen-fuzz-y",
+                                               input_abs_get_fuzz(dev, axis),
+                                               &fuzz);
+       if (data_present)
+               touchscreen_set_params(dev, axis, maximum, fuzz);
 
-       maximum = of_get_optional_u32(np, "touchscreen-max-pressure");
-       fuzz = of_get_optional_u32(np, "touchscreen-fuzz-pressure");
-       if (maximum || fuzz) {
-               touchscreen_set_params(dev, ABS_PRESSURE, maximum, fuzz);
-               touchscreen_set_params(dev, ABS_MT_PRESSURE, maximum, fuzz);
-       }
+       axis = multitouch ? ABS_MT_PRESSURE : ABS_PRESSURE;
+       data_present = touchscreen_get_prop_u32(np, "touchscreen-max-pressure",
+                                               input_abs_get_max(dev, axis),
+                                               &maximum) |
+                      touchscreen_get_prop_u32(np, "touchscreen-fuzz-pressure",
+                                               input_abs_get_fuzz(dev, axis),
+                                               &fuzz);
+       if (data_present)
+               touchscreen_set_params(dev, axis, maximum, fuzz);
 }
 EXPORT_SYMBOL(touchscreen_parse_of_params);
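
One detail worth noting in the rewritten touchscreen_parse_of_params() is the bitwise '|' between the two touchscreen_get_prop_u32() calls: unlike '||', it does not short-circuit, so the fuzz property is still read (or defaulted) even when the size property was found. The toy sketch below, with made-up stand-ins for the DT helpers, demonstrates that behaviour; it is not driver code.

/*
 * Toy stand-in for the DT helpers; 'present' simulates whether the
 * property exists, the numeric values are arbitrary.
 */
#include <stdbool.h>
#include <stdio.h>

static bool read_prop(bool present, unsigned int prop_val,
		      unsigned int default_value, unsigned int *value)
{
	*value = present ? prop_val : default_value;
	return present;
}

int main(void)
{
	unsigned int maximum, fuzz;
	bool data_present;

	/* "size" present, "fuzz" absent: '|' still runs the second read. */
	data_present = read_prop(true, 4095, 0, &maximum) |
		       read_prop(false, 0, 4, &fuzz);

	if (data_present)
		printf("set params: max=%u fuzz=%u\n", maximum, fuzz);
	return 0;
}
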
index 72657c5794303d6c7a2a7b251e848046ec674210..d8c025b0f88cd4fe2c16712d1852111d782e08da 100644 (file)
@@ -709,7 +709,7 @@ static int tsc2005_probe(struct spi_device *spi)
        input_set_abs_params(input_dev, ABS_PRESSURE, 0, max_p, fudge_p, 0);
 
        if (np)
-               touchscreen_parse_of_params(input_dev);
+               touchscreen_parse_of_params(input_dev, false);
 
        input_dev->open = tsc2005_open;
        input_dev->close = tsc2005_close;
diff --git a/drivers/input/touchscreen/wdt87xx_i2c.c b/drivers/input/touchscreen/wdt87xx_i2c.c
new file mode 100644 (file)
index 0000000..fb92ae1
--- /dev/null
@@ -0,0 +1,1149 @@
+/*
+ * Weida HiTech WDT87xx TouchScreen I2C driver
+ *
+ * Copyright (c) 2015  Weida Hi-Tech Co., Ltd.
+ * HN Chen <hn.chen@weidahitech.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ */
+
+#include <linux/i2c.h>
+#include <linux/input.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/irq.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/firmware.h>
+#include <linux/input/mt.h>
+#include <linux/acpi.h>
+#include <asm/unaligned.h>
+
+#define WDT87XX_NAME           "wdt87xx_i2c"
+#define WDT87XX_DRV_VER                "0.9.6"
+#define WDT87XX_FW_NAME                "wdt87xx_fw.bin"
+#define WDT87XX_CFG_NAME       "wdt87xx_cfg.bin"
+
+#define MODE_ACTIVE                    0x01
+#define MODE_READY                     0x02
+#define MODE_IDLE                      0x03
+#define MODE_SLEEP                     0x04
+#define MODE_STOP                      0xFF
+
+#define WDT_MAX_FINGER                 10
+#define WDT_RAW_BUF_COUNT              54
+#define WDT_V1_RAW_BUF_COUNT           74
+#define WDT_FIRMWARE_ID                        0xa9e368f5
+
+#define PG_SIZE                                0x1000
+#define MAX_RETRIES                    3
+
+#define MAX_UNIT_AXIS                  0x7FFF
+
+#define PKT_READ_SIZE                  72
+#define PKT_WRITE_SIZE                 80
+
+/* the finger definition of the report event */
+#define FINGER_EV_OFFSET_ID            0
+#define FINGER_EV_OFFSET_X             1
+#define FINGER_EV_OFFSET_Y             3
+#define FINGER_EV_SIZE                 5
+
+#define FINGER_EV_V1_OFFSET_ID         0
+#define FINGER_EV_V1_OFFSET_W          1
+#define FINGER_EV_V1_OFFSET_P          2
+#define FINGER_EV_V1_OFFSET_X          3
+#define FINGER_EV_V1_OFFSET_Y          5
+#define FINGER_EV_V1_SIZE              7
+
+/* The definition of a report packet */
+#define TOUCH_PK_OFFSET_REPORT_ID      0
+#define TOUCH_PK_OFFSET_EVENT          1
+#define TOUCH_PK_OFFSET_SCAN_TIME      51
+#define TOUCH_PK_OFFSET_FNGR_NUM       53
+
+#define TOUCH_PK_V1_OFFSET_REPORT_ID   0
+#define TOUCH_PK_V1_OFFSET_EVENT       1
+#define TOUCH_PK_V1_OFFSET_SCAN_TIME   71
+#define TOUCH_PK_V1_OFFSET_FNGR_NUM    73
+
+/* The definition of the controller parameters */
+#define CTL_PARAM_OFFSET_FW_ID         0
+#define CTL_PARAM_OFFSET_PLAT_ID       2
+#define CTL_PARAM_OFFSET_XMLS_ID1      4
+#define CTL_PARAM_OFFSET_XMLS_ID2      6
+#define CTL_PARAM_OFFSET_PHY_CH_X      8
+#define CTL_PARAM_OFFSET_PHY_CH_Y      10
+#define CTL_PARAM_OFFSET_PHY_X0                12
+#define CTL_PARAM_OFFSET_PHY_X1                14
+#define CTL_PARAM_OFFSET_PHY_Y0                16
+#define CTL_PARAM_OFFSET_PHY_Y1                18
+#define CTL_PARAM_OFFSET_PHY_W         22
+#define CTL_PARAM_OFFSET_PHY_H         24
+#define CTL_PARAM_OFFSET_FACTOR                32
+
+/* Communication commands */
+#define PACKET_SIZE                    56
+#define VND_REQ_READ                   0x06
+#define VND_READ_DATA                  0x07
+#define VND_REQ_WRITE                  0x08
+
+#define VND_CMD_START                  0x00
+#define VND_CMD_STOP                   0x01
+#define VND_CMD_RESET                  0x09
+
+#define VND_CMD_ERASE                  0x1A
+
+#define VND_GET_CHECKSUM               0x66
+
+#define VND_SET_DATA                   0x83
+#define VND_SET_COMMAND_DATA           0x84
+#define VND_SET_CHECKSUM_CALC          0x86
+#define VND_SET_CHECKSUM_LENGTH                0x87
+
+#define VND_CMD_SFLCK                  0xFC
+#define VND_CMD_SFUNL                  0xFD
+
+#define CMD_SFLCK_KEY                  0xC39B
+#define CMD_SFUNL_KEY                  0x95DA
+
+#define STRIDX_PLATFORM_ID             0x80
+#define STRIDX_PARAMETERS              0x81
+
+#define CMD_BUF_SIZE                   8
+#define PKT_BUF_SIZE                   64
+
+/* The definition of the command packet */
+#define CMD_REPORT_ID_OFFSET           0x0
+#define CMD_TYPE_OFFSET                        0x1
+#define CMD_INDEX_OFFSET               0x2
+#define CMD_KEY_OFFSET                 0x3
+#define CMD_LENGTH_OFFSET              0x4
+#define CMD_DATA_OFFSET                        0x8
+
+/* The definition of firmware chunk tags */
+#define FOURCC_ID_RIFF                 0x46464952
+#define FOURCC_ID_WHIF                 0x46494857
+#define FOURCC_ID_FRMT                 0x544D5246
+#define FOURCC_ID_FRWR                 0x52575246
+#define FOURCC_ID_CNFG                 0x47464E43
+
+#define CHUNK_ID_FRMT                  FOURCC_ID_FRMT
+#define CHUNK_ID_FRWR                  FOURCC_ID_FRWR
+#define CHUNK_ID_CNFG                  FOURCC_ID_CNFG
+
+#define FW_FOURCC1_OFFSET              0
+#define FW_SIZE_OFFSET                 4
+#define FW_FOURCC2_OFFSET              8
+#define FW_PAYLOAD_OFFSET              40
+
+#define FW_CHUNK_ID_OFFSET             0
+#define FW_CHUNK_SIZE_OFFSET           4
+#define FW_CHUNK_TGT_START_OFFSET      8
+#define FW_CHUNK_PAYLOAD_LEN_OFFSET    12
+#define FW_CHUNK_SRC_START_OFFSET      16
+#define FW_CHUNK_VERSION_OFFSET                20
+#define FW_CHUNK_ATTR_OFFSET           24
+#define FW_CHUNK_PAYLOAD_OFFSET                32
+
+/* Controller requires minimum 300us between commands */
+#define WDT_COMMAND_DELAY_MS           2
+#define WDT_FLASH_WRITE_DELAY_MS       4
+
+struct wdt87xx_sys_param {
+       u16     fw_id;
+       u16     plat_id;
+       u16     xmls_id1;
+       u16     xmls_id2;
+       u16     phy_ch_x;
+       u16     phy_ch_y;
+       u16     phy_w;
+       u16     phy_h;
+       u16     scaling_factor;
+       u32     max_x;
+       u32     max_y;
+};
+
+struct wdt87xx_data {
+       struct i2c_client               *client;
+       struct input_dev                *input;
+       /* Mutex for fw update to prevent concurrent access */
+       struct mutex                    fw_mutex;
+       struct wdt87xx_sys_param        param;
+       u8                              phys[32];
+};
+
+static int wdt87xx_i2c_xfer(struct i2c_client *client,
+                           void *txdata, size_t txlen,
+                           void *rxdata, size_t rxlen)
+{
+       struct i2c_msg msgs[] = {
+               {
+                       .addr   = client->addr,
+                       .flags  = 0,
+                       .len    = txlen,
+                       .buf    = txdata,
+               },
+               {
+                       .addr   = client->addr,
+                       .flags  = I2C_M_RD,
+                       .len    = rxlen,
+                       .buf    = rxdata,
+               },
+       };
+       int error;
+       int ret;
+
+       ret = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs));
+       if (ret != ARRAY_SIZE(msgs)) {
+               error = ret < 0 ? ret : -EIO;
+               dev_err(&client->dev, "%s: i2c transfer failed: %d\n",
+                       __func__, error);
+               return error;
+       }
+
+       return 0;
+}
+
+static int wdt87xx_get_string(struct i2c_client *client, u8 str_idx,
+                             u8 *buf, size_t len)
+{
+       u8 tx_buf[] = { 0x22, 0x00, 0x13, 0x0E, str_idx, 0x23, 0x00 };
+       u8 rx_buf[PKT_WRITE_SIZE];
+       size_t rx_len = len + 2;
+       int error;
+
+       if (rx_len > sizeof(rx_buf))
+               return -EINVAL;
+
+       error = wdt87xx_i2c_xfer(client, tx_buf, sizeof(tx_buf),
+                                rx_buf, rx_len);
+       if (error) {
+               dev_err(&client->dev, "get string failed: %d\n", error);
+               return error;
+       }
+
+       if (rx_buf[1] != 0x03) {
+               dev_err(&client->dev, "unexpected response to get string: %d\n",
+                       rx_buf[1]);
+               return -EINVAL;
+       }
+
+       rx_len = min_t(size_t, len, rx_buf[0]);
+       memcpy(buf, &rx_buf[2], rx_len);
+
+       mdelay(WDT_COMMAND_DELAY_MS);
+
+       return 0;
+}
+
+static int wdt87xx_get_feature(struct i2c_client *client,
+                              u8 *buf, size_t buf_size)
+{
+       u8 tx_buf[8];
+       u8 rx_buf[PKT_WRITE_SIZE];
+       size_t tx_len = 0;
+       size_t rx_len = buf_size + 2;
+       int error;
+
+       if (rx_len > sizeof(rx_buf))
+               return -EINVAL;
+
+       /* Get feature command packet */
+       tx_buf[tx_len++] = 0x22;
+       tx_buf[tx_len++] = 0x00;
+       if (buf[CMD_REPORT_ID_OFFSET] > 0xF) {
+               tx_buf[tx_len++] = 0x30;
+               tx_buf[tx_len++] = 0x02;
+               tx_buf[tx_len++] = buf[CMD_REPORT_ID_OFFSET];
+       } else {
+               tx_buf[tx_len++] = 0x30 | buf[CMD_REPORT_ID_OFFSET];
+               tx_buf[tx_len++] = 0x02;
+       }
+       tx_buf[tx_len++] = 0x23;
+       tx_buf[tx_len++] = 0x00;
+
+       error = wdt87xx_i2c_xfer(client, tx_buf, tx_len, rx_buf, rx_len);
+       if (error) {
+               dev_err(&client->dev, "get feature failed: %d\n", error);
+               return error;
+       }
+
+       rx_len = min_t(size_t, buf_size, get_unaligned_le16(rx_buf));
+       memcpy(buf, &rx_buf[2], rx_len);
+
+       mdelay(WDT_COMMAND_DELAY_MS);
+
+       return 0;
+}
+
+static int wdt87xx_set_feature(struct i2c_client *client,
+                              const u8 *buf, size_t buf_size)
+{
+       u8 tx_buf[PKT_WRITE_SIZE];
+       int tx_len = 0;
+       int error;
+
+       /* Set feature command packet */
+       tx_buf[tx_len++] = 0x22;
+       tx_buf[tx_len++] = 0x00;
+       if (buf[CMD_REPORT_ID_OFFSET] > 0xF) {
+               tx_buf[tx_len++] = 0x30;
+               tx_buf[tx_len++] = 0x03;
+               tx_buf[tx_len++] = buf[CMD_REPORT_ID_OFFSET];
+       } else {
+               tx_buf[tx_len++] = 0x30 | buf[CMD_REPORT_ID_OFFSET];
+               tx_buf[tx_len++] = 0x03;
+       }
+       tx_buf[tx_len++] = 0x23;
+       tx_buf[tx_len++] = 0x00;
+       tx_buf[tx_len++] = (buf_size & 0xFF);
+       tx_buf[tx_len++] = ((buf_size & 0xFF00) >> 8);
+
+       if (tx_len + buf_size > sizeof(tx_buf))
+               return -EINVAL;
+
+       memcpy(&tx_buf[tx_len], buf, buf_size);
+       tx_len += buf_size;
+
+       error = i2c_master_send(client, tx_buf, tx_len);
+       if (error < 0) {
+               dev_err(&client->dev, "set feature failed: %d\n", error);
+               return error;
+       }
+
+       mdelay(WDT_COMMAND_DELAY_MS);
+
+       return 0;
+}
+
+static int wdt87xx_send_command(struct i2c_client *client, int cmd, int value)
+{
+       u8 cmd_buf[CMD_BUF_SIZE];
+
+       /* Set the command packet */
+       cmd_buf[CMD_REPORT_ID_OFFSET] = VND_REQ_WRITE;
+       cmd_buf[CMD_TYPE_OFFSET] = VND_SET_COMMAND_DATA;
+       put_unaligned_le16((u16)cmd, &cmd_buf[CMD_INDEX_OFFSET]);
+
+       switch (cmd) {
+       case VND_CMD_START:
+       case VND_CMD_STOP:
+       case VND_CMD_RESET:
+               /* Mode selector */
+               put_unaligned_le32((value & 0xFF), &cmd_buf[CMD_LENGTH_OFFSET]);
+               break;
+
+       case VND_CMD_SFLCK:
+               put_unaligned_le16(CMD_SFLCK_KEY, &cmd_buf[CMD_KEY_OFFSET]);
+               break;
+
+       case VND_CMD_SFUNL:
+               put_unaligned_le16(CMD_SFUNL_KEY, &cmd_buf[CMD_KEY_OFFSET]);
+               break;
+
+       case VND_CMD_ERASE:
+       case VND_SET_CHECKSUM_CALC:
+       case VND_SET_CHECKSUM_LENGTH:
+               put_unaligned_le32(value, &cmd_buf[CMD_KEY_OFFSET]);
+               break;
+
+       default:
+               cmd_buf[CMD_REPORT_ID_OFFSET] = 0;
+               dev_err(&client->dev, "Invalid command: %d\n", cmd);
+               return -EINVAL;
+       }
+
+       return wdt87xx_set_feature(client, cmd_buf, sizeof(cmd_buf));
+}
+
+static int wdt87xx_sw_reset(struct i2c_client *client)
+{
+       int error;
+
+       dev_dbg(&client->dev, "resetting device now\n");
+
+       error = wdt87xx_send_command(client, VND_CMD_RESET, 0);
+       if (error) {
+               dev_err(&client->dev, "reset failed\n");
+               return error;
+       }
+
+       /* Wait for the device to be ready */
+       msleep(200);
+
+       return 0;
+}
+
+static const void *wdt87xx_get_fw_chunk(const struct firmware *fw, u32 id)
+{
+       size_t pos = FW_PAYLOAD_OFFSET;
+       u32 chunk_id, chunk_size;
+
+       while (pos < fw->size) {
+               chunk_id = get_unaligned_le32(fw->data +
+                                             pos + FW_CHUNK_ID_OFFSET);
+               if (chunk_id == id)
+                       return fw->data + pos;
+
+               chunk_size = get_unaligned_le32(fw->data +
+                                               pos + FW_CHUNK_SIZE_OFFSET);
+               pos += chunk_size + 2 * sizeof(u32); /* chunk ID + size */
+       }
+
+       return NULL;
+}
+
+static int wdt87xx_get_sysparam(struct i2c_client *client,
+                               struct wdt87xx_sys_param *param)
+{
+       u8 buf[PKT_READ_SIZE];
+       int error;
+
+       error = wdt87xx_get_string(client, STRIDX_PARAMETERS, buf, 34);
+       if (error) {
+               dev_err(&client->dev, "failed to get parameters\n");
+               return error;
+       }
+
+       param->xmls_id1 = get_unaligned_le16(buf + CTL_PARAM_OFFSET_XMLS_ID1);
+       param->xmls_id2 = get_unaligned_le16(buf + CTL_PARAM_OFFSET_XMLS_ID2);
+       param->phy_ch_x = get_unaligned_le16(buf + CTL_PARAM_OFFSET_PHY_CH_X);
+       param->phy_ch_y = get_unaligned_le16(buf + CTL_PARAM_OFFSET_PHY_CH_Y);
+       param->phy_w = get_unaligned_le16(buf + CTL_PARAM_OFFSET_PHY_W) / 10;
+       param->phy_h = get_unaligned_le16(buf + CTL_PARAM_OFFSET_PHY_H) / 10;
+
+       /* Get the scaling factor of pixel to logical coordinate */
+       param->scaling_factor =
+                       get_unaligned_le16(buf + CTL_PARAM_OFFSET_FACTOR);
+
+       param->max_x = MAX_UNIT_AXIS;
+       param->max_y = DIV_ROUND_CLOSEST(MAX_UNIT_AXIS * param->phy_h,
+                                        param->phy_w);
+
+       error = wdt87xx_get_string(client, STRIDX_PLATFORM_ID, buf, 8);
+       if (error) {
+               dev_err(&client->dev, "failed to get platform id\n");
+               return error;
+       }
+
+       param->plat_id = buf[1];
+
+       buf[0] = 0xf2;
+       error = wdt87xx_get_feature(client, buf, 16);
+       if (error) {
+               dev_err(&client->dev, "failed to get firmware id\n");
+               return error;
+       }
+
+       if (buf[0] != 0xf2) {
+               dev_err(&client->dev, "wrong id of fw response: 0x%x\n",
+                       buf[0]);
+               return -EINVAL;
+       }
+
+       param->fw_id = get_unaligned_le16(&buf[1]);
+
+       dev_info(&client->dev,
+                "fw_id: 0x%x, plat_id: 0x%x, xml_id1: %04x, xml_id2: %04x\n",
+                param->fw_id, param->plat_id,
+                param->xmls_id1, param->xmls_id2);
+
+       return 0;
+}
+
+static int wdt87xx_validate_firmware(struct wdt87xx_data *wdt,
+                                    const struct firmware *fw)
+{
+       const void *fw_chunk;
+       u32 data1, data2;
+       u32 size;
+       u8 fw_chip_id;
+       u8 chip_id;
+
+       data1 = get_unaligned_le32(fw->data + FW_FOURCC1_OFFSET);
+       data2 = get_unaligned_le32(fw->data + FW_FOURCC2_OFFSET);
+       if (data1 != FOURCC_ID_RIFF || data2 != FOURCC_ID_WHIF) {
+               dev_err(&wdt->client->dev, "check fw tag failed\n");
+               return -EINVAL;
+       }
+
+       size = get_unaligned_le32(fw->data + FW_SIZE_OFFSET);
+       if (size != fw->size) {
+               dev_err(&wdt->client->dev,
+                       "fw size mismatch: expected %d, actual %zu\n",
+                       size, fw->size);
+               return -EINVAL;
+       }
+
+       /*
+        * Get the chip_id from the firmware. Make sure that it is the
+        * right controller to do the firmware and config update.
+        */
+       fw_chunk = wdt87xx_get_fw_chunk(fw, CHUNK_ID_FRWR);
+       if (!fw_chunk) {
+               dev_err(&wdt->client->dev,
+                       "unable to locate firmware chunk\n");
+               return -EINVAL;
+       }
+
+       fw_chip_id = (get_unaligned_le32(fw_chunk +
+                                        FW_CHUNK_VERSION_OFFSET) >> 12) & 0xF;
+       chip_id = (wdt->param.fw_id >> 12) & 0xF;
+
+       if (fw_chip_id != chip_id) {
+               dev_err(&wdt->client->dev,
+                       "fw version mismatch: fw %d vs. chip %d\n",
+                       fw_chip_id, chip_id);
+               return -ENODEV;
+       }
+
+       return 0;
+}
+
+static int wdt87xx_validate_fw_chunk(const void *data, int id)
+{
+       if (id == CHUNK_ID_FRWR) {
+               u32 fw_id;
+
+               fw_id = get_unaligned_le32(data + FW_CHUNK_PAYLOAD_OFFSET);
+               if (fw_id != WDT_FIRMWARE_ID)
+                       return -EINVAL;
+       }
+
+       return 0;
+}
+
+static int wdt87xx_write_data(struct i2c_client *client, const char *data,
+                             u32 address, int length)
+{
+       u16 packet_size;
+       int count = 0;
+       int error;
+       u8 pkt_buf[PKT_BUF_SIZE];
+
+       /* Address and length should be 4 bytes aligned */
+       if ((address & 0x3) != 0 || (length & 0x3) != 0) {
+               dev_err(&client->dev,
+                       "addr & len must be 4 bytes aligned %x, %x\n",
+                       address, length);
+               return -EINVAL;
+       }
+
+       while (length) {
+               packet_size = min(length, PACKET_SIZE);
+
+               pkt_buf[CMD_REPORT_ID_OFFSET] = VND_REQ_WRITE;
+               pkt_buf[CMD_TYPE_OFFSET] = VND_SET_DATA;
+               put_unaligned_le16(packet_size, &pkt_buf[CMD_INDEX_OFFSET]);
+               put_unaligned_le32(address, &pkt_buf[CMD_LENGTH_OFFSET]);
+               memcpy(&pkt_buf[CMD_DATA_OFFSET], data, packet_size);
+
+               error = wdt87xx_set_feature(client, pkt_buf, sizeof(pkt_buf));
+               if (error)
+                       return error;
+
+               length -= packet_size;
+               data += packet_size;
+               address += packet_size;
+
+               /* Wait for the controller to finish the write */
+               mdelay(WDT_FLASH_WRITE_DELAY_MS);
+
+               if ((++count % 32) == 0) {
+                       /* Delay for fw to clear watchdog */
+                       msleep(20);
+               }
+       }
+
+       return 0;
+}
+
+static u16 misr(u16 cur_value, u8 new_value)
+{
+       u32 a, b;
+       u32 bit0;
+       u32 y;
+
+       a = cur_value;
+       b = new_value;
+       bit0 = a ^ (b & 1);
+       bit0 ^= a >> 1;
+       bit0 ^= a >> 2;
+       bit0 ^= a >> 4;
+       bit0 ^= a >> 5;
+       bit0 ^= a >> 7;
+       bit0 ^= a >> 11;
+       bit0 ^= a >> 15;
+       y = (a << 1) ^ b;
+       y = (y & ~1) | (bit0 & 1);
+
+       return (u16)y;
+}
+
+static u16 wdt87xx_calculate_checksum(const u8 *data, size_t length)
+{
+       u16 checksum = 0;
+       size_t i;
+
+       for (i = 0; i < length; i++)
+               checksum = misr(checksum, data[i]);
+
+       return checksum;
+}
+
+static int wdt87xx_get_checksum(struct i2c_client *client, u16 *checksum,
+                               u32 address, int length)
+{
+       int error;
+       int time_delay;
+       u8 pkt_buf[PKT_BUF_SIZE];
+       u8 cmd_buf[CMD_BUF_SIZE];
+
+       error = wdt87xx_send_command(client, VND_SET_CHECKSUM_LENGTH, length);
+       if (error) {
+               dev_err(&client->dev, "failed to set checksum length\n");
+               return error;
+       }
+
+       error = wdt87xx_send_command(client, VND_SET_CHECKSUM_CALC, address);
+       if (error) {
+               dev_err(&client->dev, "failed to set checksum address\n");
+               return error;
+       }
+
+       /* Wait for the operation to complete */
+       time_delay = DIV_ROUND_UP(length, 1024);
+       msleep(time_delay * 30);
+
+       memset(cmd_buf, 0, sizeof(cmd_buf));
+       cmd_buf[CMD_REPORT_ID_OFFSET] = VND_REQ_READ;
+       cmd_buf[CMD_TYPE_OFFSET] = VND_GET_CHECKSUM;
+       error = wdt87xx_set_feature(client, cmd_buf, sizeof(cmd_buf));
+       if (error) {
+               dev_err(&client->dev, "failed to request checksum\n");
+               return error;
+       }
+
+       memset(pkt_buf, 0, sizeof(pkt_buf));
+       pkt_buf[CMD_REPORT_ID_OFFSET] = VND_READ_DATA;
+       error = wdt87xx_get_feature(client, pkt_buf, sizeof(pkt_buf));
+       if (error) {
+               dev_err(&client->dev, "failed to read checksum\n");
+               return error;
+       }
+
+       *checksum = get_unaligned_le16(&pkt_buf[CMD_DATA_OFFSET]);
+       return 0;
+}
+
+static int wdt87xx_write_firmware(struct i2c_client *client, const void *chunk)
+{
+       u32 start_addr = get_unaligned_le32(chunk + FW_CHUNK_TGT_START_OFFSET);
+       u32 size = get_unaligned_le32(chunk + FW_CHUNK_PAYLOAD_LEN_OFFSET);
+       const void *data = chunk + FW_CHUNK_PAYLOAD_OFFSET;
+       int error;
+       int err1;
+       int page_size;
+       int retry = 0;
+       u16 device_checksum, firmware_checksum;
+
+       dev_dbg(&client->dev, "start 4k page program\n");
+
+       error = wdt87xx_send_command(client, VND_CMD_STOP, MODE_STOP);
+       if (error) {
+               dev_err(&client->dev, "stop report mode failed\n");
+               return error;
+       }
+
+       error = wdt87xx_send_command(client, VND_CMD_SFUNL, 0);
+       if (error) {
+               dev_err(&client->dev, "unlock failed\n");
+               goto out_enable_reporting;
+       }
+
+       mdelay(10);
+
+       while (size) {
+               dev_dbg(&client->dev, "%s: %x, %x\n", __func__,
+                       start_addr, size);
+
+               page_size = min_t(u32, size, PG_SIZE);
+               size -= page_size;
+
+               for (retry = 0; retry < MAX_RETRIES; retry++) {
+                       error = wdt87xx_send_command(client, VND_CMD_ERASE,
+                                                    start_addr);
+                       if (error) {
+                               dev_err(&client->dev,
+                                       "erase failed at %#08x\n", start_addr);
+                               break;
+                       }
+
+                       msleep(50);
+
+                       error = wdt87xx_write_data(client, data, start_addr,
+                                                  page_size);
+                       if (error) {
+                               dev_err(&client->dev,
+                                       "write failed at %#08x (%d bytes)\n",
+                                       start_addr, page_size);
+                               break;
+                       }
+
+                       error = wdt87xx_get_checksum(client, &device_checksum,
+                                                    start_addr, page_size);
+                       if (error) {
+                               dev_err(&client->dev,
+                                       "failed to retrieve checksum for %#08x (len: %d)\n",
+                                       start_addr, page_size);
+                               break;
+                       }
+
+                       firmware_checksum =
+                               wdt87xx_calculate_checksum(data, page_size);
+
+                       if (device_checksum == firmware_checksum)
+                               break;
+
+                       dev_err(&client->dev,
+                               "checksum fail: %d vs %d, retry %d\n",
+                               device_checksum, firmware_checksum, retry);
+               }
+
+               if (retry == MAX_RETRIES) {
+                       dev_err(&client->dev, "page write failed\n");
+                       error = -EIO;
+                       goto out_lock_device;
+               }
+
+               start_addr = start_addr + page_size;
+               data = data + page_size;
+       }
+
+out_lock_device:
+       err1 = wdt87xx_send_command(client, VND_CMD_SFLCK, 0);
+       if (err1)
+               dev_err(&client->dev, "lock failed\n");
+
+       mdelay(10);
+
+out_enable_reporting:
+       err1 = wdt87xx_send_command(client, VND_CMD_START, 0);
+       if (err1)
+               dev_err(&client->dev, "start to report failed\n");
+
+       return error ? error : err1;
+}
+
+static int wdt87xx_load_chunk(struct i2c_client *client,
+                             const struct firmware *fw, u32 ck_id)
+{
+       const void *chunk;
+       int error;
+
+       chunk = wdt87xx_get_fw_chunk(fw, ck_id);
+       if (!chunk) {
+               dev_err(&client->dev, "unable to locate chunk (type %d)\n",
+                       ck_id);
+               return -EINVAL;
+       }
+
+       error = wdt87xx_validate_fw_chunk(chunk, ck_id);
+       if (error) {
+               dev_err(&client->dev, "invalid chunk (type %d): %d\n",
+                       ck_id, error);
+               return error;
+       }
+
+       error = wdt87xx_write_firmware(client, chunk);
+       if (error) {
+               dev_err(&client->dev,
+                       "failed to write fw chunk (type %d): %d\n",
+                       ck_id, error);
+               return error;
+       }
+
+       return 0;
+}
+
+static int wdt87xx_do_update_firmware(struct i2c_client *client,
+                                     const struct firmware *fw,
+                                     unsigned int chunk_id)
+{
+       struct wdt87xx_data *wdt = i2c_get_clientdata(client);
+       int error;
+
+       error = wdt87xx_validate_firmware(wdt, fw);
+       if (error)
+               return error;
+
+       error = mutex_lock_interruptible(&wdt->fw_mutex);
+       if (error)
+               return error;
+
+       disable_irq(client->irq);
+
+       error = wdt87xx_load_chunk(client, fw, chunk_id);
+       if (error) {
+               dev_err(&client->dev,
+                       "firmware load failed (type: %d): %d\n",
+                       chunk_id, error);
+               goto out;
+       }
+
+       error = wdt87xx_sw_reset(client);
+       if (error) {
+               dev_err(&client->dev, "soft reset failed: %d\n", error);
+               goto out;
+       }
+
+       /* Refresh the parameters */
+       error = wdt87xx_get_sysparam(client, &wdt->param);
+       if (error)
+               dev_err(&client->dev,
+                       "failed to refresh system paramaters: %d\n", error);
+out:
+       enable_irq(client->irq);
+       mutex_unlock(&wdt->fw_mutex);
+
+       return error ? error : 0;
+}
+
+static int wdt87xx_update_firmware(struct device *dev,
+                                  const char *fw_name, unsigned int chunk_id)
+{
+       struct i2c_client *client = to_i2c_client(dev);
+       const struct firmware *fw;
+       int error;
+
+       error = request_firmware(&fw, fw_name, dev);
+       if (error) {
+               dev_err(&client->dev, "unable to retrieve firmware %s: %d\n",
+                       fw_name, error);
+               return error;
+       }
+
+       error = wdt87xx_do_update_firmware(client, fw, chunk_id);
+
+       release_firmware(fw);
+
+       return error ? error : 0;
+}
+
+static ssize_t config_csum_show(struct device *dev,
+                               struct device_attribute *attr, char *buf)
+{
+       struct i2c_client *client = to_i2c_client(dev);
+       struct wdt87xx_data *wdt = i2c_get_clientdata(client);
+       u32 cfg_csum;
+
+       cfg_csum = wdt->param.xmls_id1;
+       cfg_csum = (cfg_csum << 16) | wdt->param.xmls_id2;
+
+       return scnprintf(buf, PAGE_SIZE, "%x\n", cfg_csum);
+}
+
+static ssize_t fw_version_show(struct device *dev,
+                              struct device_attribute *attr, char *buf)
+{
+       struct i2c_client *client = to_i2c_client(dev);
+       struct wdt87xx_data *wdt = i2c_get_clientdata(client);
+
+       return scnprintf(buf, PAGE_SIZE, "%x\n", wdt->param.fw_id);
+}
+
+static ssize_t plat_id_show(struct device *dev,
+                           struct device_attribute *attr, char *buf)
+{
+       struct i2c_client *client = to_i2c_client(dev);
+       struct wdt87xx_data *wdt = i2c_get_clientdata(client);
+
+       return scnprintf(buf, PAGE_SIZE, "%x\n", wdt->param.plat_id);
+}
+
+static ssize_t update_config_store(struct device *dev,
+                                  struct device_attribute *attr,
+                                  const char *buf, size_t count)
+{
+       int error;
+
+       error = wdt87xx_update_firmware(dev, WDT87XX_CFG_NAME, CHUNK_ID_CNFG);
+
+       return error ? error : count;
+}
+
+static ssize_t update_fw_store(struct device *dev,
+                              struct device_attribute *attr,
+                              const char *buf, size_t count)
+{
+       int error;
+
+       error = wdt87xx_update_firmware(dev, WDT87XX_FW_NAME, CHUNK_ID_FRWR);
+
+       return error ? error : count;
+}
+
+static DEVICE_ATTR_RO(config_csum);
+static DEVICE_ATTR_RO(fw_version);
+static DEVICE_ATTR_RO(plat_id);
+static DEVICE_ATTR_WO(update_config);
+static DEVICE_ATTR_WO(update_fw);
+
+static struct attribute *wdt87xx_attrs[] = {
+       &dev_attr_config_csum.attr,
+       &dev_attr_fw_version.attr,
+       &dev_attr_plat_id.attr,
+       &dev_attr_update_config.attr,
+       &dev_attr_update_fw.attr,
+       NULL
+};
+
+static const struct attribute_group wdt87xx_attr_group = {
+       .attrs = wdt87xx_attrs,
+};
+
+static void wdt87xx_report_contact(struct input_dev *input,
+                                  struct wdt87xx_sys_param *param,
+                                  u8 *buf)
+{
+       int finger_id;
+       u32 x, y, w;
+       u8 p;
+
+       finger_id = (buf[FINGER_EV_V1_OFFSET_ID] >> 3) - 1;
+       if (finger_id < 0)
+               return;
+
+       /* Check if this is an active contact */
+       if (!(buf[FINGER_EV_V1_OFFSET_ID] & 0x1))
+               return;
+
+       w = buf[FINGER_EV_V1_OFFSET_W];
+       w *= param->scaling_factor;
+
+       p = buf[FINGER_EV_V1_OFFSET_P];
+
+       x = get_unaligned_le16(buf + FINGER_EV_V1_OFFSET_X);
+
+       y = get_unaligned_le16(buf + FINGER_EV_V1_OFFSET_Y);
+       y = DIV_ROUND_CLOSEST(y * param->phy_h, param->phy_w);
+
+       /* Reject out-of-range coordinates */
+       if (x > param->max_x || y > param->max_y)
+               return;
+
+       dev_dbg(input->dev.parent, "tip on (%d), x(%d), y(%d)\n",
+               finger_id, x, y);
+
+       input_mt_slot(input, finger_id);
+       input_mt_report_slot_state(input, MT_TOOL_FINGER, 1);
+       input_report_abs(input, ABS_MT_TOUCH_MAJOR, w);
+       input_report_abs(input, ABS_MT_PRESSURE, p);
+       input_report_abs(input, ABS_MT_POSITION_X, x);
+       input_report_abs(input, ABS_MT_POSITION_Y, y);
+}
+
+static irqreturn_t wdt87xx_ts_interrupt(int irq, void *dev_id)
+{
+       struct wdt87xx_data *wdt = dev_id;
+       struct i2c_client *client = wdt->client;
+       int i, fingers;
+       int error;
+       u8 raw_buf[WDT_V1_RAW_BUF_COUNT] = {0};
+
+       error = i2c_master_recv(client, raw_buf, WDT_V1_RAW_BUF_COUNT);
+       if (error < 0) {
+               dev_err(&client->dev, "read v1 raw data failed: %d\n", error);
+               goto irq_exit;
+       }
+
+       fingers = raw_buf[TOUCH_PK_V1_OFFSET_FNGR_NUM];
+       if (!fingers)
+               goto irq_exit;
+
+       for (i = 0; i < WDT_MAX_FINGER; i++)
+               wdt87xx_report_contact(wdt->input,
+                                      &wdt->param,
+                                      &raw_buf[TOUCH_PK_V1_OFFSET_EVENT +
+                                               i * FINGER_EV_V1_SIZE]);
+
+       input_mt_sync_frame(wdt->input);
+       input_sync(wdt->input);
+
+irq_exit:
+       return IRQ_HANDLED;
+}
+
+static int wdt87xx_ts_create_input_device(struct wdt87xx_data *wdt)
+{
+       struct device *dev = &wdt->client->dev;
+       struct input_dev *input;
+       unsigned int res = DIV_ROUND_CLOSEST(MAX_UNIT_AXIS, wdt->param.phy_w);
+       int error;
+
+       input = devm_input_allocate_device(dev);
+       if (!input) {
+               dev_err(dev, "failed to allocate input device\n");
+               return -ENOMEM;
+       }
+       wdt->input = input;
+
+       input->name = "WDT87xx Touchscreen";
+       input->id.bustype = BUS_I2C;
+       input->phys = wdt->phys;
+
+       input_set_abs_params(input, ABS_MT_POSITION_X, 0,
+                            wdt->param.max_x, 0, 0);
+       input_set_abs_params(input, ABS_MT_POSITION_Y, 0,
+                            wdt->param.max_y, 0, 0);
+       input_abs_set_res(input, ABS_MT_POSITION_X, res);
+       input_abs_set_res(input, ABS_MT_POSITION_Y, res);
+
+       input_set_abs_params(input, ABS_MT_TOUCH_MAJOR,
+                            0, wdt->param.max_x, 0, 0);
+       input_set_abs_params(input, ABS_MT_PRESSURE, 0, 0xFF, 0, 0);
+
+       input_mt_init_slots(input, WDT_MAX_FINGER,
+                           INPUT_MT_DIRECT | INPUT_MT_DROP_UNUSED);
+
+       error = input_register_device(input);
+       if (error) {
+               dev_err(dev, "failed to register input device: %d\n", error);
+               return error;
+       }
+
+       return 0;
+}
+
+static int wdt87xx_ts_probe(struct i2c_client *client,
+                           const struct i2c_device_id *id)
+{
+       struct wdt87xx_data *wdt;
+       int error;
+
+       dev_dbg(&client->dev, "adapter=%d, client irq: %d\n",
+               client->adapter->nr, client->irq);
+
+       /* Check if the I2C function is ok in this adapter */
+       if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C))
+               return -ENXIO;
+
+       wdt = devm_kzalloc(&client->dev, sizeof(*wdt), GFP_KERNEL);
+       if (!wdt)
+               return -ENOMEM;
+
+       wdt->client = client;
+       mutex_init(&wdt->fw_mutex);
+       i2c_set_clientdata(client, wdt);
+
+       snprintf(wdt->phys, sizeof(wdt->phys), "i2c-%u-%04x/input0",
+                client->adapter->nr, client->addr);
+
+       error = wdt87xx_get_sysparam(client, &wdt->param);
+       if (error)
+               return error;
+
+       error = wdt87xx_ts_create_input_device(wdt);
+       if (error)
+               return error;
+
+       error = devm_request_threaded_irq(&client->dev, client->irq,
+                                         NULL, wdt87xx_ts_interrupt,
+                                         IRQF_ONESHOT,
+                                         client->name, wdt);
+       if (error) {
+               dev_err(&client->dev, "request irq failed: %d\n", error);
+               return error;
+       }
+
+       error = sysfs_create_group(&client->dev.kobj, &wdt87xx_attr_group);
+       if (error) {
+               dev_err(&client->dev, "create sysfs failed: %d\n", error);
+               return error;
+       }
+
+       return 0;
+}
+
+static int wdt87xx_ts_remove(struct i2c_client *client)
+{
+       sysfs_remove_group(&client->dev.kobj, &wdt87xx_attr_group);
+
+       return 0;
+}
+
+static int __maybe_unused wdt87xx_suspend(struct device *dev)
+{
+       struct i2c_client *client = to_i2c_client(dev);
+       int error;
+
+       disable_irq(client->irq);
+
+       error = wdt87xx_send_command(client, VND_CMD_STOP, MODE_IDLE);
+       if (error) {
+               enable_irq(client->irq);
+               dev_err(&client->dev,
+                       "failed to stop device when suspending: %d\n",
+                       error);
+               return error;
+       }
+
+       return 0;
+}
+
+static int __maybe_unused wdt87xx_resume(struct device *dev)
+{
+       struct i2c_client *client = to_i2c_client(dev);
+       int error;
+
+       /*
+        * The chip may have been reset while the system is resuming,
+        * give it some time to settle.
+        */
+       mdelay(100);
+
+       error = wdt87xx_send_command(client, VND_CMD_START, 0);
+       if (error)
+               dev_err(&client->dev,
+                       "failed to start device when resuming: %d\n",
+                       error);
+
+       enable_irq(client->irq);
+
+       return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(wdt87xx_pm_ops, wdt87xx_suspend, wdt87xx_resume);
+
+static const struct i2c_device_id wdt87xx_dev_id[] = {
+       { WDT87XX_NAME, 0 },
+       { }
+};
+MODULE_DEVICE_TABLE(i2c, wdt87xx_dev_id);
+
+static const struct acpi_device_id wdt87xx_acpi_id[] = {
+       { "WDHT0001", 0 },
+       { }
+};
+MODULE_DEVICE_TABLE(acpi, wdt87xx_acpi_id);
+
+static struct i2c_driver wdt87xx_driver = {
+       .probe          = wdt87xx_ts_probe,
+       .remove         = wdt87xx_ts_remove,
+       .id_table       = wdt87xx_dev_id,
+       .driver = {
+               .name   = WDT87XX_NAME,
+               .pm     = &wdt87xx_pm_ops,
+               .acpi_match_table = ACPI_PTR(wdt87xx_acpi_id),
+       },
+};
+module_i2c_driver(wdt87xx_driver);
+
+MODULE_AUTHOR("HN Chen <hn.chen@weidahitech.com>");
+MODULE_DESCRIPTION("WeidaHiTech WDT87XX Touchscreen driver");
+MODULE_VERSION(WDT87XX_DRV_VER);
+MODULE_LICENSE("GPL");
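
As a standalone illustration of the page-verification step in wdt87xx_write_firmware(), the sketch below re-runs the same MISR checksum over a small made-up buffer; in the driver the result is compared against the value the controller reports via VND_GET_CHECKSUM. The sample data is arbitrary and nothing here touches hardware.

/*
 * Userspace sketch of the MISR checksum used to verify each flashed page.
 * The buffer contents are arbitrary example data.
 */
#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

static uint16_t misr(uint16_t cur_value, uint8_t new_value)
{
	uint32_t a = cur_value, b = new_value;
	uint32_t bit0, y;

	bit0 = a ^ (b & 1);
	bit0 ^= a >> 1;
	bit0 ^= a >> 2;
	bit0 ^= a >> 4;
	bit0 ^= a >> 5;
	bit0 ^= a >> 7;
	bit0 ^= a >> 11;
	bit0 ^= a >> 15;
	y = (a << 1) ^ b;
	y = (y & ~1u) | (bit0 & 1);

	return (uint16_t)y;
}

int main(void)
{
	uint8_t page[8] = { 0xde, 0xad, 0xbe, 0xef, 0x00, 0x11, 0x22, 0x33 };
	uint16_t checksum = 0;
	size_t i;

	for (i = 0; i < sizeof(page); i++)
		checksum = misr(checksum, page[i]);

	printf("page checksum: 0x%04x\n", checksum);
	return 0;
}
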
index d3e5e9abe3b6cc36f4b488491ad38e24f0a4bf8b..a57e9b7498953bb9ebf947695caea46d73e2bfeb 100644 (file)
@@ -117,6 +117,7 @@ struct kmem_cache *amd_iommu_irq_cache;
 
 static void update_domain(struct protection_domain *domain);
 static int alloc_passthrough_domain(void);
+static int protection_domain_init(struct protection_domain *domain);
 
 /****************************************************************************
  *
@@ -1881,12 +1882,9 @@ static struct dma_ops_domain *dma_ops_domain_alloc(void)
        if (!dma_dom)
                return NULL;
 
-       spin_lock_init(&dma_dom->domain.lock);
-
-       dma_dom->domain.id = domain_id_alloc();
-       if (dma_dom->domain.id == 0)
+       if (protection_domain_init(&dma_dom->domain))
                goto free_dma_dom;
-       INIT_LIST_HEAD(&dma_dom->domain.dev_list);
+
        dma_dom->domain.mode = PAGE_MODE_2_LEVEL;
        dma_dom->domain.pt_root = (void *)get_zeroed_page(GFP_KERNEL);
        dma_dom->domain.flags = PD_DMA_OPS_MASK;
@@ -2916,6 +2914,18 @@ static void protection_domain_free(struct protection_domain *domain)
        kfree(domain);
 }
 
+static int protection_domain_init(struct protection_domain *domain)
+{
+       spin_lock_init(&domain->lock);
+       mutex_init(&domain->api_lock);
+       domain->id = domain_id_alloc();
+       if (!domain->id)
+               return -ENOMEM;
+       INIT_LIST_HEAD(&domain->dev_list);
+
+       return 0;
+}
+
 static struct protection_domain *protection_domain_alloc(void)
 {
        struct protection_domain *domain;
@@ -2924,12 +2934,8 @@ static struct protection_domain *protection_domain_alloc(void)
        if (!domain)
                return NULL;
 
-       spin_lock_init(&domain->lock);
-       mutex_init(&domain->api_lock);
-       domain->id = domain_id_alloc();
-       if (!domain->id)
+       if (protection_domain_init(domain))
                goto out_err;
-       INIT_LIST_HEAD(&domain->dev_list);
 
        add_domain_to_list(domain);
 
index f14130121298bfb5739cedab54480a737b3ba6ce..8e9ec81ce4bbd85473d6d6a35e7c3567569187ee 100644 (file)
@@ -1389,8 +1389,7 @@ static void arm_smmu_domain_free(struct iommu_domain *domain)
        struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
        struct arm_smmu_device *smmu = smmu_domain->smmu;
 
-       if (smmu_domain->pgtbl_ops)
-               free_io_pgtable_ops(smmu_domain->pgtbl_ops);
+       free_io_pgtable_ops(smmu_domain->pgtbl_ops);
 
        /* Free the CD and ASID, if we allocated them */
        if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
index dce041b1c1394be7057528a5b6fd23dcbbbf6253..4cd0c29cb585000c0e5899651948ad1dc2ffbf1f 100644 (file)
@@ -1566,7 +1566,7 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
                return -ENODEV;
        }
 
-       if ((id & ID0_S1TS) && ((smmu->version == 1) || (id & ID0_ATOSNS))) {
+       if ((id & ID0_S1TS) && ((smmu->version == 1) || !(id & ID0_ATOSNS))) {
                smmu->features |= ARM_SMMU_FEAT_TRANS_OPS;
                dev_notice(smmu->dev, "\taddress translation ops\n");
        }
index 49e7542510d15caac5622cdb01fdcf8b77bb80e8..f286090931cc874f6851eab4f279b5f9f44276d1 100644 (file)
@@ -847,13 +847,24 @@ static int add_iommu_group(struct device *dev, void *data)
 {
        struct iommu_callback_data *cb = data;
        const struct iommu_ops *ops = cb->ops;
+       int ret;
 
        if (!ops->add_device)
                return 0;
 
        WARN_ON(dev->iommu_group);
 
-       return ops->add_device(dev);
+       ret = ops->add_device(dev);
+
+       /*
+        * We ignore -ENODEV errors for now, as they just mean that the
+        * device is not translated by an IOMMU. We still care about
+        * other errors and fail to initialize when they happen.
+        */
+       if (ret == -ENODEV)
+               ret = 0;
+
+       return ret;
 }
 
 static int remove_iommu_group(struct device *dev, void *data)
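
The add_iommu_group() change above treats -ENODEV from ->add_device() as "this device is not translated by an IOMMU" rather than a failure. A minimal sketch of that error-filtering pattern, using a toy callback instead of the real iommu_ops, is shown below.

/*
 * Toy callback standing in for ops->add_device(): -ENODEV is filtered to
 * success, other errors still propagate.
 */
#include <errno.h>
#include <stdio.h>

static int add_device_stub(int behind_iommu)
{
	return behind_iommu ? 0 : -ENODEV;
}

static int add_group(int behind_iommu)
{
	int ret = add_device_stub(behind_iommu);

	if (ret == -ENODEV)	/* no IOMMU for this device: not an error */
		ret = 0;
	return ret;
}

int main(void)
{
	printf("device behind IOMMU:  %d\n", add_group(1));
	printf("device without IOMMU: %d\n", add_group(0));
	return 0;
}
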
index 0f6486d4f1b0d50bdcfee34c0a12d110a1047c27..0f67ae32464fd22f3e6509fe5fddc27727afdf1a 100644 (file)
@@ -8,21 +8,4 @@
  * warranty of any kind, whether express or implied.
  */
 
-#ifndef _IRQCHIP_H
-#define _IRQCHIP_H
-
-#include <linux/of.h>
-
-/*
- * This macro must be used by the different irqchip drivers to declare
- * the association between their DT compatible string and their
- * initialization function.
- *
- * @name: name that must be unique accross all IRQCHIP_DECLARE of the
- * same file.
- * @compstr: compatible string of the irqchip driver
- * @fn: initialization function
- */
-#define IRQCHIP_DECLARE(name, compat, fn) OF_DECLARE_2(irqchip, name, compat, fn)
-
-#endif
+#include <linux/irqchip.h>
index 4191614c4651eaef0e3d58dffa2008e737ad877b..9ad35f72ab4c077791cf7942cceb36301325872d 100644 (file)
@@ -39,6 +39,32 @@ config LEDS_88PM860X
          This option enables support for on-chip LED drivers found on Marvell
          Semiconductor 88PM8606 PMIC.
 
+config LEDS_AAT1290
+       tristate "LED support for the AAT1290"
+       depends on LEDS_CLASS_FLASH
+       depends on V4L2_FLASH_LED_CLASS || !V4L2_FLASH_LED_CLASS
+       depends on GPIOLIB
+       depends on OF
+       depends on PINCTRL
+       help
+         This option enables support for the LEDs on the AAT1290.
+
+config LEDS_BCM6328
+       tristate "LED Support for Broadcom BCM6328"
+       depends on LEDS_CLASS
+       depends on OF
+       help
+         This option enables support for LEDs connected to the BCM6328
+         LED HW controller accessed via MMIO registers.
+
+config LEDS_BCM6358
+       tristate "LED Support for Broadcom BCM6358"
+       depends on LEDS_CLASS
+       depends on OF
+       help
+         This option enables support for LEDs connected to the BCM6358
+         LED HW controller accessed via MMIO registers.
+
 config LEDS_LM3530
        tristate "LCD Backlight driver for LM3530"
        depends on LEDS_CLASS
@@ -179,7 +205,7 @@ config LEDS_PCA9532_GPIO
 config LEDS_GPIO
        tristate "LED Support for GPIO connected LEDs"
        depends on LEDS_CLASS
-       depends on GPIOLIB
+       depends on GPIOLIB || COMPILE_TEST
        help
          This option enables support for the LEDs connected to GPIO
          outputs. To be useful the particular board must have LEDs
@@ -203,6 +229,7 @@ config LEDS_LP55XX_COMMON
        tristate "Common Driver for TI/National LP5521/5523/55231/5562/8501"
        depends on LEDS_LP5521 || LEDS_LP5523 || LEDS_LP5562 || LEDS_LP8501
        select FW_LOADER
+       select FW_LOADER_USER_HELPER_FALLBACK
        help
          This option supports common operations for LP5521/5523/55231/5562/8501
          devices.
@@ -464,6 +491,25 @@ config LEDS_TCA6507
          LED driver chips accessed via the I2C bus.
          Driver support brightness control and hardware-assisted blinking.
 
+config LEDS_TLC591XX
+       tristate "LED driver for TLC59108 and TLC59116 controllers"
+       depends on LEDS_CLASS && I2C
+       select REGMAP_I2C
+       help
+         This option enables support for Texas Instruments TLC59108
+         and TLC59116 LED controllers.
+
+config LEDS_MAX77693
+       tristate "LED support for MAX77693 Flash"
+       depends on LEDS_CLASS_FLASH
+       depends on V4L2_FLASH_LED_CLASS || !V4L2_FLASH_LED_CLASS
+       depends on MFD_MAX77693
+       depends on OF
+       help
+         This option enables support for the flash part of the MAX77693
+         multifunction device. It has built-in control for two LEDs in flash
+         and torch mode.
+
 config LEDS_MAX8997
        tristate "LED support for MAX8997 PMIC"
        depends on LEDS_CLASS && MFD_MAX8997
@@ -495,6 +541,15 @@ config LEDS_MENF21BMC
          This driver can also be built as a module. If so the module
          will be called leds-menf21bmc.
 
+config LEDS_KTD2692
+       tristate "LED support for KTD2692 flash LED controller"
+       depends on LEDS_CLASS_FLASH && GPIOLIB && OF
+       help
+         This option enables support for the KTD2692 flash LED controller,
+         which is connected through the ExpressWire interface.
+
+         Say Y to enable this driver.
+
 comment "LED driver for blink(1) USB RGB LED is under Special HID drivers (HID_THINGM)"
 
 config LEDS_BLINKM
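The Kconfig hunk above introduces six new LED driver symbols, alongside the GPIOLIB || COMPILE_TEST relaxation for LEDS_GPIO and the FW_LOADER_USER_HELPER_FALLBACK select for LEDS_LP55XX_COMMON. As a rough illustration only, a defconfig fragment enabling the new drivers might look like the following; the y/m choices are arbitrary and not taken from this series:

# Illustrative defconfig fragment; tristate choices are examples only
CONFIG_LEDS_CLASS_FLASH=y
CONFIG_V4L2_FLASH_LED_CLASS=y
CONFIG_LEDS_AAT1290=m
CONFIG_LEDS_BCM6328=y
CONFIG_LEDS_BCM6358=y
CONFIG_LEDS_TLC591XX=m
CONFIG_LEDS_MAX77693=m
CONFIG_LEDS_KTD2692=m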
index bf4609338e1047fee8119d690c376a31a0eb9074..8d6a24a2f51376fb9a6aaa704cc1d1908dc51d5a 100644 (file)
@@ -7,6 +7,9 @@ obj-$(CONFIG_LEDS_TRIGGERS)             += led-triggers.o
 
 # LED Platform Drivers
 obj-$(CONFIG_LEDS_88PM860X)            += leds-88pm860x.o
+obj-$(CONFIG_LEDS_AAT1290)             += leds-aat1290.o
+obj-$(CONFIG_LEDS_BCM6328)             += leds-bcm6328.o
+obj-$(CONFIG_LEDS_BCM6358)             += leds-bcm6358.o
 obj-$(CONFIG_LEDS_BD2802)              += leds-bd2802.o
 obj-$(CONFIG_LEDS_LOCOMO)              += leds-locomo.o
 obj-$(CONFIG_LEDS_LM3530)              += leds-lm3530.o
@@ -31,6 +34,7 @@ obj-$(CONFIG_LEDS_LP8501)             += leds-lp8501.o
 obj-$(CONFIG_LEDS_LP8788)              += leds-lp8788.o
 obj-$(CONFIG_LEDS_LP8860)              += leds-lp8860.o
 obj-$(CONFIG_LEDS_TCA6507)             += leds-tca6507.o
+obj-$(CONFIG_LEDS_TLC591XX)            += leds-tlc591xx.o
 obj-$(CONFIG_LEDS_CLEVO_MAIL)          += leds-clevo-mail.o
 obj-$(CONFIG_LEDS_IPAQ_MICRO)          += leds-ipaq-micro.o
 obj-$(CONFIG_LEDS_HP6XX)               += leds-hp6xx.o
@@ -52,6 +56,7 @@ obj-$(CONFIG_LEDS_MC13783)            += leds-mc13783.o
 obj-$(CONFIG_LEDS_NS2)                 += leds-ns2.o
 obj-$(CONFIG_LEDS_NETXBIG)             += leds-netxbig.o
 obj-$(CONFIG_LEDS_ASIC3)               += leds-asic3.o
+obj-$(CONFIG_LEDS_MAX77693)            += leds-max77693.o
 obj-$(CONFIG_LEDS_MAX8997)             += leds-max8997.o
 obj-$(CONFIG_LEDS_LM355x)              += leds-lm355x.o
 obj-$(CONFIG_LEDS_BLINKM)              += leds-blinkm.o
@@ -59,6 +64,7 @@ obj-$(CONFIG_LEDS_SYSCON)             += leds-syscon.o
 obj-$(CONFIG_LEDS_VERSATILE)           += leds-versatile.o
 obj-$(CONFIG_LEDS_MENF21BMC)           += leds-menf21bmc.o
 obj-$(CONFIG_LEDS_PM8941_WLED)         += leds-pm8941-wled.o
+obj-$(CONFIG_LEDS_KTD2692)             += leds-ktd2692.o
 
 # LED SPI Drivers
 obj-$(CONFIG_LEDS_DAC124S085)          += leds-dac124s085.o
index 7fb2a19ac649c55906f96f17eb2a9e988658260e..beabfbc6f7cdd406f6ba96692bd5b376a893bdc4 100644 (file)
@@ -121,6 +121,11 @@ static void led_timer_function(unsigned long data)
        brightness = led_get_brightness(led_cdev);
        if (!brightness) {
                /* Time to switch the LED on. */
+               if (led_cdev->delayed_set_value) {
+                       led_cdev->blink_brightness =
+                                       led_cdev->delayed_set_value;
+                       led_cdev->delayed_set_value = 0;
+               }
                brightness = led_cdev->blink_brightness;
                delay = led_cdev->blink_delay_on;
        } else {
index 9886dace5ad226950e0b70cc9ee2b5ccd42ff228..549de7e24cfdf445f27180ab7c776da21af50a94 100644 (file)
@@ -119,10 +119,11 @@ void led_set_brightness(struct led_classdev *led_cdev,
 {
        int ret = 0;
 
-       /* delay brightness setting if need to stop soft-blink timer */
+       /* delay brightness if soft-blink is active */
        if (led_cdev->blink_delay_on || led_cdev->blink_delay_off) {
                led_cdev->delayed_set_value = brightness;
-               schedule_work(&led_cdev->set_brightness_work);
+               if (brightness == LED_OFF)
+                       schedule_work(&led_cdev->set_brightness_work);
                return;
        }
 
diff --git a/drivers/leds/leds-aat1290.c b/drivers/leds/leds-aat1290.c
new file mode 100644 (file)
index 0000000..fd7c25f
--- /dev/null
@@ -0,0 +1,576 @@
+/*
+ *     LED Flash class driver for the AAT1290
+ *     1.5A Step-Up Current Regulator for Flash LEDs
+ *
+ *     Copyright (C) 2015, Samsung Electronics Co., Ltd.
+ *     Author: Jacek Anaszewski <j.anaszewski@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ */
+
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/led-class-flash.h>
+#include <linux/leds.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/workqueue.h>
+#include <media/v4l2-flash-led-class.h>
+
+#define AAT1290_MOVIE_MODE_CURRENT_ADDR        17
+#define AAT1290_MAX_MM_CURR_PERCENT_0  16
+#define AAT1290_MAX_MM_CURR_PERCENT_100        1
+
+#define AAT1290_FLASH_SAFETY_TIMER_ADDR        18
+
+#define AAT1290_MOVIE_MODE_CONFIG_ADDR 19
+#define AAT1290_MOVIE_MODE_OFF         1
+#define AAT1290_MOVIE_MODE_ON          3
+
+#define AAT1290_MM_CURRENT_RATIO_ADDR  20
+#define AAT1290_MM_TO_FL_1_92          1
+
+#define AAT1290_MM_TO_FL_RATIO         1000 / 1920
+#define AAT1290_MAX_MM_CURRENT(fl_max) (fl_max * AAT1290_MM_TO_FL_RATIO)
+
+#define AAT1290_LATCH_TIME_MIN_US      500
+#define AAT1290_LATCH_TIME_MAX_US      1000
+#define AAT1290_EN_SET_TICK_TIME_US    1
+#define AAT1290_FLEN_OFF_DELAY_TIME_US 10
+#define AAT1290_FLASH_TM_NUM_LEVELS    16
+#define AAT1290_MM_CURRENT_SCALE_SIZE  15
+
+
+struct aat1290_led_config_data {
+       /* maximum LED current in movie mode */
+       u32 max_mm_current;
+       /* maximum LED current in flash mode */
+       u32 max_flash_current;
+       /* maximum flash timeout */
+       u32 max_flash_tm;
+       /* external strobe capability */
+       bool has_external_strobe;
+       /* max LED brightness level */
+       enum led_brightness max_brightness;
+};
+
+struct aat1290_led {
+       /* platform device data */
+       struct platform_device *pdev;
+       /* secures access to the device */
+       struct mutex lock;
+
+       /* corresponding LED Flash class device */
+       struct led_classdev_flash fled_cdev;
+       /* V4L2 Flash device */
+       struct v4l2_flash *v4l2_flash;
+
+       /* FLEN pin */
+       struct gpio_desc *gpio_fl_en;
+       /* EN|SET pin  */
+       struct gpio_desc *gpio_en_set;
+       /* movie mode current scale */
+       int *mm_current_scale;
+       /* device mode */
+       bool movie_mode;
+
+       /* brightness cache */
+       unsigned int torch_brightness;
+       /* assures led-triggers compatibility */
+       struct work_struct work_brightness_set;
+};
+
+static struct aat1290_led *fled_cdev_to_led(
+                               struct led_classdev_flash *fled_cdev)
+{
+       return container_of(fled_cdev, struct aat1290_led, fled_cdev);
+}
+
+static void aat1290_as2cwire_write(struct aat1290_led *led, int addr, int value)
+{
+       int i;
+
+       gpiod_direction_output(led->gpio_fl_en, 0);
+       gpiod_direction_output(led->gpio_en_set, 0);
+
+       udelay(AAT1290_FLEN_OFF_DELAY_TIME_US);
+
+       /* write address */
+       for (i = 0; i < addr; ++i) {
+               udelay(AAT1290_EN_SET_TICK_TIME_US);
+               gpiod_direction_output(led->gpio_en_set, 0);
+               udelay(AAT1290_EN_SET_TICK_TIME_US);
+               gpiod_direction_output(led->gpio_en_set, 1);
+       }
+
+       usleep_range(AAT1290_LATCH_TIME_MIN_US, AAT1290_LATCH_TIME_MAX_US);
+
+       /* write data */
+       for (i = 0; i < value; ++i) {
+               udelay(AAT1290_EN_SET_TICK_TIME_US);
+               gpiod_direction_output(led->gpio_en_set, 0);
+               udelay(AAT1290_EN_SET_TICK_TIME_US);
+               gpiod_direction_output(led->gpio_en_set, 1);
+       }
+
+       usleep_range(AAT1290_LATCH_TIME_MIN_US, AAT1290_LATCH_TIME_MAX_US);
+}
+
+static void aat1290_set_flash_safety_timer(struct aat1290_led *led,
+                                       unsigned int micro_sec)
+{
+       struct led_classdev_flash *fled_cdev = &led->fled_cdev;
+       struct led_flash_setting *flash_tm = &fled_cdev->timeout;
+       int flash_tm_reg = AAT1290_FLASH_TM_NUM_LEVELS -
+                               (micro_sec / flash_tm->step) + 1;
+
+       aat1290_as2cwire_write(led, AAT1290_FLASH_SAFETY_TIMER_ADDR,
+                                                       flash_tm_reg);
+}
+
+static void aat1290_brightness_set(struct aat1290_led *led,
+                                       enum led_brightness brightness)
+{
+       mutex_lock(&led->lock);
+
+       if (brightness == 0) {
+               gpiod_direction_output(led->gpio_fl_en, 0);
+               gpiod_direction_output(led->gpio_en_set, 0);
+               led->movie_mode = false;
+       } else {
+               if (!led->movie_mode) {
+                       aat1290_as2cwire_write(led,
+                               AAT1290_MM_CURRENT_RATIO_ADDR,
+                               AAT1290_MM_TO_FL_1_92);
+                       led->movie_mode = true;
+               }
+
+               aat1290_as2cwire_write(led, AAT1290_MOVIE_MODE_CURRENT_ADDR,
+                               AAT1290_MAX_MM_CURR_PERCENT_0 - brightness);
+               aat1290_as2cwire_write(led, AAT1290_MOVIE_MODE_CONFIG_ADDR,
+                               AAT1290_MOVIE_MODE_ON);
+       }
+
+       mutex_unlock(&led->lock);
+}
+
+/* LED subsystem callbacks */
+
+static void aat1290_brightness_set_work(struct work_struct *work)
+{
+       struct aat1290_led *led =
+               container_of(work, struct aat1290_led, work_brightness_set);
+
+       aat1290_brightness_set(led, led->torch_brightness);
+}
+
+static void aat1290_led_brightness_set(struct led_classdev *led_cdev,
+                                       enum led_brightness brightness)
+{
+       struct led_classdev_flash *fled_cdev = lcdev_to_flcdev(led_cdev);
+       struct aat1290_led *led = fled_cdev_to_led(fled_cdev);
+
+       led->torch_brightness = brightness;
+       schedule_work(&led->work_brightness_set);
+}
+
+static int aat1290_led_brightness_set_sync(struct led_classdev *led_cdev,
+                                       enum led_brightness brightness)
+{
+       struct led_classdev_flash *fled_cdev = lcdev_to_flcdev(led_cdev);
+       struct aat1290_led *led = fled_cdev_to_led(fled_cdev);
+
+       aat1290_brightness_set(led, brightness);
+
+       return 0;
+}
+
+static int aat1290_led_flash_strobe_set(struct led_classdev_flash *fled_cdev,
+                                        bool state)
+
+{
+       struct aat1290_led *led = fled_cdev_to_led(fled_cdev);
+       struct led_classdev *led_cdev = &fled_cdev->led_cdev;
+       struct led_flash_setting *timeout = &fled_cdev->timeout;
+
+       mutex_lock(&led->lock);
+
+       if (state) {
+               aat1290_set_flash_safety_timer(led, timeout->val);
+               gpiod_direction_output(led->gpio_fl_en, 1);
+       } else {
+               gpiod_direction_output(led->gpio_fl_en, 0);
+               gpiod_direction_output(led->gpio_en_set, 0);
+       }
+
+       /*
+        * To re-enter movie mode after a flash event, the part must be cycled
+        * off and back on and then reprogrammed via the AS2Cwire. Therefore
+        * the brightness and movie_mode properties need to be updated here to
+        * reflect the actual state.
+        */
+       led_cdev->brightness = 0;
+       led->movie_mode = false;
+
+       mutex_unlock(&led->lock);
+
+       return 0;
+}
+
+static int aat1290_led_flash_timeout_set(struct led_classdev_flash *fled_cdev,
+                                               u32 timeout)
+{
+       /*
+        * Don't do anything - flash timeout is cached in the led-class-flash
+        * core and will be applied in the strobe_set op, as writing the
+        * safety timer register spuriously turns the torch mode on.
+        */
+
+       return 0;
+}
+
+static int aat1290_led_parse_dt(struct aat1290_led *led,
+                       struct aat1290_led_config_data *cfg,
+                       struct device_node **sub_node)
+{
+       struct led_classdev *led_cdev = &led->fled_cdev.led_cdev;
+       struct device *dev = &led->pdev->dev;
+       struct device_node *child_node;
+#if IS_ENABLED(CONFIG_V4L2_FLASH_LED_CLASS)
+       struct pinctrl *pinctrl;
+#endif
+       int ret = 0;
+
+       led->gpio_fl_en = devm_gpiod_get(dev, "flen", GPIOD_ASIS);
+       if (IS_ERR(led->gpio_fl_en)) {
+               ret = PTR_ERR(led->gpio_fl_en);
+               dev_err(dev, "Unable to claim gpio \"flen\".\n");
+               return ret;
+       }
+
+       led->gpio_en_set = devm_gpiod_get(dev, "enset", GPIOD_ASIS);
+       if (IS_ERR(led->gpio_en_set)) {
+               ret = PTR_ERR(led->gpio_en_set);
+               dev_err(dev, "Unable to claim gpio \"enset\".\n");
+               return ret;
+       }
+
+#if IS_ENABLED(CONFIG_V4L2_FLASH_LED_CLASS)
+       pinctrl = devm_pinctrl_get_select_default(&led->pdev->dev);
+       if (IS_ERR(pinctrl)) {
+               cfg->has_external_strobe = false;
+               dev_info(dev,
+                        "No support for external strobe detected.\n");
+       } else {
+               cfg->has_external_strobe = true;
+       }
+#endif
+
+       child_node = of_get_next_available_child(dev->of_node, NULL);
+       if (!child_node) {
+               dev_err(dev, "No DT child node found for connected LED.\n");
+               return -EINVAL;
+       }
+
+       led_cdev->name = of_get_property(child_node, "label", NULL) ? :
+                                               child_node->name;
+
+       ret = of_property_read_u32(child_node, "led-max-microamp",
+                               &cfg->max_mm_current);
+       /*
+        * led-max-microamp will default to 1/20 of flash-max-microamp
+        * in case it is missing.
+        */
+       if (ret < 0)
+               dev_warn(dev,
+                       "led-max-microamp DT property missing\n");
+
+       ret = of_property_read_u32(child_node, "flash-max-microamp",
+                               &cfg->max_flash_current);
+       if (ret < 0) {
+               dev_err(dev,
+                       "flash-max-microamp DT property missing\n");
+               return ret;
+       }
+
+       ret = of_property_read_u32(child_node, "flash-max-timeout-us",
+                               &cfg->max_flash_tm);
+       if (ret < 0) {
+               dev_err(dev,
+                       "flash-max-timeout-us DT property missing\n");
+               return ret;
+       }
+
+       of_node_put(child_node);
+
+       *sub_node = child_node;
+
+       return ret;
+}
+
+static void aat1290_led_validate_mm_current(struct aat1290_led *led,
+                                       struct aat1290_led_config_data *cfg)
+{
+       int i, b = 0, e = AAT1290_MM_CURRENT_SCALE_SIZE;
+
+       while (e - b > 1) {
+               i = b + (e - b) / 2;
+               if (cfg->max_mm_current < led->mm_current_scale[i])
+                       e = i;
+               else
+                       b = i;
+       }
+
+       cfg->max_mm_current = led->mm_current_scale[b];
+       cfg->max_brightness = b + 1;
+}
+
+int init_mm_current_scale(struct aat1290_led *led,
+                       struct aat1290_led_config_data *cfg)
+{
+       int max_mm_current_percent[] = { 20, 22, 25, 28, 32, 36, 40, 45, 50, 56,
+                                               63, 71, 79, 89, 100 };
+       int i, max_mm_current =
+                       AAT1290_MAX_MM_CURRENT(cfg->max_flash_current);
+
+       led->mm_current_scale = devm_kzalloc(&led->pdev->dev,
+                                       sizeof(max_mm_current_percent),
+                                       GFP_KERNEL);
+       if (!led->mm_current_scale)
+               return -ENOMEM;
+
+       for (i = 0; i < AAT1290_MM_CURRENT_SCALE_SIZE; ++i)
+               led->mm_current_scale[i] = max_mm_current *
+                                         max_mm_current_percent[i] / 100;
+
+       return 0;
+}
+
+static int aat1290_led_get_configuration(struct aat1290_led *led,
+                                       struct aat1290_led_config_data *cfg,
+                                       struct device_node **sub_node)
+{
+       int ret;
+
+       ret = aat1290_led_parse_dt(led, cfg, sub_node);
+       if (ret < 0)
+               return ret;
+       /*
+        * Init the non-linear movie mode current scale based on the max
+        * flash current from the LED configuration.
+        */
+       ret = init_mm_current_scale(led, cfg);
+       if (ret < 0)
+               return ret;
+
+       aat1290_led_validate_mm_current(led, cfg);
+
+#if IS_ENABLED(CONFIG_V4L2_FLASH_LED_CLASS)
+#else
+       devm_kfree(&led->pdev->dev, led->mm_current_scale);
+#endif
+
+       return 0;
+}
+
+static void aat1290_init_flash_timeout(struct aat1290_led *led,
+                               struct aat1290_led_config_data *cfg)
+{
+       struct led_classdev_flash *fled_cdev = &led->fled_cdev;
+       struct led_flash_setting *setting;
+
+       /* Init flash timeout setting */
+       setting = &fled_cdev->timeout;
+       setting->min = cfg->max_flash_tm / AAT1290_FLASH_TM_NUM_LEVELS;
+       setting->max = cfg->max_flash_tm;
+       setting->step = setting->min;
+       setting->val = setting->max;
+}
+
+#if IS_ENABLED(CONFIG_V4L2_FLASH_LED_CLASS)
+static enum led_brightness aat1290_intensity_to_brightness(
+                                       struct v4l2_flash *v4l2_flash,
+                                       s32 intensity)
+{
+       struct led_classdev_flash *fled_cdev = v4l2_flash->fled_cdev;
+       struct aat1290_led *led = fled_cdev_to_led(fled_cdev);
+       int i;
+
+       for (i = AAT1290_MM_CURRENT_SCALE_SIZE - 1; i >= 0; --i)
+               if (intensity >= led->mm_current_scale[i])
+                       return i + 1;
+
+       return 1;
+}
+
+static s32 aat1290_brightness_to_intensity(struct v4l2_flash *v4l2_flash,
+                                       enum led_brightness brightness)
+{
+       struct led_classdev_flash *fled_cdev = v4l2_flash->fled_cdev;
+       struct aat1290_led *led = fled_cdev_to_led(fled_cdev);
+
+       return led->mm_current_scale[brightness - 1];
+}
+
+static int aat1290_led_external_strobe_set(struct v4l2_flash *v4l2_flash,
+                                               bool enable)
+{
+       struct aat1290_led *led = fled_cdev_to_led(v4l2_flash->fled_cdev);
+       struct led_classdev_flash *fled_cdev = v4l2_flash->fled_cdev;
+       struct led_classdev *led_cdev = &fled_cdev->led_cdev;
+       struct pinctrl *pinctrl;
+
+       gpiod_direction_output(led->gpio_fl_en, 0);
+       gpiod_direction_output(led->gpio_en_set, 0);
+
+       led->movie_mode = false;
+       led_cdev->brightness = 0;
+
+       pinctrl = devm_pinctrl_get_select(&led->pdev->dev,
+                                               enable ? "isp" : "host");
+       if (IS_ERR(pinctrl)) {
+               dev_warn(&led->pdev->dev, "Unable to switch strobe source.\n");
+               return PTR_ERR(pinctrl);
+       }
+
+       return 0;
+}
+
+static void aat1290_init_v4l2_flash_config(struct aat1290_led *led,
+                                       struct aat1290_led_config_data *led_cfg,
+                                       struct v4l2_flash_config *v4l2_sd_cfg)
+{
+       struct led_classdev *led_cdev = &led->fled_cdev.led_cdev;
+       struct led_flash_setting *s;
+
+       strlcpy(v4l2_sd_cfg->dev_name, led_cdev->name,
+               sizeof(v4l2_sd_cfg->dev_name));
+
+       s = &v4l2_sd_cfg->torch_intensity;
+       s->min = led->mm_current_scale[0];
+       s->max = led_cfg->max_mm_current;
+       s->step = 1;
+       s->val = s->max;
+
+       v4l2_sd_cfg->has_external_strobe = led_cfg->has_external_strobe;
+}
+
+static const struct v4l2_flash_ops v4l2_flash_ops = {
+       .external_strobe_set = aat1290_led_external_strobe_set,
+       .intensity_to_led_brightness = aat1290_intensity_to_brightness,
+       .led_brightness_to_intensity = aat1290_brightness_to_intensity,
+};
+#else
+static inline void aat1290_init_v4l2_flash_config(struct aat1290_led *led,
+                               struct aat1290_led_config_data *led_cfg,
+                               struct v4l2_flash_config *v4l2_sd_cfg)
+{
+}
+static const struct v4l2_flash_ops v4l2_flash_ops;
+#endif
+
+static const struct led_flash_ops flash_ops = {
+       .strobe_set = aat1290_led_flash_strobe_set,
+       .timeout_set = aat1290_led_flash_timeout_set,
+};
+
+static int aat1290_led_probe(struct platform_device *pdev)
+{
+       struct device *dev = &pdev->dev;
+       struct device_node *sub_node = NULL;
+       struct aat1290_led *led;
+       struct led_classdev *led_cdev;
+       struct led_classdev_flash *fled_cdev;
+       struct aat1290_led_config_data led_cfg = {};
+       struct v4l2_flash_config v4l2_sd_cfg = {};
+       int ret;
+
+       led = devm_kzalloc(dev, sizeof(*led), GFP_KERNEL);
+       if (!led)
+               return -ENOMEM;
+
+       led->pdev = pdev;
+       platform_set_drvdata(pdev, led);
+
+       fled_cdev = &led->fled_cdev;
+       fled_cdev->ops = &flash_ops;
+       led_cdev = &fled_cdev->led_cdev;
+
+       ret = aat1290_led_get_configuration(led, &led_cfg, &sub_node);
+       if (ret < 0)
+               return ret;
+
+       mutex_init(&led->lock);
+
+       /* Initialize LED Flash class device */
+       led_cdev->brightness_set = aat1290_led_brightness_set;
+       led_cdev->brightness_set_sync = aat1290_led_brightness_set_sync;
+       led_cdev->max_brightness = led_cfg.max_brightness;
+       led_cdev->flags |= LED_DEV_CAP_FLASH;
+       INIT_WORK(&led->work_brightness_set, aat1290_brightness_set_work);
+
+       aat1290_init_flash_timeout(led, &led_cfg);
+
+       /* Register LED Flash class device */
+       ret = led_classdev_flash_register(&pdev->dev, fled_cdev);
+       if (ret < 0)
+               goto err_flash_register;
+
+       aat1290_init_v4l2_flash_config(led, &led_cfg, &v4l2_sd_cfg);
+
+       /* Create V4L2 Flash subdev. */
+       led->v4l2_flash = v4l2_flash_init(dev, sub_node, fled_cdev, NULL,
+                                         &v4l2_flash_ops, &v4l2_sd_cfg);
+       if (IS_ERR(led->v4l2_flash)) {
+               ret = PTR_ERR(led->v4l2_flash);
+               goto error_v4l2_flash_init;
+       }
+
+       return 0;
+
+error_v4l2_flash_init:
+       led_classdev_flash_unregister(fled_cdev);
+err_flash_register:
+       mutex_destroy(&led->lock);
+
+       return ret;
+}
+
+static int aat1290_led_remove(struct platform_device *pdev)
+{
+       struct aat1290_led *led = platform_get_drvdata(pdev);
+
+       v4l2_flash_release(led->v4l2_flash);
+       led_classdev_flash_unregister(&led->fled_cdev);
+       cancel_work_sync(&led->work_brightness_set);
+
+       mutex_destroy(&led->lock);
+
+       return 0;
+}
+
+static const struct of_device_id aat1290_led_dt_match[] = {
+       { .compatible = "skyworks,aat1290" },
+       {},
+};
+
+static struct platform_driver aat1290_led_driver = {
+       .probe          = aat1290_led_probe,
+       .remove         = aat1290_led_remove,
+       .driver         = {
+               .name   = "aat1290",
+               .of_match_table = aat1290_led_dt_match,
+       },
+};
+
+module_platform_driver(aat1290_led_driver);
+
+MODULE_AUTHOR("Jacek Anaszewski <j.anaszewski@samsung.com>");
+MODULE_DESCRIPTION("Skyworks Current Regulator for Flash LEDs");
+MODULE_LICENSE("GPL v2");
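As a worked example of the movie-mode current scale that init_mm_current_scale() builds above, assume flash-max-microamp = 1500000 (the value is an assumption for illustration): AAT1290_MAX_MM_CURRENT() yields 1500000 * 1000 / 1920 = 781250 uA, and the 15 scale entries run from 20% of that (156250 uA) up to 100% (781250 uA). A self-contained sketch of the same arithmetic:

/* Standalone sketch of the AAT1290 movie-mode current scale for an
 * assumed 1.5 A flash current limit; not code from the driver. */
#include <stdio.h>

int main(void)
{
        const int pct[] = { 20, 22, 25, 28, 32, 36, 40, 45, 50, 56,
                            63, 71, 79, 89, 100 };
        int flash_max_ua = 1500000;                  /* assumed DT value */
        int max_mm_ua = flash_max_ua * 1000 / 1920;  /* 781250 uA */
        int i;

        for (i = 0; i < 15; i++)
                printf("level %2d: %d uA\n", i + 1, max_mm_ua * pct[i] / 100);

        return 0;
}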
diff --git a/drivers/leds/leds-bcm6328.c b/drivers/leds/leds-bcm6328.c
new file mode 100644 (file)
index 0000000..986fe1e
--- /dev/null
@@ -0,0 +1,413 @@
+/*
+ * Driver for BCM6328 memory-mapped LEDs, based on leds-syscon.c
+ *
+ * Copyright 2015 Álvaro Fernández Rojas <noltari@gmail.com>
+ * Copyright 2015 Jonas Gorski <jogo@openwrt.org>
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ */
+#include <linux/io.h>
+#include <linux/leds.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/spinlock.h>
+
+#define BCM6328_REG_INIT               0x00
+#define BCM6328_REG_MODE_HI            0x04
+#define BCM6328_REG_MODE_LO            0x08
+#define BCM6328_REG_HWDIS              0x0c
+#define BCM6328_REG_STROBE             0x10
+#define BCM6328_REG_LNKACTSEL_HI       0x14
+#define BCM6328_REG_LNKACTSEL_LO       0x18
+#define BCM6328_REG_RBACK              0x1c
+#define BCM6328_REG_SERMUX             0x20
+
+#define BCM6328_LED_MAX_COUNT          24
+#define BCM6328_LED_DEF_DELAY          500
+#define BCM6328_LED_INTERVAL_MS                20
+
+#define BCM6328_LED_INTV_MASK          0x3f
+#define BCM6328_LED_FAST_INTV_SHIFT    6
+#define BCM6328_LED_FAST_INTV_MASK     (BCM6328_LED_INTV_MASK << \
+                                        BCM6328_LED_FAST_INTV_SHIFT)
+#define BCM6328_SERIAL_LED_EN          BIT(12)
+#define BCM6328_SERIAL_LED_MUX         BIT(13)
+#define BCM6328_SERIAL_LED_CLK_NPOL    BIT(14)
+#define BCM6328_SERIAL_LED_DATA_PPOL   BIT(15)
+#define BCM6328_SERIAL_LED_SHIFT_DIR   BIT(16)
+#define BCM6328_LED_SHIFT_TEST         BIT(30)
+#define BCM6328_LED_TEST               BIT(31)
+
+#define BCM6328_LED_MODE_MASK          3
+#define BCM6328_LED_MODE_OFF           0
+#define BCM6328_LED_MODE_FAST          1
+#define BCM6328_LED_MODE_BLINK         2
+#define BCM6328_LED_MODE_ON            3
+#define BCM6328_LED_SHIFT(X)           ((X) << 1)
+
+/**
+ * struct bcm6328_led - state container for bcm6328 based LEDs
+ * @cdev: LED class device for this LED
+ * @mem: memory resource
+ * @lock: memory lock
+ * @pin: LED pin number
+ * @blink_leds: blinking LEDs
+ * @blink_delay: blinking delay
+ * @active_low: LED is active low
+ */
+struct bcm6328_led {
+       struct led_classdev cdev;
+       void __iomem *mem;
+       spinlock_t *lock;
+       unsigned long pin;
+       unsigned long *blink_leds;
+       unsigned long *blink_delay;
+       bool active_low;
+};
+
+static void bcm6328_led_write(void __iomem *reg, unsigned long data)
+{
+       iowrite32be(data, reg);
+}
+
+static unsigned long bcm6328_led_read(void __iomem *reg)
+{
+       return ioread32be(reg);
+}
+
+/**
+ * LEDMode 64 bits / 24 LEDs
+ * bits [31:0] -> LEDs 8-23
+ * bits [47:32] -> LEDs 0-7
+ * bits [63:48] -> unused
+ */
+static unsigned long bcm6328_pin2shift(unsigned long pin)
+{
+       if (pin < 8)
+               return pin + 16; /* LEDs 0-7 (bits 47:32) */
+       else
+               return pin - 8; /* LEDs 8-23 (bits 31:0) */
+}
+
+static void bcm6328_led_mode(struct bcm6328_led *led, unsigned long value)
+{
+       void __iomem *mode;
+       unsigned long val, shift;
+
+       shift = bcm6328_pin2shift(led->pin);
+       if (shift / 16)
+               mode = led->mem + BCM6328_REG_MODE_HI;
+       else
+               mode = led->mem + BCM6328_REG_MODE_LO;
+
+       val = bcm6328_led_read(mode);
+       val &= ~(BCM6328_LED_MODE_MASK << BCM6328_LED_SHIFT(shift % 16));
+       val |= (value << BCM6328_LED_SHIFT(shift % 16));
+       bcm6328_led_write(mode, val);
+}
+
+static void bcm6328_led_set(struct led_classdev *led_cdev,
+                           enum led_brightness value)
+{
+       struct bcm6328_led *led =
+               container_of(led_cdev, struct bcm6328_led, cdev);
+       unsigned long flags;
+
+       spin_lock_irqsave(led->lock, flags);
+       *(led->blink_leds) &= ~BIT(led->pin);
+       if ((led->active_low && value == LED_OFF) ||
+           (!led->active_low && value != LED_OFF))
+               bcm6328_led_mode(led, BCM6328_LED_MODE_OFF);
+       else
+               bcm6328_led_mode(led, BCM6328_LED_MODE_ON);
+       spin_unlock_irqrestore(led->lock, flags);
+}
+
+static int bcm6328_blink_set(struct led_classdev *led_cdev,
+                            unsigned long *delay_on, unsigned long *delay_off)
+{
+       struct bcm6328_led *led =
+               container_of(led_cdev, struct bcm6328_led, cdev);
+       unsigned long delay, flags;
+
+       if (!*delay_on)
+               *delay_on = BCM6328_LED_DEF_DELAY;
+       if (!*delay_off)
+               *delay_off = BCM6328_LED_DEF_DELAY;
+
+       if (*delay_on != *delay_off) {
+               dev_dbg(led_cdev->dev,
+                       "fallback to soft blinking (delay_on != delay_off)\n");
+               return -EINVAL;
+       }
+
+       delay = *delay_on / BCM6328_LED_INTERVAL_MS;
+       if (delay == 0)
+               delay = 1;
+       else if (delay > BCM6328_LED_INTV_MASK) {
+               dev_dbg(led_cdev->dev,
+                       "fallback to soft blinking (delay > %ums)\n",
+                       BCM6328_LED_INTV_MASK * BCM6328_LED_INTERVAL_MS);
+               return -EINVAL;
+       }
+
+       spin_lock_irqsave(led->lock, flags);
+       if (*(led->blink_leds) == 0 ||
+           *(led->blink_leds) == BIT(led->pin) ||
+           *(led->blink_delay) == delay) {
+               unsigned long val;
+
+               *(led->blink_leds) |= BIT(led->pin);
+               *(led->blink_delay) = delay;
+
+               val = bcm6328_led_read(led->mem + BCM6328_REG_INIT);
+               val &= ~BCM6328_LED_FAST_INTV_MASK;
+               val |= (delay << BCM6328_LED_FAST_INTV_SHIFT);
+               bcm6328_led_write(led->mem + BCM6328_REG_INIT, val);
+
+               bcm6328_led_mode(led, BCM6328_LED_MODE_BLINK);
+
+               spin_unlock_irqrestore(led->lock, flags);
+       } else {
+               spin_unlock_irqrestore(led->lock, flags);
+               dev_dbg(led_cdev->dev,
+                       "fallback to soft blinking (delay already set)\n");
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+static int bcm6328_hwled(struct device *dev, struct device_node *nc, u32 reg,
+                        void __iomem *mem, spinlock_t *lock)
+{
+       int i, cnt;
+       unsigned long flags, val;
+
+       spin_lock_irqsave(lock, flags);
+       val = bcm6328_led_read(mem + BCM6328_REG_HWDIS);
+       val &= ~BIT(reg);
+       bcm6328_led_write(mem + BCM6328_REG_HWDIS, val);
+       spin_unlock_irqrestore(lock, flags);
+
+       /* Only LEDs 0-7 can be activity/link controlled */
+       if (reg >= 8)
+               return 0;
+
+       cnt = of_property_count_elems_of_size(nc, "brcm,link-signal-sources",
+                                             sizeof(u32));
+       for (i = 0; i < cnt; i++) {
+               u32 sel;
+               void __iomem *addr;
+
+               if (reg < 4)
+                       addr = mem + BCM6328_REG_LNKACTSEL_LO;
+               else
+                       addr = mem + BCM6328_REG_LNKACTSEL_HI;
+
+               of_property_read_u32_index(nc, "brcm,link-signal-sources", i,
+                                          &sel);
+
+               if (reg / 4 != sel / 4) {
+                       dev_warn(dev, "invalid link signal source\n");
+                       continue;
+               }
+
+               spin_lock_irqsave(lock, flags);
+               val = bcm6328_led_read(addr);
+               val |= (BIT(reg) << (((sel % 4) * 4) + 16));
+               bcm6328_led_write(addr, val);
+               spin_unlock_irqrestore(lock, flags);
+       }
+
+       cnt = of_property_count_elems_of_size(nc,
+                                             "brcm,activity-signal-sources",
+                                             sizeof(u32));
+       for (i = 0; i < cnt; i++) {
+               u32 sel;
+               void __iomem *addr;
+
+               if (reg < 4)
+                       addr = mem + BCM6328_REG_LNKACTSEL_LO;
+               else
+                       addr = mem + BCM6328_REG_LNKACTSEL_HI;
+
+               of_property_read_u32_index(nc, "brcm,activity-signal-sources",
+                                          i, &sel);
+
+               if (reg / 4 != sel / 4) {
+                       dev_warn(dev, "invalid activity signal source\n");
+                       continue;
+               }
+
+               spin_lock_irqsave(lock, flags);
+               val = bcm6328_led_read(addr);
+               val |= (BIT(reg) << ((sel % 4) * 4));
+               bcm6328_led_write(addr, val);
+               spin_unlock_irqrestore(lock, flags);
+       }
+
+       return 0;
+}
+
+static int bcm6328_led(struct device *dev, struct device_node *nc, u32 reg,
+                      void __iomem *mem, spinlock_t *lock,
+                      unsigned long *blink_leds, unsigned long *blink_delay)
+{
+       struct bcm6328_led *led;
+       unsigned long flags;
+       const char *state;
+       int rc;
+
+       led = devm_kzalloc(dev, sizeof(*led), GFP_KERNEL);
+       if (!led)
+               return -ENOMEM;
+
+       led->pin = reg;
+       led->mem = mem;
+       led->lock = lock;
+       led->blink_leds = blink_leds;
+       led->blink_delay = blink_delay;
+
+       if (of_property_read_bool(nc, "active-low"))
+               led->active_low = true;
+
+       led->cdev.name = of_get_property(nc, "label", NULL) ? : nc->name;
+       led->cdev.default_trigger = of_get_property(nc,
+                                                   "linux,default-trigger",
+                                                   NULL);
+
+       if (!of_property_read_string(nc, "default-state", &state)) {
+               spin_lock_irqsave(lock, flags);
+               if (!strcmp(state, "on")) {
+                       led->cdev.brightness = LED_FULL;
+                       bcm6328_led_mode(led, BCM6328_LED_MODE_ON);
+               } else if (!strcmp(state, "keep")) {
+                       void __iomem *mode;
+                       unsigned long val, shift;
+
+                       shift = bcm6328_pin2shift(led->pin);
+                       if (shift / 16)
+                               mode = mem + BCM6328_REG_MODE_HI;
+                       else
+                               mode = mem + BCM6328_REG_MODE_LO;
+
+                       val = bcm6328_led_read(mode) >> (shift % 16);
+                       val &= BCM6328_LED_MODE_MASK;
+                       if (val == BCM6328_LED_MODE_ON)
+                               led->cdev.brightness = LED_FULL;
+                       else {
+                               led->cdev.brightness = LED_OFF;
+                               bcm6328_led_mode(led, BCM6328_LED_MODE_OFF);
+                       }
+               } else {
+                       led->cdev.brightness = LED_OFF;
+                       bcm6328_led_mode(led, BCM6328_LED_MODE_OFF);
+               }
+               spin_unlock_irqrestore(lock, flags);
+       }
+
+       led->cdev.brightness_set = bcm6328_led_set;
+       led->cdev.blink_set = bcm6328_blink_set;
+
+       rc = led_classdev_register(dev, &led->cdev);
+       if (rc < 0)
+               return rc;
+
+       dev_dbg(dev, "registered LED %s\n", led->cdev.name);
+
+       return 0;
+}
+
+static int bcm6328_leds_probe(struct platform_device *pdev)
+{
+       struct device *dev = &pdev->dev;
+       struct device_node *np = pdev->dev.of_node;
+       struct device_node *child;
+       struct resource *mem_r;
+       void __iomem *mem;
+       spinlock_t *lock;
+       unsigned long val, *blink_leds, *blink_delay;
+
+       mem_r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       if (!mem_r)
+               return -EINVAL;
+
+       mem = devm_ioremap_resource(dev, mem_r);
+       if (IS_ERR(mem))
+               return PTR_ERR(mem);
+
+       lock = devm_kzalloc(dev, sizeof(*lock), GFP_KERNEL);
+       if (!lock)
+               return -ENOMEM;
+
+       blink_leds = devm_kzalloc(dev, sizeof(*blink_leds), GFP_KERNEL);
+       if (!blink_leds)
+               return -ENOMEM;
+
+       blink_delay = devm_kzalloc(dev, sizeof(*blink_delay), GFP_KERNEL);
+       if (!blink_delay)
+               return -ENOMEM;
+
+       spin_lock_init(lock);
+
+       bcm6328_led_write(mem + BCM6328_REG_HWDIS, ~0);
+       bcm6328_led_write(mem + BCM6328_REG_LNKACTSEL_HI, 0);
+       bcm6328_led_write(mem + BCM6328_REG_LNKACTSEL_LO, 0);
+
+       val = bcm6328_led_read(mem + BCM6328_REG_INIT);
+       val &= ~BCM6328_SERIAL_LED_EN;
+       if (of_property_read_bool(np, "brcm,serial-leds"))
+               val |= BCM6328_SERIAL_LED_EN;
+       bcm6328_led_write(mem + BCM6328_REG_INIT, val);
+
+       for_each_available_child_of_node(np, child) {
+               int rc;
+               u32 reg;
+
+               if (of_property_read_u32(child, "reg", &reg))
+                       continue;
+
+               if (reg >= BCM6328_LED_MAX_COUNT) {
+                       dev_err(dev, "invalid LED (>= %d)\n",
+                               BCM6328_LED_MAX_COUNT);
+                       continue;
+               }
+
+               if (of_property_read_bool(child, "brcm,hardware-controlled"))
+                       rc = bcm6328_hwled(dev, child, reg, mem, lock);
+               else
+                       rc = bcm6328_led(dev, child, reg, mem, lock,
+                                        blink_leds, blink_delay);
+
+               if (rc < 0)
+                       return rc;
+       }
+
+       return 0;
+}
+
+static const struct of_device_id bcm6328_leds_of_match[] = {
+       { .compatible = "brcm,bcm6328-leds", },
+       { },
+};
+
+static struct platform_driver bcm6328_leds_driver = {
+       .probe = bcm6328_leds_probe,
+       .driver = {
+               .name = "leds-bcm6328",
+               .of_match_table = bcm6328_leds_of_match,
+       },
+};
+
+module_platform_driver(bcm6328_leds_driver);
+
+MODULE_AUTHOR("Álvaro Fernández Rojas <noltari@gmail.com>");
+MODULE_AUTHOR("Jonas Gorski <jogo@openwrt.org>");
+MODULE_DESCRIPTION("LED driver for BCM6328 controllers");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:leds-bcm6328");
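The pin handling in bcm6328_pin2shift() and bcm6328_led_mode() above packs 24 two-bit mode fields into the MODE_HI/MODE_LO register pair: pin 3 maps to shift 19, i.e. MODE_HI bits 6..7, while pin 10 maps to shift 2, i.e. MODE_LO bits 4..5. A standalone sketch of the same mapping, for illustration only:

/* Reproduce the BCM6328 LED pin -> mode-register/bit-offset mapping
 * for a few pins; not code from the driver. */
#include <stdio.h>

int main(void)
{
        unsigned long pins[] = { 3, 10, 23 };
        int i;

        for (i = 0; i < 3; i++) {
                unsigned long pin = pins[i];
                /* pins 0-7 live in bits 47:32, pins 8-23 in bits 31:0 */
                unsigned long shift = (pin < 8) ? pin + 16 : pin - 8;
                const char *reg = (shift / 16) ? "MODE_HI" : "MODE_LO";
                unsigned long bit = (shift % 16) << 1;  /* 2 bits per LED */

                printf("pin %2lu -> %s bits %lu..%lu\n",
                       pin, reg, bit, bit + 1);
        }

        return 0;
}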
diff --git a/drivers/leds/leds-bcm6358.c b/drivers/leds/leds-bcm6358.c
new file mode 100644 (file)
index 0000000..21f9693
--- /dev/null
@@ -0,0 +1,243 @@
+/*
+ * Driver for BCM6358 memory-mapped LEDs, based on leds-syscon.c
+ *
+ * Copyright 2015 Álvaro Fernández Rojas <noltari@gmail.com>
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ */
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/leds.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/spinlock.h>
+
+#define BCM6358_REG_MODE               0x0
+#define BCM6358_REG_CTRL               0x4
+
+#define BCM6358_SLED_CLKDIV_MASK       3
+#define BCM6358_SLED_CLKDIV_1          0
+#define BCM6358_SLED_CLKDIV_2          1
+#define BCM6358_SLED_CLKDIV_4          2
+#define BCM6358_SLED_CLKDIV_8          3
+
+#define BCM6358_SLED_POLARITY          BIT(2)
+#define BCM6358_SLED_BUSY              BIT(3)
+
+#define BCM6358_SLED_MAX_COUNT         32
+#define BCM6358_SLED_WAIT              100
+
+/**
+ * struct bcm6358_led - state container for bcm6358 based LEDs
+ * @cdev: LED class device for this LED
+ * @mem: memory resource
+ * @lock: memory lock
+ * @pin: LED pin number
+ * @active_low: LED is active low
+ */
+struct bcm6358_led {
+       struct led_classdev cdev;
+       void __iomem *mem;
+       spinlock_t *lock;
+       unsigned long pin;
+       bool active_low;
+};
+
+static void bcm6358_led_write(void __iomem *reg, unsigned long data)
+{
+       iowrite32be(data, reg);
+}
+
+static unsigned long bcm6358_led_read(void __iomem *reg)
+{
+       return ioread32be(reg);
+}
+
+static unsigned long bcm6358_led_busy(void __iomem *mem)
+{
+       unsigned long val;
+
+       while ((val = bcm6358_led_read(mem + BCM6358_REG_CTRL)) &
+               BCM6358_SLED_BUSY)
+               udelay(BCM6358_SLED_WAIT);
+
+       return val;
+}
+
+static void bcm6358_led_mode(struct bcm6358_led *led, unsigned long value)
+{
+       unsigned long val;
+
+       bcm6358_led_busy(led->mem);
+
+       val = bcm6358_led_read(led->mem + BCM6358_REG_MODE);
+       if ((led->active_low && value == LED_OFF) ||
+           (!led->active_low && value != LED_OFF))
+               val |= BIT(led->pin);
+       else
+               val &= ~(BIT(led->pin));
+       bcm6358_led_write(led->mem + BCM6358_REG_MODE, val);
+}
+
+static void bcm6358_led_set(struct led_classdev *led_cdev,
+                           enum led_brightness value)
+{
+       struct bcm6358_led *led =
+               container_of(led_cdev, struct bcm6358_led, cdev);
+       unsigned long flags;
+
+       spin_lock_irqsave(led->lock, flags);
+       bcm6358_led_mode(led, value);
+       spin_unlock_irqrestore(led->lock, flags);
+}
+
+static int bcm6358_led(struct device *dev, struct device_node *nc, u32 reg,
+                      void __iomem *mem, spinlock_t *lock)
+{
+       struct bcm6358_led *led;
+       unsigned long flags;
+       const char *state;
+       int rc;
+
+       led = devm_kzalloc(dev, sizeof(*led), GFP_KERNEL);
+       if (!led)
+               return -ENOMEM;
+
+       led->pin = reg;
+       led->mem = mem;
+       led->lock = lock;
+
+       if (of_property_read_bool(nc, "active-low"))
+               led->active_low = true;
+
+       led->cdev.name = of_get_property(nc, "label", NULL) ? : nc->name;
+       led->cdev.default_trigger = of_get_property(nc,
+                                                   "linux,default-trigger",
+                                                   NULL);
+
+       spin_lock_irqsave(lock, flags);
+       if (!of_property_read_string(nc, "default-state", &state)) {
+               if (!strcmp(state, "on")) {
+                       led->cdev.brightness = LED_FULL;
+               } else if (!strcmp(state, "keep")) {
+                       unsigned long val;
+
+                       bcm6358_led_busy(led->mem);
+
+                       val = bcm6358_led_read(led->mem + BCM6358_REG_MODE);
+                       val &= BIT(led->pin);
+                       if ((led->active_low && !val) ||
+                           (!led->active_low && val))
+                               led->cdev.brightness = LED_FULL;
+                       else
+                               led->cdev.brightness = LED_OFF;
+               } else {
+                       led->cdev.brightness = LED_OFF;
+               }
+       } else {
+               led->cdev.brightness = LED_OFF;
+       }
+       bcm6358_led_mode(led, led->cdev.brightness);
+       spin_unlock_irqrestore(lock, flags);
+
+       led->cdev.brightness_set = bcm6358_led_set;
+
+       rc = led_classdev_register(dev, &led->cdev);
+       if (rc < 0)
+               return rc;
+
+       dev_dbg(dev, "registered LED %s\n", led->cdev.name);
+
+       return 0;
+}
+
+static int bcm6358_leds_probe(struct platform_device *pdev)
+{
+       struct device *dev = &pdev->dev;
+       struct device_node *np = pdev->dev.of_node;
+       struct device_node *child;
+       struct resource *mem_r;
+       void __iomem *mem;
+       spinlock_t *lock; /* memory lock */
+       unsigned long val;
+       u32 clk_div;
+
+       mem_r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       if (!mem_r)
+               return -EINVAL;
+
+       mem = devm_ioremap_resource(dev, mem_r);
+       if (IS_ERR(mem))
+               return PTR_ERR(mem);
+
+       lock = devm_kzalloc(dev, sizeof(*lock), GFP_KERNEL);
+       if (!lock)
+               return -ENOMEM;
+
+       spin_lock_init(lock);
+
+       val = bcm6358_led_busy(mem);
+       val &= ~(BCM6358_SLED_POLARITY | BCM6358_SLED_CLKDIV_MASK);
+       if (of_property_read_bool(np, "brcm,clk-dat-low"))
+               val |= BCM6358_SLED_POLARITY;
+       of_property_read_u32(np, "brcm,clk-div", &clk_div);
+       switch (clk_div) {
+       case 8:
+               val |= BCM6358_SLED_CLKDIV_8;
+               break;
+       case 4:
+               val |= BCM6358_SLED_CLKDIV_4;
+               break;
+       case 2:
+               val |= BCM6358_SLED_CLKDIV_2;
+               break;
+       default:
+               val |= BCM6358_SLED_CLKDIV_1;
+               break;
+       }
+       bcm6358_led_write(mem + BCM6358_REG_CTRL, val);
+
+       for_each_available_child_of_node(np, child) {
+               int rc;
+               u32 reg;
+
+               if (of_property_read_u32(child, "reg", &reg))
+                       continue;
+
+               if (reg >= BCM6358_SLED_MAX_COUNT) {
+                       dev_err(dev, "invalid LED (%u >= %d)\n", reg,
+                               BCM6358_SLED_MAX_COUNT);
+                       continue;
+               }
+
+               rc = bcm6358_led(dev, child, reg, mem, lock);
+               if (rc < 0)
+                       return rc;
+       }
+
+       return 0;
+}
+
+static const struct of_device_id bcm6358_leds_of_match[] = {
+       { .compatible = "brcm,bcm6358-leds", },
+       { },
+};
+
+static struct platform_driver bcm6358_leds_driver = {
+       .probe = bcm6358_leds_probe,
+       .driver = {
+               .name = "leds-bcm6358",
+               .of_match_table = bcm6358_leds_of_match,
+       },
+};
+
+module_platform_driver(bcm6358_leds_driver);
+
+MODULE_AUTHOR("Álvaro Fernández Rojas <noltari@gmail.com>");
+MODULE_DESCRIPTION("LED driver for BCM6358 controllers");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:leds-bcm6358");
index 06dbe18a2065b8d0d799ed1cbcd92083d2a09fd9..b316df4a8c1e375c5c2ed3e20454fcc661159d3e 100644 (file)
@@ -108,20 +108,8 @@ err_null:
        return retval;
 }
 
-static int cobalt_raq_led_remove(struct platform_device *pdev)
-{
-       led_classdev_unregister(&raq_power_off_led);
-       led_classdev_unregister(&raq_web_led);
-
-       if (led_port)
-               led_port = NULL;
-
-       return 0;
-}
-
 static struct platform_driver cobalt_raq_led_driver = {
        .probe  = cobalt_raq_led_probe,
-       .remove = cobalt_raq_led_remove,
        .driver = {
                .name   = "cobalt-raq-leds",
        },
@@ -131,5 +119,4 @@ static int __init cobalt_raq_led_init(void)
 {
        return platform_driver_register(&cobalt_raq_led_driver);
 }
-
-module_init(cobalt_raq_led_init);
+device_initcall(cobalt_raq_led_init);
index 15eb3f86f670ffe43605615b81c994aa58b40b2c..af1876a3a77c883a630f1c7765864ea28d3f0a03 100644 (file)
@@ -16,6 +16,7 @@
 #include <linux/kernel.h>
 #include <linux/leds.h>
 #include <linux/module.h>
+#include <linux/of.h>
 #include <linux/platform_device.h>
 #include <linux/property.h>
 #include <linux/slab.h>
@@ -191,15 +192,17 @@ static struct gpio_leds_priv *gpio_leds_create(struct platform_device *pdev)
                        goto err;
                }
 
-               np = of_node(child);
+               np = to_of_node(child);
 
                if (fwnode_property_present(child, "label")) {
                        fwnode_property_read_string(child, "label", &led.name);
                } else {
                        if (IS_ENABLED(CONFIG_OF) && !led.name && np)
                                led.name = np->name;
-                       if (!led.name)
-                               return ERR_PTR(-EINVAL);
+                       if (!led.name) {
+                               ret = -EINVAL;
+                               goto err;
+                       }
                }
                fwnode_property_read_string(child, "linux,default-trigger",
                                            &led.default_trigger);
@@ -217,18 +220,19 @@ static struct gpio_leds_priv *gpio_leds_create(struct platform_device *pdev)
                if (fwnode_property_present(child, "retain-state-suspended"))
                        led.retain_state_suspended = 1;
 
-               ret = create_gpio_led(&led, &priv->leds[priv->num_leds++],
+               ret = create_gpio_led(&led, &priv->leds[priv->num_leds],
                                      dev, NULL);
                if (ret < 0) {
                        fwnode_handle_put(child);
                        goto err;
                }
+               priv->num_leds++;
        }
 
        return priv;
 
 err:
-       for (count = priv->num_leds - 2; count >= 0; count--)
+       for (count = priv->num_leds - 1; count >= 0; count--)
                delete_gpio_led(&priv->leds[count]);
        return ERR_PTR(ret);
 }
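The leds-gpio hunk above fixes an error-path off-by-one: priv->num_leds is now incremented only after create_gpio_led() succeeds, so the unwind loop can walk from num_leds - 1 down to 0 and delete exactly the LEDs that were created. A self-contained sketch of that pattern, with hypothetical create_one()/delete_one() helpers:

/* Count an element only after it was fully created, then undo
 * indices created-1 .. 0 on failure; illustration only. */
#include <stdio.h>

static int create_one(int i)  { return (i == 3) ? -1 : 0; }  /* fail at #3 */
static void delete_one(int i) { printf("undo %d\n", i); }

int main(void)
{
        int i, created = 0;

        for (i = 0; i < 5; i++) {
                if (create_one(i) < 0)
                        break;
                created++;
        }

        /* created == 3 here, so we undo 2, 1, 0 - nothing more */
        for (i = created - 1; i >= 0; i--)
                delete_one(i);

        return 0;
}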
diff --git a/drivers/leds/leds-ktd2692.c b/drivers/leds/leds-ktd2692.c
new file mode 100644 (file)
index 0000000..2ae8c4d
--- /dev/null
@@ -0,0 +1,443 @@
+/*
+ * LED driver : leds-ktd2692.c
+ *
+ * Copyright (C) 2015 Samsung Electronics
+ * Ingi Kim <ingi2.kim@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/gpio/consumer.h>
+#include <linux/led-class-flash.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+#include <linux/workqueue.h>
+
+/* Values related to the movie mode */
+#define KTD2692_MOVIE_MODE_CURRENT_LEVELS      16
+#define KTD2692_MM_TO_FL_RATIO(x)              ((x) / 3)
+#define KTD2962_MM_MIN_CURR_THRESHOLD_SCALE    8
+
+/* Values related to the flash mode */
+#define KTD2692_FLASH_MODE_TIMEOUT_LEVELS      8
+#define KTD2692_FLASH_MODE_TIMEOUT_DISABLE     0
+#define KTD2692_FLASH_MODE_CURR_PERCENT(x)     (((x) * 16) / 100)
+
+/* Macro for getting offset of flash timeout */
+#define GET_TIMEOUT_OFFSET(timeout, step)      ((timeout) / (step))
+
+/* Base register address */
+#define KTD2692_REG_LVP_BASE                   0x00
+#define KTD2692_REG_FLASH_TIMEOUT_BASE         0x20
+#define KTD2692_REG_MM_MIN_CURR_THRESHOLD_BASE 0x40
+#define KTD2692_REG_MOVIE_CURRENT_BASE         0x60
+#define KTD2692_REG_FLASH_CURRENT_BASE         0x80
+#define KTD2692_REG_MODE_BASE                  0xA0
+
+/* Bit coding times for the ExpressWire interface */
+#define KTD2692_TIME_RESET_US                  700
+#define KTD2692_TIME_DATA_START_TIME_US                10
+#define KTD2692_TIME_HIGH_END_OF_DATA_US       350
+#define KTD2692_TIME_LOW_END_OF_DATA_US                10
+#define KTD2692_TIME_SHORT_BITSET_US           4
+#define KTD2692_TIME_LONG_BITSET_US            12
+
+/* KTD2692 default length of name */
+#define KTD2692_NAME_LENGTH                    20
+
+enum ktd2692_bitset {
+       KTD2692_LOW = 0,
+       KTD2692_HIGH,
+};
+
+/* Movie / Flash Mode Control */
+enum ktd2692_led_mode {
+       KTD2692_MODE_DISABLE = 0,       /* default */
+       KTD2692_MODE_MOVIE,
+       KTD2692_MODE_FLASH,
+};
+
+struct ktd2692_led_config_data {
+       /* maximum LED current in movie mode */
+       u32 movie_max_microamp;
+       /* maximum LED current in flash mode */
+       u32 flash_max_microamp;
+       /* maximum flash timeout */
+       u32 flash_max_timeout;
+       /* max LED brightness level */
+       enum led_brightness max_brightness;
+};
+
+struct ktd2692_context {
+       /* Related LED Flash class device */
+       struct led_classdev_flash fled_cdev;
+
+       /* secures access to the device */
+       struct mutex lock;
+       struct regulator *regulator;
+       struct work_struct work_brightness_set;
+
+       struct gpio_desc *aux_gpio;
+       struct gpio_desc *ctrl_gpio;
+
+       enum ktd2692_led_mode mode;
+       enum led_brightness torch_brightness;
+};
+
+static struct ktd2692_context *fled_cdev_to_led(
+                               struct led_classdev_flash *fled_cdev)
+{
+       return container_of(fled_cdev, struct ktd2692_context, fled_cdev);
+}
+
+static void ktd2692_expresswire_start(struct ktd2692_context *led)
+{
+       gpiod_direction_output(led->ctrl_gpio, KTD2692_HIGH);
+       udelay(KTD2692_TIME_DATA_START_TIME_US);
+}
+
+static void ktd2692_expresswire_reset(struct ktd2692_context *led)
+{
+       gpiod_direction_output(led->ctrl_gpio, KTD2692_LOW);
+       udelay(KTD2692_TIME_RESET_US);
+}
+
+static void ktd2692_expresswire_end(struct ktd2692_context *led)
+{
+       gpiod_direction_output(led->ctrl_gpio, KTD2692_LOW);
+       udelay(KTD2692_TIME_LOW_END_OF_DATA_US);
+       gpiod_direction_output(led->ctrl_gpio, KTD2692_HIGH);
+       udelay(KTD2692_TIME_HIGH_END_OF_DATA_US);
+}
+
+static void ktd2692_expresswire_set_bit(struct ktd2692_context *led, bool bit)
+{
+       /*
+        * The Low Bit(0) and High Bit(1) are distinguished by a timing
+        * algorithm that compares the low time and the high time:
+        * Time_(L_LB) : Low time of the Low Bit(0)
+        * Time_(H_LB) : High time of the Low Bit(0)
+        * Time_(L_HB) : Low time of the High Bit(1)
+        * Time_(H_HB) : High time of the High Bit(1)
+        *
+        * It can be simplified to:
+        * Low Bit(0) : 2 * Time_(H_LB) < Time_(L_LB)
+        * High Bit(1) : 2 * Time_(L_HB) < Time_(H_HB)
+        * HIGH  ___           ____    _..     _________    ___
+        *          |_________|    |_..  |____|         |__|
+        * LOW        <L_LB>  <H_LB>     <L_HB>  <H_HB>
+        *          [  Low Bit (0) ]     [  High Bit(1) ]
+        */
+       if (bit) {
+               gpiod_direction_output(led->ctrl_gpio, KTD2692_LOW);
+               udelay(KTD2692_TIME_SHORT_BITSET_US);
+               gpiod_direction_output(led->ctrl_gpio, KTD2692_HIGH);
+               udelay(KTD2692_TIME_LONG_BITSET_US);
+       } else {
+               gpiod_direction_output(led->ctrl_gpio, KTD2692_LOW);
+               udelay(KTD2692_TIME_LONG_BITSET_US);
+               gpiod_direction_output(led->ctrl_gpio, KTD2692_HIGH);
+               udelay(KTD2692_TIME_SHORT_BITSET_US);
+       }
+}
+
+static void ktd2692_expresswire_write(struct ktd2692_context *led, u8 value)
+{
+       int i;
+
+       ktd2692_expresswire_start(led);
+       for (i = 7; i >= 0; i--)
+               ktd2692_expresswire_set_bit(led, value & BIT(i));
+       ktd2692_expresswire_end(led);
+}
+
+static void ktd2692_brightness_set(struct ktd2692_context *led,
+                                  enum led_brightness brightness)
+{
+       mutex_lock(&led->lock);
+
+       if (brightness == LED_OFF) {
+               led->mode = KTD2692_MODE_DISABLE;
+               gpiod_direction_output(led->aux_gpio, KTD2692_LOW);
+       } else {
+               ktd2692_expresswire_write(led, brightness |
+                                       KTD2692_REG_MOVIE_CURRENT_BASE);
+               led->mode = KTD2692_MODE_MOVIE;
+       }
+
+       ktd2692_expresswire_write(led, led->mode | KTD2692_REG_MODE_BASE);
+       mutex_unlock(&led->lock);
+}
+
+static void ktd2692_brightness_set_work(struct work_struct *work)
+{
+       struct ktd2692_context *led =
+               container_of(work, struct ktd2692_context, work_brightness_set);
+
+       ktd2692_brightness_set(led, led->torch_brightness);
+}
+
+static void ktd2692_led_brightness_set(struct led_classdev *led_cdev,
+                                      enum led_brightness brightness)
+{
+       struct led_classdev_flash *fled_cdev = lcdev_to_flcdev(led_cdev);
+       struct ktd2692_context *led = fled_cdev_to_led(fled_cdev);
+
+       led->torch_brightness = brightness;
+       schedule_work(&led->work_brightness_set);
+}
+
+static int ktd2692_led_brightness_set_sync(struct led_classdev *led_cdev,
+                                          enum led_brightness brightness)
+{
+       struct led_classdev_flash *fled_cdev = lcdev_to_flcdev(led_cdev);
+       struct ktd2692_context *led = fled_cdev_to_led(fled_cdev);
+
+       ktd2692_brightness_set(led, brightness);
+
+       return 0;
+}
+
+static int ktd2692_led_flash_strobe_set(struct led_classdev_flash *fled_cdev,
+                                       bool state)
+{
+       struct ktd2692_context *led = fled_cdev_to_led(fled_cdev);
+       struct led_flash_setting *timeout = &fled_cdev->timeout;
+       u32 flash_tm_reg;
+
+       mutex_lock(&led->lock);
+
+       if (state) {
+               flash_tm_reg = GET_TIMEOUT_OFFSET(timeout->val, timeout->step);
+               ktd2692_expresswire_write(led, flash_tm_reg
+                               | KTD2692_REG_FLASH_TIMEOUT_BASE);
+
+               led->mode = KTD2692_MODE_FLASH;
+               gpiod_direction_output(led->aux_gpio, KTD2692_HIGH);
+       } else {
+               led->mode = KTD2692_MODE_DISABLE;
+               gpiod_direction_output(led->aux_gpio, KTD2692_LOW);
+       }
+
+       ktd2692_expresswire_write(led, led->mode | KTD2692_REG_MODE_BASE);
+
+       fled_cdev->led_cdev.brightness = LED_OFF;
+       led->mode = KTD2692_MODE_DISABLE;
+
+       mutex_unlock(&led->lock);
+
+       return 0;
+}
+
+static int ktd2692_led_flash_timeout_set(struct led_classdev_flash *fled_cdev,
+                                        u32 timeout)
+{
+       return 0;
+}
+
+static void ktd2692_init_movie_current_max(struct ktd2692_led_config_data *cfg)
+{
+       u32 offset, step;
+       u32 movie_current_microamp;
+
+       offset = KTD2692_MOVIE_MODE_CURRENT_LEVELS;
+       step = KTD2692_MM_TO_FL_RATIO(cfg->flash_max_microamp)
+               / KTD2692_MOVIE_MODE_CURRENT_LEVELS;
+
+       do {
+               movie_current_microamp = step * offset;
+               offset--;
+       } while ((movie_current_microamp > cfg->movie_max_microamp) &&
+               (offset > 0));
+
+       cfg->max_brightness = offset;
+}
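+
+/*
+ * Worked example for the loop above, with hypothetical values: if step
+ * works out to 25000uA and movie_max_microamp is 100000uA, the loop stops
+ * once step * offset drops to 100000uA (offset == 4) and the decrement
+ * leaves cfg->max_brightness at 3.
+ */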
+
+static void ktd2692_init_flash_timeout(struct led_classdev_flash *fled_cdev,
+                                      struct ktd2692_led_config_data *cfg)
+{
+       struct led_flash_setting *setting;
+
+       setting = &fled_cdev->timeout;
+       setting->min = KTD2692_FLASH_MODE_TIMEOUT_DISABLE;
+       setting->max = cfg->flash_max_timeout;
+       setting->step = cfg->flash_max_timeout
+                       / (KTD2692_FLASH_MODE_TIMEOUT_LEVELS - 1);
+       setting->val = cfg->flash_max_timeout;
+}
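+
+/*
+ * For illustration (hypothetical DT value): with 8 timeout levels and
+ * flash-max-timeout-us = 1050000, step would be 1050000 / 7 = 150000us,
+ * and the initial timeout value is the maximum. The actual number of
+ * levels is given by KTD2692_FLASH_MODE_TIMEOUT_LEVELS.
+ */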
+
+static void ktd2692_setup(struct ktd2692_context *led)
+{
+       led->mode = KTD2692_MODE_DISABLE;
+       ktd2692_expresswire_reset(led);
+       gpiod_direction_output(led->aux_gpio, KTD2692_LOW);
+
+       ktd2692_expresswire_write(led, (KTD2962_MM_MIN_CURR_THRESHOLD_SCALE - 1)
+                                | KTD2692_REG_MM_MIN_CURR_THRESHOLD_BASE);
+       ktd2692_expresswire_write(led, KTD2692_FLASH_MODE_CURR_PERCENT(45)
+                                | KTD2692_REG_FLASH_CURRENT_BASE);
+}
+
+static int ktd2692_parse_dt(struct ktd2692_context *led, struct device *dev,
+                           struct ktd2692_led_config_data *cfg)
+{
+       struct device_node *np = dev->of_node;
+       struct device_node *child_node;
+       int ret;
+
+       if (!dev->of_node)
+               return -ENXIO;
+
+       led->ctrl_gpio = devm_gpiod_get(dev, "ctrl", GPIOD_ASIS);
+       if (IS_ERR(led->ctrl_gpio)) {
+               ret = PTR_ERR(led->ctrl_gpio);
+               dev_err(dev, "cannot get ctrl-gpios %d\n", ret);
+               return ret;
+       }
+
+       led->aux_gpio = devm_gpiod_get(dev, "aux", GPIOD_ASIS);
+       if (IS_ERR(led->aux_gpio)) {
+               ret = PTR_ERR(led->aux_gpio);
+               dev_err(dev, "cannot get aux-gpios %d\n", ret);
+               return ret;
+       }
+
+       led->regulator = devm_regulator_get(dev, "vin");
+       if (IS_ERR(led->regulator))
+               led->regulator = NULL;
+
+       if (led->regulator) {
+               ret = regulator_enable(led->regulator);
+               if (ret)
+                       dev_err(dev, "Failed to enable supply: %d\n", ret);
+       }
+
+       child_node = of_get_next_available_child(np, NULL);
+       if (!child_node) {
+               dev_err(dev, "No DT child node found for connected LED.\n");
+               return -EINVAL;
+       }
+
+       led->fled_cdev.led_cdev.name =
+               of_get_property(child_node, "label", NULL) ? : child_node->name;
+
+       ret = of_property_read_u32(child_node, "led-max-microamp",
+                                  &cfg->movie_max_microamp);
+       if (ret) {
+               dev_err(dev, "failed to parse led-max-microamp\n");
+               return ret;
+       }
+
+       ret = of_property_read_u32(child_node, "flash-max-microamp",
+                                  &cfg->flash_max_microamp);
+       if (ret) {
+               dev_err(dev, "failed to parse flash-max-microamp\n");
+               return ret;
+       }
+
+       ret = of_property_read_u32(child_node, "flash-max-timeout-us",
+                                  &cfg->flash_max_timeout);
+       if (ret)
+               dev_err(dev, "failed to parse flash-max-timeout-us\n");
+
+       of_node_put(child_node);
+       return ret;
+}
+
+static const struct led_flash_ops flash_ops = {
+       .strobe_set = ktd2692_led_flash_strobe_set,
+       .timeout_set = ktd2692_led_flash_timeout_set,
+};
+
+static int ktd2692_probe(struct platform_device *pdev)
+{
+       struct ktd2692_context *led;
+       struct led_classdev *led_cdev;
+       struct led_classdev_flash *fled_cdev;
+       struct ktd2692_led_config_data led_cfg;
+       int ret;
+
+       led = devm_kzalloc(&pdev->dev, sizeof(*led), GFP_KERNEL);
+       if (!led)
+               return -ENOMEM;
+
+       fled_cdev = &led->fled_cdev;
+       led_cdev = &fled_cdev->led_cdev;
+
+       ret = ktd2692_parse_dt(led, &pdev->dev, &led_cfg);
+       if (ret)
+               return ret;
+
+       ktd2692_init_flash_timeout(fled_cdev, &led_cfg);
+       ktd2692_init_movie_current_max(&led_cfg);
+
+       fled_cdev->ops = &flash_ops;
+
+       led_cdev->max_brightness = led_cfg.max_brightness;
+       led_cdev->brightness_set = ktd2692_led_brightness_set;
+       led_cdev->brightness_set_sync = ktd2692_led_brightness_set_sync;
+       led_cdev->flags |= LED_CORE_SUSPENDRESUME | LED_DEV_CAP_FLASH;
+
+       mutex_init(&led->lock);
+       INIT_WORK(&led->work_brightness_set, ktd2692_brightness_set_work);
+
+       platform_set_drvdata(pdev, led);
+
+       ret = led_classdev_flash_register(&pdev->dev, fled_cdev);
+       if (ret) {
+               dev_err(&pdev->dev, "can't register LED %s\n", led_cdev->name);
+               mutex_destroy(&led->lock);
+               return ret;
+       }
+
+       ktd2692_setup(led);
+
+       return 0;
+}
+
+static int ktd2692_remove(struct platform_device *pdev)
+{
+       struct ktd2692_context *led = platform_get_drvdata(pdev);
+       int ret;
+
+       led_classdev_flash_unregister(&led->fled_cdev);
+       cancel_work_sync(&led->work_brightness_set);
+
+       if (led->regulator) {
+               ret = regulator_disable(led->regulator);
+               if (ret)
+                       dev_err(&pdev->dev,
+                               "Failed to disable supply: %d\n", ret);
+       }
+
+       mutex_destroy(&led->lock);
+
+       return 0;
+}
+
+static const struct of_device_id ktd2692_match[] = {
+       { .compatible = "kinetic,ktd2692", },
+       { /* sentinel */ },
+};
+
+static struct platform_driver ktd2692_driver = {
+       .driver = {
+               .name  = "ktd2692",
+               .of_match_table = ktd2692_match,
+       },
+       .probe  = ktd2692_probe,
+       .remove = ktd2692_remove,
+};
+
+module_platform_driver(ktd2692_driver);
+
+MODULE_AUTHOR("Ingi Kim <ingi2.kim@samsung.com>");
+MODULE_DESCRIPTION("Kinetic KTD2692 LED driver");
+MODULE_LICENSE("GPL v2");
index 9e1716f8098ca2b95aa9e558b912968af8d1e2ed..584dbbcec65955d49f8e9603e129b1c0f83c2122 100644 (file)
@@ -50,6 +50,7 @@
 #define LP5523_REG_OP_MODE             0x01
 #define LP5523_REG_ENABLE_LEDS_MSB     0x04
 #define LP5523_REG_ENABLE_LEDS_LSB     0x05
+#define LP5523_REG_LED_CTRL_BASE       0x06
 #define LP5523_REG_LED_PWM_BASE                0x16
 #define LP5523_REG_LED_CURRENT_BASE    0x26
 #define LP5523_REG_CONFIG              0x36
@@ -57,6 +58,7 @@
 #define LP5523_REG_RESET               0x3D
 #define LP5523_REG_LED_TEST_CTRL       0x41
 #define LP5523_REG_LED_TEST_ADC                0x42
+#define LP5523_REG_MASTER_FADER_BASE   0x48
 #define LP5523_REG_CH1_PROG_START      0x4C
 #define LP5523_REG_CH2_PROG_START      0x4D
 #define LP5523_REG_CH3_PROG_START      0x4E
@@ -78,6 +80,9 @@
 #define LP5523_EXT_CLK_USED            0x08
 #define LP5523_ENG_STATUS_MASK         0x07
 
+#define LP5523_FADER_MAPPING_MASK      0xC0
+#define LP5523_FADER_MAPPING_SHIFT     6
+
 /* Memory Page Selection */
 #define LP5523_PAGE_ENG1               0
 #define LP5523_PAGE_ENG2               1
@@ -666,6 +671,137 @@ release_lock:
        return pos;
 }
 
+#define show_fader(nr)                                         \
+static ssize_t show_master_fader##nr(struct device *dev,       \
+                           struct device_attribute *attr,      \
+                           char *buf)                          \
+{                                                              \
+       return show_master_fader(dev, attr, buf, nr);           \
+}
+
+#define store_fader(nr)                                                \
+static ssize_t store_master_fader##nr(struct device *dev,      \
+                            struct device_attribute *attr,     \
+                            const char *buf, size_t len)       \
+{                                                              \
+       return store_master_fader(dev, attr, buf, len, nr);     \
+}
+
+static ssize_t show_master_fader(struct device *dev,
+                                struct device_attribute *attr,
+                                char *buf, int nr)
+{
+       struct lp55xx_led *led = i2c_get_clientdata(to_i2c_client(dev));
+       struct lp55xx_chip *chip = led->chip;
+       int ret;
+       u8 val;
+
+       mutex_lock(&chip->lock);
+       ret = lp55xx_read(chip, LP5523_REG_MASTER_FADER_BASE + nr - 1, &val);
+       mutex_unlock(&chip->lock);
+
+       if (ret == 0)
+               ret = sprintf(buf, "%u\n", val);
+
+       return ret;
+}
+show_fader(1)
+show_fader(2)
+show_fader(3)
+
+static ssize_t store_master_fader(struct device *dev,
+                                 struct device_attribute *attr,
+                                 const char *buf, size_t len, int nr)
+{
+       struct lp55xx_led *led = i2c_get_clientdata(to_i2c_client(dev));
+       struct lp55xx_chip *chip = led->chip;
+       int ret;
+       unsigned long val;
+
+       if (kstrtoul(buf, 0, &val))
+               return -EINVAL;
+
+       if (val > 0xff)
+               return -EINVAL;
+
+       mutex_lock(&chip->lock);
+       ret = lp55xx_write(chip, LP5523_REG_MASTER_FADER_BASE + nr - 1,
+                          (u8)val);
+       mutex_unlock(&chip->lock);
+
+       if (ret == 0)
+               ret = len;
+
+       return ret;
+}
+store_fader(1)
+store_fader(2)
+store_fader(3)
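+
+/*
+ * The show_fader(n)/store_fader(n) macros above merely generate thin
+ * wrappers, e.g. store_fader(1) expands to store_master_fader1() which
+ * forwards to store_master_fader(dev, attr, buf, len, 1); the wrappers
+ * are wired to sysfs attributes further below.
+ */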
+
+static ssize_t show_master_fader_leds(struct device *dev,
+                                     struct device_attribute *attr,
+                                     char *buf)
+{
+       struct lp55xx_led *led = i2c_get_clientdata(to_i2c_client(dev));
+       struct lp55xx_chip *chip = led->chip;
+       int i, ret, pos = 0;
+       u8 val;
+
+       mutex_lock(&chip->lock);
+
+       for (i = 0; i < LP5523_MAX_LEDS; i++) {
+               ret = lp55xx_read(chip, LP5523_REG_LED_CTRL_BASE + i, &val);
+               if (ret)
+                       goto leave;
+
+               val = (val & LP5523_FADER_MAPPING_MASK)
+                       >> LP5523_FADER_MAPPING_SHIFT;
+               if (val > 3) {
+                       ret = -EINVAL;
+                       goto leave;
+               }
+               buf[pos++] = val + '0';
+       }
+       buf[pos++] = '\n';
+       ret = pos;
+leave:
+       mutex_unlock(&chip->lock);
+       return ret;
+}
+
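+/*
+ * Sysfs input format handled below: one character per LED, '0'..'3',
+ * which is written into the fader-mapping field of that LED's control
+ * register. For example (hypothetical usage),
+ * echo "123000000" > master_fader_leds sets the mapping field of LED 0,
+ * 1 and 2 to 1, 2 and 3 and clears the field for the remaining LEDs.
+ */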
+static ssize_t store_master_fader_leds(struct device *dev,
+                                      struct device_attribute *attr,
+                                      const char *buf, size_t len)
+{
+       struct lp55xx_led *led = i2c_get_clientdata(to_i2c_client(dev));
+       struct lp55xx_chip *chip = led->chip;
+       int i, n, ret;
+       u8 val;
+
+       n = min_t(int, len, LP5523_MAX_LEDS);
+
+       mutex_lock(&chip->lock);
+
+       for (i = 0; i < n; i++) {
+               if (buf[i] >= '0' && buf[i] <= '3') {
+                       val = (buf[i] - '0') << LP5523_FADER_MAPPING_SHIFT;
+                       ret = lp55xx_update_bits(chip,
+                                                LP5523_REG_LED_CTRL_BASE + i,
+                                                LP5523_FADER_MAPPING_MASK,
+                                                val);
+                       if (ret)
+                               goto leave;
+               } else {
+                       ret = -EINVAL;
+                       goto leave;
+               }
+       }
+       ret = len;
+leave:
+       mutex_unlock(&chip->lock);
+       return ret;
+}
+
 static void lp5523_led_brightness_work(struct work_struct *work)
 {
        struct lp55xx_led *led = container_of(work, struct lp55xx_led,
@@ -688,6 +824,14 @@ static LP55XX_DEV_ATTR_WO(engine1_load, store_engine1_load);
 static LP55XX_DEV_ATTR_WO(engine2_load, store_engine2_load);
 static LP55XX_DEV_ATTR_WO(engine3_load, store_engine3_load);
 static LP55XX_DEV_ATTR_RO(selftest, lp5523_selftest);
+static LP55XX_DEV_ATTR_RW(master_fader1, show_master_fader1,
+                         store_master_fader1);
+static LP55XX_DEV_ATTR_RW(master_fader2, show_master_fader2,
+                         store_master_fader2);
+static LP55XX_DEV_ATTR_RW(master_fader3, show_master_fader3,
+                         store_master_fader3);
+static LP55XX_DEV_ATTR_RW(master_fader_leds, show_master_fader_leds,
+                         store_master_fader_leds);
 
 static struct attribute *lp5523_attributes[] = {
        &dev_attr_engine1_mode.attr,
@@ -700,6 +844,10 @@ static struct attribute *lp5523_attributes[] = {
        &dev_attr_engine2_leds.attr,
        &dev_attr_engine3_leds.attr,
        &dev_attr_selftest.attr,
+       &dev_attr_master_fader1.attr,
+       &dev_attr_master_fader2.attr,
+       &dev_attr_master_fader3.attr,
+       &dev_attr_master_fader_leds.attr,
        NULL,
 };
 
index 77c26bc32eed561a26c4ccb45b3238e60184d017..96d51e9879c905d2c4eb6bac2325af174665a9f1 100644 (file)
@@ -223,7 +223,7 @@ static int lp55xx_request_firmware(struct lp55xx_chip *chip)
        const char *name = chip->cl->name;
        struct device *dev = &chip->cl->dev;
 
-       return request_firmware_nowait(THIS_MODULE, true, name, dev,
+       return request_firmware_nowait(THIS_MODULE, false, name, dev,
                                GFP_KERNEL, chip, lp55xx_firmware_loaded);
 }
 
diff --git a/drivers/leds/leds-max77693.c b/drivers/leds/leds-max77693.c
new file mode 100644 (file)
index 0000000..b8b0eec
--- /dev/null
@@ -0,0 +1,1097 @@
+/*
+ * LED Flash class driver for the flash cell of max77693 mfd.
+ *
+ *     Copyright (C) 2015, Samsung Electronics Co., Ltd.
+ *
+ *     Authors: Jacek Anaszewski <j.anaszewski@samsung.com>
+ *              Andrzej Hajda <a.hajda@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ */
+
+#include <linux/led-class-flash.h>
+#include <linux/mfd/max77693.h>
+#include <linux/mfd/max77693-private.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+#include <linux/workqueue.h>
+#include <media/v4l2-flash-led-class.h>
+
+#define MODE_OFF               0
+#define MODE_FLASH(a)          (1 << (a))
+#define MODE_TORCH(a)          (1 << (2 + (a)))
+#define MODE_FLASH_EXTERNAL(a) (1 << (4 + (a)))
+
+#define MODE_FLASH_MASK                (MODE_FLASH(FLED1) | MODE_FLASH(FLED2) | \
+                                MODE_FLASH_EXTERNAL(FLED1) | \
+                                MODE_FLASH_EXTERNAL(FLED2))
+#define MODE_TORCH_MASK                (MODE_TORCH(FLED1) | MODE_TORCH(FLED2))
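+
+/*
+ * With FLED1 == 0 and FLED2 == 1 (see the enum below), the mode bitmap
+ * used throughout this driver therefore looks like:
+ *   bit 0 - FLASH(FLED1),          bit 1 - FLASH(FLED2)
+ *   bit 2 - TORCH(FLED1),          bit 3 - TORCH(FLED2)
+ *   bit 4 - FLASH_EXTERNAL(FLED1), bit 5 - FLASH_EXTERNAL(FLED2)
+ */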
+
+#define FLED1_IOUT             (1 << 0)
+#define FLED2_IOUT             (1 << 1)
+
+enum max77693_fled {
+       FLED1,
+       FLED2,
+};
+
+enum max77693_led_mode {
+       FLASH,
+       TORCH,
+};
+
+struct max77693_led_config_data {
+       const char *label[2];
+       u32 iout_torch_max[2];
+       u32 iout_flash_max[2];
+       u32 flash_timeout_max[2];
+       u32 num_leds;
+       u32 boost_mode;
+       u32 boost_vout;
+       u32 low_vsys;
+};
+
+struct max77693_sub_led {
+       /* corresponding FLED output identifier */
+       int fled_id;
+       /* corresponding LED Flash class device */
+       struct led_classdev_flash fled_cdev;
+       /* ensures led-triggers compatibility */
+       struct work_struct work_brightness_set;
+       /* V4L2 Flash device */
+       struct v4l2_flash *v4l2_flash;
+
+       /* brightness cache */
+       unsigned int torch_brightness;
+       /* flash timeout cache */
+       unsigned int flash_timeout;
+       /* flash faults that may have occurred */
+       u32 flash_faults;
+};
+
+struct max77693_led_device {
+       /* parent mfd regmap */
+       struct regmap *regmap;
+       /* platform device data */
+       struct platform_device *pdev;
+       /* protects access to the device */
+       struct mutex lock;
+
+       /* sub led data */
+       struct max77693_sub_led sub_leds[2];
+
+       /* maximum torch current values for FLED outputs */
+       u32 iout_torch_max[2];
+       /* maximum flash current values for FLED outputs */
+       u32 iout_flash_max[2];
+
+       /* current flash timeout cache */
+       unsigned int current_flash_timeout;
+       /* ITORCH register cache */
+       u8 torch_iout_reg;
+       /* mode of fled outputs */
+       unsigned int mode_flags;
+       /* recently strobed fled */
+       int strobing_sub_led_id;
+       /* bitmask of FLED outputs in use (bit 0 - FLED1, bit 1 - FLED2) */
+       u8 fled_mask;
+       /* FLED modes that can be set */
+       u8 allowed_modes;
+
+       /* arrangement of current outputs */
+       bool iout_joint;
+};
+
+static u8 max77693_led_iout_to_reg(u32 ua)
+{
+       if (ua < FLASH_IOUT_MIN)
+               ua = FLASH_IOUT_MIN;
+       return (ua - FLASH_IOUT_MIN) / FLASH_IOUT_STEP;
+}
+
+static u8 max77693_flash_timeout_to_reg(u32 us)
+{
+       return (us - FLASH_TIMEOUT_MIN) / FLASH_TIMEOUT_STEP;
+}
+
+static inline struct max77693_sub_led *flcdev_to_sub_led(
+                                       struct led_classdev_flash *fled_cdev)
+{
+       return container_of(fled_cdev, struct max77693_sub_led, fled_cdev);
+}
+
+static inline struct max77693_led_device *sub_led_to_led(
+                                       struct max77693_sub_led *sub_led)
+{
+       return container_of(sub_led, struct max77693_led_device,
+                               sub_leds[sub_led->fled_id]);
+}
+
+static inline u8 max77693_led_vsys_to_reg(u32 mv)
+{
+       return ((mv - MAX_FLASH1_VSYS_MIN) / MAX_FLASH1_VSYS_STEP) << 2;
+}
+
+static inline u8 max77693_led_vout_to_reg(u32 mv)
+{
+       return (mv - FLASH_VOUT_MIN) / FLASH_VOUT_STEP + FLASH_VOUT_RMIN;
+}
+
+static inline bool max77693_fled_used(struct max77693_led_device *led,
+                                        int fled_id)
+{
+       u8 fled_bit = (fled_id == FLED1) ? FLED1_IOUT : FLED2_IOUT;
+
+       return led->fled_mask & fled_bit;
+}
+
+static int max77693_set_mode_reg(struct max77693_led_device *led, u8 mode)
+{
+       struct regmap *rmap = led->regmap;
+       int ret, v = 0, i;
+
+       for (i = FLED1; i <= FLED2; ++i) {
+               if (mode & MODE_TORCH(i))
+                       v |= FLASH_EN_ON << TORCH_EN_SHIFT(i);
+
+               if (mode & MODE_FLASH(i)) {
+                       v |= FLASH_EN_ON << FLASH_EN_SHIFT(i);
+               } else if (mode & MODE_FLASH_EXTERNAL(i)) {
+                       v |= FLASH_EN_FLASH << FLASH_EN_SHIFT(i);
+                       /*
+                        * Enable hw triggering also for torch mode, as some
+                        * camera sensors use the torch led to gauge ambient
+                        * light conditions before strobing the flash.
+                        */
+                       v |= FLASH_EN_TORCH << TORCH_EN_SHIFT(i);
+               }
+       }
+
+       /* Reset the register only prior to setting flash modes */
+       if (mode & ~(MODE_TORCH(FLED1) | MODE_TORCH(FLED2))) {
+               ret = regmap_write(rmap, MAX77693_LED_REG_FLASH_EN, 0);
+               if (ret < 0)
+                       return ret;
+       }
+
+       return regmap_write(rmap, MAX77693_LED_REG_FLASH_EN, v);
+}
+
+static int max77693_add_mode(struct max77693_led_device *led, u8 mode)
+{
+       u8 new_mode_flags;
+       int i, ret;
+
+       if (led->iout_joint)
+               /* Span the mode on FLED2 for joint iouts case */
+               mode |= (mode << 1);
+
+       /*
+        * FLASH_EXTERNAL mode activates FLASHEN and TORCHEN pins in the device.
+        * Corresponding register bit fields interfere with SW triggered modes,
+        * thus clear them to ensure proper device configuration.
+        */
+       for (i = FLED1; i <= FLED2; ++i)
+               if (mode & MODE_FLASH_EXTERNAL(i))
+                       led->mode_flags &= (~MODE_TORCH(i) & ~MODE_FLASH(i));
+
+       new_mode_flags = mode | led->mode_flags;
+       new_mode_flags &= led->allowed_modes;
+
+       if (new_mode_flags ^ led->mode_flags)
+               led->mode_flags = new_mode_flags;
+       else
+               return 0;
+
+       ret = max77693_set_mode_reg(led, led->mode_flags);
+       if (ret < 0)
+               return ret;
+
+       /*
+        * Clear flash mode flag after setting the mode to avoid spurious flash
+        * strobing on each subsequent torch mode setting.
+        */
+       if (mode & MODE_FLASH_MASK)
+               led->mode_flags &= ~mode;
+
+       return ret;
+}
+
+static int max77693_clear_mode(struct max77693_led_device *led,
+                               u8 mode)
+{
+       if (led->iout_joint)
+               /* Clear mode also on FLED2 for joint iouts case */
+               mode |= (mode << 1);
+
+       led->mode_flags &= ~mode;
+
+       return max77693_set_mode_reg(led, led->mode_flags);
+}
+
+static void max77693_add_allowed_modes(struct max77693_led_device *led,
+                               int fled_id, enum max77693_led_mode mode)
+{
+       if (mode == FLASH)
+               led->allowed_modes |= (MODE_FLASH(fled_id) |
+                                      MODE_FLASH_EXTERNAL(fled_id));
+       else
+               led->allowed_modes |= MODE_TORCH(fled_id);
+}
+
+static void max77693_distribute_currents(struct max77693_led_device *led,
+                               int fled_id, enum max77693_led_mode mode,
+                               u32 micro_amp, u32 iout_max[2], u32 iout[2])
+{
+       if (!led->iout_joint) {
+               iout[fled_id] = micro_amp;
+               max77693_add_allowed_modes(led, fled_id, mode);
+               return;
+       }
+
+       iout[FLED1] = min(micro_amp, iout_max[FLED1]);
+       iout[FLED2] = micro_amp - iout[FLED1];
+
+       if (mode == FLASH)
+               led->allowed_modes &= ~MODE_FLASH_MASK;
+       else
+               led->allowed_modes &= ~MODE_TORCH_MASK;
+
+       max77693_add_allowed_modes(led, FLED1, mode);
+
+       if (iout[FLED2])
+               max77693_add_allowed_modes(led, FLED2, mode);
+}
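+
+/*
+ * Joint-output example (hypothetical values): for a single LED wired to
+ * both outputs with iout_max[FLED1] == 625000uA, a request of 1000000uA
+ * is split above into iout[FLED1] == 625000uA and iout[FLED2] == 375000uA.
+ */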
+
+static int max77693_set_torch_current(struct max77693_led_device *led,
+                               int fled_id, u32 micro_amp)
+{
+       struct regmap *rmap = led->regmap;
+       u8 iout1_reg = 0, iout2_reg = 0;
+       u32 iout[2];
+
+       max77693_distribute_currents(led, fled_id, TORCH, micro_amp,
+                                       led->iout_torch_max, iout);
+
+       if (fled_id == FLED1 || led->iout_joint) {
+               iout1_reg = max77693_led_iout_to_reg(iout[FLED1]);
+               led->torch_iout_reg &= TORCH_IOUT_MASK(TORCH_IOUT2_SHIFT);
+       }
+       if (fled_id == FLED2 || led->iout_joint) {
+               iout2_reg = max77693_led_iout_to_reg(iout[FLED2]);
+               led->torch_iout_reg &= TORCH_IOUT_MASK(TORCH_IOUT1_SHIFT);
+       }
+
+       led->torch_iout_reg |= ((iout1_reg << TORCH_IOUT1_SHIFT) |
+                               (iout2_reg << TORCH_IOUT2_SHIFT));
+
+       return regmap_write(rmap, MAX77693_LED_REG_ITORCH,
+                                               led->torch_iout_reg);
+}
+
+static int max77693_set_flash_current(struct max77693_led_device *led,
+                                       int fled_id,
+                                       u32 micro_amp)
+{
+       struct regmap *rmap = led->regmap;
+       u8 iout1_reg, iout2_reg;
+       u32 iout[2];
+       int ret = -EINVAL;
+
+       max77693_distribute_currents(led, fled_id, FLASH, micro_amp,
+                                       led->iout_flash_max, iout);
+
+       if (fled_id == FLED1 || led->iout_joint) {
+               iout1_reg = max77693_led_iout_to_reg(iout[FLED1]);
+               ret = regmap_write(rmap, MAX77693_LED_REG_IFLASH1,
+                                                       iout1_reg);
+               if (ret < 0)
+                       return ret;
+       }
+       if (fled_id == FLED2 || led->iout_joint) {
+               iout2_reg = max77693_led_iout_to_reg(iout[FLED2]);
+               ret = regmap_write(rmap, MAX77693_LED_REG_IFLASH2,
+                                                       iout2_reg);
+       }
+
+       return ret;
+}
+
+static int max77693_set_timeout(struct max77693_led_device *led, u32 microsec)
+{
+       struct regmap *rmap = led->regmap;
+       u8 v;
+       int ret;
+
+       v = max77693_flash_timeout_to_reg(microsec) | FLASH_TMR_LEVEL;
+
+       ret = regmap_write(rmap, MAX77693_LED_REG_FLASH_TIMER, v);
+       if (ret < 0)
+               return ret;
+
+       led->current_flash_timeout = microsec;
+
+       return 0;
+}
+
+static int max77693_get_strobe_status(struct max77693_led_device *led,
+                                       bool *state)
+{
+       struct regmap *rmap = led->regmap;
+       unsigned int v;
+       int ret;
+
+       ret = regmap_read(rmap, MAX77693_LED_REG_FLASH_STATUS, &v);
+       if (ret < 0)
+               return ret;
+
+       *state = v & FLASH_STATUS_FLASH_ON;
+
+       return ret;
+}
+
+static int max77693_get_flash_faults(struct max77693_sub_led *sub_led)
+{
+       struct max77693_led_device *led = sub_led_to_led(sub_led);
+       struct regmap *rmap = led->regmap;
+       unsigned int v;
+       u8 fault_open_mask, fault_short_mask;
+       int ret;
+
+       sub_led->flash_faults = 0;
+
+       if (led->iout_joint) {
+               fault_open_mask = FLASH_INT_FLED1_OPEN | FLASH_INT_FLED2_OPEN;
+               fault_short_mask = FLASH_INT_FLED1_SHORT |
+                                                       FLASH_INT_FLED2_SHORT;
+       } else {
+               fault_open_mask = (sub_led->fled_id == FLED1) ?
+                                               FLASH_INT_FLED1_OPEN :
+                                               FLASH_INT_FLED2_OPEN;
+               fault_short_mask = (sub_led->fled_id == FLED1) ?
+                                               FLASH_INT_FLED1_SHORT :
+                                               FLASH_INT_FLED2_SHORT;
+       }
+
+       ret = regmap_read(rmap, MAX77693_LED_REG_FLASH_INT, &v);
+       if (ret < 0)
+               return ret;
+
+       if (v & fault_open_mask)
+               sub_led->flash_faults |= LED_FAULT_OVER_VOLTAGE;
+       if (v & fault_short_mask)
+               sub_led->flash_faults |= LED_FAULT_SHORT_CIRCUIT;
+       if (v & FLASH_INT_OVER_CURRENT)
+               sub_led->flash_faults |= LED_FAULT_OVER_CURRENT;
+
+       return 0;
+}
+
+static int max77693_setup(struct max77693_led_device *led,
+                        struct max77693_led_config_data *led_cfg)
+{
+       struct regmap *rmap = led->regmap;
+       int i, first_led, last_led, ret;
+       u32 max_flash_curr[2];
+       u8 v;
+
+       /*
+        * Initialize only the flash current. The torch current doesn't
+        * require initialization, as the ITORCH register is written with
+        * a new value each time the brightness_set op is called.
+        */
+       if (led->iout_joint) {
+               first_led = FLED1;
+               last_led = FLED1;
+               max_flash_curr[FLED1] = led_cfg->iout_flash_max[FLED1] +
+                                       led_cfg->iout_flash_max[FLED2];
+       } else {
+               first_led = max77693_fled_used(led, FLED1) ? FLED1 : FLED2;
+               last_led = max77693_fled_used(led, FLED2) ? FLED2 : FLED1;
+               max_flash_curr[FLED1] = led_cfg->iout_flash_max[FLED1];
+               max_flash_curr[FLED2] = led_cfg->iout_flash_max[FLED2];
+       }
+
+       for (i = first_led; i <= last_led; ++i) {
+               ret = max77693_set_flash_current(led, i,
+                                       max_flash_curr[i]);
+               if (ret < 0)
+                       return ret;
+       }
+
+       v = TORCH_TMR_NO_TIMER | MAX77693_LED_TRIG_TYPE_LEVEL;
+       ret = regmap_write(rmap, MAX77693_LED_REG_ITORCHTIMER, v);
+       if (ret < 0)
+               return ret;
+
+       if (led_cfg->low_vsys > 0)
+               v = max77693_led_vsys_to_reg(led_cfg->low_vsys) |
+                                               MAX_FLASH1_MAX_FL_EN;
+       else
+               v = 0;
+
+       ret = regmap_write(rmap, MAX77693_LED_REG_MAX_FLASH1, v);
+       if (ret < 0)
+               return ret;
+       ret = regmap_write(rmap, MAX77693_LED_REG_MAX_FLASH2, 0);
+       if (ret < 0)
+               return ret;
+
+       if (led_cfg->boost_mode == MAX77693_LED_BOOST_FIXED)
+               v = FLASH_BOOST_FIXED;
+       else
+               v = led_cfg->boost_mode | led_cfg->boost_mode << 1;
+
+       if (max77693_fled_used(led, FLED1) && max77693_fled_used(led, FLED2))
+               v |= FLASH_BOOST_LEDNUM_2;
+
+       ret = regmap_write(rmap, MAX77693_LED_REG_VOUT_CNTL, v);
+       if (ret < 0)
+               return ret;
+
+       v = max77693_led_vout_to_reg(led_cfg->boost_vout);
+       ret = regmap_write(rmap, MAX77693_LED_REG_VOUT_FLASH1, v);
+       if (ret < 0)
+               return ret;
+
+       return max77693_set_mode_reg(led, MODE_OFF);
+}
+
+static int __max77693_led_brightness_set(struct max77693_led_device *led,
+                                       int fled_id, enum led_brightness value)
+{
+       int ret;
+
+       mutex_lock(&led->lock);
+
+       if (value == 0) {
+               ret = max77693_clear_mode(led, MODE_TORCH(fled_id));
+               if (ret < 0)
+                       dev_dbg(&led->pdev->dev,
+                               "Failed to clear torch mode (%d)\n",
+                               ret);
+               goto unlock;
+       }
+
+       ret = max77693_set_torch_current(led, fled_id, value * TORCH_IOUT_STEP);
+       if (ret < 0) {
+               dev_dbg(&led->pdev->dev,
+                       "Failed to set torch current (%d)\n",
+                       ret);
+               goto unlock;
+       }
+
+       ret = max77693_add_mode(led, MODE_TORCH(fled_id));
+       if (ret < 0)
+               dev_dbg(&led->pdev->dev,
+                       "Failed to set torch mode (%d)\n",
+                       ret);
+unlock:
+       mutex_unlock(&led->lock);
+       return ret;
+}
+
+static void max77693_led_brightness_set_work(
+                                       struct work_struct *work)
+{
+       struct max77693_sub_led *sub_led =
+                       container_of(work, struct max77693_sub_led,
+                                       work_brightness_set);
+       struct max77693_led_device *led = sub_led_to_led(sub_led);
+
+       __max77693_led_brightness_set(led, sub_led->fled_id,
+                               sub_led->torch_brightness);
+}
+
+/* LED subsystem callbacks */
+
+static int max77693_led_brightness_set_sync(
+                               struct led_classdev *led_cdev,
+                               enum led_brightness value)
+{
+       struct led_classdev_flash *fled_cdev = lcdev_to_flcdev(led_cdev);
+       struct max77693_sub_led *sub_led = flcdev_to_sub_led(fled_cdev);
+       struct max77693_led_device *led = sub_led_to_led(sub_led);
+
+       return __max77693_led_brightness_set(led, sub_led->fled_id, value);
+}
+
+static void max77693_led_brightness_set(
+                               struct led_classdev *led_cdev,
+                               enum led_brightness value)
+{
+       struct led_classdev_flash *fled_cdev = lcdev_to_flcdev(led_cdev);
+       struct max77693_sub_led *sub_led = flcdev_to_sub_led(fled_cdev);
+
+       sub_led->torch_brightness = value;
+       schedule_work(&sub_led->work_brightness_set);
+}
+
+static int max77693_led_flash_brightness_set(
+                               struct led_classdev_flash *fled_cdev,
+                               u32 brightness)
+{
+       struct max77693_sub_led *sub_led = flcdev_to_sub_led(fled_cdev);
+       struct max77693_led_device *led = sub_led_to_led(sub_led);
+       int ret;
+
+       mutex_lock(&led->lock);
+       ret = max77693_set_flash_current(led, sub_led->fled_id, brightness);
+       mutex_unlock(&led->lock);
+
+       return ret;
+}
+
+static int max77693_led_flash_strobe_set(
+                               struct led_classdev_flash *fled_cdev,
+                               bool state)
+{
+       struct max77693_sub_led *sub_led = flcdev_to_sub_led(fled_cdev);
+       struct max77693_led_device *led = sub_led_to_led(sub_led);
+       int fled_id = sub_led->fled_id;
+       int ret;
+
+       mutex_lock(&led->lock);
+
+       if (!state) {
+               ret = max77693_clear_mode(led, MODE_FLASH(fled_id));
+               goto unlock;
+       }
+
+       if (sub_led->flash_timeout != led->current_flash_timeout) {
+               ret = max77693_set_timeout(led, sub_led->flash_timeout);
+               if (ret < 0)
+                       goto unlock;
+       }
+
+       led->strobing_sub_led_id = fled_id;
+
+       ret = max77693_add_mode(led, MODE_FLASH(fled_id));
+       if (ret < 0)
+               goto unlock;
+
+       ret = max77693_get_flash_faults(sub_led);
+
+unlock:
+       mutex_unlock(&led->lock);
+       return ret;
+}
+
+static int max77693_led_flash_fault_get(
+                               struct led_classdev_flash *fled_cdev,
+                               u32 *fault)
+{
+       struct max77693_sub_led *sub_led = flcdev_to_sub_led(fled_cdev);
+
+       *fault = sub_led->flash_faults;
+
+       return 0;
+}
+
+static int max77693_led_flash_strobe_get(
+                               struct led_classdev_flash *fled_cdev,
+                               bool *state)
+{
+       struct max77693_sub_led *sub_led = flcdev_to_sub_led(fled_cdev);
+       struct max77693_led_device *led = sub_led_to_led(sub_led);
+       int ret;
+
+       if (!state)
+               return -EINVAL;
+
+       mutex_lock(&led->lock);
+
+       ret = max77693_get_strobe_status(led, state);
+
+       *state = !!(*state && (led->strobing_sub_led_id == sub_led->fled_id));
+
+       mutex_unlock(&led->lock);
+
+       return ret;
+}
+
+static int max77693_led_flash_timeout_set(
+                               struct led_classdev_flash *fled_cdev,
+                               u32 timeout)
+{
+       struct max77693_sub_led *sub_led = flcdev_to_sub_led(fled_cdev);
+       struct max77693_led_device *led = sub_led_to_led(sub_led);
+
+       mutex_lock(&led->lock);
+       sub_led->flash_timeout = timeout;
+       mutex_unlock(&led->lock);
+
+       return 0;
+}
+
+static int max77693_led_parse_dt(struct max77693_led_device *led,
+                               struct max77693_led_config_data *cfg,
+                               struct device_node **sub_nodes)
+{
+       struct device *dev = &led->pdev->dev;
+       struct max77693_sub_led *sub_leds = led->sub_leds;
+       struct device_node *node = dev->of_node, *child_node;
+       struct property *prop;
+       u32 led_sources[2];
+       int i, ret, fled_id;
+
+       of_property_read_u32(node, "maxim,boost-mode", &cfg->boost_mode);
+       of_property_read_u32(node, "maxim,boost-mvout", &cfg->boost_vout);
+       of_property_read_u32(node, "maxim,mvsys-min", &cfg->low_vsys);
+
+       for_each_available_child_of_node(node, child_node) {
+               prop = of_find_property(child_node, "led-sources", NULL);
+               if (prop) {
+                       const __be32 *srcs = NULL;
+
+                       for (i = 0; i < ARRAY_SIZE(led_sources); ++i) {
+                               srcs = of_prop_next_u32(prop, srcs,
+                                                       &led_sources[i]);
+                               if (!srcs)
+                                       break;
+                       }
+               } else {
+                       dev_err(dev,
+                               "led-sources DT property missing\n");
+                       of_node_put(child_node);
+                       return -EINVAL;
+               }
+
+               if (i == 2) {
+                       fled_id = FLED1;
+                       led->fled_mask = FLED1_IOUT | FLED2_IOUT;
+               } else if (led_sources[0] == FLED1) {
+                       fled_id = FLED1;
+                       led->fled_mask |= FLED1_IOUT;
+               } else if (led_sources[0] == FLED2) {
+                       fled_id = FLED2;
+                       led->fled_mask |= FLED2_IOUT;
+               } else {
+                       dev_err(dev,
+                               "Wrong led-sources DT property value.\n");
+                       of_node_put(child_node);
+                       return -EINVAL;
+               }
+
+               if (sub_nodes[fled_id]) {
+                       dev_err(dev,
+                               "Conflicting \"led-sources\" DT properties\n");
+                       return -EINVAL;
+               }
+
+               sub_nodes[fled_id] = child_node;
+               sub_leds[fled_id].fled_id = fled_id;
+
+               cfg->label[fled_id] =
+                       of_get_property(child_node, "label", NULL) ? :
+                                               child_node->name;
+
+               ret = of_property_read_u32(child_node, "led-max-microamp",
+                                       &cfg->iout_torch_max[fled_id]);
+               if (ret < 0) {
+                       cfg->iout_torch_max[fled_id] = TORCH_IOUT_MIN;
+                       dev_warn(dev, "led-max-microamp DT property missing\n");
+               }
+
+               ret = of_property_read_u32(child_node, "flash-max-microamp",
+                                       &cfg->iout_flash_max[fled_id]);
+               if (ret < 0) {
+                       cfg->iout_flash_max[fled_id] = FLASH_IOUT_MIN;
+                       dev_warn(dev,
+                                "flash-max-microamp DT property missing\n");
+               }
+
+               ret = of_property_read_u32(child_node, "flash-max-timeout-us",
+                                       &cfg->flash_timeout_max[fled_id]);
+               if (ret < 0) {
+                       cfg->flash_timeout_max[fled_id] = FLASH_TIMEOUT_MIN;
+                       dev_warn(dev,
+                                "flash-max-timeout-us DT property missing\n");
+               }
+
+               if (++cfg->num_leds == 2 ||
+                   (max77693_fled_used(led, FLED1) &&
+                    max77693_fled_used(led, FLED2))) {
+                       of_node_put(child_node);
+                       break;
+               }
+       }
+
+       if (cfg->num_leds == 0) {
+               dev_err(dev, "No DT child node found for connected LED(s).\n");
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+static void clamp_align(u32 *v, u32 min, u32 max, u32 step)
+{
+       *v = clamp_val(*v, min, max);
+       if (step > 1)
+               *v = (*v - min) / step * step + min;
+}
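+
+/*
+ * clamp_align() example with hypothetical bounds: min = 100, max = 1000,
+ * step = 250 and *v = 620 first clamps to 620 and then aligns down to
+ * 600 (= (620 - 100) / 250 * 250 + 100).
+ */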
+
+static void max77693_align_iout_current(struct max77693_led_device *led,
+                                       u32 *iout, u32 min, u32 max, u32 step)
+{
+       int i;
+
+       if (led->iout_joint) {
+               if (iout[FLED1] > min) {
+                       iout[FLED1] /= 2;
+                       iout[FLED2] = iout[FLED1];
+               } else {
+                       iout[FLED1] = min;
+                       iout[FLED2] = 0;
+                       return;
+               }
+       }
+
+       for (i = FLED1; i <= FLED2; ++i)
+               if (max77693_fled_used(led, i))
+                       clamp_align(&iout[i], min, max, step);
+               else
+                       iout[i] = 0;
+}
+
+static void max77693_led_validate_configuration(struct max77693_led_device *led,
+                                       struct max77693_led_config_data *cfg)
+{
+       u32 flash_iout_max = cfg->boost_mode ? FLASH_IOUT_MAX_2LEDS :
+                                              FLASH_IOUT_MAX_1LED;
+       int i;
+
+       if (cfg->num_leds == 1 &&
+           max77693_fled_used(led, FLED1) && max77693_fled_used(led, FLED2))
+               led->iout_joint = true;
+
+       cfg->boost_mode = clamp_val(cfg->boost_mode, MAX77693_LED_BOOST_NONE,
+                           MAX77693_LED_BOOST_FIXED);
+
+       /* Boost must be enabled if both current outputs are used */
+       if ((cfg->boost_mode == MAX77693_LED_BOOST_NONE) && led->iout_joint)
+               cfg->boost_mode = MAX77693_LED_BOOST_FIXED;
+
+       max77693_align_iout_current(led, cfg->iout_torch_max,
+                       TORCH_IOUT_MIN, TORCH_IOUT_MAX, TORCH_IOUT_STEP);
+
+       max77693_align_iout_current(led, cfg->iout_flash_max,
+                       FLASH_IOUT_MIN, flash_iout_max, FLASH_IOUT_STEP);
+
+       for (i = 0; i < ARRAY_SIZE(cfg->flash_timeout_max); ++i)
+               clamp_align(&cfg->flash_timeout_max[i], FLASH_TIMEOUT_MIN,
+                               FLASH_TIMEOUT_MAX, FLASH_TIMEOUT_STEP);
+
+       clamp_align(&cfg->boost_vout, FLASH_VOUT_MIN, FLASH_VOUT_MAX,
+                                                       FLASH_VOUT_STEP);
+
+       if (cfg->low_vsys)
+               clamp_align(&cfg->low_vsys, MAX_FLASH1_VSYS_MIN,
+                               MAX_FLASH1_VSYS_MAX, MAX_FLASH1_VSYS_STEP);
+}
+
+static int max77693_led_get_configuration(struct max77693_led_device *led,
+                               struct max77693_led_config_data *cfg,
+                               struct device_node **sub_nodes)
+{
+       int ret;
+
+       ret = max77693_led_parse_dt(led, cfg, sub_nodes);
+       if (ret < 0)
+               return ret;
+
+       max77693_led_validate_configuration(led, cfg);
+
+       memcpy(led->iout_torch_max, cfg->iout_torch_max,
+                               sizeof(led->iout_torch_max));
+       memcpy(led->iout_flash_max, cfg->iout_flash_max,
+                               sizeof(led->iout_flash_max));
+
+       return 0;
+}
+
+static const struct led_flash_ops flash_ops = {
+       .flash_brightness_set   = max77693_led_flash_brightness_set,
+       .strobe_set             = max77693_led_flash_strobe_set,
+       .strobe_get             = max77693_led_flash_strobe_get,
+       .timeout_set            = max77693_led_flash_timeout_set,
+       .fault_get              = max77693_led_flash_fault_get,
+};
+
+static void max77693_init_flash_settings(struct max77693_sub_led *sub_led,
+                                struct max77693_led_config_data *led_cfg)
+{
+       struct led_classdev_flash *fled_cdev = &sub_led->fled_cdev;
+       struct max77693_led_device *led = sub_led_to_led(sub_led);
+       int fled_id = sub_led->fled_id;
+       struct led_flash_setting *setting;
+
+       /* Init flash intensity setting */
+       setting = &fled_cdev->brightness;
+       setting->min = FLASH_IOUT_MIN;
+       setting->max = led->iout_joint ?
+               led_cfg->iout_flash_max[FLED1] +
+               led_cfg->iout_flash_max[FLED2] :
+               led_cfg->iout_flash_max[fled_id];
+       setting->step = FLASH_IOUT_STEP;
+       setting->val = setting->max;
+
+       /* Init flash timeout setting */
+       setting = &fled_cdev->timeout;
+       setting->min = FLASH_TIMEOUT_MIN;
+       setting->max = led_cfg->flash_timeout_max[fled_id];
+       setting->step = FLASH_TIMEOUT_STEP;
+       setting->val = setting->max;
+}
+
+#if IS_ENABLED(CONFIG_V4L2_FLASH_LED_CLASS)
+
+static int max77693_led_external_strobe_set(
+                               struct v4l2_flash *v4l2_flash,
+                               bool enable)
+{
+       struct max77693_sub_led *sub_led =
+                               flcdev_to_sub_led(v4l2_flash->fled_cdev);
+       struct max77693_led_device *led = sub_led_to_led(sub_led);
+       int fled_id = sub_led->fled_id;
+       int ret;
+
+       mutex_lock(&led->lock);
+
+       if (enable)
+               ret = max77693_add_mode(led, MODE_FLASH_EXTERNAL(fled_id));
+       else
+               ret = max77693_clear_mode(led, MODE_FLASH_EXTERNAL(fled_id));
+
+       mutex_unlock(&led->lock);
+
+       return ret;
+}
+
+static void max77693_init_v4l2_flash_config(struct max77693_sub_led *sub_led,
+                               struct max77693_led_config_data *led_cfg,
+                               struct v4l2_flash_config *v4l2_sd_cfg)
+{
+       struct max77693_led_device *led = sub_led_to_led(sub_led);
+       struct device *dev = &led->pdev->dev;
+       struct max77693_dev *iodev = dev_get_drvdata(dev->parent);
+       struct i2c_client *i2c = iodev->i2c;
+       struct led_flash_setting *s;
+
+       snprintf(v4l2_sd_cfg->dev_name, sizeof(v4l2_sd_cfg->dev_name),
+                "%s %d-%04x", sub_led->fled_cdev.led_cdev.name,
+                i2c_adapter_id(i2c->adapter), i2c->addr);
+
+       s = &v4l2_sd_cfg->torch_intensity;
+       s->min = TORCH_IOUT_MIN;
+       s->max = sub_led->fled_cdev.led_cdev.max_brightness * TORCH_IOUT_STEP;
+       s->step = TORCH_IOUT_STEP;
+       s->val = s->max;
+
+       /* Init flash faults config */
+       v4l2_sd_cfg->flash_faults = LED_FAULT_OVER_VOLTAGE |
+                               LED_FAULT_SHORT_CIRCUIT |
+                               LED_FAULT_OVER_CURRENT;
+
+       v4l2_sd_cfg->has_external_strobe = true;
+}
+
+static const struct v4l2_flash_ops v4l2_flash_ops = {
+       .external_strobe_set = max77693_led_external_strobe_set,
+};
+#else
+static inline void max77693_init_v4l2_flash_config(
+                               struct max77693_sub_led *sub_led,
+                               struct max77693_led_config_data *led_cfg,
+                               struct v4l2_flash_config *v4l2_sd_cfg)
+{
+}
+static const struct v4l2_flash_ops v4l2_flash_ops;
+#endif
+
+static void max77693_init_fled_cdev(struct max77693_sub_led *sub_led,
+                               struct max77693_led_config_data *led_cfg)
+{
+       struct max77693_led_device *led = sub_led_to_led(sub_led);
+       int fled_id = sub_led->fled_id;
+       struct led_classdev_flash *fled_cdev;
+       struct led_classdev *led_cdev;
+
+       /* Initialize LED Flash class device */
+       fled_cdev = &sub_led->fled_cdev;
+       fled_cdev->ops = &flash_ops;
+       led_cdev = &fled_cdev->led_cdev;
+
+       led_cdev->name = led_cfg->label[fled_id];
+
+       led_cdev->brightness_set = max77693_led_brightness_set;
+       led_cdev->brightness_set_sync = max77693_led_brightness_set_sync;
+       led_cdev->max_brightness = (led->iout_joint ?
+                                       led_cfg->iout_torch_max[FLED1] +
+                                       led_cfg->iout_torch_max[FLED2] :
+                                       led_cfg->iout_torch_max[fled_id]) /
+                                  TORCH_IOUT_STEP;
+       led_cdev->flags |= LED_DEV_CAP_FLASH;
+       INIT_WORK(&sub_led->work_brightness_set,
+                       max77693_led_brightness_set_work);
+
+       max77693_init_flash_settings(sub_led, led_cfg);
+
+       /* Init flash timeout cache */
+       sub_led->flash_timeout = fled_cdev->timeout.val;
+}
+
+static int max77693_register_led(struct max77693_sub_led *sub_led,
+                                struct max77693_led_config_data *led_cfg,
+                                struct device_node *sub_node)
+{
+       struct max77693_led_device *led = sub_led_to_led(sub_led);
+       struct led_classdev_flash *fled_cdev = &sub_led->fled_cdev;
+       struct device *dev = &led->pdev->dev;
+       struct v4l2_flash_config v4l2_sd_cfg = {};
+       int ret;
+
+       /* Register in the LED subsystem */
+       ret = led_classdev_flash_register(dev, fled_cdev);
+       if (ret < 0)
+               return ret;
+
+       max77693_init_v4l2_flash_config(sub_led, led_cfg, &v4l2_sd_cfg);
+
+       /* Register in the V4L2 subsystem. */
+       sub_led->v4l2_flash = v4l2_flash_init(dev, sub_node, fled_cdev, NULL,
+                                             &v4l2_flash_ops, &v4l2_sd_cfg);
+       if (IS_ERR(sub_led->v4l2_flash)) {
+               ret = PTR_ERR(sub_led->v4l2_flash);
+               goto err_v4l2_flash_init;
+       }
+
+       return 0;
+
+err_v4l2_flash_init:
+       led_classdev_flash_unregister(fled_cdev);
+       return ret;
+}
+
+static int max77693_led_probe(struct platform_device *pdev)
+{
+       struct device *dev = &pdev->dev;
+       struct max77693_dev *iodev = dev_get_drvdata(dev->parent);
+       struct max77693_led_device *led;
+       struct max77693_sub_led *sub_leds;
+       struct device_node *sub_nodes[2] = {};
+       struct max77693_led_config_data led_cfg = {};
+       int init_fled_cdev[2], i, ret;
+
+       led = devm_kzalloc(dev, sizeof(*led), GFP_KERNEL);
+       if (!led)
+               return -ENOMEM;
+
+       led->pdev = pdev;
+       led->regmap = iodev->regmap;
+       led->allowed_modes = MODE_FLASH_MASK;
+       sub_leds = led->sub_leds;
+
+       platform_set_drvdata(pdev, led);
+       ret = max77693_led_get_configuration(led, &led_cfg, sub_nodes);
+       if (ret < 0)
+               return ret;
+
+       ret = max77693_setup(led, &led_cfg);
+       if (ret < 0)
+               return ret;
+
+       mutex_init(&led->lock);
+
+       init_fled_cdev[FLED1] =
+                       led->iout_joint || max77693_fled_used(led, FLED1);
+       init_fled_cdev[FLED2] =
+                       !led->iout_joint && max77693_fled_used(led, FLED2);
+
+       for (i = FLED1; i <= FLED2; ++i) {
+               if (!init_fled_cdev[i])
+                       continue;
+
+               /* Initialize LED Flash class device */
+               max77693_init_fled_cdev(&sub_leds[i], &led_cfg);
+
+               /*
+                * Register LED Flash class device and corresponding
+                * V4L2 Flash device.
+                */
+               ret = max77693_register_led(&sub_leds[i], &led_cfg,
+                                               sub_nodes[i]);
+               if (ret < 0) {
+                       /*
+                        * At this moment FLED1 might have been already
+                        * registered and it needs to be released.
+                        */
+                       if (i == FLED2)
+                               goto err_register_led2;
+                       else
+                               goto err_register_led1;
+               }
+       }
+
+       return 0;
+
+err_register_led2:
+       /* It is possible that only FLED2 was to be registered */
+       if (!init_fled_cdev[FLED1])
+               goto err_register_led1;
+       v4l2_flash_release(sub_leds[FLED1].v4l2_flash);
+       led_classdev_flash_unregister(&sub_leds[FLED1].fled_cdev);
+err_register_led1:
+       mutex_destroy(&led->lock);
+
+       return ret;
+}
+
+static int max77693_led_remove(struct platform_device *pdev)
+{
+       struct max77693_led_device *led = platform_get_drvdata(pdev);
+       struct max77693_sub_led *sub_leds = led->sub_leds;
+
+       if (led->iout_joint || max77693_fled_used(led, FLED1)) {
+               v4l2_flash_release(sub_leds[FLED1].v4l2_flash);
+               led_classdev_flash_unregister(&sub_leds[FLED1].fled_cdev);
+               cancel_work_sync(&sub_leds[FLED1].work_brightness_set);
+       }
+
+       if (!led->iout_joint && max77693_fled_used(led, FLED2)) {
+               v4l2_flash_release(sub_leds[FLED2].v4l2_flash);
+               led_classdev_flash_unregister(&sub_leds[FLED2].fled_cdev);
+               cancel_work_sync(&sub_leds[FLED2].work_brightness_set);
+       }
+
+       mutex_destroy(&led->lock);
+
+       return 0;
+}
+
+static const struct of_device_id max77693_led_dt_match[] = {
+       { .compatible = "maxim,max77693-led" },
+       {},
+};
+
+static struct platform_driver max77693_led_driver = {
+       .probe          = max77693_led_probe,
+       .remove         = max77693_led_remove,
+       .driver         = {
+               .name   = "max77693-led",
+               .of_match_table = max77693_led_dt_match,
+       },
+};
+
+module_platform_driver(max77693_led_driver);
+
+MODULE_AUTHOR("Jacek Anaszewski <j.anaszewski@samsung.com>");
+MODULE_AUTHOR("Andrzej Hajda <a.hajda@samsung.com>");
+MODULE_DESCRIPTION("Maxim MAX77693 led flash driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/leds/leds-tlc591xx.c b/drivers/leds/leds-tlc591xx.c
new file mode 100644 (file)
index 0000000..de16c29
--- /dev/null
@@ -0,0 +1,300 @@
+/*
+ * Copyright 2014 Belkin Inc.
+ * Copyright 2015 Andrew Lunn <andrew@lunn.ch>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ */
+
+#include <linux/i2c.h>
+#include <linux/leds.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+#include <linux/workqueue.h>
+
+#define TLC591XX_MAX_LEDS      16
+
+#define TLC591XX_REG_MODE1     0x00
+#define MODE1_RESPON_ADDR_MASK 0xF0
+#define MODE1_NORMAL_MODE      (0 << 4)
+#define MODE1_SPEED_MODE       (1 << 4)
+
+#define TLC591XX_REG_MODE2     0x01
+#define MODE2_DIM              (0 << 5)
+#define MODE2_BLINK            (1 << 5)
+#define MODE2_OCH_STOP         (0 << 3)
+#define MODE2_OCH_ACK          (1 << 3)
+
+#define TLC591XX_REG_PWM(x)    (0x02 + (x))
+
+#define TLC591XX_REG_GRPPWM    0x12
+#define TLC591XX_REG_GRPFREQ   0x13
+
+/* LED Driver Output State: determines the source that drives the LED outputs */
+#define LEDOUT_OFF             0x0     /* Output LOW */
+#define LEDOUT_ON              0x1     /* Output HI-Z */
+#define LEDOUT_DIM             0x2     /* Dimming */
+#define LEDOUT_BLINK           0x3     /* Blinking */
+#define LEDOUT_MASK            0x3
+
+#define ldev_to_led(c)         container_of(c, struct tlc591xx_led, ldev)
+#define work_to_led(work)      container_of(work, struct tlc591xx_led, work)
+
+struct tlc591xx_led {
+       bool active;
+       unsigned int led_no;
+       struct led_classdev ldev;
+       struct work_struct work;
+       struct tlc591xx_priv *priv;
+};
+
+struct tlc591xx_priv {
+       struct tlc591xx_led leds[TLC591XX_MAX_LEDS];
+       struct regmap *regmap;
+       unsigned int reg_ledout_offset;
+};
+
+struct tlc591xx {
+       unsigned int max_leds;
+       unsigned int reg_ledout_offset;
+};
+
+static const struct tlc591xx tlc59116 = {
+       .max_leds = 16,
+       .reg_ledout_offset = 0x14,
+};
+
+static const struct tlc591xx tlc59108 = {
+       .max_leds = 8,
+       .reg_ledout_offset = 0x0c,
+};
+
+static int
+tlc591xx_set_mode(struct regmap *regmap, u8 mode)
+{
+       int err;
+       u8 val;
+
+       err = regmap_write(regmap, TLC591XX_REG_MODE1, MODE1_NORMAL_MODE);
+       if (err)
+               return err;
+
+       val = MODE2_OCH_STOP | mode;
+
+       return regmap_write(regmap, TLC591XX_REG_MODE2, val);
+}
+
+static int
+tlc591xx_set_ledout(struct tlc591xx_priv *priv, struct tlc591xx_led *led,
+                   u8 val)
+{
+       unsigned int i = (led->led_no % 4) * 2;
+       unsigned int mask = LEDOUT_MASK << i;
+       unsigned int addr = priv->reg_ledout_offset + (led->led_no >> 2);
+
+       val = val << i;
+
+       return regmap_update_bits(priv->regmap, addr, mask, val);
+}
+
+static int
+tlc591xx_set_pwm(struct tlc591xx_priv *priv, struct tlc591xx_led *led,
+                u8 brightness)
+{
+       u8 pwm = TLC591XX_REG_PWM(led->led_no);
+
+       return regmap_write(priv->regmap, pwm, brightness);
+}
+
+static void
+tlc591xx_led_work(struct work_struct *work)
+{
+       struct tlc591xx_led *led = work_to_led(work);
+       struct tlc591xx_priv *priv = led->priv;
+       enum led_brightness brightness = led->ldev.brightness;
+       int err;
+
+       switch (brightness) {
+       case 0:
+               err = tlc591xx_set_ledout(priv, led, LEDOUT_OFF);
+               break;
+       case LED_FULL:
+               err = tlc591xx_set_ledout(priv, led, LEDOUT_ON);
+               break;
+       default:
+               err = tlc591xx_set_ledout(priv, led, LEDOUT_DIM);
+               if (!err)
+                       err = tlc591xx_set_pwm(priv, led, brightness);
+       }
+
+       if (err)
+               dev_err(led->ldev.dev, "Failed setting brightness\n");
+}
+
+static void
+tlc591xx_brightness_set(struct led_classdev *led_cdev,
+                       enum led_brightness brightness)
+{
+       struct tlc591xx_led *led = ldev_to_led(led_cdev);
+
+       led->ldev.brightness = brightness;
+       schedule_work(&led->work);
+}
+
+static void
+tlc591xx_destroy_devices(struct tlc591xx_priv *priv, unsigned int j)
+{
+       int i = j;
+
+       while (--i >= 0) {
+               if (priv->leds[i].active) {
+                       led_classdev_unregister(&priv->leds[i].ldev);
+                       cancel_work_sync(&priv->leds[i].work);
+               }
+       }
+}
+
+static int
+tlc591xx_configure(struct device *dev,
+                  struct tlc591xx_priv *priv,
+                  const struct tlc591xx *tlc591xx)
+{
+       unsigned int i;
+       int err = 0;
+
+       tlc591xx_set_mode(priv->regmap, MODE2_DIM);
+       for (i = 0; i < TLC591XX_MAX_LEDS; i++) {
+               struct tlc591xx_led *led = &priv->leds[i];
+
+               if (!led->active)
+                       continue;
+
+               led->priv = priv;
+               led->led_no = i;
+               led->ldev.brightness_set = tlc591xx_brightness_set;
+               led->ldev.max_brightness = LED_FULL;
+               INIT_WORK(&led->work, tlc591xx_led_work);
+               err = led_classdev_register(dev, &led->ldev);
+               if (err < 0) {
+                       dev_err(dev, "couldn't register LED %s\n",
+                               led->ldev.name);
+                       goto exit;
+               }
+       }
+
+       return 0;
+
+exit:
+       tlc591xx_destroy_devices(priv, i);
+       return err;
+}
+
+static const struct regmap_config tlc591xx_regmap = {
+       .reg_bits = 8,
+       .val_bits = 8,
+       .max_register = 0x1e,
+};
+
+static const struct of_device_id of_tlc591xx_leds_match[] = {
+       { .compatible = "ti,tlc59116",
+         .data = &tlc59116 },
+       { .compatible = "ti,tlc59108",
+         .data = &tlc59108 },
+       {},
+};
+MODULE_DEVICE_TABLE(of, of_tlc591xx_leds_match);
+
+static int
+tlc591xx_probe(struct i2c_client *client,
+              const struct i2c_device_id *id)
+{
+       struct device_node *np = client->dev.of_node, *child;
+       struct device *dev = &client->dev;
+       const struct of_device_id *match;
+       const struct tlc591xx *tlc591xx;
+       struct tlc591xx_priv *priv;
+       int err, count, reg;
+
+       match = of_match_device(of_tlc591xx_leds_match, dev);
+       if (!match)
+               return -ENODEV;
+
+       tlc591xx = match->data;
+       if (!np)
+               return -ENODEV;
+
+       count = of_get_child_count(np);
+       if (!count || count > tlc591xx->max_leds)
+               return -EINVAL;
+
+       if (!i2c_check_functionality(client->adapter,
+                                    I2C_FUNC_SMBUS_BYTE_DATA))
+               return -EIO;
+
+       priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+       if (!priv)
+               return -ENOMEM;
+
+       priv->regmap = devm_regmap_init_i2c(client, &tlc591xx_regmap);
+       if (IS_ERR(priv->regmap)) {
+               err = PTR_ERR(priv->regmap);
+               dev_err(dev, "Failed to allocate register map: %d\n", err);
+               return err;
+       }
+       priv->reg_ledout_offset = tlc591xx->reg_ledout_offset;
+
+       i2c_set_clientdata(client, priv);
+
+       for_each_child_of_node(np, child) {
+               err = of_property_read_u32(child, "reg", &reg);
+               if (err)
+                       return err;
+               if (reg < 0 || reg >= tlc591xx->max_leds)
+                       return -EINVAL;
+               if (priv->leds[reg].active)
+                       return -EINVAL;
+               priv->leds[reg].active = true;
+               priv->leds[reg].ldev.name =
+                       of_get_property(child, "label", NULL) ? : child->name;
+               priv->leds[reg].ldev.default_trigger =
+                       of_get_property(child, "linux,default-trigger", NULL);
+       }
+       return tlc591xx_configure(dev, priv, tlc591xx);
+}
+
+static int
+tlc591xx_remove(struct i2c_client *client)
+{
+       struct tlc591xx_priv *priv = i2c_get_clientdata(client);
+
+       tlc591xx_destroy_devices(priv, TLC591XX_MAX_LEDS);
+
+       return 0;
+}
+
+static const struct i2c_device_id tlc591xx_id[] = {
+       { "tlc59116" },
+       { "tlc59108" },
+       {},
+};
+MODULE_DEVICE_TABLE(i2c, tlc591xx_id);
+
+static struct i2c_driver tlc591xx_driver = {
+       .driver = {
+               .name = "tlc591xx",
+               .of_match_table = of_match_ptr(of_tlc591xx_leds_match),
+       },
+       .probe = tlc591xx_probe,
+       .remove = tlc591xx_remove,
+       .id_table = tlc591xx_id,
+};
+
+module_i2c_driver(tlc591xx_driver);
+
+MODULE_AUTHOR("Andrew Lunn <andrew@lunn.ch>");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("TLC591XX LED driver");
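tlc591xx_set_ledout() packs four outputs per LEDOUT register, two bits each; a worked sketch of the addressing arithmetic with illustrative values:

/* Illustrative only: LED 10 on a TLC59116 (reg_ledout_offset = 0x14):
 *   shift = (10 % 4) * 2        = 4
 *   mask  = LEDOUT_MASK << 4    = 0x30
 *   addr  = 0x14 + (10 >> 2)    = 0x16
 * so regmap_update_bits(regmap, 0x16, 0x30, LEDOUT_DIM << 4) switches
 * that single output to PWM dimming without touching its neighbours.
 */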
index 79efe57c74058ffd8fecf6583257f3b6c193b3dd..bc89d7ace2c44e48b1e4c318dc53103767eba942 100644 (file)
@@ -13,7 +13,6 @@
 #ifndef __LEDS_H_INCLUDED
 #define __LEDS_H_INCLUDED
 
-#include <linux/device.h>
 #include <linux/rwsem.h>
 #include <linux/leds.h>
 
@@ -50,27 +49,4 @@ void led_stop_software_blink(struct led_classdev *led_cdev);
 extern struct rw_semaphore leds_list_lock;
 extern struct list_head leds_list;
 
-#ifdef CONFIG_LEDS_TRIGGERS
-void led_trigger_set_default(struct led_classdev *led_cdev);
-void led_trigger_set(struct led_classdev *led_cdev,
-                       struct led_trigger *trigger);
-void led_trigger_remove(struct led_classdev *led_cdev);
-
-static inline void *led_get_trigger_data(struct led_classdev *led_cdev)
-{
-       return led_cdev->trigger_data;
-}
-
-#else
-#define led_trigger_set_default(x) do {} while (0)
-#define led_trigger_set(x, y) do {} while (0)
-#define led_trigger_remove(x) do {} while (0)
-#define led_get_trigger_data(x) (NULL)
-#endif
-
-ssize_t led_trigger_store(struct device *dev, struct device_attribute *attr,
-                       const char *buf, size_t count);
-ssize_t led_trigger_show(struct device *dev, struct device_attribute *attr,
-                       char *buf);
-
 #endif /* __LEDS_H_INCLUDED */
index f3755e0aa935c96a3aa0b0d3e4122b82e8cfe241..f80acb36ff075ca6a25b398861d97993d32fd57b 100644 (file)
@@ -195,4 +195,4 @@ static int __init ipc_init(void)
 {
        return amba_driver_register(&pl320_driver);
 }
-module_init(ipc_init);
+subsys_initcall(ipc_init);
index fe080ad0e55841e5c95bfcb3dcf3a0f1a703b76c..ce64fc8512518c6bff63b9a551d931341aaf6f84 100644 (file)
@@ -157,7 +157,7 @@ int bch_journal_read(struct cache_set *c, struct list_head *list)
 
        for_each_cache(ca, c, iter) {
                struct journal_device *ja = &ca->journal;
-               unsigned long bitmap[SB_JOURNAL_BUCKETS / BITS_PER_LONG];
+               DECLARE_BITMAP(bitmap, SB_JOURNAL_BUCKETS);
                unsigned i, l, r, m;
                uint64_t seq;
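DECLARE_BITMAP() sizes the array in longs and rounds up, which the open-coded division it replaces did not (it truncates, under-sizing the array whenever SB_JOURNAL_BUCKETS is not a multiple of BITS_PER_LONG). Roughly, the helpers behind the new line are:

/* Rough sketch of the kernel helpers used above. */
#define EXAMPLE_BITS_TO_LONGS(nr)       DIV_ROUND_UP(nr, BITS_PER_LONG)
#define EXAMPLE_DECLARE_BITMAP(name, bits) \
        unsigned long name[EXAMPLE_BITS_TO_LONGS(bits)]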
 
index 4dd2bb7167f05e94bda5d33b213dc67356fc9f5a..94980bfca43473c5ad141171300d71f6bd5cae55 100644 (file)
@@ -760,14 +760,8 @@ static void bcache_device_free(struct bcache_device *d)
        bio_split_pool_free(&d->bio_split_hook);
        if (d->bio_split)
                bioset_free(d->bio_split);
-       if (is_vmalloc_addr(d->full_dirty_stripes))
-               vfree(d->full_dirty_stripes);
-       else
-               kfree(d->full_dirty_stripes);
-       if (is_vmalloc_addr(d->stripe_sectors_dirty))
-               vfree(d->stripe_sectors_dirty);
-       else
-               kfree(d->stripe_sectors_dirty);
+       kvfree(d->full_dirty_stripes);
+       kvfree(d->stripe_sectors_dirty);
 
        closure_debug_destroy(&d->cl);
 }
index 98df7572b5f7f82b9091965e199a301159374bee..1d04c4859c70cba7339f6fe9bd10edd01ebe3ddb 100644 (file)
@@ -52,10 +52,7 @@ struct closure;
 
 #define free_heap(heap)                                                        \
 do {                                                                   \
-       if (is_vmalloc_addr((heap)->data))                              \
-               vfree((heap)->data);                                    \
-       else                                                            \
-               kfree((heap)->data);                                    \
+       kvfree((heap)->data);                                           \
        (heap)->data = NULL;                                            \
 } while (0)
 
@@ -163,10 +160,7 @@ do {                                                                       \
 
 #define free_fifo(fifo)                                                        \
 do {                                                                   \
-       if (is_vmalloc_addr((fifo)->data))                              \
-               vfree((fifo)->data);                                    \
-       else                                                            \
-               kfree((fifo)->data);                                    \
+       kvfree((fifo)->data);                                           \
        (fifo)->data = NULL;                                            \
 } while (0)
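kvfree() folds the repeated is_vmalloc_addr() check into a single call; a minimal sketch of the behaviour assumed by these hunks, for memory that came from either kmalloc() or vmalloc():

/* Roughly what kvfree() does for the callers above. */
static inline void example_kvfree(const void *addr)
{
        if (is_vmalloc_addr(addr))
                vfree(addr);
        else
                kfree(addr);
}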
 
index 135a0907e9de413d140e9fb9b793a91b638a1606..ed2346ddf4c9fb54dafeb92ae9c795a0584444e8 100644 (file)
@@ -839,7 +839,7 @@ static void bitmap_file_kick(struct bitmap *bitmap)
                if (bitmap->storage.file) {
                        path = kmalloc(PAGE_SIZE, GFP_KERNEL);
                        if (path)
-                               ptr = d_path(&bitmap->storage.file->f_path,
+                               ptr = file_path(bitmap->storage.file,
                                             path, PAGE_SIZE);
 
                        printk(KERN_ALERT
@@ -1927,7 +1927,7 @@ void bitmap_status(struct seq_file *seq, struct bitmap *bitmap)
                   chunk_kb ? "KB" : "B");
        if (bitmap->storage.file) {
                seq_printf(seq, ", file: ");
-               seq_path(seq, &bitmap->storage.file->f_path, " \t\n");
+               seq_file_path(seq, bitmap->storage.file, " \t\n");
        }
 
        seq_printf(seq, "\n");
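file_path() and seq_file_path() are thin wrappers that take the struct file rather than its f_path member, so these conversions are mechanical. The assumed correspondence:

/* Assumed one-to-one mapping used by these conversions:
 *   d_path(&file->f_path, buf, len)    ->  file_path(file, buf, len)
 *   seq_path(seq, &file->f_path, esc)  ->  seq_file_path(seq, file, esc)
 */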
index df92d30ca054c68a2af9cc3ee299525d1635a0eb..d429c30cd51471c26cb1c07cb3e6a413106133d4 100644 (file)
@@ -5766,7 +5766,7 @@ static int get_bitmap_file(struct mddev *mddev, void __user * arg)
        /* bitmap disabled, zero the first byte and copy out */
        if (!mddev->bitmap_info.file)
                file->pathname[0] = '\0';
-       else if ((ptr = d_path(&mddev->bitmap_info.file->f_path,
+       else if ((ptr = file_path(mddev->bitmap_info.file,
                               file->pathname, sizeof(file->pathname))),
                 IS_ERR(ptr))
                err = PTR_ERR(ptr);
index 6d6e0ca91fb46cbb099cdc4762b016293378aa40..58f65486de332cd893d6175ad98ca405c999a221 100644 (file)
@@ -2155,9 +2155,9 @@ static int coda_probe(struct platform_device *pdev)
        }
 
        /* Get IRAM pool from device tree or platform data */
-       pool = of_get_named_gen_pool(np, "iram", 0);
+       pool = of_gen_pool_get(np, "iram", 0);
        if (!pool && pdata)
-               pool = dev_get_gen_pool(pdata->iram_dev);
+               pool = gen_pool_get(pdata->iram_dev);
        if (!pool) {
                dev_err(&pdev->dev, "iram pool not available\n");
                return -ENOMEM;
index f7a01a72eb9e09e39f3128f16ed86f09ff82345f..b4b022933e29e463c8075b53a2ec8b0a0d7271b5 100644 (file)
@@ -44,6 +44,17 @@ config V4L2_MEM2MEM_DEV
         tristate
         depends on VIDEOBUF2_CORE
 
+# Used by LED subsystem flash drivers
+config V4L2_FLASH_LED_CLASS
+       tristate "V4L2 flash API for LED flash class devices"
+       depends on VIDEO_V4L2_SUBDEV_API
+       depends on LEDS_CLASS_FLASH
+       ---help---
+         Say Y here to enable V4L2 flash API support for LED flash
+         class drivers.
+
+         When in doubt, say N.
+
 # Used by drivers that need Videobuf modules
 config VIDEOBUF_GEN
        tristate
index 63d29f27538c079a20397d7f00c45c77d9104f81..dc3de00d68b5a4d88c140cfa0dfc0f2aa459fb5c 100644 (file)
@@ -22,6 +22,8 @@ obj-$(CONFIG_VIDEO_TUNER) += tuner.o
 
 obj-$(CONFIG_V4L2_MEM2MEM_DEV) += v4l2-mem2mem.o
 
+obj-$(CONFIG_V4L2_FLASH_LED_CLASS) += v4l2-flash-led-class.o
+
 obj-$(CONFIG_VIDEOBUF_GEN) += videobuf-core.o
 obj-$(CONFIG_VIDEOBUF_DMA_SG) += videobuf-dma-sg.o
 obj-$(CONFIG_VIDEOBUF_DMA_CONTIG) += videobuf-dma-contig.o
index 85a6a34128a8ecfcba63c372886bcc00cadf74d7..5bada202b2d38c264cbcd87022278d699e411a40 100644 (file)
 #include <media/v4l2-device.h>
 #include <media/v4l2-subdev.h>
 
-static bool match_i2c(struct device *dev, struct v4l2_async_subdev *asd)
+static bool match_i2c(struct v4l2_subdev *sd, struct v4l2_async_subdev *asd)
 {
 #if IS_ENABLED(CONFIG_I2C)
-       struct i2c_client *client = i2c_verify_client(dev);
+       struct i2c_client *client = i2c_verify_client(sd->dev);
        return client &&
                asd->match.i2c.adapter_id == client->adapter->nr &&
                asd->match.i2c.address == client->addr;
@@ -34,14 +34,24 @@ static bool match_i2c(struct device *dev, struct v4l2_async_subdev *asd)
 #endif
 }
 
-static bool match_devname(struct device *dev, struct v4l2_async_subdev *asd)
+static bool match_devname(struct v4l2_subdev *sd,
+                         struct v4l2_async_subdev *asd)
 {
-       return !strcmp(asd->match.device_name.name, dev_name(dev));
+       return !strcmp(asd->match.device_name.name, dev_name(sd->dev));
 }
 
-static bool match_of(struct device *dev, struct v4l2_async_subdev *asd)
+static bool match_of(struct v4l2_subdev *sd, struct v4l2_async_subdev *asd)
 {
-       return dev->of_node == asd->match.of.node;
+       return sd->of_node == asd->match.of.node;
+}
+
+static bool match_custom(struct v4l2_subdev *sd, struct v4l2_async_subdev *asd)
+{
+       if (!asd->match.custom.match)
+               /* Match always */
+               return true;
+
+       return asd->match.custom.match(sd->dev, asd);
 }
 
 static LIST_HEAD(subdev_list);
@@ -51,17 +61,14 @@ static DEFINE_MUTEX(list_lock);
 static struct v4l2_async_subdev *v4l2_async_belongs(struct v4l2_async_notifier *notifier,
                                                    struct v4l2_subdev *sd)
 {
+       bool (*match)(struct v4l2_subdev *, struct v4l2_async_subdev *);
        struct v4l2_async_subdev *asd;
-       bool (*match)(struct device *, struct v4l2_async_subdev *);
 
        list_for_each_entry(asd, &notifier->waiting, list) {
                /* bus_type has been verified valid before */
                switch (asd->match_type) {
                case V4L2_ASYNC_MATCH_CUSTOM:
-                       match = asd->match.custom.match;
-                       if (!match)
-                               /* Match always */
-                               return asd;
+                       match = match_custom;
                        break;
                case V4L2_ASYNC_MATCH_DEVNAME:
                        match = match_devname;
@@ -79,7 +86,7 @@ static struct v4l2_async_subdev *v4l2_async_belongs(struct v4l2_async_notifier *
                }
 
                /* match cannot be NULL here */
-               if (match(sd->dev, asd))
+               if (match(sd, asd))
                        return asd;
        }
 
@@ -266,6 +273,14 @@ int v4l2_async_register_subdev(struct v4l2_subdev *sd)
 {
        struct v4l2_async_notifier *notifier;
 
+       /*
+        * No reference taken. The reference is held by the device
+        * (struct v4l2_subdev.dev), and an async sub-device does not
+        * exist independently of the device at any point in time.
+        */
+       if (!sd->of_node && sd->dev)
+               sd->of_node = sd->dev->of_node;
+
        mutex_lock(&list_lock);
 
        INIT_LIST_HEAD(&sd->async_list);
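With matching now keyed on the sub-device, a V4L2_ASYNC_MATCH_CUSTOM entry with a NULL callback is treated as match-always by the match_custom() wrapper above. A minimal, hypothetical notifier entry illustrating that (field values are placeholders):

/* Hypothetical example: accept whichever sub-device registers first. */
static struct v4l2_async_subdev example_asd = {
        .match_type             = V4L2_ASYNC_MATCH_CUSTOM,
        .match.custom.match     = NULL, /* NULL => match always */
};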
diff --git a/drivers/media/v4l2-core/v4l2-flash-led-class.c b/drivers/media/v4l2-core/v4l2-flash-led-class.c
new file mode 100644 (file)
index 0000000..5bdfb8d
--- /dev/null
@@ -0,0 +1,710 @@
+/*
+ * V4L2 flash LED sub-device registration helpers.
+ *
+ *     Copyright (C) 2015 Samsung Electronics Co., Ltd
+ *     Author: Jacek Anaszewski <j.anaszewski@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/led-class-flash.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <media/v4l2-flash-led-class.h>
+
+#define has_flash_op(v4l2_flash, op)                           \
+       (v4l2_flash && v4l2_flash->ops->op)
+
+#define call_flash_op(v4l2_flash, op, arg)                     \
+               (has_flash_op(v4l2_flash, op) ?                 \
+                       v4l2_flash->ops->op(v4l2_flash, arg) :  \
+                       -EINVAL)
+
+enum ctrl_init_data_id {
+       LED_MODE,
+       TORCH_INTENSITY,
+       FLASH_INTENSITY,
+       INDICATOR_INTENSITY,
+       FLASH_TIMEOUT,
+       STROBE_SOURCE,
+       /*
+        * Only above values are applicable to
+        * the 'ctrls' array in the struct v4l2_flash.
+        */
+       FLASH_STROBE,
+       STROBE_STOP,
+       STROBE_STATUS,
+       FLASH_FAULT,
+       NUM_FLASH_CTRLS,
+};
+
+static enum led_brightness __intensity_to_led_brightness(
+                                       struct v4l2_ctrl *ctrl, s32 intensity)
+{
+       intensity -= ctrl->minimum;
+       intensity /= (u32) ctrl->step;
+
+       /*
+        * Indicator LEDs, unlike torch LEDs, are turned on/off based only
+        * on the state of the V4L2_CID_FLASH_INDICATOR_INTENSITY control.
+        * Therefore it must be possible to set it to level 0, which in
+        * the LED subsystem corresponds to the LED_OFF state.
+        */
+       if (ctrl->minimum)
+               ++intensity;
+
+       return intensity;
+}
+
+static s32 __led_brightness_to_intensity(struct v4l2_ctrl *ctrl,
+                                        enum led_brightness brightness)
+{
+       /*
+        * Indicator LEDs, unlike torch LEDs, are turned on/off based only
+        * on the state of the V4L2_CID_FLASH_INDICATOR_INTENSITY control.
+        * Do not decrement brightness read from the LED subsystem for
+        * indicator LED as it may equal 0. For torch LEDs this function
+        * is called only when V4L2_FLASH_LED_MODE_TORCH is set and the
+        * brightness read is guaranteed to be greater than 0. In the mode
+        * V4L2_FLASH_LED_MODE_NONE the cached torch intensity value is used.
+        */
+       if (ctrl->id != V4L2_CID_FLASH_INDICATOR_INTENSITY)
+               --brightness;
+
+       return (brightness * ctrl->step) + ctrl->minimum;
+}
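/* Worked example with illustrative values only: for a torch control with
 * minimum = 15625 and step = 15625, an intensity of 46875 maps to
 * (46875 - 15625) / 15625 = 2, then +1 because minimum != 0, giving
 * LED brightness 3; __led_brightness_to_intensity() reverses this:
 * (3 - 1) * 15625 + 15625 = 46875.
 */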
+
+static void v4l2_flash_set_led_brightness(struct v4l2_flash *v4l2_flash,
+                                       struct v4l2_ctrl *ctrl)
+{
+       struct v4l2_ctrl **ctrls = v4l2_flash->ctrls;
+       enum led_brightness brightness;
+
+       if (has_flash_op(v4l2_flash, intensity_to_led_brightness))
+               brightness = call_flash_op(v4l2_flash,
+                                       intensity_to_led_brightness,
+                                       ctrl->val);
+       else
+               brightness = __intensity_to_led_brightness(ctrl, ctrl->val);
+       /*
+        * If a LED Flash class driver provides ops for custom
+        * brightness <-> intensity conversion, it must also have defined the
+        * related v4l2 control with step == 1. In that case a backward
+        * conversion from led brightness to v4l2 intensity is required to
+        * find out the aligned intensity value.
+        */
+       if (has_flash_op(v4l2_flash, led_brightness_to_intensity))
+               ctrl->val = call_flash_op(v4l2_flash,
+                                       led_brightness_to_intensity,
+                                       brightness);
+
+       if (ctrl == ctrls[TORCH_INTENSITY]) {
+               if (ctrls[LED_MODE]->val != V4L2_FLASH_LED_MODE_TORCH)
+                       return;
+
+               led_set_brightness(&v4l2_flash->fled_cdev->led_cdev,
+                                       brightness);
+       } else {
+               led_set_brightness(&v4l2_flash->iled_cdev->led_cdev,
+                                       brightness);
+       }
+}
+
+static int v4l2_flash_update_led_brightness(struct v4l2_flash *v4l2_flash,
+                                       struct v4l2_ctrl *ctrl)
+{
+       struct v4l2_ctrl **ctrls = v4l2_flash->ctrls;
+       struct led_classdev *led_cdev;
+       int ret;
+
+       if (ctrl == ctrls[TORCH_INTENSITY]) {
+               /*
+                * Update torch brightness only if in TORCH_MODE. In other modes
+                * torch led is turned off, which would spuriously inform the
+                * user space that V4L2_CID_FLASH_TORCH_INTENSITY control value
+                * has changed to 0.
+                */
+               if (ctrls[LED_MODE]->val != V4L2_FLASH_LED_MODE_TORCH)
+                       return 0;
+               led_cdev = &v4l2_flash->fled_cdev->led_cdev;
+       } else {
+               led_cdev = &v4l2_flash->iled_cdev->led_cdev;
+       }
+
+       ret = led_update_brightness(led_cdev);
+       if (ret < 0)
+               return ret;
+
+       if (has_flash_op(v4l2_flash, led_brightness_to_intensity))
+               ctrl->val = call_flash_op(v4l2_flash,
+                                               led_brightness_to_intensity,
+                                               led_cdev->brightness);
+       else
+               ctrl->val = __led_brightness_to_intensity(ctrl,
+                                               led_cdev->brightness);
+
+       return 0;
+}
+
+static int v4l2_flash_g_volatile_ctrl(struct v4l2_ctrl *c)
+{
+       struct v4l2_flash *v4l2_flash = v4l2_ctrl_to_v4l2_flash(c);
+       struct led_classdev_flash *fled_cdev = v4l2_flash->fled_cdev;
+       bool is_strobing;
+       int ret;
+
+       switch (c->id) {
+       case V4L2_CID_FLASH_TORCH_INTENSITY:
+       case V4L2_CID_FLASH_INDICATOR_INTENSITY:
+               return v4l2_flash_update_led_brightness(v4l2_flash, c);
+       case V4L2_CID_FLASH_INTENSITY:
+               ret = led_update_flash_brightness(fled_cdev);
+               if (ret < 0)
+                       return ret;
+               /*
+                * No conversion is needed as LED Flash class also uses
+                * microamperes for flash intensity units.
+                */
+               c->val = fled_cdev->brightness.val;
+               return 0;
+       case V4L2_CID_FLASH_STROBE_STATUS:
+               ret = led_get_flash_strobe(fled_cdev, &is_strobing);
+               if (ret < 0)
+                       return ret;
+               c->val = is_strobing;
+               return 0;
+       case V4L2_CID_FLASH_FAULT:
+               /* LED faults map directly to V4L2 flash faults */
+               return led_get_flash_fault(fled_cdev, &c->val);
+       default:
+               return -EINVAL;
+       }
+}
+
+static bool __software_strobe_mode_inactive(struct v4l2_ctrl **ctrls)
+{
+       return ((ctrls[LED_MODE]->val != V4L2_FLASH_LED_MODE_FLASH) ||
+               (ctrls[STROBE_SOURCE] && (ctrls[STROBE_SOURCE]->val !=
+                               V4L2_FLASH_STROBE_SOURCE_SOFTWARE)));
+}
+
+static int v4l2_flash_s_ctrl(struct v4l2_ctrl *c)
+{
+       struct v4l2_flash *v4l2_flash = v4l2_ctrl_to_v4l2_flash(c);
+       struct led_classdev_flash *fled_cdev = v4l2_flash->fled_cdev;
+       struct led_classdev *led_cdev = &fled_cdev->led_cdev;
+       struct v4l2_ctrl **ctrls = v4l2_flash->ctrls;
+       bool external_strobe;
+       int ret = 0;
+
+       switch (c->id) {
+       case V4L2_CID_FLASH_LED_MODE:
+               switch (c->val) {
+               case V4L2_FLASH_LED_MODE_NONE:
+                       led_set_brightness(led_cdev, LED_OFF);
+                       return led_set_flash_strobe(fled_cdev, false);
+               case V4L2_FLASH_LED_MODE_FLASH:
+                       /* Turn the torch LED off */
+                       led_set_brightness(led_cdev, LED_OFF);
+                       if (ctrls[STROBE_SOURCE]) {
+                               external_strobe = (ctrls[STROBE_SOURCE]->val ==
+                                       V4L2_FLASH_STROBE_SOURCE_EXTERNAL);
+
+                               ret = call_flash_op(v4l2_flash,
+                                               external_strobe_set,
+                                               external_strobe);
+                       }
+                       return ret;
+               case V4L2_FLASH_LED_MODE_TORCH:
+                       if (ctrls[STROBE_SOURCE]) {
+                               ret = call_flash_op(v4l2_flash,
+                                               external_strobe_set,
+                                               false);
+                               if (ret < 0)
+                                       return ret;
+                       }
+                       /* Stop flash strobing */
+                       ret = led_set_flash_strobe(fled_cdev, false);
+                       if (ret < 0)
+                               return ret;
+
+                       v4l2_flash_set_led_brightness(v4l2_flash,
+                                                       ctrls[TORCH_INTENSITY]);
+                       return 0;
+               }
+               break;
+       case V4L2_CID_FLASH_STROBE_SOURCE:
+               external_strobe = (c->val == V4L2_FLASH_STROBE_SOURCE_EXTERNAL);
+               /*
+                * For some hardware arrangements setting strobe source may
+                * affect torch mode. Therefore, if not in the flash mode,
+                * cache only this setting. It will be applied upon switching
+                * to flash mode.
+                */
+               if (ctrls[LED_MODE]->val != V4L2_FLASH_LED_MODE_FLASH)
+                       return 0;
+
+               return call_flash_op(v4l2_flash, external_strobe_set,
+                                       external_strobe);
+       case V4L2_CID_FLASH_STROBE:
+               if (__software_strobe_mode_inactive(ctrls))
+                       return -EBUSY;
+               return led_set_flash_strobe(fled_cdev, true);
+       case V4L2_CID_FLASH_STROBE_STOP:
+               if (__software_strobe_mode_inactive(ctrls))
+                       return -EBUSY;
+               return led_set_flash_strobe(fled_cdev, false);
+       case V4L2_CID_FLASH_TIMEOUT:
+               /*
+                * No conversion is needed as LED Flash class also uses
+                * microseconds for flash timeout units.
+                */
+               return led_set_flash_timeout(fled_cdev, c->val);
+       case V4L2_CID_FLASH_INTENSITY:
+               /*
+                * No conversion is needed as LED Flash class also uses
+                * microamperes for flash intensity units.
+                */
+               return led_set_flash_brightness(fled_cdev, c->val);
+       case V4L2_CID_FLASH_TORCH_INTENSITY:
+       case V4L2_CID_FLASH_INDICATOR_INTENSITY:
+               v4l2_flash_set_led_brightness(v4l2_flash, c);
+               return 0;
+       }
+
+       return -EINVAL;
+}
+
+static const struct v4l2_ctrl_ops v4l2_flash_ctrl_ops = {
+       .g_volatile_ctrl = v4l2_flash_g_volatile_ctrl,
+       .s_ctrl = v4l2_flash_s_ctrl,
+};
+
+static void __lfs_to_v4l2_ctrl_config(struct led_flash_setting *s,
+                               struct v4l2_ctrl_config *c)
+{
+       c->min = s->min;
+       c->max = s->max;
+       c->step = s->step;
+       c->def = s->val;
+}
+
+static void __fill_ctrl_init_data(struct v4l2_flash *v4l2_flash,
+                         struct v4l2_flash_config *flash_cfg,
+                         struct v4l2_flash_ctrl_data *ctrl_init_data)
+{
+       struct led_classdev_flash *fled_cdev = v4l2_flash->fled_cdev;
+       const struct led_flash_ops *fled_cdev_ops = fled_cdev->ops;
+       struct led_classdev *led_cdev = &fled_cdev->led_cdev;
+       struct v4l2_ctrl_config *ctrl_cfg;
+       u32 mask;
+
+       /* Init FLASH_FAULT ctrl data */
+       if (flash_cfg->flash_faults) {
+               ctrl_init_data[FLASH_FAULT].cid = V4L2_CID_FLASH_FAULT;
+               ctrl_cfg = &ctrl_init_data[FLASH_FAULT].config;
+               ctrl_cfg->id = V4L2_CID_FLASH_FAULT;
+               ctrl_cfg->max = flash_cfg->flash_faults;
+               ctrl_cfg->flags = V4L2_CTRL_FLAG_VOLATILE |
+                                 V4L2_CTRL_FLAG_READ_ONLY;
+       }
+
+       /* Init FLASH_LED_MODE ctrl data */
+       mask = 1 << V4L2_FLASH_LED_MODE_NONE |
+              1 << V4L2_FLASH_LED_MODE_TORCH;
+       if (led_cdev->flags & LED_DEV_CAP_FLASH)
+               mask |= 1 << V4L2_FLASH_LED_MODE_FLASH;
+
+       ctrl_init_data[LED_MODE].cid = V4L2_CID_FLASH_LED_MODE;
+       ctrl_cfg = &ctrl_init_data[LED_MODE].config;
+       ctrl_cfg->id = V4L2_CID_FLASH_LED_MODE;
+       ctrl_cfg->max = V4L2_FLASH_LED_MODE_TORCH;
+       ctrl_cfg->menu_skip_mask = ~mask;
+       ctrl_cfg->def = V4L2_FLASH_LED_MODE_NONE;
+       ctrl_cfg->flags = 0;
+
+       /* Init TORCH_INTENSITY ctrl data */
+       ctrl_init_data[TORCH_INTENSITY].cid = V4L2_CID_FLASH_TORCH_INTENSITY;
+       ctrl_cfg = &ctrl_init_data[TORCH_INTENSITY].config;
+       __lfs_to_v4l2_ctrl_config(&flash_cfg->torch_intensity, ctrl_cfg);
+       ctrl_cfg->id = V4L2_CID_FLASH_TORCH_INTENSITY;
+       ctrl_cfg->flags = V4L2_CTRL_FLAG_VOLATILE |
+                         V4L2_CTRL_FLAG_EXECUTE_ON_WRITE;
+
+       /* Init INDICATOR_INTENSITY ctrl data */
+       if (v4l2_flash->iled_cdev) {
+               ctrl_init_data[INDICATOR_INTENSITY].cid =
+                                       V4L2_CID_FLASH_INDICATOR_INTENSITY;
+               ctrl_cfg = &ctrl_init_data[INDICATOR_INTENSITY].config;
+               __lfs_to_v4l2_ctrl_config(&flash_cfg->indicator_intensity,
+                                         ctrl_cfg);
+               ctrl_cfg->id = V4L2_CID_FLASH_INDICATOR_INTENSITY;
+               ctrl_cfg->min = 0;
+               ctrl_cfg->flags = V4L2_CTRL_FLAG_VOLATILE |
+                                 V4L2_CTRL_FLAG_EXECUTE_ON_WRITE;
+       }
+
+       if (!(led_cdev->flags & LED_DEV_CAP_FLASH))
+               return;
+
+       /* Init FLASH_STROBE ctrl data */
+       ctrl_init_data[FLASH_STROBE].cid = V4L2_CID_FLASH_STROBE;
+       ctrl_cfg = &ctrl_init_data[FLASH_STROBE].config;
+       ctrl_cfg->id = V4L2_CID_FLASH_STROBE;
+
+       /* Init STROBE_STOP ctrl data */
+       ctrl_init_data[STROBE_STOP].cid = V4L2_CID_FLASH_STROBE_STOP;
+       ctrl_cfg = &ctrl_init_data[STROBE_STOP].config;
+       ctrl_cfg->id = V4L2_CID_FLASH_STROBE_STOP;
+
+       /* Init FLASH_STROBE_SOURCE ctrl data */
+       if (flash_cfg->has_external_strobe) {
+               mask = (1 << V4L2_FLASH_STROBE_SOURCE_SOFTWARE) |
+                      (1 << V4L2_FLASH_STROBE_SOURCE_EXTERNAL);
+               ctrl_init_data[STROBE_SOURCE].cid =
+                                       V4L2_CID_FLASH_STROBE_SOURCE;
+               ctrl_cfg = &ctrl_init_data[STROBE_SOURCE].config;
+               ctrl_cfg->id = V4L2_CID_FLASH_STROBE_SOURCE;
+               ctrl_cfg->max = V4L2_FLASH_STROBE_SOURCE_EXTERNAL;
+               ctrl_cfg->menu_skip_mask = ~mask;
+               ctrl_cfg->def = V4L2_FLASH_STROBE_SOURCE_SOFTWARE;
+       }
+
+       /* Init STROBE_STATUS ctrl data */
+       if (fled_cdev_ops->strobe_get) {
+               ctrl_init_data[STROBE_STATUS].cid =
+                                       V4L2_CID_FLASH_STROBE_STATUS;
+               ctrl_cfg = &ctrl_init_data[STROBE_STATUS].config;
+               ctrl_cfg->id = V4L2_CID_FLASH_STROBE_STATUS;
+               ctrl_cfg->flags = V4L2_CTRL_FLAG_VOLATILE |
+                                 V4L2_CTRL_FLAG_READ_ONLY;
+       }
+
+       /* Init FLASH_TIMEOUT ctrl data */
+       if (fled_cdev_ops->timeout_set) {
+               ctrl_init_data[FLASH_TIMEOUT].cid = V4L2_CID_FLASH_TIMEOUT;
+               ctrl_cfg = &ctrl_init_data[FLASH_TIMEOUT].config;
+               __lfs_to_v4l2_ctrl_config(&fled_cdev->timeout, ctrl_cfg);
+               ctrl_cfg->id = V4L2_CID_FLASH_TIMEOUT;
+       }
+
+       /* Init FLASH_INTENSITY ctrl data */
+       if (fled_cdev_ops->flash_brightness_set) {
+               ctrl_init_data[FLASH_INTENSITY].cid = V4L2_CID_FLASH_INTENSITY;
+               ctrl_cfg = &ctrl_init_data[FLASH_INTENSITY].config;
+               __lfs_to_v4l2_ctrl_config(&fled_cdev->brightness, ctrl_cfg);
+               ctrl_cfg->id = V4L2_CID_FLASH_INTENSITY;
+               ctrl_cfg->flags = V4L2_CTRL_FLAG_VOLATILE |
+                                 V4L2_CTRL_FLAG_EXECUTE_ON_WRITE;
+       }
+}
+
+static int v4l2_flash_init_controls(struct v4l2_flash *v4l2_flash,
+                               struct v4l2_flash_config *flash_cfg)
+
+{
+       struct v4l2_flash_ctrl_data *ctrl_init_data;
+       struct v4l2_ctrl *ctrl;
+       struct v4l2_ctrl_config *ctrl_cfg;
+       int i, ret, num_ctrls = 0;
+
+       v4l2_flash->ctrls = devm_kzalloc(v4l2_flash->sd.dev,
+                                       sizeof(*v4l2_flash->ctrls) *
+                                       (STROBE_SOURCE + 1), GFP_KERNEL);
+       if (!v4l2_flash->ctrls)
+               return -ENOMEM;
+
+       /* allocate memory dynamically so as not to exceed stack frame size */
+       ctrl_init_data = kcalloc(NUM_FLASH_CTRLS, sizeof(*ctrl_init_data),
+                                       GFP_KERNEL);
+       if (!ctrl_init_data)
+               return -ENOMEM;
+
+       __fill_ctrl_init_data(v4l2_flash, flash_cfg, ctrl_init_data);
+
+       for (i = 0; i < NUM_FLASH_CTRLS; ++i)
+               if (ctrl_init_data[i].cid)
+                       ++num_ctrls;
+
+       v4l2_ctrl_handler_init(&v4l2_flash->hdl, num_ctrls);
+
+       for (i = 0; i < NUM_FLASH_CTRLS; ++i) {
+               ctrl_cfg = &ctrl_init_data[i].config;
+               if (!ctrl_init_data[i].cid)
+                       continue;
+
+               if (ctrl_cfg->id == V4L2_CID_FLASH_LED_MODE ||
+                   ctrl_cfg->id == V4L2_CID_FLASH_STROBE_SOURCE)
+                       ctrl = v4l2_ctrl_new_std_menu(&v4l2_flash->hdl,
+                                               &v4l2_flash_ctrl_ops,
+                                               ctrl_cfg->id,
+                                               ctrl_cfg->max,
+                                               ctrl_cfg->menu_skip_mask,
+                                               ctrl_cfg->def);
+               else
+                       ctrl = v4l2_ctrl_new_std(&v4l2_flash->hdl,
+                                               &v4l2_flash_ctrl_ops,
+                                               ctrl_cfg->id,
+                                               ctrl_cfg->min,
+                                               ctrl_cfg->max,
+                                               ctrl_cfg->step,
+                                               ctrl_cfg->def);
+
+               if (ctrl)
+                       ctrl->flags |= ctrl_cfg->flags;
+
+               if (i <= STROBE_SOURCE)
+                       v4l2_flash->ctrls[i] = ctrl;
+       }
+
+       kfree(ctrl_init_data);
+
+       if (v4l2_flash->hdl.error) {
+               ret = v4l2_flash->hdl.error;
+               goto error_free_handler;
+       }
+
+       v4l2_ctrl_handler_setup(&v4l2_flash->hdl);
+
+       v4l2_flash->sd.ctrl_handler = &v4l2_flash->hdl;
+
+       return 0;
+
+error_free_handler:
+       v4l2_ctrl_handler_free(&v4l2_flash->hdl);
+       return ret;
+}
+
+static int __sync_device_with_v4l2_controls(struct v4l2_flash *v4l2_flash)
+{
+       struct led_classdev_flash *fled_cdev = v4l2_flash->fled_cdev;
+       struct v4l2_ctrl **ctrls = v4l2_flash->ctrls;
+       int ret = 0;
+
+       v4l2_flash_set_led_brightness(v4l2_flash, ctrls[TORCH_INTENSITY]);
+
+       if (ctrls[INDICATOR_INTENSITY])
+               v4l2_flash_set_led_brightness(v4l2_flash,
+                                               ctrls[INDICATOR_INTENSITY]);
+
+       if (ctrls[FLASH_TIMEOUT]) {
+               ret = led_set_flash_timeout(fled_cdev,
+                                       ctrls[FLASH_TIMEOUT]->val);
+               if (ret < 0)
+                       return ret;
+       }
+
+       if (ctrls[FLASH_INTENSITY]) {
+               ret = led_set_flash_brightness(fled_cdev,
+                                       ctrls[FLASH_INTENSITY]->val);
+               if (ret < 0)
+                       return ret;
+       }
+
+       /*
+        * For some hardware arrangements setting strobe source may affect
+        * mode. In the torch mode case it will be synchronized upon switching
+        * mode. For torch mode case it will get synchronized upon switching
+        * to flash mode.
+        */
+       if (ctrls[STROBE_SOURCE] &&
+           ctrls[LED_MODE]->val != V4L2_FLASH_LED_MODE_TORCH)
+               ret = call_flash_op(v4l2_flash, external_strobe_set,
+                                       ctrls[STROBE_SOURCE]->val);
+
+       return ret;
+}
+
+/*
+ * V4L2 subdev internal operations
+ */
+
+static int v4l2_flash_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
+{
+       struct v4l2_flash *v4l2_flash = v4l2_subdev_to_v4l2_flash(sd);
+       struct led_classdev_flash *fled_cdev = v4l2_flash->fled_cdev;
+       struct led_classdev *led_cdev = &fled_cdev->led_cdev;
+       struct led_classdev_flash *iled_cdev = v4l2_flash->iled_cdev;
+       struct led_classdev *led_cdev_ind = NULL;
+       int ret = 0;
+
+       if (!v4l2_fh_is_singular(&fh->vfh))
+               return 0;
+
+       mutex_lock(&led_cdev->led_access);
+
+       led_sysfs_disable(led_cdev);
+       led_trigger_remove(led_cdev);
+
+       mutex_unlock(&led_cdev->led_access);
+
+       if (iled_cdev) {
+               led_cdev_ind = &iled_cdev->led_cdev;
+
+               mutex_lock(&led_cdev_ind->led_access);
+
+               led_sysfs_disable(led_cdev_ind);
+               led_trigger_remove(led_cdev_ind);
+
+               mutex_unlock(&led_cdev_ind->led_access);
+       }
+
+       ret = __sync_device_with_v4l2_controls(v4l2_flash);
+       if (ret < 0)
+               goto out_sync_device;
+
+       return 0;
+out_sync_device:
+       mutex_lock(&led_cdev->led_access);
+       led_sysfs_enable(led_cdev);
+       mutex_unlock(&led_cdev->led_access);
+
+       if (led_cdev_ind) {
+               mutex_lock(&led_cdev_ind->led_access);
+               led_sysfs_enable(led_cdev_ind);
+               mutex_unlock(&led_cdev_ind->led_access);
+       }
+
+       return ret;
+}
+
+static int v4l2_flash_close(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
+{
+       struct v4l2_flash *v4l2_flash = v4l2_subdev_to_v4l2_flash(sd);
+       struct led_classdev_flash *fled_cdev = v4l2_flash->fled_cdev;
+       struct led_classdev *led_cdev = &fled_cdev->led_cdev;
+       struct led_classdev_flash *iled_cdev = v4l2_flash->iled_cdev;
+       int ret = 0;
+
+       if (!v4l2_fh_is_singular(&fh->vfh))
+               return 0;
+
+       mutex_lock(&led_cdev->led_access);
+
+       if (v4l2_flash->ctrls[STROBE_SOURCE])
+               ret = v4l2_ctrl_s_ctrl(v4l2_flash->ctrls[STROBE_SOURCE],
+                               V4L2_FLASH_STROBE_SOURCE_SOFTWARE);
+       led_sysfs_enable(led_cdev);
+
+       mutex_unlock(&led_cdev->led_access);
+
+       if (iled_cdev) {
+               struct led_classdev *led_cdev_ind = &iled_cdev->led_cdev;
+
+               mutex_lock(&led_cdev_ind->led_access);
+               led_sysfs_enable(led_cdev_ind);
+               mutex_unlock(&led_cdev_ind->led_access);
+       }
+
+       return ret;
+}
+
+static const struct v4l2_subdev_internal_ops v4l2_flash_subdev_internal_ops = {
+       .open = v4l2_flash_open,
+       .close = v4l2_flash_close,
+};
+
+static const struct v4l2_subdev_core_ops v4l2_flash_core_ops = {
+       .queryctrl = v4l2_subdev_queryctrl,
+       .querymenu = v4l2_subdev_querymenu,
+};
+
+static const struct v4l2_subdev_ops v4l2_flash_subdev_ops = {
+       .core = &v4l2_flash_core_ops,
+};
+
+struct v4l2_flash *v4l2_flash_init(
+       struct device *dev, struct device_node *of_node,
+       struct led_classdev_flash *fled_cdev,
+       struct led_classdev_flash *iled_cdev,
+       const struct v4l2_flash_ops *ops,
+       struct v4l2_flash_config *config)
+{
+       struct v4l2_flash *v4l2_flash;
+       struct led_classdev *led_cdev;
+       struct v4l2_subdev *sd;
+       int ret;
+
+       if (!fled_cdev || !ops || !config)
+               return ERR_PTR(-EINVAL);
+
+       led_cdev = &fled_cdev->led_cdev;
+
+       v4l2_flash = devm_kzalloc(led_cdev->dev, sizeof(*v4l2_flash),
+                                       GFP_KERNEL);
+       if (!v4l2_flash)
+               return ERR_PTR(-ENOMEM);
+
+       sd = &v4l2_flash->sd;
+       v4l2_flash->fled_cdev = fled_cdev;
+       v4l2_flash->iled_cdev = iled_cdev;
+       v4l2_flash->ops = ops;
+       sd->dev = dev;
+       sd->of_node = of_node;
+       v4l2_subdev_init(sd, &v4l2_flash_subdev_ops);
+       sd->internal_ops = &v4l2_flash_subdev_internal_ops;
+       sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+       strlcpy(sd->name, config->dev_name, sizeof(sd->name));
+
+       ret = media_entity_init(&sd->entity, 0, NULL, 0);
+       if (ret < 0)
+               return ERR_PTR(ret);
+
+       sd->entity.type = MEDIA_ENT_T_V4L2_SUBDEV_FLASH;
+
+       ret = v4l2_flash_init_controls(v4l2_flash, config);
+       if (ret < 0)
+               goto err_init_controls;
+
+       if (sd->of_node)
+               of_node_get(sd->of_node);
+       else
+               of_node_get(led_cdev->dev->of_node);
+
+       ret = v4l2_async_register_subdev(sd);
+       if (ret < 0)
+               goto err_async_register_sd;
+
+       return v4l2_flash;
+
+err_async_register_sd:
+       of_node_put(led_cdev->dev->of_node);
+       v4l2_ctrl_handler_free(sd->ctrl_handler);
+err_init_controls:
+       media_entity_cleanup(&sd->entity);
+
+       return ERR_PTR(ret);
+}
+EXPORT_SYMBOL_GPL(v4l2_flash_init);
+
+void v4l2_flash_release(struct v4l2_flash *v4l2_flash)
+{
+       struct v4l2_subdev *sd;
+       struct led_classdev *led_cdev;
+
+       if (IS_ERR_OR_NULL(v4l2_flash))
+               return;
+
+       sd = &v4l2_flash->sd;
+       led_cdev = &v4l2_flash->fled_cdev->led_cdev;
+
+       v4l2_async_unregister_subdev(sd);
+
+       if (sd->of_node)
+               of_node_put(sd->of_node);
+       else
+               of_node_put(led_cdev->dev->of_node);
+
+       v4l2_ctrl_handler_free(sd->ctrl_handler);
+       media_entity_cleanup(&sd->entity);
+}
+EXPORT_SYMBOL_GPL(v4l2_flash_release);
+
+MODULE_AUTHOR("Jacek Anaszewski <j.anaszewski@samsung.com>");
+MODULE_DESCRIPTION("V4L2 Flash sub-device helpers");
+MODULE_LICENSE("GPL v2");
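A LED flash class driver registers itself with this helper at probe time; a minimal, hypothetical caller sketch based only on the signatures above (the ops and config contents are placeholders, not a real driver):

#include <media/v4l2-flash-led-class.h>

/* Hypothetical sketch -- names and ops are placeholders. */
static const struct v4l2_flash_ops example_flash_ops = {
        /* .external_strobe_set = ..., optional conversion ops, ... */
};

static int example_register_v4l2_flash(struct device *dev,
                                        struct led_classdev_flash *fled_cdev)
{
        struct v4l2_flash_config cfg = {};
        struct v4l2_flash *v4l2_flash;

        /* fill cfg.dev_name, cfg.torch_intensity, cfg.flash_faults and
         * cfg.has_external_strobe as appropriate for the hardware */

        v4l2_flash = v4l2_flash_init(dev, dev->of_node, fled_cdev,
                                     NULL /* no indicator LED */,
                                     &example_flash_ops, &cfg);
        if (IS_ERR(v4l2_flash))
                return PTR_ERR(v4l2_flash);

        /* keep the handle and call v4l2_flash_release() on remove */
        return 0;
}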
index aeabaa5aedf7d2cbf4a76fa15b2547349adc0dd5..48db922075e2adbcec880950edf7a9700827218b 100644 (file)
@@ -419,10 +419,10 @@ static int jmb38x_ms_issue_cmd(struct memstick_host *msh)
        }
 
        if (host->cmd_flags & DMA_DATA) {
-               if (1 != pci_map_sg(host->chip->pdev, &host->req->sg, 1,
+               if (1 != dma_map_sg(&host->chip->pdev->dev, &host->req->sg, 1,
                                    host->req->data_dir == READ
-                                   ? PCI_DMA_FROMDEVICE
-                                   : PCI_DMA_TODEVICE)) {
+                                   ? DMA_FROM_DEVICE
+                                   : DMA_TO_DEVICE)) {
                        host->req->error = -ENOMEM;
                        return host->req->error;
                }
@@ -487,9 +487,9 @@ static void jmb38x_ms_complete_cmd(struct memstick_host *msh, int last)
        writel(0, host->addr + DMA_CONTROL);
 
        if (host->cmd_flags & DMA_DATA) {
-               pci_unmap_sg(host->chip->pdev, &host->req->sg, 1,
+               dma_unmap_sg(&host->chip->pdev->dev, &host->req->sg, 1,
                             host->req->data_dir == READ
-                            ? PCI_DMA_FROMDEVICE : PCI_DMA_TODEVICE);
+                            ? DMA_FROM_DEVICE : DMA_TO_DEVICE);
        } else {
                t_val = readl(host->addr + INT_STATUS_ENABLE);
                if (host->req->data_dir == READ)
@@ -925,7 +925,7 @@ static int jmb38x_ms_probe(struct pci_dev *pdev,
        int pci_dev_busy = 0;
        int rc, cnt;
 
-       rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+       rc = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
        if (rc)
                return rc;
 
index e2a4f5f415b2be30d0b223d7fac1e423dec411cf..ef09ba0289d723698b3ce7bfc06ff7cefd59cc96 100644 (file)
@@ -754,7 +754,7 @@ static int r592_probe(struct pci_dev *pdev, const struct pci_device_id *id)
                goto error2;
 
        pci_set_master(pdev);
-       error = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+       error = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
        if (error)
                goto error3;
 
@@ -787,8 +787,8 @@ static int r592_probe(struct pci_dev *pdev, const struct pci_device_id *id)
        }
 
        /* This is just a precaution, so don't fail */
-       dev->dummy_dma_page = pci_alloc_consistent(pdev, PAGE_SIZE,
-               &dev->dummy_dma_page_physical_address);
+       dev->dummy_dma_page = dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
+               &dev->dummy_dma_page_physical_address, GFP_KERNEL);
        r592_stop_dma(dev , 0);
 
        if (request_irq(dev->irq, &r592_irq, IRQF_SHARED,
@@ -805,7 +805,7 @@ error7:
        free_irq(dev->irq, dev);
 error6:
        if (dev->dummy_dma_page)
-               pci_free_consistent(pdev, PAGE_SIZE, dev->dummy_dma_page,
+               dma_free_coherent(&pdev->dev, PAGE_SIZE, dev->dummy_dma_page,
                        dev->dummy_dma_page_physical_address);
 
        kthread_stop(dev->io_thread);
@@ -845,7 +845,7 @@ static void r592_remove(struct pci_dev *pdev)
        memstick_free_host(dev->host);
 
        if (dev->dummy_dma_page)
-               pci_free_consistent(pdev, PAGE_SIZE, dev->dummy_dma_page,
+               dma_free_coherent(&pdev->dev, PAGE_SIZE, dev->dummy_dma_page,
                        dev->dummy_dma_page_physical_address);
 }
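The jmb38x_ms and r592 hunks above are a mechanical switch from the legacy pci_* DMA wrappers to the generic DMA API; the assumed correspondences are:

/* Assumed equivalences behind these conversions:
 *   pci_set_dma_mask(pdev, m)          -> dma_set_mask(&pdev->dev, m)
 *   pci_map_sg / pci_unmap_sg          -> dma_map_sg / dma_unmap_sg on &pdev->dev
 *   pci_alloc_consistent(pdev, sz, &h) -> dma_alloc_coherent(&pdev->dev, sz, &h, gfp)
 *   pci_free_consistent                -> dma_free_coherent
 *   PCI_DMA_FROMDEVICE / _TODEVICE     -> DMA_FROM_DEVICE / DMA_TO_DEVICE
 * (the coherent allocation above now passes GFP_KERNEL explicitly)
 */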
 
index 977bd3a3eed01149d9d5ef670cfa175922250232..120df5c08741a1b0f2fe53ca592fae2d7fd94ee2 100644 (file)
@@ -417,9 +417,8 @@ static int __init asic3_irq_probe(struct platform_device *pdev)
        asic3_write_register(asic, ASIC3_OFFSET(INTR, INT_MASK),
                             ASIC3_INTMASK_GINTMASK);
 
-       irq_set_chained_handler(asic->irq_nr, asic3_irq_demux);
+       irq_set_chained_handler_and_data(asic->irq_nr, asic3_irq_demux, asic);
        irq_set_irq_type(asic->irq_nr, IRQ_TYPE_EDGE_RISING);
-       irq_set_handler_data(asic->irq_nr, asic);
 
        return 0;
 }
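irq_set_chained_handler_and_data() installs the chained handler and its handler data in one step; a sketch of the equivalence with the removed pair of calls:

/* Roughly: irq_set_chained_handler_and_data(irq, handler, data)
 * replaces the two-step sequence
 *      irq_set_chained_handler(irq, handler);
 *      irq_set_handler_data(irq, data);
 * closing the window in which the handler could run before its data
 * is set.
 */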
index 4739689d23ad08aec40c4478d57e3fae3a118adb..fb8705fc3aca7c37e421892b377622759834131a 100644 (file)
@@ -115,7 +115,7 @@ static int param_set_axis(const char *val, const struct kernel_param *kp)
        return ret;
 }
 
-static struct kernel_param_ops param_ops_axis = {
+static const struct kernel_param_ops param_ops_axis = {
        .set = param_set_axis,
        .get = param_get_int,
 };
index 357b6ae4d207d785176c22d19a2c572617b8220f..458aa5a09c522816febc6abef20d034a67cfea5d 100644 (file)
@@ -552,22 +552,6 @@ void mei_cl_bus_rx_event(struct mei_cl *cl)
        schedule_work(&device->event_work);
 }
 
-void mei_cl_bus_remove_devices(struct mei_device *dev)
-{
-       struct mei_cl *cl, *next;
-
-       mutex_lock(&dev->device_lock);
-       list_for_each_entry_safe(cl, next, &dev->device_list, device_link) {
-               if (cl->device)
-                       mei_cl_remove_device(cl->device);
-
-               list_del(&cl->device_link);
-               mei_cl_unlink(cl);
-               kfree(cl);
-       }
-       mutex_unlock(&dev->device_lock);
-}
-
 int __init mei_cl_bus_init(void)
 {
        return bus_register(&mei_cl_bus_type);
index 94514b2c7a50277ba4adf8604ce6084b01309967..00c3865ca3b1d42042d78d5558eac547ea8c03a8 100644 (file)
@@ -333,8 +333,6 @@ void mei_stop(struct mei_device *dev)
 
        mei_nfc_host_exit(dev);
 
-       mei_cl_bus_remove_devices(dev);
-
        mutex_lock(&dev->device_lock);
 
        mei_wd_stop(dev);
index b983c4ecad3800d34e21a453e034ed15d84574f4..290ef3037437816114e6d373d4889c6ee2855c1b 100644 (file)
@@ -402,11 +402,12 @@ void mei_nfc_host_exit(struct mei_device *dev)
 
        cldev->priv_data = NULL;
 
-       mutex_lock(&dev->device_lock);
        /* Need to remove the device here
         * since mei_nfc_free will unlink the clients
         */
        mei_cl_remove_device(cldev);
+
+       mutex_lock(&dev->device_lock);
        mei_nfc_free(ndev);
        mutex_unlock(&dev->device_lock);
 }
index 9df2b6801f767c9c0da6904b689299c93d031417..b2b411da297b06e73441f8dd51c8bae0b004bcc0 100644 (file)
@@ -43,6 +43,7 @@
 #include <linux/regulator/consumer.h>
 #include <linux/pinctrl/consumer.h>
 #include <linux/pm_runtime.h>
+#include <linux/pm_wakeirq.h>
 #include <linux/platform_data/hsmmc-omap.h>
 
 /* OMAP HSMMC Host Controller Registers */
@@ -218,7 +219,6 @@ struct omap_hsmmc_host {
        unsigned int            flags;
 #define AUTO_CMD23             (1 << 0)        /* Auto CMD23 support */
 #define HSMMC_SDIO_IRQ_ENABLED (1 << 1)        /* SDIO irq enabled */
-#define HSMMC_WAKE_IRQ_ENABLED (1 << 2)
        struct omap_hsmmc_next  next_data;
        struct  omap_hsmmc_platform_data        *pdata;
 
@@ -1117,22 +1117,6 @@ static irqreturn_t omap_hsmmc_irq(int irq, void *dev_id)
        return IRQ_HANDLED;
 }
 
-static irqreturn_t omap_hsmmc_wake_irq(int irq, void *dev_id)
-{
-       struct omap_hsmmc_host *host = dev_id;
-
-       /* cirq is level triggered, disable to avoid infinite loop */
-       spin_lock(&host->irq_lock);
-       if (host->flags & HSMMC_WAKE_IRQ_ENABLED) {
-               disable_irq_nosync(host->wake_irq);
-               host->flags &= ~HSMMC_WAKE_IRQ_ENABLED;
-       }
-       spin_unlock(&host->irq_lock);
-       pm_request_resume(host->dev); /* no use counter */
-
-       return IRQ_HANDLED;
-}
-
 static void set_sd_bus_power(struct omap_hsmmc_host *host)
 {
        unsigned long i;
@@ -1665,7 +1649,6 @@ static void omap_hsmmc_enable_sdio_irq(struct mmc_host *mmc, int enable)
 
 static int omap_hsmmc_configure_wake_irq(struct omap_hsmmc_host *host)
 {
-       struct mmc_host *mmc = host->mmc;
        int ret;
 
        /*
@@ -1677,11 +1660,7 @@ static int omap_hsmmc_configure_wake_irq(struct omap_hsmmc_host *host)
        if (!host->dev->of_node || !host->wake_irq)
                return -ENODEV;
 
-       /* Prevent auto-enabling of IRQ */
-       irq_set_status_flags(host->wake_irq, IRQ_NOAUTOEN);
-       ret = devm_request_irq(host->dev, host->wake_irq, omap_hsmmc_wake_irq,
-                              IRQF_TRIGGER_LOW | IRQF_ONESHOT,
-                              mmc_hostname(mmc), host);
+       ret = dev_pm_set_dedicated_wake_irq(host->dev, host->wake_irq);
        if (ret) {
                dev_err(mmc_dev(host->mmc), "Unable to request wake IRQ\n");
                goto err;
@@ -1718,7 +1697,7 @@ static int omap_hsmmc_configure_wake_irq(struct omap_hsmmc_host *host)
        return 0;
 
 err_free_irq:
-       devm_free_irq(host->dev, host->wake_irq, host);
+       dev_pm_clear_wake_irq(host->dev);
 err:
        dev_warn(host->dev, "no SDIO IRQ support, falling back to polling\n");
        host->wake_irq = 0;
@@ -2007,6 +1986,7 @@ static int omap_hsmmc_probe(struct platform_device *pdev)
                omap_hsmmc_ops.multi_io_quirk = omap_hsmmc_multi_io_quirk;
        }
 
+       device_init_wakeup(&pdev->dev, true);
        pm_runtime_enable(host->dev);
        pm_runtime_get_sync(host->dev);
        pm_runtime_set_autosuspend_delay(host->dev, MMC_AUTOSUSPEND_DELAY);
@@ -2147,6 +2127,7 @@ err_slot_name:
        if (host->use_reg)
                omap_hsmmc_reg_put(host);
 err_irq:
+       device_init_wakeup(&pdev->dev, false);
        if (host->tx_chan)
                dma_release_channel(host->tx_chan);
        if (host->rx_chan)
@@ -2178,6 +2159,7 @@ static int omap_hsmmc_remove(struct platform_device *pdev)
 
        pm_runtime_put_sync(host->dev);
        pm_runtime_disable(host->dev);
+       device_init_wakeup(&pdev->dev, false);
        if (host->dbclk)
                clk_disable_unprepare(host->dbclk);
 
@@ -2204,11 +2186,6 @@ static int omap_hsmmc_suspend(struct device *dev)
                                OMAP_HSMMC_READ(host->base, HCTL) & ~SDBP);
        }
 
-       /* do not wake up due to sdio irq */
-       if ((host->mmc->caps & MMC_CAP_SDIO_IRQ) &&
-           !(host->mmc->pm_flags & MMC_PM_WAKE_SDIO_IRQ))
-               disable_irq(host->wake_irq);
-
        if (host->dbclk)
                clk_disable_unprepare(host->dbclk);
 
@@ -2233,11 +2210,6 @@ static int omap_hsmmc_resume(struct device *dev)
                omap_hsmmc_conf_bus_power(host);
 
        omap_hsmmc_protect_card(host);
-
-       if ((host->mmc->caps & MMC_CAP_SDIO_IRQ) &&
-           !(host->mmc->pm_flags & MMC_PM_WAKE_SDIO_IRQ))
-               enable_irq(host->wake_irq);
-
        pm_runtime_mark_last_busy(host->dev);
        pm_runtime_put_autosuspend(host->dev);
        return 0;
@@ -2277,10 +2249,6 @@ static int omap_hsmmc_runtime_suspend(struct device *dev)
                }
 
                pinctrl_pm_select_idle_state(dev);
-
-               WARN_ON(host->flags & HSMMC_WAKE_IRQ_ENABLED);
-               enable_irq(host->wake_irq);
-               host->flags |= HSMMC_WAKE_IRQ_ENABLED;
        } else {
                pinctrl_pm_select_idle_state(dev);
        }
@@ -2302,11 +2270,6 @@ static int omap_hsmmc_runtime_resume(struct device *dev)
        spin_lock_irqsave(&host->irq_lock, flags);
        if ((host->mmc->caps & MMC_CAP_SDIO_IRQ) &&
            (host->flags & HSMMC_SDIO_IRQ_ENABLED)) {
-               /* sdio irq flag can't change while in runtime suspend */
-               if (host->flags & HSMMC_WAKE_IRQ_ENABLED) {
-                       disable_irq_nosync(host->wake_irq);
-                       host->flags &= ~HSMMC_WAKE_IRQ_ENABLED;
-               }
 
                pinctrl_pm_select_default_state(host->dev);
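The omap_hsmmc changes above replace a hand-rolled wake IRQ handler with the dedicated wake-irq helpers from <linux/pm_wakeirq.h>; a condensed sketch of the resulting pattern, error handling omitted:

/* Condensed pattern used by the conversion above. */
static int example_setup_wakeirq(struct device *dev, int wake_irq)
{
        device_init_wakeup(dev, true);
        return dev_pm_set_dedicated_wake_irq(dev, wake_irq);
}

static void example_teardown_wakeirq(struct device *dev)
{
        dev_pm_clear_wake_irq(dev);
        device_init_wakeup(dev, false);
}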
 
index 1a92d30689e76e5cd1ce1a22bff6bbac6a1bbbbb..ebf46ad2d513edf5422e10d998043a14b9714747 100644 (file)
@@ -162,7 +162,7 @@ static int __init ubiblock_set_param(const char *val,
        return 0;
 }
 
-static struct kernel_param_ops ubiblock_param_ops = {
+static const struct kernel_param_ops ubiblock_param_ops = {
        .set    = ubiblock_set_param,
 };
 module_param_cb(block, &ubiblock_param_ops, NULL, 0);
index 019fceffc9e52980d1f3837b01a299b97782dfca..c18f9e62a9fa2ea181c7ab2c75faaa9bc713b2ec 100644 (file)
@@ -217,8 +217,8 @@ config NET_POLL_CONTROLLER
        def_bool NETPOLL
 
 config NTB_NETDEV
-       tristate "Virtual Ethernet over NTB"
-       depends on NTB
+       tristate "Virtual Ethernet over NTB Transport"
+       depends on NTB_TRANSPORT
 
 config RIONET
        tristate "RapidIO Ethernet over messaging driver support"
@@ -258,6 +258,20 @@ config TUN
 
          If you don't know what to use this for, you don't need it.
 
+config TUN_VNET_CROSS_LE
+       bool "Support for cross-endian vnet headers on little-endian kernels"
+       default n
+       ---help---
+         This option allows TUN/TAP and MACVTAP device drivers in a
+         little-endian kernel to parse vnet headers that come from a
+         big-endian legacy virtio device.
+
+         Userspace programs can control the feature using the TUNSETVNETBE
+         and TUNGETVNETBE ioctls.
+
+         Unless you have a little-endian system hosting a big-endian virtual
+         machine with a legacy virtio NIC, you should say N.
+
 config VETH
        tristate "Virtual ethernet pair device"
        ---help---
index dd03ad865cafb83b16389e4f1840a335ff3fe9d4..661cdaa7ea96c26ebc6142c955f12dd19296e49f 100644 (file)
@@ -268,7 +268,7 @@ static int xgbe_alloc_pages(struct xgbe_prv_data *pdata,
        int ret;
 
        /* Try to obtain pages, decreasing order if necessary */
-       gfp |= __GFP_COLD | __GFP_COMP;
+       gfp |= __GFP_COLD | __GFP_COMP | __GFP_NOWARN;
        while (order >= 0) {
                pages = alloc_pages(gfp, order);
                if (pages)
index 95153b234c7158c655d95b7882db729d966a36dc..299eb4315fe647ba8d67302649a2cf928a4d59d5 100644 (file)
@@ -948,7 +948,7 @@ static int xgene_enet_get_resources(struct xgene_enet_pdata *pdata)
        struct resource *res;
        void __iomem *base_addr;
        u32 offset;
-       int ret;
+       int ret = 0;
 
        pdev = pdata->pdev;
        dev = &pdev->dev;
index 7a4aaa3c01b69d43b8f0bd7f1023150f203c7179..cd4ae76bbff2f8acda89154e65cf699e141553e5 100644 (file)
@@ -530,7 +530,6 @@ enum bnx2x_tpa_mode_t {
 
 struct bnx2x_alloc_pool {
        struct page     *page;
-       dma_addr_t      dma;
        unsigned int    offset;
 };
 
@@ -2418,10 +2417,13 @@ void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, u8 func, u8 idu_sb_id,
                                 AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR | \
                                 AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR)
 
-#define HW_PRTY_ASSERT_SET_3 (AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY | \
-               AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY | \
-               AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY | \
-               AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY)
+#define HW_PRTY_ASSERT_SET_3_WITHOUT_SCPAD \
+               (AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY | \
+                AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY | \
+                AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY)
+
+#define HW_PRTY_ASSERT_SET_3 (HW_PRTY_ASSERT_SET_3_WITHOUT_SCPAD | \
+                             AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY)
 
 #define HW_PRTY_ASSERT_SET_4 (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR | \
                              AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)
index e2a65334708d8d61703dd44d55ab3ef9d0dda67f..a90d7364334f9dfa3687dc813e068508a342861c 100644 (file)
@@ -563,23 +563,20 @@ static int bnx2x_alloc_rx_sge(struct bnx2x *bp, struct bnx2x_fastpath *fp,
                        return -ENOMEM;
                }
 
-               pool->dma = dma_map_page(&bp->pdev->dev, pool->page, 0,
-                                        PAGE_SIZE, DMA_FROM_DEVICE);
-               if (unlikely(dma_mapping_error(&bp->pdev->dev,
-                                              pool->dma))) {
-                       __free_pages(pool->page, PAGES_PER_SGE_SHIFT);
-                       pool->page = NULL;
-                       BNX2X_ERR("Can't map sge\n");
-                       return -ENOMEM;
-               }
                pool->offset = 0;
        }
 
+       mapping = dma_map_page(&bp->pdev->dev, pool->page,
+                              pool->offset, SGE_PAGE_SIZE, DMA_FROM_DEVICE);
+       if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
+               BNX2X_ERR("Can't map sge\n");
+               return -ENOMEM;
+       }
+
        get_page(pool->page);
        sw_buf->page = pool->page;
        sw_buf->offset = pool->offset;
 
-       mapping = pool->dma + sw_buf->offset;
        dma_unmap_addr_set(sw_buf, mapping, mapping);
 
        sge->addr_hi = cpu_to_le32(U64_HI(mapping));
@@ -648,9 +645,9 @@ static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
                        return err;
                }
 
-               dma_unmap_single(&bp->pdev->dev,
-                                dma_unmap_addr(&old_rx_pg, mapping),
-                                SGE_PAGE_SIZE, DMA_FROM_DEVICE);
+               dma_unmap_page(&bp->pdev->dev,
+                              dma_unmap_addr(&old_rx_pg, mapping),
+                              SGE_PAGE_SIZE, DMA_FROM_DEVICE);
                /* Add one frag and update the appropriate fields in the skb */
                if (fp->mode == TPA_MODE_LRO)
                        skb_fill_page_desc(skb, j, old_rx_pg.page,
@@ -3421,8 +3418,13 @@ static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
                        u32 wnd_sum = 0;
 
                        /* Headers length */
-                       hlen = (int)(skb_transport_header(skb) - skb->data) +
-                               tcp_hdrlen(skb);
+                       if (xmit_type & XMIT_GSO_ENC)
+                               hlen = (int)(skb_inner_transport_header(skb) -
+                                            skb->data) +
+                                            inner_tcp_hdrlen(skb);
+                       else
+                               hlen = (int)(skb_transport_header(skb) -
+                                            skb->data) + tcp_hdrlen(skb);
 
                        /* Amount of data (w/o headers) on linear part of SKB*/
                        first_bd_sz = skb_headlen(skb) - hlen;
index 2b30081ec26d128ec86c602eb5a097c77c664159..03b7404d5b9ba59c5470fe36ec0746d6b75f7eee 100644 (file)
@@ -807,8 +807,8 @@ static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
        /* Since many fragments can share the same page, make sure to
         * only unmap and free the page once.
         */
-       dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(sw_buf, mapping),
-                        SGE_PAGE_SIZE, DMA_FROM_DEVICE);
+       dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(sw_buf, mapping),
+                      SGE_PAGE_SIZE, DMA_FROM_DEVICE);
 
        put_page(page);
 
@@ -974,14 +974,6 @@ static inline void bnx2x_free_rx_mem_pool(struct bnx2x *bp,
        if (!pool->page)
                return;
 
-       /* Page was not fully fragmented.  Unmap unused space */
-       if (pool->offset < PAGE_SIZE) {
-               dma_addr_t dma = pool->dma + pool->offset;
-               int size = PAGE_SIZE - pool->offset;
-
-               dma_unmap_single(&bp->pdev->dev, dma, size, DMA_FROM_DEVICE);
-       }
-
        put_page(pool->page);
 
        pool->page = NULL;
index 48ed005ba73fd3a9d9aa550871b647fdd0b59350..76b9052a961c517978494199d74398264583508c 100644 (file)
@@ -257,14 +257,15 @@ static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
 {
        struct bnx2x *bp = netdev_priv(dev);
        int cfg_idx = bnx2x_get_link_cfg_idx(bp);
+       u32 media_type;
 
        /* Dual Media boards present all available port types */
        cmd->supported = bp->port.supported[cfg_idx] |
                (bp->port.supported[cfg_idx ^ 1] &
                 (SUPPORTED_TP | SUPPORTED_FIBRE));
        cmd->advertising = bp->port.advertising[cfg_idx];
-       if (bp->link_params.phy[bnx2x_get_cur_phy_idx(bp)].media_type ==
-           ETH_PHY_SFP_1G_FIBER) {
+       media_type = bp->link_params.phy[bnx2x_get_cur_phy_idx(bp)].media_type;
+       if (media_type == ETH_PHY_SFP_1G_FIBER) {
                cmd->supported &= ~(SUPPORTED_10000baseT_Full);
                cmd->advertising &= ~(ADVERTISED_10000baseT_Full);
        }
@@ -312,12 +313,26 @@ static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
                        cmd->lp_advertising |= ADVERTISED_100baseT_Full;
                if (status & LINK_STATUS_LINK_PARTNER_1000THD_CAPABLE)
                        cmd->lp_advertising |= ADVERTISED_1000baseT_Half;
-               if (status & LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE)
-                       cmd->lp_advertising |= ADVERTISED_1000baseT_Full;
+               if (status & LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE) {
+                       if (media_type == ETH_PHY_KR) {
+                               cmd->lp_advertising |=
+                                       ADVERTISED_1000baseKX_Full;
+                       } else {
+                               cmd->lp_advertising |=
+                                       ADVERTISED_1000baseT_Full;
+                       }
+               }
                if (status & LINK_STATUS_LINK_PARTNER_2500XFD_CAPABLE)
                        cmd->lp_advertising |= ADVERTISED_2500baseX_Full;
-               if (status & LINK_STATUS_LINK_PARTNER_10GXFD_CAPABLE)
-                       cmd->lp_advertising |= ADVERTISED_10000baseT_Full;
+               if (status & LINK_STATUS_LINK_PARTNER_10GXFD_CAPABLE) {
+                       if (media_type == ETH_PHY_KR) {
+                               cmd->lp_advertising |=
+                                       ADVERTISED_10000baseKR_Full;
+                       } else {
+                               cmd->lp_advertising |=
+                                       ADVERTISED_10000baseT_Full;
+                       }
+               }
                if (status & LINK_STATUS_LINK_PARTNER_20GXFD_CAPABLE)
                        cmd->lp_advertising |= ADVERTISED_20000baseKR2_Full;
        }
@@ -564,15 +579,20 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
                                return -EINVAL;
                        }
 
-                       if (!(bp->port.supported[cfg_idx] &
-                             SUPPORTED_1000baseT_Full)) {
+                       if (bp->port.supported[cfg_idx] &
+                            SUPPORTED_1000baseT_Full) {
+                               advertising = (ADVERTISED_1000baseT_Full |
+                                              ADVERTISED_TP);
+
+                       } else if (bp->port.supported[cfg_idx] &
+                                  SUPPORTED_1000baseKX_Full) {
+                               advertising = ADVERTISED_1000baseKX_Full;
+                       } else {
                                DP(BNX2X_MSG_ETHTOOL,
                                   "1G full not supported\n");
                                return -EINVAL;
                        }
 
-                       advertising = (ADVERTISED_1000baseT_Full |
-                                      ADVERTISED_TP);
                        break;
 
                case SPEED_2500:
@@ -600,17 +620,22 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
                                return -EINVAL;
                        }
                        phy_idx = bnx2x_get_cur_phy_idx(bp);
-                       if (!(bp->port.supported[cfg_idx]
-                             & SUPPORTED_10000baseT_Full) ||
-                           (bp->link_params.phy[phy_idx].media_type ==
+                       if ((bp->port.supported[cfg_idx] &
+                            SUPPORTED_10000baseT_Full) &&
+                           (bp->link_params.phy[phy_idx].media_type !=
                             ETH_PHY_SFP_1G_FIBER)) {
+                               advertising = (ADVERTISED_10000baseT_Full |
+                                              ADVERTISED_FIBRE);
+                       } else if (bp->port.supported[cfg_idx] &
+                              SUPPORTED_10000baseKR_Full) {
+                               advertising = (ADVERTISED_10000baseKR_Full |
+                                              ADVERTISED_FIBRE);
+                       } else {
                                DP(BNX2X_MSG_ETHTOOL,
                                   "10G full not supported\n");
                                return -EINVAL;
                        }
 
-                       advertising = (ADVERTISED_10000baseT_Full |
-                                      ADVERTISED_FIBRE);
                        break;
 
                default:
@@ -633,6 +658,7 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
        bp->link_params.multi_phy_config = new_multi_phy_config;
        if (netif_running(dev)) {
                bnx2x_stats_handle(bp, STATS_EVENT_STOP);
+               bnx2x_force_link_reset(bp);
                bnx2x_link_set(bp);
        }
 
@@ -1204,6 +1230,7 @@ static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
        if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
                DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
                   "cannot get access to nvram interface\n");
+               bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_NVRAM);
                return -EBUSY;
        }
 
@@ -1944,6 +1971,7 @@ static int bnx2x_set_pauseparam(struct net_device *dev,
 
        if (netif_running(dev)) {
                bnx2x_stats_handle(bp, STATS_EVENT_STOP);
+               bnx2x_force_link_reset(bp);
                bnx2x_link_set(bp);
        }
 
index 21a0d6afca4a53a24100289585f1e9b6d59ee497..a0b03c27e0a302c08fd1a78c5dbd2dd7606ae16a 100644 (file)
@@ -3392,9 +3392,9 @@ static void bnx2x_calc_ieee_aneg_adv(struct bnx2x_phy *phy,
        case BNX2X_FLOW_CTRL_AUTO:
                switch (params->req_fc_auto_adv) {
                case BNX2X_FLOW_CTRL_BOTH:
+               case BNX2X_FLOW_CTRL_RX:
                        *ieee_fc |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH;
                        break;
-               case BNX2X_FLOW_CTRL_RX:
                case BNX2X_FLOW_CTRL_TX:
                        *ieee_fc |=
                                MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC;
@@ -3488,14 +3488,21 @@ static void bnx2x_ext_phy_set_pause(struct link_params *params,
        bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_ADV_PAUSE, val);
 }
 
-static void bnx2x_pause_resolve(struct link_vars *vars, u32 pause_result)
-{                                              /*  LD      LP   */
+static void bnx2x_pause_resolve(struct bnx2x_phy *phy,
+                               struct link_params *params,
+                               struct link_vars *vars,
+                               u32 pause_result)
+{
+       struct bnx2x *bp = params->bp;
+                                               /*  LD      LP   */
        switch (pause_result) {                 /* ASYM P ASYM P */
        case 0xb:                               /*   1  0   1  1 */
+               DP(NETIF_MSG_LINK, "Flow Control: TX only\n");
                vars->flow_ctrl = BNX2X_FLOW_CTRL_TX;
                break;
 
        case 0xe:                               /*   1  1   1  0 */
+               DP(NETIF_MSG_LINK, "Flow Control: RX only\n");
                vars->flow_ctrl = BNX2X_FLOW_CTRL_RX;
                break;
 
@@ -3503,10 +3510,22 @@ static void bnx2x_pause_resolve(struct link_vars *vars, u32 pause_result)
        case 0x7:                               /*   0  1   1  1 */
        case 0xd:                               /*   1  1   0  1 */
        case 0xf:                               /*   1  1   1  1 */
-               vars->flow_ctrl = BNX2X_FLOW_CTRL_BOTH;
+               /* If the user selected to advertise RX ONLY,
+                * although we advertised both, we need to enable
+                * RX only.
+                */
+               if (params->req_fc_auto_adv == BNX2X_FLOW_CTRL_BOTH) {
+                       DP(NETIF_MSG_LINK, "Flow Control: RX & TX\n");
+                       vars->flow_ctrl = BNX2X_FLOW_CTRL_BOTH;
+               } else {
+                       DP(NETIF_MSG_LINK, "Flow Control: RX only\n");
+                       vars->flow_ctrl = BNX2X_FLOW_CTRL_RX;
+               }
                break;
 
        default:
+               DP(NETIF_MSG_LINK, "Flow Control: None\n");
+               vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
                break;
        }
        if (pause_result & (1<<0))
@@ -3567,7 +3586,7 @@ static void bnx2x_ext_phy_update_adv_fc(struct bnx2x_phy *phy,
        pause_result |= (lp_pause &
                         MDIO_AN_REG_ADV_PAUSE_MASK) >> 10;
        DP(NETIF_MSG_LINK, "Ext PHY pause result 0x%x\n", pause_result);
-       bnx2x_pause_resolve(vars, pause_result);
+       bnx2x_pause_resolve(phy, params, vars, pause_result);
 
 }
 
@@ -5396,7 +5415,7 @@ static void bnx2x_update_adv_fc(struct bnx2x_phy *phy,
                                 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK)>>7;
                DP(NETIF_MSG_LINK, "pause_result CL37 0x%x\n", pause_result);
        }
-       bnx2x_pause_resolve(vars, pause_result);
+       bnx2x_pause_resolve(phy, params, vars, pause_result);
 
 }
 
@@ -7129,7 +7148,7 @@ static void bnx2x_8073_resolve_fc(struct bnx2x_phy *phy,
                pause_result |= (lp_pause &
                                 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) >> 7;
 
-               bnx2x_pause_resolve(vars, pause_result);
+               bnx2x_pause_resolve(phy, params, vars, pause_result);
                DP(NETIF_MSG_LINK, "Ext PHY CL37 pause result 0x%x\n",
                           pause_result);
        }
@@ -11474,7 +11493,9 @@ static const struct bnx2x_phy phy_warpcore = {
                           SUPPORTED_100baseT_Half |
                           SUPPORTED_100baseT_Full |
                           SUPPORTED_1000baseT_Full |
+                          SUPPORTED_1000baseKX_Full |
                           SUPPORTED_10000baseT_Full |
+                          SUPPORTED_10000baseKR_Full |
                           SUPPORTED_20000baseKR2_Full |
                           SUPPORTED_20000baseMLD2_Full |
                           SUPPORTED_FIBRE |
@@ -11980,8 +12001,8 @@ static int bnx2x_populate_int_phy(struct bnx2x *bp, u32 shmem_base, u8 port,
                        break;
                case PORT_HW_CFG_NET_SERDES_IF_KR:
                        phy->media_type = ETH_PHY_KR;
-                       phy->supported &= (SUPPORTED_1000baseT_Full |
-                                          SUPPORTED_10000baseT_Full |
+                       phy->supported &= (SUPPORTED_1000baseKX_Full |
+                                          SUPPORTED_10000baseKR_Full |
                                           SUPPORTED_FIBRE |
                                           SUPPORTED_Autoneg |
                                           SUPPORTED_Pause |
@@ -11999,8 +12020,8 @@ static int bnx2x_populate_int_phy(struct bnx2x *bp, u32 shmem_base, u8 port,
                        phy->media_type = ETH_PHY_KR;
                        phy->flags |= FLAGS_WC_DUAL_MODE;
                        phy->supported &= (SUPPORTED_20000baseKR2_Full |
-                                          SUPPORTED_10000baseT_Full |
-                                          SUPPORTED_1000baseT_Full |
+                                          SUPPORTED_10000baseKR_Full |
+                                          SUPPORTED_1000baseKX_Full |
                                           SUPPORTED_Autoneg |
                                           SUPPORTED_FIBRE |
                                           SUPPORTED_Pause |
index 33501bcddc48eb1f6157a08e3e3d1e08dc087c25..c27af12314ed29ae19e73a9c00f56c062a5aa830 100644 (file)
@@ -2287,13 +2287,11 @@ static int bnx2x_set_spio(struct bnx2x *bp, int spio, u32 mode)
 void bnx2x_calc_fc_adv(struct bnx2x *bp)
 {
        u8 cfg_idx = bnx2x_get_link_cfg_idx(bp);
+
+       bp->port.advertising[cfg_idx] &= ~(ADVERTISED_Asym_Pause |
+                                          ADVERTISED_Pause);
        switch (bp->link_vars.ieee_fc &
                MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
-       case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
-               bp->port.advertising[cfg_idx] &= ~(ADVERTISED_Asym_Pause |
-                                                  ADVERTISED_Pause);
-               break;
-
        case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
                bp->port.advertising[cfg_idx] |= (ADVERTISED_Asym_Pause |
                                                  ADVERTISED_Pause);
@@ -2304,8 +2302,6 @@ void bnx2x_calc_fc_adv(struct bnx2x *bp)
                break;
 
        default:
-               bp->port.advertising[cfg_idx] &= ~(ADVERTISED_Asym_Pause |
-                                                  ADVERTISED_Pause);
                break;
        }
 }
@@ -2351,12 +2347,16 @@ int bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
                if (load_mode == LOAD_DIAG) {
                        struct link_params *lp = &bp->link_params;
                        lp->loopback_mode = LOOPBACK_XGXS;
-                       /* do PHY loopback at 10G speed, if possible */
-                       if (lp->req_line_speed[cfx_idx] < SPEED_10000) {
+                       /* Prefer doing PHY loopback at highest speed */
+                       if (lp->req_line_speed[cfx_idx] < SPEED_20000) {
                                if (lp->speed_cap_mask[cfx_idx] &
-                                   PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)
+                                   PORT_HW_CFG_SPEED_CAPABILITY_D0_20G)
                                        lp->req_line_speed[cfx_idx] =
-                                       SPEED_10000;
+                                       SPEED_20000;
+                               else if (lp->speed_cap_mask[cfx_idx] &
+                                           PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)
+                                               lp->req_line_speed[cfx_idx] =
+                                               SPEED_10000;
                                else
                                        lp->req_line_speed[cfx_idx] =
                                        SPEED_1000;
@@ -4867,9 +4867,7 @@ static bool bnx2x_check_blocks_with_parity3(struct bnx2x *bp, u32 sig,
                                res = true;
                                break;
                        case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY:
-                               if (print)
-                                       _print_next_block((*par_num)++,
-                                                         "MCP SCPAD");
+                               (*par_num)++;
                                /* clear latched SCPAD PARITY from MCP */
                                REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL,
                                       1UL << 10);
@@ -4931,6 +4929,7 @@ static bool bnx2x_parity_attn(struct bnx2x *bp, bool *global, bool print,
            (sig[3] & HW_PRTY_ASSERT_SET_3) ||
            (sig[4] & HW_PRTY_ASSERT_SET_4)) {
                int par_num = 0;
+
                DP(NETIF_MSG_HW, "Was parity error: HW block parity attention:\n"
                                 "[0]:0x%08x [1]:0x%08x [2]:0x%08x [3]:0x%08x [4]:0x%08x\n",
                          sig[0] & HW_PRTY_ASSERT_SET_0,
@@ -4938,9 +4937,18 @@ static bool bnx2x_parity_attn(struct bnx2x *bp, bool *global, bool print,
                          sig[2] & HW_PRTY_ASSERT_SET_2,
                          sig[3] & HW_PRTY_ASSERT_SET_3,
                          sig[4] & HW_PRTY_ASSERT_SET_4);
-               if (print)
-                       netdev_err(bp->dev,
-                                  "Parity errors detected in blocks: ");
+               if (print) {
+                       if (((sig[0] & HW_PRTY_ASSERT_SET_0) ||
+                            (sig[1] & HW_PRTY_ASSERT_SET_1) ||
+                            (sig[2] & HW_PRTY_ASSERT_SET_2) ||
+                            (sig[4] & HW_PRTY_ASSERT_SET_4)) ||
+                            (sig[3] & HW_PRTY_ASSERT_SET_3_WITHOUT_SCPAD)) {
+                               netdev_err(bp->dev,
+                                          "Parity errors detected in blocks: ");
+                       } else {
+                               print = false;
+                       }
+               }
                res |= bnx2x_check_blocks_with_parity0(bp,
                        sig[0] & HW_PRTY_ASSERT_SET_0, &par_num, print);
                res |= bnx2x_check_blocks_with_parity1(bp,
@@ -8431,7 +8439,7 @@ int bnx2x_set_eth_mac(struct bnx2x *bp, bool set)
                                         BNX2X_ETH_MAC, &ramrod_flags);
        } else { /* vf */
                return bnx2x_vfpf_config_mac(bp, bp->dev->dev_addr,
-                                            bp->fp->index, true);
+                                            bp->fp->index, set);
        }
 }
 
@@ -9323,7 +9331,8 @@ unload_error:
         * function stop ramrod is sent, since as part of this ramrod FW access
         * PTP registers.
         */
-       bnx2x_stop_ptp(bp);
+       if (bp->flags & PTP_SUPPORTED)
+               bnx2x_stop_ptp(bp);
 
        /* Disable HW interrupts, NAPI */
        bnx2x_netif_stop(bp, 1);
@@ -11147,6 +11156,12 @@ static void bnx2x_link_settings_requested(struct bnx2x *bp)
                                bp->port.advertising[idx] |=
                                        (ADVERTISED_1000baseT_Full |
                                         ADVERTISED_TP);
+                       } else if (bp->port.supported[idx] &
+                                  SUPPORTED_1000baseKX_Full) {
+                               bp->link_params.req_line_speed[idx] =
+                                       SPEED_1000;
+                               bp->port.advertising[idx] |=
+                                       ADVERTISED_1000baseKX_Full;
                        } else {
                                BNX2X_ERR("NVRAM config error. Invalid link_config 0x%x  speed_cap_mask 0x%x\n",
                                    link_config,
@@ -11179,6 +11194,13 @@ static void bnx2x_link_settings_requested(struct bnx2x *bp)
                                bp->port.advertising[idx] |=
                                        (ADVERTISED_10000baseT_Full |
                                                ADVERTISED_FIBRE);
+                       } else if (bp->port.supported[idx] &
+                                  SUPPORTED_10000baseKR_Full) {
+                               bp->link_params.req_line_speed[idx] =
+                                       SPEED_10000;
+                               bp->port.advertising[idx] |=
+                                       (ADVERTISED_10000baseKR_Full |
+                                               ADVERTISED_FIBRE);
                        } else {
                                BNX2X_ERR("NVRAM config error. Invalid link_config 0x%x  speed_cap_mask 0x%x\n",
                                    link_config,
index 07cdf9bbffef2ee85ff3d589f33c1a10aea405d5..4ad415ac8cfe4a56ffd00858d1f70d9f1ab01456 100644 (file)
@@ -424,7 +424,7 @@ static void __bnx2x_vlan_mac_h_exec_pending(struct bnx2x *bp,
        o->head_exe_request = false;
        o->saved_ramrod_flags = 0;
        rc = bnx2x_exe_queue_step(bp, &o->exe_queue, &ramrod_flags);
-       if (rc != 0) {
+       if ((rc != 0) && (rc != 1)) {
                BNX2X_ERR("execution of pending commands failed with rc %d\n",
                          rc);
 #ifdef BNX2X_STOP_ON_ERROR
index 6f2887a5e0be693d625b6328349a2ad3b66d19ba..6159deab8c9850a0231ef3b3f1fad6dfaa31a588 100644 (file)
@@ -594,6 +594,7 @@ struct bcmgenet_priv {
        wait_queue_head_t wq;
        struct phy_device *phydev;
        struct device_node *phy_dn;
+       struct device_node *mdio_dn;
        struct mii_bus *mii_bus;
        u16 gphy_rev;
        struct clk *clk_eee;
index 6bef04e2f7354b2a9cadeac7eda5ea66e2fd25da..adf23d2ac4888e89f63c4246e7c3b33eaf3d0fd0 100644 (file)
@@ -408,6 +408,52 @@ static int bcmgenet_mii_probe(struct net_device *dev)
        return 0;
 }
 
+/* Workaround for integrated BCM7xxx Gigabit PHYs which have a problem with
+ * their internal MDIO management controller making them fail to successfully
+ * be read from or written to for the first transaction.  We insert a dummy
+ * BMSR read here to make sure that get_phy_device() and get_phy_id() can
+ * correctly read the PHY MII_PHYSID1/2 registers and successfully register a
+ * PHY device for this peripheral.
+ *
+ * Once the PHY driver is registered, we can work around subsequent reads from
+ * there (e.g. during system-wide power management).
+ *
+ * bus->reset is invoked before mdiobus_scan during mdiobus_register and is
+ * therefore the right location to stick that workaround. Since we do not want
+ * to read from non-existing PHYs, we either use bus->phy_mask or do a manual
+ * Device Tree scan to limit the search area.
+ */
+static int bcmgenet_mii_bus_reset(struct mii_bus *bus)
+{
+       struct net_device *dev = bus->priv;
+       struct bcmgenet_priv *priv = netdev_priv(dev);
+       struct device_node *np = priv->mdio_dn;
+       struct device_node *child = NULL;
+       u32 read_mask = 0;
+       int addr = 0;
+
+       if (!np) {
+               read_mask = 1 << priv->phy_addr;
+       } else {
+               for_each_available_child_of_node(np, child) {
+                       addr = of_mdio_parse_addr(&dev->dev, child);
+                       if (addr < 0)
+                               continue;
+
+                       read_mask |= 1 << addr;
+               }
+       }
+
+       for (addr = 0; addr < PHY_MAX_ADDR; addr++) {
+               if (read_mask & 1 << addr) {
+                       dev_dbg(&dev->dev, "Workaround for PHY @ %d\n", addr);
+                       mdiobus_read(bus, addr, MII_BMSR);
+               }
+       }
+
+       return 0;
+}
+
 static int bcmgenet_mii_alloc(struct bcmgenet_priv *priv)
 {
        struct mii_bus *bus;
@@ -427,6 +473,7 @@ static int bcmgenet_mii_alloc(struct bcmgenet_priv *priv)
        bus->parent = &priv->pdev->dev;
        bus->read = bcmgenet_mii_read;
        bus->write = bcmgenet_mii_write;
+       bus->reset = bcmgenet_mii_bus_reset;
        snprintf(bus->id, MII_BUS_ID_SIZE, "%s-%d",
                 priv->pdev->name, priv->pdev->id);
 
@@ -443,7 +490,6 @@ static int bcmgenet_mii_of_init(struct bcmgenet_priv *priv)
 {
        struct device_node *dn = priv->pdev->dev.of_node;
        struct device *kdev = &priv->pdev->dev;
-       struct device_node *mdio_dn;
        char *compat;
        int ret;
 
@@ -451,14 +497,14 @@ static int bcmgenet_mii_of_init(struct bcmgenet_priv *priv)
        if (!compat)
                return -ENOMEM;
 
-       mdio_dn = of_find_compatible_node(dn, NULL, compat);
+       priv->mdio_dn = of_find_compatible_node(dn, NULL, compat);
        kfree(compat);
-       if (!mdio_dn) {
+       if (!priv->mdio_dn) {
                dev_err(kdev, "unable to find MDIO bus node\n");
                return -ENODEV;
        }
 
-       ret = of_mdiobus_register(priv->mii_bus, mdio_dn);
+       ret = of_mdiobus_register(priv->mii_bus, priv->mdio_dn);
        if (ret) {
                dev_err(kdev, "failed to register MDIO bus\n");
                return ret;
index 160f8077692ce487a7d8cb87aa41d62dc21c1ddc..29f33083178431ac3735094683663d1e4ab2b836 100644 (file)
@@ -434,8 +434,9 @@ static int lio_set_phys_id(struct net_device *netdev,
                        if (ret)
                                return ret;
 
-                       octnet_mdio45_access(lio, 1, LIO68XX_LED_BEACON_ADDR,
-                                            &lio->phy_beacon_val);
+                       ret = octnet_mdio45_access(lio, 1,
+                                                  LIO68XX_LED_BEACON_ADDR,
+                                                  &lio->phy_beacon_val);
                        if (ret)
                                return ret;
 
index 0d3106b464b29548ddca47eaebb56d08e879b552..f67641a2ff9eff652a7998f4c6d8f8fab873fc63 100644 (file)
@@ -650,14 +650,12 @@ void octeon_free_device_mem(struct octeon_device *oct)
 
        for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES; i++) {
                /* could check  mask as well */
-               if (oct->droq[i])
-                       vfree(oct->droq[i]);
+               vfree(oct->droq[i]);
        }
 
        for (i = 0; i < MAX_OCTEON_INSTR_QUEUES; i++) {
                /* could check mask as well */
-               if (oct->instr_queue[i])
-                       vfree(oct->instr_queue[i]);
+               vfree(oct->instr_queue[i]);
        }
 
        i = oct->octeon_id;
@@ -1078,10 +1076,7 @@ octeon_unregister_dispatch_fn(struct octeon_device *oct, u16 opcode,
                oct->dispatch.count--;
 
        spin_unlock_bh(&oct->dispatch.lock);
-
-       if (dfree)
-               vfree(dfree);
-
+       vfree(dfree);
        return retval;
 }
 
index 94b502a0cf33e54b954768688c3e35c056ed1d55..4dba86eaa04559649b012cbeff8707c47a176927 100644 (file)
@@ -216,9 +216,7 @@ int octeon_delete_droq(struct octeon_device *oct, u32 q_no)
        dev_dbg(&oct->pci_dev->dev, "%s[%d]\n", __func__, q_no);
 
        octeon_droq_destroy_ring_buffers(oct, droq);
-
-       if (droq->recv_buf_list)
-               vfree(droq->recv_buf_list);
+       vfree(droq->recv_buf_list);
 
        if (droq->info_base_addr)
                cnnic_free_aligned_dma(oct->pci_dev, droq->info_list,
index 356796bf9b871e82f4e300206e24757072bfa870..a2a24652c8f32826882f82910b76d38c8df49593 100644 (file)
@@ -175,8 +175,7 @@ int octeon_delete_instr_queue(struct octeon_device *oct, u32 iq_no)
                desc_size =
                    CFG_GET_IQ_INSTR_TYPE(CHIP_FIELD(oct, cn6xxx, conf));
 
-       if (iq->request_list)
-               vfree(iq->request_list);
+       vfree(iq->request_list);
 
        if (iq->base_addr) {
                q_size = iq->max_count * desc_size;
index b0cbb2b7fd484f95ec36574feef7981753a552e9..76684dcb874cf90bf3d4ecd56a0ab2a200d4a974 100644 (file)
@@ -1169,10 +1169,7 @@ void *cxgb_alloc_mem(unsigned long size)
  */
 void cxgb_free_mem(void *addr)
 {
-       if (is_vmalloc_addr(addr))
-               vfree(addr);
-       else
-               kfree(addr);
+       kvfree(addr);
 }
 
 /*
index c64b5a99bfef0eb04c3f6de63b3ac8d7842345b5..351f3b1bf80025167c9afcc226252ec923a639b1 100644 (file)
@@ -1150,10 +1150,7 @@ void *t4_alloc_mem(size_t size)
  */
 void t4_free_mem(void *addr)
 {
-       if (is_vmalloc_addr(addr))
-               vfree(addr);
-       else
-               kfree(addr);
+       kvfree(addr);
 }
 
 /* Send a Work Request to write the filter at a specified index.  We construct
index eadae1b412c652974dde24a9a76c5d74a8c3fa29..da2004e2a74176959ece42065a26267aa2924a2b 100644 (file)
@@ -1208,7 +1208,7 @@ static int enic_poll(struct napi_struct *napi, int budget)
                napi_complete(napi);
                vnic_intr_unmask(&enic->intr[intr]);
        }
-       enic_poll_unlock_napi(&enic->rq[cq_rq]);
+       enic_poll_unlock_napi(&enic->rq[cq_rq], napi);
 
        return rq_work_done;
 }
@@ -1414,7 +1414,7 @@ static int enic_poll_msix_rq(struct napi_struct *napi, int budget)
                 */
                enic_calc_int_moderation(enic, &enic->rq[rq]);
 
-       enic_poll_unlock_napi(&enic->rq[rq]);
+       enic_poll_unlock_napi(&enic->rq[rq], napi);
        if (work_done < work_to_do) {
 
                /* Some work done, but not enough to stay in polling,
index 8111d5202df2f38c26a8c241a7bb1c1e7cda8228..b9c82f143d7e099948c9bd5e540fee64eeb68b46 100644 (file)
@@ -21,6 +21,7 @@
 #define _VNIC_RQ_H_
 
 #include <linux/pci.h>
+#include <linux/netdevice.h>
 
 #include "vnic_dev.h"
 #include "vnic_cq.h"
@@ -75,6 +76,12 @@ struct vnic_rq_buf {
        uint64_t wr_id;
 };
 
+enum enic_poll_state {
+       ENIC_POLL_STATE_IDLE,
+       ENIC_POLL_STATE_NAPI,
+       ENIC_POLL_STATE_POLL
+};
+
 struct vnic_rq {
        unsigned int index;
        struct vnic_dev *vdev;
@@ -86,19 +93,7 @@ struct vnic_rq {
        void *os_buf_head;
        unsigned int pkts_outstanding;
 #ifdef CONFIG_NET_RX_BUSY_POLL
-#define ENIC_POLL_STATE_IDLE           0
-#define ENIC_POLL_STATE_NAPI           (1 << 0) /* NAPI owns this poll */
-#define ENIC_POLL_STATE_POLL           (1 << 1) /* poll owns this poll */
-#define ENIC_POLL_STATE_NAPI_YIELD     (1 << 2) /* NAPI yielded this poll */
-#define ENIC_POLL_STATE_POLL_YIELD     (1 << 3) /* poll yielded this poll */
-#define ENIC_POLL_YIELD                        (ENIC_POLL_STATE_NAPI_YIELD |   \
-                                        ENIC_POLL_STATE_POLL_YIELD)
-#define ENIC_POLL_LOCKED               (ENIC_POLL_STATE_NAPI |         \
-                                        ENIC_POLL_STATE_POLL)
-#define ENIC_POLL_USER_PEND            (ENIC_POLL_STATE_POLL |         \
-                                        ENIC_POLL_STATE_POLL_YIELD)
-       unsigned int bpoll_state;
-       spinlock_t bpoll_lock;
+       atomic_t bpoll_state;
 #endif /* CONFIG_NET_RX_BUSY_POLL */
 };
 
@@ -215,76 +210,43 @@ static inline int vnic_rq_fill(struct vnic_rq *rq,
 #ifdef CONFIG_NET_RX_BUSY_POLL
 static inline void enic_busy_poll_init_lock(struct vnic_rq *rq)
 {
-       spin_lock_init(&rq->bpoll_lock);
-       rq->bpoll_state = ENIC_POLL_STATE_IDLE;
+       atomic_set(&rq->bpoll_state, ENIC_POLL_STATE_IDLE);
 }
 
 static inline bool enic_poll_lock_napi(struct vnic_rq *rq)
 {
-       bool rc = true;
-
-       spin_lock(&rq->bpoll_lock);
-       if (rq->bpoll_state & ENIC_POLL_LOCKED) {
-               WARN_ON(rq->bpoll_state & ENIC_POLL_STATE_NAPI);
-               rq->bpoll_state |= ENIC_POLL_STATE_NAPI_YIELD;
-               rc = false;
-       } else {
-               rq->bpoll_state = ENIC_POLL_STATE_NAPI;
-       }
-       spin_unlock(&rq->bpoll_lock);
+       int rc = atomic_cmpxchg(&rq->bpoll_state, ENIC_POLL_STATE_IDLE,
+                               ENIC_POLL_STATE_NAPI);
 
-       return rc;
+       return (rc == ENIC_POLL_STATE_IDLE);
 }
 
-static inline bool enic_poll_unlock_napi(struct vnic_rq *rq)
+static inline void enic_poll_unlock_napi(struct vnic_rq *rq,
+                                        struct napi_struct *napi)
 {
-       bool rc = false;
-
-       spin_lock(&rq->bpoll_lock);
-       WARN_ON(rq->bpoll_state &
-               (ENIC_POLL_STATE_POLL | ENIC_POLL_STATE_NAPI_YIELD));
-       if (rq->bpoll_state & ENIC_POLL_STATE_POLL_YIELD)
-               rc = true;
-       rq->bpoll_state = ENIC_POLL_STATE_IDLE;
-       spin_unlock(&rq->bpoll_lock);
-
-       return rc;
+       WARN_ON(atomic_read(&rq->bpoll_state) != ENIC_POLL_STATE_NAPI);
+       napi_gro_flush(napi, false);
+       atomic_set(&rq->bpoll_state, ENIC_POLL_STATE_IDLE);
 }
 
 static inline bool enic_poll_lock_poll(struct vnic_rq *rq)
 {
-       bool rc = true;
-
-       spin_lock_bh(&rq->bpoll_lock);
-       if (rq->bpoll_state & ENIC_POLL_LOCKED) {
-               rq->bpoll_state |= ENIC_POLL_STATE_POLL_YIELD;
-               rc = false;
-       } else {
-               rq->bpoll_state |= ENIC_POLL_STATE_POLL;
-       }
-       spin_unlock_bh(&rq->bpoll_lock);
+       int rc = atomic_cmpxchg(&rq->bpoll_state, ENIC_POLL_STATE_IDLE,
+                               ENIC_POLL_STATE_POLL);
 
-       return rc;
+       return (rc == ENIC_POLL_STATE_IDLE);
 }
 
-static inline bool enic_poll_unlock_poll(struct vnic_rq *rq)
-{
-       bool rc = false;
 
-       spin_lock_bh(&rq->bpoll_lock);
-       WARN_ON(rq->bpoll_state & ENIC_POLL_STATE_NAPI);
-       if (rq->bpoll_state & ENIC_POLL_STATE_POLL_YIELD)
-               rc = true;
-       rq->bpoll_state = ENIC_POLL_STATE_IDLE;
-       spin_unlock_bh(&rq->bpoll_lock);
-
-       return rc;
+static inline void enic_poll_unlock_poll(struct vnic_rq *rq)
+{
+       WARN_ON(atomic_read(&rq->bpoll_state) != ENIC_POLL_STATE_POLL);
+       atomic_set(&rq->bpoll_state, ENIC_POLL_STATE_IDLE);
 }
 
 static inline bool enic_poll_busy_polling(struct vnic_rq *rq)
 {
-       WARN_ON(!(rq->bpoll_state & ENIC_POLL_LOCKED));
-       return rq->bpoll_state & ENIC_POLL_USER_PEND;
+       return atomic_read(&rq->bpoll_state) & ENIC_POLL_STATE_POLL;
 }
 
 #else
@@ -298,7 +260,8 @@ static inline bool enic_poll_lock_napi(struct vnic_rq *rq)
        return true;
 }
 
-static inline bool enic_poll_unlock_napi(struct vnic_rq *rq)
+static inline bool enic_poll_unlock_napi(struct vnic_rq *rq,
+                                        struct napi_struct *napi)
 {
        return false;
 }
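
The enic changes above replace the spinlock-plus-flag busy-poll state with a single word claimed via atomic_cmpxchg(), where a claim succeeds only if the state was IDLE. A standalone sketch of that ownership pattern in plain C11 atomics (invented names, not the driver code):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

enum poll_state { STATE_IDLE, STATE_NAPI, STATE_POLL };

struct rx_queue {
        atomic_int bpoll_state;
};

/* Claim the queue for NAPI; succeeds only if nobody else holds it. */
static bool poll_lock_napi(struct rx_queue *rq)
{
        int expected = STATE_IDLE;

        return atomic_compare_exchange_strong(&rq->bpoll_state,
                                               &expected, STATE_NAPI);
}

/* Release: only the current owner stores IDLE back. */
static void poll_unlock_napi(struct rx_queue *rq)
{
        atomic_store(&rq->bpoll_state, STATE_IDLE);
}

int main(void)
{
        struct rx_queue rq;

        atomic_init(&rq.bpoll_state, STATE_IDLE);
        printf("first claim:  %s\n", poll_lock_napi(&rq) ? "ok" : "busy");
        printf("second claim: %s\n", poll_lock_napi(&rq) ? "ok" : "busy");
        poll_unlock_napi(&rq);
        return 0;
}
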
index b8de87b03046a13d2f1eff527137446de5081a41..ff76d4e9dc1ba5eab90413f82592a876092a8ddf 100644 (file)
@@ -83,12 +83,12 @@ config UGETH_TX_ON_DEMAND
 
 config GIANFAR
        tristate "Gianfar Ethernet"
-       depends on FSL_SOC
        select FSL_PQ_MDIO
        select PHYLIB
        select CRC32
        ---help---
          This driver supports the Gigabit TSEC on the MPC83xx, MPC85xx,
-         and MPC86xx family of chips, and the FEC on the 8540.
+         and MPC86xx family of chips, the eTSEC on LS1021A and the FEC
+         on the 8540.
 
 endif # NET_VENDOR_FREESCALE
index a86af8a7485dad1be3caf4a55b6d77c7c7b5c884..1eee73cccdf58deba85c810399930ffa55dfa03c 100644 (file)
@@ -428,6 +428,8 @@ struct bufdesc_ex {
 #define FEC_QUIRK_BUG_CAPTURE          (1 << 10)
 /* Controller has only one MDIO bus */
 #define FEC_QUIRK_SINGLE_MDIO          (1 << 11)
+/* Controller supports RACC register */
+#define FEC_QUIRK_HAS_RACC             (1 << 12)
 
 struct fec_enet_priv_tx_q {
        int index;
index e464aeaeed2cd9ece504a2e1494b2869dfadd138..1f89c59b43535f9b65e946c7468cb1fcb13a2022 100644 (file)
@@ -85,28 +85,30 @@ static struct platform_device_id fec_devtype[] = {
                .driver_data = 0,
        }, {
                .name = "imx25-fec",
-               .driver_data = FEC_QUIRK_USE_GASKET,
+               .driver_data = FEC_QUIRK_USE_GASKET | FEC_QUIRK_HAS_RACC,
        }, {
                .name = "imx27-fec",
-               .driver_data = 0,
+               .driver_data = FEC_QUIRK_HAS_RACC,
        }, {
                .name = "imx28-fec",
                .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_SWAP_FRAME |
-                               FEC_QUIRK_SINGLE_MDIO,
+                               FEC_QUIRK_SINGLE_MDIO | FEC_QUIRK_HAS_RACC,
        }, {
                .name = "imx6q-fec",
                .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
                                FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
-                               FEC_QUIRK_HAS_VLAN | FEC_QUIRK_ERR006358,
+                               FEC_QUIRK_HAS_VLAN | FEC_QUIRK_ERR006358 |
+                               FEC_QUIRK_HAS_RACC,
        }, {
                .name = "mvf600-fec",
-               .driver_data = FEC_QUIRK_ENET_MAC,
+               .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_RACC,
        }, {
                .name = "imx6sx-fec",
                .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
                                FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
                                FEC_QUIRK_HAS_VLAN | FEC_QUIRK_HAS_AVB |
-                               FEC_QUIRK_ERR007885 | FEC_QUIRK_BUG_CAPTURE,
+                               FEC_QUIRK_ERR007885 | FEC_QUIRK_BUG_CAPTURE |
+                               FEC_QUIRK_HAS_RACC,
        }, {
                /* sentinel */
        }
@@ -970,13 +972,15 @@ fec_restart(struct net_device *ndev)
        writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);
 
 #if !defined(CONFIG_M5272)
-       /* set RX checksum */
-       val = readl(fep->hwp + FEC_RACC);
-       if (fep->csum_flags & FLAG_RX_CSUM_ENABLED)
-               val |= FEC_RACC_OPTIONS;
-       else
-               val &= ~FEC_RACC_OPTIONS;
-       writel(val, fep->hwp + FEC_RACC);
+       if (fep->quirks & FEC_QUIRK_HAS_RACC) {
+               /* set RX checksum */
+               val = readl(fep->hwp + FEC_RACC);
+               if (fep->csum_flags & FLAG_RX_CSUM_ENABLED)
+                       val |= FEC_RACC_OPTIONS;
+               else
+                       val &= ~FEC_RACC_OPTIONS;
+               writel(val, fep->hwp + FEC_RACC);
+       }
 #endif
 
        /*
index ff2903652f4bbc5ffafcedcd5cb501f31b0436e9..c3b6af83f070e40f083d7580055242fc053c11a2 100644 (file)
@@ -1028,7 +1028,7 @@ static struct net_device_stats *ipg_nic_get_stats(struct net_device *dev)
 
        /* detailed rx_errors */
        sp->stats.rx_length_errors += ipg_r16(IPG_INRANGELENGTHERRORS) +
-               ipg_r16(IPG_FRAMETOOLONGERRRORS);
+               ipg_r16(IPG_FRAMETOOLONGERRORS);
        sp->stats.rx_crc_errors += ipg_r16(IPG_FRAMECHECKSEQERRORS);
 
        /* Unutilized IPG statistic registers. */
index a21e4f5702b57800271bfc8e5d340dd795dbfa58..de606281f97befcc63e1925875a0686f31faff26 100644 (file)
@@ -102,7 +102,7 @@ enum ipg_regs {
 #define        IPG_MCSTFRAMESRCVDOK            0xB8
 #define        IPG_BCSTFRAMESRCVDOK            0xBE
 #define        IPG_MACCONTROLFRAMESRCVD        0xC6
-#define        IPG_FRAMETOOLONGERRRORS         0xC8
+#define        IPG_FRAMETOOLONGERRORS          0xC8
 #define        IPG_INRANGELENGTHERRORS         0xCA
 #define        IPG_FRAMECHECKSEQERRORS         0xCC
 #define        IPG_FRAMESLOSTRXERRORS          0xCE
index b074b9a667b32cceae00965a0031632431e972fe..91a5a0ae9cd73932648492ce532b0e1260f1419c 100644 (file)
@@ -237,17 +237,19 @@ static bool e1000_phy_is_accessible_pchlan(struct e1000_hw *hw)
        if (ret_val)
                return false;
 out:
-       if ((hw->mac.type == e1000_pch_lpt) ||
-           (hw->mac.type == e1000_pch_spt)) {
-               /* Unforce SMBus mode in PHY */
-               e1e_rphy_locked(hw, CV_SMB_CTRL, &phy_reg);
-               phy_reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
-               e1e_wphy_locked(hw, CV_SMB_CTRL, phy_reg);
+       if ((hw->mac.type == e1000_pch_lpt) || (hw->mac.type == e1000_pch_spt)) {
+               /* Only unforce SMBus if ME is not active */
+               if (!(er32(FWSM) & E1000_ICH_FWSM_FW_VALID)) {
+                       /* Unforce SMBus mode in PHY */
+                       e1e_rphy_locked(hw, CV_SMB_CTRL, &phy_reg);
+                       phy_reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
+                       e1e_wphy_locked(hw, CV_SMB_CTRL, phy_reg);
 
-               /* Unforce SMBus mode in MAC */
-               mac_reg = er32(CTRL_EXT);
-               mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS;
-               ew32(CTRL_EXT, mac_reg);
+                       /* Unforce SMBus mode in MAC */
+                       mac_reg = er32(CTRL_EXT);
+                       mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS;
+                       ew32(CTRL_EXT, mac_reg);
+               }
        }
 
        return true;
@@ -1087,6 +1089,7 @@ s32 e1000_enable_ulp_lpt_lp(struct e1000_hw *hw, bool to_sx)
        u32 mac_reg;
        s32 ret_val = 0;
        u16 phy_reg;
+       u16 oem_reg = 0;
 
        if ((hw->mac.type < e1000_pch_lpt) ||
            (hw->adapter->pdev->device == E1000_DEV_ID_PCH_LPT_I217_LM) ||
@@ -1128,33 +1131,37 @@ s32 e1000_enable_ulp_lpt_lp(struct e1000_hw *hw, bool to_sx)
        if (ret_val)
                goto out;
 
+       /* Force SMBus mode in PHY */
+       ret_val = e1000_read_phy_reg_hv_locked(hw, CV_SMB_CTRL, &phy_reg);
+       if (ret_val)
+               goto release;
+       phy_reg |= CV_SMB_CTRL_FORCE_SMBUS;
+       e1000_write_phy_reg_hv_locked(hw, CV_SMB_CTRL, phy_reg);
+
+       /* Force SMBus mode in MAC */
+       mac_reg = er32(CTRL_EXT);
+       mac_reg |= E1000_CTRL_EXT_FORCE_SMBUS;
+       ew32(CTRL_EXT, mac_reg);
+
        /* Si workaround for ULP entry flow on i217/rev6 h/w.  Enable
         * LPLU and disable Gig speed when entering ULP
         */
        if ((hw->phy.type == e1000_phy_i217) && (hw->phy.revision == 6)) {
                ret_val = e1000_read_phy_reg_hv_locked(hw, HV_OEM_BITS,
-                                                      &phy_reg);
+                                                      &oem_reg);
                if (ret_val)
                        goto release;
+
+               phy_reg = oem_reg;
                phy_reg |= HV_OEM_BITS_LPLU | HV_OEM_BITS_GBE_DIS;
+
                ret_val = e1000_write_phy_reg_hv_locked(hw, HV_OEM_BITS,
                                                        phy_reg);
+
                if (ret_val)
                        goto release;
        }
 
-       /* Force SMBus mode in PHY */
-       ret_val = e1000_read_phy_reg_hv_locked(hw, CV_SMB_CTRL, &phy_reg);
-       if (ret_val)
-               goto release;
-       phy_reg |= CV_SMB_CTRL_FORCE_SMBUS;
-       e1000_write_phy_reg_hv_locked(hw, CV_SMB_CTRL, phy_reg);
-
-       /* Force SMBus mode in MAC */
-       mac_reg = er32(CTRL_EXT);
-       mac_reg |= E1000_CTRL_EXT_FORCE_SMBUS;
-       ew32(CTRL_EXT, mac_reg);
-
        /* Set Inband ULP Exit, Reset to SMBus mode and
         * Disable SMBus Release on PERST# in PHY
         */
@@ -1166,10 +1173,15 @@ s32 e1000_enable_ulp_lpt_lp(struct e1000_hw *hw, bool to_sx)
        if (to_sx) {
                if (er32(WUFC) & E1000_WUFC_LNKC)
                        phy_reg |= I218_ULP_CONFIG1_WOL_HOST;
+               else
+                       phy_reg &= ~I218_ULP_CONFIG1_WOL_HOST;
 
                phy_reg |= I218_ULP_CONFIG1_STICKY_ULP;
+               phy_reg &= ~I218_ULP_CONFIG1_INBAND_EXIT;
        } else {
                phy_reg |= I218_ULP_CONFIG1_INBAND_EXIT;
+               phy_reg &= ~I218_ULP_CONFIG1_STICKY_ULP;
+               phy_reg &= ~I218_ULP_CONFIG1_WOL_HOST;
        }
        e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg);
 
@@ -1181,6 +1193,15 @@ s32 e1000_enable_ulp_lpt_lp(struct e1000_hw *hw, bool to_sx)
        /* Commit ULP changes in PHY by starting auto ULP configuration */
        phy_reg |= I218_ULP_CONFIG1_START;
        e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg);
+
+       if ((hw->phy.type == e1000_phy_i217) && (hw->phy.revision == 6) &&
+           to_sx && (er32(STATUS) & E1000_STATUS_LU)) {
+               ret_val = e1000_write_phy_reg_hv_locked(hw, HV_OEM_BITS,
+                                                       oem_reg);
+               if (ret_val)
+                       goto release;
+       }
+
 release:
        hw->phy.ops.release(hw);
 out:
@@ -1379,16 +1400,20 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
        if (((hw->mac.type == e1000_pch2lan) ||
             (hw->mac.type == e1000_pch_lpt) ||
             (hw->mac.type == e1000_pch_spt)) && link) {
-               u32 reg;
+               u16 speed, duplex;
 
-               reg = er32(STATUS);
+               e1000e_get_speed_and_duplex_copper(hw, &speed, &duplex);
                tipg_reg = er32(TIPG);
                tipg_reg &= ~E1000_TIPG_IPGT_MASK;
 
-               if (!(reg & (E1000_STATUS_FD | E1000_STATUS_SPEED_MASK))) {
+               if (duplex == HALF_DUPLEX && speed == SPEED_10) {
                        tipg_reg |= 0xFF;
                        /* Reduce Rx latency in analog PHY */
                        emi_val = 0;
+               } else if (hw->mac.type == e1000_pch_spt &&
+                          duplex == FULL_DUPLEX && speed != SPEED_1000) {
+                       tipg_reg |= 0xC;
+                       emi_val = 1;
                } else {
 
                        /* Roll back the default values */
@@ -1412,14 +1437,59 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
 
                if (ret_val)
                        return ret_val;
+
+               if (hw->mac.type == e1000_pch_spt) {
+                       u16 data;
+                       u16 ptr_gap;
+
+                       if (speed == SPEED_1000) {
+                               ret_val = hw->phy.ops.acquire(hw);
+                               if (ret_val)
+                                       return ret_val;
+
+                               ret_val = e1e_rphy_locked(hw,
+                                                         PHY_REG(776, 20),
+                                                         &data);
+                               if (ret_val) {
+                                       hw->phy.ops.release(hw);
+                                       return ret_val;
+                               }
+
+                               ptr_gap = (data & (0x3FF << 2)) >> 2;
+                               if (ptr_gap < 0x18) {
+                                       data &= ~(0x3FF << 2);
+                                       data |= (0x18 << 2);
+                                       ret_val =
+                                           e1e_wphy_locked(hw,
+                                                           PHY_REG(776, 20),
+                                                           data);
+                               }
+                               hw->phy.ops.release(hw);
+                               if (ret_val)
+                                       return ret_val;
+                       }
+               }
+       }
+
+       /* I217 Packet Loss issue:
+        * ensure that FEXTNVM4 Beacon Duration is set correctly
+        * on power up.
+        * Set the Beacon Duration for I217 to 8 usec
+        */
+       if ((hw->mac.type == e1000_pch_lpt) || (hw->mac.type == e1000_pch_spt)) {
+               u32 mac_reg;
+
+               mac_reg = er32(FEXTNVM4);
+               mac_reg &= ~E1000_FEXTNVM4_BEACON_DURATION_MASK;
+               mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_8USEC;
+               ew32(FEXTNVM4, mac_reg);
        }
 
        /* Work-around I218 hang issue */
        if ((hw->adapter->pdev->device == E1000_DEV_ID_PCH_LPTLP_I218_LM) ||
            (hw->adapter->pdev->device == E1000_DEV_ID_PCH_LPTLP_I218_V) ||
            (hw->adapter->pdev->device == E1000_DEV_ID_PCH_I218_LM3) ||
-           (hw->adapter->pdev->device == E1000_DEV_ID_PCH_I218_V3) ||
-           (hw->mac.type == e1000_pch_spt)) {
+           (hw->adapter->pdev->device == E1000_DEV_ID_PCH_I218_V3)) {
                ret_val = e1000_k1_workaround_lpt_lp(hw, link);
                if (ret_val)
                        return ret_val;
index e62b9dcb91fe51309ff7280a20fcbd4d37000548..89d788d8f263e5c362c10166dc76fa59f517e12c 100644 (file)
@@ -6354,13 +6354,14 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool runtime)
 }
 
 /**
- * e1000e_disable_aspm - Disable ASPM states
+ * __e1000e_disable_aspm - Disable ASPM states
  * @pdev: pointer to PCI device struct
  * @state: bit-mask of ASPM states to disable
+ * @locked: indicates whether the caller already holds pci_bus_sem.
  *
  * Some devices *must* have certain ASPM states disabled per hardware errata.
  **/
-static void e1000e_disable_aspm(struct pci_dev *pdev, u16 state)
+static void __e1000e_disable_aspm(struct pci_dev *pdev, u16 state, int locked)
 {
        struct pci_dev *parent = pdev->bus->self;
        u16 aspm_dis_mask = 0;
@@ -6399,7 +6400,10 @@ static void e1000e_disable_aspm(struct pci_dev *pdev, u16 state)
                 "L1" : "");
 
 #ifdef CONFIG_PCIEASPM
-       pci_disable_link_state_locked(pdev, state);
+       if (locked)
+               pci_disable_link_state_locked(pdev, state);
+       else
+               pci_disable_link_state(pdev, state);
 
        /* Double-check ASPM control.  If not disabled by the above, the
         * BIOS is preventing that from happening (or CONFIG_PCIEASPM is
@@ -6422,6 +6426,32 @@ static void e1000e_disable_aspm(struct pci_dev *pdev, u16 state)
                                           aspm_dis_mask);
 }
 
+/**
+ * e1000e_disable_aspm - Disable ASPM states.
+ * @pdev: pointer to PCI device struct
+ * @state: bit-mask of ASPM states to disable
+ *
+ * This function acquires the pci_bus_sem!
+ * Some devices *must* have certain ASPM states disabled per hardware errata.
+ **/
+static void e1000e_disable_aspm(struct pci_dev *pdev, u16 state)
+{
+       __e1000e_disable_aspm(pdev, state, 0);
+}
+
+/**
+ * e1000e_disable_aspm_locked - Disable ASPM states.
+ * @pdev: pointer to PCI device struct
+ * @state: bit-mask of ASPM states to disable
+ *
+ * This function must be called with pci_bus_sem acquired!
+ * Some devices *must* have certain ASPM states disabled per hardware errata.
+ **/
+static void e1000e_disable_aspm_locked(struct pci_dev *pdev, u16 state)
+{
+       __e1000e_disable_aspm(pdev, state, 1);
+}
+
 #ifdef CONFIG_PM
 static int __e1000_resume(struct pci_dev *pdev)
 {
@@ -6435,7 +6465,7 @@ static int __e1000_resume(struct pci_dev *pdev)
        if (adapter->flags2 & FLAG2_DISABLE_ASPM_L1)
                aspm_disable_flag |= PCIE_LINK_STATE_L1;
        if (aspm_disable_flag)
-               e1000e_disable_aspm(pdev, aspm_disable_flag);
+               e1000e_disable_aspm_locked(pdev, aspm_disable_flag);
 
        pci_set_master(pdev);
 
index f54996f196293d8cf0c1942effe40c2e0e77b77e..395f32f226c08ac924e7d3e707ef7124b2744ec5 100644 (file)
@@ -484,6 +484,8 @@ int i40evf_setup_tx_descriptors(struct i40e_ring *tx_ring)
        if (!dev)
                return -ENOMEM;
 
+       /* warn if we are about to overwrite the pointer */
+       WARN_ON(tx_ring->tx_bi);
        bi_size = sizeof(struct i40e_tx_buffer) * tx_ring->count;
        tx_ring->tx_bi = kzalloc(bi_size, GFP_KERNEL);
        if (!tx_ring->tx_bi)
@@ -644,6 +646,8 @@ int i40evf_setup_rx_descriptors(struct i40e_ring *rx_ring)
        struct device *dev = rx_ring->dev;
        int bi_size;
 
+       /* warn if we are about to overwrite the pointer */
+       WARN_ON(rx_ring->rx_bi);
        bi_size = sizeof(struct i40e_rx_buffer) * rx_ring->count;
        rx_ring->rx_bi = kzalloc(bi_size, GFP_KERNEL);
        if (!rx_ring->rx_bi)
index 1b98c25b3092ac4b753eb280a9e38bc3d1fe08bd..fea3b75a9a35fcdc58b9d5f5d0f6125dbf62e0cf 100644 (file)
@@ -264,7 +264,6 @@ extern const char i40evf_driver_version[];
 
 int i40evf_up(struct i40evf_adapter *adapter);
 void i40evf_down(struct i40evf_adapter *adapter);
-void i40evf_reinit_locked(struct i40evf_adapter *adapter);
 void i40evf_reset(struct i40evf_adapter *adapter);
 void i40evf_set_ethtool_ops(struct net_device *netdev);
 void i40evf_update_stats(struct i40evf_adapter *adapter);
index f4e77665bc54b9058c85c2c8e62add23fa49b9b8..2b53c870e7f113ca0695afab3636446e1015e4e8 100644 (file)
@@ -267,8 +267,10 @@ static int i40evf_set_ringparam(struct net_device *netdev,
        adapter->tx_desc_count = new_tx_count;
        adapter->rx_desc_count = new_rx_count;
 
-       if (netif_running(netdev))
-               i40evf_reinit_locked(adapter);
+       if (netif_running(netdev)) {
+               adapter->flags |= I40EVF_FLAG_RESET_NEEDED;
+               schedule_work(&adapter->reset_task);
+       }
 
        return 0;
 }
index 7c53aca4b5a6f0b8726c32bee935478d65e3cd47..4ab4ebba07a18e5b1b0539cf0c0b8a7122f6fdc2 100644 (file)
@@ -170,7 +170,8 @@ static void i40evf_tx_timeout(struct net_device *netdev)
        struct i40evf_adapter *adapter = netdev_priv(netdev);
 
        adapter->tx_timeout_count++;
-       if (!(adapter->flags & I40EVF_FLAG_RESET_PENDING)) {
+       if (!(adapter->flags & (I40EVF_FLAG_RESET_PENDING |
+                               I40EVF_FLAG_RESET_NEEDED))) {
                adapter->flags |= I40EVF_FLAG_RESET_NEEDED;
                schedule_work(&adapter->reset_task);
        }
@@ -1460,7 +1461,7 @@ static void i40evf_configure_rss(struct i40evf_adapter *adapter)
        for (i = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++) {
                lut = 0;
                for (j = 0; j < 4; j++) {
-                       if (cqueue == adapter->vsi_res->num_queue_pairs)
+                       if (cqueue == adapter->num_active_queues)
                                cqueue = 0;
                        lut |= ((cqueue) << (8 * j));
                        cqueue++;
@@ -1470,8 +1471,8 @@ static void i40evf_configure_rss(struct i40evf_adapter *adapter)
        i40e_flush(hw);
 }
 
-#define I40EVF_RESET_WAIT_MS 100
-#define I40EVF_RESET_WAIT_COUNT 200
+#define I40EVF_RESET_WAIT_MS 10
+#define I40EVF_RESET_WAIT_COUNT 500
 /**
  * i40evf_reset_task - Call-back task to handle hardware reset
  * @work: pointer to work_struct
@@ -1495,10 +1496,17 @@ static void i40evf_reset_task(struct work_struct *work)
                                &adapter->crit_section))
                usleep_range(500, 1000);
 
+       i40evf_misc_irq_disable(adapter);
        if (adapter->flags & I40EVF_FLAG_RESET_NEEDED) {
-               dev_info(&adapter->pdev->dev, "Requesting reset from PF\n");
+               adapter->flags &= ~I40EVF_FLAG_RESET_NEEDED;
+               /* Restart the AQ here. If we have been reset but didn't
+                * detect it, or if the PF had to reinit, our AQ will be hosed.
+                */
+               i40evf_shutdown_adminq(hw);
+               i40evf_init_adminq(hw);
                i40evf_request_reset(adapter);
        }
+       adapter->flags |= I40EVF_FLAG_RESET_PENDING;
 
        /* poll until we see the reset actually happen */
        for (i = 0; i < I40EVF_RESET_WAIT_COUNT; i++) {
@@ -1507,10 +1515,10 @@ static void i40evf_reset_task(struct work_struct *work)
                if ((rstat_val != I40E_VFR_VFACTIVE) &&
                    (rstat_val != I40E_VFR_COMPLETED))
                        break;
-               msleep(I40EVF_RESET_WAIT_MS);
+               usleep_range(500, 1000);
        }
        if (i == I40EVF_RESET_WAIT_COUNT) {
-               adapter->flags &= ~I40EVF_FLAG_RESET_PENDING;
+               dev_info(&adapter->pdev->dev, "Never saw reset\n");
                goto continue_reset; /* act like the reset happened */
        }
 
@@ -1518,11 +1526,12 @@ static void i40evf_reset_task(struct work_struct *work)
        for (i = 0; i < I40EVF_RESET_WAIT_COUNT; i++) {
                rstat_val = rd32(hw, I40E_VFGEN_RSTAT) &
                            I40E_VFGEN_RSTAT_VFR_STATE_MASK;
-               if ((rstat_val == I40E_VFR_VFACTIVE) ||
-                   (rstat_val == I40E_VFR_COMPLETED))
+               if (rstat_val == I40E_VFR_VFACTIVE)
                        break;
                msleep(I40EVF_RESET_WAIT_MS);
        }
+       /* extra wait to make sure minimum wait is met */
+       msleep(I40EVF_RESET_WAIT_MS);
        if (i == I40EVF_RESET_WAIT_COUNT) {
                struct i40evf_mac_filter *f, *ftmp;
                struct i40evf_vlan_filter *fv, *fvtmp;
@@ -1534,11 +1543,10 @@ static void i40evf_reset_task(struct work_struct *work)
 
                if (netif_running(adapter->netdev)) {
                        set_bit(__I40E_DOWN, &adapter->vsi.state);
-                       i40evf_irq_disable(adapter);
-                       i40evf_napi_disable_all(adapter);
-                       netif_tx_disable(netdev);
-                       netif_tx_stop_all_queues(netdev);
                        netif_carrier_off(netdev);
+                       netif_tx_disable(netdev);
+                       i40evf_napi_disable_all(adapter);
+                       i40evf_irq_disable(adapter);
                        i40evf_free_traffic_irqs(adapter);
                        i40evf_free_all_tx_resources(adapter);
                        i40evf_free_all_rx_resources(adapter);
@@ -1550,6 +1558,7 @@ static void i40evf_reset_task(struct work_struct *work)
                        list_del(&f->list);
                        kfree(f);
                }
+
                list_for_each_entry_safe(fv, fvtmp, &adapter->vlan_filter_list,
                                         list) {
                        list_del(&fv->list);
@@ -1564,22 +1573,27 @@ static void i40evf_reset_task(struct work_struct *work)
                i40evf_shutdown_adminq(hw);
                adapter->netdev->flags &= ~IFF_UP;
                clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
+               adapter->flags &= ~I40EVF_FLAG_RESET_PENDING;
+               dev_info(&adapter->pdev->dev, "Reset task did not complete, VF disabled\n");
                return; /* Do not attempt to reinit. It's dead, Jim. */
        }
 
 continue_reset:
-       adapter->flags &= ~I40EVF_FLAG_RESET_PENDING;
-
-       i40evf_irq_disable(adapter);
-
        if (netif_running(adapter->netdev)) {
-               i40evf_napi_disable_all(adapter);
-               netif_tx_disable(netdev);
-               netif_tx_stop_all_queues(netdev);
                netif_carrier_off(netdev);
+               netif_tx_stop_all_queues(netdev);
+               i40evf_napi_disable_all(adapter);
        }
+       i40evf_irq_disable(adapter);
 
        adapter->state = __I40EVF_RESETTING;
+       adapter->flags &= ~I40EVF_FLAG_RESET_PENDING;
+
+       /* free the Tx/Rx rings and descriptors, might be better to just
+        * re-use them sometime in the future
+        */
+       i40evf_free_all_rx_resources(adapter);
+       i40evf_free_all_tx_resources(adapter);
 
        /* kill and reinit the admin queue */
        if (i40evf_shutdown_adminq(hw))
@@ -1603,6 +1617,7 @@ continue_reset:
        adapter->aq_required = I40EVF_FLAG_AQ_ADD_MAC_FILTER;
        adapter->aq_required |= I40EVF_FLAG_AQ_ADD_VLAN_FILTER;
        clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
+       i40evf_misc_irq_enable(adapter);
 
        mod_timer(&adapter->watchdog_timer, jiffies + 2);
 
@@ -1624,7 +1639,10 @@ continue_reset:
                        goto reset_err;
 
                i40evf_irq_enable(adapter, true);
+       } else {
+               adapter->state = __I40EVF_DOWN;
        }
+
        return;
 reset_err:
        dev_err(&adapter->pdev->dev, "failed to allocate resources during reinit\n");
@@ -1667,6 +1685,11 @@ static void i40evf_adminq_task(struct work_struct *work)
                        memset(event.msg_buf, 0, I40EVF_MAX_AQ_BUF_SIZE);
        } while (pending);
 
+       if ((adapter->flags &
+            (I40EVF_FLAG_RESET_PENDING | I40EVF_FLAG_RESET_NEEDED)) ||
+           adapter->state == __I40EVF_RESETTING)
+               goto freedom;
+
        /* check for error indications */
        val = rd32(hw, hw->aq.arq.len);
        oldval = val;
@@ -1702,6 +1725,7 @@ static void i40evf_adminq_task(struct work_struct *work)
        if (oldval != val)
                wr32(hw, hw->aq.asq.len, val);
 
+freedom:
        kfree(event.msg_buf);
 out:
        /* re-enable Admin queue interrupt cause */
@@ -1896,47 +1920,6 @@ static struct net_device_stats *i40evf_get_stats(struct net_device *netdev)
        return &adapter->net_stats;
 }
 
-/**
- * i40evf_reinit_locked - Software reinit
- * @adapter: board private structure
- *
- * Reinititalizes the ring structures in response to a software configuration
- * change. Roughly the same as close followed by open, but skips releasing
- * and reallocating the interrupts.
- **/
-void i40evf_reinit_locked(struct i40evf_adapter *adapter)
-{
-       struct net_device *netdev = adapter->netdev;
-       int err;
-
-       WARN_ON(in_interrupt());
-
-       i40evf_down(adapter);
-
-       /* allocate transmit descriptors */
-       err = i40evf_setup_all_tx_resources(adapter);
-       if (err)
-               goto err_reinit;
-
-       /* allocate receive descriptors */
-       err = i40evf_setup_all_rx_resources(adapter);
-       if (err)
-               goto err_reinit;
-
-       i40evf_configure(adapter);
-
-       err = i40evf_up_complete(adapter);
-       if (err)
-               goto err_reinit;
-
-       i40evf_irq_enable(adapter, true);
-       return;
-
-err_reinit:
-       dev_err(&adapter->pdev->dev, "failed to allocate resources during reinit\n");
-       i40evf_close(netdev);
-}
-
 /**
  * i40evf_change_mtu - Change the Maximum Transfer Unit
  * @netdev: network interface device structure
@@ -1952,9 +1935,10 @@ static int i40evf_change_mtu(struct net_device *netdev, int new_mtu)
        if ((new_mtu < 68) || (max_frame > I40E_MAX_RXBUFFER))
                return -EINVAL;
 
-       /* must set new MTU before calling down or up */
        netdev->mtu = new_mtu;
-       i40evf_reinit_locked(adapter);
+       adapter->flags |= I40EVF_FLAG_RESET_NEEDED;
+       schedule_work(&adapter->reset_task);
+
        return 0;
 }
 
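Editor's note: after the i40evf changes above, every caller that needs a full reinit (ethtool ring resize, MTU change, Tx timeout) simply sets I40EVF_FLAG_RESET_NEEDED and schedules reset_task, which clears NEEDED, marks RESET_PENDING, and polls the VF status register before rebuilding the rings. Below is a toy plain-C sketch of that two-flag hand-off with invented names and a fake "register"; it is not driver code.

/* Plain-C sketch of the RESET_NEEDED -> RESET_PENDING hand-off; no kernel APIs. */
#include <stdio.h>

#define FLAG_RESET_NEEDED  (1u << 0)
#define FLAG_RESET_PENDING (1u << 1)

static unsigned int flags;
static int hw_in_reset;                  /* stands in for the VF status register */

/* What a requester does (the Tx-timeout path also checks both flags first). */
static void request_reset(void)
{
	if (!(flags & (FLAG_RESET_NEEDED | FLAG_RESET_PENDING)))
		flags |= FLAG_RESET_NEEDED;  /* and schedule the reset worker */
}

/* What the worker does once it runs. */
static void reset_task(void)
{
	if (flags & FLAG_RESET_NEEDED) {
		flags &= ~FLAG_RESET_NEEDED;
		hw_in_reset = 1;             /* ask the PF for a reset */
	}
	flags |= FLAG_RESET_PENDING;

	while (hw_in_reset)                  /* the driver polls I40E_VFGEN_RSTAT */
		hw_in_reset = 0;             /* pretend the reset completed */

	flags &= ~FLAG_RESET_PENDING;        /* ring/queue rebuild would go here */
}

int main(void)
{
	request_reset();
	reset_task();
	printf("flags after reset: %#x\n", flags);
	return 0;
}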
index 0f69ef81751a3d8154db558cc8f3d11e882928a0..b0182dd313464ccceb85dd19c9489fcd7b3cd9c6 100644 (file)
@@ -1,5 +1,5 @@
 /* Intel(R) Gigabit Ethernet Linux driver
- * Copyright(c) 2007-2014 Intel Corporation.
+ * Copyright(c) 2007-2015 Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -1900,8 +1900,8 @@ static void igb_clear_hw_cntrs_82575(struct e1000_hw *hw)
  *  igb_rx_fifo_flush_82575 - Clean rx fifo after RX enable
  *  @hw: pointer to the HW structure
  *
- *  After rx enable if managability is enabled then there is likely some
- *  bad data at the start of the fifo and possibly in the DMA fifo.  This
+ *  After rx enable if manageability is enabled then there is likely some
+ *  bad data at the start of the fifo and possibly in the DMA fifo. This
  *  function clears the fifos and flushes any packets that came in as rx was
  *  being enabled.
  **/
@@ -1910,6 +1910,11 @@ void igb_rx_fifo_flush_82575(struct e1000_hw *hw)
        u32 rctl, rlpml, rxdctl[4], rfctl, temp_rctl, rx_enabled;
        int i, ms_wait;
 
+       /* disable IPv6 options as per hardware errata */
+       rfctl = rd32(E1000_RFCTL);
+       rfctl |= E1000_RFCTL_IPV6_EX_DIS;
+       wr32(E1000_RFCTL, rfctl);
+
        if (hw->mac.type != e1000_82575 ||
            !(rd32(E1000_MANC) & E1000_MANC_RCV_TCO_EN))
                return;
@@ -1937,7 +1942,6 @@ void igb_rx_fifo_flush_82575(struct e1000_hw *hw)
         * incoming packets are rejected.  Set enable and wait 2ms so that
         * any packet that was coming in as RCTL.EN was set is flushed
         */
-       rfctl = rd32(E1000_RFCTL);
        wr32(E1000_RFCTL, rfctl & ~E1000_RFCTL_LEF);
 
        rlpml = rd32(E1000_RLPML);
index 217f8138851bf3e229d6a0035b442e0cc3ae8175..f8684aa285be8cac987263db6676f9d1076b5f9b 100644 (file)
 #define E1000_RXCSUM_PCSD      0x00002000   /* packet checksum disabled */
 
 /* Header split receive */
-#define E1000_RFCTL_LEF        0x00040000
+#define E1000_RFCTL_IPV6_EX_DIS         0x00010000
+#define E1000_RFCTL_LEF                 0x00040000
 
 /* Collision related configuration parameters */
 #define E1000_COLLISION_THRESHOLD       15
index f287186192bb655ba2dc1a205fb251351d593e98..2f70a9b152bd1789349d9c4d995852e95be1e70d 100644 (file)
@@ -58,7 +58,7 @@
 
 #define MAJ 5
 #define MIN 2
-#define BUILD 15
+#define BUILD 18
 #define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) "." \
 __stringify(BUILD) "-k"
 char igb_driver_name[] = "igb";
index 5bdf78231a4e78f09c360e4a2e5c0c7c5f82482b..370e20ed224c5c76eaca92954be5800d09d81ada 100644 (file)
@@ -310,6 +310,7 @@ struct mvneta_port {
        unsigned int link;
        unsigned int duplex;
        unsigned int speed;
+       unsigned int tx_csum_limit;
        int use_inband_status:1;
 };
 
@@ -2508,8 +2509,10 @@ static int mvneta_change_mtu(struct net_device *dev, int mtu)
 
        dev->mtu = mtu;
 
-       if (!netif_running(dev))
+       if (!netif_running(dev)) {
+               netdev_update_features(dev);
                return 0;
+       }
 
        /* The interface is running, so we have to force a
         * reallocation of the queues
@@ -2538,9 +2541,26 @@ static int mvneta_change_mtu(struct net_device *dev, int mtu)
        mvneta_start_dev(pp);
        mvneta_port_up(pp);
 
+       netdev_update_features(dev);
+
        return 0;
 }
 
+static netdev_features_t mvneta_fix_features(struct net_device *dev,
+                                            netdev_features_t features)
+{
+       struct mvneta_port *pp = netdev_priv(dev);
+
+       if (pp->tx_csum_limit && dev->mtu > pp->tx_csum_limit) {
+               features &= ~(NETIF_F_IP_CSUM | NETIF_F_TSO);
+               netdev_info(dev,
+                           "Disable IP checksum for MTU greater than %dB\n",
+                           pp->tx_csum_limit);
+       }
+
+       return features;
+}
+
 /* Get mac address */
 static void mvneta_get_mac_addr(struct mvneta_port *pp, unsigned char *addr)
 {
@@ -2862,6 +2882,7 @@ static const struct net_device_ops mvneta_netdev_ops = {
        .ndo_set_rx_mode     = mvneta_set_rx_mode,
        .ndo_set_mac_address = mvneta_set_mac_addr,
        .ndo_change_mtu      = mvneta_change_mtu,
+       .ndo_fix_features    = mvneta_fix_features,
        .ndo_get_stats64     = mvneta_get_stats64,
        .ndo_do_ioctl        = mvneta_ioctl,
 };
@@ -3107,6 +3128,9 @@ static int mvneta_probe(struct platform_device *pdev)
                }
        }
 
+       if (of_device_is_compatible(dn, "marvell,armada-370-neta"))
+               pp->tx_csum_limit = 1600;
+
        pp->tx_ring_size = MVNETA_MAX_TXD;
        pp->rx_ring_size = MVNETA_MAX_RXD;
 
@@ -3185,6 +3209,7 @@ static int mvneta_remove(struct platform_device *pdev)
 
 static const struct of_device_id mvneta_match[] = {
        { .compatible = "marvell,armada-370-neta" },
+       { .compatible = "marvell,armada-xp-neta" },
        { }
 };
 MODULE_DEVICE_TABLE(of, mvneta_match);
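Editor's note: the new .ndo_fix_features hook above drops hardware checksumming and TSO whenever the MTU exceeds the 1600-byte Tx checksum limit set for the Armada 370 variant. A standalone sketch of that clamp, using hypothetical feature bits and plain C, for illustration only:

/* Illustrative only: clamp an offload feature mask against a Tx csum MTU limit. */
#include <stdio.h>

#define F_IP_CSUM (1u << 0)
#define F_TSO     (1u << 1)

static unsigned int fix_features(unsigned int features,
				 unsigned int mtu, unsigned int tx_csum_limit)
{
	/* Mirrors the idea in mvneta_fix_features(): past the limit, fall back
	 * to software checksumming by clearing the offload bits.
	 */
	if (tx_csum_limit && mtu > tx_csum_limit)
		features &= ~(F_IP_CSUM | F_TSO);
	return features;
}

int main(void)
{
	unsigned int feats = F_IP_CSUM | F_TSO;

	printf("mtu 1500 -> %#x\n", fix_features(feats, 1500, 1600));
	printf("mtu 9000 -> %#x\n", fix_features(feats, 9000, 1600));
	return 0;
}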
index 77179d7ae4cc786c9bc4bb4ea39763522da54d64..e0de2fd1ce124d3d668659b89544d172164037f4 100644 (file)
@@ -1977,10 +1977,6 @@ void mlx4_en_free_resources(struct mlx4_en_priv *priv)
                        mlx4_en_destroy_cq(priv, &priv->rx_cq[i]);
        }
 
-       if (priv->base_tx_qpn) {
-               mlx4_qp_release_range(priv->mdev->dev, priv->base_tx_qpn, priv->tx_ring_num);
-               priv->base_tx_qpn = 0;
-       }
 }
 
 int mlx4_en_alloc_resources(struct mlx4_en_priv *priv)
index 35f726c17e48c80bdadfc07ba6a43974619c6938..7a4f20bb7fcb4c2640ad8111f5a98ff95088075c 100644 (file)
@@ -718,7 +718,7 @@ static int get_fixed_ipv6_csum(__wsum hw_checksum, struct sk_buff *skb,
 }
 #endif
 static int check_csum(struct mlx4_cqe *cqe, struct sk_buff *skb, void *va,
-                     int hwtstamp_rx_filter)
+                     netdev_features_t dev_features)
 {
        __wsum hw_checksum = 0;
 
@@ -726,14 +726,8 @@ static int check_csum(struct mlx4_cqe *cqe, struct sk_buff *skb, void *va,
 
        hw_checksum = csum_unfold((__force __sum16)cqe->checksum);
 
-       if (((struct ethhdr *)va)->h_proto == htons(ETH_P_8021Q) &&
-           hwtstamp_rx_filter != HWTSTAMP_FILTER_NONE) {
-               /* next protocol non IPv4 or IPv6 */
-               if (((struct vlan_hdr *)hdr)->h_vlan_encapsulated_proto
-                   != htons(ETH_P_IP) &&
-                   ((struct vlan_hdr *)hdr)->h_vlan_encapsulated_proto
-                   != htons(ETH_P_IPV6))
-                       return -1;
+       if (cqe->vlan_my_qpn & cpu_to_be32(MLX4_CQE_VLAN_PRESENT_MASK) &&
+           !(dev_features & NETIF_F_HW_VLAN_CTAG_RX)) {
                hw_checksum = get_fixed_vlan_csum(hw_checksum, hdr);
                hdr += sizeof(struct vlan_hdr);
        }
@@ -896,7 +890,8 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
 
                        if (ip_summed == CHECKSUM_COMPLETE) {
                                void *va = skb_frag_address(skb_shinfo(gro_skb)->frags);
-                               if (check_csum(cqe, gro_skb, va, ring->hwtstamp_rx_filter)) {
+                               if (check_csum(cqe, gro_skb, va,
+                                              dev->features)) {
                                        ip_summed = CHECKSUM_NONE;
                                        ring->csum_none++;
                                        ring->csum_complete--;
@@ -951,7 +946,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
                }
 
                if (ip_summed == CHECKSUM_COMPLETE) {
-                       if (check_csum(cqe, skb, skb->data, ring->hwtstamp_rx_filter)) {
+                       if (check_csum(cqe, skb, skb->data, dev->features)) {
                                ip_summed = CHECKSUM_NONE;
                                ring->csum_complete--;
                                ring->csum_none++;
index 7bed3a88579fa9db92d7e42ad7d43265bd8a3d41..c10d98f6ad967b13640b5d9b2fe033f377565ff0 100644 (file)
@@ -66,6 +66,7 @@ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
        ring->size = size;
        ring->size_mask = size - 1;
        ring->stride = stride;
+       ring->full_size = ring->size - HEADROOM - MAX_DESC_TXBBS;
 
        tmp = size * sizeof(struct mlx4_en_tx_info);
        ring->tx_info = kmalloc_node(tmp, GFP_KERNEL | __GFP_NOWARN, node);
@@ -180,6 +181,7 @@ void mlx4_en_destroy_tx_ring(struct mlx4_en_priv *priv,
                mlx4_bf_free(mdev->dev, &ring->bf);
        mlx4_qp_remove(mdev->dev, &ring->qp);
        mlx4_qp_free(mdev->dev, &ring->qp);
+       mlx4_qp_release_range(priv->mdev->dev, ring->qpn, 1);
        mlx4_en_unmap_buffer(&ring->wqres.buf);
        mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size);
        kfree(ring->bounce_buf);
@@ -231,6 +233,11 @@ void mlx4_en_deactivate_tx_ring(struct mlx4_en_priv *priv,
                       MLX4_QP_STATE_RST, NULL, 0, 0, &ring->qp);
 }
 
+static inline bool mlx4_en_is_tx_ring_full(struct mlx4_en_tx_ring *ring)
+{
+       return ring->prod - ring->cons > ring->full_size;
+}
+
 static void mlx4_en_stamp_wqe(struct mlx4_en_priv *priv,
                              struct mlx4_en_tx_ring *ring, int index,
                              u8 owner)
@@ -473,11 +480,10 @@ static bool mlx4_en_process_tx_cq(struct net_device *dev,
 
        netdev_tx_completed_queue(ring->tx_queue, packets, bytes);
 
-       /*
-        * Wakeup Tx queue if this stopped, and at least 1 packet
-        * was completed
+       /* Wake up the Tx queue if it was stopped and the ring is not full.
         */
-       if (netif_tx_queue_stopped(ring->tx_queue) && txbbs_skipped > 0) {
+       if (netif_tx_queue_stopped(ring->tx_queue) &&
+           !mlx4_en_is_tx_ring_full(ring)) {
                netif_tx_wake_queue(ring->tx_queue);
                ring->wake_queue++;
        }
@@ -921,8 +927,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
        skb_tx_timestamp(skb);
 
        /* Check available TXBBs And 2K spare for prefetch */
-       stop_queue = (int)(ring->prod - ring_cons) >
-                     ring->size - HEADROOM - MAX_DESC_TXBBS;
+       stop_queue = mlx4_en_is_tx_ring_full(ring);
        if (unlikely(stop_queue)) {
                netif_tx_stop_queue(ring->tx_queue);
                ring->queue_stopped++;
@@ -991,8 +996,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
                smp_rmb();
 
                ring_cons = ACCESS_ONCE(ring->cons);
-               if (unlikely(((int)(ring->prod - ring_cons)) <=
-                            ring->size - HEADROOM - MAX_DESC_TXBBS)) {
+               if (unlikely(!mlx4_en_is_tx_ring_full(ring))) {
                        netif_tx_wake_queue(ring->tx_queue);
                        ring->wake_queue++;
                }
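Editor's note: mlx4_en_is_tx_ring_full() centralises the "prod - cons > full_size" test, which stays correct even after the 32-bit producer/consumer counters wrap because the subtraction is done in unsigned arithmetic. A tiny self-contained sketch of the wrap-safe distance check (names invented, not driver code):

/* Wrap-safe ring occupancy check, in the spirit of mlx4_en_is_tx_ring_full(). */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct ring {
	uint32_t prod;       /* advanced by the producer, never reset */
	uint32_t cons;       /* advanced by the consumer, never reset */
	uint32_t full_size;  /* size minus headroom for in-flight descriptors */
};

static bool ring_is_full(const struct ring *r)
{
	/* Unsigned subtraction gives the outstanding count even when
	 * prod has wrapped past UINT32_MAX while cons has not.
	 */
	return r->prod - r->cons > r->full_size;
}

int main(void)
{
	struct ring r = { .prod = 5, .cons = UINT32_MAX - 2, .full_size = 16 };

	/* 5 - (UINT32_MAX - 2) == 8 outstanding entries despite the wrap. */
	printf("outstanding=%u full=%d\n", r.prod - r.cons, ring_is_full(&r));
	return 0;
}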
index 6fce58718837202bd82739dd8592b753ece7ef42..0d80aed5904371c2a2358a99618e7a2328b50c09 100644 (file)
@@ -93,8 +93,14 @@ int mlx4_register_interface(struct mlx4_interface *intf)
        mutex_lock(&intf_mutex);
 
        list_add_tail(&intf->list, &intf_list);
-       list_for_each_entry(priv, &dev_list, dev_list)
+       list_for_each_entry(priv, &dev_list, dev_list) {
+               if (mlx4_is_mfunc(&priv->dev) && (intf->flags & MLX4_INTFF_BONDING)) {
+                       mlx4_dbg(&priv->dev,
+                                "SRIOV, disabling HA mode for intf proto %d\n", intf->protocol);
+                       intf->flags &= ~MLX4_INTFF_BONDING;
+               }
                mlx4_add_device(intf, priv);
+       }
 
        mutex_unlock(&intf_mutex);
 
index d5f9adb6a78491d37522caa4de869e4695f83f25..666d1669eb5233f9a8e6baf5773621159375af25 100644 (file)
@@ -279,6 +279,7 @@ struct mlx4_en_tx_ring {
        u32                     size; /* number of TXBBs */
        u32                     size_mask;
        u16                     stride;
+       u32                     full_size;
        u16                     cqn;    /* index of port CQ associated with this ring */
        u32                     buf_size;
        __be32                  doorbell_qpn;
@@ -580,7 +581,6 @@ struct mlx4_en_priv {
        int vids[128];
        bool wol;
        struct device *ddev;
-       int base_tx_qpn;
        struct hlist_head mac_hash[MLX4_EN_MAC_HASH_SIZE];
        struct hwtstamp_config hwtstamp_config;
        u32 counter_index;
index 2bae50292dcd814a2b8cb338da1bb0a6beac82f0..83651ac8ddb9d54ca8d7548dc3ca070c29bc5e59 100644 (file)
@@ -279,7 +279,7 @@ MODULE_FIRMWARE("myri10ge_eth_z8e.dat");
 MODULE_FIRMWARE("myri10ge_rss_ethp_z8e.dat");
 MODULE_FIRMWARE("myri10ge_rss_eth_z8e.dat");
 
-/* Careful: must be accessed under kparam_block_sysfs_write */
+/* Careful: must be accessed under kernel_param_lock() */
 static char *myri10ge_fw_name = NULL;
 module_param(myri10ge_fw_name, charp, S_IRUGO | S_IWUSR);
 MODULE_PARM_DESC(myri10ge_fw_name, "Firmware image name");
@@ -3427,7 +3427,7 @@ static void myri10ge_select_firmware(struct myri10ge_priv *mgp)
                }
        }
 
-       kparam_block_sysfs_write(myri10ge_fw_name);
+       kernel_param_lock(THIS_MODULE);
        if (myri10ge_fw_name != NULL) {
                char *fw_name = kstrdup(myri10ge_fw_name, GFP_KERNEL);
                if (fw_name) {
@@ -3435,7 +3435,7 @@ static void myri10ge_select_firmware(struct myri10ge_priv *mgp)
                        set_fw_name(mgp, fw_name, true);
                }
        }
-       kparam_unblock_sysfs_write(myri10ge_fw_name);
+       kernel_param_unlock(THIS_MODULE);
 
        if (mgp->board_number < MYRI10GE_MAX_BOARDS &&
            myri10ge_fw_names[mgp->board_number] != NULL &&
index 42656da5050063ca382a6bb24375ec08ff672385..7a8ce920c49e709b067321ae91306153f4f24390 100644 (file)
@@ -116,8 +116,10 @@ static int ravb_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
        priv->ptp.current_addend = addend;
 
        gccr = ravb_read(ndev, GCCR);
-       if (gccr & GCCR_LTI)
+       if (gccr & GCCR_LTI) {
+               spin_unlock_irqrestore(&priv->lock, flags);
                return -EBUSY;
+       }
        ravb_write(ndev, addend & GTI_TIV, GTI);
        ravb_write(ndev, gccr | GCCR_LTI, GCCR);
 
index 1341f33e60843029e1e1ecd035fcbebc0fccc1cb..7d430d3229310a45e3fa67df26d5570dc7b8992c 100644 (file)
@@ -56,7 +56,7 @@ enum sis900_configuration_register_bits {
        EDB_MASTER_EN = 0x00002000
 };
 
-enum sis900_eeprom_access_reigster_bits {
+enum sis900_eeprom_access_register_bits {
        MDC  = 0x00000040, MDDIR = 0x00000020, MDIO = 0x00000010, /* 7016 specific */
        EECS = 0x00000008, EECLK = 0x00000004, EEDO = 0x00000002,
        EEDI = 0x00000001
@@ -73,7 +73,7 @@ enum sis900_interrupt_register_bits {
        RxERR  = 0x00000004, RxDESC = 0x00000002, RxOK  = 0x00000001
 };
 
-enum sis900_interrupt_enable_reigster_bits {
+enum sis900_interrupt_enable_register_bits {
        IE = 0x00000001
 };
 
index 08c483bd2ec7bd94d5434f9567c75f609ce27d35..3f20bb1fe570c086e53d0bb5d1ca8124d971fe5d 100644 (file)
@@ -73,7 +73,7 @@
 #define MMC_RX_OCTETCOUNT_G            0x00000188
 #define MMC_RX_BROADCASTFRAME_G                0x0000018c
 #define MMC_RX_MULTICASTFRAME_G                0x00000190
-#define MMC_RX_CRC_ERRROR              0x00000194
+#define MMC_RX_CRC_ERROR               0x00000194
 #define MMC_RX_ALIGN_ERROR             0x00000198
 #define MMC_RX_RUN_ERROR               0x0000019C
 #define MMC_RX_JABBER_ERROR            0x000001A0
@@ -196,7 +196,7 @@ void dwmac_mmc_read(void __iomem *ioaddr, struct stmmac_counters *mmc)
        mmc->mmc_rx_octetcount_g += readl(ioaddr + MMC_RX_OCTETCOUNT_G);
        mmc->mmc_rx_broadcastframe_g += readl(ioaddr + MMC_RX_BROADCASTFRAME_G);
        mmc->mmc_rx_multicastframe_g += readl(ioaddr + MMC_RX_MULTICASTFRAME_G);
-       mmc->mmc_rx_crc_error += readl(ioaddr + MMC_RX_CRC_ERRROR);
+       mmc->mmc_rx_crc_error += readl(ioaddr + MMC_RX_CRC_ERROR);
        mmc->mmc_rx_align_error += readl(ioaddr + MMC_RX_ALIGN_ERROR);
        mmc->mmc_rx_run_error += readl(ioaddr + MMC_RX_RUN_ERROR);
        mmc->mmc_rx_jabber_error += readl(ioaddr + MMC_RX_JABBER_ERROR);
index 8b0b1d6aca72c4a36c718862a064da6418dfc9f7..2f1264b882b9555f02e0b1cb50aa914d13c929fa 100644 (file)
@@ -18,6 +18,7 @@ if NET_VENDOR_VIA
 config VIA_RHINE
        tristate "VIA Rhine support"
        depends on (PCI || OF_IRQ)
+       depends on HAS_DMA
        select CRC32
        select MII
        ---help---
@@ -42,6 +43,7 @@ config VIA_RHINE_MMIO
 config VIA_VELOCITY
        tristate "VIA Velocity support"
        depends on (PCI || (OF_ADDRESS && OF_IRQ))
+       depends on HAS_DMA
        select CRC32
        select CRC_CCITT
        select MII
index 6a64197f5bcef6fa4fce9e714bee6fa664e46fb7..f8370808a018f77503de49503985f3d5c55985f6 100644 (file)
@@ -48,15 +48,70 @@ struct macvtap_queue {
 #define MACVTAP_FEATURES (IFF_VNET_HDR | IFF_MULTI_QUEUE)
 
 #define MACVTAP_VNET_LE 0x80000000
+#define MACVTAP_VNET_BE 0x40000000
+
+#ifdef CONFIG_TUN_VNET_CROSS_LE
+static inline bool macvtap_legacy_is_little_endian(struct macvtap_queue *q)
+{
+       return q->flags & MACVTAP_VNET_BE ? false :
+               virtio_legacy_is_little_endian();
+}
+
+static long macvtap_get_vnet_be(struct macvtap_queue *q, int __user *sp)
+{
+       int s = !!(q->flags & MACVTAP_VNET_BE);
+
+       if (put_user(s, sp))
+               return -EFAULT;
+
+       return 0;
+}
+
+static long macvtap_set_vnet_be(struct macvtap_queue *q, int __user *sp)
+{
+       int s;
+
+       if (get_user(s, sp))
+               return -EFAULT;
+
+       if (s)
+               q->flags |= MACVTAP_VNET_BE;
+       else
+               q->flags &= ~MACVTAP_VNET_BE;
+
+       return 0;
+}
+#else
+static inline bool macvtap_legacy_is_little_endian(struct macvtap_queue *q)
+{
+       return virtio_legacy_is_little_endian();
+}
+
+static long macvtap_get_vnet_be(struct macvtap_queue *q, int __user *argp)
+{
+       return -EINVAL;
+}
+
+static long macvtap_set_vnet_be(struct macvtap_queue *q, int __user *argp)
+{
+       return -EINVAL;
+}
+#endif /* CONFIG_TUN_VNET_CROSS_LE */
+
+static inline bool macvtap_is_little_endian(struct macvtap_queue *q)
+{
+       return q->flags & MACVTAP_VNET_LE ||
+               macvtap_legacy_is_little_endian(q);
+}
 
 static inline u16 macvtap16_to_cpu(struct macvtap_queue *q, __virtio16 val)
 {
-       return __virtio16_to_cpu(q->flags & MACVTAP_VNET_LE, val);
+       return __virtio16_to_cpu(macvtap_is_little_endian(q), val);
 }
 
 static inline __virtio16 cpu_to_macvtap16(struct macvtap_queue *q, u16 val)
 {
-       return __cpu_to_virtio16(q->flags & MACVTAP_VNET_LE, val);
+       return __cpu_to_virtio16(macvtap_is_little_endian(q), val);
 }
 
 static struct proto macvtap_proto = {
@@ -1085,6 +1140,12 @@ static long macvtap_ioctl(struct file *file, unsigned int cmd,
                        q->flags &= ~MACVTAP_VNET_LE;
                return 0;
 
+       case TUNGETVNETBE:
+               return macvtap_get_vnet_be(q, sp);
+
+       case TUNSETVNETBE:
+               return macvtap_set_vnet_be(q, sp);
+
        case TUNSETOFFLOAD:
                /* let the user check for future flags */
                if (arg & ~(TUN_F_CSUM | TUN_F_TSO4 | TUN_F_TSO6 |
index 5a7e6397440ab18487a87dd9951aa8ed12caf21a..3cc316cb7e6be792b06dfc2c520eae9809f1008b 100644 (file)
@@ -5,6 +5,7 @@
  *   GPL LICENSE SUMMARY
  *
  *   Copyright(c) 2012 Intel Corporation. All rights reserved.
+ *   Copyright (C) 2015 EMC Corporation. All Rights Reserved.
  *
  *   This program is free software; you can redistribute it and/or modify
  *   it under the terms of version 2 of the GNU General Public License as
@@ -13,6 +14,7 @@
  *   BSD LICENSE
  *
  *   Copyright(c) 2012 Intel Corporation. All rights reserved.
+ *   Copyright (C) 2015 EMC Corporation. All Rights Reserved.
  *
  *   Redistribution and use in source and binary forms, with or without
  *   modification, are permitted provided that the following conditions
@@ -40,7 +42,7 @@
  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  *
- * Intel PCIe NTB Network Linux driver
+ * PCIe NTB Network Linux driver
  *
  * Contact Information:
  * Jon Mason <jon.mason@intel.com>
@@ -50,6 +52,7 @@
 #include <linux/module.h>
 #include <linux/pci.h>
 #include <linux/ntb.h>
+#include <linux/ntb_transport.h>
 
 #define NTB_NETDEV_VER "0.7"
 
@@ -70,26 +73,19 @@ struct ntb_netdev {
 
 static LIST_HEAD(dev_list);
 
-static void ntb_netdev_event_handler(void *data, int status)
+static void ntb_netdev_event_handler(void *data, int link_is_up)
 {
        struct net_device *ndev = data;
        struct ntb_netdev *dev = netdev_priv(ndev);
 
-       netdev_dbg(ndev, "Event %x, Link %x\n", status,
+       netdev_dbg(ndev, "Event %x, Link %x\n", link_is_up,
                   ntb_transport_link_query(dev->qp));
 
-       switch (status) {
-       case NTB_LINK_DOWN:
+       if (link_is_up) {
+               if (ntb_transport_link_query(dev->qp))
+                       netif_carrier_on(ndev);
+       } else {
                netif_carrier_off(ndev);
-               break;
-       case NTB_LINK_UP:
-               if (!ntb_transport_link_query(dev->qp))
-                       return;
-
-               netif_carrier_on(ndev);
-               break;
-       default:
-               netdev_warn(ndev, "Unsupported event type %d\n", status);
        }
 }
 
@@ -160,8 +156,6 @@ static netdev_tx_t ntb_netdev_start_xmit(struct sk_buff *skb,
        struct ntb_netdev *dev = netdev_priv(ndev);
        int rc;
 
-       netdev_dbg(ndev, "%s: skb len %d\n", __func__, skb->len);
-
        rc = ntb_transport_tx_enqueue(dev->qp, skb, skb->data, skb->len);
        if (rc)
                goto err;
@@ -322,20 +316,26 @@ static const struct ntb_queue_handlers ntb_netdev_handlers = {
        .event_handler = ntb_netdev_event_handler,
 };
 
-static int ntb_netdev_probe(struct pci_dev *pdev)
+static int ntb_netdev_probe(struct device *client_dev)
 {
+       struct ntb_dev *ntb;
        struct net_device *ndev;
+       struct pci_dev *pdev;
        struct ntb_netdev *dev;
        int rc;
 
-       ndev = alloc_etherdev(sizeof(struct ntb_netdev));
+       ntb = dev_ntb(client_dev->parent);
+       pdev = ntb->pdev;
+       if (!pdev)
+               return -ENODEV;
+
+       ndev = alloc_etherdev(sizeof(*dev));
        if (!ndev)
                return -ENOMEM;
 
        dev = netdev_priv(ndev);
        dev->ndev = ndev;
        dev->pdev = pdev;
-       BUG_ON(!dev->pdev);
        ndev->features = NETIF_F_HIGHDMA;
 
        ndev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
@@ -349,7 +349,8 @@ static int ntb_netdev_probe(struct pci_dev *pdev)
        ndev->netdev_ops = &ntb_netdev_ops;
        ndev->ethtool_ops = &ntb_ethtool_ops;
 
-       dev->qp = ntb_transport_create_queue(ndev, pdev, &ntb_netdev_handlers);
+       dev->qp = ntb_transport_create_queue(ndev, client_dev,
+                                            &ntb_netdev_handlers);
        if (!dev->qp) {
                rc = -EIO;
                goto err;
@@ -372,12 +373,17 @@ err:
        return rc;
 }
 
-static void ntb_netdev_remove(struct pci_dev *pdev)
+static void ntb_netdev_remove(struct device *client_dev)
 {
+       struct ntb_dev *ntb;
        struct net_device *ndev;
+       struct pci_dev *pdev;
        struct ntb_netdev *dev;
        bool found = false;
 
+       ntb = dev_ntb(client_dev->parent);
+       pdev = ntb->pdev;
+
        list_for_each_entry(dev, &dev_list, list) {
                if (dev->pdev == pdev) {
                        found = true;
@@ -396,7 +402,7 @@ static void ntb_netdev_remove(struct pci_dev *pdev)
        free_netdev(ndev);
 }
 
-static struct ntb_client ntb_netdev_client = {
+static struct ntb_transport_client ntb_netdev_client = {
        .driver.name = KBUILD_MODNAME,
        .driver.owner = THIS_MODULE,
        .probe = ntb_netdev_probe,
@@ -407,16 +413,16 @@ static int __init ntb_netdev_init_module(void)
 {
        int rc;
 
-       rc = ntb_register_client_dev(KBUILD_MODNAME);
+       rc = ntb_transport_register_client_dev(KBUILD_MODNAME);
        if (rc)
                return rc;
-       return ntb_register_client(&ntb_netdev_client);
+       return ntb_transport_register_client(&ntb_netdev_client);
 }
 module_init(ntb_netdev_init_module);
 
 static void __exit ntb_netdev_exit_module(void)
 {
-       ntb_unregister_client(&ntb_netdev_client);
-       ntb_unregister_client_dev(KBUILD_MODNAME);
+       ntb_transport_unregister_client(&ntb_netdev_client);
+       ntb_transport_unregister_client_dev(KBUILD_MODNAME);
 }
 module_exit(ntb_netdev_exit_module);
index 4dea85bfc545b86d5874031531a9ce4963facbe2..6b701b3ded749642b6cdf2309e7b25d2b373ed37 100644 (file)
@@ -246,6 +246,13 @@ static int bcm7xxx_28nm_config_init(struct phy_device *phydev)
        pr_info_once("%s: %s PHY revision: 0x%02x, patch: %d\n",
                     dev_name(&phydev->dev), phydev->drv->name, rev, patch);
 
+       /* Dummy read to a register to workaround an issue upon reset where the
+        * internal inverter may not allow the first MDIO transaction to pass
+        * the MDIO management controller and make us return 0xffff for such
+        * reads.
+        */
+       phy_read(phydev, MII_BMSR);
+
        switch (rev) {
        case 0xb0:
                ret = bcm7xxx_28nm_b0_afe_config_init(phydev);
index fc7abc50b4f17544741d1166960fb549b8554fbd..6a52a7f0fa0dc5cace471b118a9e989d8c2713ea 100644 (file)
@@ -120,6 +120,48 @@ static int unimac_mdio_write(struct mii_bus *bus, int phy_id,
        return 0;
 }
 
+/* Workaround for integrated BCM7xxx Gigabit PHYs which have a problem with
+ * their internal MDIO management controller making them fail to successfully
+ * be read from or written to for the first transaction.  We insert a dummy
+ * BMSR read here to make sure that phy_get_device() and get_phy_id() can
+ * correctly read the PHY MII_PHYSID1/2 registers and successfully register a
+ * PHY device for this peripheral.
+ *
+ * Once the PHY driver is registered, we can workaround subsequent reads from
+ * there (e.g: during system-wide power management).
+ *
+ * bus->reset is invoked before mdiobus_scan during mdiobus_register and is
+ * therefore the right location to stick that workaround. Since we do not want
+ * to read from non-existing PHYs, we either use bus->phy_mask or do a manual
+ * Device Tree scan to limit the search area.
+ */
+static int unimac_mdio_reset(struct mii_bus *bus)
+{
+       struct device_node *np = bus->dev.of_node;
+       struct device_node *child;
+       u32 read_mask = 0;
+       int addr;
+
+       if (!np) {
+               read_mask = ~bus->phy_mask;
+       } else {
+               for_each_available_child_of_node(np, child) {
+                       addr = of_mdio_parse_addr(&bus->dev, child);
+                       if (addr < 0)
+                               continue;
+
+                       read_mask |= 1 << addr;
+               }
+       }
+
+       for (addr = 0; addr < PHY_MAX_ADDR; addr++) {
+               if (read_mask & 1 << addr)
+                       mdiobus_read(bus, addr, MII_BMSR);
+       }
+
+       return 0;
+}
+
 static int unimac_mdio_probe(struct platform_device *pdev)
 {
        struct unimac_mdio_priv *priv;
@@ -155,6 +197,7 @@ static int unimac_mdio_probe(struct platform_device *pdev)
        bus->parent = &pdev->dev;
        bus->read = unimac_mdio_read;
        bus->write = unimac_mdio_write;
+       bus->reset = unimac_mdio_reset;
        snprintf(bus->id, MII_BUS_ID_SIZE, "%s", pdev->name);
 
        bus->irq = kcalloc(PHY_MAX_ADDR, sizeof(int), GFP_KERNEL);
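Editor's note: unimac_mdio_reset() above builds a bitmask of PHY addresses, either ~bus->phy_mask or the addresses found by a Device Tree scan, and issues one dummy BMSR read per set bit so the first real MDIO transaction is not lost. A small plain-C sketch of the mask-driven scan with a stubbed read function (hypothetical names, not kernel code):

/* Sketch of scanning a PHY address bitmask and issuing dummy reads. */
#include <stdint.h>
#include <stdio.h>

#define PHY_MAX_ADDR 32

static int dummy_mdio_read(int addr)
{
	/* Stands in for mdiobus_read(bus, addr, MII_BMSR). */
	printf("dummy BMSR read at address %d\n", addr);
	return 0;
}

static void warmup_bus(uint32_t read_mask)
{
	int addr;

	for (addr = 0; addr < PHY_MAX_ADDR; addr++)
		if (read_mask & (1u << addr))
			dummy_mdio_read(addr);
}

int main(void)
{
	/* e.g. PHYs at addresses 0 and 5, as a DT scan might have found. */
	warmup_bus((1u << 0) | (1u << 5));
	return 0;
}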
index bdfe51fc3a6507154edfcaf8be3413884bcf702f..0302483de24066a64446699cb360b2fb2ad6a890 100644 (file)
@@ -230,7 +230,7 @@ static int get_phy_c45_ids(struct mii_bus *bus, int addr, u32 *phy_id,
        for (i = 1;
             i < num_ids && c45_ids->devices_in_package == 0;
             i++) {
-               reg_addr = MII_ADDR_C45 | i << 16 | MDIO_DEVS2;
+retry:         reg_addr = MII_ADDR_C45 | i << 16 | MDIO_DEVS2;
                phy_reg = mdiobus_read(bus, addr, reg_addr);
                if (phy_reg < 0)
                        return -EIO;
@@ -242,12 +242,20 @@ static int get_phy_c45_ids(struct mii_bus *bus, int addr, u32 *phy_id,
                        return -EIO;
                c45_ids->devices_in_package |= (phy_reg & 0xffff);
 
-               /* If mostly Fs, there is no device there,
-                * let's get out of here.
-                */
                if ((c45_ids->devices_in_package & 0x1fffffff) == 0x1fffffff) {
-                       *phy_id = 0xffffffff;
-                       return 0;
+                       if (i) {
+                               /*  If mostly Fs, there is no device there,
+                                *  then let's continue to probe more, as some
+                                *  10G PHYs have zero Devices In package,
+                                *  e.g. Cortina CS4315/CS4340 PHY.
+                                */
+                               i = 0;
+                               goto retry;
+                       } else {
+                               /* no device there, let's get out of here */
+                               *phy_id = 0xffffffff;
+                               return 0;
+                       }
                }
        }
 
@@ -796,10 +804,11 @@ static int genphy_config_advert(struct phy_device *phydev)
        if (phydev->supported & (SUPPORTED_1000baseT_Half |
                                 SUPPORTED_1000baseT_Full)) {
                adv |= ethtool_adv_to_mii_ctrl1000_t(advertise);
-               if (adv != oldadv)
-                       changed = 1;
        }
 
+       if (adv != oldadv)
+               changed = 1;
+
        err = phy_write(phydev, MII_CTRL1000, adv);
        if (err < 0)
                return err;
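Editor's note: the get_phy_c45_ids() change above no longer gives up when the Devices-In-Package registers of MMD 1..n read back as all Fs; it retries once via MMD 0, because some 10G PHYs (the diff names Cortina CS4315/CS4340) only answer there. A condensed, non-kernel sketch of that retry flow with a stubbed register read:

/* Condensed sketch of the devices-in-package retry; illustrative only. */
#include <stdint.h>
#include <stdio.h>

/* Stub: pretend only MMD 0 reports a valid devices-in-package value. */
static uint32_t read_devs_in_package(int mmd)
{
	return mmd == 0 ? 0x0000000a : 0xffffffff;
}

static int probe_devices_in_package(uint32_t *devs)
{
	int mmd = 1;

retry:
	*devs = read_devs_in_package(mmd);
	if ((*devs & 0x1fffffff) == 0x1fffffff) {
		if (mmd) {
			mmd = 0;        /* some 10G PHYs only answer via MMD 0 */
			goto retry;
		}
		return -1;              /* really nothing there */
	}
	return 0;
}

int main(void)
{
	uint32_t devs;

	if (!probe_devices_in_package(&devs))
		printf("devices in package: %#x\n", devs);
	return 0;
}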
index 76cad712ddb2c7c6bc2794389380e4e0a862d5f5..17cad185169dd28fd3cae12e69d918371fe9d06f 100644 (file)
@@ -66,6 +66,7 @@
 #define PHY_ID_VSC8244                 0x000fc6c0
 #define PHY_ID_VSC8514                 0x00070670
 #define PHY_ID_VSC8574                 0x000704a0
+#define PHY_ID_VSC8641                 0x00070431
 #define PHY_ID_VSC8662                 0x00070660
 #define PHY_ID_VSC8221                 0x000fc550
 #define PHY_ID_VSC8211                 0x000fc4b0
@@ -271,6 +272,18 @@ static struct phy_driver vsc82xx_driver[] = {
        .ack_interrupt  = &vsc824x_ack_interrupt,
        .config_intr    = &vsc82xx_config_intr,
        .driver         = { .owner = THIS_MODULE,},
+}, {
+       .phy_id         = PHY_ID_VSC8641,
+       .name           = "Vitesse VSC8641",
+       .phy_id_mask    = 0x000ffff0,
+       .features       = PHY_GBIT_FEATURES,
+       .flags          = PHY_HAS_INTERRUPT,
+       .config_init    = &vsc824x_config_init,
+       .config_aneg    = &vsc82x4_config_aneg,
+       .read_status    = &genphy_read_status,
+       .ack_interrupt  = &vsc824x_ack_interrupt,
+       .config_intr    = &vsc82xx_config_intr,
+       .driver         = { .owner = THIS_MODULE,},
 }, {
        .phy_id         = PHY_ID_VSC8662,
        .name           = "Vitesse VSC8662",
@@ -318,6 +331,7 @@ static struct mdio_device_id __maybe_unused vitesse_tbl[] = {
        { PHY_ID_VSC8244, 0x000fffc0 },
        { PHY_ID_VSC8514, 0x000ffff0 },
        { PHY_ID_VSC8574, 0x000ffff0 },
+       { PHY_ID_VSC8641, 0x000ffff0 },
        { PHY_ID_VSC8662, 0x000ffff0 },
        { PHY_ID_VSC8221, 0x000ffff0 },
        { PHY_ID_VSC8211, 0x000ffff0 },
index 1a1c4f7b3ec53d884c0ea3aed996d08d203ce6aa..06a039414628de9a9d91c73e9f8790c23fce71a3 100644 (file)
@@ -111,6 +111,7 @@ do {                                                                \
 #define TUN_FASYNC     IFF_ATTACH_QUEUE
 /* High bits in flags field are unused. */
 #define TUN_VNET_LE     0x80000000
+#define TUN_VNET_BE     0x40000000
 
 #define TUN_FEATURES (IFF_NO_PI | IFF_ONE_QUEUE | IFF_VNET_HDR | \
                      IFF_MULTI_QUEUE)
@@ -205,14 +206,68 @@ struct tun_struct {
        u32 flow_count;
 };
 
+#ifdef CONFIG_TUN_VNET_CROSS_LE
+static inline bool tun_legacy_is_little_endian(struct tun_struct *tun)
+{
+       return tun->flags & TUN_VNET_BE ? false :
+               virtio_legacy_is_little_endian();
+}
+
+static long tun_get_vnet_be(struct tun_struct *tun, int __user *argp)
+{
+       int be = !!(tun->flags & TUN_VNET_BE);
+
+       if (put_user(be, argp))
+               return -EFAULT;
+
+       return 0;
+}
+
+static long tun_set_vnet_be(struct tun_struct *tun, int __user *argp)
+{
+       int be;
+
+       if (get_user(be, argp))
+               return -EFAULT;
+
+       if (be)
+               tun->flags |= TUN_VNET_BE;
+       else
+               tun->flags &= ~TUN_VNET_BE;
+
+       return 0;
+}
+#else
+static inline bool tun_legacy_is_little_endian(struct tun_struct *tun)
+{
+       return virtio_legacy_is_little_endian();
+}
+
+static long tun_get_vnet_be(struct tun_struct *tun, int __user *argp)
+{
+       return -EINVAL;
+}
+
+static long tun_set_vnet_be(struct tun_struct *tun, int __user *argp)
+{
+       return -EINVAL;
+}
+#endif /* CONFIG_TUN_VNET_CROSS_LE */
+
+static inline bool tun_is_little_endian(struct tun_struct *tun)
+{
+       return tun->flags & TUN_VNET_LE ||
+               tun_legacy_is_little_endian(tun);
+}
+
 static inline u16 tun16_to_cpu(struct tun_struct *tun, __virtio16 val)
 {
-       return __virtio16_to_cpu(tun->flags & TUN_VNET_LE, val);
+       return __virtio16_to_cpu(tun_is_little_endian(tun), val);
 }
 
 static inline __virtio16 cpu_to_tun16(struct tun_struct *tun, u16 val)
 {
-       return __cpu_to_virtio16(tun->flags & TUN_VNET_LE, val);
+       return __cpu_to_virtio16(tun_is_little_endian(tun), val);
 }
 
 static inline u32 tun_hashfn(u32 rxhash)
@@ -2044,6 +2099,14 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
                        tun->flags &= ~TUN_VNET_LE;
                break;
 
+       case TUNGETVNETBE:
+               ret = tun_get_vnet_be(tun, argp);
+               break;
+
+       case TUNSETVNETBE:
+               ret = tun_set_vnet_be(tun, argp);
+               break;
+
        case TUNATTACHFILTER:
                /* Can be set only for TAPs */
                ret = -EINVAL;
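Editor's note: the macvtap hunk earlier and this tun.c hunk add a matching VNET_BE flag (behind CONFIG_TUN_VNET_CROSS_LE) so a big-endian legacy guest can be served from a little-endian host and vice versa: VNET_LE forces little-endian, otherwise VNET_BE overrides the legacy virtio default. A compact user-space sketch of the same decision plus the resulting 16-bit conversion; names are invented, and a little-endian host and legacy default are assumed.

/* Sketch of the VNET_LE / VNET_BE endianness selection for virtio16 fields. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define VNET_LE 0x80000000u
#define VNET_BE 0x40000000u

/* Stand-in for virtio_legacy_is_little_endian(); assume LE legacy here. */
static bool legacy_is_little_endian(void) { return true; }

static bool is_little_endian(uint32_t flags)
{
	if (flags & VNET_LE)
		return true;                     /* modern/forced little-endian */
	if (flags & VNET_BE)
		return false;                    /* explicit big-endian override */
	return legacy_is_little_endian();        /* legacy default */
}

static uint16_t bswap16(uint16_t v) { return (uint16_t)((v >> 8) | (v << 8)); }

/* Convert a wire-format 16-bit field into CPU order on a little-endian host. */
static uint16_t virtio16_to_cpu(uint32_t flags, uint16_t wire)
{
	return is_little_endian(flags) ? wire : bswap16(wire);
}

int main(void)
{
	uint16_t wire = 0x1234;

	printf("LE queue: %#x\n", virtio16_to_cpu(VNET_LE, wire));
	printf("BE queue: %#x\n", virtio16_to_cpu(VNET_BE, wire));
	return 0;
}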
index e9f1075f7d4c2055ccbf00c24b41c86a77a87558..2652245631d12f5016915721d5f93268cb4453c1 100644 (file)
 /*
  * Version numbers
  */
-#define VMXNET3_DRIVER_VERSION_STRING   "1.3.5.0-k"
+#define VMXNET3_DRIVER_VERSION_STRING   "1.4.2.0-k"
 
 /* a 32-bit int, each byte encode a verion number in VMXNET3_DRIVER_VERSION */
-#define VMXNET3_DRIVER_VERSION_NUM      0x01030500
+#define VMXNET3_DRIVER_VERSION_NUM      0x01040200
 
 #if defined(CONFIG_PCI_MSI)
        /* RSS only makes sense if MSI-X is supported. */
index b9febab8916735f113b99fa2e08d81662b13390a..6ca6193ab8a6100ac6257e24745d4575e17648a9 100644 (file)
@@ -62,7 +62,7 @@ static int mtu_max_set(const char *val, const struct kernel_param *kp)
        return ret;
 }
 
-static struct kernel_param_ops mtu_max_ops = {
+static const struct kernel_param_ops mtu_max_ops = {
        .set = mtu_max_set,
        .get = param_get_uint,
 };
@@ -91,7 +91,7 @@ static int ring_order_set(const char *val, const struct kernel_param *kp)
        return 0;
 }
 
-static struct kernel_param_ops ring_order_ops = {
+static const struct kernel_param_ops ring_order_ops = {
        .set = ring_order_set,
        .get = param_get_uint,
 };
index 1a20cee5febea93aa350e1ff401e88e04aa2c4b7..799a2efe57937241223aafcf3793586f7304346f 100644 (file)
@@ -821,15 +821,15 @@ static int if_usb_prog_firmware(struct if_usb_card *cardp)
 
        lbtf_deb_enter(LBTF_DEB_USB);
 
-       kparam_block_sysfs_write(fw_name);
+       kernel_param_lock(THIS_MODULE);
        ret = request_firmware(&cardp->fw, lbtf_fw_name, &cardp->udev->dev);
        if (ret < 0) {
                pr_err("request_firmware() failed with %#x\n", ret);
                pr_err("firmware %s not found\n", lbtf_fw_name);
-               kparam_unblock_sysfs_write(fw_name);
+               kernel_param_unlock(THIS_MODULE);
                goto done;
        }
-       kparam_unblock_sysfs_write(fw_name);
+       kernel_param_unlock(THIS_MODULE);
 
        if (check_fwfile_format(cardp->fw->data, cardp->fw->size))
                goto release_fw;
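Editor's note: the myri10ge and libertas_tf hunks replace the removed kparam_block_sysfs_write() helpers with kernel_param_lock()/kernel_param_unlock(), either holding the lock across the use of the writable parameter (libertas_tf) or taking a private copy under it (myri10ge). Below is a user-space analogue of the copy-under-lock variant, with a pthread mutex standing in for the per-module parameter lock; names are invented.

/* Analogue of "snapshot a writable module parameter under the param lock". */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static pthread_mutex_t param_lock = PTHREAD_MUTEX_INITIALIZER;
static char *fw_name = "firmware-default.bin";   /* may be rewritten elsewhere */

static char *snapshot_fw_name(void)
{
	char *copy = NULL;

	pthread_mutex_lock(&param_lock);
	if (fw_name)
		copy = strdup(fw_name);          /* kstrdup() in the drivers */
	pthread_mutex_unlock(&param_lock);
	return copy;                             /* safe to use without the lock */
}

int main(void)
{
	char *name = snapshot_fw_name();

	if (name) {
		printf("requesting firmware %s\n", name);
		free(name);
	}
	return 0;
}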
index 5485f91294e7182da2c31f85715570629cac62d9..880d0d63e872e5725d76fe998db0282c749f45d6 100644 (file)
@@ -44,9 +44,9 @@
 #include <xen/xen.h>
 #include <xen/events.h>
 #include <xen/interface/memory.h>
+#include <xen/page.h>
 
 #include <asm/xen/hypercall.h>
-#include <asm/xen/page.h>
 
 /* Provide an option to disable split event channels at load time as
  * event channels are limited resource. Split event channels are
index 56d8afd11077de5d1f7b475bba37d1164c92bf41..f948c46d51329970c186b2886c267ffba2e807db 100644 (file)
@@ -45,7 +45,6 @@
 #include <linux/slab.h>
 #include <net/ip.h>
 
-#include <asm/xen/page.h>
 #include <xen/xen.h>
 #include <xen/xenbus.h>
 #include <xen/events.h>
@@ -1245,10 +1244,6 @@ static struct net_device *xennet_create_dev(struct xenbus_device *dev)
        np                   = netdev_priv(netdev);
        np->xbdev            = dev;
 
-       /* No need to use rtnl_lock() before the call below as it
-        * happens before register_netdev().
-        */
-       netif_set_real_num_tx_queues(netdev, 0);
        np->queues = NULL;
 
        err = -ENOMEM;
@@ -1900,9 +1895,6 @@ abort_transaction_no_dev_fatal:
        xennet_disconnect_backend(info);
        kfree(info->queues);
        info->queues = NULL;
-       rtnl_lock();
-       netif_set_real_num_tx_queues(info->netdev, 0);
-       rtnl_unlock();
  out:
        return err;
 }
index f69df793dbe2c153e591ddc1180335288288acd5..95944e52fa36a306c209a9fa766779be5969c036 100644 (file)
@@ -1,13 +1,28 @@
-config NTB
-       tristate "Intel Non-Transparent Bridge support"
-       depends on PCI
-       depends on X86
-       help
-        The PCI-E Non-transparent bridge hardware is a point-to-point PCI-E bus
-        connecting 2 systems.  When configured, writes to the device's PCI
-        mapped memory will be mirrored to a buffer on the remote system.  The
-        ntb Linux driver uses this point-to-point communication as a method to
-        transfer data from one system to the other.
-
-        If unsure, say N.
+menuconfig NTB
+       tristate "Non-Transparent Bridge support"
+       depends on PCI
+       help
+        The PCI-E Non-transparent bridge hardware is a point-to-point PCI-E bus
+        connecting 2 systems.  When configured, writes to the device's PCI
+        mapped memory will be mirrored to a buffer on the remote system.  The
+        ntb Linux driver uses this point-to-point communication as a method to
+        transfer data from one system to the other.
 
+        If unsure, say N.
+
+if NTB
+
+source "drivers/ntb/hw/Kconfig"
+
+source "drivers/ntb/test/Kconfig"
+
+config NTB_TRANSPORT
+       tristate "NTB Transport Client"
+       help
+        This is a transport driver that enables connected systems to exchange
+        messages over the ntb hardware.  The transport exposes a queue pair api
+        to client drivers.
+
+        If unsure, say N.
+
+endif # NTB
index 15cb59fd354e89176b9efe9a2df7a276336042f3..1921dec1949deb5e93e3ccf5e0b0ad5c705e8c96 100644 (file)
@@ -1,3 +1,2 @@
-obj-$(CONFIG_NTB) += ntb.o
-
-ntb-objs := ntb_hw.o ntb_transport.o
+obj-$(CONFIG_NTB) += ntb.o hw/ test/
+obj-$(CONFIG_NTB_TRANSPORT) += ntb_transport.o
diff --git a/drivers/ntb/hw/Kconfig b/drivers/ntb/hw/Kconfig
new file mode 100644 (file)
index 0000000..4d5535c
--- /dev/null
@@ -0,0 +1 @@
+source "drivers/ntb/hw/intel/Kconfig"
diff --git a/drivers/ntb/hw/Makefile b/drivers/ntb/hw/Makefile
new file mode 100644 (file)
index 0000000..175d7c9
--- /dev/null
@@ -0,0 +1 @@
+obj-$(CONFIG_NTB_INTEL)        += intel/
diff --git a/drivers/ntb/hw/intel/Kconfig b/drivers/ntb/hw/intel/Kconfig
new file mode 100644 (file)
index 0000000..91f995e
--- /dev/null
@@ -0,0 +1,7 @@
+config NTB_INTEL
+       tristate "Intel Non-Transparent Bridge support"
+       depends on X86_64
+       help
+        This driver supports Intel NTB on capable Xeon and Atom hardware.
+
+        If unsure, say N.
diff --git a/drivers/ntb/hw/intel/Makefile b/drivers/ntb/hw/intel/Makefile
new file mode 100644 (file)
index 0000000..1b43456
--- /dev/null
@@ -0,0 +1 @@
+obj-$(CONFIG_NTB_INTEL) += ntb_hw_intel.o
diff --git a/drivers/ntb/hw/intel/ntb_hw_intel.c b/drivers/ntb/hw/intel/ntb_hw_intel.c
new file mode 100644 (file)
index 0000000..87751cf
--- /dev/null
@@ -0,0 +1,2274 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ *   redistributing this file, you may do so under either license.
+ *
+ *   GPL LICENSE SUMMARY
+ *
+ *   Copyright(c) 2012 Intel Corporation. All rights reserved.
+ *   Copyright (C) 2015 EMC Corporation. All Rights Reserved.
+ *
+ *   This program is free software; you can redistribute it and/or modify
+ *   it under the terms of version 2 of the GNU General Public License as
+ *   published by the Free Software Foundation.
+ *
+ *   BSD LICENSE
+ *
+ *   Copyright(c) 2012 Intel Corporation. All rights reserved.
+ *   Copyright (C) 2015 EMC Corporation. All Rights Reserved.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copy
+ *       notice, this list of conditions and the following disclaimer in
+ *       the documentation and/or other materials provided with the
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Intel PCIe NTB Linux driver
+ *
+ * Contact Information:
+ * Jon Mason <jon.mason@intel.com>
+ */
+
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/random.h>
+#include <linux/slab.h>
+#include <linux/ntb.h>
+
+#include "ntb_hw_intel.h"
+
+#define NTB_NAME       "ntb_hw_intel"
+#define NTB_DESC       "Intel(R) PCI-E Non-Transparent Bridge Driver"
+#define NTB_VER                "2.0"
+
+MODULE_DESCRIPTION(NTB_DESC);
+MODULE_VERSION(NTB_VER);
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_AUTHOR("Intel Corporation");
+
+#define bar0_off(base, bar) ((base) + ((bar) << 2))
+#define bar2_off(base, bar) bar0_off(base, (bar) - 2)
+
+static const struct intel_ntb_reg atom_reg;
+static const struct intel_ntb_alt_reg atom_pri_reg;
+static const struct intel_ntb_alt_reg atom_sec_reg;
+static const struct intel_ntb_alt_reg atom_b2b_reg;
+static const struct intel_ntb_xlat_reg atom_pri_xlat;
+static const struct intel_ntb_xlat_reg atom_sec_xlat;
+static const struct intel_ntb_reg xeon_reg;
+static const struct intel_ntb_alt_reg xeon_pri_reg;
+static const struct intel_ntb_alt_reg xeon_sec_reg;
+static const struct intel_ntb_alt_reg xeon_b2b_reg;
+static const struct intel_ntb_xlat_reg xeon_pri_xlat;
+static const struct intel_ntb_xlat_reg xeon_sec_xlat;
+static struct intel_b2b_addr xeon_b2b_usd_addr;
+static struct intel_b2b_addr xeon_b2b_dsd_addr;
+static const struct ntb_dev_ops intel_ntb_ops;
+
+static const struct file_operations intel_ntb_debugfs_info;
+static struct dentry *debugfs_dir;
+
+static int b2b_mw_idx = -1;
+module_param(b2b_mw_idx, int, 0644);
+MODULE_PARM_DESC(b2b_mw_idx, "Use this mw idx to access the peer ntb.  A "
+                "value of zero or positive starts from first mw idx, and a "
+                "negative value starts from last mw idx.  Both sides MUST "
+                "set the same value here!");
+
+static unsigned int b2b_mw_share;
+module_param(b2b_mw_share, uint, 0644);
+MODULE_PARM_DESC(b2b_mw_share, "If the b2b mw is large enough, configure the "
+                "ntb so that the peer ntb only occupies the first half of "
+                "the mw, so the second half can still be used as a mw.  Both "
+                "sides MUST set the same value here!");
+
+module_param_named(xeon_b2b_usd_bar2_addr64,
+                  xeon_b2b_usd_addr.bar2_addr64, ullong, 0644);
+MODULE_PARM_DESC(xeon_b2b_usd_bar2_addr64,
+                "XEON B2B USD BAR 2 64-bit address");
+
+module_param_named(xeon_b2b_usd_bar4_addr64,
+                  xeon_b2b_usd_addr.bar4_addr64, ullong, 0644);
+MODULE_PARM_DESC(xeon_b2b_usd_bar4_addr64,
+                "XEON B2B USD BAR 4 64-bit address");
+
+module_param_named(xeon_b2b_usd_bar4_addr32,
+                  xeon_b2b_usd_addr.bar4_addr32, ullong, 0644);
+MODULE_PARM_DESC(xeon_b2b_usd_bar4_addr32,
+                "XEON B2B USD split-BAR 4 32-bit address");
+
+module_param_named(xeon_b2b_usd_bar5_addr32,
+                  xeon_b2b_usd_addr.bar5_addr32, ullong, 0644);
+MODULE_PARM_DESC(xeon_b2b_usd_bar5_addr32,
+                "XEON B2B USD split-BAR 5 32-bit address");
+
+module_param_named(xeon_b2b_dsd_bar2_addr64,
+                  xeon_b2b_dsd_addr.bar2_addr64, ullong, 0644);
+MODULE_PARM_DESC(xeon_b2b_dsd_bar2_addr64,
+                "XEON B2B DSD BAR 2 64-bit address");
+
+module_param_named(xeon_b2b_dsd_bar4_addr64,
+                  xeon_b2b_dsd_addr.bar4_addr64, ullong, 0644);
+MODULE_PARM_DESC(xeon_b2b_dsd_bar4_addr64,
+                "XEON B2B DSD BAR 4 64-bit address");
+
+module_param_named(xeon_b2b_dsd_bar4_addr32,
+                  xeon_b2b_dsd_addr.bar4_addr32, ullong, 0644);
+MODULE_PARM_DESC(xeon_b2b_dsd_bar4_addr32,
+                "XEON B2B DSD split-BAR 4 32-bit address");
+
+module_param_named(xeon_b2b_dsd_bar5_addr32,
+                  xeon_b2b_dsd_addr.bar5_addr32, ullong, 0644);
+MODULE_PARM_DESC(xeon_b2b_dsd_bar5_addr32,
+                "XEON B2B DSD split-BAR 5 32-bit address");
+
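+/* Fall back to a pair of 32-bit accesses (low word first) on architectures
+ * without readq/writeq.  Note that the combined 64-bit access is not atomic.
+ */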
+#ifndef ioread64
+#ifdef readq
+#define ioread64 readq
+#else
+#define ioread64 _ioread64
+static inline u64 _ioread64(void __iomem *mmio)
+{
+       u64 low, high;
+
+       low = ioread32(mmio);
+       high = ioread32(mmio + sizeof(u32));
+       return low | (high << 32);
+}
+#endif
+#endif
+
+#ifndef iowrite64
+#ifdef writeq
+#define iowrite64 writeq
+#else
+#define iowrite64 _iowrite64
+static inline void _iowrite64(u64 val, void __iomem *mmio)
+{
+       iowrite32(val, mmio);
+       iowrite32(val >> 32, mmio + sizeof(u32));
+}
+#endif
+#endif
+
+static inline int pdev_is_atom(struct pci_dev *pdev)
+{
+       switch (pdev->device) {
+       case PCI_DEVICE_ID_INTEL_NTB_B2B_BWD:
+               return 1;
+       }
+       return 0;
+}
+
+static inline int pdev_is_xeon(struct pci_dev *pdev)
+{
+       switch (pdev->device) {
+       case PCI_DEVICE_ID_INTEL_NTB_SS_JSF:
+       case PCI_DEVICE_ID_INTEL_NTB_SS_SNB:
+       case PCI_DEVICE_ID_INTEL_NTB_SS_IVT:
+       case PCI_DEVICE_ID_INTEL_NTB_SS_HSX:
+       case PCI_DEVICE_ID_INTEL_NTB_PS_JSF:
+       case PCI_DEVICE_ID_INTEL_NTB_PS_SNB:
+       case PCI_DEVICE_ID_INTEL_NTB_PS_IVT:
+       case PCI_DEVICE_ID_INTEL_NTB_PS_HSX:
+       case PCI_DEVICE_ID_INTEL_NTB_B2B_JSF:
+       case PCI_DEVICE_ID_INTEL_NTB_B2B_SNB:
+       case PCI_DEVICE_ID_INTEL_NTB_B2B_IVT:
+       case PCI_DEVICE_ID_INTEL_NTB_B2B_HSX:
+               return 1;
+       }
+       return 0;
+}
+
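+/* "Unsafe" flags mark doorbell/scratchpad registers that hardware errata make
+ * unreliable for the current topology.  A client that queries the
+ * *_is_unsafe() callbacks acknowledges the hazard, which suppresses further
+ * warnings for that resource.
+ */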
+static inline void ndev_reset_unsafe_flags(struct intel_ntb_dev *ndev)
+{
+       ndev->unsafe_flags = 0;
+       ndev->unsafe_flags_ignore = 0;
+
+       /* Only B2B has a workaround to avoid SDOORBELL */
+       if (ndev->hwerr_flags & NTB_HWERR_SDOORBELL_LOCKUP)
+               if (!ntb_topo_is_b2b(ndev->ntb.topo))
+                       ndev->unsafe_flags |= NTB_UNSAFE_DB;
+
+       /* No low level workaround to avoid SB01BASE */
+       if (ndev->hwerr_flags & NTB_HWERR_SB01BASE_LOCKUP) {
+               ndev->unsafe_flags |= NTB_UNSAFE_DB;
+               ndev->unsafe_flags |= NTB_UNSAFE_SPAD;
+       }
+}
+
+static inline int ndev_is_unsafe(struct intel_ntb_dev *ndev,
+                                unsigned long flag)
+{
+       return !!(flag & ndev->unsafe_flags & ~ndev->unsafe_flags_ignore);
+}
+
+static inline int ndev_ignore_unsafe(struct intel_ntb_dev *ndev,
+                                    unsigned long flag)
+{
+       flag &= ndev->unsafe_flags;
+       ndev->unsafe_flags_ignore |= flag;
+
+       return !!flag;
+}
+
+static int ndev_mw_to_bar(struct intel_ntb_dev *ndev, int idx)
+{
+       if (idx < 0 || idx >= ndev->mw_count)
+               return -EINVAL;
+       return ndev->reg->mw_bar[idx];
+}
+
+static inline int ndev_db_addr(struct intel_ntb_dev *ndev,
+                              phys_addr_t *db_addr, resource_size_t *db_size,
+                              phys_addr_t reg_addr, unsigned long reg)
+{
+       if (ndev_is_unsafe(ndev, NTB_UNSAFE_DB))
+               pr_warn_once("%s: NTB unsafe doorbell access", __func__);
+
+       if (db_addr) {
+               *db_addr = reg_addr + reg;
+               dev_dbg(ndev_dev(ndev), "Peer db addr %llx\n", *db_addr);
+       }
+
+       if (db_size) {
+               *db_size = ndev->reg->db_size;
+               dev_dbg(ndev_dev(ndev), "Peer db size %llx\n", *db_size);
+       }
+
+       return 0;
+}
+
+static inline u64 ndev_db_read(struct intel_ntb_dev *ndev,
+                              void __iomem *mmio)
+{
+       if (ndev_is_unsafe(ndev, NTB_UNSAFE_DB))
+               pr_warn_once("%s: NTB unsafe doorbell access", __func__);
+
+       return ndev->reg->db_ioread(mmio);
+}
+
+static inline int ndev_db_write(struct intel_ntb_dev *ndev, u64 db_bits,
+                               void __iomem *mmio)
+{
+       if (ndev_is_unsafe(ndev, NTB_UNSAFE_DB))
+               pr_warn_once("%s: NTB unsafe doorbell access", __func__);
+
+       if (db_bits & ~ndev->db_valid_mask)
+               return -EINVAL;
+
+       ndev->reg->db_iowrite(db_bits, mmio);
+
+       return 0;
+}
+
+static inline int ndev_db_set_mask(struct intel_ntb_dev *ndev, u64 db_bits,
+                                  void __iomem *mmio)
+{
+       unsigned long irqflags;
+
+       if (ndev_is_unsafe(ndev, NTB_UNSAFE_DB))
+               pr_warn_once("%s: NTB unsafe doorbell access", __func__);
+
+       if (db_bits & ~ndev->db_valid_mask)
+               return -EINVAL;
+
+       spin_lock_irqsave(&ndev->db_mask_lock, irqflags);
+       {
+               ndev->db_mask |= db_bits;
+               ndev->reg->db_iowrite(ndev->db_mask, mmio);
+       }
+       spin_unlock_irqrestore(&ndev->db_mask_lock, irqflags);
+
+       return 0;
+}
+
+static inline int ndev_db_clear_mask(struct intel_ntb_dev *ndev, u64 db_bits,
+                                    void __iomem *mmio)
+{
+       unsigned long irqflags;
+
+       if (ndev_is_unsafe(ndev, NTB_UNSAFE_DB))
+               pr_warn_once("%s: NTB unsafe doorbell access", __func__);
+
+       if (db_bits & ~ndev->db_valid_mask)
+               return -EINVAL;
+
+       spin_lock_irqsave(&ndev->db_mask_lock, irqflags);
+       {
+               ndev->db_mask &= ~db_bits;
+               ndev->reg->db_iowrite(ndev->db_mask, mmio);
+       }
+       spin_unlock_irqrestore(&ndev->db_mask_lock, irqflags);
+
+       return 0;
+}
+
+static inline u64 ndev_vec_mask(struct intel_ntb_dev *ndev, int db_vector)
+{
+       u64 shift, mask;
+
+       shift = ndev->db_vec_shift;
+       mask = BIT_ULL(shift) - 1;
+
+       return mask << (shift * db_vector);
+}
+
+static inline int ndev_spad_addr(struct intel_ntb_dev *ndev, int idx,
+                                phys_addr_t *spad_addr, phys_addr_t reg_addr,
+                                unsigned long reg)
+{
+       if (ndev_is_unsafe(ndev, NTB_UNSAFE_SPAD))
+               pr_warn_once("%s: NTB unsafe scratchpad access", __func__);
+
+       if (idx < 0 || idx >= ndev->spad_count)
+               return -EINVAL;
+
+       if (spad_addr) {
+               *spad_addr = reg_addr + reg + (idx << 2);
+               dev_dbg(ndev_dev(ndev), "Peer spad addr %llx\n", *spad_addr);
+       }
+
+       return 0;
+}
+
+static inline u32 ndev_spad_read(struct intel_ntb_dev *ndev, int idx,
+                                void __iomem *mmio)
+{
+       if (ndev_is_unsafe(ndev, NTB_UNSAFE_SPAD))
+               pr_warn_once("%s: NTB unsafe scratchpad access", __func__);
+
+       if (idx < 0 || idx >= ndev->spad_count)
+               return 0;
+
+       return ioread32(mmio + (idx << 2));
+}
+
+static inline int ndev_spad_write(struct intel_ntb_dev *ndev, int idx, u32 val,
+                                 void __iomem *mmio)
+{
+       if (ndev_is_unsafe(ndev, NTB_UNSAFE_SPAD))
+               pr_warn_once("%s: NTB unsafe scratchpad access", __func__);
+
+       if (idx < 0 || idx >= ndev->spad_count)
+               return -EINVAL;
+
+       iowrite32(val, mmio + (idx << 2));
+
+       return 0;
+}
+
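+/* Common interrupt body: map the vector to its doorbell bits, poll the link
+ * if the link doorbell falls within this vector, and forward doorbell events
+ * to the NTB core.
+ */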
+static irqreturn_t ndev_interrupt(struct intel_ntb_dev *ndev, int vec)
+{
+       u64 vec_mask;
+
+       vec_mask = ndev_vec_mask(ndev, vec);
+
+       dev_dbg(ndev_dev(ndev), "vec %d vec_mask %llx\n", vec, vec_mask);
+
+       ndev->last_ts = jiffies;
+
+       if (vec_mask & ndev->db_link_mask) {
+               if (ndev->reg->poll_link(ndev))
+                       ntb_link_event(&ndev->ntb);
+       }
+
+       if (vec_mask & ndev->db_valid_mask)
+               ntb_db_event(&ndev->ntb, vec);
+
+       return IRQ_HANDLED;
+}
+
+static irqreturn_t ndev_vec_isr(int irq, void *dev)
+{
+       struct intel_ntb_vec *nvec = dev;
+
+       return ndev_interrupt(nvec->ndev, nvec->num);
+}
+
+static irqreturn_t ndev_irq_isr(int irq, void *dev)
+{
+       struct intel_ntb_dev *ndev = dev;
+
+       return ndev_interrupt(ndev, irq - ndev_pdev(ndev)->irq);
+}
+
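+/* Interrupt setup tries MSI-X first (one vector per doorbell group), then a
+ * single MSI vector, then legacy shared INTx.  db_vec_shift records how many
+ * doorbell bits each vector covers.
+ */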
+static int ndev_init_isr(struct intel_ntb_dev *ndev,
+                        int msix_min, int msix_max,
+                        int msix_shift, int total_shift)
+{
+       struct pci_dev *pdev;
+       int rc, i, msix_count, node;
+
+       pdev = ndev_pdev(ndev);
+
+       node = dev_to_node(&pdev->dev);
+
+       /* Mask all doorbell interrupts */
+       ndev->db_mask = ndev->db_valid_mask;
+       ndev->reg->db_iowrite(ndev->db_mask,
+                             ndev->self_mmio +
+                             ndev->self_reg->db_mask);
+
+       /* Try to set up msix irq */
+
+       ndev->vec = kzalloc_node(msix_max * sizeof(*ndev->vec),
+                                GFP_KERNEL, node);
+       if (!ndev->vec)
+               goto err_msix_vec_alloc;
+
+       ndev->msix = kzalloc_node(msix_max * sizeof(*ndev->msix),
+                                 GFP_KERNEL, node);
+       if (!ndev->msix)
+               goto err_msix_alloc;
+
+       for (i = 0; i < msix_max; ++i)
+               ndev->msix[i].entry = i;
+
+       msix_count = pci_enable_msix_range(pdev, ndev->msix,
+                                          msix_min, msix_max);
+       if (msix_count < 0)
+               goto err_msix_enable;
+
+       for (i = 0; i < msix_count; ++i) {
+               ndev->vec[i].ndev = ndev;
+               ndev->vec[i].num = i;
+               rc = request_irq(ndev->msix[i].vector, ndev_vec_isr, 0,
+                                "ndev_vec_isr", &ndev->vec[i]);
+               if (rc)
+                       goto err_msix_request;
+       }
+
+       dev_dbg(ndev_dev(ndev), "Using msix interrupts\n");
+       ndev->db_vec_count = msix_count;
+       ndev->db_vec_shift = msix_shift;
+       return 0;
+
+err_msix_request:
+       while (i-- > 0)
+               free_irq(ndev->msix[i].vector, &ndev->vec[i]);
+       pci_disable_msix(pdev);
+err_msix_enable:
+       kfree(ndev->msix);
+err_msix_alloc:
+       kfree(ndev->vec);
+err_msix_vec_alloc:
+       ndev->msix = NULL;
+       ndev->vec = NULL;
+
+       /* Try to set up msi irq */
+
+       rc = pci_enable_msi(pdev);
+       if (rc)
+               goto err_msi_enable;
+
+       rc = request_irq(pdev->irq, ndev_irq_isr, 0,
+                        "ndev_irq_isr", ndev);
+       if (rc)
+               goto err_msi_request;
+
+       dev_dbg(ndev_dev(ndev), "Using msi interrupts\n");
+       ndev->db_vec_count = 1;
+       ndev->db_vec_shift = total_shift;
+       return 0;
+
+err_msi_request:
+       pci_disable_msi(pdev);
+err_msi_enable:
+
+       /* Try to set up intx irq */
+
+       pci_intx(pdev, 1);
+
+       rc = request_irq(pdev->irq, ndev_irq_isr, IRQF_SHARED,
+                        "ndev_irq_isr", ndev);
+       if (rc)
+               goto err_intx_request;
+
+       dev_dbg(ndev_dev(ndev), "Using intx interrupts\n");
+       ndev->db_vec_count = 1;
+       ndev->db_vec_shift = total_shift;
+       return 0;
+
+err_intx_request:
+       return rc;
+}
+
+static void ndev_deinit_isr(struct intel_ntb_dev *ndev)
+{
+       struct pci_dev *pdev;
+       int i;
+
+       pdev = ndev_pdev(ndev);
+
+       /* Mask all doorbell interrupts */
+       ndev->db_mask = ndev->db_valid_mask;
+       ndev->reg->db_iowrite(ndev->db_mask,
+                             ndev->self_mmio +
+                             ndev->self_reg->db_mask);
+
+       if (ndev->msix) {
+               i = ndev->db_vec_count;
+               while (i--)
+                       free_irq(ndev->msix[i].vector, &ndev->vec[i]);
+               pci_disable_msix(pdev);
+               kfree(ndev->msix);
+               kfree(ndev->vec);
+       } else {
+               free_irq(pdev->irq, ndev);
+               if (pci_dev_msi_enabled(pdev))
+                       pci_disable_msi(pdev);
+       }
+}
+
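+/* debugfs "info" read: snapshot the device configuration, link state and NTB
+ * window registers into a temporary buffer for userspace.
+ */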
+static ssize_t ndev_debugfs_read(struct file *filp, char __user *ubuf,
+                                size_t count, loff_t *offp)
+{
+       struct intel_ntb_dev *ndev;
+       void __iomem *mmio;
+       char *buf;
+       size_t buf_size;
+       ssize_t ret, off;
+       union { u64 v64; u32 v32; u16 v16; } u;
+
+       ndev = filp->private_data;
+       mmio = ndev->self_mmio;
+
+       buf_size = min(count, 0x800ul);
+
+       buf = kmalloc(buf_size, GFP_KERNEL);
+       if (!buf)
+               return -ENOMEM;
+
+       off = 0;
+
+       off += scnprintf(buf + off, buf_size - off,
+                        "NTB Device Information:\n");
+
+       off += scnprintf(buf + off, buf_size - off,
+                        "Connection Topology -\t%s\n",
+                        ntb_topo_string(ndev->ntb.topo));
+
+       off += scnprintf(buf + off, buf_size - off,
+                        "B2B Offset -\t\t%#lx\n", ndev->b2b_off);
+       off += scnprintf(buf + off, buf_size - off,
+                        "B2B MW Idx -\t\t%d\n", ndev->b2b_idx);
+       off += scnprintf(buf + off, buf_size - off,
+                        "BAR4 Split -\t\t%s\n",
+                        ndev->bar4_split ? "yes" : "no");
+
+       off += scnprintf(buf + off, buf_size - off,
+                        "NTB CTL -\t\t%#06x\n", ndev->ntb_ctl);
+       off += scnprintf(buf + off, buf_size - off,
+                        "LNK STA -\t\t%#06x\n", ndev->lnk_sta);
+
+       if (!ndev->reg->link_is_up(ndev)) {
+               off += scnprintf(buf + off, buf_size - off,
+                                "Link Status -\t\tDown\n");
+       } else {
+               off += scnprintf(buf + off, buf_size - off,
+                                "Link Status -\t\tUp\n");
+               off += scnprintf(buf + off, buf_size - off,
+                                "Link Speed -\t\tPCI-E Gen %u\n",
+                                NTB_LNK_STA_SPEED(ndev->lnk_sta));
+               off += scnprintf(buf + off, buf_size - off,
+                                "Link Width -\t\tx%u\n",
+                                NTB_LNK_STA_WIDTH(ndev->lnk_sta));
+       }
+
+       off += scnprintf(buf + off, buf_size - off,
+                        "Memory Window Count -\t%u\n", ndev->mw_count);
+       off += scnprintf(buf + off, buf_size - off,
+                        "Scratchpad Count -\t%u\n", ndev->spad_count);
+       off += scnprintf(buf + off, buf_size - off,
+                        "Doorbell Count -\t%u\n", ndev->db_count);
+       off += scnprintf(buf + off, buf_size - off,
+                        "Doorbell Vector Count -\t%u\n", ndev->db_vec_count);
+       off += scnprintf(buf + off, buf_size - off,
+                        "Doorbell Vector Shift -\t%u\n", ndev->db_vec_shift);
+
+       off += scnprintf(buf + off, buf_size - off,
+                        "Doorbell Valid Mask -\t%#llx\n", ndev->db_valid_mask);
+       off += scnprintf(buf + off, buf_size - off,
+                        "Doorbell Link Mask -\t%#llx\n", ndev->db_link_mask);
+       off += scnprintf(buf + off, buf_size - off,
+                        "Doorbell Mask Cached -\t%#llx\n", ndev->db_mask);
+
+       u.v64 = ndev_db_read(ndev, mmio + ndev->self_reg->db_mask);
+       off += scnprintf(buf + off, buf_size - off,
+                        "Doorbell Mask -\t\t%#llx\n", u.v64);
+
+       u.v64 = ndev_db_read(ndev, mmio + ndev->self_reg->db_bell);
+       off += scnprintf(buf + off, buf_size - off,
+                        "Doorbell Bell -\t\t%#llx\n", u.v64);
+
+       off += scnprintf(buf + off, buf_size - off,
+                        "\nNTB Incoming XLAT:\n");
+
+       u.v64 = ioread64(mmio + bar2_off(ndev->xlat_reg->bar2_xlat, 2));
+       off += scnprintf(buf + off, buf_size - off,
+                        "XLAT23 -\t\t%#018llx\n", u.v64);
+
+       if (ndev->bar4_split) {
+               u.v32 = ioread32(mmio + bar2_off(ndev->xlat_reg->bar2_xlat, 4));
+               off += scnprintf(buf + off, buf_size - off,
+                                "XLAT4 -\t\t\t%#06x\n", u.v32);
+
+               u.v32 = ioread32(mmio + bar2_off(ndev->xlat_reg->bar2_xlat, 5));
+               off += scnprintf(buf + off, buf_size - off,
+                                "XLAT5 -\t\t\t%#06x\n", u.v32);
+       } else {
+               u.v64 = ioread64(mmio + bar2_off(ndev->xlat_reg->bar2_xlat, 4));
+               off += scnprintf(buf + off, buf_size - off,
+                                "XLAT45 -\t\t%#018llx\n", u.v64);
+       }
+
+       u.v64 = ioread64(mmio + bar2_off(ndev->xlat_reg->bar2_limit, 2));
+       off += scnprintf(buf + off, buf_size - off,
+                        "LMT23 -\t\t\t%#018llx\n", u.v64);
+
+       if (ndev->bar4_split) {
+               u.v32 = ioread32(mmio + bar2_off(ndev->xlat_reg->bar2_limit, 4));
+               off += scnprintf(buf + off, buf_size - off,
+                                "LMT4 -\t\t\t%#06x\n", u.v32);
+               u.v32 = ioread32(mmio + bar2_off(ndev->xlat_reg->bar2_limit, 5));
+               off += scnprintf(buf + off, buf_size - off,
+                                "LMT5 -\t\t\t%#06x\n", u.v32);
+       } else {
+               u.v64 = ioread64(mmio + bar2_off(ndev->xlat_reg->bar2_limit, 4));
+               off += scnprintf(buf + off, buf_size - off,
+                                "LMT45 -\t\t\t%#018llx\n", u.v64);
+       }
+
+       if (pdev_is_xeon(ndev->ntb.pdev)) {
+               if (ntb_topo_is_b2b(ndev->ntb.topo)) {
+                       off += scnprintf(buf + off, buf_size - off,
+                                        "\nNTB Outgoing B2B XLAT:\n");
+
+                       u.v64 = ioread64(mmio + XEON_PBAR23XLAT_OFFSET);
+                       off += scnprintf(buf + off, buf_size - off,
+                                        "B2B XLAT23 -\t\t%#018llx\n", u.v64);
+
+                       if (ndev->bar4_split) {
+                               u.v32 = ioread32(mmio + XEON_PBAR4XLAT_OFFSET);
+                               off += scnprintf(buf + off, buf_size - off,
+                                                "B2B XLAT4 -\t\t%#06x\n",
+                                                u.v32);
+                               u.v32 = ioread32(mmio + XEON_PBAR5XLAT_OFFSET);
+                               off += scnprintf(buf + off, buf_size - off,
+                                                "B2B XLAT5 -\t\t%#06x\n",
+                                                u.v32);
+                       } else {
+                               u.v64 = ioread64(mmio + XEON_PBAR45XLAT_OFFSET);
+                               off += scnprintf(buf + off, buf_size - off,
+                                                "B2B XLAT45 -\t\t%#018llx\n",
+                                                u.v64);
+                       }
+
+                       u.v64 = ioread64(mmio + XEON_PBAR23LMT_OFFSET);
+                       off += scnprintf(buf + off, buf_size - off,
+                                        "B2B LMT23 -\t\t%#018llx\n", u.v64);
+
+                       if (ndev->bar4_split) {
+                               u.v32 = ioread32(mmio + XEON_PBAR4LMT_OFFSET);
+                               off += scnprintf(buf + off, buf_size - off,
+                                                "B2B LMT4 -\t\t%#06x\n",
+                                                u.v32);
+                               u.v32 = ioread32(mmio + XEON_PBAR5LMT_OFFSET);
+                               off += scnprintf(buf + off, buf_size - off,
+                                                "B2B LMT5 -\t\t%#06x\n",
+                                                u.v32);
+                       } else {
+                               u.v64 = ioread64(mmio + XEON_PBAR45LMT_OFFSET);
+                               off += scnprintf(buf + off, buf_size - off,
+                                                "B2B LMT45 -\t\t%#018llx\n",
+                                                u.v64);
+                       }
+
+                       off += scnprintf(buf + off, buf_size - off,
+                                        "\nNTB Secondary BAR:\n");
+
+                       u.v64 = ioread64(mmio + XEON_SBAR0BASE_OFFSET);
+                       off += scnprintf(buf + off, buf_size - off,
+                                        "SBAR01 -\t\t%#018llx\n", u.v64);
+
+                       u.v64 = ioread64(mmio + XEON_SBAR23BASE_OFFSET);
+                       off += scnprintf(buf + off, buf_size - off,
+                                        "SBAR23 -\t\t%#018llx\n", u.v64);
+
+                       if (ndev->bar4_split) {
+                               u.v32 = ioread32(mmio + XEON_SBAR4BASE_OFFSET);
+                               off += scnprintf(buf + off, buf_size - off,
+                                                "SBAR4 -\t\t\t%#06x\n", u.v32);
+                               u.v32 = ioread32(mmio + XEON_SBAR5BASE_OFFSET);
+                               off += scnprintf(buf + off, buf_size - off,
+                                                "SBAR5 -\t\t\t%#06x\n", u.v32);
+                       } else {
+                               u.v64 = ioread64(mmio + XEON_SBAR45BASE_OFFSET);
+                               off += scnprintf(buf + off, buf_size - off,
+                                                "SBAR45 -\t\t%#018llx\n",
+                                                u.v64);
+                       }
+               }
+
+               off += scnprintf(buf + off, buf_size - off,
+                                "\nXEON NTB Statistics:\n");
+
+               u.v16 = ioread16(mmio + XEON_USMEMMISS_OFFSET);
+               off += scnprintf(buf + off, buf_size - off,
+                                "Upstream Memory Miss -\t%u\n", u.v16);
+
+               off += scnprintf(buf + off, buf_size - off,
+                                "\nXEON NTB Hardware Errors:\n");
+
+               if (!pci_read_config_word(ndev->ntb.pdev,
+                                         XEON_DEVSTS_OFFSET, &u.v16))
+                       off += scnprintf(buf + off, buf_size - off,
+                                        "DEVSTS -\t\t%#06x\n", u.v16);
+
+               if (!pci_read_config_word(ndev->ntb.pdev,
+                                         XEON_LINK_STATUS_OFFSET, &u.v16))
+                       off += scnprintf(buf + off, buf_size - off,
+                                        "LNKSTS -\t\t%#06x\n", u.v16);
+
+               if (!pci_read_config_dword(ndev->ntb.pdev,
+                                          XEON_UNCERRSTS_OFFSET, &u.v32))
+                       off += scnprintf(buf + off, buf_size - off,
+                                        "UNCERRSTS -\t\t%#06x\n", u.v32);
+
+               if (!pci_read_config_dword(ndev->ntb.pdev,
+                                          XEON_CORERRSTS_OFFSET, &u.v32))
+                       off += scnprintf(buf + off, buf_size - off,
+                                        "CORERRSTS -\t\t%#06x\n", u.v32);
+       }
+
+       ret = simple_read_from_buffer(ubuf, count, offp, buf, off);
+       kfree(buf);
+       return ret;
+}
+
+static void ndev_init_debugfs(struct intel_ntb_dev *ndev)
+{
+       if (!debugfs_dir) {
+               ndev->debugfs_dir = NULL;
+               ndev->debugfs_info = NULL;
+       } else {
+               ndev->debugfs_dir =
+                       debugfs_create_dir(ndev_name(ndev), debugfs_dir);
+               if (!ndev->debugfs_dir)
+                       ndev->debugfs_info = NULL;
+               else
+                       ndev->debugfs_info =
+                               debugfs_create_file("info", S_IRUSR,
+                                                   ndev->debugfs_dir, ndev,
+                                                   &intel_ntb_debugfs_info);
+       }
+}
+
+static void ndev_deinit_debugfs(struct intel_ntb_dev *ndev)
+{
+       debugfs_remove_recursive(ndev->debugfs_dir);
+}
+
+static int intel_ntb_mw_count(struct ntb_dev *ntb)
+{
+       return ntb_ndev(ntb)->mw_count;
+}
+
+static int intel_ntb_mw_get_range(struct ntb_dev *ntb, int idx,
+                                 phys_addr_t *base,
+                                 resource_size_t *size,
+                                 resource_size_t *align,
+                                 resource_size_t *align_size)
+{
+       struct intel_ntb_dev *ndev = ntb_ndev(ntb);
+       int bar;
+
+       if (idx >= ndev->b2b_idx && !ndev->b2b_off)
+               idx += 1;
+
+       bar = ndev_mw_to_bar(ndev, idx);
+       if (bar < 0)
+               return bar;
+
+       if (base)
+               *base = pci_resource_start(ndev->ntb.pdev, bar) +
+                       (idx == ndev->b2b_idx ? ndev->b2b_off : 0);
+
+       if (size)
+               *size = pci_resource_len(ndev->ntb.pdev, bar) -
+                       (idx == ndev->b2b_idx ? ndev->b2b_off : 0);
+
+       if (align)
+               *align = pci_resource_len(ndev->ntb.pdev, bar);
+
+       if (align_size)
+               *align_size = 1;
+
+       return 0;
+}
+
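+/* Program a memory window: write the DMA address into the BAR's XLAT register
+ * and, where supported, a limit register.  Each write is read back to verify
+ * that the hardware accepted the value.
+ */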
+static int intel_ntb_mw_set_trans(struct ntb_dev *ntb, int idx,
+                                 dma_addr_t addr, resource_size_t size)
+{
+       struct intel_ntb_dev *ndev = ntb_ndev(ntb);
+       unsigned long base_reg, xlat_reg, limit_reg;
+       resource_size_t bar_size, mw_size;
+       void __iomem *mmio;
+       u64 base, limit, reg_val;
+       int bar;
+
+       if (idx >= ndev->b2b_idx && !ndev->b2b_off)
+               idx += 1;
+
+       bar = ndev_mw_to_bar(ndev, idx);
+       if (bar < 0)
+               return bar;
+
+       bar_size = pci_resource_len(ndev->ntb.pdev, bar);
+
+       if (idx == ndev->b2b_idx)
+               mw_size = bar_size - ndev->b2b_off;
+       else
+               mw_size = bar_size;
+
+       /* hardware requires that addr is aligned to bar size */
+       if (addr & (bar_size - 1))
+               return -EINVAL;
+
+       /* make sure the range fits in the usable mw size */
+       if (size > mw_size)
+               return -EINVAL;
+
+       mmio = ndev->self_mmio;
+       base_reg = bar0_off(ndev->xlat_reg->bar0_base, bar);
+       xlat_reg = bar2_off(ndev->xlat_reg->bar2_xlat, bar);
+       limit_reg = bar2_off(ndev->xlat_reg->bar2_limit, bar);
+
+       if (bar < 4 || !ndev->bar4_split) {
+               base = ioread64(mmio + base_reg);
+
+               /* Set the limit, if supported, when size is not mw_size */
+               if (limit_reg && size != mw_size)
+                       limit = base + size;
+               else
+                       limit = 0;
+
+               /* set and verify setting the translation address */
+               iowrite64(addr, mmio + xlat_reg);
+               reg_val = ioread64(mmio + xlat_reg);
+               if (reg_val != addr) {
+                       iowrite64(0, mmio + xlat_reg);
+                       return -EIO;
+               }
+
+               /* set and verify setting the limit */
+               iowrite64(limit, mmio + limit_reg);
+               reg_val = ioread64(mmio + limit_reg);
+               if (reg_val != limit) {
+                       iowrite64(base, mmio + limit_reg);
+                       iowrite64(0, mmio + xlat_reg);
+                       return -EIO;
+               }
+       } else {
+               /* split bar addr range must all be 32 bit */
+               if (addr & (~0ull << 32))
+                       return -EINVAL;
+               if ((addr + size) & (~0ull << 32))
+                       return -EINVAL;
+
+               base = ioread32(mmio + base_reg);
+
+               /* Set the limit, if supported, when size is not mw_size */
+               if (limit_reg && size != mw_size)
+                       limit = base + size;
+               else
+                       limit = 0;
+
+               /* set and verify setting the translation address */
+               iowrite32(addr, mmio + xlat_reg);
+               reg_val = ioread32(mmio + xlat_reg);
+               if (reg_val != addr) {
+                       iowrite32(0, mmio + xlat_reg);
+                       return -EIO;
+               }
+
+               /* set and verify setting the limit */
+               iowrite32(limit, mmio + limit_reg);
+               reg_val = ioread32(mmio + limit_reg);
+               if (reg_val != limit) {
+                       iowrite32(base, mmio + limit_reg);
+                       iowrite32(0, mmio + xlat_reg);
+                       return -EIO;
+               }
+       }
+
+       return 0;
+}
+
+static int intel_ntb_link_is_up(struct ntb_dev *ntb,
+                               enum ntb_speed *speed,
+                               enum ntb_width *width)
+{
+       struct intel_ntb_dev *ndev = ntb_ndev(ntb);
+
+       if (ndev->reg->link_is_up(ndev)) {
+               if (speed)
+                       *speed = NTB_LNK_STA_SPEED(ndev->lnk_sta);
+               if (width)
+                       *width = NTB_LNK_STA_WIDTH(ndev->lnk_sta);
+               return 1;
+       } else {
+               /* TODO MAYBE: is it possible to observe the link speed and
+                * width while link is training? */
+               if (speed)
+                       *speed = NTB_SPEED_NONE;
+               if (width)
+                       *width = NTB_WIDTH_NONE;
+               return 0;
+       }
+}
+
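+/* Enabling the link clears the disable/config-lock bits and enables snooping
+ * on the translated BARs; the speed/width hints are ignored by this driver.
+ */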
+static int intel_ntb_link_enable(struct ntb_dev *ntb,
+                                enum ntb_speed max_speed,
+                                enum ntb_width max_width)
+{
+       struct intel_ntb_dev *ndev;
+       u32 ntb_ctl;
+
+       ndev = container_of(ntb, struct intel_ntb_dev, ntb);
+
+       if (ndev->ntb.topo == NTB_TOPO_SEC)
+               return -EINVAL;
+
+       dev_dbg(ndev_dev(ndev),
+               "Enabling link with max_speed %d max_width %d\n",
+               max_speed, max_width);
+       if (max_speed != NTB_SPEED_AUTO)
+               dev_dbg(ndev_dev(ndev), "ignoring max_speed %d\n", max_speed);
+       if (max_width != NTB_WIDTH_AUTO)
+               dev_dbg(ndev_dev(ndev), "ignoring max_width %d\n", max_width);
+
+       ntb_ctl = ioread32(ndev->self_mmio + ndev->reg->ntb_ctl);
+       ntb_ctl &= ~(NTB_CTL_DISABLE | NTB_CTL_CFG_LOCK);
+       ntb_ctl |= NTB_CTL_P2S_BAR2_SNOOP | NTB_CTL_S2P_BAR2_SNOOP;
+       ntb_ctl |= NTB_CTL_P2S_BAR4_SNOOP | NTB_CTL_S2P_BAR4_SNOOP;
+       if (ndev->bar4_split)
+               ntb_ctl |= NTB_CTL_P2S_BAR5_SNOOP | NTB_CTL_S2P_BAR5_SNOOP;
+       iowrite32(ntb_ctl, ndev->self_mmio + ndev->reg->ntb_ctl);
+
+       return 0;
+}
+
+static int intel_ntb_link_disable(struct ntb_dev *ntb)
+{
+       struct intel_ntb_dev *ndev;
+       u32 ntb_cntl;
+
+       ndev = container_of(ntb, struct intel_ntb_dev, ntb);
+
+       if (ndev->ntb.topo == NTB_TOPO_SEC)
+               return -EINVAL;
+
+       dev_dbg(ndev_dev(ndev), "Disabling link\n");
+
+       /* Bring NTB link down */
+       ntb_cntl = ioread32(ndev->self_mmio + ndev->reg->ntb_ctl);
+       ntb_cntl &= ~(NTB_CTL_P2S_BAR2_SNOOP | NTB_CTL_S2P_BAR2_SNOOP);
+       ntb_cntl &= ~(NTB_CTL_P2S_BAR4_SNOOP | NTB_CTL_S2P_BAR4_SNOOP);
+       if (ndev->bar4_split)
+               ntb_cntl &= ~(NTB_CTL_P2S_BAR5_SNOOP | NTB_CTL_S2P_BAR5_SNOOP);
+       ntb_cntl |= NTB_CTL_DISABLE | NTB_CTL_CFG_LOCK;
+       iowrite32(ntb_cntl, ndev->self_mmio + ndev->reg->ntb_ctl);
+
+       return 0;
+}
+
+static int intel_ntb_db_is_unsafe(struct ntb_dev *ntb)
+{
+       return ndev_ignore_unsafe(ntb_ndev(ntb), NTB_UNSAFE_DB);
+}
+
+static u64 intel_ntb_db_valid_mask(struct ntb_dev *ntb)
+{
+       return ntb_ndev(ntb)->db_valid_mask;
+}
+
+static int intel_ntb_db_vector_count(struct ntb_dev *ntb)
+{
+       struct intel_ntb_dev *ndev;
+
+       ndev = container_of(ntb, struct intel_ntb_dev, ntb);
+
+       return ndev->db_vec_count;
+}
+
+static u64 intel_ntb_db_vector_mask(struct ntb_dev *ntb, int db_vector)
+{
+       struct intel_ntb_dev *ndev = ntb_ndev(ntb);
+
+       if (db_vector < 0 || db_vector >= ndev->db_vec_count)
+               return 0;
+
+       return ndev->db_valid_mask & ndev_vec_mask(ndev, db_vector);
+}
+
+static u64 intel_ntb_db_read(struct ntb_dev *ntb)
+{
+       struct intel_ntb_dev *ndev = ntb_ndev(ntb);
+
+       return ndev_db_read(ndev,
+                           ndev->self_mmio +
+                           ndev->self_reg->db_bell);
+}
+
+static int intel_ntb_db_clear(struct ntb_dev *ntb, u64 db_bits)
+{
+       struct intel_ntb_dev *ndev = ntb_ndev(ntb);
+
+       return ndev_db_write(ndev, db_bits,
+                            ndev->self_mmio +
+                            ndev->self_reg->db_bell);
+}
+
+static int intel_ntb_db_set_mask(struct ntb_dev *ntb, u64 db_bits)
+{
+       struct intel_ntb_dev *ndev = ntb_ndev(ntb);
+
+       return ndev_db_set_mask(ndev, db_bits,
+                               ndev->self_mmio +
+                               ndev->self_reg->db_mask);
+}
+
+static int intel_ntb_db_clear_mask(struct ntb_dev *ntb, u64 db_bits)
+{
+       struct intel_ntb_dev *ndev = ntb_ndev(ntb);
+
+       return ndev_db_clear_mask(ndev, db_bits,
+                                 ndev->self_mmio +
+                                 ndev->self_reg->db_mask);
+}
+
+static int intel_ntb_peer_db_addr(struct ntb_dev *ntb,
+                                 phys_addr_t *db_addr,
+                                 resource_size_t *db_size)
+{
+       struct intel_ntb_dev *ndev = ntb_ndev(ntb);
+
+       return ndev_db_addr(ndev, db_addr, db_size, ndev->peer_addr,
+                           ndev->peer_reg->db_bell);
+}
+
+static int intel_ntb_peer_db_set(struct ntb_dev *ntb, u64 db_bits)
+{
+       struct intel_ntb_dev *ndev = ntb_ndev(ntb);
+
+       return ndev_db_write(ndev, db_bits,
+                            ndev->peer_mmio +
+                            ndev->peer_reg->db_bell);
+}
+
+static int intel_ntb_spad_is_unsafe(struct ntb_dev *ntb)
+{
+       return ndev_ignore_unsafe(ntb_ndev(ntb), NTB_UNSAFE_SPAD);
+}
+
+static int intel_ntb_spad_count(struct ntb_dev *ntb)
+{
+       struct intel_ntb_dev *ndev;
+
+       ndev = container_of(ntb, struct intel_ntb_dev, ntb);
+
+       return ndev->spad_count;
+}
+
+static u32 intel_ntb_spad_read(struct ntb_dev *ntb, int idx)
+{
+       struct intel_ntb_dev *ndev = ntb_ndev(ntb);
+
+       return ndev_spad_read(ndev, idx,
+                             ndev->self_mmio +
+                             ndev->self_reg->spad);
+}
+
+static int intel_ntb_spad_write(struct ntb_dev *ntb,
+                               int idx, u32 val)
+{
+       struct intel_ntb_dev *ndev = ntb_ndev(ntb);
+
+       return ndev_spad_write(ndev, idx, val,
+                              ndev->self_mmio +
+                              ndev->self_reg->spad);
+}
+
+static int intel_ntb_peer_spad_addr(struct ntb_dev *ntb, int idx,
+                                   phys_addr_t *spad_addr)
+{
+       struct intel_ntb_dev *ndev = ntb_ndev(ntb);
+
+       return ndev_spad_addr(ndev, idx, spad_addr, ndev->peer_addr,
+                             ndev->peer_reg->spad);
+}
+
+static u32 intel_ntb_peer_spad_read(struct ntb_dev *ntb, int idx)
+{
+       struct intel_ntb_dev *ndev = ntb_ndev(ntb);
+
+       return ndev_spad_read(ndev, idx,
+                             ndev->peer_mmio +
+                             ndev->peer_reg->spad);
+}
+
+static int intel_ntb_peer_spad_write(struct ntb_dev *ntb,
+                                    int idx, u32 val)
+{
+       struct intel_ntb_dev *ndev = ntb_ndev(ntb);
+
+       return ndev_spad_write(ndev, idx, val,
+                              ndev->peer_mmio +
+                              ndev->peer_reg->spad);
+}
+
+/* ATOM */
+
+static u64 atom_db_ioread(void __iomem *mmio)
+{
+       return ioread64(mmio);
+}
+
+static void atom_db_iowrite(u64 bits, void __iomem *mmio)
+{
+       iowrite64(bits, mmio);
+}
+
+static int atom_poll_link(struct intel_ntb_dev *ndev)
+{
+       u32 ntb_ctl;
+
+       ntb_ctl = ioread32(ndev->self_mmio + ATOM_NTBCNTL_OFFSET);
+
+       if (ntb_ctl == ndev->ntb_ctl)
+               return 0;
+
+       ndev->ntb_ctl = ntb_ctl;
+
+       ndev->lnk_sta = ioread32(ndev->self_mmio + ATOM_LINK_STATUS_OFFSET);
+
+       return 1;
+}
+
+static int atom_link_is_up(struct intel_ntb_dev *ndev)
+{
+       return ATOM_NTB_CTL_ACTIVE(ndev->ntb_ctl);
+}
+
+static int atom_link_is_err(struct intel_ntb_dev *ndev)
+{
+       if (ioread32(ndev->self_mmio + ATOM_LTSSMSTATEJMP_OFFSET)
+           & ATOM_LTSSMSTATEJMP_FORCEDETECT)
+               return 1;
+
+       if (ioread32(ndev->self_mmio + ATOM_IBSTERRRCRVSTS0_OFFSET)
+           & ATOM_IBIST_ERR_OFLOW)
+               return 1;
+
+       return 0;
+}
+
+static inline enum ntb_topo atom_ppd_topo(struct intel_ntb_dev *ndev, u32 ppd)
+{
+       switch (ppd & ATOM_PPD_TOPO_MASK) {
+       case ATOM_PPD_TOPO_B2B_USD:
+               dev_dbg(ndev_dev(ndev), "PPD %d B2B USD\n", ppd);
+               return NTB_TOPO_B2B_USD;
+
+       case ATOM_PPD_TOPO_B2B_DSD:
+               dev_dbg(ndev_dev(ndev), "PPD %d B2B DSD\n", ppd);
+               return NTB_TOPO_B2B_DSD;
+
+       case ATOM_PPD_TOPO_PRI_USD:
+       case ATOM_PPD_TOPO_PRI_DSD: /* accept bogus PRI_DSD */
+       case ATOM_PPD_TOPO_SEC_USD:
+       case ATOM_PPD_TOPO_SEC_DSD: /* accept bogus SEC_DSD */
+               dev_dbg(ndev_dev(ndev), "PPD %d non B2B disabled\n", ppd);
+               return NTB_TOPO_NONE;
+       }
+
+       dev_dbg(ndev_dev(ndev), "PPD %d invalid\n", ppd);
+       return NTB_TOPO_NONE;
+}
+
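+/* Heartbeat work for Atom: poll the link state periodically and, if the link
+ * is down with an error latched, run the ModPhy reset and error-clearing
+ * recovery sequence below.
+ */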
+static void atom_link_hb(struct work_struct *work)
+{
+       struct intel_ntb_dev *ndev = hb_ndev(work);
+       unsigned long poll_ts;
+       void __iomem *mmio;
+       u32 status32;
+
+       poll_ts = ndev->last_ts + ATOM_LINK_HB_TIMEOUT;
+
+       /* Delay polling the link status if an interrupt was received,
+        * unless the cached link status says the link is down.
+        */
+       if (time_after(poll_ts, jiffies) && atom_link_is_up(ndev)) {
+               schedule_delayed_work(&ndev->hb_timer, poll_ts - jiffies);
+               return;
+       }
+
+       if (atom_poll_link(ndev))
+               ntb_link_event(&ndev->ntb);
+
+       if (atom_link_is_up(ndev) || !atom_link_is_err(ndev)) {
+               schedule_delayed_work(&ndev->hb_timer, ATOM_LINK_HB_TIMEOUT);
+               return;
+       }
+
+       /* Link is down with error: recover the link! */
+
+       mmio = ndev->self_mmio;
+
+       /* Driver resets the NTB ModPhy lanes - magic! */
+       iowrite8(0xe0, mmio + ATOM_MODPHY_PCSREG6);
+       iowrite8(0x40, mmio + ATOM_MODPHY_PCSREG4);
+       iowrite8(0x60, mmio + ATOM_MODPHY_PCSREG4);
+       iowrite8(0x60, mmio + ATOM_MODPHY_PCSREG6);
+
+       /* Driver waits 100ms to allow the NTB ModPhy to settle */
+       msleep(100);
+
+       /* Clear AER Errors, write to clear */
+       status32 = ioread32(mmio + ATOM_ERRCORSTS_OFFSET);
+       dev_dbg(ndev_dev(ndev), "ERRCORSTS = %x\n", status32);
+       status32 &= PCI_ERR_COR_REP_ROLL;
+       iowrite32(status32, mmio + ATOM_ERRCORSTS_OFFSET);
+
+       /* Clear unexpected electrical idle event in LTSSM, write to clear */
+       status32 = ioread32(mmio + ATOM_LTSSMERRSTS0_OFFSET);
+       dev_dbg(ndev_dev(ndev), "LTSSMERRSTS0 = %x\n", status32);
+       status32 |= ATOM_LTSSMERRSTS0_UNEXPECTEDEI;
+       iowrite32(status32, mmio + ATOM_LTSSMERRSTS0_OFFSET);
+
+       /* Clear DeSkew Buffer error, write to clear */
+       status32 = ioread32(mmio + ATOM_DESKEWSTS_OFFSET);
+       dev_dbg(ndev_dev(ndev), "DESKEWSTS = %x\n", status32);
+       status32 |= ATOM_DESKEWSTS_DBERR;
+       iowrite32(status32, mmio + ATOM_DESKEWSTS_OFFSET);
+
+       status32 = ioread32(mmio + ATOM_IBSTERRRCRVSTS0_OFFSET);
+       dev_dbg(ndev_dev(ndev), "IBSTERRRCRVSTS0 = %x\n", status32);
+       status32 &= ATOM_IBIST_ERR_OFLOW;
+       iowrite32(status32, mmio + ATOM_IBSTERRRCRVSTS0_OFFSET);
+
+       /* Releases the NTB state machine to allow the link to retrain */
+       status32 = ioread32(mmio + ATOM_LTSSMSTATEJMP_OFFSET);
+       dev_dbg(ndev_dev(ndev), "LTSSMSTATEJMP = %x\n", status32);
+       status32 &= ~ATOM_LTSSMSTATEJMP_FORCEDETECT;
+       iowrite32(status32, mmio + ATOM_LTSSMSTATEJMP_OFFSET);
+
+       /* There is a potential race between the 2 NTB devices recovering at the
+        * same time.  If the times are the same, the link will not recover and
+        * the driver will be stuck in this loop forever.  Add a random interval
+        * to the recovery time to prevent this race.
+        */
+       schedule_delayed_work(&ndev->hb_timer, ATOM_LINK_RECOVERY_TIME
+                             + prandom_u32() % ATOM_LINK_RECOVERY_TIME);
+}
+
+static int atom_init_isr(struct intel_ntb_dev *ndev)
+{
+       int rc;
+
+       rc = ndev_init_isr(ndev, 1, ATOM_DB_MSIX_VECTOR_COUNT,
+                          ATOM_DB_MSIX_VECTOR_SHIFT, ATOM_DB_TOTAL_SHIFT);
+       if (rc)
+               return rc;
+
+       /* ATOM doesn't have link status interrupt, poll on that platform */
+       ndev->last_ts = jiffies;
+       INIT_DELAYED_WORK(&ndev->hb_timer, atom_link_hb);
+       schedule_delayed_work(&ndev->hb_timer, ATOM_LINK_HB_TIMEOUT);
+
+       return 0;
+}
+
+static void atom_deinit_isr(struct intel_ntb_dev *ndev)
+{
+       cancel_delayed_work_sync(&ndev->hb_timer);
+       ndev_deinit_isr(ndev);
+}
+
+static int atom_init_ntb(struct intel_ntb_dev *ndev)
+{
+       ndev->mw_count = ATOM_MW_COUNT;
+       ndev->spad_count = ATOM_SPAD_COUNT;
+       ndev->db_count = ATOM_DB_COUNT;
+
+       switch (ndev->ntb.topo) {
+       case NTB_TOPO_B2B_USD:
+       case NTB_TOPO_B2B_DSD:
+               ndev->self_reg = &atom_pri_reg;
+               ndev->peer_reg = &atom_b2b_reg;
+               ndev->xlat_reg = &atom_sec_xlat;
+
+               /* Enable Bus Master and Memory Space on the secondary side */
+               iowrite16(PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER,
+                         ndev->self_mmio + ATOM_SPCICMD_OFFSET);
+
+               break;
+
+       default:
+               return -EINVAL;
+       }
+
+       ndev->db_valid_mask = BIT_ULL(ndev->db_count) - 1;
+
+       return 0;
+}
+
+static int atom_init_dev(struct intel_ntb_dev *ndev)
+{
+       u32 ppd;
+       int rc;
+
+       rc = pci_read_config_dword(ndev->ntb.pdev, ATOM_PPD_OFFSET, &ppd);
+       if (rc)
+               return -EIO;
+
+       ndev->ntb.topo = atom_ppd_topo(ndev, ppd);
+       if (ndev->ntb.topo == NTB_TOPO_NONE)
+               return -EINVAL;
+
+       rc = atom_init_ntb(ndev);
+       if (rc)
+               return rc;
+
+       rc = atom_init_isr(ndev);
+       if (rc)
+               return rc;
+
+       if (ndev->ntb.topo != NTB_TOPO_SEC) {
+               /* Initiate PCI-E link training */
+               rc = pci_write_config_dword(ndev->ntb.pdev, ATOM_PPD_OFFSET,
+                                           ppd | ATOM_PPD_INIT_LINK);
+               if (rc)
+                       return rc;
+       }
+
+       return 0;
+}
+
+static void atom_deinit_dev(struct intel_ntb_dev *ndev)
+{
+       atom_deinit_isr(ndev);
+}
+
+/* XEON */
+
+static u64 xeon_db_ioread(void __iomem *mmio)
+{
+       return (u64)ioread16(mmio);
+}
+
+static void xeon_db_iowrite(u64 bits, void __iomem *mmio)
+{
+       iowrite16((u16)bits, mmio);
+}
+
+static int xeon_poll_link(struct intel_ntb_dev *ndev)
+{
+       u16 reg_val;
+       int rc;
+
+       ndev->reg->db_iowrite(ndev->db_link_mask,
+                             ndev->self_mmio +
+                             ndev->self_reg->db_bell);
+
+       rc = pci_read_config_word(ndev->ntb.pdev,
+                                 XEON_LINK_STATUS_OFFSET, &reg_val);
+       if (rc)
+               return 0;
+
+       if (reg_val == ndev->lnk_sta)
+               return 0;
+
+       ndev->lnk_sta = reg_val;
+
+       return 1;
+}
+
+static int xeon_link_is_up(struct intel_ntb_dev *ndev)
+{
+       if (ndev->ntb.topo == NTB_TOPO_SEC)
+               return 1;
+
+       return NTB_LNK_STA_ACTIVE(ndev->lnk_sta);
+}
+
+static inline enum ntb_topo xeon_ppd_topo(struct intel_ntb_dev *ndev, u8 ppd)
+{
+       switch (ppd & XEON_PPD_TOPO_MASK) {
+       case XEON_PPD_TOPO_B2B_USD:
+               return NTB_TOPO_B2B_USD;
+
+       case XEON_PPD_TOPO_B2B_DSD:
+               return NTB_TOPO_B2B_DSD;
+
+       case XEON_PPD_TOPO_PRI_USD:
+       case XEON_PPD_TOPO_PRI_DSD: /* accept bogus PRI_DSD */
+               return NTB_TOPO_PRI;
+
+       case XEON_PPD_TOPO_SEC_USD:
+       case XEON_PPD_TOPO_SEC_DSD: /* accept bogus SEC_DSD */
+               return NTB_TOPO_SEC;
+       }
+
+       return NTB_TOPO_NONE;
+}
+
+static inline int xeon_ppd_bar4_split(struct intel_ntb_dev *ndev, u8 ppd)
+{
+       if (ppd & XEON_PPD_SPLIT_BAR_MASK) {
+               dev_dbg(ndev_dev(ndev), "PPD %d split bar\n", ppd);
+               return 1;
+       }
+       return 0;
+}
+
+static int xeon_init_isr(struct intel_ntb_dev *ndev)
+{
+       return ndev_init_isr(ndev, XEON_DB_MSIX_VECTOR_COUNT,
+                            XEON_DB_MSIX_VECTOR_COUNT,
+                            XEON_DB_MSIX_VECTOR_SHIFT,
+                            XEON_DB_TOTAL_SHIFT);
+}
+
+static void xeon_deinit_isr(struct intel_ntb_dev *ndev)
+{
+       ndev_deinit_isr(ndev);
+}
+
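+/* In B2B topologies one BAR is claimed to reach the peer's NTB registers.  If
+ * b2b_mw_share is set and the BAR is at least twice XEON_B2B_MIN_SIZE, only
+ * its first half is used for that purpose so the second half remains usable
+ * as a memory window; otherwise the whole BAR is consumed and mw_count drops
+ * by one.
+ */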
+static int xeon_setup_b2b_mw(struct intel_ntb_dev *ndev,
+                            const struct intel_b2b_addr *addr,
+                            const struct intel_b2b_addr *peer_addr)
+{
+       struct pci_dev *pdev;
+       void __iomem *mmio;
+       resource_size_t bar_size;
+       phys_addr_t bar_addr;
+       int b2b_bar;
+       u8 bar_sz;
+
+       pdev = ndev_pdev(ndev);
+       mmio = ndev->self_mmio;
+
+       if (ndev->b2b_idx >= ndev->mw_count) {
+               dev_dbg(ndev_dev(ndev), "not using b2b mw\n");
+               b2b_bar = 0;
+               ndev->b2b_off = 0;
+       } else {
+               b2b_bar = ndev_mw_to_bar(ndev, ndev->b2b_idx);
+               if (b2b_bar < 0)
+                       return -EIO;
+
+               dev_dbg(ndev_dev(ndev), "using b2b mw bar %d\n", b2b_bar);
+
+               bar_size = pci_resource_len(ndev->ntb.pdev, b2b_bar);
+
+               dev_dbg(ndev_dev(ndev), "b2b bar size %#llx\n", bar_size);
+
+               if (b2b_mw_share && XEON_B2B_MIN_SIZE <= bar_size >> 1) {
+                       dev_dbg(ndev_dev(ndev),
+                               "b2b using first half of bar\n");
+                       ndev->b2b_off = bar_size >> 1;
+               } else if (XEON_B2B_MIN_SIZE <= bar_size) {
+                       dev_dbg(ndev_dev(ndev),
+                               "b2b using whole bar\n");
+                       ndev->b2b_off = 0;
+                       --ndev->mw_count;
+               } else {
+                       dev_dbg(ndev_dev(ndev),
+                               "b2b bar size is too small\n");
+                       return -EIO;
+               }
+       }
+
+       /* Reset the secondary bar sizes to match the primary bar sizes,
+        * except disable or halve the size of the b2b secondary bar.
+        *
+        * Note: code for each specific bar size register, because the register
+        * offsets are not in a consistent order (bar5sz comes after ppd, odd).
+        */
+       pci_read_config_byte(pdev, XEON_PBAR23SZ_OFFSET, &bar_sz);
+       dev_dbg(ndev_dev(ndev), "PBAR23SZ %#x\n", bar_sz);
+       if (b2b_bar == 2) {
+               if (ndev->b2b_off)
+                       bar_sz -= 1;
+               else
+                       bar_sz = 0;
+       }
+       pci_write_config_byte(pdev, XEON_SBAR23SZ_OFFSET, bar_sz);
+       pci_read_config_byte(pdev, XEON_SBAR23SZ_OFFSET, &bar_sz);
+       dev_dbg(ndev_dev(ndev), "SBAR23SZ %#x\n", bar_sz);
+
+       if (!ndev->bar4_split) {
+               pci_read_config_byte(pdev, XEON_PBAR45SZ_OFFSET, &bar_sz);
+               dev_dbg(ndev_dev(ndev), "PBAR45SZ %#x\n", bar_sz);
+               if (b2b_bar == 4) {
+                       if (ndev->b2b_off)
+                               bar_sz -= 1;
+                       else
+                               bar_sz = 0;
+               }
+               pci_write_config_byte(pdev, XEON_SBAR45SZ_OFFSET, bar_sz);
+               pci_read_config_byte(pdev, XEON_SBAR45SZ_OFFSET, &bar_sz);
+               dev_dbg(ndev_dev(ndev), "SBAR45SZ %#x\n", bar_sz);
+       } else {
+               pci_read_config_byte(pdev, XEON_PBAR4SZ_OFFSET, &bar_sz);
+               dev_dbg(ndev_dev(ndev), "PBAR4SZ %#x\n", bar_sz);
+               if (b2b_bar == 4) {
+                       if (ndev->b2b_off)
+                               bar_sz -= 1;
+                       else
+                               bar_sz = 0;
+               }
+               pci_write_config_byte(pdev, XEON_SBAR4SZ_OFFSET, bar_sz);
+               pci_read_config_byte(pdev, XEON_SBAR4SZ_OFFSET, &bar_sz);
+               dev_dbg(ndev_dev(ndev), "SBAR4SZ %#x\n", bar_sz);
+
+               pci_read_config_byte(pdev, XEON_PBAR5SZ_OFFSET, &bar_sz);
+               dev_dbg(ndev_dev(ndev), "PBAR5SZ %#x\n", bar_sz);
+               if (b2b_bar == 5) {
+                       if (ndev->b2b_off)
+                               bar_sz -= 1;
+                       else
+                               bar_sz = 0;
+               }
+               pci_write_config_byte(pdev, XEON_SBAR5SZ_OFFSET, bar_sz);
+               pci_read_config_byte(pdev, XEON_SBAR5SZ_OFFSET, &bar_sz);
+               dev_dbg(ndev_dev(ndev), "SBAR5SZ %#x\n", bar_sz);
+       }
+
+       /* SBAR01 hit by first part of the b2b bar */
+       if (b2b_bar == 0)
+               bar_addr = addr->bar0_addr;
+       else if (b2b_bar == 2)
+               bar_addr = addr->bar2_addr64;
+       else if (b2b_bar == 4 && !ndev->bar4_split)
+               bar_addr = addr->bar4_addr64;
+       else if (b2b_bar == 4)
+               bar_addr = addr->bar4_addr32;
+       else if (b2b_bar == 5)
+               bar_addr = addr->bar5_addr32;
+       else
+               return -EIO;
+
+       dev_dbg(ndev_dev(ndev), "SBAR01 %#018llx\n", bar_addr);
+       iowrite64(bar_addr, mmio + XEON_SBAR0BASE_OFFSET);
+
+       /* Other SBAR are normally hit by the PBAR xlat, except for b2b bar.
+        * The b2b bar is either disabled above, or configured half-size, and
+        * it starts at the PBAR xlat + offset.
+        */
+
+       bar_addr = addr->bar2_addr64 + (b2b_bar == 2 ? ndev->b2b_off : 0);
+       iowrite64(bar_addr, mmio + XEON_SBAR23BASE_OFFSET);
+       bar_addr = ioread64(mmio + XEON_SBAR23BASE_OFFSET);
+       dev_dbg(ndev_dev(ndev), "SBAR23 %#018llx\n", bar_addr);
+
+       if (!ndev->bar4_split) {
+               bar_addr = addr->bar4_addr64 +
+                       (b2b_bar == 4 ? ndev->b2b_off : 0);
+               iowrite64(bar_addr, mmio + XEON_SBAR45BASE_OFFSET);
+               bar_addr = ioread64(mmio + XEON_SBAR45BASE_OFFSET);
+               dev_dbg(ndev_dev(ndev), "SBAR45 %#018llx\n", bar_addr);
+       } else {
+               bar_addr = addr->bar4_addr32 +
+                       (b2b_bar == 4 ? ndev->b2b_off : 0);
+               iowrite32(bar_addr, mmio + XEON_SBAR4BASE_OFFSET);
+               bar_addr = ioread32(mmio + XEON_SBAR4BASE_OFFSET);
+               dev_dbg(ndev_dev(ndev), "SBAR4 %#010llx\n", bar_addr);
+
+               bar_addr = addr->bar5_addr32 +
+                       (b2b_bar == 5 ? ndev->b2b_off : 0);
+               iowrite32(bar_addr, mmio + XEON_SBAR5BASE_OFFSET);
+               bar_addr = ioread32(mmio + XEON_SBAR5BASE_OFFSET);
+               dev_dbg(ndev_dev(ndev), "SBAR5 %#010llx\n", bar_addr);
+       }
+
+       /* setup incoming bar limits == base addrs (zero length windows) */
+
+       bar_addr = addr->bar2_addr64 + (b2b_bar == 2 ? ndev->b2b_off : 0);
+       iowrite64(bar_addr, mmio + XEON_SBAR23LMT_OFFSET);
+       bar_addr = ioread64(mmio + XEON_SBAR23LMT_OFFSET);
+       dev_dbg(ndev_dev(ndev), "SBAR23LMT %#018llx\n", bar_addr);
+
+       if (!ndev->bar4_split) {
+               bar_addr = addr->bar4_addr64 +
+                       (b2b_bar == 4 ? ndev->b2b_off : 0);
+               iowrite64(bar_addr, mmio + XEON_SBAR45LMT_OFFSET);
+               bar_addr = ioread64(mmio + XEON_SBAR45LMT_OFFSET);
+               dev_dbg(ndev_dev(ndev), "SBAR45LMT %#018llx\n", bar_addr);
+       } else {
+               bar_addr = addr->bar4_addr32 +
+                       (b2b_bar == 4 ? ndev->b2b_off : 0);
+               iowrite32(bar_addr, mmio + XEON_SBAR4LMT_OFFSET);
+               bar_addr = ioread32(mmio + XEON_SBAR4LMT_OFFSET);
+               dev_dbg(ndev_dev(ndev), "SBAR4LMT %#010llx\n", bar_addr);
+
+               bar_addr = addr->bar5_addr32 +
+                       (b2b_bar == 5 ? ndev->b2b_off : 0);
+               iowrite32(bar_addr, mmio + XEON_SBAR5LMT_OFFSET);
+               bar_addr = ioread32(mmio + XEON_SBAR5LMT_OFFSET);
+               dev_dbg(ndev_dev(ndev), "SBAR5LMT %#05llx\n", bar_addr);
+       }
+
+       /* zero incoming translation addrs */
+       iowrite64(0, mmio + XEON_SBAR23XLAT_OFFSET);
+
+       if (!ndev->bar4_split) {
+               iowrite64(0, mmio + XEON_SBAR45XLAT_OFFSET);
+       } else {
+               iowrite32(0, mmio + XEON_SBAR4XLAT_OFFSET);
+               iowrite32(0, mmio + XEON_SBAR5XLAT_OFFSET);
+       }
+
+       /* zero outgoing translation limits (whole bar size windows) */
+       iowrite64(0, mmio + XEON_PBAR23LMT_OFFSET);
+       if (!ndev->bar4_split) {
+               iowrite64(0, mmio + XEON_PBAR45LMT_OFFSET);
+       } else {
+               iowrite32(0, mmio + XEON_PBAR4LMT_OFFSET);
+               iowrite32(0, mmio + XEON_PBAR5LMT_OFFSET);
+       }
+
+       /* set outgoing translation offsets */
+       bar_addr = peer_addr->bar2_addr64;
+       iowrite64(bar_addr, mmio + XEON_PBAR23XLAT_OFFSET);
+       bar_addr = ioread64(mmio + XEON_PBAR23XLAT_OFFSET);
+       dev_dbg(ndev_dev(ndev), "PBAR23XLAT %#018llx\n", bar_addr);
+
+       if (!ndev->bar4_split) {
+               bar_addr = peer_addr->bar4_addr64;
+               iowrite64(bar_addr, mmio + XEON_PBAR45XLAT_OFFSET);
+               bar_addr = ioread64(mmio + XEON_PBAR45XLAT_OFFSET);
+               dev_dbg(ndev_dev(ndev), "PBAR45XLAT %#018llx\n", bar_addr);
+       } else {
+               bar_addr = peer_addr->bar4_addr32;
+               iowrite32(bar_addr, mmio + XEON_PBAR4XLAT_OFFSET);
+               bar_addr = ioread32(mmio + XEON_PBAR4XLAT_OFFSET);
+               dev_dbg(ndev_dev(ndev), "PBAR4XLAT %#010llx\n", bar_addr);
+
+               bar_addr = peer_addr->bar5_addr32;
+               iowrite32(bar_addr, mmio + XEON_PBAR5XLAT_OFFSET);
+               bar_addr = ioread32(mmio + XEON_PBAR5XLAT_OFFSET);
+               dev_dbg(ndev_dev(ndev), "PBAR5XLAT %#010llx\n", bar_addr);
+       }
+
+       /* set the translation offset for b2b registers */
+       if (b2b_bar == 0)
+               bar_addr = peer_addr->bar0_addr;
+       else if (b2b_bar == 2)
+               bar_addr = peer_addr->bar2_addr64;
+       else if (b2b_bar == 4 && !ndev->bar4_split)
+               bar_addr = peer_addr->bar4_addr64;
+       else if (b2b_bar == 4)
+               bar_addr = peer_addr->bar4_addr32;
+       else if (b2b_bar == 5)
+               bar_addr = peer_addr->bar5_addr32;
+       else
+               return -EIO;
+
+       /* B2B_XLAT_OFFSET is 64bit, but can only take 32bit writes */
+       dev_dbg(ndev_dev(ndev), "B2BXLAT %#018llx\n", bar_addr);
+       iowrite32(bar_addr, mmio + XEON_B2B_XLAT_OFFSETL);
+       iowrite32(bar_addr >> 32, mmio + XEON_B2B_XLAT_OFFSETU);
+
+       if (b2b_bar) {
+               /* map peer ntb mmio config space registers */
+               ndev->peer_mmio = pci_iomap(pdev, b2b_bar,
+                                           XEON_B2B_MIN_SIZE);
+               if (!ndev->peer_mmio)
+                       return -EIO;
+       }
+
+       return 0;
+}
+
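+/* Select the register sets for the configured topology.  With the SDOORBELL
+ * lockup erratum, B2B avoids the secondary doorbell registers by addressing
+ * the peer's primary registers through a dedicated memory window (b2b_mw_idx).
+ */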
+static int xeon_init_ntb(struct intel_ntb_dev *ndev)
+{
+       int rc;
+       u32 ntb_ctl;
+
+       if (ndev->bar4_split)
+               ndev->mw_count = HSX_SPLIT_BAR_MW_COUNT;
+       else
+               ndev->mw_count = XEON_MW_COUNT;
+
+       ndev->spad_count = XEON_SPAD_COUNT;
+       ndev->db_count = XEON_DB_COUNT;
+       ndev->db_link_mask = XEON_DB_LINK_BIT;
+
+       switch (ndev->ntb.topo) {
+       case NTB_TOPO_PRI:
+               if (ndev->hwerr_flags & NTB_HWERR_SDOORBELL_LOCKUP) {
+                       dev_err(ndev_dev(ndev), "NTB Primary config disabled\n");
+                       return -EINVAL;
+               }
+
+               /* enable link to allow secondary side device to appear */
+               ntb_ctl = ioread32(ndev->self_mmio + ndev->reg->ntb_ctl);
+               ntb_ctl &= ~NTB_CTL_DISABLE;
+               iowrite32(ntb_ctl, ndev->self_mmio + ndev->reg->ntb_ctl);
+
+               /* use half the spads for the peer */
+               ndev->spad_count >>= 1;
+               ndev->self_reg = &xeon_pri_reg;
+               ndev->peer_reg = &xeon_sec_reg;
+               ndev->xlat_reg = &xeon_sec_xlat;
+               break;
+
+       case NTB_TOPO_SEC:
+               if (ndev->hwerr_flags & NTB_HWERR_SDOORBELL_LOCKUP) {
+                       dev_err(ndev_dev(ndev), "NTB Secondary config disabled\n");
+                       return -EINVAL;
+               }
+               /* use half the spads for the peer */
+               ndev->spad_count >>= 1;
+               ndev->self_reg = &xeon_sec_reg;
+               ndev->peer_reg = &xeon_pri_reg;
+               ndev->xlat_reg = &xeon_pri_xlat;
+               break;
+
+       case NTB_TOPO_B2B_USD:
+       case NTB_TOPO_B2B_DSD:
+               ndev->self_reg = &xeon_pri_reg;
+               ndev->peer_reg = &xeon_b2b_reg;
+               ndev->xlat_reg = &xeon_sec_xlat;
+
+               if (ndev->hwerr_flags & NTB_HWERR_SDOORBELL_LOCKUP) {
+                       ndev->peer_reg = &xeon_pri_reg;
+
+                       if (b2b_mw_idx < 0)
+                               ndev->b2b_idx = b2b_mw_idx + ndev->mw_count;
+                       else
+                               ndev->b2b_idx = b2b_mw_idx;
+
+                       dev_dbg(ndev_dev(ndev),
+                               "setting up b2b mw idx %d means %d\n",
+                               b2b_mw_idx, ndev->b2b_idx);
+
+               } else if (ndev->hwerr_flags & NTB_HWERR_B2BDOORBELL_BIT14) {
+                       dev_warn(ndev_dev(ndev), "Reduce doorbell count by 1\n");
+                       ndev->db_count -= 1;
+               }
+
+               if (ndev->ntb.topo == NTB_TOPO_B2B_USD) {
+                       rc = xeon_setup_b2b_mw(ndev,
+                                              &xeon_b2b_dsd_addr,
+                                              &xeon_b2b_usd_addr);
+               } else {
+                       rc = xeon_setup_b2b_mw(ndev,
+                                              &xeon_b2b_usd_addr,
+                                              &xeon_b2b_dsd_addr);
+               }
+               if (rc)
+                       return rc;
+
+               /* Enable Bus Master and Memory Space on the secondary side */
+               iowrite16(PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER,
+                         ndev->self_mmio + XEON_SPCICMD_OFFSET);
+
+               break;
+
+       default:
+               return -EINVAL;
+       }
+
+       ndev->db_valid_mask = BIT_ULL(ndev->db_count) - 1;
+
+       ndev->reg->db_iowrite(ndev->db_valid_mask,
+                             ndev->self_mmio +
+                             ndev->self_reg->db_mask);
+
+       return 0;
+}
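In the B2B case above, a negative b2b_mw_idx counts back from the last memory window. A short sketch of that resolution, illustrative only, with names following the driver:

        /* Sketch: a negative index wraps around mw_count, so -1 selects the
         * highest memory window, e.g. -1 + 2 == 1 when mw_count is 2.
         */
        static unsigned int resolve_b2b_idx(int b2b_mw_idx, unsigned int mw_count)
        {
                if (b2b_mw_idx < 0)
                        return b2b_mw_idx + mw_count;
                return b2b_mw_idx;
        }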
+
+static int xeon_init_dev(struct intel_ntb_dev *ndev)
+{
+       struct pci_dev *pdev;
+       u8 ppd;
+       int rc, mem;
+
+       pdev = ndev_pdev(ndev);
+
+       switch (pdev->device) {
+       /* There is a Xeon hardware errata related to writes to SDOORBELL or
+        * B2BDOORBELL in conjunction with inbound access to NTB MMIO Space,
+        * which may hang the system.  To workaround this use the second memory
+        * window to access the interrupt and scratch pad registers on the
+        * remote system.
+        */
+       case PCI_DEVICE_ID_INTEL_NTB_SS_JSF:
+       case PCI_DEVICE_ID_INTEL_NTB_PS_JSF:
+       case PCI_DEVICE_ID_INTEL_NTB_B2B_JSF:
+       case PCI_DEVICE_ID_INTEL_NTB_SS_SNB:
+       case PCI_DEVICE_ID_INTEL_NTB_PS_SNB:
+       case PCI_DEVICE_ID_INTEL_NTB_B2B_SNB:
+       case PCI_DEVICE_ID_INTEL_NTB_SS_IVT:
+       case PCI_DEVICE_ID_INTEL_NTB_PS_IVT:
+       case PCI_DEVICE_ID_INTEL_NTB_B2B_IVT:
+       case PCI_DEVICE_ID_INTEL_NTB_SS_HSX:
+       case PCI_DEVICE_ID_INTEL_NTB_PS_HSX:
+       case PCI_DEVICE_ID_INTEL_NTB_B2B_HSX:
+               ndev->hwerr_flags |= NTB_HWERR_SDOORBELL_LOCKUP;
+               break;
+       }
+
+       switch (pdev->device) {
+       /* There is a hardware erratum related to accessing any register in
+        * SB01BASE in the presence of bidirectional traffic crossing the NTB.
+        */
+       case PCI_DEVICE_ID_INTEL_NTB_SS_IVT:
+       case PCI_DEVICE_ID_INTEL_NTB_PS_IVT:
+       case PCI_DEVICE_ID_INTEL_NTB_B2B_IVT:
+       case PCI_DEVICE_ID_INTEL_NTB_SS_HSX:
+       case PCI_DEVICE_ID_INTEL_NTB_PS_HSX:
+       case PCI_DEVICE_ID_INTEL_NTB_B2B_HSX:
+               ndev->hwerr_flags |= NTB_HWERR_SB01BASE_LOCKUP;
+               break;
+       }
+
+       switch (pdev->device) {
+       /* HW erratum on bit 14 of the b2bdoorbell register.  Writes will not
+        * be mirrored to the remote system.  Shrink the number of bits by one,
+        * since bit 14 is the last bit.
+        */
+       case PCI_DEVICE_ID_INTEL_NTB_SS_JSF:
+       case PCI_DEVICE_ID_INTEL_NTB_PS_JSF:
+       case PCI_DEVICE_ID_INTEL_NTB_B2B_JSF:
+       case PCI_DEVICE_ID_INTEL_NTB_SS_SNB:
+       case PCI_DEVICE_ID_INTEL_NTB_PS_SNB:
+       case PCI_DEVICE_ID_INTEL_NTB_B2B_SNB:
+       case PCI_DEVICE_ID_INTEL_NTB_SS_IVT:
+       case PCI_DEVICE_ID_INTEL_NTB_PS_IVT:
+       case PCI_DEVICE_ID_INTEL_NTB_B2B_IVT:
+       case PCI_DEVICE_ID_INTEL_NTB_SS_HSX:
+       case PCI_DEVICE_ID_INTEL_NTB_PS_HSX:
+       case PCI_DEVICE_ID_INTEL_NTB_B2B_HSX:
+               ndev->hwerr_flags |= NTB_HWERR_B2BDOORBELL_BIT14;
+               break;
+       }
+
+       ndev->reg = &xeon_reg;
+
+       rc = pci_read_config_byte(pdev, XEON_PPD_OFFSET, &ppd);
+       if (rc)
+               return -EIO;
+
+       ndev->ntb.topo = xeon_ppd_topo(ndev, ppd);
+       dev_dbg(ndev_dev(ndev), "ppd %#x topo %s\n", ppd,
+               ntb_topo_string(ndev->ntb.topo));
+       if (ndev->ntb.topo == NTB_TOPO_NONE)
+               return -EINVAL;
+
+       if (ndev->ntb.topo != NTB_TOPO_SEC) {
+               ndev->bar4_split = xeon_ppd_bar4_split(ndev, ppd);
+               dev_dbg(ndev_dev(ndev), "ppd %#x bar4_split %d\n",
+                       ppd, ndev->bar4_split);
+       } else {
+               /* This is how the transparent (secondary) side figures out
+                * whether split BAR is in use, since there is no way for the
+                * hw on the transparent side to know and set the PPD.
+                */
+               mem = pci_select_bars(pdev, IORESOURCE_MEM);
+               ndev->bar4_split = hweight32(mem) ==
+                       HSX_SPLIT_BAR_MW_COUNT + 1;
+               dev_dbg(ndev_dev(ndev), "mem %#x bar4_split %d\n",
+                       mem, ndev->bar4_split);
+       }
+
+       rc = xeon_init_ntb(ndev);
+       if (rc)
+               return rc;
+
+       return xeon_init_isr(ndev);
+}
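The transparent-side split-BAR detection above relies only on how many memory BARs the device exposes. A standalone sketch of that check, illustrative only:

        /* Sketch: with split BAR the device exposes one extra memory BAR
         * (BAR4 and BAR5 instead of a combined BAR4/5), so counting the
         * memory BARs (BAR0, BAR2, BAR4, BAR5 makes four, which is
         * HSX_SPLIT_BAR_MW_COUNT + 1) distinguishes the two layouts without
         * reading the PPD.
         */
        static bool xeon_detect_split_bar(struct pci_dev *pdev)
        {
                int mem = pci_select_bars(pdev, IORESOURCE_MEM);

                return hweight32(mem) == HSX_SPLIT_BAR_MW_COUNT + 1;
        }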
+
+static void xeon_deinit_dev(struct intel_ntb_dev *ndev)
+{
+       xeon_deinit_isr(ndev);
+}
+
+static int intel_ntb_init_pci(struct intel_ntb_dev *ndev, struct pci_dev *pdev)
+{
+       int rc;
+
+       pci_set_drvdata(pdev, ndev);
+
+       rc = pci_enable_device(pdev);
+       if (rc)
+               goto err_pci_enable;
+
+       rc = pci_request_regions(pdev, NTB_NAME);
+       if (rc)
+               goto err_pci_regions;
+
+       pci_set_master(pdev);
+
+       rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
+       if (rc) {
+               rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+               if (rc)
+                       goto err_dma_mask;
+               dev_warn(ndev_dev(ndev), "Cannot DMA highmem\n");
+       }
+
+       rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
+       if (rc) {
+               rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+               if (rc)
+                       goto err_dma_mask;
+               dev_warn(ndev_dev(ndev), "Cannot DMA consistent highmem\n");
+       }
+
+       ndev->self_mmio = pci_iomap(pdev, 0, 0);
+       if (!ndev->self_mmio) {
+               rc = -EIO;
+               goto err_mmio;
+       }
+       ndev->peer_mmio = ndev->self_mmio;
+
+       return 0;
+
+err_mmio:
+err_dma_mask:
+       pci_clear_master(pdev);
+       pci_release_regions(pdev);
+err_pci_regions:
+       pci_disable_device(pdev);
+err_pci_enable:
+       pci_set_drvdata(pdev, NULL);
+       return rc;
+}
+
+static void intel_ntb_deinit_pci(struct intel_ntb_dev *ndev)
+{
+       struct pci_dev *pdev = ndev_pdev(ndev);
+
+       if (ndev->peer_mmio && ndev->peer_mmio != ndev->self_mmio)
+               pci_iounmap(pdev, ndev->peer_mmio);
+       pci_iounmap(pdev, ndev->self_mmio);
+
+       pci_clear_master(pdev);
+       pci_release_regions(pdev);
+       pci_disable_device(pdev);
+       pci_set_drvdata(pdev, NULL);
+}
+
+static inline void ndev_init_struct(struct intel_ntb_dev *ndev,
+                                   struct pci_dev *pdev)
+{
+       ndev->ntb.pdev = pdev;
+       ndev->ntb.topo = NTB_TOPO_NONE;
+       ndev->ntb.ops = &intel_ntb_ops;
+
+       ndev->b2b_off = 0;
+       ndev->b2b_idx = INT_MAX;
+
+       ndev->bar4_split = 0;
+
+       ndev->mw_count = 0;
+       ndev->spad_count = 0;
+       ndev->db_count = 0;
+       ndev->db_vec_count = 0;
+       ndev->db_vec_shift = 0;
+
+       ndev->ntb_ctl = 0;
+       ndev->lnk_sta = 0;
+
+       ndev->db_valid_mask = 0;
+       ndev->db_link_mask = 0;
+       ndev->db_mask = 0;
+
+       spin_lock_init(&ndev->db_mask_lock);
+}
+
+static int intel_ntb_pci_probe(struct pci_dev *pdev,
+                              const struct pci_device_id *id)
+{
+       struct intel_ntb_dev *ndev;
+       int rc, node;
+
+       node = dev_to_node(&pdev->dev);
+
+       if (pdev_is_atom(pdev)) {
+               ndev = kzalloc_node(sizeof(*ndev), GFP_KERNEL, node);
+               if (!ndev) {
+                       rc = -ENOMEM;
+                       goto err_ndev;
+               }
+
+               ndev_init_struct(ndev, pdev);
+
+               rc = intel_ntb_init_pci(ndev, pdev);
+               if (rc)
+                       goto err_init_pci;
+
+               rc = atom_init_dev(ndev);
+               if (rc)
+                       goto err_init_dev;
+
+       } else if (pdev_is_xeon(pdev)) {
+               ndev = kzalloc_node(sizeof(*ndev), GFP_KERNEL, node);
+               if (!ndev) {
+                       rc = -ENOMEM;
+                       goto err_ndev;
+               }
+
+               ndev_init_struct(ndev, pdev);
+
+               rc = intel_ntb_init_pci(ndev, pdev);
+               if (rc)
+                       goto err_init_pci;
+
+               rc = xeon_init_dev(ndev);
+               if (rc)
+                       goto err_init_dev;
+
+       } else {
+               rc = -EINVAL;
+               goto err_ndev;
+       }
+
+       ndev_reset_unsafe_flags(ndev);
+
+       ndev->reg->poll_link(ndev);
+
+       ndev_init_debugfs(ndev);
+
+       rc = ntb_register_device(&ndev->ntb);
+       if (rc)
+               goto err_register;
+
+       dev_info(&pdev->dev, "NTB device registered.\n");
+
+       return 0;
+
+err_register:
+       ndev_deinit_debugfs(ndev);
+       if (pdev_is_atom(pdev))
+               atom_deinit_dev(ndev);
+       else if (pdev_is_xeon(pdev))
+               xeon_deinit_dev(ndev);
+err_init_dev:
+       intel_ntb_deinit_pci(ndev);
+err_init_pci:
+       kfree(ndev);
+err_ndev:
+       return rc;
+}
+
+static void intel_ntb_pci_remove(struct pci_dev *pdev)
+{
+       struct intel_ntb_dev *ndev = pci_get_drvdata(pdev);
+
+       ntb_unregister_device(&ndev->ntb);
+       ndev_deinit_debugfs(ndev);
+       if (pdev_is_atom(pdev))
+               atom_deinit_dev(ndev);
+       else if (pdev_is_xeon(pdev))
+               xeon_deinit_dev(ndev);
+       intel_ntb_deinit_pci(ndev);
+       kfree(ndev);
+}
+
+static const struct intel_ntb_reg atom_reg = {
+       .poll_link              = atom_poll_link,
+       .link_is_up             = atom_link_is_up,
+       .db_ioread              = atom_db_ioread,
+       .db_iowrite             = atom_db_iowrite,
+       .db_size                = sizeof(u64),
+       .ntb_ctl                = ATOM_NTBCNTL_OFFSET,
+       .mw_bar                 = {2, 4},
+};
+
+static const struct intel_ntb_alt_reg atom_pri_reg = {
+       .db_bell                = ATOM_PDOORBELL_OFFSET,
+       .db_mask                = ATOM_PDBMSK_OFFSET,
+       .spad                   = ATOM_SPAD_OFFSET,
+};
+
+static const struct intel_ntb_alt_reg atom_b2b_reg = {
+       .db_bell                = ATOM_B2B_DOORBELL_OFFSET,
+       .spad                   = ATOM_B2B_SPAD_OFFSET,
+};
+
+static const struct intel_ntb_xlat_reg atom_sec_xlat = {
+       /* FIXME : .bar0_base   = ATOM_SBAR0BASE_OFFSET, */
+       /* FIXME : .bar2_limit  = ATOM_SBAR2LMT_OFFSET, */
+       .bar2_xlat              = ATOM_SBAR2XLAT_OFFSET,
+};
+
+static const struct intel_ntb_reg xeon_reg = {
+       .poll_link              = xeon_poll_link,
+       .link_is_up             = xeon_link_is_up,
+       .db_ioread              = xeon_db_ioread,
+       .db_iowrite             = xeon_db_iowrite,
+       .db_size                = sizeof(u32),
+       .ntb_ctl                = XEON_NTBCNTL_OFFSET,
+       .mw_bar                 = {2, 4, 5},
+};
+
+static const struct intel_ntb_alt_reg xeon_pri_reg = {
+       .db_bell                = XEON_PDOORBELL_OFFSET,
+       .db_mask                = XEON_PDBMSK_OFFSET,
+       .spad                   = XEON_SPAD_OFFSET,
+};
+
+static const struct intel_ntb_alt_reg xeon_sec_reg = {
+       .db_bell                = XEON_SDOORBELL_OFFSET,
+       .db_mask                = XEON_SDBMSK_OFFSET,
+       /* second half of the scratchpads */
+       .spad                   = XEON_SPAD_OFFSET + (XEON_SPAD_COUNT << 1),
+};
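The "second half of the scratchpads" offset works out as follows (a worked check, not new code): 16 scratchpads of 4 bytes each span 64 bytes, so the second half starts (XEON_SPAD_COUNT / 2) * 4 = 8 * 4 = 32 bytes in, which is exactly XEON_SPAD_COUNT << 1.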
+
+static const struct intel_ntb_alt_reg xeon_b2b_reg = {
+       .db_bell                = XEON_B2B_DOORBELL_OFFSET,
+       .spad                   = XEON_B2B_SPAD_OFFSET,
+};
+
+static const struct intel_ntb_xlat_reg xeon_pri_xlat = {
+       /* Note: no primary .bar0_base visible to the secondary side.
+        *
+        * The secondary side cannot get the base address stored in primary
+        * bars.  The base address is necessary to set the limit register to
+        * any value other than zero, or unlimited.
+        *
+        * WITHOUT THE BASE ADDRESS, THE SECONDARY SIDE CANNOT DISABLE the
+        * window by setting the limit equal to base, nor can it limit the size
+        * of the memory window by setting the limit to base + size.
+        */
+       .bar2_limit             = XEON_PBAR23LMT_OFFSET,
+       .bar2_xlat              = XEON_PBAR23XLAT_OFFSET,
+};
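The note above about limit registers can be made concrete. A hedged sketch of programming a 64-bit limit register such as SBAR23LMT once the base is known (as it is on the secondary side); the base and size values here are hypothetical caller inputs, not part of the patch:

        /* Sketch: cap a window at base + size, or disable it by setting the
         * limit equal to the base, per the comment above.
         */
        static void xlat_program_limit(void __iomem *mmio, unsigned long limit_reg,
                                       u64 base, u64 size)
        {
                if (size)
                        iowrite64(base + size, mmio + limit_reg);
                else
                        iowrite64(base, mmio + limit_reg);
        }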
+
+static const struct intel_ntb_xlat_reg xeon_sec_xlat = {
+       .bar0_base              = XEON_SBAR0BASE_OFFSET,
+       .bar2_limit             = XEON_SBAR23LMT_OFFSET,
+       .bar2_xlat              = XEON_SBAR23XLAT_OFFSET,
+};
+
+static struct intel_b2b_addr xeon_b2b_usd_addr = {
+       .bar2_addr64            = XEON_B2B_BAR2_USD_ADDR64,
+       .bar4_addr64            = XEON_B2B_BAR4_USD_ADDR64,
+       .bar4_addr32            = XEON_B2B_BAR4_USD_ADDR32,
+       .bar5_addr32            = XEON_B2B_BAR5_USD_ADDR32,
+};
+
+static struct intel_b2b_addr xeon_b2b_dsd_addr = {
+       .bar2_addr64            = XEON_B2B_BAR2_DSD_ADDR64,
+       .bar4_addr64            = XEON_B2B_BAR4_DSD_ADDR64,
+       .bar4_addr32            = XEON_B2B_BAR4_DSD_ADDR32,
+       .bar5_addr32            = XEON_B2B_BAR5_DSD_ADDR32,
+};
+
+/* operations for primary side of local ntb */
+static const struct ntb_dev_ops intel_ntb_ops = {
+       .mw_count               = intel_ntb_mw_count,
+       .mw_get_range           = intel_ntb_mw_get_range,
+       .mw_set_trans           = intel_ntb_mw_set_trans,
+       .link_is_up             = intel_ntb_link_is_up,
+       .link_enable            = intel_ntb_link_enable,
+       .link_disable           = intel_ntb_link_disable,
+       .db_is_unsafe           = intel_ntb_db_is_unsafe,
+       .db_valid_mask          = intel_ntb_db_valid_mask,
+       .db_vector_count        = intel_ntb_db_vector_count,
+       .db_vector_mask         = intel_ntb_db_vector_mask,
+       .db_read                = intel_ntb_db_read,
+       .db_clear               = intel_ntb_db_clear,
+       .db_set_mask            = intel_ntb_db_set_mask,
+       .db_clear_mask          = intel_ntb_db_clear_mask,
+       .peer_db_addr           = intel_ntb_peer_db_addr,
+       .peer_db_set            = intel_ntb_peer_db_set,
+       .spad_is_unsafe         = intel_ntb_spad_is_unsafe,
+       .spad_count             = intel_ntb_spad_count,
+       .spad_read              = intel_ntb_spad_read,
+       .spad_write             = intel_ntb_spad_write,
+       .peer_spad_addr         = intel_ntb_peer_spad_addr,
+       .peer_spad_read         = intel_ntb_peer_spad_read,
+       .peer_spad_write        = intel_ntb_peer_spad_write,
+};
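Clients never call this ops table directly; they go through the inline wrappers in the new include/linux/ntb.h, which dispatch to it. A rough usage sketch, hedged in that the wrapper names (ntb_spad_count, ntb_peer_spad_write, ntb_peer_db_set) come from that header rather than from this file, and the usage itself is hypothetical:

        /* Sketch: a client pokes the device through ntb.h wrappers, which
         * end up in the intel_ntb_ops table above.
         */
        static void example_client_poke(struct ntb_dev *ntb)
        {
                if (ntb_spad_count(ntb) > 0)
                        ntb_peer_spad_write(ntb, 0, 0x1234);    /* post a value to the peer */

                ntb_peer_db_set(ntb, BIT_ULL(0));               /* ring peer doorbell 0 */
        }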
+
+static const struct file_operations intel_ntb_debugfs_info = {
+       .owner = THIS_MODULE,
+       .open = simple_open,
+       .read = ndev_debugfs_read,
+};
+
+static const struct pci_device_id intel_ntb_pci_tbl[] = {
+       {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_BWD)},
+       {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_JSF)},
+       {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_SNB)},
+       {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_IVT)},
+       {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_HSX)},
+       {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_PS_JSF)},
+       {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_PS_SNB)},
+       {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_PS_IVT)},
+       {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_PS_HSX)},
+       {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_SS_JSF)},
+       {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_SS_SNB)},
+       {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_SS_IVT)},
+       {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_SS_HSX)},
+       {0}
+};
+MODULE_DEVICE_TABLE(pci, intel_ntb_pci_tbl);
+
+static struct pci_driver intel_ntb_pci_driver = {
+       .name = KBUILD_MODNAME,
+       .id_table = intel_ntb_pci_tbl,
+       .probe = intel_ntb_pci_probe,
+       .remove = intel_ntb_pci_remove,
+};
+
+static int __init intel_ntb_pci_driver_init(void)
+{
+       pr_info("%s %s\n", NTB_DESC, NTB_VER);
+
+       if (debugfs_initialized())
+               debugfs_dir = debugfs_create_dir(KBUILD_MODNAME, NULL);
+
+       return pci_register_driver(&intel_ntb_pci_driver);
+}
+module_init(intel_ntb_pci_driver_init);
+
+static void __exit intel_ntb_pci_driver_exit(void)
+{
+       pci_unregister_driver(&intel_ntb_pci_driver);
+
+       debugfs_remove_recursive(debugfs_dir);
+}
+module_exit(intel_ntb_pci_driver_exit);
+
diff --git a/drivers/ntb/hw/intel/ntb_hw_intel.h b/drivers/ntb/hw/intel/ntb_hw_intel.h
new file mode 100644 (file)
index 0000000..7ddaf38
--- /dev/null
@@ -0,0 +1,342 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ *   redistributing this file, you may do so under either license.
+ *
+ *   GPL LICENSE SUMMARY
+ *
+ *   Copyright(c) 2012 Intel Corporation. All rights reserved.
+ *   Copyright (C) 2015 EMC Corporation. All Rights Reserved.
+ *
+ *   This program is free software; you can redistribute it and/or modify
+ *   it under the terms of version 2 of the GNU General Public License as
+ *   published by the Free Software Foundation.
+ *
+ *   BSD LICENSE
+ *
+ *   Copyright(c) 2012 Intel Corporation. All rights reserved.
+ *   Copyright (C) 2015 EMC Corporation. All Rights Reserved.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copy
+ *       notice, this list of conditions and the following disclaimer in
+ *       the documentation and/or other materials provided with the
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Intel PCIe NTB Linux driver
+ *
+ * Contact Information:
+ * Jon Mason <jon.mason@intel.com>
+ */
+
+#ifndef NTB_HW_INTEL_H
+#define NTB_HW_INTEL_H
+
+#include <linux/ntb.h>
+#include <linux/pci.h>
+
+#define PCI_DEVICE_ID_INTEL_NTB_B2B_JSF        0x3725
+#define PCI_DEVICE_ID_INTEL_NTB_PS_JSF 0x3726
+#define PCI_DEVICE_ID_INTEL_NTB_SS_JSF 0x3727
+#define PCI_DEVICE_ID_INTEL_NTB_B2B_SNB        0x3C0D
+#define PCI_DEVICE_ID_INTEL_NTB_PS_SNB 0x3C0E
+#define PCI_DEVICE_ID_INTEL_NTB_SS_SNB 0x3C0F
+#define PCI_DEVICE_ID_INTEL_NTB_B2B_IVT        0x0E0D
+#define PCI_DEVICE_ID_INTEL_NTB_PS_IVT 0x0E0E
+#define PCI_DEVICE_ID_INTEL_NTB_SS_IVT 0x0E0F
+#define PCI_DEVICE_ID_INTEL_NTB_B2B_HSX        0x2F0D
+#define PCI_DEVICE_ID_INTEL_NTB_PS_HSX 0x2F0E
+#define PCI_DEVICE_ID_INTEL_NTB_SS_HSX 0x2F0F
+#define PCI_DEVICE_ID_INTEL_NTB_B2B_BWD        0x0C4E
+
+/* Intel Xeon hardware */
+
+#define XEON_PBAR23LMT_OFFSET          0x0000
+#define XEON_PBAR45LMT_OFFSET          0x0008
+#define XEON_PBAR4LMT_OFFSET           0x0008
+#define XEON_PBAR5LMT_OFFSET           0x000c
+#define XEON_PBAR23XLAT_OFFSET         0x0010
+#define XEON_PBAR45XLAT_OFFSET         0x0018
+#define XEON_PBAR4XLAT_OFFSET          0x0018
+#define XEON_PBAR5XLAT_OFFSET          0x001c
+#define XEON_SBAR23LMT_OFFSET          0x0020
+#define XEON_SBAR45LMT_OFFSET          0x0028
+#define XEON_SBAR4LMT_OFFSET           0x0028
+#define XEON_SBAR5LMT_OFFSET           0x002c
+#define XEON_SBAR23XLAT_OFFSET         0x0030
+#define XEON_SBAR45XLAT_OFFSET         0x0038
+#define XEON_SBAR4XLAT_OFFSET          0x0038
+#define XEON_SBAR5XLAT_OFFSET          0x003c
+#define XEON_SBAR0BASE_OFFSET          0x0040
+#define XEON_SBAR23BASE_OFFSET         0x0048
+#define XEON_SBAR45BASE_OFFSET         0x0050
+#define XEON_SBAR4BASE_OFFSET          0x0050
+#define XEON_SBAR5BASE_OFFSET          0x0054
+#define XEON_SBDF_OFFSET               0x005c
+#define XEON_NTBCNTL_OFFSET            0x0058
+#define XEON_PDOORBELL_OFFSET          0x0060
+#define XEON_PDBMSK_OFFSET             0x0062
+#define XEON_SDOORBELL_OFFSET          0x0064
+#define XEON_SDBMSK_OFFSET             0x0066
+#define XEON_USMEMMISS_OFFSET          0x0070
+#define XEON_SPAD_OFFSET               0x0080
+#define XEON_PBAR23SZ_OFFSET           0x00d0
+#define XEON_PBAR45SZ_OFFSET           0x00d1
+#define XEON_PBAR4SZ_OFFSET            0x00d1
+#define XEON_SBAR23SZ_OFFSET           0x00d2
+#define XEON_SBAR45SZ_OFFSET           0x00d3
+#define XEON_SBAR4SZ_OFFSET            0x00d3
+#define XEON_PPD_OFFSET                        0x00d4
+#define XEON_PBAR5SZ_OFFSET            0x00d5
+#define XEON_SBAR5SZ_OFFSET            0x00d6
+#define XEON_WCCNTRL_OFFSET            0x00e0
+#define XEON_UNCERRSTS_OFFSET          0x014c
+#define XEON_CORERRSTS_OFFSET          0x0158
+#define XEON_LINK_STATUS_OFFSET                0x01a2
+#define XEON_SPCICMD_OFFSET            0x0504
+#define XEON_DEVCTRL_OFFSET            0x0598
+#define XEON_DEVSTS_OFFSET             0x059a
+#define XEON_SLINK_STATUS_OFFSET       0x05a2
+#define XEON_B2B_SPAD_OFFSET           0x0100
+#define XEON_B2B_DOORBELL_OFFSET       0x0140
+#define XEON_B2B_XLAT_OFFSETL          0x0144
+#define XEON_B2B_XLAT_OFFSETU          0x0148
+#define XEON_PPD_CONN_MASK             0x03
+#define XEON_PPD_CONN_TRANSPARENT      0x00
+#define XEON_PPD_CONN_B2B              0x01
+#define XEON_PPD_CONN_RP               0x02
+#define XEON_PPD_DEV_MASK              0x10
+#define XEON_PPD_DEV_USD               0x00
+#define XEON_PPD_DEV_DSD               0x10
+#define XEON_PPD_SPLIT_BAR_MASK                0x40
+
+#define XEON_PPD_TOPO_MASK     (XEON_PPD_CONN_MASK | XEON_PPD_DEV_MASK)
+#define XEON_PPD_TOPO_PRI_USD  (XEON_PPD_CONN_RP | XEON_PPD_DEV_USD)
+#define XEON_PPD_TOPO_PRI_DSD  (XEON_PPD_CONN_RP | XEON_PPD_DEV_DSD)
+#define XEON_PPD_TOPO_SEC_USD  (XEON_PPD_CONN_TRANSPARENT | XEON_PPD_DEV_USD)
+#define XEON_PPD_TOPO_SEC_DSD  (XEON_PPD_CONN_TRANSPARENT | XEON_PPD_DEV_DSD)
+#define XEON_PPD_TOPO_B2B_USD  (XEON_PPD_CONN_B2B | XEON_PPD_DEV_USD)
+#define XEON_PPD_TOPO_B2B_DSD  (XEON_PPD_CONN_B2B | XEON_PPD_DEV_DSD)
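These masks decode the PPD byte read from config space. A sketch of the decode (the driver's own xeon_ppd_topo() does this for real; this illustrative version just maps to a string):

        /* Sketch: classify the PPD connection/device fields defined above. */
        static const char *ppd_topo_str(u8 ppd)
        {
                switch (ppd & XEON_PPD_TOPO_MASK) {
                case XEON_PPD_TOPO_B2B_USD:     return "B2B upstream";
                case XEON_PPD_TOPO_B2B_DSD:     return "B2B downstream";
                case XEON_PPD_TOPO_PRI_USD:
                case XEON_PPD_TOPO_PRI_DSD:     return "primary (root port)";
                case XEON_PPD_TOPO_SEC_USD:
                case XEON_PPD_TOPO_SEC_DSD:     return "secondary (transparent)";
                default:                        return "unknown";
                }
        }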
+
+#define XEON_MW_COUNT                  2
+#define HSX_SPLIT_BAR_MW_COUNT         3
+#define XEON_DB_COUNT                  15
+#define XEON_DB_LINK                   15
+#define XEON_DB_LINK_BIT                       BIT_ULL(XEON_DB_LINK)
+#define XEON_DB_MSIX_VECTOR_COUNT      4
+#define XEON_DB_MSIX_VECTOR_SHIFT      5
+#define XEON_DB_TOTAL_SHIFT            16
+#define XEON_SPAD_COUNT                        16
+
+/* Intel Atom hardware */
+
+#define ATOM_SBAR2XLAT_OFFSET          0x0008
+#define ATOM_PDOORBELL_OFFSET          0x0020
+#define ATOM_PDBMSK_OFFSET             0x0028
+#define ATOM_NTBCNTL_OFFSET            0x0060
+#define ATOM_SPAD_OFFSET                       0x0080
+#define ATOM_PPD_OFFSET                        0x00d4
+#define ATOM_PBAR2XLAT_OFFSET          0x8008
+#define ATOM_B2B_DOORBELL_OFFSET               0x8020
+#define ATOM_B2B_SPAD_OFFSET           0x8080
+#define ATOM_SPCICMD_OFFSET            0xb004
+#define ATOM_LINK_STATUS_OFFSET                0xb052
+#define ATOM_ERRCORSTS_OFFSET          0xb110
+#define ATOM_IP_BASE                   0xc000
+#define ATOM_DESKEWSTS_OFFSET          (ATOM_IP_BASE + 0x3024)
+#define ATOM_LTSSMERRSTS0_OFFSET               (ATOM_IP_BASE + 0x3180)
+#define ATOM_LTSSMSTATEJMP_OFFSET      (ATOM_IP_BASE + 0x3040)
+#define ATOM_IBSTERRRCRVSTS0_OFFSET    (ATOM_IP_BASE + 0x3324)
+#define ATOM_MODPHY_PCSREG4            0x1c004
+#define ATOM_MODPHY_PCSREG6            0x1c006
+
+#define ATOM_PPD_INIT_LINK             0x0008
+#define ATOM_PPD_CONN_MASK             0x0300
+#define ATOM_PPD_CONN_TRANSPARENT      0x0000
+#define ATOM_PPD_CONN_B2B              0x0100
+#define ATOM_PPD_CONN_RP                       0x0200
+#define ATOM_PPD_DEV_MASK              0x1000
+#define ATOM_PPD_DEV_USD                       0x0000
+#define ATOM_PPD_DEV_DSD                       0x1000
+#define ATOM_PPD_TOPO_MASK     (ATOM_PPD_CONN_MASK | ATOM_PPD_DEV_MASK)
+#define ATOM_PPD_TOPO_PRI_USD  (ATOM_PPD_CONN_TRANSPARENT | ATOM_PPD_DEV_USD)
+#define ATOM_PPD_TOPO_PRI_DSD  (ATOM_PPD_CONN_TRANSPARENT | ATOM_PPD_DEV_DSD)
+#define ATOM_PPD_TOPO_SEC_USD  (ATOM_PPD_CONN_RP | ATOM_PPD_DEV_USD)
+#define ATOM_PPD_TOPO_SEC_DSD  (ATOM_PPD_CONN_RP | ATOM_PPD_DEV_DSD)
+#define ATOM_PPD_TOPO_B2B_USD  (ATOM_PPD_CONN_B2B | ATOM_PPD_DEV_USD)
+#define ATOM_PPD_TOPO_B2B_DSD  (ATOM_PPD_CONN_B2B | ATOM_PPD_DEV_DSD)
+
+#define ATOM_MW_COUNT                  2
+#define ATOM_DB_COUNT                  34
+#define ATOM_DB_VALID_MASK             (BIT_ULL(ATOM_DB_COUNT) - 1)
+#define ATOM_DB_MSIX_VECTOR_COUNT      34
+#define ATOM_DB_MSIX_VECTOR_SHIFT      1
+#define ATOM_DB_TOTAL_SHIFT            34
+#define ATOM_SPAD_COUNT                        16
+
+#define ATOM_NTB_CTL_DOWN_BIT          BIT(16)
+#define ATOM_NTB_CTL_ACTIVE(x)         !(x & ATOM_NTB_CTL_DOWN_BIT)
+
+#define ATOM_DESKEWSTS_DBERR           BIT(15)
+#define ATOM_LTSSMERRSTS0_UNEXPECTEDEI BIT(20)
+#define ATOM_LTSSMSTATEJMP_FORCEDETECT BIT(2)
+#define ATOM_IBIST_ERR_OFLOW           0x7FFF7FFF
+
+#define ATOM_LINK_HB_TIMEOUT           msecs_to_jiffies(1000)
+#define ATOM_LINK_RECOVERY_TIME                msecs_to_jiffies(500)
+
+/* Ntb control and link status */
+
+#define NTB_CTL_CFG_LOCK               BIT(0)
+#define NTB_CTL_DISABLE                        BIT(1)
+#define NTB_CTL_S2P_BAR2_SNOOP         BIT(2)
+#define NTB_CTL_P2S_BAR2_SNOOP         BIT(4)
+#define NTB_CTL_S2P_BAR4_SNOOP         BIT(6)
+#define NTB_CTL_P2S_BAR4_SNOOP         BIT(8)
+#define NTB_CTL_S2P_BAR5_SNOOP         BIT(12)
+#define NTB_CTL_P2S_BAR5_SNOOP         BIT(14)
+
+#define NTB_LNK_STA_ACTIVE_BIT         0x2000
+#define NTB_LNK_STA_SPEED_MASK         0x000f
+#define NTB_LNK_STA_WIDTH_MASK         0x03f0
+#define NTB_LNK_STA_ACTIVE(x)          (!!((x) & NTB_LNK_STA_ACTIVE_BIT))
+#define NTB_LNK_STA_SPEED(x)           ((x) & NTB_LNK_STA_SPEED_MASK)
+#define NTB_LNK_STA_WIDTH(x)           (((x) & NTB_LNK_STA_WIDTH_MASK) >> 4)
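A small sketch of using these link-status helpers on a value read from the LINK_STATUS register; illustrative only, with the speed field interpreted as the PCIe link speed code and the width as the negotiated lane count:

        /* Sketch: report link state from a raw link status word. */
        static void report_link(u16 lnk_sta, int *speed, int *width)
        {
                if (NTB_LNK_STA_ACTIVE(lnk_sta)) {
                        *speed = NTB_LNK_STA_SPEED(lnk_sta);    /* PCIe link speed field */
                        *width = NTB_LNK_STA_WIDTH(lnk_sta);    /* negotiated lane count */
                } else {
                        *speed = 0;
                        *width = 0;
                }
        }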
+
+/* Use the following addresses for translation between b2b ntb devices in case
+ * the hardware default values are not reliable. */
+#define XEON_B2B_BAR0_USD_ADDR         0x1000000000000000ull
+#define XEON_B2B_BAR2_USD_ADDR64       0x2000000000000000ull
+#define XEON_B2B_BAR4_USD_ADDR64       0x4000000000000000ull
+#define XEON_B2B_BAR4_USD_ADDR32       0x20000000u
+#define XEON_B2B_BAR5_USD_ADDR32       0x40000000u
+#define XEON_B2B_BAR0_DSD_ADDR         0x9000000000000000ull
+#define XEON_B2B_BAR2_DSD_ADDR64       0xa000000000000000ull
+#define XEON_B2B_BAR4_DSD_ADDR64       0xc000000000000000ull
+#define XEON_B2B_BAR4_DSD_ADDR32       0xa0000000u
+#define XEON_B2B_BAR5_DSD_ADDR32       0xc0000000u
+
+/* The peer ntb secondary config space is a fixed 32KB (0x8000 bytes) in size */
+#define XEON_B2B_MIN_SIZE              0x8000
+
+/* flags to indicate hardware errata */
+#define NTB_HWERR_SDOORBELL_LOCKUP     BIT_ULL(0)
+#define NTB_HWERR_SB01BASE_LOCKUP      BIT_ULL(1)
+#define NTB_HWERR_B2BDOORBELL_BIT14    BIT_ULL(2)
+
+/* flags to indicate unsafe api */
+#define NTB_UNSAFE_DB                  BIT_ULL(0)
+#define NTB_UNSAFE_SPAD                        BIT_ULL(1)
+
+struct intel_ntb_dev;
+
+struct intel_ntb_reg {
+       int (*poll_link)(struct intel_ntb_dev *ndev);
+       int (*link_is_up)(struct intel_ntb_dev *ndev);
+       u64 (*db_ioread)(void __iomem *mmio);
+       void (*db_iowrite)(u64 db_bits, void __iomem *mmio);
+       unsigned long                   ntb_ctl;
+       resource_size_t                 db_size;
+       int                             mw_bar[];
+};
+
+struct intel_ntb_alt_reg {
+       unsigned long                   db_bell;
+       unsigned long                   db_mask;
+       unsigned long                   spad;
+};
+
+struct intel_ntb_xlat_reg {
+       unsigned long                   bar0_base;
+       unsigned long                   bar2_xlat;
+       unsigned long                   bar2_limit;
+};
+
+struct intel_b2b_addr {
+       phys_addr_t                     bar0_addr;
+       phys_addr_t                     bar2_addr64;
+       phys_addr_t                     bar4_addr64;
+       phys_addr_t                     bar4_addr32;
+       phys_addr_t                     bar5_addr32;
+};
+
+struct intel_ntb_vec {
+       struct intel_ntb_dev            *ndev;
+       int                             num;
+};
+
+struct intel_ntb_dev {
+       struct ntb_dev                  ntb;
+
+       /* offset of peer bar0 in b2b bar */
+       unsigned long                   b2b_off;
+       /* mw idx used to access peer bar0 */
+       unsigned int                    b2b_idx;
+
+       /* BAR45 is split into BAR4 and BAR5 */
+       bool                            bar4_split;
+
+       u32                             ntb_ctl;
+       u32                             lnk_sta;
+
+       unsigned char                   mw_count;
+       unsigned char                   spad_count;
+       unsigned char                   db_count;
+       unsigned char                   db_vec_count;
+       unsigned char                   db_vec_shift;
+
+       u64                             db_valid_mask;
+       u64                             db_link_mask;
+       u64                             db_mask;
+
+       /* synchronize rmw access of db_mask and hw reg */
+       spinlock_t                      db_mask_lock;
+
+       struct msix_entry               *msix;
+       struct intel_ntb_vec            *vec;
+
+       const struct intel_ntb_reg      *reg;
+       const struct intel_ntb_alt_reg  *self_reg;
+       const struct intel_ntb_alt_reg  *peer_reg;
+       const struct intel_ntb_xlat_reg *xlat_reg;
+       void                            __iomem *self_mmio;
+       void                            __iomem *peer_mmio;
+       phys_addr_t                     peer_addr;
+
+       unsigned long                   last_ts;
+       struct delayed_work             hb_timer;
+
+       unsigned long                   hwerr_flags;
+       unsigned long                   unsafe_flags;
+       unsigned long                   unsafe_flags_ignore;
+
+       struct dentry                   *debugfs_dir;
+       struct dentry                   *debugfs_info;
+};
+
+#define ndev_pdev(ndev) ((ndev)->ntb.pdev)
+#define ndev_name(ndev) pci_name(ndev_pdev(ndev))
+#define ndev_dev(ndev) (&ndev_pdev(ndev)->dev)
+#define ntb_ndev(ntb) container_of(ntb, struct intel_ntb_dev, ntb)
+#define hb_ndev(work) container_of(work, struct intel_ntb_dev, hb_timer.work)
+
+#endif
diff --git a/drivers/ntb/ntb.c b/drivers/ntb/ntb.c
new file mode 100644 (file)
index 0000000..23435f2
--- /dev/null
@@ -0,0 +1,251 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ *   redistributing this file, you may do so under either license.
+ *
+ *   GPL LICENSE SUMMARY
+ *
+ *   Copyright (C) 2015 EMC Corporation. All Rights Reserved.
+ *
+ *   This program is free software; you can redistribute it and/or modify
+ *   it under the terms of version 2 of the GNU General Public License as
+ *   published by the Free Software Foundation.
+ *
+ *   This program is distributed in the hope that it will be useful, but
+ *   WITHOUT ANY WARRANTY; without even the implied warranty of
+ *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ *   General Public License for more details.
+ *
+ *   BSD LICENSE
+ *
+ *   Copyright (C) 2015 EMC Corporation. All Rights Reserved.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copy
+ *       notice, this list of conditions and the following disclaimer in
+ *       the documentation and/or other materials provided with the
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * PCIe NTB Linux driver
+ *
+ * Contact Information:
+ * Allen Hubbe <Allen.Hubbe@emc.com>
+ */
+
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+
+#include <linux/ntb.h>
+#include <linux/pci.h>
+
+#define DRIVER_NAME                    "ntb"
+#define DRIVER_DESCRIPTION             "PCIe NTB Driver Framework"
+
+#define DRIVER_LICENSE                 "Dual BSD/GPL"
+#define DRIVER_VERSION                 "1.0"
+#define DRIVER_RELDATE                 "24 March 2015"
+#define DRIVER_AUTHOR                  "Allen Hubbe <Allen.Hubbe@emc.com>"
+
+MODULE_LICENSE(DRIVER_LICENSE);
+MODULE_VERSION(DRIVER_VERSION);
+MODULE_AUTHOR(DRIVER_AUTHOR);
+MODULE_DESCRIPTION(DRIVER_DESCRIPTION);
+
+static struct bus_type ntb_bus;
+static void ntb_dev_release(struct device *dev);
+
+int __ntb_register_client(struct ntb_client *client, struct module *mod,
+                         const char *mod_name)
+{
+       if (!client)
+               return -EINVAL;
+       if (!ntb_client_ops_is_valid(&client->ops))
+               return -EINVAL;
+
+       memset(&client->drv, 0, sizeof(client->drv));
+       client->drv.bus = &ntb_bus;
+       client->drv.name = mod_name;
+       client->drv.owner = mod;
+
+       return driver_register(&client->drv);
+}
+EXPORT_SYMBOL(__ntb_register_client);
+
+void ntb_unregister_client(struct ntb_client *client)
+{
+       driver_unregister(&client->drv);
+}
+EXPORT_SYMBOL(ntb_unregister_client);
+
+int ntb_register_device(struct ntb_dev *ntb)
+{
+       if (!ntb)
+               return -EINVAL;
+       if (!ntb->pdev)
+               return -EINVAL;
+       if (!ntb->ops)
+               return -EINVAL;
+       if (!ntb_dev_ops_is_valid(ntb->ops))
+               return -EINVAL;
+
+       init_completion(&ntb->released);
+
+       memset(&ntb->dev, 0, sizeof(ntb->dev));
+       ntb->dev.bus = &ntb_bus;
+       ntb->dev.parent = &ntb->pdev->dev;
+       ntb->dev.release = ntb_dev_release;
+       dev_set_name(&ntb->dev, "%s", pci_name(ntb->pdev));
+
+       ntb->ctx = NULL;
+       ntb->ctx_ops = NULL;
+       spin_lock_init(&ntb->ctx_lock);
+
+       return device_register(&ntb->dev);
+}
+EXPORT_SYMBOL(ntb_register_device);
+
+void ntb_unregister_device(struct ntb_dev *ntb)
+{
+       device_unregister(&ntb->dev);
+       wait_for_completion(&ntb->released);
+}
+EXPORT_SYMBOL(ntb_unregister_device);
+
+int ntb_set_ctx(struct ntb_dev *ntb, void *ctx,
+               const struct ntb_ctx_ops *ctx_ops)
+{
+       unsigned long irqflags;
+
+       if (!ntb_ctx_ops_is_valid(ctx_ops))
+               return -EINVAL;
+       if (ntb->ctx_ops)
+               return -EINVAL;
+
+       spin_lock_irqsave(&ntb->ctx_lock, irqflags);
+       {
+               ntb->ctx = ctx;
+               ntb->ctx_ops = ctx_ops;
+       }
+       spin_unlock_irqrestore(&ntb->ctx_lock, irqflags);
+
+       return 0;
+}
+EXPORT_SYMBOL(ntb_set_ctx);
+
+void ntb_clear_ctx(struct ntb_dev *ntb)
+{
+       unsigned long irqflags;
+
+       spin_lock_irqsave(&ntb->ctx_lock, irqflags);
+       {
+               ntb->ctx_ops = NULL;
+               ntb->ctx = NULL;
+       }
+       spin_unlock_irqrestore(&ntb->ctx_lock, irqflags);
+}
+EXPORT_SYMBOL(ntb_clear_ctx);
+
+void ntb_link_event(struct ntb_dev *ntb)
+{
+       unsigned long irqflags;
+
+       spin_lock_irqsave(&ntb->ctx_lock, irqflags);
+       {
+               if (ntb->ctx_ops && ntb->ctx_ops->link_event)
+                       ntb->ctx_ops->link_event(ntb->ctx);
+       }
+       spin_unlock_irqrestore(&ntb->ctx_lock, irqflags);
+}
+EXPORT_SYMBOL(ntb_link_event);
+
+void ntb_db_event(struct ntb_dev *ntb, int vector)
+{
+       unsigned long irqflags;
+
+       spin_lock_irqsave(&ntb->ctx_lock, irqflags);
+       {
+               if (ntb->ctx_ops && ntb->ctx_ops->db_event)
+                       ntb->ctx_ops->db_event(ntb->ctx, vector);
+       }
+       spin_unlock_irqrestore(&ntb->ctx_lock, irqflags);
+}
+EXPORT_SYMBOL(ntb_db_event);
+
+static int ntb_probe(struct device *dev)
+{
+       struct ntb_dev *ntb;
+       struct ntb_client *client;
+       int rc;
+
+       get_device(dev);
+       ntb = dev_ntb(dev);
+       client = drv_ntb_client(dev->driver);
+
+       rc = client->ops.probe(client, ntb);
+       if (rc)
+               put_device(dev);
+
+       return rc;
+}
+
+static int ntb_remove(struct device *dev)
+{
+       struct ntb_dev *ntb;
+       struct ntb_client *client;
+
+       if (dev->driver) {
+               ntb = dev_ntb(dev);
+               client = drv_ntb_client(dev->driver);
+
+               client->ops.remove(client, ntb);
+               put_device(dev);
+       }
+
+       return 0;
+}
+
+static void ntb_dev_release(struct device *dev)
+{
+       struct ntb_dev *ntb = dev_ntb(dev);
+
+       complete(&ntb->released);
+}
+
+static struct bus_type ntb_bus = {
+       .name = "ntb",
+       .probe = ntb_probe,
+       .remove = ntb_remove,
+};
+
+static int __init ntb_driver_init(void)
+{
+       return bus_register(&ntb_bus);
+}
+module_init(ntb_driver_init);
+
+static void __exit ntb_driver_exit(void)
+{
+       bus_unregister(&ntb_bus);
+}
+module_exit(ntb_driver_exit);
+
diff --git a/drivers/ntb/ntb_hw.c b/drivers/ntb/ntb_hw.c
deleted file mode 100644 (file)
index 3f67386..0000000
+++ /dev/null
@@ -1,1895 +0,0 @@
-/*
- * This file is provided under a dual BSD/GPLv2 license.  When using or
- *   redistributing this file, you may do so under either license.
- *
- *   GPL LICENSE SUMMARY
- *
- *   Copyright(c) 2012 Intel Corporation. All rights reserved.
- *
- *   This program is free software; you can redistribute it and/or modify
- *   it under the terms of version 2 of the GNU General Public License as
- *   published by the Free Software Foundation.
- *
- *   BSD LICENSE
- *
- *   Copyright(c) 2012 Intel Corporation. All rights reserved.
- *
- *   Redistribution and use in source and binary forms, with or without
- *   modification, are permitted provided that the following conditions
- *   are met:
- *
- *     * Redistributions of source code must retain the above copyright
- *       notice, this list of conditions and the following disclaimer.
- *     * Redistributions in binary form must reproduce the above copy
- *       notice, this list of conditions and the following disclaimer in
- *       the documentation and/or other materials provided with the
- *       distribution.
- *     * Neither the name of Intel Corporation nor the names of its
- *       contributors may be used to endorse or promote products derived
- *       from this software without specific prior written permission.
- *
- *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * Intel PCIe NTB Linux driver
- *
- * Contact Information:
- * Jon Mason <jon.mason@intel.com>
- */
-#include <linux/debugfs.h>
-#include <linux/delay.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/module.h>
-#include <linux/pci.h>
-#include <linux/random.h>
-#include <linux/slab.h>
-#include "ntb_hw.h"
-#include "ntb_regs.h"
-
-#define NTB_NAME       "Intel(R) PCI-E Non-Transparent Bridge Driver"
-#define NTB_VER                "1.0"
-
-MODULE_DESCRIPTION(NTB_NAME);
-MODULE_VERSION(NTB_VER);
-MODULE_LICENSE("Dual BSD/GPL");
-MODULE_AUTHOR("Intel Corporation");
-
-enum {
-       NTB_CONN_TRANSPARENT = 0,
-       NTB_CONN_B2B,
-       NTB_CONN_RP,
-};
-
-enum {
-       NTB_DEV_USD = 0,
-       NTB_DEV_DSD,
-};
-
-enum {
-       SNB_HW = 0,
-       BWD_HW,
-};
-
-static struct dentry *debugfs_dir;
-
-#define BWD_LINK_RECOVERY_TIME 500
-
-/* Translate memory window 0,1,2 to BAR 2,4,5 */
-#define MW_TO_BAR(mw)  (mw == 0 ? 2 : (mw == 1 ? 4 : 5))
-
-static const struct pci_device_id ntb_pci_tbl[] = {
-       {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_BWD)},
-       {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_JSF)},
-       {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_SNB)},
-       {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_IVT)},
-       {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_HSX)},
-       {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_PS_JSF)},
-       {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_PS_SNB)},
-       {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_PS_IVT)},
-       {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_PS_HSX)},
-       {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_SS_JSF)},
-       {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_SS_SNB)},
-       {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_SS_IVT)},
-       {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_SS_HSX)},
-       {0}
-};
-MODULE_DEVICE_TABLE(pci, ntb_pci_tbl);
-
-static int is_ntb_xeon(struct ntb_device *ndev)
-{
-       switch (ndev->pdev->device) {
-       case PCI_DEVICE_ID_INTEL_NTB_SS_JSF:
-       case PCI_DEVICE_ID_INTEL_NTB_SS_SNB:
-       case PCI_DEVICE_ID_INTEL_NTB_SS_IVT:
-       case PCI_DEVICE_ID_INTEL_NTB_SS_HSX:
-       case PCI_DEVICE_ID_INTEL_NTB_PS_JSF:
-       case PCI_DEVICE_ID_INTEL_NTB_PS_SNB:
-       case PCI_DEVICE_ID_INTEL_NTB_PS_IVT:
-       case PCI_DEVICE_ID_INTEL_NTB_PS_HSX:
-       case PCI_DEVICE_ID_INTEL_NTB_B2B_JSF:
-       case PCI_DEVICE_ID_INTEL_NTB_B2B_SNB:
-       case PCI_DEVICE_ID_INTEL_NTB_B2B_IVT:
-       case PCI_DEVICE_ID_INTEL_NTB_B2B_HSX:
-               return 1;
-       default:
-               return 0;
-       }
-
-       return 0;
-}
-
-static int is_ntb_atom(struct ntb_device *ndev)
-{
-       switch (ndev->pdev->device) {
-       case PCI_DEVICE_ID_INTEL_NTB_B2B_BWD:
-               return 1;
-       default:
-               return 0;
-       }
-
-       return 0;
-}
-
-static void ntb_set_errata_flags(struct ntb_device *ndev)
-{
-       switch (ndev->pdev->device) {
-       /*
-        * this workaround applies to all platforms up to IvyBridge;
-        * Haswell has split-BAR support and uses a different workaround
-        */
-       case PCI_DEVICE_ID_INTEL_NTB_SS_JSF:
-       case PCI_DEVICE_ID_INTEL_NTB_SS_SNB:
-       case PCI_DEVICE_ID_INTEL_NTB_SS_IVT:
-       case PCI_DEVICE_ID_INTEL_NTB_SS_HSX:
-       case PCI_DEVICE_ID_INTEL_NTB_PS_JSF:
-       case PCI_DEVICE_ID_INTEL_NTB_PS_SNB:
-       case PCI_DEVICE_ID_INTEL_NTB_PS_IVT:
-       case PCI_DEVICE_ID_INTEL_NTB_PS_HSX:
-       case PCI_DEVICE_ID_INTEL_NTB_B2B_JSF:
-       case PCI_DEVICE_ID_INTEL_NTB_B2B_SNB:
-       case PCI_DEVICE_ID_INTEL_NTB_B2B_IVT:
-       case PCI_DEVICE_ID_INTEL_NTB_B2B_HSX:
-               ndev->wa_flags |= WA_SNB_ERR;
-               break;
-       }
-}
-
-/**
- * ntb_register_event_callback() - register event callback
- * @ndev: pointer to ntb_device instance
- * @func: callback function to register
- *
- * This function registers a callback for any HW driver events such as link
- * up/down, power management notices, and so on.
- *
- * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
- */
-int ntb_register_event_callback(struct ntb_device *ndev,
-                               void (*func)(void *handle,
-                                            enum ntb_hw_event event))
-{
-       if (ndev->event_cb)
-               return -EINVAL;
-
-       ndev->event_cb = func;
-
-       return 0;
-}
-
-/**
- * ntb_unregister_event_callback() - unregisters the event callback
- * @ndev: pointer to ntb_device instance
- *
- * This function unregisters the existing callback from transport
- */
-void ntb_unregister_event_callback(struct ntb_device *ndev)
-{
-       ndev->event_cb = NULL;
-}
-
-static void ntb_irq_work(unsigned long data)
-{
-       struct ntb_db_cb *db_cb = (struct ntb_db_cb *)data;
-       int rc;
-
-       rc = db_cb->callback(db_cb->data, db_cb->db_num);
-       if (rc)
-               tasklet_schedule(&db_cb->irq_work);
-       else {
-               struct ntb_device *ndev = db_cb->ndev;
-               unsigned long mask;
-
-               mask = readw(ndev->reg_ofs.ldb_mask);
-               clear_bit(db_cb->db_num * ndev->bits_per_vector, &mask);
-               writew(mask, ndev->reg_ofs.ldb_mask);
-       }
-}
-
-/**
- * ntb_register_db_callback() - register a callback for doorbell interrupt
- * @ndev: pointer to ntb_device instance
- * @idx: doorbell index to register callback, zero based
- * @data: pointer to be returned to caller with every callback
- * @func: callback function to register
- *
- * This function registers a callback function for the doorbell interrupt
- * on the primary side. The function will unmask the doorbell as well to
- * allow interrupt.
- *
- * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
- */
-int ntb_register_db_callback(struct ntb_device *ndev, unsigned int idx,
-                            void *data, int (*func)(void *data, int db_num))
-{
-       unsigned long mask;
-
-       if (idx >= ndev->max_cbs || ndev->db_cb[idx].callback) {
-               dev_warn(&ndev->pdev->dev, "Invalid Index.\n");
-               return -EINVAL;
-       }
-
-       ndev->db_cb[idx].callback = func;
-       ndev->db_cb[idx].data = data;
-       ndev->db_cb[idx].ndev = ndev;
-
-       tasklet_init(&ndev->db_cb[idx].irq_work, ntb_irq_work,
-                    (unsigned long) &ndev->db_cb[idx]);
-
-       /* unmask interrupt */
-       mask = readw(ndev->reg_ofs.ldb_mask);
-       clear_bit(idx * ndev->bits_per_vector, &mask);
-       writew(mask, ndev->reg_ofs.ldb_mask);
-
-       return 0;
-}
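For reference, a hedged sketch of how a transport would have hooked a doorbell with the API being removed here; the handler and setup names are hypothetical:

        /* Sketch only: register a doorbell handler with the old API.  A
         * non-zero return from the handler made ntb_irq_work() reschedule
         * its tasklet.
         */
        static int my_db_handler(void *data, int db_num)
        {
                /* process doorbell db_num using the caller's data */
                return 0;
        }

        static int my_transport_setup(struct ntb_device *ndev, void *data)
        {
                return ntb_register_db_callback(ndev, 0, data, my_db_handler);
        }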
-
-/**
- * ntb_unregister_db_callback() - unregister a callback for doorbell interrupt
- * @ndev: pointer to ntb_device instance
- * @idx: doorbell index to register callback, zero based
- *
- * This function unregisters a callback function for the doorbell interrupt
- * on the primary side. The function will also mask the said doorbell.
- */
-void ntb_unregister_db_callback(struct ntb_device *ndev, unsigned int idx)
-{
-       unsigned long mask;
-
-       if (idx >= ndev->max_cbs || !ndev->db_cb[idx].callback)
-               return;
-
-       mask = readw(ndev->reg_ofs.ldb_mask);
-       set_bit(idx * ndev->bits_per_vector, &mask);
-       writew(mask, ndev->reg_ofs.ldb_mask);
-
-       tasklet_disable(&ndev->db_cb[idx].irq_work);
-
-       ndev->db_cb[idx].callback = NULL;
-}
-
-/**
- * ntb_find_transport() - find the transport pointer
- * @transport: pointer to pci device
- *
- * Given the pci device pointer, return the transport pointer that was passed
- * in when the transport was registered.
- *
- * RETURNS: pointer to transport.
- */
-void *ntb_find_transport(struct pci_dev *pdev)
-{
-       struct ntb_device *ndev = pci_get_drvdata(pdev);
-       return ndev->ntb_transport;
-}
-
-/**
- * ntb_register_transport() - Register NTB transport with NTB HW driver
- * @transport: transport identifier
- *
- * This function allows a transport to reserve the hardware driver for
- * NTB usage.
- *
- * RETURNS: pointer to ntb_device, NULL on error.
- */
-struct ntb_device *ntb_register_transport(struct pci_dev *pdev, void *transport)
-{
-       struct ntb_device *ndev = pci_get_drvdata(pdev);
-
-       if (ndev->ntb_transport)
-               return NULL;
-
-       ndev->ntb_transport = transport;
-       return ndev;
-}
-
-/**
- * ntb_unregister_transport() - Unregister the transport with the NTB HW driver
- * @ndev: ntb_device of the transport to be freed
- *
- * This function unregisters the transport from the HW driver and performs any
- * necessary cleanups.
- */
-void ntb_unregister_transport(struct ntb_device *ndev)
-{
-       int i;
-
-       if (!ndev->ntb_transport)
-               return;
-
-       for (i = 0; i < ndev->max_cbs; i++)
-               ntb_unregister_db_callback(ndev, i);
-
-       ntb_unregister_event_callback(ndev);
-       ndev->ntb_transport = NULL;
-}
-
-/**
- * ntb_write_local_spad() - write to the secondary scratchpad register
- * @ndev: pointer to ntb_device instance
- * @idx: index to the scratchpad register, 0 based
- * @val: the data value to put into the register
- *
- * This function allows writing of a 32bit value to the indexed scratchpad
- * register. This writes over the data mirrored to the local scratchpad register
- * by the remote system.
- *
- * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
- */
-int ntb_write_local_spad(struct ntb_device *ndev, unsigned int idx, u32 val)
-{
-       if (idx >= ndev->limits.max_spads)
-               return -EINVAL;
-
-       dev_dbg(&ndev->pdev->dev, "Writing %x to local scratch pad index %d\n",
-               val, idx);
-       writel(val, ndev->reg_ofs.spad_read + idx * 4);
-
-       return 0;
-}
-
-/**
- * ntb_read_local_spad() - read from the primary scratchpad register
- * @ndev: pointer to ntb_device instance
- * @idx: index to scratchpad register, 0 based
- * @val: pointer to 32bit integer for storing the register value
- *
- * This function allows reading of the 32bit scratchpad register on
- * the primary (internal) side.  This allows the local system to read data
- * written and mirrored to the scratchpad register by the remote system.
- *
- * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
- */
-int ntb_read_local_spad(struct ntb_device *ndev, unsigned int idx, u32 *val)
-{
-       if (idx >= ndev->limits.max_spads)
-               return -EINVAL;
-
-       *val = readl(ndev->reg_ofs.spad_write + idx * 4);
-       dev_dbg(&ndev->pdev->dev,
-               "Reading %x from local scratch pad index %d\n", *val, idx);
-
-       return 0;
-}
-
-/**
- * ntb_write_remote_spad() - write to the secondary scratchpad register
- * @ndev: pointer to ntb_device instance
- * @idx: index to the scratchpad register, 0 based
- * @val: the data value to put into the register
- *
- * This function allows writing of a 32bit value to the indexed scratchpad
- * register. The register resides on the secondary (external) side.  This allows
- * the local system to write data to be mirrored to the remote system's
- * scratchpad register.
- *
- * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
- */
-int ntb_write_remote_spad(struct ntb_device *ndev, unsigned int idx, u32 val)
-{
-       if (idx >= ndev->limits.max_spads)
-               return -EINVAL;
-
-       dev_dbg(&ndev->pdev->dev, "Writing %x to remote scratch pad index %d\n",
-               val, idx);
-       writel(val, ndev->reg_ofs.spad_write + idx * 4);
-
-       return 0;
-}
-
-/**
- * ntb_read_remote_spad() - read from the primary scratchpad register
- * @ndev: pointer to ntb_device instance
- * @idx: index to scratchpad register, 0 based
- * @val: pointer to 32bit integer for storing the register value
- *
- * This function allows reading of the 32bit scratchpad register on
- * the primary (internal) side.  This allows the local system to read the data
- * it wrote to be mirrored on the remote system.
- *
- * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
- */
-int ntb_read_remote_spad(struct ntb_device *ndev, unsigned int idx, u32 *val)
-{
-       if (idx >= ndev->limits.max_spads)
-               return -EINVAL;
-
-       *val = readl(ndev->reg_ofs.spad_read + idx * 4);
-       dev_dbg(&ndev->pdev->dev,
-               "Reading %x from remote scratch pad index %d\n", *val, idx);
-
-       return 0;
-}
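
A minimal sketch of how a transport layer might pair the scratchpad helpers above, assuming both peers run the same code; the slot index 0, the helper name and the -EPROTO policy are illustrative only and not part of this driver:

/* Illustrative only: publish a version word and compare it with the peer's. */
static int example_spad_version_handshake(struct ntb_device *ndev, u32 version)
{
	u32 peer_version;
	int rc;

	/* Publish our version so it lands in the peer's scratchpad space */
	rc = ntb_write_remote_spad(ndev, 0, version);
	if (rc)
		return rc;

	/* Read the version the peer published towards us */
	rc = ntb_read_remote_spad(ndev, 0, &peer_version);
	if (rc)
		return rc;

	return peer_version == version ? 0 : -EPROTO;
}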
-
-/**
- * ntb_get_mw_base() - get addr for the NTB memory window
- * @ndev: pointer to ntb_device instance
- * @mw: memory window number
- *
- * This function provides the base address of the memory window specified.
- *
- * RETURNS: the base address, or zero on error.
- */
-resource_size_t ntb_get_mw_base(struct ntb_device *ndev, unsigned int mw)
-{
-       if (mw >= ntb_max_mw(ndev))
-               return 0;
-
-       return pci_resource_start(ndev->pdev, MW_TO_BAR(mw));
-}
-
-/**
- * ntb_get_mw_vbase() - get virtual addr for the NTB memory window
- * @ndev: pointer to ntb_device instance
- * @mw: memory window number
- *
- * This function provides the base virtual address of the memory window
- * specified.
- *
- * RETURNS: pointer to virtual address, or NULL on error.
- */
-void __iomem *ntb_get_mw_vbase(struct ntb_device *ndev, unsigned int mw)
-{
-       if (mw >= ntb_max_mw(ndev))
-               return NULL;
-
-       return ndev->mw[mw].vbase;
-}
-
-/**
- * ntb_get_mw_size() - return size of NTB memory window
- * @ndev: pointer to ntb_device instance
- * @mw: memory window number
- *
- * This function provides the physical size of the memory window specified
- *
- * RETURNS: the size of the memory window or zero on error
- */
-u64 ntb_get_mw_size(struct ntb_device *ndev, unsigned int mw)
-{
-       if (mw >= ntb_max_mw(ndev))
-               return 0;
-
-       return ndev->mw[mw].bar_sz;
-}
-
-/**
- * ntb_set_mw_addr - set the memory window address
- * @ndev: pointer to ntb_device instance
- * @mw: memory window number
- * @addr: base address for data
- *
- * This function sets the base physical address of the memory window.  This
- * memory address is where data from the remote system will be transferred into
- * or out of, depending on how the transport is configured.
- */
-void ntb_set_mw_addr(struct ntb_device *ndev, unsigned int mw, u64 addr)
-{
-       if (mw >= ntb_max_mw(ndev))
-               return;
-
-       dev_dbg(&ndev->pdev->dev, "Writing addr %Lx to BAR %d\n", addr,
-               MW_TO_BAR(mw));
-
-       ndev->mw[mw].phys_addr = addr;
-
-       switch (MW_TO_BAR(mw)) {
-       case NTB_BAR_23:
-               writeq(addr, ndev->reg_ofs.bar2_xlat);
-               break;
-       case NTB_BAR_4:
-               if (ndev->split_bar)
-                       writel(addr, ndev->reg_ofs.bar4_xlat);
-               else
-                       writeq(addr, ndev->reg_ofs.bar4_xlat);
-               break;
-       case NTB_BAR_5:
-               writel(addr, ndev->reg_ofs.bar5_xlat);
-               break;
-       }
-}
-
-/**
- * ntb_ring_doorbell() - Set the doorbell on the secondary/external side
- * @ndev: pointer to ntb_device instance
- * @db: doorbell to ring
- *
- * This function allows triggering of a doorbell on the secondary/external
- * side that will initiate an interrupt on the remote host.
- */
-void ntb_ring_doorbell(struct ntb_device *ndev, unsigned int db)
-{
-       dev_dbg(&ndev->pdev->dev, "%s: ringing doorbell %d\n", __func__, db);
-
-       if (ndev->hw_type == BWD_HW)
-               writeq((u64) 1 << db, ndev->reg_ofs.rdb);
-       else
-               writew(((1 << ndev->bits_per_vector) - 1) <<
-                      (db * ndev->bits_per_vector), ndev->reg_ofs.rdb);
-}
-
-static void bwd_recover_link(struct ntb_device *ndev)
-{
-       u32 status;
-
-       /* Driver resets the NTB ModPhy lanes - magic! */
-       writeb(0xe0, ndev->reg_base + BWD_MODPHY_PCSREG6);
-       writeb(0x40, ndev->reg_base + BWD_MODPHY_PCSREG4);
-       writeb(0x60, ndev->reg_base + BWD_MODPHY_PCSREG4);
-       writeb(0x60, ndev->reg_base + BWD_MODPHY_PCSREG6);
-
-       /* Driver waits 100ms to allow the NTB ModPhy to settle */
-       msleep(100);
-
-       /* Clear AER Errors, write to clear */
-       status = readl(ndev->reg_base + BWD_ERRCORSTS_OFFSET);
-       dev_dbg(&ndev->pdev->dev, "ERRCORSTS = %x\n", status);
-       status &= PCI_ERR_COR_REP_ROLL;
-       writel(status, ndev->reg_base + BWD_ERRCORSTS_OFFSET);
-
-       /* Clear unexpected electrical idle event in LTSSM, write to clear */
-       status = readl(ndev->reg_base + BWD_LTSSMERRSTS0_OFFSET);
-       dev_dbg(&ndev->pdev->dev, "LTSSMERRSTS0 = %x\n", status);
-       status |= BWD_LTSSMERRSTS0_UNEXPECTEDEI;
-       writel(status, ndev->reg_base + BWD_LTSSMERRSTS0_OFFSET);
-
-       /* Clear DeSkew Buffer error, write to clear */
-       status = readl(ndev->reg_base + BWD_DESKEWSTS_OFFSET);
-       dev_dbg(&ndev->pdev->dev, "DESKEWSTS = %x\n", status);
-       status |= BWD_DESKEWSTS_DBERR;
-       writel(status, ndev->reg_base + BWD_DESKEWSTS_OFFSET);
-
-       status = readl(ndev->reg_base + BWD_IBSTERRRCRVSTS0_OFFSET);
-       dev_dbg(&ndev->pdev->dev, "IBSTERRRCRVSTS0 = %x\n", status);
-       status &= BWD_IBIST_ERR_OFLOW;
-       writel(status, ndev->reg_base + BWD_IBSTERRRCRVSTS0_OFFSET);
-
-       /* Releases the NTB state machine to allow the link to retrain */
-       status = readl(ndev->reg_base + BWD_LTSSMSTATEJMP_OFFSET);
-       dev_dbg(&ndev->pdev->dev, "LTSSMSTATEJMP = %x\n", status);
-       status &= ~BWD_LTSSMSTATEJMP_FORCEDETECT;
-       writel(status, ndev->reg_base + BWD_LTSSMSTATEJMP_OFFSET);
-}
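
Several of the status registers above are write-1-to-clear: the code reads the register, makes sure the relevant event bit is in the value it writes back, and thereby acknowledges that event (the masking details differ slightly per register). A generic sketch of the pattern, with a hypothetical helper name:

/* Illustrative only: acknowledge one write-1-to-clear status bit. */
static void example_ack_w1c_bit(void __iomem *reg, u32 bit)
{
	u32 status = readl(reg);

	if (status & bit)		/* event pending? */
		writel(bit, reg);	/* writing 1 to the bit clears it */
}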
-
-static void ntb_link_event(struct ntb_device *ndev, int link_state)
-{
-       unsigned int event;
-
-       if (ndev->link_status == link_state)
-               return;
-
-       if (link_state == NTB_LINK_UP) {
-               u16 status;
-
-               dev_info(&ndev->pdev->dev, "Link Up\n");
-               ndev->link_status = NTB_LINK_UP;
-               event = NTB_EVENT_HW_LINK_UP;
-
-               if (is_ntb_atom(ndev) ||
-                   ndev->conn_type == NTB_CONN_TRANSPARENT)
-                       status = readw(ndev->reg_ofs.lnk_stat);
-               else {
-                       int rc = pci_read_config_word(ndev->pdev,
-                                                     SNB_LINK_STATUS_OFFSET,
-                                                     &status);
-                       if (rc)
-                               return;
-               }
-
-               ndev->link_width = (status & NTB_LINK_WIDTH_MASK) >> 4;
-               ndev->link_speed = (status & NTB_LINK_SPEED_MASK);
-               dev_info(&ndev->pdev->dev, "Link Width %d, Link Speed %d\n",
-                        ndev->link_width, ndev->link_speed);
-       } else {
-               dev_info(&ndev->pdev->dev, "Link Down\n");
-               ndev->link_status = NTB_LINK_DOWN;
-               event = NTB_EVENT_HW_LINK_DOWN;
-               /* Don't modify link width/speed, we need it in link recovery */
-       }
-
-       /* notify the upper layer if we have an event change */
-       if (ndev->event_cb)
-               ndev->event_cb(ndev->ntb_transport, event);
-}
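
The width/speed extraction above mirrors the standard PCIe Link Status register layout (speed in bits 3:0, width in bits 9:4), assuming NTB_LINK_SPEED_MASK and NTB_LINK_WIDTH_MASK follow that layout. A small worked example with a sample register value:

/* Illustrative only: decode a sample Link Status value of 0x0042. */
static void example_decode_link_status(void)
{
	u16 status = 0x0042;
	unsigned char width = (status & 0x03f0) >> 4;	/* == 4 -> x4 link    */
	unsigned char speed = status & 0x000f;		/* == 2 -> PCI-E Gen2 */

	pr_info("link is x%u at gen %u\n", width, speed);
}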
-
-static int ntb_link_status(struct ntb_device *ndev)
-{
-       int link_state;
-
-       if (is_ntb_atom(ndev)) {
-               u32 ntb_cntl;
-
-               ntb_cntl = readl(ndev->reg_ofs.lnk_cntl);
-               if (ntb_cntl & BWD_CNTL_LINK_DOWN)
-                       link_state = NTB_LINK_DOWN;
-               else
-                       link_state = NTB_LINK_UP;
-       } else {
-               u16 status;
-               int rc;
-
-               rc = pci_read_config_word(ndev->pdev, SNB_LINK_STATUS_OFFSET,
-                                         &status);
-               if (rc)
-                       return rc;
-
-               if (status & NTB_LINK_STATUS_ACTIVE)
-                       link_state = NTB_LINK_UP;
-               else
-                       link_state = NTB_LINK_DOWN;
-       }
-
-       ntb_link_event(ndev, link_state);
-
-       return 0;
-}
-
-static void bwd_link_recovery(struct work_struct *work)
-{
-       struct ntb_device *ndev = container_of(work, struct ntb_device,
-                                              lr_timer.work);
-       u32 status32;
-
-       bwd_recover_link(ndev);
-       /* There is a potential race between the 2 NTB devices recovering at the
-        * same time.  If the times are the same, the link will not recover and
-        * the driver will be stuck in this loop forever.  Add a random interval
-        * to the recovery time to prevent this race.
-        */
-       msleep(BWD_LINK_RECOVERY_TIME + prandom_u32() % BWD_LINK_RECOVERY_TIME);
-
-       status32 = readl(ndev->reg_base + BWD_LTSSMSTATEJMP_OFFSET);
-       if (status32 & BWD_LTSSMSTATEJMP_FORCEDETECT)
-               goto retry;
-
-       status32 = readl(ndev->reg_base + BWD_IBSTERRRCRVSTS0_OFFSET);
-       if (status32 & BWD_IBIST_ERR_OFLOW)
-               goto retry;
-
-       status32 = readl(ndev->reg_ofs.lnk_cntl);
-       if (!(status32 & BWD_CNTL_LINK_DOWN)) {
-               unsigned char speed, width;
-               u16 status16;
-
-               status16 = readw(ndev->reg_ofs.lnk_stat);
-               width = (status16 & NTB_LINK_WIDTH_MASK) >> 4;
-               speed = (status16 & NTB_LINK_SPEED_MASK);
-               if (ndev->link_width != width || ndev->link_speed != speed)
-                       goto retry;
-       }
-
-       schedule_delayed_work(&ndev->hb_timer, NTB_HB_TIMEOUT);
-       return;
-
-retry:
-       schedule_delayed_work(&ndev->lr_timer, NTB_HB_TIMEOUT);
-}
-
-/* BWD doesn't have a link status interrupt, so poll on that platform */
-static void bwd_link_poll(struct work_struct *work)
-{
-       struct ntb_device *ndev = container_of(work, struct ntb_device,
-                                              hb_timer.work);
-       unsigned long ts = jiffies;
-
-       /* If we haven't gotten an interrupt in a while, check the BWD link
-        * status bit
-        */
-       if (ts > ndev->last_ts + NTB_HB_TIMEOUT) {
-               int rc = ntb_link_status(ndev);
-               if (rc)
-                       dev_err(&ndev->pdev->dev,
-                               "Error determining link status\n");
-
-               /* Check to see if a link error is the cause of the link down */
-               if (ndev->link_status == NTB_LINK_DOWN) {
-                       u32 status32 = readl(ndev->reg_base +
-                                            BWD_LTSSMSTATEJMP_OFFSET);
-                       if (status32 & BWD_LTSSMSTATEJMP_FORCEDETECT) {
-                               schedule_delayed_work(&ndev->lr_timer, 0);
-                               return;
-                       }
-               }
-       }
-
-       schedule_delayed_work(&ndev->hb_timer, NTB_HB_TIMEOUT);
-}
-
-static int ntb_xeon_setup(struct ntb_device *ndev)
-{
-       switch (ndev->conn_type) {
-       case NTB_CONN_B2B:
-               ndev->reg_ofs.ldb = ndev->reg_base + SNB_PDOORBELL_OFFSET;
-               ndev->reg_ofs.ldb_mask = ndev->reg_base + SNB_PDBMSK_OFFSET;
-               ndev->reg_ofs.spad_read = ndev->reg_base + SNB_SPAD_OFFSET;
-               ndev->reg_ofs.bar2_xlat = ndev->reg_base + SNB_SBAR2XLAT_OFFSET;
-               ndev->reg_ofs.bar4_xlat = ndev->reg_base + SNB_SBAR4XLAT_OFFSET;
-               if (ndev->split_bar)
-                       ndev->reg_ofs.bar5_xlat =
-                               ndev->reg_base + SNB_SBAR5XLAT_OFFSET;
-               ndev->limits.max_spads = SNB_MAX_B2B_SPADS;
-
-               /* There is a Xeon hardware errata related to writes to
-                * SDOORBELL or B2BDOORBELL in conjunction with inbound access
-                * to NTB MMIO Space, which may hang the system.  To work around
-                * this, use the second memory window to access the interrupt and
-                * scratch pad registers on the remote system.
-                */
-               if (ndev->wa_flags & WA_SNB_ERR) {
-                       if (!ndev->mw[ndev->limits.max_mw - 1].bar_sz)
-                               return -EINVAL;
-
-                       ndev->limits.max_db_bits = SNB_MAX_DB_BITS;
-                       ndev->reg_ofs.spad_write =
-                               ndev->mw[ndev->limits.max_mw - 1].vbase +
-                               SNB_SPAD_OFFSET;
-                       ndev->reg_ofs.rdb =
-                               ndev->mw[ndev->limits.max_mw - 1].vbase +
-                               SNB_PDOORBELL_OFFSET;
-
-                       /* Set the Limit register to 4k, the minimum size, to
-                        * prevent an illegal access
-                        */
-                       writeq(ndev->mw[1].bar_sz + 0x1000, ndev->reg_base +
-                              SNB_PBAR4LMT_OFFSET);
-                       /* HW errata on the Limit registers.  They can only be
-                        * written when the base register is 4GB aligned and
-                        * < 32bit.  This should already be the case based on
-                        * the driver defaults, but write the Limit registers
-                        * first just in case.
-                        */
-
-                       ndev->limits.max_mw = SNB_ERRATA_MAX_MW;
-               } else {
-                       /* HW Errata on bit 14 of b2bdoorbell register.  Writes
-                        * will not be mirrored to the remote system.  Shrink
-                        * the number of bits by one, since bit 14 is the last
-                        * bit.
-                        */
-                       ndev->limits.max_db_bits = SNB_MAX_DB_BITS - 1;
-                       ndev->reg_ofs.spad_write = ndev->reg_base +
-                                                  SNB_B2B_SPAD_OFFSET;
-                       ndev->reg_ofs.rdb = ndev->reg_base +
-                                           SNB_B2B_DOORBELL_OFFSET;
-
-                       /* Disable the Limit register, just in case it is set to
-                        * something silly. A 64bit write should handle it
-                        * regardless of whether it has a split BAR or not.
-                        */
-                       writeq(0, ndev->reg_base + SNB_PBAR4LMT_OFFSET);
-                       /* HW errata on the Limit registers.  They can only be
-                        * written when the base register is 4GB aligned and
-                        * < 32bit.  This should already be the case based on
-                        * the driver defaults, but write the Limit registers
-                        * first just in case.
-                        */
-                       if (ndev->split_bar)
-                               ndev->limits.max_mw = HSX_SPLITBAR_MAX_MW;
-                       else
-                               ndev->limits.max_mw = SNB_MAX_MW;
-               }
-
-               /* The Xeon errata workaround requires setting SBAR Base
-                * addresses to known values, so that the PBAR XLAT can be
-                * pointed at SBAR0 of the remote system.
-                */
-               if (ndev->dev_type == NTB_DEV_USD) {
-                       writeq(SNB_MBAR23_DSD_ADDR, ndev->reg_base +
-                              SNB_PBAR2XLAT_OFFSET);
-                       if (ndev->wa_flags & WA_SNB_ERR)
-                               writeq(SNB_MBAR01_DSD_ADDR, ndev->reg_base +
-                                      SNB_PBAR4XLAT_OFFSET);
-                       else {
-                               if (ndev->split_bar) {
-                                       writel(SNB_MBAR4_DSD_ADDR,
-                                              ndev->reg_base +
-                                              SNB_PBAR4XLAT_OFFSET);
-                                       writel(SNB_MBAR5_DSD_ADDR,
-                                              ndev->reg_base +
-                                              SNB_PBAR5XLAT_OFFSET);
-                               } else
-                                       writeq(SNB_MBAR4_DSD_ADDR,
-                                              ndev->reg_base +
-                                              SNB_PBAR4XLAT_OFFSET);
-
-                               /* B2B_XLAT_OFFSET is a 64bit register, but can
-                                * only take 32bit writes
-                                */
-                               writel(SNB_MBAR01_DSD_ADDR & 0xffffffff,
-                                      ndev->reg_base + SNB_B2B_XLAT_OFFSETL);
-                               writel(SNB_MBAR01_DSD_ADDR >> 32,
-                                      ndev->reg_base + SNB_B2B_XLAT_OFFSETU);
-                       }
-
-                       writeq(SNB_MBAR01_USD_ADDR, ndev->reg_base +
-                              SNB_SBAR0BASE_OFFSET);
-                       writeq(SNB_MBAR23_USD_ADDR, ndev->reg_base +
-                              SNB_SBAR2BASE_OFFSET);
-                       if (ndev->split_bar) {
-                               writel(SNB_MBAR4_USD_ADDR, ndev->reg_base +
-                                      SNB_SBAR4BASE_OFFSET);
-                               writel(SNB_MBAR5_USD_ADDR, ndev->reg_base +
-                                      SNB_SBAR5BASE_OFFSET);
-                       } else
-                               writeq(SNB_MBAR4_USD_ADDR, ndev->reg_base +
-                                      SNB_SBAR4BASE_OFFSET);
-               } else {
-                       writeq(SNB_MBAR23_USD_ADDR, ndev->reg_base +
-                              SNB_PBAR2XLAT_OFFSET);
-                       if (ndev->wa_flags & WA_SNB_ERR)
-                               writeq(SNB_MBAR01_USD_ADDR, ndev->reg_base +
-                                      SNB_PBAR4XLAT_OFFSET);
-                       else {
-                               if (ndev->split_bar) {
-                                       writel(SNB_MBAR4_USD_ADDR,
-                                              ndev->reg_base +
-                                              SNB_PBAR4XLAT_OFFSET);
-                                       writel(SNB_MBAR5_USD_ADDR,
-                                              ndev->reg_base +
-                                              SNB_PBAR5XLAT_OFFSET);
-                               } else
-                                       writeq(SNB_MBAR4_USD_ADDR,
-                                              ndev->reg_base +
-                                              SNB_PBAR4XLAT_OFFSET);
-
-                               /*
-                                * B2B_XLAT_OFFSET is a 64bit register, but can
-                                * only take 32bit writes
-                                */
-                               writel(SNB_MBAR01_USD_ADDR & 0xffffffff,
-                                      ndev->reg_base + SNB_B2B_XLAT_OFFSETL);
-                               writel(SNB_MBAR01_USD_ADDR >> 32,
-                                      ndev->reg_base + SNB_B2B_XLAT_OFFSETU);
-                       }
-                       writeq(SNB_MBAR01_DSD_ADDR, ndev->reg_base +
-                              SNB_SBAR0BASE_OFFSET);
-                       writeq(SNB_MBAR23_DSD_ADDR, ndev->reg_base +
-                              SNB_SBAR2BASE_OFFSET);
-                       if (ndev->split_bar) {
-                               writel(SNB_MBAR4_DSD_ADDR, ndev->reg_base +
-                                      SNB_SBAR4BASE_OFFSET);
-                               writel(SNB_MBAR5_DSD_ADDR, ndev->reg_base +
-                                      SNB_SBAR5BASE_OFFSET);
-                       } else
-                               writeq(SNB_MBAR4_DSD_ADDR, ndev->reg_base +
-                                      SNB_SBAR4BASE_OFFSET);
-
-               }
-               break;
-       case NTB_CONN_RP:
-               if (ndev->wa_flags & WA_SNB_ERR) {
-                       dev_err(&ndev->pdev->dev,
-                               "NTB-RP disabled due to hardware errata.\n");
-                       return -EINVAL;
-               }
-
-               /* Scratch pads need to have exclusive access from the primary
-                * or secondary side.  Halve the num spads so that each side can
-                * have an equal amount.
-                */
-               ndev->limits.max_spads = SNB_MAX_COMPAT_SPADS / 2;
-               ndev->limits.max_db_bits = SNB_MAX_DB_BITS;
-               /* Note: The SDOORBELL is the cause of the errata.  You REALLY
-                * don't want to touch it.
-                */
-               ndev->reg_ofs.rdb = ndev->reg_base + SNB_SDOORBELL_OFFSET;
-               ndev->reg_ofs.ldb = ndev->reg_base + SNB_PDOORBELL_OFFSET;
-               ndev->reg_ofs.ldb_mask = ndev->reg_base + SNB_PDBMSK_OFFSET;
-               /* Offset the start of the spads to correspond to whether it is
-                * primary or secondary
-                */
-               ndev->reg_ofs.spad_write = ndev->reg_base + SNB_SPAD_OFFSET +
-                                          ndev->limits.max_spads * 4;
-               ndev->reg_ofs.spad_read = ndev->reg_base + SNB_SPAD_OFFSET;
-               ndev->reg_ofs.bar2_xlat = ndev->reg_base + SNB_SBAR2XLAT_OFFSET;
-               ndev->reg_ofs.bar4_xlat = ndev->reg_base + SNB_SBAR4XLAT_OFFSET;
-               if (ndev->split_bar) {
-                       ndev->reg_ofs.bar5_xlat =
-                               ndev->reg_base + SNB_SBAR5XLAT_OFFSET;
-                       ndev->limits.max_mw = HSX_SPLITBAR_MAX_MW;
-               } else
-                       ndev->limits.max_mw = SNB_MAX_MW;
-               break;
-       case NTB_CONN_TRANSPARENT:
-               if (ndev->wa_flags & WA_SNB_ERR) {
-                       dev_err(&ndev->pdev->dev,
-                               "NTB-TRANSPARENT disabled due to hardware errata.\n");
-                       return -EINVAL;
-               }
-
-               /* Scratch pads need to have exclusive access from the primary
-                * or secondary side.  Halve the num spads so that each side can
-                * have an equal amount.
-                */
-               ndev->limits.max_spads = SNB_MAX_COMPAT_SPADS / 2;
-               ndev->limits.max_db_bits = SNB_MAX_DB_BITS;
-               ndev->reg_ofs.rdb = ndev->reg_base + SNB_PDOORBELL_OFFSET;
-               ndev->reg_ofs.ldb = ndev->reg_base + SNB_SDOORBELL_OFFSET;
-               ndev->reg_ofs.ldb_mask = ndev->reg_base + SNB_SDBMSK_OFFSET;
-               ndev->reg_ofs.spad_write = ndev->reg_base + SNB_SPAD_OFFSET;
-               /* Offset the start of the spads to correspond to whether it is
-                * primary or secondary
-                */
-               ndev->reg_ofs.spad_read = ndev->reg_base + SNB_SPAD_OFFSET +
-                                         ndev->limits.max_spads * 4;
-               ndev->reg_ofs.bar2_xlat = ndev->reg_base + SNB_PBAR2XLAT_OFFSET;
-               ndev->reg_ofs.bar4_xlat = ndev->reg_base + SNB_PBAR4XLAT_OFFSET;
-
-               if (ndev->split_bar) {
-                       ndev->reg_ofs.bar5_xlat =
-                               ndev->reg_base + SNB_PBAR5XLAT_OFFSET;
-                       ndev->limits.max_mw = HSX_SPLITBAR_MAX_MW;
-               } else
-                       ndev->limits.max_mw = SNB_MAX_MW;
-               break;
-       default:
-               /*
-                * We should never hit this; the detect function should have
-                * taken care of everything.
-                */
-               return -EINVAL;
-       }
-
-       ndev->reg_ofs.lnk_cntl = ndev->reg_base + SNB_NTBCNTL_OFFSET;
-       ndev->reg_ofs.lnk_stat = ndev->reg_base + SNB_SLINK_STATUS_OFFSET;
-       ndev->reg_ofs.spci_cmd = ndev->reg_base + SNB_PCICMD_OFFSET;
-
-       ndev->limits.msix_cnt = SNB_MSIX_CNT;
-       ndev->bits_per_vector = SNB_DB_BITS_PER_VEC;
-
-       return 0;
-}
-
-static int ntb_bwd_setup(struct ntb_device *ndev)
-{
-       int rc;
-       u32 val;
-
-       ndev->hw_type = BWD_HW;
-
-       rc = pci_read_config_dword(ndev->pdev, NTB_PPD_OFFSET, &val);
-       if (rc)
-               return rc;
-
-       switch ((val & BWD_PPD_CONN_TYPE) >> 8) {
-       case NTB_CONN_B2B:
-               ndev->conn_type = NTB_CONN_B2B;
-               break;
-       case NTB_CONN_RP:
-       default:
-               dev_err(&ndev->pdev->dev, "Unsupported NTB configuration\n");
-               return -EINVAL;
-       }
-
-       if (val & BWD_PPD_DEV_TYPE)
-               ndev->dev_type = NTB_DEV_DSD;
-       else
-               ndev->dev_type = NTB_DEV_USD;
-
-       /* Initiate PCI-E link training */
-       rc = pci_write_config_dword(ndev->pdev, NTB_PPD_OFFSET,
-                                   val | BWD_PPD_INIT_LINK);
-       if (rc)
-               return rc;
-
-       ndev->reg_ofs.ldb = ndev->reg_base + BWD_PDOORBELL_OFFSET;
-       ndev->reg_ofs.ldb_mask = ndev->reg_base + BWD_PDBMSK_OFFSET;
-       ndev->reg_ofs.rdb = ndev->reg_base + BWD_B2B_DOORBELL_OFFSET;
-       ndev->reg_ofs.bar2_xlat = ndev->reg_base + BWD_SBAR2XLAT_OFFSET;
-       ndev->reg_ofs.bar4_xlat = ndev->reg_base + BWD_SBAR4XLAT_OFFSET;
-       ndev->reg_ofs.lnk_cntl = ndev->reg_base + BWD_NTBCNTL_OFFSET;
-       ndev->reg_ofs.lnk_stat = ndev->reg_base + BWD_LINK_STATUS_OFFSET;
-       ndev->reg_ofs.spad_read = ndev->reg_base + BWD_SPAD_OFFSET;
-       ndev->reg_ofs.spad_write = ndev->reg_base + BWD_B2B_SPAD_OFFSET;
-       ndev->reg_ofs.spci_cmd = ndev->reg_base + BWD_PCICMD_OFFSET;
-       ndev->limits.max_mw = BWD_MAX_MW;
-       ndev->limits.max_spads = BWD_MAX_SPADS;
-       ndev->limits.max_db_bits = BWD_MAX_DB_BITS;
-       ndev->limits.msix_cnt = BWD_MSIX_CNT;
-       ndev->bits_per_vector = BWD_DB_BITS_PER_VEC;
-
-       /* Since bwd doesn't have a link interrupt, set up a poll timer */
-       INIT_DELAYED_WORK(&ndev->hb_timer, bwd_link_poll);
-       INIT_DELAYED_WORK(&ndev->lr_timer, bwd_link_recovery);
-       schedule_delayed_work(&ndev->hb_timer, NTB_HB_TIMEOUT);
-
-       return 0;
-}
-
-static int ntb_device_setup(struct ntb_device *ndev)
-{
-       int rc;
-
-       if (is_ntb_xeon(ndev))
-               rc = ntb_xeon_setup(ndev);
-       else if (is_ntb_atom(ndev))
-               rc = ntb_bwd_setup(ndev);
-       else
-               rc = -ENODEV;
-
-       if (rc)
-               return rc;
-
-       if (ndev->conn_type == NTB_CONN_B2B)
-               /* Enable Bus Master and Memory Space on the secondary side */
-               writew(PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER,
-                      ndev->reg_ofs.spci_cmd);
-
-       return 0;
-}
-
-static void ntb_device_free(struct ntb_device *ndev)
-{
-       if (is_ntb_atom(ndev)) {
-               cancel_delayed_work_sync(&ndev->hb_timer);
-               cancel_delayed_work_sync(&ndev->lr_timer);
-       }
-}
-
-static irqreturn_t bwd_callback_msix_irq(int irq, void *data)
-{
-       struct ntb_db_cb *db_cb = data;
-       struct ntb_device *ndev = db_cb->ndev;
-       unsigned long mask;
-
-       dev_dbg(&ndev->pdev->dev, "MSI-X irq %d received for DB %d\n", irq,
-               db_cb->db_num);
-
-       mask = readw(ndev->reg_ofs.ldb_mask);
-       set_bit(db_cb->db_num * ndev->bits_per_vector, &mask);
-       writew(mask, ndev->reg_ofs.ldb_mask);
-
-       tasklet_schedule(&db_cb->irq_work);
-
-       /* No need to check for the specific HB irq, any interrupt means
-        * we're connected.
-        */
-       ndev->last_ts = jiffies;
-
-       writeq((u64) 1 << db_cb->db_num, ndev->reg_ofs.ldb);
-
-       return IRQ_HANDLED;
-}
-
-static irqreturn_t xeon_callback_msix_irq(int irq, void *data)
-{
-       struct ntb_db_cb *db_cb = data;
-       struct ntb_device *ndev = db_cb->ndev;
-       unsigned long mask;
-
-       dev_dbg(&ndev->pdev->dev, "MSI-X irq %d received for DB %d\n", irq,
-               db_cb->db_num);
-
-       mask = readw(ndev->reg_ofs.ldb_mask);
-       set_bit(db_cb->db_num * ndev->bits_per_vector, &mask);
-       writew(mask, ndev->reg_ofs.ldb_mask);
-
-       tasklet_schedule(&db_cb->irq_work);
-
-       /* On Sandybridge, there are 16 bits in the interrupt register
-        * but only 4 vectors.  So, 5 bits are assigned to the first 3
-        * vectors, with the 4th having a single bit for link
-        * interrupts.
-        */
-       writew(((1 << ndev->bits_per_vector) - 1) <<
-              (db_cb->db_num * ndev->bits_per_vector), ndev->reg_ofs.ldb);
-
-       return IRQ_HANDLED;
-}
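
Put concretely, and assuming SNB_DB_BITS_PER_VEC is 5 as the comment above implies, vector 0 owns doorbell bits 0-4, vector 1 bits 5-9, vector 2 bits 10-14, and the event vector only the link bit (bit 15). A small sketch of the mask each callback vector acknowledges, with a hypothetical helper name:

/* Illustrative only: LDB bits acknowledged for a given callback vector. */
static u16 example_snb_vector_mask(unsigned int db_num,
				   unsigned int bits_per_vector)
{
	/* db_num 0..2 with 5 bits per vector -> 0x001f, 0x03e0, 0x7c00 */
	return ((1 << bits_per_vector) - 1) << (db_num * bits_per_vector);
}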
-
-/* Since we do not have a HW doorbell in BWD, this is only used in JF/JT */
-static irqreturn_t xeon_event_msix_irq(int irq, void *dev)
-{
-       struct ntb_device *ndev = dev;
-       int rc;
-
-       dev_dbg(&ndev->pdev->dev, "MSI-X irq %d received for Events\n", irq);
-
-       rc = ntb_link_status(ndev);
-       if (rc)
-               dev_err(&ndev->pdev->dev, "Error determining link status\n");
-
-       /* bit 15 is always the link bit */
-       writew(1 << SNB_LINK_DB, ndev->reg_ofs.ldb);
-
-       return IRQ_HANDLED;
-}
-
-static irqreturn_t ntb_interrupt(int irq, void *dev)
-{
-       struct ntb_device *ndev = dev;
-       unsigned int i = 0;
-
-       if (is_ntb_atom(ndev)) {
-               u64 ldb = readq(ndev->reg_ofs.ldb);
-
-               dev_dbg(&ndev->pdev->dev, "irq %d - ldb = %Lx\n", irq, ldb);
-
-               while (ldb) {
-                       i = __ffs(ldb);
-                       ldb &= ldb - 1;
-                       bwd_callback_msix_irq(irq, &ndev->db_cb[i]);
-               }
-       } else {
-               u16 ldb = readw(ndev->reg_ofs.ldb);
-
-               dev_dbg(&ndev->pdev->dev, "irq %d - ldb = %x\n", irq, ldb);
-
-               if (ldb & SNB_DB_HW_LINK) {
-                       xeon_event_msix_irq(irq, dev);
-                       ldb &= ~SNB_DB_HW_LINK;
-               }
-
-               while (ldb) {
-                       i = __ffs(ldb);
-                       ldb &= ldb - 1;
-                       xeon_callback_msix_irq(irq, &ndev->db_cb[i]);
-               }
-       }
-
-       return IRQ_HANDLED;
-}
-
-static int ntb_setup_snb_msix(struct ntb_device *ndev, int msix_entries)
-{
-       struct pci_dev *pdev = ndev->pdev;
-       struct msix_entry *msix;
-       int rc, i;
-
-       if (msix_entries < ndev->limits.msix_cnt)
-               return -ENOSPC;
-
-       rc = pci_enable_msix_exact(pdev, ndev->msix_entries, msix_entries);
-       if (rc < 0)
-               return rc;
-
-       for (i = 0; i < msix_entries; i++) {
-               msix = &ndev->msix_entries[i];
-               WARN_ON(!msix->vector);
-
-               if (i == msix_entries - 1) {
-                       rc = request_irq(msix->vector,
-                                        xeon_event_msix_irq, 0,
-                                        "ntb-event-msix", ndev);
-                       if (rc)
-                               goto err;
-               } else {
-                       rc = request_irq(msix->vector,
-                                        xeon_callback_msix_irq, 0,
-                                        "ntb-callback-msix",
-                                        &ndev->db_cb[i]);
-                       if (rc)
-                               goto err;
-               }
-       }
-
-       ndev->num_msix = msix_entries;
-       ndev->max_cbs = msix_entries - 1;
-
-       return 0;
-
-err:
-       while (--i >= 0) {
-               /* Code never reaches here for entry nr 'ndev->num_msix - 1' */
-               msix = &ndev->msix_entries[i];
-               free_irq(msix->vector, &ndev->db_cb[i]);
-       }
-
-       pci_disable_msix(pdev);
-       ndev->num_msix = 0;
-
-       return rc;
-}
-
-static int ntb_setup_bwd_msix(struct ntb_device *ndev, int msix_entries)
-{
-       struct pci_dev *pdev = ndev->pdev;
-       struct msix_entry *msix;
-       int rc, i;
-
-       msix_entries = pci_enable_msix_range(pdev, ndev->msix_entries,
-                                            1, msix_entries);
-       if (msix_entries < 0)
-               return msix_entries;
-
-       for (i = 0; i < msix_entries; i++) {
-               msix = &ndev->msix_entries[i];
-               WARN_ON(!msix->vector);
-
-               rc = request_irq(msix->vector, bwd_callback_msix_irq, 0,
-                                "ntb-callback-msix", &ndev->db_cb[i]);
-               if (rc)
-                       goto err;
-       }
-
-       ndev->num_msix = msix_entries;
-       ndev->max_cbs = msix_entries;
-
-       return 0;
-
-err:
-       while (--i >= 0)
-               free_irq(msix->vector, &ndev->db_cb[i]);
-
-       pci_disable_msix(pdev);
-       ndev->num_msix = 0;
-
-       return rc;
-}
-
-static int ntb_setup_msix(struct ntb_device *ndev)
-{
-       struct pci_dev *pdev = ndev->pdev;
-       int msix_entries;
-       int rc, i;
-
-       msix_entries = pci_msix_vec_count(pdev);
-       if (msix_entries < 0) {
-               rc = msix_entries;
-               goto err;
-       } else if (msix_entries > ndev->limits.msix_cnt) {
-               rc = -EINVAL;
-               goto err;
-       }
-
-       ndev->msix_entries = kmalloc(sizeof(struct msix_entry) * msix_entries,
-                                    GFP_KERNEL);
-       if (!ndev->msix_entries) {
-               rc = -ENOMEM;
-               goto err;
-       }
-
-       for (i = 0; i < msix_entries; i++)
-               ndev->msix_entries[i].entry = i;
-
-       if (is_ntb_atom(ndev))
-               rc = ntb_setup_bwd_msix(ndev, msix_entries);
-       else
-               rc = ntb_setup_snb_msix(ndev, msix_entries);
-       if (rc)
-               goto err1;
-
-       return 0;
-
-err1:
-       kfree(ndev->msix_entries);
-err:
-       dev_err(&pdev->dev, "Error allocating MSI-X interrupt\n");
-       return rc;
-}
-
-static int ntb_setup_msi(struct ntb_device *ndev)
-{
-       struct pci_dev *pdev = ndev->pdev;
-       int rc;
-
-       rc = pci_enable_msi(pdev);
-       if (rc)
-               return rc;
-
-       rc = request_irq(pdev->irq, ntb_interrupt, 0, "ntb-msi", ndev);
-       if (rc) {
-               pci_disable_msi(pdev);
-               dev_err(&pdev->dev, "Error allocating MSI interrupt\n");
-               return rc;
-       }
-
-       return 0;
-}
-
-static int ntb_setup_intx(struct ntb_device *ndev)
-{
-       struct pci_dev *pdev = ndev->pdev;
-       int rc;
-
-       /* Verify intx is enabled */
-       pci_intx(pdev, 1);
-
-       rc = request_irq(pdev->irq, ntb_interrupt, IRQF_SHARED, "ntb-intx",
-                        ndev);
-       if (rc)
-               return rc;
-
-       return 0;
-}
-
-static int ntb_setup_interrupts(struct ntb_device *ndev)
-{
-       int rc;
-
-       /* On BWD, disable all interrupts.  On SNB, disable all but Link
-        * Interrupt.  The rest will be unmasked as callbacks are registered.
-        */
-       if (is_ntb_atom(ndev))
-               writeq(~0, ndev->reg_ofs.ldb_mask);
-       else {
-               u16 var = 1 << SNB_LINK_DB;
-               writew(~var, ndev->reg_ofs.ldb_mask);
-       }
-
-       rc = ntb_setup_msix(ndev);
-       if (!rc)
-               goto done;
-
-       ndev->bits_per_vector = 1;
-       ndev->max_cbs = ndev->limits.max_db_bits;
-
-       rc = ntb_setup_msi(ndev);
-       if (!rc)
-               goto done;
-
-       rc = ntb_setup_intx(ndev);
-       if (rc) {
-               dev_err(&ndev->pdev->dev, "no usable interrupts\n");
-               return rc;
-       }
-
-done:
-       return 0;
-}
-
-static void ntb_free_interrupts(struct ntb_device *ndev)
-{
-       struct pci_dev *pdev = ndev->pdev;
-
-       /* mask interrupts */
-       if (is_ntb_atom(ndev))
-               writeq(~0, ndev->reg_ofs.ldb_mask);
-       else
-               writew(~0, ndev->reg_ofs.ldb_mask);
-
-       if (ndev->num_msix) {
-               struct msix_entry *msix;
-               u32 i;
-
-               for (i = 0; i < ndev->num_msix; i++) {
-                       msix = &ndev->msix_entries[i];
-                       if (is_ntb_xeon(ndev) && i == ndev->num_msix - 1)
-                               free_irq(msix->vector, ndev);
-                       else
-                               free_irq(msix->vector, &ndev->db_cb[i]);
-               }
-               pci_disable_msix(pdev);
-               kfree(ndev->msix_entries);
-       } else {
-               free_irq(pdev->irq, ndev);
-
-               if (pci_dev_msi_enabled(pdev))
-                       pci_disable_msi(pdev);
-       }
-}
-
-static int ntb_create_callbacks(struct ntb_device *ndev)
-{
-       int i;
-
-       /* Chicken-egg issue.  We won't know how many callbacks are necessary
-        * until we see how many MSI-X vectors we get, but these pointers need
-        * to be passed into the MSI-X register function.  So, we allocate the
-        * max, knowing that they might not all be used, to work around this.
-        */
-       ndev->db_cb = kcalloc(ndev->limits.max_db_bits,
-                             sizeof(struct ntb_db_cb),
-                             GFP_KERNEL);
-       if (!ndev->db_cb)
-               return -ENOMEM;
-
-       for (i = 0; i < ndev->limits.max_db_bits; i++) {
-               ndev->db_cb[i].db_num = i;
-               ndev->db_cb[i].ndev = ndev;
-       }
-
-       return 0;
-}
-
-static void ntb_free_callbacks(struct ntb_device *ndev)
-{
-       int i;
-
-       for (i = 0; i < ndev->limits.max_db_bits; i++)
-               ntb_unregister_db_callback(ndev, i);
-
-       kfree(ndev->db_cb);
-}
-
-static ssize_t ntb_debugfs_read(struct file *filp, char __user *ubuf,
-                               size_t count, loff_t *offp)
-{
-       struct ntb_device *ndev;
-       char *buf;
-       ssize_t ret, offset, out_count;
-
-       out_count = 500;
-
-       buf = kmalloc(out_count, GFP_KERNEL);
-       if (!buf)
-               return -ENOMEM;
-
-       ndev = filp->private_data;
-       offset = 0;
-       offset += snprintf(buf + offset, out_count - offset,
-                          "NTB Device Information:\n");
-       offset += snprintf(buf + offset, out_count - offset,
-                          "Connection Type - \t\t%s\n",
-                          ndev->conn_type == NTB_CONN_TRANSPARENT ?
-                          "Transparent" : (ndev->conn_type == NTB_CONN_B2B) ?
-                          "Back to back" : "Root Port");
-       offset += snprintf(buf + offset, out_count - offset,
-                          "Device Type - \t\t\t%s\n",
-                          ndev->dev_type == NTB_DEV_USD ?
-                          "DSD/USP" : "USD/DSP");
-       offset += snprintf(buf + offset, out_count - offset,
-                          "Max Number of Callbacks - \t%u\n",
-                          ntb_max_cbs(ndev));
-       offset += snprintf(buf + offset, out_count - offset,
-                          "Link Status - \t\t\t%s\n",
-                          ntb_hw_link_status(ndev) ? "Up" : "Down");
-       if (ntb_hw_link_status(ndev)) {
-               offset += snprintf(buf + offset, out_count - offset,
-                                  "Link Speed - \t\t\tPCI-E Gen %u\n",
-                                  ndev->link_speed);
-               offset += snprintf(buf + offset, out_count - offset,
-                                  "Link Width - \t\t\tx%u\n",
-                                  ndev->link_width);
-       }
-
-       if (is_ntb_xeon(ndev)) {
-               u32 status32;
-               u16 status16;
-               int rc;
-
-               offset += snprintf(buf + offset, out_count - offset,
-                                  "\nNTB Device Statistics:\n");
-               offset += snprintf(buf + offset, out_count - offset,
-                                  "Upstream Memory Miss - \t%u\n",
-                                  readw(ndev->reg_base +
-                                        SNB_USMEMMISS_OFFSET));
-
-               offset += snprintf(buf + offset, out_count - offset,
-                                  "\nNTB Hardware Errors:\n");
-
-               rc = pci_read_config_word(ndev->pdev, SNB_DEVSTS_OFFSET,
-                                         &status16);
-               if (!rc)
-                       offset += snprintf(buf + offset, out_count - offset,
-                                          "DEVSTS - \t%#06x\n", status16);
-
-               rc = pci_read_config_word(ndev->pdev, SNB_LINK_STATUS_OFFSET,
-                                         &status16);
-               if (!rc)
-                       offset += snprintf(buf + offset, out_count - offset,
-                                          "LNKSTS - \t%#06x\n", status16);
-
-               rc = pci_read_config_dword(ndev->pdev, SNB_UNCERRSTS_OFFSET,
-                                          &status32);
-               if (!rc)
-                       offset += snprintf(buf + offset, out_count - offset,
-                                          "UNCERRSTS - \t%#010x\n", status32);
-
-               rc = pci_read_config_dword(ndev->pdev, SNB_CORERRSTS_OFFSET,
-                                          &status32);
-               if (!rc)
-                       offset += snprintf(buf + offset, out_count - offset,
-                                          "CORERRSTS - \t%#010x\n", status32);
-       }
-
-       if (offset > out_count)
-               offset = out_count;
-
-       ret = simple_read_from_buffer(ubuf, count, offp, buf, offset);
-       kfree(buf);
-       return ret;
-}
-
-static const struct file_operations ntb_debugfs_info = {
-       .owner = THIS_MODULE,
-       .open = simple_open,
-       .read = ntb_debugfs_read,
-};
-
-static void ntb_setup_debugfs(struct ntb_device *ndev)
-{
-       if (!debugfs_initialized())
-               return;
-
-       if (!debugfs_dir)
-               debugfs_dir = debugfs_create_dir(KBUILD_MODNAME, NULL);
-
-       ndev->debugfs_dir = debugfs_create_dir(pci_name(ndev->pdev),
-                                              debugfs_dir);
-       if (ndev->debugfs_dir)
-               ndev->debugfs_info = debugfs_create_file("info", S_IRUSR,
-                                                        ndev->debugfs_dir,
-                                                        ndev,
-                                                        &ntb_debugfs_info);
-}
-
-static void ntb_free_debugfs(struct ntb_device *ndev)
-{
-       debugfs_remove_recursive(ndev->debugfs_dir);
-
-       if (debugfs_dir && simple_empty(debugfs_dir)) {
-               debugfs_remove_recursive(debugfs_dir);
-               debugfs_dir = NULL;
-       }
-}
-
-static void ntb_hw_link_up(struct ntb_device *ndev)
-{
-       if (ndev->conn_type == NTB_CONN_TRANSPARENT)
-               ntb_link_event(ndev, NTB_LINK_UP);
-       else {
-               u32 ntb_cntl;
-
-               /* Let's bring the NTB link up */
-               ntb_cntl = readl(ndev->reg_ofs.lnk_cntl);
-               ntb_cntl &= ~(NTB_CNTL_LINK_DISABLE | NTB_CNTL_CFG_LOCK);
-               ntb_cntl |= NTB_CNTL_P2S_BAR23_SNOOP | NTB_CNTL_S2P_BAR23_SNOOP;
-               ntb_cntl |= NTB_CNTL_P2S_BAR4_SNOOP | NTB_CNTL_S2P_BAR4_SNOOP;
-               if (ndev->split_bar)
-                       ntb_cntl |= NTB_CNTL_P2S_BAR5_SNOOP |
-                                   NTB_CNTL_S2P_BAR5_SNOOP;
-
-               writel(ntb_cntl, ndev->reg_ofs.lnk_cntl);
-       }
-}
-
-static void ntb_hw_link_down(struct ntb_device *ndev)
-{
-       u32 ntb_cntl;
-
-       if (ndev->conn_type == NTB_CONN_TRANSPARENT) {
-               ntb_link_event(ndev, NTB_LINK_DOWN);
-               return;
-       }
-
-       /* Bring NTB link down */
-       ntb_cntl = readl(ndev->reg_ofs.lnk_cntl);
-       ntb_cntl &= ~(NTB_CNTL_P2S_BAR23_SNOOP | NTB_CNTL_S2P_BAR23_SNOOP);
-       ntb_cntl &= ~(NTB_CNTL_P2S_BAR4_SNOOP | NTB_CNTL_S2P_BAR4_SNOOP);
-       if (ndev->split_bar)
-               ntb_cntl &= ~(NTB_CNTL_P2S_BAR5_SNOOP |
-                             NTB_CNTL_S2P_BAR5_SNOOP);
-       ntb_cntl |= NTB_CNTL_LINK_DISABLE | NTB_CNTL_CFG_LOCK;
-       writel(ntb_cntl, ndev->reg_ofs.lnk_cntl);
-}
-
-static void ntb_max_mw_detect(struct ntb_device *ndev)
-{
-       if (ndev->split_bar)
-               ndev->limits.max_mw = HSX_SPLITBAR_MAX_MW;
-       else
-               ndev->limits.max_mw = SNB_MAX_MW;
-}
-
-static int ntb_xeon_detect(struct ntb_device *ndev)
-{
-       int rc, bars_mask;
-       u32 bars;
-       u8 ppd;
-
-       ndev->hw_type = SNB_HW;
-
-       rc = pci_read_config_byte(ndev->pdev, NTB_PPD_OFFSET, &ppd);
-       if (rc)
-               return -EIO;
-
-       if (ppd & SNB_PPD_DEV_TYPE)
-               ndev->dev_type = NTB_DEV_USD;
-       else
-               ndev->dev_type = NTB_DEV_DSD;
-
-       ndev->split_bar = (ppd & SNB_PPD_SPLIT_BAR) ? 1 : 0;
-
-       switch (ppd & SNB_PPD_CONN_TYPE) {
-       case NTB_CONN_B2B:
-               dev_info(&ndev->pdev->dev, "Conn Type = B2B\n");
-               ndev->conn_type = NTB_CONN_B2B;
-               break;
-       case NTB_CONN_RP:
-               dev_info(&ndev->pdev->dev, "Conn Type = RP\n");
-               ndev->conn_type = NTB_CONN_RP;
-               break;
-       case NTB_CONN_TRANSPARENT:
-               dev_info(&ndev->pdev->dev, "Conn Type = TRANSPARENT\n");
-               ndev->conn_type = NTB_CONN_TRANSPARENT;
-               /*
-                * This mode defaults to USD/DSP.  The HW does not report it
-                * properly in transparent mode, as it has no knowledge of
-                * NTB.  We just force the correct setting here.
-                */
-               ndev->dev_type = NTB_DEV_USD;
-
-               /*
-                * This is how the transparent side figures out whether we are
-                * doing split BAR or not, since there is no way for the hw on
-                * the transparent side to know and set the PPD.
-                */
-               bars_mask = pci_select_bars(ndev->pdev, IORESOURCE_MEM);
-               bars = hweight32(bars_mask);
-               if (bars == (HSX_SPLITBAR_MAX_MW + 1))
-                       ndev->split_bar = 1;
-
-               break;
-       default:
-               dev_err(&ndev->pdev->dev, "Unknown PPD %x\n", ppd);
-               return -ENODEV;
-       }
-
-       ntb_max_mw_detect(ndev);
-
-       return 0;
-}
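
As a rough sketch of the heuristic above: pci_select_bars() returns a bitmask of the BARs backed by memory resources, so counting its set bits distinguishes the split-BAR layout (BARs 0, 2, 4 and 5 populated, i.e. four bits set, assuming HSX_SPLITBAR_MAX_MW is 3) from the non-split one. The helper name and parameter below are hypothetical:

/* Illustrative only: does the populated-BAR count indicate split BAR? */
static bool example_is_split_bar(struct pci_dev *pdev,
				 unsigned int splitbar_mws)
{
	int bars_mask = pci_select_bars(pdev, IORESOURCE_MEM);

	/* e.g. BARs 0, 2, 4, 5 -> hweight32() == 4 == splitbar_mws + 1 */
	return hweight32(bars_mask) == splitbar_mws + 1;
}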
-
-static int ntb_atom_detect(struct ntb_device *ndev)
-{
-       int rc;
-       u32 ppd;
-
-       ndev->hw_type = BWD_HW;
-       ndev->limits.max_mw = BWD_MAX_MW;
-
-       rc = pci_read_config_dword(ndev->pdev, NTB_PPD_OFFSET, &ppd);
-       if (rc)
-               return rc;
-
-       switch ((ppd & BWD_PPD_CONN_TYPE) >> 8) {
-       case NTB_CONN_B2B:
-               dev_info(&ndev->pdev->dev, "Conn Type = B2B\n");
-               ndev->conn_type = NTB_CONN_B2B;
-               break;
-       case NTB_CONN_RP:
-       default:
-               dev_err(&ndev->pdev->dev, "Unsupported NTB configuration\n");
-               return -EINVAL;
-       }
-
-       if (ppd & BWD_PPD_DEV_TYPE)
-               ndev->dev_type = NTB_DEV_DSD;
-       else
-               ndev->dev_type = NTB_DEV_USD;
-
-       return 0;
-}
-
-static int ntb_device_detect(struct ntb_device *ndev)
-{
-       int rc;
-
-       if (is_ntb_xeon(ndev))
-               rc = ntb_xeon_detect(ndev);
-       else if (is_ntb_atom(ndev))
-               rc = ntb_atom_detect(ndev);
-       else
-               rc = -ENODEV;
-
-       dev_info(&ndev->pdev->dev, "Device Type = %s\n",
-                ndev->dev_type == NTB_DEV_USD ? "USD/DSP" : "DSD/USP");
-
-       return 0;
-}
-
-static int ntb_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
-{
-       struct ntb_device *ndev;
-       int rc, i;
-
-       ndev = kzalloc(sizeof(struct ntb_device), GFP_KERNEL);
-       if (!ndev)
-               return -ENOMEM;
-
-       ndev->pdev = pdev;
-
-       ntb_set_errata_flags(ndev);
-
-       ndev->link_status = NTB_LINK_DOWN;
-       pci_set_drvdata(pdev, ndev);
-       ntb_setup_debugfs(ndev);
-
-       rc = pci_enable_device(pdev);
-       if (rc)
-               goto err;
-
-       pci_set_master(ndev->pdev);
-
-       rc = ntb_device_detect(ndev);
-       if (rc)
-               goto err;
-
-       ndev->mw = kcalloc(ndev->limits.max_mw, sizeof(struct ntb_mw),
-                          GFP_KERNEL);
-       if (!ndev->mw) {
-               rc = -ENOMEM;
-               goto err1;
-       }
-
-       if (ndev->split_bar)
-               rc = pci_request_selected_regions(pdev, NTB_SPLITBAR_MASK,
-                                                 KBUILD_MODNAME);
-       else
-               rc = pci_request_selected_regions(pdev, NTB_BAR_MASK,
-                                                 KBUILD_MODNAME);
-
-       if (rc)
-               goto err2;
-
-       ndev->reg_base = pci_ioremap_bar(pdev, NTB_BAR_MMIO);
-       if (!ndev->reg_base) {
-               dev_warn(&pdev->dev, "Cannot remap BAR 0\n");
-               rc = -EIO;
-               goto err3;
-       }
-
-       for (i = 0; i < ndev->limits.max_mw; i++) {
-               ndev->mw[i].bar_sz = pci_resource_len(pdev, MW_TO_BAR(i));
-
-               /*
-                * With the errata we need to steal the last of the memory
-                * windows for the workaround, and it points at MMIO registers.
-                */
-               if ((ndev->wa_flags & WA_SNB_ERR) &&
-                   (i == (ndev->limits.max_mw - 1))) {
-                       ndev->mw[i].vbase =
-                               ioremap_nocache(pci_resource_start(pdev,
-                                                       MW_TO_BAR(i)),
-                                               ndev->mw[i].bar_sz);
-               } else {
-                       ndev->mw[i].vbase =
-                               ioremap_wc(pci_resource_start(pdev,
-                                                       MW_TO_BAR(i)),
-                                          ndev->mw[i].bar_sz);
-               }
-
-               dev_info(&pdev->dev, "MW %d size %llu\n", i,
-                        (unsigned long long) ndev->mw[i].bar_sz);
-               if (!ndev->mw[i].vbase) {
-                       dev_warn(&pdev->dev, "Cannot remap BAR %d\n",
-                                MW_TO_BAR(i));
-                       rc = -EIO;
-                       goto err4;
-               }
-       }
-
-       rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
-       if (rc) {
-               rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
-               if (rc)
-                       goto err4;
-
-               dev_warn(&pdev->dev, "Cannot DMA highmem\n");
-       }
-
-       rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
-       if (rc) {
-               rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
-               if (rc)
-                       goto err4;
-
-               dev_warn(&pdev->dev, "Cannot DMA consistent highmem\n");
-       }
-
-       rc = ntb_device_setup(ndev);
-       if (rc)
-               goto err4;
-
-       rc = ntb_create_callbacks(ndev);
-       if (rc)
-               goto err5;
-
-       rc = ntb_setup_interrupts(ndev);
-       if (rc)
-               goto err6;
-
-       /* The scratchpad registers keep the values between rmmod/insmod,
-        * blast them now
-        */
-       for (i = 0; i < ndev->limits.max_spads; i++) {
-               ntb_write_local_spad(ndev, i, 0);
-               ntb_write_remote_spad(ndev, i, 0);
-       }
-
-       rc = ntb_transport_init(pdev);
-       if (rc)
-               goto err7;
-
-       ntb_hw_link_up(ndev);
-
-       return 0;
-
-err7:
-       ntb_free_interrupts(ndev);
-err6:
-       ntb_free_callbacks(ndev);
-err5:
-       ntb_device_free(ndev);
-err4:
-       for (i--; i >= 0; i--)
-               iounmap(ndev->mw[i].vbase);
-       iounmap(ndev->reg_base);
-err3:
-       if (ndev->split_bar)
-               pci_release_selected_regions(pdev, NTB_SPLITBAR_MASK);
-       else
-               pci_release_selected_regions(pdev, NTB_BAR_MASK);
-err2:
-       kfree(ndev->mw);
-err1:
-       pci_disable_device(pdev);
-err:
-       ntb_free_debugfs(ndev);
-       kfree(ndev);
-
-       dev_err(&pdev->dev, "Error loading %s module\n", KBUILD_MODNAME);
-       return rc;
-}
-
-static void ntb_pci_remove(struct pci_dev *pdev)
-{
-       struct ntb_device *ndev = pci_get_drvdata(pdev);
-       int i;
-
-       ntb_hw_link_down(ndev);
-
-       ntb_transport_free(ndev->ntb_transport);
-
-       ntb_free_interrupts(ndev);
-       ntb_free_callbacks(ndev);
-       ntb_device_free(ndev);
-
-       /* need to reset max_mw limits so we can unmap properly */
-       if (ndev->hw_type == SNB_HW)
-               ntb_max_mw_detect(ndev);
-
-       for (i = 0; i < ndev->limits.max_mw; i++)
-               iounmap(ndev->mw[i].vbase);
-
-       kfree(ndev->mw);
-       iounmap(ndev->reg_base);
-       if (ndev->split_bar)
-               pci_release_selected_regions(pdev, NTB_SPLITBAR_MASK);
-       else
-               pci_release_selected_regions(pdev, NTB_BAR_MASK);
-       pci_disable_device(pdev);
-       ntb_free_debugfs(ndev);
-       kfree(ndev);
-}
-
-static struct pci_driver ntb_pci_driver = {
-       .name = KBUILD_MODNAME,
-       .id_table = ntb_pci_tbl,
-       .probe = ntb_pci_probe,
-       .remove = ntb_pci_remove,
-};
-
-module_pci_driver(ntb_pci_driver);
diff --git a/drivers/ntb/ntb_hw.h b/drivers/ntb/ntb_hw.h
deleted file mode 100644 (file)
index 96de5fc..0000000
+++ /dev/null
@@ -1,256 +0,0 @@
-/*
- * This file is provided under a dual BSD/GPLv2 license.  When using or
- *   redistributing this file, you may do so under either license.
- *
- *   GPL LICENSE SUMMARY
- *
- *   Copyright(c) 2012 Intel Corporation. All rights reserved.
- *
- *   This program is free software; you can redistribute it and/or modify
- *   it under the terms of version 2 of the GNU General Public License as
- *   published by the Free Software Foundation.
- *
- *   BSD LICENSE
- *
- *   Copyright(c) 2012 Intel Corporation. All rights reserved.
- *
- *   Redistribution and use in source and binary forms, with or without
- *   modification, are permitted provided that the following conditions
- *   are met:
- *
- *     * Redistributions of source code must retain the above copyright
- *       notice, this list of conditions and the following disclaimer.
- *     * Redistributions in binary form must reproduce the above copyright
- *       notice, this list of conditions and the following disclaimer in
- *       the documentation and/or other materials provided with the
- *       distribution.
- *     * Neither the name of Intel Corporation nor the names of its
- *       contributors may be used to endorse or promote products derived
- *       from this software without specific prior written permission.
- *
- *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * Intel PCIe NTB Linux driver
- *
- * Contact Information:
- * Jon Mason <jon.mason@intel.com>
- */
-#include <linux/ntb.h>
-
-#define PCI_DEVICE_ID_INTEL_NTB_B2B_JSF                0x3725
-#define PCI_DEVICE_ID_INTEL_NTB_PS_JSF         0x3726
-#define PCI_DEVICE_ID_INTEL_NTB_SS_JSF         0x3727
-#define PCI_DEVICE_ID_INTEL_NTB_B2B_SNB                0x3C0D
-#define PCI_DEVICE_ID_INTEL_NTB_PS_SNB         0x3C0E
-#define PCI_DEVICE_ID_INTEL_NTB_SS_SNB         0x3C0F
-#define PCI_DEVICE_ID_INTEL_NTB_B2B_IVT                0x0E0D
-#define PCI_DEVICE_ID_INTEL_NTB_PS_IVT         0x0E0E
-#define PCI_DEVICE_ID_INTEL_NTB_SS_IVT         0x0E0F
-#define PCI_DEVICE_ID_INTEL_NTB_B2B_HSX                0x2F0D
-#define PCI_DEVICE_ID_INTEL_NTB_PS_HSX         0x2F0E
-#define PCI_DEVICE_ID_INTEL_NTB_SS_HSX         0x2F0F
-#define PCI_DEVICE_ID_INTEL_NTB_B2B_BWD                0x0C4E
-
-#ifndef readq
-static inline u64 readq(void __iomem *addr)
-{
-       return readl(addr) | (((u64) readl(addr + 4)) << 32LL);
-}
-#endif
-
-#ifndef writeq
-static inline void writeq(u64 val, void __iomem *addr)
-{
-       writel(val & 0xffffffff, addr);
-       writel(val >> 32, addr + 4);
-}
-#endif
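
For reference, with the fallback above a 64-bit MMIO write on a platform without a native writeq() decomposes into two 32-bit writes (low word first), so it is not atomic with respect to the device. A worked example of how one call expands, wrapped in a hypothetical helper:

/* Illustrative only: how one 64-bit write expands with the fallback above. */
static void example_split_writeq(void __iomem *addr)
{
	/* equivalent to writeq(0x1122334455667788ULL, addr): */
	writel(0x55667788, addr);	/* low 32 bits at offset 0 */
	writel(0x11223344, addr + 4);	/* high 32 bits at offset 4 */
}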
-
-#define NTB_BAR_MMIO           0
-#define NTB_BAR_23             2
-#define NTB_BAR_4              4
-#define NTB_BAR_5              5
-
-#define NTB_BAR_MASK           ((1 << NTB_BAR_MMIO) | (1 << NTB_BAR_23) |\
-                                (1 << NTB_BAR_4))
-#define NTB_SPLITBAR_MASK      ((1 << NTB_BAR_MMIO) | (1 << NTB_BAR_23) |\
-                                (1 << NTB_BAR_4) | (1 << NTB_BAR_5))
-
-#define NTB_HB_TIMEOUT         msecs_to_jiffies(1000)
-
-enum ntb_hw_event {
-       NTB_EVENT_SW_EVENT0 = 0,
-       NTB_EVENT_SW_EVENT1,
-       NTB_EVENT_SW_EVENT2,
-       NTB_EVENT_HW_ERROR,
-       NTB_EVENT_HW_LINK_UP,
-       NTB_EVENT_HW_LINK_DOWN,
-};
-
-struct ntb_mw {
-       dma_addr_t phys_addr;
-       void __iomem *vbase;
-       resource_size_t bar_sz;
-};
-
-struct ntb_db_cb {
-       int (*callback)(void *data, int db_num);
-       unsigned int db_num;
-       void *data;
-       struct ntb_device *ndev;
-       struct tasklet_struct irq_work;
-};
-
-#define WA_SNB_ERR     0x00000001
-
-struct ntb_device {
-       struct pci_dev *pdev;
-       struct msix_entry *msix_entries;
-       void __iomem *reg_base;
-       struct ntb_mw *mw;
-       struct {
-               unsigned char max_mw;
-               unsigned char max_spads;
-               unsigned char max_db_bits;
-               unsigned char msix_cnt;
-       } limits;
-       struct {
-               void __iomem *ldb;
-               void __iomem *ldb_mask;
-               void __iomem *rdb;
-               void __iomem *bar2_xlat;
-               void __iomem *bar4_xlat;
-               void __iomem *bar5_xlat;
-               void __iomem *spad_write;
-               void __iomem *spad_read;
-               void __iomem *lnk_cntl;
-               void __iomem *lnk_stat;
-               void __iomem *spci_cmd;
-       } reg_ofs;
-       struct ntb_transport *ntb_transport;
-       void (*event_cb)(void *handle, enum ntb_hw_event event);
-
-       struct ntb_db_cb *db_cb;
-       unsigned char hw_type;
-       unsigned char conn_type;
-       unsigned char dev_type;
-       unsigned char num_msix;
-       unsigned char bits_per_vector;
-       unsigned char max_cbs;
-       unsigned char link_width;
-       unsigned char link_speed;
-       unsigned char link_status;
-       unsigned char split_bar;
-
-       struct delayed_work hb_timer;
-       unsigned long last_ts;
-
-       struct delayed_work lr_timer;
-
-       struct dentry *debugfs_dir;
-       struct dentry *debugfs_info;
-
-       unsigned int wa_flags;
-};
-
-/**
- * ntb_max_cbs() - return the max callbacks
- * @ndev: pointer to ntb_device instance
- *
- * Given the ntb pointer, return the maximum number of callbacks
- *
- * RETURNS: the maximum number of callbacks
- */
-static inline unsigned char ntb_max_cbs(struct ntb_device *ndev)
-{
-       return ndev->max_cbs;
-}
-
-/**
- * ntb_max_mw() - return the max number of memory windows
- * @ndev: pointer to ntb_device instance
- *
- * Given the ntb pointer, return the maximum number of memory windows
- *
- * RETURNS: the maximum number of memory windows
- */
-static inline unsigned char ntb_max_mw(struct ntb_device *ndev)
-{
-       return ndev->limits.max_mw;
-}
-
-/**
- * ntb_hw_link_status() - return the hardware link status
- * @ndev: pointer to ntb_device instance
- *
- * Returns true if the hardware is connected to the remote system
- *
- * RETURNS: true or false based on the hardware link state
- */
-static inline bool ntb_hw_link_status(struct ntb_device *ndev)
-{
-       return ndev->link_status == NTB_LINK_UP;
-}
-
-/**
- * ntb_query_pdev() - return the pci_dev pointer
- * @ndev: pointer to ntb_device instance
- *
- * Given the ntb pointer, return the pci_dev pointer for the NTB hardware device
- *
- * RETURNS: a pointer to the ntb pci_dev
- */
-static inline struct pci_dev *ntb_query_pdev(struct ntb_device *ndev)
-{
-       return ndev->pdev;
-}
-
-/**
- * ntb_query_debugfs() - return the debugfs pointer
- * @ndev: pointer to ntb_device instance
- *
- * Given the ntb pointer, return the debugfs directory pointer for the NTB
- * hardware device
- *
- * RETURNS: a pointer to the debugfs directory
- */
-static inline struct dentry *ntb_query_debugfs(struct ntb_device *ndev)
-{
-       return ndev->debugfs_dir;
-}
-
-struct ntb_device *ntb_register_transport(struct pci_dev *pdev,
-                                         void *transport);
-void ntb_unregister_transport(struct ntb_device *ndev);
-void ntb_set_mw_addr(struct ntb_device *ndev, unsigned int mw, u64 addr);
-int ntb_register_db_callback(struct ntb_device *ndev, unsigned int idx,
-                            void *data, int (*db_cb_func)(void *data,
-                                                          int db_num));
-void ntb_unregister_db_callback(struct ntb_device *ndev, unsigned int idx);
-int ntb_register_event_callback(struct ntb_device *ndev,
-                               void (*event_cb_func)(void *handle,
-                                                     enum ntb_hw_event event));
-void ntb_unregister_event_callback(struct ntb_device *ndev);
-int ntb_get_max_spads(struct ntb_device *ndev);
-int ntb_write_local_spad(struct ntb_device *ndev, unsigned int idx, u32 val);
-int ntb_read_local_spad(struct ntb_device *ndev, unsigned int idx, u32 *val);
-int ntb_write_remote_spad(struct ntb_device *ndev, unsigned int idx, u32 val);
-int ntb_read_remote_spad(struct ntb_device *ndev, unsigned int idx, u32 *val);
-resource_size_t ntb_get_mw_base(struct ntb_device *ndev, unsigned int mw);
-void __iomem *ntb_get_mw_vbase(struct ntb_device *ndev, unsigned int mw);
-u64 ntb_get_mw_size(struct ntb_device *ndev, unsigned int mw);
-void ntb_ring_doorbell(struct ntb_device *ndev, unsigned int idx);
-void *ntb_find_transport(struct pci_dev *pdev);
-
-int ntb_transport_init(struct pci_dev *pdev);
-void ntb_transport_free(void *transport);
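
The prototypes above formed the driver-private API that ntb_transport consumed before this change; the hunks further down replace them with the generic accessors from linux/ntb.h (ntb_spad_write(), ntb_peer_spad_write(), ntb_mw_set_trans(), ntb_link_enable(), ...). A minimal sketch of the new-style scratchpad handshake, using only calls visible in this diff; the scratchpad indices are illustrative and not taken from the driver:

    #include <linux/ntb.h>

    /* Sketch: publish a 64-bit memory-window size to the peer through two
     * 32-bit scratchpads, the way ntb_transport_link_work() does below.
     * Spad indices 4 and 5 are placeholders for illustration only.
     */
    static void example_publish_mw_size(struct ntb_dev *ndev, u64 size)
    {
            ntb_peer_spad_write(ndev, 5, (u32)(size >> 32));  /* high half */
            ntb_peer_spad_write(ndev, 4, (u32)size);          /* low half  */
    }
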
diff --git a/drivers/ntb/ntb_regs.h b/drivers/ntb/ntb_regs.h
deleted file mode 100644 (file)
index f028ff8..0000000
+++ /dev/null
@@ -1,177 +0,0 @@
-/*
- * This file is provided under a dual BSD/GPLv2 license.  When using or
- *   redistributing this file, you may do so under either license.
- *
- *   GPL LICENSE SUMMARY
- *
- *   Copyright(c) 2012 Intel Corporation. All rights reserved.
- *
- *   This program is free software; you can redistribute it and/or modify
- *   it under the terms of version 2 of the GNU General Public License as
- *   published by the Free Software Foundation.
- *
- *   BSD LICENSE
- *
- *   Copyright(c) 2012 Intel Corporation. All rights reserved.
- *
- *   Redistribution and use in source and binary forms, with or without
- *   modification, are permitted provided that the following conditions
- *   are met:
- *
- *     * Redistributions of source code must retain the above copyright
- *       notice, this list of conditions and the following disclaimer.
- *     * Redistributions in binary form must reproduce the above copyright
- *       notice, this list of conditions and the following disclaimer in
- *       the documentation and/or other materials provided with the
- *       distribution.
- *     * Neither the name of Intel Corporation nor the names of its
- *       contributors may be used to endorse or promote products derived
- *       from this software without specific prior written permission.
- *
- *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * Intel PCIe NTB Linux driver
- *
- * Contact Information:
- * Jon Mason <jon.mason@intel.com>
- */
-
-#define NTB_LINK_STATUS_ACTIVE 0x2000
-#define NTB_LINK_SPEED_MASK    0x000f
-#define NTB_LINK_WIDTH_MASK    0x03f0
-
-#define SNB_MSIX_CNT           4
-#define SNB_MAX_B2B_SPADS      16
-#define SNB_MAX_COMPAT_SPADS   16
-/* Reserve the uppermost bit for link interrupt */
-#define SNB_MAX_DB_BITS                15
-#define SNB_LINK_DB            15
-#define SNB_DB_BITS_PER_VEC    5
-#define HSX_SPLITBAR_MAX_MW    3
-#define SNB_MAX_MW             2
-#define SNB_ERRATA_MAX_MW      1
-
-#define SNB_DB_HW_LINK         0x8000
-
-#define SNB_UNCERRSTS_OFFSET   0x014C
-#define SNB_CORERRSTS_OFFSET   0x0158
-#define SNB_LINK_STATUS_OFFSET 0x01A2
-#define SNB_PCICMD_OFFSET      0x0504
-#define SNB_DEVCTRL_OFFSET     0x0598
-#define SNB_DEVSTS_OFFSET      0x059A
-#define SNB_SLINK_STATUS_OFFSET        0x05A2
-
-#define SNB_PBAR2LMT_OFFSET    0x0000
-#define SNB_PBAR4LMT_OFFSET    0x0008
-#define SNB_PBAR5LMT_OFFSET    0x000C
-#define SNB_PBAR2XLAT_OFFSET   0x0010
-#define SNB_PBAR4XLAT_OFFSET   0x0018
-#define SNB_PBAR5XLAT_OFFSET   0x001C
-#define SNB_SBAR2LMT_OFFSET    0x0020
-#define SNB_SBAR4LMT_OFFSET    0x0028
-#define SNB_SBAR5LMT_OFFSET    0x002C
-#define SNB_SBAR2XLAT_OFFSET   0x0030
-#define SNB_SBAR4XLAT_OFFSET   0x0038
-#define SNB_SBAR5XLAT_OFFSET   0x003C
-#define SNB_SBAR0BASE_OFFSET   0x0040
-#define SNB_SBAR2BASE_OFFSET   0x0048
-#define SNB_SBAR4BASE_OFFSET   0x0050
-#define SNB_SBAR5BASE_OFFSET   0x0054
-#define SNB_NTBCNTL_OFFSET     0x0058
-#define SNB_SBDF_OFFSET                0x005C
-#define SNB_PDOORBELL_OFFSET   0x0060
-#define SNB_PDBMSK_OFFSET      0x0062
-#define SNB_SDOORBELL_OFFSET   0x0064
-#define SNB_SDBMSK_OFFSET      0x0066
-#define SNB_USMEMMISS_OFFSET   0x0070
-#define SNB_SPAD_OFFSET                0x0080
-#define SNB_SPADSEMA4_OFFSET   0x00c0
-#define SNB_WCCNTRL_OFFSET     0x00e0
-#define SNB_B2B_SPAD_OFFSET    0x0100
-#define SNB_B2B_DOORBELL_OFFSET        0x0140
-#define SNB_B2B_XLAT_OFFSETL   0x0144
-#define SNB_B2B_XLAT_OFFSETU   0x0148
-
-/*
- * The addresses are set up so the 32-bit BARs can function. Thus
- * the addresses are all in 32-bit space
- */
-#define SNB_MBAR01_USD_ADDR    0x000000002100000CULL
-#define SNB_MBAR23_USD_ADDR    0x000000004100000CULL
-#define SNB_MBAR4_USD_ADDR     0x000000008100000CULL
-#define SNB_MBAR5_USD_ADDR     0x00000000A100000CULL
-#define SNB_MBAR01_DSD_ADDR    0x000000002000000CULL
-#define SNB_MBAR23_DSD_ADDR    0x000000004000000CULL
-#define SNB_MBAR4_DSD_ADDR     0x000000008000000CULL
-#define SNB_MBAR5_DSD_ADDR     0x00000000A000000CULL
-
-#define BWD_MSIX_CNT           34
-#define BWD_MAX_SPADS          16
-#define BWD_MAX_DB_BITS                34
-#define BWD_DB_BITS_PER_VEC    1
-#define BWD_MAX_MW             2
-
-#define BWD_PCICMD_OFFSET      0xb004
-#define BWD_MBAR23_OFFSET      0xb018
-#define BWD_MBAR45_OFFSET      0xb020
-#define BWD_DEVCTRL_OFFSET     0xb048
-#define BWD_LINK_STATUS_OFFSET 0xb052
-#define BWD_ERRCORSTS_OFFSET   0xb110
-
-#define BWD_SBAR2XLAT_OFFSET   0x0008
-#define BWD_SBAR4XLAT_OFFSET   0x0010
-#define BWD_PDOORBELL_OFFSET   0x0020
-#define BWD_PDBMSK_OFFSET      0x0028
-#define BWD_NTBCNTL_OFFSET     0x0060
-#define BWD_EBDF_OFFSET                0x0064
-#define BWD_SPAD_OFFSET                0x0080
-#define BWD_SPADSEMA_OFFSET    0x00c0
-#define BWD_STKYSPAD_OFFSET    0x00c4
-#define BWD_PBAR2XLAT_OFFSET   0x8008
-#define BWD_PBAR4XLAT_OFFSET   0x8010
-#define BWD_B2B_DOORBELL_OFFSET        0x8020
-#define BWD_B2B_SPAD_OFFSET    0x8080
-#define BWD_B2B_SPADSEMA_OFFSET        0x80c0
-#define BWD_B2B_STKYSPAD_OFFSET        0x80c4
-
-#define BWD_MODPHY_PCSREG4     0x1c004
-#define BWD_MODPHY_PCSREG6     0x1c006
-
-#define BWD_IP_BASE            0xC000
-#define BWD_DESKEWSTS_OFFSET   (BWD_IP_BASE + 0x3024)
-#define BWD_LTSSMERRSTS0_OFFSET (BWD_IP_BASE + 0x3180)
-#define BWD_LTSSMSTATEJMP_OFFSET       (BWD_IP_BASE + 0x3040)
-#define BWD_IBSTERRRCRVSTS0_OFFSET     (BWD_IP_BASE + 0x3324)
-
-#define BWD_DESKEWSTS_DBERR    (1 << 15)
-#define BWD_LTSSMERRSTS0_UNEXPECTEDEI  (1 << 20)
-#define BWD_LTSSMSTATEJMP_FORCEDETECT  (1 << 2)
-#define BWD_IBIST_ERR_OFLOW    0x7FFF7FFF
-
-#define NTB_CNTL_CFG_LOCK              (1 << 0)
-#define NTB_CNTL_LINK_DISABLE          (1 << 1)
-#define NTB_CNTL_S2P_BAR23_SNOOP       (1 << 2)
-#define NTB_CNTL_P2S_BAR23_SNOOP       (1 << 4)
-#define NTB_CNTL_S2P_BAR4_SNOOP        (1 << 6)
-#define NTB_CNTL_P2S_BAR4_SNOOP        (1 << 8)
-#define NTB_CNTL_S2P_BAR5_SNOOP        (1 << 12)
-#define NTB_CNTL_P2S_BAR5_SNOOP        (1 << 14)
-#define BWD_CNTL_LINK_DOWN             (1 << 16)
-
-#define NTB_PPD_OFFSET         0x00D4
-#define SNB_PPD_CONN_TYPE      0x0003
-#define SNB_PPD_DEV_TYPE       0x0010
-#define SNB_PPD_SPLIT_BAR      (1 << 6)
-#define BWD_PPD_INIT_LINK      0x0008
-#define BWD_PPD_CONN_TYPE      0x0300
-#define BWD_PPD_DEV_TYPE       0x1000
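
These register offsets were only ever dereferenced relative to the device's mapped BAR 0 registers. A hedged sketch of the old access pattern, built solely from names defined in the two deleted headers (struct ntb_device's reg_base, SNB_SBAR2XLAT_OFFSET, and the writeq() fallback above); the old driver actually went through ndev->reg_ofs rather than a hard-coded offset, so this is illustrative only:

    /* Illustrative only: program where inbound BAR2/3 accesses should land. */
    static void example_set_sbar2_xlat(struct ntb_device *ndev, u64 addr)
    {
            writeq(addr, ndev->reg_base + SNB_SBAR2XLAT_OFFSET);
    }
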
index e9bf2f47b61ada12a6730990c433d57a71ce90ce..efe3ad4122f2ee1094da78c1bb31d86642b5b3a6 100644 (file)
@@ -5,6 +5,7 @@
  *   GPL LICENSE SUMMARY
  *
  *   Copyright(c) 2012 Intel Corporation. All rights reserved.
+ *   Copyright (C) 2015 EMC Corporation. All Rights Reserved.
  *
  *   This program is free software; you can redistribute it and/or modify
  *   it under the terms of version 2 of the GNU General Public License as
@@ -13,6 +14,7 @@
  *   BSD LICENSE
  *
  *   Copyright(c) 2012 Intel Corporation. All rights reserved.
+ *   Copyright (C) 2015 EMC Corporation. All Rights Reserved.
  *
  *   Redistribution and use in source and binary forms, with or without
  *   modification, are permitted provided that the following conditions
@@ -40,7 +42,7 @@
  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  *
- * Intel PCIe NTB Linux driver
+ * PCIe NTB Transport Linux driver
  *
  * Contact Information:
  * Jon Mason <jon.mason@intel.com>
 #include <linux/pci.h>
 #include <linux/slab.h>
 #include <linux/types.h>
-#include "ntb_hw.h"
+#include <linux/uaccess.h>
+#include "linux/ntb.h"
+#include "linux/ntb_transport.h"
 
-#define NTB_TRANSPORT_VERSION  3
+#define NTB_TRANSPORT_VERSION  4
+#define NTB_TRANSPORT_VER      "4"
+#define NTB_TRANSPORT_NAME     "ntb_transport"
+#define NTB_TRANSPORT_DESC     "Software Queue-Pair Transport over NTB"
 
-static unsigned int transport_mtu = 0x401E;
+MODULE_DESCRIPTION(NTB_TRANSPORT_DESC);
+MODULE_VERSION(NTB_TRANSPORT_VER);
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_AUTHOR("Intel Corporation");
+
+static unsigned long max_mw_size;
+module_param(max_mw_size, ulong, 0644);
+MODULE_PARM_DESC(max_mw_size, "Limit size of large memory windows");
+
+static unsigned int transport_mtu = 0x10000;
 module_param(transport_mtu, uint, 0644);
 MODULE_PARM_DESC(transport_mtu, "Maximum size of NTB transport packets");
 
@@ -72,10 +88,16 @@ static unsigned int copy_bytes = 1024;
 module_param(copy_bytes, uint, 0644);
 MODULE_PARM_DESC(copy_bytes, "Threshold under which NTB will use the CPU to copy instead of DMA");
 
+static bool use_dma;
+module_param(use_dma, bool, 0644);
+MODULE_PARM_DESC(use_dma, "Use DMA engine to perform large data copy");
+
+static struct dentry *nt_debugfs_dir;
+
 struct ntb_queue_entry {
        /* ntb_queue list reference */
        struct list_head entry;
-       /* pointers to data to be transfered */
+       /* pointers to data to be transferred */
        void *cb_data;
        void *buf;
        unsigned int len;
@@ -94,14 +116,16 @@ struct ntb_rx_info {
 };
 
 struct ntb_transport_qp {
-       struct ntb_transport *transport;
-       struct ntb_device *ndev;
+       struct ntb_transport_ctx *transport;
+       struct ntb_dev *ndev;
        void *cb_data;
        struct dma_chan *dma_chan;
 
        bool client_ready;
-       bool qp_link;
+       bool link_is_up;
+
        u8 qp_num;      /* Only 64 QP's are allowed.  0-63 */
+       u64 qp_bit;
 
        struct ntb_rx_info __iomem *rx_info;
        struct ntb_rx_info *remote_rx_info;
@@ -127,6 +151,7 @@ struct ntb_transport_qp {
        unsigned int rx_max_entry;
        unsigned int rx_max_frame;
        dma_cookie_t last_cookie;
+       struct tasklet_struct rxc_db_work;
 
        void (*event_handler)(void *data, int status);
        struct delayed_work link_work;
@@ -153,33 +178,44 @@ struct ntb_transport_qp {
 };
 
 struct ntb_transport_mw {
-       size_t size;
+       phys_addr_t phys_addr;
+       resource_size_t phys_size;
+       resource_size_t xlat_align;
+       resource_size_t xlat_align_size;
+       void __iomem *vbase;
+       size_t xlat_size;
+       size_t buff_size;
        void *virt_addr;
        dma_addr_t dma_addr;
 };
 
 struct ntb_transport_client_dev {
        struct list_head entry;
+       struct ntb_transport_ctx *nt;
        struct device dev;
 };
 
-struct ntb_transport {
+struct ntb_transport_ctx {
        struct list_head entry;
        struct list_head client_devs;
 
-       struct ntb_device *ndev;
-       struct ntb_transport_mw *mw;
-       struct ntb_transport_qp *qps;
-       unsigned int max_qps;
-       unsigned long qp_bitmap;
-       bool transport_link;
+       struct ntb_dev *ndev;
+
+       struct ntb_transport_mw *mw_vec;
+       struct ntb_transport_qp *qp_vec;
+       unsigned int mw_count;
+       unsigned int qp_count;
+       u64 qp_bitmap;
+       u64 qp_bitmap_free;
+
+       bool link_is_up;
        struct delayed_work link_work;
        struct work_struct link_cleanup;
 };
 
 enum {
-       DESC_DONE_FLAG = 1 << 0,
-       LINK_DOWN_FLAG = 1 << 1,
+       DESC_DONE_FLAG = BIT(0),
+       LINK_DOWN_FLAG = BIT(1),
 };
 
 struct ntb_payload_header {
@@ -200,68 +236,69 @@ enum {
        MAX_SPAD,
 };
 
-#define QP_TO_MW(ndev, qp)     ((qp) % ntb_max_mw(ndev))
+#define dev_client_dev(__dev) \
+       container_of((__dev), struct ntb_transport_client_dev, dev)
+
+#define drv_client(__drv) \
+       container_of((__drv), struct ntb_transport_client, driver)
+
+#define QP_TO_MW(nt, qp)       ((qp) % nt->mw_count)
 #define NTB_QP_DEF_NUM_ENTRIES 100
 #define NTB_LINK_DOWN_TIMEOUT  10
 
-static int ntb_match_bus(struct device *dev, struct device_driver *drv)
+static void ntb_transport_rxc_db(unsigned long data);
+static const struct ntb_ctx_ops ntb_transport_ops;
+static struct ntb_client ntb_transport_client;
+
+static int ntb_transport_bus_match(struct device *dev,
+                                  struct device_driver *drv)
 {
        return !strncmp(dev_name(dev), drv->name, strlen(drv->name));
 }
 
-static int ntb_client_probe(struct device *dev)
+static int ntb_transport_bus_probe(struct device *dev)
 {
-       const struct ntb_client *drv = container_of(dev->driver,
-                                                   struct ntb_client, driver);
-       struct pci_dev *pdev = container_of(dev->parent, struct pci_dev, dev);
+       const struct ntb_transport_client *client;
        int rc = -EINVAL;
 
        get_device(dev);
-       if (drv && drv->probe)
-               rc = drv->probe(pdev);
+
+       client = drv_client(dev->driver);
+       rc = client->probe(dev);
        if (rc)
                put_device(dev);
 
        return rc;
 }
 
-static int ntb_client_remove(struct device *dev)
+static int ntb_transport_bus_remove(struct device *dev)
 {
-       const struct ntb_client *drv = container_of(dev->driver,
-                                                   struct ntb_client, driver);
-       struct pci_dev *pdev = container_of(dev->parent, struct pci_dev, dev);
+       const struct ntb_transport_client *client;
 
-       if (drv && drv->remove)
-               drv->remove(pdev);
+       client = drv_client(dev->driver);
+       client->remove(dev);
 
        put_device(dev);
 
        return 0;
 }
 
-static struct bus_type ntb_bus_type = {
-       .name = "ntb_bus",
-       .match = ntb_match_bus,
-       .probe = ntb_client_probe,
-       .remove = ntb_client_remove,
+static struct bus_type ntb_transport_bus = {
+       .name = "ntb_transport",
+       .match = ntb_transport_bus_match,
+       .probe = ntb_transport_bus_probe,
+       .remove = ntb_transport_bus_remove,
 };
 
 static LIST_HEAD(ntb_transport_list);
 
-static int ntb_bus_init(struct ntb_transport *nt)
+static int ntb_bus_init(struct ntb_transport_ctx *nt)
 {
-       if (list_empty(&ntb_transport_list)) {
-               int rc = bus_register(&ntb_bus_type);
-               if (rc)
-                       return rc;
-       }
-
        list_add(&nt->entry, &ntb_transport_list);
-
        return 0;
 }
 
-static void ntb_bus_remove(struct ntb_transport *nt)
+static void ntb_bus_remove(struct ntb_transport_ctx *nt)
 {
        struct ntb_transport_client_dev *client_dev, *cd;
 
@@ -273,29 +310,26 @@ static void ntb_bus_remove(struct ntb_transport *nt)
        }
 
        list_del(&nt->entry);
-
-       if (list_empty(&ntb_transport_list))
-               bus_unregister(&ntb_bus_type);
 }
 
-static void ntb_client_release(struct device *dev)
+static void ntb_transport_client_release(struct device *dev)
 {
        struct ntb_transport_client_dev *client_dev;
-       client_dev = container_of(dev, struct ntb_transport_client_dev, dev);
 
+       client_dev = dev_client_dev(dev);
        kfree(client_dev);
 }
 
 /**
- * ntb_unregister_client_dev - Unregister NTB client device
+ * ntb_transport_unregister_client_dev - Unregister NTB client device
  * @device_name: Name of NTB client device
  *
  * Unregister an NTB client device with the NTB transport layer
  */
-void ntb_unregister_client_dev(char *device_name)
+void ntb_transport_unregister_client_dev(char *device_name)
 {
        struct ntb_transport_client_dev *client, *cd;
-       struct ntb_transport *nt;
+       struct ntb_transport_ctx *nt;
 
        list_for_each_entry(nt, &ntb_transport_list, entry)
                list_for_each_entry_safe(client, cd, &nt->client_devs, entry)
@@ -305,18 +339,19 @@ void ntb_unregister_client_dev(char *device_name)
                                device_unregister(&client->dev);
                        }
 }
-EXPORT_SYMBOL_GPL(ntb_unregister_client_dev);
+EXPORT_SYMBOL_GPL(ntb_transport_unregister_client_dev);
 
 /**
- * ntb_register_client_dev - Register NTB client device
+ * ntb_transport_register_client_dev - Register NTB client device
  * @device_name: Name of NTB client device
  *
  * Register an NTB client device with the NTB transport layer
  */
-int ntb_register_client_dev(char *device_name)
+int ntb_transport_register_client_dev(char *device_name)
 {
        struct ntb_transport_client_dev *client_dev;
-       struct ntb_transport *nt;
+       struct ntb_transport_ctx *nt;
+       int node;
        int rc, i = 0;
 
        if (list_empty(&ntb_transport_list))
@@ -325,8 +360,10 @@ int ntb_register_client_dev(char *device_name)
        list_for_each_entry(nt, &ntb_transport_list, entry) {
                struct device *dev;
 
-               client_dev = kzalloc(sizeof(struct ntb_transport_client_dev),
-                                    GFP_KERNEL);
+               node = dev_to_node(&nt->ndev->dev);
+
+               client_dev = kzalloc_node(sizeof(*client_dev),
+                                         GFP_KERNEL, node);
                if (!client_dev) {
                        rc = -ENOMEM;
                        goto err;
@@ -336,9 +373,9 @@ int ntb_register_client_dev(char *device_name)
 
                /* setup and register client devices */
                dev_set_name(dev, "%s%d", device_name, i);
-               dev->bus = &ntb_bus_type;
-               dev->release = ntb_client_release;
-               dev->parent = &ntb_query_pdev(nt->ndev)->dev;
+               dev->bus = &ntb_transport_bus;
+               dev->release = ntb_transport_client_release;
+               dev->parent = &nt->ndev->dev;
 
                rc = device_register(dev);
                if (rc) {
@@ -353,44 +390,44 @@ int ntb_register_client_dev(char *device_name)
        return 0;
 
 err:
-       ntb_unregister_client_dev(device_name);
+       ntb_transport_unregister_client_dev(device_name);
 
        return rc;
 }
-EXPORT_SYMBOL_GPL(ntb_register_client_dev);
+EXPORT_SYMBOL_GPL(ntb_transport_register_client_dev);
 
 /**
- * ntb_register_client - Register NTB client driver
+ * ntb_transport_register_client - Register NTB client driver
  * @drv: NTB client driver to be registered
  *
  * Register an NTB client driver with the NTB transport layer
  *
  * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
  */
-int ntb_register_client(struct ntb_client *drv)
+int ntb_transport_register_client(struct ntb_transport_client *drv)
 {
-       drv->driver.bus = &ntb_bus_type;
+       drv->driver.bus = &ntb_transport_bus;
 
        if (list_empty(&ntb_transport_list))
                return -ENODEV;
 
        return driver_register(&drv->driver);
 }
-EXPORT_SYMBOL_GPL(ntb_register_client);
+EXPORT_SYMBOL_GPL(ntb_transport_register_client);
 
 /**
- * ntb_unregister_client - Unregister NTB client driver
+ * ntb_transport_unregister_client - Unregister NTB client driver
  * @drv: NTB client driver to be unregistered
  *
  * Unregister an NTB client driver with the NTB transport layer
  *
  * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
  */
-void ntb_unregister_client(struct ntb_client *drv)
+void ntb_transport_unregister_client(struct ntb_transport_client *drv)
 {
        driver_unregister(&drv->driver);
 }
-EXPORT_SYMBOL_GPL(ntb_unregister_client);
+EXPORT_SYMBOL_GPL(ntb_transport_unregister_client);
 
 static ssize_t debugfs_read(struct file *filp, char __user *ubuf, size_t count,
                            loff_t *offp)
@@ -452,8 +489,8 @@ static ssize_t debugfs_read(struct file *filp, char __user *ubuf, size_t count,
                               "tx_max_entry - \t%u\n", qp->tx_max_entry);
 
        out_offset += snprintf(buf + out_offset, out_count - out_offset,
-                              "\nQP Link %s\n", (qp->qp_link == NTB_LINK_UP) ?
-                              "Up" : "Down");
+                              "\nQP Link %s\n",
+                              qp->link_is_up ? "Up" : "Down");
        if (out_offset > out_count)
                out_offset = out_count;
 
@@ -497,26 +534,31 @@ out:
        return entry;
 }
 
-static void ntb_transport_setup_qp_mw(struct ntb_transport *nt,
-                                     unsigned int qp_num)
+static int ntb_transport_setup_qp_mw(struct ntb_transport_ctx *nt,
+                                    unsigned int qp_num)
 {
-       struct ntb_transport_qp *qp = &nt->qps[qp_num];
+       struct ntb_transport_qp *qp = &nt->qp_vec[qp_num];
+       struct ntb_transport_mw *mw;
        unsigned int rx_size, num_qps_mw;
-       u8 mw_num, mw_max;
+       unsigned int mw_num, mw_count, qp_count;
        unsigned int i;
 
-       mw_max = ntb_max_mw(nt->ndev);
-       mw_num = QP_TO_MW(nt->ndev, qp_num);
+       mw_count = nt->mw_count;
+       qp_count = nt->qp_count;
+
+       mw_num = QP_TO_MW(nt, qp_num);
+       mw = &nt->mw_vec[mw_num];
 
-       WARN_ON(nt->mw[mw_num].virt_addr == NULL);
+       if (!mw->virt_addr)
+               return -ENOMEM;
 
-       if (nt->max_qps % mw_max && mw_num + 1 < nt->max_qps / mw_max)
-               num_qps_mw = nt->max_qps / mw_max + 1;
+       if (qp_count % mw_count && mw_num + 1 < qp_count / mw_count)
+               num_qps_mw = qp_count / mw_count + 1;
        else
-               num_qps_mw = nt->max_qps / mw_max;
+               num_qps_mw = qp_count / mw_count;
 
-       rx_size = (unsigned int) nt->mw[mw_num].size / num_qps_mw;
-       qp->rx_buff = nt->mw[mw_num].virt_addr + qp_num / mw_max * rx_size;
+       rx_size = (unsigned int)mw->xlat_size / num_qps_mw;
+       qp->rx_buff = mw->virt_addr + rx_size * qp_num / mw_count;
        rx_size -= sizeof(struct ntb_rx_info);
 
        qp->remote_rx_info = qp->rx_buff + rx_size;
@@ -530,49 +572,63 @@ static void ntb_transport_setup_qp_mw(struct ntb_transport *nt,
 
        /* setup the hdr offsets with 0's */
        for (i = 0; i < qp->rx_max_entry; i++) {
-               void *offset = qp->rx_buff + qp->rx_max_frame * (i + 1) -
-                              sizeof(struct ntb_payload_header);
+               void *offset = (qp->rx_buff + qp->rx_max_frame * (i + 1) -
+                               sizeof(struct ntb_payload_header));
                memset(offset, 0, sizeof(struct ntb_payload_header));
        }
 
        qp->rx_pkts = 0;
        qp->tx_pkts = 0;
        qp->tx_index = 0;
+
+       return 0;
 }
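
The partitioning arithmetic in ntb_transport_setup_qp_mw() is easier to follow with concrete numbers. A small user-space sketch of the same computation; the values (6 queue pairs, 2 memory windows, a 0x60000-byte translated window) are chosen for illustration and are not taken from the driver:

    #include <stdio.h>

    int main(void)
    {
            unsigned int qp_count = 6, mw_count = 2, xlat_size = 0x60000;
            unsigned int qp_num = 4;
            unsigned int mw_num = qp_num % mw_count;           /* QP_TO_MW(): QP 4 -> MW 0 */
            unsigned int num_qps_mw = qp_count / mw_count;     /* 3 QPs share each MW      */
            unsigned int rx_size = xlat_size / num_qps_mw;     /* 0x20000 per QP           */
            unsigned int rx_off = rx_size * qp_num / mw_count; /* QP 4 starts at +0x40000  */

            printf("mw=%u qps/mw=%u rx_size=%#x rx_off=%#x\n",
                   mw_num, num_qps_mw, rx_size, rx_off);
            return 0;
    }

The driver then trims sizeof(struct ntb_rx_info) off the tail of each per-QP region, which is where remote_rx_info lives.
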
 
-static void ntb_free_mw(struct ntb_transport *nt, int num_mw)
+static void ntb_free_mw(struct ntb_transport_ctx *nt, int num_mw)
 {
-       struct ntb_transport_mw *mw = &nt->mw[num_mw];
-       struct pci_dev *pdev = ntb_query_pdev(nt->ndev);
+       struct ntb_transport_mw *mw = &nt->mw_vec[num_mw];
+       struct pci_dev *pdev = nt->ndev->pdev;
 
        if (!mw->virt_addr)
                return;
 
-       dma_free_coherent(&pdev->dev, mw->size, mw->virt_addr, mw->dma_addr);
+       ntb_mw_clear_trans(nt->ndev, num_mw);
+       dma_free_coherent(&pdev->dev, mw->buff_size,
+                         mw->virt_addr, mw->dma_addr);
+       mw->xlat_size = 0;
+       mw->buff_size = 0;
        mw->virt_addr = NULL;
 }
 
-static int ntb_set_mw(struct ntb_transport *nt, int num_mw, unsigned int size)
+static int ntb_set_mw(struct ntb_transport_ctx *nt, int num_mw,
+                     unsigned int size)
 {
-       struct ntb_transport_mw *mw = &nt->mw[num_mw];
-       struct pci_dev *pdev = ntb_query_pdev(nt->ndev);
+       struct ntb_transport_mw *mw = &nt->mw_vec[num_mw];
+       struct pci_dev *pdev = nt->ndev->pdev;
+       unsigned int xlat_size, buff_size;
+       int rc;
+
+       xlat_size = round_up(size, mw->xlat_align_size);
+       buff_size = round_up(size, mw->xlat_align);
 
        /* No need to re-setup */
-       if (mw->size == ALIGN(size, 4096))
+       if (mw->xlat_size == xlat_size)
                return 0;
 
-       if (mw->size != 0)
+       if (mw->buff_size)
                ntb_free_mw(nt, num_mw);
 
-       /* Alloc memory for receiving data.  Must be 4k aligned */
-       mw->size = ALIGN(size, 4096);
+       /* Alloc memory for receiving data.  Must be aligned */
+       mw->xlat_size = xlat_size;
+       mw->buff_size = buff_size;
 
-       mw->virt_addr = dma_alloc_coherent(&pdev->dev, mw->size, &mw->dma_addr,
-                                          GFP_KERNEL);
+       mw->virt_addr = dma_alloc_coherent(&pdev->dev, buff_size,
+                                          &mw->dma_addr, GFP_KERNEL);
        if (!mw->virt_addr) {
-               mw->size = 0;
-               dev_err(&pdev->dev, "Unable to allocate MW buffer of size %d\n",
-                      (int) mw->size);
+               mw->xlat_size = 0;
+               mw->buff_size = 0;
+               dev_err(&pdev->dev, "Unable to alloc MW buff of size %d\n",
+                       buff_size);
                return -ENOMEM;
        }
 
@@ -582,34 +638,58 @@ static int ntb_set_mw(struct ntb_transport *nt, int num_mw, unsigned int size)
         * is a requirement of the hardware. It is recommended to set up CMA
         * for BAR sizes equal to or greater than 4MB.
         */
-       if (!IS_ALIGNED(mw->dma_addr, mw->size)) {
-               dev_err(&pdev->dev, "DMA memory %pad not aligned to BAR size\n",
+       if (!IS_ALIGNED(mw->dma_addr, mw->xlat_align)) {
+               dev_err(&pdev->dev, "DMA memory %pad is not aligned\n",
                        &mw->dma_addr);
                ntb_free_mw(nt, num_mw);
                return -ENOMEM;
        }
 
        /* Notify HW the memory location of the receive buffer */
-       ntb_set_mw_addr(nt->ndev, num_mw, mw->dma_addr);
+       rc = ntb_mw_set_trans(nt->ndev, num_mw, mw->dma_addr, mw->xlat_size);
+       if (rc) {
+               dev_err(&pdev->dev, "Unable to set mw%d translation", num_mw);
+               ntb_free_mw(nt, num_mw);
+               return -EIO;
+       }
 
        return 0;
 }
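
The two round_up() calls at the top of ntb_set_mw() keep the translated size and the allocated buffer size on the alignment granules filled in by ntb_mw_get_range(), and the IS_ALIGNED() check above guards the hardware requirement described in the comment. A hedged numeric example; the alignment values are illustrative, not read from hardware:

    #include <stdio.h>

    int main(void)
    {
            unsigned long size = 5ul << 20;          /* peer advertised 5 MiB    */
            unsigned long xlat_align = 4ul << 20;    /* e.g. 4 MiB BAR alignment */
            unsigned long xlat_align_size = 4096;    /* e.g. 4 KiB size granule  */

            /* open-coded round_up(), matching what ntb_set_mw() does above */
            unsigned long xlat_size = (size + xlat_align_size - 1) & ~(xlat_align_size - 1);
            unsigned long buff_size = (size + xlat_align - 1) & ~(xlat_align - 1);

            printf("xlat_size=%#lx buff_size=%#lx\n", xlat_size, buff_size);
            /* prints xlat_size=0x500000 (5 MiB), buff_size=0x800000 (8 MiB) */
            return 0;
    }
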
 
+static void ntb_qp_link_down_reset(struct ntb_transport_qp *qp)
+{
+       qp->link_is_up = false;
+
+       qp->tx_index = 0;
+       qp->rx_index = 0;
+       qp->rx_bytes = 0;
+       qp->rx_pkts = 0;
+       qp->rx_ring_empty = 0;
+       qp->rx_err_no_buf = 0;
+       qp->rx_err_oflow = 0;
+       qp->rx_err_ver = 0;
+       qp->rx_memcpy = 0;
+       qp->rx_async = 0;
+       qp->tx_bytes = 0;
+       qp->tx_pkts = 0;
+       qp->tx_ring_full = 0;
+       qp->tx_err_no_buf = 0;
+       qp->tx_memcpy = 0;
+       qp->tx_async = 0;
+}
+
 static void ntb_qp_link_cleanup(struct ntb_transport_qp *qp)
 {
-       struct ntb_transport *nt = qp->transport;
-       struct pci_dev *pdev = ntb_query_pdev(nt->ndev);
+       struct ntb_transport_ctx *nt = qp->transport;
+       struct pci_dev *pdev = nt->ndev->pdev;
 
-       if (qp->qp_link == NTB_LINK_DOWN) {
-               cancel_delayed_work_sync(&qp->link_work);
-               return;
-       }
+       dev_info(&pdev->dev, "qp %d: Link Cleanup\n", qp->qp_num);
 
-       if (qp->event_handler)
-               qp->event_handler(qp->cb_data, NTB_LINK_DOWN);
+       cancel_delayed_work_sync(&qp->link_work);
+       ntb_qp_link_down_reset(qp);
 
-       dev_info(&pdev->dev, "qp %d: Link Down\n", qp->qp_num);
-       qp->qp_link = NTB_LINK_DOWN;
+       if (qp->event_handler)
+               qp->event_handler(qp->cb_data, qp->link_is_up);
 }
 
 static void ntb_qp_link_cleanup_work(struct work_struct *work)
@@ -617,11 +697,11 @@ static void ntb_qp_link_cleanup_work(struct work_struct *work)
        struct ntb_transport_qp *qp = container_of(work,
                                                   struct ntb_transport_qp,
                                                   link_cleanup);
-       struct ntb_transport *nt = qp->transport;
+       struct ntb_transport_ctx *nt = qp->transport;
 
        ntb_qp_link_cleanup(qp);
 
-       if (nt->transport_link == NTB_LINK_UP)
+       if (nt->link_is_up)
                schedule_delayed_work(&qp->link_work,
                                      msecs_to_jiffies(NTB_LINK_DOWN_TIMEOUT));
 }
@@ -631,180 +711,132 @@ static void ntb_qp_link_down(struct ntb_transport_qp *qp)
        schedule_work(&qp->link_cleanup);
 }
 
-static void ntb_transport_link_cleanup(struct ntb_transport *nt)
+static void ntb_transport_link_cleanup(struct ntb_transport_ctx *nt)
 {
+       struct ntb_transport_qp *qp;
+       u64 qp_bitmap_alloc;
        int i;
 
+       qp_bitmap_alloc = nt->qp_bitmap & ~nt->qp_bitmap_free;
+
        /* Pass along the info to any clients */
-       for (i = 0; i < nt->max_qps; i++)
-               if (!test_bit(i, &nt->qp_bitmap))
-                       ntb_qp_link_cleanup(&nt->qps[i]);
+       for (i = 0; i < nt->qp_count; i++)
+               if (qp_bitmap_alloc & BIT_ULL(i)) {
+                       qp = &nt->qp_vec[i];
+                       ntb_qp_link_cleanup(qp);
+                       cancel_work_sync(&qp->link_cleanup);
+                       cancel_delayed_work_sync(&qp->link_work);
+               }
 
-       if (nt->transport_link == NTB_LINK_DOWN)
+       if (!nt->link_is_up)
                cancel_delayed_work_sync(&nt->link_work);
-       else
-               nt->transport_link = NTB_LINK_DOWN;
 
        /* The scratchpad registers keep the values if the remote side
         * goes down, blast them now to give them a sane value the next
         * time they are accessed
         */
        for (i = 0; i < MAX_SPAD; i++)
-               ntb_write_local_spad(nt->ndev, i, 0);
+               ntb_spad_write(nt->ndev, i, 0);
 }
 
 static void ntb_transport_link_cleanup_work(struct work_struct *work)
 {
-       struct ntb_transport *nt = container_of(work, struct ntb_transport,
-                                               link_cleanup);
+       struct ntb_transport_ctx *nt =
+               container_of(work, struct ntb_transport_ctx, link_cleanup);
 
        ntb_transport_link_cleanup(nt);
 }
 
-static void ntb_transport_event_callback(void *data, enum ntb_hw_event event)
+static void ntb_transport_event_callback(void *data)
 {
-       struct ntb_transport *nt = data;
+       struct ntb_transport_ctx *nt = data;
 
-       switch (event) {
-       case NTB_EVENT_HW_LINK_UP:
+       if (ntb_link_is_up(nt->ndev, NULL, NULL) == 1)
                schedule_delayed_work(&nt->link_work, 0);
-               break;
-       case NTB_EVENT_HW_LINK_DOWN:
+       else
                schedule_work(&nt->link_cleanup);
-               break;
-       default:
-               BUG();
-       }
 }
 
 static void ntb_transport_link_work(struct work_struct *work)
 {
-       struct ntb_transport *nt = container_of(work, struct ntb_transport,
-                                               link_work.work);
-       struct ntb_device *ndev = nt->ndev;
-       struct pci_dev *pdev = ntb_query_pdev(ndev);
+       struct ntb_transport_ctx *nt =
+               container_of(work, struct ntb_transport_ctx, link_work.work);
+       struct ntb_dev *ndev = nt->ndev;
+       struct pci_dev *pdev = ndev->pdev;
+       resource_size_t size;
        u32 val;
-       int rc, i;
+       int rc, i, spad;
 
        /* send the local info, in the opposite order of the way we read it */
-       for (i = 0; i < ntb_max_mw(ndev); i++) {
-               rc = ntb_write_remote_spad(ndev, MW0_SZ_HIGH + (i * 2),
-                                          ntb_get_mw_size(ndev, i) >> 32);
-               if (rc) {
-                       dev_err(&pdev->dev, "Error writing %u to remote spad %d\n",
-                               (u32)(ntb_get_mw_size(ndev, i) >> 32),
-                               MW0_SZ_HIGH + (i * 2));
-                       goto out;
-               }
+       for (i = 0; i < nt->mw_count; i++) {
+               size = nt->mw_vec[i].phys_size;
 
-               rc = ntb_write_remote_spad(ndev, MW0_SZ_LOW + (i * 2),
-                                          (u32) ntb_get_mw_size(ndev, i));
-               if (rc) {
-                       dev_err(&pdev->dev, "Error writing %u to remote spad %d\n",
-                               (u32) ntb_get_mw_size(ndev, i),
-                               MW0_SZ_LOW + (i * 2));
-                       goto out;
-               }
-       }
+               if (max_mw_size && size > max_mw_size)
+                       size = max_mw_size;
 
-       rc = ntb_write_remote_spad(ndev, NUM_MWS, ntb_max_mw(ndev));
-       if (rc) {
-               dev_err(&pdev->dev, "Error writing %x to remote spad %d\n",
-                       ntb_max_mw(ndev), NUM_MWS);
-               goto out;
-       }
+               spad = MW0_SZ_HIGH + (i * 2);
+               ntb_peer_spad_write(ndev, spad, (u32)(size >> 32));
 
-       rc = ntb_write_remote_spad(ndev, NUM_QPS, nt->max_qps);
-       if (rc) {
-               dev_err(&pdev->dev, "Error writing %x to remote spad %d\n",
-                       nt->max_qps, NUM_QPS);
-               goto out;
+               spad = MW0_SZ_LOW + (i * 2);
+               ntb_peer_spad_write(ndev, spad, (u32)size);
        }
 
-       rc = ntb_write_remote_spad(ndev, VERSION, NTB_TRANSPORT_VERSION);
-       if (rc) {
-               dev_err(&pdev->dev, "Error writing %x to remote spad %d\n",
-                       NTB_TRANSPORT_VERSION, VERSION);
-               goto out;
-       }
+       ntb_peer_spad_write(ndev, NUM_MWS, nt->mw_count);
 
-       /* Query the remote side for its info */
-       rc = ntb_read_remote_spad(ndev, VERSION, &val);
-       if (rc) {
-               dev_err(&pdev->dev, "Error reading remote spad %d\n", VERSION);
-               goto out;
-       }
+       ntb_peer_spad_write(ndev, NUM_QPS, nt->qp_count);
 
-       if (val != NTB_TRANSPORT_VERSION)
-               goto out;
-       dev_dbg(&pdev->dev, "Remote version = %d\n", val);
+       ntb_peer_spad_write(ndev, VERSION, NTB_TRANSPORT_VERSION);
 
-       rc = ntb_read_remote_spad(ndev, NUM_QPS, &val);
-       if (rc) {
-               dev_err(&pdev->dev, "Error reading remote spad %d\n", NUM_QPS);
+       /* Query the remote side for its info */
+       val = ntb_spad_read(ndev, VERSION);
+       dev_dbg(&pdev->dev, "Remote version = %d\n", val);
+       if (val != NTB_TRANSPORT_VERSION)
                goto out;
-       }
 
-       if (val != nt->max_qps)
-               goto out;
+       val = ntb_spad_read(ndev, NUM_QPS);
        dev_dbg(&pdev->dev, "Remote max number of qps = %d\n", val);
-
-       rc = ntb_read_remote_spad(ndev, NUM_MWS, &val);
-       if (rc) {
-               dev_err(&pdev->dev, "Error reading remote spad %d\n", NUM_MWS);
+       if (val != nt->qp_count)
                goto out;
-       }
 
-       if (val != ntb_max_mw(ndev))
-               goto out;
+       val = ntb_spad_read(ndev, NUM_MWS);
        dev_dbg(&pdev->dev, "Remote number of mws = %d\n", val);
+       if (val != nt->mw_count)
+               goto out;
 
-       for (i = 0; i < ntb_max_mw(ndev); i++) {
+       for (i = 0; i < nt->mw_count; i++) {
                u64 val64;
 
-               rc = ntb_read_remote_spad(ndev, MW0_SZ_HIGH + (i * 2), &val);
-               if (rc) {
-                       dev_err(&pdev->dev, "Error reading remote spad %d\n",
-                               MW0_SZ_HIGH + (i * 2));
-                       goto out1;
-               }
-
-               val64 = (u64) val << 32;
-
-               rc = ntb_read_remote_spad(ndev, MW0_SZ_LOW + (i * 2), &val);
-               if (rc) {
-                       dev_err(&pdev->dev, "Error reading remote spad %d\n",
-                               MW0_SZ_LOW + (i * 2));
-                       goto out1;
-               }
+               val = ntb_spad_read(ndev, MW0_SZ_HIGH + (i * 2));
+               val64 = (u64)val << 32;
 
+               val = ntb_spad_read(ndev, MW0_SZ_LOW + (i * 2));
                val64 |= val;
 
-               dev_dbg(&pdev->dev, "Remote MW%d size = %llu\n", i, val64);
+               dev_dbg(&pdev->dev, "Remote MW%d size = %#llx\n", i, val64);
 
                rc = ntb_set_mw(nt, i, val64);
                if (rc)
                        goto out1;
        }
 
-       nt->transport_link = NTB_LINK_UP;
+       nt->link_is_up = true;
 
-       for (i = 0; i < nt->max_qps; i++) {
-               struct ntb_transport_qp *qp = &nt->qps[i];
+       for (i = 0; i < nt->qp_count; i++) {
+               struct ntb_transport_qp *qp = &nt->qp_vec[i];
 
                ntb_transport_setup_qp_mw(nt, i);
 
-               if (qp->client_ready == NTB_LINK_UP)
+               if (qp->client_ready)
                        schedule_delayed_work(&qp->link_work, 0);
        }
 
        return;
 
 out1:
-       for (i = 0; i < ntb_max_mw(ndev); i++)
+       for (i = 0; i < nt->mw_count; i++)
                ntb_free_mw(nt, i);
 out:
-       if (ntb_hw_link_status(ndev))
+       if (ntb_link_is_up(ndev, NULL, NULL) == 1)
                schedule_delayed_work(&nt->link_work,
                                      msecs_to_jiffies(NTB_LINK_DOWN_TIMEOUT));
 }
@@ -814,73 +846,73 @@ static void ntb_qp_link_work(struct work_struct *work)
        struct ntb_transport_qp *qp = container_of(work,
                                                   struct ntb_transport_qp,
                                                   link_work.work);
-       struct pci_dev *pdev = ntb_query_pdev(qp->ndev);
-       struct ntb_transport *nt = qp->transport;
-       int rc, val;
+       struct pci_dev *pdev = qp->ndev->pdev;
+       struct ntb_transport_ctx *nt = qp->transport;
+       int val;
 
-       WARN_ON(nt->transport_link != NTB_LINK_UP);
+       WARN_ON(!nt->link_is_up);
 
-       rc = ntb_read_local_spad(nt->ndev, QP_LINKS, &val);
-       if (rc) {
-               dev_err(&pdev->dev, "Error reading spad %d\n", QP_LINKS);
-               return;
-       }
+       val = ntb_spad_read(nt->ndev, QP_LINKS);
 
-       rc = ntb_write_remote_spad(nt->ndev, QP_LINKS, val | 1 << qp->qp_num);
-       if (rc)
-               dev_err(&pdev->dev, "Error writing %x to remote spad %d\n",
-                       val | 1 << qp->qp_num, QP_LINKS);
+       ntb_peer_spad_write(nt->ndev, QP_LINKS, val | BIT(qp->qp_num));
 
        /* query remote spad for qp ready bits */
-       rc = ntb_read_remote_spad(nt->ndev, QP_LINKS, &val);
-       if (rc)
-               dev_err(&pdev->dev, "Error reading remote spad %d\n", QP_LINKS);
-
-       dev_dbg(&pdev->dev, "Remote QP link status = %x\n", val);
+       ntb_peer_spad_read(nt->ndev, QP_LINKS);
+       dev_dbg_ratelimited(&pdev->dev, "Remote QP link status = %x\n", val);
 
        /* See if the remote side is up */
-       if (1 << qp->qp_num & val) {
-               qp->qp_link = NTB_LINK_UP;
-
+       if (val & BIT(qp->qp_num)) {
                dev_info(&pdev->dev, "qp %d: Link Up\n", qp->qp_num);
+               qp->link_is_up = true;
+
                if (qp->event_handler)
-                       qp->event_handler(qp->cb_data, NTB_LINK_UP);
-       } else if (nt->transport_link == NTB_LINK_UP)
+                       qp->event_handler(qp->cb_data, qp->link_is_up);
+       } else if (nt->link_is_up)
                schedule_delayed_work(&qp->link_work,
                                      msecs_to_jiffies(NTB_LINK_DOWN_TIMEOUT));
 }
 
-static int ntb_transport_init_queue(struct ntb_transport *nt,
+static int ntb_transport_init_queue(struct ntb_transport_ctx *nt,
                                    unsigned int qp_num)
 {
        struct ntb_transport_qp *qp;
+       struct ntb_transport_mw *mw;
+       phys_addr_t mw_base;
+       resource_size_t mw_size;
        unsigned int num_qps_mw, tx_size;
-       u8 mw_num, mw_max;
+       unsigned int mw_num, mw_count, qp_count;
        u64 qp_offset;
 
-       mw_max = ntb_max_mw(nt->ndev);
-       mw_num = QP_TO_MW(nt->ndev, qp_num);
+       mw_count = nt->mw_count;
+       qp_count = nt->qp_count;
 
-       qp = &nt->qps[qp_num];
+       mw_num = QP_TO_MW(nt, qp_num);
+       mw = &nt->mw_vec[mw_num];
+
+       qp = &nt->qp_vec[qp_num];
        qp->qp_num = qp_num;
        qp->transport = nt;
        qp->ndev = nt->ndev;
-       qp->qp_link = NTB_LINK_DOWN;
-       qp->client_ready = NTB_LINK_DOWN;
+       qp->client_ready = false;
        qp->event_handler = NULL;
+       ntb_qp_link_down_reset(qp);
 
-       if (nt->max_qps % mw_max && mw_num + 1 < nt->max_qps / mw_max)
-               num_qps_mw = nt->max_qps / mw_max + 1;
+       if (qp_count % mw_count && mw_num + 1 < qp_count / mw_count)
+               num_qps_mw = qp_count / mw_count + 1;
        else
-               num_qps_mw = nt->max_qps / mw_max;
+               num_qps_mw = qp_count / mw_count;
+
+       mw_base = nt->mw_vec[mw_num].phys_addr;
+       mw_size = nt->mw_vec[mw_num].phys_size;
 
-       tx_size = (unsigned int) ntb_get_mw_size(qp->ndev, mw_num) / num_qps_mw;
-       qp_offset = qp_num / mw_max * tx_size;
-       qp->tx_mw = ntb_get_mw_vbase(nt->ndev, mw_num) + qp_offset;
+       tx_size = (unsigned int)mw_size / num_qps_mw;
+       qp_offset = tx_size * qp_num / mw_count;
+
+       qp->tx_mw = nt->mw_vec[mw_num].vbase + qp_offset;
        if (!qp->tx_mw)
                return -EINVAL;
 
-       qp->tx_mw_phys = ntb_get_mw_base(qp->ndev, mw_num) + qp_offset;
+       qp->tx_mw_phys = mw_base + qp_offset;
        if (!qp->tx_mw_phys)
                return -EINVAL;
 
@@ -891,16 +923,19 @@ static int ntb_transport_init_queue(struct ntb_transport *nt,
        qp->tx_max_frame = min(transport_mtu, tx_size / 2);
        qp->tx_max_entry = tx_size / qp->tx_max_frame;
 
-       if (ntb_query_debugfs(nt->ndev)) {
+       if (nt_debugfs_dir) {
                char debugfs_name[4];
 
                snprintf(debugfs_name, 4, "qp%d", qp_num);
                qp->debugfs_dir = debugfs_create_dir(debugfs_name,
-                                                ntb_query_debugfs(nt->ndev));
+                                                    nt_debugfs_dir);
 
                qp->debugfs_stats = debugfs_create_file("stats", S_IRUSR,
                                                        qp->debugfs_dir, qp,
                                                        &ntb_qp_debugfs_stats);
+       } else {
+               qp->debugfs_dir = NULL;
+               qp->debugfs_stats = NULL;
        }
 
        INIT_DELAYED_WORK(&qp->link_work, ntb_qp_link_work);
@@ -914,46 +949,89 @@ static int ntb_transport_init_queue(struct ntb_transport *nt,
        INIT_LIST_HEAD(&qp->rx_free_q);
        INIT_LIST_HEAD(&qp->tx_free_q);
 
+       tasklet_init(&qp->rxc_db_work, ntb_transport_rxc_db,
+                    (unsigned long)qp);
+
        return 0;
 }
 
-int ntb_transport_init(struct pci_dev *pdev)
+static int ntb_transport_probe(struct ntb_client *self, struct ntb_dev *ndev)
 {
-       struct ntb_transport *nt;
+       struct ntb_transport_ctx *nt;
+       struct ntb_transport_mw *mw;
+       unsigned int mw_count, qp_count;
+       u64 qp_bitmap;
+       int node;
        int rc, i;
 
-       nt = kzalloc(sizeof(struct ntb_transport), GFP_KERNEL);
+       if (ntb_db_is_unsafe(ndev))
+               dev_dbg(&ndev->dev,
+                       "doorbell is unsafe, proceed anyway...\n");
+       if (ntb_spad_is_unsafe(ndev))
+               dev_dbg(&ndev->dev,
+                       "scratchpad is unsafe, proceed anyway...\n");
+
+       node = dev_to_node(&ndev->dev);
+
+       nt = kzalloc_node(sizeof(*nt), GFP_KERNEL, node);
        if (!nt)
                return -ENOMEM;
 
-       nt->ndev = ntb_register_transport(pdev, nt);
-       if (!nt->ndev) {
-               rc = -EIO;
+       nt->ndev = ndev;
+
+       mw_count = ntb_mw_count(ndev);
+
+       nt->mw_count = mw_count;
+
+       nt->mw_vec = kzalloc_node(mw_count * sizeof(*nt->mw_vec),
+                                 GFP_KERNEL, node);
+       if (!nt->mw_vec) {
+               rc = -ENOMEM;
                goto err;
        }
 
-       nt->mw = kcalloc(ntb_max_mw(nt->ndev), sizeof(struct ntb_transport_mw),
-                        GFP_KERNEL);
-       if (!nt->mw) {
-               rc = -ENOMEM;
-               goto err1;
+       for (i = 0; i < mw_count; i++) {
+               mw = &nt->mw_vec[i];
+
+               rc = ntb_mw_get_range(ndev, i, &mw->phys_addr, &mw->phys_size,
+                                     &mw->xlat_align, &mw->xlat_align_size);
+               if (rc)
+                       goto err1;
+
+               mw->vbase = ioremap_wc(mw->phys_addr, mw->phys_size);
+               if (!mw->vbase) {
+                       rc = -ENOMEM;
+                       goto err1;
+               }
+
+               mw->buff_size = 0;
+               mw->xlat_size = 0;
+               mw->virt_addr = NULL;
+               mw->dma_addr = 0;
        }
 
-       if (max_num_clients)
-               nt->max_qps = min(ntb_max_cbs(nt->ndev), max_num_clients);
-       else
-               nt->max_qps = min(ntb_max_cbs(nt->ndev), ntb_max_mw(nt->ndev));
+       qp_bitmap = ntb_db_valid_mask(ndev);
+
+       qp_count = ilog2(qp_bitmap);
+       if (max_num_clients && max_num_clients < qp_count)
+               qp_count = max_num_clients;
+       else if (mw_count < qp_count)
+               qp_count = mw_count;
+
+       qp_bitmap &= BIT_ULL(qp_count) - 1;
 
-       nt->qps = kcalloc(nt->max_qps, sizeof(struct ntb_transport_qp),
-                         GFP_KERNEL);
-       if (!nt->qps) {
+       nt->qp_count = qp_count;
+       nt->qp_bitmap = qp_bitmap;
+       nt->qp_bitmap_free = qp_bitmap;
+
+       nt->qp_vec = kzalloc_node(qp_count * sizeof(*nt->qp_vec),
+                                 GFP_KERNEL, node);
+       if (!nt->qp_vec) {
                rc = -ENOMEM;
                goto err2;
        }
 
-       nt->qp_bitmap = ((u64) 1 << nt->max_qps) - 1;
-
-       for (i = 0; i < nt->max_qps; i++) {
+       for (i = 0; i < qp_count; i++) {
                rc = ntb_transport_init_queue(nt, i);
                if (rc)
                        goto err3;
@@ -962,8 +1040,7 @@ int ntb_transport_init(struct pci_dev *pdev)
        INIT_DELAYED_WORK(&nt->link_work, ntb_transport_link_work);
        INIT_WORK(&nt->link_cleanup, ntb_transport_link_cleanup_work);
 
-       rc = ntb_register_event_callback(nt->ndev,
-                                        ntb_transport_event_callback);
+       rc = ntb_set_ctx(ndev, nt, &ntb_transport_ops);
        if (rc)
                goto err3;
 
@@ -972,51 +1049,61 @@ int ntb_transport_init(struct pci_dev *pdev)
        if (rc)
                goto err4;
 
-       if (ntb_hw_link_status(nt->ndev))
-               schedule_delayed_work(&nt->link_work, 0);
+       nt->link_is_up = false;
+       ntb_link_enable(ndev, NTB_SPEED_AUTO, NTB_WIDTH_AUTO);
+       ntb_link_event(ndev);
 
        return 0;
 
 err4:
-       ntb_unregister_event_callback(nt->ndev);
+       ntb_clear_ctx(ndev);
 err3:
-       kfree(nt->qps);
+       kfree(nt->qp_vec);
 err2:
-       kfree(nt->mw);
+       kfree(nt->mw_vec);
 err1:
-       ntb_unregister_transport(nt->ndev);
+       while (i--) {
+               mw = &nt->mw_vec[i];
+               iounmap(mw->vbase);
+       }
 err:
        kfree(nt);
        return rc;
 }
 
-void ntb_transport_free(void *transport)
+static void ntb_transport_free(struct ntb_client *self, struct ntb_dev *ndev)
 {
-       struct ntb_transport *nt = transport;
-       struct ntb_device *ndev = nt->ndev;
+       struct ntb_transport_ctx *nt = ndev->ctx;
+       struct ntb_transport_qp *qp;
+       u64 qp_bitmap_alloc;
        int i;
 
        ntb_transport_link_cleanup(nt);
+       cancel_work_sync(&nt->link_cleanup);
+       cancel_delayed_work_sync(&nt->link_work);
+
+       qp_bitmap_alloc = nt->qp_bitmap & ~nt->qp_bitmap_free;
 
        /* verify that all the qp's are freed */
-       for (i = 0; i < nt->max_qps; i++) {
-               if (!test_bit(i, &nt->qp_bitmap))
-                       ntb_transport_free_queue(&nt->qps[i]);
-               debugfs_remove_recursive(nt->qps[i].debugfs_dir);
+       for (i = 0; i < nt->qp_count; i++) {
+               qp = &nt->qp_vec[i];
+               if (qp_bitmap_alloc & BIT_ULL(i))
+                       ntb_transport_free_queue(qp);
+               debugfs_remove_recursive(qp->debugfs_dir);
        }
 
-       ntb_bus_remove(nt);
+       ntb_link_disable(ndev);
+       ntb_clear_ctx(ndev);
 
-       cancel_delayed_work_sync(&nt->link_work);
-
-       ntb_unregister_event_callback(ndev);
+       ntb_bus_remove(nt);
 
-       for (i = 0; i < ntb_max_mw(ndev); i++)
+       for (i = nt->mw_count; i--; ) {
                ntb_free_mw(nt, i);
+               iounmap(nt->mw_vec[i].vbase);
+       }
 
-       kfree(nt->qps);
-       kfree(nt->mw);
-       ntb_unregister_transport(ndev);
+       kfree(nt->qp_vec);
+       kfree(nt->mw_vec);
        kfree(nt);
 }
 
@@ -1028,15 +1115,13 @@ static void ntb_rx_copy_callback(void *data)
        unsigned int len = entry->len;
        struct ntb_payload_header *hdr = entry->rx_hdr;
 
-       /* Ensure that the data is fully copied out before clearing the flag */
-       wmb();
        hdr->flags = 0;
 
        iowrite32(entry->index, &qp->rx_info->entry);
 
        ntb_list_add(&qp->ntb_rx_free_q_lock, &entry->entry, &qp->rx_free_q);
 
-       if (qp->rx_handler && qp->client_ready == NTB_LINK_UP)
+       if (qp->rx_handler && qp->client_ready)
                qp->rx_handler(qp, qp->cb_data, cb_data, len);
 }
 
@@ -1047,6 +1132,9 @@ static void ntb_memcpy_rx(struct ntb_queue_entry *entry, void *offset)
 
        memcpy(buf, offset, len);
 
+       /* Ensure that the data is fully copied out before clearing the flag */
+       wmb();
+
        ntb_rx_copy_callback(entry);
 }
 
@@ -1071,8 +1159,8 @@ static void ntb_async_rx(struct ntb_queue_entry *entry, void *offset,
                goto err_wait;
 
        device = chan->device;
-       pay_off = (size_t) offset & ~PAGE_MASK;
-       buff_off = (size_t) buf & ~PAGE_MASK;
+       pay_off = (size_t)offset & ~PAGE_MASK;
+       buff_off = (size_t)buf & ~PAGE_MASK;
 
        if (!is_dma_copy_aligned(device, pay_off, buff_off, len))
                goto err_wait;
@@ -1138,86 +1226,103 @@ static int ntb_process_rxc(struct ntb_transport_qp *qp)
        struct ntb_payload_header *hdr;
        struct ntb_queue_entry *entry;
        void *offset;
+       int rc;
 
        offset = qp->rx_buff + qp->rx_max_frame * qp->rx_index;
        hdr = offset + qp->rx_max_frame - sizeof(struct ntb_payload_header);
 
-       entry = ntb_list_rm(&qp->ntb_rx_pend_q_lock, &qp->rx_pend_q);
-       if (!entry) {
-               dev_dbg(&ntb_query_pdev(qp->ndev)->dev,
-                       "no buffer - HDR ver %u, len %d, flags %x\n",
-                       hdr->ver, hdr->len, hdr->flags);
-               qp->rx_err_no_buf++;
-               return -ENOMEM;
-       }
+       dev_dbg(&qp->ndev->pdev->dev, "qp %d: RX ver %u len %d flags %x\n",
+               qp->qp_num, hdr->ver, hdr->len, hdr->flags);
 
        if (!(hdr->flags & DESC_DONE_FLAG)) {
-               ntb_list_add(&qp->ntb_rx_pend_q_lock, &entry->entry,
-                            &qp->rx_pend_q);
+               dev_dbg(&qp->ndev->pdev->dev, "done flag not set\n");
                qp->rx_ring_empty++;
                return -EAGAIN;
        }
 
-       if (hdr->ver != (u32) qp->rx_pkts) {
-               dev_dbg(&ntb_query_pdev(qp->ndev)->dev,
-                       "qp %d: version mismatch, expected %llu - got %u\n",
-                       qp->qp_num, qp->rx_pkts, hdr->ver);
-               ntb_list_add(&qp->ntb_rx_pend_q_lock, &entry->entry,
-                            &qp->rx_pend_q);
+       if (hdr->flags & LINK_DOWN_FLAG) {
+               dev_dbg(&qp->ndev->pdev->dev, "link down flag set\n");
+               ntb_qp_link_down(qp);
+               hdr->flags = 0;
+               return -EAGAIN;
+       }
+
+       if (hdr->ver != (u32)qp->rx_pkts) {
+               dev_dbg(&qp->ndev->pdev->dev,
+                       "version mismatch, expected %llu - got %u\n",
+                       qp->rx_pkts, hdr->ver);
                qp->rx_err_ver++;
                return -EIO;
        }
 
-       if (hdr->flags & LINK_DOWN_FLAG) {
-               ntb_qp_link_down(qp);
+       entry = ntb_list_rm(&qp->ntb_rx_pend_q_lock, &qp->rx_pend_q);
+       if (!entry) {
+               dev_dbg(&qp->ndev->pdev->dev, "no receive buffer\n");
+               qp->rx_err_no_buf++;
 
+               rc = -ENOMEM;
                goto err;
        }
 
-       dev_dbg(&ntb_query_pdev(qp->ndev)->dev,
-               "rx offset %u, ver %u - %d payload received, buf size %d\n",
-               qp->rx_index, hdr->ver, hdr->len, entry->len);
-
-       qp->rx_bytes += hdr->len;
-       qp->rx_pkts++;
-
        if (hdr->len > entry->len) {
-               qp->rx_err_oflow++;
-               dev_dbg(&ntb_query_pdev(qp->ndev)->dev,
-                       "RX overflow! Wanted %d got %d\n",
+               dev_dbg(&qp->ndev->pdev->dev,
+                       "receive buffer overflow! Wanted %d got %d\n",
                        hdr->len, entry->len);
+               qp->rx_err_oflow++;
 
+               rc = -EIO;
                goto err;
        }
 
+       dev_dbg(&qp->ndev->pdev->dev,
+               "RX OK index %u ver %u size %d into buf size %d\n",
+               qp->rx_index, hdr->ver, hdr->len, entry->len);
+
+       qp->rx_bytes += hdr->len;
+       qp->rx_pkts++;
+
        entry->index = qp->rx_index;
        entry->rx_hdr = hdr;
 
        ntb_async_rx(entry, offset, hdr->len);
 
-out:
        qp->rx_index++;
        qp->rx_index %= qp->rx_max_entry;
 
        return 0;
 
 err:
-       ntb_list_add(&qp->ntb_rx_pend_q_lock, &entry->entry, &qp->rx_pend_q);
-       /* Ensure that the data is fully copied out before clearing the flag */
-       wmb();
+       /* FIXME: if this synchronous update of the rx_index gets ahead of the
+        * asynchronous ntb_rx_copy_callback of the previous entry, there are three
+        * scenarios:
+        *
+        * 1) The peer might miss this update, but observe the update
+        * from the memcpy completion callback.  In this case, the buffer will
+        * not be freed on the peer to be reused for a different packet.  The
+        * successful rx of a later packet would clear the condition, but the
+        * condition could persist if several rx fail in a row.
+        *
+        * 2) The peer may observe this update before the asynchronous copy of
+        * prior packets is completed.  The peer may overwrite the buffers of
+        * the prior packets before they are copied.
+        *
+        * 3) Both: the peer may observe the update, and then observe the index
+        * decrement by the asynchronous completion callback.  Who knows what
+        * badness that will cause.
+        */
        hdr->flags = 0;
        iowrite32(qp->rx_index, &qp->rx_info->entry);
 
-       goto out;
+       return rc;
 }
 
-static int ntb_transport_rxc_db(void *data, int db_num)
+static void ntb_transport_rxc_db(unsigned long data)
 {
-       struct ntb_transport_qp *qp = data;
+       struct ntb_transport_qp *qp = (void *)data;
        int rc, i;
 
-       dev_dbg(&ntb_query_pdev(qp->ndev)->dev, "%s: doorbell %d received\n",
-               __func__, db_num);
+       dev_dbg(&qp->ndev->pdev->dev, "%s: doorbell %d received\n",
+               __func__, qp->qp_num);
 
        /* Limit the number of packets processed in a single interrupt to
         * provide fairness to others
@@ -1231,7 +1336,21 @@ static int ntb_transport_rxc_db(void *data, int db_num)
        if (qp->dma_chan)
                dma_async_issue_pending(qp->dma_chan);
 
-       return i;
+       if (i == qp->rx_max_entry) {
+               /* there is more work to do */
+               tasklet_schedule(&qp->rxc_db_work);
+       } else if (ntb_db_read(qp->ndev) & BIT_ULL(qp->qp_num)) {
+               /* the doorbell bit is set: clear it */
+               ntb_db_clear(qp->ndev, BIT_ULL(qp->qp_num));
+               /* ntb_db_read ensures ntb_db_clear write is committed */
+               ntb_db_read(qp->ndev);
+
+               /* an interrupt may have arrived between finishing
+                * ntb_process_rxc and clearing the doorbell bit:
+                * there might be some more work to do.
+                */
+               tasklet_schedule(&qp->rxc_db_work);
+       }
 }
 
 static void ntb_tx_copy_callback(void *data)
@@ -1240,11 +1359,9 @@ static void ntb_tx_copy_callback(void *data)
        struct ntb_transport_qp *qp = entry->qp;
        struct ntb_payload_header __iomem *hdr = entry->tx_hdr;
 
-       /* Ensure that the data is fully copied out before setting the flags */
-       wmb();
        iowrite32(entry->flags | DESC_DONE_FLAG, &hdr->flags);
 
-       ntb_ring_doorbell(qp->ndev, qp->qp_num);
+       ntb_peer_db_set(qp->ndev, BIT_ULL(qp->qp_num));
 
        /* The entry length can only be zero if the packet is intended to be a
         * "link down" or similar.  Since no payload is being sent in these
@@ -1263,7 +1380,18 @@ static void ntb_tx_copy_callback(void *data)
 
 static void ntb_memcpy_tx(struct ntb_queue_entry *entry, void __iomem *offset)
 {
+#ifdef ARCH_HAS_NOCACHE_UACCESS
+       /*
+        * Using non-temporal mov to improve performance on non-cached
+        * writes, even though we aren't actually copying from user space.
+        */
+       __copy_from_user_inatomic_nocache(offset, entry->buf, entry->len);
+#else
        memcpy_toio(offset, entry->buf, entry->len);
+#endif
+
+       /* Ensure that the data is fully copied out before setting the flags */
+       wmb();
 
        ntb_tx_copy_callback(entry);
 }
@@ -1288,7 +1416,7 @@ static void ntb_async_tx(struct ntb_transport_qp *qp,
        entry->tx_hdr = hdr;
 
        iowrite32(entry->len, &hdr->len);
-       iowrite32((u32) qp->tx_pkts, &hdr->ver);
+       iowrite32((u32)qp->tx_pkts, &hdr->ver);
 
        if (!chan)
                goto err;
@@ -1298,8 +1426,8 @@ static void ntb_async_tx(struct ntb_transport_qp *qp,
 
        device = chan->device;
        dest = qp->tx_mw_phys + qp->tx_max_frame * qp->tx_index;
-       buff_off = (size_t) buf & ~PAGE_MASK;
-       dest_off = (size_t) dest & ~PAGE_MASK;
+       buff_off = (size_t)buf & ~PAGE_MASK;
+       dest_off = (size_t)dest & ~PAGE_MASK;
 
        if (!is_dma_copy_aligned(device, buff_off, dest_off, len))
                goto err;
@@ -1347,9 +1475,6 @@ err:
 static int ntb_process_tx(struct ntb_transport_qp *qp,
                          struct ntb_queue_entry *entry)
 {
-       dev_dbg(&ntb_query_pdev(qp->ndev)->dev, "%lld - tx %u, entry len %d flags %x buff %p\n",
-               qp->tx_pkts, qp->tx_index, entry->len, entry->flags,
-               entry->buf);
        if (qp->tx_index == qp->remote_rx_info->entry) {
                qp->tx_ring_full++;
                return -EAGAIN;
@@ -1376,15 +1501,14 @@ static int ntb_process_tx(struct ntb_transport_qp *qp,
 
 static void ntb_send_link_down(struct ntb_transport_qp *qp)
 {
-       struct pci_dev *pdev = ntb_query_pdev(qp->ndev);
+       struct pci_dev *pdev = qp->ndev->pdev;
        struct ntb_queue_entry *entry;
        int i, rc;
 
-       if (qp->qp_link == NTB_LINK_DOWN)
+       if (!qp->link_is_up)
                return;
 
-       qp->qp_link = NTB_LINK_DOWN;
-       dev_info(&pdev->dev, "qp %d: Link Down\n", qp->qp_num);
+       dev_info(&pdev->dev, "qp %d: Send Link Down\n", qp->qp_num);
 
        for (i = 0; i < NTB_LINK_DOWN_TIMEOUT; i++) {
                entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q);
@@ -1405,6 +1529,13 @@ static void ntb_send_link_down(struct ntb_transport_qp *qp)
        if (rc)
                dev_err(&pdev->dev, "ntb: QP%d unable to send linkdown msg\n",
                        qp->qp_num);
+
+       ntb_qp_link_down_reset(qp);
+}
+
+static bool ntb_dma_filter_fn(struct dma_chan *chan, void *node)
+{
+       return dev_to_node(&chan->dev->device) == (int)(unsigned long)node;
 }
 
 /**
@@ -1422,18 +1553,25 @@ static void ntb_send_link_down(struct ntb_transport_qp *qp)
  * RETURNS: pointer to newly created ntb_queue, NULL on error.
  */
 struct ntb_transport_qp *
-ntb_transport_create_queue(void *data, struct pci_dev *pdev,
+ntb_transport_create_queue(void *data, struct device *client_dev,
                           const struct ntb_queue_handlers *handlers)
 {
+       struct ntb_dev *ndev;
+       struct pci_dev *pdev;
+       struct ntb_transport_ctx *nt;
        struct ntb_queue_entry *entry;
        struct ntb_transport_qp *qp;
-       struct ntb_transport *nt;
+       u64 qp_bit;
        unsigned int free_queue;
-       int rc, i;
+       dma_cap_mask_t dma_mask;
+       int node;
+       int i;
 
-       nt = ntb_find_transport(pdev);
-       if (!nt)
-               goto err;
+       ndev = dev_ntb(client_dev->parent);
+       pdev = ndev->pdev;
+       nt = ndev->ctx;
+
+       node = dev_to_node(&ndev->dev);
 
        free_queue = ffs(nt->qp_bitmap);
        if (!free_queue)
@@ -1442,23 +1580,31 @@ ntb_transport_create_queue(void *data, struct pci_dev *pdev,
        /* decrement free_queue to make it zero based */
        free_queue--;
 
-       clear_bit(free_queue, &nt->qp_bitmap);
+       qp = &nt->qp_vec[free_queue];
+       qp_bit = BIT_ULL(qp->qp_num);
+
+       nt->qp_bitmap_free &= ~qp_bit;
 
-       qp = &nt->qps[free_queue];
        qp->cb_data = data;
        qp->rx_handler = handlers->rx_handler;
        qp->tx_handler = handlers->tx_handler;
        qp->event_handler = handlers->event_handler;
 
-       dmaengine_get();
-       qp->dma_chan = dma_find_channel(DMA_MEMCPY);
-       if (!qp->dma_chan) {
-               dmaengine_put();
-               dev_info(&pdev->dev, "Unable to allocate DMA channel, using CPU instead\n");
+       dma_cap_zero(dma_mask);
+       dma_cap_set(DMA_MEMCPY, dma_mask);
+
+       if (use_dma) {
+               qp->dma_chan = dma_request_channel(dma_mask, ntb_dma_filter_fn,
+                                                  (void *)(unsigned long)node);
+               if (!qp->dma_chan)
+                       dev_info(&pdev->dev, "Unable to allocate DMA channel\n");
+       } else {
+               qp->dma_chan = NULL;
        }
+       dev_dbg(&pdev->dev, "Using %s memcpy\n", qp->dma_chan ? "DMA" : "CPU");
 
        for (i = 0; i < NTB_QP_DEF_NUM_ENTRIES; i++) {
-               entry = kzalloc(sizeof(struct ntb_queue_entry), GFP_ATOMIC);
+               entry = kzalloc_node(sizeof(*entry), GFP_ATOMIC, node);
                if (!entry)
                        goto err1;
 
@@ -1468,7 +1614,7 @@ ntb_transport_create_queue(void *data, struct pci_dev *pdev,
        }
 
        for (i = 0; i < NTB_QP_DEF_NUM_ENTRIES; i++) {
-               entry = kzalloc(sizeof(struct ntb_queue_entry), GFP_ATOMIC);
+               entry = kzalloc_node(sizeof(*entry), GFP_ATOMIC, node);
                if (!entry)
                        goto err2;
 
@@ -1477,10 +1623,8 @@ ntb_transport_create_queue(void *data, struct pci_dev *pdev,
                             &qp->tx_free_q);
        }
 
-       rc = ntb_register_db_callback(qp->ndev, free_queue, qp,
-                                     ntb_transport_rxc_db);
-       if (rc)
-               goto err2;
+       ntb_db_clear(qp->ndev, qp_bit);
+       ntb_db_clear_mask(qp->ndev, qp_bit);
 
        dev_info(&pdev->dev, "NTB Transport QP %d created\n", qp->qp_num);
 
@@ -1493,8 +1637,8 @@ err1:
        while ((entry = ntb_list_rm(&qp->ntb_rx_free_q_lock, &qp->rx_free_q)))
                kfree(entry);
        if (qp->dma_chan)
-               dmaengine_put();
-       set_bit(free_queue, &nt->qp_bitmap);
+               dma_release_channel(qp->dma_chan);
+       nt->qp_bitmap_free |= qp_bit;
 err:
        return NULL;
 }
@@ -1508,13 +1652,15 @@ EXPORT_SYMBOL_GPL(ntb_transport_create_queue);
  */
 void ntb_transport_free_queue(struct ntb_transport_qp *qp)
 {
+       struct ntb_transport_ctx *nt = qp->transport;
        struct pci_dev *pdev;
        struct ntb_queue_entry *entry;
+       u64 qp_bit;
 
        if (!qp)
                return;
 
-       pdev = ntb_query_pdev(qp->ndev);
+       pdev = qp->ndev->pdev;
 
        if (qp->dma_chan) {
                struct dma_chan *chan = qp->dma_chan;
@@ -1528,13 +1674,21 @@ void ntb_transport_free_queue(struct ntb_transport_qp *qp)
                 */
                dma_sync_wait(chan, qp->last_cookie);
                dmaengine_terminate_all(chan);
-               dmaengine_put();
+               dma_release_channel(chan);
        }
 
-       ntb_unregister_db_callback(qp->ndev, qp->qp_num);
+       qp_bit = BIT_ULL(qp->qp_num);
+
+       ntb_db_set_mask(qp->ndev, qp_bit);
+       tasklet_disable(&qp->rxc_db_work);
 
        cancel_delayed_work_sync(&qp->link_work);
 
+       qp->cb_data = NULL;
+       qp->rx_handler = NULL;
+       qp->tx_handler = NULL;
+       qp->event_handler = NULL;
+
        while ((entry = ntb_list_rm(&qp->ntb_rx_free_q_lock, &qp->rx_free_q)))
                kfree(entry);
 
@@ -1546,7 +1700,7 @@ void ntb_transport_free_queue(struct ntb_transport_qp *qp)
        while ((entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q)))
                kfree(entry);
 
-       set_bit(qp->qp_num, &qp->transport->qp_bitmap);
+       nt->qp_bitmap_free |= qp_bit;
 
        dev_info(&pdev->dev, "NTB Transport QP %d freed\n", qp->qp_num);
 }
@@ -1567,7 +1721,7 @@ void *ntb_transport_rx_remove(struct ntb_transport_qp *qp, unsigned int *len)
        struct ntb_queue_entry *entry;
        void *buf;
 
-       if (!qp || qp->client_ready == NTB_LINK_UP)
+       if (!qp || qp->client_ready)
                return NULL;
 
        entry = ntb_list_rm(&qp->ntb_rx_pend_q_lock, &qp->rx_pend_q);
@@ -1636,7 +1790,7 @@ int ntb_transport_tx_enqueue(struct ntb_transport_qp *qp, void *cb, void *data,
        struct ntb_queue_entry *entry;
        int rc;
 
-       if (!qp || qp->qp_link != NTB_LINK_UP || !len)
+       if (!qp || !qp->link_is_up || !len)
                return -EINVAL;
 
        entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q);
@@ -1670,9 +1824,9 @@ void ntb_transport_link_up(struct ntb_transport_qp *qp)
        if (!qp)
                return;
 
-       qp->client_ready = NTB_LINK_UP;
+       qp->client_ready = true;
 
-       if (qp->transport->transport_link == NTB_LINK_UP)
+       if (qp->transport->link_is_up)
                schedule_delayed_work(&qp->link_work, 0);
 }
 EXPORT_SYMBOL_GPL(ntb_transport_link_up);
@@ -1688,27 +1842,20 @@ EXPORT_SYMBOL_GPL(ntb_transport_link_up);
 void ntb_transport_link_down(struct ntb_transport_qp *qp)
 {
        struct pci_dev *pdev;
-       int rc, val;
+       int val;
 
        if (!qp)
                return;
 
-       pdev = ntb_query_pdev(qp->ndev);
-       qp->client_ready = NTB_LINK_DOWN;
+       pdev = qp->ndev->pdev;
+       qp->client_ready = false;
 
-       rc = ntb_read_local_spad(qp->ndev, QP_LINKS, &val);
-       if (rc) {
-               dev_err(&pdev->dev, "Error reading spad %d\n", QP_LINKS);
-               return;
-       }
+       val = ntb_spad_read(qp->ndev, QP_LINKS);
 
-       rc = ntb_write_remote_spad(qp->ndev, QP_LINKS,
-                                  val & ~(1 << qp->qp_num));
-       if (rc)
-               dev_err(&pdev->dev, "Error writing %x to remote spad %d\n",
-                       val & ~(1 << qp->qp_num), QP_LINKS);
+       ntb_peer_spad_write(qp->ndev, QP_LINKS,
+                           val & ~BIT(qp->qp_num));
 
-       if (qp->qp_link == NTB_LINK_UP)
+       if (qp->link_is_up)
                ntb_send_link_down(qp);
        else
                cancel_delayed_work_sync(&qp->link_work);
@@ -1728,7 +1875,7 @@ bool ntb_transport_link_query(struct ntb_transport_qp *qp)
        if (!qp)
                return false;
 
-       return qp->qp_link == NTB_LINK_UP;
+       return qp->link_is_up;
 }
 EXPORT_SYMBOL_GPL(ntb_transport_link_query);
 
@@ -1774,3 +1921,71 @@ unsigned int ntb_transport_max_size(struct ntb_transport_qp *qp)
        return max;
 }
 EXPORT_SYMBOL_GPL(ntb_transport_max_size);
+
+static void ntb_transport_doorbell_callback(void *data, int vector)
+{
+       struct ntb_transport_ctx *nt = data;
+       struct ntb_transport_qp *qp;
+       u64 db_bits;
+       unsigned int qp_num;
+
+       db_bits = (nt->qp_bitmap & ~nt->qp_bitmap_free &
+                  ntb_db_vector_mask(nt->ndev, vector));
+
+       while (db_bits) {
+               qp_num = __ffs(db_bits);
+               qp = &nt->qp_vec[qp_num];
+
+               tasklet_schedule(&qp->rxc_db_work);
+
+               db_bits &= ~BIT_ULL(qp_num);
+       }
+}
+
+static const struct ntb_ctx_ops ntb_transport_ops = {
+       .link_event = ntb_transport_event_callback,
+       .db_event = ntb_transport_doorbell_callback,
+};
+
+static struct ntb_client ntb_transport_client = {
+       .ops = {
+               .probe = ntb_transport_probe,
+               .remove = ntb_transport_free,
+       },
+};
+
+static int __init ntb_transport_init(void)
+{
+       int rc;
+
+       pr_info("%s, version %s\n", NTB_TRANSPORT_DESC, NTB_TRANSPORT_VER);
+
+       if (debugfs_initialized())
+               nt_debugfs_dir = debugfs_create_dir(KBUILD_MODNAME, NULL);
+
+       rc = bus_register(&ntb_transport_bus);
+       if (rc)
+               goto err_bus;
+
+       rc = ntb_register_client(&ntb_transport_client);
+       if (rc)
+               goto err_client;
+
+       return 0;
+
+err_client:
+       bus_unregister(&ntb_transport_bus);
+err_bus:
+       debugfs_remove_recursive(nt_debugfs_dir);
+       return rc;
+}
+module_init(ntb_transport_init);
+
+static void __exit ntb_transport_exit(void)
+{
+       debugfs_remove_recursive(nt_debugfs_dir);
+
+       ntb_unregister_client(&ntb_transport_client);
+       bus_unregister(&ntb_transport_bus);
+}
+module_exit(ntb_transport_exit);
diff --git a/drivers/ntb/test/Kconfig b/drivers/ntb/test/Kconfig
new file mode 100644 (file)
index 0000000..01852f9
--- /dev/null
@@ -0,0 +1,19 @@
+config NTB_PINGPONG
+       tristate "NTB Ping Pong Test Client"
+       help
+        This is a simple ping pong driver that exercises the scratchpads and
+        doorbells of the ntb hardware.  This driver may be used to test that
+        your ntb hardware and drivers are functioning at a basic level.
+
+        If unsure, say N.
+
+config NTB_TOOL
+       tristate "NTB Debugging Tool Test Client"
+       help
+        This is a simple debugging driver that enables the doorbell and
+        scratchpad registers to be read and written from the debugfs.  This
+        enables more complicated debugging to be scripted from user space.
+        This driver may be used to test that your ntb hardware and drivers are
+        functioning at a basic level.
+
+        If unsure, say N.
diff --git a/drivers/ntb/test/Makefile b/drivers/ntb/test/Makefile
new file mode 100644 (file)
index 0000000..0ea32a3
--- /dev/null
@@ -0,0 +1,2 @@
+obj-$(CONFIG_NTB_PINGPONG) += ntb_pingpong.o
+obj-$(CONFIG_NTB_TOOL) += ntb_tool.o
diff --git a/drivers/ntb/test/ntb_pingpong.c b/drivers/ntb/test/ntb_pingpong.c
new file mode 100644 (file)
index 0000000..fe16005
--- /dev/null
@@ -0,0 +1,250 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ *   redistributing this file, you may do so under either license.
+ *
+ *   GPL LICENSE SUMMARY
+ *
+ *   Copyright (C) 2015 EMC Corporation. All Rights Reserved.
+ *
+ *   This program is free software; you can redistribute it and/or modify
+ *   it under the terms of version 2 of the GNU General Public License as
+ *   published by the Free Software Foundation.
+ *
+ *   This program is distributed in the hope that it will be useful, but
+ *   WITHOUT ANY WARRANTY; without even the implied warranty of
+ *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ *   General Public License for more details.
+ *
+ *   BSD LICENSE
+ *
+ *   Copyright (C) 2015 EMC Corporation. All Rights Reserved.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in
+ *       the documentation and/or other materials provided with the
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * PCIe NTB Pingpong Linux driver
+ *
+ * Contact Information:
+ * Allen Hubbe <Allen.Hubbe@emc.com>
+ */
+
+/* Note: load this module with option 'dyndbg=+p' */
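+/* For illustration only: with the module parameters defined below, a debug
+ * session might be started with something like (values are arbitrary):
+ *
+ *   modprobe ntb_pingpong dyndbg=+p delay_ms=500 db_init=0x7
+ */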
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+
+#include <linux/dma-mapping.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+
+#include <linux/ntb.h>
+
+#define DRIVER_NAME                    "ntb_pingpong"
+#define DRIVER_DESCRIPTION             "PCIe NTB Simple Pingpong Client"
+
+#define DRIVER_LICENSE                 "Dual BSD/GPL"
+#define DRIVER_VERSION                 "1.0"
+#define DRIVER_RELDATE                 "24 March 2015"
+#define DRIVER_AUTHOR                  "Allen Hubbe <Allen.Hubbe@emc.com>"
+
+MODULE_LICENSE(DRIVER_LICENSE);
+MODULE_VERSION(DRIVER_VERSION);
+MODULE_AUTHOR(DRIVER_AUTHOR);
+MODULE_DESCRIPTION(DRIVER_DESCRIPTION);
+
+static unsigned int unsafe;
+module_param(unsafe, uint, 0644);
+MODULE_PARM_DESC(unsafe, "Run even though ntb operations may be unsafe");
+
+static unsigned int delay_ms = 1000;
+module_param(delay_ms, uint, 0644);
+MODULE_PARM_DESC(delay_ms, "Milliseconds to delay the response to peer");
+
+static unsigned long db_init = 0x7;
+module_param(db_init, ulong, 0644);
+MODULE_PARM_DESC(db_init, "Initial doorbell bits to ring on the peer");
+
+struct pp_ctx {
+       struct ntb_dev                  *ntb;
+       u64                             db_bits;
+       /* synchronize access to db_bits by ping and pong */
+       spinlock_t                      db_lock;
+       struct timer_list               db_timer;
+       unsigned long                   db_delay;
+};
+
+static void pp_ping(unsigned long ctx)
+{
+       struct pp_ctx *pp = (void *)ctx;
+       unsigned long irqflags;
+       u64 db_bits, db_mask;
+       u32 spad_rd, spad_wr;
+
+       spin_lock_irqsave(&pp->db_lock, irqflags);
+       {
+               db_mask = ntb_db_valid_mask(pp->ntb);
+               db_bits = ntb_db_read(pp->ntb);
+
+               if (db_bits) {
+                       dev_dbg(&pp->ntb->dev,
+                               "Masked pongs %#llx\n",
+                               db_bits);
+                       ntb_db_clear(pp->ntb, db_bits);
+               }
+
+               db_bits = ((pp->db_bits | db_bits) << 1) & db_mask;
+
+               if (!db_bits)
+                       db_bits = db_init;
+
+               spad_rd = ntb_spad_read(pp->ntb, 0);
+               spad_wr = spad_rd + 1;
+
+               dev_dbg(&pp->ntb->dev,
+                       "Ping bits %#llx read %#x write %#x\n",
+                       db_bits, spad_rd, spad_wr);
+
+               ntb_peer_spad_write(pp->ntb, 0, spad_wr);
+               ntb_peer_db_set(pp->ntb, db_bits);
+               ntb_db_clear_mask(pp->ntb, db_mask);
+
+               pp->db_bits = 0;
+       }
+       spin_unlock_irqrestore(&pp->db_lock, irqflags);
+}
+
+static void pp_link_event(void *ctx)
+{
+       struct pp_ctx *pp = ctx;
+
+       if (ntb_link_is_up(pp->ntb, NULL, NULL) == 1) {
+               dev_dbg(&pp->ntb->dev, "link is up\n");
+               pp_ping((unsigned long)pp);
+       } else {
+               dev_dbg(&pp->ntb->dev, "link is down\n");
+               del_timer(&pp->db_timer);
+       }
+}
+
+static void pp_db_event(void *ctx, int vec)
+{
+       struct pp_ctx *pp = ctx;
+       u64 db_bits, db_mask;
+       unsigned long irqflags;
+
+       spin_lock_irqsave(&pp->db_lock, irqflags);
+       {
+               db_mask = ntb_db_vector_mask(pp->ntb, vec);
+               db_bits = db_mask & ntb_db_read(pp->ntb);
+               ntb_db_set_mask(pp->ntb, db_mask);
+               ntb_db_clear(pp->ntb, db_bits);
+
+               pp->db_bits |= db_bits;
+
+               mod_timer(&pp->db_timer, jiffies + pp->db_delay);
+
+               dev_dbg(&pp->ntb->dev,
+                       "Pong vec %d bits %#llx\n",
+                       vec, db_bits);
+       }
+       spin_unlock_irqrestore(&pp->db_lock, irqflags);
+}
+
+static const struct ntb_ctx_ops pp_ops = {
+       .link_event = pp_link_event,
+       .db_event = pp_db_event,
+};
+
+static int pp_probe(struct ntb_client *client,
+                   struct ntb_dev *ntb)
+{
+       struct pp_ctx *pp;
+       int rc;
+
+       if (ntb_db_is_unsafe(ntb)) {
+               dev_dbg(&ntb->dev, "doorbell is unsafe\n");
+               if (!unsafe) {
+                       rc = -EINVAL;
+                       goto err_pp;
+               }
+       }
+
+       if (ntb_spad_is_unsafe(ntb)) {
+               dev_dbg(&ntb->dev, "scratchpad is unsafe\n");
+               if (!unsafe) {
+                       rc = -EINVAL;
+                       goto err_pp;
+               }
+       }
+
+       pp = kmalloc(sizeof(*pp), GFP_KERNEL);
+       if (!pp) {
+               rc = -ENOMEM;
+               goto err_pp;
+       }
+
+       pp->ntb = ntb;
+       pp->db_bits = 0;
+       spin_lock_init(&pp->db_lock);
+       setup_timer(&pp->db_timer, pp_ping, (unsigned long)pp);
+       pp->db_delay = msecs_to_jiffies(delay_ms);
+
+       rc = ntb_set_ctx(ntb, pp, &pp_ops);
+       if (rc)
+               goto err_ctx;
+
+       ntb_link_enable(ntb, NTB_SPEED_AUTO, NTB_WIDTH_AUTO);
+       ntb_link_event(ntb);
+
+       return 0;
+
+err_ctx:
+       kfree(pp);
+err_pp:
+       return rc;
+}
+
+static void pp_remove(struct ntb_client *client,
+                     struct ntb_dev *ntb)
+{
+       struct pp_ctx *pp = ntb->ctx;
+
+       ntb_clear_ctx(ntb);
+       del_timer_sync(&pp->db_timer);
+       ntb_link_disable(ntb);
+
+       kfree(pp);
+}
+
+static struct ntb_client pp_client = {
+       .ops = {
+               .probe = pp_probe,
+               .remove = pp_remove,
+       },
+};
+module_ntb_client(pp_client);
diff --git a/drivers/ntb/test/ntb_tool.c b/drivers/ntb/test/ntb_tool.c
new file mode 100644 (file)
index 0000000..6f5dc6c
--- /dev/null
@@ -0,0 +1,556 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ *   redistributing this file, you may do so under either license.
+ *
+ *   GPL LICENSE SUMMARY
+ *
+ *   Copyright (C) 2015 EMC Corporation. All Rights Reserved.
+ *
+ *   This program is free software; you can redistribute it and/or modify
+ *   it under the terms of version 2 of the GNU General Public License as
+ *   published by the Free Software Foundation.
+ *
+ *   This program is distributed in the hope that it will be useful, but
+ *   WITHOUT ANY WARRANTY; without even the implied warranty of
+ *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ *   General Public License for more details.
+ *
+ *   BSD LICENSE
+ *
+ *   Copyright (C) 2015 EMC Corporation. All Rights Reserved.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in
+ *       the documentation and/or other materials provided with the
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * PCIe NTB Debugging Tool Linux driver
+ *
+ * Contact Information:
+ * Allen Hubbe <Allen.Hubbe@emc.com>
+ */
+
+/*
+ * How to use this tool, by example.
+ *
+ * Assuming $DBG_DIR is something like:
+ * '/sys/kernel/debug/ntb_tool/0000:00:03.0'
+ *
+ * Eg: check if clearing the doorbell mask generates an interrupt.
+ *
+ * # Set the doorbell mask
+ * root@self# echo 's 1' > $DBG_DIR/mask
+ *
+ * # Ring the doorbell from the peer
+ * root@peer# echo 's 1' > $DBG_DIR/peer_db
+ *
+ * # Clear the doorbell mask
+ * root@self# echo 'c 1' > $DBG_DIR/mask
+ *
+ * Observe debugging output in dmesg or your console.  You should see a
+ * doorbell event triggered by clearing the mask.  If not, this may indicate an
+ * issue with the hardware that needs to be worked around in the driver.
+ *
+ * Eg: read and write scratchpad registers
+ *
+ * root@peer# echo '0 0x01010101 1 0x7f7f7f7f' > $DBG_DIR/peer_spad
+ *
+ * root@self# cat $DBG_DIR/spad
+ *
+ * Observe that spad 0 and 1 have the values set by the peer.
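+ *
+ * Eg: read the local doorbell bits (a sketch; 'db' is the debugfs file
+ * created by this driver, printed as a hex mask):
+ *
+ * root@self# cat $DBG_DIR/db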
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+
+#include <linux/debugfs.h>
+#include <linux/dma-mapping.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+
+#include <linux/ntb.h>
+
+#define DRIVER_NAME                    "ntb_tool"
+#define DRIVER_DESCRIPTION             "PCIe NTB Debugging Tool"
+
+#define DRIVER_LICENSE                 "Dual BSD/GPL"
+#define DRIVER_VERSION                 "1.0"
+#define DRIVER_RELDATE                 "22 April 2015"
+#define DRIVER_AUTHOR                  "Allen Hubbe <Allen.Hubbe@emc.com>"
+
+MODULE_LICENSE(DRIVER_LICENSE);
+MODULE_VERSION(DRIVER_VERSION);
+MODULE_AUTHOR(DRIVER_AUTHOR);
+MODULE_DESCRIPTION(DRIVER_DESCRIPTION);
+
+static struct dentry *tool_dbgfs;
+
+struct tool_ctx {
+       struct ntb_dev *ntb;
+       struct dentry *dbgfs;
+};
+
+#define SPAD_FNAME_SIZE 0x10
+#define INT_PTR(x) ((void *)(unsigned long)x)
+#define PTR_INT(x) ((int)(unsigned long)x)
+
+#define TOOL_FOPS_RDWR(__name, __read, __write) \
+       const struct file_operations __name = { \
+               .owner = THIS_MODULE,           \
+               .open = simple_open,            \
+               .read = __read,                 \
+               .write = __write,               \
+       }
+
+static void tool_link_event(void *ctx)
+{
+       struct tool_ctx *tc = ctx;
+       enum ntb_speed speed;
+       enum ntb_width width;
+       int up;
+
+       up = ntb_link_is_up(tc->ntb, &speed, &width);
+
+       dev_dbg(&tc->ntb->dev, "link is %s speed %d width %d\n",
+               up ? "up" : "down", speed, width);
+}
+
+static void tool_db_event(void *ctx, int vec)
+{
+       struct tool_ctx *tc = ctx;
+       u64 db_bits, db_mask;
+
+       db_mask = ntb_db_vector_mask(tc->ntb, vec);
+       db_bits = ntb_db_read(tc->ntb);
+
+       dev_dbg(&tc->ntb->dev, "doorbell vec %d mask %#llx bits %#llx\n",
+               vec, db_mask, db_bits);
+}
+
+static const struct ntb_ctx_ops tool_ops = {
+       .link_event = tool_link_event,
+       .db_event = tool_db_event,
+};
+
+static ssize_t tool_dbfn_read(struct tool_ctx *tc, char __user *ubuf,
+                             size_t size, loff_t *offp,
+                             u64 (*db_read_fn)(struct ntb_dev *))
+{
+       size_t buf_size;
+       char *buf;
+       ssize_t pos, rc;
+
+       if (!db_read_fn)
+               return -EINVAL;
+
+       buf_size = min_t(size_t, size, 0x20);
+
+       buf = kmalloc(buf_size, GFP_KERNEL);
+       if (!buf)
+               return -ENOMEM;
+
+       pos = scnprintf(buf, buf_size, "%#llx\n",
+                       db_read_fn(tc->ntb));
+
+       rc = simple_read_from_buffer(ubuf, size, offp, buf, pos);
+
+       kfree(buf);
+
+       return rc;
+}
+
+static ssize_t tool_dbfn_write(struct tool_ctx *tc,
+                              const char __user *ubuf,
+                              size_t size, loff_t *offp,
+                              int (*db_set_fn)(struct ntb_dev *, u64),
+                              int (*db_clear_fn)(struct ntb_dev *, u64))
+{
+       u64 db_bits;
+       char *buf, cmd;
+       ssize_t rc;
+       int n;
+
+       buf = kmalloc(size + 1, GFP_KERNEL);
+       if (!buf)
+               return -ENOMEM;
+
+       rc = simple_write_to_buffer(buf, size, offp, ubuf, size);
+       if (rc < 0) {
+               kfree(buf);
+               return rc;
+       }
+
+       buf[size] = 0;
+
+       n = sscanf(buf, "%c %lli", &cmd, &db_bits);
+
+       kfree(buf);
+
+       if (n != 2) {
+               rc = -EINVAL;
+       } else if (cmd == 's') {
+               if (!db_set_fn)
+                       rc = -EINVAL;
+               else
+                       rc = db_set_fn(tc->ntb, db_bits);
+       } else if (cmd == 'c') {
+               if (!db_clear_fn)
+                       rc = -EINVAL;
+               else
+                       rc = db_clear_fn(tc->ntb, db_bits);
+       } else {
+               rc = -EINVAL;
+       }
+
+       return rc ? : size;
+}
+
+static ssize_t tool_spadfn_read(struct tool_ctx *tc, char __user *ubuf,
+                               size_t size, loff_t *offp,
+                               u32 (*spad_read_fn)(struct ntb_dev *, int))
+{
+       size_t buf_size;
+       char *buf;
+       ssize_t pos, rc;
+       int i, spad_count;
+
+       if (!spad_read_fn)
+               return -EINVAL;
+
+       buf_size = min_t(size_t, size, 0x100);
+
+       buf = kmalloc(buf_size, GFP_KERNEL);
+       if (!buf)
+               return -ENOMEM;
+
+       pos = 0;
+
+       spad_count = ntb_spad_count(tc->ntb);
+       for (i = 0; i < spad_count; ++i) {
+               pos += scnprintf(buf + pos, buf_size - pos, "%d\t%#x\n",
+                                i, spad_read_fn(tc->ntb, i));
+       }
+
+       rc = simple_read_from_buffer(ubuf, size, offp, buf, pos);
+
+       kfree(buf);
+
+       return rc;
+}
+
+static ssize_t tool_spadfn_write(struct tool_ctx *tc,
+                                const char __user *ubuf,
+                                size_t size, loff_t *offp,
+                                int (*spad_write_fn)(struct ntb_dev *,
+                                                     int, u32))
+{
+       int spad_idx;
+       u32 spad_val;
+       char *buf;
+       int pos, n;
+       ssize_t rc;
+
+       if (!spad_write_fn) {
+               dev_dbg(&tc->ntb->dev, "no spad write fn\n");
+               return -EINVAL;
+       }
+
+       buf = kmalloc(size + 1, GFP_KERNEL);
+       if (!buf)
+               return -ENOMEM;
+
+       rc = simple_write_to_buffer(buf, size, offp, ubuf, size);
+       if (rc < 0) {
+               kfree(buf);
+               return rc;
+       }
+
+       buf[size] = 0;
+
+       n = sscanf(buf, "%d %i%n", &spad_idx, &spad_val, &pos);
+       while (n == 2) {
+               rc = spad_write_fn(tc->ntb, spad_idx, spad_val);
+               if (rc)
+                       break;
+
+               n = sscanf(buf + pos, "%d %i%n", &spad_idx, &spad_val, &pos);
+       }
+
+       if (n < 0)
+               rc = n;
+
+       kfree(buf);
+
+       return rc ? : size;
+}
+
+static ssize_t tool_db_read(struct file *filep, char __user *ubuf,
+                           size_t size, loff_t *offp)
+{
+       struct tool_ctx *tc = filep->private_data;
+
+       return tool_dbfn_read(tc, ubuf, size, offp,
+                             tc->ntb->ops->db_read);
+}
+
+static ssize_t tool_db_write(struct file *filep, const char __user *ubuf,
+                            size_t size, loff_t *offp)
+{
+       struct tool_ctx *tc = filep->private_data;
+
+       return tool_dbfn_write(tc, ubuf, size, offp,
+                              tc->ntb->ops->db_set,
+                              tc->ntb->ops->db_clear);
+}
+
+static TOOL_FOPS_RDWR(tool_db_fops,
+                     tool_db_read,
+                     tool_db_write);
+
+static ssize_t tool_mask_read(struct file *filep, char __user *ubuf,
+                             size_t size, loff_t *offp)
+{
+       struct tool_ctx *tc = filep->private_data;
+
+       return tool_dbfn_read(tc, ubuf, size, offp,
+                             tc->ntb->ops->db_read_mask);
+}
+
+static ssize_t tool_mask_write(struct file *filep, const char __user *ubuf,
+                              size_t size, loff_t *offp)
+{
+       struct tool_ctx *tc = filep->private_data;
+
+       return tool_dbfn_write(tc, ubuf, size, offp,
+                              tc->ntb->ops->db_set_mask,
+                              tc->ntb->ops->db_clear_mask);
+}
+
+static TOOL_FOPS_RDWR(tool_mask_fops,
+                     tool_mask_read,
+                     tool_mask_write);
+
+static ssize_t tool_peer_db_read(struct file *filep, char __user *ubuf,
+                                size_t size, loff_t *offp)
+{
+       struct tool_ctx *tc = filep->private_data;
+
+       return tool_dbfn_read(tc, ubuf, size, offp,
+                             tc->ntb->ops->peer_db_read);
+}
+
+static ssize_t tool_peer_db_write(struct file *filep, const char __user *ubuf,
+                                 size_t size, loff_t *offp)
+{
+       struct tool_ctx *tc = filep->private_data;
+
+       return tool_dbfn_write(tc, ubuf, size, offp,
+                              tc->ntb->ops->peer_db_set,
+                              tc->ntb->ops->peer_db_clear);
+}
+
+static TOOL_FOPS_RDWR(tool_peer_db_fops,
+                     tool_peer_db_read,
+                     tool_peer_db_write);
+
+static ssize_t tool_peer_mask_read(struct file *filep, char __user *ubuf,
+                                  size_t size, loff_t *offp)
+{
+       struct tool_ctx *tc = filep->private_data;
+
+       return tool_dbfn_read(tc, ubuf, size, offp,
+                             tc->ntb->ops->peer_db_read_mask);
+}
+
+static ssize_t tool_peer_mask_write(struct file *filep, const char __user *ubuf,
+                                   size_t size, loff_t *offp)
+{
+       struct tool_ctx *tc = filep->private_data;
+
+       return tool_dbfn_write(tc, ubuf, size, offp,
+                              tc->ntb->ops->peer_db_set_mask,
+                              tc->ntb->ops->peer_db_clear_mask);
+}
+
+static TOOL_FOPS_RDWR(tool_peer_mask_fops,
+                     tool_peer_mask_read,
+                     tool_peer_mask_write);
+
+static ssize_t tool_spad_read(struct file *filep, char __user *ubuf,
+                             size_t size, loff_t *offp)
+{
+       struct tool_ctx *tc = filep->private_data;
+
+       return tool_spadfn_read(tc, ubuf, size, offp,
+                               tc->ntb->ops->spad_read);
+}
+
+static ssize_t tool_spad_write(struct file *filep, const char __user *ubuf,
+                              size_t size, loff_t *offp)
+{
+       struct tool_ctx *tc = filep->private_data;
+
+       return tool_spadfn_write(tc, ubuf, size, offp,
+                                tc->ntb->ops->spad_write);
+}
+
+static TOOL_FOPS_RDWR(tool_spad_fops,
+                     tool_spad_read,
+                     tool_spad_write);
+
+static ssize_t tool_peer_spad_read(struct file *filep, char __user *ubuf,
+                                  size_t size, loff_t *offp)
+{
+       struct tool_ctx *tc = filep->private_data;
+
+       return tool_spadfn_read(tc, ubuf, size, offp,
+                               tc->ntb->ops->peer_spad_read);
+}
+
+static ssize_t tool_peer_spad_write(struct file *filep, const char __user *ubuf,
+                                   size_t size, loff_t *offp)
+{
+       struct tool_ctx *tc = filep->private_data;
+
+       return tool_spadfn_write(tc, ubuf, size, offp,
+                                tc->ntb->ops->peer_spad_write);
+}
+
+static TOOL_FOPS_RDWR(tool_peer_spad_fops,
+                     tool_peer_spad_read,
+                     tool_peer_spad_write);
+
+static void tool_setup_dbgfs(struct tool_ctx *tc)
+{
+       /* This module is useless without dbgfs... */
+       if (!tool_dbgfs) {
+               tc->dbgfs = NULL;
+               return;
+       }
+
+       tc->dbgfs = debugfs_create_dir(dev_name(&tc->ntb->dev),
+                                      tool_dbgfs);
+       if (!tc->dbgfs)
+               return;
+
+       debugfs_create_file("db", S_IRUSR | S_IWUSR, tc->dbgfs,
+                           tc, &tool_db_fops);
+
+       debugfs_create_file("mask", S_IRUSR | S_IWUSR, tc->dbgfs,
+                           tc, &tool_mask_fops);
+
+       debugfs_create_file("peer_db", S_IRUSR | S_IWUSR, tc->dbgfs,
+                           tc, &tool_peer_db_fops);
+
+       debugfs_create_file("peer_mask", S_IRUSR | S_IWUSR, tc->dbgfs,
+                           tc, &tool_peer_mask_fops);
+
+       debugfs_create_file("spad", S_IRUSR | S_IWUSR, tc->dbgfs,
+                           tc, &tool_spad_fops);
+
+       debugfs_create_file("peer_spad", S_IRUSR | S_IWUSR, tc->dbgfs,
+                           tc, &tool_peer_spad_fops);
+}
+
+static int tool_probe(struct ntb_client *self, struct ntb_dev *ntb)
+{
+       struct tool_ctx *tc;
+       int rc;
+
+       if (ntb_db_is_unsafe(ntb))
+               dev_dbg(&ntb->dev, "doorbell is unsafe\n");
+
+       if (ntb_spad_is_unsafe(ntb))
+               dev_dbg(&ntb->dev, "scratchpad is unsafe\n");
+
+       tc = kmalloc(sizeof(*tc), GFP_KERNEL);
+       if (!tc) {
+               rc = -ENOMEM;
+               goto err_tc;
+       }
+
+       tc->ntb = ntb;
+
+       tool_setup_dbgfs(tc);
+
+       rc = ntb_set_ctx(ntb, tc, &tool_ops);
+       if (rc)
+               goto err_ctx;
+
+       ntb_link_enable(ntb, NTB_SPEED_AUTO, NTB_WIDTH_AUTO);
+       ntb_link_event(ntb);
+
+       return 0;
+
+err_ctx:
+       debugfs_remove_recursive(tc->dbgfs);
+       kfree(tc);
+err_tc:
+       return rc;
+}
+
+static void tool_remove(struct ntb_client *self, struct ntb_dev *ntb)
+{
+       struct tool_ctx *tc = ntb->ctx;
+
+       ntb_clear_ctx(ntb);
+       ntb_link_disable(ntb);
+
+       debugfs_remove_recursive(tc->dbgfs);
+       kfree(tc);
+}
+
+static struct ntb_client tool_client = {
+       .ops = {
+               .probe = tool_probe,
+               .remove = tool_remove,
+       },
+};
+
+static int __init tool_init(void)
+{
+       int rc;
+
+       if (debugfs_initialized())
+               tool_dbgfs = debugfs_create_dir(KBUILD_MODNAME, NULL);
+
+       rc = ntb_register_client(&tool_client);
+       if (rc)
+               goto err_client;
+
+       return 0;
+
+err_client:
+       debugfs_remove_recursive(tool_dbgfs);
+       return rc;
+}
+module_init(tool_init);
+
+static void __exit tool_exit(void)
+{
+       ntb_unregister_client(&tool_client);
+       debugfs_remove_recursive(tool_dbgfs);
+}
+module_exit(tool_exit);
index 07bb3c8f191bed56c6c2293d68cefb3a6aa678ac..8df1b1777745e8e0dfb46611197a8238c929af4e 100644 (file)
@@ -1,15 +1,20 @@
 config DTC
        bool
 
-config OF
-       bool
+menuconfig OF
+       bool "Device Tree and Open Firmware support"
+       help
+         This option enables the device tree infrastructure.
+         It is automatically selected by platforms that need it or can
+         be enabled manually for unittests, overlays or
+         compile-coverage.
 
-menu "Device Tree and Open Firmware support"
-       depends on OF
+if OF
 
 config OF_UNITTEST
        bool "Device Tree runtime unit tests"
-       depends on OF_IRQ && OF_EARLY_FLATTREE
+       depends on OF_IRQ
+       select OF_EARLY_FLATTREE
        select OF_RESOLVE
        help
          This option builds in test cases for the device tree infrastructure
@@ -97,4 +102,4 @@ config OF_OVERLAY
          While this option is selected automatically when needed, you can
          enable it manually to improve device tree unit test coverage.
 
-endmenu # OF
+endif # OF
index fcacb186a67beee65742542f86fc45feff8e4352..156c072b31177eab081de4e54ab28b7815e32c95 100644 (file)
@@ -16,6 +16,3 @@ obj-$(CONFIG_OF_RESOLVE)  += resolver.o
 obj-$(CONFIG_OF_OVERLAY) += overlay.o
 
 obj-$(CONFIG_OF_UNITTEST) += unittest-data/
-
-CFLAGS_fdt.o = -I$(src)/../../scripts/dtc/libfdt
-CFLAGS_fdt_address.o = -I$(src)/../../scripts/dtc/libfdt
index 6906a3f61bd86d3874572c868480b8e7f6424c37..8bfda6ade2c02d3323533dee562dcb3b705a604e 100644 (file)
@@ -712,7 +712,7 @@ int __weak pci_register_io_range(phys_addr_t addr, resource_size_t size)
        }
 
        /* add the range to the list */
-       range = kzalloc(sizeof(*range), GFP_KERNEL);
+       range = kzalloc(sizeof(*range), GFP_ATOMIC);
        if (!range) {
                err = -ENOMEM;
                goto end_register;
index 6f85ff74c10dc02470454e5b4a0be54bd7da3ed3..8b5a187a768280c25efce25284f09875347834f1 100644 (file)
@@ -89,7 +89,7 @@ EXPORT_SYMBOL(of_n_size_cells);
 #ifdef CONFIG_NUMA
 int __weak of_node_to_nid(struct device_node *np)
 {
-       return numa_node_id();
+       return NUMA_NO_NODE;
 }
 #endif
 
index 20c1332a00182cb43bb707e2a7468dce73cdf991..8b91ea241b10f553f058885b5cc083a35e2a81fe 100644 (file)
@@ -163,6 +163,18 @@ void of_device_unregister(struct platform_device *ofdev)
 }
 EXPORT_SYMBOL(of_device_unregister);
 
+const void *of_device_get_match_data(const struct device *dev)
+{
+       const struct of_device_id *match;
+
+       match = of_match_device(dev->driver->of_match_table, dev);
+       if (!match)
+               return NULL;
+
+       return match->data;
+}
+EXPORT_SYMBOL(of_device_get_match_data);
+
 ssize_t of_device_get_modalias(struct device *dev, char *str, ssize_t len)
 {
        const char *compat;
index f2dd23a32267d0476104af2547084d36d7d834f9..07496560e5b9e2eb90ff913f2e688f5e85c194c7 100644 (file)
@@ -164,11 +164,14 @@ static void *unflatten_dt_alloc(void **mem, unsigned long size,
  * unflatten_dt_node - Alloc and populate a device_node from the flat tree
  * @blob: The parent device tree blob
  * @mem: Memory chunk to use for allocating device nodes and properties
- * @p: pointer to node in flat tree
+ * @poffset: pointer to node in flat tree
  * @dad: Parent struct device_node
+ * @nodepp: The device_node tree created by the call
  * @fpsize: Size of the node path up at the current depth.
+ * @dryrun: If true, do not allocate device nodes but still calculate needed
+ * memory size
  */
-static void * unflatten_dt_node(void *blob,
+static void * unflatten_dt_node(const void *blob,
                                void *mem,
                                int *poffset,
                                struct device_node *dad,
@@ -378,7 +381,7 @@ static void * unflatten_dt_node(void *blob,
  * @dt_alloc: An allocator that provides a virtual address to memory
  * for the resulting tree
  */
-static void __unflatten_device_tree(void *blob,
+static void __unflatten_device_tree(const void *blob,
                             struct device_node **mynodes,
                             void * (*dt_alloc)(u64 size, u64 align))
 {
@@ -441,7 +444,7 @@ static void *kernel_tree_alloc(u64 size, u64 align)
  * pointers of the nodes so the normal device-tree walking functions
  * can be used.
  */
-void of_fdt_unflatten_tree(unsigned long *blob,
+void of_fdt_unflatten_tree(const unsigned long *blob,
                        struct device_node **mynodes)
 {
        __unflatten_device_tree(blob, mynodes, &kernel_tree_alloc);
@@ -1024,6 +1027,11 @@ void * __init __weak early_init_dt_alloc_memory_arch(u64 size, u64 align)
        return __va(memblock_alloc(size, align));
 }
 #else
+void __init __weak early_init_dt_add_memory_arch(u64 base, u64 size)
+{
+       WARN_ON(1);
+}
+
 int __init __weak early_init_dt_reserve_memory_arch(phys_addr_t base,
                                        phys_addr_t size, bool nomap)
 {
@@ -1031,6 +1039,12 @@ int __init __weak early_init_dt_reserve_memory_arch(phys_addr_t base,
                  &base, &size, nomap ? " (nomap)" : "");
        return -ENOSYS;
 }
+
+void * __init __weak early_init_dt_alloc_memory_arch(u64 size, u64 align)
+{
+       WARN_ON(1);
+       return NULL;
+}
 #endif
 
 bool __init early_init_dt_verify(void *params)
index 1a7980692f254c6917371dec5a11f77f448723cf..3cf7a01f557f4fbe924479e379d599d03016f07f 100644 (file)
@@ -252,8 +252,6 @@ int of_irq_parse_raw(const __be32 *addr, struct of_phandle_args *out_irq)
                 * Successfully parsed an interrupt-map translation; copy new
                 * interrupt specifier into the out_irq structure
                 */
-               out_irq->np = newpar;
-
                match_array = imap - newaddrsize - newintsize;
                for (i = 0; i < newintsize; i++)
                        out_irq->args[i] = be32_to_cpup(imap - newintsize + i);
@@ -262,6 +260,7 @@ int of_irq_parse_raw(const __be32 *addr, struct of_phandle_args *out_irq)
 
        skiplevel:
                /* Iterate again with new parent */
+               out_irq->np = newpar;
                pr_debug(" -> new parent: %s\n", of_node_full_name(newpar));
                of_node_put(ipar);
                ipar = newpar;
@@ -469,7 +468,7 @@ int of_irq_to_resource_table(struct device_node *dev, struct resource *res,
 }
 EXPORT_SYMBOL_GPL(of_irq_to_resource_table);
 
-struct intc_desc {
+struct of_intc_desc {
        struct list_head        list;
        struct device_node      *dev;
        struct device_node      *interrupt_parent;
@@ -485,7 +484,7 @@ struct intc_desc {
 void __init of_irq_init(const struct of_device_id *matches)
 {
        struct device_node *np, *parent = NULL;
-       struct intc_desc *desc, *temp_desc;
+       struct of_intc_desc *desc, *temp_desc;
        struct list_head intc_desc_list, intc_parent_list;
 
        INIT_LIST_HEAD(&intc_desc_list);
@@ -496,7 +495,7 @@ void __init of_irq_init(const struct of_device_id *matches)
                                !of_device_is_available(np))
                        continue;
                /*
-                * Here, we allocate and populate an intc_desc with the node
+                * Here, we allocate and populate an of_intc_desc with the node
                 * pointer, interrupt-parent device_node etc.
                 */
                desc = kzalloc(sizeof(*desc), GFP_KERNEL);
index dee9270ba5471e730518b0ddc647469c8a52776f..24e025f7929932cb1dcf8c0a834b308a30b99fe2 100644 (file)
@@ -333,7 +333,7 @@ static DEFINE_IDR(ov_idr);
  * of the overlay in a list. This list can be used to prevent
  * illegal overlay removals.
  *
- * Returns the id of the created overlay, or an negative error number
+ * Returns the id of the created overlay, or a negative error number
  */
 int of_overlay_create(struct device_node *tree)
 {
@@ -481,7 +481,7 @@ static int overlay_removal_is_ok(struct of_overlay *ov)
  *
  * Removes an overlay if it is permissible.
  *
- * Returns 0 on success, or an negative error number
+ * Returns 0 on success, or a negative error number
  */
 int of_overlay_destroy(int id)
 {
@@ -528,7 +528,7 @@ EXPORT_SYMBOL_GPL(of_overlay_destroy);
  *
  * Removes all overlays from the system in the correct order.
  *
- * Returns 0 on success, or an negative error number
+ * Returns 0 on success, or a negative error number
  */
 int of_overlay_destroy_all(void)
 {
index b75d684aefcd78a81947c269c81215f8cc5020d4..734da589cdfb94b4312c3a8cd13dbe06c9c5604c 100644 (file)
@@ -221,10 +221,9 @@ static void ks_pcie_setup_interrupts(struct keystone_pcie *ks_pcie)
        /* MSI IRQ */
        if (IS_ENABLED(CONFIG_PCI_MSI)) {
                for (i = 0; i < ks_pcie->num_msi_host_irqs; i++) {
-                       irq_set_chained_handler(ks_pcie->msi_host_irqs[i],
-                                               ks_pcie_msi_irq_handler);
-                       irq_set_handler_data(ks_pcie->msi_host_irqs[i],
-                                            ks_pcie);
+                       irq_set_chained_handler_and_data(ks_pcie->msi_host_irqs[i],
+                                                        ks_pcie_msi_irq_handler,
+                                                        ks_pcie);
                }
        }
 }
index 240f388720857f0c1e3df0635d35fa71e6f05787..8b7a900cd28b25e5a8d52a10357768df1d6513aa 100644 (file)
@@ -20,6 +20,7 @@
 #include <linux/workqueue.h>
 #include <linux/bitops.h>
 #include <linux/time.h>
+#include <linux/ktime.h>
 #include <xen/platform_pci.h>
 
 #include <asm/xen/swiotlb-xen.h>
@@ -115,7 +116,6 @@ static int do_pci_op(struct pcifront_device *pdev, struct xen_pci_op *op)
        evtchn_port_t port = pdev->evtchn;
        unsigned irq = pdev->irq;
        s64 ns, ns_timeout;
-       struct timeval tv;
 
        spin_lock_irqsave(&pdev->sh_info_lock, irq_flags);
 
@@ -132,8 +132,7 @@ static int do_pci_op(struct pcifront_device *pdev, struct xen_pci_op *op)
         * (in the latter case we end up continually re-executing poll() with a
         * timeout in the past). 1s difference gives plenty of slack for error.
         */
-       do_gettimeofday(&tv);
-       ns_timeout = timeval_to_ns(&tv) + 2 * (s64)NSEC_PER_SEC;
+       ns_timeout = ktime_get_ns() + 2 * (s64)NSEC_PER_SEC;
 
        xen_clear_irq_pending(irq);
 
@@ -141,8 +140,7 @@ static int do_pci_op(struct pcifront_device *pdev, struct xen_pci_op *op)
                        (unsigned long *)&pdev->sh_info->flags)) {
                xen_poll_irq_timeout(irq, jiffies + 3*HZ);
                xen_clear_irq_pending(irq);
-               do_gettimeofday(&tv);
-               ns = timeval_to_ns(&tv);
+               ns = ktime_get_ns();
                if (ns > ns_timeout) {
                        dev_err(&pdev->xdev->dev,
                                "pciback not responding!!!\n");
index 4c04360f378bb94037b77c1283bc47bd245b7b7c..b2a189507fc35edfdc3057385df5c750d2ff2e8f 100644 (file)
@@ -11,6 +11,7 @@
 #include <linux/io.h>
 #include <linux/ioport.h>
 #include <linux/mm.h>
+#include <linux/module.h>
 #include <linux/platform_device.h>
 #include <linux/pm.h>
 #include <linux/resource.h>
index c4fc77aa766eebae13550798dc1ea049b7a64a92..ad1ea1695b4ae46781f7329416f8a94e2791cd4b 100644 (file)
@@ -1351,8 +1351,7 @@ int mtk_pctrl_init(struct platform_device *pdev,
                set_irq_flags(virq, IRQF_VALID);
        };
 
-       irq_set_chained_handler(irq, mtk_eint_irq_handler);
-       irq_set_handler_data(irq, pctl);
+       irq_set_chained_handler_and_data(irq, mtk_eint_irq_handler, pctl);
        set_irq_flags(irq, IRQF_VALID);
        return 0;
 
index 873433da0f2ce192651aa9003946399d16e82a0f..c3c3d2345fc6b23b4364d952e75d8887772f7221 100644 (file)
@@ -865,8 +865,8 @@ static int adi_gpio_pint_probe(struct platform_device *pdev)
        pint->pint_map_port = adi_pint_map_port;
        platform_set_drvdata(pdev, pint);
 
-       irq_set_chained_handler(pint->irq, adi_gpio_handle_pint_irq);
-       irq_set_handler_data(pint->irq, pint);
+       irq_set_chained_handler_and_data(pint->irq, adi_gpio_handle_pint_irq,
+                                        pint);
 
        list_add_tail(&pint->node, &adi_pint_list);
 
index d34ac879af9ef302f7b80906ff75db23d6b21217..c262e5f35c2808d873678560f4abc89dd75a1fae 100644 (file)
@@ -1661,8 +1661,8 @@ static int st_pctl_probe_dt(struct platform_device *pdev,
                if (IS_ERR(info->irqmux_base))
                        return PTR_ERR(info->irqmux_base);
 
-               irq_set_chained_handler(irq, st_gpio_irqmux_handler);
-               irq_set_handler_data(irq, info);
+               irq_set_chained_handler_and_data(irq, st_gpio_irqmux_handler,
+                                                info);
 
        }
 
index 0b7afa50121a249d385fddd5d1ae9a906a0f322b..b18dabba03a480e1cde292bc4ecca6263cce7c55 100644 (file)
@@ -563,8 +563,8 @@ static int exynos_eint_wkup_init(struct samsung_pinctrl_drv_data *d)
                return -ENOMEM;
        }
 
-       irq_set_chained_handler(irq, exynos_irq_demux_eint16_31);
-       irq_set_handler_data(irq, muxed_data);
+       irq_set_chained_handler_and_data(irq, exynos_irq_demux_eint16_31,
+                                        muxed_data);
 
        bank = d->pin_banks;
        idx = 0;
index f1993f42114c40ba565f3ae6a82a6c97f10ef1d7..01b43dbfb795b04eea99111a0d273ff70242e884 100644 (file)
@@ -514,8 +514,7 @@ static int s3c24xx_eint_init(struct samsung_pinctrl_drv_data *d)
                }
 
                eint_data->parents[i] = irq;
-               irq_set_chained_handler(irq, handlers[i]);
-               irq_set_handler_data(irq, eint_data);
+               irq_set_chained_handler_and_data(irq, handlers[i], eint_data);
        }
 
        bank = d->pin_banks;
index 7756c1e9e76313565d23431fd807278fa70b0246..ec8cc3b476213c4161faada14acf20bc3d1a9725 100644 (file)
@@ -506,8 +506,7 @@ static int s3c64xx_eint_gpio_init(struct samsung_pinctrl_drv_data *d)
                data->domains[nr_domains++] = bank->irq_domain;
        }
 
-       irq_set_chained_handler(d->irq, s3c64xx_eint_gpio_irq);
-       irq_set_handler_data(d->irq, data);
+       irq_set_chained_handler_and_data(d->irq, s3c64xx_eint_gpio_irq, data);
 
        return 0;
 }
@@ -731,8 +730,9 @@ static int s3c64xx_eint_eint0_init(struct samsung_pinctrl_drv_data *d)
                        return -ENXIO;
                }
 
-               irq_set_chained_handler(irq, s3c64xx_eint0_handlers[i]);
-               irq_set_handler_data(irq, data);
+               irq_set_chained_handler_and_data(irq,
+                                                s3c64xx_eint0_handlers[i],
+                                                data);
        }
 
        bank = d->pin_banks;
index d7857c72e627d9c65d8c21446d9f4392b1ccbb6c..f09573e132035a700d7b3f782b7c4aa50f70fc6d 100644 (file)
@@ -1005,9 +1005,9 @@ int sunxi_pinctrl_init(struct platform_device *pdev,
                writel(0xffffffff,
                        pctl->membase + sunxi_irq_status_reg_from_bank(i));
 
-               irq_set_chained_handler(pctl->irq[i],
-                                       sunxi_pinctrl_irq_handler);
-               irq_set_handler_data(pctl->irq[i], pctl);
+               irq_set_chained_handler_and_data(pctl->irq[i],
+                                                sunxi_pinctrl_irq_handler,
+                                                pctl);
        }
 
        dev_info(&pdev->dev, "initialized sunXi PIO driver\n");
index 8c43589c3edba9248c13760c8e92025b4a5cf599..1f52462f4cdd4b7e1431723ca37b32c267da68a1 100644 (file)
@@ -220,20 +220,10 @@ free_resources:
        return ret;
 }
 
-static int goldfish_pdev_bus_remove(struct platform_device *pdev)
-{
-       iounmap(pdev_bus_base);
-       free_irq(pdev_bus_irq, pdev);
-       release_mem_region(pdev_bus_addr, pdev_bus_len);
-       return 0;
-}
-
 static struct platform_driver goldfish_pdev_bus_driver = {
        .probe = goldfish_pdev_bus_probe,
-       .remove = goldfish_pdev_bus_remove,
        .driver = {
                .name = "goldfish_pdev_bus"
        }
 };
-
-module_platform_driver(goldfish_pdev_bus_driver);
+builtin_platform_driver(goldfish_pdev_bus_driver);
index 7137a077b9a6a6ae4b40f33c7a412c24f1a9112a..6dc13e4de3962ee66f7f2cf32dacbc7296529b64 100644 (file)
@@ -141,6 +141,22 @@ config DELL_SMO8800
          To compile this driver as a module, choose M here: the module will
          be called dell-smo8800.
 
+config DELL_RBTN
+       tristate "Dell Airplane Mode Switch driver"
+       depends on ACPI
+       depends on INPUT
+       depends on RFKILL
+       ---help---
+         Say Y here if you want to support Dell Airplane Mode Switch ACPI
+         device on Dell laptops. The device may be named DELLABCE or DELRBTN.
+         This driver registers an rfkill device or an input hotkey device
+         depending on the hardware type (HW switch slider or keyboard toggle
+         button). For rfkill devices it receives HW switch events and sets
+         the correct hard rfkill state.
+
+         To compile this driver as a module, choose M here: the module will
+         be called dell-rbtn.
+
 
 config FUJITSU_LAPTOP
        tristate "Fujitsu Laptop Extras"
@@ -622,7 +638,6 @@ config ACPI_TOSHIBA
        select NEW_LEDS
        depends on BACKLIGHT_CLASS_DEVICE
        depends on INPUT
-       depends on RFKILL || RFKILL = n
        depends on SERIO_I8042 || SERIO_I8042 = n
        depends on ACPI_VIDEO || ACPI_VIDEO = n
        select INPUT_POLLDEV
@@ -653,6 +668,7 @@ config ACPI_TOSHIBA
 config TOSHIBA_BT_RFKILL
        tristate "Toshiba Bluetooth RFKill switch support"
        depends on ACPI
+       depends on RFKILL || RFKILL = n
        ---help---
          This driver adds support for Bluetooth events for the RFKill
          switch on modern Toshiba laptops with full ACPI support and
@@ -896,4 +912,11 @@ config PVPANIC
          a paravirtualized device provided by QEMU; it lets a virtual machine
          (guest) communicate panic events to the host.
 
+config INTEL_PMC_IPC
+       tristate "Intel PMC IPC Driver"
+       ---help---
+       This driver provides support for PMC control on some Intel platforms.
+       The PMC is an ARC processor which defines IPC commands for communication
+       with other entities in the CPU.
+
 endif # X86_PLATFORM_DEVICES
index f82232b1fc4d719491ba5e2550603fed50802e51..dda95a98532101c1d8d55b54bb47d4545ece5bc4 100644 (file)
@@ -14,6 +14,7 @@ obj-$(CONFIG_DELL_LAPTOP)     += dell-laptop.o
 obj-$(CONFIG_DELL_WMI)         += dell-wmi.o
 obj-$(CONFIG_DELL_WMI_AIO)     += dell-wmi-aio.o
 obj-$(CONFIG_DELL_SMO8800)     += dell-smo8800.o
+obj-$(CONFIG_DELL_RBTN)                += dell-rbtn.o
 obj-$(CONFIG_ACER_WMI)         += acer-wmi.o
 obj-$(CONFIG_ACERHDF)          += acerhdf.o
 obj-$(CONFIG_HP_ACCEL)         += hp_accel.o
@@ -58,3 +59,4 @@ obj-$(CONFIG_INTEL_SMARTCONNECT)      += intel-smartconnect.o
 
 obj-$(CONFIG_PVPANIC)           += pvpanic.o
 obj-$(CONFIG_ALIENWARE_WMI)    += alienware-wmi.o
+obj-$(CONFIG_INTEL_PMC_IPC)    += intel_pmc_ipc.o
index 6f8558f744a4e275df3ffda283c2b97d3232efd5..efbc3f0c592b7a9f6abfc1497e27d9593592f33c 100644 (file)
@@ -78,6 +78,7 @@ MODULE_LICENSE("GPL");
 #define ASUS_WMI_METHODID_GPID         0x44495047 /* Get Panel ID?? (Resol) */
 #define ASUS_WMI_METHODID_QMOD         0x444F4D51 /* Quiet MODe */
 #define ASUS_WMI_METHODID_SPLV         0x4C425053 /* Set Panel Light Value */
+#define ASUS_WMI_METHODID_AGFN         0x4E464741 /* FaN? */
 #define ASUS_WMI_METHODID_SFUN         0x4E554653 /* FUNCtionalities */
 #define ASUS_WMI_METHODID_SDSP         0x50534453 /* Set DiSPlay output */
 #define ASUS_WMI_METHODID_GDSP         0x50534447 /* Get DiSPlay output */
@@ -150,11 +151,37 @@ MODULE_LICENSE("GPL");
 #define ASUS_WMI_DSTS_BRIGHTNESS_MASK  0x000000FF
 #define ASUS_WMI_DSTS_MAX_BRIGTH_MASK  0x0000FF00
 
+#define ASUS_FAN_DESC                  "cpu_fan"
+#define ASUS_FAN_MFUN                  0x13
+#define ASUS_FAN_SFUN_READ             0x06
+#define ASUS_FAN_SFUN_WRITE            0x07
+#define ASUS_FAN_CTRL_MANUAL           1
+#define ASUS_FAN_CTRL_AUTO             2
+
 struct bios_args {
        u32 arg0;
        u32 arg1;
 } __packed;
 
+/*
+ * Struct that's used for all methods called via AGFN. The naming is
+ * identical to that in the AML code.
+ */
+struct agfn_args {
+       u16 mfun; /* probably "Multi-function" to be called */
+       u16 sfun; /* probably "Sub-function" to be called */
+       u16 len;  /* size of the whole struct, including subfunction fields */
+       u8 stas;  /* not currently used */
+       u8 err;   /* zero on success */
+} __packed;
+
+/* struct used for calling fan read and write methods */
+struct fan_args {
+       struct agfn_args agfn;  /* common fields */
+       u8 fan;                 /* fan number: 0 = set auto mode, 1 = 1st fan */
+       u32 speed;              /* read: RPM/100 - write: 0-255 */
+} __packed;
+
 /*
  * <platform>/    - debugfs root directory
  *   dev_id      - current dev_id
@@ -204,6 +231,10 @@ struct asus_wmi {
        struct asus_rfkill gps;
        struct asus_rfkill uwb;
 
+       bool asus_hwmon_fan_manual_mode;
+       int asus_hwmon_num_fans;
+       int asus_hwmon_pwm;
+
        struct hotplug_slot *hotplug_slot;
        struct mutex hotplug_lock;
        struct mutex wmi_lock;
@@ -294,6 +325,36 @@ exit:
        return 0;
 }
 
+static int asus_wmi_evaluate_method_agfn(const struct acpi_buffer args)
+{
+       struct acpi_buffer input;
+       u64 phys_addr;
+       u32 retval;
+       u32 status = -1;
+
+       /*
+        * Copy to a DMA-capable address, otherwise memory corruption occurs,
+        * since the BIOS has to be able to access it.
+        */
+       input.pointer = kzalloc(args.length, GFP_DMA | GFP_KERNEL);
+       input.length = args.length;
+       if (!input.pointer)
+               return -ENOMEM;
+       phys_addr = virt_to_phys(input.pointer);
+       memcpy(input.pointer, args.pointer, args.length);
+
+       status = asus_wmi_evaluate_method(ASUS_WMI_METHODID_AGFN,
+                                       phys_addr, 0, &retval);
+       if (!status)
+               memcpy(args.pointer, input.pointer, args.length);
+
+       kfree(input.pointer);
+       if (status)
+               return -ENXIO;
+
+       return retval;
+}
+
 static int asus_wmi_get_devstate(struct asus_wmi *asus, u32 dev_id, u32 *retval)
 {
        return asus_wmi_evaluate_method(asus->dsts_id, dev_id, 0, retval);
@@ -1022,35 +1083,228 @@ exit:
 /*
  * Hwmon device
  */
-static ssize_t asus_hwmon_pwm1(struct device *dev,
-                              struct device_attribute *attr,
-                              char *buf)
+static int asus_hwmon_agfn_fan_speed_read(struct asus_wmi *asus, int fan,
+                                         int *speed)
+{
+       struct fan_args args = {
+               .agfn.len = sizeof(args),
+               .agfn.mfun = ASUS_FAN_MFUN,
+               .agfn.sfun = ASUS_FAN_SFUN_READ,
+               .fan = fan,
+               .speed = 0,
+       };
+       struct acpi_buffer input = { (acpi_size) sizeof(args), &args };
+       int status;
+
+       if (fan != 1)
+               return -EINVAL;
+
+       status = asus_wmi_evaluate_method_agfn(input);
+
+       if (status || args.agfn.err)
+               return -ENXIO;
+
+       if (speed)
+               *speed = args.speed;
+
+       return 0;
+}
+
+static int asus_hwmon_agfn_fan_speed_write(struct asus_wmi *asus, int fan,
+                                    int *speed)
+{
+       struct fan_args args = {
+               .agfn.len = sizeof(args),
+               .agfn.mfun = ASUS_FAN_MFUN,
+               .agfn.sfun = ASUS_FAN_SFUN_WRITE,
+               .fan = fan,
+               .speed = speed ?  *speed : 0,
+       };
+       struct acpi_buffer input = { (acpi_size) sizeof(args), &args };
+       int status;
+
+       /* fan 1: set the 1st fan's speed; fan 0: set auto mode */
+       if (fan != 1 && fan != 0)
+               return -EINVAL;
+
+       status = asus_wmi_evaluate_method_agfn(input);
+
+       if (status || args.agfn.err)
+               return -ENXIO;
+
+       if (speed && fan == 1)
+               asus->asus_hwmon_pwm = *speed;
+
+       return 0;
+}
+
+/*
+ * Check if we can read the speed of one fan. If so, we assume we can also
+ * control it.
+ */
+static int asus_hwmon_get_fan_number(struct asus_wmi *asus, int *num_fans)
+{
+       int status;
+       int speed = 0;
+
+       *num_fans = 0;
+
+       status = asus_hwmon_agfn_fan_speed_read(asus, 1, &speed);
+       if (!status)
+               *num_fans = 1;
+
+       return 0;
+}
+
+static int asus_hwmon_fan_set_auto(struct asus_wmi *asus)
+{
+       int status;
+
+       status = asus_hwmon_agfn_fan_speed_write(asus, 0, NULL);
+       if (status)
+               return -ENXIO;
+
+       asus->asus_hwmon_fan_manual_mode = false;
+
+       return 0;
+}
+
+static int asus_hwmon_fan_rpm_show(struct device *dev, int fan)
 {
        struct asus_wmi *asus = dev_get_drvdata(dev);
-       u32 value;
+       int value;
+       int ret;
+
+       /* no speed readable in manual mode */
+       if (asus->asus_hwmon_fan_manual_mode)
+               return -ENXIO;
+
+       ret = asus_hwmon_agfn_fan_speed_read(asus, fan+1, &value);
+       if (ret) {
+               pr_warn("reading fan speed failed: %d\n", ret);
+               return -ENXIO;
+       }
+
+       return value;
+}
+
+static void asus_hwmon_pwm_show(struct asus_wmi *asus, int fan, int *value)
+{
        int err;
 
-       err = asus_wmi_get_devstate(asus, ASUS_WMI_DEVID_FAN_CTRL, &value);
+       if (asus->asus_hwmon_pwm >= 0) {
+               *value = asus->asus_hwmon_pwm;
+               return;
+       }
 
+       err = asus_wmi_get_devstate(asus, ASUS_WMI_DEVID_FAN_CTRL, value);
        if (err < 0)
-               return err;
+               return;
 
-       value &= 0xFF;
-
-       if (value == 1) /* Low Speed */
-               value = 85;
-       else if (value == 2)
-               value = 170;
-       else if (value == 3)
-               value = 255;
-       else if (value != 0) {
-               pr_err("Unknown fan speed %#x\n", value);
-               value = -1;
+       *value &= 0xFF;
+
+       if (*value == 1) /* Low Speed */
+               *value = 85;
+       else if (*value == 2)
+               *value = 170;
+       else if (*value == 3)
+               *value = 255;
+       else if (*value) {
+               pr_err("Unknown fan speed %#x\n", *value);
+               *value = -1;
        }
+}
+
+static ssize_t pwm1_show(struct device *dev,
+                              struct device_attribute *attr,
+                              char *buf)
+{
+       struct asus_wmi *asus = dev_get_drvdata(dev);
+       int value;
+
+       asus_hwmon_pwm_show(asus, 0, &value);
 
        return sprintf(buf, "%d\n", value);
 }
 
+static ssize_t pwm1_store(struct device *dev,
+                                    struct device_attribute *attr,
+                                    const char *buf, size_t count) {
+       struct asus_wmi *asus = dev_get_drvdata(dev);
+       int value;
+       int state;
+       int ret;
+
+       ret = kstrtouint(buf, 10, &value);
+
+       if (ret)
+               return ret;
+
+       value = clamp(value, 0, 255);
+
+       state = asus_hwmon_agfn_fan_speed_write(asus, 1, &value);
+       if (state)
+               pr_warn("Setting fan speed failed: %d\n", state);
+       else
+               asus->asus_hwmon_fan_manual_mode = true;
+
+       return count;
+}
+
+static ssize_t fan1_input_show(struct device *dev,
+                                       struct device_attribute *attr,
+                                       char *buf)
+{
+       int value = asus_hwmon_fan_rpm_show(dev, 0);
+
+       return sprintf(buf, "%d\n", value < 0 ? -1 : value*100);
+
+}
+
+static ssize_t pwm1_enable_show(struct device *dev,
+                                                struct device_attribute *attr,
+                                                char *buf)
+{
+       struct asus_wmi *asus = dev_get_drvdata(dev);
+
+       if (asus->asus_hwmon_fan_manual_mode)
+               return sprintf(buf, "%d\n", ASUS_FAN_CTRL_MANUAL);
+
+       return sprintf(buf, "%d\n", ASUS_FAN_CTRL_AUTO);
+}
+
+static ssize_t pwm1_enable_store(struct device *dev,
+                                                 struct device_attribute *attr,
+                                                 const char *buf, size_t count)
+{
+       struct asus_wmi *asus = dev_get_drvdata(dev);
+       int status = 0;
+       int state;
+       int ret;
+
+       ret = kstrtouint(buf, 10, &state);
+
+       if (ret)
+               return ret;
+
+       if (state == ASUS_FAN_CTRL_MANUAL)
+               asus->asus_hwmon_fan_manual_mode = true;
+       else
+               status = asus_hwmon_fan_set_auto(asus);
+
+       if (status)
+               return status;
+
+       return count;
+}
+
+static ssize_t fan1_label_show(struct device *dev,
+                                         struct device_attribute *attr,
+                                         char *buf)
+{
+       return sprintf(buf, "%s\n", ASUS_FAN_DESC);
+}
+
 static ssize_t asus_hwmon_temp1(struct device *dev,
                                struct device_attribute *attr,
                                char *buf)
@@ -1069,11 +1323,21 @@ static ssize_t asus_hwmon_temp1(struct device *dev,
        return sprintf(buf, "%d\n", value);
 }
 
-static DEVICE_ATTR(pwm1, S_IRUGO, asus_hwmon_pwm1, NULL);
+/* Fan1 */
+static DEVICE_ATTR_RW(pwm1);
+static DEVICE_ATTR_RW(pwm1_enable);
+static DEVICE_ATTR_RO(fan1_input);
+static DEVICE_ATTR_RO(fan1_label);
+
+/* Temperature */
 static DEVICE_ATTR(temp1_input, S_IRUGO, asus_hwmon_temp1, NULL);
 
 static struct attribute *hwmon_attributes[] = {
        &dev_attr_pwm1.attr,
+       &dev_attr_pwm1_enable.attr,
+       &dev_attr_fan1_input.attr,
+       &dev_attr_fan1_label.attr,
+
        &dev_attr_temp1_input.attr,
        NULL
 };
@@ -1084,19 +1348,28 @@ static umode_t asus_hwmon_sysfs_is_visible(struct kobject *kobj,
        struct device *dev = container_of(kobj, struct device, kobj);
        struct platform_device *pdev = to_platform_device(dev->parent);
        struct asus_wmi *asus = platform_get_drvdata(pdev);
-       bool ok = true;
        int dev_id = -1;
+       int fan_attr = -1;
        u32 value = ASUS_WMI_UNSUPPORTED_METHOD;
+       bool ok = true;
 
        if (attr == &dev_attr_pwm1.attr)
                dev_id = ASUS_WMI_DEVID_FAN_CTRL;
        else if (attr == &dev_attr_temp1_input.attr)
                dev_id = ASUS_WMI_DEVID_THERMAL_CTRL;
 
+
+       if (attr == &dev_attr_fan1_input.attr
+           || attr == &dev_attr_fan1_label.attr
+           || attr == &dev_attr_pwm1.attr
+           || attr == &dev_attr_pwm1_enable.attr) {
+               fan_attr = 1;
+       }
+
        if (dev_id != -1) {
                int err = asus_wmi_get_devstate(asus, dev_id, &value);
 
-               if (err < 0)
+               if (err < 0 && fan_attr == -1)
                        return 0; /* can't return negative here */
        }
 
@@ -1112,10 +1385,16 @@ static umode_t asus_hwmon_sysfs_is_visible(struct kobject *kobj,
                if (value == ASUS_WMI_UNSUPPORTED_METHOD || value & 0xFFF80000
                    || (!asus->sfun && !(value & ASUS_WMI_DSTS_PRESENCE_BIT)))
                        ok = false;
+               else
+                       ok = fan_attr <= asus->asus_hwmon_num_fans;
        } else if (dev_id == ASUS_WMI_DEVID_THERMAL_CTRL) {
                /* If value is zero, something is clearly wrong */
-               if (value == 0)
+               if (!value)
                        ok = false;
+       } else if (fan_attr <= asus->asus_hwmon_num_fans && fan_attr != -1) {
+               ok = true;
+       } else {
+               ok = false;
        }
 
        return ok ? attr->mode : 0;
@@ -1723,6 +2002,25 @@ error_debugfs:
        return -ENOMEM;
 }
 
+static int asus_wmi_fan_init(struct asus_wmi *asus)
+{
+       int status;
+
+       asus->asus_hwmon_pwm = -1;
+       asus->asus_hwmon_num_fans = -1;
+       asus->asus_hwmon_fan_manual_mode = false;
+
+       status = asus_hwmon_get_fan_number(asus, &asus->asus_hwmon_num_fans);
+       if (status) {
+               asus->asus_hwmon_num_fans = 0;
+               pr_warn("Could not determine number of fans: %d\n", status);
+               return -ENXIO;
+       }
+
+       pr_info("Number of fans: %d\n", asus->asus_hwmon_num_fans);
+       return 0;
+}
+
 /*
  * WMI Driver
  */
@@ -1756,6 +2054,9 @@ static int asus_wmi_add(struct platform_device *pdev)
        if (err)
                goto fail_input;
 
+       err = asus_wmi_fan_init(asus); /* failure is probably harmless here */
+       asus_hwmon_fan_set_auto(asus);
+
        err = asus_wmi_hwmon_init(asus);
        if (err)
                goto fail_hwmon;
@@ -1831,6 +2132,7 @@ static int asus_wmi_remove(struct platform_device *device)
        asus_wmi_rfkill_exit(asus);
        asus_wmi_debugfs_exit(asus);
        asus_wmi_platform_exit(asus);
+       asus_hwmon_fan_set_auto(asus);
 
        kfree(asus);
        return 0;
index 01d081052b508b795580cfec441fa0f8201b7d75..ed317ccac4a2dc8c490291cb561bb986203dcef2 100644 (file)
@@ -33,6 +33,7 @@
 #include <linux/seq_file.h>
 #include <acpi/video.h>
 #include "../../firmware/dcdbas.h"
+#include "dell-rbtn.h"
 
 #define BRIGHTNESS_TOKEN 0x7d
 #define KBD_LED_OFF_TOKEN 0x01E1
@@ -306,7 +307,6 @@ static const struct dmi_system_id dell_quirks[] __initconst = {
 };
 
 static struct calling_interface_buffer *buffer;
-static struct page *bufferpage;
 static DEFINE_MUTEX(buffer_mutex);
 
 static int hwswitch_state;
@@ -423,45 +423,125 @@ static inline int dell_smi_error(int value)
        }
 }
 
-/* Derived from information in DellWirelessCtl.cpp:
-   Class 17, select 11 is radio control. It returns an array of 32-bit values.
-
-   Input byte 0 = 0: Wireless information
-
-   result[0]: return code
-   result[1]:
-     Bit 0:      Hardware switch supported
-     Bit 1:      Wifi locator supported
-     Bit 2:      Wifi is supported
-     Bit 3:      Bluetooth is supported
-     Bit 4:      WWAN is supported
-     Bit 5:      Wireless keyboard supported
-     Bits 6-7:   Reserved
-     Bit 8:      Wifi is installed
-     Bit 9:      Bluetooth is installed
-     Bit 10:     WWAN is installed
-     Bits 11-15: Reserved
-     Bit 16:     Hardware switch is on
-     Bit 17:     Wifi is blocked
-     Bit 18:     Bluetooth is blocked
-     Bit 19:     WWAN is blocked
-     Bits 20-31: Reserved
-   result[2]: NVRAM size in bytes
-   result[3]: NVRAM format version number
-
-   Input byte 0 = 2: Wireless switch configuration
-   result[0]: return code
-   result[1]:
-     Bit 0:      Wifi controlled by switch
-     Bit 1:      Bluetooth controlled by switch
-     Bit 2:      WWAN controlled by switch
-     Bits 3-6:   Reserved
-     Bit 7:      Wireless switch config locked
-     Bit 8:      Wifi locator enabled
-     Bits 9-14:  Reserved
-     Bit 15:     Wifi locator setting locked
-     Bits 16-31: Reserved
-*/
+/*
+ * Derived from information in smbios-wireless-ctl:
+ *
+ * cbSelect 17, Value 11
+ *
+ * Return Wireless Info
+ * cbArg1, byte0 = 0x00
+ *
+ *     cbRes1 Standard return codes (0, -1, -2)
+ *     cbRes2 Info bit flags:
+ *
+ *     0 Hardware switch supported (1)
+ *     1 WiFi locator supported (1)
+ *     2 WLAN supported (1)
+ *     3 Bluetooth (BT) supported (1)
+ *     4 WWAN supported (1)
+ *     5 Wireless KBD supported (1)
+ *     6 UWB supported (1)
+ *     7 WiGig supported (1)
+ *     8 WLAN installed (1)
+ *     9 BT installed (1)
+ *     10 WWAN installed (1)
+ *     11 UWB installed (1)
+ *     12 WiGig installed (1)
+ *     13-15 Reserved (0)
+ *     16 Hardware (HW) switch is On (1)
+ *     17 WLAN disabled (1)
+ *     18 BT disabled (1)
+ *     19 WWAN disabled (1)
+ *     20 UWB disabled (1)
+ *     21 WiGig disabled (1)
+ *     22-31 Reserved (0)
+ *
+ *     cbRes3 NVRAM size in bytes
+ *     cbRes4, byte 0 NVRAM format version number
+ *
+ *
+ * Set QuickSet Radio Disable Flag
+ *     cbArg1, byte0 = 0x01
+ *     cbArg1, byte1
+ *     Radio ID     value:
+ *     0        Radio Status
+ *     1        WLAN ID
+ *     2        BT ID
+ *     3        WWAN ID
+ *     4        UWB ID
+ *     5        WIGIG ID
+ *     cbArg1, byte2    Flag bits:
+ *             0 QuickSet disables radio (1)
+ *             1-7 Reserved (0)
+ *
+ *     cbRes1    Standard return codes (0, -1, -2)
+ *     cbRes2    QuickSet (QS) radio disable bit map:
+ *     0 QS disables WLAN
+ *     1 QS disables BT
+ *     2 QS disables WWAN
+ *     3 QS disables UWB
+ *     4 QS disables WIGIG
+ *     5-31 Reserved (0)
+ *
+ * Wireless Switch Configuration
+ *     cbArg1, byte0 = 0x02
+ *
+ *     cbArg1, byte1
+ *     Subcommand:
+ *     0 Get config
+ *     1 Set config
+ *     2 Set WiFi locator enable/disable
+ *     cbArg1,byte2
+ *     Switch settings (if byte 1==1):
+ *     0 WLAN switch control (1)
+ *     1 BT switch control (1)
+ *     2 WWAN switch control (1)
+ *     3 UWB switch control (1)
+ *     4 WiGig switch control (1)
+ *     5-7 Reserved (0)
+ *    cbArg1, byte2 Enable bits (if byte 1==2):
+ *     0 Enable WiFi locator (1)
+ *
+ *    cbRes1     Standard return codes (0, -1, -2)
+ *    cbRes2 QuickSet radio disable bit map:
+ *     0 WLAN controlled by switch (1)
+ *     1 BT controlled by switch (1)
+ *     2 WWAN controlled by switch (1)
+ *     3 UWB controlled by switch (1)
+ *     4 WiGig controlled by switch (1)
+ *     5-6 Reserved (0)
+ *     7 Wireless switch config locked (1)
+ *     8 WiFi locator enabled (1)
+ *     9-14 Reserved (0)
+ *     15 WiFi locator setting locked (1)
+ *     16-31 Reserved (0)
+ *
+ * Read Local Config Data (LCD)
+ *     cbArg1, byte0 = 0x10
+ *     cbArg1, byte1 NVRAM index low byte
+ *     cbArg1, byte2 NVRAM index high byte
+ *     cbRes1 Standard return codes (0, -1, -2)
+ *     cbRes2 4 bytes read from LCD[index]
+ *     cbRes3 4 bytes read from LCD[index+4]
+ *     cbRes4 4 bytes read from LCD[index+8]
+ *
+ * Write Local Config Data (LCD)
+ *     cbArg1, byte0 = 0x11
+ *     cbArg1, byte1 NVRAM index low byte
+ *     cbArg1, byte2 NVRAM index high byte
+ *     cbArg2 4 bytes to write at LCD[index]
+ *     cbArg3 4 bytes to write at LCD[index+4]
+ *     cbArg4 4 bytes to write at LCD[index+8]
+ *     cbRes1 Standard return codes (0, -1, -2)
+ *
+ * Populate Local Config Data from NVRAM
+ *     cbArg1, byte0 = 0x12
+ *     cbRes1 Standard return codes (0, -1, -2)
+ *
+ * Commit Local Config Data to NVRAM
+ *     cbArg1, byte0 = 0x13
+ *     cbRes1 Standard return codes (0, -1, -2)
+ */
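For orientation, a minimal sketch (not taken from this patch; the helper name is invented) of how the cbRes2 info flags documented above are typically tested with BIT(), much like the dell_debugfs_show() changes further down:

/* Illustrative only: decode a few "Return Wireless Info" cbRes2 flags. */
static void example_decode_wireless_info(u32 status)
{
	bool hw_switch_supported = status & BIT(0);
	bool wigig_supported     = status & BIT(7);
	bool hw_switch_on        = status & BIT(16);
	bool wifi_blocked        = status & BIT(17);

	pr_debug("hw switch: supported=%d on=%d, wifi blocked=%d, wigig=%d\n",
		 hw_switch_supported, hw_switch_on, wifi_blocked,
		 wigig_supported);
}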
 
 static int dell_rfkill_set(void *data, bool blocked)
 {
@@ -549,12 +629,21 @@ static int dell_debugfs_show(struct seq_file *s, void *data)
                  (status & BIT(4)) >> 4);
        seq_printf(s, "Bit 5 : Wireless keyboard supported: %lu\n",
                  (status & BIT(5)) >> 5);
+       seq_printf(s, "Bit 6 : UWB supported:               %lu\n",
+                 (status & BIT(6)) >> 6);
+       seq_printf(s, "Bit 7 : WiGig supported:             %lu\n",
+                 (status & BIT(7)) >> 7);
        seq_printf(s, "Bit 8 : Wifi is installed:           %lu\n",
                  (status & BIT(8)) >> 8);
        seq_printf(s, "Bit 9 : Bluetooth is installed:      %lu\n",
                  (status & BIT(9)) >> 9);
        seq_printf(s, "Bit 10: WWAN is installed:           %lu\n",
                  (status & BIT(10)) >> 10);
+       seq_printf(s, "Bit 11: UWB installed:               %lu\n",
+                 (status & BIT(11)) >> 11);
+       seq_printf(s, "Bit 12: WiGig installed:             %lu\n",
+                 (status & BIT(12)) >> 12);
+
        seq_printf(s, "Bit 16: Hardware switch is on:       %lu\n",
                  (status & BIT(16)) >> 16);
        seq_printf(s, "Bit 17: Wifi is blocked:             %lu\n",
@@ -563,6 +652,10 @@ static int dell_debugfs_show(struct seq_file *s, void *data)
                  (status & BIT(18)) >> 18);
        seq_printf(s, "Bit 19: WWAN is blocked:             %lu\n",
                  (status & BIT(19)) >> 19);
+       seq_printf(s, "Bit 20: UWB is blocked:              %lu\n",
+                 (status & BIT(20)) >> 20);
+       seq_printf(s, "Bit 21: WiGig is blocked:            %lu\n",
+                 (status & BIT(21)) >> 21);
 
        seq_printf(s, "\nhwswitch_state:\t0x%X\n", hwswitch_state);
        seq_printf(s, "Bit 0 : Wifi controlled by switch:      %lu\n",
@@ -571,6 +664,10 @@ static int dell_debugfs_show(struct seq_file *s, void *data)
                   (hwswitch_state & BIT(1)) >> 1);
        seq_printf(s, "Bit 2 : WWAN controlled by switch:      %lu\n",
                   (hwswitch_state & BIT(2)) >> 2);
+       seq_printf(s, "Bit 3 : UWB controlled by switch:       %lu\n",
+                  (hwswitch_state & BIT(3)) >> 3);
+       seq_printf(s, "Bit 4 : WiGig controlled by switch:     %lu\n",
+                  (hwswitch_state & BIT(4)) >> 4);
        seq_printf(s, "Bit 7 : Wireless switch config locked:  %lu\n",
                   (hwswitch_state & BIT(7)) >> 7);
        seq_printf(s, "Bit 8 : Wifi locator enabled:           %lu\n",
@@ -643,6 +740,20 @@ static bool dell_laptop_i8042_filter(unsigned char data, unsigned char str,
        return false;
 }
 
+static int (*dell_rbtn_notifier_register_func)(struct notifier_block *);
+static int (*dell_rbtn_notifier_unregister_func)(struct notifier_block *);
+
+static int dell_laptop_rbtn_notifier_call(struct notifier_block *nb,
+                                         unsigned long action, void *data)
+{
+       schedule_delayed_work(&dell_rfkill_work, 0);
+       return NOTIFY_OK;
+}
+
+static struct notifier_block dell_laptop_rbtn_notifier = {
+       .notifier_call = dell_laptop_rbtn_notifier_call,
+};
+
 static int __init dell_setup_rfkill(void)
 {
        int status, ret, whitelisted;
@@ -719,10 +830,62 @@ static int __init dell_setup_rfkill(void)
                        goto err_wwan;
        }
 
-       ret = i8042_install_filter(dell_laptop_i8042_filter);
-       if (ret) {
-               pr_warn("Unable to install key filter\n");
+       /*
+        * The Dell Airplane Mode Switch driver (dell-rbtn) supports ACPI
+        * devices which can receive events from the HW slider switch.
+        *
+        * Dell SMBIOS on whitelisted models supports controlling radio devices
+        * but does not support receiving HW button switch events. We can use
+        * the i8042 filter hook function to receive keyboard data and handle
+        * the keycode for the HW button.
+        *
+        * So, if possible, we use the Dell Airplane Mode Switch ACPI driver
+        * for receiving HW events and Dell SMBIOS for setting rfkill states.
+        * If the ACPI driver or device is not available, we fall back to the
+        * i8042 filter hook function.
+        *
+        * To prevent duplicate rfkill devices which control the same thing,
+        * the dell-rbtn driver will automatically remove its own rfkill
+        * devices once dell_rbtn_notifier_register() is called.
+        */
+
+       dell_rbtn_notifier_register_func =
+               symbol_request(dell_rbtn_notifier_register);
+       if (dell_rbtn_notifier_register_func) {
+               dell_rbtn_notifier_unregister_func =
+                       symbol_request(dell_rbtn_notifier_unregister);
+               if (!dell_rbtn_notifier_unregister_func) {
+                       symbol_put(dell_rbtn_notifier_register);
+                       dell_rbtn_notifier_register_func = NULL;
+               }
+       }
+
+       if (dell_rbtn_notifier_register_func) {
+               ret = dell_rbtn_notifier_register_func(
+                       &dell_laptop_rbtn_notifier);
+               symbol_put(dell_rbtn_notifier_register);
+               dell_rbtn_notifier_register_func = NULL;
+               if (ret != 0) {
+                       symbol_put(dell_rbtn_notifier_unregister);
+                       dell_rbtn_notifier_unregister_func = NULL;
+               }
+       } else {
+               pr_info("Symbols from dell-rbtn acpi driver are not available\n");
+               ret = -ENODEV;
+       }
+
+       if (ret == 0) {
+               pr_info("Using dell-rbtn acpi driver for receiving events\n");
+       } else if (ret != -ENODEV) {
+               pr_warn("Unable to register dell rbtn notifier\n");
                goto err_filter;
+       } else {
+               ret = i8042_install_filter(dell_laptop_i8042_filter);
+               if (ret) {
+                       pr_warn("Unable to install key filter\n");
+                       goto err_filter;
+               }
+               pr_info("Using i8042 filter function for receiving events\n");
        }
 
        return 0;
@@ -745,6 +908,14 @@ err_wifi:
 
 static void dell_cleanup_rfkill(void)
 {
+       if (dell_rbtn_notifier_unregister_func) {
+               dell_rbtn_notifier_unregister_func(&dell_laptop_rbtn_notifier);
+               symbol_put(dell_rbtn_notifier_unregister);
+               dell_rbtn_notifier_unregister_func = NULL;
+       } else {
+               i8042_remove_filter(dell_laptop_i8042_filter);
+       }
+       cancel_delayed_work_sync(&dell_rfkill_work);
        if (wifi_rfkill) {
                rfkill_unregister(wifi_rfkill);
                rfkill_destroy(wifi_rfkill);
@@ -1897,12 +2068,11 @@ static int __init dell_init(void)
         * Allocate buffer below 4GB for SMI data--only 32-bit physical addr
         * is passed to SMI handler.
         */
-       bufferpage = alloc_page(GFP_KERNEL | GFP_DMA32);
-       if (!bufferpage) {
+       buffer = (void *)__get_free_page(GFP_KERNEL | GFP_DMA32);
+       if (!buffer) {
                ret = -ENOMEM;
                goto fail_buffer;
        }
-       buffer = page_address(bufferpage);
 
        ret = dell_setup_rfkill();
 
@@ -1957,11 +2127,9 @@ static int __init dell_init(void)
        return 0;
 
 fail_backlight:
-       i8042_remove_filter(dell_laptop_i8042_filter);
-       cancel_delayed_work_sync(&dell_rfkill_work);
        dell_cleanup_rfkill();
 fail_rfkill:
-       free_page((unsigned long)bufferpage);
+       free_page((unsigned long)buffer);
 fail_buffer:
        platform_device_del(platform_device);
 fail_platform_device2:
@@ -1979,8 +2147,6 @@ static void __exit dell_exit(void)
        if (quirks && quirks->touchpad_led)
                touchpad_led_exit();
        kbd_led_exit();
-       i8042_remove_filter(dell_laptop_i8042_filter);
-       cancel_delayed_work_sync(&dell_rfkill_work);
        backlight_device_unregister(dell_backlight_device);
        dell_cleanup_rfkill();
        if (platform_device) {
@@ -1991,7 +2157,14 @@ static void __exit dell_exit(void)
        free_page((unsigned long)buffer);
 }
 
-module_init(dell_init);
+/* The dell-rbtn.c driver exports functions which will not work correctly (and
+ * could crash the kernel) if they are called before dell-rbtn.c's init code
+ * has run. This is not a problem when dell-rbtn.c is built as an external
+ * module. When both files (dell-rbtn.c and dell-laptop.c) are compiled
+ * statically into the kernel, we need to ensure that dell_init() is called
+ * after dell-rbtn has been initialized. This is achieved by using
+ * late_initcall() instead of module_init().
+ */
+late_initcall(dell_init);
 module_exit(dell_exit);
 
 MODULE_AUTHOR("Matthew Garrett <mjg@redhat.com>");
diff --git a/drivers/platform/x86/dell-rbtn.c b/drivers/platform/x86/dell-rbtn.c
new file mode 100644 (file)
index 0000000..cd410e3
--- /dev/null
@@ -0,0 +1,423 @@
+/*
+    Dell Airplane Mode Switch driver
+    Copyright (C) 2014-2015  Pali Rohár <pali.rohar@gmail.com>
+
+    This program is free software; you can redistribute it and/or modify
+    it under the terms of the GNU General Public License as published by
+    the Free Software Foundation; either version 2 of the License, or
+    (at your option) any later version.
+
+    This program is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+*/
+
+#include <linux/module.h>
+#include <linux/acpi.h>
+#include <linux/rfkill.h>
+#include <linux/input.h>
+
+enum rbtn_type {
+       RBTN_UNKNOWN,
+       RBTN_TOGGLE,
+       RBTN_SLIDER,
+};
+
+struct rbtn_data {
+       enum rbtn_type type;
+       struct rfkill *rfkill;
+       struct input_dev *input_dev;
+};
+
+
+/*
+ * acpi functions
+ */
+
+static enum rbtn_type rbtn_check(struct acpi_device *device)
+{
+       unsigned long long output;
+       acpi_status status;
+
+       status = acpi_evaluate_integer(device->handle, "CRBT", NULL, &output);
+       if (ACPI_FAILURE(status))
+               return RBTN_UNKNOWN;
+
+       switch (output) {
+       case 0:
+       case 1:
+               return RBTN_TOGGLE;
+       case 2:
+       case 3:
+               return RBTN_SLIDER;
+       default:
+               return RBTN_UNKNOWN;
+       }
+}
+
+static int rbtn_get(struct acpi_device *device)
+{
+       unsigned long long output;
+       acpi_status status;
+
+       status = acpi_evaluate_integer(device->handle, "GRBT", NULL, &output);
+       if (ACPI_FAILURE(status))
+               return -EINVAL;
+
+       return !output;
+}
+
+static int rbtn_acquire(struct acpi_device *device, bool enable)
+{
+       struct acpi_object_list input;
+       union acpi_object param;
+       acpi_status status;
+
+       param.type = ACPI_TYPE_INTEGER;
+       param.integer.value = enable;
+       input.count = 1;
+       input.pointer = &param;
+
+       status = acpi_evaluate_object(device->handle, "ARBT", &input, NULL);
+       if (ACPI_FAILURE(status))
+               return -EINVAL;
+
+       return 0;
+}
+
+
+/*
+ * rfkill device
+ */
+
+static void rbtn_rfkill_query(struct rfkill *rfkill, void *data)
+{
+       struct acpi_device *device = data;
+       int state;
+
+       state = rbtn_get(device);
+       if (state < 0)
+               return;
+
+       rfkill_set_states(rfkill, state, state);
+}
+
+static int rbtn_rfkill_set_block(void *data, bool blocked)
+{
+       /* NOTE: setting soft rfkill state is not supported */
+       return -EINVAL;
+}
+
+static struct rfkill_ops rbtn_ops = {
+       .query = rbtn_rfkill_query,
+       .set_block = rbtn_rfkill_set_block,
+};
+
+static int rbtn_rfkill_init(struct acpi_device *device)
+{
+       struct rbtn_data *rbtn_data = device->driver_data;
+       int ret;
+
+       if (rbtn_data->rfkill)
+               return 0;
+
+       /*
+        * NOTE: rbtn controls all radio devices, not only WLAN,
+        *       but the rfkill interface does not support an "ANY" type,
+        *       so the "WLAN" type is used
+        */
+       rbtn_data->rfkill = rfkill_alloc("dell-rbtn", &device->dev,
+                                        RFKILL_TYPE_WLAN, &rbtn_ops, device);
+       if (!rbtn_data->rfkill)
+               return -ENOMEM;
+
+       ret = rfkill_register(rbtn_data->rfkill);
+       if (ret) {
+               rfkill_destroy(rbtn_data->rfkill);
+               rbtn_data->rfkill = NULL;
+               return ret;
+       }
+
+       return 0;
+}
+
+static void rbtn_rfkill_exit(struct acpi_device *device)
+{
+       struct rbtn_data *rbtn_data = device->driver_data;
+
+       if (!rbtn_data->rfkill)
+               return;
+
+       rfkill_unregister(rbtn_data->rfkill);
+       rfkill_destroy(rbtn_data->rfkill);
+       rbtn_data->rfkill = NULL;
+}
+
+static void rbtn_rfkill_event(struct acpi_device *device)
+{
+       struct rbtn_data *rbtn_data = device->driver_data;
+
+       if (rbtn_data->rfkill)
+               rbtn_rfkill_query(rbtn_data->rfkill, device);
+}
+
+
+/*
+ * input device
+ */
+
+static int rbtn_input_init(struct rbtn_data *rbtn_data)
+{
+       int ret;
+
+       rbtn_data->input_dev = input_allocate_device();
+       if (!rbtn_data->input_dev)
+               return -ENOMEM;
+
+       rbtn_data->input_dev->name = "DELL Wireless hotkeys";
+       rbtn_data->input_dev->phys = "dellabce/input0";
+       rbtn_data->input_dev->id.bustype = BUS_HOST;
+       rbtn_data->input_dev->evbit[0] = BIT(EV_KEY);
+       set_bit(KEY_RFKILL, rbtn_data->input_dev->keybit);
+
+       ret = input_register_device(rbtn_data->input_dev);
+       if (ret) {
+               input_free_device(rbtn_data->input_dev);
+               rbtn_data->input_dev = NULL;
+               return ret;
+       }
+
+       return 0;
+}
+
+static void rbtn_input_exit(struct rbtn_data *rbtn_data)
+{
+       input_unregister_device(rbtn_data->input_dev);
+       rbtn_data->input_dev = NULL;
+}
+
+static void rbtn_input_event(struct rbtn_data *rbtn_data)
+{
+       input_report_key(rbtn_data->input_dev, KEY_RFKILL, 1);
+       input_sync(rbtn_data->input_dev);
+       input_report_key(rbtn_data->input_dev, KEY_RFKILL, 0);
+       input_sync(rbtn_data->input_dev);
+}
+
+
+/*
+ * acpi driver
+ */
+
+static int rbtn_add(struct acpi_device *device);
+static int rbtn_remove(struct acpi_device *device);
+static void rbtn_notify(struct acpi_device *device, u32 event);
+
+static const struct acpi_device_id rbtn_ids[] = {
+       { "DELRBTN", 0 },
+       { "DELLABCE", 0 },
+       { "", 0 },
+};
+
+static struct acpi_driver rbtn_driver = {
+       .name = "dell-rbtn",
+       .ids = rbtn_ids,
+       .ops = {
+               .add = rbtn_add,
+               .remove = rbtn_remove,
+               .notify = rbtn_notify,
+       },
+       .owner = THIS_MODULE,
+};
+
+
+/*
+ * notifier export functions
+ */
+
+static bool auto_remove_rfkill = true;
+
+static ATOMIC_NOTIFIER_HEAD(rbtn_chain_head);
+
+static int rbtn_inc_count(struct device *dev, void *data)
+{
+       struct acpi_device *device = to_acpi_device(dev);
+       struct rbtn_data *rbtn_data = device->driver_data;
+       int *count = data;
+
+       if (rbtn_data->type == RBTN_SLIDER)
+               (*count)++;
+
+       return 0;
+}
+
+static int rbtn_switch_dev(struct device *dev, void *data)
+{
+       struct acpi_device *device = to_acpi_device(dev);
+       struct rbtn_data *rbtn_data = device->driver_data;
+       bool enable = data;
+
+       if (rbtn_data->type != RBTN_SLIDER)
+               return 0;
+
+       if (enable)
+               rbtn_rfkill_init(device);
+       else
+               rbtn_rfkill_exit(device);
+
+       return 0;
+}
+
+int dell_rbtn_notifier_register(struct notifier_block *nb)
+{
+       bool first;
+       int count;
+       int ret;
+
+       count = 0;
+       ret = driver_for_each_device(&rbtn_driver.drv, NULL, &count,
+                                    rbtn_inc_count);
+       if (ret || count == 0)
+               return -ENODEV;
+
+       first = !rbtn_chain_head.head;
+
+       ret = atomic_notifier_chain_register(&rbtn_chain_head, nb);
+       if (ret != 0)
+               return ret;
+
+       if (auto_remove_rfkill && first)
+               ret = driver_for_each_device(&rbtn_driver.drv, NULL,
+                                            (void *)false, rbtn_switch_dev);
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(dell_rbtn_notifier_register);
+
+int dell_rbtn_notifier_unregister(struct notifier_block *nb)
+{
+       int ret;
+
+       ret = atomic_notifier_chain_unregister(&rbtn_chain_head, nb);
+       if (ret != 0)
+               return ret;
+
+       if (auto_remove_rfkill && !rbtn_chain_head.head)
+               ret = driver_for_each_device(&rbtn_driver.drv, NULL,
+                                            (void *)true, rbtn_switch_dev);
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(dell_rbtn_notifier_unregister);
+
+
+/*
+ * acpi driver functions
+ */
+
+static int rbtn_add(struct acpi_device *device)
+{
+       struct rbtn_data *rbtn_data;
+       enum rbtn_type type;
+       int ret = 0;
+
+       type = rbtn_check(device);
+       if (type == RBTN_UNKNOWN) {
+               dev_info(&device->dev, "Unknown device type\n");
+               return -EINVAL;
+       }
+
+       ret = rbtn_acquire(device, true);
+       if (ret < 0) {
+               dev_err(&device->dev, "Cannot enable device\n");
+               return ret;
+       }
+
+       rbtn_data = devm_kzalloc(&device->dev, sizeof(*rbtn_data), GFP_KERNEL);
+       if (!rbtn_data)
+               return -ENOMEM;
+
+       rbtn_data->type = type;
+       device->driver_data = rbtn_data;
+
+       switch (rbtn_data->type) {
+       case RBTN_TOGGLE:
+               ret = rbtn_input_init(rbtn_data);
+               break;
+       case RBTN_SLIDER:
+               if (auto_remove_rfkill && rbtn_chain_head.head)
+                       ret = 0;
+               else
+                       ret = rbtn_rfkill_init(device);
+               break;
+       default:
+               ret = -EINVAL;
+       }
+
+       return ret;
+
+}
+
+static int rbtn_remove(struct acpi_device *device)
+{
+       struct rbtn_data *rbtn_data = device->driver_data;
+
+       switch (rbtn_data->type) {
+       case RBTN_TOGGLE:
+               rbtn_input_exit(rbtn_data);
+               break;
+       case RBTN_SLIDER:
+               rbtn_rfkill_exit(device);
+               break;
+       default:
+               break;
+       }
+
+       rbtn_acquire(device, false);
+       device->driver_data = NULL;
+
+       return 0;
+}
+
+static void rbtn_notify(struct acpi_device *device, u32 event)
+{
+       struct rbtn_data *rbtn_data = device->driver_data;
+
+       if (event != 0x80) {
+               dev_info(&device->dev, "Received unknown event (0x%x)\n",
+                        event);
+               return;
+       }
+
+       switch (rbtn_data->type) {
+       case RBTN_TOGGLE:
+               rbtn_input_event(rbtn_data);
+               break;
+       case RBTN_SLIDER:
+               rbtn_rfkill_event(device);
+               atomic_notifier_call_chain(&rbtn_chain_head, event, device);
+               break;
+       default:
+               break;
+       }
+}
+
+
+/*
+ * module functions
+ */
+
+module_acpi_driver(rbtn_driver);
+
+module_param(auto_remove_rfkill, bool, 0444);
+
+MODULE_PARM_DESC(auto_remove_rfkill, "Automatically remove rfkill devices when "
+                                    "other modules start receiving events "
+                                    "from this module and re-add them when "
+                                    "the last module stops receiving events "
+                                    "(default true)");
+MODULE_DEVICE_TABLE(acpi, rbtn_ids);
+MODULE_DESCRIPTION("Dell Airplane Mode Switch driver");
+MODULE_AUTHOR("Pali Rohár <pali.rohar@gmail.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/platform/x86/dell-rbtn.h b/drivers/platform/x86/dell-rbtn.h
new file mode 100644 (file)
index 0000000..c59cc6b
--- /dev/null
@@ -0,0 +1,24 @@
+/*
+    Dell Airplane Mode Switch driver
+    Copyright (C) 2014-2015  Pali Rohár <pali.rohar@gmail.com>
+
+    This program is free software; you can redistribute it and/or modify
+    it under the terms of the GNU General Public License as published by
+    the Free Software Foundation; either version 2 of the License, or
+    (at your option) any later version.
+
+    This program is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+*/
+
+#ifndef _DELL_RBTN_H_
+#define _DELL_RBTN_H_
+
+struct notifier_block;
+
+int dell_rbtn_notifier_register(struct notifier_block *nb);
+int dell_rbtn_notifier_unregister(struct notifier_block *nb);
+
+#endif
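A minimal, hypothetical sketch of a built-in consumer of the notifier interface declared above (the callback, notifier block, and init function names are invented); dell-laptop.c above instead resolves these symbols via symbol_request() so that the dependency stays optional:

#include <linux/init.h>
#include <linux/notifier.h>
#include "dell-rbtn.h"

static int example_rbtn_event(struct notifier_block *nb,
			      unsigned long action, void *data)
{
	/* data is the ACPI device that raised the 0x80 notify event */
	return NOTIFY_OK;
}

static struct notifier_block example_nb = {
	.notifier_call = example_rbtn_event,
};

static int __init example_init(void)
{
	/* Returns -ENODEV if no RBTN_SLIDER device has been probed */
	return dell_rbtn_notifier_register(&example_nb);
}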
index bea0228309443e1031607b09e0eef85e45b6011e..76b57388d01b5b73838bd3ab73741b5c096148d7 100644 (file)
@@ -465,8 +465,9 @@ static const struct ideapad_rfk_data ideapad_rfk_data[] = {
 static int ideapad_rfk_set(void *data, bool blocked)
 {
        struct ideapad_rfk_priv *priv = data;
+       int opcode = ideapad_rfk_data[priv->dev].opcode;
 
-       return write_ec_cmd(priv->priv->adev->handle, priv->dev, !blocked);
+       return write_ec_cmd(priv->priv->adev->handle, opcode, !blocked);
 }
 
 static struct rfkill_ops ideapad_rfk_ops = {
@@ -837,6 +838,13 @@ static const struct dmi_system_id no_hw_rfkill_list[] = {
                        DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo G40-30"),
                },
        },
+       {
+               .ident = "Lenovo G50-30",
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+                       DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo G50-30"),
+               },
+       },
        {
                .ident = "Lenovo Yoga 2 11 / 13 / Pro",
                .matches = {
diff --git a/drivers/platform/x86/intel_pmc_ipc.c b/drivers/platform/x86/intel_pmc_ipc.c
new file mode 100644 (file)
index 0000000..d734763
--- /dev/null
@@ -0,0 +1,767 @@
+/*
+ * intel_pmc_ipc.c: Driver for the Intel PMC IPC mechanism
+ *
+ * (C) Copyright 2014-2015 Intel Corporation
+ *
+ * This driver is based on the Intel SCU IPC driver (intel_scu_opc.c) by
+ *     Sreedhara DS <sreedhara.ds@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ *
+ * The PMC, which runs on an ARC processor, communicates with other entities
+ * running on the IA core through an IPC mechanism that carries messages
+ * between the IA core and the PMC.
+ */
+
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/pm.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <linux/pm_qos.h>
+#include <linux/kernel.h>
+#include <linux/bitops.h>
+#include <linux/sched.h>
+#include <linux/atomic.h>
+#include <linux/notifier.h>
+#include <linux/suspend.h>
+#include <linux/acpi.h>
+#include <asm/intel_pmc_ipc.h>
+#include <linux/mfd/lpc_ich.h>
+
+/*
+ * IPC registers
+ * An IA write to the IPC_CMD command register triggers an interrupt to the
+ * ARC. The ARC handles and services the interrupt, writes any optional data
+ * to the IPC1 registers, and updates the IPC_STS response register with the
+ * status.
+ */
+#define IPC_CMD                        0x0
+#define                IPC_CMD_MSI             0x100
+#define                IPC_CMD_SIZE            16
+#define                IPC_CMD_SUBCMD          12
+#define IPC_STATUS             0x04
+#define                IPC_STATUS_IRQ          0x4
+#define                IPC_STATUS_ERR          0x2
+#define                IPC_STATUS_BUSY         0x1
+#define IPC_SPTR               0x08
+#define IPC_DPTR               0x0C
+#define IPC_WRITE_BUFFER       0x80
+#define IPC_READ_BUFFER                0x90
+
+/*
+ * 16-byte buffer for sending data associated with IPC command.
+ */
+#define IPC_DATA_BUFFER_SIZE   16
+
+#define IPC_LOOP_CNT           3000000
+#define IPC_MAX_SEC            3
+
+#define IPC_TRIGGER_MODE_IRQ           true
+
+/* exported resources from IFWI */
+#define PLAT_RESOURCE_IPC_INDEX                0
+#define PLAT_RESOURCE_IPC_SIZE         0x1000
+#define PLAT_RESOURCE_GCR_SIZE         0x1000
+#define PLAT_RESOURCE_PUNIT_DATA_INDEX 1
+#define PLAT_RESOURCE_PUNIT_INTER_INDEX        2
+#define PLAT_RESOURCE_ACPI_IO_INDEX    0
+
+/*
+ * The BIOS does not create an ACPI device for each PMC function; instead it
+ * exports multiple resources from one ACPI device (IPC) for multiple
+ * functions. This driver is responsible for creating a platform device and
+ * exporting the resources for those functions.
+ */
+#define TCO_DEVICE_NAME                        "iTCO_wdt"
+#define SMI_EN_OFFSET                  0x30
+#define SMI_EN_SIZE                    4
+#define TCO_BASE_OFFSET                        0x60
+#define TCO_REGS_SIZE                  16
+#define PUNIT_DEVICE_NAME              "intel_punit_ipc"
+
+static const int iTCO_version = 3;
+
+static struct intel_pmc_ipc_dev {
+       struct device *dev;
+       void __iomem *ipc_base;
+       bool irq_mode;
+       int irq;
+       int cmd;
+       struct completion cmd_complete;
+
+       /* The following PMC BARs share the same ACPI device with the IPC */
+       void *acpi_io_base;
+       int acpi_io_size;
+       struct platform_device *tco_dev;
+
+       /* gcr */
+       void *gcr_base;
+       int gcr_size;
+
+       /* punit */
+       void *punit_base;
+       int punit_size;
+       void *punit_base2;
+       int punit_size2;
+       struct platform_device *punit_dev;
+} ipcdev;
+
+static char *ipc_err_sources[] = {
+       [IPC_ERR_NONE] =
+               "no error",
+       [IPC_ERR_CMD_NOT_SUPPORTED] =
+               "command not supported",
+       [IPC_ERR_CMD_NOT_SERVICED] =
+               "command not serviced",
+       [IPC_ERR_UNABLE_TO_SERVICE] =
+               "unable to service",
+       [IPC_ERR_CMD_INVALID] =
+               "command invalid",
+       [IPC_ERR_CMD_FAILED] =
+               "command failed",
+       [IPC_ERR_EMSECURITY] =
+               "Invalid Battery",
+       [IPC_ERR_UNSIGNEDKERNEL] =
+               "Unsigned kernel",
+};
+
+/* Prevent concurrent calls to the PMC */
+static DEFINE_MUTEX(ipclock);
+
+static inline void ipc_send_command(u32 cmd)
+{
+       ipcdev.cmd = cmd;
+       if (ipcdev.irq_mode) {
+               reinit_completion(&ipcdev.cmd_complete);
+               cmd |= IPC_CMD_MSI;
+       }
+       writel(cmd, ipcdev.ipc_base + IPC_CMD);
+}
+
+static inline u32 ipc_read_status(void)
+{
+       return readl(ipcdev.ipc_base + IPC_STATUS);
+}
+
+static inline void ipc_data_writel(u32 data, u32 offset)
+{
+       writel(data, ipcdev.ipc_base + IPC_WRITE_BUFFER + offset);
+}
+
+static inline u8 ipc_data_readb(u32 offset)
+{
+       return readb(ipcdev.ipc_base + IPC_READ_BUFFER + offset);
+}
+
+static inline u32 ipc_data_readl(u32 offset)
+{
+       return readl(ipcdev.ipc_base + IPC_READ_BUFFER + offset);
+}
+
+static int intel_pmc_ipc_check_status(void)
+{
+       int status;
+       int ret = 0;
+
+       if (ipcdev.irq_mode) {
+               if (0 == wait_for_completion_timeout(
+                               &ipcdev.cmd_complete, IPC_MAX_SEC * HZ))
+                       ret = -ETIMEDOUT;
+       } else {
+               int loop_count = IPC_LOOP_CNT;
+
+               while ((ipc_read_status() & IPC_STATUS_BUSY) && --loop_count)
+                       udelay(1);
+               if (loop_count == 0)
+                       ret = -ETIMEDOUT;
+       }
+
+       status = ipc_read_status();
+       if (ret == -ETIMEDOUT) {
+               dev_err(ipcdev.dev,
+                       "IPC timed out, TS=0x%x, CMD=0x%x\n",
+                       status, ipcdev.cmd);
+               return ret;
+       }
+
+       if (status & IPC_STATUS_ERR) {
+               int i;
+
+               ret = -EIO;
+               i = (status >> IPC_CMD_SIZE) & 0xFF;
+               if (i < ARRAY_SIZE(ipc_err_sources))
+                       dev_err(ipcdev.dev,
+                               "IPC failed: %s, STS=0x%x, CMD=0x%x\n",
+                               ipc_err_sources[i], status, ipcdev.cmd);
+               else
+                       dev_err(ipcdev.dev,
+                               "IPC failed: unknown, STS=0x%x, CMD=0x%x\n",
+                               status, ipcdev.cmd);
+               if ((i == IPC_ERR_UNSIGNEDKERNEL) || (i == IPC_ERR_EMSECURITY))
+                       ret = -EACCES;
+       }
+
+       return ret;
+}
+
+/*
+ * intel_pmc_ipc_simple_command
+ * @cmd: command
+ * @sub: sub type
+ */
+int intel_pmc_ipc_simple_command(int cmd, int sub)
+{
+       int ret;
+
+       mutex_lock(&ipclock);
+       if (ipcdev.dev == NULL) {
+               mutex_unlock(&ipclock);
+               return -ENODEV;
+       }
+       ipc_send_command(sub << IPC_CMD_SUBCMD | cmd);
+       ret = intel_pmc_ipc_check_status();
+       mutex_unlock(&ipclock);
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(intel_pmc_ipc_simple_command);
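+
+/*
+ * Illustrative use only (the sub-command value below is hypothetical and
+ * depends on the PMC firmware interface):
+ *
+ *      ret = intel_pmc_ipc_simple_command(PMC_IPC_NORTHPEAK_CTRL, 1);
+ *      if (ret)
+ *              pr_err("PMC IPC command failed: %d\n", ret);
+ */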
+
+/*
+ * intel_pmc_ipc_raw_cmd() - send an IPC command with data and DPTR/SPTR values
+ * @cmd: command
+ * @sub: sub type
+ * @in: input data
+ * @inlen: input length in bytes
+ * @out: output data
+ * @outlen: output length in dwords
+ * @dptr: data to write to the DPTR register
+ * @sptr: data to write to the SPTR register
+ */
+int intel_pmc_ipc_raw_cmd(u32 cmd, u32 sub, u8 *in, u32 inlen, u32 *out,
+                         u32 outlen, u32 dptr, u32 sptr)
+{
+       u32 wbuf[4] = { 0 };
+       int ret;
+       int i;
+
+       if (inlen > IPC_DATA_BUFFER_SIZE || outlen > IPC_DATA_BUFFER_SIZE / 4)
+               return -EINVAL;
+
+       mutex_lock(&ipclock);
+       if (ipcdev.dev == NULL) {
+               mutex_unlock(&ipclock);
+               return -ENODEV;
+       }
+       memcpy(wbuf, in, inlen);
+       writel(dptr, ipcdev.ipc_base + IPC_DPTR);
+       writel(sptr, ipcdev.ipc_base + IPC_SPTR);
+       /* The input data registers are 32 bits wide and inlen is in bytes */
+       for (i = 0; i < ((inlen + 3) / 4); i++)
+               ipc_data_writel(wbuf[i], 4 * i);
+       ipc_send_command((inlen << IPC_CMD_SIZE) |
+                       (sub << IPC_CMD_SUBCMD) | cmd);
+       ret = intel_pmc_ipc_check_status();
+       if (!ret) {
+               /* Output is read from 32-bit registers and outlen is in dwords */
+               for (i = 0; i < outlen; i++)
+                       *out++ = ipc_data_readl(4 * i);
+       }
+       mutex_unlock(&ipclock);
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(intel_pmc_ipc_raw_cmd);
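+
+/*
+ * Most callers do not need the DPTR/SPTR values; intel_pmc_ipc_command()
+ * below wraps this helper with both set to 0.
+ */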
+
+/*
+ * intel_pmc_ipc_command() - send an IPC command with input/output data
+ * @cmd: command
+ * @sub: sub type
+ * @in: input data
+ * @inlen: input length in bytes
+ * @out: output data
+ * @outlen: output length in dwords
+ */
+int intel_pmc_ipc_command(u32 cmd, u32 sub, u8 *in, u32 inlen,
+                         u32 *out, u32 outlen)
+{
+       return intel_pmc_ipc_raw_cmd(cmd, sub, in, inlen, out, outlen, 0, 0);
+}
+EXPORT_SYMBOL_GPL(intel_pmc_ipc_command);
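+
+/*
+ * Minimal usage sketch (illustrative only; command, sub-command and buffer
+ * sizes are hypothetical and depend on the PMC firmware):
+ *
+ *      u8 in[4] = { 0 };
+ *      u32 out[2];
+ *      int ret;
+ *
+ *      ret = intel_pmc_ipc_command(cmd, sub, in, sizeof(in), out,
+ *                                  ARRAY_SIZE(out));
+ *      if (ret)
+ *              pr_err("PMC IPC transfer failed: %d\n", ret);
+ */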
+
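+/*
+ * IPC completion interrupt handler: acknowledge the interrupt in the status
+ * register and wake the thread waiting in intel_pmc_ipc_check_status().
+ */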
+static irqreturn_t ioc(int irq, void *dev_id)
+{
+       int status;
+
+       if (ipcdev.irq_mode) {
+               status = ipc_read_status();
+               writel(status | IPC_STATUS_IRQ, ipcdev.ipc_base + IPC_STATUS);
+       }
+       complete(&ipcdev.cmd_complete);
+
+       return IRQ_HANDLED;
+}
+
+static int ipc_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+       resource_size_t pci_resource;
+       int ret;
+       int len;
+
+       ipcdev.dev = &pci_dev_get(pdev)->dev;
+       ipcdev.irq_mode = IPC_TRIGGER_MODE_IRQ;
+
+       ret = pci_enable_device(pdev);
+       if (ret)
+               return ret;
+
+       ret = pci_request_regions(pdev, "intel_pmc_ipc");
+       if (ret)
+               return ret;
+
+       pci_resource = pci_resource_start(pdev, 0);
+       len = pci_resource_len(pdev, 0);
+       if (!pci_resource || !len) {
+               dev_err(&pdev->dev, "Failed to get resource\n");
+               return -ENOMEM;
+       }
+
+       init_completion(&ipcdev.cmd_complete);
+
+       if (request_irq(pdev->irq, ioc, 0, "intel_pmc_ipc", &ipcdev)) {
+               dev_err(&pdev->dev, "Failed to request irq\n");
+               return -EBUSY;
+       }
+
+       ipcdev.ipc_base = ioremap_nocache(pci_resource, len);
+       if (!ipcdev.ipc_base) {
+               dev_err(&pdev->dev, "Failed to ioremap ipc base\n");
+               free_irq(pdev->irq, &ipcdev);
+               ret = -ENOMEM;
+       }
+
+       return ret;
+}
+
+static void ipc_pci_remove(struct pci_dev *pdev)
+{
+       free_irq(pdev->irq, &ipcdev);
+       pci_release_regions(pdev);
+       pci_dev_put(pdev);
+       iounmap(ipcdev.ipc_base);
+       ipcdev.dev = NULL;
+}
+
+static const struct pci_device_id ipc_pci_ids[] = {
+       {PCI_VDEVICE(INTEL, 0x0a94), 0},
+       {PCI_VDEVICE(INTEL, 0x1a94), 0},
+       { 0,}
+};
+MODULE_DEVICE_TABLE(pci, ipc_pci_ids);
+
+static struct pci_driver ipc_pci_driver = {
+       .name = "intel_pmc_ipc",
+       .id_table = ipc_pci_ids,
+       .probe = ipc_pci_probe,
+       .remove = ipc_pci_remove,
+};
+
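+/*
+ * sysfs interface: writing "<cmd> <subcmd>" (two decimal integers) to the
+ * "simplecmd" attribute issues the corresponding simple IPC command, e.g.
+ *
+ *      echo "0 1" > simplecmd
+ *
+ * where the values shown are illustrative only.
+ */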
+static ssize_t intel_pmc_ipc_simple_cmd_store(struct device *dev,
+                                             struct device_attribute *attr,
+                                             const char *buf, size_t count)
+{
+       int subcmd;
+       int cmd;
+       int ret;
+
+       ret = sscanf(buf, "%d %d", &cmd, &subcmd);
+       if (ret != 2) {
+               dev_err(dev, "Error args\n");
+               return -EINVAL;
+       }
+
+       ret = intel_pmc_ipc_simple_command(cmd, subcmd);
+       if (ret) {
+               dev_err(dev, "command %d error with %d\n", cmd, ret);
+               return ret;
+       }
+       return (ssize_t)count;
+}
+
+static ssize_t intel_pmc_ipc_northpeak_store(struct device *dev,
+                                            struct device_attribute *attr,
+                                            const char *buf, size_t count)
+{
+       unsigned long val;
+       int subcmd;
+       int ret;
+
+       if (kstrtoul(buf, 0, &val))
+               return -EINVAL;
+
+       if (val)
+               subcmd = 1;
+       else
+               subcmd = 0;
+       ret = intel_pmc_ipc_simple_command(PMC_IPC_NORTHPEAK_CTRL, subcmd);
+       if (ret) {
+               dev_err(dev, "command north %d error with %d\n", subcmd, ret);
+               return ret;
+       }
+       return (ssize_t)count;
+}
+
+static DEVICE_ATTR(simplecmd, S_IWUSR,
+                  NULL, intel_pmc_ipc_simple_cmd_store);
+static DEVICE_ATTR(northpeak, S_IWUSR,
+                  NULL, intel_pmc_ipc_northpeak_store);
+
+static struct attribute *intel_ipc_attrs[] = {
+       &dev_attr_northpeak.attr,
+       &dev_attr_simplecmd.attr,
+       NULL
+};
+
+static const struct attribute_group intel_ipc_group = {
+       .attrs = intel_ipc_attrs,
+};
+
+#define PUNIT_RESOURCE_INTER           1
+static struct resource punit_res[] = {
+       /* Punit */
+       {
+               .flags = IORESOURCE_MEM,
+       },
+       {
+               .flags = IORESOURCE_MEM,
+       },
+};
+
+#define TCO_RESOURCE_ACPI_IO           0
+#define TCO_RESOURCE_SMI_EN_IO         1
+#define TCO_RESOURCE_GCR_MEM           2
+static struct resource tco_res[] = {
+       /* ACPI - TCO */
+       {
+               .flags = IORESOURCE_IO,
+       },
+       /* ACPI - SMI */
+       {
+               .flags = IORESOURCE_IO,
+       },
+       /* GCS */
+       {
+               .flags = IORESOURCE_MEM,
+       },
+};
+
+static struct lpc_ich_info tco_info = {
+       .name = "Apollo Lake SoC",
+       .iTCO_version = 3,
+};
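+
+/*
+ * tco_res and tco_info are handed to the TCO platform device created in
+ * ipc_create_tco_device() below; the lpc_ich_info platform data (with
+ * iTCO_version = 3) is presumably consumed by the iTCO watchdog driver.
+ */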
+
+static int ipc_create_punit_device(void)
+{
+       struct platform_device *pdev;
+       struct resource *res;
+       int ret;
+
+       pdev = platform_device_alloc(PUNIT_DEVICE_NAME, -1);
+       if (!pdev) {
+               dev_err(ipcdev.dev, "Failed to alloc punit platform device\n");
+               return -ENOMEM;
+       }
+
+       pdev->dev.parent = ipcdev.dev;
+
+       res = punit_res;
+       res->start = (resource_size_t)ipcdev.punit_base;
+       res->end = res->start + ipcdev.punit_size - 1;
+
+       res = punit_res + PUNIT_RESOURCE_INTER;
+       res->start = (resource_size_t)ipcdev.punit_base2;
+       res->end = res->start + ipcdev.punit_size2 - 1;
+
+       ret = platform_device_add_resources(pdev, punit_res,
+                                           ARRAY_SIZE(punit_res));
+       if (ret) {
+               dev_err(ipcdev.dev, "Failed to add platform punit resources\n");
+               goto err;
+       }
+
+       ret = platform_device_add(pdev);
+       if (ret) {
+               dev_err(ipcdev.dev, "Failed to add punit platform device\n");
+               goto err;
+       }
+       ipcdev.punit_dev = pdev;
+
+       return 0;
+err:
+       platform_device_put(pdev);
+       return ret;
+}
+
+static int ipc_create_tco_device(void)
+{
+       struct platform_device *pdev;
+       struct resource *res;
+       int ret;
+
+       pdev = platform_device_alloc(TCO_DEVICE_NAME, -1);
+       if (!pdev) {
+               dev_err(ipcdev.dev, "Failed to alloc tco platform device\n");
+               return -ENOMEM;
+       }
+
+       pdev->dev.parent = ipcdev.dev;
+
+       res = tco_res + TCO_RESOURCE_ACPI_IO;
+       res->start = (resource_size_t)ipcdev.acpi_io_base + TCO_BASE_OFFSET;
+       res->end = res->start + TCO_REGS_SIZE - 1;
+
+       res = tco_res + TCO_RESOURCE_SMI_EN_IO;
+       res->start = (resource_size_t)ipcdev.acpi_io_base + SMI_EN_OFFSET;
+       res->end = res->start + SMI_EN_SIZE - 1;
+
+       res = tco_res + TCO_RESOURCE_GCR_MEM;
+       res->start = (resource_size_t)ipcdev.gcr_base;
+       res->end = res->start + ipcdev.gcr_size - 1;
+
+       ret = platform_device_add_resources(pdev, tco_res, ARRAY_SIZE(tco_res));
+       if (ret) {
+               dev_err(ipcdev.dev, "Failed to add tco platform resources\n");
+               goto err;
+       }
+
+       ret = platform_device_add_data(pdev, &tco_info,
+                                      sizeof(struct lpc_ich_info));
+       if (ret) {
+               dev_err(ipcdev.dev, "Failed to add tco platform data\n");
+               goto err;
+       }
+
+       ret = platform_device_add(pdev);
+       if (ret) {
+               dev_err(ipcdev.dev, "Failed to add tco platform device\n");
+               goto err;
+       }
+       ipcdev.tco_dev = pdev;
+
+       return 0;
+err:
+       platform_device_put(pdev);
+       return ret;
+}
+
+static int ipc_create_pmc_devices(void)
+{
+       int ret;
+
+       ret = ipc_create_tco_device();
+       if (ret) {
+               dev_err(ipcdev.dev, "Failed to add tco platform device\n");
+               return ret;
+       }
+       ret = ipc_create_punit_device();
+       if (ret) {
+               dev_err(ipcdev.dev, "Failed to add punit platform device\n");
+               platform_device_unregister(ipcdev.tco_dev);
+       }
+       return ret;
+}
+
+static int ipc_plat_get_res(struct platform_device *pdev)
+{
+       struct resource *res;
+       void __iomem *addr;
+       int size;
+
+       res = platform_get_resource(pdev, IORESOURCE_IO,
+                                   PLAT_RESOURCE_ACPI_IO_INDEX);
+       if (!res) {
+               dev_err(&pdev->dev, "Failed to get io resource\n");
+               return -ENXIO;
+       }
+       size = resource_size(res);
+       ipcdev.acpi_io_base = (void *)res->start;
+       ipcdev.acpi_io_size = size;
+       dev_info(&pdev->dev, "io res: %llx %x\n",
+                (long long)res->start, (int)resource_size(res));
+
+       res = platform_get_resource(pdev, IORESOURCE_MEM,
+                                   PLAT_RESOURCE_PUNIT_DATA_INDEX);
+       if (!res) {
+               dev_err(&pdev->dev, "Failed to get punit resource\n");
+               return -ENXIO;
+       }
+       size = resource_size(res);
+       ipcdev.punit_base = (void *)res->start;
+       ipcdev.punit_size = size;
+       dev_info(&pdev->dev, "punit data res: %llx %x\n",
+                (long long)res->start, (int)resource_size(res));
+
+       res = platform_get_resource(pdev, IORESOURCE_MEM,
+                                   PLAT_RESOURCE_PUNIT_INTER_INDEX);
+       if (!res) {
+               dev_err(&pdev->dev, "Failed to get punit inter resource\n");
+               return -ENXIO;
+       }
+       size = resource_size(res);
+       ipcdev.punit_base2 = (void *)res->start;
+       ipcdev.punit_size2 = size;
+       dev_info(&pdev->dev, "punit interface res: %llx %x\n",
+                (long long)res->start, (int)resource_size(res));
+
+       res = platform_get_resource(pdev, IORESOURCE_MEM,
+                                   PLAT_RESOURCE_IPC_INDEX);
+       if (!res) {
+               dev_err(&pdev->dev, "Failed to get ipc resource\n");
+               return -ENXIO;
+       }
+       size = PLAT_RESOURCE_IPC_SIZE;
+       if (!request_mem_region(res->start, size, pdev->name)) {
+               dev_err(&pdev->dev, "Failed to request ipc resource\n");
+               return -EBUSY;
+       }
+       addr = ioremap_nocache(res->start, size);
+       if (!addr) {
+               dev_err(&pdev->dev, "I/O memory remapping failed\n");
+               release_mem_region(res->start, size);
+               return -ENOMEM;
+       }
+       ipcdev.ipc_base = addr;
+
+       ipcdev.gcr_base = (void *)(res->start + size);
+       ipcdev.gcr_size = PLAT_RESOURCE_GCR_SIZE;
+       dev_info(&pdev->dev, "ipc res: %llx %x\n",
+                (long long)res->start, (int)resource_size(res));
+
+       return 0;
+}
+
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id ipc_acpi_ids[] = {
+       { "INT34D2", 0},
+       { }
+};
+MODULE_DEVICE_TABLE(acpi, ipc_acpi_ids);
+#endif
+
+static int ipc_plat_probe(struct platform_device *pdev)
+{
+       struct resource *res;
+       int ret;
+
+       ipcdev.dev = &pdev->dev;
+       ipcdev.irq_mode = IPC_TRIGGER_MODE_IRQ;
+       init_completion(&ipcdev.cmd_complete);
+
+       ipcdev.irq = platform_get_irq(pdev, 0);
+       if (ipcdev.irq < 0) {
+               dev_err(&pdev->dev, "Failed to get irq\n");
+               return -EINVAL;
+       }
+
+       ret = ipc_plat_get_res(pdev);
+       if (ret) {
+               dev_err(&pdev->dev, "Failed to request resource\n");
+               return ret;
+       }
+
+       ret = ipc_create_pmc_devices();
+       if (ret) {
+               dev_err(&pdev->dev, "Failed to create pmc devices\n");
+               goto err_device;
+       }
+
+       if (request_irq(ipcdev.irq, ioc, 0, "intel_pmc_ipc", &ipcdev)) {
+               dev_err(&pdev->dev, "Failed to request irq\n");
+               ret = -EBUSY;
+               goto err_irq;
+       }
+
+       ret = sysfs_create_group(&pdev->dev.kobj, &intel_ipc_group);
+       if (ret) {
+               dev_err(&pdev->dev, "Failed to create sysfs group %d\n",
+                       ret);
+               goto err_sys;
+       }
+
+       return 0;
+err_sys:
+       free_irq(ipcdev.irq, &ipcdev);
+err_irq:
+       platform_device_unregister(ipcdev.tco_dev);
+       platform_device_unregister(ipcdev.punit_dev);
+err_device:
+       iounmap(ipcdev.ipc_base);
+       res = platform_get_resource(pdev, IORESOURCE_MEM,
+                                   PLAT_RESOURCE_IPC_INDEX);
+       if (res)
+               release_mem_region(res->start, PLAT_RESOURCE_IPC_SIZE);
+       return ret;
+}
+
+static int ipc_plat_remove(struct platform_device *pdev)
+{
+       struct resource *res;
+
+       sysfs_remove_group(&pdev->dev.kobj, &intel_ipc_group);
+       free_irq(ipcdev.irq, &ipcdev);
+       platform_device_unregister(ipcdev.tco_dev);
+       platform_device_unregister(ipcdev.punit_dev);
+       iounmap(ipcdev.ipc_base);
+       res = platform_get_resource(pdev, IORESOURCE_MEM,
+                                   PLAT_RESOURCE_IPC_INDEX);
+       if (res)
+               release_mem_region(res->start, PLAT_RESOURCE_IPC_SIZE);
+       ipcdev.dev = NULL;
+       return 0;
+}
+
+static struct platform_driver ipc_plat_driver = {
+       .remove = ipc_plat_remove,
+       .probe = ipc_plat_probe,
+       .driver = {
+               .name = "pmc-ipc-plat",
+               .acpi_match_table = ACPI_PTR(ipc_acpi_ids),
+       },
+};
+
+static int __init intel_pmc_ipc_init(void)
+{
+       int ret;
+
+       ret = platform_driver_register(&ipc_plat_driver);
+       if (ret) {
+               pr_err("Failed to register PMC ipc platform driver\n");
+               return ret;
+       }
+       ret = pci_register_driver(&ipc_pci_driver);
+       if (ret) {
+               pr_err("Failed to register PMC ipc pci driver\n");
+               platform_driver_unregister(&ipc_plat_driver);
+               return ret;
+       }
+       return ret;
+}
+
+static void __exit intel_pmc_ipc_exit(void)
+{
+       pci_unregister_driver(&ipc_pci_driver);
+       platform_driver_unregister(&ipc_plat_driver);
+}
+
+MODULE_AUTHOR("Zha Qipeng <qipeng.zha@intel.com>");
+MODULE_DESCRIPTION("Intel PMC IPC driver");
+MODULE_LICENSE("GPL");
+
+/* Some modules are dependent on this, so init earlier */
+fs_initcall(intel_pmc_ipc_init);
+module_exit(intel_pmc_ipc_exit);
index 073a90a63dbc4a1843aa5a7b839b4f169322c86b..fd86daba7ffd450e8c2767eddb180d27573c5c70 100644 (file)
@@ -92,13 +92,13 @@ pvpanic_walk_resources(struct acpi_resource *res, void *context)
 
 static int pvpanic_add(struct acpi_device *device)
 {
-       acpi_status status;
-       u64 ret;
+       int ret;
 
-       status = acpi_evaluate_integer(device->handle, "_STA", NULL,
-                                      &ret);
+       ret = acpi_bus_get_status(device);
+       if (ret < 0)
+               return ret;
 
-       if (ACPI_FAILURE(status) || (ret & 0x0B) != 0x0B)
+       if (!device->status.enabled || !device->status.functional)
                return -ENODEV;
 
        acpi_walk_resources(device->handle, METHOD_NAME__CRS,
index e36542564131018cb5985a19ad52fea1c2b46726..89aa976f0ab2a17c8cbed83bace078bf555c7384 100644 (file)
@@ -82,7 +82,7 @@ static int get_state(u32 *out, u8 instance)
                tmp = 0;
        }
 
-       if (result.length > 0 && result.pointer)
+       if (result.length > 0)
                kfree(result.pointer);
 
        switch (instance) {
index 59bf27ed72d63a1e7d1fa253452081277473d5ee..3ad7b1fa24ce5459900c4b6c966b631c4b6ddc46 100644 (file)
@@ -31,7 +31,7 @@
 
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
-#define TOSHIBA_ACPI_VERSION   "0.21"
+#define TOSHIBA_ACPI_VERSION   "0.22"
 #define PROC_INTERFACE_VERSION 1
 
 #include <linux/kernel.h>
@@ -41,7 +41,6 @@
 #include <linux/proc_fs.h>
 #include <linux/seq_file.h>
 #include <linux/backlight.h>
-#include <linux/rfkill.h>
 #include <linux/input.h>
 #include <linux/input/sparse-keymap.h>
 #include <linux/leds.h>
@@ -82,7 +81,7 @@ MODULE_LICENSE("GPL");
 
 #define TCI_WORDS                      6
 
-/* operations */
+/* Operations */
 #define HCI_SET                                0xff00
 #define HCI_GET                                0xfe00
 #define SCI_OPEN                       0xf100
@@ -90,7 +89,7 @@ MODULE_LICENSE("GPL");
 #define SCI_GET                                0xf300
 #define SCI_SET                                0xf400
 
-/* return codes */
+/* Return codes */
 #define TOS_SUCCESS                    0x0000
 #define TOS_OPEN_CLOSE_OK              0x0044
 #define TOS_FAILURE                    0x1000
@@ -105,7 +104,7 @@ MODULE_LICENSE("GPL");
 #define TOS_NOT_INITIALIZED            0x8d50
 #define TOS_NOT_INSTALLED              0x8e00
 
-/* registers */
+/* Registers */
 #define HCI_FAN                                0x0004
 #define HCI_TR_BACKLIGHT               0x0005
 #define HCI_SYSTEM_EVENT               0x0016
@@ -127,7 +126,7 @@ MODULE_LICENSE("GPL");
 #define SCI_TOUCHPAD                   0x050e
 #define SCI_KBD_FUNCTION_KEYS          0x0522
 
-/* field definitions */
+/* Field definitions */
 #define HCI_ACCEL_MASK                 0x7fff
 #define HCI_HOTKEY_DISABLE             0x0b
 #define HCI_HOTKEY_ENABLE              0x09
@@ -165,7 +164,6 @@ MODULE_LICENSE("GPL");
 struct toshiba_acpi_dev {
        struct acpi_device *acpi_dev;
        const char *method_hci;
-       struct rfkill *bt_rfk;
        struct input_dev *hotkey_dev;
        struct work_struct hotkey_work;
        struct backlight_device *backlight_dev;
@@ -202,8 +200,6 @@ struct toshiba_acpi_dev {
        unsigned int panel_power_on_supported:1;
        unsigned int usb_three_supported:1;
        unsigned int sysfs_created:1;
-
-       struct mutex mutex;
 };
 
 static struct toshiba_acpi_dev *toshiba_acpi;
@@ -330,13 +326,13 @@ static acpi_status tci_raw(struct toshiba_acpi_dev *dev,
 }
 
 /*
- * Common hci tasks (get or set one or two value)
+ * Common hci tasks
  *
  * In addition to the ACPI status, the HCI system returns a result which
  * may be useful (such as "not supported").
  */
 
-static u32 hci_write1(struct toshiba_acpi_dev *dev, u32 reg, u32 in1)
+static u32 hci_write(struct toshiba_acpi_dev *dev, u32 reg, u32 in1)
 {
        u32 in[TCI_WORDS] = { HCI_SET, reg, in1, 0, 0, 0 };
        u32 out[TCI_WORDS];
@@ -345,7 +341,7 @@ static u32 hci_write1(struct toshiba_acpi_dev *dev, u32 reg, u32 in1)
        return ACPI_SUCCESS(status) ? out[0] : TOS_FAILURE;
 }
 
-static u32 hci_read1(struct toshiba_acpi_dev *dev, u32 reg, u32 *out1)
+static u32 hci_read(struct toshiba_acpi_dev *dev, u32 reg, u32 *out1)
 {
        u32 in[TCI_WORDS] = { HCI_GET, reg, 0, 0, 0, 0 };
        u32 out[TCI_WORDS];
@@ -359,31 +355,6 @@ static u32 hci_read1(struct toshiba_acpi_dev *dev, u32 reg, u32 *out1)
        return out[0];
 }
 
-static u32 hci_write2(struct toshiba_acpi_dev *dev, u32 reg, u32 in1, u32 in2)
-{
-       u32 in[TCI_WORDS] = { HCI_SET, reg, in1, in2, 0, 0 };
-       u32 out[TCI_WORDS];
-       acpi_status status = tci_raw(dev, in, out);
-
-       return ACPI_SUCCESS(status) ? out[0] : TOS_FAILURE;
-}
-
-static u32 hci_read2(struct toshiba_acpi_dev *dev,
-                    u32 reg, u32 *out1, u32 *out2)
-{
-       u32 in[TCI_WORDS] = { HCI_GET, reg, *out1, *out2, 0, 0 };
-       u32 out[TCI_WORDS];
-       acpi_status status = tci_raw(dev, in, out);
-
-       if (ACPI_FAILURE(status))
-               return TOS_FAILURE;
-
-       *out1 = out[2];
-       *out2 = out[3];
-
-       return out[0];
-}
-
 /*
  * Common sci tasks
  */
@@ -395,7 +366,7 @@ static int sci_open(struct toshiba_acpi_dev *dev)
        acpi_status status;
 
        status = tci_raw(dev, in, out);
-       if  (ACPI_FAILURE(status) || out[0] == TOS_FAILURE) {
+       if  (ACPI_FAILURE(status)) {
                pr_err("ACPI call to open SCI failed\n");
                return 0;
        }
@@ -433,7 +404,7 @@ static void sci_close(struct toshiba_acpi_dev *dev)
        acpi_status status;
 
        status = tci_raw(dev, in, out);
-       if (ACPI_FAILURE(status) || out[0] == TOS_FAILURE) {
+       if (ACPI_FAILURE(status)) {
                pr_err("ACPI call to close SCI failed\n");
                return;
        }
@@ -481,7 +452,7 @@ static int toshiba_illumination_available(struct toshiba_acpi_dev *dev)
 
        status = tci_raw(dev, in, out);
        sci_close(dev);
-       if (ACPI_FAILURE(status) || out[0] == TOS_FAILURE) {
+       if (ACPI_FAILURE(status)) {
                pr_err("ACPI call to query Illumination support failed\n");
                return 0;
        } else if (out[0] == TOS_NOT_SUPPORTED) {
@@ -522,7 +493,7 @@ static enum led_brightness toshiba_illumination_get(struct led_classdev *cdev)
                        struct toshiba_acpi_dev, led_dev);
        u32 state, result;
 
-       /* First request : initialize communication. */
+       /* First request : initialize communication. */
        if (!sci_open(dev))
                return LED_OFF;
 
@@ -625,7 +596,7 @@ static enum led_brightness toshiba_kbd_backlight_get(struct led_classdev *cdev)
        u32 state, result;
 
        /* Check the keyboard backlight state */
-       result = hci_read1(dev, HCI_KBD_ILLUMINATION, &state);
+       result = hci_read(dev, HCI_KBD_ILLUMINATION, &state);
        if (result == TOS_FAILURE || result == TOS_INPUT_DATA_ERROR) {
                pr_err("ACPI call to get the keyboard backlight failed\n");
                return LED_OFF;
@@ -646,7 +617,7 @@ static void toshiba_kbd_backlight_set(struct led_classdev *cdev,
 
        /* Set the keyboard backlight state */
        state = brightness ? 1 : 0;
-       result = hci_write1(dev, HCI_KBD_ILLUMINATION, state);
+       result = hci_write(dev, HCI_KBD_ILLUMINATION, state);
        if (result == TOS_FAILURE || result == TOS_INPUT_DATA_ERROR) {
                pr_err("ACPI call to set KBD Illumination mode failed\n");
                return;
@@ -703,7 +674,7 @@ static int toshiba_eco_mode_available(struct toshiba_acpi_dev *dev)
        u32 out[TCI_WORDS];
 
        status = tci_raw(dev, in, out);
-       if (ACPI_FAILURE(status) || out[0] == TOS_FAILURE) {
+       if (ACPI_FAILURE(status)) {
                pr_err("ACPI call to get ECO led failed\n");
        } else if (out[0] == TOS_NOT_INSTALLED) {
                pr_info("ECO led not installed");
@@ -825,7 +796,7 @@ static void toshiba_usb_sleep_charge_available(struct toshiba_acpi_dev *dev)
                return;
 
        status = tci_raw(dev, in, out);
-       if (ACPI_FAILURE(status) || out[0] == TOS_FAILURE) {
+       if (ACPI_FAILURE(status)) {
                pr_err("ACPI call to get USB Sleep and Charge mode failed\n");
                sci_close(dev);
                return;
@@ -839,7 +810,7 @@ static void toshiba_usb_sleep_charge_available(struct toshiba_acpi_dev *dev)
 
        in[5] = SCI_USB_CHARGE_BAT_LVL;
        status = tci_raw(dev, in, out);
-       if (ACPI_FAILURE(status) || out[0] == TOS_FAILURE) {
+       if (ACPI_FAILURE(status)) {
                pr_err("ACPI call to get USB Sleep and Charge mode failed\n");
                sci_close(dev);
                return;
@@ -919,7 +890,7 @@ static int toshiba_sleep_functions_status_get(struct toshiba_acpi_dev *dev,
        in[5] = SCI_USB_CHARGE_BAT_LVL;
        status = tci_raw(dev, in, out);
        sci_close(dev);
-       if (ACPI_FAILURE(status) || out[0] == TOS_FAILURE) {
+       if (ACPI_FAILURE(status)) {
                pr_err("ACPI call to get USB S&C battery level failed\n");
                return -EIO;
        } else if (out[0] == TOS_NOT_SUPPORTED) {
@@ -948,7 +919,7 @@ static int toshiba_sleep_functions_status_set(struct toshiba_acpi_dev *dev,
        in[5] = SCI_USB_CHARGE_BAT_LVL;
        status = tci_raw(dev, in, out);
        sci_close(dev);
-       if (ACPI_FAILURE(status) || out[0] == TOS_FAILURE) {
+       if (ACPI_FAILURE(status)) {
                pr_err("ACPI call to set USB S&C battery level failed\n");
                return -EIO;
        } else if (out[0] == TOS_NOT_SUPPORTED) {
@@ -974,7 +945,7 @@ static int toshiba_usb_rapid_charge_get(struct toshiba_acpi_dev *dev,
        in[5] = SCI_USB_CHARGE_RAPID_DSP;
        status = tci_raw(dev, in, out);
        sci_close(dev);
-       if (ACPI_FAILURE(status) || out[0] == TOS_FAILURE) {
+       if (ACPI_FAILURE(status)) {
                pr_err("ACPI call to get USB Rapid Charge failed\n");
                return -EIO;
        } else if (out[0] == TOS_NOT_SUPPORTED ||
@@ -1002,7 +973,7 @@ static int toshiba_usb_rapid_charge_set(struct toshiba_acpi_dev *dev,
        in[5] = SCI_USB_CHARGE_RAPID_DSP;
        status = tci_raw(dev, in, out);
        sci_close(dev);
-       if (ACPI_FAILURE(status) || out[0] == TOS_FAILURE) {
+       if (ACPI_FAILURE(status)) {
                pr_err("ACPI call to set USB Rapid Charge failed\n");
                return -EIO;
        } else if (out[0] == TOS_NOT_SUPPORTED) {
@@ -1194,121 +1165,31 @@ static int toshiba_usb_three_set(struct toshiba_acpi_dev *dev, u32 state)
 static int toshiba_hotkey_event_type_get(struct toshiba_acpi_dev *dev,
                                         u32 *type)
 {
-       u32 val1 = 0x03;
-       u32 val2 = 0;
-       u32 result;
+       u32 in[TCI_WORDS] = { HCI_GET, HCI_SYSTEM_INFO, 0x03, 0, 0, 0 };
+       u32 out[TCI_WORDS];
+       acpi_status status;
 
-       result = hci_read2(dev, HCI_SYSTEM_INFO, &val1, &val2);
-       if (result == TOS_FAILURE) {
+       status = tci_raw(dev, in, out);
+       if (ACPI_FAILURE(status)) {
                pr_err("ACPI call to get System type failed\n");
                return -EIO;
-       } else if (result == TOS_NOT_SUPPORTED) {
+       } else if (out[0] == TOS_NOT_SUPPORTED) {
                pr_info("System type not supported\n");
                return -ENODEV;
        }
 
-       *type = val2;
+       *type = out[3];
 
        return 0;
 }
 
-/* Bluetooth rfkill handlers */
-
-static u32 hci_get_bt_present(struct toshiba_acpi_dev *dev, bool *present)
-{
-       u32 hci_result;
-       u32 value, value2;
-
-       value = 0;
-       value2 = 0;
-       hci_result = hci_read2(dev, HCI_WIRELESS, &value, &value2);
-       if (hci_result == TOS_SUCCESS)
-               *present = (value & HCI_WIRELESS_BT_PRESENT) ? true : false;
-
-       return hci_result;
-}
-
-static u32 hci_get_radio_state(struct toshiba_acpi_dev *dev, bool *radio_state)
-{
-       u32 hci_result;
-       u32 value, value2;
-
-       value = 0;
-       value2 = 0x0001;
-       hci_result = hci_read2(dev, HCI_WIRELESS, &value, &value2);
-
-       *radio_state = value & HCI_WIRELESS_KILL_SWITCH;
-       return hci_result;
-}
-
-static int bt_rfkill_set_block(void *data, bool blocked)
-{
-       struct toshiba_acpi_dev *dev = data;
-       u32 result1, result2;
-       u32 value;
-       int err;
-       bool radio_state;
-
-       value = (blocked == false);
-
-       mutex_lock(&dev->mutex);
-       if (hci_get_radio_state(dev, &radio_state) != TOS_SUCCESS) {
-               err = -EIO;
-               goto out;
-       }
-
-       if (!radio_state) {
-               err = 0;
-               goto out;
-       }
-
-       result1 = hci_write2(dev, HCI_WIRELESS, value, HCI_WIRELESS_BT_POWER);
-       result2 = hci_write2(dev, HCI_WIRELESS, value, HCI_WIRELESS_BT_ATTACH);
-
-       if (result1 != TOS_SUCCESS || result2 != TOS_SUCCESS)
-               err = -EIO;
-       else
-               err = 0;
- out:
-       mutex_unlock(&dev->mutex);
-       return err;
-}
-
-static void bt_rfkill_poll(struct rfkill *rfkill, void *data)
-{
-       bool new_rfk_state;
-       bool value;
-       u32 hci_result;
-       struct toshiba_acpi_dev *dev = data;
-
-       mutex_lock(&dev->mutex);
-
-       hci_result = hci_get_radio_state(dev, &value);
-       if (hci_result != TOS_SUCCESS) {
-               /* Can't do anything useful */
-               mutex_unlock(&dev->mutex);
-               return;
-       }
-
-       new_rfk_state = value;
-
-       mutex_unlock(&dev->mutex);
-
-       if (rfkill_set_hw_state(rfkill, !new_rfk_state))
-               bt_rfkill_set_block(data, true);
-}
-
-static const struct rfkill_ops toshiba_rfk_ops = {
-       .set_block = bt_rfkill_set_block,
-       .poll = bt_rfkill_poll,
-};
-
+/* Transflective Backlight */
 static int get_tr_backlight_status(struct toshiba_acpi_dev *dev, bool *enabled)
 {
        u32 hci_result;
        u32 status;
 
-       hci_result = hci_read1(dev, HCI_TR_BACKLIGHT, &status);
+       hci_result = hci_read(dev, HCI_TR_BACKLIGHT, &status);
        *enabled = !status;
        return hci_result == TOS_SUCCESS ? 0 : -EIO;
 }
@@ -1318,12 +1199,13 @@ static int set_tr_backlight_status(struct toshiba_acpi_dev *dev, bool enable)
        u32 hci_result;
        u32 value = !enable;
 
-       hci_result = hci_write1(dev, HCI_TR_BACKLIGHT, value);
+       hci_result = hci_write(dev, HCI_TR_BACKLIGHT, value);
        return hci_result == TOS_SUCCESS ? 0 : -EIO;
 }
 
-static struct proc_dir_entry *toshiba_proc_dir /*= 0*/;
+static struct proc_dir_entry *toshiba_proc_dir;
 
+/* LCD Brightness */
 static int __get_lcd_brightness(struct toshiba_acpi_dev *dev)
 {
        u32 hci_result;
@@ -1341,7 +1223,7 @@ static int __get_lcd_brightness(struct toshiba_acpi_dev *dev)
                brightness++;
        }
 
-       hci_result = hci_read1(dev, HCI_LCD_BRIGHTNESS, &value);
+       hci_result = hci_read(dev, HCI_LCD_BRIGHTNESS, &value);
        if (hci_result == TOS_SUCCESS)
                return brightness + (value >> HCI_LCD_BRIGHTNESS_SHIFT);
 
@@ -1396,7 +1278,7 @@ static int set_lcd_brightness(struct toshiba_acpi_dev *dev, int value)
        }
 
        value = value << HCI_LCD_BRIGHTNESS_SHIFT;
-       hci_result = hci_write1(dev, HCI_LCD_BRIGHTNESS, value);
+       hci_result = hci_write(dev, HCI_LCD_BRIGHTNESS, value);
        return hci_result == TOS_SUCCESS ? 0 : -EIO;
 }
 
@@ -1446,7 +1328,7 @@ static int get_video_status(struct toshiba_acpi_dev *dev, u32 *status)
 {
        u32 hci_result;
 
-       hci_result = hci_read1(dev, HCI_VIDEO_OUT, status);
+       hci_result = hci_read(dev, HCI_VIDEO_OUT, status);
        return hci_result == TOS_SUCCESS ? 0 : -EIO;
 }
 
@@ -1531,7 +1413,8 @@ static ssize_t video_proc_write(struct file *file, const char __user *buf,
                        _set_bit(&new_video_out, HCI_VIDEO_OUT_TV, tv_out);
                /*
                 * To avoid unnecessary video disruption, only write the new
-                * video setting if something changed. */
+                * video setting if something changed.
+                */
                if (new_video_out != video_out)
                        ret = write_acpi_int(METHOD_VIDEO_OUT, new_video_out);
        }
@@ -1552,7 +1435,7 @@ static int get_fan_status(struct toshiba_acpi_dev *dev, u32 *status)
 {
        u32 hci_result;
 
-       hci_result = hci_read1(dev, HCI_FAN, status);
+       hci_result = hci_read(dev, HCI_FAN, status);
        return hci_result == TOS_SUCCESS ? 0 : -EIO;
 }
 
@@ -1592,7 +1475,7 @@ static ssize_t fan_proc_write(struct file *file, const char __user *buf,
 
        if (sscanf(cmd, " force_on : %i", &value) == 1 &&
            value >= 0 && value <= 1) {
-               hci_result = hci_write1(dev, HCI_FAN, value);
+               hci_result = hci_write(dev, HCI_FAN, value);
                if (hci_result == TOS_SUCCESS)
                        dev->force_fan = value;
                else
@@ -1620,7 +1503,7 @@ static int keys_proc_show(struct seq_file *m, void *v)
        u32 value;
 
        if (!dev->key_event_valid && dev->system_event_supported) {
-               hci_result = hci_read1(dev, HCI_SYSTEM_EVENT, &value);
+               hci_result = hci_read(dev, HCI_SYSTEM_EVENT, &value);
                if (hci_result == TOS_SUCCESS) {
                        dev->key_event_valid = 1;
                        dev->last_key_event = value;
@@ -1632,7 +1515,7 @@ static int keys_proc_show(struct seq_file *m, void *v)
                         * some machines where system events sporadically
                         * become disabled.
                         */
-                       hci_result = hci_write1(dev, HCI_SYSTEM_EVENT, 1);
+                       hci_result = hci_write(dev, HCI_SYSTEM_EVENT, 1);
                        pr_notice("Re-enabled hotkeys\n");
                } else {
                        pr_err("Error reading hotkey status\n");
@@ -1769,7 +1652,7 @@ static ssize_t fan_store(struct device *dev,
        if (state != 0 && state != 1)
                return -EINVAL;
 
-       result = hci_write1(toshiba, HCI_FAN, state);
+       result = hci_write(toshiba, HCI_FAN, state);
        if (result == TOS_FAILURE)
                return -EIO;
        else if (result == TOS_NOT_SUPPORTED)
@@ -2391,7 +2274,7 @@ static int toshiba_acpi_enable_hotkeys(struct toshiba_acpi_dev *dev)
        if (ACPI_FAILURE(status))
                return -ENODEV;
 
-       result = hci_write1(dev, HCI_HOTKEY_EVENT, HCI_HOTKEY_ENABLE);
+       result = hci_write(dev, HCI_HOTKEY_EVENT, HCI_HOTKEY_ENABLE);
        if (result == TOS_FAILURE)
                return -EIO;
        else if (result == TOS_NOT_SUPPORTED)
@@ -2408,8 +2291,8 @@ static void toshiba_acpi_enable_special_functions(struct toshiba_acpi_dev *dev)
         * Re-activate the hotkeys, but this time, we are using the
         * "Special Functions" mode.
         */
-       result = hci_write1(dev, HCI_HOTKEY_EVENT,
-                           HCI_HOTKEY_SPECIAL_FUNCTIONS);
+       result = hci_write(dev, HCI_HOTKEY_EVENT,
+                          HCI_HOTKEY_SPECIAL_FUNCTIONS);
        if (result != TOS_SUCCESS)
                pr_err("Could not enable the Special Function mode\n");
 }
@@ -2490,7 +2373,7 @@ static void toshiba_acpi_process_hotkeys(struct toshiba_acpi_dev *dev)
                        toshiba_acpi_report_hotkey(dev, scancode);
        } else if (dev->system_event_supported) {
                do {
-                       hci_result = hci_read1(dev, HCI_SYSTEM_EVENT, &value);
+                       hci_result = hci_read(dev, HCI_SYSTEM_EVENT, &value);
                        switch (hci_result) {
                        case TOS_SUCCESS:
                                toshiba_acpi_report_hotkey(dev, (int)value);
@@ -2502,7 +2385,7 @@ static void toshiba_acpi_process_hotkeys(struct toshiba_acpi_dev *dev)
                                 * sporadically become disabled.
                                 */
                                hci_result =
-                                       hci_write1(dev, HCI_SYSTEM_EVENT, 1);
+                                       hci_write(dev, HCI_SYSTEM_EVENT, 1);
                                pr_notice("Re-enabled hotkeys\n");
                                /* Fall through */
                        default:
@@ -2579,7 +2462,7 @@ static int toshiba_acpi_setup_keyboard(struct toshiba_acpi_dev *dev)
        if (acpi_has_method(dev->acpi_dev->handle, "INFO"))
                dev->info_supported = 1;
        else {
-               hci_result = hci_write1(dev, HCI_SYSTEM_EVENT, 1);
+               hci_result = hci_write(dev, HCI_SYSTEM_EVENT, 1);
                if (hci_result == TOS_SUCCESS)
                        dev->system_event_supported = 1;
        }
@@ -2689,11 +2572,6 @@ static int toshiba_acpi_remove(struct acpi_device *acpi_dev)
                sparse_keymap_free(dev->hotkey_dev);
        }
 
-       if (dev->bt_rfk) {
-               rfkill_unregister(dev->bt_rfk);
-               rfkill_destroy(dev->bt_rfk);
-       }
-
        backlight_device_unregister(dev->backlight_dev);
 
        if (dev->illumination_supported)
@@ -2730,7 +2608,6 @@ static int toshiba_acpi_add(struct acpi_device *acpi_dev)
        const char *hci_method;
        u32 special_functions;
        u32 dummy;
-       bool bt_present;
        int ret = 0;
 
        if (toshiba_acpi)
@@ -2766,33 +2643,10 @@ static int toshiba_acpi_add(struct acpi_device *acpi_dev)
        if (toshiba_acpi_setup_keyboard(dev))
                pr_info("Unable to activate hotkeys\n");
 
-       mutex_init(&dev->mutex);
-
        ret = toshiba_acpi_setup_backlight(dev);
        if (ret)
                goto error;
 
-       /* Register rfkill switch for Bluetooth */
-       if (hci_get_bt_present(dev, &bt_present) == TOS_SUCCESS && bt_present) {
-               dev->bt_rfk = rfkill_alloc("Toshiba Bluetooth",
-                                          &acpi_dev->dev,
-                                          RFKILL_TYPE_BLUETOOTH,
-                                          &toshiba_rfk_ops,
-                                          dev);
-               if (!dev->bt_rfk) {
-                       pr_err("unable to allocate rfkill device\n");
-                       ret = -ENOMEM;
-                       goto error;
-               }
-
-               ret = rfkill_register(dev->bt_rfk);
-               if (ret) {
-                       pr_err("unable to register rfkill device\n");
-                       rfkill_destroy(dev->bt_rfk);
-                       goto error;
-               }
-       }
-
        if (toshiba_illumination_available(dev)) {
                dev->led_dev.name = "toshiba::illumination";
                dev->led_dev.max_brightness = 1;
@@ -2930,7 +2784,7 @@ static int toshiba_acpi_suspend(struct device *device)
        u32 result;
 
        if (dev->hotkey_dev)
-               result = hci_write1(dev, HCI_HOTKEY_EVENT, HCI_HOTKEY_DISABLE);
+               result = hci_write(dev, HCI_HOTKEY_EVENT, HCI_HOTKEY_DISABLE);
 
        return 0;
 }
index 24980076336223147fb179979256be03b6c1e99a..c5e45089ac51d3c51c26522b3b07b085f38da750 100644 (file)
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
- *
- * Note the Toshiba Bluetooth RFKill switch seems to be a strange
- * fish. It only provides a BT event when the switch is flipped to
- * the 'on' position. When flipping it to 'off', the USB device is
- * simply pulled away underneath us, without any BT event being
- * delivered.
  */
 
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -25,6 +19,7 @@
 #include <linux/init.h>
 #include <linux/types.h>
 #include <linux/acpi.h>
+#include <linux/rfkill.h>
 
 #define BT_KILLSWITCH_MASK     0x01
 #define BT_PLUGGED_MASK                0x40
@@ -34,6 +29,15 @@ MODULE_AUTHOR("Jes Sorensen <Jes.Sorensen@gmail.com>");
 MODULE_DESCRIPTION("Toshiba Laptop ACPI Bluetooth Enable Driver");
 MODULE_LICENSE("GPL");
 
+struct toshiba_bluetooth_dev {
+       struct acpi_device *acpi_dev;
+       struct rfkill *rfk;
+
+       bool killswitch;
+       bool plugged;
+       bool powered;
+};
+
 static int toshiba_bt_rfkill_add(struct acpi_device *device);
 static int toshiba_bt_rfkill_remove(struct acpi_device *device);
 static void toshiba_bt_rfkill_notify(struct acpi_device *device, u32 event);
@@ -95,41 +99,12 @@ static int toshiba_bluetooth_status(acpi_handle handle)
                return -ENXIO;
        }
 
-       pr_info("Bluetooth status %llu\n", status);
-
        return status;
 }
 
 static int toshiba_bluetooth_enable(acpi_handle handle)
 {
        acpi_status result;
-       bool killswitch;
-       bool powered;
-       bool plugged;
-       int status;
-
-       /*
-        * Query ACPI to verify RFKill switch is set to 'on'.
-        * If not, we return silently, no need to report it as
-        * an error.
-        */
-       status = toshiba_bluetooth_status(handle);
-       if (status < 0)
-               return status;
-
-       killswitch = (status & BT_KILLSWITCH_MASK) ? true : false;
-       powered = (status & BT_POWER_MASK) ? true : false;
-       plugged = (status & BT_PLUGGED_MASK) ? true : false;
-
-       if (!killswitch)
-               return 0;
-       /*
-        * This check ensures to only enable the device if it is powered
-        * off or detached, as some recent devices somehow pass the killswitch
-        * test, causing a loop enabling/disabling the device, see bug 93911.
-        */
-       if (powered || plugged)
-               return 0;
 
        result = acpi_evaluate_object(handle, "AUSB", NULL, NULL);
        if (ACPI_FAILURE(result)) {
@@ -165,20 +140,102 @@ static int toshiba_bluetooth_disable(acpi_handle handle)
        return 0;
 }
 
+/* Helper function */
+static int toshiba_bluetooth_sync_status(struct toshiba_bluetooth_dev *bt_dev)
+{
+       int status;
+
+       status = toshiba_bluetooth_status(bt_dev->acpi_dev->handle);
+       if (status < 0) {
+               pr_err("Could not sync bluetooth device status\n");
+               return status;
+       }
+
+       bt_dev->killswitch = (status & BT_KILLSWITCH_MASK) ? true : false;
+       bt_dev->plugged = (status & BT_PLUGGED_MASK) ? true : false;
+       bt_dev->powered = (status & BT_POWER_MASK) ? true : false;
+
+       pr_debug("Bluetooth status %d killswitch %d plugged %d powered %d\n",
+                status, bt_dev->killswitch, bt_dev->plugged, bt_dev->powered);
+
+       return 0;
+}
+
+/* RFKill handlers */
+static int bt_rfkill_set_block(void *data, bool blocked)
+{
+       struct toshiba_bluetooth_dev *bt_dev = data;
+       int ret;
+
+       ret = toshiba_bluetooth_sync_status(bt_dev);
+       if (ret)
+               return ret;
+
+       if (!bt_dev->killswitch)
+               return 0;
+
+       if (blocked)
+               ret = toshiba_bluetooth_disable(bt_dev->acpi_dev->handle);
+       else
+               ret = toshiba_bluetooth_enable(bt_dev->acpi_dev->handle);
+
+       return ret;
+}
+
+static void bt_rfkill_poll(struct rfkill *rfkill, void *data)
+{
+       struct toshiba_bluetooth_dev *bt_dev = data;
+
+       if (toshiba_bluetooth_sync_status(bt_dev))
+               return;
+
+       /*
+        * Note the Toshiba Bluetooth RFKill switch seems to be a strange
+        * fish. It only provides a BT event when the switch is flipped to
+        * the 'on' position. When flipping it to 'off', the USB device is
+        * simply pulled away underneath us, without any BT event being
+        * delivered.
+        */
+       rfkill_set_hw_state(bt_dev->rfk, !bt_dev->killswitch);
+}
+
+static const struct rfkill_ops rfk_ops = {
+       .set_block = bt_rfkill_set_block,
+       .poll = bt_rfkill_poll,
+};
+
+/* ACPI driver functions */
 static void toshiba_bt_rfkill_notify(struct acpi_device *device, u32 event)
 {
-       toshiba_bluetooth_enable(device->handle);
+       struct toshiba_bluetooth_dev *bt_dev = acpi_driver_data(device);
+
+       if (toshiba_bluetooth_sync_status(bt_dev))
+               return;
+
+       rfkill_set_hw_state(bt_dev->rfk, !bt_dev->killswitch);
 }
 
 #ifdef CONFIG_PM_SLEEP
 static int toshiba_bt_resume(struct device *dev)
 {
-       return toshiba_bluetooth_enable(to_acpi_device(dev)->handle);
+       struct toshiba_bluetooth_dev *bt_dev;
+       int ret;
+
+       bt_dev = acpi_driver_data(to_acpi_device(dev));
+
+       ret = toshiba_bluetooth_sync_status(bt_dev);
+       if (ret)
+               return ret;
+
+       rfkill_set_hw_state(bt_dev->rfk, !bt_dev->killswitch);
+
+       return 0;
 }
 #endif
 
 static int toshiba_bt_rfkill_add(struct acpi_device *device)
 {
+       struct toshiba_bluetooth_dev *bt_dev;
        int result;
 
        result = toshiba_bluetooth_present(device->handle);
@@ -187,17 +244,54 @@ static int toshiba_bt_rfkill_add(struct acpi_device *device)
 
        pr_info("Toshiba ACPI Bluetooth device driver\n");
 
-       /* Enable the BT device */
-       result = toshiba_bluetooth_enable(device->handle);
-       if (result)
+       bt_dev = kzalloc(sizeof(*bt_dev), GFP_KERNEL);
+       if (!bt_dev)
+               return -ENOMEM;
+       bt_dev->acpi_dev = device;
+       device->driver_data = bt_dev;
+       dev_set_drvdata(&device->dev, bt_dev);
+
+       result = toshiba_bluetooth_sync_status(bt_dev);
+       if (result) {
+               kfree(bt_dev);
                return result;
+       }
+
+       bt_dev->rfk = rfkill_alloc("Toshiba Bluetooth",
+                                  &device->dev,
+                                  RFKILL_TYPE_BLUETOOTH,
+                                  &rfk_ops,
+                                  bt_dev);
+       if (!bt_dev->rfk) {
+               pr_err("Unable to allocate rfkill device\n");
+               kfree(bt_dev);
+               return -ENOMEM;
+       }
+
+       rfkill_set_hw_state(bt_dev->rfk, !bt_dev->killswitch);
+
+       result = rfkill_register(bt_dev->rfk);
+       if (result) {
+               pr_err("Unable to register rfkill device\n");
+               rfkill_destroy(bt_dev->rfk);
+               kfree(bt_dev);
+       }
 
        return result;
 }
 
 static int toshiba_bt_rfkill_remove(struct acpi_device *device)
 {
+       struct toshiba_bluetooth_dev *bt_dev = acpi_driver_data(device);
+
        /* clean up */
+       if (bt_dev->rfk) {
+               rfkill_unregister(bt_dev->rfk);
+               rfkill_destroy(bt_dev->rfk);
+       }
+
+       kfree(bt_dev);
+
        return toshiba_bluetooth_disable(device->handle);
 }
 
index 65300b6a84b9d351d6f074991e6e26de7ed8eea5..7f2afc6b5eb9539ffb3fb61cc9abdfbd212caacb 100644 (file)
@@ -78,15 +78,20 @@ static ssize_t protection_level_store(struct device *dev,
                                      const char *buf, size_t count)
 {
        struct toshiba_haps_dev *haps = dev_get_drvdata(dev);
-       int level, ret;
-
-       if (sscanf(buf, "%d", &level) != 1 || level < 0 || level > 3)
-               return -EINVAL;
+       int level;
+       int ret;
 
-       /* Set the sensor level.
-        * Acceptable levels are:
+       ret = kstrtoint(buf, 0, &level);
+       if (ret)
+               return ret;
+       /*
+        * Check for supported levels, which can be:
         * 0 - Disabled | 1 - Low | 2 - Medium | 3 - High
         */
+       if (level < 0 || level > 3)
+               return -EINVAL;
+
+       /* Set the sensor level */
        ret = toshiba_haps_protection_level(haps->acpi_dev->handle, level);
        if (ret != 0)
                return ret;
@@ -95,15 +100,21 @@ static ssize_t protection_level_store(struct device *dev,
 
        return count;
 }
+static DEVICE_ATTR_RW(protection_level);
 
 static ssize_t reset_protection_store(struct device *dev,
                                      struct device_attribute *attr,
                                      const char *buf, size_t count)
 {
        struct toshiba_haps_dev *haps = dev_get_drvdata(dev);
-       int reset, ret;
+       int reset;
+       int ret;
 
-       if (sscanf(buf, "%d", &reset) != 1 || reset != 1)
+       ret = kstrtoint(buf, 0, &reset);
+       if (ret)
+               return ret;
+       /* The only accepted value is 1 */
+       if (reset != 1)
                return -EINVAL;
 
        /* Reset the protection interface */
@@ -113,10 +124,7 @@ static ssize_t reset_protection_store(struct device *dev,
 
        return count;
 }
-
-static DEVICE_ATTR(protection_level, S_IRUGO | S_IWUSR,
-                  protection_level_show, protection_level_store);
-static DEVICE_ATTR(reset_protection, S_IWUSR, NULL, reset_protection_store);
+static DEVICE_ATTR_WO(reset_protection);
 
 static struct attribute *haps_attributes[] = {
        &dev_attr_protection_level.attr,
index d3c7d245ae63d93c5ec36617e69331b25f9b570c..7d0d269a0837c0b84e9f72eeabbab354159a3b8e 100644 (file)
@@ -88,4 +88,4 @@ static struct platform_driver syscon_reboot_driver = {
                .of_match_table = syscon_reboot_of_match,
        },
 };
-module_platform_driver(syscon_reboot_driver);
+builtin_platform_driver(syscon_reboot_driver);
index f986e0cca7acf68300796563b6a80ceebd3881e4..83c42ea88f2b252b0c77a65cfe1bab12135e9dca 100644 (file)
@@ -448,42 +448,42 @@ static int param_set_battery_voltage(const char *key,
 
 #define param_get_battery_voltage param_get_int
 
-static struct kernel_param_ops param_ops_ac_online = {
+static const struct kernel_param_ops param_ops_ac_online = {
        .set = param_set_ac_online,
        .get = param_get_ac_online,
 };
 
-static struct kernel_param_ops param_ops_usb_online = {
+static const struct kernel_param_ops param_ops_usb_online = {
        .set = param_set_usb_online,
        .get = param_get_usb_online,
 };
 
-static struct kernel_param_ops param_ops_battery_status = {
+static const struct kernel_param_ops param_ops_battery_status = {
        .set = param_set_battery_status,
        .get = param_get_battery_status,
 };
 
-static struct kernel_param_ops param_ops_battery_present = {
+static const struct kernel_param_ops param_ops_battery_present = {
        .set = param_set_battery_present,
        .get = param_get_battery_present,
 };
 
-static struct kernel_param_ops param_ops_battery_technology = {
+static const struct kernel_param_ops param_ops_battery_technology = {
        .set = param_set_battery_technology,
        .get = param_get_battery_technology,
 };
 
-static struct kernel_param_ops param_ops_battery_health = {
+static const struct kernel_param_ops param_ops_battery_health = {
        .set = param_set_battery_health,
        .get = param_get_battery_health,
 };
 
-static struct kernel_param_ops param_ops_battery_capacity = {
+static const struct kernel_param_ops param_ops_battery_capacity = {
        .set = param_set_battery_capacity,
        .get = param_get_battery_capacity,
 };
 
-static struct kernel_param_ops param_ops_battery_voltage = {
+static const struct kernel_param_ops param_ops_battery_voltage = {
        .set = param_set_battery_voltage,
        .get = param_get_battery_voltage,
 };
index 6af41abccacb473921dbeba078147d067ea8e0cd..c07ee13bd47047e1e930a16e1d76229ae0b7b027 100644 (file)
@@ -27,6 +27,7 @@
 #include <linux/gpio.h>
 #include <linux/slab.h>
 #include <linux/gpio/consumer.h>
+#include <linux/module.h>
 #include <linux/platform_device.h>
 #include <linux/regulator/driver.h>
 #include <linux/regulator/machine.h>
index 5e343bab9458e430f18f54c1be713a0a7686b593..28c711f0ac6bd5c3d77c32018c64abad77c4bdfa 100644 (file)
@@ -41,6 +41,19 @@ config STE_MODEM_RPROC
          This can be either built-in or a loadable module.
          If unsure say N.
 
+config WKUP_M3_RPROC
+       tristate "AMx3xx Wakeup M3 remoteproc support"
+       depends on SOC_AM33XX || SOC_AM43XX
+       select REMOTEPROC
+       help
+         Say y here to support the Wakeup M3 remote processor on the TI
+         AM33xx and AM43xx families of SoCs.
+
+         Required for Suspend-to-RAM on AM33xx and AM43xx SoCs. Also needed
+         for deep CPUIdle states on AM33xx SoCs. Allows for loading of the
+         firmware onto these remote processors.
+         If unsure say N.
+
 config DA8XX_REMOTEPROC
        tristate "DA8xx/OMAP-L13x remoteproc support"
        depends on ARCH_DAVINCI_DA8XX
index ac2ff75686d20da708d77728aca94f23cbdf1764..81b04d1e2e5888b8ebdaca78a551045e3aa0db40 100644 (file)
@@ -9,4 +9,5 @@ remoteproc-y                            += remoteproc_virtio.o
 remoteproc-y                           += remoteproc_elf_loader.o
 obj-$(CONFIG_OMAP_REMOTEPROC)          += omap_remoteproc.o
 obj-$(CONFIG_STE_MODEM_RPROC)          += ste_modem_rproc.o
+obj-$(CONFIG_WKUP_M3_RPROC)            += wkup_m3_rproc.o
 obj-$(CONFIG_DA8XX_REMOTEPROC)         += da8xx_remoteproc.o
index f8d6a0661c14cd51e9a8bbc1bf6fe0bee182b484..009e56f67de239779e3710bffc1b430257441ea3 100644 (file)
@@ -26,8 +26,7 @@
 static char *da8xx_fw_name;
 module_param(da8xx_fw_name, charp, S_IRUGO);
 MODULE_PARM_DESC(da8xx_fw_name,
-                "\n\t\tName of DSP firmware file in /lib/firmware"
-                " (if not specified defaults to 'rproc-dsp-fw')");
+                "Name of DSP firmware file in /lib/firmware (if not specified defaults to 'rproc-dsp-fw')");
 
 /*
  * OMAP-L138 Technical References:
index 11cdb119e4f3bbdee5d73293dd71cec450f2ae65..8b3130f22b42b334ff0d22718e46484012fb2c2b 100644 (file)
@@ -44,6 +44,9 @@
 
 #include "remoteproc_internal.h"
 
+static DEFINE_MUTEX(rproc_list_mutex);
+static LIST_HEAD(rproc_list);
+
 typedef int (*rproc_handle_resources_t)(struct rproc *rproc,
                                struct resource_table *table, int len);
 typedef int (*rproc_handle_resource_t)(struct rproc *rproc,
@@ -132,32 +135,48 @@ static void rproc_disable_iommu(struct rproc *rproc)
 
        iommu_detach_device(domain, dev);
        iommu_domain_free(domain);
-
-       return;
 }
 
-/*
+/**
+ * rproc_da_to_va() - lookup the kernel virtual address for a remoteproc address
+ * @rproc: handle of a remote processor
+ * @da: remoteproc device address to translate
+ * @len: length of the memory region @da is pointing to
+ *
  * Some remote processors will ask us to allocate them physically contiguous
  * memory regions (which we call "carveouts"), and map them to specific
- * device addresses (which are hardcoded in the firmware).
+ * device addresses (which are hardcoded in the firmware). They may also have
+ * dedicated memory regions internal to the processors, and use them either
+ * exclusively or alongside carveouts.
  *
  * They may then ask us to copy objects into specific device addresses (e.g.
  * code/data sections) or expose us certain symbols in other device address
  * (e.g. their trace buffer).
  *
- * This function is an internal helper with which we can go over the allocated
- * carveouts and translate specific device address to kernel virtual addresses
- * so we can access the referenced memory.
+ * This function is a helper function with which we can go over the allocated
+ * carveouts and translate specific device addresses to kernel virtual addresses
+ * so we can access the referenced memory. This function also allows
+ * translations on the internal remoteproc memory regions to be performed
+ * through a platform implementation specific da_to_va op, if present.
+ *
+ * The function returns a valid kernel address on success or NULL on failure.
  *
  * Note: phys_to_virt(iommu_iova_to_phys(rproc->domain, da)) will work too,
  * but only on kernel direct mapped RAM memory. Instead, we're just using
- * here the output of the DMA API, which should be more correct.
+ * here the output of the DMA API for the carveouts, which should be more
+ * correct.
  */
 void *rproc_da_to_va(struct rproc *rproc, u64 da, int len)
 {
        struct rproc_mem_entry *carveout;
        void *ptr = NULL;
 
+       if (rproc->ops->da_to_va) {
+               ptr = rproc->ops->da_to_va(rproc, da, len);
+               if (ptr)
+                       goto out;
+       }
+
        list_for_each_entry(carveout, &rproc->carveouts, node) {
                int offset = da - carveout->da;
 
@@ -174,6 +193,7 @@ void *rproc_da_to_va(struct rproc *rproc, u64 da, int len)
                break;
        }
 
+out:
        return ptr;
 }
 EXPORT_SYMBOL(rproc_da_to_va);
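For illustration, a minimal platform-specific da_to_va hook (the new fallback consulted above before the carveout walk) might look like the sketch below, for a hypothetical device with a single internal SRAM region. The struct and field names are assumptions, not part of this series; the wkup_m3 driver added further down is the real in-tree user.

#include <linux/io.h>
#include <linux/remoteproc.h>

/* Hypothetical per-device state describing one internal SRAM region. */
struct my_rproc {
	void __iomem *sram_va;	/* MPU-side mapping of the internal SRAM */
	u32 sram_da;		/* device address as seen by the remote core */
	size_t sram_size;
};

/*
 * Translate a device address into a kernel virtual address, or return
 * NULL so that rproc_da_to_va() falls back to the carveout list.
 */
static void *my_rproc_da_to_va(struct rproc *rproc, u64 da, int len)
{
	struct my_rproc *mproc = rproc->priv;

	if (len <= 0)
		return NULL;

	if (da >= mproc->sram_da &&
	    da + len <= mproc->sram_da + mproc->sram_size)
		return (__force void *)(mproc->sram_va +
					(da - mproc->sram_da));

	return NULL;
}

static struct rproc_ops my_rproc_ops = {
	/* .start/.stop/.kick omitted from this sketch */
	.da_to_va	= my_rproc_da_to_va,
};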
@@ -411,10 +431,8 @@ static int rproc_handle_trace(struct rproc *rproc, struct fw_rsc_trace *rsc,
        }
 
        trace = kzalloc(sizeof(*trace), GFP_KERNEL);
-       if (!trace) {
-               dev_err(dev, "kzalloc trace failed\n");
+       if (!trace)
                return -ENOMEM;
-       }
 
        /* set the trace buffer dma properties */
        trace->len = rsc->len;
@@ -489,10 +507,8 @@ static int rproc_handle_devmem(struct rproc *rproc, struct fw_rsc_devmem *rsc,
        }
 
        mapping = kzalloc(sizeof(*mapping), GFP_KERNEL);
-       if (!mapping) {
-               dev_err(dev, "kzalloc mapping failed\n");
+       if (!mapping)
                return -ENOMEM;
-       }
 
        ret = iommu_map(rproc->domain, rsc->da, rsc->pa, rsc->len, rsc->flags);
        if (ret) {
@@ -565,10 +581,8 @@ static int rproc_handle_carveout(struct rproc *rproc,
                        rsc->da, rsc->pa, rsc->len, rsc->flags);
 
        carveout = kzalloc(sizeof(*carveout), GFP_KERNEL);
-       if (!carveout) {
-               dev_err(dev, "kzalloc carveout failed\n");
+       if (!carveout)
                return -ENOMEM;
-       }
 
        va = dma_alloc_coherent(dev->parent, rsc->len, &dma, GFP_KERNEL);
        if (!va) {
@@ -768,7 +782,8 @@ static void rproc_resource_cleanup(struct rproc *rproc)
 
        /* clean up carveout allocations */
        list_for_each_entry_safe(entry, tmp, &rproc->carveouts, node) {
-               dma_free_coherent(dev->parent, entry->len, entry->va, entry->dma);
+               dma_free_coherent(dev->parent, entry->len, entry->va,
+                                 entry->dma);
                list_del(&entry->node);
                kfree(entry);
        }
@@ -808,9 +823,8 @@ static int rproc_fw_boot(struct rproc *rproc, const struct firmware *fw)
 
        /* look for the resource table */
        table = rproc_find_rsc_table(rproc, fw, &tablesz);
-       if (!table) {
+       if (!table)
                goto clean_up;
-       }
 
        /* Verify that resource table in loaded fw is unchanged */
        if (rproc->table_csum != crc32(0, table, tablesz)) {
@@ -911,7 +925,8 @@ static void rproc_fw_config_virtio(const struct firmware *fw, void *context)
 
        /* count the number of notify-ids */
        rproc->max_notifyid = -1;
-       ret = rproc_handle_resources(rproc, tablesz, rproc_count_vrings_handler);
+       ret = rproc_handle_resources(rproc, tablesz,
+                                    rproc_count_vrings_handler);
        if (ret)
                goto out;
 
@@ -1151,6 +1166,50 @@ out:
 }
 EXPORT_SYMBOL(rproc_shutdown);
 
+/**
+ * rproc_get_by_phandle() - find a remote processor by phandle
+ * @phandle: phandle to the rproc
+ *
+ * Finds an rproc handle using the remote processor's phandle, and then
+ * returns a handle to the rproc.
+ *
+ * This function increments the remote processor's refcount, so always
+ * use rproc_put() to decrement it back once rproc isn't needed anymore.
+ *
+ * Returns the rproc handle on success, and NULL on failure.
+ */
+#ifdef CONFIG_OF
+struct rproc *rproc_get_by_phandle(phandle phandle)
+{
+       struct rproc *rproc = NULL, *r;
+       struct device_node *np;
+
+       np = of_find_node_by_phandle(phandle);
+       if (!np)
+               return NULL;
+
+       mutex_lock(&rproc_list_mutex);
+       list_for_each_entry(r, &rproc_list, node) {
+               if (r->dev.parent && r->dev.parent->of_node == np) {
+                       rproc = r;
+                       get_device(&rproc->dev);
+                       break;
+               }
+       }
+       mutex_unlock(&rproc_list_mutex);
+
+       of_node_put(np);
+
+       return rproc;
+}
+#else
+struct rproc *rproc_get_by_phandle(phandle phandle)
+{
+       return NULL;
+}
+#endif
+EXPORT_SYMBOL(rproc_get_by_phandle);
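A hedged sketch of how a client driver might consume this new lookup follows; the "rproc" phandle property name and the deferral policy are illustrative assumptions, not anything defined by this patch.

#include <linux/errno.h>
#include <linux/of.h>
#include <linux/remoteproc.h>

/*
 * Hypothetical client helper: look up and boot the remote processor
 * referenced by an (assumed) "rproc" phandle property of @np.
 */
static int my_client_boot_rproc(struct device_node *np)
{
	struct rproc *rproc;
	u32 phandle;
	int ret;

	ret = of_property_read_u32(np, "rproc", &phandle);
	if (ret)
		return ret;

	/* rproc_get_by_phandle() takes a reference on success */
	rproc = rproc_get_by_phandle(phandle);
	if (!rproc)
		return -EPROBE_DEFER;

	ret = rproc_boot(rproc);
	if (ret)
		rproc_put(rproc);	/* drop the reference on failure */

	return ret;
}

Returning -EPROBE_DEFER when no matching rproc has been registered yet is one reasonable policy for a sketch like this; the API itself only promises a NULL return in that case.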
+
 /**
  * rproc_add() - register a remote processor
  * @rproc: the remote processor handle to register
@@ -1180,6 +1239,11 @@ int rproc_add(struct rproc *rproc)
        if (ret < 0)
                return ret;
 
+       /* expose to rproc_get_by_phandle users */
+       mutex_lock(&rproc_list_mutex);
+       list_add(&rproc->node, &rproc_list);
+       mutex_unlock(&rproc_list_mutex);
+
        dev_info(dev, "%s is available\n", rproc->name);
 
        dev_info(dev, "Note: remoteproc is still under development and considered experimental.\n");
@@ -1268,10 +1332,8 @@ struct rproc *rproc_alloc(struct device *dev, const char *name,
                name_len = strlen(name) + strlen(template) - 2 + 1;
 
        rproc = kzalloc(sizeof(struct rproc) + len + name_len, GFP_KERNEL);
-       if (!rproc) {
-               dev_err(dev, "%s: kzalloc failed\n", __func__);
+       if (!rproc)
                return NULL;
-       }
 
        if (!firmware) {
                p = (char *)rproc + sizeof(struct rproc) + len;
@@ -1369,6 +1431,11 @@ int rproc_del(struct rproc *rproc)
        /* Free the copy of the resource table */
        kfree(rproc->cached_table);
 
+       /* the rproc is downref'ed as soon as it's removed from the list */
+       mutex_lock(&rproc_list_mutex);
+       list_del(&rproc->node);
+       mutex_unlock(&rproc_list_mutex);
+
        device_del(&rproc->dev);
 
        return 0;
index 70701a50ddfa6276105f383b33c81d88e885db33..8041b95cb05863c80ec6717bea03fab29c0ec27a 100644 (file)
@@ -35,7 +35,7 @@ struct rproc;
  * @get_boot_addr:     get boot address to entry point specified in firmware
  */
 struct rproc_fw_ops {
-       struct resource_table *(*find_rsc_table) (struct rproc *rproc,
+       struct resource_table *(*find_rsc_table)(struct rproc *rproc,
                                                const struct firmware *fw,
                                                int *tablesz);
        struct resource_table *(*find_loaded_rsc_table)(struct rproc *rproc,
index dd193f35a1ff22d3c36406edc44b3e106ea8d6e3..53dc17bdd54e0c3ef94f03a345e2d31e5a5bdfe6 100644 (file)
@@ -67,8 +67,7 @@ static int sproc_load_segments(struct rproc *rproc, const struct firmware *fw)
 static const struct ste_toc_entry *sproc_find_rsc_entry(const void *data)
 {
        int i;
-       const struct ste_toc *toc;
-       toc = data;
+       const struct ste_toc *toc = data;
 
        /* Search the table for the resource table */
        for (i = 0; i < SPROC_MAX_TOC_ENTRIES &&
@@ -230,6 +229,7 @@ static int sproc_start(struct rproc *rproc)
 static int sproc_stop(struct rproc *rproc)
 {
        struct sproc *sproc = rproc->priv;
+
        sproc_dbg(sproc, "stop ste-modem\n");
 
        return sproc->mdev->ops.power(sproc->mdev, false);
diff --git a/drivers/remoteproc/wkup_m3_rproc.c b/drivers/remoteproc/wkup_m3_rproc.c
new file mode 100644 (file)
index 0000000..edf8181
--- /dev/null
@@ -0,0 +1,257 @@
+/*
+ * TI AMx3 Wakeup M3 Remote Processor driver
+ *
+ * Copyright (C) 2014-2015 Texas Instruments, Inc.
+ *
+ * Dave Gerlach <d-gerlach@ti.com>
+ * Suman Anna <s-anna@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/remoteproc.h>
+
+#include <linux/platform_data/wkup_m3.h>
+
+#include "remoteproc_internal.h"
+
+#define WKUPM3_MEM_MAX 2
+
+/**
+ * struct wkup_m3_mem - WkupM3 internal memory structure
+ * @cpu_addr: MPU virtual address of the memory region
+ * @bus_addr: Bus address used to access the memory region
+ * @dev_addr: Device address from Wakeup M3 view
+ * @size: Size of the memory region
+ */
+struct wkup_m3_mem {
+       void __iomem *cpu_addr;
+       phys_addr_t bus_addr;
+       u32 dev_addr;
+       size_t size;
+};
+
+/**
+ * struct wkup_m3_rproc - WkupM3 remote processor state
+ * @rproc: rproc handle
+ * @pdev: pointer to platform device
+ * @mem: WkupM3 memory information
+ */
+struct wkup_m3_rproc {
+       struct rproc *rproc;
+       struct platform_device *pdev;
+       struct wkup_m3_mem mem[WKUPM3_MEM_MAX];
+};
+
+static int wkup_m3_rproc_start(struct rproc *rproc)
+{
+       struct wkup_m3_rproc *wkupm3 = rproc->priv;
+       struct platform_device *pdev = wkupm3->pdev;
+       struct device *dev = &pdev->dev;
+       struct wkup_m3_platform_data *pdata = dev_get_platdata(dev);
+
+       if (pdata->deassert_reset(pdev, pdata->reset_name)) {
+               dev_err(dev, "Unable to reset wkup_m3!\n");
+               return -ENODEV;
+       }
+
+       return 0;
+}
+
+static int wkup_m3_rproc_stop(struct rproc *rproc)
+{
+       struct wkup_m3_rproc *wkupm3 = rproc->priv;
+       struct platform_device *pdev = wkupm3->pdev;
+       struct device *dev = &pdev->dev;
+       struct wkup_m3_platform_data *pdata = dev_get_platdata(dev);
+
+       if (pdata->assert_reset(pdev, pdata->reset_name)) {
+               dev_err(dev, "Unable to assert reset of wkup_m3!\n");
+               return -ENODEV;
+       }
+
+       return 0;
+}
+
+static void *wkup_m3_rproc_da_to_va(struct rproc *rproc, u64 da, int len)
+{
+       struct wkup_m3_rproc *wkupm3 = rproc->priv;
+       void *va = NULL;
+       int i;
+       u32 offset;
+
+       if (len <= 0)
+               return NULL;
+
+       for (i = 0; i < WKUPM3_MEM_MAX; i++) {
+               if (da >= wkupm3->mem[i].dev_addr && da + len <=
+                   wkupm3->mem[i].dev_addr + wkupm3->mem[i].size) {
+                       offset = da - wkupm3->mem[i].dev_addr;
+                       /* __force to make sparse happy with type conversion */
+                       va = (__force void *)(wkupm3->mem[i].cpu_addr + offset);
+                       break;
+               }
+       }
+
+       return va;
+}
+
+static struct rproc_ops wkup_m3_rproc_ops = {
+       .start          = wkup_m3_rproc_start,
+       .stop           = wkup_m3_rproc_stop,
+       .da_to_va       = wkup_m3_rproc_da_to_va,
+};
+
+static const struct of_device_id wkup_m3_rproc_of_match[] = {
+       { .compatible = "ti,am3352-wkup-m3", },
+       { .compatible = "ti,am4372-wkup-m3", },
+       {},
+};
+
+static int wkup_m3_rproc_probe(struct platform_device *pdev)
+{
+       struct device *dev = &pdev->dev;
+       struct wkup_m3_platform_data *pdata = dev->platform_data;
+       /* umem always needs to be processed first */
+       const char *mem_names[WKUPM3_MEM_MAX] = { "umem", "dmem" };
+       struct wkup_m3_rproc *wkupm3;
+       const char *fw_name;
+       struct rproc *rproc;
+       struct resource *res;
+       const __be32 *addrp;
+       u32 l4_offset = 0;
+       u64 size;
+       int ret;
+       int i;
+
+       if (!(pdata && pdata->deassert_reset && pdata->assert_reset &&
+             pdata->reset_name)) {
+               dev_err(dev, "Platform data missing!\n");
+               return -ENODEV;
+       }
+
+       ret = of_property_read_string(dev->of_node, "ti,pm-firmware",
+                                     &fw_name);
+       if (ret) {
+               dev_err(dev, "No firmware filename given\n");
+               return -ENODEV;
+       }
+
+       pm_runtime_enable(&pdev->dev);
+       ret = pm_runtime_get_sync(&pdev->dev);
+       if (ret < 0) {
+               dev_err(&pdev->dev, "pm_runtime_get_sync() failed\n");
+               goto err;
+       }
+
+       rproc = rproc_alloc(dev, "wkup_m3", &wkup_m3_rproc_ops,
+                           fw_name, sizeof(*wkupm3));
+       if (!rproc) {
+               ret = -ENOMEM;
+               goto err;
+       }
+
+       wkupm3 = rproc->priv;
+       wkupm3->rproc = rproc;
+       wkupm3->pdev = pdev;
+
+       for (i = 0; i < ARRAY_SIZE(mem_names); i++) {
+               res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+                                                  mem_names[i]);
+               wkupm3->mem[i].cpu_addr = devm_ioremap_resource(dev, res);
+               if (IS_ERR(wkupm3->mem[i].cpu_addr)) {
+                       dev_err(&pdev->dev, "devm_ioremap_resource failed for resource %d\n",
+                               i);
+                       ret = PTR_ERR(wkupm3->mem[i].cpu_addr);
+                       goto err;
+               }
+               wkupm3->mem[i].bus_addr = res->start;
+               wkupm3->mem[i].size = resource_size(res);
+               addrp = of_get_address(dev->of_node, i, &size, NULL);
+               /*
+                * The wkupm3 has umem at address 0 in its view, so the device
+                * address for each memory region is computed as a relative
+                * offset from the bus address of umem, which therefore needs to be
+                * processed first.
+                */
+               if (!strcmp(mem_names[i], "umem"))
+                       l4_offset = be32_to_cpu(*addrp);
+               wkupm3->mem[i].dev_addr = be32_to_cpu(*addrp) - l4_offset;
+       }
+
+       dev_set_drvdata(dev, rproc);
+
+       ret = rproc_add(rproc);
+       if (ret) {
+               dev_err(dev, "rproc_add failed\n");
+               goto err_put_rproc;
+       }
+
+       return 0;
+
+err_put_rproc:
+       rproc_put(rproc);
+err:
+       pm_runtime_put_noidle(dev);
+       pm_runtime_disable(dev);
+       return ret;
+}
+
+static int wkup_m3_rproc_remove(struct platform_device *pdev)
+{
+       struct rproc *rproc = platform_get_drvdata(pdev);
+
+       rproc_del(rproc);
+       rproc_put(rproc);
+       pm_runtime_put_sync(&pdev->dev);
+       pm_runtime_disable(&pdev->dev);
+
+       return 0;
+}
+
+#ifdef CONFIG_PM
+static int wkup_m3_rpm_suspend(struct device *dev)
+{
+       return -EBUSY;
+}
+
+static int wkup_m3_rpm_resume(struct device *dev)
+{
+       return 0;
+}
+#endif
+
+static const struct dev_pm_ops wkup_m3_rproc_pm_ops = {
+       SET_RUNTIME_PM_OPS(wkup_m3_rpm_suspend, wkup_m3_rpm_resume, NULL)
+};
+
+static struct platform_driver wkup_m3_rproc_driver = {
+       .probe = wkup_m3_rproc_probe,
+       .remove = wkup_m3_rproc_remove,
+       .driver = {
+               .name = "wkup_m3_rproc",
+               .of_match_table = wkup_m3_rproc_of_match,
+               .pm = &wkup_m3_rproc_pm_ops,
+       },
+};
+
+module_platform_driver(wkup_m3_rproc_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("TI Wakeup M3 remote processor control driver");
+MODULE_AUTHOR("Dave Gerlach <d-gerlach@ti.com>");
index 6f1fa1773e76418ef3d55917fb3146875dd4f5b7..f8d8fdb26b72a593260fa67b98337414a684e9b5 100644 (file)
@@ -65,6 +65,7 @@ struct virtio_ccw_device {
        bool is_thinint;
        bool going_away;
        bool device_lost;
+       unsigned int config_ready;
        void *airq_info;
 };
 
@@ -833,8 +834,11 @@ static void virtio_ccw_get_config(struct virtio_device *vdev,
        if (ret)
                goto out_free;
 
-       memcpy(vcdev->config, config_area, sizeof(vcdev->config));
-       memcpy(buf, &vcdev->config[offset], len);
+       memcpy(vcdev->config, config_area, offset + len);
+       if (buf)
+               memcpy(buf, &vcdev->config[offset], len);
+       if (vcdev->config_ready < offset + len)
+               vcdev->config_ready = offset + len;
 
 out_free:
        kfree(config_area);
@@ -857,6 +861,9 @@ static void virtio_ccw_set_config(struct virtio_device *vdev,
        if (!config_area)
                goto out_free;
 
+       /* Make sure we don't overwrite fields. */
+       if (vcdev->config_ready < offset)
+               virtio_ccw_get_config(vdev, 0, NULL, offset);
        memcpy(&vcdev->config[offset], buf, len);
        /* Write the config area to the host. */
        memcpy(config_area, vcdev->config, sizeof(vcdev->config));
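The config_ready bookkeeping shown here closes a real hole: the cached vcdev->config is written back to the host wholesale, so a driver that called set_config() at a non-zero offset before ever reading the config space would clobber the preceding bytes with stale zeroes. With this change, a write at, say, offset 8 first triggers virtio_ccw_get_config(vdev, 0, NULL, 8), which refreshes (and marks ready) bytes 0..7 of the cache before the full-area write-back; get_config() tolerates the NULL buffer thanks to the new if (buf) check.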
index b3e5bd1d5d9cd3f6577bf0a12ddfe44e8548bb31..9842301f798075dfd7a2e7e0769a806c3dea0a68 100644 (file)
@@ -685,10 +685,7 @@ static inline void *cxgbi_alloc_big_mem(unsigned int size,
 
 static inline void cxgbi_free_big_mem(void *addr)
 {
-       if (is_vmalloc_addr(addr))
-               vfree(addr);
-       else
-               kfree(addr);
+       kvfree(addr);
 }
 
 static inline void cxgbi_set_iscsi_ipv4(struct cxgbi_hba *chba, __be32 ipaddr)
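kvfree() performs the same is_vmalloc_addr() dispatch internally, choosing vfree() or kfree() as appropriate, so the open-coded branch for cxgbi_alloc_big_mem()'s mixed kmalloc/vmalloc allocations can simply go away.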
index 4a484d60be0d4db6449bde1e77af21fe58c3639b..b749026aa592445d70dd51056314ff29965578ca 100644 (file)
@@ -1191,7 +1191,7 @@ static int __qlt_24xx_handle_abts(struct scsi_qla_host *vha,
        list_for_each_entry(se_cmd, &se_sess->sess_cmd_list, se_cmd_list) {
                struct qla_tgt_cmd *cmd =
                        container_of(se_cmd, struct qla_tgt_cmd, se_cmd);
-               if (cmd->tag == abts->exchange_addr_to_abort) {
+               if (se_cmd->tag == abts->exchange_addr_to_abort) {
                        lun = cmd->unpacked_lun;
                        found_lun = true;
                        break;
@@ -1728,9 +1728,8 @@ static int qlt_pre_xmit_response(struct qla_tgt_cmd *cmd,
 
        if (unlikely(cmd->aborted)) {
                ql_dbg(ql_dbg_tgt_mgt, vha, 0xf014,
-                   "qla_target(%d): terminating exchange "
-                   "for aborted cmd=%p (se_cmd=%p, tag=%d)", vha->vp_idx, cmd,
-                   se_cmd, cmd->tag);
+                      "qla_target(%d): terminating exchange for aborted cmd=%p (se_cmd=%p, tag=%lld)",
+                      vha->vp_idx, cmd, se_cmd, se_cmd->tag);
 
                cmd->state = QLA_TGT_STATE_ABORTED;
                cmd->cmd_flags |= BIT_6;
@@ -1765,18 +1764,17 @@ static int qlt_pre_xmit_response(struct qla_tgt_cmd *cmd,
        if (se_cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) {
                prm->residual = se_cmd->residual_count;
                ql_dbg(ql_dbg_io + ql_dbg_verbose, vha, 0x305c,
-                   "Residual underflow: %d (tag %d, "
-                   "op %x, bufflen %d, rq_result %x)\n", prm->residual,
-                   cmd->tag, se_cmd->t_task_cdb ? se_cmd->t_task_cdb[0] : 0,
-                   cmd->bufflen, prm->rq_result);
+                   "Residual underflow: %d (tag %lld, op %x, bufflen %d, rq_result %x)\n",
+                      prm->residual, se_cmd->tag,
+                      se_cmd->t_task_cdb ? se_cmd->t_task_cdb[0] : 0,
+                      cmd->bufflen, prm->rq_result);
                prm->rq_result |= SS_RESIDUAL_UNDER;
        } else if (se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT) {
                prm->residual = se_cmd->residual_count;
                ql_dbg(ql_dbg_io, vha, 0x305d,
-                   "Residual overflow: %d (tag %d, "
-                   "op %x, bufflen %d, rq_result %x)\n", prm->residual,
-                   cmd->tag, se_cmd->t_task_cdb ? se_cmd->t_task_cdb[0] : 0,
-                   cmd->bufflen, prm->rq_result);
+                   "Residual overflow: %d (tag %lld, op %x, bufflen %d, rq_result %x)\n",
+                      prm->residual, se_cmd->tag, se_cmd->t_task_cdb ?
+                      se_cmd->t_task_cdb[0] : 0, cmd->bufflen, prm->rq_result);
                prm->rq_result |= SS_RESIDUAL_OVER;
        }
 
@@ -1849,7 +1847,7 @@ static void qlt_check_srr_debug(struct qla_tgt_cmd *cmd, int *xmit_type)
            == 50) {
                *xmit_type &= ~QLA_TGT_XMIT_STATUS;
                ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xf015,
-                   "Dropping cmd %p (tag %d) status", cmd, cmd->tag);
+                   "Dropping cmd %p (tag %d) status", cmd, se_cmd->tag);
        }
 #endif
        /*
@@ -1873,7 +1871,7 @@ static void qlt_check_srr_debug(struct qla_tgt_cmd *cmd, int *xmit_type)
                ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xf016,
                    "Cutting cmd %p (tag %d) buffer"
                    " tail to len %d, sg_cnt %d (cmd->bufflen %d,"
-                   " cmd->sg_cnt %d)", cmd, cmd->tag, tot_len, leave,
+                   " cmd->sg_cnt %d)", cmd, se_cmd->tag, tot_len, leave,
                    cmd->bufflen, cmd->sg_cnt);
 
                cmd->bufflen = tot_len;
@@ -1885,13 +1883,13 @@ static void qlt_check_srr_debug(struct qla_tgt_cmd *cmd, int *xmit_type)
 
                ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xf017,
                    "Cutting cmd %p (tag %d) buffer head "
-                   "to offset %d (cmd->bufflen %d)", cmd, cmd->tag, offset,
+                   "to offset %d (cmd->bufflen %d)", cmd, se_cmd->tag, offset,
                    cmd->bufflen);
                if (offset == 0)
                        *xmit_type &= ~QLA_TGT_XMIT_DATA;
                else if (qlt_set_data_offset(cmd, offset)) {
                        ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xf018,
-                           "qlt_set_data_offset() failed (tag %d)", cmd->tag);
+                           "qlt_set_data_offset() failed (tag %d)", se_cmd->tag);
                }
        }
 }
@@ -3194,7 +3192,7 @@ skip_term:
                return;
        } else if (cmd->state == QLA_TGT_STATE_ABORTED) {
                ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01e,
-                   "Aborted command %p (tag %d) finished\n", cmd, cmd->tag);
+                 "Aborted command %p (tag %lld) finished\n", cmd, se_cmd->tag);
        } else {
                ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05c,
                    "qla_target(%d): A command in state (%d) should "
@@ -3266,7 +3264,7 @@ static void __qlt_do_work(struct qla_tgt_cmd *cmd)
                goto out_term;
 
        cdb = &atio->u.isp24.fcp_cmnd.cdb[0];
-       cmd->tag = atio->u.isp24.exchange_addr;
+       cmd->se_cmd.tag = atio->u.isp24.exchange_addr;
        cmd->unpacked_lun = scsilun_to_int(
            (struct scsi_lun *)&atio->u.isp24.fcp_cmnd.lun);
 
@@ -3893,9 +3891,8 @@ static void qlt_handle_srr(struct scsi_qla_host *vha,
                        resp = 1;
                } else {
                        ql_dbg(ql_dbg_tgt_mgt, vha, 0xf064,
-                           "qla_target(%d): SRR for in data for cmd "
-                           "without them (tag %d, SCSI status %d), "
-                           "reject", vha->vp_idx, cmd->tag,
+                              "qla_target(%d): SRR for in data for cmd without them (tag %lld, SCSI status %d), reject",
+                              vha->vp_idx, se_cmd->tag,
                            cmd->se_cmd.scsi_status);
                        goto out_reject;
                }
@@ -3929,10 +3926,8 @@ static void qlt_handle_srr(struct scsi_qla_host *vha,
                        }
                } else {
                        ql_dbg(ql_dbg_tgt_mgt, vha, 0xf066,
-                           "qla_target(%d): SRR for out data for cmd "
-                           "without them (tag %d, SCSI status %d), "
-                           "reject", vha->vp_idx, cmd->tag,
-                           cmd->se_cmd.scsi_status);
+                           "qla_target(%d): SRR for out data for cmd without them (tag %lld, SCSI status %d), reject",
+                              vha->vp_idx, se_cmd->tag, cmd->se_cmd.scsi_status);
                        goto out_reject;
                }
                break;
@@ -4053,10 +4048,9 @@ restart:
                cmd->sg = se_cmd->t_data_sg;
 
                ql_dbg(ql_dbg_tgt_mgt, vha, 0xf02c,
-                   "SRR cmd %p (se_cmd %p, tag %d, op %x), "
-                   "sg_cnt=%d, offset=%d", cmd, &cmd->se_cmd, cmd->tag,
-                   se_cmd->t_task_cdb ? se_cmd->t_task_cdb[0] : 0,
-                   cmd->sg_cnt, cmd->offset);
+                      "SRR cmd %p (se_cmd %p, tag %lld, op %x), sg_cnt=%d, offset=%d",
+                      cmd, &cmd->se_cmd, se_cmd->tag, se_cmd->t_task_cdb ?
+                      se_cmd->t_task_cdb[0] : 0, cmd->sg_cnt, cmd->offset);
 
                qlt_handle_srr(vha, sctio, imm);
 
index 332086776dfe94db4be404cca5c621518d600b1f..985d76dd706b71b3da1b3685169a21c24af90fa0 100644 (file)
@@ -924,7 +924,6 @@ struct qla_tgt_cmd {
        int sg_cnt;             /* SG segments count */
        int bufflen;            /* cmd buffer length */
        int offset;
-       uint32_t tag;
        uint32_t unpacked_lun;
        enum dma_data_direction dma_data_direction;
        uint32_t reset_count;
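All of the qla2xxx churn above boils down to the data-structure change visible here: the driver-private 32-bit cmd->tag is deleted from struct qla_tgt_cmd in favour of the generic se_cmd.tag (now a 64-bit value in the reworked target core), which is why the exchange address is stored in cmd->se_cmd.tag and the affected debug format strings move from %d to %lld.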
index e32d24ec7a11516a79a40b23ac850da5120a3301..d9a8c6084346759778c5cd8506c42d4ead19e2cc 100644 (file)
@@ -44,7 +44,6 @@
 #include <target/target_core_base.h>
 #include <target/target_core_fabric.h>
 #include <target/target_core_fabric_configfs.h>
-#include <target/target_core_configfs.h>
 #include <target/configfs_macros.h>
 
 #include "qla_def.h"
@@ -54,9 +53,6 @@
 static struct workqueue_struct *tcm_qla2xxx_free_wq;
 static struct workqueue_struct *tcm_qla2xxx_cmd_wq;
 
-static const struct target_core_fabric_ops tcm_qla2xxx_ops;
-static const struct target_core_fabric_ops tcm_qla2xxx_npiv_ops;
-
 /*
  * Parse WWN.
  * If strict, we require lower-case hex and colon separators to be sure
@@ -191,23 +187,6 @@ static char *tcm_qla2xxx_npiv_get_fabric_name(void)
        return "qla2xxx_npiv";
 }
 
-static u8 tcm_qla2xxx_get_fabric_proto_ident(struct se_portal_group *se_tpg)
-{
-       struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
-                               struct tcm_qla2xxx_tpg, se_tpg);
-       struct tcm_qla2xxx_lport *lport = tpg->lport;
-       u8 proto_id;
-
-       switch (lport->lport_proto_id) {
-       case SCSI_PROTOCOL_FCP:
-       default:
-               proto_id = fc_get_fabric_proto_ident(se_tpg);
-               break;
-       }
-
-       return proto_id;
-}
-
 static char *tcm_qla2xxx_get_fabric_wwn(struct se_portal_group *se_tpg)
 {
        struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
@@ -224,78 +203,6 @@ static u16 tcm_qla2xxx_get_tag(struct se_portal_group *se_tpg)
        return tpg->lport_tpgt;
 }
 
-static u32 tcm_qla2xxx_get_default_depth(struct se_portal_group *se_tpg)
-{
-       return 1;
-}
-
-static u32 tcm_qla2xxx_get_pr_transport_id(
-       struct se_portal_group *se_tpg,
-       struct se_node_acl *se_nacl,
-       struct t10_pr_registration *pr_reg,
-       int *format_code,
-       unsigned char *buf)
-{
-       struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
-                               struct tcm_qla2xxx_tpg, se_tpg);
-       struct tcm_qla2xxx_lport *lport = tpg->lport;
-       int ret = 0;
-
-       switch (lport->lport_proto_id) {
-       case SCSI_PROTOCOL_FCP:
-       default:
-               ret = fc_get_pr_transport_id(se_tpg, se_nacl, pr_reg,
-                                       format_code, buf);
-               break;
-       }
-
-       return ret;
-}
-
-static u32 tcm_qla2xxx_get_pr_transport_id_len(
-       struct se_portal_group *se_tpg,
-       struct se_node_acl *se_nacl,
-       struct t10_pr_registration *pr_reg,
-       int *format_code)
-{
-       struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
-                               struct tcm_qla2xxx_tpg, se_tpg);
-       struct tcm_qla2xxx_lport *lport = tpg->lport;
-       int ret = 0;
-
-       switch (lport->lport_proto_id) {
-       case SCSI_PROTOCOL_FCP:
-       default:
-               ret = fc_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,
-                                       format_code);
-               break;
-       }
-
-       return ret;
-}
-
-static char *tcm_qla2xxx_parse_pr_out_transport_id(
-       struct se_portal_group *se_tpg,
-       const char *buf,
-       u32 *out_tid_len,
-       char **port_nexus_ptr)
-{
-       struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
-                               struct tcm_qla2xxx_tpg, se_tpg);
-       struct tcm_qla2xxx_lport *lport = tpg->lport;
-       char *tid = NULL;
-
-       switch (lport->lport_proto_id) {
-       case SCSI_PROTOCOL_FCP:
-       default:
-               tid = fc_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,
-                                       port_nexus_ptr);
-               break;
-       }
-
-       return tid;
-}
-
 static int tcm_qla2xxx_check_demo_mode(struct se_portal_group *se_tpg)
 {
        struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
@@ -344,29 +251,6 @@ static int tcm_qla2xxx_check_prot_fabric_only(struct se_portal_group *se_tpg)
        return tpg->tpg_attrib.fabric_prot_type;
 }
 
-static struct se_node_acl *tcm_qla2xxx_alloc_fabric_acl(
-       struct se_portal_group *se_tpg)
-{
-       struct tcm_qla2xxx_nacl *nacl;
-
-       nacl = kzalloc(sizeof(struct tcm_qla2xxx_nacl), GFP_KERNEL);
-       if (!nacl) {
-               pr_err("Unable to allocate struct tcm_qla2xxx_nacl\n");
-               return NULL;
-       }
-
-       return &nacl->se_node_acl;
-}
-
-static void tcm_qla2xxx_release_fabric_acl(
-       struct se_portal_group *se_tpg,
-       struct se_node_acl *se_nacl)
-{
-       struct tcm_qla2xxx_nacl *nacl = container_of(se_nacl,
-                       struct tcm_qla2xxx_nacl, se_node_acl);
-       kfree(nacl);
-}
-
 static u32 tcm_qla2xxx_tpg_get_inst_index(struct se_portal_group *se_tpg)
 {
        struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
@@ -430,7 +314,7 @@ static int tcm_qla2xxx_check_stop_free(struct se_cmd *se_cmd)
                cmd->cmd_flags |= BIT_14;
        }
 
-       return target_put_sess_cmd(se_cmd->se_sess, se_cmd);
+       return target_put_sess_cmd(se_cmd);
 }
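This one-argument form recurs throughout the rest of this diff: target_put_sess_cmd() (and target_get_sess_cmd() in the iSCSI hunks below) can find the session via se_cmd->se_sess, so the explicit se_session parameter was dropped from the interface and each caller is updated mechanically.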
 
 /* tcm_qla2xxx_release_cmd - Callback from TCM Core to release underlying
@@ -534,19 +418,6 @@ static void tcm_qla2xxx_set_default_node_attrs(struct se_node_acl *nacl)
        return;
 }
 
-static u32 tcm_qla2xxx_get_task_tag(struct se_cmd *se_cmd)
-{
-       struct qla_tgt_cmd *cmd;
-
-       /* check for task mgmt cmd */
-       if (se_cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)
-               return 0xffffffff;
-
-       cmd = container_of(se_cmd, struct qla_tgt_cmd, se_cmd);
-
-       return cmd->tag;
-}
-
 static int tcm_qla2xxx_get_cmd_state(struct se_cmd *se_cmd)
 {
        return 0;
@@ -827,17 +698,6 @@ static void tcm_qla2xxx_release_session(struct kref *kref)
        qlt_unreg_sess(se_sess->fabric_sess_ptr);
 }
 
-static void tcm_qla2xxx_put_session(struct se_session *se_sess)
-{
-       struct qla_tgt_sess *sess = se_sess->fabric_sess_ptr;
-       struct qla_hw_data *ha = sess->vha->hw;
-       unsigned long flags;
-
-       spin_lock_irqsave(&ha->hardware_lock, flags);
-       kref_put(&se_sess->sess_kref, tcm_qla2xxx_release_session);
-       spin_unlock_irqrestore(&ha->hardware_lock, flags);
-}
-
 static void tcm_qla2xxx_put_sess(struct qla_tgt_sess *sess)
 {
        if (!sess)
@@ -853,53 +713,20 @@ static void tcm_qla2xxx_shutdown_sess(struct qla_tgt_sess *sess)
        target_sess_cmd_list_set_waiting(sess->se_sess);
 }
 
-static struct se_node_acl *tcm_qla2xxx_make_nodeacl(
-       struct se_portal_group *se_tpg,
-       struct config_group *group,
-       const char *name)
+static int tcm_qla2xxx_init_nodeacl(struct se_node_acl *se_nacl,
+               const char *name)
 {
-       struct se_node_acl *se_nacl, *se_nacl_new;
-       struct tcm_qla2xxx_nacl *nacl;
+       struct tcm_qla2xxx_nacl *nacl =
+               container_of(se_nacl, struct tcm_qla2xxx_nacl, se_node_acl);
        u64 wwnn;
-       u32 qla2xxx_nexus_depth;
 
        if (tcm_qla2xxx_parse_wwn(name, &wwnn, 1) < 0)
-               return ERR_PTR(-EINVAL);
-
-       se_nacl_new = tcm_qla2xxx_alloc_fabric_acl(se_tpg);
-       if (!se_nacl_new)
-               return ERR_PTR(-ENOMEM);
-/* #warning FIXME: Hardcoded qla2xxx_nexus depth in tcm_qla2xxx_make_nodeacl */
-       qla2xxx_nexus_depth = 1;
+               return -EINVAL;
 
-       /*
-        * se_nacl_new may be released by core_tpg_add_initiator_node_acl()
-        * when converting a NodeACL from demo mode -> explict
-        */
-       se_nacl = core_tpg_add_initiator_node_acl(se_tpg, se_nacl_new,
-                               name, qla2xxx_nexus_depth);
-       if (IS_ERR(se_nacl)) {
-               tcm_qla2xxx_release_fabric_acl(se_tpg, se_nacl_new);
-               return se_nacl;
-       }
-       /*
-        * Locate our struct tcm_qla2xxx_nacl and set the FC Nport WWPN
-        */
-       nacl = container_of(se_nacl, struct tcm_qla2xxx_nacl, se_node_acl);
        nacl->nport_wwnn = wwnn;
        tcm_qla2xxx_format_wwn(&nacl->nport_name[0], TCM_QLA2XXX_NAMELEN, wwnn);
 
-       return se_nacl;
-}
-
-static void tcm_qla2xxx_drop_nodeacl(struct se_node_acl *se_acl)
-{
-       struct se_portal_group *se_tpg = se_acl->se_tpg;
-       struct tcm_qla2xxx_nacl *nacl = container_of(se_acl,
-                               struct tcm_qla2xxx_nacl, se_node_acl);
-
-       core_tpg_del_initiator_node_acl(se_tpg, se_acl, 1);
-       kfree(nacl);
+       return 0;
 }
 
 /* Start items for tcm_qla2xxx_tpg_attrib_cit */
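The old make_nodeacl()/drop_nodeacl() pair, with its fabric-private kzalloc/kfree of struct tcm_qla2xxx_nacl, gives way to the init_nodeacl() model: the core now allocates the ACL itself, sized by the .node_acl_size field added to the ops tables below, and the fabric callback merely fills in fabric-specific fields (here the WWNN parsed from the ACL name). The same conversion is applied to the iSCSI target later in this diff.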
@@ -1175,8 +1002,7 @@ static struct se_portal_group *tcm_qla2xxx_make_tpg(
        tpg->tpg_attrib.cache_dynamic_acls = 1;
        tpg->tpg_attrib.demo_mode_login_only = 1;
 
-       ret = core_tpg_register(&tcm_qla2xxx_ops, wwn,
-                               &tpg->se_tpg, tpg, TRANSPORT_TPG_TYPE_NORMAL);
+       ret = core_tpg_register(wwn, &tpg->se_tpg, SCSI_PROTOCOL_FCP);
        if (ret < 0) {
                kfree(tpg);
                return NULL;
@@ -1295,8 +1121,7 @@ static struct se_portal_group *tcm_qla2xxx_npiv_make_tpg(
        tpg->tpg_attrib.cache_dynamic_acls = 1;
        tpg->tpg_attrib.demo_mode_login_only = 1;
 
-       ret = core_tpg_register(&tcm_qla2xxx_npiv_ops, wwn,
-                               &tpg->se_tpg, tpg, TRANSPORT_TPG_TYPE_NORMAL);
+       ret = core_tpg_register(wwn, &tpg->se_tpg, SCSI_PROTOCOL_FCP);
        if (ret < 0) {
                kfree(tpg);
                return NULL;
@@ -1988,14 +1813,10 @@ static struct configfs_attribute *tcm_qla2xxx_wwn_attrs[] = {
 static const struct target_core_fabric_ops tcm_qla2xxx_ops = {
        .module                         = THIS_MODULE,
        .name                           = "qla2xxx",
+       .node_acl_size                  = sizeof(struct tcm_qla2xxx_nacl),
        .get_fabric_name                = tcm_qla2xxx_get_fabric_name,
-       .get_fabric_proto_ident         = tcm_qla2xxx_get_fabric_proto_ident,
        .tpg_get_wwn                    = tcm_qla2xxx_get_fabric_wwn,
        .tpg_get_tag                    = tcm_qla2xxx_get_tag,
-       .tpg_get_default_depth          = tcm_qla2xxx_get_default_depth,
-       .tpg_get_pr_transport_id        = tcm_qla2xxx_get_pr_transport_id,
-       .tpg_get_pr_transport_id_len    = tcm_qla2xxx_get_pr_transport_id_len,
-       .tpg_parse_pr_out_transport_id  = tcm_qla2xxx_parse_pr_out_transport_id,
        .tpg_check_demo_mode            = tcm_qla2xxx_check_demo_mode,
        .tpg_check_demo_mode_cache      = tcm_qla2xxx_check_demo_mode_cache,
        .tpg_check_demo_mode_write_protect =
@@ -2004,12 +1825,9 @@ static const struct target_core_fabric_ops tcm_qla2xxx_ops = {
                                        tcm_qla2xxx_check_prod_write_protect,
        .tpg_check_prot_fabric_only     = tcm_qla2xxx_check_prot_fabric_only,
        .tpg_check_demo_mode_login_only = tcm_qla2xxx_check_demo_mode_login_only,
-       .tpg_alloc_fabric_acl           = tcm_qla2xxx_alloc_fabric_acl,
-       .tpg_release_fabric_acl         = tcm_qla2xxx_release_fabric_acl,
        .tpg_get_inst_index             = tcm_qla2xxx_tpg_get_inst_index,
        .check_stop_free                = tcm_qla2xxx_check_stop_free,
        .release_cmd                    = tcm_qla2xxx_release_cmd,
-       .put_session                    = tcm_qla2xxx_put_session,
        .shutdown_session               = tcm_qla2xxx_shutdown_session,
        .close_session                  = tcm_qla2xxx_close_session,
        .sess_get_index                 = tcm_qla2xxx_sess_get_index,
@@ -2017,7 +1835,6 @@ static const struct target_core_fabric_ops tcm_qla2xxx_ops = {
        .write_pending                  = tcm_qla2xxx_write_pending,
        .write_pending_status           = tcm_qla2xxx_write_pending_status,
        .set_default_node_attributes    = tcm_qla2xxx_set_default_node_attrs,
-       .get_task_tag                   = tcm_qla2xxx_get_task_tag,
        .get_cmd_state                  = tcm_qla2xxx_get_cmd_state,
        .queue_data_in                  = tcm_qla2xxx_queue_data_in,
        .queue_status                   = tcm_qla2xxx_queue_status,
@@ -2031,12 +1848,7 @@ static const struct target_core_fabric_ops tcm_qla2xxx_ops = {
        .fabric_drop_wwn                = tcm_qla2xxx_drop_lport,
        .fabric_make_tpg                = tcm_qla2xxx_make_tpg,
        .fabric_drop_tpg                = tcm_qla2xxx_drop_tpg,
-       .fabric_post_link               = NULL,
-       .fabric_pre_unlink              = NULL,
-       .fabric_make_np                 = NULL,
-       .fabric_drop_np                 = NULL,
-       .fabric_make_nodeacl            = tcm_qla2xxx_make_nodeacl,
-       .fabric_drop_nodeacl            = tcm_qla2xxx_drop_nodeacl,
+       .fabric_init_nodeacl            = tcm_qla2xxx_init_nodeacl,
 
        .tfc_wwn_attrs                  = tcm_qla2xxx_wwn_attrs,
        .tfc_tpg_base_attrs             = tcm_qla2xxx_tpg_attrs,
@@ -2046,26 +1858,19 @@ static const struct target_core_fabric_ops tcm_qla2xxx_ops = {
 static const struct target_core_fabric_ops tcm_qla2xxx_npiv_ops = {
        .module                         = THIS_MODULE,
        .name                           = "qla2xxx_npiv",
+       .node_acl_size                  = sizeof(struct tcm_qla2xxx_nacl),
        .get_fabric_name                = tcm_qla2xxx_npiv_get_fabric_name,
-       .get_fabric_proto_ident         = tcm_qla2xxx_get_fabric_proto_ident,
        .tpg_get_wwn                    = tcm_qla2xxx_get_fabric_wwn,
        .tpg_get_tag                    = tcm_qla2xxx_get_tag,
-       .tpg_get_default_depth          = tcm_qla2xxx_get_default_depth,
-       .tpg_get_pr_transport_id        = tcm_qla2xxx_get_pr_transport_id,
-       .tpg_get_pr_transport_id_len    = tcm_qla2xxx_get_pr_transport_id_len,
-       .tpg_parse_pr_out_transport_id  = tcm_qla2xxx_parse_pr_out_transport_id,
        .tpg_check_demo_mode            = tcm_qla2xxx_check_demo_mode,
        .tpg_check_demo_mode_cache      = tcm_qla2xxx_check_demo_mode_cache,
        .tpg_check_demo_mode_write_protect = tcm_qla2xxx_check_demo_mode,
        .tpg_check_prod_mode_write_protect =
            tcm_qla2xxx_check_prod_write_protect,
        .tpg_check_demo_mode_login_only = tcm_qla2xxx_check_demo_mode_login_only,
-       .tpg_alloc_fabric_acl           = tcm_qla2xxx_alloc_fabric_acl,
-       .tpg_release_fabric_acl         = tcm_qla2xxx_release_fabric_acl,
        .tpg_get_inst_index             = tcm_qla2xxx_tpg_get_inst_index,
        .check_stop_free                = tcm_qla2xxx_check_stop_free,
        .release_cmd                    = tcm_qla2xxx_release_cmd,
-       .put_session                    = tcm_qla2xxx_put_session,
        .shutdown_session               = tcm_qla2xxx_shutdown_session,
        .close_session                  = tcm_qla2xxx_close_session,
        .sess_get_index                 = tcm_qla2xxx_sess_get_index,
@@ -2073,7 +1878,6 @@ static const struct target_core_fabric_ops tcm_qla2xxx_npiv_ops = {
        .write_pending                  = tcm_qla2xxx_write_pending,
        .write_pending_status           = tcm_qla2xxx_write_pending_status,
        .set_default_node_attributes    = tcm_qla2xxx_set_default_node_attrs,
-       .get_task_tag                   = tcm_qla2xxx_get_task_tag,
        .get_cmd_state                  = tcm_qla2xxx_get_cmd_state,
        .queue_data_in                  = tcm_qla2xxx_queue_data_in,
        .queue_status                   = tcm_qla2xxx_queue_status,
@@ -2087,12 +1891,7 @@ static const struct target_core_fabric_ops tcm_qla2xxx_npiv_ops = {
        .fabric_drop_wwn                = tcm_qla2xxx_npiv_drop_lport,
        .fabric_make_tpg                = tcm_qla2xxx_npiv_make_tpg,
        .fabric_drop_tpg                = tcm_qla2xxx_drop_tpg,
-       .fabric_post_link               = NULL,
-       .fabric_pre_unlink              = NULL,
-       .fabric_make_np                 = NULL,
-       .fabric_drop_np                 = NULL,
-       .fabric_make_nodeacl            = tcm_qla2xxx_make_nodeacl,
-       .fabric_drop_nodeacl            = tcm_qla2xxx_drop_nodeacl,
+       .fabric_init_nodeacl            = tcm_qla2xxx_init_nodeacl,
 
        .tfc_wwn_attrs                  = tcm_qla2xxx_wwn_attrs,
        .tfc_tpg_base_attrs             = tcm_qla2xxx_npiv_tpg_attrs,
index 23295115c9fc60d7ece0b7448e99fe31c2acf6cc..3bbf4cb6fd97e5c9f68fb61e633c7a222f76422c 100644 (file)
@@ -13,6 +13,8 @@
 #include "qla_target.h"
 
 struct tcm_qla2xxx_nacl {
+       struct se_node_acl se_node_acl;
+
        /* From libfc struct fc_rport->port_id */
        u32 nport_id;
        /* Binary World Wide unique Node Name for remote FC Initiator Nport */
@@ -23,8 +25,6 @@ struct tcm_qla2xxx_nacl {
        struct qla_tgt_sess *qla_tgt_sess;
        /* Pointer to TCM FC nexus */
        struct se_session *nport_nexus;
-       /* Returned by tcm_qla2xxx_make_nodeacl() */
-       struct se_node_acl se_node_acl;
 };
 
 struct tcm_qla2xxx_tpg_attrib {
@@ -57,8 +57,6 @@ struct tcm_qla2xxx_fc_loopid {
 };
 
 struct tcm_qla2xxx_lport {
-       /* SCSI protocol the lport is providing */
-       u8 lport_proto_id;
        /* Binary World Wide unique Port Name for FC Target Lport */
        u64 lport_wwpn;
        /* Binary World Wide unique Port Name for FC NPIV Target Lport */
index 1f8e2dc9c616a0b7e02559537c05b109123b5b80..30268bb2ddb6a3e78dc114606ed8c0b4b615d54a 100644 (file)
@@ -2363,17 +2363,13 @@ do_device_access(struct scsi_cmnd *scmd, u64 lba, u32 num, bool do_write)
        u64 block, rest = 0;
        struct scsi_data_buffer *sdb;
        enum dma_data_direction dir;
-       size_t (*func)(struct scatterlist *, unsigned int, void *, size_t,
-                      off_t);
 
        if (do_write) {
                sdb = scsi_out(scmd);
                dir = DMA_TO_DEVICE;
-               func = sg_pcopy_to_buffer;
        } else {
                sdb = scsi_in(scmd);
                dir = DMA_FROM_DEVICE;
-               func = sg_pcopy_from_buffer;
        }
 
        if (!sdb->length)
@@ -2385,16 +2381,16 @@ do_device_access(struct scsi_cmnd *scmd, u64 lba, u32 num, bool do_write)
        if (block + num > sdebug_store_sectors)
                rest = block + num - sdebug_store_sectors;
 
-       ret = func(sdb->table.sgl, sdb->table.nents,
+       ret = sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
                   fake_storep + (block * scsi_debug_sector_size),
-                  (num - rest) * scsi_debug_sector_size, 0);
+                  (num - rest) * scsi_debug_sector_size, 0, do_write);
        if (ret != (num - rest) * scsi_debug_sector_size)
                return ret;
 
        if (rest) {
-               ret += func(sdb->table.sgl, sdb->table.nents,
+               ret += sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
                            fake_storep, rest * scsi_debug_sector_size,
-                           (num - rest) * scsi_debug_sector_size);
+                           (num - rest) * scsi_debug_sector_size, do_write);
        }
 
        return ret;
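sg_copy_buffer() takes the copy direction as its final bool argument (true copies from the scatterlist into the buffer), so passing do_write straight through replaces the earlier function-pointer selection between sg_pcopy_to_buffer() and sg_pcopy_from_buffer().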
index 81f22980b2def237fbefb4feac8386194188653e..156b790072b47263f5c9aec874e02694e6bb1c9b 100644 (file)
@@ -366,8 +366,9 @@ int __init register_intc_controller(struct intc_desc *desc)
 
                        /* redirect this interrupts to the first one */
                        irq_set_chip(irq2, &dummy_irq_chip);
-                       irq_set_chained_handler(irq2, intc_redirect_irq);
-                       irq_set_handler_data(irq2, (void *)irq);
+                       irq_set_chained_handler_and_data(irq2,
+                                                        intc_redirect_irq,
+                                                        (void *)irq);
                }
        }
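irq_set_chained_handler_and_data() installs the handler and its data together under the descriptor lock, closing the small window in the old sequence where intc_redirect_irq could fire between irq_set_chained_handler() and irq_set_handler_data() and see stale handler data.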
 
index f30ac9354ff248f316fb96a26d534884faa8bf5d..f5f1b821241afc92e6854788262d13f665a0dd45 100644 (file)
@@ -243,8 +243,9 @@ restart:
                 */
                irq_set_nothread(irq);
 
-               irq_set_chained_handler(entry->pirq, intc_virq_handler);
+               /* Set handler data before installing the handler */
                add_virq_to_pirq(entry->pirq, irq);
+               irq_set_chained_handler(entry->pirq, intc_virq_handler);
 
                radix_tree_tag_clear(&d->tree, entry->enum_id,
                                     INTC_TAG_VIRQ_NEEDS_ALLOC);
index b562af816c0a5cd1fa0fb525b6d99e12e18fb034..b04b05a0904eec086c49250c3dc4e27cb84a3927 100644 (file)
@@ -260,7 +260,7 @@ static int __init qcom_cpuidle_init(struct device_node *cpu_node, int cpu)
                /* We have atleast one power down mode */
                cpumask_clear(&mask);
                cpumask_set_cpu(cpu, &mask);
-               qcom_scm_set_warm_boot_addr(cpu_resume, &mask);
+               qcom_scm_set_warm_boot_addr(cpu_resume_arm, &mask);
        }
 
        per_cpu(qcom_idle_ops, cpu) = fns;
index cc119d15dd1616feeef9821ecf98c94fc19ecfec..75d0457a77b72ade791df16e6193662028e35847 100644 (file)
@@ -1021,7 +1021,7 @@ static struct platform_driver tegra_pmc_driver = {
        },
        .probe = tegra_pmc_probe,
 };
-module_platform_driver(tegra_pmc_driver);
+builtin_platform_driver(tegra_pmc_driver);
 
 /*
  * Early initialization to allow access to registers in the very early boot
index 1a07bf540fecc384ef44112e0798c1afd798e9dd..e642c4540dda123a39b9ff42437f34f64f3bad06 100644 (file)
@@ -142,4 +142,4 @@ static struct platform_driver realview_soc_driver = {
                .of_match_table = realview_soc_of_match,
        },
 };
-module_platform_driver(realview_soc_driver);
+builtin_platform_driver(realview_soc_driver);
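builtin_platform_driver() is the helper for code that can never be modular: it expands to a plain initcall-style registration with no module_exit() counterpart, whereas module_platform_driver() also emits an exit path that is meaningless for always-built-in drivers such as these SoC drivers.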
index a3fba366cebe457bc1a59a5006ff5290752d667a..4e68b62193ed7781d3c32f4160878809acfc546d 100644 (file)
@@ -29,7 +29,6 @@
 #include <scsi/scsi_tcq.h>
 #include <target/target_core_base.h>
 #include <target/target_core_fabric.h>
-#include <target/target_core_configfs.h>
 
 #include <target/iscsi/iscsi_target_core.h>
 #include "iscsi_target_parameters.h"
@@ -716,7 +715,7 @@ static int iscsit_add_reject_from_cmd(
         */
        if (cmd->se_cmd.se_tfo != NULL) {
                pr_debug("iscsi reject: calling target_put_sess_cmd >>>>>>\n");
-               target_put_sess_cmd(conn->sess->se_sess, &cmd->se_cmd);
+               target_put_sess_cmd(&cmd->se_cmd);
        }
        return -1;
 }
@@ -1002,13 +1001,15 @@ int iscsit_setup_scsi_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
                hdr->cmdsn, be32_to_cpu(hdr->data_length), payload_length,
                conn->cid);
 
-       target_get_sess_cmd(conn->sess->se_sess, &cmd->se_cmd, true);
+       target_get_sess_cmd(&cmd->se_cmd, true);
 
        cmd->sense_reason = transport_lookup_cmd_lun(&cmd->se_cmd,
                                                     scsilun_to_int(&hdr->lun));
        if (cmd->sense_reason)
                goto attach_cmd;
 
+       /* only used for printks or comparing with ->ref_task_tag */
+       cmd->se_cmd.tag = (__force u32)cmd->init_task_tag;
        cmd->sense_reason = target_setup_cmd_from_cdb(&cmd->se_cmd, hdr->cdb);
        if (cmd->sense_reason) {
                if (cmd->sense_reason == TCM_OUT_OF_RESOURCES) {
@@ -1068,7 +1069,7 @@ int iscsit_process_scsi_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
                if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER)
                        return -1;
                else if (cmdsn_ret == CMDSN_LOWER_THAN_EXP) {
-                       target_put_sess_cmd(conn->sess->se_sess, &cmd->se_cmd);
+                       target_put_sess_cmd(&cmd->se_cmd);
                        return 0;
                }
        }
@@ -1084,7 +1085,7 @@ int iscsit_process_scsi_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
                if (!cmd->sense_reason)
                        return 0;
 
-               target_put_sess_cmd(conn->sess->se_sess, &cmd->se_cmd);
+               target_put_sess_cmd(&cmd->se_cmd);
                return 0;
        }
 
@@ -1115,7 +1116,6 @@ static int
 iscsit_get_immediate_data(struct iscsi_cmd *cmd, struct iscsi_scsi_req *hdr,
                          bool dump_payload)
 {
-       struct iscsi_conn *conn = cmd->conn;
        int cmdsn_ret = 0, immed_ret = IMMEDIATE_DATA_NORMAL_OPERATION;
        /*
         * Special case for Unsupported SAM WRITE Opcodes and ImmediateData=Yes.
@@ -1142,7 +1142,7 @@ after_immediate_data:
 
                        rc = iscsit_dump_data_payload(cmd->conn,
                                                      cmd->first_burst_len, 1);
-                       target_put_sess_cmd(conn->sess->se_sess, &cmd->se_cmd);
+                       target_put_sess_cmd(&cmd->se_cmd);
                        return rc;
                } else if (cmd->unsolicited_data)
                        iscsit_set_unsoliticed_dataout(cmd);
@@ -1811,7 +1811,7 @@ iscsit_handle_task_mgt_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
                                      conn->sess->se_sess, 0, DMA_NONE,
                                      TCM_SIMPLE_TAG, cmd->sense_buffer + 2);
 
-               target_get_sess_cmd(conn->sess->se_sess, &cmd->se_cmd, true);
+               target_get_sess_cmd(&cmd->se_cmd, true);
                sess_ref = true;
 
                switch (function) {
@@ -1953,7 +1953,7 @@ attach:
         */
        if (sess_ref) {
                pr_debug("Handle TMR, using sess_ref=true check\n");
-               target_put_sess_cmd(conn->sess->se_sess, &cmd->se_cmd);
+               target_put_sess_cmd(&cmd->se_cmd);
        }
 
        iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
@@ -2737,11 +2737,7 @@ static int iscsit_send_datain(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
        cmd->iov_data_count = iov_count;
        cmd->tx_size = tx_size;
 
-       /* sendpage is preferred but can't insert markers */
-       if (!conn->conn_ops->IFMarker)
-               ret = iscsit_fe_sendpage_sg(cmd, conn);
-       else
-               ret = iscsit_send_tx_data(cmd, conn, 0);
+       ret = iscsit_fe_sendpage_sg(cmd, conn);
 
        iscsit_unmap_iovec(cmd);
 
@@ -4073,17 +4069,9 @@ static int iscsi_target_rx_opcode(struct iscsi_conn *conn, unsigned char *buf)
                        " opcode while ERL=0, closing iSCSI connection.\n");
                        return -1;
                }
-               if (!conn->conn_ops->OFMarker) {
-                       pr_err("Unable to recover from unknown"
-                       " opcode while OFMarker=No, closing iSCSI"
-                               " connection.\n");
-                       return -1;
-               }
-               if (iscsit_recover_from_unknown_opcode(conn) < 0) {
-                       pr_err("Unable to recover from unknown"
-                               " opcode, closing iSCSI connection.\n");
-                       return -1;
-               }
+               pr_err("Unable to recover from unknown opcode while OFMarker=No,"
+                      " closing iSCSI connection.\n");
+               ret = -1;
                break;
        }
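Both marker-related hunks in this file appear to fall out of removing iSCSI IFMarker/OFMarker support elsewhere in this series: with markers gone, the sendpage transmit path can be used unconditionally, and the OFMarker-specific recovery branch for unknown opcodes collapses into a plain error.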
 
index 469fce44ebad50a9efda5345fa6135af733d74ca..c1898c84b3d25e3630c012d97d9cfa6270e7d414 100644 (file)
@@ -24,7 +24,6 @@
 #include <target/target_core_base.h>
 #include <target/target_core_fabric.h>
 #include <target/target_core_fabric_configfs.h>
-#include <target/target_core_configfs.h>
 #include <target/configfs_macros.h>
 #include <target/iscsi/iscsi_transport.h>
 
@@ -860,57 +859,19 @@ static struct configfs_attribute *lio_target_initiator_attrs[] = {
        NULL,
 };
 
-static struct se_node_acl *lio_tpg_alloc_fabric_acl(
-       struct se_portal_group *se_tpg)
+static int lio_target_init_nodeacl(struct se_node_acl *se_nacl,
+               const char *name)
 {
-       struct iscsi_node_acl *acl;
-
-       acl = kzalloc(sizeof(struct iscsi_node_acl), GFP_KERNEL);
-       if (!acl) {
-               pr_err("Unable to allocate memory for struct iscsi_node_acl\n");
-               return NULL;
-       }
-
-       return &acl->se_node_acl;
-}
-
-static struct se_node_acl *lio_target_make_nodeacl(
-       struct se_portal_group *se_tpg,
-       struct config_group *group,
-       const char *name)
-{
-       struct config_group *stats_cg;
-       struct iscsi_node_acl *acl;
-       struct se_node_acl *se_nacl_new, *se_nacl;
-       struct iscsi_portal_group *tpg = container_of(se_tpg,
-                       struct iscsi_portal_group, tpg_se_tpg);
-       u32 cmdsn_depth;
-
-       se_nacl_new = lio_tpg_alloc_fabric_acl(se_tpg);
-       if (!se_nacl_new)
-               return ERR_PTR(-ENOMEM);
-
-       cmdsn_depth = tpg->tpg_attrib.default_cmdsn_depth;
-       /*
-        * se_nacl_new may be released by core_tpg_add_initiator_node_acl()
-        * when converting a NdoeACL from demo mode -> explict
-        */
-       se_nacl = core_tpg_add_initiator_node_acl(se_tpg, se_nacl_new,
-                               name, cmdsn_depth);
-       if (IS_ERR(se_nacl))
-               return se_nacl;
-
-       acl = container_of(se_nacl, struct iscsi_node_acl, se_node_acl);
-       stats_cg = &se_nacl->acl_fabric_stat_group;
+       struct iscsi_node_acl *acl =
+               container_of(se_nacl, struct iscsi_node_acl, se_node_acl);
+       struct config_group *stats_cg = &se_nacl->acl_fabric_stat_group;
 
        stats_cg->default_groups = kmalloc(sizeof(struct config_group *) * 2,
                                GFP_KERNEL);
        if (!stats_cg->default_groups) {
                pr_err("Unable to allocate memory for"
                                " stats_cg->default_groups\n");
-               core_tpg_del_initiator_node_acl(se_tpg, se_nacl, 1);
-               kfree(acl);
-               return ERR_PTR(-ENOMEM);
+               return -ENOMEM;
        }
 
        stats_cg->default_groups[0] = &acl->node_stat_grps.iscsi_sess_stats_group;
@@ -918,13 +879,11 @@ static struct se_node_acl *lio_target_make_nodeacl(
        config_group_init_type_name(&acl->node_stat_grps.iscsi_sess_stats_group,
                        "iscsi_sess_stats", &iscsi_stat_sess_cit);
 
-       return se_nacl;
+       return 0;
 }
 
-static void lio_target_drop_nodeacl(
-       struct se_node_acl *se_nacl)
+static void lio_target_cleanup_nodeacl(struct se_node_acl *se_nacl)
 {
-       struct se_portal_group *se_tpg = se_nacl->se_tpg;
        struct iscsi_node_acl *acl = container_of(se_nacl,
                        struct iscsi_node_acl, se_node_acl);
        struct config_item *df_item;
@@ -938,9 +897,6 @@ static void lio_target_drop_nodeacl(
                config_item_put(df_item);
        }
        kfree(stats_cg->default_groups);
-
-       core_tpg_del_initiator_node_acl(se_tpg, se_nacl, 1);
-       kfree(acl);
 }
 
 /* End items for lio_target_acl_cit */
@@ -1463,8 +1419,7 @@ static struct se_portal_group *lio_target_tiqn_addtpg(
        if (!tpg)
                return NULL;
 
-       ret = core_tpg_register(&iscsi_ops, wwn, &tpg->tpg_se_tpg,
-                               tpg, TRANSPORT_TPG_TYPE_NORMAL);
+       ret = core_tpg_register(wwn, &tpg->tpg_se_tpg, SCSI_PROTOCOL_ISCSI);
        if (ret < 0)
                return NULL;
 
@@ -1735,14 +1690,6 @@ static char *iscsi_get_fabric_name(void)
        return "iSCSI";
 }
 
-static u32 iscsi_get_task_tag(struct se_cmd *se_cmd)
-{
-       struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd);
-
-       /* only used for printks or comparism with ->ref_task_tag */
-       return (__force u32)cmd->init_task_tag;
-}
-
 static int iscsi_get_cmd_state(struct se_cmd *se_cmd)
 {
        struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd);
@@ -1832,78 +1779,58 @@ static void lio_aborted_task(struct se_cmd *se_cmd)
        cmd->conn->conn_transport->iscsit_aborted_task(cmd->conn, cmd);
 }
 
-static char *lio_tpg_get_endpoint_wwn(struct se_portal_group *se_tpg)
+static inline struct iscsi_portal_group *iscsi_tpg(struct se_portal_group *se_tpg)
 {
-       struct iscsi_portal_group *tpg = se_tpg->se_tpg_fabric_ptr;
+       return container_of(se_tpg, struct iscsi_portal_group, tpg_se_tpg);
+}
 
-       return &tpg->tpg_tiqn->tiqn[0];
+static char *lio_tpg_get_endpoint_wwn(struct se_portal_group *se_tpg)
+{
+       return iscsi_tpg(se_tpg)->tpg_tiqn->tiqn;
 }
 
 static u16 lio_tpg_get_tag(struct se_portal_group *se_tpg)
 {
-       struct iscsi_portal_group *tpg = se_tpg->se_tpg_fabric_ptr;
-
-       return tpg->tpgt;
+       return iscsi_tpg(se_tpg)->tpgt;
 }
 
 static u32 lio_tpg_get_default_depth(struct se_portal_group *se_tpg)
 {
-       struct iscsi_portal_group *tpg = se_tpg->se_tpg_fabric_ptr;
-
-       return tpg->tpg_attrib.default_cmdsn_depth;
+       return iscsi_tpg(se_tpg)->tpg_attrib.default_cmdsn_depth;
 }
 
 static int lio_tpg_check_demo_mode(struct se_portal_group *se_tpg)
 {
-       struct iscsi_portal_group *tpg = se_tpg->se_tpg_fabric_ptr;
-
-       return tpg->tpg_attrib.generate_node_acls;
+       return iscsi_tpg(se_tpg)->tpg_attrib.generate_node_acls;
 }
 
 static int lio_tpg_check_demo_mode_cache(struct se_portal_group *se_tpg)
 {
-       struct iscsi_portal_group *tpg = se_tpg->se_tpg_fabric_ptr;
-
-       return tpg->tpg_attrib.cache_dynamic_acls;
+       return iscsi_tpg(se_tpg)->tpg_attrib.cache_dynamic_acls;
 }
 
 static int lio_tpg_check_demo_mode_write_protect(
        struct se_portal_group *se_tpg)
 {
-       struct iscsi_portal_group *tpg = se_tpg->se_tpg_fabric_ptr;
-
-       return tpg->tpg_attrib.demo_mode_write_protect;
+       return iscsi_tpg(se_tpg)->tpg_attrib.demo_mode_write_protect;
 }
 
 static int lio_tpg_check_prod_mode_write_protect(
        struct se_portal_group *se_tpg)
 {
-       struct iscsi_portal_group *tpg = se_tpg->se_tpg_fabric_ptr;
-
-       return tpg->tpg_attrib.prod_mode_write_protect;
+       return iscsi_tpg(se_tpg)->tpg_attrib.prod_mode_write_protect;
 }
 
 static int lio_tpg_check_prot_fabric_only(
        struct se_portal_group *se_tpg)
 {
-       struct iscsi_portal_group *tpg = se_tpg->se_tpg_fabric_ptr;
        /*
         * Only report fabric_prot_type if t10_pi has also been enabled
         * for incoming ib_isert sessions.
         */
-       if (!tpg->tpg_attrib.t10_pi)
+       if (!iscsi_tpg(se_tpg)->tpg_attrib.t10_pi)
                return 0;
-
-       return tpg->tpg_attrib.fabric_prot_type;
-}
-
-static void lio_tpg_release_fabric_acl(
-       struct se_portal_group *se_tpg,
-       struct se_node_acl *se_acl)
-{
-       struct iscsi_node_acl *acl = container_of(se_acl,
-                               struct iscsi_node_acl, se_node_acl);
-       kfree(acl);
+       return iscsi_tpg(se_tpg)->tpg_attrib.fabric_prot_type;
 }
 
 /*
@@ -1948,9 +1875,7 @@ static void lio_tpg_close_session(struct se_session *se_sess)
 
 static u32 lio_tpg_get_inst_index(struct se_portal_group *se_tpg)
 {
-       struct iscsi_portal_group *tpg = se_tpg->se_tpg_fabric_ptr;
-
-       return tpg->tpg_tiqn->tiqn_index;
+       return iscsi_tpg(se_tpg)->tpg_tiqn->tiqn_index;
 }
 
 static void lio_set_default_node_attributes(struct se_node_acl *se_acl)
@@ -1967,7 +1892,7 @@ static void lio_set_default_node_attributes(struct se_node_acl *se_acl)
 
 static int lio_check_stop_free(struct se_cmd *se_cmd)
 {
-       return target_put_sess_cmd(se_cmd->se_sess, se_cmd);
+       return target_put_sess_cmd(se_cmd);
 }
 
 static void lio_release_cmd(struct se_cmd *se_cmd)
@@ -1981,14 +1906,11 @@ static void lio_release_cmd(struct se_cmd *se_cmd)
 const struct target_core_fabric_ops iscsi_ops = {
        .module                         = THIS_MODULE,
        .name                           = "iscsi",
+       .node_acl_size                  = sizeof(struct iscsi_node_acl),
        .get_fabric_name                = iscsi_get_fabric_name,
-       .get_fabric_proto_ident         = iscsi_get_fabric_proto_ident,
        .tpg_get_wwn                    = lio_tpg_get_endpoint_wwn,
        .tpg_get_tag                    = lio_tpg_get_tag,
        .tpg_get_default_depth          = lio_tpg_get_default_depth,
-       .tpg_get_pr_transport_id        = iscsi_get_pr_transport_id,
-       .tpg_get_pr_transport_id_len    = iscsi_get_pr_transport_id_len,
-       .tpg_parse_pr_out_transport_id  = iscsi_parse_pr_out_transport_id,
        .tpg_check_demo_mode            = lio_tpg_check_demo_mode,
        .tpg_check_demo_mode_cache      = lio_tpg_check_demo_mode_cache,
        .tpg_check_demo_mode_write_protect =
@@ -1996,8 +1918,6 @@ const struct target_core_fabric_ops iscsi_ops = {
        .tpg_check_prod_mode_write_protect =
                        lio_tpg_check_prod_mode_write_protect,
        .tpg_check_prot_fabric_only     = &lio_tpg_check_prot_fabric_only,
-       .tpg_alloc_fabric_acl           = lio_tpg_alloc_fabric_acl,
-       .tpg_release_fabric_acl         = lio_tpg_release_fabric_acl,
        .tpg_get_inst_index             = lio_tpg_get_inst_index,
        .check_stop_free                = lio_check_stop_free,
        .release_cmd                    = lio_release_cmd,
@@ -2008,7 +1928,6 @@ const struct target_core_fabric_ops iscsi_ops = {
        .write_pending                  = lio_write_pending,
        .write_pending_status           = lio_write_pending_status,
        .set_default_node_attributes    = lio_set_default_node_attributes,
-       .get_task_tag                   = iscsi_get_task_tag,
        .get_cmd_state                  = iscsi_get_cmd_state,
        .queue_data_in                  = lio_queue_data_in,
        .queue_status                   = lio_queue_status,
@@ -2020,8 +1939,8 @@ const struct target_core_fabric_ops iscsi_ops = {
        .fabric_drop_tpg                = lio_target_tiqn_deltpg,
        .fabric_make_np                 = lio_target_call_addnptotpg,
        .fabric_drop_np                 = lio_target_call_delnpfromtpg,
-       .fabric_make_nodeacl            = lio_target_make_nodeacl,
-       .fabric_drop_nodeacl            = lio_target_drop_nodeacl,
+       .fabric_init_nodeacl            = lio_target_init_nodeacl,
+       .fabric_cleanup_nodeacl         = lio_target_cleanup_nodeacl,
 
        .tfc_discovery_attrs            = lio_target_discovery_auth_attrs,
        .tfc_wwn_attrs                  = lio_target_wwn_attrs,
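
A recurring pattern in this commit: instead of stashing a back pointer in se_tpg->se_tpg_fabric_ptr, each fabric now embeds the generic se_portal_group inside its own TPG structure and recovers the outer structure with container_of(), as the new iscsi_tpg() helper above does. A minimal userspace sketch of that pattern, using stub types rather than the kernel structures:

#include <stddef.h>
#include <stdio.h>

/* Same idea as the kernel macro: recover the enclosing structure from a
 * pointer to one of its members. */
#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct se_portal_group_stub { int unused; };        /* stands in for se_portal_group */

struct iscsi_tpg_stub {                             /* stands in for iscsi_portal_group */
        unsigned short tpgt;
        struct se_portal_group_stub tpg_se_tpg;     /* embedded generic TPG */
};

static struct iscsi_tpg_stub *to_iscsi_tpg(struct se_portal_group_stub *se_tpg)
{
        return container_of(se_tpg, struct iscsi_tpg_stub, tpg_se_tpg);
}

int main(void)
{
        struct iscsi_tpg_stub tpg = { .tpgt = 1 };
        /* Core code only ever sees &tpg.tpg_se_tpg; the fabric gets tpg back. */
        printf("tpgt=%hu\n", to_iscsi_tpg(&tpg.tpg_se_tpg)->tpgt);
        return 0;
}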
index 959a14c9dd5d65f1cbd3ff4e37a8d3480ee9f177..210f6e4830e37a4341fd5e611a87168521932b7a 100644 (file)
@@ -956,56 +956,3 @@ void iscsit_take_action_for_connection_exit(struct iscsi_conn *conn)
 
        iscsit_handle_connection_cleanup(conn);
 }
-
-/*
- *     This is the simple function that makes the magic of
- *     sync and steering happen in the follow paradoxical order:
- *
- *     0) Receive conn->of_marker (bytes left until next OFMarker)
- *        bytes into an offload buffer.  When we pass the exact number
- *        of bytes in conn->of_marker, iscsit_dump_data_payload() and hence
- *        rx_data() will automatically receive the identical u32 marker
- *        values and store it in conn->of_marker_offset;
- *     1) Now conn->of_marker_offset will contain the offset to the start
- *        of the next iSCSI PDU.  Dump these remaining bytes into another
- *        offload buffer.
- *     2) We are done!
- *        Next byte in the TCP stream will contain the next iSCSI PDU!
- *        Cool Huh?!
- */
-int iscsit_recover_from_unknown_opcode(struct iscsi_conn *conn)
-{
-       /*
-        * Make sure the remaining bytes to next maker is a sane value.
-        */
-       if (conn->of_marker > (conn->conn_ops->OFMarkInt * 4)) {
-               pr_err("Remaining bytes to OFMarker: %u exceeds"
-                       " OFMarkInt bytes: %u.\n", conn->of_marker,
-                               conn->conn_ops->OFMarkInt * 4);
-               return -1;
-       }
-
-       pr_debug("Advancing %u bytes in TCP stream to get to the"
-                       " next OFMarker.\n", conn->of_marker);
-
-       if (iscsit_dump_data_payload(conn, conn->of_marker, 0) < 0)
-               return -1;
-
-       /*
-        * Make sure the offset marker we retrived is a valid value.
-        */
-       if (conn->of_marker_offset > (ISCSI_HDR_LEN + (ISCSI_CRC_LEN * 2) +
-           conn->conn_ops->MaxRecvDataSegmentLength)) {
-               pr_err("OfMarker offset value: %u exceeds limit.\n",
-                       conn->of_marker_offset);
-               return -1;
-       }
-
-       pr_debug("Discarding %u bytes of TCP stream to get to the"
-                       " next iSCSI Opcode.\n", conn->of_marker_offset);
-
-       if (iscsit_dump_data_payload(conn, conn->of_marker_offset, 0) < 0)
-               return -1;
-
-       return 0;
-}
index 21acc9a063763b50a3b1029dcb02d4990f50461c..a9e2f9497fb22a1734ae27393e63fa351533f2d8 100644 (file)
@@ -10,6 +10,5 @@ extern void iscsit_connection_reinstatement_rcfr(struct iscsi_conn *);
 extern void iscsit_cause_connection_reinstatement(struct iscsi_conn *, int);
 extern void iscsit_fall_back_to_erl0(struct iscsi_session *);
 extern void iscsit_take_action_for_connection_exit(struct iscsi_conn *);
-extern int iscsit_recover_from_unknown_opcode(struct iscsi_conn *);
 
 #endif   /*** ISCSI_TARGET_ERL0_H ***/
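
The recovery helper removed above existed only for the fixed-interval sync-and-steering markers (OFMarker/OFMarkInt), which this commit stops negotiating altogether. For reference, the interval arithmetic the marker code relied on (see the removed iscsit_set_sync_and_steering_values() later in this diff) boils down to the standalone sketch below; the negotiated OFMarkInt is in 4-byte words, so callers passed the byte value, and MARKER_SIZE is assumed to be the 8-byte on-wire marker:

#include <stdio.h>

#define MARKER_SIZE 8u   /* assumed size of one on-wire marker */

/* Given how many bytes have already been consumed in the marker-less
 * interval, return how many bytes remain before the next marker. */
static unsigned int bytes_to_next_marker(unsigned int consumed, unsigned int mark_int_bytes)
{
        if (consumed <= mark_int_bytes)
                return mark_int_bytes - consumed;

        /* Skip past the whole intervals (and their markers) already covered. */
        unsigned int intervals = consumed / mark_int_bytes;
        unsigned int next_marker = mark_int_bytes * (intervals + 1) +
                                   intervals * MARKER_SIZE;
        return next_marker - consumed;
}

int main(void)
{
        /* e.g. a 48-byte login PDU already consumed, OFMarkInt of 2048 bytes */
        printf("%u bytes until the next OFMarker\n", bytes_to_next_marker(48, 2048));
        return 0;
}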
index 70d799dfab03c2e3b616b06a635c2a63e8941fda..3d0fe4ff55904d00a702958a82413a33873de888 100644 (file)
@@ -410,8 +410,6 @@ static int iscsi_login_zero_tsih_s2(
        if (iscsi_change_param_sprintf(conn, "ErrorRecoveryLevel=%d", na->default_erl))
                return -1;
 
-       if (iscsi_login_disable_FIM_keys(conn->param_list, conn) < 0)
-               return -1;
        /*
         * Set RDMAExtensions=Yes by default for iSER enabled network portals
         */
@@ -477,59 +475,6 @@ check_prot:
        return 0;
 }
 
-/*
- * Remove PSTATE_NEGOTIATE for the four FIM related keys.
- * The Initiator node will be able to enable FIM by proposing them itself.
- */
-int iscsi_login_disable_FIM_keys(
-       struct iscsi_param_list *param_list,
-       struct iscsi_conn *conn)
-{
-       struct iscsi_param *param;
-
-       param = iscsi_find_param_from_key("OFMarker", param_list);
-       if (!param) {
-               pr_err("iscsi_find_param_from_key() for"
-                               " OFMarker failed\n");
-               iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
-                               ISCSI_LOGIN_STATUS_NO_RESOURCES);
-               return -1;
-       }
-       param->state &= ~PSTATE_NEGOTIATE;
-
-       param = iscsi_find_param_from_key("OFMarkInt", param_list);
-       if (!param) {
-               pr_err("iscsi_find_param_from_key() for"
-                               " IFMarker failed\n");
-               iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
-                               ISCSI_LOGIN_STATUS_NO_RESOURCES);
-               return -1;
-       }
-       param->state &= ~PSTATE_NEGOTIATE;
-
-       param = iscsi_find_param_from_key("IFMarker", param_list);
-       if (!param) {
-               pr_err("iscsi_find_param_from_key() for"
-                               " IFMarker failed\n");
-               iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
-                               ISCSI_LOGIN_STATUS_NO_RESOURCES);
-               return -1;
-       }
-       param->state &= ~PSTATE_NEGOTIATE;
-
-       param = iscsi_find_param_from_key("IFMarkInt", param_list);
-       if (!param) {
-               pr_err("iscsi_find_param_from_key() for"
-                               " IFMarker failed\n");
-               iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
-                               ISCSI_LOGIN_STATUS_NO_RESOURCES);
-               return -1;
-       }
-       param->state &= ~PSTATE_NEGOTIATE;
-
-       return 0;
-}
-
 static int iscsi_login_non_zero_tsih_s1(
        struct iscsi_conn *conn,
        unsigned char *buf)
@@ -616,7 +561,7 @@ static int iscsi_login_non_zero_tsih_s2(
        if (iscsi_change_param_sprintf(conn, "TargetPortalGroupTag=%hu", sess->tpg->tpgt))
                return -1;
 
-       return iscsi_login_disable_FIM_keys(conn->param_list, conn);
+       return 0;
 }
 
 int iscsi_login_post_auth_non_zero_tsih(
@@ -765,7 +710,6 @@ int iscsi_post_login_handler(
        conn->conn_state = TARG_CONN_STATE_LOGGED_IN;
 
        iscsi_set_connection_parameters(conn->conn_ops, conn->param_list);
-       iscsit_set_sync_and_steering_values(conn);
        /*
         * SCSI Initiator -> SCSI Target Port Mapping
         */
index 29d098324b7f947e553eade28ad2ccbecc8b887b..1c7358081533ad1e3fb0533f424fa7749feda7d5 100644 (file)
@@ -16,6 +16,5 @@ extern int iscsi_post_login_handler(struct iscsi_np *, struct iscsi_conn *, u8);
 extern void iscsi_target_login_sess_out(struct iscsi_conn *, struct iscsi_np *,
                                bool, bool);
 extern int iscsi_target_login_thread(void *);
-extern int iscsi_login_disable_FIM_keys(struct iscsi_param_list *, struct iscsi_conn *);
 
 #endif   /*** ISCSI_TARGET_LOGIN_H ***/
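
With iscsi_login_disable_FIM_keys() gone, the marker keys are never un-negotiated at login time; instead the parameter setup later in this diff marks IFMarkInt/OFMarkInt as rejected from the start (SET_PSTATE_REJECT). A tiny sketch of that flag handling, with illustrative flag values rather than the kernel's PSTATE_* definitions:

#include <stdio.h>

#define PSTATE_NEGOTIATE  0x01u   /* illustrative values, not the kernel's */
#define PSTATE_REJECT     0x02u

struct param { const char *name; unsigned int state; };

int main(void)
{
        struct param ifmarkint = { "IFMarkInt", 0 };

        /* What iscsi_set_keys_to_negotiate() now does for the marker keys. */
        ifmarkint.state |= PSTATE_REJECT;

        if (!(ifmarkint.state & PSTATE_NEGOTIATE))
                printf("%s will not be negotiated\n", ifmarkint.name);
        return 0;
}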
index d4f9e96456978eab75e280e123308db09f6e95d2..e8a52f7d6204fc7c3e68fe5ad24f8e393ed0e16b 100644 (file)
@@ -34,13 +34,6 @@ int iscsi_login_rx_data(
        iov.iov_len     = length;
        iov.iov_base    = buf;
 
-       /*
-        * Initial Marker-less Interval.
-        * Add the values regardless of IFMarker/OFMarker, considering
-        * it may not be negoitated yet.
-        */
-       conn->of_marker += length;
-
        rx_got = rx_data(conn, &iov, 1, length);
        if (rx_got != length) {
                pr_err("rx_data returned %d, expecting %d.\n",
@@ -72,13 +65,6 @@ int iscsi_login_tx_data(
                iov_cnt++;
        }
 
-       /*
-        * Initial Marker-less Interval.
-        * Add the values regardless of IFMarker/OFMarker, considering
-        * it may not be negoitated yet.
-        */
-       conn->if_marker += length;
-
        tx_sent = tx_data(conn, &iov[0], iov_cnt, length);
        if (tx_sent != length) {
                pr_err("tx_data returned %d, expecting %d.\n",
@@ -97,12 +83,6 @@ void iscsi_dump_conn_ops(struct iscsi_conn_ops *conn_ops)
                                "CRC32C" : "None");
        pr_debug("MaxRecvDataSegmentLength: %u\n",
                                conn_ops->MaxRecvDataSegmentLength);
-       pr_debug("OFMarker: %s\n", (conn_ops->OFMarker) ? "Yes" : "No");
-       pr_debug("IFMarker: %s\n", (conn_ops->IFMarker) ? "Yes" : "No");
-       if (conn_ops->OFMarker)
-               pr_debug("OFMarkInt: %u\n", conn_ops->OFMarkInt);
-       if (conn_ops->IFMarker)
-               pr_debug("IFMarkInt: %u\n", conn_ops->IFMarkInt);
 }
 
 void iscsi_dump_sess_ops(struct iscsi_sess_ops *sess_ops)
@@ -194,10 +174,6 @@ static struct iscsi_param *iscsi_set_default_param(struct iscsi_param_list *para
        case TYPERANGE_DIGEST:
                param->type = TYPE_VALUE_LIST | TYPE_STRING;
                break;
-       case TYPERANGE_MARKINT:
-               param->type = TYPE_NUMBER_RANGE;
-               param->type_range |= TYPERANGE_1_TO_65535;
-               break;
        case TYPERANGE_ISCSINAME:
        case TYPERANGE_SESSIONTYPE:
        case TYPERANGE_TARGETADDRESS:
@@ -422,13 +398,13 @@ int iscsi_create_default_params(struct iscsi_param_list **param_list_ptr)
 
        param = iscsi_set_default_param(pl, IFMARKINT, INITIAL_IFMARKINT,
                        PHASE_OPERATIONAL, SCOPE_CONNECTION_ONLY, SENDER_BOTH,
-                       TYPERANGE_MARKINT, USE_INITIAL_ONLY);
+                       TYPERANGE_UTF8, USE_INITIAL_ONLY);
        if (!param)
                goto out;
 
        param = iscsi_set_default_param(pl, OFMARKINT, INITIAL_OFMARKINT,
                        PHASE_OPERATIONAL, SCOPE_CONNECTION_ONLY, SENDER_BOTH,
-                       TYPERANGE_MARKINT, USE_INITIAL_ONLY);
+                       TYPERANGE_UTF8, USE_INITIAL_ONLY);
        if (!param)
                goto out;
        /*
@@ -524,9 +500,9 @@ int iscsi_set_keys_to_negotiate(
                } else if (!strcmp(param->name, OFMARKER)) {
                        SET_PSTATE_NEGOTIATE(param);
                } else if (!strcmp(param->name, IFMARKINT)) {
-                       SET_PSTATE_NEGOTIATE(param);
+                       SET_PSTATE_REJECT(param);
                } else if (!strcmp(param->name, OFMARKINT)) {
-                       SET_PSTATE_NEGOTIATE(param);
+                       SET_PSTATE_REJECT(param);
                } else if (!strcmp(param->name, RDMAEXTENSIONS)) {
                        if (iser)
                                SET_PSTATE_NEGOTIATE(param);
@@ -906,91 +882,6 @@ static int iscsi_check_numerical_value(struct iscsi_param *param, char *value_pt
        return 0;
 }
 
-static int iscsi_check_numerical_range_value(struct iscsi_param *param, char *value)
-{
-       char *left_val_ptr = NULL, *right_val_ptr = NULL;
-       char *tilde_ptr = NULL;
-       u32 left_val, right_val, local_left_val;
-
-       if (strcmp(param->name, IFMARKINT) &&
-           strcmp(param->name, OFMARKINT)) {
-               pr_err("Only parameters \"%s\" or \"%s\" may contain a"
-                      " numerical range value.\n", IFMARKINT, OFMARKINT);
-               return -1;
-       }
-
-       if (IS_PSTATE_PROPOSER(param))
-               return 0;
-
-       tilde_ptr = strchr(value, '~');
-       if (!tilde_ptr) {
-               pr_err("Unable to locate numerical range indicator"
-                       " \"~\" for \"%s\".\n", param->name);
-               return -1;
-       }
-       *tilde_ptr = '\0';
-
-       left_val_ptr = value;
-       right_val_ptr = value + strlen(left_val_ptr) + 1;
-
-       if (iscsi_check_numerical_value(param, left_val_ptr) < 0)
-               return -1;
-       if (iscsi_check_numerical_value(param, right_val_ptr) < 0)
-               return -1;
-
-       left_val = simple_strtoul(left_val_ptr, NULL, 0);
-       right_val = simple_strtoul(right_val_ptr, NULL, 0);
-       *tilde_ptr = '~';
-
-       if (right_val < left_val) {
-               pr_err("Numerical range for parameter \"%s\" contains"
-                       " a right value which is less than the left.\n",
-                               param->name);
-               return -1;
-       }
-
-       /*
-        * For now,  enforce reasonable defaults for [I,O]FMarkInt.
-        */
-       tilde_ptr = strchr(param->value, '~');
-       if (!tilde_ptr) {
-               pr_err("Unable to locate numerical range indicator"
-                       " \"~\" for \"%s\".\n", param->name);
-               return -1;
-       }
-       *tilde_ptr = '\0';
-
-       left_val_ptr = param->value;
-       right_val_ptr = param->value + strlen(left_val_ptr) + 1;
-
-       local_left_val = simple_strtoul(left_val_ptr, NULL, 0);
-       *tilde_ptr = '~';
-
-       if (param->set_param) {
-               if ((left_val < local_left_val) ||
-                   (right_val < local_left_val)) {
-                       pr_err("Passed value range \"%u~%u\" is below"
-                               " minimum left value \"%u\" for key \"%s\","
-                               " rejecting.\n", left_val, right_val,
-                               local_left_val, param->name);
-                       return -1;
-               }
-       } else {
-               if ((left_val < local_left_val) &&
-                   (right_val < local_left_val)) {
-                       pr_err("Received value range \"%u~%u\" is"
-                               " below minimum left value \"%u\" for key"
-                               " \"%s\", rejecting.\n", left_val, right_val,
-                               local_left_val, param->name);
-                       SET_PSTATE_REJECT(param);
-                       if (iscsi_update_param_value(param, REJECT) < 0)
-                               return -1;
-               }
-       }
-
-       return 0;
-}
-
 static int iscsi_check_string_or_list_value(struct iscsi_param *param, char *value)
 {
        if (IS_PSTATE_PROPOSER(param))
@@ -1027,33 +918,6 @@ static int iscsi_check_string_or_list_value(struct iscsi_param *param, char *val
        return 0;
 }
 
-/*
- *     This function is used to pick a value range number,  currently just
- *     returns the lesser of both right values.
- */
-static char *iscsi_get_value_from_number_range(
-       struct iscsi_param *param,
-       char *value)
-{
-       char *end_ptr, *tilde_ptr1 = NULL, *tilde_ptr2 = NULL;
-       u32 acceptor_right_value, proposer_right_value;
-
-       tilde_ptr1 = strchr(value, '~');
-       if (!tilde_ptr1)
-               return NULL;
-       *tilde_ptr1++ = '\0';
-       proposer_right_value = simple_strtoul(tilde_ptr1, &end_ptr, 0);
-
-       tilde_ptr2 = strchr(param->value, '~');
-       if (!tilde_ptr2)
-               return NULL;
-       *tilde_ptr2++ = '\0';
-       acceptor_right_value = simple_strtoul(tilde_ptr2, &end_ptr, 0);
-
-       return (acceptor_right_value >= proposer_right_value) ?
-               tilde_ptr1 : tilde_ptr2;
-}
-
 static char *iscsi_check_valuelist_for_support(
        struct iscsi_param *param,
        char *value)
@@ -1103,7 +967,7 @@ static int iscsi_check_acceptor_state(struct iscsi_param *param, char *value,
                                struct iscsi_conn *conn)
 {
        u8 acceptor_boolean_value = 0, proposer_boolean_value = 0;
-       char *negoitated_value = NULL;
+       char *negotiated_value = NULL;
 
        if (IS_PSTATE_ACCEPTOR(param)) {
                pr_err("Received key \"%s\" twice, protocol error.\n",
@@ -1203,24 +1067,16 @@ static int iscsi_check_acceptor_state(struct iscsi_param *param, char *value,
                        pr_debug("Updated %s to target MXDSL value: %s\n",
                                        param->name, param->value);
                }
-
-       } else if (IS_TYPE_NUMBER_RANGE(param)) {
-               negoitated_value = iscsi_get_value_from_number_range(
-                                       param, value);
-               if (!negoitated_value)
-                       return -1;
-               if (iscsi_update_param_value(param, negoitated_value) < 0)
-                       return -1;
        } else if (IS_TYPE_VALUE_LIST(param)) {
-               negoitated_value = iscsi_check_valuelist_for_support(
+               negotiated_value = iscsi_check_valuelist_for_support(
                                        param, value);
-               if (!negoitated_value) {
+               if (!negotiated_value) {
                        pr_err("Proposer's value list \"%s\" contains"
                                " no valid values from Acceptor's value list"
                                " \"%s\".\n", value, param->value);
                        return -1;
                }
-               if (iscsi_update_param_value(param, negoitated_value) < 0)
+               if (iscsi_update_param_value(param, negotiated_value) < 0)
                        return -1;
        } else if (IS_PHASE_DECLARATIVE(param)) {
                if (iscsi_update_param_value(param, value) < 0)
@@ -1239,47 +1095,7 @@ static int iscsi_check_proposer_state(struct iscsi_param *param, char *value)
                return -1;
        }
 
-       if (IS_TYPE_NUMBER_RANGE(param)) {
-               u32 left_val = 0, right_val = 0, recieved_value = 0;
-               char *left_val_ptr = NULL, *right_val_ptr = NULL;
-               char *tilde_ptr = NULL;
-
-               if (!strcmp(value, IRRELEVANT) || !strcmp(value, REJECT)) {
-                       if (iscsi_update_param_value(param, value) < 0)
-                               return -1;
-                       return 0;
-               }
-
-               tilde_ptr = strchr(value, '~');
-               if (tilde_ptr) {
-                       pr_err("Illegal \"~\" in response for \"%s\".\n",
-                                       param->name);
-                       return -1;
-               }
-               tilde_ptr = strchr(param->value, '~');
-               if (!tilde_ptr) {
-                       pr_err("Unable to locate numerical range"
-                               " indicator \"~\" for \"%s\".\n", param->name);
-                       return -1;
-               }
-               *tilde_ptr = '\0';
-
-               left_val_ptr = param->value;
-               right_val_ptr = param->value + strlen(left_val_ptr) + 1;
-               left_val = simple_strtoul(left_val_ptr, NULL, 0);
-               right_val = simple_strtoul(right_val_ptr, NULL, 0);
-               recieved_value = simple_strtoul(value, NULL, 0);
-
-               *tilde_ptr = '~';
-
-               if ((recieved_value < left_val) ||
-                   (recieved_value > right_val)) {
-                       pr_err("Illegal response \"%s=%u\", value must"
-                               " be between %u and %u.\n", param->name,
-                               recieved_value, left_val, right_val);
-                       return -1;
-               }
-       } else if (IS_TYPE_VALUE_LIST(param)) {
+       if (IS_TYPE_VALUE_LIST(param)) {
                char *comma_ptr = NULL, *tmp_ptr = NULL;
 
                comma_ptr = strchr(value, ',');
@@ -1361,9 +1177,6 @@ static int iscsi_check_value(struct iscsi_param *param, char *value)
                } else if (IS_TYPE_NUMBER(param)) {
                        if (iscsi_check_numerical_value(param, value) < 0)
                                return -1;
-               } else if (IS_TYPE_NUMBER_RANGE(param)) {
-                       if (iscsi_check_numerical_range_value(param, value) < 0)
-                               return -1;
                } else if (IS_TYPE_STRING(param) || IS_TYPE_VALUE_LIST(param)) {
                        if (iscsi_check_string_or_list_value(param, value) < 0)
                                return -1;
@@ -1483,8 +1296,6 @@ static int iscsi_enforce_integrity_rules(
        char *tmpptr;
        u8 DataSequenceInOrder = 0;
        u8 ErrorRecoveryLevel = 0, SessionType = 0;
-       u8 IFMarker = 0, OFMarker = 0;
-       u8 IFMarkInt_Reject = 1, OFMarkInt_Reject = 1;
        u32 FirstBurstLength = 0, MaxBurstLength = 0;
        struct iscsi_param *param = NULL;
 
@@ -1503,28 +1314,12 @@ static int iscsi_enforce_integrity_rules(
                if (!strcmp(param->name, MAXBURSTLENGTH))
                        MaxBurstLength = simple_strtoul(param->value,
                                        &tmpptr, 0);
-               if (!strcmp(param->name, IFMARKER))
-                       if (!strcmp(param->value, YES))
-                               IFMarker = 1;
-               if (!strcmp(param->name, OFMARKER))
-                       if (!strcmp(param->value, YES))
-                               OFMarker = 1;
-               if (!strcmp(param->name, IFMARKINT))
-                       if (!strcmp(param->value, REJECT))
-                               IFMarkInt_Reject = 1;
-               if (!strcmp(param->name, OFMARKINT))
-                       if (!strcmp(param->value, REJECT))
-                               OFMarkInt_Reject = 1;
        }
 
        list_for_each_entry(param, &param_list->param_list, p_list) {
                if (!(param->phase & phase))
                        continue;
-               if (!SessionType && (!IS_PSTATE_ACCEPTOR(param) &&
-                    (strcmp(param->name, IFMARKER) &&
-                     strcmp(param->name, OFMARKER) &&
-                     strcmp(param->name, IFMARKINT) &&
-                     strcmp(param->name, OFMARKINT))))
+               if (!SessionType && !IS_PSTATE_ACCEPTOR(param))
                        continue;
                if (!strcmp(param->name, MAXOUTSTANDINGR2T) &&
                    DataSequenceInOrder && (ErrorRecoveryLevel > 0)) {
@@ -1556,38 +1351,6 @@ static int iscsi_enforce_integrity_rules(
                                        param->name, param->value);
                        }
                }
-               if (!strcmp(param->name, IFMARKER) && IFMarkInt_Reject) {
-                       if (iscsi_update_param_value(param, NO) < 0)
-                               return -1;
-                       IFMarker = 0;
-                       pr_debug("Reset \"%s\" to \"%s\".\n",
-                                       param->name, param->value);
-               }
-               if (!strcmp(param->name, OFMARKER) && OFMarkInt_Reject) {
-                       if (iscsi_update_param_value(param, NO) < 0)
-                               return -1;
-                       OFMarker = 0;
-                       pr_debug("Reset \"%s\" to \"%s\".\n",
-                                        param->name, param->value);
-               }
-               if (!strcmp(param->name, IFMARKINT) && !IFMarker) {
-                       if (!strcmp(param->value, REJECT))
-                               continue;
-                       param->state &= ~PSTATE_NEGOTIATE;
-                       if (iscsi_update_param_value(param, IRRELEVANT) < 0)
-                               return -1;
-                       pr_debug("Reset \"%s\" to \"%s\".\n",
-                                       param->name, param->value);
-               }
-               if (!strcmp(param->name, OFMARKINT) && !OFMarker) {
-                       if (!strcmp(param->value, REJECT))
-                               continue;
-                       param->state &= ~PSTATE_NEGOTIATE;
-                       if (iscsi_update_param_value(param, IRRELEVANT) < 0)
-                               return -1;
-                       pr_debug("Reset \"%s\" to \"%s\".\n",
-                                       param->name, param->value);
-               }
        }
 
        return 0;
@@ -1824,24 +1587,6 @@ void iscsi_set_connection_parameters(
                         */
                        pr_debug("MaxRecvDataSegmentLength:     %u\n",
                                ops->MaxRecvDataSegmentLength);
-               } else if (!strcmp(param->name, OFMARKER)) {
-                       ops->OFMarker = !strcmp(param->value, YES);
-                       pr_debug("OFMarker:                     %s\n",
-                               param->value);
-               } else if (!strcmp(param->name, IFMARKER)) {
-                       ops->IFMarker = !strcmp(param->value, YES);
-                       pr_debug("IFMarker:                     %s\n",
-                               param->value);
-               } else if (!strcmp(param->name, OFMARKINT)) {
-                       ops->OFMarkInt =
-                               simple_strtoul(param->value, &tmpptr, 0);
-                       pr_debug("OFMarkInt:                    %s\n",
-                               param->value);
-               } else if (!strcmp(param->name, IFMARKINT)) {
-                       ops->IFMarkInt =
-                               simple_strtoul(param->value, &tmpptr, 0);
-                       pr_debug("IFMarkInt:                    %s\n",
-                               param->value);
                } else if (!strcmp(param->name, INITIATORRECVDATASEGMENTLENGTH)) {
                        ops->InitiatorRecvDataSegmentLength =
                                simple_strtoul(param->value, &tmpptr, 0);
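
For context on the negotiation paths that remain once the number-range handling is dropped: value-list keys (HeaderDigest, for example) are settled by walking the proposer's comma-separated list in order and taking the first entry the acceptor also supports, which is what iscsi_check_valuelist_for_support() does. A rough userspace sketch of that control flow, with illustrative list contents:

#define _POSIX_C_SOURCE 200809L
#include <stdio.h>
#include <string.h>

/* Return 1 if "value" appears in the acceptor's comma-separated list. */
static int acceptor_supports(const char *acceptor_list, const char *value)
{
        char buf[128], *save, *a;

        strncpy(buf, acceptor_list, sizeof(buf) - 1);
        buf[sizeof(buf) - 1] = '\0';
        for (a = strtok_r(buf, ",", &save); a; a = strtok_r(NULL, ",", &save))
                if (!strcmp(a, value))
                        return 1;
        return 0;
}

int main(void)
{
        char proposed[] = "CRC32C,None";        /* proposer's HeaderDigest list */
        const char *acceptor = "None,CRC32C";   /* what this side accepts */
        char *save, *v;

        for (v = strtok_r(proposed, ",", &save); v; v = strtok_r(NULL, ",", &save)) {
                if (acceptor_supports(acceptor, v)) {
                        printf("negotiated value: %s\n", v);
                        break;
                }
        }
        return 0;
}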
index a47046a752aac7ccdabbdf4e41c936d53a13569b..a0751e3f0813429bd5c87c2ecab16780cfbfa1c7 100644 (file)
@@ -138,8 +138,8 @@ extern void iscsi_set_session_parameters(struct iscsi_sess_ops *,
 #define INITIAL_SESSIONTYPE                    NORMAL
 #define INITIAL_IFMARKER                       NO
 #define INITIAL_OFMARKER                       NO
-#define INITIAL_IFMARKINT                      "2048~65535"
-#define INITIAL_OFMARKINT                      "2048~65535"
+#define INITIAL_IFMARKINT                      REJECT
+#define INITIAL_OFMARKINT                      REJECT
 
 /*
  * Initial values for iSER parameters following RFC-5046 Section 6
@@ -239,10 +239,9 @@ extern void iscsi_set_session_parameters(struct iscsi_sess_ops *,
 #define TYPERANGE_AUTH                 0x0200
 #define TYPERANGE_DIGEST               0x0400
 #define TYPERANGE_ISCSINAME            0x0800
-#define TYPERANGE_MARKINT              0x1000
-#define TYPERANGE_SESSIONTYPE          0x2000
-#define TYPERANGE_TARGETADDRESS                0x4000
-#define TYPERANGE_UTF8                 0x8000
+#define TYPERANGE_SESSIONTYPE          0x1000
+#define TYPERANGE_TARGETADDRESS                0x2000
+#define TYPERANGE_UTF8                 0x4000
 
 #define IS_TYPERANGE_0_TO_2(p)         ((p)->type_range & TYPERANGE_0_TO_2)
 #define IS_TYPERANGE_0_TO_3600(p)      ((p)->type_range & TYPERANGE_0_TO_3600)
index fe9a582ca6af4f4e83410dab6ecd22e28d74ff96..cf59c397007bd0d9a48665f34a07c5bf3226e650 100644 (file)
@@ -120,7 +120,7 @@ u8 iscsit_tmr_task_reassign(
        struct iscsi_tmr_req *tmr_req = cmd->tmr_req;
        struct se_tmr_req *se_tmr = cmd->se_cmd.se_tmr_req;
        struct iscsi_tm *hdr = (struct iscsi_tm *) buf;
-       int ret, ref_lun;
+       u64 ret, ref_lun;
 
        pr_debug("Got TASK_REASSIGN TMR ITT: 0x%08x,"
                " RefTaskTag: 0x%08x, ExpDataSN: 0x%08x, CID: %hu\n",
@@ -164,7 +164,7 @@ u8 iscsit_tmr_task_reassign(
        ref_lun = scsilun_to_int(&hdr->lun);
        if (ref_lun != ref_cmd->se_cmd.orig_fe_lun) {
                pr_err("Unable to perform connection recovery for"
-                       " differing ref_lun: %d ref_cmd orig_fe_lun: %u\n",
+                       " differing ref_lun: %llu ref_cmd orig_fe_lun: %llu\n",
                        ref_lun, ref_cmd->se_cmd.orig_fe_lun);
                return ISCSI_TMF_RSP_REJECTED;
        }
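
ref_lun becomes a u64 (printed with %llu) because this series treats SCSI LUNs as 64-bit values end to end: scsilun_to_int() folds the 8-byte LUN field from the PDU into one integer. A simplified standalone decode of that field, not the kernel helper itself:

#include <stdint.h>
#include <stdio.h>

/* Fold an 8-byte SCSI LUN field into a flat 64-bit value, two bytes
 * (one addressing level) at a time. */
static uint64_t lun_to_int(const uint8_t lun[8])
{
        uint64_t v = 0;
        for (int i = 0; i < 8; i += 2)
                v |= (uint64_t)((lun[i] << 8) | lun[i + 1]) << (i * 8);
        return v;
}

int main(void)
{
        uint8_t lun[8] = { 0 };
        lun[1] = 5;     /* simple peripheral addressing: LUN 5 */
        printf("unpacked LUN: %llu\n", (unsigned long long)lun_to_int(lun));
        return 0;
}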
index 5e3295fe404d7cc93aae6354f2578bcbca55ee23..968068ffcb1c87a7ce7d218f8faf0a900dbc517b 100644 (file)
@@ -18,7 +18,6 @@
 
 #include <target/target_core_base.h>
 #include <target/target_core_fabric.h>
-#include <target/target_core_configfs.h>
 
 #include <target/iscsi/iscsi_target_core.h>
 #include "iscsi_target_erl0.h"
@@ -67,9 +66,12 @@ int iscsit_load_discovery_tpg(void)
                pr_err("Unable to allocate struct iscsi_portal_group\n");
                return -1;
        }
-
-       ret = core_tpg_register(&iscsi_ops, NULL, &tpg->tpg_se_tpg,
-                               tpg, TRANSPORT_TPG_TYPE_DISCOVERY);
+       /*
+        * Save iscsi_ops pointer for special case discovery TPG that
+        * doesn't exist as se_wwn->wwn_group within configfs.
+        */
+       tpg->tpg_se_tpg.se_tpg_tfo = &iscsi_ops;
+       ret = core_tpg_register(NULL, &tpg->tpg_se_tpg, -1);
        if (ret < 0) {
                kfree(tpg);
                return -1;
@@ -280,8 +282,6 @@ int iscsit_tpg_del_portal_group(
                return -EPERM;
        }
 
-       core_tpg_clear_object_luns(&tpg->tpg_se_tpg);
-
        if (tpg->param_list) {
                iscsi_release_param_list(tpg->param_list);
                tpg->param_list = NULL;
index b18edda3e8af8a6f4b6edba747dbf725bf36a21a..a2bff0702eb25bc4d10bac935d4888b2df131c41 100644 (file)
@@ -22,7 +22,6 @@
 #include <scsi/iscsi_proto.h>
 #include <target/target_core_base.h>
 #include <target/target_core_fabric.h>
-#include <target/target_core_configfs.h>
 #include <target/iscsi/iscsi_transport.h>
 
 #include <target/iscsi/iscsi_target_core.h>
@@ -746,7 +745,7 @@ void iscsit_free_cmd(struct iscsi_cmd *cmd, bool shutdown)
                rc = transport_generic_free_cmd(&cmd->se_cmd, shutdown);
                if (!rc && shutdown && se_cmd && se_cmd->se_sess) {
                        __iscsit_free_cmd(cmd, true, shutdown);
-                       target_put_sess_cmd(se_cmd->se_sess, se_cmd);
+                       target_put_sess_cmd(se_cmd);
                }
                break;
        case ISCSI_OP_REJECT:
@@ -762,7 +761,7 @@ void iscsit_free_cmd(struct iscsi_cmd *cmd, bool shutdown)
                        rc = transport_generic_free_cmd(&cmd->se_cmd, shutdown);
                        if (!rc && shutdown && se_cmd->se_sess) {
                                __iscsit_free_cmd(cmd, true, shutdown);
-                               target_put_sess_cmd(se_cmd->se_sess, se_cmd);
+                               target_put_sess_cmd(se_cmd);
                        }
                        break;
                }
@@ -809,54 +808,6 @@ void iscsit_inc_session_usage_count(struct iscsi_session *sess)
        spin_unlock_bh(&sess->session_usage_lock);
 }
 
-/*
- *     Setup conn->if_marker and conn->of_marker values based upon
- *     the initial marker-less interval. (see iSCSI v19 A.2)
- */
-int iscsit_set_sync_and_steering_values(struct iscsi_conn *conn)
-{
-       int login_ifmarker_count = 0, login_ofmarker_count = 0, next_marker = 0;
-       /*
-        * IFMarkInt and OFMarkInt are negotiated as 32-bit words.
-        */
-       u32 IFMarkInt = (conn->conn_ops->IFMarkInt * 4);
-       u32 OFMarkInt = (conn->conn_ops->OFMarkInt * 4);
-
-       if (conn->conn_ops->OFMarker) {
-               /*
-                * Account for the first Login Command received not
-                * via iscsi_recv_msg().
-                */
-               conn->of_marker += ISCSI_HDR_LEN;
-               if (conn->of_marker <= OFMarkInt) {
-                       conn->of_marker = (OFMarkInt - conn->of_marker);
-               } else {
-                       login_ofmarker_count = (conn->of_marker / OFMarkInt);
-                       next_marker = (OFMarkInt * (login_ofmarker_count + 1)) +
-                                       (login_ofmarker_count * MARKER_SIZE);
-                       conn->of_marker = (next_marker - conn->of_marker);
-               }
-               conn->of_marker_offset = 0;
-               pr_debug("Setting OFMarker value to %u based on Initial"
-                       " Markerless Interval.\n", conn->of_marker);
-       }
-
-       if (conn->conn_ops->IFMarker) {
-               if (conn->if_marker <= IFMarkInt) {
-                       conn->if_marker = (IFMarkInt - conn->if_marker);
-               } else {
-                       login_ifmarker_count = (conn->if_marker / IFMarkInt);
-                       next_marker = (IFMarkInt * (login_ifmarker_count + 1)) +
-                                       (login_ifmarker_count * MARKER_SIZE);
-                       conn->if_marker = (next_marker - conn->if_marker);
-               }
-               pr_debug("Setting IFMarker value to %u based on Initial"
-                       " Markerless Interval.\n", conn->if_marker);
-       }
-
-       return 0;
-}
-
 struct iscsi_conn *iscsit_get_conn_from_cid(struct iscsi_session *sess, u16 cid)
 {
        struct iscsi_conn *conn;
index 1ab754a671ff301977a90c90246ae5a223352051..995f1cb29d0e08268acf9f3547494d23498dc87b 100644 (file)
@@ -34,7 +34,6 @@ extern void iscsit_free_cmd(struct iscsi_cmd *, bool);
 extern int iscsit_check_session_usage_count(struct iscsi_session *);
 extern void iscsit_dec_session_usage_count(struct iscsi_session *);
 extern void iscsit_inc_session_usage_count(struct iscsi_session *);
-extern int iscsit_set_sync_and_steering_values(struct iscsi_conn *);
 extern struct iscsi_conn *iscsit_get_conn_from_cid(struct iscsi_session *, u16);
 extern struct iscsi_conn *iscsit_get_conn_from_cid_rcfr(struct iscsi_session *, u16);
 extern void iscsit_check_conn_usage_count(struct iscsi_conn *);
index 51f0c895c6a58fd1e93badf13a52e5eae4b64877..a556bdebd775dbc4ecbef99b1a9153493916a9f3 100644 (file)
 #include <target/target_core_base.h>
 #include <target/target_core_fabric.h>
 #include <target/target_core_fabric_configfs.h>
-#include <target/target_core_configfs.h>
 
 #include "tcm_loop.h"
 
 #define to_tcm_loop_hba(hba)   container_of(hba, struct tcm_loop_hba, dev)
 
-static const struct target_core_fabric_ops loop_ops;
-
 static struct workqueue_struct *tcm_loop_workqueue;
 static struct kmem_cache *tcm_loop_cmd_cache;
 
@@ -165,6 +162,7 @@ static void tcm_loop_submission_work(struct work_struct *work)
                transfer_length = scsi_bufflen(sc);
        }
 
+       se_cmd->tag = tl_cmd->sc_cmd_tag;
        rc = target_submit_cmd_map_sgls(se_cmd, tl_nexus->se_sess, sc->cmnd,
                        &tl_cmd->tl_sense_buf[0], tl_cmd->sc->device->lun,
                        transfer_length, TCM_SIMPLE_TAG,
@@ -217,7 +215,7 @@ static int tcm_loop_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *sc)
  * to struct scsi_device
  */
 static int tcm_loop_issue_tmr(struct tcm_loop_tpg *tl_tpg,
-                             int lun, int task, enum tcm_tmreq_table tmr)
+                             u64 lun, int task, enum tcm_tmreq_table tmr)
 {
        struct se_cmd *se_cmd = NULL;
        struct se_session *se_sess;
@@ -409,7 +407,7 @@ static int tcm_loop_driver_probe(struct device *dev)
        sh->max_id = 2;
        sh->max_lun = 0;
        sh->max_channel = 0;
-       sh->max_cmd_len = TL_SCSI_MAX_CMD_LEN;
+       sh->max_cmd_len = SCSI_MAX_VARLEN_CDB_SIZE;
 
        host_prot = SHOST_DIF_TYPE1_PROTECTION | SHOST_DIF_TYPE2_PROTECTION |
                    SHOST_DIF_TYPE3_PROTECTION | SHOST_DIX_TYPE1_PROTECTION |
@@ -520,147 +518,26 @@ static char *tcm_loop_get_fabric_name(void)
        return "loopback";
 }
 
-static u8 tcm_loop_get_fabric_proto_ident(struct se_portal_group *se_tpg)
+static inline struct tcm_loop_tpg *tl_tpg(struct se_portal_group *se_tpg)
 {
-       struct tcm_loop_tpg *tl_tpg = se_tpg->se_tpg_fabric_ptr;
-       struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;
-       /*
-        * tl_proto_id is set at tcm_loop_configfs.c:tcm_loop_make_scsi_hba()
-        * time based on the protocol dependent prefix of the passed configfs group.
-        *
-        * Based upon tl_proto_id, TCM_Loop emulates the requested fabric
-        * ProtocolID using target_core_fabric_lib.c symbols.
-        */
-       switch (tl_hba->tl_proto_id) {
-       case SCSI_PROTOCOL_SAS:
-               return sas_get_fabric_proto_ident(se_tpg);
-       case SCSI_PROTOCOL_FCP:
-               return fc_get_fabric_proto_ident(se_tpg);
-       case SCSI_PROTOCOL_ISCSI:
-               return iscsi_get_fabric_proto_ident(se_tpg);
-       default:
-               pr_err("Unknown tl_proto_id: 0x%02x, using"
-                       " SAS emulation\n", tl_hba->tl_proto_id);
-               break;
-       }
-
-       return sas_get_fabric_proto_ident(se_tpg);
+       return container_of(se_tpg, struct tcm_loop_tpg, tl_se_tpg);
 }
 
 static char *tcm_loop_get_endpoint_wwn(struct se_portal_group *se_tpg)
 {
-       struct tcm_loop_tpg *tl_tpg = se_tpg->se_tpg_fabric_ptr;
        /*
         * Return the passed NAA identifier for the SAS Target Port
         */
-       return &tl_tpg->tl_hba->tl_wwn_address[0];
+       return &tl_tpg(se_tpg)->tl_hba->tl_wwn_address[0];
 }
 
 static u16 tcm_loop_get_tag(struct se_portal_group *se_tpg)
 {
-       struct tcm_loop_tpg *tl_tpg = se_tpg->se_tpg_fabric_ptr;
        /*
         * This Tag is used when forming SCSI Name identifier in EVPD=1 0x83
         * to represent the SCSI Target Port.
         */
-       return tl_tpg->tl_tpgt;
-}
-
-static u32 tcm_loop_get_default_depth(struct se_portal_group *se_tpg)
-{
-       return 1;
-}
-
-static u32 tcm_loop_get_pr_transport_id(
-       struct se_portal_group *se_tpg,
-       struct se_node_acl *se_nacl,
-       struct t10_pr_registration *pr_reg,
-       int *format_code,
-       unsigned char *buf)
-{
-       struct tcm_loop_tpg *tl_tpg = se_tpg->se_tpg_fabric_ptr;
-       struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;
-
-       switch (tl_hba->tl_proto_id) {
-       case SCSI_PROTOCOL_SAS:
-               return sas_get_pr_transport_id(se_tpg, se_nacl, pr_reg,
-                                       format_code, buf);
-       case SCSI_PROTOCOL_FCP:
-               return fc_get_pr_transport_id(se_tpg, se_nacl, pr_reg,
-                                       format_code, buf);
-       case SCSI_PROTOCOL_ISCSI:
-               return iscsi_get_pr_transport_id(se_tpg, se_nacl, pr_reg,
-                                       format_code, buf);
-       default:
-               pr_err("Unknown tl_proto_id: 0x%02x, using"
-                       " SAS emulation\n", tl_hba->tl_proto_id);
-               break;
-       }
-
-       return sas_get_pr_transport_id(se_tpg, se_nacl, pr_reg,
-                       format_code, buf);
-}
-
-static u32 tcm_loop_get_pr_transport_id_len(
-       struct se_portal_group *se_tpg,
-       struct se_node_acl *se_nacl,
-       struct t10_pr_registration *pr_reg,
-       int *format_code)
-{
-       struct tcm_loop_tpg *tl_tpg = se_tpg->se_tpg_fabric_ptr;
-       struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;
-
-       switch (tl_hba->tl_proto_id) {
-       case SCSI_PROTOCOL_SAS:
-               return sas_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,
-                                       format_code);
-       case SCSI_PROTOCOL_FCP:
-               return fc_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,
-                                       format_code);
-       case SCSI_PROTOCOL_ISCSI:
-               return iscsi_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,
-                                       format_code);
-       default:
-               pr_err("Unknown tl_proto_id: 0x%02x, using"
-                       " SAS emulation\n", tl_hba->tl_proto_id);
-               break;
-       }
-
-       return sas_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,
-                       format_code);
-}
-
-/*
- * Used for handling SCSI fabric dependent TransportIDs in SPC-3 and above
- * Persistent Reservation SPEC_I_PT=1 and PROUT REGISTER_AND_MOVE operations.
- */
-static char *tcm_loop_parse_pr_out_transport_id(
-       struct se_portal_group *se_tpg,
-       const char *buf,
-       u32 *out_tid_len,
-       char **port_nexus_ptr)
-{
-       struct tcm_loop_tpg *tl_tpg = se_tpg->se_tpg_fabric_ptr;
-       struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;
-
-       switch (tl_hba->tl_proto_id) {
-       case SCSI_PROTOCOL_SAS:
-               return sas_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,
-                                       port_nexus_ptr);
-       case SCSI_PROTOCOL_FCP:
-               return fc_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,
-                                       port_nexus_ptr);
-       case SCSI_PROTOCOL_ISCSI:
-               return iscsi_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,
-                                       port_nexus_ptr);
-       default:
-               pr_err("Unknown tl_proto_id: 0x%02x, using"
-                       " SAS emulation\n", tl_hba->tl_proto_id);
-               break;
-       }
-
-       return sas_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,
-                       port_nexus_ptr);
+       return tl_tpg(se_tpg)->tl_tpgt;
 }
 
 /*
@@ -703,30 +580,6 @@ static int tcm_loop_check_prot_fabric_only(struct se_portal_group *se_tpg)
        return tl_tpg->tl_fabric_prot_type;
 }
 
-static struct se_node_acl *tcm_loop_tpg_alloc_fabric_acl(
-       struct se_portal_group *se_tpg)
-{
-       struct tcm_loop_nacl *tl_nacl;
-
-       tl_nacl = kzalloc(sizeof(struct tcm_loop_nacl), GFP_KERNEL);
-       if (!tl_nacl) {
-               pr_err("Unable to allocate struct tcm_loop_nacl\n");
-               return NULL;
-       }
-
-       return &tl_nacl->se_node_acl;
-}
-
-static void tcm_loop_tpg_release_fabric_acl(
-       struct se_portal_group *se_tpg,
-       struct se_node_acl *se_nacl)
-{
-       struct tcm_loop_nacl *tl_nacl = container_of(se_nacl,
-                               struct tcm_loop_nacl, se_node_acl);
-
-       kfree(tl_nacl);
-}
-
 static u32 tcm_loop_get_inst_index(struct se_portal_group *se_tpg)
 {
        return 1;
@@ -742,14 +595,6 @@ static void tcm_loop_set_default_node_attributes(struct se_node_acl *se_acl)
        return;
 }
 
-static u32 tcm_loop_get_task_tag(struct se_cmd *se_cmd)
-{
-       struct tcm_loop_cmd *tl_cmd = container_of(se_cmd,
-                       struct tcm_loop_cmd, tl_se_cmd);
-
-       return tl_cmd->sc_cmd_tag;
-}
-
 static int tcm_loop_get_cmd_state(struct se_cmd *se_cmd)
 {
        struct tcm_loop_cmd *tl_cmd = container_of(se_cmd,
@@ -902,7 +747,7 @@ static void tcm_loop_port_unlink(
                                se_lun->unpacked_lun);
        if (!sd) {
                pr_err("Unable to locate struct scsi_device for %d:%d:"
-                       "%d\n", 0, tl_tpg->tl_tpgt, se_lun->unpacked_lun);
+                       "%llu\n", 0, tl_tpg->tl_tpgt, se_lun->unpacked_lun);
                return;
        }
        /*
@@ -1234,8 +1079,7 @@ static struct se_portal_group *tcm_loop_make_naa_tpg(
        /*
         * Register the tl_tpg as an emulated SAS TCM Target Endpoint
         */
-       ret = core_tpg_register(&loop_ops, wwn, &tl_tpg->tl_se_tpg, tl_tpg,
-                       TRANSPORT_TPG_TYPE_NORMAL);
+       ret = core_tpg_register(wwn, &tl_tpg->tl_se_tpg, tl_hba->tl_proto_id);
        if (ret < 0)
                return ERR_PTR(-ENOMEM);
 
@@ -1386,13 +1230,8 @@ static const struct target_core_fabric_ops loop_ops = {
        .module                         = THIS_MODULE,
        .name                           = "loopback",
        .get_fabric_name                = tcm_loop_get_fabric_name,
-       .get_fabric_proto_ident         = tcm_loop_get_fabric_proto_ident,
        .tpg_get_wwn                    = tcm_loop_get_endpoint_wwn,
        .tpg_get_tag                    = tcm_loop_get_tag,
-       .tpg_get_default_depth          = tcm_loop_get_default_depth,
-       .tpg_get_pr_transport_id        = tcm_loop_get_pr_transport_id,
-       .tpg_get_pr_transport_id_len    = tcm_loop_get_pr_transport_id_len,
-       .tpg_parse_pr_out_transport_id  = tcm_loop_parse_pr_out_transport_id,
        .tpg_check_demo_mode            = tcm_loop_check_demo_mode,
        .tpg_check_demo_mode_cache      = tcm_loop_check_demo_mode_cache,
        .tpg_check_demo_mode_write_protect =
@@ -1400,8 +1239,6 @@ static const struct target_core_fabric_ops loop_ops = {
        .tpg_check_prod_mode_write_protect =
                                tcm_loop_check_prod_mode_write_protect,
        .tpg_check_prot_fabric_only     = tcm_loop_check_prot_fabric_only,
-       .tpg_alloc_fabric_acl           = tcm_loop_tpg_alloc_fabric_acl,
-       .tpg_release_fabric_acl         = tcm_loop_tpg_release_fabric_acl,
        .tpg_get_inst_index             = tcm_loop_get_inst_index,
        .check_stop_free                = tcm_loop_check_stop_free,
        .release_cmd                    = tcm_loop_release_cmd,
@@ -1411,7 +1248,6 @@ static const struct target_core_fabric_ops loop_ops = {
        .write_pending                  = tcm_loop_write_pending,
        .write_pending_status           = tcm_loop_write_pending_status,
        .set_default_node_attributes    = tcm_loop_set_default_node_attributes,
-       .get_task_tag                   = tcm_loop_get_task_tag,
        .get_cmd_state                  = tcm_loop_get_cmd_state,
        .queue_data_in                  = tcm_loop_queue_data_in,
        .queue_status                   = tcm_loop_queue_status,
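
Illustrative only: the loopback fabric (like the iSCSI fabric earlier in this diff) no longer supplies get_fabric_proto_ident()/TransportID callbacks; it hands tl_hba->tl_proto_id to core_tpg_register() once and lets the core do the protocol-dependent work. A toy sketch of that shape; the enum values and functions are stand-ins, not the target core API:

#include <stdio.h>

enum proto_id { PROTO_FCP = 0, PROTO_ISCSI = 5, PROTO_SAS = 6 };  /* stand-in values */

struct tpg { enum proto_id proto; };

/* The "core" records the protocol once at registration time ... */
static void tpg_register(struct tpg *tpg, enum proto_id proto)
{
        tpg->proto = proto;
}

/* ... and later dispatches on it itself, instead of calling back into
 * per-fabric TransportID helpers. */
static const char *proto_name(const struct tpg *tpg)
{
        switch (tpg->proto) {
        case PROTO_SAS:   return "SAS";
        case PROTO_FCP:   return "FCP";
        case PROTO_ISCSI: return "iSCSI";
        }
        return "unknown";
}

int main(void)
{
        struct tpg t;
        tpg_register(&t, PROTO_ISCSI);
        printf("TPG registered as %s\n", proto_name(&t));
        return 0;
}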
index 1e72ff77cac9c3eab83e933d2c5d675edfea483b..4346462094a1af4e4ce778abd41a0f1da1d0a559 100644 (file)
@@ -2,11 +2,6 @@
 #define TL_WWN_ADDR_LEN                        256
 #define TL_TPGS_PER_HBA                        32
 
-/*
- * Used in tcm_loop_driver_probe() for struct Scsi_Host->max_cmd_len
- */
-#define TL_SCSI_MAX_CMD_LEN            32
-
 struct tcm_loop_cmd {
        /* State of Linux/SCSI CDB+Data descriptor */
        u32 sc_cmd_state;
@@ -33,10 +28,6 @@ struct tcm_loop_nexus {
        struct se_session *se_sess;
 };
 
-struct tcm_loop_nacl {
-       struct se_node_acl se_node_acl;
-};
-
 #define TCM_TRANSPORT_ONLINE 0
 #define TCM_TRANSPORT_OFFLINE 1
 
index ce81f17ad1ba5f97595ada370d8de40429eb9e0c..0edf320fb68547a4f4c6d0b8d46687727d47c557 100644 (file)
@@ -36,7 +36,6 @@
 #include <target/target_core_backend.h>
 #include <target/target_core_fabric.h>
 #include <target/target_core_fabric_configfs.h>
-#include <target/target_core_configfs.h>
 #include <target/configfs_macros.h>
 #include <asm/unaligned.h>
 
@@ -109,13 +108,13 @@ static struct sbp_session *sbp_session_find_by_guid(
 }
 
 static struct sbp_login_descriptor *sbp_login_find_by_lun(
-               struct sbp_session *session, struct se_lun *lun)
+               struct sbp_session *session, u32 unpacked_lun)
 {
        struct sbp_login_descriptor *login, *found = NULL;
 
        spin_lock_bh(&session->lock);
        list_for_each_entry(login, &session->login_list, link) {
-               if (login->lun == lun)
+               if (login->login_lun == unpacked_lun)
                        found = login;
        }
        spin_unlock_bh(&session->lock);
@@ -125,7 +124,7 @@ static struct sbp_login_descriptor *sbp_login_find_by_lun(
 
 static int sbp_login_count_all_by_lun(
                struct sbp_tpg *tpg,
-               struct se_lun *lun,
+               u32 unpacked_lun,
                int exclusive)
 {
        struct se_session *se_sess;
@@ -139,7 +138,7 @@ static int sbp_login_count_all_by_lun(
 
                spin_lock_bh(&sess->lock);
                list_for_each_entry(login, &sess->login_list, link) {
-                       if (login->lun != lun)
+                       if (login->login_lun != unpacked_lun)
                                continue;
 
                        if (!exclusive || login->exclusive)
@@ -175,23 +174,23 @@ static struct sbp_login_descriptor *sbp_login_find_by_id(
        return found;
 }
 
-static struct se_lun *sbp_get_lun_from_tpg(struct sbp_tpg *tpg, int lun)
+static u32 sbp_get_lun_from_tpg(struct sbp_tpg *tpg, u32 login_lun, int *err)
 {
        struct se_portal_group *se_tpg = &tpg->se_tpg;
        struct se_lun *se_lun;
 
-       if (lun >= TRANSPORT_MAX_LUNS_PER_TPG)
-               return ERR_PTR(-EINVAL);
-
-       spin_lock(&se_tpg->tpg_lun_lock);
-       se_lun = se_tpg->tpg_lun_list[lun];
-
-       if (se_lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE)
-               se_lun = ERR_PTR(-ENODEV);
-
-       spin_unlock(&se_tpg->tpg_lun_lock);
+       rcu_read_lock();
+       hlist_for_each_entry_rcu(se_lun, &se_tpg->tpg_lun_hlist, link) {
+               if (se_lun->unpacked_lun == login_lun) {
+                       rcu_read_unlock();
+                       *err = 0;
+                       return login_lun;
+               }
+       }
+       rcu_read_unlock();
 
-       return se_lun;
+       *err = -ENODEV;
+       return login_lun;
 }
 
 static struct sbp_session *sbp_session_create(
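
sbp_get_lun_from_tpg() now checks whether the requested LUN exists by walking the TPG's RCU-protected LUN list instead of indexing the old fixed tpg_lun_list[] array. A userspace sketch of the control flow only, with RCU and the kernel hlist types elided:

#include <errno.h>
#include <stdio.h>

struct lun { unsigned long long unpacked_lun; struct lun *next; };

/* Return 0 if the LUN is present, -ENODEV otherwise. */
static int lun_exists(const struct lun *head, unsigned long long want)
{
        for (const struct lun *l = head; l; l = l->next)
                if (l->unpacked_lun == want)
                        return 0;
        return -ENODEV;
}

int main(void)
{
        struct lun lun3 = { 3, NULL };
        struct lun lun0 = { 0, &lun3 };

        printf("lookup LUN 3: %d\n", lun_exists(&lun0, 3));   /* 0 */
        printf("lookup LUN 7: %d\n", lun_exists(&lun0, 7));   /* -ENODEV */
        return 0;
}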
@@ -295,17 +294,16 @@ static void sbp_management_request_login(
 {
        struct sbp_tport *tport = agent->tport;
        struct sbp_tpg *tpg = tport->tpg;
-       struct se_lun *se_lun;
-       int ret;
-       u64 guid;
        struct sbp_session *sess;
        struct sbp_login_descriptor *login;
        struct sbp_login_response_block *response;
-       int login_response_len;
+       u64 guid;
+       u32 unpacked_lun;
+       int login_response_len, ret;
 
-       se_lun = sbp_get_lun_from_tpg(tpg,
-                       LOGIN_ORB_LUN(be32_to_cpu(req->orb.misc)));
-       if (IS_ERR(se_lun)) {
+       unpacked_lun = sbp_get_lun_from_tpg(tpg,
+                       LOGIN_ORB_LUN(be32_to_cpu(req->orb.misc)), &ret);
+       if (ret) {
                pr_notice("login to unknown LUN: %d\n",
                        LOGIN_ORB_LUN(be32_to_cpu(req->orb.misc)));
 
@@ -326,11 +324,11 @@ static void sbp_management_request_login(
        }
 
        pr_notice("mgt_agent LOGIN to LUN %d from %016llx\n",
-               se_lun->unpacked_lun, guid);
+               unpacked_lun, guid);
 
        sess = sbp_session_find_by_guid(tpg, guid);
        if (sess) {
-               login = sbp_login_find_by_lun(sess, se_lun);
+               login = sbp_login_find_by_lun(sess, unpacked_lun);
                if (login) {
                        pr_notice("initiator already logged-in\n");
 
@@ -358,7 +356,7 @@ static void sbp_management_request_login(
         * reject with access_denied if any logins present
         */
        if (LOGIN_ORB_EXCLUSIVE(be32_to_cpu(req->orb.misc)) &&
-                       sbp_login_count_all_by_lun(tpg, se_lun, 0)) {
+                       sbp_login_count_all_by_lun(tpg, unpacked_lun, 0)) {
                pr_warn("refusing exclusive login with other active logins\n");
 
                req->status.status = cpu_to_be32(
@@ -371,7 +369,7 @@ static void sbp_management_request_login(
         * check exclusive bit in any existing login descriptor
         * reject with access_denied if any exclusive logins present
         */
-       if (sbp_login_count_all_by_lun(tpg, se_lun, 1)) {
+       if (sbp_login_count_all_by_lun(tpg, unpacked_lun, 1)) {
                pr_warn("refusing login while another exclusive login present\n");
 
                req->status.status = cpu_to_be32(
@@ -384,7 +382,7 @@ static void sbp_management_request_login(
         * check we haven't exceeded the number of allowed logins
         * reject with resources_unavailable if we have
         */
-       if (sbp_login_count_all_by_lun(tpg, se_lun, 0) >=
+       if (sbp_login_count_all_by_lun(tpg, unpacked_lun, 0) >=
                        tport->max_logins_per_lun) {
                pr_warn("max number of logins reached\n");
 
@@ -440,7 +438,7 @@ static void sbp_management_request_login(
        }
 
        login->sess = sess;
-       login->lun = se_lun;
+       login->login_lun = unpacked_lun;
        login->status_fifo_addr = sbp2_pointer_to_addr(&req->orb.status_fifo);
        login->exclusive = LOGIN_ORB_EXCLUSIVE(be32_to_cpu(req->orb.misc));
        login->login_id = atomic_inc_return(&login_id);
@@ -602,7 +600,7 @@ static void sbp_management_request_logout(
        }
 
        pr_info("mgt_agent LOGOUT from LUN %d session %d\n",
-               login->lun->unpacked_lun, login->login_id);
+               login->login_lun, login->login_id);
 
        if (req->node_addr != login->sess->node_id) {
                pr_warn("logout from different node ID\n");
@@ -1228,12 +1226,14 @@ static void sbp_handle_command(struct sbp_target_request *req)
                goto err;
        }
 
-       unpacked_lun = req->login->lun->unpacked_lun;
+       unpacked_lun = req->login->login_lun;
        sbp_calc_data_length_direction(req, &data_length, &data_dir);
 
        pr_debug("sbp_handle_command ORB:0x%llx unpacked_lun:%d data_len:%d data_dir:%d\n",
                        req->orb_pointer, unpacked_lun, data_length, data_dir);
 
+       /* only used for printk until we do TMRs */
+       req->se_cmd.tag = req->orb_pointer;
        if (target_submit_cmd(&req->se_cmd, sess->se_sess, req->cmd_buf,
                              req->sense_buf, unpacked_lun, data_length,
                              TCM_SIMPLE_TAG, data_dir, 0))
@@ -1707,33 +1707,6 @@ static u16 sbp_get_tag(struct se_portal_group *se_tpg)
        return tpg->tport_tpgt;
 }
 
-static u32 sbp_get_default_depth(struct se_portal_group *se_tpg)
-{
-       return 1;
-}
-
-static struct se_node_acl *sbp_alloc_fabric_acl(struct se_portal_group *se_tpg)
-{
-       struct sbp_nacl *nacl;
-
-       nacl = kzalloc(sizeof(struct sbp_nacl), GFP_KERNEL);
-       if (!nacl) {
-               pr_err("Unable to allocate struct sbp_nacl\n");
-               return NULL;
-       }
-
-       return &nacl->se_node_acl;
-}
-
-static void sbp_release_fabric_acl(
-       struct se_portal_group *se_tpg,
-       struct se_node_acl *se_nacl)
-{
-       struct sbp_nacl *nacl =
-               container_of(se_nacl, struct sbp_nacl, se_node_acl);
-       kfree(nacl);
-}
-
 static u32 sbp_tpg_get_inst_index(struct se_portal_group *se_tpg)
 {
        return 1;
@@ -1795,15 +1768,6 @@ static void sbp_set_default_node_attrs(struct se_node_acl *nacl)
        return;
 }
 
-static u32 sbp_get_task_tag(struct se_cmd *se_cmd)
-{
-       struct sbp_target_request *req = container_of(se_cmd,
-                       struct sbp_target_request, se_cmd);
-
-       /* only used for printk until we do TMRs */
-       return (u32)req->orb_pointer;
-}
-
 static int sbp_get_cmd_state(struct se_cmd *se_cmd)
 {
        return 0;
@@ -1859,106 +1823,23 @@ static int sbp_check_stop_free(struct se_cmd *se_cmd)
        return 1;
 }
 
-/*
- * Handlers for Serial Bus Protocol 2/3 (SBP-2 / SBP-3)
- */
-static u8 sbp_get_fabric_proto_ident(struct se_portal_group *se_tpg)
-{
-       /*
-        * Return a IEEE 1394 SCSI Protocol identifier for loopback operations
-        * This is defined in section 7.5.1 Table 362 in spc4r17
-        */
-       return SCSI_PROTOCOL_SBP;
-}
-
-static u32 sbp_get_pr_transport_id(
-       struct se_portal_group *se_tpg,
-       struct se_node_acl *se_nacl,
-       struct t10_pr_registration *pr_reg,
-       int *format_code,
-       unsigned char *buf)
-{
-       int ret;
-
-       /*
-        * Set PROTOCOL IDENTIFIER to 3h for SBP
-        */
-       buf[0] = SCSI_PROTOCOL_SBP;
-       /*
-        * From spc4r17, 7.5.4.4 TransportID for initiator ports using SCSI
-        * over IEEE 1394
-        */
-       ret = hex2bin(&buf[8], se_nacl->initiatorname, 8);
-       if (ret < 0)
-               pr_debug("sbp transport_id: invalid hex string\n");
-
-       /*
-        * The IEEE 1394 Transport ID is a hardcoded 24-byte length
-        */
-       return 24;
-}
-
-static u32 sbp_get_pr_transport_id_len(
-       struct se_portal_group *se_tpg,
-       struct se_node_acl *se_nacl,
-       struct t10_pr_registration *pr_reg,
-       int *format_code)
-{
-       *format_code = 0;
-       /*
-        * From spc4r17, 7.5.4.4 TransportID for initiator ports using SCSI
-        * over IEEE 1394
-        *
-        * The SBP Transport ID is a hardcoded 24-byte length
-        */
-       return 24;
-}
-
-/*
- * Used for handling SCSI fabric dependent TransportIDs in SPC-3 and above
- * Persistent Reservation SPEC_I_PT=1 and PROUT REGISTER_AND_MOVE operations.
- */
-static char *sbp_parse_pr_out_transport_id(
-       struct se_portal_group *se_tpg,
-       const char *buf,
-       u32 *out_tid_len,
-       char **port_nexus_ptr)
-{
-       /*
-        * Assume the FORMAT CODE 00b from spc4r17, 7.5.4.4 TransportID
-        * for initiator ports using SCSI over SBP Serial SCSI Protocol
-        *
-        * The TransportID for a IEEE 1394 Initiator Port is of fixed size of
-        * 24 bytes, and IEEE 1394 does not contain a I_T nexus identifier,
-        * so we return the **port_nexus_ptr set to NULL.
-        */
-       *port_nexus_ptr = NULL;
-       *out_tid_len = 24;
-
-       return (char *)&buf[8];
-}
-
 static int sbp_count_se_tpg_luns(struct se_portal_group *tpg)
 {
-       int i, count = 0;
-
-       spin_lock(&tpg->tpg_lun_lock);
-       for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
-               struct se_lun *se_lun = tpg->tpg_lun_list[i];
-
-               if (se_lun->lun_status == TRANSPORT_LUN_STATUS_FREE)
-                       continue;
+       struct se_lun *lun;
+       int count = 0;
 
+       rcu_read_lock();
+       hlist_for_each_entry_rcu(lun, &tpg->tpg_lun_hlist, link)
                count++;
-       }
-       spin_unlock(&tpg->tpg_lun_lock);
+       rcu_read_unlock();
 
        return count;
 }
 
 static int sbp_update_unit_directory(struct sbp_tport *tport)
 {
-       int num_luns, num_entries, idx = 0, mgt_agt_addr, ret, i;
+       struct se_lun *lun;
+       int num_luns, num_entries, idx = 0, mgt_agt_addr, ret;
        u32 *data;
 
        if (tport->unit_directory.data) {
@@ -2020,28 +1901,23 @@ static int sbp_update_unit_directory(struct sbp_tport *tport)
        /* unit unique ID (leaf is just after LUNs) */
        data[idx++] = 0x8d000000 | (num_luns + 1);
 
-       spin_lock(&tport->tpg->se_tpg.tpg_lun_lock);
-       for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
-               struct se_lun *se_lun = tport->tpg->se_tpg.tpg_lun_list[i];
+       rcu_read_lock();
+       hlist_for_each_entry_rcu(lun, &tport->tpg->se_tpg.tpg_lun_hlist, link) {
                struct se_device *dev;
                int type;
-
-               if (se_lun->lun_status == TRANSPORT_LUN_STATUS_FREE)
-                       continue;
-
-               spin_unlock(&tport->tpg->se_tpg.tpg_lun_lock);
-
-               dev = se_lun->lun_se_dev;
+               /*
+                * rcu_dereference_raw protected by se_lun->lun_group symlink
+                * reference to se_device->dev_group.
+                */
+               dev = rcu_dereference_raw(lun->lun_se_dev);
                type = dev->transport->get_device_type(dev);
 
                /* logical_unit_number */
                data[idx++] = 0x14000000 |
                        ((type << 16) & 0x1f0000) |
-                       (se_lun->unpacked_lun & 0xffff);
-
-               spin_lock(&tport->tpg->se_tpg.tpg_lun_lock);
+                       (lun->unpacked_lun & 0xffff);
        }
-       spin_unlock(&tport->tpg->se_tpg.tpg_lun_lock);
+       rcu_read_unlock();
 
        /* unit unique ID leaf */
        data[idx++] = 2 << 16;
@@ -2100,48 +1976,13 @@ static ssize_t sbp_format_wwn(char *buf, size_t len, u64 wwn)
        return snprintf(buf, len, "%016llx", wwn);
 }
 
-static struct se_node_acl *sbp_make_nodeacl(
-               struct se_portal_group *se_tpg,
-               struct config_group *group,
-               const char *name)
+static int sbp_init_nodeacl(struct se_node_acl *se_nacl, const char *name)
 {
-       struct se_node_acl *se_nacl, *se_nacl_new;
-       struct sbp_nacl *nacl;
        u64 guid = 0;
-       u32 nexus_depth = 1;
 
        if (sbp_parse_wwn(name, &guid) < 0)
-               return ERR_PTR(-EINVAL);
-
-       se_nacl_new = sbp_alloc_fabric_acl(se_tpg);
-       if (!se_nacl_new)
-               return ERR_PTR(-ENOMEM);
-
-       /*
-        * se_nacl_new may be released by core_tpg_add_initiator_node_acl()
-        * when converting a NodeACL from demo mode -> explict
-        */
-       se_nacl = core_tpg_add_initiator_node_acl(se_tpg, se_nacl_new,
-                       name, nexus_depth);
-       if (IS_ERR(se_nacl)) {
-               sbp_release_fabric_acl(se_tpg, se_nacl_new);
-               return se_nacl;
-       }
-
-       nacl = container_of(se_nacl, struct sbp_nacl, se_node_acl);
-       nacl->guid = guid;
-       sbp_format_wwn(nacl->iport_name, SBP_NAMELEN, guid);
-
-       return se_nacl;
-}
-
-static void sbp_drop_nodeacl(struct se_node_acl *se_acl)
-{
-       struct sbp_nacl *nacl =
-               container_of(se_acl, struct sbp_nacl, se_node_acl);
-
-       core_tpg_del_initiator_node_acl(se_acl->se_tpg, se_acl, 1);
-       kfree(nacl);
+               return -EINVAL;
+       return 0;
 }
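
With sbp_make_nodeacl()/sbp_drop_nodeacl() and struct sbp_nacl gone, the fabric's remaining ACL duty appears to be validating the initiator name; allocation and teardown of the generic se_node_acl presumably happen in the core via the new .fabric_init_nodeacl hook. The replacement callback, restated without diff markup:

static int sbp_init_nodeacl(struct se_node_acl *se_nacl, const char *name)
{
        u64 guid = 0;

        /* Reject anything that does not parse as an EUI-64 WWN. */
        if (sbp_parse_wwn(name, &guid) < 0)
                return -EINVAL;
        return 0;
}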
 
 static int sbp_post_link_lun(
@@ -2214,8 +2055,7 @@ static struct se_portal_group *sbp_make_tpg(
                goto out_free_tpg;
        }
 
-       ret = core_tpg_register(&sbp_ops, wwn, &tpg->se_tpg, tpg,
-                       TRANSPORT_TPG_TYPE_NORMAL);
+       ret = core_tpg_register(wwn, &tpg->se_tpg, SCSI_PROTOCOL_SBP);
        if (ret < 0)
                goto out_unreg_mgt_agt;
 
@@ -2505,19 +2345,12 @@ static const struct target_core_fabric_ops sbp_ops = {
        .module                         = THIS_MODULE,
        .name                           = "sbp",
        .get_fabric_name                = sbp_get_fabric_name,
-       .get_fabric_proto_ident         = sbp_get_fabric_proto_ident,
        .tpg_get_wwn                    = sbp_get_fabric_wwn,
        .tpg_get_tag                    = sbp_get_tag,
-       .tpg_get_default_depth          = sbp_get_default_depth,
-       .tpg_get_pr_transport_id        = sbp_get_pr_transport_id,
-       .tpg_get_pr_transport_id_len    = sbp_get_pr_transport_id_len,
-       .tpg_parse_pr_out_transport_id  = sbp_parse_pr_out_transport_id,
        .tpg_check_demo_mode            = sbp_check_true,
        .tpg_check_demo_mode_cache      = sbp_check_true,
        .tpg_check_demo_mode_write_protect = sbp_check_false,
        .tpg_check_prod_mode_write_protect = sbp_check_false,
-       .tpg_alloc_fabric_acl           = sbp_alloc_fabric_acl,
-       .tpg_release_fabric_acl         = sbp_release_fabric_acl,
        .tpg_get_inst_index             = sbp_tpg_get_inst_index,
        .release_cmd                    = sbp_release_cmd,
        .shutdown_session               = sbp_shutdown_session,
@@ -2526,7 +2359,6 @@ static const struct target_core_fabric_ops sbp_ops = {
        .write_pending                  = sbp_write_pending,
        .write_pending_status           = sbp_write_pending_status,
        .set_default_node_attributes    = sbp_set_default_node_attrs,
-       .get_task_tag                   = sbp_get_task_tag,
        .get_cmd_state                  = sbp_get_cmd_state,
        .queue_data_in                  = sbp_queue_data_in,
        .queue_status                   = sbp_queue_status,
@@ -2542,8 +2374,7 @@ static const struct target_core_fabric_ops sbp_ops = {
        .fabric_pre_unlink              = sbp_pre_unlink_lun,
        .fabric_make_np                 = NULL,
        .fabric_drop_np                 = NULL,
-       .fabric_make_nodeacl            = sbp_make_nodeacl,
-       .fabric_drop_nodeacl            = sbp_drop_nodeacl,
+       .fabric_init_nodeacl            = sbp_init_nodeacl,
 
        .tfc_wwn_attrs                  = sbp_wwn_attrs,
        .tfc_tpg_base_attrs             = sbp_tpg_base_attrs,
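
Note how the removal of .get_task_tag is balanced by the earlier sbp_handle_command() hunk: the fabric now writes the tag into the generic command descriptor itself before submission. Condensed from that hunk (error label as in the existing function):

        /* only used for printk until we do TMRs */
        req->se_cmd.tag = req->orb_pointer;

        if (target_submit_cmd(&req->se_cmd, sess->se_sess, req->cmd_buf,
                              req->sense_buf, unpacked_lun, data_length,
                              TCM_SIMPLE_TAG, data_dir, 0))
                goto err;
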
index 6d0d74a2c54522cdbd277943c2a38291efd2c81a..73bcb12088322779ebcf8a325830859d2f29a672 100644 (file)
@@ -125,7 +125,7 @@ struct sbp_login_descriptor {
        struct sbp_session *sess;
        struct list_head link;
 
-       struct se_lun *lun;
+       u32 login_lun;
 
        u64 status_fifo_addr;
        int exclusive;
@@ -151,15 +151,6 @@ struct sbp_session {
        u64 reconnect_expires;
 };
 
-struct sbp_nacl {
-       /* Initiator EUI-64 */
-       u64 guid;
-       /* ASCII formatted GUID for SBP Initiator port */
-       char iport_name[SBP_NAMELEN];
-       /* Returned by sbp_make_nodeacl() */
-       struct se_node_acl se_node_acl;
-};
-
 struct sbp_tpg {
        /* Target portal group tag for TCM */
        u16 tport_tpgt;
index 8ca3737742765a9aca58c2890a9d11a0cfe50237..49aba4a31747c275dd51ab057808ec2b60202ca9 100644 (file)
@@ -34,7 +34,6 @@
 #include <target/target_core_base.h>
 #include <target/target_core_backend.h>
 #include <target/target_core_fabric.h>
-#include <target/target_core_configfs.h>
 
 #include "target_core_internal.h"
 #include "target_core_alua.h"
 static sense_reason_t core_alua_check_transition(int state, int valid,
                                                 int *primary);
 static int core_alua_set_tg_pt_secondary_state(
-               struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem,
-               struct se_port *port, int explicit, int offline);
+               struct se_lun *lun, int explicit, int offline);
 
 static char *core_alua_dump_state(int state);
 
+static void __target_attach_tg_pt_gp(struct se_lun *lun,
+               struct t10_alua_tg_pt_gp *tg_pt_gp);
+
 static u16 alua_lu_gps_counter;
 static u32 alua_lu_gps_count;
 
@@ -145,9 +146,8 @@ sense_reason_t
 target_emulate_report_target_port_groups(struct se_cmd *cmd)
 {
        struct se_device *dev = cmd->se_dev;
-       struct se_port *port;
        struct t10_alua_tg_pt_gp *tg_pt_gp;
-       struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
+       struct se_lun *lun;
        unsigned char *buf;
        u32 rd_len = 0, off;
        int ext_hdr = (cmd->t_task_cdb[1] & 0x20);
@@ -222,9 +222,8 @@ target_emulate_report_target_port_groups(struct se_cmd *cmd)
                rd_len += 8;
 
                spin_lock(&tg_pt_gp->tg_pt_gp_lock);
-               list_for_each_entry(tg_pt_gp_mem, &tg_pt_gp->tg_pt_gp_mem_list,
-                               tg_pt_gp_mem_list) {
-                       port = tg_pt_gp_mem->tg_pt;
+               list_for_each_entry(lun, &tg_pt_gp->tg_pt_gp_lun_list,
+                               lun_tg_pt_gp_link) {
                        /*
                         * Start Target Port descriptor format
                         *
@@ -234,8 +233,8 @@ target_emulate_report_target_port_groups(struct se_cmd *cmd)
                        /*
                         * Set RELATIVE TARGET PORT IDENTIFIER
                         */
-                       buf[off++] = ((port->sep_rtpi >> 8) & 0xff);
-                       buf[off++] = (port->sep_rtpi & 0xff);
+                       buf[off++] = ((lun->lun_rtpi >> 8) & 0xff);
+                       buf[off++] = (lun->lun_rtpi & 0xff);
                        rd_len += 4;
                }
                spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
@@ -259,15 +258,11 @@ target_emulate_report_target_port_groups(struct se_cmd *cmd)
                 * this CDB was received upon to determine this value individually
                 * for ALUA target port group.
                 */
-               port = cmd->se_lun->lun_sep;
-               tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem;
-               if (tg_pt_gp_mem) {
-                       spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
-                       tg_pt_gp = tg_pt_gp_mem->tg_pt_gp;
-                       if (tg_pt_gp)
-                               buf[5] = tg_pt_gp->tg_pt_gp_implicit_trans_secs;
-                       spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
-               }
+               spin_lock(&cmd->se_lun->lun_tg_pt_gp_lock);
+               tg_pt_gp = cmd->se_lun->lun_tg_pt_gp;
+               if (tg_pt_gp)
+                       buf[5] = tg_pt_gp->tg_pt_gp_implicit_trans_secs;
+               spin_unlock(&cmd->se_lun->lun_tg_pt_gp_lock);
        }
        transport_kunmap_data_sg(cmd);
 
@@ -284,10 +279,9 @@ sense_reason_t
 target_emulate_set_target_port_groups(struct se_cmd *cmd)
 {
        struct se_device *dev = cmd->se_dev;
-       struct se_port *port, *l_port = cmd->se_lun->lun_sep;
+       struct se_lun *l_lun = cmd->se_lun;
        struct se_node_acl *nacl = cmd->se_sess->se_node_acl;
        struct t10_alua_tg_pt_gp *tg_pt_gp = NULL, *l_tg_pt_gp;
-       struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem, *l_tg_pt_gp_mem;
        unsigned char *buf;
        unsigned char *ptr;
        sense_reason_t rc = TCM_NO_SENSE;
@@ -295,9 +289,6 @@ target_emulate_set_target_port_groups(struct se_cmd *cmd)
        int alua_access_state, primary = 0, valid_states;
        u16 tg_pt_id, rtpi;
 
-       if (!l_port)
-               return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
-
        if (cmd->data_length < 4) {
                pr_warn("SET TARGET PORT GROUPS parameter list length %u too"
                        " small\n", cmd->data_length);
@@ -312,29 +303,24 @@ target_emulate_set_target_port_groups(struct se_cmd *cmd)
         * Determine if explicit ALUA via SET_TARGET_PORT_GROUPS is allowed
         * for the local tg_pt_gp.
         */
-       l_tg_pt_gp_mem = l_port->sep_alua_tg_pt_gp_mem;
-       if (!l_tg_pt_gp_mem) {
-               pr_err("Unable to access l_port->sep_alua_tg_pt_gp_mem\n");
-               rc = TCM_UNSUPPORTED_SCSI_OPCODE;
-               goto out;
-       }
-       spin_lock(&l_tg_pt_gp_mem->tg_pt_gp_mem_lock);
-       l_tg_pt_gp = l_tg_pt_gp_mem->tg_pt_gp;
+       spin_lock(&l_lun->lun_tg_pt_gp_lock);
+       l_tg_pt_gp = l_lun->lun_tg_pt_gp;
        if (!l_tg_pt_gp) {
-               spin_unlock(&l_tg_pt_gp_mem->tg_pt_gp_mem_lock);
-               pr_err("Unable to access *l_tg_pt_gp_mem->tg_pt_gp\n");
+               spin_unlock(&l_lun->lun_tg_pt_gp_lock);
+               pr_err("Unable to access l_lun->lun_tg_pt_gp\n");
                rc = TCM_UNSUPPORTED_SCSI_OPCODE;
                goto out;
        }
-       spin_unlock(&l_tg_pt_gp_mem->tg_pt_gp_mem_lock);
 
        if (!(l_tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICIT_ALUA)) {
+               spin_unlock(&l_lun->lun_tg_pt_gp_lock);
                pr_debug("Unable to process SET_TARGET_PORT_GROUPS"
                                " while TPGS_EXPLICIT_ALUA is disabled\n");
                rc = TCM_UNSUPPORTED_SCSI_OPCODE;
                goto out;
        }
        valid_states = l_tg_pt_gp->tg_pt_gp_alua_supported_states;
+       spin_unlock(&l_lun->lun_tg_pt_gp_lock);
 
        ptr = &buf[4]; /* Skip over RESERVED area in header */
 
@@ -396,7 +382,7 @@ target_emulate_set_target_port_groups(struct se_cmd *cmd)
                                spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
 
                                if (!core_alua_do_port_transition(tg_pt_gp,
-                                               dev, l_port, nacl,
+                                               dev, l_lun, nacl,
                                                alua_access_state, 1))
                                        found = true;
 
@@ -406,6 +392,8 @@ target_emulate_set_target_port_groups(struct se_cmd *cmd)
                        }
                        spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
                } else {
+                       struct se_lun *lun;
+
                        /*
                         * Extract the RELATIVE TARGET PORT IDENTIFIER to identify
                         * the Target Port in question for the incoming
@@ -417,17 +405,16 @@ target_emulate_set_target_port_groups(struct se_cmd *cmd)
                         * for the struct se_device storage object.
                         */
                        spin_lock(&dev->se_port_lock);
-                       list_for_each_entry(port, &dev->dev_sep_list,
-                                                       sep_list) {
-                               if (port->sep_rtpi != rtpi)
+                       list_for_each_entry(lun, &dev->dev_sep_list,
+                                                       lun_dev_link) {
+                               if (lun->lun_rtpi != rtpi)
                                        continue;
 
-                               tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem;
-
+                               // XXX: racy unlock
                                spin_unlock(&dev->se_port_lock);
 
                                if (!core_alua_set_tg_pt_secondary_state(
-                                               tg_pt_gp_mem, port, 1, 1))
+                                               lun, 1, 1))
                                        found = true;
 
                                spin_lock(&dev->se_port_lock);
@@ -696,9 +683,7 @@ target_alua_state_check(struct se_cmd *cmd)
        struct se_device *dev = cmd->se_dev;
        unsigned char *cdb = cmd->t_task_cdb;
        struct se_lun *lun = cmd->se_lun;
-       struct se_port *port = lun->lun_sep;
        struct t10_alua_tg_pt_gp *tg_pt_gp;
-       struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
        int out_alua_state, nonop_delay_msecs;
 
        if (dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE)
@@ -706,33 +691,27 @@ target_alua_state_check(struct se_cmd *cmd)
        if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)
                return 0;
 
-       if (!port)
-               return 0;
        /*
         * First, check for a struct se_port specific secondary ALUA target port
         * access state: OFFLINE
         */
-       if (atomic_read(&port->sep_tg_pt_secondary_offline)) {
+       if (atomic_read(&lun->lun_tg_pt_secondary_offline)) {
                pr_debug("ALUA: Got secondary offline status for local"
                                " target port\n");
                set_ascq(cmd, ASCQ_04H_ALUA_OFFLINE);
                return TCM_CHECK_CONDITION_NOT_READY;
        }
-        /*
-        * Second, obtain the struct t10_alua_tg_pt_gp_member pointer to the
-        * ALUA target port group, to obtain current ALUA access state.
-        * Otherwise look for the underlying struct se_device association with
-        * a ALUA logical unit group.
-        */
-       tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem;
-       if (!tg_pt_gp_mem)
+
+       if (!lun->lun_tg_pt_gp)
                return 0;
 
-       spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
-       tg_pt_gp = tg_pt_gp_mem->tg_pt_gp;
+       spin_lock(&lun->lun_tg_pt_gp_lock);
+       tg_pt_gp = lun->lun_tg_pt_gp;
        out_alua_state = atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state);
        nonop_delay_msecs = tg_pt_gp->tg_pt_gp_nonop_delay_msecs;
-       spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
+
+       // XXX: keeps using tg_pt_gp without a reference after unlock
+       spin_unlock(&lun->lun_tg_pt_gp_lock);
        /*
         * Process ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED in a separate conditional
         * statement so the compiler knows explicitly to check this case first.
@@ -764,7 +743,7 @@ target_alua_state_check(struct se_cmd *cmd)
                break;
        /*
         * OFFLINE is a secondary ALUA target port group access state, that is
-        * handled above with struct se_port->sep_tg_pt_secondary_offline=1
+        * handled above with struct se_lun->lun_tg_pt_secondary_offline=1
         */
        case ALUA_ACCESS_STATE_OFFLINE:
        default:
@@ -906,10 +885,6 @@ int core_alua_check_nonop_delay(
 }
 EXPORT_SYMBOL(core_alua_check_nonop_delay);
 
-/*
- * Called with tg_pt_gp->tg_pt_gp_md_mutex or tg_pt_gp_mem->sep_tg_pt_md_mutex
- *
- */
 static int core_alua_write_tpg_metadata(
        const char *path,
        unsigned char *md_buf,
@@ -965,22 +940,15 @@ static int core_alua_update_tpg_primary_metadata(
        return rc;
 }
 
-static void core_alua_do_transition_tg_pt_work(struct work_struct *work)
+static void core_alua_queue_state_change_ua(struct t10_alua_tg_pt_gp *tg_pt_gp)
 {
-       struct t10_alua_tg_pt_gp *tg_pt_gp = container_of(work,
-               struct t10_alua_tg_pt_gp, tg_pt_gp_transition_work.work);
-       struct se_device *dev = tg_pt_gp->tg_pt_gp_dev;
        struct se_dev_entry *se_deve;
+       struct se_lun *lun;
        struct se_lun_acl *lacl;
-       struct se_port *port;
-       struct t10_alua_tg_pt_gp_member *mem;
-       bool explicit = (tg_pt_gp->tg_pt_gp_alua_access_status ==
-                        ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG);
 
        spin_lock(&tg_pt_gp->tg_pt_gp_lock);
-       list_for_each_entry(mem, &tg_pt_gp->tg_pt_gp_mem_list,
-                               tg_pt_gp_mem_list) {
-               port = mem->tg_pt;
+       list_for_each_entry(lun, &tg_pt_gp->tg_pt_gp_lun_list,
+                               lun_tg_pt_gp_link) {
                /*
                 * After an implicit target port asymmetric access state
                 * change, a device server shall establish a unit attention
@@ -995,38 +963,58 @@ static void core_alua_do_transition_tg_pt_work(struct work_struct *work)
                 * every I_T nexus other than the I_T nexus on which the SET
                 * TARGET PORT GROUPS command
                 */
-               atomic_inc_mb(&mem->tg_pt_gp_mem_ref_cnt);
+               if (!percpu_ref_tryget_live(&lun->lun_ref))
+                       continue;
                spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
 
-               spin_lock_bh(&port->sep_alua_lock);
-               list_for_each_entry(se_deve, &port->sep_alua_list,
-                                       alua_port_list) {
-                       lacl = se_deve->se_lun_acl;
+               spin_lock(&lun->lun_deve_lock);
+               list_for_each_entry(se_deve, &lun->lun_deve_list, lun_link) {
+                       lacl = rcu_dereference_check(se_deve->se_lun_acl,
+                                       lockdep_is_held(&lun->lun_deve_lock));
+
                        /*
-                        * se_deve->se_lun_acl pointer may be NULL for a
-                        * entry created without explicit Node+MappedLUN ACLs
+                        * spc4r37 p.242:
+                        * After an explicit target port asymmetric access
+                        * state change, a device server shall establish a
+                        * unit attention condition with the additional sense
+                        * code set to ASYMMETRIC ACCESS STATE CHANGED for
+                        * the initiator port associated with every I_T nexus
+                        * other than the I_T nexus on which the SET TARGET
+                        * PORT GROUPS command was received.
                         */
-                       if (!lacl)
-                               continue;
-
                        if ((tg_pt_gp->tg_pt_gp_alua_access_status ==
                             ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG) &&
-                          (tg_pt_gp->tg_pt_gp_alua_nacl != NULL) &&
-                           (tg_pt_gp->tg_pt_gp_alua_nacl == lacl->se_lun_nacl) &&
-                          (tg_pt_gp->tg_pt_gp_alua_port != NULL) &&
-                           (tg_pt_gp->tg_pt_gp_alua_port == port))
+                          (tg_pt_gp->tg_pt_gp_alua_lun != NULL) &&
+                           (tg_pt_gp->tg_pt_gp_alua_lun == lun))
                                continue;
 
-                       core_scsi3_ua_allocate(lacl->se_lun_nacl,
-                               se_deve->mapped_lun, 0x2A,
+                       /*
+                        * se_deve->se_lun_acl pointer may be NULL for an
+                        * entry created without explicit Node+MappedLUN ACLs
+                        */
+                       if (lacl && (tg_pt_gp->tg_pt_gp_alua_nacl != NULL) &&
+                           (tg_pt_gp->tg_pt_gp_alua_nacl == lacl->se_lun_nacl))
+                               continue;
+
+                       core_scsi3_ua_allocate(se_deve, 0x2A,
                                ASCQ_2AH_ASYMMETRIC_ACCESS_STATE_CHANGED);
                }
-               spin_unlock_bh(&port->sep_alua_lock);
+               spin_unlock(&lun->lun_deve_lock);
 
                spin_lock(&tg_pt_gp->tg_pt_gp_lock);
-               atomic_dec_mb(&mem->tg_pt_gp_mem_ref_cnt);
+               percpu_ref_put(&lun->lun_ref);
        }
        spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
+}
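
The shape of the new helper above is worth spelling out: each LUN is pinned with percpu_ref_tryget_live() so tg_pt_gp_lock can be dropped across the per-LUN descriptor scan, then re-taken before the reference is released. A condensed fragment of that locking pattern (variable declarations and the UA allocation details as in the function above):

        spin_lock(&tg_pt_gp->tg_pt_gp_lock);
        list_for_each_entry(lun, &tg_pt_gp->tg_pt_gp_lun_list, lun_tg_pt_gp_link) {
                /* Skip LUNs already being torn down; a live ref keeps this
                 * one valid while the group lock is dropped. */
                if (!percpu_ref_tryget_live(&lun->lun_ref))
                        continue;
                spin_unlock(&tg_pt_gp->tg_pt_gp_lock);

                spin_lock(&lun->lun_deve_lock);
                /* ... allocate ASYMMETRIC ACCESS STATE CHANGED unit attentions ... */
                spin_unlock(&lun->lun_deve_lock);

                spin_lock(&tg_pt_gp->tg_pt_gp_lock);
                percpu_ref_put(&lun->lun_ref);
        }
        spin_unlock(&tg_pt_gp->tg_pt_gp_lock);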
+
+static void core_alua_do_transition_tg_pt_work(struct work_struct *work)
+{
+       struct t10_alua_tg_pt_gp *tg_pt_gp = container_of(work,
+               struct t10_alua_tg_pt_gp, tg_pt_gp_transition_work.work);
+       struct se_device *dev = tg_pt_gp->tg_pt_gp_dev;
+       bool explicit = (tg_pt_gp->tg_pt_gp_alua_access_status ==
+                        ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG);
+
        /*
         * Update the ALUA metadata buf that has been allocated in
         * core_alua_do_port_transition(), this metadata will be written
@@ -1056,6 +1044,9 @@ static void core_alua_do_transition_tg_pt_work(struct work_struct *work)
                tg_pt_gp->tg_pt_gp_id,
                core_alua_dump_state(tg_pt_gp->tg_pt_gp_alua_previous_state),
                core_alua_dump_state(tg_pt_gp->tg_pt_gp_alua_pending_state));
+
+       core_alua_queue_state_change_ua(tg_pt_gp);
+
        spin_lock(&dev->t10_alua.tg_pt_gps_lock);
        atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt);
        spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
@@ -1108,6 +1099,8 @@ static int core_alua_do_transition_tg_pt(
                                ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG :
                                ALUA_STATUS_ALTERED_BY_IMPLICIT_ALUA;
 
+       core_alua_queue_state_change_ua(tg_pt_gp);
+
        /*
         * Check for the optional ALUA primary state transition delay
         */
@@ -1142,7 +1135,7 @@ static int core_alua_do_transition_tg_pt(
 int core_alua_do_port_transition(
        struct t10_alua_tg_pt_gp *l_tg_pt_gp,
        struct se_device *l_dev,
-       struct se_port *l_port,
+       struct se_lun *l_lun,
        struct se_node_acl *l_nacl,
        int new_state,
        int explicit)
@@ -1172,7 +1165,7 @@ int core_alua_do_port_transition(
                 * core_alua_do_transition_tg_pt() will always return
                 * success.
                 */
-               l_tg_pt_gp->tg_pt_gp_alua_port = l_port;
+               l_tg_pt_gp->tg_pt_gp_alua_lun = l_lun;
                l_tg_pt_gp->tg_pt_gp_alua_nacl = l_nacl;
                rc = core_alua_do_transition_tg_pt(l_tg_pt_gp,
                                                   new_state, explicit);
@@ -1211,10 +1204,10 @@ int core_alua_do_port_transition(
                                continue;
 
                        if (l_tg_pt_gp == tg_pt_gp) {
-                               tg_pt_gp->tg_pt_gp_alua_port = l_port;
+                               tg_pt_gp->tg_pt_gp_alua_lun = l_lun;
                                tg_pt_gp->tg_pt_gp_alua_nacl = l_nacl;
                        } else {
-                               tg_pt_gp->tg_pt_gp_alua_port = NULL;
+                               tg_pt_gp->tg_pt_gp_alua_lun = NULL;
                                tg_pt_gp->tg_pt_gp_alua_nacl = NULL;
                        }
                        atomic_inc_mb(&tg_pt_gp->tg_pt_gp_ref_cnt);
@@ -1251,22 +1244,20 @@ int core_alua_do_port_transition(
        return rc;
 }
 
-/*
- * Called with tg_pt_gp_mem->sep_tg_pt_md_mutex held
- */
-static int core_alua_update_tpg_secondary_metadata(
-       struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem,
-       struct se_port *port)
+static int core_alua_update_tpg_secondary_metadata(struct se_lun *lun)
 {
+       struct se_portal_group *se_tpg = lun->lun_tpg;
        unsigned char *md_buf;
-       struct se_portal_group *se_tpg = port->sep_tpg;
        char path[ALUA_METADATA_PATH_LEN], wwn[ALUA_SECONDARY_METADATA_WWN_LEN];
        int len, rc;
 
+       mutex_lock(&lun->lun_tg_pt_md_mutex);
+
        md_buf = kzalloc(ALUA_MD_BUF_LEN, GFP_KERNEL);
        if (!md_buf) {
                pr_err("Unable to allocate buf for ALUA metadata\n");
-               return -ENOMEM;
+               rc = -ENOMEM;
+               goto out_unlock;
        }
 
        memset(path, 0, ALUA_METADATA_PATH_LEN);
@@ -1281,32 +1272,33 @@ static int core_alua_update_tpg_secondary_metadata(
 
        len = snprintf(md_buf, ALUA_MD_BUF_LEN, "alua_tg_pt_offline=%d\n"
                        "alua_tg_pt_status=0x%02x\n",
-                       atomic_read(&port->sep_tg_pt_secondary_offline),
-                       port->sep_tg_pt_secondary_stat);
+                       atomic_read(&lun->lun_tg_pt_secondary_offline),
+                       lun->lun_tg_pt_secondary_stat);
 
-       snprintf(path, ALUA_METADATA_PATH_LEN, "/var/target/alua/%s/%s/lun_%u",
+       snprintf(path, ALUA_METADATA_PATH_LEN, "/var/target/alua/%s/%s/lun_%llu",
                        se_tpg->se_tpg_tfo->get_fabric_name(), wwn,
-                       port->sep_lun->unpacked_lun);
+                       lun->unpacked_lun);
 
        rc = core_alua_write_tpg_metadata(path, md_buf, len);
        kfree(md_buf);
 
+out_unlock:
+       mutex_unlock(&lun->lun_tg_pt_md_mutex);
        return rc;
 }
 
 static int core_alua_set_tg_pt_secondary_state(
-       struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem,
-       struct se_port *port,
+       struct se_lun *lun,
        int explicit,
        int offline)
 {
        struct t10_alua_tg_pt_gp *tg_pt_gp;
        int trans_delay_msecs;
 
-       spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
-       tg_pt_gp = tg_pt_gp_mem->tg_pt_gp;
+       spin_lock(&lun->lun_tg_pt_gp_lock);
+       tg_pt_gp = lun->lun_tg_pt_gp;
        if (!tg_pt_gp) {
-               spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
+               spin_unlock(&lun->lun_tg_pt_gp_lock);
                pr_err("Unable to complete secondary state"
                                " transition\n");
                return -EINVAL;
@@ -1314,14 +1306,14 @@ static int core_alua_set_tg_pt_secondary_state(
        trans_delay_msecs = tg_pt_gp->tg_pt_gp_trans_delay_msecs;
        /*
         * Set the secondary ALUA target port access state to OFFLINE
-        * or release the previously secondary state for struct se_port
+        * or release the previously secondary state for struct se_lun
         */
        if (offline)
-               atomic_set(&port->sep_tg_pt_secondary_offline, 1);
+               atomic_set(&lun->lun_tg_pt_secondary_offline, 1);
        else
-               atomic_set(&port->sep_tg_pt_secondary_offline, 0);
+               atomic_set(&lun->lun_tg_pt_secondary_offline, 0);
 
-       port->sep_tg_pt_secondary_stat = (explicit) ?
+       lun->lun_tg_pt_secondary_stat = (explicit) ?
                        ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG :
                        ALUA_STATUS_ALTERED_BY_IMPLICIT_ALUA;
 
@@ -1330,7 +1322,7 @@ static int core_alua_set_tg_pt_secondary_state(
                "implicit", config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item),
                tg_pt_gp->tg_pt_gp_id, (offline) ? "OFFLINE" : "ONLINE");
 
-       spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
+       spin_unlock(&lun->lun_tg_pt_gp_lock);
        /*
         * Do the optional transition delay after we set the secondary
         * ALUA access state.
@@ -1341,11 +1333,8 @@ static int core_alua_set_tg_pt_secondary_state(
         * See if we need to update the ALUA fabric port metadata for
         * secondary state and status
         */
-       if (port->sep_tg_pt_secondary_write_md) {
-               mutex_lock(&port->sep_tg_pt_md_mutex);
-               core_alua_update_tpg_secondary_metadata(tg_pt_gp_mem, port);
-               mutex_unlock(&port->sep_tg_pt_md_mutex);
-       }
+       if (lun->lun_tg_pt_secondary_write_md)
+               core_alua_update_tpg_secondary_metadata(lun);
 
        return 0;
 }
@@ -1699,7 +1688,7 @@ struct t10_alua_tg_pt_gp *core_alua_allocate_tg_pt_gp(struct se_device *dev,
                return NULL;
        }
        INIT_LIST_HEAD(&tg_pt_gp->tg_pt_gp_list);
-       INIT_LIST_HEAD(&tg_pt_gp->tg_pt_gp_mem_list);
+       INIT_LIST_HEAD(&tg_pt_gp->tg_pt_gp_lun_list);
        mutex_init(&tg_pt_gp->tg_pt_gp_md_mutex);
        spin_lock_init(&tg_pt_gp->tg_pt_gp_lock);
        atomic_set(&tg_pt_gp->tg_pt_gp_ref_cnt, 0);
@@ -1793,32 +1782,11 @@ again:
        return 0;
 }
 
-struct t10_alua_tg_pt_gp_member *core_alua_allocate_tg_pt_gp_mem(
-       struct se_port *port)
-{
-       struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
-
-       tg_pt_gp_mem = kmem_cache_zalloc(t10_alua_tg_pt_gp_mem_cache,
-                               GFP_KERNEL);
-       if (!tg_pt_gp_mem) {
-               pr_err("Unable to allocate struct t10_alua_tg_pt_gp_member\n");
-               return ERR_PTR(-ENOMEM);
-       }
-       INIT_LIST_HEAD(&tg_pt_gp_mem->tg_pt_gp_mem_list);
-       spin_lock_init(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
-       atomic_set(&tg_pt_gp_mem->tg_pt_gp_mem_ref_cnt, 0);
-
-       tg_pt_gp_mem->tg_pt = port;
-       port->sep_alua_tg_pt_gp_mem = tg_pt_gp_mem;
-
-       return tg_pt_gp_mem;
-}
-
 void core_alua_free_tg_pt_gp(
        struct t10_alua_tg_pt_gp *tg_pt_gp)
 {
        struct se_device *dev = tg_pt_gp->tg_pt_gp_dev;
-       struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem, *tg_pt_gp_mem_tmp;
+       struct se_lun *lun, *next;
 
        /*
         * Once we have reached this point, config_item_put() has already
@@ -1849,30 +1817,24 @@ void core_alua_free_tg_pt_gp(
         * struct se_port.
         */
        spin_lock(&tg_pt_gp->tg_pt_gp_lock);
-       list_for_each_entry_safe(tg_pt_gp_mem, tg_pt_gp_mem_tmp,
-                       &tg_pt_gp->tg_pt_gp_mem_list, tg_pt_gp_mem_list) {
-               if (tg_pt_gp_mem->tg_pt_gp_assoc) {
-                       list_del(&tg_pt_gp_mem->tg_pt_gp_mem_list);
-                       tg_pt_gp->tg_pt_gp_members--;
-                       tg_pt_gp_mem->tg_pt_gp_assoc = 0;
-               }
+       list_for_each_entry_safe(lun, next,
+                       &tg_pt_gp->tg_pt_gp_lun_list, lun_tg_pt_gp_link) {
+               list_del_init(&lun->lun_tg_pt_gp_link);
+               tg_pt_gp->tg_pt_gp_members--;
+
                spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
                /*
-                * tg_pt_gp_mem is associated with a single
-                * se_port->sep_alua_tg_pt_gp_mem, and is released via
-                * core_alua_free_tg_pt_gp_mem().
-                *
                 * If the passed tg_pt_gp does NOT match the default_tg_pt_gp,
                 * assume we want to re-associate a given tg_pt_gp_mem with
                 * default_tg_pt_gp.
                 */
-               spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
+               spin_lock(&lun->lun_tg_pt_gp_lock);
                if (tg_pt_gp != dev->t10_alua.default_tg_pt_gp) {
-                       __core_alua_attach_tg_pt_gp_mem(tg_pt_gp_mem,
+                       __target_attach_tg_pt_gp(lun,
                                        dev->t10_alua.default_tg_pt_gp);
                } else
-                       tg_pt_gp_mem->tg_pt_gp = NULL;
-               spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
+                       lun->lun_tg_pt_gp = NULL;
+               spin_unlock(&lun->lun_tg_pt_gp_lock);
 
                spin_lock(&tg_pt_gp->tg_pt_gp_lock);
        }
@@ -1881,35 +1843,6 @@ void core_alua_free_tg_pt_gp(
        kmem_cache_free(t10_alua_tg_pt_gp_cache, tg_pt_gp);
 }
 
-void core_alua_free_tg_pt_gp_mem(struct se_port *port)
-{
-       struct t10_alua_tg_pt_gp *tg_pt_gp;
-       struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
-
-       tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem;
-       if (!tg_pt_gp_mem)
-               return;
-
-       while (atomic_read(&tg_pt_gp_mem->tg_pt_gp_mem_ref_cnt))
-               cpu_relax();
-
-       spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
-       tg_pt_gp = tg_pt_gp_mem->tg_pt_gp;
-       if (tg_pt_gp) {
-               spin_lock(&tg_pt_gp->tg_pt_gp_lock);
-               if (tg_pt_gp_mem->tg_pt_gp_assoc) {
-                       list_del(&tg_pt_gp_mem->tg_pt_gp_mem_list);
-                       tg_pt_gp->tg_pt_gp_members--;
-                       tg_pt_gp_mem->tg_pt_gp_assoc = 0;
-               }
-               spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
-               tg_pt_gp_mem->tg_pt_gp = NULL;
-       }
-       spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
-
-       kmem_cache_free(t10_alua_tg_pt_gp_mem_cache, tg_pt_gp_mem);
-}
-
 static struct t10_alua_tg_pt_gp *core_alua_get_tg_pt_gp_by_name(
                struct se_device *dev, const char *name)
 {
@@ -1943,50 +1876,65 @@ static void core_alua_put_tg_pt_gp_from_name(
        spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
 }
 
-/*
- * Called with struct t10_alua_tg_pt_gp_member->tg_pt_gp_mem_lock held
- */
-void __core_alua_attach_tg_pt_gp_mem(
-       struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem,
-       struct t10_alua_tg_pt_gp *tg_pt_gp)
+static void __target_attach_tg_pt_gp(struct se_lun *lun,
+               struct t10_alua_tg_pt_gp *tg_pt_gp)
 {
+       struct se_dev_entry *se_deve;
+
+       assert_spin_locked(&lun->lun_tg_pt_gp_lock);
+
        spin_lock(&tg_pt_gp->tg_pt_gp_lock);
-       tg_pt_gp_mem->tg_pt_gp = tg_pt_gp;
-       tg_pt_gp_mem->tg_pt_gp_assoc = 1;
-       list_add_tail(&tg_pt_gp_mem->tg_pt_gp_mem_list,
-                       &tg_pt_gp->tg_pt_gp_mem_list);
+       lun->lun_tg_pt_gp = tg_pt_gp;
+       list_add_tail(&lun->lun_tg_pt_gp_link, &tg_pt_gp->tg_pt_gp_lun_list);
        tg_pt_gp->tg_pt_gp_members++;
+       spin_lock(&lun->lun_deve_lock);
+       list_for_each_entry(se_deve, &lun->lun_deve_list, lun_link)
+               core_scsi3_ua_allocate(se_deve, 0x3f,
+                                      ASCQ_3FH_INQUIRY_DATA_HAS_CHANGED);
+       spin_unlock(&lun->lun_deve_lock);
        spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
 }
 
-/*
- * Called with struct t10_alua_tg_pt_gp_member->tg_pt_gp_mem_lock held
- */
-static void __core_alua_drop_tg_pt_gp_mem(
-       struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem,
-       struct t10_alua_tg_pt_gp *tg_pt_gp)
+void target_attach_tg_pt_gp(struct se_lun *lun,
+               struct t10_alua_tg_pt_gp *tg_pt_gp)
 {
+       spin_lock(&lun->lun_tg_pt_gp_lock);
+       __target_attach_tg_pt_gp(lun, tg_pt_gp);
+       spin_unlock(&lun->lun_tg_pt_gp_lock);
+}
+
+static void __target_detach_tg_pt_gp(struct se_lun *lun,
+               struct t10_alua_tg_pt_gp *tg_pt_gp)
+{
+       assert_spin_locked(&lun->lun_tg_pt_gp_lock);
+
        spin_lock(&tg_pt_gp->tg_pt_gp_lock);
-       list_del(&tg_pt_gp_mem->tg_pt_gp_mem_list);
-       tg_pt_gp_mem->tg_pt_gp = NULL;
-       tg_pt_gp_mem->tg_pt_gp_assoc = 0;
+       list_del_init(&lun->lun_tg_pt_gp_link);
        tg_pt_gp->tg_pt_gp_members--;
        spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
+
+       lun->lun_tg_pt_gp = NULL;
 }
 
-ssize_t core_alua_show_tg_pt_gp_info(struct se_port *port, char *page)
+void target_detach_tg_pt_gp(struct se_lun *lun)
+{
+       struct t10_alua_tg_pt_gp *tg_pt_gp;
+
+       spin_lock(&lun->lun_tg_pt_gp_lock);
+       tg_pt_gp = lun->lun_tg_pt_gp;
+       if (tg_pt_gp)
+               __target_detach_tg_pt_gp(lun, tg_pt_gp);
+       spin_unlock(&lun->lun_tg_pt_gp_lock);
+}
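
Both helpers above take lun->lun_tg_pt_gp_lock internally, so callers no longer juggle a tg_pt_gp_mem object at all. A hedged usage sketch; the default group is the one referenced elsewhere in this patch, while the call sites themselves are illustrative:

        /* Associate a LUN with a target port group ... */
        target_attach_tg_pt_gp(lun, dev->t10_alua.default_tg_pt_gp);

        /* ... and later drop whatever association it currently has. */
        target_detach_tg_pt_gp(lun);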
+
+ssize_t core_alua_show_tg_pt_gp_info(struct se_lun *lun, char *page)
 {
        struct config_item *tg_pt_ci;
        struct t10_alua_tg_pt_gp *tg_pt_gp;
-       struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
        ssize_t len = 0;
 
-       tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem;
-       if (!tg_pt_gp_mem)
-               return len;
-
-       spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
-       tg_pt_gp = tg_pt_gp_mem->tg_pt_gp;
+       spin_lock(&lun->lun_tg_pt_gp_lock);
+       tg_pt_gp = lun->lun_tg_pt_gp;
        if (tg_pt_gp) {
                tg_pt_ci = &tg_pt_gp->tg_pt_gp_group.cg_item;
                len += sprintf(page, "TG Port Alias: %s\nTG Port Group ID:"
@@ -1998,34 +1946,33 @@ ssize_t core_alua_show_tg_pt_gp_info(struct se_port *port, char *page)
                                        &tg_pt_gp->tg_pt_gp_alua_access_state)),
                        core_alua_dump_status(
                                tg_pt_gp->tg_pt_gp_alua_access_status),
-                       (atomic_read(&port->sep_tg_pt_secondary_offline)) ?
+                       atomic_read(&lun->lun_tg_pt_secondary_offline) ?
                        "Offline" : "None",
-                       core_alua_dump_status(port->sep_tg_pt_secondary_stat));
+                       core_alua_dump_status(lun->lun_tg_pt_secondary_stat));
        }
-       spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
+       spin_unlock(&lun->lun_tg_pt_gp_lock);
 
        return len;
 }
 
 ssize_t core_alua_store_tg_pt_gp_info(
-       struct se_port *port,
+       struct se_lun *lun,
        const char *page,
        size_t count)
 {
-       struct se_portal_group *tpg;
-       struct se_lun *lun;
-       struct se_device *dev = port->sep_lun->lun_se_dev;
+       struct se_portal_group *tpg = lun->lun_tpg;
+       /*
+        * rcu_dereference_raw protected by se_lun->lun_group symlink
+        * reference to se_device->dev_group.
+        */
+       struct se_device *dev = rcu_dereference_raw(lun->lun_se_dev);
        struct t10_alua_tg_pt_gp *tg_pt_gp = NULL, *tg_pt_gp_new = NULL;
-       struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
        unsigned char buf[TG_PT_GROUP_NAME_BUF];
        int move = 0;
 
-       tpg = port->sep_tpg;
-       lun = port->sep_lun;
-
-       tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem;
-       if (!tg_pt_gp_mem)
-               return 0;
+       if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH ||
+           (dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE))
+               return -ENODEV;
 
        if (count > TG_PT_GROUP_NAME_BUF) {
                pr_err("ALUA Target Port Group alias too large!\n");
@@ -2049,8 +1996,8 @@ ssize_t core_alua_store_tg_pt_gp_info(
                        return -ENODEV;
        }
 
-       spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
-       tg_pt_gp = tg_pt_gp_mem->tg_pt_gp;
+       spin_lock(&lun->lun_tg_pt_gp_lock);
+       tg_pt_gp = lun->lun_tg_pt_gp;
        if (tg_pt_gp) {
                /*
                 * Clearing an existing tg_pt_gp association, and replacing
@@ -2068,24 +2015,19 @@ ssize_t core_alua_store_tg_pt_gp_info(
                                        &tg_pt_gp->tg_pt_gp_group.cg_item),
                                tg_pt_gp->tg_pt_gp_id);
 
-                       __core_alua_drop_tg_pt_gp_mem(tg_pt_gp_mem, tg_pt_gp);
-                       __core_alua_attach_tg_pt_gp_mem(tg_pt_gp_mem,
+                       __target_detach_tg_pt_gp(lun, tg_pt_gp);
+                       __target_attach_tg_pt_gp(lun,
                                        dev->t10_alua.default_tg_pt_gp);
-                       spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
+                       spin_unlock(&lun->lun_tg_pt_gp_lock);
 
                        return count;
                }
-               /*
-                * Removing existing association of tg_pt_gp_mem with tg_pt_gp
-                */
-               __core_alua_drop_tg_pt_gp_mem(tg_pt_gp_mem, tg_pt_gp);
+               __target_detach_tg_pt_gp(lun, tg_pt_gp);
                move = 1;
        }
-       /*
-        * Associate tg_pt_gp_mem with tg_pt_gp_new.
-        */
-       __core_alua_attach_tg_pt_gp_mem(tg_pt_gp_mem, tg_pt_gp_new);
-       spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
+
+       __target_attach_tg_pt_gp(lun, tg_pt_gp_new);
+       spin_unlock(&lun->lun_tg_pt_gp_lock);
        pr_debug("Target_Core_ConfigFS: %s %s/tpgt_%hu/%s to ALUA"
                " Target Port Group: alua/%s, ID: %hu\n", (move) ?
                "Moving" : "Adding", tpg->se_tpg_tfo->tpg_get_wwn(tpg),
@@ -2268,11 +2210,8 @@ ssize_t core_alua_store_preferred_bit(
 
 ssize_t core_alua_show_offline_bit(struct se_lun *lun, char *page)
 {
-       if (!lun->lun_sep)
-               return -ENODEV;
-
        return sprintf(page, "%d\n",
-               atomic_read(&lun->lun_sep->sep_tg_pt_secondary_offline));
+               atomic_read(&lun->lun_tg_pt_secondary_offline));
 }
 
 ssize_t core_alua_store_offline_bit(
@@ -2280,11 +2219,16 @@ ssize_t core_alua_store_offline_bit(
        const char *page,
        size_t count)
 {
-       struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
+       /*
+        * rcu_dereference_raw protected by se_lun->lun_group symlink
+        * reference to se_device->dev_group.
+        */
+       struct se_device *dev = rcu_dereference_raw(lun->lun_se_dev);
        unsigned long tmp;
        int ret;
 
-       if (!lun->lun_sep)
+       if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH ||
+           (dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE))
                return -ENODEV;
 
        ret = kstrtoul(page, 0, &tmp);
@@ -2297,14 +2241,8 @@ ssize_t core_alua_store_offline_bit(
                                tmp);
                return -EINVAL;
        }
-       tg_pt_gp_mem = lun->lun_sep->sep_alua_tg_pt_gp_mem;
-       if (!tg_pt_gp_mem) {
-               pr_err("Unable to locate *tg_pt_gp_mem\n");
-               return -EINVAL;
-       }
 
-       ret = core_alua_set_tg_pt_secondary_state(tg_pt_gp_mem,
-                       lun->lun_sep, 0, (int)tmp);
+       ret = core_alua_set_tg_pt_secondary_state(lun, 0, (int)tmp);
        if (ret < 0)
                return -EINVAL;
 
@@ -2315,7 +2253,7 @@ ssize_t core_alua_show_secondary_status(
        struct se_lun *lun,
        char *page)
 {
-       return sprintf(page, "%d\n", lun->lun_sep->sep_tg_pt_secondary_stat);
+       return sprintf(page, "%d\n", lun->lun_tg_pt_secondary_stat);
 }
 
 ssize_t core_alua_store_secondary_status(
@@ -2338,7 +2276,7 @@ ssize_t core_alua_store_secondary_status(
                                tmp);
                return -EINVAL;
        }
-       lun->lun_sep->sep_tg_pt_secondary_stat = (int)tmp;
+       lun->lun_tg_pt_secondary_stat = (int)tmp;
 
        return count;
 }
@@ -2347,8 +2285,7 @@ ssize_t core_alua_show_secondary_write_metadata(
        struct se_lun *lun,
        char *page)
 {
-       return sprintf(page, "%d\n",
-                       lun->lun_sep->sep_tg_pt_secondary_write_md);
+       return sprintf(page, "%d\n", lun->lun_tg_pt_secondary_write_md);
 }
 
 ssize_t core_alua_store_secondary_write_metadata(
@@ -2369,7 +2306,7 @@ ssize_t core_alua_store_secondary_write_metadata(
                                " %lu\n", tmp);
                return -EINVAL;
        }
-       lun->lun_sep->sep_tg_pt_secondary_write_md = (int)tmp;
+       lun->lun_tg_pt_secondary_write_md = (int)tmp;
 
        return count;
 }
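
Across this file the old two-level port->sep_alua_tg_pt_gp_mem indirection collapses into a single pattern: take lun->lun_tg_pt_gp_lock, read lun->lun_tg_pt_gp, copy out what is needed, drop the lock. Condensed from the target_alua_state_check() hunk above (and, as its XXX note says, the snapshot is then used without holding a reference):

        struct t10_alua_tg_pt_gp *tg_pt_gp;
        int out_alua_state, nonop_delay_msecs;

        spin_lock(&lun->lun_tg_pt_gp_lock);
        tg_pt_gp = lun->lun_tg_pt_gp;
        if (tg_pt_gp) {
                /* Snapshot the fields needed while the pointer is stable. */
                out_alua_state = atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state);
                nonop_delay_msecs = tg_pt_gp->tg_pt_gp_nonop_delay_msecs;
        }
        spin_unlock(&lun->lun_tg_pt_gp_lock);
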
index 0a7d65e804045e465a488ae52c521a4669d4b9ba..9b250f9b33bfb830ff194e3b0bf028f9e0ff02fb 100644 (file)
@@ -85,7 +85,6 @@
 extern struct kmem_cache *t10_alua_lu_gp_cache;
 extern struct kmem_cache *t10_alua_lu_gp_mem_cache;
 extern struct kmem_cache *t10_alua_tg_pt_gp_cache;
-extern struct kmem_cache *t10_alua_tg_pt_gp_mem_cache;
 extern struct kmem_cache *t10_alua_lba_map_cache;
 extern struct kmem_cache *t10_alua_lba_map_mem_cache;
 
@@ -94,7 +93,7 @@ extern sense_reason_t target_emulate_set_target_port_groups(struct se_cmd *);
 extern sense_reason_t target_emulate_report_referrals(struct se_cmd *);
 extern int core_alua_check_nonop_delay(struct se_cmd *);
 extern int core_alua_do_port_transition(struct t10_alua_tg_pt_gp *,
-                               struct se_device *, struct se_port *,
+                               struct se_device *, struct se_lun *,
                                struct se_node_acl *, int, int);
 extern char *core_alua_dump_status(int);
 extern struct t10_alua_lba_map *core_alua_allocate_lba_map(
@@ -117,14 +116,11 @@ extern void core_alua_drop_lu_gp_dev(struct se_device *);
 extern struct t10_alua_tg_pt_gp *core_alua_allocate_tg_pt_gp(
                        struct se_device *, const char *, int);
 extern int core_alua_set_tg_pt_gp_id(struct t10_alua_tg_pt_gp *, u16);
-extern struct t10_alua_tg_pt_gp_member *core_alua_allocate_tg_pt_gp_mem(
-                                       struct se_port *);
 extern void core_alua_free_tg_pt_gp(struct t10_alua_tg_pt_gp *);
-extern void core_alua_free_tg_pt_gp_mem(struct se_port *);
-extern void __core_alua_attach_tg_pt_gp_mem(struct t10_alua_tg_pt_gp_member *,
-                                       struct t10_alua_tg_pt_gp *);
-extern ssize_t core_alua_show_tg_pt_gp_info(struct se_port *, char *);
-extern ssize_t core_alua_store_tg_pt_gp_info(struct se_port *, const char *,
+extern void target_detach_tg_pt_gp(struct se_lun *);
+extern void target_attach_tg_pt_gp(struct se_lun *, struct t10_alua_tg_pt_gp *);
+extern ssize_t core_alua_show_tg_pt_gp_info(struct se_lun *, char *);
+extern ssize_t core_alua_store_tg_pt_gp_info(struct se_lun *, const char *,
                                                size_t);
 extern ssize_t core_alua_show_access_type(struct t10_alua_tg_pt_gp *, char *);
 extern ssize_t core_alua_store_access_type(struct t10_alua_tg_pt_gp *,
index e7b0430a0575d0403dbb38b0fd4d41df1ccce79d..0b0de36474784987c781906243eabbed27ecab00 100644 (file)
@@ -41,7 +41,6 @@
 #include <target/target_core_backend.h>
 #include <target/target_core_fabric.h>
 #include <target/target_core_fabric_configfs.h>
-#include <target/target_core_configfs.h>
 #include <target/configfs_macros.h>
 
 #include "target_core_internal.h"
 #include "target_core_xcopy.h"
 
 #define TB_CIT_SETUP(_name, _item_ops, _group_ops, _attrs)             \
-static void target_core_setup_##_name##_cit(struct se_subsystem_api *sa) \
+static void target_core_setup_##_name##_cit(struct target_backend *tb) \
 {                                                                      \
-       struct target_backend_cits *tbc = &sa->tb_cits;                 \
-       struct config_item_type *cit = &tbc->tb_##_name##_cit;          \
+       struct config_item_type *cit = &tb->tb_##_name##_cit;           \
                                                                        \
        cit->ct_item_ops = _item_ops;                                   \
        cit->ct_group_ops = _group_ops;                                 \
        cit->ct_attrs = _attrs;                                         \
-       cit->ct_owner = sa->owner;                                      \
+       cit->ct_owner = tb->ops->owner;                                 \
+       pr_debug("Setup generic %s\n", __stringify(_name));             \
+}
+
+#define TB_CIT_SETUP_DRV(_name, _item_ops, _group_ops)                 \
+static void target_core_setup_##_name##_cit(struct target_backend *tb) \
+{                                                                      \
+       struct config_item_type *cit = &tb->tb_##_name##_cit;           \
+                                                                       \
+       cit->ct_item_ops = _item_ops;                                   \
+       cit->ct_group_ops = _group_ops;                                 \
+       cit->ct_attrs = tb->ops->tb_##_name##_attrs;                    \
+       cit->ct_owner = tb->ops->owner;                                 \
        pr_debug("Setup generic %s\n", __stringify(_name));             \
 }
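
To make the new macro concrete: a hypothetical invocation TB_CIT_SETUP_DRV(dev_attrib, &some_item_ops, NULL), where the name dev_attrib and the ops pointer are illustrative and not taken from this hunk, would expand to roughly:

static void target_core_setup_dev_attrib_cit(struct target_backend *tb)
{
        struct config_item_type *cit = &tb->tb_dev_attrib_cit;

        cit->ct_item_ops = &some_item_ops;
        cit->ct_group_ops = NULL;
        cit->ct_attrs = tb->ops->tb_dev_attrib_attrs;   /* attrs come from the backend driver */
        cit->ct_owner = tb->ops->owner;
        pr_debug("Setup generic %s\n", "dev_attrib");
}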
 
@@ -92,7 +102,7 @@ static ssize_t target_core_attr_show(struct config_item *item,
                                      char *page)
 {
        return sprintf(page, "Target Engine Core ConfigFS Infrastructure %s"
-               " on %s/%s on "UTS_RELEASE"\n", TARGET_CORE_CONFIGFS_VERSION,
+               " on %s/%s on "UTS_RELEASE"\n", TARGET_CORE_VERSION,
                utsname()->sysname, utsname()->machine);
 }
 
@@ -116,7 +126,7 @@ static struct target_fabric_configfs *target_core_get_fabric(
 
        mutex_lock(&g_tf_lock);
        list_for_each_entry(tf, &g_tf_list, tf_list) {
-               if (!strcmp(tf->tf_name, name)) {
+               if (!strcmp(tf->tf_ops->name, name)) {
                        atomic_inc(&tf->tf_access_cnt);
                        mutex_unlock(&g_tf_lock);
                        return tf;
@@ -193,29 +203,24 @@ static struct config_group *target_core_register_fabric(
                return ERR_PTR(-EINVAL);
        }
        pr_debug("Target_Core_ConfigFS: REGISTER -> Located fabric:"
-                       " %s\n", tf->tf_name);
+                       " %s\n", tf->tf_ops->name);
        /*
         * On a successful target_core_get_fabric() look, the returned
         * struct target_fabric_configfs *tf will contain a usage reference.
         */
        pr_debug("Target_Core_ConfigFS: REGISTER tfc_wwn_cit -> %p\n",
-                       &tf->tf_cit_tmpl.tfc_wwn_cit);
+                       &tf->tf_wwn_cit);
 
        tf->tf_group.default_groups = tf->tf_default_groups;
        tf->tf_group.default_groups[0] = &tf->tf_disc_group;
        tf->tf_group.default_groups[1] = NULL;
 
-       config_group_init_type_name(&tf->tf_group, name,
-                       &tf->tf_cit_tmpl.tfc_wwn_cit);
+       config_group_init_type_name(&tf->tf_group, name, &tf->tf_wwn_cit);
        config_group_init_type_name(&tf->tf_disc_group, "discovery_auth",
-                       &tf->tf_cit_tmpl.tfc_discovery_cit);
+                       &tf->tf_discovery_cit);
 
        pr_debug("Target_Core_ConfigFS: REGISTER -> Allocated Fabric:"
                        " %s\n", tf->tf_group.cg_item.ci_name);
-       tf->tf_fabric = &tf->tf_group.cg_item;
-       pr_debug("Target_Core_ConfigFS: REGISTER -> Set tf->tf_fabric"
-                       " for %s\n", name);
-
        return &tf->tf_group;
 }
 
@@ -236,13 +241,9 @@ static void target_core_deregister_fabric(
                " tf list\n", config_item_name(item));
 
        pr_debug("Target_Core_ConfigFS: DEREGISTER -> located fabric:"
-                       " %s\n", tf->tf_name);
+                       " %s\n", tf->tf_ops->name);
        atomic_dec(&tf->tf_access_cnt);
 
-       pr_debug("Target_Core_ConfigFS: DEREGISTER -> Releasing"
-                       " tf->tf_fabric for %s\n", tf->tf_name);
-       tf->tf_fabric = NULL;
-
        pr_debug("Target_Core_ConfigFS: DEREGISTER -> Releasing ci"
                        " %s\n", config_item_name(item));
 
@@ -318,10 +319,6 @@ static int target_fabric_tf_ops_check(const struct target_core_fabric_ops *tfo)
                pr_err("Missing tfo->get_fabric_name()\n");
                return -EINVAL;
        }
-       if (!tfo->get_fabric_proto_ident) {
-               pr_err("Missing tfo->get_fabric_proto_ident()\n");
-               return -EINVAL;
-       }
        if (!tfo->tpg_get_wwn) {
                pr_err("Missing tfo->tpg_get_wwn()\n");
                return -EINVAL;
@@ -330,18 +327,6 @@ static int target_fabric_tf_ops_check(const struct target_core_fabric_ops *tfo)
                pr_err("Missing tfo->tpg_get_tag()\n");
                return -EINVAL;
        }
-       if (!tfo->tpg_get_default_depth) {
-               pr_err("Missing tfo->tpg_get_default_depth()\n");
-               return -EINVAL;
-       }
-       if (!tfo->tpg_get_pr_transport_id) {
-               pr_err("Missing tfo->tpg_get_pr_transport_id()\n");
-               return -EINVAL;
-       }
-       if (!tfo->tpg_get_pr_transport_id_len) {
-               pr_err("Missing tfo->tpg_get_pr_transport_id_len()\n");
-               return -EINVAL;
-       }
        if (!tfo->tpg_check_demo_mode) {
                pr_err("Missing tfo->tpg_check_demo_mode()\n");
                return -EINVAL;
@@ -358,14 +343,6 @@ static int target_fabric_tf_ops_check(const struct target_core_fabric_ops *tfo)
                pr_err("Missing tfo->tpg_check_prod_mode_write_protect()\n");
                return -EINVAL;
        }
-       if (!tfo->tpg_alloc_fabric_acl) {
-               pr_err("Missing tfo->tpg_alloc_fabric_acl()\n");
-               return -EINVAL;
-       }
-       if (!tfo->tpg_release_fabric_acl) {
-               pr_err("Missing tfo->tpg_release_fabric_acl()\n");
-               return -EINVAL;
-       }
        if (!tfo->tpg_get_inst_index) {
                pr_err("Missing tfo->tpg_get_inst_index()\n");
                return -EINVAL;
@@ -398,10 +375,6 @@ static int target_fabric_tf_ops_check(const struct target_core_fabric_ops *tfo)
                pr_err("Missing tfo->set_default_node_attributes()\n");
                return -EINVAL;
        }
-       if (!tfo->get_task_tag) {
-               pr_err("Missing tfo->get_task_tag()\n");
-               return -EINVAL;
-       }
        if (!tfo->get_cmd_state) {
                pr_err("Missing tfo->get_cmd_state()\n");
                return -EINVAL;
@@ -464,15 +437,7 @@ int target_register_template(const struct target_core_fabric_ops *fo)
 
        INIT_LIST_HEAD(&tf->tf_list);
        atomic_set(&tf->tf_access_cnt, 0);
-
-       /*
-        * Setup the default generic struct config_item_type's (cits) in
-        * struct target_fabric_configfs->tf_cit_tmpl
-        */
-       tf->tf_module = fo->module;
-       snprintf(tf->tf_name, TARGET_FABRIC_NAME_SIZE, "%s", fo->name);
-
-       tf->tf_ops = *fo;
+       tf->tf_ops = fo;
        target_fabric_setup_cits(tf);
 
        mutex_lock(&g_tf_lock);
@@ -489,7 +454,7 @@ void target_unregister_template(const struct target_core_fabric_ops *fo)
 
        mutex_lock(&g_tf_lock);
        list_for_each_entry(t, &g_tf_list, tf_list) {
-               if (!strcmp(t->tf_name, fo->name)) {
+               if (!strcmp(t->tf_ops->name, fo->name)) {
                        BUG_ON(atomic_read(&t->tf_access_cnt));
                        list_del(&t->tf_list);
                        kfree(t);
@@ -505,16 +470,605 @@ EXPORT_SYMBOL(target_unregister_template);
 //############################################################################*/
 
 /* Start functions for struct config_item_type tb_dev_attrib_cit */
+#define DEF_TB_DEV_ATTRIB_SHOW(_name)                                  \
+static ssize_t show_##_name(struct se_dev_attrib *da, char *page)      \
+{                                                                      \
+       return snprintf(page, PAGE_SIZE, "%u\n", da->_name);            \
+}
+
+DEF_TB_DEV_ATTRIB_SHOW(emulate_model_alias);
+DEF_TB_DEV_ATTRIB_SHOW(emulate_dpo);
+DEF_TB_DEV_ATTRIB_SHOW(emulate_fua_write);
+DEF_TB_DEV_ATTRIB_SHOW(emulate_fua_read);
+DEF_TB_DEV_ATTRIB_SHOW(emulate_write_cache);
+DEF_TB_DEV_ATTRIB_SHOW(emulate_ua_intlck_ctrl);
+DEF_TB_DEV_ATTRIB_SHOW(emulate_tas);
+DEF_TB_DEV_ATTRIB_SHOW(emulate_tpu);
+DEF_TB_DEV_ATTRIB_SHOW(emulate_tpws);
+DEF_TB_DEV_ATTRIB_SHOW(emulate_caw);
+DEF_TB_DEV_ATTRIB_SHOW(emulate_3pc);
+DEF_TB_DEV_ATTRIB_SHOW(pi_prot_type);
+DEF_TB_DEV_ATTRIB_SHOW(hw_pi_prot_type);
+DEF_TB_DEV_ATTRIB_SHOW(pi_prot_format);
+DEF_TB_DEV_ATTRIB_SHOW(enforce_pr_isids);
+DEF_TB_DEV_ATTRIB_SHOW(is_nonrot);
+DEF_TB_DEV_ATTRIB_SHOW(emulate_rest_reord);
+DEF_TB_DEV_ATTRIB_SHOW(force_pr_aptpl);
+DEF_TB_DEV_ATTRIB_SHOW(hw_block_size);
+DEF_TB_DEV_ATTRIB_SHOW(block_size);
+DEF_TB_DEV_ATTRIB_SHOW(hw_max_sectors);
+DEF_TB_DEV_ATTRIB_SHOW(optimal_sectors);
+DEF_TB_DEV_ATTRIB_SHOW(hw_queue_depth);
+DEF_TB_DEV_ATTRIB_SHOW(queue_depth);
+DEF_TB_DEV_ATTRIB_SHOW(max_unmap_lba_count);
+DEF_TB_DEV_ATTRIB_SHOW(max_unmap_block_desc_count);
+DEF_TB_DEV_ATTRIB_SHOW(unmap_granularity);
+DEF_TB_DEV_ATTRIB_SHOW(unmap_granularity_alignment);
+DEF_TB_DEV_ATTRIB_SHOW(max_write_same_len);
+
+#define DEF_TB_DEV_ATTRIB_STORE_U32(_name)                             \
+static ssize_t store_##_name(struct se_dev_attrib *da, const char *page,\
+               size_t count)                                           \
+{                                                                      \
+       u32 val;                                                        \
+       int ret;                                                        \
+                                                                       \
+       ret = kstrtou32(page, 0, &val);                                 \
+       if (ret < 0)                                                    \
+               return ret;                                             \
+       da->_name = val;                                                \
+       return count;                                                   \
+}
+
+DEF_TB_DEV_ATTRIB_STORE_U32(max_unmap_lba_count);
+DEF_TB_DEV_ATTRIB_STORE_U32(max_unmap_block_desc_count);
+DEF_TB_DEV_ATTRIB_STORE_U32(unmap_granularity);
+DEF_TB_DEV_ATTRIB_STORE_U32(unmap_granularity_alignment);
+DEF_TB_DEV_ATTRIB_STORE_U32(max_write_same_len);
+
+#define DEF_TB_DEV_ATTRIB_STORE_BOOL(_name)                            \
+static ssize_t store_##_name(struct se_dev_attrib *da, const char *page,\
+               size_t count)                                           \
+{                                                                      \
+       bool flag;                                                      \
+       int ret;                                                        \
+                                                                       \
+       ret = strtobool(page, &flag);                                   \
+       if (ret < 0)                                                    \
+               return ret;                                             \
+       da->_name = flag;                                               \
+       return count;                                                   \
+}
+
+DEF_TB_DEV_ATTRIB_STORE_BOOL(emulate_fua_write);
+DEF_TB_DEV_ATTRIB_STORE_BOOL(emulate_caw);
+DEF_TB_DEV_ATTRIB_STORE_BOOL(emulate_3pc);
+DEF_TB_DEV_ATTRIB_STORE_BOOL(enforce_pr_isids);
+DEF_TB_DEV_ATTRIB_STORE_BOOL(is_nonrot);
+
+#define DEF_TB_DEV_ATTRIB_STORE_STUB(_name)                            \
+static ssize_t store_##_name(struct se_dev_attrib *da, const char *page,\
+               size_t count)                                           \
+{                                                                      \
+       printk_once(KERN_WARNING                                        \
+               "ignoring deprecated ##_name## attribute\n");   \
+       return count;                                                   \
+}
+
+DEF_TB_DEV_ATTRIB_STORE_STUB(emulate_dpo);
+DEF_TB_DEV_ATTRIB_STORE_STUB(emulate_fua_read);
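
As a concrete example of what these helper macros generate, DEF_TB_DEV_ATTRIB_SHOW(max_write_same_len) together with DEF_TB_DEV_ATTRIB_STORE_U32(max_write_same_len) expand to the following pair of configfs callbacks (hand expansion for illustration, not extra patch content):

        static ssize_t show_max_write_same_len(struct se_dev_attrib *da, char *page)
        {
                return snprintf(page, PAGE_SIZE, "%u\n", da->max_write_same_len);
        }

        static ssize_t store_max_write_same_len(struct se_dev_attrib *da,
                        const char *page, size_t count)
        {
                u32 val;
                int ret;

                ret = kstrtou32(page, 0, &val);
                if (ret < 0)
                        return ret;
                da->max_write_same_len = val;
                return count;
        }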
+
+static void dev_set_t10_wwn_model_alias(struct se_device *dev)
+{
+       const char *configname;
+
+       configname = config_item_name(&dev->dev_group.cg_item);
+       if (strlen(configname) >= 16) {
+               pr_warn("dev[%p]: Backstore name '%s' is too long for "
+                       "INQUIRY_MODEL, truncating to 16 bytes\n", dev,
+                       configname);
+       }
+       snprintf(&dev->t10_wwn.model[0], 16, "%s", configname);
+}
+
+static ssize_t store_emulate_model_alias(struct se_dev_attrib *da,
+               const char *page, size_t count)
+{
+       struct se_device *dev = da->da_dev;
+       bool flag;
+       int ret;
+
+       if (dev->export_count) {
+               pr_err("dev[%p]: Unable to change model alias"
+                       " while export_count is %d\n",
+                       dev, dev->export_count);
+               return -EINVAL;
+       }
+
+       ret = strtobool(page, &flag);
+       if (ret < 0)
+               return ret;
+
+       if (flag) {
+               dev_set_t10_wwn_model_alias(dev);
+       } else {
+               strncpy(&dev->t10_wwn.model[0],
+                       dev->transport->inquiry_prod, 16);
+       }
+       da->emulate_model_alias = flag;
+       return count;
+}
+
+static ssize_t store_emulate_write_cache(struct se_dev_attrib *da,
+               const char *page, size_t count)
+{
+       bool flag;
+       int ret;
+
+       ret = strtobool(page, &flag);
+       if (ret < 0)
+               return ret;
+
+       if (flag && da->da_dev->transport->get_write_cache) {
+               pr_err("emulate_write_cache not supported for this device\n");
+               return -EINVAL;
+       }
+
+       da->emulate_write_cache = flag;
+       pr_debug("dev[%p]: SE Device WRITE_CACHE_EMULATION flag: %d\n",
+                       da->da_dev, flag);
+       return count;
+}
+
+static ssize_t store_emulate_ua_intlck_ctrl(struct se_dev_attrib *da,
+               const char *page, size_t count)
+{
+       u32 val;
+       int ret;
+
+       ret = kstrtou32(page, 0, &val);
+       if (ret < 0)
+               return ret;
+
+       if (val != 0 && val != 1 && val != 2) {
+               pr_err("Illegal value %d\n", val);
+               return -EINVAL;
+       }
+
+       if (da->da_dev->export_count) {
+               pr_err("dev[%p]: Unable to change SE Device"
+                       " UA_INTRLCK_CTRL while export_count is %d\n",
+                       da->da_dev, da->da_dev->export_count);
+               return -EINVAL;
+       }
+       da->emulate_ua_intlck_ctrl = val;
+       pr_debug("dev[%p]: SE Device UA_INTRLCK_CTRL flag: %d\n",
+               da->da_dev, val);
+       return count;
+}
+
+static ssize_t store_emulate_tas(struct se_dev_attrib *da,
+               const char *page, size_t count)
+{
+       bool flag;
+       int ret;
+
+       ret = strtobool(page, &flag);
+       if (ret < 0)
+               return ret;
+
+       if (da->da_dev->export_count) {
+               pr_err("dev[%p]: Unable to change SE Device TAS while"
+                       " export_count is %d\n",
+                       da->da_dev, da->da_dev->export_count);
+               return -EINVAL;
+       }
+       da->emulate_tas = flag;
+       pr_debug("dev[%p]: SE Device TASK_ABORTED status bit: %s\n",
+               da->da_dev, flag ? "Enabled" : "Disabled");
+
+       return count;
+}
+
+static ssize_t store_emulate_tpu(struct se_dev_attrib *da,
+               const char *page, size_t count)
+{
+       bool flag;
+       int ret;
+
+       ret = strtobool(page, &flag);
+       if (ret < 0)
+               return ret;
+
+       /*
+        * We expect this value to be non-zero when generic Block Layer
+        * Discard supported is detected iblock_create_virtdevice().
+        */
+       if (flag && !da->max_unmap_block_desc_count) {
+               pr_err("Generic Block Discard not supported\n");
+               return -ENOSYS;
+       }
+
+       da->emulate_tpu = flag;
+       pr_debug("dev[%p]: SE Device Thin Provisioning UNMAP bit: %d\n",
+               da->da_dev, flag);
+       return count;
+}
+
+static ssize_t store_emulate_tpws(struct se_dev_attrib *da,
+               const char *page, size_t count)
+{
+       bool flag;
+       int ret;
+
+       ret = strtobool(page, &flag);
+       if (ret < 0)
+               return ret;
+
+       /*
+        * We expect this value to be non-zero when generic Block Layer
+        * Discard supported is detected iblock_create_virtdevice().
+        */
+       if (flag && !da->max_unmap_block_desc_count) {
+               pr_err("Generic Block Discard not supported\n");
+               return -ENOSYS;
+       }
+
+       da->emulate_tpws = flag;
+       pr_debug("dev[%p]: SE Device Thin Provisioning WRITE_SAME: %d\n",
+                               da->da_dev, flag);
+       return count;
+}
+
+static ssize_t store_pi_prot_type(struct se_dev_attrib *da,
+               const char *page, size_t count)
+{
+       int old_prot = da->pi_prot_type, ret;
+       struct se_device *dev = da->da_dev;
+       u32 flag;
+
+       ret = kstrtou32(page, 0, &flag);
+       if (ret < 0)
+               return ret;
+
+       if (flag != 0 && flag != 1 && flag != 2 && flag != 3) {
+               pr_err("Illegal value %d for pi_prot_type\n", flag);
+               return -EINVAL;
+       }
+       if (flag == 2) {
+               pr_err("DIF TYPE2 protection currently not supported\n");
+               return -ENOSYS;
+       }
+       if (da->hw_pi_prot_type) {
+               pr_warn("DIF protection enabled on underlying hardware,"
+                       " ignoring\n");
+               return count;
+       }
+       if (!dev->transport->init_prot || !dev->transport->free_prot) {
+               /* 0 is only allowed value for non-supporting backends */
+               if (flag == 0)
+                       return 0;
+
+               pr_err("DIF protection not supported by backend: %s\n",
+                      dev->transport->name);
+               return -ENOSYS;
+       }
+       if (!(dev->dev_flags & DF_CONFIGURED)) {
+               pr_err("DIF protection requires device to be configured\n");
+               return -ENODEV;
+       }
+       if (dev->export_count) {
+               pr_err("dev[%p]: Unable to change SE Device PROT type while"
+                      " export_count is %d\n", dev, dev->export_count);
+               return -EINVAL;
+       }
+
+       da->pi_prot_type = flag;
+
+       if (flag && !old_prot) {
+               ret = dev->transport->init_prot(dev);
+               if (ret) {
+                       da->pi_prot_type = old_prot;
+                       return ret;
+               }
+
+       } else if (!flag && old_prot) {
+               dev->transport->free_prot(dev);
+       }
+
+       pr_debug("dev[%p]: SE Device Protection Type: %d\n", dev, flag);
+       return count;
+}
+
+static ssize_t store_pi_prot_format(struct se_dev_attrib *da,
+               const char *page, size_t count)
+{
+       struct se_device *dev = da->da_dev;
+       bool flag;
+       int ret;
+
+       ret = strtobool(page, &flag);
+       if (ret < 0)
+               return ret;
+
+       if (!flag)
+               return count;
+
+       if (!dev->transport->format_prot) {
+               pr_err("DIF protection format not supported by backend %s\n",
+                      dev->transport->name);
+               return -ENOSYS;
+       }
+       if (!(dev->dev_flags & DF_CONFIGURED)) {
+               pr_err("DIF protection format requires device to be configured\n");
+               return -ENODEV;
+       }
+       if (dev->export_count) {
+               pr_err("dev[%p]: Unable to format SE Device PROT type while"
+                      " export_count is %d\n", dev, dev->export_count);
+               return -EINVAL;
+       }
+
+       ret = dev->transport->format_prot(dev);
+       if (ret)
+               return ret;
+
+       pr_debug("dev[%p]: SE Device Protection Format complete\n", dev);
+       return count;
+}
+
+static ssize_t store_force_pr_aptpl(struct se_dev_attrib *da,
+               const char *page, size_t count)
+{
+       bool flag;
+       int ret;
+
+       ret = strtobool(page, &flag);
+       if (ret < 0)
+               return ret;
+       if (da->da_dev->export_count) {
+               pr_err("dev[%p]: Unable to set force_pr_aptpl while"
+                      " export_count is %d\n",
+                      da->da_dev, da->da_dev->export_count);
+               return -EINVAL;
+       }
+
+       da->force_pr_aptpl = flag;
+       pr_debug("dev[%p]: SE Device force_pr_aptpl: %d\n", da->da_dev, flag);
+       return count;
+}
+
+static ssize_t store_emulate_rest_reord(struct se_dev_attrib *da,
+               const char *page, size_t count)
+{
+       bool flag;
+       int ret;
+
+       ret = strtobool(page, &flag);
+       if (ret < 0)
+               return ret;
+
+       if (flag != 0) {
+               printk(KERN_ERR "dev[%p]: SE Device emulation of restricted"
+                       " reordering not implemented\n", da->da_dev);
+               return -ENOSYS;
+       }
+       da->emulate_rest_reord = flag;
+       pr_debug("dev[%p]: SE Device emulate_rest_reord: %d\n",
+               da->da_dev, flag);
+       return count;
+}
+
+/*
+ * Note, this can only be called on unexported SE Device Object.
+ */
+static ssize_t store_queue_depth(struct se_dev_attrib *da,
+               const char *page, size_t count)
+{
+       struct se_device *dev = da->da_dev;
+       u32 val;
+       int ret;
+
+       ret = kstrtou32(page, 0, &val);
+       if (ret < 0)
+               return ret;
+
+       if (dev->export_count) {
+               pr_err("dev[%p]: Unable to change SE Device TCQ while"
+                       " export_count is %d\n",
+                       dev, dev->export_count);
+               return -EINVAL;
+       }
+       if (!val) {
+               pr_err("dev[%p]: Illegal ZERO value for queue_depth\n", dev);
+               return -EINVAL;
+       }
+
+       if (val > dev->dev_attrib.queue_depth) {
+               if (val > dev->dev_attrib.hw_queue_depth) {
+                       pr_err("dev[%p]: Passed queue_depth:"
+                               " %u exceeds TCM/SE_Device MAX"
+                               " TCQ: %u\n", dev, val,
+                               dev->dev_attrib.hw_queue_depth);
+                       return -EINVAL;
+               }
+       }
+       da->queue_depth = dev->queue_depth = val;
+       pr_debug("dev[%p]: SE Device TCQ Depth changed to: %u\n", dev, val);
+       return count;
+}
+
+static ssize_t store_optimal_sectors(struct se_dev_attrib *da,
+               const char *page, size_t count)
+{
+       u32 val;
+       int ret;
+
+       ret = kstrtou32(page, 0, &val);
+       if (ret < 0)
+               return ret;
+
+       if (da->da_dev->export_count) {
+               pr_err("dev[%p]: Unable to change SE Device"
+                       " optimal_sectors while export_count is %d\n",
+                       da->da_dev, da->da_dev->export_count);
+               return -EINVAL;
+       }
+       if (val > da->hw_max_sectors) {
+               pr_err("dev[%p]: Passed optimal_sectors %u cannot be"
+                       " greater than hw_max_sectors: %u\n",
+                       da->da_dev, val, da->hw_max_sectors);
+               return -EINVAL;
+       }
+
+       da->optimal_sectors = val;
+       pr_debug("dev[%p]: SE Device optimal_sectors changed to %u\n",
+                       da->da_dev, val);
+       return count;
+}
+
+static ssize_t store_block_size(struct se_dev_attrib *da,
+               const char *page, size_t count)
+{
+       u32 val;
+       int ret;
+
+       ret = kstrtou32(page, 0, &val);
+       if (ret < 0)
+               return ret;
+
+       if (da->da_dev->export_count) {
+               pr_err("dev[%p]: Unable to change SE Device block_size"
+                       " while export_count is %d\n",
+                       da->da_dev, da->da_dev->export_count);
+               return -EINVAL;
+       }
+
+       if (val != 512 && val != 1024 && val != 2048 && val != 4096) {
+               pr_err("dev[%p]: Illegal value for block_device: %u"
+                       " for SE device, must be 512, 1024, 2048 or 4096\n",
+                       da->da_dev, val);
+               return -EINVAL;
+       }
+
+       da->block_size = val;
+       if (da->max_bytes_per_io)
+               da->hw_max_sectors = da->max_bytes_per_io / val;
+
+       pr_debug("dev[%p]: SE Device block_size changed to %u\n",
+                       da->da_dev, val);
+       return count;
+}
+
+CONFIGFS_EATTR_STRUCT(target_backend_dev_attrib, se_dev_attrib);
+#define TB_DEV_ATTR(_backend, _name, _mode)                            \
+static struct target_backend_dev_attrib_attribute _backend##_dev_attrib_##_name = \
+       __CONFIGFS_EATTR(_name, _mode,                                  \
+       show_##_name,                                                   \
+       store_##_name);
+
+#define TB_DEV_ATTR_RO(_backend, _name)                                        \
+static struct target_backend_dev_attrib_attribute _backend##_dev_attrib_##_name = \
+       __CONFIGFS_EATTR_RO(_name,                                      \
+       show_##_name);
+
+TB_DEV_ATTR(target_core, emulate_model_alias, S_IRUGO | S_IWUSR);
+TB_DEV_ATTR(target_core, emulate_dpo, S_IRUGO | S_IWUSR);
+TB_DEV_ATTR(target_core, emulate_fua_write, S_IRUGO | S_IWUSR);
+TB_DEV_ATTR(target_core, emulate_fua_read, S_IRUGO | S_IWUSR);
+TB_DEV_ATTR(target_core, emulate_write_cache, S_IRUGO | S_IWUSR);
+TB_DEV_ATTR(target_core, emulate_ua_intlck_ctrl, S_IRUGO | S_IWUSR);
+TB_DEV_ATTR(target_core, emulate_tas, S_IRUGO | S_IWUSR);
+TB_DEV_ATTR(target_core, emulate_tpu, S_IRUGO | S_IWUSR);
+TB_DEV_ATTR(target_core, emulate_tpws, S_IRUGO | S_IWUSR);
+TB_DEV_ATTR(target_core, emulate_caw, S_IRUGO | S_IWUSR);
+TB_DEV_ATTR(target_core, emulate_3pc, S_IRUGO | S_IWUSR);
+TB_DEV_ATTR(target_core, pi_prot_type, S_IRUGO | S_IWUSR);
+TB_DEV_ATTR_RO(target_core, hw_pi_prot_type);
+TB_DEV_ATTR(target_core, pi_prot_format, S_IRUGO | S_IWUSR);
+TB_DEV_ATTR(target_core, enforce_pr_isids, S_IRUGO | S_IWUSR);
+TB_DEV_ATTR(target_core, is_nonrot, S_IRUGO | S_IWUSR);
+TB_DEV_ATTR(target_core, emulate_rest_reord, S_IRUGO | S_IWUSR);
+TB_DEV_ATTR(target_core, force_pr_aptpl, S_IRUGO | S_IWUSR)
+TB_DEV_ATTR_RO(target_core, hw_block_size);
+TB_DEV_ATTR(target_core, block_size, S_IRUGO | S_IWUSR)
+TB_DEV_ATTR_RO(target_core, hw_max_sectors);
+TB_DEV_ATTR(target_core, optimal_sectors, S_IRUGO | S_IWUSR);
+TB_DEV_ATTR_RO(target_core, hw_queue_depth);
+TB_DEV_ATTR(target_core, queue_depth, S_IRUGO | S_IWUSR);
+TB_DEV_ATTR(target_core, max_unmap_lba_count, S_IRUGO | S_IWUSR);
+TB_DEV_ATTR(target_core, max_unmap_block_desc_count, S_IRUGO | S_IWUSR);
+TB_DEV_ATTR(target_core, unmap_granularity, S_IRUGO | S_IWUSR);
+TB_DEV_ATTR(target_core, unmap_granularity_alignment, S_IRUGO | S_IWUSR);
+TB_DEV_ATTR(target_core, max_write_same_len, S_IRUGO | S_IWUSR);
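
Each TB_DEV_ATTR()/TB_DEV_ATTR_RO() line above mechanically defines one attribute object named after the backend prefix and the field; for instance the queue_depth entry expands to (illustration only, not an extra hunk):

        static struct target_backend_dev_attrib_attribute target_core_dev_attrib_queue_depth =
                __CONFIGFS_EATTR(queue_depth, S_IRUGO | S_IWUSR,
                show_queue_depth,
                store_queue_depth);

These are the objects whose .attr members are collected into the sbc_attrib_attrs[] and passthrough_attrib_attrs[] tables below.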
 
 CONFIGFS_EATTR_STRUCT(target_core_dev_attrib, se_dev_attrib);
 CONFIGFS_EATTR_OPS(target_core_dev_attrib, se_dev_attrib, da_group);
 
+/*
+ * dev_attrib attributes for devices using the target core SBC/SPC
+ * interpreter.  Any backend using spc_parse_cdb should be using
+ * these.
+ */
+struct configfs_attribute *sbc_attrib_attrs[] = {
+       &target_core_dev_attrib_emulate_model_alias.attr,
+       &target_core_dev_attrib_emulate_dpo.attr,
+       &target_core_dev_attrib_emulate_fua_write.attr,
+       &target_core_dev_attrib_emulate_fua_read.attr,
+       &target_core_dev_attrib_emulate_write_cache.attr,
+       &target_core_dev_attrib_emulate_ua_intlck_ctrl.attr,
+       &target_core_dev_attrib_emulate_tas.attr,
+       &target_core_dev_attrib_emulate_tpu.attr,
+       &target_core_dev_attrib_emulate_tpws.attr,
+       &target_core_dev_attrib_emulate_caw.attr,
+       &target_core_dev_attrib_emulate_3pc.attr,
+       &target_core_dev_attrib_pi_prot_type.attr,
+       &target_core_dev_attrib_hw_pi_prot_type.attr,
+       &target_core_dev_attrib_pi_prot_format.attr,
+       &target_core_dev_attrib_enforce_pr_isids.attr,
+       &target_core_dev_attrib_is_nonrot.attr,
+       &target_core_dev_attrib_emulate_rest_reord.attr,
+       &target_core_dev_attrib_force_pr_aptpl.attr,
+       &target_core_dev_attrib_hw_block_size.attr,
+       &target_core_dev_attrib_block_size.attr,
+       &target_core_dev_attrib_hw_max_sectors.attr,
+       &target_core_dev_attrib_optimal_sectors.attr,
+       &target_core_dev_attrib_hw_queue_depth.attr,
+       &target_core_dev_attrib_queue_depth.attr,
+       &target_core_dev_attrib_max_unmap_lba_count.attr,
+       &target_core_dev_attrib_max_unmap_block_desc_count.attr,
+       &target_core_dev_attrib_unmap_granularity.attr,
+       &target_core_dev_attrib_unmap_granularity_alignment.attr,
+       &target_core_dev_attrib_max_write_same_len.attr,
+       NULL,
+};
+EXPORT_SYMBOL(sbc_attrib_attrs);
+
+TB_DEV_ATTR_RO(target_pt, hw_pi_prot_type);
+TB_DEV_ATTR_RO(target_pt, hw_block_size);
+TB_DEV_ATTR_RO(target_pt, hw_max_sectors);
+TB_DEV_ATTR_RO(target_pt, hw_queue_depth);
+
+/*
+ * Minimal dev_attrib attributes for devices passing through CDBs.
+ * In this case we only provide a few read-only attributes for
+ * backwards compatibility.
+ */
+struct configfs_attribute *passthrough_attrib_attrs[] = {
+       &target_pt_dev_attrib_hw_pi_prot_type.attr,
+       &target_pt_dev_attrib_hw_block_size.attr,
+       &target_pt_dev_attrib_hw_max_sectors.attr,
+       &target_pt_dev_attrib_hw_queue_depth.attr,
+       NULL,
+};
+EXPORT_SYMBOL(passthrough_attrib_attrs);
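
The two exported tables are meant to be plugged into a backend's ops so that TB_CIT_SETUP_DRV(dev_attrib, ...) above can install them as ct_attrs. Assuming the backend ops structure introduced elsewhere in this series is named struct target_backend_ops (only struct target_backend is visible in this hunk), the wiring in a backend driver would look roughly like this sketch:

        /* illustrative sketch of a backend's ops; not part of this patch */
        static const struct target_backend_ops example_sbc_backend_ops = {
                .name                   = "example_sbc",        /* hypothetical backend name */
                .owner                  = THIS_MODULE,
                /* ... device alloc/configure/free callbacks elided ... */
                .tb_dev_attrib_attrs    = sbc_attrib_attrs,     /* full SBC/SPC attribute set */
        };

        /* a pass-through backend (CDBs handled by the device itself) would instead set: */
        /*      .tb_dev_attrib_attrs    = passthrough_attrib_attrs,     */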
+
 static struct configfs_item_operations target_core_dev_attrib_ops = {
        .show_attribute         = target_core_dev_attrib_attr_show,
        .store_attribute        = target_core_dev_attrib_attr_store,
 };
 
-TB_CIT_SETUP(dev_attrib, &target_core_dev_attrib_ops, NULL, NULL);
+TB_CIT_SETUP_DRV(dev_attrib, &target_core_dev_attrib_ops, NULL);
 
 /* End functions for struct config_item_type tb_dev_attrib_cit */
 
@@ -862,7 +1416,6 @@ static ssize_t target_core_dev_pr_show_attr_res_pr_holder_tg_port(
                struct se_device *dev, char *page)
 {
        struct se_node_acl *se_nacl;
-       struct se_lun *lun;
        struct se_portal_group *se_tpg;
        struct t10_pr_registration *pr_reg;
        const struct target_core_fabric_ops *tfo;
@@ -877,7 +1430,6 @@ static ssize_t target_core_dev_pr_show_attr_res_pr_holder_tg_port(
 
        se_nacl = pr_reg->pr_reg_nacl;
        se_tpg = se_nacl->se_tpg;
-       lun = pr_reg->pr_reg_tg_pt_lun;
        tfo = se_tpg->se_tpg_tfo;
 
        len += sprintf(page+len, "SPC-3 Reservation: %s"
@@ -885,9 +1437,9 @@ static ssize_t target_core_dev_pr_show_attr_res_pr_holder_tg_port(
                tfo->tpg_get_wwn(se_tpg));
        len += sprintf(page+len, "SPC-3 Reservation: Relative Port"
                " Identifier Tag: %hu %s Portal Group Tag: %hu"
-               " %s Logical Unit: %u\n", lun->lun_sep->sep_rtpi,
+               " %s Logical Unit: %llu\n", pr_reg->tg_pt_sep_rtpi,
                tfo->get_fabric_name(), tfo->tpg_get_tag(se_tpg),
-               tfo->get_fabric_name(), lun->unpacked_lun);
+               tfo->get_fabric_name(), pr_reg->pr_aptpl_target_lun);
 
 out_unlock:
        spin_unlock(&dev->dev_reservation_lock);
@@ -1012,12 +1564,12 @@ static match_table_t tokens = {
        {Opt_res_type, "res_type=%d"},
        {Opt_res_scope, "res_scope=%d"},
        {Opt_res_all_tg_pt, "res_all_tg_pt=%d"},
-       {Opt_mapped_lun, "mapped_lun=%d"},
+       {Opt_mapped_lun, "mapped_lun=%lld"},
        {Opt_target_fabric, "target_fabric=%s"},
        {Opt_target_node, "target_node=%s"},
        {Opt_tpgt, "tpgt=%d"},
        {Opt_port_rtpi, "port_rtpi=%d"},
-       {Opt_target_lun, "target_lun=%d"},
+       {Opt_target_lun, "target_lun=%lld"},
        {Opt_err, NULL}
 };
 
@@ -1032,10 +1584,10 @@ static ssize_t target_core_dev_pr_store_attr_res_aptpl_metadata(
        substring_t args[MAX_OPT_ARGS];
        unsigned long long tmp_ll;
        u64 sa_res_key = 0;
-       u32 mapped_lun = 0, target_lun = 0;
+       u64 mapped_lun = 0, target_lun = 0;
        int ret = -1, res_holder = 0, all_tg_pt = 0, arg, token;
-       u16 port_rpti = 0, tpgt = 0;
-       u8 type = 0, scope;
+       u16 tpgt = 0;
+       u8 type = 0;
 
        if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)
                return 0;
@@ -1115,7 +1667,6 @@ static ssize_t target_core_dev_pr_store_attr_res_aptpl_metadata(
                        break;
                case Opt_res_scope:
                        match_int(args, &arg);
-                       scope = (u8)arg;
                        break;
                case Opt_res_all_tg_pt:
                        match_int(args, &arg);
@@ -1123,7 +1674,7 @@ static ssize_t target_core_dev_pr_store_attr_res_aptpl_metadata(
                        break;
                case Opt_mapped_lun:
                        match_int(args, &arg);
-                       mapped_lun = (u32)arg;
+                       mapped_lun = (u64)arg;
                        break;
                /*
                 * PR APTPL Metadata for Target Port
@@ -1155,11 +1706,10 @@ static ssize_t target_core_dev_pr_store_attr_res_aptpl_metadata(
                        break;
                case Opt_port_rtpi:
                        match_int(args, &arg);
-                       port_rpti = (u16)arg;
                        break;
                case Opt_target_lun:
                        match_int(args, &arg);
-                       target_lun = (u32)arg;
+                       target_lun = (u64)arg;
                        break;
                default:
                        break;
@@ -1223,13 +1773,13 @@ TB_CIT_SETUP(dev_pr, &target_core_dev_pr_ops, NULL, target_core_dev_pr_attrs);
 static ssize_t target_core_show_dev_info(void *p, char *page)
 {
        struct se_device *dev = p;
-       struct se_subsystem_api *t = dev->transport;
        int bl = 0;
        ssize_t read_bytes = 0;
 
        transport_dump_dev_state(dev, page, &bl);
        read_bytes += bl;
-       read_bytes += t->show_configfs_dev_params(dev, page+read_bytes);
+       read_bytes += dev->transport->show_configfs_dev_params(dev,
+                       page+read_bytes);
        return read_bytes;
 }
 
@@ -1247,9 +1797,8 @@ static ssize_t target_core_store_dev_control(
        size_t count)
 {
        struct se_device *dev = p;
-       struct se_subsystem_api *t = dev->transport;
 
-       return t->set_configfs_dev_params(dev, page, count);
+       return dev->transport->set_configfs_dev_params(dev, page, count);
 }
 
 static struct target_core_configfs_attribute target_core_attr_dev_control = {
@@ -2339,21 +2888,16 @@ static ssize_t target_core_alua_tg_pt_gp_show_attr_members(
        struct t10_alua_tg_pt_gp *tg_pt_gp,
        char *page)
 {
-       struct se_port *port;
-       struct se_portal_group *tpg;
        struct se_lun *lun;
-       struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
        ssize_t len = 0, cur_len;
        unsigned char buf[TG_PT_GROUP_NAME_BUF];
 
        memset(buf, 0, TG_PT_GROUP_NAME_BUF);
 
        spin_lock(&tg_pt_gp->tg_pt_gp_lock);
-       list_for_each_entry(tg_pt_gp_mem, &tg_pt_gp->tg_pt_gp_mem_list,
-                       tg_pt_gp_mem_list) {
-               port = tg_pt_gp_mem->tg_pt;
-               tpg = port->sep_tpg;
-               lun = port->sep_lun;
+       list_for_each_entry(lun, &tg_pt_gp->tg_pt_gp_lun_list,
+                       lun_tg_pt_gp_link) {
+               struct se_portal_group *tpg = lun->lun_tpg;
 
                cur_len = snprintf(buf, TG_PT_GROUP_NAME_BUF, "%s/%s/tpgt_%hu"
                        "/%s\n", tpg->se_tpg_tfo->get_fabric_name(),
@@ -2526,9 +3070,9 @@ static struct config_group *target_core_make_subdev(
        const char *name)
 {
        struct t10_alua_tg_pt_gp *tg_pt_gp;
-       struct se_subsystem_api *t;
        struct config_item *hba_ci = &group->cg_item;
        struct se_hba *hba = item_to_hba(hba_ci);
+       struct target_backend *tb = hba->backend;
        struct se_device *dev;
        struct config_group *dev_cg = NULL, *tg_pt_gp_cg = NULL;
        struct config_group *dev_stat_grp = NULL;
@@ -2537,10 +3081,6 @@ static struct config_group *target_core_make_subdev(
        ret = mutex_lock_interruptible(&hba->hba_access_mutex);
        if (ret)
                return ERR_PTR(ret);
-       /*
-        * Locate the struct se_subsystem_api from parent's struct se_hba.
-        */
-       t = hba->transport;
 
        dev = target_alloc_device(hba, name);
        if (!dev)
@@ -2553,17 +3093,17 @@ static struct config_group *target_core_make_subdev(
        if (!dev_cg->default_groups)
                goto out_free_device;
 
-       config_group_init_type_name(dev_cg, name, &t->tb_cits.tb_dev_cit);
+       config_group_init_type_name(dev_cg, name, &tb->tb_dev_cit);
        config_group_init_type_name(&dev->dev_attrib.da_group, "attrib",
-                       &t->tb_cits.tb_dev_attrib_cit);
+                       &tb->tb_dev_attrib_cit);
        config_group_init_type_name(&dev->dev_pr_group, "pr",
-                       &t->tb_cits.tb_dev_pr_cit);
+                       &tb->tb_dev_pr_cit);
        config_group_init_type_name(&dev->t10_wwn.t10_wwn_group, "wwn",
-                       &t->tb_cits.tb_dev_wwn_cit);
+                       &tb->tb_dev_wwn_cit);
        config_group_init_type_name(&dev->t10_alua.alua_tg_pt_gps_group,
-                       "alua", &t->tb_cits.tb_dev_alua_tg_pt_gps_cit);
+                       "alua", &tb->tb_dev_alua_tg_pt_gps_cit);
        config_group_init_type_name(&dev->dev_stat_grps.stat_group,
-                       "statistics", &t->tb_cits.tb_dev_stat_cit);
+                       "statistics", &tb->tb_dev_stat_cit);
 
        dev_cg->default_groups[0] = &dev->dev_attrib.da_group;
        dev_cg->default_groups[1] = &dev->dev_pr_group;
@@ -2693,8 +3233,8 @@ static ssize_t target_core_hba_show_attr_hba_info(
        char *page)
 {
        return sprintf(page, "HBA Index: %d plugin: %s version: %s\n",
-                       hba->hba_id, hba->transport->name,
-                       TARGET_CORE_CONFIGFS_VERSION);
+                       hba->hba_id, hba->backend->ops->name,
+                       TARGET_CORE_VERSION);
 }
 
 SE_HBA_ATTR_RO(hba_info);
@@ -2713,11 +3253,10 @@ static ssize_t target_core_hba_show_attr_hba_mode(struct se_hba *hba,
 static ssize_t target_core_hba_store_attr_hba_mode(struct se_hba *hba,
                                const char *page, size_t count)
 {
-       struct se_subsystem_api *transport = hba->transport;
        unsigned long mode_flag;
        int ret;
 
-       if (transport->pmode_enable_hba == NULL)
+       if (hba->backend->ops->pmode_enable_hba == NULL)
                return -EINVAL;
 
        ret = kstrtoul(page, 0, &mode_flag);
@@ -2731,7 +3270,7 @@ static ssize_t target_core_hba_store_attr_hba_mode(struct se_hba *hba,
                return -EINVAL;
        }
 
-       ret = transport->pmode_enable_hba(hba, mode_flag);
+       ret = hba->backend->ops->pmode_enable_hba(hba, mode_flag);
        if (ret < 0)
                return -EINVAL;
        if (ret > 0)
@@ -2857,16 +3396,15 @@ static struct config_item_type target_core_cit = {
 
 /* Stop functions for struct config_item_type target_core_hba_cit */
 
-void target_core_setup_sub_cits(struct se_subsystem_api *sa)
+void target_setup_backend_cits(struct target_backend *tb)
 {
-       target_core_setup_dev_cit(sa);
-       target_core_setup_dev_attrib_cit(sa);
-       target_core_setup_dev_pr_cit(sa);
-       target_core_setup_dev_wwn_cit(sa);
-       target_core_setup_dev_alua_tg_pt_gps_cit(sa);
-       target_core_setup_dev_stat_cit(sa);
+       target_core_setup_dev_cit(tb);
+       target_core_setup_dev_attrib_cit(tb);
+       target_core_setup_dev_pr_cit(tb);
+       target_core_setup_dev_wwn_cit(tb);
+       target_core_setup_dev_alua_tg_pt_gps_cit(tb);
+       target_core_setup_dev_stat_cit(tb);
 }
-EXPORT_SYMBOL(target_core_setup_sub_cits);
 
 static int __init target_core_init_configfs(void)
 {
@@ -2968,7 +3506,7 @@ static int __init target_core_init_configfs(void)
                goto out_global;
        }
        pr_debug("TARGET_CORE[0]: Initialized ConfigFS Fabric"
-               " Infrastructure: "TARGET_CORE_CONFIGFS_VERSION" on %s/%s"
+               " Infrastructure: "TARGET_CORE_VERSION" on %s/%s"
                " on "UTS_RELEASE"\n", utsname()->sysname, utsname()->machine);
        /*
         * Register built-in RAMDISK subsystem logic for virtual LUN 0
index 417f88b498c72585570398f6f783850c5473bd67..09e682b1c54953477a59057a6b3f3098c5dfbbf5 100644 (file)
@@ -56,40 +56,37 @@ static struct se_hba *lun0_hba;
 struct se_device *g_lun0_dev;
 
 sense_reason_t
-transport_lookup_cmd_lun(struct se_cmd *se_cmd, u32 unpacked_lun)
+transport_lookup_cmd_lun(struct se_cmd *se_cmd, u64 unpacked_lun)
 {
        struct se_lun *se_lun = NULL;
        struct se_session *se_sess = se_cmd->se_sess;
-       struct se_device *dev;
-       unsigned long flags;
-
-       if (unpacked_lun >= TRANSPORT_MAX_LUNS_PER_TPG)
-               return TCM_NON_EXISTENT_LUN;
-
-       spin_lock_irqsave(&se_sess->se_node_acl->device_list_lock, flags);
-       se_cmd->se_deve = se_sess->se_node_acl->device_list[unpacked_lun];
-       if (se_cmd->se_deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) {
-               struct se_dev_entry *deve = se_cmd->se_deve;
+       struct se_node_acl *nacl = se_sess->se_node_acl;
+       struct se_dev_entry *deve;
 
-               deve->total_cmds++;
+       rcu_read_lock();
+       deve = target_nacl_find_deve(nacl, unpacked_lun);
+       if (deve) {
+               atomic_long_inc(&deve->total_cmds);
 
                if ((se_cmd->data_direction == DMA_TO_DEVICE) &&
                    (deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY)) {
                        pr_err("TARGET_CORE[%s]: Detected WRITE_PROTECTED LUN"
-                               " Access for 0x%08x\n",
+                               " Access for 0x%08llx\n",
                                se_cmd->se_tfo->get_fabric_name(),
                                unpacked_lun);
-                       spin_unlock_irqrestore(&se_sess->se_node_acl->device_list_lock, flags);
+                       rcu_read_unlock();
                        return TCM_WRITE_PROTECTED;
                }
 
                if (se_cmd->data_direction == DMA_TO_DEVICE)
-                       deve->write_bytes += se_cmd->data_length;
+                       atomic_long_add(se_cmd->data_length,
+                                       &deve->write_bytes);
                else if (se_cmd->data_direction == DMA_FROM_DEVICE)
-                       deve->read_bytes += se_cmd->data_length;
+                       atomic_long_add(se_cmd->data_length,
+                                       &deve->read_bytes);
 
-               se_lun = deve->se_lun;
-               se_cmd->se_lun = deve->se_lun;
+               se_lun = rcu_dereference(deve->se_lun);
+               se_cmd->se_lun = rcu_dereference(deve->se_lun);
                se_cmd->pr_res_key = deve->pr_res_key;
                se_cmd->orig_fe_lun = unpacked_lun;
                se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
@@ -97,7 +94,7 @@ transport_lookup_cmd_lun(struct se_cmd *se_cmd, u32 unpacked_lun)
                percpu_ref_get(&se_lun->lun_ref);
                se_cmd->lun_ref_active = true;
        }
-       spin_unlock_irqrestore(&se_sess->se_node_acl->device_list_lock, flags);
+       rcu_read_unlock();
 
        if (!se_lun) {
                /*
@@ -107,7 +104,7 @@ transport_lookup_cmd_lun(struct se_cmd *se_cmd, u32 unpacked_lun)
                 */
                if (unpacked_lun != 0) {
                        pr_err("TARGET_CORE[%s]: Detected NON_EXISTENT_LUN"
-                               " Access for 0x%08x\n",
+                               " Access for 0x%08llx\n",
                                se_cmd->se_tfo->get_fabric_name(),
                                unpacked_lun);
                        return TCM_NON_EXISTENT_LUN;
@@ -119,64 +116,66 @@ transport_lookup_cmd_lun(struct se_cmd *se_cmd, u32 unpacked_lun)
                    (se_cmd->data_direction != DMA_NONE))
                        return TCM_WRITE_PROTECTED;
 
-               se_lun = &se_sess->se_tpg->tpg_virt_lun0;
-               se_cmd->se_lun = &se_sess->se_tpg->tpg_virt_lun0;
+               se_lun = se_sess->se_tpg->tpg_virt_lun0;
+               se_cmd->se_lun = se_sess->se_tpg->tpg_virt_lun0;
                se_cmd->orig_fe_lun = 0;
                se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
 
                percpu_ref_get(&se_lun->lun_ref);
                se_cmd->lun_ref_active = true;
        }
+       /*
+        * RCU reference protected by percpu se_lun->lun_ref taken above that
+        * must drop to zero (including initial reference) before this se_lun
+        * pointer can be kfree_rcu() by the final se_lun->lun_group put via
+        * target_core_fabric_configfs.c:target_fabric_port_release
+        */
+       se_cmd->se_dev = rcu_dereference_raw(se_lun->lun_se_dev);
+       atomic_long_inc(&se_cmd->se_dev->num_cmds);
 
-       /* Directly associate cmd with se_dev */
-       se_cmd->se_dev = se_lun->lun_se_dev;
-
-       dev = se_lun->lun_se_dev;
-       atomic_long_inc(&dev->num_cmds);
        if (se_cmd->data_direction == DMA_TO_DEVICE)
-               atomic_long_add(se_cmd->data_length, &dev->write_bytes);
+               atomic_long_add(se_cmd->data_length,
+                               &se_cmd->se_dev->write_bytes);
        else if (se_cmd->data_direction == DMA_FROM_DEVICE)
-               atomic_long_add(se_cmd->data_length, &dev->read_bytes);
+               atomic_long_add(se_cmd->data_length,
+                               &se_cmd->se_dev->read_bytes);
 
        return 0;
 }
 EXPORT_SYMBOL(transport_lookup_cmd_lun);
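
The lookup path above follows the standard RCU lookup-then-pin pattern: the per-ACL deve table is only walked under rcu_read_lock(), and the se_lun that must outlive the read-side section is pinned via its percpu lun_ref before the lock is dropped. Condensed into a hypothetical helper (structure and field names taken from this patch, the helper itself is illustrative):

        /* illustrative only: condensed reader side of the deve/lun lookup */
        static struct se_lun *lookup_and_pin_lun(struct se_node_acl *nacl, u64 unpacked_lun)
        {
                struct se_dev_entry *deve;
                struct se_lun *lun = NULL;

                rcu_read_lock();
                deve = target_nacl_find_deve(nacl, unpacked_lun);
                if (deve) {
                        lun = rcu_dereference(deve->se_lun);
                        percpu_ref_get(&lun->lun_ref);  /* pin before leaving the RCU section */
                }
                rcu_read_unlock();

                return lun;     /* caller drops lun->lun_ref when done */
        }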
 
-int transport_lookup_tmr_lun(struct se_cmd *se_cmd, u32 unpacked_lun)
+int transport_lookup_tmr_lun(struct se_cmd *se_cmd, u64 unpacked_lun)
 {
        struct se_dev_entry *deve;
        struct se_lun *se_lun = NULL;
        struct se_session *se_sess = se_cmd->se_sess;
+       struct se_node_acl *nacl = se_sess->se_node_acl;
        struct se_tmr_req *se_tmr = se_cmd->se_tmr_req;
        unsigned long flags;
 
-       if (unpacked_lun >= TRANSPORT_MAX_LUNS_PER_TPG)
-               return -ENODEV;
-
-       spin_lock_irqsave(&se_sess->se_node_acl->device_list_lock, flags);
-       se_cmd->se_deve = se_sess->se_node_acl->device_list[unpacked_lun];
-       deve = se_cmd->se_deve;
-
-       if (deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) {
-               se_tmr->tmr_lun = deve->se_lun;
-               se_cmd->se_lun = deve->se_lun;
-               se_lun = deve->se_lun;
+       rcu_read_lock();
+       deve = target_nacl_find_deve(nacl, unpacked_lun);
+       if (deve) {
+               se_tmr->tmr_lun = rcu_dereference(deve->se_lun);
+               se_cmd->se_lun = rcu_dereference(deve->se_lun);
+               se_lun = rcu_dereference(deve->se_lun);
                se_cmd->pr_res_key = deve->pr_res_key;
                se_cmd->orig_fe_lun = unpacked_lun;
        }
-       spin_unlock_irqrestore(&se_sess->se_node_acl->device_list_lock, flags);
+       rcu_read_unlock();
 
        if (!se_lun) {
                pr_debug("TARGET_CORE[%s]: Detected NON_EXISTENT_LUN"
-                       " Access for 0x%08x\n",
+                       " Access for 0x%08llx\n",
                        se_cmd->se_tfo->get_fabric_name(),
                        unpacked_lun);
                return -ENODEV;
        }
-
-       /* Directly associate cmd with se_dev */
-       se_cmd->se_dev = se_lun->lun_se_dev;
-       se_tmr->tmr_dev = se_lun->lun_se_dev;
+       /*
+        * XXX: Add percpu se_lun->lun_ref reference count for TMR
+        */
+       se_cmd->se_dev = rcu_dereference_raw(se_lun->lun_se_dev);
+       se_tmr->tmr_dev = rcu_dereference_raw(se_lun->lun_se_dev);
 
        spin_lock_irqsave(&se_tmr->tmr_dev->se_tmr_lock, flags);
        list_add_tail(&se_tmr->tmr_list, &se_tmr->tmr_dev->dev_tmr_list);
@@ -186,9 +185,24 @@ int transport_lookup_tmr_lun(struct se_cmd *se_cmd, u32 unpacked_lun)
 }
 EXPORT_SYMBOL(transport_lookup_tmr_lun);
 
+bool target_lun_is_rdonly(struct se_cmd *cmd)
+{
+       struct se_session *se_sess = cmd->se_sess;
+       struct se_dev_entry *deve;
+       bool ret;
+
+       rcu_read_lock();
+       deve = target_nacl_find_deve(se_sess->se_node_acl, cmd->orig_fe_lun);
+       ret = (deve && deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY);
+       rcu_read_unlock();
+
+       return ret;
+}
+EXPORT_SYMBOL(target_lun_is_rdonly);
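
target_lun_is_rdonly() gives CDB emulation code a lock-free way to honour the READ_ONLY mapping; a write-type command path could use it roughly as follows (illustrative usage, not part of this hunk):

        /* illustrative use in a write-type CDB path */
        if (cmd->data_direction == DMA_TO_DEVICE && target_lun_is_rdonly(cmd))
                return TCM_WRITE_PROTECTED;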
+
 /*
  * This function is called from core_scsi3_emulate_pro_register_and_move()
- * and core_scsi3_decode_spec_i_port(), and will increment &deve->pr_ref_count
+ * and core_scsi3_decode_spec_i_port(), and will increment &deve->pr_kref
  * when a matching rtpi is found.
  */
 struct se_dev_entry *core_get_se_deve_from_rtpi(
@@ -197,231 +211,238 @@ struct se_dev_entry *core_get_se_deve_from_rtpi(
 {
        struct se_dev_entry *deve;
        struct se_lun *lun;
-       struct se_port *port;
        struct se_portal_group *tpg = nacl->se_tpg;
-       u32 i;
-
-       spin_lock_irq(&nacl->device_list_lock);
-       for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
-               deve = nacl->device_list[i];
 
-               if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS))
-                       continue;
-
-               lun = deve->se_lun;
+       rcu_read_lock();
+       hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link) {
+               lun = rcu_dereference(deve->se_lun);
                if (!lun) {
                        pr_err("%s device entries device pointer is"
                                " NULL, but Initiator has access.\n",
                                tpg->se_tpg_tfo->get_fabric_name());
                        continue;
                }
-               port = lun->lun_sep;
-               if (!port) {
-                       pr_err("%s device entries device pointer is"
-                               " NULL, but Initiator has access.\n",
-                               tpg->se_tpg_tfo->get_fabric_name());
-                       continue;
-               }
-               if (port->sep_rtpi != rtpi)
+               if (lun->lun_rtpi != rtpi)
                        continue;
 
-               atomic_inc_mb(&deve->pr_ref_count);
-               spin_unlock_irq(&nacl->device_list_lock);
+               kref_get(&deve->pr_kref);
+               rcu_read_unlock();
 
                return deve;
        }
-       spin_unlock_irq(&nacl->device_list_lock);
+       rcu_read_unlock();
 
        return NULL;
 }
 
-int core_free_device_list_for_node(
+void core_free_device_list_for_node(
        struct se_node_acl *nacl,
        struct se_portal_group *tpg)
 {
        struct se_dev_entry *deve;
-       struct se_lun *lun;
-       u32 i;
-
-       if (!nacl->device_list)
-               return 0;
-
-       spin_lock_irq(&nacl->device_list_lock);
-       for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
-               deve = nacl->device_list[i];
-
-               if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS))
-                       continue;
-
-               if (!deve->se_lun) {
-                       pr_err("%s device entries device pointer is"
-                               " NULL, but Initiator has access.\n",
-                               tpg->se_tpg_tfo->get_fabric_name());
-                       continue;
-               }
-               lun = deve->se_lun;
 
-               spin_unlock_irq(&nacl->device_list_lock);
-               core_disable_device_list_for_node(lun, NULL, deve->mapped_lun,
-                       TRANSPORT_LUNFLAGS_NO_ACCESS, nacl, tpg);
-               spin_lock_irq(&nacl->device_list_lock);
+       mutex_lock(&nacl->lun_entry_mutex);
+       hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link) {
+               struct se_lun *lun = rcu_dereference_check(deve->se_lun,
+                                       lockdep_is_held(&nacl->lun_entry_mutex));
+               core_disable_device_list_for_node(lun, deve, nacl, tpg);
        }
-       spin_unlock_irq(&nacl->device_list_lock);
-
-       array_free(nacl->device_list, TRANSPORT_MAX_LUNS_PER_TPG);
-       nacl->device_list = NULL;
-
-       return 0;
+       mutex_unlock(&nacl->lun_entry_mutex);
 }
 
 void core_update_device_list_access(
-       u32 mapped_lun,
+       u64 mapped_lun,
        u32 lun_access,
        struct se_node_acl *nacl)
 {
        struct se_dev_entry *deve;
 
-       spin_lock_irq(&nacl->device_list_lock);
-       deve = nacl->device_list[mapped_lun];
-       if (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) {
-               deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_ONLY;
-               deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_WRITE;
-       } else {
-               deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_WRITE;
-               deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_ONLY;
+       mutex_lock(&nacl->lun_entry_mutex);
+       deve = target_nacl_find_deve(nacl, mapped_lun);
+       if (deve) {
+               if (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) {
+                       deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_ONLY;
+                       deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_WRITE;
+               } else {
+                       deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_WRITE;
+                       deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_ONLY;
+               }
        }
-       spin_unlock_irq(&nacl->device_list_lock);
+       mutex_unlock(&nacl->lun_entry_mutex);
 }
 
-/*      core_enable_device_list_for_node():
- *
- *
+/*
+ * Called with rcu_read_lock or nacl->device_list_lock held.
  */
+struct se_dev_entry *target_nacl_find_deve(struct se_node_acl *nacl, u64 mapped_lun)
+{
+       struct se_dev_entry *deve;
+
+       hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link)
+               if (deve->mapped_lun == mapped_lun)
+                       return deve;
+
+       return NULL;
+}
+EXPORT_SYMBOL(target_nacl_find_deve);
+
+void target_pr_kref_release(struct kref *kref)
+{
+       struct se_dev_entry *deve = container_of(kref, struct se_dev_entry,
+                                                pr_kref);
+       complete(&deve->pr_comp);
+}
+
+static void
+target_luns_data_has_changed(struct se_node_acl *nacl, struct se_dev_entry *new,
+                            bool skip_new)
+{
+       struct se_dev_entry *tmp;
+
+       rcu_read_lock();
+       hlist_for_each_entry_rcu(tmp, &nacl->lun_entry_hlist, link) {
+               if (skip_new && tmp == new)
+                       continue;
+               core_scsi3_ua_allocate(tmp, 0x3F,
+                                      ASCQ_3FH_REPORTED_LUNS_DATA_HAS_CHANGED);
+       }
+       rcu_read_unlock();
+}
+
 int core_enable_device_list_for_node(
        struct se_lun *lun,
        struct se_lun_acl *lun_acl,
-       u32 mapped_lun,
+       u64 mapped_lun,
        u32 lun_access,
        struct se_node_acl *nacl,
        struct se_portal_group *tpg)
 {
-       struct se_port *port = lun->lun_sep;
-       struct se_dev_entry *deve;
-
-       spin_lock_irq(&nacl->device_list_lock);
-
-       deve = nacl->device_list[mapped_lun];
-
-       /*
-        * Check if the call is handling demo mode -> explicit LUN ACL
-        * transition.  This transition must be for the same struct se_lun
-        * + mapped_lun that was setup in demo mode..
-        */
-       if (deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) {
-               if (deve->se_lun_acl != NULL) {
-                       pr_err("struct se_dev_entry->se_lun_acl"
-                              " already set for demo mode -> explicit"
-                              " LUN ACL transition\n");
-                       spin_unlock_irq(&nacl->device_list_lock);
+       struct se_dev_entry *orig, *new;
+
+       new = kzalloc(sizeof(*new), GFP_KERNEL);
+       if (!new) {
+               pr_err("Unable to allocate se_dev_entry memory\n");
+               return -ENOMEM;
+       }
+
+       atomic_set(&new->ua_count, 0);
+       spin_lock_init(&new->ua_lock);
+       INIT_LIST_HEAD(&new->ua_list);
+       INIT_LIST_HEAD(&new->lun_link);
+
+       new->mapped_lun = mapped_lun;
+       kref_init(&new->pr_kref);
+       init_completion(&new->pr_comp);
+
+       if (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE)
+               new->lun_flags |= TRANSPORT_LUNFLAGS_READ_WRITE;
+       else
+               new->lun_flags |= TRANSPORT_LUNFLAGS_READ_ONLY;
+
+       new->creation_time = get_jiffies_64();
+       new->attach_count++;
+
+       mutex_lock(&nacl->lun_entry_mutex);
+       orig = target_nacl_find_deve(nacl, mapped_lun);
+       if (orig && orig->se_lun) {
+               struct se_lun *orig_lun = rcu_dereference_check(orig->se_lun,
+                                       lockdep_is_held(&nacl->lun_entry_mutex));
+
+               if (orig_lun != lun) {
+                       pr_err("Existing orig->se_lun doesn't match new lun"
+                              " for dynamic -> explicit NodeACL conversion:"
+                               " %s\n", nacl->initiatorname);
+                       mutex_unlock(&nacl->lun_entry_mutex);
+                       kfree(new);
                        return -EINVAL;
                }
-               if (deve->se_lun != lun) {
-                       pr_err("struct se_dev_entry->se_lun does"
-                              " match passed struct se_lun for demo mode"
-                              " -> explicit LUN ACL transition\n");
-                       spin_unlock_irq(&nacl->device_list_lock);
-                       return -EINVAL;
-               }
-               deve->se_lun_acl = lun_acl;
+               BUG_ON(orig->se_lun_acl != NULL);
 
-               if (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) {
-                       deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_ONLY;
-                       deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_WRITE;
-               } else {
-                       deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_WRITE;
-                       deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_ONLY;
-               }
+               rcu_assign_pointer(new->se_lun, lun);
+               rcu_assign_pointer(new->se_lun_acl, lun_acl);
+               hlist_del_rcu(&orig->link);
+               hlist_add_head_rcu(&new->link, &nacl->lun_entry_hlist);
+               mutex_unlock(&nacl->lun_entry_mutex);
 
-               spin_unlock_irq(&nacl->device_list_lock);
-               return 0;
-       }
+               spin_lock(&lun->lun_deve_lock);
+               list_del(&orig->lun_link);
+               list_add_tail(&new->lun_link, &lun->lun_deve_list);
+               spin_unlock(&lun->lun_deve_lock);
+
+               kref_put(&orig->pr_kref, target_pr_kref_release);
+               wait_for_completion(&orig->pr_comp);
 
-       deve->se_lun = lun;
-       deve->se_lun_acl = lun_acl;
-       deve->mapped_lun = mapped_lun;
-       deve->lun_flags |= TRANSPORT_LUNFLAGS_INITIATOR_ACCESS;
-
-       if (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) {
-               deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_ONLY;
-               deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_WRITE;
-       } else {
-               deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_WRITE;
-               deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_ONLY;
+               target_luns_data_has_changed(nacl, new, true);
+               kfree_rcu(orig, rcu_head);
+               return 0;
        }
 
-       deve->creation_time = get_jiffies_64();
-       deve->attach_count++;
-       spin_unlock_irq(&nacl->device_list_lock);
+       rcu_assign_pointer(new->se_lun, lun);
+       rcu_assign_pointer(new->se_lun_acl, lun_acl);
+       hlist_add_head_rcu(&new->link, &nacl->lun_entry_hlist);
+       mutex_unlock(&nacl->lun_entry_mutex);
 
-       spin_lock_bh(&port->sep_alua_lock);
-       list_add_tail(&deve->alua_port_list, &port->sep_alua_list);
-       spin_unlock_bh(&port->sep_alua_lock);
+       spin_lock(&lun->lun_deve_lock);
+       list_add_tail(&new->lun_link, &lun->lun_deve_list);
+       spin_unlock(&lun->lun_deve_lock);
 
+       target_luns_data_has_changed(nacl, new, true);
        return 0;
 }
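The converted enable path above relies on target_nacl_find_deve(), which is not part of this hunk. As a rough sketch, and assuming the helper does nothing more than walk nacl->lun_entry_hlist (callers hold either rcu_read_lock() or nacl->lun_entry_mutex, as seen throughout this patch), it would look something like:

    static struct se_dev_entry *target_nacl_find_deve(struct se_node_acl *nacl,
                                                      u64 mapped_lun)
    {
            struct se_dev_entry *deve;

            /* Protected by rcu_read_lock() or nacl->lun_entry_mutex at the caller. */
            hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link)
                    if (deve->mapped_lun == mapped_lun)
                            return deve;

            return NULL;
    }

This is only an illustration of the lookup the new RCU-protected per-NodeACL hlist implies; the actual helper lives outside this excerpt.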
 
-/*      core_disable_device_list_for_node():
- *
- *
+/*
+ *     Called with se_node_acl->lun_entry_mutex held.
  */
-int core_disable_device_list_for_node(
+void core_disable_device_list_for_node(
        struct se_lun *lun,
-       struct se_lun_acl *lun_acl,
-       u32 mapped_lun,
-       u32 lun_access,
+       struct se_dev_entry *orig,
        struct se_node_acl *nacl,
        struct se_portal_group *tpg)
 {
-       struct se_port *port = lun->lun_sep;
-       struct se_dev_entry *deve = nacl->device_list[mapped_lun];
-
+       /*
+        * rcu_dereference_raw protected by se_lun->lun_group symlink
+        * reference to se_device->dev_group.
+        */
+       struct se_device *dev = rcu_dereference_raw(lun->lun_se_dev);
        /*
         * If the MappedLUN entry is being disabled, the entry in
-        * port->sep_alua_list must be removed now before clearing the
+        * lun->lun_deve_list must be removed now before clearing the
         * struct se_dev_entry pointers below as logic in
         * core_alua_do_transition_tg_pt() depends on these being present.
         *
         * deve->se_lun_acl will be NULL for demo-mode created LUNs
         * that have not been explicitly converted to MappedLUNs ->
-        * struct se_lun_acl, but we remove deve->alua_port_list from
-        * port->sep_alua_list. This also means that active UAs and
+        * struct se_lun_acl, but we remove deve->lun_link from
+        * lun->lun_deve_list. This also means that active UAs and
         * NodeACL context specific PR metadata for demo-mode
         * MappedLUN *deve will be released below..
         */
-       spin_lock_bh(&port->sep_alua_lock);
-       list_del(&deve->alua_port_list);
-       spin_unlock_bh(&port->sep_alua_lock);
+       spin_lock(&lun->lun_deve_lock);
+       list_del(&orig->lun_link);
+       spin_unlock(&lun->lun_deve_lock);
        /*
-        * Wait for any in process SPEC_I_PT=1 or REGISTER_AND_MOVE
-        * PR operation to complete.
+        * Disable struct se_dev_entry LUN ACL mapping
         */
-       while (atomic_read(&deve->pr_ref_count) != 0)
-               cpu_relax();
-
-       spin_lock_irq(&nacl->device_list_lock);
+       core_scsi3_ua_release_all(orig);
+
+       hlist_del_rcu(&orig->link);
+       clear_bit(DEF_PR_REG_ACTIVE, &orig->deve_flags);
+       rcu_assign_pointer(orig->se_lun, NULL);
+       rcu_assign_pointer(orig->se_lun_acl, NULL);
+       orig->lun_flags = 0;
+       orig->creation_time = 0;
+       orig->attach_count--;
        /*
-        * Disable struct se_dev_entry LUN ACL mapping
+        * Before firing off RCU callback, wait for any in process SPEC_I_PT=1
+        * or REGISTER_AND_MOVE PR operation to complete.
         */
-       core_scsi3_ua_release_all(deve);
-       deve->se_lun = NULL;
-       deve->se_lun_acl = NULL;
-       deve->lun_flags = 0;
-       deve->creation_time = 0;
-       deve->attach_count--;
-       spin_unlock_irq(&nacl->device_list_lock);
-
-       core_scsi3_free_pr_reg_from_nacl(lun->lun_se_dev, nacl);
-       return 0;
+       kref_put(&orig->pr_kref, target_pr_kref_release);
+       wait_for_completion(&orig->pr_comp);
+
+       kfree_rcu(orig, rcu_head);
+
+       core_scsi3_free_pr_reg_from_nacl(dev, nacl);
+       target_luns_data_has_changed(nacl, NULL, false);
 }
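Both the enable (dynamic -> explicit conversion) and disable paths above quiesce outstanding SPEC_I_PT / REGISTER_AND_MOVE references through the pr_kref/pr_comp pair before handing the old entry to kfree_rcu(). A minimal sketch of that pattern, assuming target_pr_kref_release() (not shown in this diff) simply completes pr_comp when the last reference is dropped:

    /* Hypothetical body of the release callback referenced above. */
    static void target_pr_kref_release(struct kref *kref)
    {
            struct se_dev_entry *deve = container_of(kref, struct se_dev_entry,
                                                     pr_kref);
            complete(&deve->pr_comp);
    }

            /* Writer side, after unpublishing 'orig' from the RCU hlist: */
            kref_put(&orig->pr_kref, target_pr_kref_release);
            wait_for_completion(&orig->pr_comp);    /* all PR references gone */
            kfree_rcu(orig, rcu_head);              /* RCU readers may still hold it */

The ordering matters: the entry is first removed from the hlist and lun_deve_list, then PR users are drained, and only then is the memory scheduled for RCU-deferred freeing.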
 
 /*      core_clear_lun_from_tpg():
@@ -432,53 +453,35 @@ void core_clear_lun_from_tpg(struct se_lun *lun, struct se_portal_group *tpg)
 {
        struct se_node_acl *nacl;
        struct se_dev_entry *deve;
-       u32 i;
 
-       spin_lock_irq(&tpg->acl_node_lock);
+       mutex_lock(&tpg->acl_node_mutex);
        list_for_each_entry(nacl, &tpg->acl_node_list, acl_list) {
-               spin_unlock_irq(&tpg->acl_node_lock);
 
-               spin_lock_irq(&nacl->device_list_lock);
-               for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
-                       deve = nacl->device_list[i];
-                       if (lun != deve->se_lun)
-                               continue;
-                       spin_unlock_irq(&nacl->device_list_lock);
+               mutex_lock(&nacl->lun_entry_mutex);
+               hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link) {
+                       struct se_lun *tmp_lun = rcu_dereference_check(deve->se_lun,
+                                       lockdep_is_held(&nacl->lun_entry_mutex));
 
-                       core_disable_device_list_for_node(lun, NULL,
-                               deve->mapped_lun, TRANSPORT_LUNFLAGS_NO_ACCESS,
-                               nacl, tpg);
+                       if (lun != tmp_lun)
+                               continue;
 
-                       spin_lock_irq(&nacl->device_list_lock);
+                       core_disable_device_list_for_node(lun, deve, nacl, tpg);
                }
-               spin_unlock_irq(&nacl->device_list_lock);
-
-               spin_lock_irq(&tpg->acl_node_lock);
+               mutex_unlock(&nacl->lun_entry_mutex);
        }
-       spin_unlock_irq(&tpg->acl_node_lock);
+       mutex_unlock(&tpg->acl_node_mutex);
 }
 
-static struct se_port *core_alloc_port(struct se_device *dev)
+int core_alloc_rtpi(struct se_lun *lun, struct se_device *dev)
 {
-       struct se_port *port, *port_tmp;
-
-       port = kzalloc(sizeof(struct se_port), GFP_KERNEL);
-       if (!port) {
-               pr_err("Unable to allocate struct se_port\n");
-               return ERR_PTR(-ENOMEM);
-       }
-       INIT_LIST_HEAD(&port->sep_alua_list);
-       INIT_LIST_HEAD(&port->sep_list);
-       atomic_set(&port->sep_tg_pt_secondary_offline, 0);
-       spin_lock_init(&port->sep_alua_lock);
-       mutex_init(&port->sep_tg_pt_md_mutex);
+       struct se_lun *tmp;
 
        spin_lock(&dev->se_port_lock);
-       if (dev->dev_port_count == 0x0000ffff) {
+       if (dev->export_count == 0x0000ffff) {
                pr_warn("Reached dev->dev_port_count =="
                                " 0x0000ffff\n");
                spin_unlock(&dev->se_port_lock);
-               return ERR_PTR(-ENOSPC);
+               return -ENOSPC;
        }
 again:
        /*
@@ -493,133 +496,23 @@ again:
         * 2h        Relative port 2, historically known as port B
         * 3h to FFFFh    Relative port 3 through 65 535
         */
-       port->sep_rtpi = dev->dev_rpti_counter++;
-       if (!port->sep_rtpi)
+       lun->lun_rtpi = dev->dev_rpti_counter++;
+       if (!lun->lun_rtpi)
                goto again;
 
-       list_for_each_entry(port_tmp, &dev->dev_sep_list, sep_list) {
+       list_for_each_entry(tmp, &dev->dev_sep_list, lun_dev_link) {
                /*
                 * Make sure RELATIVE TARGET PORT IDENTIFIER is unique
                 * for 16-bit wrap..
                 */
-               if (port->sep_rtpi == port_tmp->sep_rtpi)
+               if (lun->lun_rtpi == tmp->lun_rtpi)
                        goto again;
        }
        spin_unlock(&dev->se_port_lock);
 
-       return port;
-}
-
-static void core_export_port(
-       struct se_device *dev,
-       struct se_portal_group *tpg,
-       struct se_port *port,
-       struct se_lun *lun)
-{
-       struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem = NULL;
-
-       spin_lock(&dev->se_port_lock);
-       spin_lock(&lun->lun_sep_lock);
-       port->sep_tpg = tpg;
-       port->sep_lun = lun;
-       lun->lun_sep = port;
-       spin_unlock(&lun->lun_sep_lock);
-
-       list_add_tail(&port->sep_list, &dev->dev_sep_list);
-       spin_unlock(&dev->se_port_lock);
-
-       if (!(dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH) &&
-           !(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE)) {
-               tg_pt_gp_mem = core_alua_allocate_tg_pt_gp_mem(port);
-               if (IS_ERR(tg_pt_gp_mem) || !tg_pt_gp_mem) {
-                       pr_err("Unable to allocate t10_alua_tg_pt"
-                                       "_gp_member_t\n");
-                       return;
-               }
-               spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
-               __core_alua_attach_tg_pt_gp_mem(tg_pt_gp_mem,
-                       dev->t10_alua.default_tg_pt_gp);
-               spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
-               pr_debug("%s/%s: Adding to default ALUA Target Port"
-                       " Group: alua/default_tg_pt_gp\n",
-                       dev->transport->name, tpg->se_tpg_tfo->get_fabric_name());
-       }
-
-       dev->dev_port_count++;
-       port->sep_index = port->sep_rtpi; /* RELATIVE TARGET PORT IDENTIFIER */
-}
-
-/*
- *     Called with struct se_device->se_port_lock spinlock held.
- */
-static void core_release_port(struct se_device *dev, struct se_port *port)
-       __releases(&dev->se_port_lock) __acquires(&dev->se_port_lock)
-{
-       /*
-        * Wait for any port reference for PR ALL_TG_PT=1 operation
-        * to complete in __core_scsi3_alloc_registration()
-        */
-       spin_unlock(&dev->se_port_lock);
-       if (atomic_read(&port->sep_tg_pt_ref_cnt))
-               cpu_relax();
-       spin_lock(&dev->se_port_lock);
-
-       core_alua_free_tg_pt_gp_mem(port);
-
-       list_del(&port->sep_list);
-       dev->dev_port_count--;
-       kfree(port);
-}
-
-int core_dev_export(
-       struct se_device *dev,
-       struct se_portal_group *tpg,
-       struct se_lun *lun)
-{
-       struct se_hba *hba = dev->se_hba;
-       struct se_port *port;
-
-       port = core_alloc_port(dev);
-       if (IS_ERR(port))
-               return PTR_ERR(port);
-
-       lun->lun_se_dev = dev;
-
-       spin_lock(&hba->device_lock);
-       dev->export_count++;
-       spin_unlock(&hba->device_lock);
-
-       core_export_port(dev, tpg, port, lun);
        return 0;
 }
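core_alloc_rtpi() above keeps a per-device 16-bit counter and retries whenever the counter wraps to 0 (reserved) or collides with an RTPI already assigned to another LUN exported from the same device. The same allocation logic in a self-contained form (plain C, hypothetical names, purely illustrative):

    /* Sketch of the RTPI allocation loop; caller must guarantee fewer than
     * 0xffff identifiers are in use, mirroring the export_count check above. */
    static unsigned short alloc_rtpi(unsigned short *counter,
                                     const unsigned short *in_use, size_t n)
    {
            unsigned short rtpi;
            size_t i;
    again:
            rtpi = (*counter)++;            /* 16-bit counter wraps naturally */
            if (!rtpi)                      /* 0 means "no relative port" */
                    goto again;
            for (i = 0; i < n; i++)
                    if (in_use[i] == rtpi)  /* keep RTPIs unique across wrap */
                            goto again;
            return rtpi;
    }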
 
-void core_dev_unexport(
-       struct se_device *dev,
-       struct se_portal_group *tpg,
-       struct se_lun *lun)
-{
-       struct se_hba *hba = dev->se_hba;
-       struct se_port *port = lun->lun_sep;
-
-       spin_lock(&lun->lun_sep_lock);
-       if (lun->lun_se_dev == NULL) {
-               spin_unlock(&lun->lun_sep_lock);
-               return;
-       }
-       spin_unlock(&lun->lun_sep_lock);
-
-       spin_lock(&dev->se_port_lock);
-       core_release_port(dev, port);
-       spin_unlock(&dev->se_port_lock);
-
-       spin_lock(&hba->device_lock);
-       dev->export_count--;
-       spin_unlock(&hba->device_lock);
-
-       lun->lun_sep = NULL;
-       lun->lun_se_dev = NULL;
-}
-
 static void se_release_vpd_for_dev(struct se_device *dev)
 {
        struct t10_vpd *vpd, *vpd_tmp;
@@ -651,556 +544,19 @@ static u32 se_dev_align_max_sectors(u32 max_sectors, u32 block_size)
        return aligned_max_sectors;
 }
 
-bool se_dev_check_wce(struct se_device *dev)
-{
-       bool wce = false;
-
-       if (dev->transport->get_write_cache)
-               wce = dev->transport->get_write_cache(dev);
-       else if (dev->dev_attrib.emulate_write_cache > 0)
-               wce = true;
-
-       return wce;
-}
-
-int se_dev_set_max_unmap_lba_count(
-       struct se_device *dev,
-       u32 max_unmap_lba_count)
-{
-       dev->dev_attrib.max_unmap_lba_count = max_unmap_lba_count;
-       pr_debug("dev[%p]: Set max_unmap_lba_count: %u\n",
-                       dev, dev->dev_attrib.max_unmap_lba_count);
-       return 0;
-}
-EXPORT_SYMBOL(se_dev_set_max_unmap_lba_count);
-
-int se_dev_set_max_unmap_block_desc_count(
-       struct se_device *dev,
-       u32 max_unmap_block_desc_count)
-{
-       dev->dev_attrib.max_unmap_block_desc_count =
-               max_unmap_block_desc_count;
-       pr_debug("dev[%p]: Set max_unmap_block_desc_count: %u\n",
-                       dev, dev->dev_attrib.max_unmap_block_desc_count);
-       return 0;
-}
-EXPORT_SYMBOL(se_dev_set_max_unmap_block_desc_count);
-
-int se_dev_set_unmap_granularity(
-       struct se_device *dev,
-       u32 unmap_granularity)
-{
-       dev->dev_attrib.unmap_granularity = unmap_granularity;
-       pr_debug("dev[%p]: Set unmap_granularity: %u\n",
-                       dev, dev->dev_attrib.unmap_granularity);
-       return 0;
-}
-EXPORT_SYMBOL(se_dev_set_unmap_granularity);
-
-int se_dev_set_unmap_granularity_alignment(
-       struct se_device *dev,
-       u32 unmap_granularity_alignment)
-{
-       dev->dev_attrib.unmap_granularity_alignment = unmap_granularity_alignment;
-       pr_debug("dev[%p]: Set unmap_granularity_alignment: %u\n",
-                       dev, dev->dev_attrib.unmap_granularity_alignment);
-       return 0;
-}
-EXPORT_SYMBOL(se_dev_set_unmap_granularity_alignment);
-
-int se_dev_set_max_write_same_len(
-       struct se_device *dev,
-       u32 max_write_same_len)
-{
-       dev->dev_attrib.max_write_same_len = max_write_same_len;
-       pr_debug("dev[%p]: Set max_write_same_len: %u\n",
-                       dev, dev->dev_attrib.max_write_same_len);
-       return 0;
-}
-EXPORT_SYMBOL(se_dev_set_max_write_same_len);
-
-static void dev_set_t10_wwn_model_alias(struct se_device *dev)
-{
-       const char *configname;
-
-       configname = config_item_name(&dev->dev_group.cg_item);
-       if (strlen(configname) >= 16) {
-               pr_warn("dev[%p]: Backstore name '%s' is too long for "
-                       "INQUIRY_MODEL, truncating to 16 bytes\n", dev,
-                       configname);
-       }
-       snprintf(&dev->t10_wwn.model[0], 16, "%s", configname);
-}
-
-int se_dev_set_emulate_model_alias(struct se_device *dev, int flag)
-{
-       if (dev->export_count) {
-               pr_err("dev[%p]: Unable to change model alias"
-                       " while export_count is %d\n",
-                       dev, dev->export_count);
-                       return -EINVAL;
-       }
-
-       if (flag != 0 && flag != 1) {
-               pr_err("Illegal value %d\n", flag);
-               return -EINVAL;
-       }
-
-       if (flag) {
-               dev_set_t10_wwn_model_alias(dev);
-       } else {
-               strncpy(&dev->t10_wwn.model[0],
-                       dev->transport->inquiry_prod, 16);
-       }
-       dev->dev_attrib.emulate_model_alias = flag;
-
-       return 0;
-}
-EXPORT_SYMBOL(se_dev_set_emulate_model_alias);
-
-int se_dev_set_emulate_dpo(struct se_device *dev, int flag)
-{
-       if (flag != 0 && flag != 1) {
-               pr_err("Illegal value %d\n", flag);
-               return -EINVAL;
-       }
-
-       if (flag) {
-               pr_err("dpo_emulated not supported\n");
-               return -EINVAL;
-       }
-
-       return 0;
-}
-EXPORT_SYMBOL(se_dev_set_emulate_dpo);
-
-int se_dev_set_emulate_fua_write(struct se_device *dev, int flag)
-{
-       if (flag != 0 && flag != 1) {
-               pr_err("Illegal value %d\n", flag);
-               return -EINVAL;
-       }
-       if (flag &&
-           dev->transport->get_write_cache) {
-               pr_warn("emulate_fua_write not supported for this device, ignoring\n");
-               return 0;
-       }
-       if (dev->export_count) {
-               pr_err("emulate_fua_write cannot be changed with active"
-                      " exports: %d\n", dev->export_count);
-               return -EINVAL;
-       }
-       dev->dev_attrib.emulate_fua_write = flag;
-       pr_debug("dev[%p]: SE Device Forced Unit Access WRITEs: %d\n",
-                       dev, dev->dev_attrib.emulate_fua_write);
-       return 0;
-}
-EXPORT_SYMBOL(se_dev_set_emulate_fua_write);
-
-int se_dev_set_emulate_fua_read(struct se_device *dev, int flag)
-{
-       if (flag != 0 && flag != 1) {
-               pr_err("Illegal value %d\n", flag);
-               return -EINVAL;
-       }
-
-       if (flag) {
-               pr_err("ua read emulated not supported\n");
-               return -EINVAL;
-       }
-
-       return 0;
-}
-EXPORT_SYMBOL(se_dev_set_emulate_fua_read);
-
-int se_dev_set_emulate_write_cache(struct se_device *dev, int flag)
-{
-       if (flag != 0 && flag != 1) {
-               pr_err("Illegal value %d\n", flag);
-               return -EINVAL;
-       }
-       if (flag &&
-           dev->transport->get_write_cache) {
-               pr_err("emulate_write_cache not supported for this device\n");
-               return -EINVAL;
-       }
-       if (dev->export_count) {
-               pr_err("emulate_write_cache cannot be changed with active"
-                      " exports: %d\n", dev->export_count);
-               return -EINVAL;
-       }
-       dev->dev_attrib.emulate_write_cache = flag;
-       pr_debug("dev[%p]: SE Device WRITE_CACHE_EMULATION flag: %d\n",
-                       dev, dev->dev_attrib.emulate_write_cache);
-       return 0;
-}
-EXPORT_SYMBOL(se_dev_set_emulate_write_cache);
-
-int se_dev_set_emulate_ua_intlck_ctrl(struct se_device *dev, int flag)
-{
-       if ((flag != 0) && (flag != 1) && (flag != 2)) {
-               pr_err("Illegal value %d\n", flag);
-               return -EINVAL;
-       }
-
-       if (dev->export_count) {
-               pr_err("dev[%p]: Unable to change SE Device"
-                       " UA_INTRLCK_CTRL while export_count is %d\n",
-                       dev, dev->export_count);
-               return -EINVAL;
-       }
-       dev->dev_attrib.emulate_ua_intlck_ctrl = flag;
-       pr_debug("dev[%p]: SE Device UA_INTRLCK_CTRL flag: %d\n",
-               dev, dev->dev_attrib.emulate_ua_intlck_ctrl);
-
-       return 0;
-}
-EXPORT_SYMBOL(se_dev_set_emulate_ua_intlck_ctrl);
-
-int se_dev_set_emulate_tas(struct se_device *dev, int flag)
-{
-       if ((flag != 0) && (flag != 1)) {
-               pr_err("Illegal value %d\n", flag);
-               return -EINVAL;
-       }
-
-       if (dev->export_count) {
-               pr_err("dev[%p]: Unable to change SE Device TAS while"
-                       " export_count is %d\n",
-                       dev, dev->export_count);
-               return -EINVAL;
-       }
-       dev->dev_attrib.emulate_tas = flag;
-       pr_debug("dev[%p]: SE Device TASK_ABORTED status bit: %s\n",
-               dev, (dev->dev_attrib.emulate_tas) ? "Enabled" : "Disabled");
-
-       return 0;
-}
-EXPORT_SYMBOL(se_dev_set_emulate_tas);
-
-int se_dev_set_emulate_tpu(struct se_device *dev, int flag)
-{
-       if ((flag != 0) && (flag != 1)) {
-               pr_err("Illegal value %d\n", flag);
-               return -EINVAL;
-       }
-       /*
-        * We expect this value to be non-zero when generic Block Layer
-        * Discard supported is detected iblock_create_virtdevice().
-        */
-       if (flag && !dev->dev_attrib.max_unmap_block_desc_count) {
-               pr_err("Generic Block Discard not supported\n");
-               return -ENOSYS;
-       }
-
-       dev->dev_attrib.emulate_tpu = flag;
-       pr_debug("dev[%p]: SE Device Thin Provisioning UNMAP bit: %d\n",
-                               dev, flag);
-       return 0;
-}
-EXPORT_SYMBOL(se_dev_set_emulate_tpu);
-
-int se_dev_set_emulate_tpws(struct se_device *dev, int flag)
-{
-       if ((flag != 0) && (flag != 1)) {
-               pr_err("Illegal value %d\n", flag);
-               return -EINVAL;
-       }
-       /*
-        * We expect this value to be non-zero when generic Block Layer
-        * Discard supported is detected iblock_create_virtdevice().
-        */
-       if (flag && !dev->dev_attrib.max_unmap_block_desc_count) {
-               pr_err("Generic Block Discard not supported\n");
-               return -ENOSYS;
-       }
-
-       dev->dev_attrib.emulate_tpws = flag;
-       pr_debug("dev[%p]: SE Device Thin Provisioning WRITE_SAME: %d\n",
-                               dev, flag);
-       return 0;
-}
-EXPORT_SYMBOL(se_dev_set_emulate_tpws);
-
-int se_dev_set_emulate_caw(struct se_device *dev, int flag)
-{
-       if (flag != 0 && flag != 1) {
-               pr_err("Illegal value %d\n", flag);
-               return -EINVAL;
-       }
-       dev->dev_attrib.emulate_caw = flag;
-       pr_debug("dev[%p]: SE Device CompareAndWrite (AtomicTestandSet): %d\n",
-                dev, flag);
-
-       return 0;
-}
-EXPORT_SYMBOL(se_dev_set_emulate_caw);
-
-int se_dev_set_emulate_3pc(struct se_device *dev, int flag)
-{
-       if (flag != 0 && flag != 1) {
-               pr_err("Illegal value %d\n", flag);
-               return -EINVAL;
-       }
-       dev->dev_attrib.emulate_3pc = flag;
-       pr_debug("dev[%p]: SE Device 3rd Party Copy (EXTENDED_COPY): %d\n",
-               dev, flag);
-
-       return 0;
-}
-EXPORT_SYMBOL(se_dev_set_emulate_3pc);
-
-int se_dev_set_pi_prot_type(struct se_device *dev, int flag)
-{
-       int rc, old_prot = dev->dev_attrib.pi_prot_type;
-
-       if (flag != 0 && flag != 1 && flag != 2 && flag != 3) {
-               pr_err("Illegal value %d for pi_prot_type\n", flag);
-               return -EINVAL;
-       }
-       if (flag == 2) {
-               pr_err("DIF TYPE2 protection currently not supported\n");
-               return -ENOSYS;
-       }
-       if (dev->dev_attrib.hw_pi_prot_type) {
-               pr_warn("DIF protection enabled on underlying hardware,"
-                       " ignoring\n");
-               return 0;
-       }
-       if (!dev->transport->init_prot || !dev->transport->free_prot) {
-               /* 0 is only allowed value for non-supporting backends */
-               if (flag == 0)
-                       return 0;
-
-               pr_err("DIF protection not supported by backend: %s\n",
-                      dev->transport->name);
-               return -ENOSYS;
-       }
-       if (!(dev->dev_flags & DF_CONFIGURED)) {
-               pr_err("DIF protection requires device to be configured\n");
-               return -ENODEV;
-       }
-       if (dev->export_count) {
-               pr_err("dev[%p]: Unable to change SE Device PROT type while"
-                      " export_count is %d\n", dev, dev->export_count);
-               return -EINVAL;
-       }
-
-       dev->dev_attrib.pi_prot_type = flag;
-
-       if (flag && !old_prot) {
-               rc = dev->transport->init_prot(dev);
-               if (rc) {
-                       dev->dev_attrib.pi_prot_type = old_prot;
-                       return rc;
-               }
-
-       } else if (!flag && old_prot) {
-               dev->transport->free_prot(dev);
-       }
-       pr_debug("dev[%p]: SE Device Protection Type: %d\n", dev, flag);
-
-       return 0;
-}
-EXPORT_SYMBOL(se_dev_set_pi_prot_type);
-
-int se_dev_set_pi_prot_format(struct se_device *dev, int flag)
-{
-       int rc;
-
-       if (!flag)
-               return 0;
-
-       if (flag != 1) {
-               pr_err("Illegal value %d for pi_prot_format\n", flag);
-               return -EINVAL;
-       }
-       if (!dev->transport->format_prot) {
-               pr_err("DIF protection format not supported by backend %s\n",
-                      dev->transport->name);
-               return -ENOSYS;
-       }
-       if (!(dev->dev_flags & DF_CONFIGURED)) {
-               pr_err("DIF protection format requires device to be configured\n");
-               return -ENODEV;
-       }
-       if (dev->export_count) {
-               pr_err("dev[%p]: Unable to format SE Device PROT type while"
-                      " export_count is %d\n", dev, dev->export_count);
-               return -EINVAL;
-       }
-
-       rc = dev->transport->format_prot(dev);
-       if (rc)
-               return rc;
-
-       pr_debug("dev[%p]: SE Device Protection Format complete\n", dev);
-
-       return 0;
-}
-EXPORT_SYMBOL(se_dev_set_pi_prot_format);
-
-int se_dev_set_enforce_pr_isids(struct se_device *dev, int flag)
-{
-       if ((flag != 0) && (flag != 1)) {
-               pr_err("Illegal value %d\n", flag);
-               return -EINVAL;
-       }
-       dev->dev_attrib.enforce_pr_isids = flag;
-       pr_debug("dev[%p]: SE Device enforce_pr_isids bit: %s\n", dev,
-               (dev->dev_attrib.enforce_pr_isids) ? "Enabled" : "Disabled");
-       return 0;
-}
-EXPORT_SYMBOL(se_dev_set_enforce_pr_isids);
-
-int se_dev_set_force_pr_aptpl(struct se_device *dev, int flag)
-{
-       if ((flag != 0) && (flag != 1)) {
-               printk(KERN_ERR "Illegal value %d\n", flag);
-               return -EINVAL;
-       }
-       if (dev->export_count) {
-               pr_err("dev[%p]: Unable to set force_pr_aptpl while"
-                      " export_count is %d\n", dev, dev->export_count);
-               return -EINVAL;
-       }
-
-       dev->dev_attrib.force_pr_aptpl = flag;
-       pr_debug("dev[%p]: SE Device force_pr_aptpl: %d\n", dev, flag);
-       return 0;
-}
-EXPORT_SYMBOL(se_dev_set_force_pr_aptpl);
-
-int se_dev_set_is_nonrot(struct se_device *dev, int flag)
-{
-       if ((flag != 0) && (flag != 1)) {
-               printk(KERN_ERR "Illegal value %d\n", flag);
-               return -EINVAL;
-       }
-       dev->dev_attrib.is_nonrot = flag;
-       pr_debug("dev[%p]: SE Device is_nonrot bit: %d\n",
-              dev, flag);
-       return 0;
-}
-EXPORT_SYMBOL(se_dev_set_is_nonrot);
-
-int se_dev_set_emulate_rest_reord(struct se_device *dev, int flag)
-{
-       if (flag != 0) {
-               printk(KERN_ERR "dev[%p]: SE Device emulatation of restricted"
-                       " reordering not implemented\n", dev);
-               return -ENOSYS;
-       }
-       dev->dev_attrib.emulate_rest_reord = flag;
-       pr_debug("dev[%p]: SE Device emulate_rest_reord: %d\n", dev, flag);
-       return 0;
-}
-EXPORT_SYMBOL(se_dev_set_emulate_rest_reord);
-
-/*
- * Note, this can only be called on unexported SE Device Object.
- */
-int se_dev_set_queue_depth(struct se_device *dev, u32 queue_depth)
-{
-       if (dev->export_count) {
-               pr_err("dev[%p]: Unable to change SE Device TCQ while"
-                       " export_count is %d\n",
-                       dev, dev->export_count);
-               return -EINVAL;
-       }
-       if (!queue_depth) {
-               pr_err("dev[%p]: Illegal ZERO value for queue"
-                       "_depth\n", dev);
-               return -EINVAL;
-       }
-
-       if (queue_depth > dev->dev_attrib.queue_depth) {
-               if (queue_depth > dev->dev_attrib.hw_queue_depth) {
-                       pr_err("dev[%p]: Passed queue_depth:"
-                               " %u exceeds TCM/SE_Device MAX"
-                               " TCQ: %u\n", dev, queue_depth,
-                               dev->dev_attrib.hw_queue_depth);
-                       return -EINVAL;
-               }
-       }
-       dev->dev_attrib.queue_depth = dev->queue_depth = queue_depth;
-       pr_debug("dev[%p]: SE Device TCQ Depth changed to: %u\n",
-                       dev, queue_depth);
-       return 0;
-}
-EXPORT_SYMBOL(se_dev_set_queue_depth);
-
-int se_dev_set_optimal_sectors(struct se_device *dev, u32 optimal_sectors)
-{
-       if (dev->export_count) {
-               pr_err("dev[%p]: Unable to change SE Device"
-                       " optimal_sectors while export_count is %d\n",
-                       dev, dev->export_count);
-               return -EINVAL;
-       }
-       if (optimal_sectors > dev->dev_attrib.hw_max_sectors) {
-               pr_err("dev[%p]: Passed optimal_sectors %u cannot be"
-                       " greater than hw_max_sectors: %u\n", dev,
-                       optimal_sectors, dev->dev_attrib.hw_max_sectors);
-               return -EINVAL;
-       }
-
-       dev->dev_attrib.optimal_sectors = optimal_sectors;
-       pr_debug("dev[%p]: SE Device optimal_sectors changed to %u\n",
-                       dev, optimal_sectors);
-       return 0;
-}
-EXPORT_SYMBOL(se_dev_set_optimal_sectors);
-
-int se_dev_set_block_size(struct se_device *dev, u32 block_size)
-{
-       if (dev->export_count) {
-               pr_err("dev[%p]: Unable to change SE Device block_size"
-                       " while export_count is %d\n",
-                       dev, dev->export_count);
-               return -EINVAL;
-       }
-
-       if ((block_size != 512) &&
-           (block_size != 1024) &&
-           (block_size != 2048) &&
-           (block_size != 4096)) {
-               pr_err("dev[%p]: Illegal value for block_device: %u"
-                       " for SE device, must be 512, 1024, 2048 or 4096\n",
-                       dev, block_size);
-               return -EINVAL;
-       }
-
-       dev->dev_attrib.block_size = block_size;
-       pr_debug("dev[%p]: SE Device block_size changed to %u\n",
-                       dev, block_size);
-
-       if (dev->dev_attrib.max_bytes_per_io)
-               dev->dev_attrib.hw_max_sectors =
-                       dev->dev_attrib.max_bytes_per_io / block_size;
-
-       return 0;
-}
-EXPORT_SYMBOL(se_dev_set_block_size);
-
-struct se_lun *core_dev_add_lun(
+int core_dev_add_lun(
        struct se_portal_group *tpg,
        struct se_device *dev,
-       u32 unpacked_lun)
+       struct se_lun *lun)
 {
-       struct se_lun *lun;
        int rc;
 
-       lun = core_tpg_alloc_lun(tpg, unpacked_lun);
-       if (IS_ERR(lun))
-               return lun;
-
        rc = core_tpg_add_lun(tpg, lun,
                                TRANSPORT_LUNFLAGS_READ_WRITE, dev);
        if (rc < 0)
-               return ERR_PTR(rc);
+               return rc;
 
-       pr_debug("%s_TPG[%u]_LUN[%u] - Activated %s Logical Unit from"
+       pr_debug("%s_TPG[%u]_LUN[%llu] - Activated %s Logical Unit from"
                " CORE HBA: %u\n", tpg->se_tpg_tfo->get_fabric_name(),
                tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
                tpg->se_tpg_tfo->get_fabric_name(), dev->se_hba->hba_id);
@@ -1210,20 +566,19 @@ struct se_lun *core_dev_add_lun(
         */
        if (tpg->se_tpg_tfo->tpg_check_demo_mode(tpg)) {
                struct se_node_acl *acl;
-               spin_lock_irq(&tpg->acl_node_lock);
+
+               mutex_lock(&tpg->acl_node_mutex);
                list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
                        if (acl->dynamic_node_acl &&
                            (!tpg->se_tpg_tfo->tpg_check_demo_mode_login_only ||
                             !tpg->se_tpg_tfo->tpg_check_demo_mode_login_only(tpg))) {
-                               spin_unlock_irq(&tpg->acl_node_lock);
-                               core_tpg_add_node_to_devs(acl, tpg);
-                               spin_lock_irq(&tpg->acl_node_lock);
+                               core_tpg_add_node_to_devs(acl, tpg, lun);
                        }
                }
-               spin_unlock_irq(&tpg->acl_node_lock);
+               mutex_unlock(&tpg->acl_node_mutex);
        }
 
-       return lun;
+       return 0;
 }
 
 /*      core_dev_del_lun():
@@ -1234,7 +589,7 @@ void core_dev_del_lun(
        struct se_portal_group *tpg,
        struct se_lun *lun)
 {
-       pr_debug("%s_TPG[%u]_LUN[%u] - Deactivating %s Logical Unit from"
+       pr_debug("%s_TPG[%u]_LUN[%llu] - Deactivating %s Logical Unit from"
                " device object\n", tpg->se_tpg_tfo->get_fabric_name(),
                tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
                tpg->se_tpg_tfo->get_fabric_name());
@@ -1242,72 +597,10 @@ void core_dev_del_lun(
        core_tpg_remove_lun(tpg, lun);
 }
 
-struct se_lun *core_get_lun_from_tpg(struct se_portal_group *tpg, u32 unpacked_lun)
-{
-       struct se_lun *lun;
-
-       spin_lock(&tpg->tpg_lun_lock);
-       if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) {
-               pr_err("%s LUN: %u exceeds TRANSPORT_MAX_LUNS"
-                       "_PER_TPG-1: %u for Target Portal Group: %hu\n",
-                       tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
-                       TRANSPORT_MAX_LUNS_PER_TPG-1,
-                       tpg->se_tpg_tfo->tpg_get_tag(tpg));
-               spin_unlock(&tpg->tpg_lun_lock);
-               return NULL;
-       }
-       lun = tpg->tpg_lun_list[unpacked_lun];
-
-       if (lun->lun_status != TRANSPORT_LUN_STATUS_FREE) {
-               pr_err("%s Logical Unit Number: %u is not free on"
-                       " Target Portal Group: %hu, ignoring request.\n",
-                       tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
-                       tpg->se_tpg_tfo->tpg_get_tag(tpg));
-               spin_unlock(&tpg->tpg_lun_lock);
-               return NULL;
-       }
-       spin_unlock(&tpg->tpg_lun_lock);
-
-       return lun;
-}
-
-/*      core_dev_get_lun():
- *
- *
- */
-static struct se_lun *core_dev_get_lun(struct se_portal_group *tpg, u32 unpacked_lun)
-{
-       struct se_lun *lun;
-
-       spin_lock(&tpg->tpg_lun_lock);
-       if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) {
-               pr_err("%s LUN: %u exceeds TRANSPORT_MAX_LUNS_PER"
-                       "_TPG-1: %u for Target Portal Group: %hu\n",
-                       tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
-                       TRANSPORT_MAX_LUNS_PER_TPG-1,
-                       tpg->se_tpg_tfo->tpg_get_tag(tpg));
-               spin_unlock(&tpg->tpg_lun_lock);
-               return NULL;
-       }
-       lun = tpg->tpg_lun_list[unpacked_lun];
-
-       if (lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE) {
-               pr_err("%s Logical Unit Number: %u is not active on"
-                       " Target Portal Group: %hu, ignoring request.\n",
-                       tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
-                       tpg->se_tpg_tfo->tpg_get_tag(tpg));
-               spin_unlock(&tpg->tpg_lun_lock);
-               return NULL;
-       }
-       spin_unlock(&tpg->tpg_lun_lock);
-
-       return lun;
-}
-
 struct se_lun_acl *core_dev_init_initiator_node_lun_acl(
        struct se_portal_group *tpg,
        struct se_node_acl *nacl,
-       u32 mapped_lun,
+       u64 mapped_lun,
        int *ret)
 {
        struct se_lun_acl *lacl;
@@ -1325,7 +618,6 @@ struct se_lun_acl *core_dev_init_initiator_node_lun_acl(
                return NULL;
        }
 
-       INIT_LIST_HEAD(&lacl->lacl_list);
        lacl->mapped_lun = mapped_lun;
        lacl->se_lun_nacl = nacl;
        snprintf(lacl->initiatorname, TRANSPORT_IQN_LEN, "%s",
@@ -1337,22 +629,16 @@ struct se_lun_acl *core_dev_init_initiator_node_lun_acl(
 int core_dev_add_initiator_node_lun_acl(
        struct se_portal_group *tpg,
        struct se_lun_acl *lacl,
-       u32 unpacked_lun,
+       struct se_lun *lun,
        u32 lun_access)
 {
-       struct se_lun *lun;
-       struct se_node_acl *nacl;
-
-       lun = core_dev_get_lun(tpg, unpacked_lun);
-       if (!lun) {
-               pr_err("%s Logical Unit Number: %u is not active on"
-                       " Target Portal Group: %hu, ignoring request.\n",
-                       tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
-                       tpg->se_tpg_tfo->tpg_get_tag(tpg));
-               return -EINVAL;
-       }
+       struct se_node_acl *nacl = lacl->se_lun_nacl;
+       /*
+        * rcu_dereference_raw protected by se_lun->lun_group symlink
+        * reference to se_device->dev_group.
+        */
+       struct se_device *dev = rcu_dereference_raw(lun->lun_se_dev);
 
-       nacl = lacl->se_lun_nacl;
        if (!nacl)
                return -EINVAL;
 
@@ -1366,52 +652,40 @@ int core_dev_add_initiator_node_lun_acl(
                        lun_access, nacl, tpg) < 0)
                return -EINVAL;
 
-       spin_lock(&lun->lun_acl_lock);
-       list_add_tail(&lacl->lacl_list, &lun->lun_acl_list);
-       atomic_inc_mb(&lun->lun_acl_count);
-       spin_unlock(&lun->lun_acl_lock);
-
-       pr_debug("%s_TPG[%hu]_LUN[%u->%u] - Added %s ACL for "
+       pr_debug("%s_TPG[%hu]_LUN[%llu->%llu] - Added %s ACL for "
                " InitiatorNode: %s\n", tpg->se_tpg_tfo->get_fabric_name(),
-               tpg->se_tpg_tfo->tpg_get_tag(tpg), unpacked_lun, lacl->mapped_lun,
+               tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun, lacl->mapped_lun,
                (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) ? "RW" : "RO",
                lacl->initiatorname);
        /*
         * Check to see if there are any existing persistent reservation APTPL
         * pre-registrations that need to be enabled for this LUN ACL..
         */
-       core_scsi3_check_aptpl_registration(lun->lun_se_dev, tpg, lun, nacl,
+       core_scsi3_check_aptpl_registration(dev, tpg, lun, nacl,
                                            lacl->mapped_lun);
        return 0;
 }
 
-/*      core_dev_del_initiator_node_lun_acl():
- *
- *
- */
 int core_dev_del_initiator_node_lun_acl(
-       struct se_portal_group *tpg,
        struct se_lun *lun,
        struct se_lun_acl *lacl)
 {
+       struct se_portal_group *tpg = lun->lun_tpg;
        struct se_node_acl *nacl;
+       struct se_dev_entry *deve;
 
        nacl = lacl->se_lun_nacl;
        if (!nacl)
                return -EINVAL;
 
-       spin_lock(&lun->lun_acl_lock);
-       list_del(&lacl->lacl_list);
-       atomic_dec_mb(&lun->lun_acl_count);
-       spin_unlock(&lun->lun_acl_lock);
-
-       core_disable_device_list_for_node(lun, NULL, lacl->mapped_lun,
-               TRANSPORT_LUNFLAGS_NO_ACCESS, nacl, tpg);
-
-       lacl->se_lun = NULL;
+       mutex_lock(&nacl->lun_entry_mutex);
+       deve = target_nacl_find_deve(nacl, lacl->mapped_lun);
+       if (deve)
+               core_disable_device_list_for_node(lun, deve, nacl, tpg);
+       mutex_unlock(&nacl->lun_entry_mutex);
 
-       pr_debug("%s_TPG[%hu]_LUN[%u] - Removed ACL for"
-               " InitiatorNode: %s Mapped LUN: %u\n",
+       pr_debug("%s_TPG[%hu]_LUN[%llu] - Removed ACL for"
+               " InitiatorNode: %s Mapped LUN: %llu\n",
                tpg->se_tpg_tfo->get_fabric_name(),
                tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
                lacl->initiatorname, lacl->mapped_lun);
@@ -1424,7 +698,7 @@ void core_dev_free_initiator_node_lun_acl(
        struct se_lun_acl *lacl)
 {
        pr_debug("%s_TPG[%hu] - Freeing ACL for %s InitiatorNode: %s"
-               " Mapped LUN: %u\n", tpg->se_tpg_tfo->get_fabric_name(),
+               " Mapped LUN: %llu\n", tpg->se_tpg_tfo->get_fabric_name(),
                tpg->se_tpg_tfo->tpg_get_tag(tpg),
                tpg->se_tpg_tfo->get_fabric_name(),
                lacl->initiatorname, lacl->mapped_lun);
@@ -1473,14 +747,15 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
        struct se_device *dev;
        struct se_lun *xcopy_lun;
 
-       dev = hba->transport->alloc_device(hba, name);
+       dev = hba->backend->ops->alloc_device(hba, name);
        if (!dev)
                return NULL;
 
        dev->dev_link_magic = SE_DEV_LINK_MAGIC;
        dev->se_hba = hba;
-       dev->transport = hba->transport;
+       dev->transport = hba->backend->ops;
        dev->prot_length = sizeof(struct se_dif_v1_tuple);
+       dev->hba_index = hba->hba_index;
 
        INIT_LIST_HEAD(&dev->dev_list);
        INIT_LIST_HEAD(&dev->dev_sep_list);
@@ -1513,9 +788,9 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
 
        dev->dev_attrib.da_dev = dev;
        dev->dev_attrib.emulate_model_alias = DA_EMULATE_MODEL_ALIAS;
-       dev->dev_attrib.emulate_dpo = DA_EMULATE_DPO;
-       dev->dev_attrib.emulate_fua_write = DA_EMULATE_FUA_WRITE;
-       dev->dev_attrib.emulate_fua_read = DA_EMULATE_FUA_READ;
+       dev->dev_attrib.emulate_dpo = 1;
+       dev->dev_attrib.emulate_fua_write = 1;
+       dev->dev_attrib.emulate_fua_read = 1;
        dev->dev_attrib.emulate_write_cache = DA_EMULATE_WRITE_CACHE;
        dev->dev_attrib.emulate_ua_intlck_ctrl = DA_EMULATE_UA_INTLLCK_CTRL;
        dev->dev_attrib.emulate_tas = DA_EMULATE_TAS;
@@ -1537,12 +812,12 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
        dev->dev_attrib.max_write_same_len = DA_MAX_WRITE_SAME_LEN;
 
        xcopy_lun = &dev->xcopy_lun;
-       xcopy_lun->lun_se_dev = dev;
-       init_completion(&xcopy_lun->lun_shutdown_comp);
-       INIT_LIST_HEAD(&xcopy_lun->lun_acl_list);
-       spin_lock_init(&xcopy_lun->lun_acl_lock);
-       spin_lock_init(&xcopy_lun->lun_sep_lock);
+       rcu_assign_pointer(xcopy_lun->lun_se_dev, dev);
        init_completion(&xcopy_lun->lun_ref_comp);
+       INIT_LIST_HEAD(&xcopy_lun->lun_deve_list);
+       INIT_LIST_HEAD(&xcopy_lun->lun_dev_link);
+       mutex_init(&xcopy_lun->lun_tg_pt_md_mutex);
+       xcopy_lun->lun_tpg = &xcopy_pt_tpg;
 
        return dev;
 }
@@ -1679,7 +954,7 @@ int core_dev_setup_virtual_lun0(void)
                goto out_free_hba;
        }
 
-       hba->transport->set_configfs_dev_params(dev, buf, sizeof(buf));
+       hba->backend->ops->set_configfs_dev_params(dev, buf, sizeof(buf));
 
        ret = target_configure_device(dev);
        if (ret)
index 1f7886bb16bfc213bad718ac45c2aff3a8ee5f97..48a36989c1a659408b5a1a58bbb97a0989576055 100644 (file)
@@ -36,7 +36,6 @@
 #include <target/target_core_base.h>
 #include <target/target_core_fabric.h>
 #include <target/target_core_fabric_configfs.h>
-#include <target/target_core_configfs.h>
 #include <target/configfs_macros.h>
 
 #include "target_core_internal.h"
 #define TF_CIT_SETUP(_name, _item_ops, _group_ops, _attrs)             \
 static void target_fabric_setup_##_name##_cit(struct target_fabric_configfs *tf) \
 {                                                                      \
-       struct target_fabric_configfs_template *tfc = &tf->tf_cit_tmpl; \
-       struct config_item_type *cit = &tfc->tfc_##_name##_cit;         \
+       struct config_item_type *cit = &tf->tf_##_name##_cit;           \
                                                                        \
        cit->ct_item_ops = _item_ops;                                   \
        cit->ct_group_ops = _group_ops;                                 \
        cit->ct_attrs = _attrs;                                         \
-       cit->ct_owner = tf->tf_module;                                  \
+       cit->ct_owner = tf->tf_ops->module;                             \
        pr_debug("Setup generic %s\n", __stringify(_name));             \
 }
 
 #define TF_CIT_SETUP_DRV(_name, _item_ops, _group_ops)         \
 static void target_fabric_setup_##_name##_cit(struct target_fabric_configfs *tf) \
 {                                                                      \
-       struct target_fabric_configfs_template *tfc = &tf->tf_cit_tmpl; \
-       struct config_item_type *cit = &tfc->tfc_##_name##_cit;         \
-       struct configfs_attribute **attrs = tf->tf_ops.tfc_##_name##_attrs; \
+       struct config_item_type *cit = &tf->tf_##_name##_cit;           \
+       struct configfs_attribute **attrs = tf->tf_ops->tfc_##_name##_attrs; \
                                                                        \
        cit->ct_item_ops = _item_ops;                                   \
        cit->ct_group_ops = _group_ops;                                 \
        cit->ct_attrs = attrs;                                          \
-       cit->ct_owner = tf->tf_module;                                  \
+       cit->ct_owner = tf->tf_ops->module;                             \
        pr_debug("Setup generic %s\n", __stringify(_name));             \
 }
 
@@ -83,7 +80,7 @@ static int target_fabric_mappedlun_link(
                        struct se_lun_acl, se_lun_group);
        struct se_portal_group *se_tpg;
        struct config_item *nacl_ci, *tpg_ci, *tpg_ci_s, *wwn_ci, *wwn_ci_s;
-       int ret = 0, lun_access;
+       int lun_access;
 
        if (lun->lun_link_magic != SE_LUN_LINK_MAGIC) {
                pr_err("Bad lun->lun_link_magic, not a valid lun_ci pointer:"
@@ -93,12 +90,11 @@ static int target_fabric_mappedlun_link(
        /*
         * Ensure that the source port exists
         */
-       if (!lun->lun_sep || !lun->lun_sep->sep_tpg) {
-               pr_err("Source se_lun->lun_sep or lun->lun_sep->sep"
-                               "_tpg does not exist\n");
+       if (!lun->lun_se_dev) {
+               pr_err("Source se_lun->lun_se_dev does not exist\n");
                return -EINVAL;
        }
-       se_tpg = lun->lun_sep->sep_tpg;
+       se_tpg = lun->lun_tpg;
 
        nacl_ci = &lun_acl_ci->ci_parent->ci_group->cg_item;
        tpg_ci = &nacl_ci->ci_group->cg_item;
@@ -125,49 +121,35 @@ static int target_fabric_mappedlun_link(
         * which be will write protected (READ-ONLY) when
         * tpg_1/attrib/demo_mode_write_protect=1
         */
-       spin_lock_irq(&lacl->se_lun_nacl->device_list_lock);
-       deve = lacl->se_lun_nacl->device_list[lacl->mapped_lun];
-       if (deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS)
+       rcu_read_lock();
+       deve = target_nacl_find_deve(lacl->se_lun_nacl, lacl->mapped_lun);
+       if (deve)
                lun_access = deve->lun_flags;
        else
                lun_access =
                        (se_tpg->se_tpg_tfo->tpg_check_prod_mode_write_protect(
                                se_tpg)) ? TRANSPORT_LUNFLAGS_READ_ONLY :
                                           TRANSPORT_LUNFLAGS_READ_WRITE;
-       spin_unlock_irq(&lacl->se_lun_nacl->device_list_lock);
+       rcu_read_unlock();
        /*
         * Determine the actual mapped LUN value user wants..
         *
         * This value is what the SCSI Initiator actually sees the
-        * iscsi/$IQN/$TPGT/lun/lun_* as on their SCSI Initiator Ports.
+        * $FABRIC/$WWPN/$TPGT/lun/lun_* as on their SCSI Initiator Ports.
         */
-       ret = core_dev_add_initiator_node_lun_acl(se_tpg, lacl,
-                       lun->unpacked_lun, lun_access);
-
-       return (ret < 0) ? -EINVAL : 0;
+       return core_dev_add_initiator_node_lun_acl(se_tpg, lacl, lun, lun_access);
 }
 
 static int target_fabric_mappedlun_unlink(
        struct config_item *lun_acl_ci,
        struct config_item *lun_ci)
 {
-       struct se_lun *lun;
        struct se_lun_acl *lacl = container_of(to_config_group(lun_acl_ci),
                        struct se_lun_acl, se_lun_group);
-       struct se_node_acl *nacl = lacl->se_lun_nacl;
-       struct se_dev_entry *deve = nacl->device_list[lacl->mapped_lun];
-       struct se_portal_group *se_tpg;
-       /*
-        * Determine if the underlying MappedLUN has already been released..
-        */
-       if (!deve->se_lun)
-               return 0;
-
-       lun = container_of(to_config_group(lun_ci), struct se_lun, lun_group);
-       se_tpg = lun->lun_sep->sep_tpg;
+       struct se_lun *lun = container_of(to_config_group(lun_ci),
+                       struct se_lun, lun_group);
 
-       core_dev_del_initiator_node_lun_acl(se_tpg, lun, lacl);
-       return 0;
+       return core_dev_del_initiator_node_lun_acl(lun, lacl);
 }
 
 CONFIGFS_EATTR_STRUCT(target_fabric_mappedlun, se_lun_acl);
@@ -183,14 +165,15 @@ static ssize_t target_fabric_mappedlun_show_write_protect(
 {
        struct se_node_acl *se_nacl = lacl->se_lun_nacl;
        struct se_dev_entry *deve;
-       ssize_t len;
+       ssize_t len = 0;
 
-       spin_lock_irq(&se_nacl->device_list_lock);
-       deve = se_nacl->device_list[lacl->mapped_lun];
-       len = sprintf(page, "%d\n",
-                       (deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY) ?
-                       1 : 0);
-       spin_unlock_irq(&se_nacl->device_list_lock);
+       rcu_read_lock();
+       deve = target_nacl_find_deve(se_nacl, lacl->mapped_lun);
+       if (deve) {
+               len = sprintf(page, "%d\n",
+                       (deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY) ? 1 : 0);
+       }
+       rcu_read_unlock();
 
        return len;
 }
@@ -218,7 +201,7 @@ static ssize_t target_fabric_mappedlun_store_write_protect(
                        lacl->se_lun_nacl);
 
        pr_debug("%s_ConfigFS: Changed Initiator ACL: %s"
-               " Mapped LUN: %u Write Protect bit to %s\n",
+               " Mapped LUN: %llu Write Protect bit to %s\n",
                se_tpg->se_tpg_tfo->get_fabric_name(),
                lacl->initiatorname, lacl->mapped_lun, (op) ? "ON" : "OFF");
 
@@ -338,7 +321,7 @@ static struct config_group *target_fabric_make_mappedlun(
        struct config_item *acl_ci;
        struct config_group *lacl_cg = NULL, *ml_stat_grp = NULL;
        char *buf;
-       unsigned long mapped_lun;
+       unsigned long long mapped_lun;
        int ret = 0;
 
        acl_ci = &group->cg_item;
@@ -366,21 +349,9 @@ static struct config_group *target_fabric_make_mappedlun(
         * Determine the Mapped LUN value.  This is what the SCSI Initiator
         * Port will actually see.
         */
-       ret = kstrtoul(buf + 4, 0, &mapped_lun);
+       ret = kstrtoull(buf + 4, 0, &mapped_lun);
        if (ret)
                goto out;
-       if (mapped_lun > UINT_MAX) {
-               ret = -EINVAL;
-               goto out;
-       }
-       if (mapped_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) {
-               pr_err("Mapped LUN: %lu exceeds TRANSPORT_MAX_LUNS_PER_TPG"
-                       "-1: %u for Target Portal Group: %u\n", mapped_lun,
-                       TRANSPORT_MAX_LUNS_PER_TPG-1,
-                       se_tpg->se_tpg_tfo->tpg_get_tag(se_tpg));
-               ret = -EINVAL;
-               goto out;
-       }
 
        lacl = core_dev_init_initiator_node_lun_acl(se_tpg, se_nacl,
                        mapped_lun, &ret);
@@ -399,9 +370,9 @@ static struct config_group *target_fabric_make_mappedlun(
        }
 
        config_group_init_type_name(&lacl->se_lun_group, name,
-                       &tf->tf_cit_tmpl.tfc_tpg_mappedlun_cit);
+                       &tf->tf_tpg_mappedlun_cit);
        config_group_init_type_name(&lacl->ml_stat_grps.stat_group,
-                       "statistics", &tf->tf_cit_tmpl.tfc_tpg_mappedlun_stat_cit);
+                       "statistics", &tf->tf_tpg_mappedlun_stat_cit);
        lacl_cg->default_groups[0] = &lacl->ml_stat_grps.stat_group;
        lacl_cg->default_groups[1] = NULL;
 
@@ -458,10 +429,11 @@ static void target_fabric_nacl_base_release(struct config_item *item)
 {
        struct se_node_acl *se_nacl = container_of(to_config_group(item),
                        struct se_node_acl, acl_group);
-       struct se_portal_group *se_tpg = se_nacl->se_tpg;
-       struct target_fabric_configfs *tf = se_tpg->se_tpg_wwn->wwn_tf;
+       struct target_fabric_configfs *tf = se_nacl->se_tpg->se_tpg_wwn->wwn_tf;
 
-       tf->tf_ops.fabric_drop_nodeacl(se_nacl);
+       if (tf->tf_ops->fabric_cleanup_nodeacl)
+               tf->tf_ops->fabric_cleanup_nodeacl(se_nacl);
+       core_tpg_del_initiator_node_acl(se_nacl);
 }
 
 static struct configfs_item_operations target_fabric_nacl_base_item_ops = {
@@ -501,15 +473,18 @@ static struct config_group *target_fabric_make_nodeacl(
        struct se_node_acl *se_nacl;
        struct config_group *nacl_cg;
 
-       if (!tf->tf_ops.fabric_make_nodeacl) {
-               pr_err("tf->tf_ops.fabric_make_nodeacl is NULL\n");
-               return ERR_PTR(-ENOSYS);
-       }
-
-       se_nacl = tf->tf_ops.fabric_make_nodeacl(se_tpg, group, name);
+       se_nacl = core_tpg_add_initiator_node_acl(se_tpg, name);
        if (IS_ERR(se_nacl))
                return ERR_CAST(se_nacl);
 
+       if (tf->tf_ops->fabric_init_nodeacl) {
+               int ret = tf->tf_ops->fabric_init_nodeacl(se_nacl, name);
+               if (ret) {
+                       core_tpg_del_initiator_node_acl(se_nacl);
+                       return ERR_PTR(ret);
+               }
+       }
+
        nacl_cg = &se_nacl->acl_group;
        nacl_cg->default_groups = se_nacl->acl_default_groups;
        nacl_cg->default_groups[0] = &se_nacl->acl_attrib_group;
@@ -519,16 +494,15 @@ static struct config_group *target_fabric_make_nodeacl(
        nacl_cg->default_groups[4] = NULL;
 
        config_group_init_type_name(&se_nacl->acl_group, name,
-                       &tf->tf_cit_tmpl.tfc_tpg_nacl_base_cit);
+                       &tf->tf_tpg_nacl_base_cit);
        config_group_init_type_name(&se_nacl->acl_attrib_group, "attrib",
-                       &tf->tf_cit_tmpl.tfc_tpg_nacl_attrib_cit);
+                       &tf->tf_tpg_nacl_attrib_cit);
        config_group_init_type_name(&se_nacl->acl_auth_group, "auth",
-                       &tf->tf_cit_tmpl.tfc_tpg_nacl_auth_cit);
+                       &tf->tf_tpg_nacl_auth_cit);
        config_group_init_type_name(&se_nacl->acl_param_group, "param",
-                       &tf->tf_cit_tmpl.tfc_tpg_nacl_param_cit);
+                       &tf->tf_tpg_nacl_param_cit);
        config_group_init_type_name(&se_nacl->acl_fabric_stat_group,
-                       "fabric_statistics",
-                       &tf->tf_cit_tmpl.tfc_tpg_nacl_stat_cit);
+                       "fabric_statistics", &tf->tf_tpg_nacl_stat_cit);
 
        return &se_nacl->acl_group;
 }
@@ -575,7 +549,7 @@ static void target_fabric_np_base_release(struct config_item *item)
        struct se_portal_group *se_tpg = se_tpg_np->tpg_np_parent;
        struct target_fabric_configfs *tf = se_tpg->se_tpg_wwn->wwn_tf;
 
-       tf->tf_ops.fabric_drop_np(se_tpg_np);
+       tf->tf_ops->fabric_drop_np(se_tpg_np);
 }
 
 static struct configfs_item_operations target_fabric_np_base_item_ops = {
@@ -599,18 +573,18 @@ static struct config_group *target_fabric_make_np(
        struct target_fabric_configfs *tf = se_tpg->se_tpg_wwn->wwn_tf;
        struct se_tpg_np *se_tpg_np;
 
-       if (!tf->tf_ops.fabric_make_np) {
+       if (!tf->tf_ops->fabric_make_np) {
                pr_err("tf->tf_ops.fabric_make_np is NULL\n");
                return ERR_PTR(-ENOSYS);
        }
 
-       se_tpg_np = tf->tf_ops.fabric_make_np(se_tpg, group, name);
+       se_tpg_np = tf->tf_ops->fabric_make_np(se_tpg, group, name);
        if (!se_tpg_np || IS_ERR(se_tpg_np))
                return ERR_PTR(-EINVAL);
 
        se_tpg_np->tpg_np_parent = se_tpg;
        config_group_init_type_name(&se_tpg_np->tpg_np_group, name,
-                       &tf->tf_cit_tmpl.tfc_tpg_np_base_cit);
+                       &tf->tf_tpg_np_base_cit);
 
        return &se_tpg_np->tpg_np_group;
 }
@@ -654,10 +628,10 @@ static ssize_t target_fabric_port_show_attr_alua_tg_pt_gp(
        struct se_lun *lun,
        char *page)
 {
-       if (!lun || !lun->lun_sep)
+       if (!lun || !lun->lun_se_dev)
                return -ENODEV;
 
-       return core_alua_show_tg_pt_gp_info(lun->lun_sep, page);
+       return core_alua_show_tg_pt_gp_info(lun, page);
 }
 
 static ssize_t target_fabric_port_store_attr_alua_tg_pt_gp(
@@ -665,10 +639,10 @@ static ssize_t target_fabric_port_store_attr_alua_tg_pt_gp(
        const char *page,
        size_t count)
 {
-       if (!lun || !lun->lun_sep)
+       if (!lun || !lun->lun_se_dev)
                return -ENODEV;
 
-       return core_alua_store_tg_pt_gp_info(lun->lun_sep, page, count);
+       return core_alua_store_tg_pt_gp_info(lun, page, count);
 }
 
 TCM_PORT_ATTR(alua_tg_pt_gp, S_IRUGO | S_IWUSR);
@@ -680,7 +654,7 @@ static ssize_t target_fabric_port_show_attr_alua_tg_pt_offline(
        struct se_lun *lun,
        char *page)
 {
-       if (!lun || !lun->lun_sep)
+       if (!lun || !lun->lun_se_dev)
                return -ENODEV;
 
        return core_alua_show_offline_bit(lun, page);
@@ -691,7 +665,7 @@ static ssize_t target_fabric_port_store_attr_alua_tg_pt_offline(
        const char *page,
        size_t count)
 {
-       if (!lun || !lun->lun_sep)
+       if (!lun || !lun->lun_se_dev)
                return -ENODEV;
 
        return core_alua_store_offline_bit(lun, page, count);
@@ -706,7 +680,7 @@ static ssize_t target_fabric_port_show_attr_alua_tg_pt_status(
        struct se_lun *lun,
        char *page)
 {
-       if (!lun || !lun->lun_sep)
+       if (!lun || !lun->lun_se_dev)
                return -ENODEV;
 
        return core_alua_show_secondary_status(lun, page);
@@ -717,7 +691,7 @@ static ssize_t target_fabric_port_store_attr_alua_tg_pt_status(
        const char *page,
        size_t count)
 {
-       if (!lun || !lun->lun_sep)
+       if (!lun || !lun->lun_se_dev)
                return -ENODEV;
 
        return core_alua_store_secondary_status(lun, page, count);
@@ -732,7 +706,7 @@ static ssize_t target_fabric_port_show_attr_alua_tg_pt_write_md(
        struct se_lun *lun,
        char *page)
 {
-       if (!lun || !lun->lun_sep)
+       if (!lun || !lun->lun_se_dev)
                return -ENODEV;
 
        return core_alua_show_secondary_write_metadata(lun, page);
@@ -743,7 +717,7 @@ static ssize_t target_fabric_port_store_attr_alua_tg_pt_write_md(
        const char *page,
        size_t count)
 {
-       if (!lun || !lun->lun_sep)
+       if (!lun || !lun->lun_se_dev)
                return -ENODEV;
 
        return core_alua_store_secondary_write_metadata(lun, page, count);
@@ -769,7 +743,6 @@ static int target_fabric_port_link(
        struct config_item *tpg_ci;
        struct se_lun *lun = container_of(to_config_group(lun_ci),
                                struct se_lun, lun_group);
-       struct se_lun *lun_p;
        struct se_portal_group *se_tpg;
        struct se_device *dev =
                container_of(to_config_group(se_dev_ci), struct se_device, dev_group);
@@ -797,20 +770,19 @@ static int target_fabric_port_link(
                return -EEXIST;
        }
 
-       lun_p = core_dev_add_lun(se_tpg, dev, lun->unpacked_lun);
-       if (IS_ERR(lun_p)) {
-               pr_err("core_dev_add_lun() failed\n");
-               ret = PTR_ERR(lun_p);
+       ret = core_dev_add_lun(se_tpg, dev, lun);
+       if (ret) {
+               pr_err("core_dev_add_lun() failed: %d\n", ret);
                goto out;
        }
 
-       if (tf->tf_ops.fabric_post_link) {
+       if (tf->tf_ops->fabric_post_link) {
                /*
                 * Call the optional fabric_post_link() to allow a
                 * fabric module to setup any additional state once
                 * core_dev_add_lun() has been called..
                 */
-               tf->tf_ops.fabric_post_link(se_tpg, lun);
+               tf->tf_ops->fabric_post_link(se_tpg, lun);
        }
 
        return 0;
@@ -824,25 +796,34 @@ static int target_fabric_port_unlink(
 {
        struct se_lun *lun = container_of(to_config_group(lun_ci),
                                struct se_lun, lun_group);
-       struct se_portal_group *se_tpg = lun->lun_sep->sep_tpg;
+       struct se_portal_group *se_tpg = lun->lun_tpg;
        struct target_fabric_configfs *tf = se_tpg->se_tpg_wwn->wwn_tf;
 
-       if (tf->tf_ops.fabric_pre_unlink) {
+       if (tf->tf_ops->fabric_pre_unlink) {
                /*
                 * Call the optional fabric_pre_unlink() to allow a
                 * fabric module to release any additional stat before
                 * core_dev_del_lun() is called.
                */
-               tf->tf_ops.fabric_pre_unlink(se_tpg, lun);
+               tf->tf_ops->fabric_pre_unlink(se_tpg, lun);
        }
 
        core_dev_del_lun(se_tpg, lun);
        return 0;
 }
 
+static void target_fabric_port_release(struct config_item *item)
+{
+       struct se_lun *lun = container_of(to_config_group(item),
+                                         struct se_lun, lun_group);
+
+       kfree_rcu(lun, rcu_head);
+}
+
 static struct configfs_item_operations target_fabric_port_item_ops = {
        .show_attribute         = target_fabric_port_attr_show,
        .store_attribute        = target_fabric_port_attr_store,
+       .release                = target_fabric_port_release,
        .allow_link             = target_fabric_port_link,
        .drop_link              = target_fabric_port_unlink,
 };
@@ -887,7 +868,7 @@ static struct config_group *target_fabric_make_lun(
                        struct se_portal_group, tpg_lun_group);
        struct target_fabric_configfs *tf = se_tpg->se_tpg_wwn->wwn_tf;
        struct config_group *lun_cg = NULL, *port_stat_grp = NULL;
-       unsigned long unpacked_lun;
+       unsigned long long unpacked_lun;
        int errno;
 
        if (strstr(name, "lun_") != name) {
@@ -895,28 +876,27 @@ static struct config_group *target_fabric_make_lun(
                                " \"lun_$LUN_NUMBER\"\n");
                return ERR_PTR(-EINVAL);
        }
-       errno = kstrtoul(name + 4, 0, &unpacked_lun);
+       errno = kstrtoull(name + 4, 0, &unpacked_lun);
        if (errno)
                return ERR_PTR(errno);
-       if (unpacked_lun > UINT_MAX)
-               return ERR_PTR(-EINVAL);
 
-       lun = core_get_lun_from_tpg(se_tpg, unpacked_lun);
-       if (!lun)
-               return ERR_PTR(-EINVAL);
+       lun = core_tpg_alloc_lun(se_tpg, unpacked_lun);
+       if (IS_ERR(lun))
+               return ERR_CAST(lun);
 
        lun_cg = &lun->lun_group;
        lun_cg->default_groups = kmalloc(sizeof(struct config_group *) * 2,
                                GFP_KERNEL);
        if (!lun_cg->default_groups) {
                pr_err("Unable to allocate lun_cg->default_groups\n");
+               kfree(lun);
                return ERR_PTR(-ENOMEM);
        }
 
        config_group_init_type_name(&lun->lun_group, name,
-                       &tf->tf_cit_tmpl.tfc_tpg_port_cit);
+                       &tf->tf_tpg_port_cit);
        config_group_init_type_name(&lun->port_stat_grps.stat_group,
-                       "statistics", &tf->tf_cit_tmpl.tfc_tpg_port_stat_cit);
+                       "statistics", &tf->tf_tpg_port_stat_cit);
        lun_cg->default_groups[0] = &lun->port_stat_grps.stat_group;
        lun_cg->default_groups[1] = NULL;
 
@@ -926,6 +906,7 @@ static struct config_group *target_fabric_make_lun(
        if (!port_stat_grp->default_groups) {
                pr_err("Unable to allocate port_stat_grp->default_groups\n");
                kfree(lun_cg->default_groups);
+               kfree(lun);
                return ERR_PTR(-ENOMEM);
        }
        target_stat_setup_port_default_groups(lun);
@@ -1023,7 +1004,7 @@ static void target_fabric_tpg_release(struct config_item *item)
        struct se_wwn *wwn = se_tpg->se_tpg_wwn;
        struct target_fabric_configfs *tf = wwn->wwn_tf;
 
-       tf->tf_ops.fabric_drop_tpg(se_tpg);
+       tf->tf_ops->fabric_drop_tpg(se_tpg);
 }
 
 static struct configfs_item_operations target_fabric_tpg_base_item_ops = {
@@ -1046,12 +1027,12 @@ static struct config_group *target_fabric_make_tpg(
        struct target_fabric_configfs *tf = wwn->wwn_tf;
        struct se_portal_group *se_tpg;
 
-       if (!tf->tf_ops.fabric_make_tpg) {
-               pr_err("tf->tf_ops.fabric_make_tpg is NULL\n");
+       if (!tf->tf_ops->fabric_make_tpg) {
+               pr_err("tf->tf_ops->fabric_make_tpg is NULL\n");
                return ERR_PTR(-ENOSYS);
        }
 
-       se_tpg = tf->tf_ops.fabric_make_tpg(wwn, group, name);
+       se_tpg = tf->tf_ops->fabric_make_tpg(wwn, group, name);
        if (!se_tpg || IS_ERR(se_tpg))
                return ERR_PTR(-EINVAL);
        /*
@@ -1067,19 +1048,19 @@ static struct config_group *target_fabric_make_tpg(
        se_tpg->tpg_group.default_groups[6] = NULL;
 
        config_group_init_type_name(&se_tpg->tpg_group, name,
-                       &tf->tf_cit_tmpl.tfc_tpg_base_cit);
+                       &tf->tf_tpg_base_cit);
        config_group_init_type_name(&se_tpg->tpg_lun_group, "lun",
-                       &tf->tf_cit_tmpl.tfc_tpg_lun_cit);
+                       &tf->tf_tpg_lun_cit);
        config_group_init_type_name(&se_tpg->tpg_np_group, "np",
-                       &tf->tf_cit_tmpl.tfc_tpg_np_cit);
+                       &tf->tf_tpg_np_cit);
        config_group_init_type_name(&se_tpg->tpg_acl_group, "acls",
-                       &tf->tf_cit_tmpl.tfc_tpg_nacl_cit);
+                       &tf->tf_tpg_nacl_cit);
        config_group_init_type_name(&se_tpg->tpg_attrib_group, "attrib",
-                       &tf->tf_cit_tmpl.tfc_tpg_attrib_cit);
+                       &tf->tf_tpg_attrib_cit);
        config_group_init_type_name(&se_tpg->tpg_auth_group, "auth",
-                       &tf->tf_cit_tmpl.tfc_tpg_auth_cit);
+                       &tf->tf_tpg_auth_cit);
        config_group_init_type_name(&se_tpg->tpg_param_group, "param",
-                       &tf->tf_cit_tmpl.tfc_tpg_param_cit);
+                       &tf->tf_tpg_param_cit);
 
        return &se_tpg->tpg_group;
 }
@@ -1112,7 +1093,7 @@ static void target_fabric_release_wwn(struct config_item *item)
                                struct se_wwn, wwn_group);
        struct target_fabric_configfs *tf = wwn->wwn_tf;
 
-       tf->tf_ops.fabric_drop_wwn(wwn);
+       tf->tf_ops->fabric_drop_wwn(wwn);
 }
 
 static struct configfs_item_operations target_fabric_tpg_item_ops = {
@@ -1148,12 +1129,12 @@ static struct config_group *target_fabric_make_wwn(
                                struct target_fabric_configfs, tf_group);
        struct se_wwn *wwn;
 
-       if (!tf->tf_ops.fabric_make_wwn) {
+       if (!tf->tf_ops->fabric_make_wwn) {
                pr_err("tf->tf_ops.fabric_make_wwn is NULL\n");
                return ERR_PTR(-ENOSYS);
        }
 
-       wwn = tf->tf_ops.fabric_make_wwn(tf, group, name);
+       wwn = tf->tf_ops->fabric_make_wwn(tf, group, name);
        if (!wwn || IS_ERR(wwn))
                return ERR_PTR(-EINVAL);
 
@@ -1165,10 +1146,9 @@ static struct config_group *target_fabric_make_wwn(
        wwn->wwn_group.default_groups[0] = &wwn->fabric_stat_group;
        wwn->wwn_group.default_groups[1] = NULL;
 
-       config_group_init_type_name(&wwn->wwn_group, name,
-                       &tf->tf_cit_tmpl.tfc_tpg_cit);
+       config_group_init_type_name(&wwn->wwn_group, name, &tf->tf_tpg_cit);
        config_group_init_type_name(&wwn->fabric_stat_group, "fabric_statistics",
-                       &tf->tf_cit_tmpl.tfc_wwn_fabric_stats_cit);
+                       &tf->tf_wwn_fabric_stats_cit);
 
        return &wwn->wwn_group;
 }
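
target_fabric_make_lun() above now takes the full 64-bit LUN range: the configfs name must still begin with "lun_", but the number is parsed with kstrtoull() and the old UINT_MAX cap is gone. A minimal userspace sketch of the same check and parse, assuming nothing beyond standard C; parse_lun_name() and its return values are illustrative, not kernel code:

    /* Illustrative userspace analogue of the "lun_$LUN_NUMBER" parsing above. */
    #include <errno.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    static int parse_lun_name(const char *name, unsigned long long *lun)
    {
            char *end;

            if (strncmp(name, "lun_", 4) != 0)      /* name must start with "lun_" */
                    return -EINVAL;

            errno = 0;
            *lun = strtoull(name + 4, &end, 0);     /* kstrtoull() equivalent */
            if (errno || end == name + 4 || *end != '\0')
                    return -EINVAL;

            return 0;   /* no UINT_MAX cap: LUN numbers are 64-bit now */
    }

    int main(void)
    {
            unsigned long long lun;

            if (parse_lun_name("lun_4294967296", &lun) == 0)
                    printf("parsed LUN %llu\n", lun);   /* larger than UINT_MAX, now valid */
            return 0;
    }
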
index 41f4f270f91982746e8ce237f22bd1680d51722e..cb6497ce4b61a81cbb9eba00bd79d0ecbc08f847 100644 (file)
  *
  ******************************************************************************/
 
+/*
+ * See SPC4, section 7.5 "Protocol specific parameters" for details
+ * on the formats implemented in this file.
+ */
+
 #include <linux/kernel.h>
 #include <linux/string.h>
 #include <linux/ctype.h>
 
 #include <target/target_core_base.h>
 #include <target/target_core_fabric.h>
-#include <target/target_core_configfs.h>
 
 #include "target_core_internal.h"
 #include "target_core_pr.h"
 
-/*
- * Handlers for Serial Attached SCSI (SAS)
- */
-u8 sas_get_fabric_proto_ident(struct se_portal_group *se_tpg)
-{
-       /*
-        * Return a SAS Serial SCSI Protocol identifier for loopback operations
-        * This is defined in  section 7.5.1 Table 362 in spc4r17
-        */
-       return 0x6;
-}
-EXPORT_SYMBOL(sas_get_fabric_proto_ident);
 
-u32 sas_get_pr_transport_id(
-       struct se_portal_group *se_tpg,
-       struct se_node_acl *se_nacl,
-       struct t10_pr_registration *pr_reg,
+static int sas_get_pr_transport_id(
+       struct se_node_acl *nacl,
        int *format_code,
        unsigned char *buf)
 {
-       unsigned char *ptr;
        int ret;
 
-       /*
-        * Set PROTOCOL IDENTIFIER to 6h for SAS
-        */
-       buf[0] = 0x06;
-       /*
-        * From spc4r17, 7.5.4.7 TransportID for initiator ports using SCSI
-        * over SAS Serial SCSI Protocol
-        */
-       ptr = &se_nacl->initiatorname[4]; /* Skip over 'naa. prefix */
-
-       ret = hex2bin(&buf[4], ptr, 8);
-       if (ret < 0)
-               pr_debug("sas transport_id: invalid hex string\n");
-
-       /*
-        * The SAS Transport ID is a hardcoded 24-byte length
-        */
-       return 24;
-}
-EXPORT_SYMBOL(sas_get_pr_transport_id);
-
-u32 sas_get_pr_transport_id_len(
-       struct se_portal_group *se_tpg,
-       struct se_node_acl *se_nacl,
-       struct t10_pr_registration *pr_reg,
-       int *format_code)
-{
-       *format_code = 0;
-       /*
-        * From spc4r17, 7.5.4.7 TransportID for initiator ports using SCSI
-        * over SAS Serial SCSI Protocol
-        *
-        * The SAS Transport ID is a hardcoded 24-byte length
-        */
-       return 24;
-}
-EXPORT_SYMBOL(sas_get_pr_transport_id_len);
-
-/*
- * Used for handling SCSI fabric dependent TransportIDs in SPC-3 and above
- * Persistent Reservation SPEC_I_PT=1 and PROUT REGISTER_AND_MOVE operations.
- */
-char *sas_parse_pr_out_transport_id(
-       struct se_portal_group *se_tpg,
-       const char *buf,
-       u32 *out_tid_len,
-       char **port_nexus_ptr)
-{
-       /*
-        * Assume the FORMAT CODE 00b from spc4r17, 7.5.4.7 TransportID
-        * for initiator ports using SCSI over SAS Serial SCSI Protocol
-        *
-        * The TransportID for a SAS Initiator Port is of fixed size of
-        * 24 bytes, and SAS does not contain a I_T nexus identifier,
-        * so we return the **port_nexus_ptr set to NULL.
-        */
-       *port_nexus_ptr = NULL;
-       *out_tid_len = 24;
-
-       return (char *)&buf[4];
-}
-EXPORT_SYMBOL(sas_parse_pr_out_transport_id);
-
-/*
- * Handlers for Fibre Channel Protocol (FCP)
- */
-u8 fc_get_fabric_proto_ident(struct se_portal_group *se_tpg)
-{
-       return 0x0;     /* 0 = fcp-2 per SPC4 section 7.5.1 */
-}
-EXPORT_SYMBOL(fc_get_fabric_proto_ident);
+       /* Skip over the 'naa.' prefix */
+       ret = hex2bin(&buf[4], &nacl->initiatorname[4], 8);
+       if (ret) {
+               pr_debug("%s: invalid hex string\n", __func__);
+               return ret;
+       }
 
-u32 fc_get_pr_transport_id_len(
-       struct se_portal_group *se_tpg,
-       struct se_node_acl *se_nacl,
-       struct t10_pr_registration *pr_reg,
-       int *format_code)
-{
-       *format_code = 0;
-       /*
-        * The FC Transport ID is a hardcoded 24-byte length
-        */
        return 24;
 }
-EXPORT_SYMBOL(fc_get_pr_transport_id_len);
 
-u32 fc_get_pr_transport_id(
-       struct se_portal_group *se_tpg,
+static int fc_get_pr_transport_id(
        struct se_node_acl *se_nacl,
-       struct t10_pr_registration *pr_reg,
        int *format_code,
        unsigned char *buf)
 {
@@ -160,24 +71,20 @@ u32 fc_get_pr_transport_id(
        u32 off = 8;
 
        /*
-        * PROTOCOL IDENTIFIER is 0h for FCP-2
-        *
-        * From spc4r17, 7.5.4.2 TransportID for initiator ports using
-        * SCSI over Fibre Channel
-        *
         * We convert the ASCII formatted N Port name into a binary
         * encoded TransportID.
         */
        ptr = &se_nacl->initiatorname[0];
-
        for (i = 0; i < 24; ) {
                if (!strncmp(&ptr[i], ":", 1)) {
                        i++;
                        continue;
                }
                ret = hex2bin(&buf[off++], &ptr[i], 1);
-               if (ret < 0)
-                       pr_debug("fc transport_id: invalid hex string\n");
+               if (ret < 0) {
+                       pr_debug("%s: invalid hex string\n", __func__);
+                       return ret;
+               }
                i += 2;
        }
        /*
@@ -185,42 +92,52 @@ u32 fc_get_pr_transport_id(
         */
        return 24;
 }
-EXPORT_SYMBOL(fc_get_pr_transport_id);
 
-char *fc_parse_pr_out_transport_id(
-       struct se_portal_group *se_tpg,
-       const char *buf,
-       u32 *out_tid_len,
-       char **port_nexus_ptr)
+static int sbp_get_pr_transport_id(
+       struct se_node_acl *nacl,
+       int *format_code,
+       unsigned char *buf)
 {
-       /*
-        * The TransportID for a FC N Port is of fixed size of
-        * 24 bytes, and FC does not contain a I_T nexus identifier,
-        * so we return the **port_nexus_ptr set to NULL.
-        */
-       *port_nexus_ptr = NULL;
-       *out_tid_len = 24;
+       int ret;
 
-        return (char *)&buf[8];
-}
-EXPORT_SYMBOL(fc_parse_pr_out_transport_id);
+       ret = hex2bin(&buf[8], nacl->initiatorname, 8);
+       if (ret) {
+               pr_debug("%s: invalid hex string\n", __func__);
+               return ret;
+       }
 
-/*
- * Handlers for Internet Small Computer Systems Interface (iSCSI)
- */
+       return 24;
+}
 
-u8 iscsi_get_fabric_proto_ident(struct se_portal_group *se_tpg)
+static int srp_get_pr_transport_id(
+       struct se_node_acl *nacl,
+       int *format_code,
+       unsigned char *buf)
 {
-       /*
-        * This value is defined for "Internet SCSI (iSCSI)"
-        * in spc4r17 section 7.5.1 Table 362
-        */
-       return 0x5;
+       const char *p;
+       unsigned len, count, leading_zero_bytes;
+       int rc;
+
+       p = nacl->initiatorname;
+       if (strncasecmp(p, "0x", 2) == 0)
+               p += 2;
+       len = strlen(p);
+       if (len % 2)
+               return -EINVAL;
+
+       count = min(len / 2, 16U);
+       leading_zero_bytes = 16 - count;
+       memset(buf + 8, 0, leading_zero_bytes);
+       rc = hex2bin(buf + 8 + leading_zero_bytes, p, count);
+       if (rc < 0) {
+               pr_debug("hex2bin failed for %s: %d\n", __func__, rc);
+               return rc;
+       }
+
+       return 24;
 }
-EXPORT_SYMBOL(iscsi_get_fabric_proto_ident);
 
-u32 iscsi_get_pr_transport_id(
-       struct se_portal_group *se_tpg,
+static int iscsi_get_pr_transport_id(
        struct se_node_acl *se_nacl,
        struct t10_pr_registration *pr_reg,
        int *format_code,
@@ -230,10 +147,6 @@ u32 iscsi_get_pr_transport_id(
        u16 len = 0;
 
        spin_lock_irq(&se_nacl->nacl_sess_lock);
-       /*
-        * Set PROTOCOL IDENTIFIER to 5h for iSCSI
-       */
-       buf[0] = 0x05;
        /*
         * From spc4r17 Section 7.5.4.6: TransportID for initiator
         * ports using SCSI over iSCSI.
@@ -313,10 +226,8 @@ u32 iscsi_get_pr_transport_id(
 
        return len;
 }
-EXPORT_SYMBOL(iscsi_get_pr_transport_id);
 
-u32 iscsi_get_pr_transport_id_len(
-       struct se_portal_group *se_tpg,
+static int iscsi_get_pr_transport_id_len(
        struct se_node_acl *se_nacl,
        struct t10_pr_registration *pr_reg,
        int *format_code)
@@ -359,9 +270,8 @@ u32 iscsi_get_pr_transport_id_len(
 
        return len;
 }
-EXPORT_SYMBOL(iscsi_get_pr_transport_id_len);
 
-char *iscsi_parse_pr_out_transport_id(
+static char *iscsi_parse_pr_out_transport_id(
        struct se_portal_group *se_tpg,
        const char *buf,
        u32 *out_tid_len,
@@ -448,4 +358,79 @@ char *iscsi_parse_pr_out_transport_id(
 
        return (char *)&buf[4];
 }
-EXPORT_SYMBOL(iscsi_parse_pr_out_transport_id);
+
+int target_get_pr_transport_id_len(struct se_node_acl *nacl,
+               struct t10_pr_registration *pr_reg, int *format_code)
+{
+       switch (nacl->se_tpg->proto_id) {
+       case SCSI_PROTOCOL_FCP:
+       case SCSI_PROTOCOL_SBP:
+       case SCSI_PROTOCOL_SRP:
+       case SCSI_PROTOCOL_SAS:
+               break;
+       case SCSI_PROTOCOL_ISCSI:
+               return iscsi_get_pr_transport_id_len(nacl, pr_reg, format_code);
+       default:
+               pr_err("Unknown proto_id: 0x%02x\n", nacl->se_tpg->proto_id);
+               return -EINVAL;
+       }
+
+       /*
+        * Most transports use a fixed length 24 byte identifier.
+        */
+       *format_code = 0;
+       return 24;
+}
+
+int target_get_pr_transport_id(struct se_node_acl *nacl,
+               struct t10_pr_registration *pr_reg, int *format_code,
+               unsigned char *buf)
+{
+       switch (nacl->se_tpg->proto_id) {
+       case SCSI_PROTOCOL_SAS:
+               return sas_get_pr_transport_id(nacl, format_code, buf);
+       case SCSI_PROTOCOL_SBP:
+               return sbp_get_pr_transport_id(nacl, format_code, buf);
+       case SCSI_PROTOCOL_SRP:
+               return srp_get_pr_transport_id(nacl, format_code, buf);
+       case SCSI_PROTOCOL_FCP:
+               return fc_get_pr_transport_id(nacl, format_code, buf);
+       case SCSI_PROTOCOL_ISCSI:
+               return iscsi_get_pr_transport_id(nacl, pr_reg, format_code,
+                               buf);
+       default:
+               pr_err("Unknown proto_id: 0x%02x\n", nacl->se_tpg->proto_id);
+               return -EINVAL;
+       }
+}
+
+const char *target_parse_pr_out_transport_id(struct se_portal_group *tpg,
+               const char *buf, u32 *out_tid_len, char **port_nexus_ptr)
+{
+       u32 offset;
+
+       switch (tpg->proto_id) {
+       case SCSI_PROTOCOL_SAS:
+               /*
+                * Assume the FORMAT CODE 00b from spc4r17, 7.5.4.7 TransportID
+                * for initiator ports using SCSI over SAS Serial SCSI Protocol.
+                */
+               offset = 4;
+               break;
+       case SCSI_PROTOCOL_SBP:
+       case SCSI_PROTOCOL_SRP:
+       case SCSI_PROTOCOL_FCP:
+               offset = 8;
+               break;
+       case SCSI_PROTOCOL_ISCSI:
+               return iscsi_parse_pr_out_transport_id(tpg, buf, out_tid_len,
+                                       port_nexus_ptr);
+       default:
+               pr_err("Unknown proto_id: 0x%02x\n", tpg->proto_id);
+               return NULL;
+       }
+
+       *port_nexus_ptr = NULL;
+       *out_tid_len = 24;
+       return buf + offset;
+}
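
The per-fabric TransportID helpers above are now static and selected by tpg->proto_id. The SAS format they implement (SPC-4 7.5.4.7) is a fixed 24-byte identifier: PROTOCOL IDENTIFIER 6h in byte 0 and the binary NAA WWN at offset 4, the rest zero. A standalone userspace sketch of that layout; build_sas_tid() and hexval() are illustrative stand-ins for the kernel's hex2bin() path, and the example WWN is made up:

    #include <stdio.h>
    #include <string.h>

    static int hexval(char c)
    {
            if (c >= '0' && c <= '9') return c - '0';
            if (c >= 'a' && c <= 'f') return c - 'a' + 10;
            if (c >= 'A' && c <= 'F') return c - 'A' + 10;
            return -1;
    }

    /* initiator name: "naa." followed by 16 hex digits */
    static int build_sas_tid(const char *initiator, unsigned char buf[24])
    {
            int i, hi, lo;

            if (strncmp(initiator, "naa.", 4) != 0 || strlen(initiator) < 20)
                    return -1;

            memset(buf, 0, 24);
            buf[0] = 0x06;                          /* PROTOCOL IDENTIFIER: SAS */
            for (i = 0; i < 8; i++) {               /* like hex2bin(&buf[4], name + 4, 8) */
                    hi = hexval(initiator[4 + 2 * i]);
                    lo = hexval(initiator[5 + 2 * i]);
                    if (hi < 0 || lo < 0)
                            return -1;
                    buf[4 + i] = (unsigned char)((hi << 4) | lo);
            }
            return 24;                              /* TransportID length is fixed */
    }

    int main(void)
    {
            unsigned char tid[24];
            int i, len = build_sas_tid("naa.6001405abcdef012", tid);

            for (i = 0; i < len; i++)
                    printf("%02x%c", tid[i], (i % 8 == 7) ? '\n' : ' ');
            return 0;
    }
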
index 66417135328964d589023346b80d64b31f6a6925..e3195700211a3ebc38192391bf6488ed6ae86099 100644 (file)
@@ -37,7 +37,6 @@
 
 #include <target/target_core_base.h>
 #include <target/target_core_backend.h>
-#include <target/target_core_backend_configfs.h>
 
 #include "target_core_file.h"
 
@@ -46,10 +45,6 @@ static inline struct fd_dev *FD_DEV(struct se_device *dev)
        return container_of(dev, struct fd_dev, dev);
 }
 
-/*     fd_attach_hba(): (Part of se_subsystem_api_t template)
- *
- *
- */
 static int fd_attach_hba(struct se_hba *hba, u32 host_id)
 {
        struct fd_host *fd_host;
@@ -66,7 +61,7 @@ static int fd_attach_hba(struct se_hba *hba, u32 host_id)
 
        pr_debug("CORE_HBA[%d] - TCM FILEIO HBA Driver %s on Generic"
                " Target Core Stack %s\n", hba->hba_id, FD_VERSION,
-               TARGET_CORE_MOD_VERSION);
+               TARGET_CORE_VERSION);
        pr_debug("CORE_HBA[%d] - Attached FILEIO HBA: %u to Generic\n",
                hba->hba_id, fd_host->fd_host_id);
 
@@ -246,87 +241,34 @@ fail:
        return ret;
 }
 
-static void fd_free_device(struct se_device *dev)
+static void fd_dev_call_rcu(struct rcu_head *p)
 {
+       struct se_device *dev = container_of(p, struct se_device, rcu_head);
        struct fd_dev *fd_dev = FD_DEV(dev);
 
-       if (fd_dev->fd_file) {
-               filp_close(fd_dev->fd_file, NULL);
-               fd_dev->fd_file = NULL;
-       }
-
        kfree(fd_dev);
 }
 
-static int fd_do_prot_rw(struct se_cmd *cmd, struct fd_prot *fd_prot,
-                        int is_write)
+static void fd_free_device(struct se_device *dev)
 {
-       struct se_device *se_dev = cmd->se_dev;
-       struct fd_dev *dev = FD_DEV(se_dev);
-       struct file *prot_fd = dev->fd_prot_file;
-       loff_t pos = (cmd->t_task_lba * se_dev->prot_length);
-       unsigned char *buf;
-       u32 prot_size;
-       int rc, ret = 1;
-
-       prot_size = (cmd->data_length / se_dev->dev_attrib.block_size) *
-                    se_dev->prot_length;
-
-       if (!is_write) {
-               fd_prot->prot_buf = kzalloc(prot_size, GFP_KERNEL);
-               if (!fd_prot->prot_buf) {
-                       pr_err("Unable to allocate fd_prot->prot_buf\n");
-                       return -ENOMEM;
-               }
-               buf = fd_prot->prot_buf;
-
-               fd_prot->prot_sg_nents = 1;
-               fd_prot->prot_sg = kzalloc(sizeof(struct scatterlist),
-                                          GFP_KERNEL);
-               if (!fd_prot->prot_sg) {
-                       pr_err("Unable to allocate fd_prot->prot_sg\n");
-                       kfree(fd_prot->prot_buf);
-                       return -ENOMEM;
-               }
-               sg_init_table(fd_prot->prot_sg, fd_prot->prot_sg_nents);
-               sg_set_buf(fd_prot->prot_sg, buf, prot_size);
-       }
-
-       if (is_write) {
-               rc = kernel_write(prot_fd, fd_prot->prot_buf, prot_size, pos);
-               if (rc < 0 || prot_size != rc) {
-                       pr_err("kernel_write() for fd_do_prot_rw failed:"
-                              " %d\n", rc);
-                       ret = -EINVAL;
-               }
-       } else {
-               rc = kernel_read(prot_fd, pos, fd_prot->prot_buf, prot_size);
-               if (rc < 0) {
-                       pr_err("kernel_read() for fd_do_prot_rw failed:"
-                              " %d\n", rc);
-                       ret = -EINVAL;
-               }
-       }
+       struct fd_dev *fd_dev = FD_DEV(dev);
 
-       if (is_write || ret < 0) {
-               kfree(fd_prot->prot_sg);
-               kfree(fd_prot->prot_buf);
+       if (fd_dev->fd_file) {
+               filp_close(fd_dev->fd_file, NULL);
+               fd_dev->fd_file = NULL;
        }
-
-       return ret;
+       call_rcu(&dev->rcu_head, fd_dev_call_rcu);
 }
 
-static int fd_do_rw(struct se_cmd *cmd, struct scatterlist *sgl,
-               u32 sgl_nents, int is_write)
+static int fd_do_rw(struct se_cmd *cmd, struct file *fd,
+                   u32 block_size, struct scatterlist *sgl,
+                   u32 sgl_nents, u32 data_length, int is_write)
 {
-       struct se_device *se_dev = cmd->se_dev;
-       struct fd_dev *dev = FD_DEV(se_dev);
-       struct file *fd = dev->fd_file;
        struct scatterlist *sg;
        struct iov_iter iter;
        struct bio_vec *bvec;
        ssize_t len = 0;
-       loff_t pos = (cmd->t_task_lba * se_dev->dev_attrib.block_size);
+       loff_t pos = (cmd->t_task_lba * block_size);
        int ret = 0, i;
 
        bvec = kcalloc(sgl_nents, sizeof(struct bio_vec), GFP_KERNEL);
@@ -352,7 +294,7 @@ static int fd_do_rw(struct se_cmd *cmd, struct scatterlist *sgl,
        kfree(bvec);
 
        if (is_write) {
-               if (ret < 0 || ret != cmd->data_length) {
+               if (ret < 0 || ret != data_length) {
                        pr_err("%s() write returned %d\n", __func__, ret);
                        return (ret < 0 ? ret : -EINVAL);
                }
@@ -363,10 +305,10 @@ static int fd_do_rw(struct se_cmd *cmd, struct scatterlist *sgl,
                 * block_device.
                 */
                if (S_ISBLK(file_inode(fd)->i_mode)) {
-                       if (ret < 0 || ret != cmd->data_length) {
+                       if (ret < 0 || ret != data_length) {
                                pr_err("%s() returned %d, expecting %u for "
                                                "S_ISBLK\n", __func__, ret,
-                                               cmd->data_length);
+                                               data_length);
                                return (ret < 0 ? ret : -EINVAL);
                        }
                } else {
@@ -533,9 +475,9 @@ fd_do_prot_unmap(struct se_cmd *cmd, sector_t lba, sector_t nolb)
 }
 
 static sense_reason_t
-fd_do_unmap(struct se_cmd *cmd, void *priv, sector_t lba, sector_t nolb)
+fd_execute_unmap(struct se_cmd *cmd, sector_t lba, sector_t nolb)
 {
-       struct file *file = priv;
+       struct file *file = FD_DEV(cmd->se_dev)->fd_file;
        struct inode *inode = file->f_mapping->host;
        int ret;
 
@@ -576,43 +518,14 @@ fd_do_unmap(struct se_cmd *cmd, void *priv, sector_t lba, sector_t nolb)
        return 0;
 }
 
-static sense_reason_t
-fd_execute_write_same_unmap(struct se_cmd *cmd)
-{
-       struct se_device *se_dev = cmd->se_dev;
-       struct fd_dev *fd_dev = FD_DEV(se_dev);
-       struct file *file = fd_dev->fd_file;
-       sector_t lba = cmd->t_task_lba;
-       sector_t nolb = sbc_get_write_same_sectors(cmd);
-       sense_reason_t ret;
-
-       if (!nolb) {
-               target_complete_cmd(cmd, SAM_STAT_GOOD);
-               return 0;
-       }
-
-       ret = fd_do_unmap(cmd, file, lba, nolb);
-       if (ret)
-               return ret;
-
-       target_complete_cmd(cmd, GOOD);
-       return 0;
-}
-
-static sense_reason_t
-fd_execute_unmap(struct se_cmd *cmd)
-{
-       struct file *file = FD_DEV(cmd->se_dev)->fd_file;
-
-       return sbc_execute_unmap(cmd, fd_do_unmap, file);
-}
-
 static sense_reason_t
 fd_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
              enum dma_data_direction data_direction)
 {
        struct se_device *dev = cmd->se_dev;
-       struct fd_prot fd_prot;
+       struct fd_dev *fd_dev = FD_DEV(dev);
+       struct file *file = fd_dev->fd_file;
+       struct file *pfile = fd_dev->fd_prot_file;
        sense_reason_t rc;
        int ret = 0;
        /*
@@ -630,58 +543,45 @@ fd_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
         * physical memory addresses to struct iovec virtual memory.
         */
        if (data_direction == DMA_FROM_DEVICE) {
-               memset(&fd_prot, 0, sizeof(struct fd_prot));
-
                if (cmd->prot_type && dev->dev_attrib.pi_prot_type) {
-                       ret = fd_do_prot_rw(cmd, &fd_prot, false);
+                       ret = fd_do_rw(cmd, pfile, dev->prot_length,
+                                      cmd->t_prot_sg, cmd->t_prot_nents,
+                                      cmd->prot_length, 0);
                        if (ret < 0)
                                return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
                }
 
-               ret = fd_do_rw(cmd, sgl, sgl_nents, 0);
+               ret = fd_do_rw(cmd, file, dev->dev_attrib.block_size,
+                              sgl, sgl_nents, cmd->data_length, 0);
 
                if (ret > 0 && cmd->prot_type && dev->dev_attrib.pi_prot_type) {
-                       u32 sectors = cmd->data_length / dev->dev_attrib.block_size;
+                       u32 sectors = cmd->data_length >>
+                                       ilog2(dev->dev_attrib.block_size);
 
-                       rc = sbc_dif_verify_read(cmd, cmd->t_task_lba, sectors,
-                                                0, fd_prot.prot_sg, 0);
-                       if (rc) {
-                               kfree(fd_prot.prot_sg);
-                               kfree(fd_prot.prot_buf);
+                       rc = sbc_dif_verify(cmd, cmd->t_task_lba, sectors,
+                                           0, cmd->t_prot_sg, 0);
+                       if (rc)
                                return rc;
-                       }
-                       kfree(fd_prot.prot_sg);
-                       kfree(fd_prot.prot_buf);
                }
        } else {
-               memset(&fd_prot, 0, sizeof(struct fd_prot));
-
                if (cmd->prot_type && dev->dev_attrib.pi_prot_type) {
-                       u32 sectors = cmd->data_length / dev->dev_attrib.block_size;
+                       u32 sectors = cmd->data_length >>
+                                       ilog2(dev->dev_attrib.block_size);
 
-                       ret = fd_do_prot_rw(cmd, &fd_prot, false);
-                       if (ret < 0)
-                               return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
-
-                       rc = sbc_dif_verify_write(cmd, cmd->t_task_lba, sectors,
-                                                 0, fd_prot.prot_sg, 0);
-                       if (rc) {
-                               kfree(fd_prot.prot_sg);
-                               kfree(fd_prot.prot_buf);
+                       rc = sbc_dif_verify(cmd, cmd->t_task_lba, sectors,
+                                           0, cmd->t_prot_sg, 0);
+                       if (rc)
                                return rc;
-                       }
                }
 
-               ret = fd_do_rw(cmd, sgl, sgl_nents, 1);
+               ret = fd_do_rw(cmd, file, dev->dev_attrib.block_size,
+                              sgl, sgl_nents, cmd->data_length, 1);
                /*
                 * Perform implicit vfs_fsync_range() for fd_do_writev() ops
                 * for SCSI WRITEs with Forced Unit Access (FUA) set.
                 * Allow this to happen independent of WCE=0 setting.
                 */
-               if (ret > 0 &&
-                   dev->dev_attrib.emulate_fua_write > 0 &&
-                   (cmd->se_cmd_flags & SCF_FUA)) {
-                       struct fd_dev *fd_dev = FD_DEV(dev);
+               if (ret > 0 && (cmd->se_cmd_flags & SCF_FUA)) {
                        loff_t start = cmd->t_task_lba *
                                dev->dev_attrib.block_size;
                        loff_t end;
@@ -695,17 +595,16 @@ fd_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
                }
 
                if (ret > 0 && cmd->prot_type && dev->dev_attrib.pi_prot_type) {
-                       ret = fd_do_prot_rw(cmd, &fd_prot, true);
+                       ret = fd_do_rw(cmd, pfile, dev->prot_length,
+                                      cmd->t_prot_sg, cmd->t_prot_nents,
+                                      cmd->prot_length, 1);
                        if (ret < 0)
                                return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
                }
        }
 
-       if (ret < 0) {
-               kfree(fd_prot.prot_sg);
-               kfree(fd_prot.prot_buf);
+       if (ret < 0)
                return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
-       }
 
        if (ret)
                target_complete_cmd(cmd, SAM_STAT_GOOD);
@@ -908,7 +807,6 @@ static struct sbc_ops fd_sbc_ops = {
        .execute_rw             = fd_execute_rw,
        .execute_sync_cache     = fd_execute_sync_cache,
        .execute_write_same     = fd_execute_write_same,
-       .execute_write_same_unmap = fd_execute_write_same_unmap,
        .execute_unmap          = fd_execute_unmap,
 };
 
@@ -918,42 +816,7 @@ fd_parse_cdb(struct se_cmd *cmd)
        return sbc_parse_cdb(cmd, &fd_sbc_ops);
 }
 
-DEF_TB_DEFAULT_ATTRIBS(fileio);
-
-static struct configfs_attribute *fileio_backend_dev_attrs[] = {
-       &fileio_dev_attrib_emulate_model_alias.attr,
-       &fileio_dev_attrib_emulate_dpo.attr,
-       &fileio_dev_attrib_emulate_fua_write.attr,
-       &fileio_dev_attrib_emulate_fua_read.attr,
-       &fileio_dev_attrib_emulate_write_cache.attr,
-       &fileio_dev_attrib_emulate_ua_intlck_ctrl.attr,
-       &fileio_dev_attrib_emulate_tas.attr,
-       &fileio_dev_attrib_emulate_tpu.attr,
-       &fileio_dev_attrib_emulate_tpws.attr,
-       &fileio_dev_attrib_emulate_caw.attr,
-       &fileio_dev_attrib_emulate_3pc.attr,
-       &fileio_dev_attrib_pi_prot_type.attr,
-       &fileio_dev_attrib_hw_pi_prot_type.attr,
-       &fileio_dev_attrib_pi_prot_format.attr,
-       &fileio_dev_attrib_enforce_pr_isids.attr,
-       &fileio_dev_attrib_is_nonrot.attr,
-       &fileio_dev_attrib_emulate_rest_reord.attr,
-       &fileio_dev_attrib_force_pr_aptpl.attr,
-       &fileio_dev_attrib_hw_block_size.attr,
-       &fileio_dev_attrib_block_size.attr,
-       &fileio_dev_attrib_hw_max_sectors.attr,
-       &fileio_dev_attrib_optimal_sectors.attr,
-       &fileio_dev_attrib_hw_queue_depth.attr,
-       &fileio_dev_attrib_queue_depth.attr,
-       &fileio_dev_attrib_max_unmap_lba_count.attr,
-       &fileio_dev_attrib_max_unmap_block_desc_count.attr,
-       &fileio_dev_attrib_unmap_granularity.attr,
-       &fileio_dev_attrib_unmap_granularity_alignment.attr,
-       &fileio_dev_attrib_max_write_same_len.attr,
-       NULL,
-};
-
-static struct se_subsystem_api fileio_template = {
+static const struct target_backend_ops fileio_ops = {
        .name                   = "fileio",
        .inquiry_prod           = "FILEIO",
        .inquiry_rev            = FD_VERSION,
@@ -971,21 +834,17 @@ static struct se_subsystem_api fileio_template = {
        .init_prot              = fd_init_prot,
        .format_prot            = fd_format_prot,
        .free_prot              = fd_free_prot,
+       .tb_dev_attrib_attrs    = sbc_attrib_attrs,
 };
 
 static int __init fileio_module_init(void)
 {
-       struct target_backend_cits *tbc = &fileio_template.tb_cits;
-
-       target_core_setup_sub_cits(&fileio_template);
-       tbc->tb_dev_attrib_cit.ct_attrs = fileio_backend_dev_attrs;
-
-       return transport_subsystem_register(&fileio_template);
+       return transport_backend_register(&fileio_ops);
 }
 
 static void __exit fileio_module_exit(void)
 {
-       transport_subsystem_release(&fileio_template);
+       target_backend_unregister(&fileio_ops);
 }
 
 MODULE_DESCRIPTION("TCM FILEIO subsystem plugin");
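
fd_execute_rw() above now derives sector counts with cmd->data_length >> ilog2(block_size) rather than a division; the two are equivalent for power-of-two block sizes. A quick userspace check, with my_ilog2() standing in for the kernel's ilog2():

    #include <assert.h>
    #include <stdio.h>

    static unsigned int my_ilog2(unsigned int v)
    {
            unsigned int r = 0;

            while (v >>= 1)         /* floor(log2) of the original value */
                    r++;
            return r;
    }

    int main(void)
    {
            unsigned int data_length = 1048576;     /* 1 MiB transfer */
            unsigned int block_sizes[] = { 512, 1024, 2048, 4096 };

            for (int i = 0; i < 4; i++) {
                    unsigned int bs = block_sizes[i];

                    assert(data_length / bs == (data_length >> my_ilog2(bs)));
                    printf("bs=%u sectors=%u\n", bs, data_length >> my_ilog2(bs));
            }
            return 0;
    }
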
index 182cbb2950395efa43630364ba3d3f78aca3f925..068966fce3089527fb7f14dd7bd3aa0fafdd1041 100644 (file)
 #define FDBD_HAS_BUFFERED_IO_WCE 0x04
 #define FDBD_FORMAT_UNIT_SIZE  2048
 
-struct fd_prot {
-       unsigned char   *prot_buf;
-       struct scatterlist *prot_sg;
-       u32 prot_sg_nents;
-};
-
 struct fd_dev {
        struct se_device dev;
 
index ff95f95dcd13d571a59c70b7f6a7b1c6d0a6120d..62ea4e8e70a8935398f2a0e86fc44627dfa3368e 100644 (file)
 #include <target/target_core_base.h>
 #include <target/target_core_backend.h>
 #include <target/target_core_fabric.h>
-#include <target/target_core_configfs.h>
 
 #include "target_core_internal.h"
 
-static LIST_HEAD(subsystem_list);
-static DEFINE_MUTEX(subsystem_mutex);
+static LIST_HEAD(backend_list);
+static DEFINE_MUTEX(backend_mutex);
 
 static u32 hba_id_counter;
 
 static DEFINE_SPINLOCK(hba_lock);
 static LIST_HEAD(hba_list);
 
-int transport_subsystem_register(struct se_subsystem_api *sub_api)
-{
-       struct se_subsystem_api *s;
-
-       INIT_LIST_HEAD(&sub_api->sub_api_list);
 
-       mutex_lock(&subsystem_mutex);
-       list_for_each_entry(s, &subsystem_list, sub_api_list) {
-               if (!strcmp(s->name, sub_api->name)) {
-                       pr_err("%p is already registered with"
-                               " duplicate name %s, unable to process"
-                               " request\n", s, s->name);
-                       mutex_unlock(&subsystem_mutex);
+int transport_backend_register(const struct target_backend_ops *ops)
+{
+       struct target_backend *tb, *old;
+
+       tb = kzalloc(sizeof(*tb), GFP_KERNEL);
+       if (!tb)
+               return -ENOMEM;
+       tb->ops = ops;
+
+       mutex_lock(&backend_mutex);
+       list_for_each_entry(old, &backend_list, list) {
+               if (!strcmp(old->ops->name, ops->name)) {
+                       pr_err("backend %s already registered.\n", ops->name);
+                       mutex_unlock(&backend_mutex);
+                       kfree(tb);
                        return -EEXIST;
                }
        }
-       list_add_tail(&sub_api->sub_api_list, &subsystem_list);
-       mutex_unlock(&subsystem_mutex);
+       target_setup_backend_cits(tb);
+       list_add_tail(&tb->list, &backend_list);
+       mutex_unlock(&backend_mutex);
 
-       pr_debug("TCM: Registered subsystem plugin: %s struct module:"
-                       " %p\n", sub_api->name, sub_api->owner);
+       pr_debug("TCM: Registered subsystem plugin: %s struct module: %p\n",
+                       ops->name, ops->owner);
        return 0;
 }
-EXPORT_SYMBOL(transport_subsystem_register);
+EXPORT_SYMBOL(transport_backend_register);
 
-void transport_subsystem_release(struct se_subsystem_api *sub_api)
+void target_backend_unregister(const struct target_backend_ops *ops)
 {
-       mutex_lock(&subsystem_mutex);
-       list_del(&sub_api->sub_api_list);
-       mutex_unlock(&subsystem_mutex);
+       struct target_backend *tb;
+
+       mutex_lock(&backend_mutex);
+       list_for_each_entry(tb, &backend_list, list) {
+               if (tb->ops == ops) {
+                       list_del(&tb->list);
+                       kfree(tb);
+                       break;
+               }
+       }
+       mutex_unlock(&backend_mutex);
 }
-EXPORT_SYMBOL(transport_subsystem_release);
+EXPORT_SYMBOL(target_backend_unregister);
 
-static struct se_subsystem_api *core_get_backend(const char *sub_name)
+static struct target_backend *core_get_backend(const char *name)
 {
-       struct se_subsystem_api *s;
+       struct target_backend *tb;
 
-       mutex_lock(&subsystem_mutex);
-       list_for_each_entry(s, &subsystem_list, sub_api_list) {
-               if (!strcmp(s->name, sub_name))
+       mutex_lock(&backend_mutex);
+       list_for_each_entry(tb, &backend_list, list) {
+               if (!strcmp(tb->ops->name, name))
                        goto found;
        }
-       mutex_unlock(&subsystem_mutex);
+       mutex_unlock(&backend_mutex);
        return NULL;
 found:
-       if (s->owner && !try_module_get(s->owner))
-               s = NULL;
-       mutex_unlock(&subsystem_mutex);
-       return s;
+       if (tb->ops->owner && !try_module_get(tb->ops->owner))
+               tb = NULL;
+       mutex_unlock(&backend_mutex);
+       return tb;
 }
 
 struct se_hba *
@@ -117,13 +128,13 @@ core_alloc_hba(const char *plugin_name, u32 plugin_dep_id, u32 hba_flags)
        hba->hba_index = scsi_get_new_index(SCSI_INST_INDEX);
        hba->hba_flags |= hba_flags;
 
-       hba->transport = core_get_backend(plugin_name);
-       if (!hba->transport) {
+       hba->backend = core_get_backend(plugin_name);
+       if (!hba->backend) {
                ret = -EINVAL;
                goto out_free_hba;
        }
 
-       ret = hba->transport->attach_hba(hba, plugin_dep_id);
+       ret = hba->backend->ops->attach_hba(hba, plugin_dep_id);
        if (ret < 0)
                goto out_module_put;
 
@@ -138,8 +149,8 @@ core_alloc_hba(const char *plugin_name, u32 plugin_dep_id, u32 hba_flags)
        return hba;
 
 out_module_put:
-       module_put(hba->transport->owner);
-       hba->transport = NULL;
+       module_put(hba->backend->ops->owner);
+       hba->backend = NULL;
 out_free_hba:
        kfree(hba);
        return ERR_PTR(ret);
@@ -150,7 +161,7 @@ core_delete_hba(struct se_hba *hba)
 {
        WARN_ON(hba->dev_count);
 
-       hba->transport->detach_hba(hba);
+       hba->backend->ops->detach_hba(hba);
 
        spin_lock(&hba_lock);
        list_del(&hba->hba_node);
@@ -159,9 +170,9 @@ core_delete_hba(struct se_hba *hba)
        pr_debug("CORE_HBA[%d] - Detached HBA from Generic Target"
                        " Core\n", hba->hba_id);
 
-       module_put(hba->transport->owner);
+       module_put(hba->backend->ops->owner);
 
-       hba->transport = NULL;
+       hba->backend = NULL;
        kfree(hba);
        return 0;
 }
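
transport_backend_register() above allocates a small wrapper per backend, scans the existing list under backend_mutex for a duplicate name, and fails with -EEXIST on a clash. A userspace analogue of that pattern, assuming pthreads; the backend_ops/backend types and helpers here are illustrative, not the kernel's:

    #include <errno.h>
    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct backend_ops { const char *name; };

    struct backend {
            const struct backend_ops *ops;
            struct backend *next;
    };

    static struct backend *backend_list;
    static pthread_mutex_t backend_mutex = PTHREAD_MUTEX_INITIALIZER;

    static int backend_register(const struct backend_ops *ops)
    {
            struct backend *tb, *old;

            tb = calloc(1, sizeof(*tb));
            if (!tb)
                    return -ENOMEM;
            tb->ops = ops;

            pthread_mutex_lock(&backend_mutex);
            for (old = backend_list; old; old = old->next) {
                    if (strcmp(old->ops->name, ops->name) == 0) {
                            pthread_mutex_unlock(&backend_mutex);
                            free(tb);
                            return -EEXIST;     /* name already registered */
                    }
            }
            tb->next = backend_list;            /* the kernel adds to the tail instead */
            backend_list = tb;
            pthread_mutex_unlock(&backend_mutex);
            return 0;
    }

    int main(void)
    {
            static const struct backend_ops fileio = { .name = "fileio" };

            printf("first:  %d\n", backend_register(&fileio));  /* 0 */
            printf("second: %d\n", backend_register(&fileio));  /* -EEXIST */
            return 0;
    }
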
index 972ed1781ae2f0ad08c5671d7f8cc535cf12cde9..6d88d24e6cce97ab3cca4078d3fb41a2ae0c0220 100644 (file)
@@ -40,7 +40,6 @@
 
 #include <target/target_core_base.h>
 #include <target/target_core_backend.h>
-#include <target/target_core_backend_configfs.h>
 
 #include "target_core_iblock.h"
 
@@ -53,17 +52,11 @@ static inline struct iblock_dev *IBLOCK_DEV(struct se_device *dev)
 }
 
 
-static struct se_subsystem_api iblock_template;
-
-/*     iblock_attach_hba(): (Part of se_subsystem_api_t template)
- *
- *
- */
 static int iblock_attach_hba(struct se_hba *hba, u32 host_id)
 {
        pr_debug("CORE_HBA[%d] - TCM iBlock HBA Driver %s on"
                " Generic Target Core Stack %s\n", hba->hba_id,
-               IBLOCK_VERSION, TARGET_CORE_MOD_VERSION);
+               IBLOCK_VERSION, TARGET_CORE_VERSION);
        return 0;
 }
 
@@ -197,6 +190,14 @@ out:
        return ret;
 }
 
+static void iblock_dev_call_rcu(struct rcu_head *p)
+{
+       struct se_device *dev = container_of(p, struct se_device, rcu_head);
+       struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
+
+       kfree(ib_dev);
+}
+
 static void iblock_free_device(struct se_device *dev)
 {
        struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
@@ -206,7 +207,7 @@ static void iblock_free_device(struct se_device *dev)
        if (ib_dev->ibd_bio_set != NULL)
                bioset_free(ib_dev->ibd_bio_set);
 
-       kfree(ib_dev);
+       call_rcu(&dev->rcu_head, iblock_dev_call_rcu);
 }
 
 static unsigned long long iblock_emulate_read_cap_with_block_size(
@@ -414,10 +415,9 @@ iblock_execute_sync_cache(struct se_cmd *cmd)
 }
 
 static sense_reason_t
-iblock_do_unmap(struct se_cmd *cmd, void *priv,
-               sector_t lba, sector_t nolb)
+iblock_execute_unmap(struct se_cmd *cmd, sector_t lba, sector_t nolb)
 {
-       struct block_device *bdev = priv;
+       struct block_device *bdev = IBLOCK_DEV(cmd->se_dev)->ibd_bd;
        int ret;
 
        ret = blkdev_issue_discard(bdev, lba, nolb, GFP_KERNEL, 0);
@@ -429,30 +429,6 @@ iblock_do_unmap(struct se_cmd *cmd, void *priv,
        return 0;
 }
 
-static sense_reason_t
-iblock_execute_unmap(struct se_cmd *cmd)
-{
-       struct block_device *bdev = IBLOCK_DEV(cmd->se_dev)->ibd_bd;
-
-       return sbc_execute_unmap(cmd, iblock_do_unmap, bdev);
-}
-
-static sense_reason_t
-iblock_execute_write_same_unmap(struct se_cmd *cmd)
-{
-       struct block_device *bdev = IBLOCK_DEV(cmd->se_dev)->ibd_bd;
-       sector_t lba = cmd->t_task_lba;
-       sector_t nolb = sbc_get_write_same_sectors(cmd);
-       sense_reason_t ret;
-
-       ret = iblock_do_unmap(cmd, bdev, lba, nolb);
-       if (ret)
-               return ret;
-
-       target_complete_cmd(cmd, GOOD);
-       return 0;
-}
-
 static sense_reason_t
 iblock_execute_write_same(struct se_cmd *cmd)
 {
@@ -844,7 +820,6 @@ static struct sbc_ops iblock_sbc_ops = {
        .execute_rw             = iblock_execute_rw,
        .execute_sync_cache     = iblock_execute_sync_cache,
        .execute_write_same     = iblock_execute_write_same,
-       .execute_write_same_unmap = iblock_execute_write_same_unmap,
        .execute_unmap          = iblock_execute_unmap,
 };
 
@@ -863,42 +838,7 @@ static bool iblock_get_write_cache(struct se_device *dev)
        return q->flush_flags & REQ_FLUSH;
 }
 
-DEF_TB_DEFAULT_ATTRIBS(iblock);
-
-static struct configfs_attribute *iblock_backend_dev_attrs[] = {
-       &iblock_dev_attrib_emulate_model_alias.attr,
-       &iblock_dev_attrib_emulate_dpo.attr,
-       &iblock_dev_attrib_emulate_fua_write.attr,
-       &iblock_dev_attrib_emulate_fua_read.attr,
-       &iblock_dev_attrib_emulate_write_cache.attr,
-       &iblock_dev_attrib_emulate_ua_intlck_ctrl.attr,
-       &iblock_dev_attrib_emulate_tas.attr,
-       &iblock_dev_attrib_emulate_tpu.attr,
-       &iblock_dev_attrib_emulate_tpws.attr,
-       &iblock_dev_attrib_emulate_caw.attr,
-       &iblock_dev_attrib_emulate_3pc.attr,
-       &iblock_dev_attrib_pi_prot_type.attr,
-       &iblock_dev_attrib_hw_pi_prot_type.attr,
-       &iblock_dev_attrib_pi_prot_format.attr,
-       &iblock_dev_attrib_enforce_pr_isids.attr,
-       &iblock_dev_attrib_is_nonrot.attr,
-       &iblock_dev_attrib_emulate_rest_reord.attr,
-       &iblock_dev_attrib_force_pr_aptpl.attr,
-       &iblock_dev_attrib_hw_block_size.attr,
-       &iblock_dev_attrib_block_size.attr,
-       &iblock_dev_attrib_hw_max_sectors.attr,
-       &iblock_dev_attrib_optimal_sectors.attr,
-       &iblock_dev_attrib_hw_queue_depth.attr,
-       &iblock_dev_attrib_queue_depth.attr,
-       &iblock_dev_attrib_max_unmap_lba_count.attr,
-       &iblock_dev_attrib_max_unmap_block_desc_count.attr,
-       &iblock_dev_attrib_unmap_granularity.attr,
-       &iblock_dev_attrib_unmap_granularity_alignment.attr,
-       &iblock_dev_attrib_max_write_same_len.attr,
-       NULL,
-};
-
-static struct se_subsystem_api iblock_template = {
+static const struct target_backend_ops iblock_ops = {
        .name                   = "iblock",
        .inquiry_prod           = "IBLOCK",
        .inquiry_rev            = IBLOCK_VERSION,
@@ -918,21 +858,17 @@ static struct se_subsystem_api iblock_template = {
        .get_io_min             = iblock_get_io_min,
        .get_io_opt             = iblock_get_io_opt,
        .get_write_cache        = iblock_get_write_cache,
+       .tb_dev_attrib_attrs    = sbc_attrib_attrs,
 };
 
 static int __init iblock_module_init(void)
 {
-       struct target_backend_cits *tbc = &iblock_template.tb_cits;
-
-       target_core_setup_sub_cits(&iblock_template);
-       tbc->tb_dev_attrib_cit.ct_attrs = iblock_backend_dev_attrs;
-
-       return transport_subsystem_register(&iblock_template);
+       return transport_backend_register(&iblock_ops);
 }
 
 static void __exit iblock_module_exit(void)
 {
-       transport_subsystem_release(&iblock_template);
+       target_backend_unregister(&iblock_ops);
 }
 
 MODULE_DESCRIPTION("TCM IBLOCK subsystem plugin");
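
The unmap path above drops the sbc_execute_unmap() helper that threaded a per-backend callback plus an opaque priv pointer; sbc_ops now carries a direct execute_unmap(cmd, lba, nolb) and each backend recovers its own context from the command. A toy sketch of the two shapes, with every type and name illustrative:

    #include <stdio.h>

    struct cmd { const char *dev_name; };   /* stands in for se_cmd/se_dev */

    /* old shape: generic helper + callback + void *priv */
    typedef int (*unmap_cb)(struct cmd *cmd, void *priv, long lba, long nolb);

    static int old_do_unmap(struct cmd *cmd, void *priv, long lba, long nolb)
    {
            (void)cmd;
            printf("old: discard %ld+%ld on %s\n", lba, nolb, (const char *)priv);
            return 0;
    }

    static int old_execute_unmap(struct cmd *cmd, unmap_cb cb, void *priv)
    {
            return cb(cmd, priv, 0, 8);         /* helper worked out lba/nolb */
    }

    /* new shape: one direct op, context comes from the command itself */
    static int new_execute_unmap(struct cmd *cmd, long lba, long nolb)
    {
            printf("new: discard %ld+%ld on %s\n", lba, nolb, cmd->dev_name);
            return 0;
    }

    int main(void)
    {
            struct cmd c = { .dev_name = "iblock0" };

            old_execute_unmap(&c, old_do_unmap, (void *)"iblock0");
            new_execute_unmap(&c, 0, 8);
            return 0;
    }
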
index 68bd7f5d9f73cf6feacd2dfefb951db99dd21c4f..99c24acfe6761797fc9788f4f233d1c8c94a0e1d 100644 (file)
@@ -1,6 +1,53 @@
 #ifndef TARGET_CORE_INTERNAL_H
 #define TARGET_CORE_INTERNAL_H
 
+#define TARGET_CORE_NAME_MAX_LEN       64
+#define TARGET_FABRIC_NAME_SIZE                32
+
+struct target_backend {
+       struct list_head list;
+
+       const struct target_backend_ops *ops;
+
+       struct config_item_type tb_dev_cit;
+       struct config_item_type tb_dev_attrib_cit;
+       struct config_item_type tb_dev_pr_cit;
+       struct config_item_type tb_dev_wwn_cit;
+       struct config_item_type tb_dev_alua_tg_pt_gps_cit;
+       struct config_item_type tb_dev_stat_cit;
+};
+
+struct target_fabric_configfs {
+       atomic_t                tf_access_cnt;
+       struct list_head        tf_list;
+       struct config_group     tf_group;
+       struct config_group     tf_disc_group;
+       struct config_group     *tf_default_groups[2];
+       const struct target_core_fabric_ops *tf_ops;
+
+       struct config_item_type tf_discovery_cit;
+       struct config_item_type tf_wwn_cit;
+       struct config_item_type tf_wwn_fabric_stats_cit;
+       struct config_item_type tf_tpg_cit;
+       struct config_item_type tf_tpg_base_cit;
+       struct config_item_type tf_tpg_lun_cit;
+       struct config_item_type tf_tpg_port_cit;
+       struct config_item_type tf_tpg_port_stat_cit;
+       struct config_item_type tf_tpg_np_cit;
+       struct config_item_type tf_tpg_np_base_cit;
+       struct config_item_type tf_tpg_attrib_cit;
+       struct config_item_type tf_tpg_auth_cit;
+       struct config_item_type tf_tpg_param_cit;
+       struct config_item_type tf_tpg_nacl_cit;
+       struct config_item_type tf_tpg_nacl_base_cit;
+       struct config_item_type tf_tpg_nacl_attrib_cit;
+       struct config_item_type tf_tpg_nacl_auth_cit;
+       struct config_item_type tf_tpg_nacl_param_cit;
+       struct config_item_type tf_tpg_nacl_stat_cit;
+       struct config_item_type tf_tpg_mappedlun_cit;
+       struct config_item_type tf_tpg_mappedlun_stat_cit;
+};
+
 /* target_core_alua.c */
 extern struct t10_alua_lu_gp *default_lu_gp;
 
@@ -8,28 +55,27 @@ extern struct t10_alua_lu_gp *default_lu_gp;
 extern struct mutex g_device_mutex;
 extern struct list_head g_device_list;
 
+int    core_alloc_rtpi(struct se_lun *lun, struct se_device *dev);
 struct se_dev_entry *core_get_se_deve_from_rtpi(struct se_node_acl *, u16);
-int    core_free_device_list_for_node(struct se_node_acl *,
+void   target_pr_kref_release(struct kref *);
+void   core_free_device_list_for_node(struct se_node_acl *,
                struct se_portal_group *);
-void   core_update_device_list_access(u32, u32, struct se_node_acl *);
+void   core_update_device_list_access(u64, u32, struct se_node_acl *);
+struct se_dev_entry *target_nacl_find_deve(struct se_node_acl *, u64);
 int    core_enable_device_list_for_node(struct se_lun *, struct se_lun_acl *,
-               u32, u32, struct se_node_acl *, struct se_portal_group *);
-int    core_disable_device_list_for_node(struct se_lun *, struct se_lun_acl *,
-               u32, u32, struct se_node_acl *, struct se_portal_group *);
+               u64, u32, struct se_node_acl *, struct se_portal_group *);
+void   core_disable_device_list_for_node(struct se_lun *, struct se_dev_entry *,
+               struct se_node_acl *, struct se_portal_group *);
 void   core_clear_lun_from_tpg(struct se_lun *, struct se_portal_group *);
-int    core_dev_export(struct se_device *, struct se_portal_group *,
-               struct se_lun *);
-void   core_dev_unexport(struct se_device *, struct se_portal_group *,
-               struct se_lun *);
-struct se_lun *core_dev_add_lun(struct se_portal_group *, struct se_device *, u32);
+int    core_dev_add_lun(struct se_portal_group *, struct se_device *,
+               struct se_lun *lun);
 void   core_dev_del_lun(struct se_portal_group *, struct se_lun *);
-struct se_lun *core_get_lun_from_tpg(struct se_portal_group *, u32);
 struct se_lun_acl *core_dev_init_initiator_node_lun_acl(struct se_portal_group *,
-               struct se_node_acl *, u32, int *);
+               struct se_node_acl *, u64, int *);
 int    core_dev_add_initiator_node_lun_acl(struct se_portal_group *,
-               struct se_lun_acl *, u32, u32);
-int    core_dev_del_initiator_node_lun_acl(struct se_portal_group *,
-               struct se_lun *, struct se_lun_acl *);
+               struct se_lun_acl *, struct se_lun *lun, u32);
+int    core_dev_del_initiator_node_lun_acl(struct se_lun *,
+               struct se_lun_acl *);
 void   core_dev_free_initiator_node_lun_acl(struct se_portal_group *,
                struct se_lun_acl *lacl);
 int    core_dev_setup_virtual_lun0(void);
@@ -38,6 +84,18 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name);
 int    target_configure_device(struct se_device *dev);
 void   target_free_device(struct se_device *);
 
+/* target_core_configfs.c */
+void   target_setup_backend_cits(struct target_backend *);
+
+/* target_core_fabric_lib.c */
+int    target_get_pr_transport_id_len(struct se_node_acl *nacl,
+               struct t10_pr_registration *pr_reg, int *format_code);
+int    target_get_pr_transport_id(struct se_node_acl *nacl,
+               struct t10_pr_registration *pr_reg, int *format_code,
+               unsigned char *buf);
+const char *target_parse_pr_out_transport_id(struct se_portal_group *tpg,
+               const char *buf, u32 *out_tid_len, char **port_nexus_ptr);
+
 /* target_core_hba.c */
 struct se_hba *core_alloc_hba(const char *, u32, u32);
 int    core_delete_hba(struct se_hba *);
@@ -53,12 +111,16 @@ extern struct se_device *g_lun0_dev;
 
 struct se_node_acl *__core_tpg_get_initiator_node_acl(struct se_portal_group *tpg,
                const char *);
-void   core_tpg_add_node_to_devs(struct se_node_acl *, struct se_portal_group *);
+void   core_tpg_add_node_to_devs(struct se_node_acl *, struct se_portal_group *,
+                                 struct se_lun *);
 void   core_tpg_wait_for_nacl_pr_ref(struct se_node_acl *);
-struct se_lun *core_tpg_alloc_lun(struct se_portal_group *, u32);
+struct se_lun *core_tpg_alloc_lun(struct se_portal_group *, u64);
 int    core_tpg_add_lun(struct se_portal_group *, struct se_lun *,
                u32, struct se_device *);
 void core_tpg_remove_lun(struct se_portal_group *, struct se_lun *);
+struct se_node_acl *core_tpg_add_initiator_node_acl(struct se_portal_group *tpg,
+               const char *initiatorname);
+void core_tpg_del_initiator_node_acl(struct se_node_acl *acl);
 
 /* target_core_transport.c */
 extern struct kmem_cache *se_tmr_req_cache;
@@ -77,14 +139,19 @@ int        transport_dump_vpd_assoc(struct t10_vpd *, unsigned char *, int);
 int    transport_dump_vpd_ident_type(struct t10_vpd *, unsigned char *, int);
 int    transport_dump_vpd_ident(struct t10_vpd *, unsigned char *, int);
 bool   target_stop_cmd(struct se_cmd *cmd, unsigned long *flags);
-int    transport_clear_lun_ref(struct se_lun *);
+void   transport_clear_lun_ref(struct se_lun *);
 void   transport_send_task_abort(struct se_cmd *);
 sense_reason_t target_cmd_size_check(struct se_cmd *cmd, unsigned int size);
 void   target_qf_do_work(struct work_struct *work);
+bool   target_check_wce(struct se_device *dev);
+bool   target_check_fua(struct se_device *dev);
 
 /* target_core_stat.c */
 void   target_stat_setup_dev_default_groups(struct se_device *);
 void   target_stat_setup_port_default_groups(struct se_lun *);
 void   target_stat_setup_mappedlun_default_groups(struct se_lun_acl *);
 
+/* target_core_xcopy.c */
+extern struct se_portal_group xcopy_pt_tpg;
+
 #endif /* TARGET_CORE_INTERNAL_H */
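
The target_core_fabric_lib.c prototypes added above replace the per-fabric tpg_get_pr_transport_id{,_len}() and tpg_parse_pr_out_transport_id() callbacks with core-owned helpers keyed off the portal group's proto_id. A minimal caller sketch, assuming a valid nacl/pr_reg pair and a destination buffer; the wrapper fill_transport_id() is hypothetical, only the two helper signatures come from the hunk above.

/* Illustrative only -- not part of this commit. */
static int fill_transport_id(struct se_node_acl *nacl,
                             struct t10_pr_registration *pr_reg,
                             unsigned char *buf, u32 buf_len)
{
        int format_code = 0, len;

        /* Ask the core how large the TransportID descriptor will be ... */
        len = target_get_pr_transport_id_len(nacl, pr_reg, &format_code);
        if (len < 0 || len > buf_len)
                return -EINVAL;

        /* ... and only then let it encode into the caller's buffer. */
        return target_get_pr_transport_id(nacl, pr_reg, &format_code, buf);
}
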
index 8e5fa291f87803ac9bccd92e3ea006112426b086..0fdbe43b7dad99479f7288584a0d95815c4dab72 100644 (file)
@@ -35,7 +35,6 @@
 #include <target/target_core_base.h>
 #include <target/target_core_backend.h>
 #include <target/target_core_fabric.h>
-#include <target/target_core_configfs.h>
 
 #include "target_core_internal.h"
 #include "target_core_pr.h"
@@ -45,7 +44,6 @@
  * Used for Specify Initiator Ports Capable Bit (SPEC_I_PT)
  */
 struct pr_transport_id_holder {
-       int dest_local_nexus;
        struct t10_pr_registration *dest_pr_reg;
        struct se_portal_group *dest_tpg;
        struct se_node_acl *dest_node_acl;
@@ -231,9 +229,10 @@ target_scsi2_reservation_release(struct se_cmd *cmd)
                dev->dev_reservation_flags &= ~DRF_SPC2_RESERVATIONS_WITH_ISID;
        }
        tpg = sess->se_tpg;
-       pr_debug("SCSI-2 Released reservation for %s LUN: %u ->"
-               " MAPPED LUN: %u for %s\n", tpg->se_tpg_tfo->get_fabric_name(),
-               cmd->se_lun->unpacked_lun, cmd->se_deve->mapped_lun,
+       pr_debug("SCSI-2 Released reservation for %s LUN: %llu ->"
+               " MAPPED LUN: %llu for %s\n",
+               tpg->se_tpg_tfo->get_fabric_name(),
+               cmd->se_lun->unpacked_lun, cmd->orig_fe_lun,
                sess->se_node_acl->initiatorname);
 
 out_unlock:
@@ -277,12 +276,12 @@ target_scsi2_reservation_reserve(struct se_cmd *cmd)
           (dev->dev_reserved_node_acl != sess->se_node_acl)) {
                pr_err("SCSI-2 RESERVATION CONFLIFT for %s fabric\n",
                        tpg->se_tpg_tfo->get_fabric_name());
-               pr_err("Original reserver LUN: %u %s\n",
+               pr_err("Original reserver LUN: %llu %s\n",
                        cmd->se_lun->unpacked_lun,
                        dev->dev_reserved_node_acl->initiatorname);
-               pr_err("Current attempt - LUN: %u -> MAPPED LUN: %u"
+               pr_err("Current attempt - LUN: %llu -> MAPPED LUN: %llu"
                        " from %s \n", cmd->se_lun->unpacked_lun,
-                       cmd->se_deve->mapped_lun,
+                       cmd->orig_fe_lun,
                        sess->se_node_acl->initiatorname);
                ret = TCM_RESERVATION_CONFLICT;
                goto out_unlock;
@@ -294,9 +293,9 @@ target_scsi2_reservation_reserve(struct se_cmd *cmd)
                dev->dev_res_bin_isid = sess->sess_bin_isid;
                dev->dev_reservation_flags |= DRF_SPC2_RESERVATIONS_WITH_ISID;
        }
-       pr_debug("SCSI-2 Reserved %s LUN: %u -> MAPPED LUN: %u"
+       pr_debug("SCSI-2 Reserved %s LUN: %llu -> MAPPED LUN: %llu"
                " for %s\n", tpg->se_tpg_tfo->get_fabric_name(),
-               cmd->se_lun->unpacked_lun, cmd->se_deve->mapped_lun,
+               cmd->se_lun->unpacked_lun, cmd->orig_fe_lun,
                sess->se_node_acl->initiatorname);
 
 out_unlock:
@@ -314,28 +313,31 @@ out:
  * This function is called by those initiator ports who are *NOT*
  * the active PR reservation holder when a reservation is present.
  */
-static int core_scsi3_pr_seq_non_holder(
-       struct se_cmd *cmd,
-       u32 pr_reg_type)
+static int core_scsi3_pr_seq_non_holder(struct se_cmd *cmd, u32 pr_reg_type,
+                                       bool isid_mismatch)
 {
        unsigned char *cdb = cmd->t_task_cdb;
-       struct se_dev_entry *se_deve;
        struct se_session *se_sess = cmd->se_sess;
-       int other_cdb = 0, ignore_reg;
+       struct se_node_acl *nacl = se_sess->se_node_acl;
+       int other_cdb = 0;
        int registered_nexus = 0, ret = 1; /* Conflict by default */
        int all_reg = 0, reg_only = 0; /* ALL_REG, REG_ONLY */
        int we = 0; /* Write Exclusive */
        int legacy = 0; /* Act like a legacy device and return
                         * RESERVATION CONFLICT on some CDBs */
 
-       se_deve = se_sess->se_node_acl->device_list[cmd->orig_fe_lun];
-       /*
-        * Determine if the registration should be ignored due to
-        * non-matching ISIDs in target_scsi3_pr_reservation_check().
-        */
-       ignore_reg = (pr_reg_type & 0x80000000);
-       if (ignore_reg)
-               pr_reg_type &= ~0x80000000;
+       if (isid_mismatch) {
+               registered_nexus = 0;
+       } else {
+               struct se_dev_entry *se_deve;
+
+               rcu_read_lock();
+               se_deve = target_nacl_find_deve(nacl, cmd->orig_fe_lun);
+               if (se_deve)
+                       registered_nexus = test_bit(DEF_PR_REG_ACTIVE,
+                                                   &se_deve->deve_flags);
+               rcu_read_unlock();
+       }
 
        switch (pr_reg_type) {
        case PR_TYPE_WRITE_EXCLUSIVE:
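
The rework above drops the pr_reg_type |= 0x80000000 trick in favour of an explicit isid_mismatch argument, and reads per-nexus registration state through the node ACL's RCU-protected deve table instead of the old device_list[] array. The reader-side idiom, isolated as a sketch: target_nacl_find_deve() and DEF_PR_REG_ACTIVE are taken from the hunk above (and need the target core's internal declarations in scope); the wrapper function itself is hypothetical.

#include <linux/rcupdate.h>

/* Illustrative only: does this I_T nexus hold an active PR registration? */
static bool nexus_has_active_pr_reg(struct se_node_acl *nacl, u64 fe_lun)
{
        struct se_dev_entry *deve;
        bool active = false;

        rcu_read_lock();
        deve = target_nacl_find_deve(nacl, fe_lun);
        if (deve)
                active = test_bit(DEF_PR_REG_ACTIVE, &deve->deve_flags);
        rcu_read_unlock();

        return active;
}
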
@@ -345,8 +347,6 @@ static int core_scsi3_pr_seq_non_holder(
                 * Some commands are only allowed for the persistent reservation
                 * holder.
                 */
-               if ((se_deve->def_pr_registered) && !(ignore_reg))
-                       registered_nexus = 1;
                break;
        case PR_TYPE_WRITE_EXCLUSIVE_REGONLY:
                we = 1;
@@ -355,8 +355,6 @@ static int core_scsi3_pr_seq_non_holder(
                 * Some commands are only allowed for registered I_T Nexuses.
                 */
                reg_only = 1;
-               if ((se_deve->def_pr_registered) && !(ignore_reg))
-                       registered_nexus = 1;
                break;
        case PR_TYPE_WRITE_EXCLUSIVE_ALLREG:
                we = 1;
@@ -365,8 +363,6 @@ static int core_scsi3_pr_seq_non_holder(
                 * Each registered I_T Nexus is a reservation holder.
                 */
                all_reg = 1;
-               if ((se_deve->def_pr_registered) && !(ignore_reg))
-                       registered_nexus = 1;
                break;
        default:
                return -EINVAL;
@@ -572,6 +568,7 @@ target_scsi3_pr_reservation_check(struct se_cmd *cmd)
        struct se_device *dev = cmd->se_dev;
        struct se_session *sess = cmd->se_sess;
        u32 pr_reg_type;
+       bool isid_mismatch = false;
 
        if (!dev->dev_pr_res_holder)
                return 0;
@@ -584,7 +581,7 @@ target_scsi3_pr_reservation_check(struct se_cmd *cmd)
        if (dev->dev_pr_res_holder->isid_present_at_reg) {
                if (dev->dev_pr_res_holder->pr_reg_bin_isid !=
                    sess->sess_bin_isid) {
-                       pr_reg_type |= 0x80000000;
+                       isid_mismatch = true;
                        goto check_nonholder;
                }
        }
@@ -592,7 +589,7 @@ target_scsi3_pr_reservation_check(struct se_cmd *cmd)
        return 0;
 
 check_nonholder:
-       if (core_scsi3_pr_seq_non_holder(cmd, pr_reg_type))
+       if (core_scsi3_pr_seq_non_holder(cmd, pr_reg_type, isid_mismatch))
                return TCM_RESERVATION_CONFLICT;
        return 0;
 }
@@ -620,7 +617,9 @@ static u32 core_scsi3_pr_generation(struct se_device *dev)
 static struct t10_pr_registration *__core_scsi3_do_alloc_registration(
        struct se_device *dev,
        struct se_node_acl *nacl,
+       struct se_lun *lun,
        struct se_dev_entry *deve,
+       u64 mapped_lun,
        unsigned char *isid,
        u64 sa_res_key,
        int all_tg_pt,
@@ -642,12 +641,12 @@ static struct t10_pr_registration *__core_scsi3_do_alloc_registration(
        atomic_set(&pr_reg->pr_res_holders, 0);
        pr_reg->pr_reg_nacl = nacl;
        pr_reg->pr_reg_deve = deve;
-       pr_reg->pr_res_mapped_lun = deve->mapped_lun;
-       pr_reg->pr_aptpl_target_lun = deve->se_lun->unpacked_lun;
+       pr_reg->pr_res_mapped_lun = mapped_lun;
+       pr_reg->pr_aptpl_target_lun = lun->unpacked_lun;
+       pr_reg->tg_pt_sep_rtpi = lun->lun_rtpi;
        pr_reg->pr_res_key = sa_res_key;
        pr_reg->pr_reg_all_tg_pt = all_tg_pt;
        pr_reg->pr_reg_aptpl = aptpl;
-       pr_reg->pr_reg_tg_pt_lun = deve->se_lun;
        /*
         * If an ISID value for this SCSI Initiator Port exists,
         * save it to the registration now.
@@ -671,7 +670,9 @@ static void core_scsi3_lunacl_undepend_item(struct se_dev_entry *);
 static struct t10_pr_registration *__core_scsi3_alloc_registration(
        struct se_device *dev,
        struct se_node_acl *nacl,
+       struct se_lun *lun,
        struct se_dev_entry *deve,
+       u64 mapped_lun,
        unsigned char *isid,
        u64 sa_res_key,
        int all_tg_pt,
@@ -679,7 +680,8 @@ static struct t10_pr_registration *__core_scsi3_alloc_registration(
 {
        struct se_dev_entry *deve_tmp;
        struct se_node_acl *nacl_tmp;
-       struct se_port *port, *port_tmp;
+       struct se_lun_acl *lacl_tmp;
+       struct se_lun *lun_tmp, *next, *dest_lun;
        const struct target_core_fabric_ops *tfo = nacl->se_tpg->se_tpg_tfo;
        struct t10_pr_registration *pr_reg, *pr_reg_atp, *pr_reg_tmp, *pr_reg_tmp_safe;
        int ret;
@@ -687,8 +689,9 @@ static struct t10_pr_registration *__core_scsi3_alloc_registration(
         * Create a registration for the I_T Nexus upon which the
         * PROUT REGISTER was received.
         */
-       pr_reg = __core_scsi3_do_alloc_registration(dev, nacl, deve, isid,
-                       sa_res_key, all_tg_pt, aptpl);
+       pr_reg = __core_scsi3_do_alloc_registration(dev, nacl, lun, deve, mapped_lun,
+                                                   isid, sa_res_key, all_tg_pt,
+                                                   aptpl);
        if (!pr_reg)
                return NULL;
        /*
@@ -701,13 +704,13 @@ static struct t10_pr_registration *__core_scsi3_alloc_registration(
         * for ALL_TG_PT=1
         */
        spin_lock(&dev->se_port_lock);
-       list_for_each_entry_safe(port, port_tmp, &dev->dev_sep_list, sep_list) {
-               atomic_inc_mb(&port->sep_tg_pt_ref_cnt);
+       list_for_each_entry_safe(lun_tmp, next, &dev->dev_sep_list, lun_dev_link) {
+               if (!percpu_ref_tryget_live(&lun_tmp->lun_ref))
+                       continue;
                spin_unlock(&dev->se_port_lock);
 
-               spin_lock_bh(&port->sep_alua_lock);
-               list_for_each_entry(deve_tmp, &port->sep_alua_list,
-                                       alua_port_list) {
+               spin_lock(&lun_tmp->lun_deve_lock);
+               list_for_each_entry(deve_tmp, &lun_tmp->lun_deve_list, lun_link) {
                        /*
                         * This pointer will be NULL for demo mode MappedLUNs
                         * that have not been made explicit via a ConfigFS
@@ -716,7 +719,9 @@ static struct t10_pr_registration *__core_scsi3_alloc_registration(
                        if (!deve_tmp->se_lun_acl)
                                continue;
 
-                       nacl_tmp = deve_tmp->se_lun_acl->se_lun_nacl;
+                       lacl_tmp = rcu_dereference_check(deve_tmp->se_lun_acl,
+                                               lockdep_is_held(&lun_tmp->lun_deve_lock));
+                       nacl_tmp = lacl_tmp->se_lun_nacl;
                        /*
                         * Skip the matching struct se_node_acl that is allocated
                         * above..
@@ -736,8 +741,8 @@ static struct t10_pr_registration *__core_scsi3_alloc_registration(
                        if (strcmp(nacl->initiatorname, nacl_tmp->initiatorname))
                                continue;
 
-                       atomic_inc_mb(&deve_tmp->pr_ref_count);
-                       spin_unlock_bh(&port->sep_alua_lock);
+                       kref_get(&deve_tmp->pr_kref);
+                       spin_unlock(&lun_tmp->lun_deve_lock);
                        /*
                         * Grab a configfs group dependency that is released
                         * for the exception path at label out: below, or upon
@@ -748,8 +753,8 @@ static struct t10_pr_registration *__core_scsi3_alloc_registration(
                        if (ret < 0) {
                                pr_err("core_scsi3_lunacl_depend"
                                                "_item() failed\n");
-                               atomic_dec_mb(&port->sep_tg_pt_ref_cnt);
-                               atomic_dec_mb(&deve_tmp->pr_ref_count);
+                               percpu_ref_put(&lun_tmp->lun_ref);
+                               kref_put(&deve_tmp->pr_kref, target_pr_kref_release);
                                goto out;
                        }
                        /*
@@ -759,24 +764,27 @@ static struct t10_pr_registration *__core_scsi3_alloc_registration(
                         * the original *pr_reg is processed in
                         * __core_scsi3_add_registration()
                         */
+                       dest_lun = rcu_dereference_check(deve_tmp->se_lun,
+                               atomic_read(&deve_tmp->pr_kref.refcount) != 0);
+
                        pr_reg_atp = __core_scsi3_do_alloc_registration(dev,
-                                               nacl_tmp, deve_tmp, NULL,
+                                               nacl_tmp, dest_lun, deve_tmp,
+                                               deve_tmp->mapped_lun, NULL,
                                                sa_res_key, all_tg_pt, aptpl);
                        if (!pr_reg_atp) {
-                               atomic_dec_mb(&port->sep_tg_pt_ref_cnt);
-                               atomic_dec_mb(&deve_tmp->pr_ref_count);
+                               percpu_ref_put(&lun_tmp->lun_ref);
                                core_scsi3_lunacl_undepend_item(deve_tmp);
                                goto out;
                        }
 
                        list_add_tail(&pr_reg_atp->pr_reg_atp_mem_list,
                                      &pr_reg->pr_reg_atp_list);
-                       spin_lock_bh(&port->sep_alua_lock);
+                       spin_lock(&lun_tmp->lun_deve_lock);
                }
-               spin_unlock_bh(&port->sep_alua_lock);
+               spin_unlock(&lun_tmp->lun_deve_lock);
 
                spin_lock(&dev->se_port_lock);
-               atomic_dec_mb(&port->sep_tg_pt_ref_cnt);
+               percpu_ref_put(&lun_tmp->lun_ref);
        }
        spin_unlock(&dev->se_port_lock);
 
@@ -797,10 +805,10 @@ int core_scsi3_alloc_aptpl_registration(
        u64 sa_res_key,
        unsigned char *i_port,
        unsigned char *isid,
-       u32 mapped_lun,
+       u64 mapped_lun,
        unsigned char *t_port,
        u16 tpgt,
-       u32 target_lun,
+       u64 target_lun,
        int res_holder,
        int all_tg_pt,
        u8 type)
@@ -831,7 +839,6 @@ int core_scsi3_alloc_aptpl_registration(
        pr_reg->pr_res_key = sa_res_key;
        pr_reg->pr_reg_all_tg_pt = all_tg_pt;
        pr_reg->pr_reg_aptpl = 1;
-       pr_reg->pr_reg_tg_pt_lun = NULL;
        pr_reg->pr_res_scope = 0; /* Always LUN_SCOPE */
        pr_reg->pr_res_type = type;
        /*
@@ -895,9 +902,9 @@ static int __core_scsi3_check_aptpl_registration(
        struct se_device *dev,
        struct se_portal_group *tpg,
        struct se_lun *lun,
-       u32 target_lun,
+       u64 target_lun,
        struct se_node_acl *nacl,
-       struct se_dev_entry *deve)
+       u64 mapped_lun)
 {
        struct t10_pr_registration *pr_reg, *pr_reg_tmp;
        struct t10_reservation *pr_tmpl = &dev->t10_pr;
@@ -925,14 +932,13 @@ static int __core_scsi3_check_aptpl_registration(
                                pr_reg_aptpl_list) {
 
                if (!strcmp(pr_reg->pr_iport, i_port) &&
-                    (pr_reg->pr_res_mapped_lun == deve->mapped_lun) &&
+                    (pr_reg->pr_res_mapped_lun == mapped_lun) &&
                    !(strcmp(pr_reg->pr_tport, t_port)) &&
                     (pr_reg->pr_reg_tpgt == tpgt) &&
                     (pr_reg->pr_aptpl_target_lun == target_lun)) {
 
                        pr_reg->pr_reg_nacl = nacl;
-                       pr_reg->pr_reg_deve = deve;
-                       pr_reg->pr_reg_tg_pt_lun = lun;
+                       pr_reg->tg_pt_sep_rtpi = lun->lun_rtpi;
 
                        list_del(&pr_reg->pr_reg_aptpl_list);
                        spin_unlock(&pr_tmpl->aptpl_reg_lock);
@@ -967,15 +973,14 @@ int core_scsi3_check_aptpl_registration(
        struct se_portal_group *tpg,
        struct se_lun *lun,
        struct se_node_acl *nacl,
-       u32 mapped_lun)
+       u64 mapped_lun)
 {
-       struct se_dev_entry *deve = nacl->device_list[mapped_lun];
-
        if (dev->dev_reservation_flags & DRF_SPC2_RESERVATIONS)
                return 0;
 
        return __core_scsi3_check_aptpl_registration(dev, tpg, lun,
-                               lun->unpacked_lun, nacl, deve);
+                                                    lun->unpacked_lun, nacl,
+                                                    mapped_lun);
 }
 
 static void __core_scsi3_dump_registration(
@@ -1009,10 +1014,6 @@ static void __core_scsi3_dump_registration(
                pr_reg->pr_reg_aptpl);
 }
 
-/*
- * this function can be called with struct se_device->dev_reservation_lock
- * when register_move = 1
- */
 static void __core_scsi3_add_registration(
        struct se_device *dev,
        struct se_node_acl *nacl,
@@ -1023,6 +1024,7 @@ static void __core_scsi3_add_registration(
        const struct target_core_fabric_ops *tfo = nacl->se_tpg->se_tpg_tfo;
        struct t10_pr_registration *pr_reg_tmp, *pr_reg_tmp_safe;
        struct t10_reservation *pr_tmpl = &dev->t10_pr;
+       struct se_dev_entry *deve;
 
        /*
         * Increment PRgeneration counter for struct se_device upon a successful
@@ -1039,10 +1041,16 @@ static void __core_scsi3_add_registration(
 
        spin_lock(&pr_tmpl->registration_lock);
        list_add_tail(&pr_reg->pr_reg_list, &pr_tmpl->registration_list);
-       pr_reg->pr_reg_deve->def_pr_registered = 1;
 
        __core_scsi3_dump_registration(tfo, dev, nacl, pr_reg, register_type);
        spin_unlock(&pr_tmpl->registration_lock);
+
+       rcu_read_lock();
+       deve = pr_reg->pr_reg_deve;
+       if (deve)
+               set_bit(DEF_PR_REG_ACTIVE, &deve->deve_flags);
+       rcu_read_unlock();
+
        /*
         * Skip extra processing for ALL_TG_PT=0 or REGISTER_AND_MOVE.
         */
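
DEF_PR_REG_ACTIVE replaces the old deve->def_pr_registered field, and because pr_reg_deve now points into an RCU-managed table the bit is flipped inside an rcu_read_lock() section with a NULL check, as above. The update side in isolation; the helper is hypothetical and the names come from the hunk.

/* Illustrative only: mark or clear a registration's deve as PR-active. */
static void pr_reg_set_active(struct t10_pr_registration *pr_reg, bool active)
{
        struct se_dev_entry *deve;

        rcu_read_lock();
        deve = pr_reg->pr_reg_deve;     /* may be NULL if the deve is gone */
        if (deve) {
                if (active)
                        set_bit(DEF_PR_REG_ACTIVE, &deve->deve_flags);
                else
                        clear_bit(DEF_PR_REG_ACTIVE, &deve->deve_flags);
        }
        rcu_read_unlock();
}
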
@@ -1054,6 +1062,8 @@ static void __core_scsi3_add_registration(
         */
        list_for_each_entry_safe(pr_reg_tmp, pr_reg_tmp_safe,
                        &pr_reg->pr_reg_atp_list, pr_reg_atp_mem_list) {
+               struct se_node_acl *nacl_tmp = pr_reg_tmp->pr_reg_nacl;
+
                list_del(&pr_reg_tmp->pr_reg_atp_mem_list);
 
                pr_reg_tmp->pr_res_generation = core_scsi3_pr_generation(dev);
@@ -1061,12 +1071,17 @@ static void __core_scsi3_add_registration(
                spin_lock(&pr_tmpl->registration_lock);
                list_add_tail(&pr_reg_tmp->pr_reg_list,
                              &pr_tmpl->registration_list);
-               pr_reg_tmp->pr_reg_deve->def_pr_registered = 1;
 
-               __core_scsi3_dump_registration(tfo, dev,
-                               pr_reg_tmp->pr_reg_nacl, pr_reg_tmp,
-                               register_type);
+               __core_scsi3_dump_registration(tfo, dev, nacl_tmp, pr_reg_tmp,
+                                              register_type);
                spin_unlock(&pr_tmpl->registration_lock);
+
+               rcu_read_lock();
+               deve = pr_reg_tmp->pr_reg_deve;
+               if (deve)
+                       set_bit(DEF_PR_REG_ACTIVE, &deve->deve_flags);
+               rcu_read_unlock();
+
                /*
                 * Drop configfs group dependency reference from
                 * __core_scsi3_alloc_registration()
@@ -1078,7 +1093,9 @@ static void __core_scsi3_add_registration(
 static int core_scsi3_alloc_registration(
        struct se_device *dev,
        struct se_node_acl *nacl,
+       struct se_lun *lun,
        struct se_dev_entry *deve,
+       u64 mapped_lun,
        unsigned char *isid,
        u64 sa_res_key,
        int all_tg_pt,
@@ -1088,8 +1105,9 @@ static int core_scsi3_alloc_registration(
 {
        struct t10_pr_registration *pr_reg;
 
-       pr_reg = __core_scsi3_alloc_registration(dev, nacl, deve, isid,
-                       sa_res_key, all_tg_pt, aptpl);
+       pr_reg = __core_scsi3_alloc_registration(dev, nacl, lun, deve, mapped_lun,
+                                                isid, sa_res_key, all_tg_pt,
+                                                aptpl);
        if (!pr_reg)
                return -EPERM;
 
@@ -1242,13 +1260,13 @@ static void __core_scsi3_free_registration(
        const struct target_core_fabric_ops *tfo =
                        pr_reg->pr_reg_nacl->se_tpg->se_tpg_tfo;
        struct t10_reservation *pr_tmpl = &dev->t10_pr;
+       struct se_node_acl *nacl = pr_reg->pr_reg_nacl;
+       struct se_dev_entry *deve;
        char i_buf[PR_REG_ISID_ID_LEN];
 
        memset(i_buf, 0, PR_REG_ISID_ID_LEN);
        core_pr_dump_initiator_port(pr_reg, i_buf, PR_REG_ISID_ID_LEN);
 
-       pr_reg->pr_reg_deve->def_pr_registered = 0;
-       pr_reg->pr_reg_deve->pr_res_key = 0;
        if (!list_empty(&pr_reg->pr_reg_list))
                list_del(&pr_reg->pr_reg_list);
        /*
@@ -1257,6 +1275,8 @@ static void __core_scsi3_free_registration(
         */
        if (dec_holders)
                core_scsi3_put_pr_reg(pr_reg);
+
+       spin_unlock(&pr_tmpl->registration_lock);
        /*
         * Wait until all reference from any other I_T nexuses for this
         * *pr_reg have been released.  Because list_del() is called above,
@@ -1264,13 +1284,18 @@ static void __core_scsi3_free_registration(
         * count back to zero, and we release *pr_reg.
         */
        while (atomic_read(&pr_reg->pr_res_holders) != 0) {
-               spin_unlock(&pr_tmpl->registration_lock);
                pr_debug("SPC-3 PR [%s] waiting for pr_res_holders\n",
                                tfo->get_fabric_name());
                cpu_relax();
-               spin_lock(&pr_tmpl->registration_lock);
        }
 
+       rcu_read_lock();
+       deve = target_nacl_find_deve(nacl, pr_reg->pr_res_mapped_lun);
+       if (deve)
+               clear_bit(DEF_PR_REG_ACTIVE, &deve->deve_flags);
+       rcu_read_unlock();
+
+       spin_lock(&pr_tmpl->registration_lock);
        pr_debug("SPC-3 PR [%s] Service Action: UNREGISTER Initiator"
                " Node: %s%s\n", tfo->get_fabric_name(),
                pr_reg->pr_reg_nacl->initiatorname,
@@ -1392,12 +1417,14 @@ static void core_scsi3_nodeacl_undepend_item(struct se_node_acl *nacl)
 
 static int core_scsi3_lunacl_depend_item(struct se_dev_entry *se_deve)
 {
-       struct se_lun_acl *lun_acl = se_deve->se_lun_acl;
+       struct se_lun_acl *lun_acl;
        struct se_node_acl *nacl;
        struct se_portal_group *tpg;
        /*
         * For nacl->dynamic_node_acl=1
         */
+       lun_acl = rcu_dereference_check(se_deve->se_lun_acl,
+                               atomic_read(&se_deve->pr_kref.refcount) != 0);
        if (!lun_acl)
                return 0;
 
@@ -1409,21 +1436,23 @@ static int core_scsi3_lunacl_depend_item(struct se_dev_entry *se_deve)
 
 static void core_scsi3_lunacl_undepend_item(struct se_dev_entry *se_deve)
 {
-       struct se_lun_acl *lun_acl = se_deve->se_lun_acl;
+       struct se_lun_acl *lun_acl;
        struct se_node_acl *nacl;
        struct se_portal_group *tpg;
        /*
         * For nacl->dynamic_node_acl=1
         */
+       lun_acl = rcu_dereference_check(se_deve->se_lun_acl,
+                               atomic_read(&se_deve->pr_kref.refcount) != 0);
        if (!lun_acl) {
-               atomic_dec_mb(&se_deve->pr_ref_count);
+               kref_put(&se_deve->pr_kref, target_pr_kref_release);
                return;
        }
        nacl = lun_acl->se_lun_nacl;
        tpg = nacl->se_tpg;
 
        target_undepend_item(&lun_acl->se_lun_group.cg_item);
-       atomic_dec_mb(&se_deve->pr_ref_count);
+       kref_put(&se_deve->pr_kref, target_pr_kref_release);
 }
 
 static sense_reason_t
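
core_scsi3_lunacl_depend_item()/..._undepend_item() now pull se_deve->se_lun_acl through rcu_dereference_check(), telling lockdep that it is the held pr_kref, not rcu_read_lock(), which keeps the entry alive here. The idiom on its own, as a hypothetical accessor; the check expression is the one used in both hunks above.

#include <linux/rcupdate.h>
#include <linux/kref.h>

/*
 * Illustrative only: an RCU-annotated pointer may be dereferenced outside a
 * read-side critical section when some other guarantee -- here, a non-zero
 * pr_kref held by the caller -- prevents the object from being freed.
 */
static struct se_lun_acl *deve_lun_acl(struct se_dev_entry *se_deve)
{
        return rcu_dereference_check(se_deve->se_lun_acl,
                        atomic_read(&se_deve->pr_kref.refcount) != 0);
}
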
@@ -1436,30 +1465,25 @@ core_scsi3_decode_spec_i_port(
        int aptpl)
 {
        struct se_device *dev = cmd->se_dev;
-       struct se_port *tmp_port;
        struct se_portal_group *dest_tpg = NULL, *tmp_tpg;
        struct se_session *se_sess = cmd->se_sess;
        struct se_node_acl *dest_node_acl = NULL;
-       struct se_dev_entry *dest_se_deve = NULL, *local_se_deve;
+       struct se_dev_entry *dest_se_deve = NULL;
        struct t10_pr_registration *dest_pr_reg, *local_pr_reg, *pr_reg_e;
        struct t10_pr_registration *pr_reg_tmp, *pr_reg_tmp_safe;
        LIST_HEAD(tid_dest_list);
        struct pr_transport_id_holder *tidh_new, *tidh, *tidh_tmp;
-       const struct target_core_fabric_ops *tmp_tf_ops;
-       unsigned char *buf;
-       unsigned char *ptr, *i_str = NULL, proto_ident, tmp_proto_ident;
+       unsigned char *buf, *ptr, proto_ident;
+       const unsigned char *i_str;
        char *iport_ptr = NULL, i_buf[PR_REG_ISID_ID_LEN];
        sense_reason_t ret;
        u32 tpdl, tid_len = 0;
-       int dest_local_nexus;
        u32 dest_rtpi = 0;
 
-       local_se_deve = se_sess->se_node_acl->device_list[cmd->orig_fe_lun];
        /*
         * Allocate a struct pr_transport_id_holder and setup the
-        * local_node_acl and local_se_deve pointers and add to
-        * struct list_head tid_dest_list for add registration
-        * processing in the loop of tid_dest_list below.
+        * local_node_acl pointer and add to struct list_head tid_dest_list
+        * for add registration processing in the loop of tid_dest_list below.
         */
        tidh_new = kzalloc(sizeof(struct pr_transport_id_holder), GFP_KERNEL);
        if (!tidh_new) {
@@ -1469,10 +1493,10 @@ core_scsi3_decode_spec_i_port(
        INIT_LIST_HEAD(&tidh_new->dest_list);
        tidh_new->dest_tpg = tpg;
        tidh_new->dest_node_acl = se_sess->se_node_acl;
-       tidh_new->dest_se_deve = local_se_deve;
 
        local_pr_reg = __core_scsi3_alloc_registration(cmd->se_dev,
-                               se_sess->se_node_acl, local_se_deve, l_isid,
+                               se_sess->se_node_acl, cmd->se_lun,
+                               NULL, cmd->orig_fe_lun, l_isid,
                                sa_res_key, all_tg_pt, aptpl);
        if (!local_pr_reg) {
                kfree(tidh_new);
@@ -1481,10 +1505,10 @@ core_scsi3_decode_spec_i_port(
        tidh_new->dest_pr_reg = local_pr_reg;
        /*
         * The local I_T nexus does not hold any configfs dependencies,
-        * so we set tid_h->dest_local_nexus=1 to prevent the
+        * so we set tidh_new->dest_se_deve to NULL to prevent the
         * configfs_undepend_item() calls in the tid_dest_list loops below.
         */
-       tidh_new->dest_local_nexus = 1;
+       tidh_new->dest_se_deve = NULL;
        list_add_tail(&tidh_new->dest_list, &tid_dest_list);
 
        if (cmd->data_length < 28) {
@@ -1525,32 +1549,25 @@ core_scsi3_decode_spec_i_port(
        ptr = &buf[28];
 
        while (tpdl > 0) {
+               struct se_lun *dest_lun, *tmp_lun;
+
                proto_ident = (ptr[0] & 0x0f);
                dest_tpg = NULL;
 
                spin_lock(&dev->se_port_lock);
-               list_for_each_entry(tmp_port, &dev->dev_sep_list, sep_list) {
-                       tmp_tpg = tmp_port->sep_tpg;
-                       if (!tmp_tpg)
-                               continue;
-                       tmp_tf_ops = tmp_tpg->se_tpg_tfo;
-                       if (!tmp_tf_ops)
-                               continue;
-                       if (!tmp_tf_ops->get_fabric_proto_ident ||
-                           !tmp_tf_ops->tpg_parse_pr_out_transport_id)
-                               continue;
+               list_for_each_entry(tmp_lun, &dev->dev_sep_list, lun_dev_link) {
+                       tmp_tpg = tmp_lun->lun_tpg;
+
                        /*
                         * Look for the matching proto_ident provided by
                         * the received TransportID
                         */
-                       tmp_proto_ident = tmp_tf_ops->get_fabric_proto_ident(tmp_tpg);
-                       if (tmp_proto_ident != proto_ident)
+                       if (tmp_tpg->proto_id != proto_ident)
                                continue;
-                       dest_rtpi = tmp_port->sep_rtpi;
+                       dest_rtpi = tmp_lun->lun_rtpi;
 
-                       i_str = tmp_tf_ops->tpg_parse_pr_out_transport_id(
-                                       tmp_tpg, (const char *)ptr, &tid_len,
-                                       &iport_ptr);
+                       i_str = target_parse_pr_out_transport_id(tmp_tpg,
+                                       (const char *)ptr, &tid_len, &iport_ptr);
                        if (!i_str)
                                continue;
 
@@ -1569,12 +1586,12 @@ core_scsi3_decode_spec_i_port(
                         * from the decoded fabric module specific TransportID
                         * at *i_str.
                         */
-                       spin_lock_irq(&tmp_tpg->acl_node_lock);
+                       mutex_lock(&tmp_tpg->acl_node_mutex);
                        dest_node_acl = __core_tpg_get_initiator_node_acl(
                                                tmp_tpg, i_str);
                        if (dest_node_acl)
                                atomic_inc_mb(&dest_node_acl->acl_pr_ref_count);
-                       spin_unlock_irq(&tmp_tpg->acl_node_lock);
+                       mutex_unlock(&tmp_tpg->acl_node_mutex);
 
                        if (!dest_node_acl) {
                                core_scsi3_tpg_undepend_item(tmp_tpg);
@@ -1644,7 +1661,7 @@ core_scsi3_decode_spec_i_port(
                if (core_scsi3_lunacl_depend_item(dest_se_deve)) {
                        pr_err("core_scsi3_lunacl_depend_item()"
                                        " failed\n");
-                       atomic_dec_mb(&dest_se_deve->pr_ref_count);
+                       kref_put(&dest_se_deve->pr_kref, target_pr_kref_release);
                        core_scsi3_nodeacl_undepend_item(dest_node_acl);
                        core_scsi3_tpg_undepend_item(dest_tpg);
                        ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
@@ -1652,7 +1669,7 @@ core_scsi3_decode_spec_i_port(
                }
 
                pr_debug("SPC-3 PR SPEC_I_PT: Located %s Node: %s"
-                       " dest_se_deve mapped_lun: %u\n",
+                       " dest_se_deve mapped_lun: %llu\n",
                        dest_tpg->se_tpg_tfo->get_fabric_name(),
                        dest_node_acl->initiatorname, dest_se_deve->mapped_lun);
 
@@ -1708,9 +1725,13 @@ core_scsi3_decode_spec_i_port(
                 * and then call __core_scsi3_add_registration() in the
                 * 2nd loop which will never fail.
                 */
+               dest_lun = rcu_dereference_check(dest_se_deve->se_lun,
+                               atomic_read(&dest_se_deve->pr_kref.refcount) != 0);
+
                dest_pr_reg = __core_scsi3_alloc_registration(cmd->se_dev,
-                               dest_node_acl, dest_se_deve, iport_ptr,
-                               sa_res_key, all_tg_pt, aptpl);
+                                       dest_node_acl, dest_lun, dest_se_deve,
+                                       dest_se_deve->mapped_lun, iport_ptr,
+                                       sa_res_key, all_tg_pt, aptpl);
                if (!dest_pr_reg) {
                        core_scsi3_lunacl_undepend_item(dest_se_deve);
                        core_scsi3_nodeacl_undepend_item(dest_node_acl);
@@ -1748,7 +1769,6 @@ core_scsi3_decode_spec_i_port(
                dest_node_acl = tidh->dest_node_acl;
                dest_se_deve = tidh->dest_se_deve;
                dest_pr_reg = tidh->dest_pr_reg;
-               dest_local_nexus = tidh->dest_local_nexus;
 
                list_del(&tidh->dest_list);
                kfree(tidh);
@@ -1761,10 +1781,11 @@ core_scsi3_decode_spec_i_port(
 
                pr_debug("SPC-3 PR [%s] SPEC_I_PT: Successfully"
                        " registered Transport ID for Node: %s%s Mapped LUN:"
-                       " %u\n", dest_tpg->se_tpg_tfo->get_fabric_name(),
-                       dest_node_acl->initiatorname, i_buf, dest_se_deve->mapped_lun);
+                       " %llu\n", dest_tpg->se_tpg_tfo->get_fabric_name(),
+                       dest_node_acl->initiatorname, i_buf, (dest_se_deve) ?
+                       dest_se_deve->mapped_lun : 0);
 
-               if (dest_local_nexus)
+               if (!dest_se_deve)
                        continue;
 
                core_scsi3_lunacl_undepend_item(dest_se_deve);
@@ -1785,7 +1806,6 @@ out:
                dest_node_acl = tidh->dest_node_acl;
                dest_se_deve = tidh->dest_se_deve;
                dest_pr_reg = tidh->dest_pr_reg;
-               dest_local_nexus = tidh->dest_local_nexus;
 
                list_del(&tidh->dest_list);
                kfree(tidh);
@@ -1803,7 +1823,7 @@ out:
 
                kmem_cache_free(t10_pr_reg_cache, dest_pr_reg);
 
-               if (dest_local_nexus)
+               if (!dest_se_deve)
                        continue;
 
                core_scsi3_lunacl_undepend_item(dest_se_deve);
@@ -1818,7 +1838,6 @@ static int core_scsi3_update_aptpl_buf(
        unsigned char *buf,
        u32 pr_aptpl_buf_len)
 {
-       struct se_lun *lun;
        struct se_portal_group *tpg;
        struct t10_pr_registration *pr_reg;
        unsigned char tmp[512], isid_buf[32];
@@ -1837,7 +1856,6 @@ static int core_scsi3_update_aptpl_buf(
                tmp[0] = '\0';
                isid_buf[0] = '\0';
                tpg = pr_reg->pr_reg_nacl->se_tpg;
-               lun = pr_reg->pr_reg_tg_pt_lun;
                /*
                 * Write out any ISID value to APTPL metadata that was included
                 * in the original registration.
@@ -1856,7 +1874,7 @@ static int core_scsi3_update_aptpl_buf(
                                "sa_res_key=%llu\n"
                                "res_holder=1\nres_type=%02x\n"
                                "res_scope=%02x\nres_all_tg_pt=%d\n"
-                               "mapped_lun=%u\n", reg_count,
+                               "mapped_lun=%llu\n", reg_count,
                                tpg->se_tpg_tfo->get_fabric_name(),
                                pr_reg->pr_reg_nacl->initiatorname, isid_buf,
                                pr_reg->pr_res_key, pr_reg->pr_res_type,
@@ -1866,7 +1884,7 @@ static int core_scsi3_update_aptpl_buf(
                        snprintf(tmp, 512, "PR_REG_START: %d\n"
                                "initiator_fabric=%s\ninitiator_node=%s\n%s"
                                "sa_res_key=%llu\nres_holder=0\n"
-                               "res_all_tg_pt=%d\nmapped_lun=%u\n",
+                               "res_all_tg_pt=%d\nmapped_lun=%llu\n",
                                reg_count, tpg->se_tpg_tfo->get_fabric_name(),
                                pr_reg->pr_reg_nacl->initiatorname, isid_buf,
                                pr_reg->pr_res_key, pr_reg->pr_reg_all_tg_pt,
@@ -1885,11 +1903,12 @@ static int core_scsi3_update_aptpl_buf(
                 * Include information about the associated SCSI target port.
                 */
                snprintf(tmp, 512, "target_fabric=%s\ntarget_node=%s\n"
-                       "tpgt=%hu\nport_rtpi=%hu\ntarget_lun=%u\nPR_REG_END:"
+                       "tpgt=%hu\nport_rtpi=%hu\ntarget_lun=%llu\nPR_REG_END:"
                        " %d\n", tpg->se_tpg_tfo->get_fabric_name(),
                        tpg->se_tpg_tfo->tpg_get_wwn(tpg),
                        tpg->se_tpg_tfo->tpg_get_tag(tpg),
-                       lun->lun_sep->sep_rtpi, lun->unpacked_lun, reg_count);
+                       pr_reg->tg_pt_sep_rtpi, pr_reg->pr_aptpl_target_lun,
+                       reg_count);
 
                if ((len + strlen(tmp) >= pr_aptpl_buf_len)) {
                        pr_err("Unable to update renaming APTPL metadata,"
@@ -2000,7 +2019,6 @@ core_scsi3_emulate_pro_register(struct se_cmd *cmd, u64 res_key, u64 sa_res_key,
 {
        struct se_session *se_sess = cmd->se_sess;
        struct se_device *dev = cmd->se_dev;
-       struct se_dev_entry *se_deve;
        struct se_lun *se_lun = cmd->se_lun;
        struct se_portal_group *se_tpg;
        struct t10_pr_registration *pr_reg, *pr_reg_p, *pr_reg_tmp;
@@ -2014,7 +2032,6 @@ core_scsi3_emulate_pro_register(struct se_cmd *cmd, u64 res_key, u64 sa_res_key,
                return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
        }
        se_tpg = se_sess->se_tpg;
-       se_deve = se_sess->se_node_acl->device_list[cmd->orig_fe_lun];
 
        if (se_tpg->se_tpg_tfo->sess_get_initiator_sid) {
                memset(&isid_buf[0], 0, PR_REG_ISID_LEN);
@@ -2045,7 +2062,8 @@ core_scsi3_emulate_pro_register(struct se_cmd *cmd, u64 res_key, u64 sa_res_key,
                         * Logical Unit of the SCSI device server.
                         */
                        if (core_scsi3_alloc_registration(cmd->se_dev,
-                                       se_sess->se_node_acl, se_deve, isid_ptr,
+                                       se_sess->se_node_acl, cmd->se_lun,
+                                       NULL, cmd->orig_fe_lun, isid_ptr,
                                        sa_res_key, all_tg_pt, aptpl,
                                        register_type, 0)) {
                                pr_err("Unable to allocate"
@@ -2066,7 +2084,6 @@ core_scsi3_emulate_pro_register(struct se_cmd *cmd, u64 res_key, u64 sa_res_key,
                        if (ret != 0)
                                return ret;
                }
-
                return core_scsi3_update_and_write_aptpl(dev, aptpl);
        }
 
@@ -2180,7 +2197,7 @@ core_scsi3_emulate_pro_register(struct se_cmd *cmd, u64 res_key, u64 sa_res_key,
                                        &pr_tmpl->registration_list,
                                        pr_reg_list) {
 
-                               core_scsi3_ua_allocate(
+                               target_ua_allocate_lun(
                                        pr_reg_p->pr_reg_nacl,
                                        pr_reg_p->pr_res_mapped_lun,
                                        0x2A,
@@ -2607,7 +2624,7 @@ core_scsi3_emulate_pro_release(struct se_cmd *cmd, int type, int scope,
                if (pr_reg_p == pr_reg)
                        continue;
 
-               core_scsi3_ua_allocate(pr_reg_p->pr_reg_nacl,
+               target_ua_allocate_lun(pr_reg_p->pr_reg_nacl,
                                pr_reg_p->pr_res_mapped_lun,
                                0x2A, ASCQ_2AH_RESERVATIONS_RELEASED);
        }
@@ -2630,7 +2647,7 @@ core_scsi3_emulate_pro_clear(struct se_cmd *cmd, u64 res_key)
        struct se_session *se_sess = cmd->se_sess;
        struct t10_reservation *pr_tmpl = &dev->t10_pr;
        struct t10_pr_registration *pr_reg, *pr_reg_tmp, *pr_reg_n, *pr_res_holder;
-       u32 pr_res_mapped_lun = 0;
+       u64 pr_res_mapped_lun = 0;
        int calling_it_nexus = 0;
        /*
         * Locate the existing *pr_reg via struct se_node_acl pointers
@@ -2692,7 +2709,7 @@ core_scsi3_emulate_pro_clear(struct se_cmd *cmd, u64 res_key)
                 *    additional sense code set to RESERVATIONS PREEMPTED.
                 */
                if (!calling_it_nexus)
-                       core_scsi3_ua_allocate(pr_reg_nacl, pr_res_mapped_lun,
+                       target_ua_allocate_lun(pr_reg_nacl, pr_res_mapped_lun,
                                0x2A, ASCQ_2AH_RESERVATIONS_PREEMPTED);
        }
        spin_unlock(&pr_tmpl->registration_lock);
@@ -2786,7 +2803,7 @@ core_scsi3_pro_preempt(struct se_cmd *cmd, int type, int scope, u64 res_key,
        LIST_HEAD(preempt_and_abort_list);
        struct t10_pr_registration *pr_reg, *pr_reg_tmp, *pr_reg_n, *pr_res_holder;
        struct t10_reservation *pr_tmpl = &dev->t10_pr;
-       u32 pr_res_mapped_lun = 0;
+       u64 pr_res_mapped_lun = 0;
        int all_reg = 0, calling_it_nexus = 0;
        bool sa_res_key_unmatched = sa_res_key != 0;
        int prh_type = 0, prh_scope = 0;
@@ -2901,7 +2918,7 @@ core_scsi3_pro_preempt(struct se_cmd *cmd, int type, int scope, u64 res_key,
                                                NULL, 0);
                        }
                        if (!calling_it_nexus)
-                               core_scsi3_ua_allocate(pr_reg_nacl,
+                               target_ua_allocate_lun(pr_reg_nacl,
                                        pr_res_mapped_lun, 0x2A,
                                        ASCQ_2AH_REGISTRATIONS_PREEMPTED);
                }
@@ -3007,7 +3024,7 @@ core_scsi3_pro_preempt(struct se_cmd *cmd, int type, int scope, u64 res_key,
                 *    persistent reservation and/or registration, with the
                 *    additional sense code set to REGISTRATIONS PREEMPTED;
                 */
-               core_scsi3_ua_allocate(pr_reg_nacl, pr_res_mapped_lun, 0x2A,
+               target_ua_allocate_lun(pr_reg_nacl, pr_res_mapped_lun, 0x2A,
                                ASCQ_2AH_REGISTRATIONS_PREEMPTED);
        }
        spin_unlock(&pr_tmpl->registration_lock);
@@ -3040,7 +3057,7 @@ core_scsi3_pro_preempt(struct se_cmd *cmd, int type, int scope, u64 res_key,
                        if (calling_it_nexus)
                                continue;
 
-                       core_scsi3_ua_allocate(pr_reg->pr_reg_nacl,
+                       target_ua_allocate_lun(pr_reg->pr_reg_nacl,
                                        pr_reg->pr_res_mapped_lun, 0x2A,
                                        ASCQ_2AH_RESERVATIONS_RELEASED);
                }
@@ -3099,15 +3116,14 @@ core_scsi3_emulate_pro_register_and_move(struct se_cmd *cmd, u64 res_key,
        struct se_session *se_sess = cmd->se_sess;
        struct se_device *dev = cmd->se_dev;
        struct se_dev_entry *dest_se_deve = NULL;
-       struct se_lun *se_lun = cmd->se_lun;
+       struct se_lun *se_lun = cmd->se_lun, *tmp_lun;
        struct se_node_acl *pr_res_nacl, *pr_reg_nacl, *dest_node_acl = NULL;
-       struct se_port *se_port;
        struct se_portal_group *se_tpg, *dest_se_tpg = NULL;
        const struct target_core_fabric_ops *dest_tf_ops = NULL, *tf_ops;
        struct t10_pr_registration *pr_reg, *pr_res_holder, *dest_pr_reg;
        struct t10_reservation *pr_tmpl = &dev->t10_pr;
        unsigned char *buf;
-       unsigned char *initiator_str;
+       const unsigned char *initiator_str;
        char *iport_ptr = NULL, i_buf[PR_REG_ISID_ID_LEN];
        u32 tid_len, tmp_tid_len;
        int new_reg = 0, type, scope, matching_iname;
@@ -3186,12 +3202,10 @@ core_scsi3_emulate_pro_register_and_move(struct se_cmd *cmd, u64 res_key,
        }
 
        spin_lock(&dev->se_port_lock);
-       list_for_each_entry(se_port, &dev->dev_sep_list, sep_list) {
-               if (se_port->sep_rtpi != rtpi)
-                       continue;
-               dest_se_tpg = se_port->sep_tpg;
-               if (!dest_se_tpg)
+       list_for_each_entry(tmp_lun, &dev->dev_sep_list, lun_dev_link) {
+               if (tmp_lun->lun_rtpi != rtpi)
                        continue;
+               dest_se_tpg = tmp_lun->lun_tpg;
                dest_tf_ops = dest_se_tpg->se_tpg_tfo;
                if (!dest_tf_ops)
                        continue;
@@ -3230,23 +3244,16 @@ core_scsi3_emulate_pro_register_and_move(struct se_cmd *cmd, u64 res_key,
        pr_debug("SPC-3 PR REGISTER_AND_MOVE: Extracted Protocol Identifier:"
                        " 0x%02x\n", proto_ident);
 
-       if (proto_ident != dest_tf_ops->get_fabric_proto_ident(dest_se_tpg)) {
+       if (proto_ident != dest_se_tpg->proto_id) {
                pr_err("SPC-3 PR REGISTER_AND_MOVE: Received"
                        " proto_ident: 0x%02x does not match ident: 0x%02x"
                        " from fabric: %s\n", proto_ident,
-                       dest_tf_ops->get_fabric_proto_ident(dest_se_tpg),
+                       dest_se_tpg->proto_id,
                        dest_tf_ops->get_fabric_name());
                ret = TCM_INVALID_PARAMETER_LIST;
                goto out;
        }
-       if (dest_tf_ops->tpg_parse_pr_out_transport_id == NULL) {
-               pr_err("SPC-3 PR REGISTER_AND_MOVE: Fabric does not"
-                       " containg a valid tpg_parse_pr_out_transport_id"
-                       " function pointer\n");
-               ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
-               goto out;
-       }
-       initiator_str = dest_tf_ops->tpg_parse_pr_out_transport_id(dest_se_tpg,
+       initiator_str = target_parse_pr_out_transport_id(dest_se_tpg,
                        (const char *)&buf[24], &tmp_tid_len, &iport_ptr);
        if (!initiator_str) {
                pr_err("SPC-3 PR REGISTER_AND_MOVE: Unable to locate"
@@ -3295,12 +3302,12 @@ after_iport_check:
        /*
         * Locate the destination struct se_node_acl from the received Transport ID
         */
-       spin_lock_irq(&dest_se_tpg->acl_node_lock);
+       mutex_lock(&dest_se_tpg->acl_node_mutex);
        dest_node_acl = __core_tpg_get_initiator_node_acl(dest_se_tpg,
                                initiator_str);
        if (dest_node_acl)
                atomic_inc_mb(&dest_node_acl->acl_pr_ref_count);
-       spin_unlock_irq(&dest_se_tpg->acl_node_lock);
+       mutex_unlock(&dest_se_tpg->acl_node_mutex);
 
        if (!dest_node_acl) {
                pr_err("Unable to locate %s dest_node_acl for"
@@ -3337,14 +3344,14 @@ after_iport_check:
 
        if (core_scsi3_lunacl_depend_item(dest_se_deve)) {
                pr_err("core_scsi3_lunacl_depend_item() failed\n");
-               atomic_dec_mb(&dest_se_deve->pr_ref_count);
+               kref_put(&dest_se_deve->pr_kref, target_pr_kref_release);
                dest_se_deve = NULL;
                ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
                goto out;
        }
 
        pr_debug("SPC-3 PR REGISTER_AND_MOVE: Located %s node %s LUN"
-               " ACL for dest_se_deve->mapped_lun: %u\n",
+               " ACL for dest_se_deve->mapped_lun: %llu\n",
                dest_tf_ops->get_fabric_name(), dest_node_acl->initiatorname,
                dest_se_deve->mapped_lun);
 
@@ -3421,13 +3428,17 @@ after_iport_check:
        dest_pr_reg = __core_scsi3_locate_pr_reg(dev, dest_node_acl,
                                        iport_ptr);
        if (!dest_pr_reg) {
-               if (core_scsi3_alloc_registration(cmd->se_dev,
-                               dest_node_acl, dest_se_deve, iport_ptr,
-                               sa_res_key, 0, aptpl, 2, 1)) {
-                       spin_unlock(&dev->dev_reservation_lock);
+               struct se_lun *dest_lun = rcu_dereference_check(dest_se_deve->se_lun,
+                               atomic_read(&dest_se_deve->pr_kref.refcount) != 0);
+
+               spin_unlock(&dev->dev_reservation_lock);
+               if (core_scsi3_alloc_registration(cmd->se_dev, dest_node_acl,
+                                       dest_lun, dest_se_deve, dest_se_deve->mapped_lun,
+                                       iport_ptr, sa_res_key, 0, aptpl, 2, 1)) {
                        ret = TCM_INVALID_PARAMETER_LIST;
                        goto out;
                }
+               spin_lock(&dev->dev_reservation_lock);
                dest_pr_reg = __core_scsi3_locate_pr_reg(dev, dest_node_acl,
                                                iport_ptr);
                new_reg = 1;
@@ -3883,9 +3894,10 @@ core_scsi3_pri_read_full_status(struct se_cmd *cmd)
        struct t10_pr_registration *pr_reg, *pr_reg_tmp;
        struct t10_reservation *pr_tmpl = &dev->t10_pr;
        unsigned char *buf;
-       u32 add_desc_len = 0, add_len = 0, desc_len, exp_desc_len;
+       u32 add_desc_len = 0, add_len = 0;
        u32 off = 8; /* off into first Full Status descriptor */
        int format_code = 0, pr_res_type = 0, pr_res_scope = 0;
+       int exp_desc_len, desc_len;
        bool all_reg = false;
 
        if (cmd->data_length < 8) {
@@ -3930,10 +3942,10 @@ core_scsi3_pri_read_full_status(struct se_cmd *cmd)
                 * Determine expected length of $FABRIC_MOD specific
                 * TransportID full status descriptor..
                 */
-               exp_desc_len = se_tpg->se_tpg_tfo->tpg_get_pr_transport_id_len(
-                               se_tpg, se_nacl, pr_reg, &format_code);
-
-               if ((exp_desc_len + add_len) > cmd->data_length) {
+               exp_desc_len = target_get_pr_transport_id_len(se_nacl, pr_reg,
+                                       &format_code);
+               if (exp_desc_len < 0 ||
+                   exp_desc_len + add_len > cmd->data_length) {
                        pr_warn("SPC-3 PRIN READ_FULL_STATUS ran"
                                " out of buffer: %d\n", cmd->data_length);
                        spin_lock(&pr_tmpl->registration_lock);
@@ -3990,21 +4002,26 @@ core_scsi3_pri_read_full_status(struct se_cmd *cmd)
                 * IDENTIFIER field are not defined by this standard.
                 */
                if (!pr_reg->pr_reg_all_tg_pt) {
-                       struct se_port *port = pr_reg->pr_reg_tg_pt_lun->lun_sep;
+                       u16 sep_rtpi = pr_reg->tg_pt_sep_rtpi;
 
-                       buf[off++] = ((port->sep_rtpi >> 8) & 0xff);
-                       buf[off++] = (port->sep_rtpi & 0xff);
+                       buf[off++] = ((sep_rtpi >> 8) & 0xff);
+                       buf[off++] = (sep_rtpi & 0xff);
                } else
                        off += 2; /* Skip over RELATIVE TARGET PORT IDENTIFIER */
 
+               buf[off+4] = se_tpg->proto_id;
+
                /*
-                * Now, have the $FABRIC_MOD fill in the protocol identifier
+                * Now, have the $FABRIC_MOD fill in the transport ID.
                 */
-               desc_len = se_tpg->se_tpg_tfo->tpg_get_pr_transport_id(se_tpg,
-                               se_nacl, pr_reg, &format_code, &buf[off+4]);
+               desc_len = target_get_pr_transport_id(se_nacl, pr_reg,
+                               &format_code, &buf[off+4]);
 
                spin_lock(&pr_tmpl->registration_lock);
                atomic_dec_mb(&pr_reg->pr_res_holders);
+
+               if (desc_len < 0)
+                       break;
                /*
                 * Set the ADDITIONAL DESCRIPTOR LENGTH
                 */
index 749fd7bb7510d0d7d9dcfcbc223c1169b0b3b5ae..e3d26e9126a01fa860693d4ecb6852b0c7de3d36 100644 (file)
@@ -56,11 +56,11 @@ extern sense_reason_t target_scsi2_reservation_release(struct se_cmd *);
 extern sense_reason_t target_scsi2_reservation_reserve(struct se_cmd *);
 extern int core_scsi3_alloc_aptpl_registration(
                        struct t10_reservation *, u64,
-                       unsigned char *, unsigned char *, u32,
-                       unsigned char *, u16, u32, int, int, u8);
+                       unsigned char *, unsigned char *, u64,
+                       unsigned char *, u16, u64, int, int, u8);
 extern int core_scsi3_check_aptpl_registration(struct se_device *,
                        struct se_portal_group *, struct se_lun *,
-                       struct se_node_acl *, u32);
+                       struct se_node_acl *, u64);
 extern void core_scsi3_free_pr_reg_from_nacl(struct se_device *,
                                             struct se_node_acl *);
 extern void core_scsi3_free_all_registrations(struct se_device *);
index 26581e2151415563996f5d2ceaedb96c789749d1..08e9084ee615bed5a65e5fb66d0a6bc2d50efd5b 100644 (file)
@@ -42,9 +42,9 @@
 
 #include <target/target_core_base.h>
 #include <target/target_core_backend.h>
-#include <target/target_core_backend_configfs.h>
 
 #include "target_core_alua.h"
+#include "target_core_internal.h"
 #include "target_core_pscsi.h"
 
 #define ISPRINT(a)  ((a >= ' ') && (a <= '~'))
@@ -54,8 +54,6 @@ static inline struct pscsi_dev_virt *PSCSI_DEV(struct se_device *dev)
        return container_of(dev, struct pscsi_dev_virt, dev);
 }
 
-static struct se_subsystem_api pscsi_template;
-
 static sense_reason_t pscsi_execute_cmd(struct se_cmd *cmd);
 static void pscsi_req_done(struct request *, int);
 
@@ -80,7 +78,7 @@ static int pscsi_attach_hba(struct se_hba *hba, u32 host_id)
 
        pr_debug("CORE_HBA[%d] - TCM SCSI HBA Driver %s on"
                " Generic Target Core Stack %s\n", hba->hba_id,
-               PSCSI_VERSION, TARGET_CORE_MOD_VERSION);
+               PSCSI_VERSION, TARGET_CORE_VERSION);
        pr_debug("CORE_HBA[%d] - Attached SCSI HBA to Generic\n",
               hba->hba_id);
 
@@ -579,6 +577,14 @@ static int pscsi_configure_device(struct se_device *dev)
        return -ENODEV;
 }
 
+static void pscsi_dev_call_rcu(struct rcu_head *p)
+{
+       struct se_device *dev = container_of(p, struct se_device, rcu_head);
+       struct pscsi_dev_virt *pdv = PSCSI_DEV(dev);
+
+       kfree(pdv);
+}
+
 static void pscsi_free_device(struct se_device *dev)
 {
        struct pscsi_dev_virt *pdv = PSCSI_DEV(dev);
@@ -610,8 +616,7 @@ static void pscsi_free_device(struct se_device *dev)
 
                pdv->pdv_sd = NULL;
        }
-
-       kfree(pdv);
+       call_rcu(&dev->rcu_head, pscsi_dev_call_rcu);
 }
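
Instead of kfree()ing the pscsi private data synchronously, pscsi_free_device() now hands the embedded rcu_head to call_rcu(), so a reader still walking RCU-protected device state cannot trip over freed memory; the ramdisk backend further down makes the same change with rd_dev_call_rcu(). The general pattern, stripped of target specifics -- the struct and function names here are made up, only call_rcu()/container_of() are the real API.

#include <linux/rcupdate.h>
#include <linux/slab.h>

struct my_backend_dev {
        /* ... backend-private state ... */
        struct rcu_head rcu_head;       /* embedded for deferred freeing */
};

static void my_backend_dev_free_rcu(struct rcu_head *head)
{
        /* Runs only after a grace period: no RCU reader can still see it. */
        kfree(container_of(head, struct my_backend_dev, rcu_head));
}

static void my_backend_dev_free(struct my_backend_dev *dev)
{
        call_rcu(&dev->rcu_head, my_backend_dev_free_rcu);
}
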
 
 static void pscsi_transport_complete(struct se_cmd *cmd, struct scatterlist *sg,
@@ -635,12 +640,14 @@ static void pscsi_transport_complete(struct se_cmd *cmd, struct scatterlist *sg,
         * Hack to make sure that Write-Protect modepage is set if R/O mode is
         * forced.
         */
-       if (!cmd->se_deve || !cmd->data_length)
+       if (!cmd->data_length)
                goto after_mode_sense;
 
        if (((cdb[0] == MODE_SENSE) || (cdb[0] == MODE_SENSE_10)) &&
             (status_byte(result) << 1) == SAM_STAT_GOOD) {
-               if (cmd->se_deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY) {
+               bool read_only = target_lun_is_rdonly(cmd);
+
+               if (read_only) {
                        unsigned char *buf;
 
                        buf = transport_kmap_data_sg(cmd);
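
The write-protect hack above no longer reaches into cmd->se_deve; target_lun_is_rdonly() answers whether this nexus maps the LUN read-only, and the rest of the branch just forces the WP bit in the returned mode parameter header. A hypothetical, self-contained rendering of that branch: the helper force_wp_bit() and its factoring are illustrative, and the byte offsets are the standard SPC mode-header layout rather than something taken from this hunk.

/* Illustrative only: set WP in MODE SENSE data for a read-only mapped LUN. */
static void force_wp_bit(struct se_cmd *cmd, unsigned char *cdb)
{
        unsigned char *buf;

        if (cdb[0] != MODE_SENSE && cdb[0] != MODE_SENSE_10)
                return;
        if (!target_lun_is_rdonly(cmd))
                return;

        buf = transport_kmap_data_sg(cmd);
        if (!buf)
                return;
        /*
         * WP is bit 7 of the DEVICE-SPECIFIC PARAMETER byte in the mode
         * parameter header: byte 2 for MODE SENSE(6), byte 3 for MODE SENSE(10).
         */
        buf[cdb[0] == MODE_SENSE_10 ? 3 : 2] |= 0x80;
        transport_kunmap_data_sg(cmd);
}
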
@@ -1116,27 +1123,7 @@ static void pscsi_req_done(struct request *req, int uptodate)
        kfree(pt);
 }
 
-DEF_TB_DEV_ATTRIB_RO(pscsi, hw_pi_prot_type);
-TB_DEV_ATTR_RO(pscsi, hw_pi_prot_type);
-
-DEF_TB_DEV_ATTRIB_RO(pscsi, hw_block_size);
-TB_DEV_ATTR_RO(pscsi, hw_block_size);
-
-DEF_TB_DEV_ATTRIB_RO(pscsi, hw_max_sectors);
-TB_DEV_ATTR_RO(pscsi, hw_max_sectors);
-
-DEF_TB_DEV_ATTRIB_RO(pscsi, hw_queue_depth);
-TB_DEV_ATTR_RO(pscsi, hw_queue_depth);
-
-static struct configfs_attribute *pscsi_backend_dev_attrs[] = {
-       &pscsi_dev_attrib_hw_pi_prot_type.attr,
-       &pscsi_dev_attrib_hw_block_size.attr,
-       &pscsi_dev_attrib_hw_max_sectors.attr,
-       &pscsi_dev_attrib_hw_queue_depth.attr,
-       NULL,
-};
-
-static struct se_subsystem_api pscsi_template = {
+static const struct target_backend_ops pscsi_ops = {
        .name                   = "pscsi",
        .owner                  = THIS_MODULE,
        .transport_flags        = TRANSPORT_FLAG_PASSTHROUGH,
@@ -1152,21 +1139,17 @@ static struct se_subsystem_api pscsi_template = {
        .show_configfs_dev_params = pscsi_show_configfs_dev_params,
        .get_device_type        = pscsi_get_device_type,
        .get_blocks             = pscsi_get_blocks,
+       .tb_dev_attrib_attrs    = passthrough_attrib_attrs,
 };
 
 static int __init pscsi_module_init(void)
 {
-       struct target_backend_cits *tbc = &pscsi_template.tb_cits;
-
-       target_core_setup_sub_cits(&pscsi_template);
-       tbc->tb_dev_attrib_cit.ct_attrs = pscsi_backend_dev_attrs;
-
-       return transport_subsystem_register(&pscsi_template);
+       return transport_backend_register(&pscsi_ops);
 }
 
 static void __exit pscsi_module_exit(void)
 {
-       transport_subsystem_release(&pscsi_template);
+       target_backend_unregister(&pscsi_ops);
 }
 
 MODULE_DESCRIPTION("TCM PSCSI subsystem plugin");
index b2d8f6f9163336a7f17ce3107e447476c4729994..4703f403f31c0dd6cc9b4d31422bbc16387b20e7 100644 (file)
@@ -33,7 +33,6 @@
 
 #include <target/target_core_base.h>
 #include <target/target_core_backend.h>
-#include <target/target_core_backend_configfs.h>
 
 #include "target_core_rd.h"
 
@@ -42,10 +41,6 @@ static inline struct rd_dev *RD_DEV(struct se_device *dev)
        return container_of(dev, struct rd_dev, dev);
 }
 
-/*     rd_attach_hba(): (Part of se_subsystem_api_t template)
- *
- *
- */
 static int rd_attach_hba(struct se_hba *hba, u32 host_id)
 {
        struct rd_host *rd_host;
@@ -62,7 +57,7 @@ static int rd_attach_hba(struct se_hba *hba, u32 host_id)
 
        pr_debug("CORE_HBA[%d] - TCM Ramdisk HBA Driver %s on"
                " Generic Target Core Stack %s\n", hba->hba_id,
-               RD_HBA_VERSION, TARGET_CORE_MOD_VERSION);
+               RD_HBA_VERSION, TARGET_CORE_VERSION);
 
        return 0;
 }
@@ -354,12 +349,20 @@ fail:
        return ret;
 }
 
+static void rd_dev_call_rcu(struct rcu_head *p)
+{
+       struct se_device *dev = container_of(p, struct se_device, rcu_head);
+       struct rd_dev *rd_dev = RD_DEV(dev);
+
+       kfree(rd_dev);
+}
+
 static void rd_free_device(struct se_device *dev)
 {
        struct rd_dev *rd_dev = RD_DEV(dev);
 
        rd_release_device_space(rd_dev);
-       kfree(rd_dev);
+       call_rcu(&dev->rcu_head, rd_dev_call_rcu);
 }
 
 static struct rd_dev_sg_table *rd_get_sg_table(struct rd_dev *rd_dev, u32 page)
@@ -402,10 +405,7 @@ static struct rd_dev_sg_table *rd_get_prot_table(struct rd_dev *rd_dev, u32 page
        return NULL;
 }
 
-typedef sense_reason_t (*dif_verify)(struct se_cmd *, sector_t, unsigned int,
-                                    unsigned int, struct scatterlist *, int);
-
-static sense_reason_t rd_do_prot_rw(struct se_cmd *cmd, dif_verify dif_verify)
+static sense_reason_t rd_do_prot_rw(struct se_cmd *cmd, bool is_read)
 {
        struct se_device *se_dev = cmd->se_dev;
        struct rd_dev *dev = RD_DEV(se_dev);
@@ -465,7 +465,16 @@ static sense_reason_t rd_do_prot_rw(struct se_cmd *cmd, dif_verify dif_verify)
 
 #endif /* !CONFIG_ARCH_HAS_SG_CHAIN */
 
-       rc = dif_verify(cmd, cmd->t_task_lba, sectors, 0, prot_sg, prot_offset);
+       if (is_read)
+               rc = sbc_dif_verify(cmd, cmd->t_task_lba, sectors, 0,
+                                   prot_sg, prot_offset);
+       else
+               rc = sbc_dif_verify(cmd, cmd->t_task_lba, sectors, 0,
+                                   cmd->t_prot_sg, 0);
+
+       if (!rc)
+               sbc_dif_copy_prot(cmd, sectors, is_read, prot_sg, prot_offset);
+
        if (need_to_release)
                kfree(prot_sg);
 
@@ -511,7 +520,7 @@ rd_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
 
        if (cmd->prot_type && se_dev->dev_attrib.pi_prot_type &&
            data_direction == DMA_TO_DEVICE) {
-               rc = rd_do_prot_rw(cmd, sbc_dif_verify_write);
+               rc = rd_do_prot_rw(cmd, false);
                if (rc)
                        return rc;
        }
@@ -579,7 +588,7 @@ rd_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
 
        if (cmd->prot_type && se_dev->dev_attrib.pi_prot_type &&
            data_direction == DMA_FROM_DEVICE) {
-               rc = rd_do_prot_rw(cmd, sbc_dif_verify_read);
+               rc = rd_do_prot_rw(cmd, true);
                if (rc)
                        return rc;
        }
@@ -693,42 +702,7 @@ rd_parse_cdb(struct se_cmd *cmd)
        return sbc_parse_cdb(cmd, &rd_sbc_ops);
 }
 
-DEF_TB_DEFAULT_ATTRIBS(rd_mcp);
-
-static struct configfs_attribute *rd_mcp_backend_dev_attrs[] = {
-       &rd_mcp_dev_attrib_emulate_model_alias.attr,
-       &rd_mcp_dev_attrib_emulate_dpo.attr,
-       &rd_mcp_dev_attrib_emulate_fua_write.attr,
-       &rd_mcp_dev_attrib_emulate_fua_read.attr,
-       &rd_mcp_dev_attrib_emulate_write_cache.attr,
-       &rd_mcp_dev_attrib_emulate_ua_intlck_ctrl.attr,
-       &rd_mcp_dev_attrib_emulate_tas.attr,
-       &rd_mcp_dev_attrib_emulate_tpu.attr,
-       &rd_mcp_dev_attrib_emulate_tpws.attr,
-       &rd_mcp_dev_attrib_emulate_caw.attr,
-       &rd_mcp_dev_attrib_emulate_3pc.attr,
-       &rd_mcp_dev_attrib_pi_prot_type.attr,
-       &rd_mcp_dev_attrib_hw_pi_prot_type.attr,
-       &rd_mcp_dev_attrib_pi_prot_format.attr,
-       &rd_mcp_dev_attrib_enforce_pr_isids.attr,
-       &rd_mcp_dev_attrib_is_nonrot.attr,
-       &rd_mcp_dev_attrib_emulate_rest_reord.attr,
-       &rd_mcp_dev_attrib_force_pr_aptpl.attr,
-       &rd_mcp_dev_attrib_hw_block_size.attr,
-       &rd_mcp_dev_attrib_block_size.attr,
-       &rd_mcp_dev_attrib_hw_max_sectors.attr,
-       &rd_mcp_dev_attrib_optimal_sectors.attr,
-       &rd_mcp_dev_attrib_hw_queue_depth.attr,
-       &rd_mcp_dev_attrib_queue_depth.attr,
-       &rd_mcp_dev_attrib_max_unmap_lba_count.attr,
-       &rd_mcp_dev_attrib_max_unmap_block_desc_count.attr,
-       &rd_mcp_dev_attrib_unmap_granularity.attr,
-       &rd_mcp_dev_attrib_unmap_granularity_alignment.attr,
-       &rd_mcp_dev_attrib_max_write_same_len.attr,
-       NULL,
-};
-
-static struct se_subsystem_api rd_mcp_template = {
+static const struct target_backend_ops rd_mcp_ops = {
        .name                   = "rd_mcp",
        .inquiry_prod           = "RAMDISK-MCP",
        .inquiry_rev            = RD_MCP_VERSION,
@@ -744,25 +718,15 @@ static struct se_subsystem_api rd_mcp_template = {
        .get_blocks             = rd_get_blocks,
        .init_prot              = rd_init_prot,
        .free_prot              = rd_free_prot,
+       .tb_dev_attrib_attrs    = sbc_attrib_attrs,
 };
 
 int __init rd_module_init(void)
 {
-       struct target_backend_cits *tbc = &rd_mcp_template.tb_cits;
-       int ret;
-
-       target_core_setup_sub_cits(&rd_mcp_template);
-       tbc->tb_dev_attrib_cit.ct_attrs = rd_mcp_backend_dev_attrs;
-
-       ret = transport_subsystem_register(&rd_mcp_template);
-       if (ret < 0) {
-               return ret;
-       }
-
-       return 0;
+       return transport_backend_register(&rd_mcp_ops);
 }
 
 void rd_module_exit(void)
 {
-       transport_subsystem_release(&rd_mcp_template);
+       target_backend_unregister(&rd_mcp_ops);
 }
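
Both backend conversions above collapse the old se_subsystem_api plus target_core_setup_sub_cits() boilerplate into a const ops table and a single register/unregister pair. A hedged sketch of a backend's module glue after this series (hypothetical my_* names; the registration calls and attribute lists are the ones that appear in the diff):

#include <linux/module.h>
#include <target/target_core_base.h>
#include <target/target_core_backend.h>

static const struct target_backend_ops my_ops = {
        .name                   = "my_backend",
        .owner                  = THIS_MODULE,
        /* ... attach_hba/configure_device/free_device/parse_cdb callbacks ... */
        .tb_dev_attrib_attrs    = sbc_attrib_attrs,     /* or passthrough_attrib_attrs */
};

static int __init my_module_init(void)
{
        /* One call replaces the manual tb_cits / ct_attrs wiring. */
        return transport_backend_register(&my_ops);
}

static void __exit my_module_exit(void)
{
        target_backend_unregister(&my_ops);
}

module_init(my_module_init);
module_exit(my_module_exit);
MODULE_LICENSE("GPL");
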
index 43719b393ca9e607912693ee5b14525ad8830266..e318ddbe15da05338a2af842d4975d2958a0abab 100644 (file)
@@ -38,6 +38,7 @@
 
 static sense_reason_t
 sbc_check_prot(struct se_device *, struct se_cmd *, unsigned char *, u32, bool);
+static sense_reason_t sbc_execute_unmap(struct se_cmd *cmd);
 
 static sense_reason_t
 sbc_emulate_readcapacity(struct se_cmd *cmd)
@@ -176,6 +177,23 @@ sector_t sbc_get_write_same_sectors(struct se_cmd *cmd)
 }
 EXPORT_SYMBOL(sbc_get_write_same_sectors);
 
+static sense_reason_t
+sbc_execute_write_same_unmap(struct se_cmd *cmd)
+{
+       struct sbc_ops *ops = cmd->protocol_data;
+       sector_t nolb = sbc_get_write_same_sectors(cmd);
+       sense_reason_t ret;
+
+       if (nolb) {
+               ret = ops->execute_unmap(cmd, cmd->t_task_lba, nolb);
+               if (ret)
+                       return ret;
+       }
+
+       target_complete_cmd(cmd, GOOD);
+       return 0;
+}
+
 static sense_reason_t
 sbc_emulate_noop(struct se_cmd *cmd)
 {
@@ -299,7 +317,7 @@ sbc_setup_write_same(struct se_cmd *cmd, unsigned char *flags, struct sbc_ops *o
         * translated into block discard requests within backend code.
         */
        if (flags[0] & 0x08) {
-               if (!ops->execute_write_same_unmap)
+               if (!ops->execute_unmap)
                        return TCM_UNSUPPORTED_SCSI_OPCODE;
 
                if (!dev->dev_attrib.emulate_tpws) {
@@ -307,7 +325,7 @@ sbc_setup_write_same(struct se_cmd *cmd, unsigned char *flags, struct sbc_ops *o
                               " has emulate_tpws disabled\n");
                        return TCM_UNSUPPORTED_SCSI_OPCODE;
                }
-               cmd->execute_cmd = ops->execute_write_same_unmap;
+               cmd->execute_cmd = sbc_execute_write_same_unmap;
                return 0;
        }
        if (!ops->execute_write_same)
@@ -381,7 +399,9 @@ out:
 static sense_reason_t
 sbc_execute_rw(struct se_cmd *cmd)
 {
-       return cmd->execute_rw(cmd, cmd->t_data_sg, cmd->t_data_nents,
+       struct sbc_ops *ops = cmd->protocol_data;
+
+       return ops->execute_rw(cmd, cmd->t_data_sg, cmd->t_data_nents,
                               cmd->data_direction);
 }
 
@@ -560,6 +580,7 @@ out:
 static sense_reason_t
 sbc_compare_and_write(struct se_cmd *cmd)
 {
+       struct sbc_ops *ops = cmd->protocol_data;
        struct se_device *dev = cmd->se_dev;
        sense_reason_t ret;
        int rc;
@@ -579,7 +600,7 @@ sbc_compare_and_write(struct se_cmd *cmd)
         */
        cmd->data_length = cmd->t_task_nolb * dev->dev_attrib.block_size;
 
-       ret = cmd->execute_rw(cmd, cmd->t_bidi_data_sg, cmd->t_bidi_data_nents,
+       ret = ops->execute_rw(cmd, cmd->t_bidi_data_sg, cmd->t_bidi_data_nents,
                              DMA_FROM_DEVICE);
        if (ret) {
                cmd->transport_complete_callback = NULL;
@@ -738,14 +759,15 @@ static int
 sbc_check_dpofua(struct se_device *dev, struct se_cmd *cmd, unsigned char *cdb)
 {
        if (cdb[1] & 0x10) {
-               if (!dev->dev_attrib.emulate_dpo) {
+               /* see explanation in spc_emulate_modesense */
+               if (!target_check_fua(dev)) {
                        pr_err("Got CDB: 0x%02x with DPO bit set, but device"
                               " does not advertise support for DPO\n", cdb[0]);
                        return -EINVAL;
                }
        }
        if (cdb[1] & 0x8) {
-               if (!dev->dev_attrib.emulate_fua_write || !se_dev_check_wce(dev)) {
+               if (!target_check_fua(dev)) {
                        pr_err("Got CDB: 0x%02x with FUA bit set, but device"
                               " does not advertise support for FUA write\n",
                               cdb[0]);
@@ -765,12 +787,13 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
        u32 sectors = 0;
        sense_reason_t ret;
 
+       cmd->protocol_data = ops;
+
        switch (cdb[0]) {
        case READ_6:
                sectors = transport_get_sectors_6(cdb);
                cmd->t_task_lba = transport_lba_21(cdb);
                cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
-               cmd->execute_rw = ops->execute_rw;
                cmd->execute_cmd = sbc_execute_rw;
                break;
        case READ_10:
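
The repeated deletion of cmd->execute_rw in sbc_parse_cdb() above works because the whole sbc_ops table is now stashed once in cmd->protocol_data and recovered by the generic execute paths. A small self-contained model of that indirection (plain C, hypothetical names, not the actual target-core structures):

#include <stdio.h>

/* Hypothetical backend operation table (models struct sbc_ops). */
struct ops {
        int (*execute_rw)(const char *what);
};

/* Hypothetical command (models struct se_cmd): one opaque slot for the ops. */
struct cmd {
        void *protocol_data;
        int (*execute_cmd)(struct cmd *);
};

static int backend_rw(const char *what)
{
        printf("backend executes %s\n", what);
        return 0;
}

/* Generic dispatcher: recovers the ops table stored at parse time. */
static int execute_rw(struct cmd *cmd)
{
        struct ops *ops = cmd->protocol_data;

        return ops->execute_rw("READ/WRITE");
}

static void parse_cdb(struct cmd *cmd, struct ops *ops)
{
        cmd->protocol_data = ops;       /* stored once at parse time ...      */
        cmd->execute_cmd = execute_rw;  /* ... instead of copying each callback */
}

int main(void)
{
        struct ops ops = { .execute_rw = backend_rw };
        struct cmd cmd;

        parse_cdb(&cmd, &ops);
        return cmd.execute_cmd(&cmd);
}
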
@@ -785,7 +808,6 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
                        return ret;
 
                cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
-               cmd->execute_rw = ops->execute_rw;
                cmd->execute_cmd = sbc_execute_rw;
                break;
        case READ_12:
@@ -800,7 +822,6 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
                        return ret;
 
                cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
-               cmd->execute_rw = ops->execute_rw;
                cmd->execute_cmd = sbc_execute_rw;
                break;
        case READ_16:
@@ -815,14 +836,12 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
                        return ret;
 
                cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
-               cmd->execute_rw = ops->execute_rw;
                cmd->execute_cmd = sbc_execute_rw;
                break;
        case WRITE_6:
                sectors = transport_get_sectors_6(cdb);
                cmd->t_task_lba = transport_lba_21(cdb);
                cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
-               cmd->execute_rw = ops->execute_rw;
                cmd->execute_cmd = sbc_execute_rw;
                break;
        case WRITE_10:
@@ -838,7 +857,6 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
                        return ret;
 
                cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
-               cmd->execute_rw = ops->execute_rw;
                cmd->execute_cmd = sbc_execute_rw;
                break;
        case WRITE_12:
@@ -853,7 +871,6 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
                        return ret;
 
                cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
-               cmd->execute_rw = ops->execute_rw;
                cmd->execute_cmd = sbc_execute_rw;
                break;
        case WRITE_16:
@@ -868,7 +885,6 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
                        return ret;
 
                cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
-               cmd->execute_rw = ops->execute_rw;
                cmd->execute_cmd = sbc_execute_rw;
                break;
        case XDWRITEREAD_10:
@@ -886,7 +902,6 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
                /*
                 * Setup BIDI XOR callback to be run after I/O completion.
                 */
-               cmd->execute_rw = ops->execute_rw;
                cmd->execute_cmd = sbc_execute_rw;
                cmd->transport_complete_callback = &xdreadwrite_callback;
                break;
@@ -910,7 +925,6 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
                         * Setup BIDI XOR callback to be run after I/O
                         * completion.
                         */
-                       cmd->execute_rw = ops->execute_rw;
                        cmd->execute_cmd = sbc_execute_rw;
                        cmd->transport_complete_callback = &xdreadwrite_callback;
                        break;
@@ -954,7 +968,6 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
                cmd->t_task_lba = get_unaligned_be64(&cdb[2]);
                cmd->t_task_nolb = sectors;
                cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB | SCF_COMPARE_AND_WRITE;
-               cmd->execute_rw = ops->execute_rw;
                cmd->execute_cmd = sbc_compare_and_write;
                cmd->transport_complete_callback = compare_and_write_callback;
                break;
@@ -1004,7 +1017,7 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
                        return TCM_UNSUPPORTED_SCSI_OPCODE;
                }
                size = get_unaligned_be16(&cdb[7]);
-               cmd->execute_cmd = ops->execute_unmap;
+               cmd->execute_cmd = sbc_execute_unmap;
                break;
        case WRITE_SAME_16:
                sectors = transport_get_sectors_16(cdb);
@@ -1092,12 +1105,10 @@ u32 sbc_get_device_type(struct se_device *dev)
 }
 EXPORT_SYMBOL(sbc_get_device_type);
 
-sense_reason_t
-sbc_execute_unmap(struct se_cmd *cmd,
-       sense_reason_t (*do_unmap_fn)(struct se_cmd *, void *,
-                                     sector_t, sector_t),
-       void *priv)
+static sense_reason_t
+sbc_execute_unmap(struct se_cmd *cmd)
 {
+       struct sbc_ops *ops = cmd->protocol_data;
        struct se_device *dev = cmd->se_dev;
        unsigned char *buf, *ptr = NULL;
        sector_t lba;
@@ -1161,7 +1172,7 @@ sbc_execute_unmap(struct se_cmd *cmd,
                        goto err;
                }
 
-               ret = do_unmap_fn(cmd, priv, lba, range);
+               ret = ops->execute_unmap(cmd, lba, range);
                if (ret)
                        goto err;
 
@@ -1175,34 +1186,56 @@ err:
                target_complete_cmd(cmd, GOOD);
        return ret;
 }
-EXPORT_SYMBOL(sbc_execute_unmap);
 
 void
 sbc_dif_generate(struct se_cmd *cmd)
 {
        struct se_device *dev = cmd->se_dev;
        struct se_dif_v1_tuple *sdt;
-       struct scatterlist *dsg, *psg = cmd->t_prot_sg;
+       struct scatterlist *dsg = cmd->t_data_sg, *psg;
        sector_t sector = cmd->t_task_lba;
        void *daddr, *paddr;
        int i, j, offset = 0;
+       unsigned int block_size = dev->dev_attrib.block_size;
 
-       for_each_sg(cmd->t_data_sg, dsg, cmd->t_data_nents, i) {
-               daddr = kmap_atomic(sg_page(dsg)) + dsg->offset;
+       for_each_sg(cmd->t_prot_sg, psg, cmd->t_prot_nents, i) {
                paddr = kmap_atomic(sg_page(psg)) + psg->offset;
+               daddr = kmap_atomic(sg_page(dsg)) + dsg->offset;
 
-               for (j = 0; j < dsg->length; j += dev->dev_attrib.block_size) {
+               for (j = 0; j < psg->length;
+                               j += sizeof(struct se_dif_v1_tuple)) {
+                       __u16 crc;
+                       unsigned int avail;
+
+                       if (offset >= dsg->length) {
+                               offset -= dsg->length;
+                               kunmap_atomic(daddr - dsg->offset);
+                               dsg = sg_next(dsg);
+                               if (!dsg) {
+                                       kunmap_atomic(paddr - psg->offset);
+                                       return;
+                               }
+                               daddr = kmap_atomic(sg_page(dsg)) + dsg->offset;
+                       }
 
-                       if (offset >= psg->length) {
-                               kunmap_atomic(paddr);
-                               psg = sg_next(psg);
-                               paddr = kmap_atomic(sg_page(psg)) + psg->offset;
-                               offset = 0;
+                       sdt = paddr + j;
+                       avail = min(block_size, dsg->length - offset);
+                       crc = crc_t10dif(daddr + offset, avail);
+                       if (avail < block_size) {
+                               kunmap_atomic(daddr - dsg->offset);
+                               dsg = sg_next(dsg);
+                               if (!dsg) {
+                                       kunmap_atomic(paddr - psg->offset);
+                                       return;
+                               }
+                               daddr = kmap_atomic(sg_page(dsg)) + dsg->offset;
+                               offset = block_size - avail;
+                               crc = crc_t10dif_update(crc, daddr, offset);
+                       } else {
+                               offset += block_size;
                        }
 
-                       sdt = paddr + offset;
-                       sdt->guard_tag = cpu_to_be16(crc_t10dif(daddr + j,
-                                               dev->dev_attrib.block_size));
+                       sdt->guard_tag = cpu_to_be16(crc);
                        if (cmd->prot_type == TARGET_DIF_TYPE1_PROT)
                                sdt->ref_tag = cpu_to_be32(sector & 0xffffffff);
                        sdt->app_tag = 0;
@@ -1215,26 +1248,23 @@ sbc_dif_generate(struct se_cmd *cmd)
                                 be32_to_cpu(sdt->ref_tag));
 
                        sector++;
-                       offset += sizeof(struct se_dif_v1_tuple);
                }
 
-               kunmap_atomic(paddr);
-               kunmap_atomic(daddr);
+               kunmap_atomic(daddr - dsg->offset);
+               kunmap_atomic(paddr - psg->offset);
        }
 }
 
 static sense_reason_t
 sbc_dif_v1_verify(struct se_cmd *cmd, struct se_dif_v1_tuple *sdt,
-                 const void *p, sector_t sector, unsigned int ei_lba)
+                 __u16 crc, sector_t sector, unsigned int ei_lba)
 {
-       struct se_device *dev = cmd->se_dev;
-       int block_size = dev->dev_attrib.block_size;
        __be16 csum;
 
        if (!(cmd->prot_checks & TARGET_DIF_CHECK_GUARD))
                goto check_ref;
 
-       csum = cpu_to_be16(crc_t10dif(p, block_size));
+       csum = cpu_to_be16(crc);
 
        if (sdt->guard_tag != csum) {
                pr_err("DIFv1 checksum failed on sector %llu guard tag 0x%04x"
@@ -1266,9 +1296,8 @@ check_ref:
        return 0;
 }
 
-static void
-sbc_dif_copy_prot(struct se_cmd *cmd, unsigned int sectors, bool read,
-                 struct scatterlist *sg, int sg_off)
+void sbc_dif_copy_prot(struct se_cmd *cmd, unsigned int sectors, bool read,
+                      struct scatterlist *sg, int sg_off)
 {
        struct se_device *dev = cmd->se_dev;
        struct scatterlist *psg;
@@ -1300,100 +1329,54 @@ sbc_dif_copy_prot(struct se_cmd *cmd, unsigned int sectors, bool read,
                        copied += len;
                        psg_len -= len;
 
+                       kunmap_atomic(addr - sg->offset - offset);
+
                        if (offset >= sg->length) {
                                sg = sg_next(sg);
                                offset = 0;
                        }
-                       kunmap_atomic(addr);
                }
-               kunmap_atomic(paddr);
+               kunmap_atomic(paddr - psg->offset);
        }
 }
+EXPORT_SYMBOL(sbc_dif_copy_prot);
 
 sense_reason_t
-sbc_dif_verify_write(struct se_cmd *cmd, sector_t start, unsigned int sectors,
-                    unsigned int ei_lba, struct scatterlist *sg, int sg_off)
+sbc_dif_verify(struct se_cmd *cmd, sector_t start, unsigned int sectors,
+              unsigned int ei_lba, struct scatterlist *psg, int psg_off)
 {
        struct se_device *dev = cmd->se_dev;
        struct se_dif_v1_tuple *sdt;
-       struct scatterlist *dsg, *psg = cmd->t_prot_sg;
+       struct scatterlist *dsg = cmd->t_data_sg;
        sector_t sector = start;
        void *daddr, *paddr;
-       int i, j, offset = 0;
+       int i;
        sense_reason_t rc;
+       int dsg_off = 0;
+       unsigned int block_size = dev->dev_attrib.block_size;
 
-       for_each_sg(cmd->t_data_sg, dsg, cmd->t_data_nents, i) {
-               daddr = kmap_atomic(sg_page(dsg)) + dsg->offset;
+       for (; psg && sector < start + sectors; psg = sg_next(psg)) {
                paddr = kmap_atomic(sg_page(psg)) + psg->offset;
-
-               for (j = 0; j < dsg->length; j += dev->dev_attrib.block_size) {
-
-                       if (offset >= psg->length) {
-                               kunmap_atomic(paddr);
-                               psg = sg_next(psg);
-                               paddr = kmap_atomic(sg_page(psg)) + psg->offset;
-                               offset = 0;
-                       }
-
-                       sdt = paddr + offset;
-
-                       pr_debug("DIF WRITE sector: %llu guard_tag: 0x%04x"
-                                " app_tag: 0x%04x ref_tag: %u\n",
-                                (unsigned long long)sector, sdt->guard_tag,
-                                sdt->app_tag, be32_to_cpu(sdt->ref_tag));
-
-                       rc = sbc_dif_v1_verify(cmd, sdt, daddr + j, sector,
-                                              ei_lba);
-                       if (rc) {
-                               kunmap_atomic(paddr);
-                               kunmap_atomic(daddr);
-                               cmd->bad_sector = sector;
-                               return rc;
-                       }
-
-                       sector++;
-                       ei_lba++;
-                       offset += sizeof(struct se_dif_v1_tuple);
-               }
-
-               kunmap_atomic(paddr);
-               kunmap_atomic(daddr);
-       }
-       if (!sg)
-               return 0;
-
-       sbc_dif_copy_prot(cmd, sectors, false, sg, sg_off);
-
-       return 0;
-}
-EXPORT_SYMBOL(sbc_dif_verify_write);
-
-static sense_reason_t
-__sbc_dif_verify_read(struct se_cmd *cmd, sector_t start, unsigned int sectors,
-                     unsigned int ei_lba, struct scatterlist *sg, int sg_off)
-{
-       struct se_device *dev = cmd->se_dev;
-       struct se_dif_v1_tuple *sdt;
-       struct scatterlist *dsg, *psg = sg;
-       sector_t sector = start;
-       void *daddr, *paddr;
-       int i, j, offset = sg_off;
-       sense_reason_t rc;
-
-       for_each_sg(cmd->t_data_sg, dsg, cmd->t_data_nents, i) {
                daddr = kmap_atomic(sg_page(dsg)) + dsg->offset;
-               paddr = kmap_atomic(sg_page(psg)) + sg->offset;
-
-               for (j = 0; j < dsg->length; j += dev->dev_attrib.block_size) {
 
-                       if (offset >= psg->length) {
-                               kunmap_atomic(paddr);
-                               psg = sg_next(psg);
-                               paddr = kmap_atomic(sg_page(psg)) + psg->offset;
-                               offset = 0;
+               for (i = psg_off; i < psg->length &&
+                               sector < start + sectors;
+                               i += sizeof(struct se_dif_v1_tuple)) {
+                       __u16 crc;
+                       unsigned int avail;
+
+                       if (dsg_off >= dsg->length) {
+                               dsg_off -= dsg->length;
+                               kunmap_atomic(daddr - dsg->offset);
+                               dsg = sg_next(dsg);
+                               if (!dsg) {
+                                       kunmap_atomic(paddr - psg->offset);
+                                       return 0;
+                               }
+                               daddr = kmap_atomic(sg_page(dsg)) + dsg->offset;
                        }
 
-                       sdt = paddr + offset;
+                       sdt = paddr + i;
 
                        pr_debug("DIF READ sector: %llu guard_tag: 0x%04x"
                                 " app_tag: 0x%04x ref_tag: %u\n",
@@ -1401,53 +1384,43 @@ __sbc_dif_verify_read(struct se_cmd *cmd, sector_t start, unsigned int sectors,
                                 sdt->app_tag, be32_to_cpu(sdt->ref_tag));
 
                        if (sdt->app_tag == cpu_to_be16(0xffff)) {
-                               sector++;
-                               offset += sizeof(struct se_dif_v1_tuple);
-                               continue;
+                               dsg_off += block_size;
+                               goto next;
+                       }
+
+                       avail = min(block_size, dsg->length - dsg_off);
+                       crc = crc_t10dif(daddr + dsg_off, avail);
+                       if (avail < block_size) {
+                               kunmap_atomic(daddr - dsg->offset);
+                               dsg = sg_next(dsg);
+                               if (!dsg) {
+                                       kunmap_atomic(paddr - psg->offset);
+                                       return 0;
+                               }
+                               daddr = kmap_atomic(sg_page(dsg)) + dsg->offset;
+                               dsg_off = block_size - avail;
+                               crc = crc_t10dif_update(crc, daddr, dsg_off);
+                       } else {
+                               dsg_off += block_size;
                        }
 
-                       rc = sbc_dif_v1_verify(cmd, sdt, daddr + j, sector,
-                                              ei_lba);
+                       rc = sbc_dif_v1_verify(cmd, sdt, crc, sector, ei_lba);
                        if (rc) {
-                               kunmap_atomic(paddr);
-                               kunmap_atomic(daddr);
+                               kunmap_atomic(daddr - dsg->offset);
+                               kunmap_atomic(paddr - psg->offset);
                                cmd->bad_sector = sector;
                                return rc;
                        }
-
+next:
                        sector++;
                        ei_lba++;
-                       offset += sizeof(struct se_dif_v1_tuple);
                }
 
-               kunmap_atomic(paddr);
-               kunmap_atomic(daddr);
+               psg_off = 0;
+               kunmap_atomic(daddr - dsg->offset);
+               kunmap_atomic(paddr - psg->offset);
        }
 
        return 0;
 }
-
-sense_reason_t
-sbc_dif_read_strip(struct se_cmd *cmd)
-{
-       struct se_device *dev = cmd->se_dev;
-       u32 sectors = cmd->prot_length / dev->prot_length;
-
-       return __sbc_dif_verify_read(cmd, cmd->t_task_lba, sectors, 0,
-                                    cmd->t_prot_sg, 0);
-}
-
-sense_reason_t
-sbc_dif_verify_read(struct se_cmd *cmd, sector_t start, unsigned int sectors,
-                   unsigned int ei_lba, struct scatterlist *sg, int sg_off)
-{
-       sense_reason_t rc;
-
-       rc = __sbc_dif_verify_read(cmd, start, sectors, ei_lba, sg, sg_off);
-       if (rc)
-               return rc;
-
-       sbc_dif_copy_prot(cmd, sectors, true, sg, sg_off);
-       return 0;
-}
-EXPORT_SYMBOL(sbc_dif_verify_read);
+EXPORT_SYMBOL(sbc_dif_verify);
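
The DIF rework above walks the protection scatterlist and carries a block's CRC across data-segment boundaries with crc_t10dif() and crc_t10dif_update() instead of assuming each block is contiguous in memory. The property it relies on, that chunked updates give the same CRC-16/T10-DIF as a one-shot computation, can be checked with a self-contained userspace sketch (the bitwise helper stands in for the kernel's lib/crc-t10dif.c; the split point is arbitrary):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Bitwise CRC-16/T10-DIF: polynomial 0x8BB7, init 0, no reflection, no xorout. */
static uint16_t crc_t10dif_update(uint16_t crc, const uint8_t *buf, size_t len)
{
        size_t i;
        int bit;

        for (i = 0; i < len; i++) {
                crc ^= (uint16_t)buf[i] << 8;
                for (bit = 0; bit < 8; bit++)
                        crc = (crc & 0x8000) ? (uint16_t)((crc << 1) ^ 0x8BB7)
                                             : (uint16_t)(crc << 1);
        }
        return crc;
}

int main(void)
{
        uint8_t block[512];
        memset(block, 0xA5, sizeof(block));

        /* One-shot CRC over the whole logical block. */
        uint16_t whole = crc_t10dif_update(0, block, sizeof(block));

        /* Same block split across two "scatterlist segments". */
        uint16_t split = crc_t10dif_update(0, block, 200);
        split = crc_t10dif_update(split, block + 200, sizeof(block) - 200);

        printf("whole=0x%04x split=0x%04x %s\n", whole, split,
               whole == split ? "(match)" : "(mismatch)");
        return 0;
}
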
index 52ea640274f4fde0dd387866842c3aac3fcf77a3..b0744433315a80496a84d8d6f49e01300471f463 100644 (file)
 #include "target_core_ua.h"
 #include "target_core_xcopy.h"
 
-static void spc_fill_alua_data(struct se_port *port, unsigned char *buf)
+static void spc_fill_alua_data(struct se_lun *lun, unsigned char *buf)
 {
        struct t10_alua_tg_pt_gp *tg_pt_gp;
-       struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
 
        /*
         * Set SCCS for MAINTENANCE_IN + REPORT_TARGET_PORT_GROUPS.
@@ -54,17 +53,11 @@ static void spc_fill_alua_data(struct se_port *port, unsigned char *buf)
         *
         * See spc4r17 section 6.4.2 Table 135
         */
-       if (!port)
-               return;
-       tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem;
-       if (!tg_pt_gp_mem)
-               return;
-
-       spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
-       tg_pt_gp = tg_pt_gp_mem->tg_pt_gp;
+       spin_lock(&lun->lun_tg_pt_gp_lock);
+       tg_pt_gp = lun->lun_tg_pt_gp;
        if (tg_pt_gp)
                buf[5] |= tg_pt_gp->tg_pt_gp_alua_access_type;
-       spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
+       spin_unlock(&lun->lun_tg_pt_gp_lock);
 }
 
 sense_reason_t
@@ -95,7 +88,7 @@ spc_emulate_inquiry_std(struct se_cmd *cmd, unsigned char *buf)
        /*
         * Enable SCCS and TPGS fields for Emulated ALUA
         */
-       spc_fill_alua_data(lun->lun_sep, buf);
+       spc_fill_alua_data(lun, buf);
 
        /*
         * Set Third-Party Copy (3PC) bit to indicate support for EXTENDED_COPY
@@ -182,11 +175,9 @@ spc_emulate_evpd_83(struct se_cmd *cmd, unsigned char *buf)
 {
        struct se_device *dev = cmd->se_dev;
        struct se_lun *lun = cmd->se_lun;
-       struct se_port *port = NULL;
        struct se_portal_group *tpg = NULL;
        struct t10_alua_lu_gp_member *lu_gp_mem;
        struct t10_alua_tg_pt_gp *tg_pt_gp;
-       struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
        unsigned char *prod = &dev->t10_wwn.model[0];
        u32 prod_len;
        u32 unit_serial_len, off = 0;
@@ -268,18 +259,15 @@ check_t10_vend_desc:
        /* Header size for Designation descriptor */
        len += (id_len + 4);
        off += (id_len + 4);
-       /*
-        * struct se_port is only set for INQUIRY VPD=1 through $FABRIC_MOD
-        */
-       port = lun->lun_sep;
-       if (port) {
+
+       if (1) {
                struct t10_alua_lu_gp *lu_gp;
                u32 padding, scsi_name_len, scsi_target_len;
                u16 lu_gp_id = 0;
                u16 tg_pt_gp_id = 0;
                u16 tpgt;
 
-               tpg = port->sep_tpg;
+               tpg = lun->lun_tpg;
                /*
                 * Relative target port identifier, see spc4r17
                 * section 7.7.3.7
@@ -287,8 +275,7 @@ check_t10_vend_desc:
                 * Get the PROTOCOL IDENTIFIER as defined by spc4r17
                 * section 7.5.1 Table 362
                 */
-               buf[off] =
-                       (tpg->se_tpg_tfo->get_fabric_proto_ident(tpg) << 4);
+               buf[off] = tpg->proto_id << 4;
                buf[off++] |= 0x1; /* CODE SET == Binary */
                buf[off] = 0x80; /* Set PIV=1 */
                /* Set ASSOCIATION == target port: 01b */
@@ -300,8 +287,8 @@ check_t10_vend_desc:
                /* Skip over Obsolete field in RTPI payload
                 * in Table 472 */
                off += 2;
-               buf[off++] = ((port->sep_rtpi >> 8) & 0xff);
-               buf[off++] = (port->sep_rtpi & 0xff);
+               buf[off++] = ((lun->lun_rtpi >> 8) & 0xff);
+               buf[off++] = (lun->lun_rtpi & 0xff);
                len += 8; /* Header size + Designation descriptor */
                /*
                 * Target port group identifier, see spc4r17
@@ -310,21 +297,16 @@ check_t10_vend_desc:
                 * Get the PROTOCOL IDENTIFIER as defined by spc4r17
                 * section 7.5.1 Table 362
                 */
-               tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem;
-               if (!tg_pt_gp_mem)
-                       goto check_lu_gp;
-
-               spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
-               tg_pt_gp = tg_pt_gp_mem->tg_pt_gp;
+               spin_lock(&lun->lun_tg_pt_gp_lock);
+               tg_pt_gp = lun->lun_tg_pt_gp;
                if (!tg_pt_gp) {
-                       spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
+                       spin_unlock(&lun->lun_tg_pt_gp_lock);
                        goto check_lu_gp;
                }
                tg_pt_gp_id = tg_pt_gp->tg_pt_gp_id;
-               spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
+               spin_unlock(&lun->lun_tg_pt_gp_lock);
 
-               buf[off] =
-                       (tpg->se_tpg_tfo->get_fabric_proto_ident(tpg) << 4);
+               buf[off] = tpg->proto_id << 4;
                buf[off++] |= 0x1; /* CODE SET == Binary */
                buf[off] = 0x80; /* Set PIV=1 */
                /* Set ASSOCIATION == target port: 01b */
@@ -372,8 +354,7 @@ check_lu_gp:
                 * section 7.5.1 Table 362
                 */
 check_scsi_name:
-               buf[off] =
-                       (tpg->se_tpg_tfo->get_fabric_proto_ident(tpg) << 4);
+               buf[off] = tpg->proto_id << 4;
                buf[off++] |= 0x3; /* CODE SET == UTF-8 */
                buf[off] = 0x80; /* Set PIV=1 */
                /* Set ASSOCIATION == target port: 01b */
@@ -413,8 +394,7 @@ check_scsi_name:
                /*
                 * Target device designator
                 */
-               buf[off] =
-                       (tpg->se_tpg_tfo->get_fabric_proto_ident(tpg) << 4);
+               buf[off] = tpg->proto_id << 4;
                buf[off++] |= 0x3; /* CODE SET == UTF-8 */
                buf[off] = 0x80; /* Set PIV=1 */
                /* Set ASSOCIATION == target device: 10b */
@@ -482,7 +462,7 @@ spc_emulate_evpd_86(struct se_cmd *cmd, unsigned char *buf)
        buf[5] = 0x07;
 
        /* If WriteCache emulation is enabled, set V_SUP */
-       if (se_dev_check_wce(dev))
+       if (target_check_wce(dev))
                buf[6] = 0x01;
        /* If an LBA map is present set R_SUP */
        spin_lock(&cmd->se_dev->t10_alua.lba_map_lock);
@@ -699,7 +679,7 @@ static sense_reason_t
 spc_emulate_inquiry(struct se_cmd *cmd)
 {
        struct se_device *dev = cmd->se_dev;
-       struct se_portal_group *tpg = cmd->se_lun->lun_sep->sep_tpg;
+       struct se_portal_group *tpg = cmd->se_lun->lun_tpg;
        unsigned char *rbuf;
        unsigned char *cdb = cmd->t_task_cdb;
        unsigned char *buf;
@@ -713,7 +693,7 @@ spc_emulate_inquiry(struct se_cmd *cmd)
                return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
        }
 
-       if (dev == tpg->tpg_virt_lun0.lun_se_dev)
+       if (dev == rcu_access_pointer(tpg->tpg_virt_lun0->lun_se_dev))
                buf[0] = 0x3f; /* Not connected */
        else
                buf[0] = dev->transport->get_device_type(dev);
@@ -889,7 +869,7 @@ static int spc_modesense_caching(struct se_cmd *cmd, u8 pc, u8 *p)
        if (pc == 1)
                goto out;
 
-       if (se_dev_check_wce(dev))
+       if (target_check_wce(dev))
                p[2] = 0x04; /* Write Cache Enable */
        p[12] = 0x20; /* Disabled Read Ahead */
 
@@ -986,6 +966,7 @@ static sense_reason_t spc_emulate_modesense(struct se_cmd *cmd)
        int length = 0;
        int ret;
        int i;
+       bool read_only = target_lun_is_rdonly(cmd);
 
        memset(buf, 0, SE_MODE_PAGE_BUF);
 
@@ -996,13 +977,15 @@ static sense_reason_t spc_emulate_modesense(struct se_cmd *cmd)
        length = ten ? 3 : 2;
 
        /* DEVICE-SPECIFIC PARAMETER */
-       if ((cmd->se_lun->lun_access & TRANSPORT_LUNFLAGS_READ_ONLY) ||
-           (cmd->se_deve &&
-            (cmd->se_deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY)))
+       if ((cmd->se_lun->lun_access & TRANSPORT_LUNFLAGS_READ_ONLY) || read_only)
                spc_modesense_write_protect(&buf[length], type);
 
-       if ((se_dev_check_wce(dev)) &&
-           (dev->dev_attrib.emulate_fua_write > 0))
+       /*
+        * SBC only allows us to enable FUA and DPO together.  Fortunately
+        * DPO is explicitly specified as a hint, so a noop is a perfectly
+        * valid implementation.
+        */
+       if (target_check_fua(dev))
                spc_modesense_dpofua(&buf[length], type);
 
        ++length;
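
In the MODE SENSE hunk above, the DEVICE-SPECIFIC PARAMETER byte is built from two independent checks: write protection (the LUN flag or target_lun_is_rdonly()) and FUA support (target_check_fua()). For a direct-access device that byte carries WP in bit 7 and DPOFUA in bit 4; a small self-contained sketch of the encoding (hypothetical helper, not the kernel function):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MODESENSE_WP     0x80   /* bit 7: medium is write protected   */
#define MODESENSE_DPOFUA 0x10   /* bit 4: device honours DPO/FUA bits */

/* Hypothetical stand-in for the checks done in spc_emulate_modesense(). */
static uint8_t device_specific_parameter(bool read_only, bool fua_supported)
{
        uint8_t dsp = 0;

        if (read_only)
                dsp |= MODESENSE_WP;
        if (fua_supported)
                dsp |= MODESENSE_DPOFUA;
        return dsp;
}

int main(void)
{
        printf("ro+fua -> 0x%02x\n", device_specific_parameter(true, true));   /* 0x90 */
        printf("rw+fua -> 0x%02x\n", device_specific_parameter(false, true));  /* 0x10 */
        return 0;
}
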
@@ -1212,8 +1195,9 @@ sense_reason_t spc_emulate_report_luns(struct se_cmd *cmd)
 {
        struct se_dev_entry *deve;
        struct se_session *sess = cmd->se_sess;
+       struct se_node_acl *nacl;
        unsigned char *buf;
-       u32 lun_count = 0, offset = 8, i;
+       u32 lun_count = 0, offset = 8;
 
        if (cmd->data_length < 16) {
                pr_warn("REPORT LUNS allocation length %u too small\n",
@@ -1235,12 +1219,10 @@ sense_reason_t spc_emulate_report_luns(struct se_cmd *cmd)
                lun_count = 1;
                goto done;
        }
+       nacl = sess->se_node_acl;
 
-       spin_lock_irq(&sess->se_node_acl->device_list_lock);
-       for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
-               deve = sess->se_node_acl->device_list[i];
-               if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS))
-                       continue;
+       rcu_read_lock();
+       hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link) {
                /*
                 * We determine the correct LUN LIST LENGTH even once we
                 * have reached the initial allocation length.
@@ -1253,7 +1235,7 @@ sense_reason_t spc_emulate_report_luns(struct se_cmd *cmd)
                int_to_scsilun(deve->mapped_lun, (struct scsi_lun *)&buf[offset]);
                offset += 8;
        }
-       spin_unlock_irq(&sess->se_node_acl->device_list_lock);
+       rcu_read_unlock();
 
        /*
         * See SPC3 r07, page 159.
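
REPORT LUNS above stops walking a fixed-size, spinlock-protected device_list[] array and instead iterates the ACL's lun_entry_hlist under rcu_read_lock(). A hedged kernel-style sketch of that iteration pattern (hypothetical my_* names; the hlist/RCU primitives are the ones used in the hunk):

#include <linux/types.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>

struct my_entry {
        u32 mapped_lun;
        struct hlist_node link;         /* linked into the per-ACL hash list */
};

struct my_acl {
        struct hlist_head entry_hlist;  /* entries published with hlist_add_head_rcu() */
};

static u32 my_count_entries(struct my_acl *acl)
{
        struct my_entry *e;
        u32 count = 0;

        rcu_read_lock();                /* readers take no spinlock ...        */
        hlist_for_each_entry_rcu(e, &acl->entry_hlist, link)
                count++;                /* ... and still see a coherent list   */
        rcu_read_unlock();

        return count;
}
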
index 40f6c13780414894a636cc43ef5b7eb6e0fea0d8..20ed5d2e151a6f50211273967e2cc107ba2b5bce 100644 (file)
@@ -37,7 +37,6 @@
 #include <target/target_core_base.h>
 #include <target/target_core_backend.h>
 #include <target/target_core_fabric.h>
-#include <target/target_core_configfs.h>
 #include <target/configfs_macros.h>
 
 #include "target_core_internal.h"
@@ -104,7 +103,7 @@ static ssize_t target_stat_scsi_dev_show_attr_ports(
        struct se_device *dev =
                container_of(sgrps, struct se_device, dev_stat_grps);
 
-       return snprintf(page, PAGE_SIZE, "%u\n", dev->dev_port_count);
+       return snprintf(page, PAGE_SIZE, "%u\n", dev->export_count);
 }
 DEV_STAT_SCSI_DEV_ATTR_RO(ports);
 
@@ -540,20 +539,14 @@ static ssize_t target_stat_scsi_port_show_attr_inst(
        struct se_port_stat_grps *pgrps, char *page)
 {
        struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps);
-       struct se_port *sep;
-       struct se_device *dev = lun->lun_se_dev;
-       struct se_hba *hba;
-       ssize_t ret;
-
-       spin_lock(&lun->lun_sep_lock);
-       sep = lun->lun_sep;
-       if (!sep) {
-               spin_unlock(&lun->lun_sep_lock);
-               return -ENODEV;
-       }
-       hba = dev->se_hba;
-       ret = snprintf(page, PAGE_SIZE, "%u\n", hba->hba_index);
-       spin_unlock(&lun->lun_sep_lock);
+       struct se_device *dev;
+       ssize_t ret = -ENODEV;
+
+       rcu_read_lock();
+       dev = rcu_dereference(lun->lun_se_dev);
+       if (dev)
+               ret = snprintf(page, PAGE_SIZE, "%u\n", dev->hba_index);
+       rcu_read_unlock();
        return ret;
 }
 DEV_STAT_SCSI_PORT_ATTR_RO(inst);
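
Every show routine in this statistics file follows the conversion visible above: the lun_sep_lock/lun_sep pair becomes an RCU-protected lun_se_dev pointer that is read with rcu_dereference() and treated as absent when NULL. A hedged kernel-style sketch of the accessor shape (hypothetical my_* names):

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/rcupdate.h>
#include <linux/types.h>

struct my_dev {
        u32 dev_index;
};

struct my_lun {
        struct my_dev __rcu *se_dev;    /* published with rcu_assign_pointer() */
};

static ssize_t my_show_dev_index(struct my_lun *lun, char *page, size_t size)
{
        struct my_dev *dev;
        ssize_t ret = -ENODEV;          /* default when the LUN is not exported */

        rcu_read_lock();
        dev = rcu_dereference(lun->se_dev);
        if (dev)
                ret = snprintf(page, size, "%u\n", dev->dev_index);
        rcu_read_unlock();

        return ret;
}
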
@@ -562,18 +555,14 @@ static ssize_t target_stat_scsi_port_show_attr_dev(
        struct se_port_stat_grps *pgrps, char *page)
 {
        struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps);
-       struct se_port *sep;
-       struct se_device *dev = lun->lun_se_dev;
-       ssize_t ret;
-
-       spin_lock(&lun->lun_sep_lock);
-       sep = lun->lun_sep;
-       if (!sep) {
-               spin_unlock(&lun->lun_sep_lock);
-               return -ENODEV;
-       }
-       ret = snprintf(page, PAGE_SIZE, "%u\n", dev->dev_index);
-       spin_unlock(&lun->lun_sep_lock);
+       struct se_device *dev;
+       ssize_t ret = -ENODEV;
+
+       rcu_read_lock();
+       dev = rcu_dereference(lun->lun_se_dev);
+       if (dev)
+               ret = snprintf(page, PAGE_SIZE, "%u\n", dev->dev_index);
+       rcu_read_unlock();
        return ret;
 }
 DEV_STAT_SCSI_PORT_ATTR_RO(dev);
@@ -582,17 +571,14 @@ static ssize_t target_stat_scsi_port_show_attr_indx(
        struct se_port_stat_grps *pgrps, char *page)
 {
        struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps);
-       struct se_port *sep;
-       ssize_t ret;
-
-       spin_lock(&lun->lun_sep_lock);
-       sep = lun->lun_sep;
-       if (!sep) {
-               spin_unlock(&lun->lun_sep_lock);
-               return -ENODEV;
-       }
-       ret = snprintf(page, PAGE_SIZE, "%u\n", sep->sep_index);
-       spin_unlock(&lun->lun_sep_lock);
+       struct se_device *dev;
+       ssize_t ret = -ENODEV;
+
+       rcu_read_lock();
+       dev = rcu_dereference(lun->lun_se_dev);
+       if (dev)
+               ret = snprintf(page, PAGE_SIZE, "%u\n", lun->lun_rtpi);
+       rcu_read_unlock();
        return ret;
 }
 DEV_STAT_SCSI_PORT_ATTR_RO(indx);
@@ -601,21 +587,14 @@ static ssize_t target_stat_scsi_port_show_attr_role(
        struct se_port_stat_grps *pgrps, char *page)
 {
        struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps);
-       struct se_device *dev = lun->lun_se_dev;
-       struct se_port *sep;
-       ssize_t ret;
-
-       if (!dev)
-               return -ENODEV;
-
-       spin_lock(&lun->lun_sep_lock);
-       sep = lun->lun_sep;
-       if (!sep) {
-               spin_unlock(&lun->lun_sep_lock);
-               return -ENODEV;
-       }
-       ret = snprintf(page, PAGE_SIZE, "%s%u\n", "Device", dev->dev_index);
-       spin_unlock(&lun->lun_sep_lock);
+       struct se_device *dev;
+       ssize_t ret = -ENODEV;
+
+       rcu_read_lock();
+       dev = rcu_dereference(lun->lun_se_dev);
+       if (dev)
+               ret = snprintf(page, PAGE_SIZE, "%s%u\n", "Device", dev->dev_index);
+       rcu_read_unlock();
        return ret;
 }
 DEV_STAT_SCSI_PORT_ATTR_RO(role);
@@ -624,18 +603,16 @@ static ssize_t target_stat_scsi_port_show_attr_busy_count(
        struct se_port_stat_grps *pgrps, char *page)
 {
        struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps);
-       struct se_port *sep;
-       ssize_t ret;
-
-       spin_lock(&lun->lun_sep_lock);
-       sep = lun->lun_sep;
-       if (!sep) {
-               spin_unlock(&lun->lun_sep_lock);
-               return -ENODEV;
+       struct se_device *dev;
+       ssize_t ret = -ENODEV;
+
+       rcu_read_lock();
+       dev = rcu_dereference(lun->lun_se_dev);
+       if (dev) {
+               /* FIXME: scsiPortBusyStatuses  */
+               ret = snprintf(page, PAGE_SIZE, "%u\n", 0);
        }
-       /* FIXME: scsiPortBusyStatuses  */
-       ret = snprintf(page, PAGE_SIZE, "%u\n", 0);
-       spin_unlock(&lun->lun_sep_lock);
+       rcu_read_unlock();
        return ret;
 }
 DEV_STAT_SCSI_PORT_ATTR_RO(busy_count);
@@ -683,20 +660,14 @@ static ssize_t target_stat_scsi_tgt_port_show_attr_inst(
        struct se_port_stat_grps *pgrps, char *page)
 {
        struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps);
-       struct se_device *dev = lun->lun_se_dev;
-       struct se_port *sep;
-       struct se_hba *hba;
-       ssize_t ret;
-
-       spin_lock(&lun->lun_sep_lock);
-       sep = lun->lun_sep;
-       if (!sep) {
-               spin_unlock(&lun->lun_sep_lock);
-               return -ENODEV;
-       }
-       hba = dev->se_hba;
-       ret = snprintf(page, PAGE_SIZE, "%u\n", hba->hba_index);
-       spin_unlock(&lun->lun_sep_lock);
+       struct se_device *dev;
+       ssize_t ret = -ENODEV;
+
+       rcu_read_lock();
+       dev = rcu_dereference(lun->lun_se_dev);
+       if (dev)
+               ret = snprintf(page, PAGE_SIZE, "%u\n", dev->hba_index);
+       rcu_read_unlock();
        return ret;
 }
 DEV_STAT_SCSI_TGT_PORT_ATTR_RO(inst);
@@ -705,18 +676,14 @@ static ssize_t target_stat_scsi_tgt_port_show_attr_dev(
        struct se_port_stat_grps *pgrps, char *page)
 {
        struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps);
-       struct se_device *dev = lun->lun_se_dev;
-       struct se_port *sep;
-       ssize_t ret;
-
-       spin_lock(&lun->lun_sep_lock);
-       sep = lun->lun_sep;
-       if (!sep) {
-               spin_unlock(&lun->lun_sep_lock);
-               return -ENODEV;
-       }
-       ret = snprintf(page, PAGE_SIZE, "%u\n", dev->dev_index);
-       spin_unlock(&lun->lun_sep_lock);
+       struct se_device *dev;
+       ssize_t ret = -ENODEV;
+
+       rcu_read_lock();
+       dev = rcu_dereference(lun->lun_se_dev);
+       if (dev)
+               ret = snprintf(page, PAGE_SIZE, "%u\n", dev->dev_index);
+       rcu_read_unlock();
        return ret;
 }
 DEV_STAT_SCSI_TGT_PORT_ATTR_RO(dev);
@@ -725,17 +692,14 @@ static ssize_t target_stat_scsi_tgt_port_show_attr_indx(
        struct se_port_stat_grps *pgrps, char *page)
 {
        struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps);
-       struct se_port *sep;
-       ssize_t ret;
-
-       spin_lock(&lun->lun_sep_lock);
-       sep = lun->lun_sep;
-       if (!sep) {
-               spin_unlock(&lun->lun_sep_lock);
-               return -ENODEV;
-       }
-       ret = snprintf(page, PAGE_SIZE, "%u\n", sep->sep_index);
-       spin_unlock(&lun->lun_sep_lock);
+       struct se_device *dev;
+       ssize_t ret = -ENODEV;
+
+       rcu_read_lock();
+       dev = rcu_dereference(lun->lun_se_dev);
+       if (dev)
+               ret = snprintf(page, PAGE_SIZE, "%u\n", lun->lun_rtpi);
+       rcu_read_unlock();
        return ret;
 }
 DEV_STAT_SCSI_TGT_PORT_ATTR_RO(indx);
@@ -744,21 +708,17 @@ static ssize_t target_stat_scsi_tgt_port_show_attr_name(
        struct se_port_stat_grps *pgrps, char *page)
 {
        struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps);
-       struct se_port *sep;
-       struct se_portal_group *tpg;
-       ssize_t ret;
-
-       spin_lock(&lun->lun_sep_lock);
-       sep = lun->lun_sep;
-       if (!sep) {
-               spin_unlock(&lun->lun_sep_lock);
-               return -ENODEV;
-       }
-       tpg = sep->sep_tpg;
-
-       ret = snprintf(page, PAGE_SIZE, "%sPort#%u\n",
-               tpg->se_tpg_tfo->get_fabric_name(), sep->sep_index);
-       spin_unlock(&lun->lun_sep_lock);
+       struct se_portal_group *tpg = lun->lun_tpg;
+       struct se_device *dev;
+       ssize_t ret = -ENODEV;
+
+       rcu_read_lock();
+       dev = rcu_dereference(lun->lun_se_dev);
+       if (dev)
+               ret = snprintf(page, PAGE_SIZE, "%sPort#%u\n",
+                       tpg->se_tpg_tfo->get_fabric_name(),
+                       lun->lun_rtpi);
+       rcu_read_unlock();
        return ret;
 }
 DEV_STAT_SCSI_TGT_PORT_ATTR_RO(name);
@@ -767,22 +727,17 @@ static ssize_t target_stat_scsi_tgt_port_show_attr_port_index(
        struct se_port_stat_grps *pgrps, char *page)
 {
        struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps);
-       struct se_port *sep;
-       struct se_portal_group *tpg;
-       ssize_t ret;
-
-       spin_lock(&lun->lun_sep_lock);
-       sep = lun->lun_sep;
-       if (!sep) {
-               spin_unlock(&lun->lun_sep_lock);
-               return -ENODEV;
-       }
-       tpg = sep->sep_tpg;
-
-       ret = snprintf(page, PAGE_SIZE, "%s%s%d\n",
-               tpg->se_tpg_tfo->tpg_get_wwn(tpg), "+t+",
-               tpg->se_tpg_tfo->tpg_get_tag(tpg));
-       spin_unlock(&lun->lun_sep_lock);
+       struct se_portal_group *tpg = lun->lun_tpg;
+       struct se_device *dev;
+       ssize_t ret = -ENODEV;
+
+       rcu_read_lock();
+       dev = rcu_dereference(lun->lun_se_dev);
+       if (dev)
+               ret = snprintf(page, PAGE_SIZE, "%s%s%d\n",
+                       tpg->se_tpg_tfo->tpg_get_wwn(tpg), "+t+",
+                       tpg->se_tpg_tfo->tpg_get_tag(tpg));
+       rcu_read_unlock();
        return ret;
 }
 DEV_STAT_SCSI_TGT_PORT_ATTR_RO(port_index);
@@ -791,18 +746,15 @@ static ssize_t target_stat_scsi_tgt_port_show_attr_in_cmds(
        struct se_port_stat_grps *pgrps, char *page)
 {
        struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps);
-       struct se_port *sep;
-       ssize_t ret;
-
-       spin_lock(&lun->lun_sep_lock);
-       sep = lun->lun_sep;
-       if (!sep) {
-               spin_unlock(&lun->lun_sep_lock);
-               return -ENODEV;
-       }
-
-       ret = snprintf(page, PAGE_SIZE, "%llu\n", sep->sep_stats.cmd_pdus);
-       spin_unlock(&lun->lun_sep_lock);
+       struct se_device *dev;
+       ssize_t ret = -ENODEV;
+
+       rcu_read_lock();
+       dev = rcu_dereference(lun->lun_se_dev);
+       if (dev)
+               ret = snprintf(page, PAGE_SIZE, "%lu\n",
+                              atomic_long_read(&lun->lun_stats.cmd_pdus));
+       rcu_read_unlock();
        return ret;
 }
 DEV_STAT_SCSI_TGT_PORT_ATTR_RO(in_cmds);
@@ -811,19 +763,15 @@ static ssize_t target_stat_scsi_tgt_port_show_attr_write_mbytes(
        struct se_port_stat_grps *pgrps, char *page)
 {
        struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps);
-       struct se_port *sep;
-       ssize_t ret;
-
-       spin_lock(&lun->lun_sep_lock);
-       sep = lun->lun_sep;
-       if (!sep) {
-               spin_unlock(&lun->lun_sep_lock);
-               return -ENODEV;
-       }
-
-       ret = snprintf(page, PAGE_SIZE, "%u\n",
-                       (u32)(sep->sep_stats.rx_data_octets >> 20));
-       spin_unlock(&lun->lun_sep_lock);
+       struct se_device *dev;
+       ssize_t ret = -ENODEV;
+
+       rcu_read_lock();
+       dev = rcu_dereference(lun->lun_se_dev);
+       if (dev)
+               ret = snprintf(page, PAGE_SIZE, "%u\n",
+                       (u32)(atomic_long_read(&lun->lun_stats.rx_data_octets) >> 20));
+       rcu_read_unlock();
        return ret;
 }
 DEV_STAT_SCSI_TGT_PORT_ATTR_RO(write_mbytes);
@@ -832,19 +780,15 @@ static ssize_t target_stat_scsi_tgt_port_show_attr_read_mbytes(
        struct se_port_stat_grps *pgrps, char *page)
 {
        struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps);
-       struct se_port *sep;
-       ssize_t ret;
-
-       spin_lock(&lun->lun_sep_lock);
-       sep = lun->lun_sep;
-       if (!sep) {
-               spin_unlock(&lun->lun_sep_lock);
-               return -ENODEV;
-       }
-
-       ret = snprintf(page, PAGE_SIZE, "%u\n",
-                       (u32)(sep->sep_stats.tx_data_octets >> 20));
-       spin_unlock(&lun->lun_sep_lock);
+       struct se_device *dev;
+       ssize_t ret = -ENODEV;
+
+       rcu_read_lock();
+       dev = rcu_dereference(lun->lun_se_dev);
+       if (dev)
+               ret = snprintf(page, PAGE_SIZE, "%u\n",
+                               (u32)(atomic_long_read(&lun->lun_stats.tx_data_octets) >> 20));
+       rcu_read_unlock();
        return ret;
 }
 DEV_STAT_SCSI_TGT_PORT_ATTR_RO(read_mbytes);
@@ -853,19 +797,16 @@ static ssize_t target_stat_scsi_tgt_port_show_attr_hs_in_cmds(
        struct se_port_stat_grps *pgrps, char *page)
 {
        struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps);
-       struct se_port *sep;
-       ssize_t ret;
-
-       spin_lock(&lun->lun_sep_lock);
-       sep = lun->lun_sep;
-       if (!sep) {
-               spin_unlock(&lun->lun_sep_lock);
-               return -ENODEV;
+       struct se_device *dev;
+       ssize_t ret = -ENODEV;
+
+       rcu_read_lock();
+       dev = rcu_dereference(lun->lun_se_dev);
+       if (dev) {
+               /* FIXME: scsiTgtPortHsInCommands */
+               ret = snprintf(page, PAGE_SIZE, "%u\n", 0);
        }
-
-       /* FIXME: scsiTgtPortHsInCommands */
-       ret = snprintf(page, PAGE_SIZE, "%u\n", 0);
-       spin_unlock(&lun->lun_sep_lock);
+       rcu_read_unlock();
        return ret;
 }
 DEV_STAT_SCSI_TGT_PORT_ATTR_RO(hs_in_cmds);
@@ -919,21 +860,14 @@ static ssize_t target_stat_scsi_transport_show_attr_inst(
        struct se_port_stat_grps *pgrps, char *page)
 {
        struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps);
-       struct se_device *dev = lun->lun_se_dev;
-       struct se_port *sep;
-       struct se_hba *hba;
-       ssize_t ret;
-
-       spin_lock(&lun->lun_sep_lock);
-       sep = lun->lun_sep;
-       if (!sep) {
-               spin_unlock(&lun->lun_sep_lock);
-               return -ENODEV;
-       }
-
-       hba = dev->se_hba;
-       ret = snprintf(page, PAGE_SIZE, "%u\n", hba->hba_index);
-       spin_unlock(&lun->lun_sep_lock);
+       struct se_device *dev;
+       ssize_t ret = -ENODEV;
+
+       rcu_read_lock();
+       dev = rcu_dereference(lun->lun_se_dev);
+       if (dev)
+               ret = snprintf(page, PAGE_SIZE, "%u\n", dev->hba_index);
+       rcu_read_unlock();
        return ret;
 }
 DEV_STAT_SCSI_TRANSPORT_ATTR_RO(inst);
@@ -942,21 +876,18 @@ static ssize_t target_stat_scsi_transport_show_attr_device(
        struct se_port_stat_grps *pgrps, char *page)
 {
        struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps);
-       struct se_port *sep;
-       struct se_portal_group *tpg;
-       ssize_t ret;
-
-       spin_lock(&lun->lun_sep_lock);
-       sep = lun->lun_sep;
-       if (!sep) {
-               spin_unlock(&lun->lun_sep_lock);
-               return -ENODEV;
+       struct se_device *dev;
+       struct se_portal_group *tpg = lun->lun_tpg;
+       ssize_t ret = -ENODEV;
+
+       rcu_read_lock();
+       dev = rcu_dereference(lun->lun_se_dev);
+       if (dev) {
+               /* scsiTransportType */
+               ret = snprintf(page, PAGE_SIZE, "scsiTransport%s\n",
+                              tpg->se_tpg_tfo->get_fabric_name());
        }
-       tpg = sep->sep_tpg;
-       /* scsiTransportType */
-       ret = snprintf(page, PAGE_SIZE, "scsiTransport%s\n",
-                       tpg->se_tpg_tfo->get_fabric_name());
-       spin_unlock(&lun->lun_sep_lock);
+       rcu_read_unlock();
        return ret;
 }
 DEV_STAT_SCSI_TRANSPORT_ATTR_RO(device);
@@ -965,20 +896,16 @@ static ssize_t target_stat_scsi_transport_show_attr_indx(
        struct se_port_stat_grps *pgrps, char *page)
 {
        struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps);
-       struct se_port *sep;
-       struct se_portal_group *tpg;
-       ssize_t ret;
-
-       spin_lock(&lun->lun_sep_lock);
-       sep = lun->lun_sep;
-       if (!sep) {
-               spin_unlock(&lun->lun_sep_lock);
-               return -ENODEV;
-       }
-       tpg = sep->sep_tpg;
-       ret = snprintf(page, PAGE_SIZE, "%u\n",
-                       tpg->se_tpg_tfo->tpg_get_inst_index(tpg));
-       spin_unlock(&lun->lun_sep_lock);
+       struct se_device *dev;
+       struct se_portal_group *tpg = lun->lun_tpg;
+       ssize_t ret = -ENODEV;
+
+       rcu_read_lock();
+       dev = rcu_dereference(lun->lun_se_dev);
+       if (dev)
+               ret = snprintf(page, PAGE_SIZE, "%u\n",
+                              tpg->se_tpg_tfo->tpg_get_inst_index(tpg));
+       rcu_read_unlock();
        return ret;
 }
 DEV_STAT_SCSI_TRANSPORT_ATTR_RO(indx);
@@ -987,26 +914,22 @@ static ssize_t target_stat_scsi_transport_show_attr_dev_name(
        struct se_port_stat_grps *pgrps, char *page)
 {
        struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps);
-       struct se_device *dev = lun->lun_se_dev;
-       struct se_port *sep;
-       struct se_portal_group *tpg;
+       struct se_device *dev;
+       struct se_portal_group *tpg = lun->lun_tpg;
        struct t10_wwn *wwn;
-       ssize_t ret;
-
-       spin_lock(&lun->lun_sep_lock);
-       sep = lun->lun_sep;
-       if (!sep) {
-               spin_unlock(&lun->lun_sep_lock);
-               return -ENODEV;
+       ssize_t ret = -ENODEV;
+
+       rcu_read_lock();
+       dev = rcu_dereference(lun->lun_se_dev);
+       if (dev) {
+               wwn = &dev->t10_wwn;
+               /* scsiTransportDevName */
+               ret = snprintf(page, PAGE_SIZE, "%s+%s\n",
+                               tpg->se_tpg_tfo->tpg_get_wwn(tpg),
+                               (strlen(wwn->unit_serial)) ? wwn->unit_serial :
+                               wwn->vendor);
        }
-       tpg = sep->sep_tpg;
-       wwn = &dev->t10_wwn;
-       /* scsiTransportDevName */
-       ret = snprintf(page, PAGE_SIZE, "%s+%s\n",
-                       tpg->se_tpg_tfo->tpg_get_wwn(tpg),
-                       (strlen(wwn->unit_serial)) ? wwn->unit_serial :
-                       wwn->vendor);
-       spin_unlock(&lun->lun_sep_lock);
+       rcu_read_unlock();
        return ret;
 }
 DEV_STAT_SCSI_TRANSPORT_ATTR_RO(dev_name);
@@ -1082,17 +1005,17 @@ static ssize_t target_stat_scsi_auth_intr_show_attr_inst(
        struct se_portal_group *tpg;
        ssize_t ret;
 
-       spin_lock_irq(&nacl->device_list_lock);
-       deve = nacl->device_list[lacl->mapped_lun];
-       if (!deve->se_lun || !deve->se_lun_acl) {
-               spin_unlock_irq(&nacl->device_list_lock);
+       rcu_read_lock();
+       deve = target_nacl_find_deve(nacl, lacl->mapped_lun);
+       if (!deve) {
+               rcu_read_unlock();
                return -ENODEV;
        }
        tpg = nacl->se_tpg;
        /* scsiInstIndex */
        ret = snprintf(page, PAGE_SIZE, "%u\n",
                        tpg->se_tpg_tfo->tpg_get_inst_index(tpg));
-       spin_unlock_irq(&nacl->device_list_lock);
+       rcu_read_unlock();
        return ret;
 }
 DEV_STAT_SCSI_AUTH_INTR_ATTR_RO(inst);
@@ -1107,16 +1030,16 @@ static ssize_t target_stat_scsi_auth_intr_show_attr_dev(
        struct se_lun *lun;
        ssize_t ret;
 
-       spin_lock_irq(&nacl->device_list_lock);
-       deve = nacl->device_list[lacl->mapped_lun];
-       if (!deve->se_lun || !deve->se_lun_acl) {
-               spin_unlock_irq(&nacl->device_list_lock);
+       rcu_read_lock();
+       deve = target_nacl_find_deve(nacl, lacl->mapped_lun);
+       if (!deve) {
+               rcu_read_unlock();
                return -ENODEV;
        }
-       lun = deve->se_lun;
+       lun = rcu_dereference(deve->se_lun);
        /* scsiDeviceIndex */
-       ret = snprintf(page, PAGE_SIZE, "%u\n", lun->lun_se_dev->dev_index);
-       spin_unlock_irq(&nacl->device_list_lock);
+       ret = snprintf(page, PAGE_SIZE, "%u\n", lun->lun_index);
+       rcu_read_unlock();
        return ret;
 }
 DEV_STAT_SCSI_AUTH_INTR_ATTR_RO(dev);
@@ -1131,16 +1054,16 @@ static ssize_t target_stat_scsi_auth_intr_show_attr_port(
        struct se_portal_group *tpg;
        ssize_t ret;
 
-       spin_lock_irq(&nacl->device_list_lock);
-       deve = nacl->device_list[lacl->mapped_lun];
-       if (!deve->se_lun || !deve->se_lun_acl) {
-               spin_unlock_irq(&nacl->device_list_lock);
+       rcu_read_lock();
+       deve = target_nacl_find_deve(nacl, lacl->mapped_lun);
+       if (!deve) {
+               rcu_read_unlock();
                return -ENODEV;
        }
        tpg = nacl->se_tpg;
        /* scsiAuthIntrTgtPortIndex */
        ret = snprintf(page, PAGE_SIZE, "%u\n", tpg->se_tpg_tfo->tpg_get_tag(tpg));
-       spin_unlock_irq(&nacl->device_list_lock);
+       rcu_read_unlock();
        return ret;
 }
 DEV_STAT_SCSI_AUTH_INTR_ATTR_RO(port);
@@ -1154,15 +1077,15 @@ static ssize_t target_stat_scsi_auth_intr_show_attr_indx(
        struct se_dev_entry *deve;
        ssize_t ret;
 
-       spin_lock_irq(&nacl->device_list_lock);
-       deve = nacl->device_list[lacl->mapped_lun];
-       if (!deve->se_lun || !deve->se_lun_acl) {
-               spin_unlock_irq(&nacl->device_list_lock);
+       rcu_read_lock();
+       deve = target_nacl_find_deve(nacl, lacl->mapped_lun);
+       if (!deve) {
+               rcu_read_unlock();
                return -ENODEV;
        }
        /* scsiAuthIntrIndex */
        ret = snprintf(page, PAGE_SIZE, "%u\n", nacl->acl_index);
-       spin_unlock_irq(&nacl->device_list_lock);
+       rcu_read_unlock();
        return ret;
 }
 DEV_STAT_SCSI_AUTH_INTR_ATTR_RO(indx);
@@ -1176,15 +1099,15 @@ static ssize_t target_stat_scsi_auth_intr_show_attr_dev_or_port(
        struct se_dev_entry *deve;
        ssize_t ret;
 
-       spin_lock_irq(&nacl->device_list_lock);
-       deve = nacl->device_list[lacl->mapped_lun];
-       if (!deve->se_lun || !deve->se_lun_acl) {
-               spin_unlock_irq(&nacl->device_list_lock);
+       rcu_read_lock();
+       deve = target_nacl_find_deve(nacl, lacl->mapped_lun);
+       if (!deve) {
+               rcu_read_unlock();
                return -ENODEV;
        }
        /* scsiAuthIntrDevOrPort */
        ret = snprintf(page, PAGE_SIZE, "%u\n", 1);
-       spin_unlock_irq(&nacl->device_list_lock);
+       rcu_read_unlock();
        return ret;
 }
 DEV_STAT_SCSI_AUTH_INTR_ATTR_RO(dev_or_port);
@@ -1198,15 +1121,15 @@ static ssize_t target_stat_scsi_auth_intr_show_attr_intr_name(
        struct se_dev_entry *deve;
        ssize_t ret;
 
-       spin_lock_irq(&nacl->device_list_lock);
-       deve = nacl->device_list[lacl->mapped_lun];
-       if (!deve->se_lun || !deve->se_lun_acl) {
-               spin_unlock_irq(&nacl->device_list_lock);
+       rcu_read_lock();
+       deve = target_nacl_find_deve(nacl, lacl->mapped_lun);
+       if (!deve) {
+               rcu_read_unlock();
                return -ENODEV;
        }
        /* scsiAuthIntrName */
        ret = snprintf(page, PAGE_SIZE, "%s\n", nacl->initiatorname);
-       spin_unlock_irq(&nacl->device_list_lock);
+       rcu_read_unlock();
        return ret;
 }
 DEV_STAT_SCSI_AUTH_INTR_ATTR_RO(intr_name);
@@ -1220,15 +1143,15 @@ static ssize_t target_stat_scsi_auth_intr_show_attr_map_indx(
        struct se_dev_entry *deve;
        ssize_t ret;
 
-       spin_lock_irq(&nacl->device_list_lock);
-       deve = nacl->device_list[lacl->mapped_lun];
-       if (!deve->se_lun || !deve->se_lun_acl) {
-               spin_unlock_irq(&nacl->device_list_lock);
+       rcu_read_lock();
+       deve = target_nacl_find_deve(nacl, lacl->mapped_lun);
+       if (!deve) {
+               rcu_read_unlock();
                return -ENODEV;
        }
        /* FIXME: scsiAuthIntrLunMapIndex */
        ret = snprintf(page, PAGE_SIZE, "%u\n", 0);
-       spin_unlock_irq(&nacl->device_list_lock);
+       rcu_read_unlock();
        return ret;
 }
 DEV_STAT_SCSI_AUTH_INTR_ATTR_RO(map_indx);
@@ -1242,15 +1165,15 @@ static ssize_t target_stat_scsi_auth_intr_show_attr_att_count(
        struct se_dev_entry *deve;
        ssize_t ret;
 
-       spin_lock_irq(&nacl->device_list_lock);
-       deve = nacl->device_list[lacl->mapped_lun];
-       if (!deve->se_lun || !deve->se_lun_acl) {
-               spin_unlock_irq(&nacl->device_list_lock);
+       rcu_read_lock();
+       deve = target_nacl_find_deve(nacl, lacl->mapped_lun);
+       if (!deve) {
+               rcu_read_unlock();
                return -ENODEV;
        }
        /* scsiAuthIntrAttachedTimes */
        ret = snprintf(page, PAGE_SIZE, "%u\n", deve->attach_count);
-       spin_unlock_irq(&nacl->device_list_lock);
+       rcu_read_unlock();
        return ret;
 }
 DEV_STAT_SCSI_AUTH_INTR_ATTR_RO(att_count);
@@ -1264,15 +1187,16 @@ static ssize_t target_stat_scsi_auth_intr_show_attr_num_cmds(
        struct se_dev_entry *deve;
        ssize_t ret;
 
-       spin_lock_irq(&nacl->device_list_lock);
-       deve = nacl->device_list[lacl->mapped_lun];
-       if (!deve->se_lun || !deve->se_lun_acl) {
-               spin_unlock_irq(&nacl->device_list_lock);
+       rcu_read_lock();
+       deve = target_nacl_find_deve(nacl, lacl->mapped_lun);
+       if (!deve) {
+               rcu_read_unlock();
                return -ENODEV;
        }
        /* scsiAuthIntrOutCommands */
-       ret = snprintf(page, PAGE_SIZE, "%u\n", deve->total_cmds);
-       spin_unlock_irq(&nacl->device_list_lock);
+       ret = snprintf(page, PAGE_SIZE, "%lu\n",
+                      atomic_long_read(&deve->total_cmds));
+       rcu_read_unlock();
        return ret;
 }
 DEV_STAT_SCSI_AUTH_INTR_ATTR_RO(num_cmds);
@@ -1286,15 +1210,16 @@ static ssize_t target_stat_scsi_auth_intr_show_attr_read_mbytes(
        struct se_dev_entry *deve;
        ssize_t ret;
 
-       spin_lock_irq(&nacl->device_list_lock);
-       deve = nacl->device_list[lacl->mapped_lun];
-       if (!deve->se_lun || !deve->se_lun_acl) {
-               spin_unlock_irq(&nacl->device_list_lock);
+       rcu_read_lock();
+       deve = target_nacl_find_deve(nacl, lacl->mapped_lun);
+       if (!deve) {
+               rcu_read_unlock();
                return -ENODEV;
        }
        /* scsiAuthIntrReadMegaBytes */
-       ret = snprintf(page, PAGE_SIZE, "%u\n", (u32)(deve->read_bytes >> 20));
-       spin_unlock_irq(&nacl->device_list_lock);
+       ret = snprintf(page, PAGE_SIZE, "%u\n",
+                     (u32)(atomic_long_read(&deve->read_bytes) >> 20));
+       rcu_read_unlock();
        return ret;
 }
 DEV_STAT_SCSI_AUTH_INTR_ATTR_RO(read_mbytes);
@@ -1308,15 +1233,16 @@ static ssize_t target_stat_scsi_auth_intr_show_attr_write_mbytes(
        struct se_dev_entry *deve;
        ssize_t ret;
 
-       spin_lock_irq(&nacl->device_list_lock);
-       deve = nacl->device_list[lacl->mapped_lun];
-       if (!deve->se_lun || !deve->se_lun_acl) {
-               spin_unlock_irq(&nacl->device_list_lock);
+       rcu_read_lock();
+       deve = target_nacl_find_deve(nacl, lacl->mapped_lun);
+       if (!deve) {
+               rcu_read_unlock();
                return -ENODEV;
        }
        /* scsiAuthIntrWrittenMegaBytes */
-       ret = snprintf(page, PAGE_SIZE, "%u\n", (u32)(deve->write_bytes >> 20));
-       spin_unlock_irq(&nacl->device_list_lock);
+       ret = snprintf(page, PAGE_SIZE, "%u\n",
+                     (u32)(atomic_long_read(&deve->write_bytes) >> 20));
+       rcu_read_unlock();
        return ret;
 }
 DEV_STAT_SCSI_AUTH_INTR_ATTR_RO(write_mbytes);
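
Editor's note: the num_cmds/read_mbytes/write_mbytes handlers above also switch the per-deve counters from u32 fields updated under the lock to atomic_long_t values read with atomic_long_read(). A small sketch of that accounting style, using hypothetical demo_* names rather than the real target-core structures:

#include <linux/atomic.h>
#include <linux/kernel.h>

/* Illustrative only: lockless I/O accounting with atomic_long_t. */
struct demo_stats {
	atomic_long_t total_cmds;
	atomic_long_t read_bytes;
};

static void demo_account_read(struct demo_stats *s, unsigned long bytes)
{
	atomic_long_inc(&s->total_cmds);
	atomic_long_add(bytes, &s->read_bytes);
}

static ssize_t demo_show_read_mbytes(struct demo_stats *s, char *page, size_t len)
{
	/* Same >> 20 conversion to megabytes as the handler above. */
	return snprintf(page, len, "%u\n",
			(u32)(atomic_long_read(&s->read_bytes) >> 20));
}
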
@@ -1330,15 +1256,15 @@ static ssize_t target_stat_scsi_auth_intr_show_attr_hs_num_cmds(
        struct se_dev_entry *deve;
        ssize_t ret;
 
-       spin_lock_irq(&nacl->device_list_lock);
-       deve = nacl->device_list[lacl->mapped_lun];
-       if (!deve->se_lun || !deve->se_lun_acl) {
-               spin_unlock_irq(&nacl->device_list_lock);
+       rcu_read_lock();
+       deve = target_nacl_find_deve(nacl, lacl->mapped_lun);
+       if (!deve) {
+               rcu_read_unlock();
                return -ENODEV;
        }
        /* FIXME: scsiAuthIntrHSOutCommands */
        ret = snprintf(page, PAGE_SIZE, "%u\n", 0);
-       spin_unlock_irq(&nacl->device_list_lock);
+       rcu_read_unlock();
        return ret;
 }
 DEV_STAT_SCSI_AUTH_INTR_ATTR_RO(hs_num_cmds);
@@ -1352,16 +1278,16 @@ static ssize_t target_stat_scsi_auth_intr_show_attr_creation_time(
        struct se_dev_entry *deve;
        ssize_t ret;
 
-       spin_lock_irq(&nacl->device_list_lock);
-       deve = nacl->device_list[lacl->mapped_lun];
-       if (!deve->se_lun || !deve->se_lun_acl) {
-               spin_unlock_irq(&nacl->device_list_lock);
+       rcu_read_lock();
+       deve = target_nacl_find_deve(nacl, lacl->mapped_lun);
+       if (!deve) {
+               rcu_read_unlock();
                return -ENODEV;
        }
        /* scsiAuthIntrLastCreation */
        ret = snprintf(page, PAGE_SIZE, "%u\n", (u32)(((u32)deve->creation_time -
                                INITIAL_JIFFIES) * 100 / HZ));
-       spin_unlock_irq(&nacl->device_list_lock);
+       rcu_read_unlock();
        return ret;
 }
 DEV_STAT_SCSI_AUTH_INTR_ATTR_RO(creation_time);
@@ -1375,15 +1301,15 @@ static ssize_t target_stat_scsi_auth_intr_show_attr_row_status(
        struct se_dev_entry *deve;
        ssize_t ret;
 
-       spin_lock_irq(&nacl->device_list_lock);
-       deve = nacl->device_list[lacl->mapped_lun];
-       if (!deve->se_lun || !deve->se_lun_acl) {
-               spin_unlock_irq(&nacl->device_list_lock);
+       rcu_read_lock();
+       deve = target_nacl_find_deve(nacl, lacl->mapped_lun);
+       if (!deve) {
+               rcu_read_unlock();
                return -ENODEV;
        }
        /* FIXME: scsiAuthIntrRowStatus */
        ret = snprintf(page, PAGE_SIZE, "Ready\n");
-       spin_unlock_irq(&nacl->device_list_lock);
+       rcu_read_unlock();
        return ret;
 }
 DEV_STAT_SCSI_AUTH_INTR_ATTR_RO(row_status);
@@ -1448,17 +1374,17 @@ static ssize_t target_stat_scsi_att_intr_port_show_attr_inst(
        struct se_portal_group *tpg;
        ssize_t ret;
 
-       spin_lock_irq(&nacl->device_list_lock);
-       deve = nacl->device_list[lacl->mapped_lun];
-       if (!deve->se_lun || !deve->se_lun_acl) {
-               spin_unlock_irq(&nacl->device_list_lock);
+       rcu_read_lock();
+       deve = target_nacl_find_deve(nacl, lacl->mapped_lun);
+       if (!deve) {
+               rcu_read_unlock();
                return -ENODEV;
        }
        tpg = nacl->se_tpg;
        /* scsiInstIndex */
        ret = snprintf(page, PAGE_SIZE, "%u\n",
                        tpg->se_tpg_tfo->tpg_get_inst_index(tpg));
-       spin_unlock_irq(&nacl->device_list_lock);
+       rcu_read_unlock();
        return ret;
 }
 DEV_STAT_SCSI_ATTR_INTR_PORT_ATTR_RO(inst);
@@ -1473,16 +1399,16 @@ static ssize_t target_stat_scsi_att_intr_port_show_attr_dev(
        struct se_lun *lun;
        ssize_t ret;
 
-       spin_lock_irq(&nacl->device_list_lock);
-       deve = nacl->device_list[lacl->mapped_lun];
-       if (!deve->se_lun || !deve->se_lun_acl) {
-               spin_unlock_irq(&nacl->device_list_lock);
+       rcu_read_lock();
+       deve = target_nacl_find_deve(nacl, lacl->mapped_lun);
+       if (!deve) {
+               rcu_read_unlock();
                return -ENODEV;
        }
-       lun = deve->se_lun;
+       lun = rcu_dereference(deve->se_lun);
        /* scsiDeviceIndex */
-       ret = snprintf(page, PAGE_SIZE, "%u\n", lun->lun_se_dev->dev_index);
-       spin_unlock_irq(&nacl->device_list_lock);
+       ret = snprintf(page, PAGE_SIZE, "%u\n", lun->lun_index);
+       rcu_read_unlock();
        return ret;
 }
 DEV_STAT_SCSI_ATTR_INTR_PORT_ATTR_RO(dev);
@@ -1497,16 +1423,16 @@ static ssize_t target_stat_scsi_att_intr_port_show_attr_port(
        struct se_portal_group *tpg;
        ssize_t ret;
 
-       spin_lock_irq(&nacl->device_list_lock);
-       deve = nacl->device_list[lacl->mapped_lun];
-       if (!deve->se_lun || !deve->se_lun_acl) {
-               spin_unlock_irq(&nacl->device_list_lock);
+       rcu_read_lock();
+       deve = target_nacl_find_deve(nacl, lacl->mapped_lun);
+       if (!deve) {
+               rcu_read_unlock();
                return -ENODEV;
        }
        tpg = nacl->se_tpg;
        /* scsiPortIndex */
        ret = snprintf(page, PAGE_SIZE, "%u\n", tpg->se_tpg_tfo->tpg_get_tag(tpg));
-       spin_unlock_irq(&nacl->device_list_lock);
+       rcu_read_unlock();
        return ret;
 }
 DEV_STAT_SCSI_ATTR_INTR_PORT_ATTR_RO(port);
@@ -1546,15 +1472,15 @@ static ssize_t target_stat_scsi_att_intr_port_show_attr_port_auth_indx(
        struct se_dev_entry *deve;
        ssize_t ret;
 
-       spin_lock_irq(&nacl->device_list_lock);
-       deve = nacl->device_list[lacl->mapped_lun];
-       if (!deve->se_lun || !deve->se_lun_acl) {
-               spin_unlock_irq(&nacl->device_list_lock);
+       rcu_read_lock();
+       deve = target_nacl_find_deve(nacl, lacl->mapped_lun);
+       if (!deve) {
+               rcu_read_unlock();
                return -ENODEV;
        }
        /* scsiAttIntrPortAuthIntrIdx */
        ret = snprintf(page, PAGE_SIZE, "%u\n", nacl->acl_index);
-       spin_unlock_irq(&nacl->device_list_lock);
+       rcu_read_unlock();
        return ret;
 }
 DEV_STAT_SCSI_ATTR_INTR_PORT_ATTR_RO(port_auth_indx);
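
Editor's note: every show_attr handler converted in this file follows the same pattern: the nacl->device_list_lock spinlock and fixed-size device_list array give way to an RCU read-side lookup (target_nacl_find_deve() walking the nacl's lun_entry_hlist). A minimal sketch of that reader pattern, using hypothetical demo_* names rather than the real target-core structures:

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/rculist.h>

/* Illustrative only: an RCU hlist reader, as used by the stat handlers above. */
struct demo_entry {
	u64 mapped_lun;
	struct hlist_node link;
	struct rcu_head rcu_head;
};

static struct demo_entry *demo_find_entry(struct hlist_head *head, u64 mapped_lun)
{
	struct demo_entry *e;

	hlist_for_each_entry_rcu(e, head, link)
		if (e->mapped_lun == mapped_lun)
			return e;
	return NULL;
}

static ssize_t demo_show(struct hlist_head *head, u64 mapped_lun,
			 char *page, size_t len)
{
	struct demo_entry *e;
	ssize_t ret = -ENODEV;

	rcu_read_lock();			/* readers never block writers */
	e = demo_find_entry(head, mapped_lun);
	if (e)					/* only dereference while read-locked */
		ret = snprintf(page, len, "%llu\n", e->mapped_lun);
	rcu_read_unlock();
	return ret;
}

The point the conversion relies on is that the statistics readers tolerate slightly stale data, so they no longer need to exclude writers, only to stay inside the RCU read-side critical section while dereferencing the entry.
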
index a5bb0c46e57e5c6398903141dcab6075ede4a1f8..5b2820312310ec21bf83824efa318caab132e046 100644 (file)
@@ -31,7 +31,6 @@
 #include <target/target_core_base.h>
 #include <target/target_core_backend.h>
 #include <target/target_core_fabric.h>
-#include <target/target_core_configfs.h>
 
 #include "target_core_internal.h"
 #include "target_core_alua.h"
@@ -115,7 +114,7 @@ void core_tmr_abort_task(
 {
        struct se_cmd *se_cmd;
        unsigned long flags;
-       int ref_tag;
+       u64 ref_tag;
 
        spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
        list_for_each_entry(se_cmd, &se_sess->sess_cmd_list, se_cmd_list) {
@@ -127,16 +126,17 @@ void core_tmr_abort_task(
                if (se_cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)
                        continue;
 
-               ref_tag = se_cmd->se_tfo->get_task_tag(se_cmd);
+               ref_tag = se_cmd->tag;
                if (tmr->ref_task_tag != ref_tag)
                        continue;
 
-               printk("ABORT_TASK: Found referenced %s task_tag: %u\n",
+               printk("ABORT_TASK: Found referenced %s task_tag: %llu\n",
                        se_cmd->se_tfo->get_fabric_name(), ref_tag);
 
                spin_lock(&se_cmd->t_state_lock);
                if (se_cmd->transport_state & CMD_T_COMPLETE) {
-                       printk("ABORT_TASK: ref_tag: %u already complete, skipping\n", ref_tag);
+                       printk("ABORT_TASK: ref_tag: %llu already complete,"
+                              " skipping\n", ref_tag);
                        spin_unlock(&se_cmd->t_state_lock);
                        spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
                        goto out;
@@ -151,18 +151,18 @@ void core_tmr_abort_task(
                cancel_work_sync(&se_cmd->work);
                transport_wait_for_tasks(se_cmd);
 
-               target_put_sess_cmd(se_sess, se_cmd);
+               target_put_sess_cmd(se_cmd);
                transport_cmd_finish_abort(se_cmd, true);
 
                printk("ABORT_TASK: Sending TMR_FUNCTION_COMPLETE for"
-                               " ref_tag: %d\n", ref_tag);
+                               " ref_tag: %llu\n", ref_tag);
                tmr->response = TMR_FUNCTION_COMPLETE;
                return;
        }
        spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
 
 out:
-       printk("ABORT_TASK: Sending TMR_TASK_DOES_NOT_EXIST for ref_tag: %d\n",
+       printk("ABORT_TASK: Sending TMR_TASK_DOES_NOT_EXIST for ref_tag: %lld\n",
                        tmr->ref_task_tag);
        tmr->response = TMR_TASK_DOES_NOT_EXIST;
 }
@@ -287,16 +287,16 @@ static void core_tmr_drain_state_list(
                list_del(&cmd->state_list);
 
                pr_debug("LUN_RESET: %s cmd: %p"
-                       " ITT/CmdSN: 0x%08x/0x%08x, i_state: %d, t_state: %d"
+                       " ITT/CmdSN: 0x%08llx/0x%08x, i_state: %d, t_state: %d"
                        "cdb: 0x%02x\n",
                        (preempt_and_abort_list) ? "Preempt" : "", cmd,
-                       cmd->se_tfo->get_task_tag(cmd), 0,
+                       cmd->tag, 0,
                        cmd->se_tfo->get_cmd_state(cmd), cmd->t_state,
                        cmd->t_task_cdb[0]);
-               pr_debug("LUN_RESET: ITT[0x%08x] - pr_res_key: 0x%016Lx"
+               pr_debug("LUN_RESET: ITT[0x%08llx] - pr_res_key: 0x%016Lx"
                        " -- CMD_T_ACTIVE: %d"
                        " CMD_T_STOP: %d CMD_T_SENT: %d\n",
-                       cmd->se_tfo->get_task_tag(cmd), cmd->pr_res_key,
+                       cmd->tag, cmd->pr_res_key,
                        (cmd->transport_state & CMD_T_ACTIVE) != 0,
                        (cmd->transport_state & CMD_T_STOP) != 0,
                        (cmd->transport_state & CMD_T_SENT) != 0);
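
Editor's note: in the core_tmr_abort_task() hunk above, the per-fabric get_task_tag() callback is replaced by a u64 tag stored directly in the command, so the abort path becomes a plain integer comparison and the printk formats widen to %llu. A minimal lookup sketch under those assumptions, with hypothetical demo_* names:

#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/printk.h>

/* Illustrative only: matching a TMR abort against outstanding commands
 * by a 64-bit tag held in the command itself. */
struct demo_cmd {
	u64 tag;
	struct list_head list;
};

static struct demo_cmd *demo_find_by_tag(struct list_head *cmds, u64 ref_tag)
{
	struct demo_cmd *cmd;

	list_for_each_entry(cmd, cmds, list) {
		if (cmd->tag != ref_tag)
			continue;
		pr_debug("found referenced task_tag: %llu\n", cmd->tag);
		return cmd;
	}
	return NULL;
}
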
index 84de757bd4580b04b1519934e71eb24606bbd3a6..babde4ad841f18a7956c7d56b1d23ff1f075e3d3 100644 (file)
@@ -39,6 +39,7 @@
 #include <target/target_core_fabric.h>
 
 #include "target_core_internal.h"
+#include "target_core_alua.h"
 #include "target_core_pr.h"
 
 extern struct se_device *g_lun0_dev;
@@ -46,45 +47,9 @@ extern struct se_device *g_lun0_dev;
 static DEFINE_SPINLOCK(tpg_lock);
 static LIST_HEAD(tpg_list);
 
-/*     core_clear_initiator_node_from_tpg():
- *
- *
- */
-static void core_clear_initiator_node_from_tpg(
-       struct se_node_acl *nacl,
-       struct se_portal_group *tpg)
-{
-       int i;
-       struct se_dev_entry *deve;
-       struct se_lun *lun;
-
-       spin_lock_irq(&nacl->device_list_lock);
-       for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
-               deve = nacl->device_list[i];
-
-               if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS))
-                       continue;
-
-               if (!deve->se_lun) {
-                       pr_err("%s device entries device pointer is"
-                               " NULL, but Initiator has access.\n",
-                               tpg->se_tpg_tfo->get_fabric_name());
-                       continue;
-               }
-
-               lun = deve->se_lun;
-               spin_unlock_irq(&nacl->device_list_lock);
-               core_disable_device_list_for_node(lun, NULL, deve->mapped_lun,
-                       TRANSPORT_LUNFLAGS_NO_ACCESS, nacl, tpg);
-
-               spin_lock_irq(&nacl->device_list_lock);
-       }
-       spin_unlock_irq(&nacl->device_list_lock);
-}
-
 /*     __core_tpg_get_initiator_node_acl():
  *
- *     spin_lock_bh(&tpg->acl_node_lock); must be held when calling
+ *     mutex_lock(&tpg->acl_node_mutex); must be held when calling
  */
 struct se_node_acl *__core_tpg_get_initiator_node_acl(
        struct se_portal_group *tpg,
@@ -110,9 +75,9 @@ struct se_node_acl *core_tpg_get_initiator_node_acl(
 {
        struct se_node_acl *acl;
 
-       spin_lock_irq(&tpg->acl_node_lock);
+       mutex_lock(&tpg->acl_node_mutex);
        acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname);
-       spin_unlock_irq(&tpg->acl_node_lock);
+       mutex_unlock(&tpg->acl_node_mutex);
 
        return acl;
 }
@@ -124,22 +89,20 @@ EXPORT_SYMBOL(core_tpg_get_initiator_node_acl);
  */
 void core_tpg_add_node_to_devs(
        struct se_node_acl *acl,
-       struct se_portal_group *tpg)
+       struct se_portal_group *tpg,
+       struct se_lun *lun_orig)
 {
-       int i = 0;
        u32 lun_access = 0;
        struct se_lun *lun;
        struct se_device *dev;
 
-       spin_lock(&tpg->tpg_lun_lock);
-       for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
-               lun = tpg->tpg_lun_list[i];
-               if (lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE)
+       mutex_lock(&tpg->tpg_lun_mutex);
+       hlist_for_each_entry_rcu(lun, &tpg->tpg_lun_hlist, link) {
+               if (lun_orig && lun != lun_orig)
                        continue;
 
-               spin_unlock(&tpg->tpg_lun_lock);
-
-               dev = lun->lun_se_dev;
+               dev = rcu_dereference_check(lun->lun_se_dev,
+                                           lockdep_is_held(&tpg->tpg_lun_mutex));
                /*
                 * By default in LIO-Target $FABRIC_MOD,
                 * demo_mode_write_protect is ON, or READ_ONLY;
@@ -157,7 +120,7 @@ void core_tpg_add_node_to_devs(
                                lun_access = TRANSPORT_LUNFLAGS_READ_WRITE;
                }
 
-               pr_debug("TARGET_CORE[%s]->TPG[%u]_LUN[%u] - Adding %s"
+               pr_debug("TARGET_CORE[%s]->TPG[%u]_LUN[%llu] - Adding %s"
                        " access for LUN in Demo Mode\n",
                        tpg->se_tpg_tfo->get_fabric_name(),
                        tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
@@ -165,7 +128,7 @@ void core_tpg_add_node_to_devs(
                        "READ-WRITE" : "READ-ONLY");
 
                core_enable_device_list_for_node(lun, NULL, lun->unpacked_lun,
-                               lun_access, acl, tpg);
+                                                lun_access, acl, tpg);
                /*
                 * Check to see if there are any existing persistent reservation
                 * APTPL pre-registrations that need to be enabled for this dynamic
@@ -173,9 +136,8 @@ void core_tpg_add_node_to_devs(
                 */
                core_scsi3_check_aptpl_registration(dev, tpg, lun, acl,
                                                    lun->unpacked_lun);
-               spin_lock(&tpg->tpg_lun_lock);
        }
-       spin_unlock(&tpg->tpg_lun_lock);
+       mutex_unlock(&tpg->tpg_lun_mutex);
 }
 
 /*      core_set_queue_depth_for_node():
@@ -196,67 +158,63 @@ static int core_set_queue_depth_for_node(
        return 0;
 }
 
-void array_free(void *array, int n)
+static struct se_node_acl *target_alloc_node_acl(struct se_portal_group *tpg,
+               const unsigned char *initiatorname)
 {
-       void **a = array;
-       int i;
+       struct se_node_acl *acl;
 
-       for (i = 0; i < n; i++)
-               kfree(a[i]);
-       kfree(a);
-}
+       acl = kzalloc(max(sizeof(*acl), tpg->se_tpg_tfo->node_acl_size),
+                       GFP_KERNEL);
+       if (!acl)
+               return NULL;
 
-static void *array_zalloc(int n, size_t size, gfp_t flags)
-{
-       void **a;
-       int i;
+       INIT_LIST_HEAD(&acl->acl_list);
+       INIT_LIST_HEAD(&acl->acl_sess_list);
+       INIT_HLIST_HEAD(&acl->lun_entry_hlist);
+       kref_init(&acl->acl_kref);
+       init_completion(&acl->acl_free_comp);
+       spin_lock_init(&acl->nacl_sess_lock);
+       mutex_init(&acl->lun_entry_mutex);
+       atomic_set(&acl->acl_pr_ref_count, 0);
+       if (tpg->se_tpg_tfo->tpg_get_default_depth)
+               acl->queue_depth = tpg->se_tpg_tfo->tpg_get_default_depth(tpg);
+       else
+               acl->queue_depth = 1;
+       snprintf(acl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname);
+       acl->se_tpg = tpg;
+       acl->acl_index = scsi_get_new_index(SCSI_AUTH_INTR_INDEX);
 
-       a = kzalloc(n * sizeof(void*), flags);
-       if (!a)
-               return NULL;
-       for (i = 0; i < n; i++) {
-               a[i] = kzalloc(size, flags);
-               if (!a[i]) {
-                       array_free(a, n);
-                       return NULL;
-               }
-       }
-       return a;
+       tpg->se_tpg_tfo->set_default_node_attributes(acl);
+
+       if (core_set_queue_depth_for_node(tpg, acl) < 0)
+               goto out_free_acl;
+
+       return acl;
+
+out_free_acl:
+       kfree(acl);
+       return NULL;
 }
 
-/*      core_create_device_list_for_node():
- *
- *
- */
-static int core_create_device_list_for_node(struct se_node_acl *nacl)
+static void target_add_node_acl(struct se_node_acl *acl)
 {
-       struct se_dev_entry *deve;
-       int i;
-
-       nacl->device_list = array_zalloc(TRANSPORT_MAX_LUNS_PER_TPG,
-                       sizeof(struct se_dev_entry), GFP_KERNEL);
-       if (!nacl->device_list) {
-               pr_err("Unable to allocate memory for"
-                       " struct se_node_acl->device_list\n");
-               return -ENOMEM;
-       }
-       for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
-               deve = nacl->device_list[i];
-
-               atomic_set(&deve->ua_count, 0);
-               atomic_set(&deve->pr_ref_count, 0);
-               spin_lock_init(&deve->ua_lock);
-               INIT_LIST_HEAD(&deve->alua_port_list);
-               INIT_LIST_HEAD(&deve->ua_list);
-       }
+       struct se_portal_group *tpg = acl->se_tpg;
 
-       return 0;
+       mutex_lock(&tpg->acl_node_mutex);
+       list_add_tail(&acl->acl_list, &tpg->acl_node_list);
+       tpg->num_node_acls++;
+       mutex_unlock(&tpg->acl_node_mutex);
+
+       pr_debug("%s_TPG[%hu] - Added %s ACL with TCQ Depth: %d for %s"
+               " Initiator Node: %s\n",
+               tpg->se_tpg_tfo->get_fabric_name(),
+               tpg->se_tpg_tfo->tpg_get_tag(tpg),
+               acl->dynamic_node_acl ? "DYNAMIC" : "",
+               acl->queue_depth,
+               tpg->se_tpg_tfo->get_fabric_name(),
+               acl->initiatorname);
 }
 
-/*     core_tpg_check_initiator_node_acl()
- *
- *
- */
 struct se_node_acl *core_tpg_check_initiator_node_acl(
        struct se_portal_group *tpg,
        unsigned char *initiatorname)
@@ -270,35 +228,11 @@ struct se_node_acl *core_tpg_check_initiator_node_acl(
        if (!tpg->se_tpg_tfo->tpg_check_demo_mode(tpg))
                return NULL;
 
-       acl =  tpg->se_tpg_tfo->tpg_alloc_fabric_acl(tpg);
+       acl = target_alloc_node_acl(tpg, initiatorname);
        if (!acl)
                return NULL;
-
-       INIT_LIST_HEAD(&acl->acl_list);
-       INIT_LIST_HEAD(&acl->acl_sess_list);
-       kref_init(&acl->acl_kref);
-       init_completion(&acl->acl_free_comp);
-       spin_lock_init(&acl->device_list_lock);
-       spin_lock_init(&acl->nacl_sess_lock);
-       atomic_set(&acl->acl_pr_ref_count, 0);
-       acl->queue_depth = tpg->se_tpg_tfo->tpg_get_default_depth(tpg);
-       snprintf(acl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname);
-       acl->se_tpg = tpg;
-       acl->acl_index = scsi_get_new_index(SCSI_AUTH_INTR_INDEX);
        acl->dynamic_node_acl = 1;
 
-       tpg->se_tpg_tfo->set_default_node_attributes(acl);
-
-       if (core_create_device_list_for_node(acl) < 0) {
-               tpg->se_tpg_tfo->tpg_release_fabric_acl(tpg, acl);
-               return NULL;
-       }
-
-       if (core_set_queue_depth_for_node(tpg, acl) < 0) {
-               core_free_device_list_for_node(acl, tpg);
-               tpg->se_tpg_tfo->tpg_release_fabric_acl(tpg, acl);
-               return NULL;
-       }
        /*
         * Here we only create demo-mode MappedLUNs from the active
         * TPG LUNs if the fabric is not explicitly asking for
@@ -306,18 +240,9 @@ struct se_node_acl *core_tpg_check_initiator_node_acl(
         */
        if ((tpg->se_tpg_tfo->tpg_check_demo_mode_login_only == NULL) ||
            (tpg->se_tpg_tfo->tpg_check_demo_mode_login_only(tpg) != 1))
-               core_tpg_add_node_to_devs(acl, tpg);
-
-       spin_lock_irq(&tpg->acl_node_lock);
-       list_add_tail(&acl->acl_list, &tpg->acl_node_list);
-       tpg->num_node_acls++;
-       spin_unlock_irq(&tpg->acl_node_lock);
-
-       pr_debug("%s_TPG[%u] - Added DYNAMIC ACL with TCQ Depth: %d for %s"
-               " Initiator Node: %s\n", tpg->se_tpg_tfo->get_fabric_name(),
-               tpg->se_tpg_tfo->tpg_get_tag(tpg), acl->queue_depth,
-               tpg->se_tpg_tfo->get_fabric_name(), initiatorname);
+               core_tpg_add_node_to_devs(acl, tpg, NULL);
 
+       target_add_node_acl(acl);
        return acl;
 }
 EXPORT_SYMBOL(core_tpg_check_initiator_node_acl);
@@ -328,40 +253,13 @@ void core_tpg_wait_for_nacl_pr_ref(struct se_node_acl *nacl)
                cpu_relax();
 }
 
-void core_tpg_clear_object_luns(struct se_portal_group *tpg)
-{
-       int i;
-       struct se_lun *lun;
-
-       spin_lock(&tpg->tpg_lun_lock);
-       for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
-               lun = tpg->tpg_lun_list[i];
-
-               if ((lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE) ||
-                   (lun->lun_se_dev == NULL))
-                       continue;
-
-               spin_unlock(&tpg->tpg_lun_lock);
-               core_dev_del_lun(tpg, lun);
-               spin_lock(&tpg->tpg_lun_lock);
-       }
-       spin_unlock(&tpg->tpg_lun_lock);
-}
-EXPORT_SYMBOL(core_tpg_clear_object_luns);
-
-/*     core_tpg_add_initiator_node_acl():
- *
- *
- */
 struct se_node_acl *core_tpg_add_initiator_node_acl(
        struct se_portal_group *tpg,
-       struct se_node_acl *se_nacl,
-       const char *initiatorname,
-       u32 queue_depth)
+       const char *initiatorname)
 {
-       struct se_node_acl *acl = NULL;
+       struct se_node_acl *acl;
 
-       spin_lock_irq(&tpg->acl_node_lock);
+       mutex_lock(&tpg->acl_node_mutex);
        acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname);
        if (acl) {
                if (acl->dynamic_node_acl) {
@@ -369,99 +267,42 @@ struct se_node_acl *core_tpg_add_initiator_node_acl(
                        pr_debug("%s_TPG[%u] - Replacing dynamic ACL"
                                " for %s\n", tpg->se_tpg_tfo->get_fabric_name(),
                                tpg->se_tpg_tfo->tpg_get_tag(tpg), initiatorname);
-                       spin_unlock_irq(&tpg->acl_node_lock);
-                       /*
-                        * Release the locally allocated struct se_node_acl
-                        * because * core_tpg_add_initiator_node_acl() returned
-                        * a pointer to an existing demo mode node ACL.
-                        */
-                       if (se_nacl)
-                               tpg->se_tpg_tfo->tpg_release_fabric_acl(tpg,
-                                                       se_nacl);
-                       goto done;
+                       mutex_unlock(&tpg->acl_node_mutex);
+                       return acl;
                }
 
                pr_err("ACL entry for %s Initiator"
                        " Node %s already exists for TPG %u, ignoring"
                        " request.\n",  tpg->se_tpg_tfo->get_fabric_name(),
                        initiatorname, tpg->se_tpg_tfo->tpg_get_tag(tpg));
-               spin_unlock_irq(&tpg->acl_node_lock);
+               mutex_unlock(&tpg->acl_node_mutex);
                return ERR_PTR(-EEXIST);
        }
-       spin_unlock_irq(&tpg->acl_node_lock);
-
-       if (!se_nacl) {
-               pr_err("struct se_node_acl pointer is NULL\n");
-               return ERR_PTR(-EINVAL);
-       }
-       /*
-        * For v4.x logic the se_node_acl_s is hanging off a fabric
-        * dependent structure allocated via
-        * struct target_core_fabric_ops->fabric_make_nodeacl()
-        */
-       acl = se_nacl;
+       mutex_unlock(&tpg->acl_node_mutex);
 
-       INIT_LIST_HEAD(&acl->acl_list);
-       INIT_LIST_HEAD(&acl->acl_sess_list);
-       kref_init(&acl->acl_kref);
-       init_completion(&acl->acl_free_comp);
-       spin_lock_init(&acl->device_list_lock);
-       spin_lock_init(&acl->nacl_sess_lock);
-       atomic_set(&acl->acl_pr_ref_count, 0);
-       acl->queue_depth = queue_depth;
-       snprintf(acl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname);
-       acl->se_tpg = tpg;
-       acl->acl_index = scsi_get_new_index(SCSI_AUTH_INTR_INDEX);
-
-       tpg->se_tpg_tfo->set_default_node_attributes(acl);
-
-       if (core_create_device_list_for_node(acl) < 0) {
-               tpg->se_tpg_tfo->tpg_release_fabric_acl(tpg, acl);
+       acl = target_alloc_node_acl(tpg, initiatorname);
+       if (!acl)
                return ERR_PTR(-ENOMEM);
-       }
-
-       if (core_set_queue_depth_for_node(tpg, acl) < 0) {
-               core_free_device_list_for_node(acl, tpg);
-               tpg->se_tpg_tfo->tpg_release_fabric_acl(tpg, acl);
-               return ERR_PTR(-EINVAL);
-       }
-
-       spin_lock_irq(&tpg->acl_node_lock);
-       list_add_tail(&acl->acl_list, &tpg->acl_node_list);
-       tpg->num_node_acls++;
-       spin_unlock_irq(&tpg->acl_node_lock);
-
-done:
-       pr_debug("%s_TPG[%hu] - Added ACL with TCQ Depth: %d for %s"
-               " Initiator Node: %s\n", tpg->se_tpg_tfo->get_fabric_name(),
-               tpg->se_tpg_tfo->tpg_get_tag(tpg), acl->queue_depth,
-               tpg->se_tpg_tfo->get_fabric_name(), initiatorname);
 
+       target_add_node_acl(acl);
        return acl;
 }
-EXPORT_SYMBOL(core_tpg_add_initiator_node_acl);
 
-/*     core_tpg_del_initiator_node_acl():
- *
- *
- */
-int core_tpg_del_initiator_node_acl(
-       struct se_portal_group *tpg,
-       struct se_node_acl *acl,
-       int force)
+void core_tpg_del_initiator_node_acl(struct se_node_acl *acl)
 {
+       struct se_portal_group *tpg = acl->se_tpg;
        LIST_HEAD(sess_list);
        struct se_session *sess, *sess_tmp;
        unsigned long flags;
        int rc;
 
-       spin_lock_irq(&tpg->acl_node_lock);
+       mutex_lock(&tpg->acl_node_mutex);
        if (acl->dynamic_node_acl) {
                acl->dynamic_node_acl = 0;
        }
        list_del(&acl->acl_list);
        tpg->num_node_acls--;
-       spin_unlock_irq(&tpg->acl_node_lock);
+       mutex_unlock(&tpg->acl_node_mutex);
 
        spin_lock_irqsave(&acl->nacl_sess_lock, flags);
        acl->acl_stop = 1;
@@ -493,7 +334,6 @@ int core_tpg_del_initiator_node_acl(
        wait_for_completion(&acl->acl_free_comp);
 
        core_tpg_wait_for_nacl_pr_ref(acl);
-       core_clear_initiator_node_from_tpg(acl, tpg);
        core_free_device_list_for_node(acl, tpg);
 
        pr_debug("%s_TPG[%hu] - Deleted ACL with TCQ Depth: %d for %s"
@@ -501,9 +341,8 @@ int core_tpg_del_initiator_node_acl(
                tpg->se_tpg_tfo->tpg_get_tag(tpg), acl->queue_depth,
                tpg->se_tpg_tfo->get_fabric_name(), acl->initiatorname);
 
-       return 0;
+       kfree(acl);
 }
-EXPORT_SYMBOL(core_tpg_del_initiator_node_acl);
 
 /*     core_tpg_set_initiator_node_queue_depth():
  *
@@ -520,21 +359,21 @@ int core_tpg_set_initiator_node_queue_depth(
        unsigned long flags;
        int dynamic_acl = 0;
 
-       spin_lock_irq(&tpg->acl_node_lock);
+       mutex_lock(&tpg->acl_node_mutex);
        acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname);
        if (!acl) {
                pr_err("Access Control List entry for %s Initiator"
                        " Node %s does not exists for TPG %hu, ignoring"
                        " request.\n", tpg->se_tpg_tfo->get_fabric_name(),
                        initiatorname, tpg->se_tpg_tfo->tpg_get_tag(tpg));
-               spin_unlock_irq(&tpg->acl_node_lock);
+               mutex_unlock(&tpg->acl_node_mutex);
                return -ENODEV;
        }
        if (acl->dynamic_node_acl) {
                acl->dynamic_node_acl = 0;
                dynamic_acl = 1;
        }
-       spin_unlock_irq(&tpg->acl_node_lock);
+       mutex_unlock(&tpg->acl_node_mutex);
 
        spin_lock_irqsave(&tpg->session_lock, flags);
        list_for_each_entry(sess, &tpg->tpg_sess_list, sess_list) {
@@ -550,10 +389,10 @@ int core_tpg_set_initiator_node_queue_depth(
                                tpg->se_tpg_tfo->get_fabric_name(), initiatorname);
                        spin_unlock_irqrestore(&tpg->session_lock, flags);
 
-                       spin_lock_irq(&tpg->acl_node_lock);
+                       mutex_lock(&tpg->acl_node_mutex);
                        if (dynamic_acl)
                                acl->dynamic_node_acl = 1;
-                       spin_unlock_irq(&tpg->acl_node_lock);
+                       mutex_unlock(&tpg->acl_node_mutex);
                        return -EEXIST;
                }
                /*
@@ -588,10 +427,10 @@ int core_tpg_set_initiator_node_queue_depth(
                if (init_sess)
                        tpg->se_tpg_tfo->close_session(init_sess);
 
-               spin_lock_irq(&tpg->acl_node_lock);
+               mutex_lock(&tpg->acl_node_mutex);
                if (dynamic_acl)
                        acl->dynamic_node_acl = 1;
-               spin_unlock_irq(&tpg->acl_node_lock);
+               mutex_unlock(&tpg->acl_node_mutex);
                return -EINVAL;
        }
        spin_unlock_irqrestore(&tpg->session_lock, flags);
@@ -607,10 +446,10 @@ int core_tpg_set_initiator_node_queue_depth(
                initiatorname, tpg->se_tpg_tfo->get_fabric_name(),
                tpg->se_tpg_tfo->tpg_get_tag(tpg));
 
-       spin_lock_irq(&tpg->acl_node_lock);
+       mutex_lock(&tpg->acl_node_mutex);
        if (dynamic_acl)
                acl->dynamic_node_acl = 1;
-       spin_unlock_irq(&tpg->acl_node_lock);
+       mutex_unlock(&tpg->acl_node_mutex);
 
        return 0;
 }
@@ -646,78 +485,54 @@ static void core_tpg_lun_ref_release(struct percpu_ref *ref)
        complete(&lun->lun_ref_comp);
 }
 
-static int core_tpg_setup_virtual_lun0(struct se_portal_group *se_tpg)
-{
-       /* Set in core_dev_setup_virtual_lun0() */
-       struct se_device *dev = g_lun0_dev;
-       struct se_lun *lun = &se_tpg->tpg_virt_lun0;
-       u32 lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;
-       int ret;
-
-       lun->unpacked_lun = 0;
-       lun->lun_status = TRANSPORT_LUN_STATUS_FREE;
-       atomic_set(&lun->lun_acl_count, 0);
-       init_completion(&lun->lun_shutdown_comp);
-       INIT_LIST_HEAD(&lun->lun_acl_list);
-       spin_lock_init(&lun->lun_acl_lock);
-       spin_lock_init(&lun->lun_sep_lock);
-       init_completion(&lun->lun_ref_comp);
-
-       ret = core_tpg_add_lun(se_tpg, lun, lun_access, dev);
-       if (ret < 0)
-               return ret;
-
-       return 0;
-}
-
 int core_tpg_register(
-       const struct target_core_fabric_ops *tfo,
        struct se_wwn *se_wwn,
        struct se_portal_group *se_tpg,
-       void *tpg_fabric_ptr,
-       int se_tpg_type)
+       int proto_id)
 {
-       struct se_lun *lun;
-       u32 i;
-
-       se_tpg->tpg_lun_list = array_zalloc(TRANSPORT_MAX_LUNS_PER_TPG,
-                       sizeof(struct se_lun), GFP_KERNEL);
-       if (!se_tpg->tpg_lun_list) {
-               pr_err("Unable to allocate struct se_portal_group->"
-                               "tpg_lun_list\n");
-               return -ENOMEM;
-       }
+       int ret;
+
+       if (!se_tpg)
+               return -EINVAL;
+       /*
+        * For the typical case where core_tpg_register() is called by a
+        * fabric driver from target_core_fabric_ops->fabric_make_tpg()
+        * configfs context, use the original tf_ops pointer already saved
+        * by target-core in target_fabric_make_wwn().
+        *
+        * Otherwise, for special cases like iscsi-target discovery TPGs
+        * the caller is responsible for setting ->se_tpg_tfo ahead of
+        * calling core_tpg_register().
+        */
+       if (se_wwn)
+               se_tpg->se_tpg_tfo = se_wwn->wwn_tf->tf_ops;
 
-       for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
-               lun = se_tpg->tpg_lun_list[i];
-               lun->unpacked_lun = i;
-               lun->lun_link_magic = SE_LUN_LINK_MAGIC;
-               lun->lun_status = TRANSPORT_LUN_STATUS_FREE;
-               atomic_set(&lun->lun_acl_count, 0);
-               init_completion(&lun->lun_shutdown_comp);
-               INIT_LIST_HEAD(&lun->lun_acl_list);
-               spin_lock_init(&lun->lun_acl_lock);
-               spin_lock_init(&lun->lun_sep_lock);
-               init_completion(&lun->lun_ref_comp);
+       if (!se_tpg->se_tpg_tfo) {
+               pr_err("Unable to locate se_tpg->se_tpg_tfo pointer\n");
+               return -EINVAL;
        }
 
-       se_tpg->se_tpg_type = se_tpg_type;
-       se_tpg->se_tpg_fabric_ptr = tpg_fabric_ptr;
-       se_tpg->se_tpg_tfo = tfo;
+       INIT_HLIST_HEAD(&se_tpg->tpg_lun_hlist);
+       se_tpg->proto_id = proto_id;
        se_tpg->se_tpg_wwn = se_wwn;
        atomic_set(&se_tpg->tpg_pr_ref_count, 0);
        INIT_LIST_HEAD(&se_tpg->acl_node_list);
        INIT_LIST_HEAD(&se_tpg->se_tpg_node);
        INIT_LIST_HEAD(&se_tpg->tpg_sess_list);
-       spin_lock_init(&se_tpg->acl_node_lock);
        spin_lock_init(&se_tpg->session_lock);
-       spin_lock_init(&se_tpg->tpg_lun_lock);
-
-       if (se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) {
-               if (core_tpg_setup_virtual_lun0(se_tpg) < 0) {
-                       array_free(se_tpg->tpg_lun_list,
-                                  TRANSPORT_MAX_LUNS_PER_TPG);
-                       return -ENOMEM;
+       mutex_init(&se_tpg->tpg_lun_mutex);
+       mutex_init(&se_tpg->acl_node_mutex);
+
+       if (se_tpg->proto_id >= 0) {
+               se_tpg->tpg_virt_lun0 = core_tpg_alloc_lun(se_tpg, 0);
+               if (IS_ERR(se_tpg->tpg_virt_lun0))
+                       return PTR_ERR(se_tpg->tpg_virt_lun0);
+
+               ret = core_tpg_add_lun(se_tpg, se_tpg->tpg_virt_lun0,
+                               TRANSPORT_LUNFLAGS_READ_ONLY, g_lun0_dev);
+               if (ret < 0) {
+                       kfree(se_tpg->tpg_virt_lun0);
+                       return ret;
                }
        }
 
@@ -725,11 +540,11 @@ int core_tpg_register(
        list_add_tail(&se_tpg->se_tpg_node, &tpg_list);
        spin_unlock_bh(&tpg_lock);
 
-       pr_debug("TARGET_CORE[%s]: Allocated %s struct se_portal_group for"
-               " endpoint: %s, Portal Tag: %u\n", tfo->get_fabric_name(),
-               (se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) ?
-               "Normal" : "Discovery", (tfo->tpg_get_wwn(se_tpg) == NULL) ?
-               "None" : tfo->tpg_get_wwn(se_tpg), tfo->tpg_get_tag(se_tpg));
+       pr_debug("TARGET_CORE[%s]: Allocated portal_group for endpoint: %s, "
+                "Proto: %d, Portal Tag: %u\n", se_tpg->se_tpg_tfo->get_fabric_name(),
+               se_tpg->se_tpg_tfo->tpg_get_wwn(se_tpg) ?
+               se_tpg->se_tpg_tfo->tpg_get_wwn(se_tpg) : NULL,
+               se_tpg->proto_id, se_tpg->se_tpg_tfo->tpg_get_tag(se_tpg));
 
        return 0;
 }
@@ -737,14 +552,14 @@ EXPORT_SYMBOL(core_tpg_register);
 
 int core_tpg_deregister(struct se_portal_group *se_tpg)
 {
+       const struct target_core_fabric_ops *tfo = se_tpg->se_tpg_tfo;
        struct se_node_acl *nacl, *nacl_tmp;
+       LIST_HEAD(node_list);
 
-       pr_debug("TARGET_CORE[%s]: Deallocating %s struct se_portal_group"
-               " for endpoint: %s Portal Tag %u\n",
-               (se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) ?
-               "Normal" : "Discovery", se_tpg->se_tpg_tfo->get_fabric_name(),
-               se_tpg->se_tpg_tfo->tpg_get_wwn(se_tpg),
-               se_tpg->se_tpg_tfo->tpg_get_tag(se_tpg));
+       pr_debug("TARGET_CORE[%s]: Deallocating portal_group for endpoint: %s, "
+                "Proto: %d, Portal Tag: %u\n", tfo->get_fabric_name(),
+               tfo->tpg_get_wwn(se_tpg) ? tfo->tpg_get_wwn(se_tpg) : NULL,
+               se_tpg->proto_id, tfo->tpg_get_tag(se_tpg));
 
        spin_lock_bh(&tpg_lock);
        list_del(&se_tpg->se_tpg_node);
@@ -752,61 +567,56 @@ int core_tpg_deregister(struct se_portal_group *se_tpg)
 
        while (atomic_read(&se_tpg->tpg_pr_ref_count) != 0)
                cpu_relax();
+
+       mutex_lock(&se_tpg->acl_node_mutex);
+       list_splice_init(&se_tpg->acl_node_list, &node_list);
+       mutex_unlock(&se_tpg->acl_node_mutex);
        /*
         * Release any remaining demo-mode generated se_node_acl that have
         * not been released because of TFO->tpg_check_demo_mode_cache() == 1
         * in transport_deregister_session().
         */
-       spin_lock_irq(&se_tpg->acl_node_lock);
-       list_for_each_entry_safe(nacl, nacl_tmp, &se_tpg->acl_node_list,
-                       acl_list) {
+       list_for_each_entry_safe(nacl, nacl_tmp, &node_list, acl_list) {
                list_del(&nacl->acl_list);
                se_tpg->num_node_acls--;
-               spin_unlock_irq(&se_tpg->acl_node_lock);
 
                core_tpg_wait_for_nacl_pr_ref(nacl);
                core_free_device_list_for_node(nacl, se_tpg);
-               se_tpg->se_tpg_tfo->tpg_release_fabric_acl(se_tpg, nacl);
-
-               spin_lock_irq(&se_tpg->acl_node_lock);
+               kfree(nacl);
        }
-       spin_unlock_irq(&se_tpg->acl_node_lock);
 
-       if (se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL)
-               core_tpg_remove_lun(se_tpg, &se_tpg->tpg_virt_lun0);
+       if (se_tpg->proto_id >= 0) {
+               core_tpg_remove_lun(se_tpg, se_tpg->tpg_virt_lun0);
+               kfree_rcu(se_tpg->tpg_virt_lun0, rcu_head);
+       }
 
-       se_tpg->se_tpg_fabric_ptr = NULL;
-       array_free(se_tpg->tpg_lun_list, TRANSPORT_MAX_LUNS_PER_TPG);
        return 0;
 }
 EXPORT_SYMBOL(core_tpg_deregister);
 
 struct se_lun *core_tpg_alloc_lun(
        struct se_portal_group *tpg,
-       u32 unpacked_lun)
+       u64 unpacked_lun)
 {
        struct se_lun *lun;
 
-       if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) {
-               pr_err("%s LUN: %u exceeds TRANSPORT_MAX_LUNS_PER_TPG"
-                       "-1: %u for Target Portal Group: %u\n",
-                       tpg->se_tpg_tfo->get_fabric_name(),
-                       unpacked_lun, TRANSPORT_MAX_LUNS_PER_TPG-1,
-                       tpg->se_tpg_tfo->tpg_get_tag(tpg));
-               return ERR_PTR(-EOVERFLOW);
-       }
-
-       spin_lock(&tpg->tpg_lun_lock);
-       lun = tpg->tpg_lun_list[unpacked_lun];
-       if (lun->lun_status == TRANSPORT_LUN_STATUS_ACTIVE) {
-               pr_err("TPG Logical Unit Number: %u is already active"
-                       " on %s Target Portal Group: %u, ignoring request.\n",
-                       unpacked_lun, tpg->se_tpg_tfo->get_fabric_name(),
-                       tpg->se_tpg_tfo->tpg_get_tag(tpg));
-               spin_unlock(&tpg->tpg_lun_lock);
-               return ERR_PTR(-EINVAL);
+       lun = kzalloc(sizeof(*lun), GFP_KERNEL);
+       if (!lun) {
+               pr_err("Unable to allocate se_lun memory\n");
+               return ERR_PTR(-ENOMEM);
        }
-       spin_unlock(&tpg->tpg_lun_lock);
+       lun->unpacked_lun = unpacked_lun;
+       lun->lun_link_magic = SE_LUN_LINK_MAGIC;
+       atomic_set(&lun->lun_acl_count, 0);
+       init_completion(&lun->lun_ref_comp);
+       INIT_LIST_HEAD(&lun->lun_deve_list);
+       INIT_LIST_HEAD(&lun->lun_dev_link);
+       atomic_set(&lun->lun_tg_pt_secondary_offline, 0);
+       spin_lock_init(&lun->lun_deve_lock);
+       mutex_init(&lun->lun_tg_pt_md_mutex);
+       INIT_LIST_HEAD(&lun->lun_tg_pt_gp_link);
+       spin_lock_init(&lun->lun_tg_pt_gp_lock);
+       lun->lun_tpg = tpg;
 
        return lun;
 }
@@ -822,34 +632,70 @@ int core_tpg_add_lun(
        ret = percpu_ref_init(&lun->lun_ref, core_tpg_lun_ref_release, 0,
                              GFP_KERNEL);
        if (ret < 0)
-               return ret;
+               goto out;
 
-       ret = core_dev_export(dev, tpg, lun);
-       if (ret < 0) {
-               percpu_ref_exit(&lun->lun_ref);
-               return ret;
-       }
+       ret = core_alloc_rtpi(lun, dev);
+       if (ret)
+               goto out_kill_ref;
+
+       if (!(dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH) &&
+           !(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE))
+               target_attach_tg_pt_gp(lun, dev->t10_alua.default_tg_pt_gp);
+
+       mutex_lock(&tpg->tpg_lun_mutex);
+
+       spin_lock(&dev->se_port_lock);
+       lun->lun_index = dev->dev_index;
+       rcu_assign_pointer(lun->lun_se_dev, dev);
+       dev->export_count++;
+       list_add_tail(&lun->lun_dev_link, &dev->dev_sep_list);
+       spin_unlock(&dev->se_port_lock);
 
-       spin_lock(&tpg->tpg_lun_lock);
        lun->lun_access = lun_access;
-       lun->lun_status = TRANSPORT_LUN_STATUS_ACTIVE;
-       spin_unlock(&tpg->tpg_lun_lock);
+       if (!(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE))
+               hlist_add_head_rcu(&lun->link, &tpg->tpg_lun_hlist);
+       mutex_unlock(&tpg->tpg_lun_mutex);
 
        return 0;
+
+out_kill_ref:
+       percpu_ref_exit(&lun->lun_ref);
+out:
+       return ret;
 }
 
 void core_tpg_remove_lun(
        struct se_portal_group *tpg,
        struct se_lun *lun)
 {
+       /*
+        * rcu_dereference_raw protected by se_lun->lun_group symlink
+        * reference to se_device->dev_group.
+        */
+       struct se_device *dev = rcu_dereference_raw(lun->lun_se_dev);
+
        core_clear_lun_from_tpg(lun, tpg);
+       /*
+        * Wait for any active I/O references to percpu se_lun->lun_ref to
+        * be released.  Also, se_lun->lun_ref is now used by PR and ALUA
+        * logic when referencing a remote target port during ALL_TGT_PT=1
+        * and generating UNIT_ATTENTIONs for ALUA access state transition.
+        */
        transport_clear_lun_ref(lun);
 
-       core_dev_unexport(lun->lun_se_dev, tpg, lun);
+       mutex_lock(&tpg->tpg_lun_mutex);
+       if (lun->lun_se_dev) {
+               target_detach_tg_pt_gp(lun);
 
-       spin_lock(&tpg->tpg_lun_lock);
-       lun->lun_status = TRANSPORT_LUN_STATUS_FREE;
-       spin_unlock(&tpg->tpg_lun_lock);
+               spin_lock(&dev->se_port_lock);
+               list_del(&lun->lun_dev_link);
+               dev->export_count--;
+               rcu_assign_pointer(lun->lun_se_dev, NULL);
+               spin_unlock(&dev->se_port_lock);
+       }
+       if (!(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE))
+               hlist_del_rcu(&lun->link);
+       mutex_unlock(&tpg->tpg_lun_mutex);
 
        percpu_ref_exit(&lun->lun_ref);
 }
index 0b4e24217564ab902f3dc389deb48acb63c79440..ce8574b7220ced193e969e46141c411156e7e9a7 100644 (file)
@@ -43,7 +43,6 @@
 #include <target/target_core_base.h>
 #include <target/target_core_backend.h>
 #include <target/target_core_fabric.h>
-#include <target/target_core_configfs.h>
 
 #include "target_core_internal.h"
 #include "target_core_alua.h"
@@ -60,7 +59,6 @@ struct kmem_cache *t10_pr_reg_cache;
 struct kmem_cache *t10_alua_lu_gp_cache;
 struct kmem_cache *t10_alua_lu_gp_mem_cache;
 struct kmem_cache *t10_alua_tg_pt_gp_cache;
-struct kmem_cache *t10_alua_tg_pt_gp_mem_cache;
 struct kmem_cache *t10_alua_lba_map_cache;
 struct kmem_cache *t10_alua_lba_map_mem_cache;
 
@@ -119,16 +117,6 @@ int init_se_kmem_caches(void)
                                "cache failed\n");
                goto out_free_lu_gp_mem_cache;
        }
-       t10_alua_tg_pt_gp_mem_cache = kmem_cache_create(
-                       "t10_alua_tg_pt_gp_mem_cache",
-                       sizeof(struct t10_alua_tg_pt_gp_member),
-                       __alignof__(struct t10_alua_tg_pt_gp_member),
-                       0, NULL);
-       if (!t10_alua_tg_pt_gp_mem_cache) {
-               pr_err("kmem_cache_create() for t10_alua_tg_pt_gp_"
-                               "mem_t failed\n");
-               goto out_free_tg_pt_gp_cache;
-       }
        t10_alua_lba_map_cache = kmem_cache_create(
                        "t10_alua_lba_map_cache",
                        sizeof(struct t10_alua_lba_map),
@@ -136,7 +124,7 @@ int init_se_kmem_caches(void)
        if (!t10_alua_lba_map_cache) {
                pr_err("kmem_cache_create() for t10_alua_lba_map_"
                                "cache failed\n");
-               goto out_free_tg_pt_gp_mem_cache;
+               goto out_free_tg_pt_gp_cache;
        }
        t10_alua_lba_map_mem_cache = kmem_cache_create(
                        "t10_alua_lba_map_mem_cache",
@@ -159,8 +147,6 @@ out_free_lba_map_mem_cache:
        kmem_cache_destroy(t10_alua_lba_map_mem_cache);
 out_free_lba_map_cache:
        kmem_cache_destroy(t10_alua_lba_map_cache);
-out_free_tg_pt_gp_mem_cache:
-       kmem_cache_destroy(t10_alua_tg_pt_gp_mem_cache);
 out_free_tg_pt_gp_cache:
        kmem_cache_destroy(t10_alua_tg_pt_gp_cache);
 out_free_lu_gp_mem_cache:
@@ -186,7 +172,6 @@ void release_se_kmem_caches(void)
        kmem_cache_destroy(t10_alua_lu_gp_cache);
        kmem_cache_destroy(t10_alua_lu_gp_mem_cache);
        kmem_cache_destroy(t10_alua_tg_pt_gp_cache);
-       kmem_cache_destroy(t10_alua_tg_pt_gp_mem_cache);
        kmem_cache_destroy(t10_alua_lba_map_cache);
        kmem_cache_destroy(t10_alua_lba_map_mem_cache);
 }
@@ -279,10 +264,7 @@ int transport_alloc_session_tags(struct se_session *se_sess,
        if (rc < 0) {
                pr_err("Unable to init se_sess->sess_tag_pool,"
                        " tag_num: %u\n", tag_num);
-               if (is_vmalloc_addr(se_sess->sess_cmd_map))
-                       vfree(se_sess->sess_cmd_map);
-               else
-                       kfree(se_sess->sess_cmd_map);
+               kvfree(se_sess->sess_cmd_map);
                se_sess->sess_cmd_map = NULL;
                return -ENOMEM;
        }
@@ -409,12 +391,6 @@ EXPORT_SYMBOL(target_get_session);
 
 void target_put_session(struct se_session *se_sess)
 {
-       struct se_portal_group *tpg = se_sess->se_tpg;
-
-       if (tpg->se_tpg_tfo->put_session != NULL) {
-               tpg->se_tpg_tfo->put_session(se_sess);
-               return;
-       }
        kref_put(&se_sess->sess_kref, target_release_session);
 }
 EXPORT_SYMBOL(target_put_session);
@@ -489,10 +465,7 @@ void transport_free_session(struct se_session *se_sess)
 {
        if (se_sess->sess_cmd_map) {
                percpu_ida_destroy(&se_sess->sess_tag_pool);
-               if (is_vmalloc_addr(se_sess->sess_cmd_map))
-                       vfree(se_sess->sess_cmd_map);
-               else
-                       kfree(se_sess->sess_cmd_map);
+               kvfree(se_sess->sess_cmd_map);
        }
        kmem_cache_free(se_sess_cache, se_sess);
 }
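
Editor's note: the two cleanup paths above (the transport_alloc_session_tags() error path and transport_free_session()) drop the open-coded is_vmalloc_addr() branching in favour of kvfree(), which frees memory that came from either kmalloc() or vmalloc(). A small sketch of the allocate/free pairing this assumes, with hypothetical demo_* names:

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

/* Illustrative only: kmalloc-with-vmalloc-fallback freed by kvfree(). */
static void *demo_alloc_map(size_t size)
{
	void *map = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);

	if (!map)
		map = vzalloc(size);	/* large maps fall back to vmalloc */
	return map;
}

static void demo_free_map(void *map)
{
	kvfree(map);			/* handles both kmalloc'd and vmalloc'd memory */
}
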
@@ -504,7 +477,7 @@ void transport_deregister_session(struct se_session *se_sess)
        const struct target_core_fabric_ops *se_tfo;
        struct se_node_acl *se_nacl;
        unsigned long flags;
-       bool comp_nacl = true;
+       bool comp_nacl = true, drop_nacl = false;
 
        if (!se_tpg) {
                transport_free_session(se_sess);
@@ -524,22 +497,22 @@ void transport_deregister_session(struct se_session *se_sess)
         */
        se_nacl = se_sess->se_node_acl;
 
-       spin_lock_irqsave(&se_tpg->acl_node_lock, flags);
+       mutex_lock(&se_tpg->acl_node_mutex);
        if (se_nacl && se_nacl->dynamic_node_acl) {
                if (!se_tfo->tpg_check_demo_mode_cache(se_tpg)) {
                        list_del(&se_nacl->acl_list);
                        se_tpg->num_node_acls--;
-                       spin_unlock_irqrestore(&se_tpg->acl_node_lock, flags);
-                       core_tpg_wait_for_nacl_pr_ref(se_nacl);
-                       core_free_device_list_for_node(se_nacl, se_tpg);
-                       se_tfo->tpg_release_fabric_acl(se_tpg, se_nacl);
-
-                       comp_nacl = false;
-                       spin_lock_irqsave(&se_tpg->acl_node_lock, flags);
+                       drop_nacl = true;
                }
        }
-       spin_unlock_irqrestore(&se_tpg->acl_node_lock, flags);
+       mutex_unlock(&se_tpg->acl_node_mutex);
 
+       if (drop_nacl) {
+               core_tpg_wait_for_nacl_pr_ref(se_nacl);
+               core_free_device_list_for_node(se_nacl, se_tpg);
+               kfree(se_nacl);
+               comp_nacl = false;
+       }
        pr_debug("TARGET_CORE[%s]: Deregistered fabric_sess\n",
                se_tpg->se_tpg_tfo->get_fabric_name());
        /*
@@ -599,9 +572,8 @@ static int transport_cmd_check_stop(struct se_cmd *cmd, bool remove_from_lists,
         * this command for frontend exceptions.
         */
        if (cmd->transport_state & CMD_T_STOP) {
-               pr_debug("%s:%d CMD_T_STOP for ITT: 0x%08x\n",
-                       __func__, __LINE__,
-                       cmd->se_tfo->get_task_tag(cmd));
+               pr_debug("%s:%d CMD_T_STOP for ITT: 0x%08llx\n",
+                       __func__, __LINE__, cmd->tag);
 
                spin_unlock_irqrestore(&cmd->t_state_lock, flags);
 
@@ -1154,6 +1126,8 @@ target_cmd_size_check(struct se_cmd *cmd, unsigned int size)
 /*
  * Used by fabric modules containing a local struct se_cmd within their
  * fabric dependent per I/O descriptor.
+ *
+ * Preserves the value of @cmd->tag.
  */
 void transport_init_se_cmd(
        struct se_cmd *cmd,
@@ -1280,11 +1254,7 @@ target_setup_cmd_from_cdb(struct se_cmd *cmd, unsigned char *cdb)
                return ret;
 
        cmd->se_cmd_flags |= SCF_SUPPORTED_SAM_OPCODE;
-
-       spin_lock(&cmd->se_lun->lun_sep_lock);
-       if (cmd->se_lun->lun_sep)
-               cmd->se_lun->lun_sep->sep_stats.cmd_pdus++;
-       spin_unlock(&cmd->se_lun->lun_sep_lock);
+       atomic_long_inc(&cmd->se_lun->lun_stats.cmd_pdus);
        return 0;
 }
 EXPORT_SYMBOL(target_setup_cmd_from_cdb);
@@ -1352,11 +1322,9 @@ transport_generic_map_mem_to_cmd(struct se_cmd *cmd, struct scatterlist *sgl,
 
        cmd->t_data_sg = sgl;
        cmd->t_data_nents = sgl_count;
+       cmd->t_bidi_data_sg = sgl_bidi;
+       cmd->t_bidi_data_nents = sgl_bidi_count;
 
-       if (sgl_bidi && sgl_bidi_count) {
-               cmd->t_bidi_data_sg = sgl_bidi;
-               cmd->t_bidi_data_nents = sgl_bidi_count;
-       }
        cmd->se_cmd_flags |= SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC;
        return 0;
 }
@@ -1381,6 +1349,8 @@ transport_generic_map_mem_to_cmd(struct se_cmd *cmd, struct scatterlist *sgl,
  * @sgl_prot: struct scatterlist memory protection information
  * @sgl_prot_count: scatterlist count for protection information
  *
+ * Task tags are supported if the caller has set @se_cmd->tag.
+ *
  * Returns non zero to signal active I/O shutdown failure.  All other
  * setup exceptions will be returned as a SCSI CHECK_CONDITION response,
  * but still return zero here.
@@ -1389,7 +1359,7 @@ transport_generic_map_mem_to_cmd(struct se_cmd *cmd, struct scatterlist *sgl,
  * assumes internal allocation of fabric payload buffer by target-core.
  */
 int target_submit_cmd_map_sgls(struct se_cmd *se_cmd, struct se_session *se_sess,
-               unsigned char *cdb, unsigned char *sense, u32 unpacked_lun,
+               unsigned char *cdb, unsigned char *sense, u64 unpacked_lun,
                u32 data_length, int task_attr, int data_dir, int flags,
                struct scatterlist *sgl, u32 sgl_count,
                struct scatterlist *sgl_bidi, u32 sgl_bidi_count,
@@ -1418,7 +1388,7 @@ int target_submit_cmd_map_sgls(struct se_cmd *se_cmd, struct se_session *se_sess
         * for fabrics using TARGET_SCF_ACK_KREF that expect a second
         * kref_put() to happen during fabric packet acknowledgement.
         */
-       ret = target_get_sess_cmd(se_sess, se_cmd, (flags & TARGET_SCF_ACK_KREF));
+       ret = target_get_sess_cmd(se_cmd, flags & TARGET_SCF_ACK_KREF);
        if (ret)
                return ret;
        /*
@@ -1432,7 +1402,7 @@ int target_submit_cmd_map_sgls(struct se_cmd *se_cmd, struct se_session *se_sess
        rc = transport_lookup_cmd_lun(se_cmd, unpacked_lun);
        if (rc) {
                transport_send_check_condition_and_sense(se_cmd, rc, 0);
-               target_put_sess_cmd(se_sess, se_cmd);
+               target_put_sess_cmd(se_cmd);
                return 0;
        }
 
@@ -1449,6 +1419,7 @@ int target_submit_cmd_map_sgls(struct se_cmd *se_cmd, struct se_session *se_sess
        if (sgl_prot_count) {
                se_cmd->t_prot_sg = sgl_prot;
                se_cmd->t_prot_nents = sgl_prot_count;
+               se_cmd->se_cmd_flags |= SCF_PASSTHROUGH_PROT_SG_TO_MEM_NOALLOC;
        }
 
        /*
@@ -1512,6 +1483,8 @@ EXPORT_SYMBOL(target_submit_cmd_map_sgls);
  * @data_dir: DMA data direction
  * @flags: flags for command submission from target_sc_flags_tables
  *
+ * Task tags are supported if the caller has set @se_cmd->tag.
+ *
  * Returns non zero to signal active I/O shutdown failure.  All other
  * setup exceptions will be returned as a SCSI CHECK_CONDITION response,
  * but still return zero here.
@@ -1522,7 +1495,7 @@ EXPORT_SYMBOL(target_submit_cmd_map_sgls);
  * It also assumes internal target core SGL memory allocation.
  */
 int target_submit_cmd(struct se_cmd *se_cmd, struct se_session *se_sess,
-               unsigned char *cdb, unsigned char *sense, u32 unpacked_lun,
+               unsigned char *cdb, unsigned char *sense, u64 unpacked_lun,
                u32 data_length, int task_attr, int data_dir, int flags)
 {
        return target_submit_cmd_map_sgls(se_cmd, se_sess, cdb, sense,
@@ -1559,7 +1532,7 @@ static void target_complete_tmr_failure(struct work_struct *work)
  **/
 
 int target_submit_tmr(struct se_cmd *se_cmd, struct se_session *se_sess,
-               unsigned char *sense, u32 unpacked_lun,
+               unsigned char *sense, u64 unpacked_lun,
                void *fabric_tmr_ptr, unsigned char tm_type,
                gfp_t gfp, unsigned int tag, int flags)
 {
@@ -1583,7 +1556,7 @@ int target_submit_tmr(struct se_cmd *se_cmd, struct se_session *se_sess,
                se_cmd->se_tmr_req->ref_task_tag = tag;
 
        /* See target_submit_cmd for commentary */
-       ret = target_get_sess_cmd(se_sess, se_cmd, (flags & TARGET_SCF_ACK_KREF));
+       ret = target_get_sess_cmd(se_cmd, flags & TARGET_SCF_ACK_KREF);
        if (ret) {
                core_tmr_release_req(se_cmd->se_tmr_req);
                return ret;
@@ -1639,9 +1612,8 @@ void transport_generic_request_failure(struct se_cmd *cmd,
 {
        int ret = 0;
 
-       pr_debug("-----[ Storage Engine Exception for cmd: %p ITT: 0x%08x"
-               " CDB: 0x%02x\n", cmd, cmd->se_tfo->get_task_tag(cmd),
-               cmd->t_task_cdb[0]);
+       pr_debug("-----[ Storage Engine Exception for cmd: %p ITT: 0x%08llx"
+               " CDB: 0x%02x\n", cmd, cmd->tag, cmd->t_task_cdb[0]);
        pr_debug("-----[ i_state: %d t_state: %d sense_reason: %d\n",
                cmd->se_tfo->get_cmd_state(cmd),
                cmd->t_state, sense_reason);
@@ -1698,13 +1670,13 @@ void transport_generic_request_failure(struct se_cmd *cmd,
                 * See spc4r17, section 7.4.6 Control Mode Page, Table 349
                 */
                if (cmd->se_sess &&
-                   cmd->se_dev->dev_attrib.emulate_ua_intlck_ctrl == 2)
-                       core_scsi3_ua_allocate(cmd->se_sess->se_node_acl,
-                               cmd->orig_fe_lun, 0x2C,
-                               ASCQ_2CH_PREVIOUS_RESERVATION_CONFLICT_STATUS);
-
+                   cmd->se_dev->dev_attrib.emulate_ua_intlck_ctrl == 2) {
+                       target_ua_allocate_lun(cmd->se_sess->se_node_acl,
+                                              cmd->orig_fe_lun, 0x2C,
+                                       ASCQ_2CH_PREVIOUS_RESERVATION_CONFLICT_STATUS);
+               }
                trace_target_cmd_complete(cmd);
-               ret = cmd->se_tfo-> queue_status(cmd);
+               ret = cmd->se_tfo->queue_status(cmd);
                if (ret == -EAGAIN || ret == -ENOMEM)
                        goto queue_full;
                goto check_stop;
@@ -1765,8 +1737,8 @@ static int target_write_prot_action(struct se_cmd *cmd)
                        break;
 
                sectors = cmd->data_length >> ilog2(cmd->se_dev->dev_attrib.block_size);
-               cmd->pi_err = sbc_dif_verify_write(cmd, cmd->t_task_lba,
-                                                  sectors, 0, NULL, 0);
+               cmd->pi_err = sbc_dif_verify(cmd, cmd->t_task_lba,
+                                            sectors, 0, cmd->t_prot_sg, 0);
                if (unlikely(cmd->pi_err)) {
                        spin_lock_irq(&cmd->t_state_lock);
                        cmd->transport_state &= ~(CMD_T_BUSY|CMD_T_SENT);
@@ -1849,9 +1821,8 @@ void target_execute_cmd(struct se_cmd *cmd)
         */
        spin_lock_irq(&cmd->t_state_lock);
        if (cmd->transport_state & CMD_T_STOP) {
-               pr_debug("%s:%d CMD_T_STOP for ITT: 0x%08x\n",
-                       __func__, __LINE__,
-                       cmd->se_tfo->get_task_tag(cmd));
+               pr_debug("%s:%d CMD_T_STOP for ITT: 0x%08llx\n",
+                       __func__, __LINE__, cmd->tag);
 
                spin_unlock_irq(&cmd->t_state_lock);
                complete_all(&cmd->t_transport_stop_comp);
@@ -1990,16 +1961,17 @@ static void transport_handle_queue_full(
 
 static bool target_read_prot_action(struct se_cmd *cmd)
 {
-       sense_reason_t rc;
-
        switch (cmd->prot_op) {
        case TARGET_PROT_DIN_STRIP:
                if (!(cmd->se_sess->sup_prot_ops & TARGET_PROT_DIN_STRIP)) {
-                       rc = sbc_dif_read_strip(cmd);
-                       if (rc) {
-                               cmd->pi_err = rc;
+                       u32 sectors = cmd->data_length >>
+                                 ilog2(cmd->se_dev->dev_attrib.block_size);
+
+                       cmd->pi_err = sbc_dif_verify(cmd, cmd->t_task_lba,
+                                                    sectors, 0, cmd->t_prot_sg,
+                                                    0);
+                       if (cmd->pi_err)
                                return true;
-                       }
                }
                break;
        case TARGET_PROT_DIN_INSERT:
@@ -2078,12 +2050,8 @@ static void target_complete_ok_work(struct work_struct *work)
 queue_rsp:
        switch (cmd->data_direction) {
        case DMA_FROM_DEVICE:
-               spin_lock(&cmd->se_lun->lun_sep_lock);
-               if (cmd->se_lun->lun_sep) {
-                       cmd->se_lun->lun_sep->sep_stats.tx_data_octets +=
-                                       cmd->data_length;
-               }
-               spin_unlock(&cmd->se_lun->lun_sep_lock);
+               atomic_long_add(cmd->data_length,
+                               &cmd->se_lun->lun_stats.tx_data_octets);
                /*
                 * Perform READ_STRIP of PI using software emulation when
                 * backend had PI enabled, if the transport will not be
@@ -2106,22 +2074,14 @@ queue_rsp:
                        goto queue_full;
                break;
        case DMA_TO_DEVICE:
-               spin_lock(&cmd->se_lun->lun_sep_lock);
-               if (cmd->se_lun->lun_sep) {
-                       cmd->se_lun->lun_sep->sep_stats.rx_data_octets +=
-                               cmd->data_length;
-               }
-               spin_unlock(&cmd->se_lun->lun_sep_lock);
+               atomic_long_add(cmd->data_length,
+                               &cmd->se_lun->lun_stats.rx_data_octets);
                /*
                 * Check if we need to send READ payload for BIDI-COMMAND
                 */
                if (cmd->se_cmd_flags & SCF_BIDI) {
-                       spin_lock(&cmd->se_lun->lun_sep_lock);
-                       if (cmd->se_lun->lun_sep) {
-                               cmd->se_lun->lun_sep->sep_stats.tx_data_octets +=
-                                       cmd->data_length;
-                       }
-                       spin_unlock(&cmd->se_lun->lun_sep_lock);
+                       atomic_long_add(cmd->data_length,
+                                       &cmd->se_lun->lun_stats.tx_data_octets);
                        ret = cmd->se_tfo->queue_data_in(cmd);
                        if (ret == -EAGAIN || ret == -ENOMEM)
                                goto queue_full;
@@ -2178,6 +2138,12 @@ static inline void transport_reset_sgl_orig(struct se_cmd *cmd)
 
 static inline void transport_free_pages(struct se_cmd *cmd)
 {
+       if (!(cmd->se_cmd_flags & SCF_PASSTHROUGH_PROT_SG_TO_MEM_NOALLOC)) {
+               transport_free_sgl(cmd->t_prot_sg, cmd->t_prot_nents);
+               cmd->t_prot_sg = NULL;
+               cmd->t_prot_nents = 0;
+       }
+
        if (cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC) {
                /*
                 * Release special case READ buffer payload required for
@@ -2201,10 +2167,6 @@ static inline void transport_free_pages(struct se_cmd *cmd)
        transport_free_sgl(cmd->t_bidi_data_sg, cmd->t_bidi_data_nents);
        cmd->t_bidi_data_sg = NULL;
        cmd->t_bidi_data_nents = 0;
-
-       transport_free_sgl(cmd->t_prot_sg, cmd->t_prot_nents);
-       cmd->t_prot_sg = NULL;
-       cmd->t_prot_nents = 0;
 }
 
 /**
@@ -2226,7 +2188,7 @@ static int transport_release_cmd(struct se_cmd *cmd)
         * If this cmd has been setup with target_get_sess_cmd(), drop
         * the kref and call ->release_cmd() in kref callback.
         */
-       return target_put_sess_cmd(cmd->se_sess, cmd);
+       return target_put_sess_cmd(cmd);
 }
 
 /**
@@ -2343,6 +2305,14 @@ transport_generic_new_cmd(struct se_cmd *cmd)
        int ret = 0;
        bool zero_flag = !(cmd->se_cmd_flags & SCF_SCSI_DATA_CDB);
 
+       if (cmd->prot_op != TARGET_PROT_NORMAL &&
+           !(cmd->se_cmd_flags & SCF_PASSTHROUGH_PROT_SG_TO_MEM_NOALLOC)) {
+               ret = target_alloc_sgl(&cmd->t_prot_sg, &cmd->t_prot_nents,
+                                      cmd->prot_length, true);
+               if (ret < 0)
+                       return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+       }
+
        /*
         * Determine if the TCM fabric module has already allocated physical
         * memory, and is directly calling transport_generic_map_mem_to_cmd()
@@ -2368,14 +2338,6 @@ transport_generic_new_cmd(struct se_cmd *cmd)
                                return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
                }
 
-               if (cmd->prot_op != TARGET_PROT_NORMAL) {
-                       ret = target_alloc_sgl(&cmd->t_prot_sg,
-                                              &cmd->t_prot_nents,
-                                              cmd->prot_length, true);
-                       if (ret < 0)
-                               return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
-               }
-
                ret = target_alloc_sgl(&cmd->t_data_sg, &cmd->t_data_nents,
                                       cmd->data_length, zero_flag);
                if (ret < 0)
@@ -2470,13 +2432,12 @@ int transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks)
 EXPORT_SYMBOL(transport_generic_free_cmd);
 
 /* target_get_sess_cmd - Add command to active ->sess_cmd_list
- * @se_sess:   session to reference
  * @se_cmd:    command descriptor to add
  * @ack_kref:  Signal that fabric will perform an ack target_put_sess_cmd()
  */
-int target_get_sess_cmd(struct se_session *se_sess, struct se_cmd *se_cmd,
-                              bool ack_kref)
+int target_get_sess_cmd(struct se_cmd *se_cmd, bool ack_kref)
 {
+       struct se_session *se_sess = se_cmd->se_sess;
        unsigned long flags;
        int ret = 0;
 
@@ -2498,7 +2459,7 @@ out:
        spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
 
        if (ret && ack_kref)
-               target_put_sess_cmd(se_sess, se_cmd);
+               target_put_sess_cmd(se_cmd);
 
        return ret;
 }
@@ -2527,11 +2488,12 @@ static void target_release_cmd_kref(struct kref *kref)
 }
 
 /* target_put_sess_cmd - Check for active I/O shutdown via kref_put
- * @se_sess:   session to reference
  * @se_cmd:    command descriptor to drop
  */
-int target_put_sess_cmd(struct se_session *se_sess, struct se_cmd *se_cmd)
+int target_put_sess_cmd(struct se_cmd *se_cmd)
 {
+       struct se_session *se_sess = se_cmd->se_sess;
+
        if (!se_sess) {
                se_cmd->se_tfo->release_cmd(se_cmd);
                return 1;
@@ -2597,31 +2559,10 @@ void target_wait_for_sess_cmds(struct se_session *se_sess)
 }
 EXPORT_SYMBOL(target_wait_for_sess_cmds);
 
-static int transport_clear_lun_ref_thread(void *p)
+void transport_clear_lun_ref(struct se_lun *lun)
 {
-       struct se_lun *lun = p;
-
        percpu_ref_kill(&lun->lun_ref);
-
        wait_for_completion(&lun->lun_ref_comp);
-       complete(&lun->lun_shutdown_comp);
-
-       return 0;
-}
-
-int transport_clear_lun_ref(struct se_lun *lun)
-{
-       struct task_struct *kt;
-
-       kt = kthread_run(transport_clear_lun_ref_thread, lun,
-                       "tcm_cl_%u", lun->unpacked_lun);
-       if (IS_ERR(kt)) {
-               pr_err("Unable to start clear_lun thread\n");
-               return PTR_ERR(kt);
-       }
-       wait_for_completion(&lun->lun_shutdown_comp);
-
-       return 0;
 }
 
 /**
@@ -2655,10 +2596,8 @@ bool transport_wait_for_tasks(struct se_cmd *cmd)
 
        cmd->transport_state |= CMD_T_STOP;
 
-       pr_debug("wait_for_tasks: Stopping %p ITT: 0x%08x"
-               " i_state: %d, t_state: %d, CMD_T_STOP\n",
-               cmd, cmd->se_tfo->get_task_tag(cmd),
-               cmd->se_tfo->get_cmd_state(cmd), cmd->t_state);
+       pr_debug("wait_for_tasks: Stopping %p ITT: 0x%08llx i_state: %d, t_state: %d, CMD_T_STOP\n",
+               cmd, cmd->tag, cmd->se_tfo->get_cmd_state(cmd), cmd->t_state);
 
        spin_unlock_irqrestore(&cmd->t_state_lock, flags);
 
@@ -2667,9 +2606,8 @@ bool transport_wait_for_tasks(struct se_cmd *cmd)
        spin_lock_irqsave(&cmd->t_state_lock, flags);
        cmd->transport_state &= ~(CMD_T_ACTIVE | CMD_T_STOP);
 
-       pr_debug("wait_for_tasks: Stopped wait_for_completion("
-               "&cmd->t_transport_stop_comp) for ITT: 0x%08x\n",
-               cmd->se_tfo->get_task_tag(cmd));
+       pr_debug("wait_for_tasks: Stopped wait_for_completion(&cmd->t_transport_stop_comp) for ITT: 0x%08llx\n",
+               cmd->tag);
 
        spin_unlock_irqrestore(&cmd->t_state_lock, flags);
 
@@ -2971,8 +2909,8 @@ int transport_check_aborted_status(struct se_cmd *cmd, int send_status)
        if (!send_status || !(cmd->se_cmd_flags & SCF_SEND_DELAYED_TAS))
                return 1;
 
-       pr_debug("Sending delayed SAM_STAT_TASK_ABORTED status for CDB: 0x%02x ITT: 0x%08x\n",
-                cmd->t_task_cdb[0], cmd->se_tfo->get_task_tag(cmd));
+       pr_debug("Sending delayed SAM_STAT_TASK_ABORTED status for CDB: 0x%02x ITT: 0x%08llx\n",
+                cmd->t_task_cdb[0], cmd->tag);
 
        cmd->se_cmd_flags &= ~SCF_SEND_DELAYED_TAS;
        cmd->scsi_status = SAM_STAT_TASK_ABORTED;
@@ -3011,9 +2949,8 @@ void transport_send_task_abort(struct se_cmd *cmd)
 
        transport_lun_remove_cmd(cmd);
 
-       pr_debug("Setting SAM_STAT_TASK_ABORTED status for CDB: 0x%02x,"
-               " ITT: 0x%08x\n", cmd->t_task_cdb[0],
-               cmd->se_tfo->get_task_tag(cmd));
+       pr_debug("Setting SAM_STAT_TASK_ABORTED status for CDB: 0x%02x, ITT: 0x%08llx\n",
+                cmd->t_task_cdb[0], cmd->tag);
 
        trace_target_cmd_complete(cmd);
        cmd->se_tfo->queue_status(cmd);
@@ -3039,6 +2976,11 @@ static void target_tmr_work(struct work_struct *work)
                ret = core_tmr_lun_reset(dev, tmr, NULL, NULL);
                tmr->response = (!ret) ? TMR_FUNCTION_COMPLETE :
                                         TMR_FUNCTION_REJECTED;
+               if (tmr->response == TMR_FUNCTION_COMPLETE) {
+                       target_ua_allocate_lun(cmd->se_sess->se_node_acl,
+                                              cmd->orig_fe_lun, 0x29,
+                                              ASCQ_29H_BUS_DEVICE_RESET_FUNCTION_OCCURRED);
+               }
                break;
        case TMR_TARGET_WARM_RESET:
                tmr->response = TMR_FUNCTION_REJECTED;
@@ -3073,3 +3015,22 @@ int transport_generic_handle_tmr(
        return 0;
 }
 EXPORT_SYMBOL(transport_generic_handle_tmr);
+
+bool
+target_check_wce(struct se_device *dev)
+{
+       bool wce = false;
+
+       if (dev->transport->get_write_cache)
+               wce = dev->transport->get_write_cache(dev);
+       else if (dev->dev_attrib.emulate_write_cache > 0)
+               wce = true;
+
+       return wce;
+}
+
+bool
+target_check_fua(struct se_device *dev)
+{
+       return target_check_wce(dev) && dev->dev_attrib.emulate_fua_write > 0;
+}
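
The transport.c hunks above replace two open-coded is_vmalloc_addr()/vfree()-else-kfree() branches for sess_cmd_map with a single kvfree() call. A minimal sketch of that allocate/free pairing outside of target code (the demo_* helper names and GFP flags are illustrative, not taken from this patch):

#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>

/* Try a physically contiguous allocation first, fall back to vmalloc. */
static void *demo_alloc_map(size_t size)
{
	void *map = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);

	if (!map)
		map = vzalloc(size);
	return map;
}

static void demo_free_map(void *map)
{
	/*
	 * kvfree() checks is_vmalloc_addr() internally, so the caller no
	 * longer needs to remember which allocator succeeded.
	 */
	kvfree(map);
}
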
index e44cc94b12cb1b10ae3670f053fc0456255ac515..be25eb807a5fd5dbda381165ac15f4dd1285bb59 100644 (file)
@@ -29,7 +29,6 @@
 
 #include <target/target_core_base.h>
 #include <target/target_core_fabric.h>
-#include <target/target_core_configfs.h>
 
 #include "target_core_internal.h"
 #include "target_core_alua.h"
@@ -50,9 +49,17 @@ target_scsi3_ua_check(struct se_cmd *cmd)
        if (!nacl)
                return 0;
 
-       deve = nacl->device_list[cmd->orig_fe_lun];
-       if (!atomic_read(&deve->ua_count))
+       rcu_read_lock();
+       deve = target_nacl_find_deve(nacl, cmd->orig_fe_lun);
+       if (!deve) {
+               rcu_read_unlock();
                return 0;
+       }
+       if (!atomic_read(&deve->ua_count)) {
+               rcu_read_unlock();
+               return 0;
+       }
+       rcu_read_unlock();
        /*
         * From sam4r14, section 5.14 Unit attention condition:
         *
@@ -79,18 +86,11 @@ target_scsi3_ua_check(struct se_cmd *cmd)
 }
 
 int core_scsi3_ua_allocate(
-       struct se_node_acl *nacl,
-       u32 unpacked_lun,
+       struct se_dev_entry *deve,
        u8 asc,
        u8 ascq)
 {
-       struct se_dev_entry *deve;
        struct se_ua *ua, *ua_p, *ua_tmp;
-       /*
-        * PASSTHROUGH OPS
-        */
-       if (!nacl)
-               return -EINVAL;
 
        ua = kmem_cache_zalloc(se_ua_cache, GFP_ATOMIC);
        if (!ua) {
@@ -99,13 +99,9 @@ int core_scsi3_ua_allocate(
        }
        INIT_LIST_HEAD(&ua->ua_nacl_list);
 
-       ua->ua_nacl = nacl;
        ua->ua_asc = asc;
        ua->ua_ascq = ascq;
 
-       spin_lock_irq(&nacl->device_list_lock);
-       deve = nacl->device_list[unpacked_lun];
-
        spin_lock(&deve->ua_lock);
        list_for_each_entry_safe(ua_p, ua_tmp, &deve->ua_list, ua_nacl_list) {
                /*
@@ -113,7 +109,6 @@ int core_scsi3_ua_allocate(
                 */
                if ((ua_p->ua_asc == asc) && (ua_p->ua_ascq == ascq)) {
                        spin_unlock(&deve->ua_lock);
-                       spin_unlock_irq(&nacl->device_list_lock);
                        kmem_cache_free(se_ua_cache, ua);
                        return 0;
                }
@@ -158,24 +153,40 @@ int core_scsi3_ua_allocate(
                        list_add_tail(&ua->ua_nacl_list,
                                &deve->ua_list);
                spin_unlock(&deve->ua_lock);
-               spin_unlock_irq(&nacl->device_list_lock);
 
                atomic_inc_mb(&deve->ua_count);
                return 0;
        }
        list_add_tail(&ua->ua_nacl_list, &deve->ua_list);
        spin_unlock(&deve->ua_lock);
-       spin_unlock_irq(&nacl->device_list_lock);
 
-       pr_debug("[%s]: Allocated UNIT ATTENTION, mapped LUN: %u, ASC:"
-               " 0x%02x, ASCQ: 0x%02x\n",
-               nacl->se_tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
+       pr_debug("Allocated UNIT ATTENTION, mapped LUN: %llu, ASC:"
+               " 0x%02x, ASCQ: 0x%02x\n", deve->mapped_lun,
                asc, ascq);
 
        atomic_inc_mb(&deve->ua_count);
        return 0;
 }
 
+void target_ua_allocate_lun(struct se_node_acl *nacl,
+                           u32 unpacked_lun, u8 asc, u8 ascq)
+{
+       struct se_dev_entry *deve;
+
+       if (!nacl)
+               return;
+
+       rcu_read_lock();
+       deve = target_nacl_find_deve(nacl, unpacked_lun);
+       if (!deve) {
+               rcu_read_unlock();
+               return;
+       }
+
+       core_scsi3_ua_allocate(deve, asc, ascq);
+       rcu_read_unlock();
+}
+
 void core_scsi3_ua_release_all(
        struct se_dev_entry *deve)
 {
@@ -210,10 +221,14 @@ void core_scsi3_ua_for_check_condition(
        if (!nacl)
                return;
 
-       spin_lock_irq(&nacl->device_list_lock);
-       deve = nacl->device_list[cmd->orig_fe_lun];
+       rcu_read_lock();
+       deve = target_nacl_find_deve(nacl, cmd->orig_fe_lun);
+       if (!deve) {
+               rcu_read_unlock();
+               return;
+       }
        if (!atomic_read(&deve->ua_count)) {
-               spin_unlock_irq(&nacl->device_list_lock);
+               rcu_read_unlock();
                return;
        }
        /*
@@ -249,10 +264,10 @@ void core_scsi3_ua_for_check_condition(
                atomic_dec_mb(&deve->ua_count);
        }
        spin_unlock(&deve->ua_lock);
-       spin_unlock_irq(&nacl->device_list_lock);
+       rcu_read_unlock();
 
        pr_debug("[%s]: %s UNIT ATTENTION condition with"
-               " INTLCK_CTRL: %d, mapped LUN: %u, got CDB: 0x%02x"
+               " INTLCK_CTRL: %d, mapped LUN: %llu, got CDB: 0x%02x"
                " reported ASC: 0x%02x, ASCQ: 0x%02x\n",
                nacl->se_tpg->se_tpg_tfo->get_fabric_name(),
                (dev->dev_attrib.emulate_ua_intlck_ctrl != 0) ? "Reporting" :
@@ -278,10 +293,14 @@ int core_scsi3_ua_clear_for_request_sense(
        if (!nacl)
                return -EINVAL;
 
-       spin_lock_irq(&nacl->device_list_lock);
-       deve = nacl->device_list[cmd->orig_fe_lun];
+       rcu_read_lock();
+       deve = target_nacl_find_deve(nacl, cmd->orig_fe_lun);
+       if (!deve) {
+               rcu_read_unlock();
+               return -EINVAL;
+       }
        if (!atomic_read(&deve->ua_count)) {
-               spin_unlock_irq(&nacl->device_list_lock);
+               rcu_read_unlock();
                return -EPERM;
        }
        /*
@@ -307,10 +326,10 @@ int core_scsi3_ua_clear_for_request_sense(
                atomic_dec_mb(&deve->ua_count);
        }
        spin_unlock(&deve->ua_lock);
-       spin_unlock_irq(&nacl->device_list_lock);
+       rcu_read_unlock();
 
        pr_debug("[%s]: Released UNIT ATTENTION condition, mapped"
-               " LUN: %u, got REQUEST_SENSE reported ASC: 0x%02x,"
+               " LUN: %llu, got REQUEST_SENSE reported ASC: 0x%02x,"
                " ASCQ: 0x%02x\n", nacl->se_tpg->se_tpg_tfo->get_fabric_name(),
                cmd->orig_fe_lun, *asc, *ascq);
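
The unit-attention paths above stop taking nacl->device_list_lock and instead look up the struct se_dev_entry under rcu_read_lock(), bailing out when target_nacl_find_deve() returns NULL. A generic sketch of that RCU read-side lookup pattern (the demo_* types are invented for illustration and are not target-core structures):

#include <linux/types.h>
#include <linux/list.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>

struct demo_entry {
	u64 mapped_lun;
	struct list_head link;
};

static LIST_HEAD(demo_list);

/* Read side: cheap, non-sleeping, and tolerant of concurrent removal. */
static bool demo_lun_present(u64 lun)
{
	struct demo_entry *e;
	bool found = false;

	rcu_read_lock();
	list_for_each_entry_rcu(e, &demo_list, link) {
		if (e->mapped_lun == lun) {
			found = true;
			break;
		}
	}
	rcu_read_unlock();
	return found;
}
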
 
index a6b56b364e7af5b7895df6f8b955c3d3d053f296..bd6e78ba153d68bd37b784ba7ebd52290932906c 100644 (file)
 
 #define ASCQ_2CH_PREVIOUS_RESERVATION_CONFLICT_STATUS          0x09
 
+#define ASCQ_3FH_INQUIRY_DATA_HAS_CHANGED                      0x03
+#define ASCQ_3FH_REPORTED_LUNS_DATA_HAS_CHANGED                        0x0E
+
 extern struct kmem_cache *se_ua_cache;
 
 extern sense_reason_t target_scsi3_ua_check(struct se_cmd *);
-extern int core_scsi3_ua_allocate(struct se_node_acl *, u32, u8, u8);
+extern int core_scsi3_ua_allocate(struct se_dev_entry *, u8, u8);
+extern void target_ua_allocate_lun(struct se_node_acl *, u32, u8, u8);
 extern void core_scsi3_ua_release_all(struct se_dev_entry *);
 extern void core_scsi3_ua_for_check_condition(struct se_cmd *, u8 *, u8 *);
 extern int core_scsi3_ua_clear_for_request_sense(struct se_cmd *,
index 549af9847c2826ae8e11d6291bf8ca1715dd6a0a..c448ef421ce779347973579654b36fb7107043f6 100644 (file)
@@ -1,6 +1,7 @@
 /*
  * Copyright (C) 2013 Shaohua Li <shli@kernel.org>
  * Copyright (C) 2014 Red Hat, Inc.
+ * Copyright (C) 2015 Arrikto, Inc.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -30,7 +31,6 @@
 #include <target/target_core_base.h>
 #include <target/target_core_fabric.h>
 #include <target/target_core_backend.h>
-#include <target/target_core_backend_configfs.h>
 
 #include <linux/target_core_user.h>
 
@@ -168,6 +168,11 @@ static struct tcmu_cmd *tcmu_alloc_cmd(struct se_cmd *se_cmd)
        tcmu_cmd->tcmu_dev = udev;
        tcmu_cmd->data_length = se_cmd->data_length;
 
+       if (se_cmd->se_cmd_flags & SCF_BIDI) {
+               BUG_ON(!(se_cmd->t_bidi_data_sg && se_cmd->t_bidi_data_nents));
+               tcmu_cmd->data_length += se_cmd->t_bidi_data_sg->length;
+       }
+
        tcmu_cmd->deadline = jiffies + msecs_to_jiffies(TCMU_TIME_OUT);
 
        idr_preload(GFP_KERNEL);
@@ -226,9 +231,106 @@ static inline size_t head_to_end(size_t head, size_t size)
 
 #define UPDATE_HEAD(head, used, size) smp_store_release(&head, ((head % size) + used) % size)
 
+static void alloc_and_scatter_data_area(struct tcmu_dev *udev,
+       struct scatterlist *data_sg, unsigned int data_nents,
+       struct iovec **iov, int *iov_cnt, bool copy_data)
+{
+       int i;
+       void *from, *to;
+       size_t copy_bytes;
+       struct scatterlist *sg;
+
+       for_each_sg(data_sg, sg, data_nents, i) {
+               copy_bytes = min_t(size_t, sg->length,
+                                head_to_end(udev->data_head, udev->data_size));
+               from = kmap_atomic(sg_page(sg)) + sg->offset;
+               to = (void *) udev->mb_addr + udev->data_off + udev->data_head;
+
+               if (copy_data) {
+                       memcpy(to, from, copy_bytes);
+                       tcmu_flush_dcache_range(to, copy_bytes);
+               }
+
+               /* Even iov_base is relative to mb_addr */
+               (*iov)->iov_len = copy_bytes;
+               (*iov)->iov_base = (void __user *) udev->data_off +
+                                               udev->data_head;
+               (*iov_cnt)++;
+               (*iov)++;
+
+               UPDATE_HEAD(udev->data_head, copy_bytes, udev->data_size);
+
+               /* Uh oh, we wrapped the buffer. Must split sg across 2 iovs. */
+               if (sg->length != copy_bytes) {
+                       void *from_skip = from + copy_bytes;
+
+                       copy_bytes = sg->length - copy_bytes;
+
+                       (*iov)->iov_len = copy_bytes;
+                       (*iov)->iov_base = (void __user *) udev->data_off +
+                                                       udev->data_head;
+
+                       if (copy_data) {
+                               to = (void *) udev->mb_addr +
+                                       udev->data_off + udev->data_head;
+                               memcpy(to, from_skip, copy_bytes);
+                               tcmu_flush_dcache_range(to, copy_bytes);
+                       }
+
+                       (*iov_cnt)++;
+                       (*iov)++;
+
+                       UPDATE_HEAD(udev->data_head,
+                               copy_bytes, udev->data_size);
+               }
+
+               kunmap_atomic(from - sg->offset);
+       }
+}
+
+static void gather_and_free_data_area(struct tcmu_dev *udev,
+       struct scatterlist *data_sg, unsigned int data_nents)
+{
+       int i;
+       void *from, *to;
+       size_t copy_bytes;
+       struct scatterlist *sg;
+
+       /* It'd be easier to look at entry's iovec again, but UAM */
+       for_each_sg(data_sg, sg, data_nents, i) {
+               copy_bytes = min_t(size_t, sg->length,
+                                head_to_end(udev->data_tail, udev->data_size));
+
+               to = kmap_atomic(sg_page(sg)) + sg->offset;
+               WARN_ON(sg->length + sg->offset > PAGE_SIZE);
+               from = (void *) udev->mb_addr +
+                       udev->data_off + udev->data_tail;
+               tcmu_flush_dcache_range(from, copy_bytes);
+               memcpy(to, from, copy_bytes);
+
+               UPDATE_HEAD(udev->data_tail, copy_bytes, udev->data_size);
+
+               /* Uh oh, wrapped the data buffer for this sg's data */
+               if (sg->length != copy_bytes) {
+                       void *to_skip = to + copy_bytes;
+
+                       from = (void *) udev->mb_addr +
+                               udev->data_off + udev->data_tail;
+                       WARN_ON(udev->data_tail);
+                       copy_bytes = sg->length - copy_bytes;
+                       tcmu_flush_dcache_range(from, copy_bytes);
+                       memcpy(to_skip, from, copy_bytes);
+
+                       UPDATE_HEAD(udev->data_tail,
+                               copy_bytes, udev->data_size);
+               }
+               kunmap_atomic(to - sg->offset);
+       }
+}
+
 /*
- * We can't queue a command until we have space available on the cmd ring *and* space
- * space avail on the data ring.
+ * We can't queue a command until we have space available on the cmd ring *and*
+ * space available on the data ring.
  *
  * Called with ring lock held.
  */
@@ -276,12 +378,11 @@ static int tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
        size_t base_command_size, command_size;
        struct tcmu_mailbox *mb;
        struct tcmu_cmd_entry *entry;
-       int i;
-       struct scatterlist *sg;
        struct iovec *iov;
-       int iov_cnt = 0;
+       int iov_cnt;
        uint32_t cmd_head;
        uint64_t cdb_off;
+       bool copy_to_data_area;
 
        if (test_bit(TCMU_DEV_BIT_BROKEN, &udev->flags))
                return -EINVAL;
@@ -294,7 +395,8 @@ static int tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
         * b/c size == offsetof one-past-element.
        */
        base_command_size = max(offsetof(struct tcmu_cmd_entry,
-                                        req.iov[se_cmd->t_data_nents + 2]),
+                                        req.iov[se_cmd->t_bidi_data_nents +
+                                                se_cmd->t_data_nents + 2]),
                                sizeof(struct tcmu_cmd_entry));
        command_size = base_command_size
                + round_up(scsi_command_size(se_cmd->t_task_cdb), TCMU_OP_ALIGN_SIZE);
@@ -362,53 +464,20 @@ static int tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
         * Fix up iovecs, and handle if allocation in data ring wrapped.
         */
        iov = &entry->req.iov[0];
-       for_each_sg(se_cmd->t_data_sg, sg, se_cmd->t_data_nents, i) {
-               size_t copy_bytes = min((size_t)sg->length,
-                                    head_to_end(udev->data_head, udev->data_size));
-               void *from = kmap_atomic(sg_page(sg)) + sg->offset;
-               void *to = (void *) mb + udev->data_off + udev->data_head;
-
-               if (tcmu_cmd->se_cmd->data_direction == DMA_TO_DEVICE) {
-                       memcpy(to, from, copy_bytes);
-                       tcmu_flush_dcache_range(to, copy_bytes);
-               }
-
-               /* Even iov_base is relative to mb_addr */
-               iov->iov_len = copy_bytes;
-               iov->iov_base = (void __user *) udev->data_off +
-                                               udev->data_head;
-               iov_cnt++;
-               iov++;
-
-               UPDATE_HEAD(udev->data_head, copy_bytes, udev->data_size);
-
-               /* Uh oh, we wrapped the buffer. Must split sg across 2 iovs. */
-               if (sg->length != copy_bytes) {
-                       from += copy_bytes;
-                       copy_bytes = sg->length - copy_bytes;
-
-                       iov->iov_len = copy_bytes;
-                       iov->iov_base = (void __user *) udev->data_off +
-                                                       udev->data_head;
-
-                       if (se_cmd->data_direction == DMA_TO_DEVICE) {
-                               to = (void *) mb + udev->data_off + udev->data_head;
-                               memcpy(to, from, copy_bytes);
-                               tcmu_flush_dcache_range(to, copy_bytes);
-                       }
-
-                       iov_cnt++;
-                       iov++;
-
-                       UPDATE_HEAD(udev->data_head, copy_bytes, udev->data_size);
-               }
-
-               kunmap_atomic(from);
-       }
+       iov_cnt = 0;
+       copy_to_data_area = (se_cmd->data_direction == DMA_TO_DEVICE
+               || se_cmd->se_cmd_flags & SCF_BIDI);
+       alloc_and_scatter_data_area(udev, se_cmd->t_data_sg,
+               se_cmd->t_data_nents, &iov, &iov_cnt, copy_to_data_area);
        entry->req.iov_cnt = iov_cnt;
-       entry->req.iov_bidi_cnt = 0;
        entry->req.iov_dif_cnt = 0;
 
+       /* Handle BIDI commands */
+       iov_cnt = 0;
+       alloc_and_scatter_data_area(udev, se_cmd->t_bidi_data_sg,
+               se_cmd->t_bidi_data_nents, &iov, &iov_cnt, false);
+       entry->req.iov_bidi_cnt = iov_cnt;
+
        /* All offsets relative to mb_addr, not start of entry! */
        cdb_off = CMDR_OFF + cmd_head + base_command_size;
        memcpy((void *) mb + cdb_off, se_cmd->t_task_cdb, scsi_command_size(se_cmd->t_task_cdb));
@@ -481,47 +550,22 @@ static void tcmu_handle_completion(struct tcmu_cmd *cmd, struct tcmu_cmd_entry *
                               se_cmd->scsi_sense_length);
 
                UPDATE_HEAD(udev->data_tail, cmd->data_length, udev->data_size);
-       }
-       else if (se_cmd->data_direction == DMA_FROM_DEVICE) {
-               struct scatterlist *sg;
-               int i;
-
-               /* It'd be easier to look at entry's iovec again, but UAM */
-               for_each_sg(se_cmd->t_data_sg, sg, se_cmd->t_data_nents, i) {
-                       size_t copy_bytes;
-                       void *to;
-                       void *from;
-
-                       copy_bytes = min((size_t)sg->length,
-                                        head_to_end(udev->data_tail, udev->data_size));
-
-                       to = kmap_atomic(sg_page(sg)) + sg->offset;
-                       WARN_ON(sg->length + sg->offset > PAGE_SIZE);
-                       from = (void *) udev->mb_addr + udev->data_off + udev->data_tail;
-                       tcmu_flush_dcache_range(from, copy_bytes);
-                       memcpy(to, from, copy_bytes);
-
-                       UPDATE_HEAD(udev->data_tail, copy_bytes, udev->data_size);
-
-                       /* Uh oh, wrapped the data buffer for this sg's data */
-                       if (sg->length != copy_bytes) {
-                               from = (void *) udev->mb_addr + udev->data_off + udev->data_tail;
-                               WARN_ON(udev->data_tail);
-                               to += copy_bytes;
-                               copy_bytes = sg->length - copy_bytes;
-                               tcmu_flush_dcache_range(from, copy_bytes);
-                               memcpy(to, from, copy_bytes);
-
-                               UPDATE_HEAD(udev->data_tail, copy_bytes, udev->data_size);
-                       }
-
-                       kunmap_atomic(to);
-               }
-
+       } else if (se_cmd->se_cmd_flags & SCF_BIDI) {
+               /* Discard data_out buffer */
+               UPDATE_HEAD(udev->data_tail,
+                       (size_t)se_cmd->t_data_sg->length, udev->data_size);
+
+               /* Get Data-In buffer */
+               gather_and_free_data_area(udev,
+                       se_cmd->t_bidi_data_sg, se_cmd->t_bidi_data_nents);
+       } else if (se_cmd->data_direction == DMA_FROM_DEVICE) {
+               gather_and_free_data_area(udev,
+                       se_cmd->t_data_sg, se_cmd->t_data_nents);
        } else if (se_cmd->data_direction == DMA_TO_DEVICE) {
                UPDATE_HEAD(udev->data_tail, cmd->data_length, udev->data_size);
-       } else {
-               pr_warn("TCMU: data direction was %d!\n", se_cmd->data_direction);
+       } else if (se_cmd->data_direction != DMA_NONE) {
+               pr_warn("TCMU: data direction was %d!\n",
+                       se_cmd->data_direction);
        }
 
        target_complete_cmd(cmd->se_cmd, entry->rsp.scsi_status);
@@ -910,6 +954,14 @@ static int tcmu_check_pending_cmd(int id, void *p, void *data)
        return -EINVAL;
 }
 
+static void tcmu_dev_call_rcu(struct rcu_head *p)
+{
+       struct se_device *dev = container_of(p, struct se_device, rcu_head);
+       struct tcmu_dev *udev = TCMU_DEV(dev);
+
+       kfree(udev);
+}
+
 static void tcmu_free_device(struct se_device *dev)
 {
        struct tcmu_dev *udev = TCMU_DEV(dev);
@@ -935,8 +987,7 @@ static void tcmu_free_device(struct se_device *dev)
                kfree(udev->uio_info.name);
                kfree(udev->name);
        }
-
-       kfree(udev);
+       call_rcu(&dev->rcu_head, tcmu_dev_call_rcu);
 }
 
 enum {
@@ -1054,27 +1105,7 @@ tcmu_parse_cdb(struct se_cmd *cmd)
        return passthrough_parse_cdb(cmd, tcmu_pass_op);
 }
 
-DEF_TB_DEV_ATTRIB_RO(tcmu, hw_pi_prot_type);
-TB_DEV_ATTR_RO(tcmu, hw_pi_prot_type);
-
-DEF_TB_DEV_ATTRIB_RO(tcmu, hw_block_size);
-TB_DEV_ATTR_RO(tcmu, hw_block_size);
-
-DEF_TB_DEV_ATTRIB_RO(tcmu, hw_max_sectors);
-TB_DEV_ATTR_RO(tcmu, hw_max_sectors);
-
-DEF_TB_DEV_ATTRIB_RO(tcmu, hw_queue_depth);
-TB_DEV_ATTR_RO(tcmu, hw_queue_depth);
-
-static struct configfs_attribute *tcmu_backend_dev_attrs[] = {
-       &tcmu_dev_attrib_hw_pi_prot_type.attr,
-       &tcmu_dev_attrib_hw_block_size.attr,
-       &tcmu_dev_attrib_hw_max_sectors.attr,
-       &tcmu_dev_attrib_hw_queue_depth.attr,
-       NULL,
-};
-
-static struct se_subsystem_api tcmu_template = {
+static const struct target_backend_ops tcmu_ops = {
        .name                   = "user",
        .inquiry_prod           = "USER",
        .inquiry_rev            = TCMU_VERSION,
@@ -1090,11 +1121,11 @@ static struct se_subsystem_api tcmu_template = {
        .show_configfs_dev_params = tcmu_show_configfs_dev_params,
        .get_device_type        = sbc_get_device_type,
        .get_blocks             = tcmu_get_blocks,
+       .tb_dev_attrib_attrs    = passthrough_attrib_attrs,
 };
 
 static int __init tcmu_module_init(void)
 {
-       struct target_backend_cits *tbc = &tcmu_template.tb_cits;
        int ret;
 
        BUILD_BUG_ON((sizeof(struct tcmu_cmd_entry) % TCMU_OP_ALIGN_SIZE) != 0);
@@ -1117,10 +1148,7 @@ static int __init tcmu_module_init(void)
                goto out_unreg_device;
        }
 
-       target_core_setup_sub_cits(&tcmu_template);
-       tbc->tb_dev_attrib_cit.ct_attrs = tcmu_backend_dev_attrs;
-
-       ret = transport_subsystem_register(&tcmu_template);
+       ret = transport_backend_register(&tcmu_ops);
        if (ret)
                goto out_unreg_genl;
 
@@ -1138,7 +1166,7 @@ out_free_cache:
 
 static void __exit tcmu_module_exit(void)
 {
-       transport_subsystem_release(&tcmu_template);
+       target_backend_unregister(&tcmu_ops);
        genl_unregister_family(&tcmu_genl_family);
        root_device_unregister(tcmu_root_device);
        kmem_cache_destroy(tcmu_cmd_cache);
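
In the target_core_user.c changes above, alloc_and_scatter_data_area() and gather_and_free_data_area() factor out the same wrap handling: when a scatterlist segment straddles the end of the circular data area, it is copied in two pieces and the head or tail is advanced modulo the ring size. The arithmetic on its own, as a small standalone C sketch rather than driver code (it assumes the caller has already verified there is room for len bytes):

#include <stddef.h>
#include <stdint.h>
#include <string.h>

/* Bytes left before a copy starting at `head` would run off the end of the ring. */
static size_t ring_head_to_end(size_t head, size_t size)
{
	return size - (head % size);
}

/* Copy `len` bytes into a circular buffer, splitting the copy at the wrap point. */
static void ring_copy_in(uint8_t *ring, size_t size, size_t *head,
			 const uint8_t *src, size_t len)
{
	size_t chunk = len < ring_head_to_end(*head, size) ?
			len : ring_head_to_end(*head, size);

	memcpy(ring + (*head % size), src, chunk);
	*head = (*head + chunk) % size;

	if (chunk != len) {
		/* Wrapped: *head is now 0, the remainder lands at the start. */
		memcpy(ring + *head, src + chunk, len - chunk);
		*head = (*head + (len - chunk)) % size;
	}
}
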
index 5ec0d00edaa3412591eebd4fae6a0bd435005a1a..4515f52546f83c5cd0d4ade575f3a006d9ce3683 100644 (file)
@@ -31,7 +31,6 @@
 #include <target/target_core_base.h>
 #include <target/target_core_backend.h>
 #include <target/target_core_fabric.h>
-#include <target/target_core_configfs.h>
 
 #include "target_core_internal.h"
 #include "target_core_pr.h"
@@ -348,8 +347,7 @@ struct xcopy_pt_cmd {
        unsigned char sense_buffer[TRANSPORT_SENSE_BUFFER];
 };
 
-static struct se_port xcopy_pt_port;
-static struct se_portal_group xcopy_pt_tpg;
+struct se_portal_group xcopy_pt_tpg;
 static struct se_session xcopy_pt_sess;
 static struct se_node_acl xcopy_pt_nacl;
 
@@ -358,11 +356,6 @@ static char *xcopy_pt_get_fabric_name(void)
         return "xcopy-pt";
 }
 
-static u32 xcopy_pt_get_tag(struct se_cmd *se_cmd)
-{
-        return 0;
-}
-
 static int xcopy_pt_get_cmd_state(struct se_cmd *se_cmd)
 {
         return 0;
@@ -423,7 +416,6 @@ static int xcopy_pt_queue_status(struct se_cmd *se_cmd)
 
 static const struct target_core_fabric_ops xcopy_pt_tfo = {
        .get_fabric_name        = xcopy_pt_get_fabric_name,
-       .get_task_tag           = xcopy_pt_get_tag,
        .get_cmd_state          = xcopy_pt_get_cmd_state,
        .release_cmd            = xcopy_pt_release_cmd,
        .check_stop_free        = xcopy_pt_check_stop_free,
@@ -445,17 +437,11 @@ int target_xcopy_setup_pt(void)
                return -ENOMEM;
        }
 
-       memset(&xcopy_pt_port, 0, sizeof(struct se_port));
-       INIT_LIST_HEAD(&xcopy_pt_port.sep_alua_list);
-       INIT_LIST_HEAD(&xcopy_pt_port.sep_list);
-       mutex_init(&xcopy_pt_port.sep_tg_pt_md_mutex);
-
        memset(&xcopy_pt_tpg, 0, sizeof(struct se_portal_group));
        INIT_LIST_HEAD(&xcopy_pt_tpg.se_tpg_node);
        INIT_LIST_HEAD(&xcopy_pt_tpg.acl_node_list);
        INIT_LIST_HEAD(&xcopy_pt_tpg.tpg_sess_list);
 
-       xcopy_pt_port.sep_tpg = &xcopy_pt_tpg;
        xcopy_pt_tpg.se_tpg_tfo = &xcopy_pt_tfo;
 
        memset(&xcopy_pt_nacl, 0, sizeof(struct se_node_acl));
@@ -496,10 +482,6 @@ static void target_xcopy_setup_pt_port(
                 */
                if (remote_port) {
                        xpt_cmd->remote_port = remote_port;
-                       pt_cmd->se_lun->lun_sep = &xcopy_pt_port;
-                       pr_debug("Setup emulated remote DEST xcopy_pt_port: %p to"
-                               " cmd->se_lun->lun_sep for X-COPY data PUSH\n",
-                               pt_cmd->se_lun->lun_sep);
                } else {
                        pt_cmd->se_lun = ec_cmd->se_lun;
                        pt_cmd->se_dev = ec_cmd->se_dev;
@@ -519,10 +501,6 @@ static void target_xcopy_setup_pt_port(
                 */
                if (remote_port) {
                        xpt_cmd->remote_port = remote_port;
-                       pt_cmd->se_lun->lun_sep = &xcopy_pt_port;
-                       pr_debug("Setup emulated remote SRC xcopy_pt_port: %p to"
-                               " cmd->se_lun->lun_sep for X-COPY data PULL\n",
-                               pt_cmd->se_lun->lun_sep);
                } else {
                        pt_cmd->se_lun = ec_cmd->se_lun;
                        pt_cmd->se_dev = ec_cmd->se_dev;
@@ -574,6 +552,7 @@ static int target_xcopy_setup_pt_cmd(
        xpt_cmd->xcopy_op = xop;
        target_xcopy_setup_pt_port(xpt_cmd, xop, remote_port);
 
+       cmd->tag = 0;
        sense_rc = target_setup_cmd_from_cdb(cmd, cdb);
        if (sense_rc) {
                ret = -EINVAL;
index 881deb3d499adc57e578ce9af9890953f9281f2a..39909dadef3e82476ad2747aa42974ee6647162d 100644 (file)
@@ -80,8 +80,8 @@ struct ft_node_auth {
  * Node ACL for FC remote port session.
  */
 struct ft_node_acl {
-       struct ft_node_auth node_auth;
        struct se_node_acl se_node_acl;
+       struct ft_node_auth node_auth;
 };
 
 struct ft_lun {
@@ -157,7 +157,6 @@ int ft_queue_status(struct se_cmd *);
 int ft_queue_data_in(struct se_cmd *);
 int ft_write_pending(struct se_cmd *);
 int ft_write_pending_status(struct se_cmd *);
-u32 ft_get_task_tag(struct se_cmd *);
 int ft_get_cmd_state(struct se_cmd *);
 void ft_queue_tm_resp(struct se_cmd *);
 void ft_aborted_task(struct se_cmd *);
index 1bf78e7c994c8ef7ec3ecc58cbd7b6a2e3dfa782..68031723e5be33c97742acc286fb4217279cb2d4 100644 (file)
@@ -36,7 +36,6 @@
 
 #include <target/target_core_base.h>
 #include <target/target_core_fabric.h>
-#include <target/target_core_configfs.h>
 #include <target/configfs_macros.h>
 
 #include "tcm_fc.h"
@@ -243,15 +242,6 @@ int ft_write_pending(struct se_cmd *se_cmd)
        return 0;
 }
 
-u32 ft_get_task_tag(struct se_cmd *se_cmd)
-{
-       struct ft_cmd *cmd = container_of(se_cmd, struct ft_cmd, se_cmd);
-
-       if (cmd->aborted)
-               return ~0;
-       return fc_seq_exch(cmd->seq)->rxid;
-}
-
 int ft_get_cmd_state(struct se_cmd *se_cmd)
 {
        return 0;
@@ -564,6 +554,7 @@ static void ft_send_work(struct work_struct *work)
        }
 
        fc_seq_exch(cmd->seq)->lp->tt.seq_set_resp(cmd->seq, ft_recv_seq, cmd);
+       cmd->se_cmd.tag = fc_seq_exch(cmd->seq)->rxid;
        /*
         * Use a single se_cmd->cmd_kref as we expect to release se_cmd
         * directly from ft_check_stop_free callback in response path.
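
With ft_get_task_tag() gone, the fabric driver records the FC exchange ID in se_cmd.tag itself before handing the command to the core, which is what lets the core print cmd->tag directly in the ITT debug messages earlier in this diff. A stripped-down sketch of that contract for an imaginary fabric driver (demo_fabric_recv() and its arguments are hypothetical):

#include <target/target_core_base.h>
#include <target/target_core_fabric.h>

/*
 * Hypothetical receive path: the fabric fills se_cmd->tag from its wire
 * protocol, then submits; there is no ->get_task_tag() callback any more.
 */
static int demo_fabric_recv(struct se_session *sess, struct se_cmd *se_cmd,
			    unsigned char *cdb, unsigned char *sense,
			    u64 unpacked_lun, u32 data_length,
			    int task_attr, int data_dir, u64 wire_tag)
{
	se_cmd->tag = wire_tag;
	return target_submit_cmd(se_cmd, sess, cdb, sense, unpacked_lun,
				 data_length, task_attr, data_dir, 0);
}
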
index 86b699b94c7b615836ac38383554e1c83ff27af2..16670933013bf6b7d803471a73de3c87b79459a1 100644 (file)
 #include <target/target_core_base.h>
 #include <target/target_core_fabric.h>
 #include <target/target_core_fabric_configfs.h>
-#include <target/target_core_configfs.h>
 #include <target/configfs_macros.h>
 
 #include "tcm_fc.h"
 
-static const struct target_core_fabric_ops ft_fabric_ops;
-
 static LIST_HEAD(ft_wwn_list);
 DEFINE_MUTEX(ft_lport_lock);
 
@@ -194,48 +191,17 @@ static struct configfs_attribute *ft_nacl_base_attrs[] = {
  * Add ACL for an initiator.  The ACL is named arbitrarily.
  * The port_name and/or node_name are attributes.
  */
-static struct se_node_acl *ft_add_acl(
-       struct se_portal_group *se_tpg,
-       struct config_group *group,
-       const char *name)
+static int ft_init_nodeacl(struct se_node_acl *nacl, const char *name)
 {
-       struct ft_node_acl *acl;
-       struct ft_tpg *tpg;
+       struct ft_node_acl *acl =
+               container_of(nacl, struct ft_node_acl, se_node_acl);
        u64 wwpn;
-       u32 q_depth;
-
-       pr_debug("add acl %s\n", name);
-       tpg = container_of(se_tpg, struct ft_tpg, se_tpg);
 
        if (ft_parse_wwn(name, &wwpn, 1) < 0)
-               return ERR_PTR(-EINVAL);
+               return -EINVAL;
 
-       acl = kzalloc(sizeof(struct ft_node_acl), GFP_KERNEL);
-       if (!acl)
-               return ERR_PTR(-ENOMEM);
        acl->node_auth.port_name = wwpn;
-
-       q_depth = 32;           /* XXX bogus default - get from tpg? */
-       return core_tpg_add_initiator_node_acl(&tpg->se_tpg,
-                               &acl->se_node_acl, name, q_depth);
-}
-
-static void ft_del_acl(struct se_node_acl *se_acl)
-{
-       struct se_portal_group *se_tpg = se_acl->se_tpg;
-       struct ft_tpg *tpg;
-       struct ft_node_acl *acl = container_of(se_acl,
-                               struct ft_node_acl, se_node_acl);
-
-       pr_debug("del acl %s\n",
-               config_item_name(&se_acl->acl_group.cg_item));
-
-       tpg = container_of(se_tpg, struct ft_tpg, se_tpg);
-       pr_debug("del acl %p se_acl %p tpg %p se_tpg %p\n",
-                   acl, se_acl, tpg, &tpg->se_tpg);
-
-       core_tpg_del_initiator_node_acl(&tpg->se_tpg, se_acl, 1);
-       kfree(acl);
+       return 0;
 }
 
 struct ft_node_acl *ft_acl_get(struct ft_tpg *tpg, struct fc_rport_priv *rdata)
@@ -245,7 +211,7 @@ struct ft_node_acl *ft_acl_get(struct ft_tpg *tpg, struct fc_rport_priv *rdata)
        struct se_portal_group *se_tpg = &tpg->se_tpg;
        struct se_node_acl *se_acl;
 
-       spin_lock_irq(&se_tpg->acl_node_lock);
+       mutex_lock(&se_tpg->acl_node_mutex);
        list_for_each_entry(se_acl, &se_tpg->acl_node_list, acl_list) {
                acl = container_of(se_acl, struct ft_node_acl, se_node_acl);
                pr_debug("acl %p port_name %llx\n",
@@ -259,33 +225,10 @@ struct ft_node_acl *ft_acl_get(struct ft_tpg *tpg, struct fc_rport_priv *rdata)
                        break;
                }
        }
-       spin_unlock_irq(&se_tpg->acl_node_lock);
+       mutex_unlock(&se_tpg->acl_node_mutex);
        return found;
 }
 
-static struct se_node_acl *ft_tpg_alloc_fabric_acl(struct se_portal_group *se_tpg)
-{
-       struct ft_node_acl *acl;
-
-       acl = kzalloc(sizeof(*acl), GFP_KERNEL);
-       if (!acl) {
-               pr_err("Unable to allocate struct ft_node_acl\n");
-               return NULL;
-       }
-       pr_debug("acl %p\n", acl);
-       return &acl->se_node_acl;
-}
-
-static void ft_tpg_release_fabric_acl(struct se_portal_group *se_tpg,
-                                     struct se_node_acl *se_acl)
-{
-       struct ft_node_acl *acl = container_of(se_acl,
-                               struct ft_node_acl, se_node_acl);
-
-       pr_debug("acl %p\n", acl);
-       kfree(acl);
-}
-
 /*
  * local_port port_group (tpg) ops.
  */
@@ -333,8 +276,7 @@ static struct se_portal_group *ft_add_tpg(
                return NULL;
        }
 
-       ret = core_tpg_register(&ft_fabric_ops, wwn, &tpg->se_tpg,
-                               tpg, TRANSPORT_TPG_TYPE_NORMAL);
+       ret = core_tpg_register(wwn, &tpg->se_tpg, SCSI_PROTOCOL_FCP);
        if (ret < 0) {
                destroy_workqueue(wq);
                kfree(tpg);
@@ -459,6 +401,11 @@ static struct configfs_attribute *ft_wwn_attrs[] = {
        NULL,
 };
 
+static inline struct ft_tpg *ft_tpg(struct se_portal_group *se_tpg)
+{
+       return container_of(se_tpg, struct ft_tpg, se_tpg);
+}
+
 static char *ft_get_fabric_name(void)
 {
        return "fc";
@@ -466,25 +413,16 @@ static char *ft_get_fabric_name(void)
 
 static char *ft_get_fabric_wwn(struct se_portal_group *se_tpg)
 {
-       struct ft_tpg *tpg = se_tpg->se_tpg_fabric_ptr;
-
-       return tpg->lport_wwn->name;
+       return ft_tpg(se_tpg)->lport_wwn->name;
 }
 
 static u16 ft_get_tag(struct se_portal_group *se_tpg)
 {
-       struct ft_tpg *tpg = se_tpg->se_tpg_fabric_ptr;
-
        /*
         * This tag is used when forming SCSI Name identifier in EVPD=1 0x83
         * to represent the SCSI Target Port.
         */
-       return tpg->index;
-}
-
-static u32 ft_get_default_depth(struct se_portal_group *se_tpg)
-{
-       return 1;
+       return ft_tpg(se_tpg)->index;
 }
 
 static int ft_check_false(struct se_portal_group *se_tpg)
@@ -498,28 +436,20 @@ static void ft_set_default_node_attr(struct se_node_acl *se_nacl)
 
 static u32 ft_tpg_get_inst_index(struct se_portal_group *se_tpg)
 {
-       struct ft_tpg *tpg = se_tpg->se_tpg_fabric_ptr;
-
-       return tpg->index;
+       return ft_tpg(se_tpg)->index;
 }
 
 static const struct target_core_fabric_ops ft_fabric_ops = {
        .module =                       THIS_MODULE,
        .name =                         "fc",
+       .node_acl_size =                sizeof(struct ft_node_acl),
        .get_fabric_name =              ft_get_fabric_name,
-       .get_fabric_proto_ident =       fc_get_fabric_proto_ident,
        .tpg_get_wwn =                  ft_get_fabric_wwn,
        .tpg_get_tag =                  ft_get_tag,
-       .tpg_get_default_depth =        ft_get_default_depth,
-       .tpg_get_pr_transport_id =      fc_get_pr_transport_id,
-       .tpg_get_pr_transport_id_len =  fc_get_pr_transport_id_len,
-       .tpg_parse_pr_out_transport_id = fc_parse_pr_out_transport_id,
        .tpg_check_demo_mode =          ft_check_false,
        .tpg_check_demo_mode_cache =    ft_check_false,
        .tpg_check_demo_mode_write_protect = ft_check_false,
        .tpg_check_prod_mode_write_protect = ft_check_false,
-       .tpg_alloc_fabric_acl =         ft_tpg_alloc_fabric_acl,
-       .tpg_release_fabric_acl =       ft_tpg_release_fabric_acl,
        .tpg_get_inst_index =           ft_tpg_get_inst_index,
        .check_stop_free =              ft_check_stop_free,
        .release_cmd =                  ft_release_cmd,
@@ -530,7 +460,6 @@ static const struct target_core_fabric_ops ft_fabric_ops = {
        .write_pending =                ft_write_pending,
        .write_pending_status =         ft_write_pending_status,
        .set_default_node_attributes =  ft_set_default_node_attr,
-       .get_task_tag =                 ft_get_task_tag,
        .get_cmd_state =                ft_get_cmd_state,
        .queue_data_in =                ft_queue_data_in,
        .queue_status =                 ft_queue_status,
@@ -544,12 +473,7 @@ static const struct target_core_fabric_ops ft_fabric_ops = {
        .fabric_drop_wwn =              &ft_del_wwn,
        .fabric_make_tpg =              &ft_add_tpg,
        .fabric_drop_tpg =              &ft_del_tpg,
-       .fabric_post_link =             NULL,
-       .fabric_pre_unlink =            NULL,
-       .fabric_make_np =               NULL,
-       .fabric_drop_np =               NULL,
-       .fabric_make_nodeacl =          &ft_add_acl,
-       .fabric_drop_nodeacl =          &ft_del_acl,
+       .fabric_init_nodeacl =          &ft_init_nodeacl,
 
        .tfc_wwn_attrs                  = ft_wwn_attrs,
        .tfc_tpg_nacl_base_attrs        = ft_nacl_base_attrs,
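
The tcm_fc conversion above drops the stored se_tpg_fabric_ptr back-pointer and recovers the fabric-private TPG with the new ft_tpg() container_of() accessor instead. A minimal, self-contained sketch of that pattern, using hypothetical struct names and a simplified container_of() rather than the kernel's own macro:

    #include <stddef.h>

    struct core_tpg { int tag; };              /* stand-in for the embedded core object */

    struct my_tpg {
            int index;
            struct core_tpg se_tpg;            /* core object embedded in the wrapper */
    };

    /* simplified; the kernel macro in <linux/kernel.h> also adds type checking */
    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    static struct my_tpg *to_my_tpg(struct core_tpg *se_tpg)
    {
            /* recover the wrapper from its embedded member, no back-pointer needed */
            return container_of(se_tpg, struct my_tpg, se_tpg);
    }

The tcm_usb_gadget and vhost-scsi hunks below follow the same direction: per-fabric boilerplate (proto-ident helpers, ACL alloc/release, default depth) moves into the target core, and core_tpg_register() now takes the SCSI protocol identifier directly.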
index fe585d1cce231c246150d5d639837fab68bbb09f..4b0fedd6bd4be2236f2beb636e05251345687c60 100644 (file)
@@ -44,7 +44,6 @@
 
 #include <target/target_core_base.h>
 #include <target/target_core_fabric.h>
-#include <target/target_core_configfs.h>
 #include <target/configfs_macros.h>
 
 #include "tcm_fc.h"
index f2a616d4f2c481ef568abfa5faff1a0063ebf36e..31a9e3fb98c5321de2e6b774c875845638c81862 100644 (file)
@@ -36,7 +36,6 @@
 
 #include <target/target_core_base.h>
 #include <target/target_core_fabric.h>
-#include <target/target_core_configfs.h>
 #include <target/configfs_macros.h>
 
 #include "tcm_fc.h"
index 2e6716104d3fa8c9aa45f3a39fdafe8c355d4ead..5820e851392798e9d550241dafc8a00ec76898e3 100644 (file)
@@ -119,7 +119,7 @@ exit:
        return ret;
 }
 
-static struct kernel_param_ops duration_ops = {
+static const struct kernel_param_ops duration_ops = {
        .set = duration_set,
        .get = param_get_int,
 };
@@ -167,7 +167,7 @@ exit_win:
        return ret;
 }
 
-static struct kernel_param_ops window_size_ops = {
+static const struct kernel_param_ops window_size_ops = {
        .set = window_size_set,
        .get = param_get_int,
 };
index f78a87b07872ffad69234d680953e1236f8dad81..bb809cf3661770e63e0a50ceb19017a0ea087d0f 100644 (file)
@@ -1345,7 +1345,7 @@ static int param_get_vmidfilter(char *buffer, const struct kernel_param *kp)
 
 #define param_check_vmidfilter(name, p) __param_check(name, p, void)
 
-static struct kernel_param_ops param_ops_vmidfilter = {
+static const struct kernel_param_ops param_ops_vmidfilter = {
        .set = param_set_vmidfilter,
        .get = param_get_vmidfilter,
 };
index 7a3d146a5f0efc0dbc3b94e854adb4d81c85da4d..a9d837f83ce832539a442643f10ec4221d0fa117 100644 (file)
@@ -302,7 +302,7 @@ static int xen_initial_domain_console_init(void)
 static void xen_console_update_evtchn(struct xencons_info *info)
 {
        if (xen_hvm_domain()) {
-               uint64_t v;
+               uint64_t v = 0;
                int err;
 
                err = hvm_get_parameter(HVM_PARAM_CONSOLE_EVTCHN, &v);
index 3774600741d844f7d21ab98d3186d4fde25c0db3..9325262289f9e191a1c3ecb5d8024df56b7e0d21 100644 (file)
@@ -640,25 +640,7 @@ err_destroy_ports:
        put_tty_driver(channel_driver);
        return ret;
 }
-
-static void dashtty_exit(void)
-{
-       int nport;
-       struct dashtty_port *dport;
-
-       del_timer_sync(&put_timer);
-       kthread_stop(dashtty_thread);
-       del_timer_sync(&poll_timer);
-       tty_unregister_driver(channel_driver);
-       for (nport = 0; nport < NUM_TTY_CHANNELS; nport++) {
-               dport = &dashtty_ports[nport];
-               tty_port_destroy(&dport->port);
-       }
-       put_tty_driver(channel_driver);
-}
-
-module_init(dashtty_init);
-module_exit(dashtty_exit);
+device_initcall(dashtty_init);
 
 #ifdef CONFIG_DA_CONSOLE
 
index 978204333c94b364b843c723c45dff353a5dbdfb..d75a66c7275098184b53344d0729c9dd2ac50a85 100644 (file)
@@ -22,6 +22,7 @@
 #include <linux/pm_runtime.h>
 #include <linux/console.h>
 #include <linux/pm_qos.h>
+#include <linux/pm_wakeirq.h>
 #include <linux/dma-mapping.h>
 
 #include "8250.h"
@@ -552,17 +553,6 @@ static void omap8250_uart_qos_work(struct work_struct *work)
        pm_qos_update_request(&priv->pm_qos_request, priv->latency);
 }
 
-static irqreturn_t omap_wake_irq(int irq, void *dev_id)
-{
-       struct uart_port *port = dev_id;
-       int ret;
-
-       ret = port->handle_irq(port);
-       if (ret)
-               return IRQ_HANDLED;
-       return IRQ_NONE;
-}
-
 #ifdef CONFIG_SERIAL_8250_DMA
 static int omap_8250_dma_handle_irq(struct uart_port *port);
 #endif
@@ -596,11 +586,9 @@ static int omap_8250_startup(struct uart_port *port)
        int ret;
 
        if (priv->wakeirq) {
-               ret = request_irq(priv->wakeirq, omap_wake_irq,
-                                 port->irqflags, "uart wakeup irq", port);
+               ret = dev_pm_set_dedicated_wake_irq(port->dev, priv->wakeirq);
                if (ret)
                        return ret;
-               disable_irq(priv->wakeirq);
        }
 
        pm_runtime_get_sync(port->dev);
@@ -649,8 +637,7 @@ static int omap_8250_startup(struct uart_port *port)
 err:
        pm_runtime_mark_last_busy(port->dev);
        pm_runtime_put_autosuspend(port->dev);
-       if (priv->wakeirq)
-               free_irq(priv->wakeirq, port);
+       dev_pm_clear_wake_irq(port->dev);
        return ret;
 }
 
@@ -682,10 +669,8 @@ static void omap_8250_shutdown(struct uart_port *port)
 
        pm_runtime_mark_last_busy(port->dev);
        pm_runtime_put_autosuspend(port->dev);
-
        free_irq(port->irq, port);
-       if (priv->wakeirq)
-               free_irq(priv->wakeirq, port);
+       dev_pm_clear_wake_irq(port->dev);
 }
 
 static void omap_8250_throttle(struct uart_port *port)
@@ -1226,31 +1211,6 @@ static int omap8250_remove(struct platform_device *pdev)
        return 0;
 }
 
-#ifdef CONFIG_PM
-
-static inline void omap8250_enable_wakeirq(struct omap8250_priv *priv,
-                                          bool enable)
-{
-       if (!priv->wakeirq)
-               return;
-
-       if (enable)
-               enable_irq(priv->wakeirq);
-       else
-               disable_irq_nosync(priv->wakeirq);
-}
-
-static void omap8250_enable_wakeup(struct omap8250_priv *priv,
-                                  bool enable)
-{
-       if (enable == priv->wakeups_enabled)
-               return;
-
-       omap8250_enable_wakeirq(priv, enable);
-       priv->wakeups_enabled = enable;
-}
-#endif
-
 #ifdef CONFIG_PM_SLEEP
 static int omap8250_prepare(struct device *dev)
 {
@@ -1277,11 +1237,6 @@ static int omap8250_suspend(struct device *dev)
 
        serial8250_suspend_port(priv->line);
        flush_work(&priv->qos_work);
-
-       if (device_may_wakeup(dev))
-               omap8250_enable_wakeup(priv, true);
-       else
-               omap8250_enable_wakeup(priv, false);
        return 0;
 }
 
@@ -1289,9 +1244,6 @@ static int omap8250_resume(struct device *dev)
 {
        struct omap8250_priv *priv = dev_get_drvdata(dev);
 
-       if (device_may_wakeup(dev))
-               omap8250_enable_wakeup(priv, false);
-
        serial8250_resume_port(priv->line);
        return 0;
 }
@@ -1333,7 +1285,6 @@ static int omap8250_runtime_suspend(struct device *dev)
                        return -EBUSY;
        }
 
-       omap8250_enable_wakeup(priv, true);
        if (up->dma)
                omap_8250_rx_dma(up, UART_IIR_RX_TIMEOUT);
 
@@ -1354,7 +1305,6 @@ static int omap8250_runtime_resume(struct device *dev)
                return 0;
 
        up = serial8250_get_port(priv->line);
-       omap8250_enable_wakeup(priv, false);
        loss_cntx = omap8250_lost_context(up);
 
        if (loss_cntx)
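
Both OMAP UART drivers in this series (8250_omap above and omap-serial below) drop their hand-rolled wake IRQ handling — request_irq() plus enable_irq()/disable_irq() bookkeeping around suspend — in favour of the wake IRQ helpers from <linux/pm_wakeirq.h>, letting the PM core arm and disarm the interrupt. A condensed sketch of the replacement pattern, with hypothetical wrapper names rather than the drivers' own code:

    #include <linux/device.h>
    #include <linux/pm_wakeirq.h>

    static int example_enable_wakeup(struct device *dev, int wakeirq)
    {
            /* the PM core now owns enabling/disabling this IRQ across suspend */
            return wakeirq ? dev_pm_set_dedicated_wake_irq(dev, wakeirq) : 0;
    }

    static void example_disable_wakeup(struct device *dev)
    {
            /* safe to call unconditionally, as the shutdown paths above do */
            dev_pm_clear_wake_irq(dev);
    }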
index 7f49172ccd8673b316b3b82ab21282885ef983eb..7a2172b5e93cd296674c9b78a5fd31594db0f492 100644 (file)
@@ -38,6 +38,7 @@
 #include <linux/serial_core.h>
 #include <linux/irq.h>
 #include <linux/pm_runtime.h>
+#include <linux/pm_wakeirq.h>
 #include <linux/of.h>
 #include <linux/of_irq.h>
 #include <linux/gpio.h>
@@ -160,7 +161,6 @@ struct uart_omap_port {
        unsigned long           port_activity;
        int                     context_loss_cnt;
        u32                     errata;
-       u8                      wakeups_enabled;
        u32                     features;
 
        int                     rts_gpio;
@@ -209,28 +209,11 @@ static int serial_omap_get_context_loss_count(struct uart_omap_port *up)
        return pdata->get_context_loss_count(up->dev);
 }
 
-static inline void serial_omap_enable_wakeirq(struct uart_omap_port *up,
-                                      bool enable)
-{
-       if (!up->wakeirq)
-               return;
-
-       if (enable)
-               enable_irq(up->wakeirq);
-       else
-               disable_irq_nosync(up->wakeirq);
-}
-
+/* REVISIT: Remove this when omap3 boots in device tree only mode */
 static void serial_omap_enable_wakeup(struct uart_omap_port *up, bool enable)
 {
        struct omap_uart_port_info *pdata = dev_get_platdata(up->dev);
 
-       if (enable == up->wakeups_enabled)
-               return;
-
-       serial_omap_enable_wakeirq(up, enable);
-       up->wakeups_enabled = enable;
-
        if (!pdata || !pdata->enable_wakeup)
                return;
 
@@ -750,13 +733,11 @@ static int serial_omap_startup(struct uart_port *port)
 
        /* Optional wake-up IRQ */
        if (up->wakeirq) {
-               retval = request_irq(up->wakeirq, serial_omap_irq,
-                                    up->port.irqflags, up->name, up);
+               retval = dev_pm_set_dedicated_wake_irq(up->dev, up->wakeirq);
                if (retval) {
                        free_irq(up->port.irq, up);
                        return retval;
                }
-               disable_irq(up->wakeirq);
        }
 
        dev_dbg(up->port.dev, "serial_omap_startup+%d\n", up->port.line);
@@ -845,8 +826,7 @@ static void serial_omap_shutdown(struct uart_port *port)
        pm_runtime_mark_last_busy(up->dev);
        pm_runtime_put_autosuspend(up->dev);
        free_irq(up->port.irq, up);
-       if (up->wakeirq)
-               free_irq(up->wakeirq, up);
+       dev_pm_clear_wake_irq(up->dev);
 }
 
 static void serial_omap_uart_qos_work(struct work_struct *work)
@@ -1139,13 +1119,6 @@ serial_omap_pm(struct uart_port *port, unsigned int state,
        serial_out(up, UART_EFR, efr);
        serial_out(up, UART_LCR, 0);
 
-       if (!device_may_wakeup(up->dev)) {
-               if (!state)
-                       pm_runtime_forbid(up->dev);
-               else
-                       pm_runtime_allow(up->dev);
-       }
-
        pm_runtime_mark_last_busy(up->dev);
        pm_runtime_put_autosuspend(up->dev);
 }
index 2847108cc8ddd228633d49bf97c40bfde6e4e3f3..b5b427888b2453d88e59f7d002ab8d4e512d23e6 100644 (file)
@@ -988,7 +988,7 @@ static int sysrq_reset_seq_param_set(const char *buffer,
        return 0;
 }
 
-static struct kernel_param_ops param_ops_sysrq_reset_seq = {
+static const struct kernel_param_ops param_ops_sysrq_reset_seq = {
        .get    = param_get_ushort,
        .set    = sysrq_reset_seq_param_set,
 };
index 888998a7fe314c4ee3f25298f2c81f0d429365c4..a2ae88dbda784f257923a730caf20c2986b22798 100644 (file)
@@ -1599,7 +1599,7 @@ static void cmvs_file_name(struct uea_softc *sc, char *const cmv_name, int ver)
        char file_arr[] = "CMVxy.bin";
        char *file;
 
-       kparam_block_sysfs_write(cmv_file);
+       kernel_param_lock(THIS_MODULE);
        /* set proper name corresponding modem version and line type */
        if (cmv_file[sc->modem_index] == NULL) {
                if (UEA_CHIP_VERSION(sc) == ADI930)
@@ -1618,7 +1618,7 @@ static void cmvs_file_name(struct uea_softc *sc, char *const cmv_name, int ver)
        strlcat(cmv_name, file, UEA_FW_NAME_MAX);
        if (ver == 2)
                strlcat(cmv_name, ".v2", UEA_FW_NAME_MAX);
-       kparam_unblock_sysfs_write(cmv_file);
+       kernel_param_unlock(THIS_MODULE);
 }
 
 static int request_cmvs_old(struct uea_softc *sc,
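
This hunk (and the matching vt8623fb one further down) replaces the per-parameter kparam_block_sysfs_write()/kparam_unblock_sysfs_write() calls with the module-wide kernel_param_lock()/kernel_param_unlock() API, which holds off concurrent sysfs writes to any of the module's parameters. A minimal sketch of guarding a writable parameter with the new calls — the parameter shown is hypothetical, not the driver's:

    #include <linux/module.h>
    #include <linux/moduleparam.h>

    static char *fw_name = "default.bin";
    module_param(fw_name, charp, 0644);        /* writable through sysfs */

    static void example_use_param(void)
    {
            /* take a consistent snapshot while sysfs writers are held off */
            kernel_param_lock(THIS_MODULE);
            pr_info("using firmware %s\n", fw_name);
            kernel_param_unlock(THIS_MODULE);
    }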
index 3cc109f3c9c803638b8a47ffc1aa4aabb8397a6b..d2259c6639960c55c76239401148bbcc2ec1f449 100644 (file)
@@ -2936,7 +2936,7 @@ int fsg_common_create_lun(struct fsg_common *common, struct fsg_lun_config *cfg,
        if (fsg_lun_is_open(lun)) {
                p = "(error)";
                if (pathbuf) {
-                       p = d_path(&lun->filp->f_path, pathbuf, PATH_MAX);
+                       p = file_path(lun->filp, pathbuf, PATH_MAX);
                        if (IS_ERR(p))
                                p = "(error)";
                }
index 648f9e489b39bb3a291f091771dcd099013176db..d62683017cf3c1c052f78eb243799e1ca46ca8d5 100644 (file)
@@ -341,7 +341,7 @@ ssize_t fsg_show_file(struct fsg_lun *curlun, struct rw_semaphore *filesem,
 
        down_read(filesem);
        if (fsg_lun_is_open(curlun)) {  /* Get the complete pathname */
-               p = d_path(&curlun->filp->f_path, buf, PAGE_SIZE - 1);
+               p = file_path(curlun->filp, buf, PAGE_SIZE - 1);
                if (IS_ERR(p))
                        rc = PTR_ERR(p);
                else {
index 6ce932f90ef84ee961eee50dd95ec4cc83bfacad..c3c48088fced7b90703b068b2527635a6a5e01ea 100644 (file)
@@ -20,7 +20,6 @@
 #include <target/target_core_base.h>
 #include <target/target_core_fabric.h>
 #include <target/target_core_fabric_configfs.h>
-#include <target/target_core_configfs.h>
 #include <target/configfs_macros.h>
 #include <asm/unaligned.h>
 
@@ -28,8 +27,6 @@
 
 USB_GADGET_COMPOSITE_OPTIONS();
 
-static const struct target_core_fabric_ops usbg_ops;
-
 static inline struct f_uas *to_f_uas(struct usb_function *f)
 {
        return container_of(f, struct f_uas, function);
@@ -1111,6 +1108,7 @@ static int usbg_submit_command(struct f_uas *fu,
        memcpy(cmd->cmd_buf, cmd_iu->cdb, cmd_len);
 
        cmd->tag = be16_to_cpup(&cmd_iu->tag);
+       cmd->se_cmd.tag = cmd->tag;
        if (fu->flags & USBG_USE_STREAMS) {
                if (cmd->tag > UASP_SS_EP_COMP_NUM_STREAMS)
                        goto err;
@@ -1244,6 +1242,7 @@ static int bot_submit_command(struct f_uas *fu,
        cmd->unpacked_lun = cbw->Lun;
        cmd->is_read = cbw->Flags & US_BULK_FLAG_IN ? 1 : 0;
        cmd->data_len = le32_to_cpu(cbw->DataTransferLength);
+       cmd->se_cmd.tag = le32_to_cpu(cmd->bot_tag);
 
        INIT_WORK(&cmd->work, bot_cmd_work);
        ret = queue_work(tpg->workqueue, &cmd->work);
@@ -1273,23 +1272,6 @@ static char *usbg_get_fabric_name(void)
        return "usb_gadget";
 }
 
-static u8 usbg_get_fabric_proto_ident(struct se_portal_group *se_tpg)
-{
-       struct usbg_tpg *tpg = container_of(se_tpg,
-                               struct usbg_tpg, se_tpg);
-       struct usbg_tport *tport = tpg->tport;
-       u8 proto_id;
-
-       switch (tport->tport_proto_id) {
-       case SCSI_PROTOCOL_SAS:
-       default:
-               proto_id = sas_get_fabric_proto_ident(se_tpg);
-               break;
-       }
-
-       return proto_id;
-}
-
 static char *usbg_get_fabric_wwn(struct se_portal_group *se_tpg)
 {
        struct usbg_tpg *tpg = container_of(se_tpg,
@@ -1306,97 +1288,6 @@ static u16 usbg_get_tag(struct se_portal_group *se_tpg)
        return tpg->tport_tpgt;
 }
 
-static u32 usbg_get_default_depth(struct se_portal_group *se_tpg)
-{
-       return 1;
-}
-
-static u32 usbg_get_pr_transport_id(
-       struct se_portal_group *se_tpg,
-       struct se_node_acl *se_nacl,
-       struct t10_pr_registration *pr_reg,
-       int *format_code,
-       unsigned char *buf)
-{
-       struct usbg_tpg *tpg = container_of(se_tpg,
-                               struct usbg_tpg, se_tpg);
-       struct usbg_tport *tport = tpg->tport;
-       int ret = 0;
-
-       switch (tport->tport_proto_id) {
-       case SCSI_PROTOCOL_SAS:
-       default:
-               ret = sas_get_pr_transport_id(se_tpg, se_nacl, pr_reg,
-                                       format_code, buf);
-               break;
-       }
-
-       return ret;
-}
-
-static u32 usbg_get_pr_transport_id_len(
-       struct se_portal_group *se_tpg,
-       struct se_node_acl *se_nacl,
-       struct t10_pr_registration *pr_reg,
-       int *format_code)
-{
-       struct usbg_tpg *tpg = container_of(se_tpg,
-                               struct usbg_tpg, se_tpg);
-       struct usbg_tport *tport = tpg->tport;
-       int ret = 0;
-
-       switch (tport->tport_proto_id) {
-       case SCSI_PROTOCOL_SAS:
-       default:
-               ret = sas_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,
-                                       format_code);
-               break;
-       }
-
-       return ret;
-}
-
-static char *usbg_parse_pr_out_transport_id(
-       struct se_portal_group *se_tpg,
-       const char *buf,
-       u32 *out_tid_len,
-       char **port_nexus_ptr)
-{
-       struct usbg_tpg *tpg = container_of(se_tpg,
-                               struct usbg_tpg, se_tpg);
-       struct usbg_tport *tport = tpg->tport;
-       char *tid = NULL;
-
-       switch (tport->tport_proto_id) {
-       case SCSI_PROTOCOL_SAS:
-       default:
-               tid = sas_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,
-                                       port_nexus_ptr);
-       }
-
-       return tid;
-}
-
-static struct se_node_acl *usbg_alloc_fabric_acl(struct se_portal_group *se_tpg)
-{
-       struct usbg_nacl *nacl;
-
-       nacl = kzalloc(sizeof(struct usbg_nacl), GFP_KERNEL);
-       if (!nacl)
-               return NULL;
-
-       return &nacl->se_node_acl;
-}
-
-static void usbg_release_fabric_acl(
-       struct se_portal_group *se_tpg,
-       struct se_node_acl *se_nacl)
-{
-       struct usbg_nacl *nacl = container_of(se_nacl,
-                       struct usbg_nacl, se_node_acl);
-       kfree(nacl);
-}
-
 static u32 usbg_tpg_get_inst_index(struct se_portal_group *se_tpg)
 {
        return 1;
@@ -1447,18 +1338,6 @@ static void usbg_set_default_node_attrs(struct se_node_acl *nacl)
        return;
 }
 
-static u32 usbg_get_task_tag(struct se_cmd *se_cmd)
-{
-       struct usbg_cmd *cmd = container_of(se_cmd, struct usbg_cmd,
-                       se_cmd);
-       struct f_uas *fu = cmd->fu;
-
-       if (fu->flags & USBG_IS_BOT)
-               return le32_to_cpu(cmd->bot_tag);
-       else
-               return cmd->tag;
-}
-
 static int usbg_get_cmd_state(struct se_cmd *se_cmd)
 {
        return 0;
@@ -1488,50 +1367,11 @@ static const char *usbg_check_wwn(const char *name)
        return n;
 }
 
-static struct se_node_acl *usbg_make_nodeacl(
-       struct se_portal_group *se_tpg,
-       struct config_group *group,
-       const char *name)
-{
-       struct se_node_acl *se_nacl, *se_nacl_new;
-       struct usbg_nacl *nacl;
-       u64 wwpn = 0;
-       u32 nexus_depth;
-       const char *wnn_name;
-
-       wnn_name = usbg_check_wwn(name);
-       if (!wnn_name)
-               return ERR_PTR(-EINVAL);
-       se_nacl_new = usbg_alloc_fabric_acl(se_tpg);
-       if (!(se_nacl_new))
-               return ERR_PTR(-ENOMEM);
-
-       nexus_depth = 1;
-       /*
-        * se_nacl_new may be released by core_tpg_add_initiator_node_acl()
-        * when converting a NodeACL from demo mode -> explict
-        */
-       se_nacl = core_tpg_add_initiator_node_acl(se_tpg, se_nacl_new,
-                               name, nexus_depth);
-       if (IS_ERR(se_nacl)) {
-               usbg_release_fabric_acl(se_tpg, se_nacl_new);
-               return se_nacl;
-       }
-       /*
-        * Locate our struct usbg_nacl and set the FC Nport WWPN
-        */
-       nacl = container_of(se_nacl, struct usbg_nacl, se_node_acl);
-       nacl->iport_wwpn = wwpn;
-       snprintf(nacl->iport_name, sizeof(nacl->iport_name), "%s", name);
-       return se_nacl;
-}
-
-static void usbg_drop_nodeacl(struct se_node_acl *se_acl)
+static int usbg_init_nodeacl(struct se_node_acl *se_nacl, const char *name)
 {
-       struct usbg_nacl *nacl = container_of(se_acl,
-                               struct usbg_nacl, se_node_acl);
-       core_tpg_del_initiator_node_acl(se_acl->se_tpg, se_acl, 1);
-       kfree(nacl);
+       if (!usbg_check_wwn(name))
+               return -EINVAL;
+       return 0;
 }
 
 struct usbg_tpg *the_only_tpg_I_currently_have;
@@ -1571,8 +1411,11 @@ static struct se_portal_group *usbg_make_tpg(
        tpg->tport = tport;
        tpg->tport_tpgt = tpgt;
 
-       ret = core_tpg_register(&usbg_ops, wwn, &tpg->se_tpg, tpg,
-                               TRANSPORT_TPG_TYPE_NORMAL);
+       /*
+        * SPC doesn't assign a protocol identifier for USB-SCSI, so we
+        * pretend to be SAS..
+        */
+       ret = core_tpg_register(wwn, &tpg->se_tpg, SCSI_PROTOCOL_SAS);
        if (ret < 0) {
                destroy_workqueue(tpg->workqueue);
                kfree(tpg);
@@ -1866,19 +1709,12 @@ static const struct target_core_fabric_ops usbg_ops = {
        .module                         = THIS_MODULE,
        .name                           = "usb_gadget",
        .get_fabric_name                = usbg_get_fabric_name,
-       .get_fabric_proto_ident         = usbg_get_fabric_proto_ident,
        .tpg_get_wwn                    = usbg_get_fabric_wwn,
        .tpg_get_tag                    = usbg_get_tag,
-       .tpg_get_default_depth          = usbg_get_default_depth,
-       .tpg_get_pr_transport_id        = usbg_get_pr_transport_id,
-       .tpg_get_pr_transport_id_len    = usbg_get_pr_transport_id_len,
-       .tpg_parse_pr_out_transport_id  = usbg_parse_pr_out_transport_id,
        .tpg_check_demo_mode            = usbg_check_true,
        .tpg_check_demo_mode_cache      = usbg_check_false,
        .tpg_check_demo_mode_write_protect = usbg_check_false,
        .tpg_check_prod_mode_write_protect = usbg_check_false,
-       .tpg_alloc_fabric_acl           = usbg_alloc_fabric_acl,
-       .tpg_release_fabric_acl         = usbg_release_fabric_acl,
        .tpg_get_inst_index             = usbg_tpg_get_inst_index,
        .release_cmd                    = usbg_release_cmd,
        .shutdown_session               = usbg_shutdown_session,
@@ -1888,7 +1724,6 @@ static const struct target_core_fabric_ops usbg_ops = {
        .write_pending                  = usbg_send_write_request,
        .write_pending_status           = usbg_write_pending_status,
        .set_default_node_attributes    = usbg_set_default_node_attrs,
-       .get_task_tag                   = usbg_get_task_tag,
        .get_cmd_state                  = usbg_get_cmd_state,
        .queue_data_in                  = usbg_send_read_response,
        .queue_status                   = usbg_send_status_response,
@@ -1902,10 +1737,7 @@ static const struct target_core_fabric_ops usbg_ops = {
        .fabric_drop_tpg                = usbg_drop_tpg,
        .fabric_post_link               = usbg_port_link,
        .fabric_pre_unlink              = usbg_port_unlink,
-       .fabric_make_np                 = NULL,
-       .fabric_drop_np                 = NULL,
-       .fabric_make_nodeacl            = usbg_make_nodeacl,
-       .fabric_drop_nodeacl            = usbg_drop_nodeacl,
+       .fabric_init_nodeacl            = usbg_init_nodeacl,
 
        .tfc_wwn_attrs                  = usbg_wwn_attrs,
        .tfc_tpg_base_attrs             = usbg_base_attrs,
index 9fb3544cc80f088d547d264945944043508418e3..0b749e1aa2f10e56ac667c19145f09446627ad66 100644 (file)
@@ -24,15 +24,6 @@ enum {
 #define USB_G_ALT_INT_BBB       0
 #define USB_G_ALT_INT_UAS       1
 
-struct usbg_nacl {
-       /* Binary World Wide unique Port Name for SAS Initiator port */
-       u64 iport_wwpn;
-       /* ASCII formatted WWPN for Sas Initiator port */
-       char iport_name[USBG_NAMELEN];
-       /* Returned by usbg_make_nodeacl() */
-       struct se_node_acl se_node_acl;
-};
-
 struct tcm_usbg_nexus {
        struct se_session *tvn_se_sess;
 };
@@ -52,8 +43,6 @@ struct usbg_tpg {
 };
 
 struct usbg_tport {
-       /* SCSI protocol the tport is providing */
-       u8 tport_proto_id;
        /* Binary World Wide unique Port Name for SAS Target port */
        u64 tport_wwpn;
        /* ASCII formatted WWPN for SAS Target port */
index 017a1e8a8f6fcf4eb092fa48584aef00dddf5f17..533eaf04f12faff85ba90791647c08a1f7a17599 100644 (file)
@@ -32,3 +32,18 @@ config VHOST
        ---help---
          This option is selected by any driver which needs to access
          the core of vhost.
+
+config VHOST_CROSS_ENDIAN_LEGACY
+       bool "Cross-endian support for vhost"
+       default n
+       ---help---
+         This option allows vhost to support guests with a different byte
+         ordering from the host while using legacy virtio.
+
+         Userspace programs can control the feature using the
+         VHOST_SET_VRING_ENDIAN and VHOST_GET_VRING_ENDIAN ioctls.
+
+         This is only useful on a few platforms (ppc64 and arm64). Since it
+         adds some overhead, it is disabled by default.
+
+         If unsure, say "N".
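
The help text above refers to the new VHOST_SET_VRING_ENDIAN/VHOST_GET_VRING_ENDIAN ioctls, which are wired up in the vhost.c hunks later in this diff. A hypothetical userspace sketch of forcing big-endian rings for a legacy cross-endian guest; note that the kernel rejects the call with -EBUSY once a backend has been attached to the ring:

    #include <sys/ioctl.h>
    #include <linux/vhost.h>

    static int set_ring_big_endian(int vhost_fd)
    {
            struct vhost_vring_state state = {
                    .index = 0,                    /* virtqueue index */
                    .num   = VHOST_VRING_BIG_ENDIAN,
            };

            /* must happen while the ring is stopped (no backend set yet) */
            return ioctl(vhost_fd, VHOST_SET_VRING_ENDIAN, &state);
    }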
index 55722feeb898d12c7724bb3142ced1bf3691ee91..dfcc02c936485199c44d1545bf95b73bfe23ff97 100644 (file)
@@ -43,7 +43,6 @@
 #include <target/target_core_base.h>
 #include <target/target_core_fabric.h>
 #include <target/target_core_fabric_configfs.h>
-#include <target/target_core_configfs.h>
 #include <target/configfs_macros.h>
 #include <linux/vhost.h>
 #include <linux/virtio_scsi.h>
@@ -117,15 +116,6 @@ struct vhost_scsi_nexus {
        struct se_session *tvn_se_sess;
 };
 
-struct vhost_scsi_nacl {
-       /* Binary World Wide unique Port Name for Vhost Initiator port */
-       u64 iport_wwpn;
-       /* ASCII formatted WWPN for Sas Initiator port */
-       char iport_name[VHOST_SCSI_NAMELEN];
-       /* Returned by vhost_scsi_make_nodeacl() */
-       struct se_node_acl se_node_acl;
-};
-
 struct vhost_scsi_tpg {
        /* Vhost port target portal group tag for TCM */
        u16 tport_tpgt;
@@ -218,7 +208,6 @@ struct vhost_scsi {
        int vs_events_nr; /* num of pending events, protected by vq->mutex */
 };
 
-static struct target_core_fabric_ops vhost_scsi_ops;
 static struct workqueue_struct *vhost_scsi_workqueue;
 
 /* Global spinlock to protect vhost_scsi TPG list for vhost IOCTL access */
@@ -299,28 +288,6 @@ static char *vhost_scsi_get_fabric_name(void)
        return "vhost";
 }
 
-static u8 vhost_scsi_get_fabric_proto_ident(struct se_portal_group *se_tpg)
-{
-       struct vhost_scsi_tpg *tpg = container_of(se_tpg,
-                               struct vhost_scsi_tpg, se_tpg);
-       struct vhost_scsi_tport *tport = tpg->tport;
-
-       switch (tport->tport_proto_id) {
-       case SCSI_PROTOCOL_SAS:
-               return sas_get_fabric_proto_ident(se_tpg);
-       case SCSI_PROTOCOL_FCP:
-               return fc_get_fabric_proto_ident(se_tpg);
-       case SCSI_PROTOCOL_ISCSI:
-               return iscsi_get_fabric_proto_ident(se_tpg);
-       default:
-               pr_err("Unknown tport_proto_id: 0x%02x, using"
-                       " SAS emulation\n", tport->tport_proto_id);
-               break;
-       }
-
-       return sas_get_fabric_proto_ident(se_tpg);
-}
-
 static char *vhost_scsi_get_fabric_wwn(struct se_portal_group *se_tpg)
 {
        struct vhost_scsi_tpg *tpg = container_of(se_tpg,
@@ -337,102 +304,6 @@ static u16 vhost_scsi_get_tpgt(struct se_portal_group *se_tpg)
        return tpg->tport_tpgt;
 }
 
-static u32 vhost_scsi_get_default_depth(struct se_portal_group *se_tpg)
-{
-       return 1;
-}
-
-static u32
-vhost_scsi_get_pr_transport_id(struct se_portal_group *se_tpg,
-                             struct se_node_acl *se_nacl,
-                             struct t10_pr_registration *pr_reg,
-                             int *format_code,
-                             unsigned char *buf)
-{
-       struct vhost_scsi_tpg *tpg = container_of(se_tpg,
-                               struct vhost_scsi_tpg, se_tpg);
-       struct vhost_scsi_tport *tport = tpg->tport;
-
-       switch (tport->tport_proto_id) {
-       case SCSI_PROTOCOL_SAS:
-               return sas_get_pr_transport_id(se_tpg, se_nacl, pr_reg,
-                                       format_code, buf);
-       case SCSI_PROTOCOL_FCP:
-               return fc_get_pr_transport_id(se_tpg, se_nacl, pr_reg,
-                                       format_code, buf);
-       case SCSI_PROTOCOL_ISCSI:
-               return iscsi_get_pr_transport_id(se_tpg, se_nacl, pr_reg,
-                                       format_code, buf);
-       default:
-               pr_err("Unknown tport_proto_id: 0x%02x, using"
-                       " SAS emulation\n", tport->tport_proto_id);
-               break;
-       }
-
-       return sas_get_pr_transport_id(se_tpg, se_nacl, pr_reg,
-                       format_code, buf);
-}
-
-static u32
-vhost_scsi_get_pr_transport_id_len(struct se_portal_group *se_tpg,
-                                 struct se_node_acl *se_nacl,
-                                 struct t10_pr_registration *pr_reg,
-                                 int *format_code)
-{
-       struct vhost_scsi_tpg *tpg = container_of(se_tpg,
-                               struct vhost_scsi_tpg, se_tpg);
-       struct vhost_scsi_tport *tport = tpg->tport;
-
-       switch (tport->tport_proto_id) {
-       case SCSI_PROTOCOL_SAS:
-               return sas_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,
-                                       format_code);
-       case SCSI_PROTOCOL_FCP:
-               return fc_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,
-                                       format_code);
-       case SCSI_PROTOCOL_ISCSI:
-               return iscsi_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,
-                                       format_code);
-       default:
-               pr_err("Unknown tport_proto_id: 0x%02x, using"
-                       " SAS emulation\n", tport->tport_proto_id);
-               break;
-       }
-
-       return sas_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,
-                       format_code);
-}
-
-static char *
-vhost_scsi_parse_pr_out_transport_id(struct se_portal_group *se_tpg,
-                                   const char *buf,
-                                   u32 *out_tid_len,
-                                   char **port_nexus_ptr)
-{
-       struct vhost_scsi_tpg *tpg = container_of(se_tpg,
-                               struct vhost_scsi_tpg, se_tpg);
-       struct vhost_scsi_tport *tport = tpg->tport;
-
-       switch (tport->tport_proto_id) {
-       case SCSI_PROTOCOL_SAS:
-               return sas_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,
-                                       port_nexus_ptr);
-       case SCSI_PROTOCOL_FCP:
-               return fc_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,
-                                       port_nexus_ptr);
-       case SCSI_PROTOCOL_ISCSI:
-               return iscsi_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,
-                                       port_nexus_ptr);
-       default:
-               pr_err("Unknown tport_proto_id: 0x%02x, using"
-                       " SAS emulation\n", tport->tport_proto_id);
-               break;
-       }
-
-       return sas_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,
-                       port_nexus_ptr);
-}
-
 static int vhost_scsi_check_prot_fabric_only(struct se_portal_group *se_tpg)
 {
        struct vhost_scsi_tpg *tpg = container_of(se_tpg,
@@ -441,29 +312,6 @@ static int vhost_scsi_check_prot_fabric_only(struct se_portal_group *se_tpg)
        return tpg->tv_fabric_prot_type;
 }
 
-static struct se_node_acl *
-vhost_scsi_alloc_fabric_acl(struct se_portal_group *se_tpg)
-{
-       struct vhost_scsi_nacl *nacl;
-
-       nacl = kzalloc(sizeof(struct vhost_scsi_nacl), GFP_KERNEL);
-       if (!nacl) {
-               pr_err("Unable to allocate struct vhost_scsi_nacl\n");
-               return NULL;
-       }
-
-       return &nacl->se_node_acl;
-}
-
-static void
-vhost_scsi_release_fabric_acl(struct se_portal_group *se_tpg,
-                            struct se_node_acl *se_nacl)
-{
-       struct vhost_scsi_nacl *nacl = container_of(se_nacl,
-                       struct vhost_scsi_nacl, se_node_acl);
-       kfree(nacl);
-}
-
 static u32 vhost_scsi_tpg_get_inst_index(struct se_portal_group *se_tpg)
 {
        return 1;
@@ -521,11 +369,6 @@ static void vhost_scsi_set_default_node_attrs(struct se_node_acl *nacl)
        return;
 }
 
-static u32 vhost_scsi_get_task_tag(struct se_cmd *se_cmd)
-{
-       return 0;
-}
-
 static int vhost_scsi_get_cmd_state(struct se_cmd *se_cmd)
 {
        return 0;
@@ -609,7 +452,7 @@ static void vhost_scsi_free_cmd(struct vhost_scsi_cmd *cmd)
 
 static int vhost_scsi_check_stop_free(struct se_cmd *se_cmd)
 {
-       return target_put_sess_cmd(se_cmd->se_sess, se_cmd);
+       return target_put_sess_cmd(se_cmd);
 }
 
 static void
@@ -970,6 +813,7 @@ static void vhost_scsi_submission_work(struct work_struct *work)
        }
        tv_nexus = cmd->tvc_nexus;
 
+       se_cmd->tag = 0;
        rc = target_submit_cmd_map_sgls(se_cmd, tv_nexus->tvn_se_sess,
                        cmd->tvc_cdb, &cmd->tvc_sense_buf[0],
                        cmd->tvc_lun, cmd->tvc_exp_data_len,
@@ -1824,50 +1668,6 @@ static void vhost_scsi_port_unlink(struct se_portal_group *se_tpg,
        mutex_unlock(&vhost_scsi_mutex);
 }
 
-static struct se_node_acl *
-vhost_scsi_make_nodeacl(struct se_portal_group *se_tpg,
-                      struct config_group *group,
-                      const char *name)
-{
-       struct se_node_acl *se_nacl, *se_nacl_new;
-       struct vhost_scsi_nacl *nacl;
-       u64 wwpn = 0;
-       u32 nexus_depth;
-
-       /* vhost_scsi_parse_wwn(name, &wwpn, 1) < 0)
-               return ERR_PTR(-EINVAL); */
-       se_nacl_new = vhost_scsi_alloc_fabric_acl(se_tpg);
-       if (!se_nacl_new)
-               return ERR_PTR(-ENOMEM);
-
-       nexus_depth = 1;
-       /*
-        * se_nacl_new may be released by core_tpg_add_initiator_node_acl()
-        * when converting a NodeACL from demo mode -> explict
-        */
-       se_nacl = core_tpg_add_initiator_node_acl(se_tpg, se_nacl_new,
-                               name, nexus_depth);
-       if (IS_ERR(se_nacl)) {
-               vhost_scsi_release_fabric_acl(se_tpg, se_nacl_new);
-               return se_nacl;
-       }
-       /*
-        * Locate our struct vhost_scsi_nacl and set the FC Nport WWPN
-        */
-       nacl = container_of(se_nacl, struct vhost_scsi_nacl, se_node_acl);
-       nacl->iport_wwpn = wwpn;
-
-       return se_nacl;
-}
-
-static void vhost_scsi_drop_nodeacl(struct se_node_acl *se_acl)
-{
-       struct vhost_scsi_nacl *nacl = container_of(se_acl,
-                               struct vhost_scsi_nacl, se_node_acl);
-       core_tpg_del_initiator_node_acl(se_acl->se_tpg, se_acl, 1);
-       kfree(nacl);
-}
-
 static void vhost_scsi_free_cmd_map_res(struct vhost_scsi_nexus *nexus,
                                       struct se_session *se_sess)
 {
@@ -2202,8 +2002,7 @@ vhost_scsi_make_tpg(struct se_wwn *wwn,
        tpg->tport = tport;
        tpg->tport_tpgt = tpgt;
 
-       ret = core_tpg_register(&vhost_scsi_ops, wwn,
-                               &tpg->se_tpg, tpg, TRANSPORT_TPG_TYPE_NORMAL);
+       ret = core_tpg_register(wwn, &tpg->se_tpg, tport->tport_proto_id);
        if (ret < 0) {
                kfree(tpg);
                return NULL;
@@ -2327,20 +2126,13 @@ static struct target_core_fabric_ops vhost_scsi_ops = {
        .module                         = THIS_MODULE,
        .name                           = "vhost",
        .get_fabric_name                = vhost_scsi_get_fabric_name,
-       .get_fabric_proto_ident         = vhost_scsi_get_fabric_proto_ident,
        .tpg_get_wwn                    = vhost_scsi_get_fabric_wwn,
        .tpg_get_tag                    = vhost_scsi_get_tpgt,
-       .tpg_get_default_depth          = vhost_scsi_get_default_depth,
-       .tpg_get_pr_transport_id        = vhost_scsi_get_pr_transport_id,
-       .tpg_get_pr_transport_id_len    = vhost_scsi_get_pr_transport_id_len,
-       .tpg_parse_pr_out_transport_id  = vhost_scsi_parse_pr_out_transport_id,
        .tpg_check_demo_mode            = vhost_scsi_check_true,
        .tpg_check_demo_mode_cache      = vhost_scsi_check_true,
        .tpg_check_demo_mode_write_protect = vhost_scsi_check_false,
        .tpg_check_prod_mode_write_protect = vhost_scsi_check_false,
        .tpg_check_prot_fabric_only     = vhost_scsi_check_prot_fabric_only,
-       .tpg_alloc_fabric_acl           = vhost_scsi_alloc_fabric_acl,
-       .tpg_release_fabric_acl         = vhost_scsi_release_fabric_acl,
        .tpg_get_inst_index             = vhost_scsi_tpg_get_inst_index,
        .release_cmd                    = vhost_scsi_release_cmd,
        .check_stop_free                = vhost_scsi_check_stop_free,
@@ -2351,7 +2143,6 @@ static struct target_core_fabric_ops vhost_scsi_ops = {
        .write_pending                  = vhost_scsi_write_pending,
        .write_pending_status           = vhost_scsi_write_pending_status,
        .set_default_node_attributes    = vhost_scsi_set_default_node_attrs,
-       .get_task_tag                   = vhost_scsi_get_task_tag,
        .get_cmd_state                  = vhost_scsi_get_cmd_state,
        .queue_data_in                  = vhost_scsi_queue_data_in,
        .queue_status                   = vhost_scsi_queue_status,
@@ -2366,10 +2157,6 @@ static struct target_core_fabric_ops vhost_scsi_ops = {
        .fabric_drop_tpg                = vhost_scsi_drop_tpg,
        .fabric_post_link               = vhost_scsi_port_link,
        .fabric_pre_unlink              = vhost_scsi_port_unlink,
-       .fabric_make_np                 = NULL,
-       .fabric_drop_np                 = NULL,
-       .fabric_make_nodeacl            = vhost_scsi_make_nodeacl,
-       .fabric_drop_nodeacl            = vhost_scsi_drop_nodeacl,
 
        .tfc_wwn_attrs                  = vhost_scsi_wwn_attrs,
        .tfc_tpg_base_attrs             = vhost_scsi_tpg_attrs,
index 2ee28266fd0704fd1e1c4c64a6f19c8d863727fd..9e8e004bb1c38d809c2af43b9d42c053db3a41a2 100644 (file)
@@ -36,6 +36,77 @@ enum {
 #define vhost_used_event(vq) ((__virtio16 __user *)&vq->avail->ring[vq->num])
 #define vhost_avail_event(vq) ((__virtio16 __user *)&vq->used->ring[vq->num])
 
+#ifdef CONFIG_VHOST_CROSS_ENDIAN_LEGACY
+static void vhost_vq_reset_user_be(struct vhost_virtqueue *vq)
+{
+       vq->user_be = !virtio_legacy_is_little_endian();
+}
+
+static long vhost_set_vring_endian(struct vhost_virtqueue *vq, int __user *argp)
+{
+       struct vhost_vring_state s;
+
+       if (vq->private_data)
+               return -EBUSY;
+
+       if (copy_from_user(&s, argp, sizeof(s)))
+               return -EFAULT;
+
+       if (s.num != VHOST_VRING_LITTLE_ENDIAN &&
+           s.num != VHOST_VRING_BIG_ENDIAN)
+               return -EINVAL;
+
+       vq->user_be = s.num;
+
+       return 0;
+}
+
+static long vhost_get_vring_endian(struct vhost_virtqueue *vq, u32 idx,
+                                  int __user *argp)
+{
+       struct vhost_vring_state s = {
+               .index = idx,
+               .num = vq->user_be
+       };
+
+       if (copy_to_user(argp, &s, sizeof(s)))
+               return -EFAULT;
+
+       return 0;
+}
+
+static void vhost_init_is_le(struct vhost_virtqueue *vq)
+{
+       /* Note for legacy virtio: user_be is initialized at reset time
+        * according to the host endianness. If userspace does not set an
+        * explicit endianness, the default behavior is native endian, as
+        * expected by legacy virtio.
+        */
+       vq->is_le = vhost_has_feature(vq, VIRTIO_F_VERSION_1) || !vq->user_be;
+}
+#else
+static void vhost_vq_reset_user_be(struct vhost_virtqueue *vq)
+{
+}
+
+static long vhost_set_vring_endian(struct vhost_virtqueue *vq, int __user *argp)
+{
+       return -ENOIOCTLCMD;
+}
+
+static long vhost_get_vring_endian(struct vhost_virtqueue *vq, u32 idx,
+                                  int __user *argp)
+{
+       return -ENOIOCTLCMD;
+}
+
+static void vhost_init_is_le(struct vhost_virtqueue *vq)
+{
+       if (vhost_has_feature(vq, VIRTIO_F_VERSION_1))
+               vq->is_le = true;
+}
+#endif /* CONFIG_VHOST_CROSS_ENDIAN_LEGACY */
+
 static void vhost_poll_func(struct file *file, wait_queue_head_t *wqh,
                            poll_table *pt)
 {
@@ -199,6 +270,8 @@ static void vhost_vq_reset(struct vhost_dev *dev,
        vq->call = NULL;
        vq->log_ctx = NULL;
        vq->memory = NULL;
+       vq->is_le = virtio_legacy_is_little_endian();
+       vhost_vq_reset_user_be(vq);
 }
 
 static int vhost_worker(void *data)
@@ -806,6 +879,12 @@ long vhost_vring_ioctl(struct vhost_dev *d, int ioctl, void __user *argp)
                } else
                        filep = eventfp;
                break;
+       case VHOST_SET_VRING_ENDIAN:
+               r = vhost_set_vring_endian(vq, argp);
+               break;
+       case VHOST_GET_VRING_ENDIAN:
+               r = vhost_get_vring_endian(vq, idx, argp);
+               break;
        default:
                r = -ENOIOCTLCMD;
        }
@@ -1044,8 +1123,12 @@ int vhost_init_used(struct vhost_virtqueue *vq)
 {
        __virtio16 last_used_idx;
        int r;
-       if (!vq->private_data)
+       if (!vq->private_data) {
+               vq->is_le = virtio_legacy_is_little_endian();
                return 0;
+       }
+
+       vhost_init_is_le(vq);
 
        r = vhost_update_used_flags(vq);
        if (r)
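
For legacy devices, the vhost_init_is_le() decision introduced in the first hunk of this file works out as follows (derived directly from the code above):

    VIRTIO_F_VERSION_1 negotiated    -> is_le = true   (modern virtio is always little-endian)
    legacy, user_be == false         -> is_le = true   (default on little-endian hosts, or after
                                                        VHOST_SET_VRING_ENDIAN with LITTLE_ENDIAN)
    legacy, user_be == true          -> is_le = false  (default on big-endian hosts, or after
                                                        VHOST_SET_VRING_ENDIAN with BIG_ENDIAN)

Without CONFIG_VHOST_CROSS_ENDIAN_LEGACY, only the VIRTIO_F_VERSION_1 case can move is_le away from the host-native default set in vhost_vq_reset().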
index 8c1c792900ba15f74daa1123920c294ffa60df9c..ce6f6da4b09f988bc4ae15268912335fcd1bab18 100644 (file)
@@ -106,6 +106,14 @@ struct vhost_virtqueue {
        /* Log write descriptors */
        void __user *log_base;
        struct vhost_log *log;
+
+       /* Ring endianness. Defaults to legacy native endianness.
+        * Set to true when starting a modern virtio device. */
+       bool is_le;
+#ifdef CONFIG_VHOST_CROSS_ENDIAN_LEGACY
+       /* Ring endianness requested by userspace for cross-endian support. */
+       bool user_be;
+#endif
 };
 
 struct vhost_dev {
@@ -173,34 +181,39 @@ static inline bool vhost_has_feature(struct vhost_virtqueue *vq, int bit)
        return vq->acked_features & (1ULL << bit);
 }
 
+static inline bool vhost_is_little_endian(struct vhost_virtqueue *vq)
+{
+       return vq->is_le;
+}
+
 /* Memory accessors */
 static inline u16 vhost16_to_cpu(struct vhost_virtqueue *vq, __virtio16 val)
 {
-       return __virtio16_to_cpu(vhost_has_feature(vq, VIRTIO_F_VERSION_1), val);
+       return __virtio16_to_cpu(vhost_is_little_endian(vq), val);
 }
 
 static inline __virtio16 cpu_to_vhost16(struct vhost_virtqueue *vq, u16 val)
 {
-       return __cpu_to_virtio16(vhost_has_feature(vq, VIRTIO_F_VERSION_1), val);
+       return __cpu_to_virtio16(vhost_is_little_endian(vq), val);
 }
 
 static inline u32 vhost32_to_cpu(struct vhost_virtqueue *vq, __virtio32 val)
 {
-       return __virtio32_to_cpu(vhost_has_feature(vq, VIRTIO_F_VERSION_1), val);
+       return __virtio32_to_cpu(vhost_is_little_endian(vq), val);
 }
 
 static inline __virtio32 cpu_to_vhost32(struct vhost_virtqueue *vq, u32 val)
 {
-       return __cpu_to_virtio32(vhost_has_feature(vq, VIRTIO_F_VERSION_1), val);
+       return __cpu_to_virtio32(vhost_is_little_endian(vq), val);
 }
 
 static inline u64 vhost64_to_cpu(struct vhost_virtqueue *vq, __virtio64 val)
 {
-       return __virtio64_to_cpu(vhost_has_feature(vq, VIRTIO_F_VERSION_1), val);
+       return __virtio64_to_cpu(vhost_is_little_endian(vq), val);
 }
 
 static inline __virtio64 cpu_to_vhost64(struct vhost_virtqueue *vq, u64 val)
 {
-       return __cpu_to_virtio64(vhost_has_feature(vq, VIRTIO_F_VERSION_1), val);
+       return __cpu_to_virtio64(vhost_is_little_endian(vq), val);
 }
 #endif
index 612b093831d52be7040bcefeb279eaf9f4dd9700..9200a8668b498ff3dc76136760496524a0b3cad4 100644 (file)
@@ -1225,6 +1225,15 @@ static int dss_add_child_component(struct device *dev, void *data)
 {
        struct component_match **match = data;
 
+       /*
+        * HACK
+        * We don't have a working driver for rfbi, so skip it here always.
+        * Otherwise dss will never get probed successfully, as it will wait
+        * for rfbi to get probed.
+        */
+       if (strstr(dev_name(dev), "rfbi"))
+               return 0;
+
        component_match_add(dev->parent, match, dss_component_compare, dev);
 
        return 0;
index d32d1c4d1b99f81a1962bb92bf7cc6b3e1ad77ff..178ae93b7ebd6a89e946803832dd3ba032b0f9c8 100644 (file)
@@ -1977,7 +1977,7 @@ static int param_set_scroll(const char *val, const struct kernel_param *kp)
 
        return 0;
 }
-static struct kernel_param_ops param_ops_scroll = {
+static const struct kernel_param_ops param_ops_scroll = {
        .set = param_set_scroll,
 };
 #define param_check_scroll(name, p) __param_check(name, p, void)
index ea7f056ed5fe3c4bbc04c45d9e09043bf7f134ce..8bac309c24b99c7134737ac901933fb0ed0155b5 100644 (file)
@@ -754,9 +754,9 @@ static int vt8623_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
 
        /* Prepare startup mode */
 
-       kparam_block_sysfs_write(mode_option);
+       kernel_param_lock(THIS_MODULE);
        rc = fb_find_mode(&(info->var), info, mode_option, NULL, 0, NULL, 8);
-       kparam_unblock_sysfs_write(mode_option);
+       kernel_param_unlock(THIS_MODULE);
        if (! ((rc == 1) || (rc == 2))) {
                rc = -EINVAL;
                dev_err(info->device, "mode %s not found\n", mode_option);
index 7a5e60dea6c59267ef48efa42ac2091346616b57..10189b5b627f962cb9a8e9527aae829e27b0aec2 100644 (file)
@@ -691,7 +691,7 @@ static int vm_cmdline_get(char *buffer, const struct kernel_param *kp)
        return strlen(buffer) + 1;
 }
 
-static struct kernel_param_ops vm_cmdline_param_ops = {
+static const struct kernel_param_ops vm_cmdline_param_ops = {
        .set = vm_cmdline_set,
        .get = vm_cmdline_get,
 };
index 5447b818633232937d7cb87ddb21424da1281010..78f804af6c2020a9b927286e3496f6a2d8541953 100644 (file)
@@ -507,10 +507,6 @@ static int virtio_pci_probe(struct pci_dev *pci_dev,
        if (rc)
                goto err_enable_device;
 
-       rc = pci_request_regions(pci_dev, "virtio-pci");
-       if (rc)
-               goto err_request_regions;
-
        if (force_legacy) {
                rc = virtio_pci_legacy_probe(vp_dev);
                /* Also try modern mode if we can't map BAR0 (no IO space). */
@@ -540,8 +536,6 @@ err_register:
        else
             virtio_pci_modern_remove(vp_dev);
 err_probe:
-       pci_release_regions(pci_dev);
-err_request_regions:
        pci_disable_device(pci_dev);
 err_enable_device:
        kfree(vp_dev);
@@ -559,7 +553,6 @@ static void virtio_pci_remove(struct pci_dev *pci_dev)
        else
                virtio_pci_modern_remove(vp_dev);
 
-       pci_release_regions(pci_dev);
        pci_disable_device(pci_dev);
 }
 
index 28ee4e56badf1575d8d1c590850902fb7a54d30b..b976d968e793683a22a6d6c7f213763863ef0bd0 100644 (file)
@@ -75,6 +75,8 @@ struct virtio_pci_device {
        /* Multiply queue_notify_off by this value. (non-legacy mode). */
        u32 notify_offset_multiplier;
 
+       int modern_bars;
+
        /* Legacy only field */
        /* the IO mapping for the PCI config space */
        void __iomem *ioaddr;
index 256a5278a515deb1b8060386eb74bcf344a8fd66..48bc9797e530ad289e831d3492eb8f4762a4bd18 100644 (file)
@@ -215,6 +215,7 @@ static const struct virtio_config_ops virtio_pci_config_ops = {
 int virtio_pci_legacy_probe(struct virtio_pci_device *vp_dev)
 {
        struct pci_dev *pci_dev = vp_dev->pci_dev;
+       int rc;
 
        /* We only own devices >= 0x1000 and <= 0x103f: leave the rest. */
        if (pci_dev->device < 0x1000 || pci_dev->device > 0x103f)
@@ -226,9 +227,14 @@ int virtio_pci_legacy_probe(struct virtio_pci_device *vp_dev)
                return -ENODEV;
        }
 
+       rc = pci_request_region(pci_dev, 0, "virtio-pci-legacy");
+       if (rc)
+               return rc;
+
+       rc = -ENOMEM;
        vp_dev->ioaddr = pci_iomap(pci_dev, 0, 0);
        if (!vp_dev->ioaddr)
-               return -ENOMEM;
+               goto err_iomap;
 
        vp_dev->isr = vp_dev->ioaddr + VIRTIO_PCI_ISR;
 
@@ -246,6 +252,10 @@ int virtio_pci_legacy_probe(struct virtio_pci_device *vp_dev)
        vp_dev->del_vq = del_vq;
 
        return 0;
+
+err_iomap:
+       pci_release_region(pci_dev, 0);
+       return rc;
 }
 
 void virtio_pci_legacy_remove(struct virtio_pci_device *vp_dev)
@@ -253,4 +263,5 @@ void virtio_pci_legacy_remove(struct virtio_pci_device *vp_dev)
        struct pci_dev *pci_dev = vp_dev->pci_dev;
 
        pci_iounmap(pci_dev, vp_dev->ioaddr);
+       pci_release_region(pci_dev, 0);
 }
index e88e0997a8897e2ceaa25891f3ae080048d6e259..8e5cf194cc0bd003888c1235b8d9bfc1cbc99e9c 100644 (file)
@@ -499,7 +499,7 @@ static const struct virtio_config_ops virtio_pci_config_ops = {
  * Returns offset of the capability, or 0.
  */
 static inline int virtio_pci_find_capability(struct pci_dev *dev, u8 cfg_type,
-                                            u32 ioresource_types)
+                                            u32 ioresource_types, int *bars)
 {
        int pos;
 
@@ -520,8 +520,10 @@ static inline int virtio_pci_find_capability(struct pci_dev *dev, u8 cfg_type,
 
                if (type == cfg_type) {
                        if (pci_resource_len(dev, bar) &&
-                           pci_resource_flags(dev, bar) & ioresource_types)
+                           pci_resource_flags(dev, bar) & ioresource_types) {
+                               *bars |= (1 << bar);
                                return pos;
+                       }
                }
        }
        return 0;
@@ -617,7 +619,8 @@ int virtio_pci_modern_probe(struct virtio_pci_device *vp_dev)
 
        /* check for a common config: if not, use legacy mode (bar 0). */
        common = virtio_pci_find_capability(pci_dev, VIRTIO_PCI_CAP_COMMON_CFG,
-                                           IORESOURCE_IO | IORESOURCE_MEM);
+                                           IORESOURCE_IO | IORESOURCE_MEM,
+                                           &vp_dev->modern_bars);
        if (!common) {
                dev_info(&pci_dev->dev,
                         "virtio_pci: leaving for legacy driver\n");
@@ -626,9 +629,11 @@ int virtio_pci_modern_probe(struct virtio_pci_device *vp_dev)
 
        /* If common is there, these should be too... */
        isr = virtio_pci_find_capability(pci_dev, VIRTIO_PCI_CAP_ISR_CFG,
-                                        IORESOURCE_IO | IORESOURCE_MEM);
+                                        IORESOURCE_IO | IORESOURCE_MEM,
+                                        &vp_dev->modern_bars);
        notify = virtio_pci_find_capability(pci_dev, VIRTIO_PCI_CAP_NOTIFY_CFG,
-                                           IORESOURCE_IO | IORESOURCE_MEM);
+                                           IORESOURCE_IO | IORESOURCE_MEM,
+                                           &vp_dev->modern_bars);
        if (!isr || !notify) {
                dev_err(&pci_dev->dev,
                        "virtio_pci: missing capabilities %i/%i/%i\n",
@@ -640,7 +645,13 @@ int virtio_pci_modern_probe(struct virtio_pci_device *vp_dev)
         * device-specific configuration.
         */
        device = virtio_pci_find_capability(pci_dev, VIRTIO_PCI_CAP_DEVICE_CFG,
-                                           IORESOURCE_IO | IORESOURCE_MEM);
+                                           IORESOURCE_IO | IORESOURCE_MEM,
+                                           &vp_dev->modern_bars);
+
+       err = pci_request_selected_regions(pci_dev, vp_dev->modern_bars,
+                                          "virtio-pci-modern");
+       if (err)
+               return err;
 
        err = -EINVAL;
        vp_dev->common = map_capability(pci_dev, common,
@@ -727,4 +738,5 @@ void virtio_pci_modern_remove(struct virtio_pci_device *vp_dev)
                pci_iounmap(pci_dev, vp_dev->notify_base);
        pci_iounmap(pci_dev, vp_dev->isr);
        pci_iounmap(pci_dev, vp_dev->common);
+       pci_release_selected_regions(pci_dev, vp_dev->modern_bars);
 }
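
Together with the earlier virtio_pci_common.c hunk that drops the blanket pci_request_regions() call, these changes move BAR ownership into the two probe paths: the legacy probe claims only BAR 0, while the modern probe records which BARs actually carry virtio capabilities in modern_bars and requests just those. A rough sketch of the bitmask idea — example_bar_has_virtio_cap() is a made-up placeholder, not a real helper:

    #include <linux/pci.h>

    static int example_bar_has_virtio_cap(struct pci_dev *dev, int bar);  /* hypothetical */

    static int example_claim_modern_bars(struct pci_dev *pci_dev)
    {
            int modern_bars = 0;
            int bar;

            for (bar = 0; bar <= PCI_STD_RESOURCE_END; bar++)
                    if (example_bar_has_virtio_cap(pci_dev, bar))
                            modern_bars |= 1 << bar;

            /* request only the BARs that will actually be mapped */
            return pci_request_selected_regions(pci_dev, modern_bars,
                                                "virtio-pci-modern");
    }

The matching teardown is pci_release_selected_regions(pci_dev, modern_bars), as done in virtio_pci_modern_remove() above.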
index 262647bbc61449931624821533e887d4b748b18e..241fafde42cb54ceb2e50918e4a02e1c64f4f923 100644 (file)
@@ -1,3 +1,4 @@
+
 #
 # Watchdog device configuration
 #
@@ -96,6 +97,15 @@ config DA9063_WATCHDOG
 
          This driver can be built as a module. The module name is da9063_wdt.
 
+config DA9062_WATCHDOG
+       tristate "Dialog DA9062 Watchdog"
+       depends on MFD_DA9062
+       select WATCHDOG_CORE
+       help
+         Support for the watchdog in the DA9062 PMIC.
+
+         This driver can be built as a module. The module name is da9062_wdt.
+
 config GPIO_WATCHDOG
        tristate "Watchdog device controlled through GPIO-line"
        depends on OF_GPIO
@@ -104,6 +114,17 @@ config GPIO_WATCHDOG
          If you say yes here you get support for watchdog device
          controlled through GPIO-line.
 
+config GPIO_WATCHDOG_ARCH_INITCALL
+       bool "Register the watchdog as early as possible"
+       depends on GPIO_WATCHDOG=y
+       help
+         In some situations, the default initcall level (module_init)
+         is not early enough in the boot process to prevent the watchdog
+         from being triggered.
+         If you say yes here, the initcall level will be raised to
+         arch_initcall.
+         If in doubt, say N.
+
 config MENF21BMC_WATCHDOG
        tristate "MEN 14F021P00 BMC Watchdog"
        depends on MFD_MENF21BMC
@@ -169,6 +190,7 @@ config AT91SAM9X_WATCHDOG
 
 config CADENCE_WATCHDOG
        tristate "Cadence Watchdog Timer"
+       depends on HAS_IOMEM
        select WATCHDOG_CORE
        help
          Say Y here if you want to include support for the watchdog
@@ -408,7 +430,7 @@ config TS72XX_WATCHDOG
 
 config MAX63XX_WATCHDOG
        tristate "Max63xx watchdog"
-       depends on ARM && HAS_IOMEM
+       depends on HAS_IOMEM
        select WATCHDOG_CORE
        help
          Support for memory mapped max63{69,70,71,72,73,74} watchdog timer.
@@ -526,6 +548,16 @@ config MEDIATEK_WATCHDOG
          To compile this driver as a module, choose M here: the
          module will be called mtk_wdt.
 
+config DIGICOLOR_WATCHDOG
+       tristate "Conexant Digicolor SoCs watchdog support"
+       depends on ARCH_DIGICOLOR
+       select WATCHDOG_CORE
+       help
+         Say Y here to include support for the watchdog timer
+         in Conexant Digicolor SoCs.
+         To compile this driver as a module, choose M here: the
+         module will be called digicolor_wdt.
+
 # AVR32 Architecture
 
 config AT32AP700X_WDT
@@ -1355,7 +1387,7 @@ config BOOKE_WDT_DEFAULT_TIMEOUT
 config MEN_A21_WDT
        tristate "MEN A21 VME CPU Carrier Board Watchdog Timer"
        select WATCHDOG_CORE
-       depends on GPIOLIB
+       depends on GPIOLIB || COMPILE_TEST
        help
         Watchdog driver for MEN A21 VMEbus CPU Carrier Boards.
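
The new GPIO_WATCHDOG_ARCH_INITCALL entry above only changes when the driver registers itself, not what it does. A hedged sketch of the init-level switch such an option implies — example_wdt_driver stands in for the driver's platform_driver, and the corresponding gpio_wdt.c change is not among the hunks shown here:

    #ifdef CONFIG_GPIO_WATCHDOG_ARCH_INITCALL
    /* register at arch_initcall time, well before module_init-level drivers */
    static int __init example_wdt_init(void)
    {
            return platform_driver_register(&example_wdt_driver);
    }
    arch_initcall(example_wdt_init);
    #else
    module_platform_driver(example_wdt_driver);
    #endif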
 
index d98768c7d928cb5c7b54b6f339f0efc4d88c2db4..59ea9a1b8e766f64e49531873cb5b94373956962 100644 (file)
@@ -65,6 +65,7 @@ obj-$(CONFIG_BCM_KONA_WDT) += bcm_kona_wdt.o
 obj-$(CONFIG_TEGRA_WATCHDOG) += tegra_wdt.o
 obj-$(CONFIG_MESON_WATCHDOG) += meson_wdt.o
 obj-$(CONFIG_MEDIATEK_WATCHDOG) += mtk_wdt.o
+obj-$(CONFIG_DIGICOLOR_WATCHDOG) += digicolor_wdt.o
 
 # AVR32 Architecture
 obj-$(CONFIG_AT32AP700X_WDT) += at32ap700x_wdt.o
@@ -180,6 +181,7 @@ obj-$(CONFIG_XEN_WDT) += xen_wdt.o
 # Architecture Independent
 obj-$(CONFIG_DA9052_WATCHDOG) += da9052_wdt.o
 obj-$(CONFIG_DA9055_WATCHDOG) += da9055_wdt.o
+obj-$(CONFIG_DA9062_WATCHDOG) += da9062_wdt.o
 obj-$(CONFIG_DA9063_WATCHDOG) += da9063_wdt.o
 obj-$(CONFIG_GPIO_WATCHDOG)    += gpio_wdt.o
 obj-$(CONFIG_WM831X_WATCHDOG) += wm831x_wdt.o
index 1443b3c391de497c05fe332f1c4cdd067bc5f5c9..e4698f7c5f9306826836e7856b7862aa21c593d6 100644 (file)
@@ -40,9 +40,9 @@
 #define DRV_NAME "AT91SAM9 Watchdog"
 
 #define wdt_read(wdt, field) \
-       __raw_readl((wdt)->base + (field))
+       readl_relaxed((wdt)->base + (field))
 #define wdt_write(wtd, field, val) \
-       __raw_writel((val), (wdt)->base + (field))
+       writel_relaxed((val), (wdt)->base + (field))
 
 /* AT91SAM9 watchdog runs a 12bit counter @ 256Hz,
  * use this to convert a watchdog
diff --git a/drivers/watchdog/da9062_wdt.c b/drivers/watchdog/da9062_wdt.c
new file mode 100644 (file)
index 0000000..b3a870c
--- /dev/null
@@ -0,0 +1,253 @@
+/*
+ * da9062_wdt.c - WDT device driver for DA9062
+ * Copyright (C) 2015  Dialog Semiconductor Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/watchdog.h>
+#include <linux/platform_device.h>
+#include <linux/uaccess.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/jiffies.h>
+#include <linux/mfd/da9062/registers.h>
+#include <linux/mfd/da9062/core.h>
+#include <linux/regmap.h>
+#include <linux/of.h>
+
+static const unsigned int wdt_timeout[] = { 0, 2, 4, 8, 16, 32, 65, 131 };
+#define DA9062_TWDSCALE_DISABLE                0
+#define DA9062_TWDSCALE_MIN            1
+#define DA9062_TWDSCALE_MAX            (ARRAY_SIZE(wdt_timeout) - 1)
+#define DA9062_WDT_MIN_TIMEOUT         wdt_timeout[DA9062_TWDSCALE_MIN]
+#define DA9062_WDT_MAX_TIMEOUT         wdt_timeout[DA9062_TWDSCALE_MAX]
+#define DA9062_WDG_DEFAULT_TIMEOUT     wdt_timeout[DA9062_TWDSCALE_MAX-1]
+#define DA9062_RESET_PROTECTION_MS     300
+
+struct da9062_watchdog {
+       struct da9062 *hw;
+       struct watchdog_device wdtdev;
+       unsigned long j_time_stamp;
+};
+
+static void da9062_set_window_start(struct da9062_watchdog *wdt)
+{
+       wdt->j_time_stamp = jiffies;
+}
+
+static void da9062_apply_window_protection(struct da9062_watchdog *wdt)
+{
+       unsigned long delay = msecs_to_jiffies(DA9062_RESET_PROTECTION_MS);
+       unsigned long timeout = wdt->j_time_stamp + delay;
+       unsigned long now = jiffies;
+       unsigned int diff_ms;
+
+       /* if time-limit has not elapsed then wait for remainder */
+       if (time_before(now, timeout)) {
+               diff_ms = jiffies_to_msecs(timeout-now);
+               dev_dbg(wdt->hw->dev,
+                       "Kicked too quickly. Delaying %u msecs\n", diff_ms);
+               msleep(diff_ms);
+       }
+}
+
+static unsigned int da9062_wdt_timeout_to_sel(unsigned int secs)
+{
+       unsigned int i;
+
+       for (i = DA9062_TWDSCALE_MIN; i <= DA9062_TWDSCALE_MAX; i++) {
+               if (wdt_timeout[i] >= secs)
+                       return i;
+       }
+
+       return DA9062_TWDSCALE_MAX;
+}
+
+static int da9062_reset_watchdog_timer(struct da9062_watchdog *wdt)
+{
+       int ret;
+
+       da9062_apply_window_protection(wdt);
+
+       ret = regmap_update_bits(wdt->hw->regmap,
+                          DA9062AA_CONTROL_F,
+                          DA9062AA_WATCHDOG_MASK,
+                          DA9062AA_WATCHDOG_MASK);
+
+       da9062_set_window_start(wdt);
+
+       return ret;
+}
+
+static int da9062_wdt_update_timeout_register(struct da9062_watchdog *wdt,
+                                             unsigned int regval)
+{
+       struct da9062 *chip = wdt->hw;
+       int ret;
+
+       ret = da9062_reset_watchdog_timer(wdt);
+       if (ret)
+               return ret;
+
+       return regmap_update_bits(chip->regmap,
+                                 DA9062AA_CONTROL_D,
+                                 DA9062AA_TWDSCALE_MASK,
+                                 regval);
+}
+
+static int da9062_wdt_start(struct watchdog_device *wdd)
+{
+       struct da9062_watchdog *wdt = watchdog_get_drvdata(wdd);
+       unsigned int selector;
+       int ret;
+
+       selector = da9062_wdt_timeout_to_sel(wdt->wdtdev.timeout);
+       ret = da9062_wdt_update_timeout_register(wdt, selector);
+       if (ret)
+               dev_err(wdt->hw->dev, "Watchdog failed to start (err = %d)\n",
+                       ret);
+
+       return ret;
+}
+
+static int da9062_wdt_stop(struct watchdog_device *wdd)
+{
+       struct da9062_watchdog *wdt = watchdog_get_drvdata(wdd);
+       int ret;
+
+       ret = da9062_reset_watchdog_timer(wdt);
+       if (ret) {
+               dev_err(wdt->hw->dev, "Failed to ping the watchdog (err = %d)\n",
+                       ret);
+               return ret;
+       }
+
+       ret = regmap_update_bits(wdt->hw->regmap,
+                                DA9062AA_CONTROL_D,
+                                DA9062AA_TWDSCALE_MASK,
+                                DA9062_TWDSCALE_DISABLE);
+       if (ret)
+               dev_err(wdt->hw->dev, "Watchdog failed to stop (err = %d)\n",
+                       ret);
+
+       return ret;
+}
+
+static int da9062_wdt_ping(struct watchdog_device *wdd)
+{
+       struct da9062_watchdog *wdt = watchdog_get_drvdata(wdd);
+       int ret;
+
+       ret = da9062_reset_watchdog_timer(wdt);
+       if (ret)
+               dev_err(wdt->hw->dev, "Failed to ping the watchdog (err = %d)\n",
+                       ret);
+
+       return ret;
+}
+
+static int da9062_wdt_set_timeout(struct watchdog_device *wdd,
+                                 unsigned int timeout)
+{
+       struct da9062_watchdog *wdt = watchdog_get_drvdata(wdd);
+       unsigned int selector;
+       int ret;
+
+       selector = da9062_wdt_timeout_to_sel(timeout);
+       ret = da9062_wdt_update_timeout_register(wdt, selector);
+       if (ret)
+               dev_err(wdt->hw->dev, "Failed to set watchdog timeout (err = %d)\n",
+                       ret);
+       else
+               wdd->timeout = wdt_timeout[selector];
+
+       return ret;
+}
+
+static const struct watchdog_info da9062_watchdog_info = {
+       .options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING,
+       .identity = "DA9062 WDT",
+};
+
+static const struct watchdog_ops da9062_watchdog_ops = {
+       .owner = THIS_MODULE,
+       .start = da9062_wdt_start,
+       .stop = da9062_wdt_stop,
+       .ping = da9062_wdt_ping,
+       .set_timeout = da9062_wdt_set_timeout,
+};
+
+static int da9062_wdt_probe(struct platform_device *pdev)
+{
+       int ret;
+       struct da9062 *chip;
+       struct da9062_watchdog *wdt;
+
+       chip = dev_get_drvdata(pdev->dev.parent);
+       if (!chip)
+               return -EINVAL;
+
+       wdt = devm_kzalloc(&pdev->dev, sizeof(*wdt), GFP_KERNEL);
+       if (!wdt)
+               return -ENOMEM;
+
+       wdt->hw = chip;
+
+       wdt->wdtdev.info = &da9062_watchdog_info;
+       wdt->wdtdev.ops = &da9062_watchdog_ops;
+       wdt->wdtdev.min_timeout = DA9062_WDT_MIN_TIMEOUT;
+       wdt->wdtdev.max_timeout = DA9062_WDT_MAX_TIMEOUT;
+       wdt->wdtdev.timeout = DA9062_WDG_DEFAULT_TIMEOUT;
+       wdt->wdtdev.status = WATCHDOG_NOWAYOUT_INIT_STATUS;
+
+       watchdog_set_drvdata(&wdt->wdtdev, wdt);
+       dev_set_drvdata(&pdev->dev, wdt);
+
+       ret = watchdog_register_device(&wdt->wdtdev);
+       if (ret < 0) {
+               dev_err(wdt->hw->dev,
+                       "watchdog registration failed (%d)\n", ret);
+               return ret;
+       }
+
+       da9062_set_window_start(wdt);
+
+       ret = da9062_wdt_ping(&wdt->wdtdev);
+       if (ret < 0)
+               watchdog_unregister_device(&wdt->wdtdev);
+
+       return ret;
+}
+
+static int da9062_wdt_remove(struct platform_device *pdev)
+{
+       struct da9062_watchdog *wdt = dev_get_drvdata(&pdev->dev);
+
+       watchdog_unregister_device(&wdt->wdtdev);
+       return 0;
+}
+
+static struct platform_driver da9062_wdt_driver = {
+       .probe = da9062_wdt_probe,
+       .remove = da9062_wdt_remove,
+       .driver = {
+               .name = "da9062-watchdog",
+       },
+};
+module_platform_driver(da9062_wdt_driver);
+
+MODULE_AUTHOR("S Twiss <stwiss.opensource@diasemi.com>");
+MODULE_DESCRIPTION("WDT device driver for Dialog DA9062");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:da9062-watchdog");
diff --git a/drivers/watchdog/digicolor_wdt.c b/drivers/watchdog/digicolor_wdt.c
new file mode 100644 (file)
index 0000000..31d8e49
--- /dev/null
@@ -0,0 +1,205 @@
+/*
+ * Watchdog driver for Conexant Digicolor
+ *
+ * Copyright (C) 2015 Paradox Innovation Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/clk.h>
+#include <linux/watchdog.h>
+#include <linux/reboot.h>
+#include <linux/platform_device.h>
+#include <linux/of_address.h>
+
+#define TIMER_A_CONTROL                0
+#define TIMER_A_COUNT          4
+
+#define TIMER_A_ENABLE_COUNT   BIT(0)
+#define TIMER_A_ENABLE_WATCHDOG        BIT(1)
+
+struct dc_wdt {
+       void __iomem            *base;
+       struct clk              *clk;
+       struct notifier_block   restart_handler;
+       spinlock_t              lock;
+};
+
+static unsigned timeout;
+module_param(timeout, uint, 0);
+MODULE_PARM_DESC(timeout, "Watchdog timeout in seconds");
+
+static void dc_wdt_set(struct dc_wdt *wdt, u32 ticks)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&wdt->lock, flags);
+
+       writel_relaxed(0, wdt->base + TIMER_A_CONTROL);
+       writel_relaxed(ticks, wdt->base + TIMER_A_COUNT);
+       writel_relaxed(TIMER_A_ENABLE_COUNT | TIMER_A_ENABLE_WATCHDOG,
+                      wdt->base + TIMER_A_CONTROL);
+
+       spin_unlock_irqrestore(&wdt->lock, flags);
+}
+
+static int dc_restart_handler(struct notifier_block *this, unsigned long mode,
+                             void *cmd)
+{
+       struct dc_wdt *wdt = container_of(this, struct dc_wdt, restart_handler);
+
+       dc_wdt_set(wdt, 1);
+       /* wait for reset to assert... */
+       mdelay(500);
+
+       return NOTIFY_DONE;
+}
+
+static int dc_wdt_start(struct watchdog_device *wdog)
+{
+       struct dc_wdt *wdt = watchdog_get_drvdata(wdog);
+
+       dc_wdt_set(wdt, wdog->timeout * clk_get_rate(wdt->clk));
+
+       return 0;
+}
+
+static int dc_wdt_stop(struct watchdog_device *wdog)
+{
+       struct dc_wdt *wdt = watchdog_get_drvdata(wdog);
+
+       writel_relaxed(0, wdt->base + TIMER_A_CONTROL);
+
+       return 0;
+}
+
+static int dc_wdt_set_timeout(struct watchdog_device *wdog, unsigned int t)
+{
+       struct dc_wdt *wdt = watchdog_get_drvdata(wdog);
+
+       dc_wdt_set(wdt, t * clk_get_rate(wdt->clk));
+       wdog->timeout = t;
+
+       return 0;
+}
+
+static unsigned int dc_wdt_get_timeleft(struct watchdog_device *wdog)
+{
+       struct dc_wdt *wdt = watchdog_get_drvdata(wdog);
+       uint32_t count = readl_relaxed(wdt->base + TIMER_A_COUNT);
+
+       return count / clk_get_rate(wdt->clk);
+}
+
+static struct watchdog_ops dc_wdt_ops = {
+       .owner          = THIS_MODULE,
+       .start          = dc_wdt_start,
+       .stop           = dc_wdt_stop,
+       .set_timeout    = dc_wdt_set_timeout,
+       .get_timeleft   = dc_wdt_get_timeleft,
+};
+
+static struct watchdog_info dc_wdt_info = {
+       .options        = WDIOF_SETTIMEOUT | WDIOF_MAGICCLOSE
+                       | WDIOF_KEEPALIVEPING,
+       .identity       = "Conexant Digicolor Watchdog",
+};
+
+static struct watchdog_device dc_wdt_wdd = {
+       .info           = &dc_wdt_info,
+       .ops            = &dc_wdt_ops,
+       .min_timeout    = 1,
+};
+
+static int dc_wdt_probe(struct platform_device *pdev)
+{
+       struct device *dev = &pdev->dev;
+       struct device_node *np = dev->of_node;
+       struct dc_wdt *wdt;
+       int ret;
+
+       wdt = devm_kzalloc(dev, sizeof(struct dc_wdt), GFP_KERNEL);
+       if (!wdt)
+               return -ENOMEM;
+       platform_set_drvdata(pdev, wdt);
+
+       wdt->base = of_iomap(np, 0);
+       if (!wdt->base) {
+               dev_err(dev, "Failed to remap watchdog regs\n");
+               return -ENODEV;
+       }
+
+       wdt->clk = devm_clk_get(&pdev->dev, NULL);
+       if (IS_ERR(wdt->clk)) {
+               ret = PTR_ERR(wdt->clk);
+               goto err_iounmap;
+       }
+       dc_wdt_wdd.max_timeout = U32_MAX / clk_get_rate(wdt->clk);
+       dc_wdt_wdd.timeout = dc_wdt_wdd.max_timeout;
+
+       spin_lock_init(&wdt->lock);
+
+       watchdog_set_drvdata(&dc_wdt_wdd, wdt);
+       watchdog_init_timeout(&dc_wdt_wdd, timeout, dev);
+       ret = watchdog_register_device(&dc_wdt_wdd);
+       if (ret) {
+               dev_err(dev, "Failed to register watchdog device\n");
+               goto err_iounmap;
+       }
+
+       wdt->restart_handler.notifier_call = dc_restart_handler;
+       wdt->restart_handler.priority = 128;
+       ret = register_restart_handler(&wdt->restart_handler);
+       if (ret)
+               dev_warn(&pdev->dev, "cannot register restart handler\n");
+
+       return 0;
+
+err_iounmap:
+       iounmap(wdt->base);
+       return ret;
+}
+
+static int dc_wdt_remove(struct platform_device *pdev)
+{
+       struct dc_wdt *wdt = platform_get_drvdata(pdev);
+
+       unregister_restart_handler(&wdt->restart_handler);
+       watchdog_unregister_device(&dc_wdt_wdd);
+       iounmap(wdt->base);
+
+       return 0;
+}
+
+static void dc_wdt_shutdown(struct platform_device *pdev)
+{
+       dc_wdt_stop(&dc_wdt_wdd);
+}
+
+static const struct of_device_id dc_wdt_of_match[] = {
+       { .compatible = "cnxt,cx92755-wdt", },
+       {},
+};
+MODULE_DEVICE_TABLE(of, dc_wdt_of_match);
+
+static struct platform_driver dc_wdt_driver = {
+       .probe          = dc_wdt_probe,
+       .remove         = dc_wdt_remove,
+       .shutdown       = dc_wdt_shutdown,
+       .driver = {
+               .name =         "digicolor-wdt",
+               .of_match_table = dc_wdt_of_match,
+       },
+};
+module_platform_driver(dc_wdt_driver);
+
+MODULE_AUTHOR("Baruch Siach <baruch@tkos.co.il>");
+MODULE_DESCRIPTION("Driver for Conexant Digicolor watchdog timer");
+MODULE_LICENSE("GPL");
index d0bb9499d12caaa2d901b3454a0c65cf825a9ea4..6ea0634345e99512eefec4f82ebcd8ee348bac82 100644 (file)
@@ -35,7 +35,6 @@
 #include <linux/pm.h>
 #include <linux/platform_device.h>
 #include <linux/reboot.h>
-#include <linux/spinlock.h>
 #include <linux/timer.h>
 #include <linux/uaccess.h>
 #include <linux/watchdog.h>
@@ -61,7 +60,6 @@ MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started "
 #define WDT_TIMEOUT            (HZ / 2)
 
 static struct {
-       spinlock_t              lock;
        void __iomem            *regs;
        struct clk              *clk;
        unsigned long           in_use;
@@ -177,7 +175,6 @@ static int dw_wdt_open(struct inode *inode, struct file *filp)
        /* Make sure we don't get unloaded. */
        __module_get(THIS_MODULE);
 
-       spin_lock(&dw_wdt.lock);
        if (!dw_wdt_is_enabled()) {
                /*
                 * The watchdog is not currently enabled. Set the timeout to
@@ -190,8 +187,6 @@ static int dw_wdt_open(struct inode *inode, struct file *filp)
 
        dw_wdt_set_next_heartbeat();
 
-       spin_unlock(&dw_wdt.lock);
-
        return nonseekable_open(inode, filp);
 }
 
@@ -220,6 +215,7 @@ static ssize_t dw_wdt_write(struct file *filp, const char __user *buf,
        }
 
        dw_wdt_set_next_heartbeat();
+       dw_wdt_keepalive();
        mod_timer(&dw_wdt.timer, jiffies + WDT_TIMEOUT);
 
        return len;
@@ -348,8 +344,6 @@ static int dw_wdt_drv_probe(struct platform_device *pdev)
        if (ret)
                return ret;
 
-       spin_lock_init(&dw_wdt.lock);
-
        ret = misc_register(&dw_wdt_miscdev);
        if (ret)
                goto out_disable_clk;
index cbc313d37c59f1f67b0bd99fc5835d2d35a14b85..1687cc2d71223cc799fbb60bb1d3f0914d7c3ada 100644 (file)
@@ -267,7 +267,16 @@ static struct platform_driver gpio_wdt_driver = {
        .probe  = gpio_wdt_probe,
        .remove = gpio_wdt_remove,
 };
+
+#ifdef CONFIG_GPIO_WATCHDOG_ARCH_INITCALL
+static int __init gpio_wdt_init(void)
+{
+       return platform_driver_register(&gpio_wdt_driver);
+}
+arch_initcall(gpio_wdt_init);
+#else
 module_platform_driver(gpio_wdt_driver);
+#endif
 
 MODULE_AUTHOR("Alexander Shiyan <shc_work@mail.ru>");
 MODULE_DESCRIPTION("GPIO Watchdog");
index ada3e44f99328424d11994ec03ebdd4886f4a919..286369d4f0f50928c4b837eb362a8e0fb5c6fa02 100644 (file)
@@ -588,7 +588,7 @@ static long hpwdt_ioctl(struct file *file, unsigned int cmd,
 {
        void __user *argp = (void __user *)arg;
        int __user *p = argp;
-       int new_margin;
+       int new_margin, options;
        int ret = -ENOTTY;
 
        switch (cmd) {
@@ -608,6 +608,20 @@ static long hpwdt_ioctl(struct file *file, unsigned int cmd,
                ret = 0;
                break;
 
+       case WDIOC_SETOPTIONS:
+               ret = get_user(options, p);
+               if (ret)
+                       break;
+
+               if (options & WDIOS_DISABLECARD)
+                       hpwdt_stop();
+
+               if (options & WDIOS_ENABLECARD) {
+                       hpwdt_start();
+                       hpwdt_ping();
+               }
+               break;
+
        case WDIOC_SETTIMEOUT:
                ret = get_user(new_margin, p);
                if (ret)
index 0deaa4f971f5ff1fdfde6d063314a45bd99b9707..0f73621827abf839f81fdfd46056652032898266 100644 (file)
@@ -9,6 +9,35 @@
  *
  * Based on drivers/watchdog/sunxi_wdt.c Copyright (c) 2013 Carlo Caione
  *                                                     2012 Henrik Nordstrom
+ *
+ * Notes
+ * -----
+ * The timeout value is rounded up to the next power-of-two number of clock
+ * cycles. This is configured through the PDC_WDT_CONFIG register, according
+ * to this formula:
+ *
+ *     timeout = 2^(delay + 1) clock cycles
+ *
+ * where 'delay' is the value written to the PDC_WDT_CONFIG register.
+ *
+ * Therefore, the hardware only allows programming watchdog timeouts that are
+ * a power-of-two number of watchdog clock cycles. The current implementation
+ * guarantees that the actual watchdog timeout will be _at least_ the value
+ * programmed in the imgpdc_wdt driver.
+ *
+ * The following table shows how the user-configured timeout relates
+ * to the actual hardware timeout (watchdog clock @ 40000 Hz):
+ *
+ * input timeout | WD_DELAY | actual timeout
+ * --------------+----------+---------------
+ *      10       |   18     |  13 seconds
+ *      20       |   19     |  26 seconds
+ *      30       |   20     |  52 seconds
+ *      60       |   21     |  104 seconds
+ *
+ * Albeit coarse, this granularity suffices for most watchdog uses.
+ * If the platform allows it, the user can change the watchdog clock rate
+ * to achieve a finer timeout granularity.
  */
 
 #include <linux/clk.h>
@@ -16,6 +45,7 @@
 #include <linux/log2.h>
 #include <linux/module.h>
 #include <linux/platform_device.h>
+#include <linux/reboot.h>
 #include <linux/slab.h>
 #include <linux/watchdog.h>
 
@@ -42,7 +72,7 @@
 #define PDC_WDT_MIN_TIMEOUT            1
 #define PDC_WDT_DEF_TIMEOUT            64
 
-static int heartbeat = PDC_WDT_DEF_TIMEOUT;
+static int heartbeat;
 module_param(heartbeat, int, 0);
 MODULE_PARM_DESC(heartbeat, "Watchdog heartbeats in seconds "
        "(default=" __MODULE_STRING(PDC_WDT_DEF_TIMEOUT) ")");
@@ -57,6 +87,7 @@ struct pdc_wdt_dev {
        struct clk *wdt_clk;
        struct clk *sys_clk;
        void __iomem *base;
+       struct notifier_block restart_handler;
 };
 
 static int pdc_wdt_keepalive(struct watchdog_device *wdt_dev)
@@ -84,18 +115,24 @@ static int pdc_wdt_stop(struct watchdog_device *wdt_dev)
        return 0;
 }
 
+static void __pdc_wdt_set_timeout(struct pdc_wdt_dev *wdt)
+{
+       unsigned long clk_rate = clk_get_rate(wdt->wdt_clk);
+       unsigned int val;
+
+       val = readl(wdt->base + PDC_WDT_CONFIG) & ~PDC_WDT_CONFIG_DELAY_MASK;
+       val |= order_base_2(wdt->wdt_dev.timeout * clk_rate) - 1;
+       writel(val, wdt->base + PDC_WDT_CONFIG);
+}
+
 static int pdc_wdt_set_timeout(struct watchdog_device *wdt_dev,
                               unsigned int new_timeout)
 {
-       unsigned int val;
        struct pdc_wdt_dev *wdt = watchdog_get_drvdata(wdt_dev);
-       unsigned long clk_rate = clk_get_rate(wdt->wdt_clk);
 
        wdt->wdt_dev.timeout = new_timeout;
 
-       val = readl(wdt->base + PDC_WDT_CONFIG) & ~PDC_WDT_CONFIG_DELAY_MASK;
-       val |= order_base_2(new_timeout * clk_rate) - 1;
-       writel(val, wdt->base + PDC_WDT_CONFIG);
+       __pdc_wdt_set_timeout(wdt);
 
        return 0;
 }
@@ -106,6 +143,8 @@ static int pdc_wdt_start(struct watchdog_device *wdt_dev)
        unsigned int val;
        struct pdc_wdt_dev *wdt = watchdog_get_drvdata(wdt_dev);
 
+       __pdc_wdt_set_timeout(wdt);
+
        val = readl(wdt->base + PDC_WDT_CONFIG);
        val |= PDC_WDT_CONFIG_ENABLE;
        writel(val, wdt->base + PDC_WDT_CONFIG);
@@ -128,8 +167,21 @@ static const struct watchdog_ops pdc_wdt_ops = {
        .set_timeout    = pdc_wdt_set_timeout,
 };
 
+static int pdc_wdt_restart(struct notifier_block *this, unsigned long mode,
+                          void *cmd)
+{
+       struct pdc_wdt_dev *wdt = container_of(this, struct pdc_wdt_dev,
+                                              restart_handler);
+
+       /* Assert SOFT_RESET */
+       writel(0x1, wdt->base + PDC_WDT_SOFT_RESET);
+
+       return NOTIFY_OK;
+}
+
 static int pdc_wdt_probe(struct platform_device *pdev)
 {
+       u64 div;
        int ret, val;
        unsigned long clk_rate;
        struct resource *res;
@@ -189,16 +241,15 @@ static int pdc_wdt_probe(struct platform_device *pdev)
 
        pdc_wdt->wdt_dev.info = &pdc_wdt_info;
        pdc_wdt->wdt_dev.ops = &pdc_wdt_ops;
-       pdc_wdt->wdt_dev.max_timeout = 1 << PDC_WDT_CONFIG_DELAY_MASK;
+
+       div = 1ULL << (PDC_WDT_CONFIG_DELAY_MASK + 1);
+       do_div(div, clk_rate);
+       pdc_wdt->wdt_dev.max_timeout = div;
+       pdc_wdt->wdt_dev.timeout = PDC_WDT_DEF_TIMEOUT;
        pdc_wdt->wdt_dev.parent = &pdev->dev;
        watchdog_set_drvdata(&pdc_wdt->wdt_dev, pdc_wdt);
 
-       ret = watchdog_init_timeout(&pdc_wdt->wdt_dev, heartbeat, &pdev->dev);
-       if (ret < 0) {
-               pdc_wdt->wdt_dev.timeout = pdc_wdt->wdt_dev.max_timeout;
-               dev_warn(&pdev->dev,
-                        "Initial timeout out of range! setting max timeout\n");
-       }
+       watchdog_init_timeout(&pdc_wdt->wdt_dev, heartbeat, &pdev->dev);
 
        pdc_wdt_stop(&pdc_wdt->wdt_dev);
 
@@ -238,6 +289,13 @@ static int pdc_wdt_probe(struct platform_device *pdev)
        if (ret)
                goto disable_wdt_clk;
 
+       pdc_wdt->restart_handler.notifier_call = pdc_wdt_restart;
+       pdc_wdt->restart_handler.priority = 128;
+       ret = register_restart_handler(&pdc_wdt->restart_handler);
+       if (ret)
+               dev_warn(&pdev->dev, "failed to register restart handler: %d\n",
+                        ret);
+
        return 0;
 
 disable_wdt_clk:
index 5e6d808d358a5c342ae11b9fd71ce5e09416b809..0bb1a1d1b170a098bfcca3a3f5b828450849f9e0 100644 (file)
@@ -166,6 +166,8 @@ static int imx2_wdt_set_timeout(struct watchdog_device *wdog,
 {
        struct imx2_wdt_device *wdev = watchdog_get_drvdata(wdog);
 
+       wdog->timeout = new_timeout;
+
        regmap_update_bits(wdev->regmap, IMX2_WDT_WCR, IMX2_WDT_WCR_WT,
                           WDOG_SEC_TO_COUNT(new_timeout));
        return 0;
@@ -256,8 +258,11 @@ static int __init imx2_wdt_probe(struct platform_device *pdev)
        wdog->ops               = &imx2_wdt_ops;
        wdog->min_timeout       = 1;
        wdog->max_timeout       = IMX2_WDT_MAX_TIME;
+       wdog->parent            = &pdev->dev;
 
-       clk_prepare_enable(wdev->clk);
+       ret = clk_prepare_enable(wdev->clk);
+       if (ret)
+               return ret;
 
        regmap_read(wdev->regmap, IMX2_WDT_WRSR, &val);
        wdog->bootstatus = val & IMX2_WDT_WRSR_TOUT ? WDIOF_CARDRESET : 0;
@@ -286,7 +291,7 @@ static int __init imx2_wdt_probe(struct platform_device *pdev)
        ret = watchdog_register_device(wdog);
        if (ret) {
                dev_err(&pdev->dev, "cannot register watchdog device\n");
-               return ret;
+               goto disable_clk;
        }
 
        wdev->restart_handler.notifier_call = imx2_restart_handler;
@@ -299,6 +304,10 @@ static int __init imx2_wdt_probe(struct platform_device *pdev)
                 wdog->timeout, nowayout);
 
        return 0;
+
+disable_clk:
+       clk_disable_unprepare(wdev->clk);
+       return ret;
 }
 
 static int __exit imx2_wdt_remove(struct platform_device *pdev)
@@ -362,8 +371,11 @@ static int imx2_wdt_resume(struct device *dev)
 {
        struct watchdog_device *wdog = dev_get_drvdata(dev);
        struct imx2_wdt_device *wdev = watchdog_get_drvdata(wdog);
+       int ret;
 
-       clk_prepare_enable(wdev->clk);
+       ret = clk_prepare_enable(wdev->clk);
+       if (ret)
+               return ret;
 
        if (watchdog_active(wdog) && !imx2_wdt_is_running(wdev)) {
                /*
index 08da3114accb5981958fc8c4c96e2911714c10a8..f36ca4be07207a9fd200d92577e9242a10e774df 100644 (file)
@@ -39,10 +39,22 @@ static bool nowayout  = WATCHDOG_NOWAYOUT;
 #define MAX6369_WDSET  (7 << 0)
 #define MAX6369_WDI    (1 << 3)
 
-static DEFINE_SPINLOCK(io_lock);
+#define MAX6369_WDSET_DISABLED 3
 
 static int nodelay;
-static void __iomem    *wdt_base;
+
+struct max63xx_wdt {
+       struct watchdog_device wdd;
+       const struct max63xx_timeout *timeout;
+
+       /* memory mapping */
+       void __iomem *base;
+       spinlock_t lock;
+
+       /* WDI and WSET bits write access routines */
+       void (*ping)(struct max63xx_wdt *wdt);
+       void (*set)(struct max63xx_wdt *wdt, u8 set);
+};
 
 /*
  * The timeout values used are actually the absolute minimum the chip
@@ -59,25 +71,25 @@ static void __iomem *wdt_base;
 
 /* Timeouts in second */
 struct max63xx_timeout {
-       u8 wdset;
-       u8 tdelay;
-       u8 twd;
+       const u8 wdset;
+       const u8 tdelay;
+       const u8 twd;
 };
 
-static struct max63xx_timeout max6369_table[] = {
+static const struct max63xx_timeout max6369_table[] = {
        { 5,  1,  1 },
        { 6, 10, 10 },
        { 7, 60, 60 },
        { },
 };
 
-static struct max63xx_timeout max6371_table[] = {
+static const struct max63xx_timeout max6371_table[] = {
        { 6, 60,  3 },
        { 7, 60, 60 },
        { },
 };
 
-static struct max63xx_timeout max6373_table[] = {
+static const struct max63xx_timeout max6373_table[] = {
        { 2, 60,  1 },
        { 5,  0,  1 },
        { 1,  3,  3 },
@@ -86,8 +98,6 @@ static struct max63xx_timeout max6373_table[] = {
        { },
 };
 
-static struct max63xx_timeout *current_timeout;
-
 static struct max63xx_timeout *
 max63xx_select_timeout(struct max63xx_timeout *table, int value)
 {
@@ -108,59 +118,32 @@ max63xx_select_timeout(struct max63xx_timeout *table, int value)
 
 static int max63xx_wdt_ping(struct watchdog_device *wdd)
 {
-       u8 val;
-
-       spin_lock(&io_lock);
+       struct max63xx_wdt *wdt = watchdog_get_drvdata(wdd);
 
-       val = __raw_readb(wdt_base);
-
-       __raw_writeb(val | MAX6369_WDI, wdt_base);
-       __raw_writeb(val & ~MAX6369_WDI, wdt_base);
-
-       spin_unlock(&io_lock);
+       wdt->ping(wdt);
        return 0;
 }
 
 static int max63xx_wdt_start(struct watchdog_device *wdd)
 {
-       struct max63xx_timeout *entry = watchdog_get_drvdata(wdd);
-       u8 val;
+       struct max63xx_wdt *wdt = watchdog_get_drvdata(wdd);
 
-       spin_lock(&io_lock);
-
-       val = __raw_readb(wdt_base);
-       val &= ~MAX6369_WDSET;
-       val |= entry->wdset;
-       __raw_writeb(val, wdt_base);
-
-       spin_unlock(&io_lock);
+       wdt->set(wdt, wdt->timeout->wdset);
 
        /* check for an edge triggered startup */
-       if (entry->tdelay == 0)
-               max63xx_wdt_ping(wdd);
+       if (wdt->timeout->tdelay == 0)
+               wdt->ping(wdt);
        return 0;
 }
 
 static int max63xx_wdt_stop(struct watchdog_device *wdd)
 {
-       u8 val;
+       struct max63xx_wdt *wdt = watchdog_get_drvdata(wdd);
 
-       spin_lock(&io_lock);
-
-       val = __raw_readb(wdt_base);
-       val &= ~MAX6369_WDSET;
-       val |= 3;
-       __raw_writeb(val, wdt_base);
-
-       spin_unlock(&io_lock);
+       wdt->set(wdt, MAX6369_WDSET_DISABLED);
        return 0;
 }
 
-static const struct watchdog_info max63xx_wdt_info = {
-       .options = WDIOF_KEEPALIVEPING | WDIOF_MAGICCLOSE,
-       .identity = "max63xx Watchdog",
-};
-
 static const struct watchdog_ops max63xx_wdt_ops = {
        .owner = THIS_MODULE,
        .start = max63xx_wdt_start,
@@ -168,53 +151,108 @@ static const struct watchdog_ops max63xx_wdt_ops = {
        .ping = max63xx_wdt_ping,
 };
 
-static struct watchdog_device max63xx_wdt_dev = {
-       .info = &max63xx_wdt_info,
-       .ops = &max63xx_wdt_ops,
+static const struct watchdog_info max63xx_wdt_info = {
+       .options = WDIOF_KEEPALIVEPING | WDIOF_MAGICCLOSE,
+       .identity = "max63xx Watchdog",
 };
 
+static void max63xx_mmap_ping(struct max63xx_wdt *wdt)
+{
+       u8 val;
+
+       spin_lock(&wdt->lock);
+
+       val = __raw_readb(wdt->base);
+
+       __raw_writeb(val | MAX6369_WDI, wdt->base);
+       __raw_writeb(val & ~MAX6369_WDI, wdt->base);
+
+       spin_unlock(&wdt->lock);
+}
+
+static void max63xx_mmap_set(struct max63xx_wdt *wdt, u8 set)
+{
+       u8 val;
+
+       spin_lock(&wdt->lock);
+
+       val = __raw_readb(wdt->base);
+       val &= ~MAX6369_WDSET;
+       val |= set & MAX6369_WDSET;
+       __raw_writeb(val, wdt->base);
+
+       spin_unlock(&wdt->lock);
+}
+
+static int max63xx_mmap_init(struct platform_device *p, struct max63xx_wdt *wdt)
+{
+       struct resource *mem = platform_get_resource(p, IORESOURCE_MEM, 0);
+
+       wdt->base = devm_ioremap_resource(&p->dev, mem);
+       if (IS_ERR(wdt->base))
+               return PTR_ERR(wdt->base);
+
+       spin_lock_init(&wdt->lock);
+
+       wdt->ping = max63xx_mmap_ping;
+       wdt->set = max63xx_mmap_set;
+       return 0;
+}
+
 static int max63xx_wdt_probe(struct platform_device *pdev)
 {
-       struct resource *wdt_mem;
+       struct max63xx_wdt *wdt;
        struct max63xx_timeout *table;
+       int err;
+
+       wdt = devm_kzalloc(&pdev->dev, sizeof(*wdt), GFP_KERNEL);
+       if (!wdt)
+               return -ENOMEM;
 
        table = (struct max63xx_timeout *)pdev->id_entry->driver_data;
 
        if (heartbeat < 1 || heartbeat > MAX_HEARTBEAT)
                heartbeat = DEFAULT_HEARTBEAT;
 
-       dev_info(&pdev->dev, "requesting %ds heartbeat\n", heartbeat);
-       current_timeout = max63xx_select_timeout(table, heartbeat);
-
-       if (!current_timeout) {
-               dev_err(&pdev->dev, "unable to satisfy heartbeat request\n");
+       wdt->timeout = max63xx_select_timeout(table, heartbeat);
+       if (!wdt->timeout) {
+               dev_err(&pdev->dev, "unable to satisfy %ds heartbeat request\n",
+                       heartbeat);
                return -EINVAL;
        }
 
-       dev_info(&pdev->dev, "using %ds heartbeat with %ds initial delay\n",
-                current_timeout->twd, current_timeout->tdelay);
+       err = max63xx_mmap_init(pdev, wdt);
+       if (err)
+               return err;
+
+       platform_set_drvdata(pdev, &wdt->wdd);
+       watchdog_set_drvdata(&wdt->wdd, wdt);
 
-       heartbeat = current_timeout->twd;
+       wdt->wdd.parent = &pdev->dev;
+       wdt->wdd.timeout = wdt->timeout->twd;
+       wdt->wdd.info = &max63xx_wdt_info;
+       wdt->wdd.ops = &max63xx_wdt_ops;
 
-       wdt_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       wdt_base = devm_ioremap_resource(&pdev->dev, wdt_mem);
-       if (IS_ERR(wdt_base))
-               return PTR_ERR(wdt_base);
+       watchdog_set_nowayout(&wdt->wdd, nowayout);
 
-       max63xx_wdt_dev.timeout = heartbeat;
-       watchdog_set_nowayout(&max63xx_wdt_dev, nowayout);
-       watchdog_set_drvdata(&max63xx_wdt_dev, current_timeout);
+       err = watchdog_register_device(&wdt->wdd);
+       if (err)
+               return err;
 
-       return watchdog_register_device(&max63xx_wdt_dev);
+       dev_info(&pdev->dev, "using %ds heartbeat with %ds initial delay\n",
+                wdt->timeout->twd, wdt->timeout->tdelay);
+       return 0;
 }
 
 static int max63xx_wdt_remove(struct platform_device *pdev)
 {
-       watchdog_unregister_device(&max63xx_wdt_dev);
+       struct watchdog_device *wdd = platform_get_drvdata(pdev);
+
+       watchdog_unregister_device(wdd);
        return 0;
 }
 
-static struct platform_device_id max63xx_id_table[] = {
+static const struct platform_device_id max63xx_id_table[] = {
        { "max6369_wdt", (kernel_ulong_t)max6369_table, },
        { "max6370_wdt", (kernel_ulong_t)max6369_table, },
        { "max6371_wdt", (kernel_ulong_t)max6371_table, },
index 96dbba9805796938f2c64a85af8a66ccfdd58ac6..d193a5e79c381775ba683a5cf2153d8b0e5619c7 100644 (file)
@@ -208,14 +208,15 @@ static int a21_wdt_probe(struct platform_device *pdev)
        else if (reset == 7)
                a21_wdt.bootstatus |= WDIOF_EXTERN2;
 
+       drv->wdt = a21_wdt;
+       dev_set_drvdata(&pdev->dev, drv);
+
        ret = watchdog_register_device(&a21_wdt);
        if (ret) {
                dev_err(&pdev->dev, "Cannot register watchdog device\n");
                goto err_register_wd;
        }
 
-       dev_set_drvdata(&pdev->dev, drv);
-
        dev_info(&pdev->dev, "MEN A21 watchdog timer driver enabled\n");
 
        return 0;
index 1e6be9e405779884c315de023a8aad18e5c07419..de911c7e477c2875fe3633bce5a72a6b45fb95c0 100644 (file)
@@ -53,7 +53,15 @@ static unsigned timer_margin;
 module_param(timer_margin, uint, 0);
 MODULE_PARM_DESC(timer_margin, "initial watchdog timeout (in seconds)");
 
+#define to_omap_wdt_dev(_wdog) container_of(_wdog, struct omap_wdt_dev, wdog)
+
+static bool early_enable;
+module_param(early_enable, bool, 0);
+MODULE_PARM_DESC(early_enable,
+       "Watchdog is started on module insertion (default=0)");
+
 struct omap_wdt_dev {
+       struct watchdog_device wdog;
        void __iomem    *base;          /* physical */
        struct device   *dev;
        bool            omap_wdt_users;
@@ -123,7 +131,7 @@ static void omap_wdt_set_timer(struct omap_wdt_dev *wdev,
 
 static int omap_wdt_start(struct watchdog_device *wdog)
 {
-       struct omap_wdt_dev *wdev = watchdog_get_drvdata(wdog);
+       struct omap_wdt_dev *wdev = to_omap_wdt_dev(wdog);
        void __iomem *base = wdev->base;
 
        mutex_lock(&wdev->lock);
@@ -132,6 +140,13 @@ static int omap_wdt_start(struct watchdog_device *wdog)
 
        pm_runtime_get_sync(wdev->dev);
 
+       /*
+        * Make sure the watchdog is disabled. This is unfortunately required
+        * because writing to various registers with the watchdog running has no
+        * effect.
+        */
+       omap_wdt_disable(wdev);
+
        /* initialize prescaler */
        while (readl_relaxed(base + OMAP_WATCHDOG_WPS) & 0x01)
                cpu_relax();
@@ -151,7 +166,7 @@ static int omap_wdt_start(struct watchdog_device *wdog)
 
 static int omap_wdt_stop(struct watchdog_device *wdog)
 {
-       struct omap_wdt_dev *wdev = watchdog_get_drvdata(wdog);
+       struct omap_wdt_dev *wdev = to_omap_wdt_dev(wdog);
 
        mutex_lock(&wdev->lock);
        omap_wdt_disable(wdev);
@@ -163,7 +178,7 @@ static int omap_wdt_stop(struct watchdog_device *wdog)
 
 static int omap_wdt_ping(struct watchdog_device *wdog)
 {
-       struct omap_wdt_dev *wdev = watchdog_get_drvdata(wdog);
+       struct omap_wdt_dev *wdev = to_omap_wdt_dev(wdog);
 
        mutex_lock(&wdev->lock);
        omap_wdt_reload(wdev);
@@ -175,7 +190,7 @@ static int omap_wdt_ping(struct watchdog_device *wdog)
 static int omap_wdt_set_timeout(struct watchdog_device *wdog,
                                unsigned int timeout)
 {
-       struct omap_wdt_dev *wdev = watchdog_get_drvdata(wdog);
+       struct omap_wdt_dev *wdev = to_omap_wdt_dev(wdog);
 
        mutex_lock(&wdev->lock);
        omap_wdt_disable(wdev);
@@ -188,6 +203,16 @@ static int omap_wdt_set_timeout(struct watchdog_device *wdog,
        return 0;
 }
 
+static unsigned int omap_wdt_get_timeleft(struct watchdog_device *wdog)
+{
+       struct omap_wdt_dev *wdev = watchdog_get_drvdata(wdog);
+       void __iomem *base = wdev->base;
+       u32 value;
+
+       value = readl_relaxed(base + OMAP_WATCHDOG_CRR);
+       return GET_WCCR_SECS(value);
+}
+
 static const struct watchdog_info omap_wdt_info = {
        .options = WDIOF_SETTIMEOUT | WDIOF_MAGICCLOSE | WDIOF_KEEPALIVEPING,
        .identity = "OMAP Watchdog",
@@ -199,21 +224,16 @@ static const struct watchdog_ops omap_wdt_ops = {
        .stop           = omap_wdt_stop,
        .ping           = omap_wdt_ping,
        .set_timeout    = omap_wdt_set_timeout,
+       .get_timeleft   = omap_wdt_get_timeleft,
 };
 
 static int omap_wdt_probe(struct platform_device *pdev)
 {
        struct omap_wd_timer_platform_data *pdata = dev_get_platdata(&pdev->dev);
-       struct watchdog_device *omap_wdt;
        struct resource *res;
        struct omap_wdt_dev *wdev;
-       u32 rs;
        int ret;
 
-       omap_wdt = devm_kzalloc(&pdev->dev, sizeof(*omap_wdt), GFP_KERNEL);
-       if (!omap_wdt)
-               return -ENOMEM;
-
        wdev = devm_kzalloc(&pdev->dev, sizeof(*wdev), GFP_KERNEL);
        if (!wdev)
                return -ENOMEM;
@@ -229,35 +249,30 @@ static int omap_wdt_probe(struct platform_device *pdev)
        if (IS_ERR(wdev->base))
                return PTR_ERR(wdev->base);
 
-       omap_wdt->info        = &omap_wdt_info;
-       omap_wdt->ops         = &omap_wdt_ops;
-       omap_wdt->min_timeout = TIMER_MARGIN_MIN;
-       omap_wdt->max_timeout = TIMER_MARGIN_MAX;
+       wdev->wdog.info = &omap_wdt_info;
+       wdev->wdog.ops = &omap_wdt_ops;
+       wdev->wdog.min_timeout = TIMER_MARGIN_MIN;
+       wdev->wdog.max_timeout = TIMER_MARGIN_MAX;
 
-       if (timer_margin >= TIMER_MARGIN_MIN &&
-           timer_margin <= TIMER_MARGIN_MAX)
-               omap_wdt->timeout = timer_margin;
-       else
-               omap_wdt->timeout = TIMER_MARGIN_DEFAULT;
+       if (watchdog_init_timeout(&wdev->wdog, timer_margin, &pdev->dev) < 0)
+               wdev->wdog.timeout = TIMER_MARGIN_DEFAULT;
 
-       watchdog_set_drvdata(omap_wdt, wdev);
-       watchdog_set_nowayout(omap_wdt, nowayout);
+       watchdog_set_nowayout(&wdev->wdog, nowayout);
 
-       platform_set_drvdata(pdev, omap_wdt);
+       platform_set_drvdata(pdev, wdev);
 
        pm_runtime_enable(wdev->dev);
        pm_runtime_get_sync(wdev->dev);
 
-       if (pdata && pdata->read_reset_sources)
-               rs = pdata->read_reset_sources();
-       else
-               rs = 0;
-       omap_wdt->bootstatus = (rs & (1 << OMAP_MPU_WD_RST_SRC_ID_SHIFT)) ?
-                               WDIOF_CARDRESET : 0;
+       if (pdata && pdata->read_reset_sources) {
+               u32 rs = pdata->read_reset_sources();
+               if (rs & (1 << OMAP_MPU_WD_RST_SRC_ID_SHIFT))
+                       wdev->wdog.bootstatus = WDIOF_CARDRESET;
+       }
 
        omap_wdt_disable(wdev);
 
-       ret = watchdog_register_device(omap_wdt);
+       ret = watchdog_register_device(&wdev->wdog);
        if (ret) {
                pm_runtime_disable(wdev->dev);
                return ret;
@@ -265,17 +280,19 @@ static int omap_wdt_probe(struct platform_device *pdev)
 
        pr_info("OMAP Watchdog Timer Rev 0x%02x: initial timeout %d sec\n",
                readl_relaxed(wdev->base + OMAP_WATCHDOG_REV) & 0xFF,
-               omap_wdt->timeout);
+               wdev->wdog.timeout);
 
        pm_runtime_put_sync(wdev->dev);
 
+       if (early_enable)
+               omap_wdt_start(&wdev->wdog);
+
        return 0;
 }
 
 static void omap_wdt_shutdown(struct platform_device *pdev)
 {
-       struct watchdog_device *wdog = platform_get_drvdata(pdev);
-       struct omap_wdt_dev *wdev = watchdog_get_drvdata(wdog);
+       struct omap_wdt_dev *wdev = platform_get_drvdata(pdev);
 
        mutex_lock(&wdev->lock);
        if (wdev->omap_wdt_users) {
@@ -287,11 +304,10 @@ static void omap_wdt_shutdown(struct platform_device *pdev)
 
 static int omap_wdt_remove(struct platform_device *pdev)
 {
-       struct watchdog_device *wdog = platform_get_drvdata(pdev);
-       struct omap_wdt_dev *wdev = watchdog_get_drvdata(wdog);
+       struct omap_wdt_dev *wdev = platform_get_drvdata(pdev);
 
        pm_runtime_disable(wdev->dev);
-       watchdog_unregister_device(wdog);
+       watchdog_unregister_device(&wdev->wdog);
 
        return 0;
 }
@@ -306,8 +322,7 @@ static int omap_wdt_remove(struct platform_device *pdev)
 
 static int omap_wdt_suspend(struct platform_device *pdev, pm_message_t state)
 {
-       struct watchdog_device *wdog = platform_get_drvdata(pdev);
-       struct omap_wdt_dev *wdev = watchdog_get_drvdata(wdog);
+       struct omap_wdt_dev *wdev = platform_get_drvdata(pdev);
 
        mutex_lock(&wdev->lock);
        if (wdev->omap_wdt_users) {
@@ -321,8 +336,7 @@ static int omap_wdt_suspend(struct platform_device *pdev, pm_message_t state)
 
 static int omap_wdt_resume(struct platform_device *pdev)
 {
-       struct watchdog_device *wdog = platform_get_drvdata(pdev);
-       struct omap_wdt_dev *wdev = watchdog_get_drvdata(wdog);
+       struct omap_wdt_dev *wdev = platform_get_drvdata(pdev);
 
        mutex_lock(&wdev->lock);
        if (wdev->omap_wdt_users) {
index 09b774cf75b9be8ebd83fefbdbc199e2c432a631..42f31ec5e90d0e0a3ff65034a322532635f12d12 100644 (file)
@@ -50,5 +50,6 @@
 
 #define PTV                    0       /* prescale */
 #define GET_WLDR_VAL(secs)     (0xffffffff - ((secs) * (32768/(1<<PTV))) + 1)
+#define GET_WCCR_SECS(val)     ((0xffffffff - (val) + 1) / (32768/(1<<PTV)))
 
 #endif                         /* _OMAP_WATCHDOG_H */
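The new GET_WCCR_SECS() is the inverse of the existing GET_WLDR_VAL() for PTV = 0, which is what lets omap_wdt_get_timeleft() convert the counter register back into seconds. A quick standalone round-trip check, illustrative only:

#include <stdio.h>

#define PTV                 0
#define GET_WLDR_VAL(secs)  (0xffffffff - ((secs) * (32768/(1<<PTV))) + 1)
#define GET_WCCR_SECS(val)  ((0xffffffff - (val) + 1) / (32768/(1<<PTV)))

int main(void)
{
        unsigned int secs[] = { 1, 10, 60 };

        for (int i = 0; i < 3; i++) {
                unsigned int wldr = GET_WLDR_VAL(secs[i]);

                /* round trip: the programmed seconds come back out unchanged */
                printf("%us -> 0x%08x -> %us\n", secs[i], wldr, GET_WCCR_SECS(wldr));
        }
        return 0;
}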
index f32be155212adfea313f3c45716749199af2296d..6785afdc0fcaabad1694f3e0c24b316ddb29c1bf 100644 (file)
@@ -197,7 +197,7 @@ static int st_wdog_probe(struct platform_device *pdev)
                return -EINVAL;
        }
 
-       /* LPC can either run in RTC or WDT mode */
+       /* LPC can run in clocksource, RTC or WDT mode */
        if (mode != ST_LPC_MODE_WDT)
                return -ENODEV;
 
index cec9b559647dee675439058efb07138fe866edb7..1a80594554133a4f7bbe03b965070342ca01f2de 100644 (file)
 static DEFINE_IDA(watchdog_ida);
 static struct class *watchdog_class;
 
+/*
+ * Deferred registration infrastructure.
+ *
+ * Sometimes a watchdog driver needs to be loaded as early as possible,
+ * for example when it is impossible to disable the watchdog. One way to do
+ * this is to raise the initcall level of the driver. But in that case the
+ * misc subsystem may not be ready yet (it comes up at subsys_initcall time),
+ * and watchdog_core needs miscdev to register the watchdog as a char device.
+ *
+ * The deferred registration infrastructure offers a way for the watchdog
+ * subsystem to register a watchdog properly even before miscdev is ready.
+ */
+
+static DEFINE_MUTEX(wtd_deferred_reg_mutex);
+static LIST_HEAD(wtd_deferred_reg_list);
+static bool wtd_deferred_reg_done;
+
+static int watchdog_deferred_registration_add(struct watchdog_device *wdd)
+{
+       list_add_tail(&wdd->deferred,
+                     &wtd_deferred_reg_list);
+       return 0;
+}
+
+static void watchdog_deferred_registration_del(struct watchdog_device *wdd)
+{
+       struct list_head *p, *n;
+       struct watchdog_device *wdd_tmp;
+
+       list_for_each_safe(p, n, &wtd_deferred_reg_list) {
+               wdd_tmp = list_entry(p, struct watchdog_device,
+                                    deferred);
+               if (wdd_tmp == wdd) {
+                       list_del(&wdd_tmp->deferred);
+                       break;
+               }
+       }
+}
+
 static void watchdog_check_min_max_timeout(struct watchdog_device *wdd)
 {
        /*
@@ -98,17 +137,7 @@ int watchdog_init_timeout(struct watchdog_device *wdd,
 }
 EXPORT_SYMBOL_GPL(watchdog_init_timeout);
 
-/**
- * watchdog_register_device() - register a watchdog device
- * @wdd: watchdog device
- *
- * Register a watchdog device with the kernel so that the
- * watchdog timer can be accessed from userspace.
- *
- * A zero is returned on success and a negative errno code for
- * failure.
- */
-int watchdog_register_device(struct watchdog_device *wdd)
+static int __watchdog_register_device(struct watchdog_device *wdd)
 {
        int ret, id, devno;
 
@@ -164,16 +193,33 @@ int watchdog_register_device(struct watchdog_device *wdd)
 
        return 0;
 }
-EXPORT_SYMBOL_GPL(watchdog_register_device);
 
 /**
- * watchdog_unregister_device() - unregister a watchdog device
- * @wdd: watchdog device to unregister
+ * watchdog_register_device() - register a watchdog device
+ * @wdd: watchdog device
  *
- * Unregister a watchdog device that was previously successfully
- * registered with watchdog_register_device().
+ * Register a watchdog device with the kernel so that the
+ * watchdog timer can be accessed from userspace.
+ *
+ * A zero is returned on success and a negative errno code for
+ * failure.
  */
-void watchdog_unregister_device(struct watchdog_device *wdd)
+
+int watchdog_register_device(struct watchdog_device *wdd)
+{
+       int ret;
+
+       mutex_lock(&wtd_deferred_reg_mutex);
+       if (wtd_deferred_reg_done)
+               ret = __watchdog_register_device(wdd);
+       else
+               ret = watchdog_deferred_registration_add(wdd);
+       mutex_unlock(&wtd_deferred_reg_mutex);
+       return ret;
+}
+EXPORT_SYMBOL_GPL(watchdog_register_device);
+
+static void __watchdog_unregister_device(struct watchdog_device *wdd)
 {
        int ret;
        int devno;
@@ -189,8 +235,43 @@ void watchdog_unregister_device(struct watchdog_device *wdd)
        ida_simple_remove(&watchdog_ida, wdd->id);
        wdd->dev = NULL;
 }
+
+/**
+ * watchdog_unregister_device() - unregister a watchdog device
+ * @wdd: watchdog device to unregister
+ *
+ * Unregister a watchdog device that was previously successfully
+ * registered with watchdog_register_device().
+ */
+
+void watchdog_unregister_device(struct watchdog_device *wdd)
+{
+       mutex_lock(&wtd_deferred_reg_mutex);
+       if (wtd_deferred_reg_done)
+               __watchdog_unregister_device(wdd);
+       else
+               watchdog_deferred_registration_del(wdd);
+       mutex_unlock(&wtd_deferred_reg_mutex);
+}
+
 EXPORT_SYMBOL_GPL(watchdog_unregister_device);
 
+static int __init watchdog_deferred_registration(void)
+{
+       mutex_lock(&wtd_deferred_reg_mutex);
+       wtd_deferred_reg_done = true;
+       while (!list_empty(&wtd_deferred_reg_list)) {
+               struct watchdog_device *wdd;
+
+               wdd = list_first_entry(&wtd_deferred_reg_list,
+                                      struct watchdog_device, deferred);
+               list_del(&wdd->deferred);
+               __watchdog_register_device(wdd);
+       }
+       mutex_unlock(&wtd_deferred_reg_mutex);
+       return 0;
+}
+
 static int __init watchdog_init(void)
 {
        int err;
@@ -207,6 +288,7 @@ static int __init watchdog_init(void)
                return err;
        }
 
+       watchdog_deferred_registration();
        return 0;
 }
 
@@ -217,7 +299,7 @@ static void __exit watchdog_exit(void)
        ida_destroy(&watchdog_ida);
 }
 
-subsys_initcall(watchdog_init);
+subsys_initcall_sync(watchdog_init);
 module_exit(watchdog_exit);
 
 MODULE_AUTHOR("Alan Cox <alan@lxorguk.ukuu.org.uk>");
index 38387950490eb8dbd07c85434ac59e5129af2c1f..96093ae369a5613938962b4970decda66c19d342 100644 (file)
@@ -39,8 +39,8 @@
 #include <asm/irq.h>
 #include <asm/idle.h>
 #include <asm/io_apic.h>
-#include <asm/xen/page.h>
 #include <asm/xen/pci.h>
+#include <xen/page.h>
 #endif
 #include <asm/sync_bitops.h>
 #include <asm/xen/hypercall.h>
index 417415d738d0f454da2d28d2815bb221d160613f..ed673e1acd6159a3ca34dc10238fef8936e43249 100644 (file)
 #include <asm/sync_bitops.h>
 #include <asm/xen/hypercall.h>
 #include <asm/xen/hypervisor.h>
-#include <asm/xen/page.h>
 
 #include <xen/xen.h>
 #include <xen/xen-ops.h>
 #include <xen/events.h>
 #include <xen/interface/xen.h>
 #include <xen/interface/event_channel.h>
+#include <xen/page.h>
 
 #include "events_internal.h"
 
index 89274850741b5e3ee457fa2bd19d6efaf16d1f5d..67b9163db7185402b0ff3811c5363c1a1022e2c7 100644 (file)
@@ -41,9 +41,9 @@
 #include <xen/balloon.h>
 #include <xen/gntdev.h>
 #include <xen/events.h>
+#include <xen/page.h>
 #include <asm/xen/hypervisor.h>
 #include <asm/xen/hypercall.h>
-#include <asm/xen/page.h>
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Derek G. Murray <Derek.Murray@cl.cam.ac.uk>, "
index b1c7170e5c9e1edf85049c4d43fc4d1b9f2b18f8..62f591f8763ccfd4519248566b6f1d49eb78ce64 100644 (file)
@@ -138,7 +138,6 @@ static struct gnttab_free_callback *gnttab_free_callback_list;
 static int gnttab_expand(unsigned int req_entries);
 
 #define RPP (PAGE_SIZE / sizeof(grant_ref_t))
-#define SPP (PAGE_SIZE / sizeof(grant_status_t))
 
 static inline grant_ref_t *__gnttab_entry(grant_ref_t entry)
 {
index 9e6a85104a20820ff16ef748804eb8c6a98b4201..d10effee9b9eb16d46a0bfea9b108b5f22863be3 100644 (file)
 #include <xen/grant_table.h>
 #include <xen/events.h>
 #include <xen/hvc-console.h>
+#include <xen/page.h>
 #include <xen/xen-ops.h>
 
 #include <asm/xen/hypercall.h>
-#include <asm/xen/page.h>
 #include <asm/xen/hypervisor.h>
 
 enum shutdown_state {
index d88f36754bf7efcd67750cce72ec607564bc6fd4..239738f944badfa3f12f3d61581ef5cb4d6910d4 100644 (file)
@@ -17,8 +17,8 @@
 
 #include <xen/xen.h>
 #include <xen/interface/xen.h>
+#include <xen/page.h>
 #include <asm/xen/hypercall.h>
-#include <asm/xen/page.h>
 #include <asm/xen/hypervisor.h>
 #include <xen/tmem.h>
 
@@ -389,7 +389,7 @@ static int __init xen_tmem_init(void)
        }
 #endif
 #ifdef CONFIG_CLEANCACHE
-       BUG_ON(sizeof(struct cleancache_filekey) != sizeof(struct tmem_oid));
+       BUILD_BUG_ON(sizeof(struct cleancache_filekey) != sizeof(struct tmem_oid));
        if (tmem_enabled && cleancache) {
                int err;
 
index 39223c3e99ad5277a3bc525647b7a58f7df668f8..9eeefd7cad41127dcade5d81c06647218abeaccc 100644 (file)
@@ -53,7 +53,6 @@
 
 #include <target/target_core_base.h>
 #include <target/target_core_fabric.h>
-#include <target/target_core_configfs.h>
 #include <target/target_core_fabric_configfs.h>
 
 #include <asm/hypervisor.h>
@@ -201,8 +200,6 @@ static LIST_HEAD(scsiback_free_pages);
 static DEFINE_MUTEX(scsiback_mutex);
 static LIST_HEAD(scsiback_list);
 
-static const struct target_core_fabric_ops scsiback_ops;
-
 static void scsiback_get(struct vscsibk_info *info)
 {
        atomic_inc(&info->nr_unreplied_reqs);
@@ -397,6 +394,7 @@ static void scsiback_cmd_exec(struct vscsibk_pend *pending_req)
        memset(se_cmd, 0, sizeof(*se_cmd));
 
        scsiback_get(pending_req->info);
+       se_cmd->tag = pending_req->rqid;
        rc = target_submit_cmd_map_sgls(se_cmd, sess, pending_req->cmnd,
                        pending_req->sense_buffer, pending_req->v2p->lun,
                        pending_req->data_len, 0,
@@ -863,7 +861,8 @@ static int scsiback_add_translation_entry(struct vscsibk_info *info,
        struct list_head *head = &(info->v2p_entry_lists);
        unsigned long flags;
        char *lunp;
-       unsigned int lun;
+       unsigned long long unpacked_lun;
+       struct se_lun *se_lun;
        struct scsiback_tpg *tpg_entry, *tpg = NULL;
        char *error = "doesn't exist";
 
@@ -874,24 +873,27 @@ static int scsiback_add_translation_entry(struct vscsibk_info *info,
        }
        *lunp = 0;
        lunp++;
-       if (kstrtouint(lunp, 10, &lun) || lun >= TRANSPORT_MAX_LUNS_PER_TPG) {
+       err = kstrtoull(lunp, 10, &unpacked_lun);
+       if (err < 0) {
                pr_err("lun number not valid: %s\n", lunp);
-               return -EINVAL;
+               return err;
        }
 
        mutex_lock(&scsiback_mutex);
        list_for_each_entry(tpg_entry, &scsiback_list, tv_tpg_list) {
                if (!strcmp(phy, tpg_entry->tport->tport_name) ||
                    !strcmp(phy, tpg_entry->param_alias)) {
-                       spin_lock(&tpg_entry->se_tpg.tpg_lun_lock);
-                       if (tpg_entry->se_tpg.tpg_lun_list[lun]->lun_status ==
-                           TRANSPORT_LUN_STATUS_ACTIVE) {
-                               if (!tpg_entry->tpg_nexus)
-                                       error = "nexus undefined";
-                               else
-                                       tpg = tpg_entry;
+                       mutex_lock(&tpg_entry->se_tpg.tpg_lun_mutex);
+                       hlist_for_each_entry(se_lun, &tpg_entry->se_tpg.tpg_lun_hlist, link) {
+                               if (se_lun->unpacked_lun == unpacked_lun) {
+                                       if (!tpg_entry->tpg_nexus)
+                                               error = "nexus undefined";
+                                       else
+                                               tpg = tpg_entry;
+                                       break;
+                               }
                        }
-                       spin_unlock(&tpg_entry->se_tpg.tpg_lun_lock);
+                       mutex_unlock(&tpg_entry->se_tpg.tpg_lun_mutex);
                        break;
                }
        }
@@ -903,7 +905,7 @@ static int scsiback_add_translation_entry(struct vscsibk_info *info,
        mutex_unlock(&scsiback_mutex);
 
        if (!tpg) {
-               pr_err("%s:%d %s\n", phy, lun, error);
+               pr_err("%s:%llu %s\n", phy, unpacked_lun, error);
                return -ENODEV;
        }
 
@@ -931,7 +933,7 @@ static int scsiback_add_translation_entry(struct vscsibk_info *info,
        kref_init(&new->kref);
        new->v = *v;
        new->tpg = tpg;
-       new->lun = lun;
+       new->lun = unpacked_lun;
        list_add_tail(&new->l, head);
 
 out:
@@ -1251,28 +1253,6 @@ static char *scsiback_dump_proto_id(struct scsiback_tport *tport)
        return "Unknown";
 }
 
-static u8 scsiback_get_fabric_proto_ident(struct se_portal_group *se_tpg)
-{
-       struct scsiback_tpg *tpg = container_of(se_tpg,
-                               struct scsiback_tpg, se_tpg);
-       struct scsiback_tport *tport = tpg->tport;
-
-       switch (tport->tport_proto_id) {
-       case SCSI_PROTOCOL_SAS:
-               return sas_get_fabric_proto_ident(se_tpg);
-       case SCSI_PROTOCOL_FCP:
-               return fc_get_fabric_proto_ident(se_tpg);
-       case SCSI_PROTOCOL_ISCSI:
-               return iscsi_get_fabric_proto_ident(se_tpg);
-       default:
-               pr_err("Unknown tport_proto_id: 0x%02x, using SAS emulation\n",
-                       tport->tport_proto_id);
-               break;
-       }
-
-       return sas_get_fabric_proto_ident(se_tpg);
-}
-
 static char *scsiback_get_fabric_wwn(struct se_portal_group *se_tpg)
 {
        struct scsiback_tpg *tpg = container_of(se_tpg,
@@ -1289,102 +1269,6 @@ static u16 scsiback_get_tag(struct se_portal_group *se_tpg)
        return tpg->tport_tpgt;
 }
 
-static u32 scsiback_get_default_depth(struct se_portal_group *se_tpg)
-{
-       return 1;
-}
-
-static u32
-scsiback_get_pr_transport_id(struct se_portal_group *se_tpg,
-                             struct se_node_acl *se_nacl,
-                             struct t10_pr_registration *pr_reg,
-                             int *format_code,
-                             unsigned char *buf)
-{
-       struct scsiback_tpg *tpg = container_of(se_tpg,
-                               struct scsiback_tpg, se_tpg);
-       struct scsiback_tport *tport = tpg->tport;
-
-       switch (tport->tport_proto_id) {
-       case SCSI_PROTOCOL_SAS:
-               return sas_get_pr_transport_id(se_tpg, se_nacl, pr_reg,
-                                       format_code, buf);
-       case SCSI_PROTOCOL_FCP:
-               return fc_get_pr_transport_id(se_tpg, se_nacl, pr_reg,
-                                       format_code, buf);
-       case SCSI_PROTOCOL_ISCSI:
-               return iscsi_get_pr_transport_id(se_tpg, se_nacl, pr_reg,
-                                       format_code, buf);
-       default:
-               pr_err("Unknown tport_proto_id: 0x%02x, using SAS emulation\n",
-                       tport->tport_proto_id);
-               break;
-       }
-
-       return sas_get_pr_transport_id(se_tpg, se_nacl, pr_reg,
-                       format_code, buf);
-}
-
-static u32
-scsiback_get_pr_transport_id_len(struct se_portal_group *se_tpg,
-                                 struct se_node_acl *se_nacl,
-                                 struct t10_pr_registration *pr_reg,
-                                 int *format_code)
-{
-       struct scsiback_tpg *tpg = container_of(se_tpg,
-                               struct scsiback_tpg, se_tpg);
-       struct scsiback_tport *tport = tpg->tport;
-
-       switch (tport->tport_proto_id) {
-       case SCSI_PROTOCOL_SAS:
-               return sas_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,
-                                       format_code);
-       case SCSI_PROTOCOL_FCP:
-               return fc_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,
-                                       format_code);
-       case SCSI_PROTOCOL_ISCSI:
-               return iscsi_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,
-                                       format_code);
-       default:
-               pr_err("Unknown tport_proto_id: 0x%02x, using SAS emulation\n",
-                       tport->tport_proto_id);
-               break;
-       }
-
-       return sas_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,
-                       format_code);
-}
-
-static char *
-scsiback_parse_pr_out_transport_id(struct se_portal_group *se_tpg,
-                                   const char *buf,
-                                   u32 *out_tid_len,
-                                   char **port_nexus_ptr)
-{
-       struct scsiback_tpg *tpg = container_of(se_tpg,
-                               struct scsiback_tpg, se_tpg);
-       struct scsiback_tport *tport = tpg->tport;
-
-       switch (tport->tport_proto_id) {
-       case SCSI_PROTOCOL_SAS:
-               return sas_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,
-                                       port_nexus_ptr);
-       case SCSI_PROTOCOL_FCP:
-               return fc_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,
-                                       port_nexus_ptr);
-       case SCSI_PROTOCOL_ISCSI:
-               return iscsi_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,
-                                       port_nexus_ptr);
-       default:
-               pr_err("Unknown tport_proto_id: 0x%02x, using SAS emulation\n",
-                       tport->tport_proto_id);
-               break;
-       }
-
-       return sas_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,
-                       port_nexus_ptr);
-}
-
 static struct se_wwn *
 scsiback_make_tport(struct target_fabric_configfs *tf,
                     struct config_group *group,
@@ -1451,19 +1335,6 @@ static void scsiback_drop_tport(struct se_wwn *wwn)
        kfree(tport);
 }
 
-static struct se_node_acl *
-scsiback_alloc_fabric_acl(struct se_portal_group *se_tpg)
-{
-       return kzalloc(sizeof(struct se_node_acl), GFP_KERNEL);
-}
-
-static void
-scsiback_release_fabric_acl(struct se_portal_group *se_tpg,
-                            struct se_node_acl *se_nacl)
-{
-       kfree(se_nacl);
-}
-
 static u32 scsiback_tpg_get_inst_index(struct se_portal_group *se_tpg)
 {
        return 1;
@@ -1522,14 +1393,6 @@ static void scsiback_set_default_node_attrs(struct se_node_acl *nacl)
 {
 }
 
-static u32 scsiback_get_task_tag(struct se_cmd *se_cmd)
-{
-       struct vscsibk_pend *pending_req = container_of(se_cmd,
-                               struct vscsibk_pend, se_cmd);
-
-       return pending_req->rqid;
-}
-
 static int scsiback_get_cmd_state(struct se_cmd *se_cmd)
 {
        return 0;
@@ -1898,8 +1761,7 @@ scsiback_make_tpg(struct se_wwn *wwn,
        tpg->tport = tport;
        tpg->tport_tpgt = tpgt;
 
-       ret = core_tpg_register(&scsiback_ops, wwn,
-                               &tpg->se_tpg, tpg, TRANSPORT_TPG_TYPE_NORMAL);
+       ret = core_tpg_register(wwn, &tpg->se_tpg, tport->tport_proto_id);
        if (ret < 0) {
                kfree(tpg);
                return NULL;
@@ -1944,23 +1806,15 @@ static const struct target_core_fabric_ops scsiback_ops = {
        .module                         = THIS_MODULE,
        .name                           = "xen-pvscsi",
        .get_fabric_name                = scsiback_get_fabric_name,
-       .get_fabric_proto_ident         = scsiback_get_fabric_proto_ident,
        .tpg_get_wwn                    = scsiback_get_fabric_wwn,
        .tpg_get_tag                    = scsiback_get_tag,
-       .tpg_get_default_depth          = scsiback_get_default_depth,
-       .tpg_get_pr_transport_id        = scsiback_get_pr_transport_id,
-       .tpg_get_pr_transport_id_len    = scsiback_get_pr_transport_id_len,
-       .tpg_parse_pr_out_transport_id  = scsiback_parse_pr_out_transport_id,
        .tpg_check_demo_mode            = scsiback_check_true,
        .tpg_check_demo_mode_cache      = scsiback_check_true,
        .tpg_check_demo_mode_write_protect = scsiback_check_false,
        .tpg_check_prod_mode_write_protect = scsiback_check_false,
-       .tpg_alloc_fabric_acl           = scsiback_alloc_fabric_acl,
-       .tpg_release_fabric_acl         = scsiback_release_fabric_acl,
        .tpg_get_inst_index             = scsiback_tpg_get_inst_index,
        .check_stop_free                = scsiback_check_stop_free,
        .release_cmd                    = scsiback_release_cmd,
-       .put_session                    = NULL,
        .shutdown_session               = scsiback_shutdown_session,
        .close_session                  = scsiback_close_session,
        .sess_get_index                 = scsiback_sess_get_index,
@@ -1968,7 +1822,6 @@ static const struct target_core_fabric_ops scsiback_ops = {
        .write_pending                  = scsiback_write_pending,
        .write_pending_status           = scsiback_write_pending_status,
        .set_default_node_attributes    = scsiback_set_default_node_attrs,
-       .get_task_tag                   = scsiback_get_task_tag,
        .get_cmd_state                  = scsiback_get_cmd_state,
        .queue_data_in                  = scsiback_queue_data_in,
        .queue_status                   = scsiback_queue_status,
@@ -1983,12 +1836,6 @@ static const struct target_core_fabric_ops scsiback_ops = {
        .fabric_drop_tpg                = scsiback_drop_tpg,
        .fabric_post_link               = scsiback_port_link,
        .fabric_pre_unlink              = scsiback_port_unlink,
-       .fabric_make_np                 = NULL,
-       .fabric_drop_np                 = NULL,
-#if 0
-       .fabric_make_nodeacl            = scsiback_make_nodeacl,
-       .fabric_drop_nodeacl            = scsiback_drop_nodeacl,
-#endif
 
        .tfc_wwn_attrs                  = scsiback_wwn_attrs,
        .tfc_tpg_base_attrs             = scsiback_tpg_attrs,
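
Every scsiback helper removed above followed the same shape: switch on tport->tport_proto_id, forward to the matching sas_*/fc_*/iscsi_* routine, and fall back to SAS emulation for an unknown protocol id. With core_tpg_register() now taking tport->tport_proto_id directly, that per-fabric switch can presumably live once in target core instead of being copied into every fabric driver. A minimal standalone sketch of the dispatch shape follows (all names and return values are illustrative, not the target core API):

    #include <stdio.h>

    enum proto_id { PROTO_SAS, PROTO_FCP, PROTO_ISCSI, PROTO_BOGUS };

    /* Placeholder per-protocol helpers standing in for the real
     * sas_/fc_/iscsi_ routines the removed wrappers forwarded to. */
    static unsigned char sas_ident(void)   { return 1; }
    static unsigned char fc_ident(void)    { return 2; }
    static unsigned char iscsi_ident(void) { return 3; }

    static unsigned char get_proto_ident(enum proto_id id)
    {
        switch (id) {
        case PROTO_SAS:
            return sas_ident();
        case PROTO_FCP:
            return fc_ident();
        case PROTO_ISCSI:
            return iscsi_ident();
        default:
            fprintf(stderr, "unknown proto id %d, using SAS emulation\n", id);
            return sas_ident();
        }
    }

    int main(void)
    {
        printf("%u\n", get_proto_ident(PROTO_BOGUS));  /* falls back to SAS */
        return 0;
    }
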
index 96b2011d25f35f628288c523dea84c4150e8ce35..9ad327238ba931243967455b5790916dc6b184f1 100644 (file)
@@ -37,7 +37,7 @@
 #include <linux/vmalloc.h>
 #include <linux/export.h>
 #include <asm/xen/hypervisor.h>
-#include <asm/xen/page.h>
+#include <xen/page.h>
 #include <xen/interface/xen.h>
 #include <xen/interface/event_channel.h>
 #include <xen/balloon.h>
@@ -379,16 +379,16 @@ int xenbus_grant_ring(struct xenbus_device *dev, void *vaddr,
        int i, j;
 
        for (i = 0; i < nr_pages; i++) {
-               unsigned long addr = (unsigned long)vaddr +
-                       (PAGE_SIZE * i);
                err = gnttab_grant_foreign_access(dev->otherend_id,
-                                                 virt_to_mfn(addr), 0);
+                                                 virt_to_mfn(vaddr), 0);
                if (err < 0) {
                        xenbus_dev_fatal(dev, err,
                                         "granting access to ring page");
                        goto fail;
                }
                grefs[i] = err;
+
+               vaddr = vaddr + PAGE_SIZE;
        }
 
        return 0;
index 5390a674b5e3a8d8ea62112343be83a34be58547..4308fb3cf7c2f717ffd446035f1c30b61f8dacf8 100644 (file)
@@ -742,7 +742,7 @@ static int xenbus_resume_cb(struct notifier_block *nb,
        int err = 0;
 
        if (xen_hvm_domain()) {
-               uint64_t v;
+               uint64_t v = 0;
 
                err = hvm_get_parameter(HVM_PARAM_STORE_EVTCHN, &v);
                if (!err && v)
index a19c31d3f369469ec5771ca5cf61c371b5865881..4d4a0df8344fe2a7d9316d91e890a62a40a34172 100644 (file)
@@ -242,7 +242,7 @@ static struct kmem_cache *adfs_inode_cachep;
 static struct inode *adfs_alloc_inode(struct super_block *sb)
 {
        struct adfs_inode_info *ei;
-       ei = (struct adfs_inode_info *)kmem_cache_alloc(adfs_inode_cachep, GFP_KERNEL);
+       ei = kmem_cache_alloc(adfs_inode_cachep, GFP_KERNEL);
        if (!ei)
                return NULL;
        return &ei->vfs_inode;
index cffe8370fb444c692b79f787155d08d163be0914..c69a87eaf57d147bfae947335e95fa5ae2be4452 100644 (file)
@@ -64,7 +64,7 @@ struct affs_inode_info {
 /* short cut to get to the affs specific inode data */
 static inline struct affs_inode_info *AFFS_I(struct inode *inode)
 {
-       return list_entry(inode, struct affs_inode_info, vfs_inode);
+       return container_of(inode, struct affs_inode_info, vfs_inode);
 }
 
 /*
index a8f463c028ce9e9c52862e01a21de82a5b7e2f09..5fa92bc790ef7e960b5f3b1fc1501eb1842e5b5e 100644 (file)
@@ -140,7 +140,7 @@ affs_remove_link(struct dentry *dentry)
 {
        struct inode *dir, *inode = d_inode(dentry);
        struct super_block *sb = inode->i_sb;
-       struct buffer_head *bh = NULL, *link_bh = NULL;
+       struct buffer_head *bh, *link_bh = NULL;
        u32 link_ino, ino;
        int retval;
 
index a022f4accd76b801566564b7c6de2d38f2e1cdfb..17349500592d55b8b0a9a087809301c05995c696 100644 (file)
@@ -346,7 +346,7 @@ affs_add_entry(struct inode *dir, struct inode *inode, struct dentry *dentry, s3
 {
        struct super_block *sb = dir->i_sb;
        struct buffer_head *inode_bh = NULL;
-       struct buffer_head *bh = NULL;
+       struct buffer_head *bh;
        u32 block = 0;
        int retval;
 
index f39b71c3981e7d5db936820f07f0ca722314a6af..ea5b69a18ba9ce9165d8ea083bfffbb40e13a600 100644 (file)
@@ -16,14 +16,12 @@ static int affs_symlink_readpage(struct file *file, struct page *page)
        struct inode *inode = page->mapping->host;
        char *link = kmap(page);
        struct slink_front *lf;
-       int err;
        int                      i, j;
        char                     c;
        char                     lc;
 
        pr_debug("follow_link(ino=%lu)\n", inode->i_ino);
 
-       err = -EIO;
        bh = affs_bread(inode->i_sb, inode->i_ino);
        if (!bh)
                goto fail;
@@ -66,7 +64,7 @@ fail:
        SetPageError(page);
        kunmap(page);
        unlock_page(page);
-       return err;
+       return -EIO;
 }
 
 const struct address_space_operations affs_symlink_aops = {
index 5b700ef1e59db31c9d8d71e9386566eb1c488774..c37149b929be50442ef40aacc36423eab9b264a2 100644 (file)
@@ -238,11 +238,6 @@ static inline u64 autofs4_get_ino(struct autofs_sb_info *sbi)
        return d_inode(sbi->sb->s_root)->i_ino;
 }
 
-static inline int simple_positive(struct dentry *dentry)
-{
-       return d_really_is_positive(dentry) && !d_unhashed(dentry);
-}
-
 static inline void __autofs4_add_expiring(struct dentry *dentry)
 {
        struct autofs_sb_info *sbi = autofs4_sbi(dentry->d_sb);
index 1fead8d56a98b751b74456f423fa5bacf6a3c6ba..35d19e8731e35b8aabfe9cdaaf32d11b9b17b326 100644 (file)
@@ -112,7 +112,7 @@ BEFS_SB(const struct super_block *super)
 static inline struct befs_inode_info *
 BEFS_I(const struct inode *inode)
 {
-       return list_entry(inode, struct befs_inode_info, vfs_inode);
+       return container_of(inode, struct befs_inode_info, vfs_inode);
 }
 
 static inline befs_blocknr_t
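
The AFFS_I() and BEFS_I() conversions above replace list_entry() with container_of() when recovering the filesystem-private inode from its embedded vfs_inode. In the kernel, list_entry() is simply defined as container_of(), so behaviour is unchanged; calling container_of() directly just stops implying that a list is involved. A small userspace sketch of the same offsetof-based recovery (simplified macro, illustrative structure names):

    #include <stddef.h>
    #include <stdio.h>

    /* Simplified version of the kernel macro: step back from a member
     * pointer to the structure that contains it. */
    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct vfs_inode { unsigned long ino; };

    struct befs_like_inode {
        int private_flags;
        struct vfs_inode vfs_inode;   /* embedded, as in the kernel */
    };

    static struct befs_like_inode *BEFS_LIKE_I(struct vfs_inode *inode)
    {
        return container_of(inode, struct befs_like_inode, vfs_inode);
    }

    int main(void)
    {
        struct befs_like_inode bi = { 42, { 7 } };

        /* Prints 42: the generic pointer leads back to its container. */
        printf("%d\n", BEFS_LIKE_I(&bi.vfs_inode)->private_flags);
        return 0;
    }
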
index cd46e415883090747d8238c2a2fbaa9b101dbc5e..6b659967898ebc534c1ce8d91ff1179f050e8f34 100644 (file)
@@ -1530,7 +1530,7 @@ static int fill_files_note(struct memelfnote *note)
                file = vma->vm_file;
                if (!file)
                        continue;
-               filename = d_path(&file->f_path, name_curpos, remaining);
+               filename = file_path(file, name_curpos, remaining);
                if (IS_ERR(filename)) {
                        if (PTR_ERR(filename) == -ENAMETOOLONG) {
                                vfree(data);
@@ -1540,7 +1540,7 @@ static int fill_files_note(struct memelfnote *note)
                        continue;
                }
 
-               /* d_path() fills at the end, move name down */
+               /* file_path() fills at the end, move name down */
                /* n = strlen(filename) + 1: */
                n = (name_curpos + remaining) - filename;
                remaining = filename - name_curpos;
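
The comment fix above tracks the d_path() to file_path() switch; both fill the name in at the end of the supplied buffer and return a pointer to its first character, which is why fill_files_note() can recover the stored length as (name_curpos + remaining) - filename and the space still free as filename - name_curpos. A tiny standalone sketch of that arithmetic with a stand-in formatter (not the VFS helper):

    #include <stdio.h>
    #include <string.h>

    /* Stand-in for d_path()/file_path(): store the string at the END of
     * buf[0..buflen) and return a pointer to its first character.
     * Assumes the name (plus NUL) fits in the buffer. */
    static char *fill_at_end(char *buf, size_t buflen, const char *name)
    {
        size_t n = strlen(name) + 1;          /* include the NUL */
        char *start = buf + buflen - n;
        memcpy(start, name, n);
        return start;
    }

    int main(void)
    {
        char buf[64];
        char *name_curpos = buf;
        size_t remaining = sizeof(buf);

        char *filename = fill_at_end(name_curpos, remaining, "/usr/bin/true");

        /* Same arithmetic as fill_files_note(): n = strlen(filename) + 1 ... */
        size_t n = (size_t)((name_curpos + remaining) - filename);
        /* ... and what is left in front of the stored name. */
        remaining = (size_t)(filename - name_curpos);

        printf("len=%zu remaining=%zu name=%s\n", n, remaining, filename);
        return 0;
    }
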
index 4fe10f93db8a3e52ebbb5330e94b80ee92455e1d..198243717da567bd5f47ad7c94ab823a82506c62 100644 (file)
@@ -152,6 +152,9 @@ blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, loff_t offset)
        struct file *file = iocb->ki_filp;
        struct inode *inode = file->f_mapping->host;
 
+       if (IS_DAX(inode))
+               return dax_do_io(iocb, inode, iter, offset, blkdev_get_block,
+                               NULL, DIO_SKIP_DIO_COUNT);
        return __blockdev_direct_IO(iocb, inode, I_BDEV(inode), iter, offset,
                                    blkdev_get_block, NULL, NULL,
                                    DIO_SKIP_DIO_COUNT);
@@ -443,6 +446,12 @@ long bdev_direct_access(struct block_device *bdev, sector_t sector,
        long avail;
        const struct block_device_operations *ops = bdev->bd_disk->fops;
 
+       /*
+        * The device driver is allowed to sleep, in order to make the
+        * memory directly accessible.
+        */
+       might_sleep();
+
        if (size < 0)
                return size;
        if (!ops->direct_access)
@@ -1170,6 +1179,7 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part)
                bdev->bd_disk = disk;
                bdev->bd_queue = disk->queue;
                bdev->bd_contains = bdev;
+               bdev->bd_inode->i_flags = disk->fops->direct_access ? S_DAX : 0;
                if (!partno) {
                        ret = -ENXIO;
                        bdev->bd_part = disk_get_part(disk, partno);
index 795d754327a7277de47d13e0f1426aaa5c7fd85c..b823fac91c9289bc67d3bb5191f4ce96e38294ac 100644 (file)
@@ -1748,7 +1748,7 @@ static ssize_t btrfs_file_write_iter(struct kiocb *iocb,
        }
 
        current->backing_dev_info = inode_to_bdi(inode);
-       err = file_remove_suid(file);
+       err = file_remove_privs(file);
        if (err) {
                mutex_unlock(&inode->i_mutex);
                goto out;
index 8c52472d2efa4ec745821e52d9deaa14bd440515..aecd0859eacbf28d9b3782527c178f8e05000e7c 100644 (file)
@@ -43,7 +43,6 @@ struct cachefiles_object {
        loff_t                          i_size;         /* object size */
        unsigned long                   flags;
 #define CACHEFILES_OBJECT_ACTIVE       0               /* T if marked active */
-#define CACHEFILES_OBJECT_BURIED       1               /* T if preemptively buried */
        atomic_t                        usage;          /* object usage count */
        uint8_t                         type;           /* object type */
        uint8_t                         new;            /* T if object new */
index ab857ab9f40d91a375a31030e1872f9ebd704837..fc1056f5c96a3c750c3c5c9009048b234a7fd745 100644 (file)
@@ -97,7 +97,8 @@ static noinline void cachefiles_printk_object(struct cachefiles_object *object,
  *   call vfs_unlink(), vfs_rmdir() or vfs_rename()
  */
 static void cachefiles_mark_object_buried(struct cachefiles_cache *cache,
-                                         struct dentry *dentry)
+                                         struct dentry *dentry,
+                                         enum fscache_why_object_killed why)
 {
        struct cachefiles_object *object;
        struct rb_node *p;
@@ -132,8 +133,9 @@ found_dentry:
                pr_err("\n");
                pr_err("Error: Can't preemptively bury live object\n");
                cachefiles_printk_object(object, NULL);
-       } else if (test_and_set_bit(CACHEFILES_OBJECT_BURIED, &object->flags)) {
-               pr_err("Error: Object already preemptively buried\n");
+       } else {
+               if (why != FSCACHE_OBJECT_IS_STALE)
+                       fscache_object_mark_killed(&object->fscache, why);
        }
 
        write_unlock(&cache->active_lock);
@@ -265,7 +267,8 @@ requeue:
 static int cachefiles_bury_object(struct cachefiles_cache *cache,
                                  struct dentry *dir,
                                  struct dentry *rep,
-                                 bool preemptive)
+                                 bool preemptive,
+                                 enum fscache_why_object_killed why)
 {
        struct dentry *grave, *trap;
        struct path path, path_to_graveyard;
@@ -289,7 +292,7 @@ static int cachefiles_bury_object(struct cachefiles_cache *cache,
                        ret = vfs_unlink(d_inode(dir), rep, NULL);
 
                        if (preemptive)
-                               cachefiles_mark_object_buried(cache, rep);
+                               cachefiles_mark_object_buried(cache, rep, why);
                }
 
                mutex_unlock(&d_inode(dir)->i_mutex);
@@ -394,7 +397,7 @@ try_again:
                                            "Rename failed with error %d", ret);
 
                if (preemptive)
-                       cachefiles_mark_object_buried(cache, rep);
+                       cachefiles_mark_object_buried(cache, rep, why);
        }
 
        unlock_rename(cache->graveyard, dir);
@@ -422,7 +425,7 @@ int cachefiles_delete_object(struct cachefiles_cache *cache,
 
        mutex_lock_nested(&d_inode(dir)->i_mutex, I_MUTEX_PARENT);
 
-       if (test_bit(CACHEFILES_OBJECT_BURIED, &object->flags)) {
+       if (test_bit(FSCACHE_OBJECT_KILLED_BY_CACHE, &object->fscache.flags)) {
                /* object allocation for the same key preemptively deleted this
                 * object's file so that it could create its own file */
                _debug("object preemptively buried");
@@ -433,7 +436,8 @@ int cachefiles_delete_object(struct cachefiles_cache *cache,
                 * may have been renamed */
                if (dir == object->dentry->d_parent) {
                        ret = cachefiles_bury_object(cache, dir,
-                                                    object->dentry, false);
+                                                    object->dentry, false,
+                                                    FSCACHE_OBJECT_WAS_RETIRED);
                } else {
                        /* it got moved, presumably by cachefilesd culling it,
                         * so it's no longer in the key path and we can ignore
@@ -522,7 +526,7 @@ lookup_again:
                if (d_is_negative(next)) {
                        ret = cachefiles_has_space(cache, 1, 0);
                        if (ret < 0)
-                               goto create_error;
+                               goto no_space_error;
 
                        path.dentry = dir;
                        ret = security_path_mkdir(&path, next, 0);
@@ -551,7 +555,7 @@ lookup_again:
                if (d_is_negative(next)) {
                        ret = cachefiles_has_space(cache, 1, 0);
                        if (ret < 0)
-                               goto create_error;
+                               goto no_space_error;
 
                        path.dentry = dir;
                        ret = security_path_mknod(&path, next, S_IFREG, 0);
@@ -602,7 +606,8 @@ lookup_again:
                         * mutex) */
                        object->dentry = NULL;
 
-                       ret = cachefiles_bury_object(cache, dir, next, true);
+                       ret = cachefiles_bury_object(cache, dir, next, true,
+                                                    FSCACHE_OBJECT_IS_STALE);
                        dput(next);
                        next = NULL;
 
@@ -610,6 +615,7 @@ lookup_again:
                                goto delete_error;
 
                        _debug("redo lookup");
+                       fscache_object_retrying_stale(&object->fscache);
                        goto lookup_again;
                }
        }
@@ -662,6 +668,8 @@ lookup_again:
        _leave(" = 0 [%lu]", d_backing_inode(object->dentry)->i_ino);
        return 0;
 
+no_space_error:
+       fscache_object_mark_killed(&object->fscache, FSCACHE_OBJECT_NO_SPACE);
 create_error:
        _debug("create error %d", ret);
        if (ret == -EIO)
@@ -927,7 +935,8 @@ int cachefiles_cull(struct cachefiles_cache *cache, struct dentry *dir,
        /*  actually remove the victim (drops the dir mutex) */
        _debug("bury");
 
-       ret = cachefiles_bury_object(cache, dir, victim, false);
+       ret = cachefiles_bury_object(cache, dir, victim, false,
+                                    FSCACHE_OBJECT_WAS_CULLED);
        if (ret < 0)
                goto error;
 
index 64fa248343f65461db232ee4ae0939beff0fc05c..8f84646f10e9560ade100c1dafa180768f1d76de 100644 (file)
@@ -187,10 +187,10 @@ int ceph_pre_init_acls(struct inode *dir, umode_t *mode,
                val_size2 = posix_acl_xattr_size(default_acl->a_count);
 
        err = -ENOMEM;
-       tmp_buf = kmalloc(max(val_size1, val_size2), GFP_NOFS);
+       tmp_buf = kmalloc(max(val_size1, val_size2), GFP_KERNEL);
        if (!tmp_buf)
                goto out_err;
-       pagelist = kmalloc(sizeof(struct ceph_pagelist), GFP_NOFS);
+       pagelist = kmalloc(sizeof(struct ceph_pagelist), GFP_KERNEL);
        if (!pagelist)
                goto out_err;
        ceph_pagelist_init(pagelist);
index e162bcd105ee2d98c2dcd12e80c75ee729a7a951..890c50971a690472f6dc795b00fd0bcfbcdf1a39 100644 (file)
@@ -87,17 +87,21 @@ static int ceph_set_page_dirty(struct page *page)
        inode = mapping->host;
        ci = ceph_inode(inode);
 
-       /*
-        * Note that we're grabbing a snapc ref here without holding
-        * any locks!
-        */
-       snapc = ceph_get_snap_context(ci->i_snap_realm->cached_context);
-
        /* dirty the head */
        spin_lock(&ci->i_ceph_lock);
-       if (ci->i_head_snapc == NULL)
-               ci->i_head_snapc = ceph_get_snap_context(snapc);
-       ++ci->i_wrbuffer_ref_head;
+       BUG_ON(ci->i_wr_ref == 0); // caller should hold Fw reference
+       if (__ceph_have_pending_cap_snap(ci)) {
+               struct ceph_cap_snap *capsnap =
+                               list_last_entry(&ci->i_cap_snaps,
+                                               struct ceph_cap_snap,
+                                               ci_item);
+               snapc = ceph_get_snap_context(capsnap->context);
+               capsnap->dirty_pages++;
+       } else {
+               BUG_ON(!ci->i_head_snapc);
+               snapc = ceph_get_snap_context(ci->i_head_snapc);
+               ++ci->i_wrbuffer_ref_head;
+       }
        if (ci->i_wrbuffer_ref == 0)
                ihold(inode);
        ++ci->i_wrbuffer_ref;
@@ -346,7 +350,7 @@ static int start_read(struct inode *inode, struct list_head *page_list, int max)
 
        /* build page vector */
        nr_pages = calc_pages_for(0, len);
-       pages = kmalloc(sizeof(*pages) * nr_pages, GFP_NOFS);
+       pages = kmalloc(sizeof(*pages) * nr_pages, GFP_KERNEL);
        ret = -ENOMEM;
        if (!pages)
                goto out;
@@ -358,7 +362,7 @@ static int start_read(struct inode *inode, struct list_head *page_list, int max)
                dout("start_read %p adding %p idx %lu\n", inode, page,
                     page->index);
                if (add_to_page_cache_lru(page, &inode->i_data, page->index,
-                                         GFP_NOFS)) {
+                                         GFP_KERNEL)) {
                        ceph_fscache_uncache_page(inode, page);
                        page_cache_release(page);
                        dout("start_read %p add_to_page_cache failed %p\n",
@@ -436,7 +440,7 @@ out:
  * only snap context we are allowed to write back.
  */
 static struct ceph_snap_context *get_oldest_context(struct inode *inode,
-                                                   u64 *snap_size)
+                                                   loff_t *snap_size)
 {
        struct ceph_inode_info *ci = ceph_inode(inode);
        struct ceph_snap_context *snapc = NULL;
@@ -476,8 +480,9 @@ static int writepage_nounlock(struct page *page, struct writeback_control *wbc)
        struct ceph_osd_client *osdc;
        struct ceph_snap_context *snapc, *oldest;
        loff_t page_off = page_offset(page);
+       loff_t snap_size = -1;
        long writeback_stat;
-       u64 truncate_size, snap_size = 0;
+       u64 truncate_size;
        u32 truncate_seq;
        int err = 0, len = PAGE_CACHE_SIZE;
 
@@ -512,7 +517,7 @@ static int writepage_nounlock(struct page *page, struct writeback_control *wbc)
        spin_lock(&ci->i_ceph_lock);
        truncate_seq = ci->i_truncate_seq;
        truncate_size = ci->i_truncate_size;
-       if (!snap_size)
+       if (snap_size == -1)
                snap_size = i_size_read(inode);
        spin_unlock(&ci->i_ceph_lock);
 
@@ -695,7 +700,8 @@ static int ceph_writepages_start(struct address_space *mapping,
        unsigned wsize = 1 << inode->i_blkbits;
        struct ceph_osd_request *req = NULL;
        int do_sync = 0;
-       u64 truncate_size, snap_size;
+       loff_t snap_size, i_size;
+       u64 truncate_size;
        u32 truncate_seq;
 
        /*
@@ -741,7 +747,7 @@ static int ceph_writepages_start(struct address_space *mapping,
 retry:
        /* find oldest snap context with dirty data */
        ceph_put_snap_context(snapc);
-       snap_size = 0;
+       snap_size = -1;
        snapc = get_oldest_context(inode, &snap_size);
        if (!snapc) {
                /* hmm, why does writepages get called when there
@@ -749,16 +755,13 @@ retry:
                dout(" no snap context with dirty data?\n");
                goto out;
        }
-       if (snap_size == 0)
-               snap_size = i_size_read(inode);
        dout(" oldest snapc is %p seq %lld (%d snaps)\n",
             snapc, snapc->seq, snapc->num_snaps);
 
        spin_lock(&ci->i_ceph_lock);
        truncate_seq = ci->i_truncate_seq;
        truncate_size = ci->i_truncate_size;
-       if (!snap_size)
-               snap_size = i_size_read(inode);
+       i_size = i_size_read(inode);
        spin_unlock(&ci->i_ceph_lock);
 
        if (last_snapc && snapc != last_snapc) {
@@ -828,8 +831,10 @@ get_more_pages:
                                dout("waiting on writeback %p\n", page);
                                wait_on_page_writeback(page);
                        }
-                       if (page_offset(page) >= snap_size) {
-                               dout("%p page eof %llu\n", page, snap_size);
+                       if (page_offset(page) >=
+                           (snap_size == -1 ? i_size : snap_size)) {
+                               dout("%p page eof %llu\n", page,
+                                    (snap_size == -1 ? i_size : snap_size));
                                done = 1;
                                unlock_page(page);
                                break;
@@ -884,7 +889,8 @@ get_more_pages:
                                }
 
                                if (do_sync)
-                                       osd_req_op_init(req, 1, CEPH_OSD_OP_STARTSYNC);
+                                       osd_req_op_init(req, 1,
+                                                       CEPH_OSD_OP_STARTSYNC, 0);
 
                                req->r_callback = writepages_finish;
                                req->r_inode = inode;
@@ -944,10 +950,18 @@ get_more_pages:
                }
 
                /* Format the osd request message and submit the write */
-
                offset = page_offset(pages[0]);
-               len = min(snap_size - offset,
-                         (u64)locked_pages << PAGE_CACHE_SHIFT);
+               len = (u64)locked_pages << PAGE_CACHE_SHIFT;
+               if (snap_size == -1) {
+                       len = min(len, (u64)i_size_read(inode) - offset);
+                        /* writepages_finish() clears writeback pages
+                         * according to the data length, so make sure
+                         * data length covers all locked pages */
+                       len = max(len, 1 +
+                               ((u64)(locked_pages - 1) << PAGE_CACHE_SHIFT));
+               } else {
+                       len = min(len, snap_size - offset);
+               }
                dout("writepages got %d pages at %llu~%llu\n",
                     locked_pages, offset, len);
 
@@ -1032,7 +1046,6 @@ static int ceph_update_writeable_page(struct file *file,
 {
        struct inode *inode = file_inode(file);
        struct ceph_inode_info *ci = ceph_inode(inode);
-       struct ceph_mds_client *mdsc = ceph_inode_to_client(inode)->mdsc;
        loff_t page_off = pos & PAGE_CACHE_MASK;
        int pos_in_page = pos & ~PAGE_CACHE_MASK;
        int end_in_page = pos_in_page + len;
@@ -1044,10 +1057,6 @@ retry_locked:
        /* writepages currently holds page lock, but if we change that later, */
        wait_on_page_writeback(page);
 
-       /* check snap context */
-       BUG_ON(!ci->i_snap_realm);
-       down_read(&mdsc->snap_rwsem);
-       BUG_ON(!ci->i_snap_realm->cached_context);
        snapc = page_snap_context(page);
        if (snapc && snapc != ci->i_head_snapc) {
                /*
@@ -1055,7 +1064,6 @@ retry_locked:
                 * context!  is it writeable now?
                 */
                oldest = get_oldest_context(inode, NULL);
-               up_read(&mdsc->snap_rwsem);
 
                if (snapc->seq > oldest->seq) {
                        ceph_put_snap_context(oldest);
@@ -1112,7 +1120,6 @@ retry_locked:
        }
 
        /* we need to read it. */
-       up_read(&mdsc->snap_rwsem);
        r = readpage_nounlock(file, page);
        if (r < 0)
                goto fail_nosnap;
@@ -1157,16 +1164,13 @@ static int ceph_write_begin(struct file *file, struct address_space *mapping,
 
 /*
  * we don't do anything in here that simple_write_end doesn't do
- * except adjust dirty page accounting and drop read lock on
- * mdsc->snap_rwsem.
+ * except adjust dirty page accounting
  */
 static int ceph_write_end(struct file *file, struct address_space *mapping,
                          loff_t pos, unsigned len, unsigned copied,
                          struct page *page, void *fsdata)
 {
        struct inode *inode = file_inode(file);
-       struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
-       struct ceph_mds_client *mdsc = fsc->mdsc;
        unsigned from = pos & (PAGE_CACHE_SIZE - 1);
        int check_cap = 0;
 
@@ -1188,7 +1192,6 @@ static int ceph_write_end(struct file *file, struct address_space *mapping,
        set_page_dirty(page);
 
        unlock_page(page);
-       up_read(&mdsc->snap_rwsem);
        page_cache_release(page);
 
        if (check_cap)
@@ -1314,13 +1317,17 @@ static int ceph_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
        struct inode *inode = file_inode(vma->vm_file);
        struct ceph_inode_info *ci = ceph_inode(inode);
        struct ceph_file_info *fi = vma->vm_file->private_data;
-       struct ceph_mds_client *mdsc = ceph_inode_to_client(inode)->mdsc;
+       struct ceph_cap_flush *prealloc_cf;
        struct page *page = vmf->page;
        loff_t off = page_offset(page);
        loff_t size = i_size_read(inode);
        size_t len;
        int want, got, ret;
 
+       prealloc_cf = ceph_alloc_cap_flush();
+       if (!prealloc_cf)
+               return VM_FAULT_SIGBUS;
+
        if (ci->i_inline_version != CEPH_INLINE_NONE) {
                struct page *locked_page = NULL;
                if (off == 0) {
@@ -1330,8 +1337,10 @@ static int ceph_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
                ret = ceph_uninline_data(vma->vm_file, locked_page);
                if (locked_page)
                        unlock_page(locked_page);
-               if (ret < 0)
-                       return VM_FAULT_SIGBUS;
+               if (ret < 0) {
+                       ret = VM_FAULT_SIGBUS;
+                       goto out_free;
+               }
        }
 
        if (off + PAGE_CACHE_SIZE <= size)
@@ -1353,7 +1362,8 @@ static int ceph_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
                        break;
                if (ret != -ERESTARTSYS) {
                        WARN_ON(1);
-                       return VM_FAULT_SIGBUS;
+                       ret = VM_FAULT_SIGBUS;
+                       goto out_free;
                }
        }
        dout("page_mkwrite %p %llu~%zd got cap refs on %s\n",
@@ -1373,7 +1383,6 @@ static int ceph_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
        if (ret == 0) {
                /* success.  we'll keep the page locked. */
                set_page_dirty(page);
-               up_read(&mdsc->snap_rwsem);
                ret = VM_FAULT_LOCKED;
        } else {
                if (ret == -ENOMEM)
@@ -1389,7 +1398,8 @@ out:
                int dirty;
                spin_lock(&ci->i_ceph_lock);
                ci->i_inline_version = CEPH_INLINE_NONE;
-               dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR);
+               dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR,
+                                              &prealloc_cf);
                spin_unlock(&ci->i_ceph_lock);
                if (dirty)
                        __mark_inode_dirty(inode, dirty);
@@ -1398,6 +1408,8 @@ out:
        dout("page_mkwrite %p %llu~%zd dropping cap refs on %s ret %d\n",
             inode, off, len, ceph_cap_string(got), ret);
        ceph_put_cap_refs(ci, got);
+out_free:
+       ceph_free_cap_flush(prealloc_cf);
 
        return ret;
 }
@@ -1509,8 +1521,7 @@ int ceph_uninline_data(struct file *filp, struct page *locked_page)
                                    ceph_vino(inode), 0, &len, 0, 1,
                                    CEPH_OSD_OP_CREATE,
                                    CEPH_OSD_FLAG_ONDISK | CEPH_OSD_FLAG_WRITE,
-                                   ci->i_snap_realm->cached_context,
-                                   0, 0, false);
+                                   ceph_empty_snapc, 0, 0, false);
        if (IS_ERR(req)) {
                err = PTR_ERR(req);
                goto out;
@@ -1528,7 +1539,7 @@ int ceph_uninline_data(struct file *filp, struct page *locked_page)
                                    ceph_vino(inode), 0, &len, 1, 3,
                                    CEPH_OSD_OP_WRITE,
                                    CEPH_OSD_FLAG_ONDISK | CEPH_OSD_FLAG_WRITE,
-                                   ci->i_snap_realm->cached_context,
+                                   ceph_empty_snapc,
                                    ci->i_truncate_seq, ci->i_truncate_size,
                                    false);
        if (IS_ERR(req)) {
@@ -1597,3 +1608,206 @@ int ceph_mmap(struct file *file, struct vm_area_struct *vma)
        vma->vm_ops = &ceph_vmops;
        return 0;
 }
+
+enum {
+       POOL_READ       = 1,
+       POOL_WRITE      = 2,
+};
+
+static int __ceph_pool_perm_get(struct ceph_inode_info *ci, u32 pool)
+{
+       struct ceph_fs_client *fsc = ceph_inode_to_client(&ci->vfs_inode);
+       struct ceph_mds_client *mdsc = fsc->mdsc;
+       struct ceph_osd_request *rd_req = NULL, *wr_req = NULL;
+       struct rb_node **p, *parent;
+       struct ceph_pool_perm *perm;
+       struct page **pages;
+       int err = 0, err2 = 0, have = 0;
+
+       down_read(&mdsc->pool_perm_rwsem);
+       p = &mdsc->pool_perm_tree.rb_node;
+       while (*p) {
+               perm = rb_entry(*p, struct ceph_pool_perm, node);
+               if (pool < perm->pool)
+                       p = &(*p)->rb_left;
+               else if (pool > perm->pool)
+                       p = &(*p)->rb_right;
+               else {
+                       have = perm->perm;
+                       break;
+               }
+       }
+       up_read(&mdsc->pool_perm_rwsem);
+       if (*p)
+               goto out;
+
+       dout("__ceph_pool_perm_get pool %u no perm cached\n", pool);
+
+       down_write(&mdsc->pool_perm_rwsem);
+       parent = NULL;
+       while (*p) {
+               parent = *p;
+               perm = rb_entry(parent, struct ceph_pool_perm, node);
+               if (pool < perm->pool)
+                       p = &(*p)->rb_left;
+               else if (pool > perm->pool)
+                       p = &(*p)->rb_right;
+               else {
+                       have = perm->perm;
+                       break;
+               }
+       }
+       if (*p) {
+               up_write(&mdsc->pool_perm_rwsem);
+               goto out;
+       }
+
+       rd_req = ceph_osdc_alloc_request(&fsc->client->osdc,
+                                        ceph_empty_snapc,
+                                        1, false, GFP_NOFS);
+       if (!rd_req) {
+               err = -ENOMEM;
+               goto out_unlock;
+       }
+
+       rd_req->r_flags = CEPH_OSD_FLAG_READ;
+       osd_req_op_init(rd_req, 0, CEPH_OSD_OP_STAT, 0);
+       rd_req->r_base_oloc.pool = pool;
+       snprintf(rd_req->r_base_oid.name, sizeof(rd_req->r_base_oid.name),
+                "%llx.00000000", ci->i_vino.ino);
+       rd_req->r_base_oid.name_len = strlen(rd_req->r_base_oid.name);
+
+       wr_req = ceph_osdc_alloc_request(&fsc->client->osdc,
+                                        ceph_empty_snapc,
+                                        1, false, GFP_NOFS);
+       if (!wr_req) {
+               err = -ENOMEM;
+               goto out_unlock;
+       }
+
+       wr_req->r_flags = CEPH_OSD_FLAG_WRITE |
+                         CEPH_OSD_FLAG_ACK | CEPH_OSD_FLAG_ONDISK;
+       osd_req_op_init(wr_req, 0, CEPH_OSD_OP_CREATE, CEPH_OSD_OP_FLAG_EXCL);
+       wr_req->r_base_oloc.pool = pool;
+       wr_req->r_base_oid = rd_req->r_base_oid;
+
+       /* one page should be large enough for STAT data */
+       pages = ceph_alloc_page_vector(1, GFP_KERNEL);
+       if (IS_ERR(pages)) {
+               err = PTR_ERR(pages);
+               goto out_unlock;
+       }
+
+       osd_req_op_raw_data_in_pages(rd_req, 0, pages, PAGE_SIZE,
+                                    0, false, true);
+       ceph_osdc_build_request(rd_req, 0, NULL, CEPH_NOSNAP,
+                               &ci->vfs_inode.i_mtime);
+       err = ceph_osdc_start_request(&fsc->client->osdc, rd_req, false);
+
+       ceph_osdc_build_request(wr_req, 0, NULL, CEPH_NOSNAP,
+                               &ci->vfs_inode.i_mtime);
+       err2 = ceph_osdc_start_request(&fsc->client->osdc, wr_req, false);
+
+       if (!err)
+               err = ceph_osdc_wait_request(&fsc->client->osdc, rd_req);
+       if (!err2)
+               err2 = ceph_osdc_wait_request(&fsc->client->osdc, wr_req);
+
+       if (err >= 0 || err == -ENOENT)
+               have |= POOL_READ;
+       else if (err != -EPERM)
+               goto out_unlock;
+
+       if (err2 == 0 || err2 == -EEXIST)
+               have |= POOL_WRITE;
+       else if (err2 != -EPERM) {
+               err = err2;
+               goto out_unlock;
+       }
+
+       perm = kmalloc(sizeof(*perm), GFP_NOFS);
+       if (!perm) {
+               err = -ENOMEM;
+               goto out_unlock;
+       }
+
+       perm->pool = pool;
+       perm->perm = have;
+       rb_link_node(&perm->node, parent, p);
+       rb_insert_color(&perm->node, &mdsc->pool_perm_tree);
+       err = 0;
+out_unlock:
+       up_write(&mdsc->pool_perm_rwsem);
+
+       if (rd_req)
+               ceph_osdc_put_request(rd_req);
+       if (wr_req)
+               ceph_osdc_put_request(wr_req);
+out:
+       if (!err)
+               err = have;
+       dout("__ceph_pool_perm_get pool %u result = %d\n", pool, err);
+       return err;
+}
+
+int ceph_pool_perm_check(struct ceph_inode_info *ci, int need)
+{
+       u32 pool;
+       int ret, flags;
+
+       if (ceph_test_mount_opt(ceph_inode_to_client(&ci->vfs_inode),
+                               NOPOOLPERM))
+               return 0;
+
+       spin_lock(&ci->i_ceph_lock);
+       flags = ci->i_ceph_flags;
+       pool = ceph_file_layout_pg_pool(ci->i_layout);
+       spin_unlock(&ci->i_ceph_lock);
+check:
+       if (flags & CEPH_I_POOL_PERM) {
+               if ((need & CEPH_CAP_FILE_RD) && !(flags & CEPH_I_POOL_RD)) {
+                       dout("ceph_pool_perm_check pool %u no read perm\n",
+                            pool);
+                       return -EPERM;
+               }
+               if ((need & CEPH_CAP_FILE_WR) && !(flags & CEPH_I_POOL_WR)) {
+                       dout("ceph_pool_perm_check pool %u no write perm\n",
+                            pool);
+                       return -EPERM;
+               }
+               return 0;
+       }
+
+       ret = __ceph_pool_perm_get(ci, pool);
+       if (ret < 0)
+               return ret;
+
+       flags = CEPH_I_POOL_PERM;
+       if (ret & POOL_READ)
+               flags |= CEPH_I_POOL_RD;
+       if (ret & POOL_WRITE)
+               flags |= CEPH_I_POOL_WR;
+
+       spin_lock(&ci->i_ceph_lock);
+       if (pool == ceph_file_layout_pg_pool(ci->i_layout)) {
+               ci->i_ceph_flags = flags;
+        } else {
+               pool = ceph_file_layout_pg_pool(ci->i_layout);
+               flags = ci->i_ceph_flags;
+       }
+       spin_unlock(&ci->i_ceph_lock);
+       goto check;
+}
+
+void ceph_pool_perm_destroy(struct ceph_mds_client *mdsc)
+{
+       struct ceph_pool_perm *perm;
+       struct rb_node *n;
+
+       while (!RB_EMPTY_ROOT(&mdsc->pool_perm_tree)) {
+               n = rb_first(&mdsc->pool_perm_tree);
+               perm = rb_entry(n, struct ceph_pool_perm, node);
+               rb_erase(n, &mdsc->pool_perm_tree);
+               kfree(perm);
+       }
+}
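
__ceph_pool_perm_get() above is a double-checked cache: look the pool id up in the rb-tree under the read side of pool_perm_rwsem, and only on a miss take the write side, repeat the lookup (another task may have raced in while the lock was dropped), probe the OSD with a read and an exclusive-create request, and insert the combined result. A compact userspace sketch of that locking shape, with a flat array standing in for the rb-tree and a trivial stand-in for the OSD probe (names illustrative, not the Ceph code):

    #include <pthread.h>
    #include <stdio.h>

    #define MAX_POOLS 16

    struct perm_entry { unsigned pool; int perm; int used; };

    static struct perm_entry cache[MAX_POOLS];
    static pthread_rwlock_t cache_lock = PTHREAD_RWLOCK_INITIALIZER;

    static struct perm_entry *lookup(unsigned pool)
    {
        for (int i = 0; i < MAX_POOLS; i++)
            if (cache[i].used && cache[i].pool == pool)
                return &cache[i];
        return NULL;
    }

    /* Stand-in for the expensive OSD STAT/CREATE probe. */
    static int probe_pool(unsigned pool) { return (int)(pool & 3); }

    static int pool_perm_get(unsigned pool)
    {
        struct perm_entry *e;
        int perm = -1;

        /* Fast path: shared lock, return a cached result if present. */
        pthread_rwlock_rdlock(&cache_lock);
        e = lookup(pool);
        if (e)
            perm = e->perm;
        pthread_rwlock_unlock(&cache_lock);
        if (perm >= 0)
            return perm;

        /* Slow path: exclusive lock, re-check (someone may have filled the
         * entry while the lock was dropped), then probe and insert. */
        pthread_rwlock_wrlock(&cache_lock);
        e = lookup(pool);
        if (e) {
            perm = e->perm;
        } else {
            perm = probe_pool(pool);
            for (int i = 0; i < MAX_POOLS; i++) {
                if (!cache[i].used) {
                    cache[i] = (struct perm_entry){ pool, perm, 1 };
                    break;
                }
            }
        }
        pthread_rwlock_unlock(&cache_lock);
        return perm;
    }

    int main(void)
    {
        printf("perm(5)=%d perm(5)=%d\n", pool_perm_get(5), pool_perm_get(5));
        return 0;
    }

As in the hunk above, the probe runs while the write side is held, so only one task performs it for a given pool and later callers hit the cached entry on the read side.
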
index be5ea6af8366479b675e81e1d13e96139abed2c4..dc10c9dd36c1a2ac6264ed21d3248e5f62f1e330 100644 (file)
@@ -833,7 +833,9 @@ int __ceph_caps_used(struct ceph_inode_info *ci)
                used |= CEPH_CAP_PIN;
        if (ci->i_rd_ref)
                used |= CEPH_CAP_FILE_RD;
-       if (ci->i_rdcache_ref || ci->vfs_inode.i_data.nrpages)
+       if (ci->i_rdcache_ref ||
+           (!S_ISDIR(ci->vfs_inode.i_mode) && /* ignore readdir cache */
+            ci->vfs_inode.i_data.nrpages))
                used |= CEPH_CAP_FILE_CACHE;
        if (ci->i_wr_ref)
                used |= CEPH_CAP_FILE_WR;
@@ -926,16 +928,6 @@ void __ceph_remove_cap(struct ceph_cap *cap, bool queue_release)
 
        /* remove from session list */
        spin_lock(&session->s_cap_lock);
-       /*
-        * s_cap_reconnect is protected by s_cap_lock. no one changes
-        * s_cap_gen while session is in the reconnect state.
-        */
-       if (queue_release &&
-           (!session->s_cap_reconnect ||
-            cap->cap_gen == session->s_cap_gen))
-               __queue_cap_release(session, ci->i_vino.ino, cap->cap_id,
-                                   cap->mseq, cap->issue_seq);
-
        if (session->s_cap_iterator == cap) {
                /* not yet, we are iterating over this very cap */
                dout("__ceph_remove_cap  delaying %p removal from session %p\n",
@@ -948,6 +940,25 @@ void __ceph_remove_cap(struct ceph_cap *cap, bool queue_release)
        }
        /* protect backpointer with s_cap_lock: see iterate_session_caps */
        cap->ci = NULL;
+
+       /*
+        * s_cap_reconnect is protected by s_cap_lock. no one changes
+        * s_cap_gen while session is in the reconnect state.
+        */
+       if (queue_release &&
+           (!session->s_cap_reconnect || cap->cap_gen == session->s_cap_gen)) {
+               cap->queue_release = 1;
+               if (removed) {
+                       list_add_tail(&cap->session_caps,
+                                     &session->s_cap_releases);
+                       session->s_num_cap_releases++;
+                       removed = 0;
+               }
+       } else {
+               cap->queue_release = 0;
+       }
+       cap->cap_ino = ci->i_vino.ino;
+
        spin_unlock(&session->s_cap_lock);
 
        /* remove from inode list */
@@ -977,8 +988,8 @@ void __ceph_remove_cap(struct ceph_cap *cap, bool queue_release)
 static int send_cap_msg(struct ceph_mds_session *session,
                        u64 ino, u64 cid, int op,
                        int caps, int wanted, int dirty,
-                       u32 seq, u64 flush_tid, u32 issue_seq, u32 mseq,
-                       u64 size, u64 max_size,
+                       u32 seq, u64 flush_tid, u64 oldest_flush_tid,
+                       u32 issue_seq, u32 mseq, u64 size, u64 max_size,
                        struct timespec *mtime, struct timespec *atime,
                        u64 time_warp_seq,
                        kuid_t uid, kgid_t gid, umode_t mode,
@@ -992,20 +1003,23 @@ static int send_cap_msg(struct ceph_mds_session *session,
        size_t extra_len;
 
        dout("send_cap_msg %s %llx %llx caps %s wanted %s dirty %s"
-            " seq %u/%u mseq %u follows %lld size %llu/%llu"
+            " seq %u/%u tid %llu/%llu mseq %u follows %lld size %llu/%llu"
             " xattr_ver %llu xattr_len %d\n", ceph_cap_op_name(op),
             cid, ino, ceph_cap_string(caps), ceph_cap_string(wanted),
             ceph_cap_string(dirty),
-            seq, issue_seq, mseq, follows, size, max_size,
+            seq, issue_seq, flush_tid, oldest_flush_tid,
+            mseq, follows, size, max_size,
             xattr_version, xattrs_buf ? (int)xattrs_buf->vec.iov_len : 0);
 
-       /* flock buffer size + inline version + inline data size */
-       extra_len = 4 + 8 + 4;
+       /* flock buffer size + inline version + inline data size +
+        * osd_epoch_barrier + oldest_flush_tid */
+       extra_len = 4 + 8 + 4 + 4 + 8;
        msg = ceph_msg_new(CEPH_MSG_CLIENT_CAPS, sizeof(*fc) + extra_len,
                           GFP_NOFS, false);
        if (!msg)
                return -ENOMEM;
 
+       msg->hdr.version = cpu_to_le16(6);
        msg->hdr.tid = cpu_to_le64(flush_tid);
 
        fc = msg->front.iov_base;
@@ -1041,6 +1055,10 @@ static int send_cap_msg(struct ceph_mds_session *session,
        ceph_encode_64(&p, inline_data ? 0 : CEPH_INLINE_NONE);
        /* inline data size */
        ceph_encode_32(&p, 0);
+       /* osd_epoch_barrier */
+       ceph_encode_32(&p, 0);
+       /* oldest_flush_tid */
+       ceph_encode_64(&p, oldest_flush_tid);
 
        fc->xattr_version = cpu_to_le64(xattr_version);
        if (xattrs_buf) {
@@ -1053,44 +1071,6 @@ static int send_cap_msg(struct ceph_mds_session *session,
        return 0;
 }
 
-void __queue_cap_release(struct ceph_mds_session *session,
-                        u64 ino, u64 cap_id, u32 migrate_seq,
-                        u32 issue_seq)
-{
-       struct ceph_msg *msg;
-       struct ceph_mds_cap_release *head;
-       struct ceph_mds_cap_item *item;
-
-       BUG_ON(!session->s_num_cap_releases);
-       msg = list_first_entry(&session->s_cap_releases,
-                              struct ceph_msg, list_head);
-
-       dout(" adding %llx release to mds%d msg %p (%d left)\n",
-            ino, session->s_mds, msg, session->s_num_cap_releases);
-
-       BUG_ON(msg->front.iov_len + sizeof(*item) > PAGE_CACHE_SIZE);
-       head = msg->front.iov_base;
-       le32_add_cpu(&head->num, 1);
-       item = msg->front.iov_base + msg->front.iov_len;
-       item->ino = cpu_to_le64(ino);
-       item->cap_id = cpu_to_le64(cap_id);
-       item->migrate_seq = cpu_to_le32(migrate_seq);
-       item->seq = cpu_to_le32(issue_seq);
-
-       session->s_num_cap_releases--;
-
-       msg->front.iov_len += sizeof(*item);
-       if (le32_to_cpu(head->num) == CEPH_CAPS_PER_RELEASE) {
-               dout(" release msg %p full\n", msg);
-               list_move_tail(&msg->list_head, &session->s_cap_releases_done);
-       } else {
-               dout(" release msg %p at %d/%d (%d)\n", msg,
-                    (int)le32_to_cpu(head->num),
-                    (int)CEPH_CAPS_PER_RELEASE,
-                    (int)msg->front.iov_len);
-       }
-}
-
 /*
  * Queue cap releases when an inode is dropped from our cache.  Since
  * inode is about to be destroyed, there is no need for i_ceph_lock.
@@ -1127,7 +1107,7 @@ void ceph_queue_caps_release(struct inode *inode)
  */
 static int __send_cap(struct ceph_mds_client *mdsc, struct ceph_cap *cap,
                      int op, int used, int want, int retain, int flushing,
-                     unsigned *pflush_tid)
+                     u64 flush_tid, u64 oldest_flush_tid)
        __releases(cap->ci->i_ceph_lock)
 {
        struct ceph_inode_info *ci = cap->ci;
@@ -1145,8 +1125,6 @@ static int __send_cap(struct ceph_mds_client *mdsc, struct ceph_cap *cap,
        u64 xattr_version = 0;
        struct ceph_buffer *xattr_blob = NULL;
        int delayed = 0;
-       u64 flush_tid = 0;
-       int i;
        int ret;
        bool inline_data;
 
@@ -1190,26 +1168,7 @@ static int __send_cap(struct ceph_mds_client *mdsc, struct ceph_cap *cap,
        cap->implemented &= cap->issued | used;
        cap->mds_wanted = want;
 
-       if (flushing) {
-               /*
-                * assign a tid for flush operations so we can avoid
-                * flush1 -> dirty1 -> flush2 -> flushack1 -> mark
-                * clean type races.  track latest tid for every bit
-                * so we can handle flush AxFw, flush Fw, and have the
-                * first ack clean Ax.
-                */
-               flush_tid = ++ci->i_cap_flush_last_tid;
-               if (pflush_tid)
-                       *pflush_tid = flush_tid;
-               dout(" cap_flush_tid %d\n", (int)flush_tid);
-               for (i = 0; i < CEPH_CAP_BITS; i++)
-                       if (flushing & (1 << i))
-                               ci->i_cap_flush_tid[i] = flush_tid;
-
-               follows = ci->i_head_snapc->seq;
-       } else {
-               follows = 0;
-       }
+       follows = flushing ? ci->i_head_snapc->seq : 0;
 
        keep = cap->implemented;
        seq = cap->seq;
@@ -1237,7 +1196,8 @@ static int __send_cap(struct ceph_mds_client *mdsc, struct ceph_cap *cap,
        spin_unlock(&ci->i_ceph_lock);
 
        ret = send_cap_msg(session, ceph_vino(inode).ino, cap_id,
-               op, keep, want, flushing, seq, flush_tid, issue_seq, mseq,
+               op, keep, want, flushing, seq,
+               flush_tid, oldest_flush_tid, issue_seq, mseq,
                size, max_size, &mtime, &atime, time_warp_seq,
                uid, gid, mode, xattr_version, xattr_blob,
                follows, inline_data);
@@ -1259,14 +1219,14 @@ static int __send_cap(struct ceph_mds_client *mdsc, struct ceph_cap *cap,
  * asynchronously back to the MDS once sync writes complete and dirty
  * data is written out.
  *
- * Unless @again is true, skip cap_snaps that were already sent to
+ * Unless @kick is true, skip cap_snaps that were already sent to
  * the MDS (i.e., during this session).
  *
  * Called under i_ceph_lock.  Takes s_mutex as needed.
  */
 void __ceph_flush_snaps(struct ceph_inode_info *ci,
                        struct ceph_mds_session **psession,
-                       int again)
+                       int kick)
                __releases(ci->i_ceph_lock)
                __acquires(ci->i_ceph_lock)
 {
@@ -1297,11 +1257,8 @@ retry:
                if (capsnap->dirty_pages || capsnap->writing)
                        break;
 
-               /*
-                * if cap writeback already occurred, we should have dropped
-                * the capsnap in ceph_put_wrbuffer_cap_refs.
-                */
-               BUG_ON(capsnap->dirty == 0);
+               /* should be removed by ceph_try_drop_cap_snap() */
+               BUG_ON(!capsnap->need_flush);
 
                /* pick mds, take s_mutex */
                if (ci->i_auth_cap == NULL) {
@@ -1310,7 +1267,7 @@ retry:
                }
 
                /* only flush each capsnap once */
-               if (!again && !list_empty(&capsnap->flushing_item)) {
+               if (!kick && !list_empty(&capsnap->flushing_item)) {
                        dout("already flushed %p, skipping\n", capsnap);
                        continue;
                }
@@ -1320,6 +1277,9 @@ retry:
 
                if (session && session->s_mds != mds) {
                        dout("oops, wrong session %p mutex\n", session);
+                       if (kick)
+                               goto out;
+
                        mutex_unlock(&session->s_mutex);
                        ceph_put_mds_session(session);
                        session = NULL;
@@ -1343,20 +1303,22 @@ retry:
                        goto retry;
                }
 
-               capsnap->flush_tid = ++ci->i_cap_flush_last_tid;
+               spin_lock(&mdsc->cap_dirty_lock);
+               capsnap->flush_tid = ++mdsc->last_cap_flush_tid;
+               spin_unlock(&mdsc->cap_dirty_lock);
+
                atomic_inc(&capsnap->nref);
-               if (!list_empty(&capsnap->flushing_item))
-                       list_del_init(&capsnap->flushing_item);
-               list_add_tail(&capsnap->flushing_item,
-                             &session->s_cap_snaps_flushing);
+               if (list_empty(&capsnap->flushing_item))
+                       list_add_tail(&capsnap->flushing_item,
+                                     &session->s_cap_snaps_flushing);
                spin_unlock(&ci->i_ceph_lock);
 
                dout("flush_snaps %p cap_snap %p follows %lld tid %llu\n",
                     inode, capsnap, capsnap->follows, capsnap->flush_tid);
                send_cap_msg(session, ceph_vino(inode).ino, 0,
                             CEPH_CAP_OP_FLUSHSNAP, capsnap->issued, 0,
-                            capsnap->dirty, 0, capsnap->flush_tid, 0, mseq,
-                            capsnap->size, 0,
+                            capsnap->dirty, 0, capsnap->flush_tid, 0,
+                            0, mseq, capsnap->size, 0,
                             &capsnap->mtime, &capsnap->atime,
                             capsnap->time_warp_seq,
                             capsnap->uid, capsnap->gid, capsnap->mode,
@@ -1396,7 +1358,8 @@ static void ceph_flush_snaps(struct ceph_inode_info *ci)
  * Caller is then responsible for calling __mark_inode_dirty with the
  * returned flags value.
  */
-int __ceph_mark_dirty_caps(struct ceph_inode_info *ci, int mask)
+int __ceph_mark_dirty_caps(struct ceph_inode_info *ci, int mask,
+                          struct ceph_cap_flush **pcf)
 {
        struct ceph_mds_client *mdsc =
                ceph_sb_to_client(ci->vfs_inode.i_sb)->mdsc;
@@ -1416,9 +1379,14 @@ int __ceph_mark_dirty_caps(struct ceph_inode_info *ci, int mask)
             ceph_cap_string(was | mask));
        ci->i_dirty_caps |= mask;
        if (was == 0) {
-               if (!ci->i_head_snapc)
+               WARN_ON_ONCE(ci->i_prealloc_cap_flush);
+               swap(ci->i_prealloc_cap_flush, *pcf);
+
+               if (!ci->i_head_snapc) {
+                       WARN_ON_ONCE(!rwsem_is_locked(&mdsc->snap_rwsem));
                        ci->i_head_snapc = ceph_get_snap_context(
                                ci->i_snap_realm->cached_context);
+               }
                dout(" inode %p now dirty snapc %p auth cap %p\n",
                     &ci->vfs_inode, ci->i_head_snapc, ci->i_auth_cap);
                BUG_ON(!list_empty(&ci->i_dirty_item));
@@ -1429,6 +1397,8 @@ int __ceph_mark_dirty_caps(struct ceph_inode_info *ci, int mask)
                        ihold(inode);
                        dirty |= I_DIRTY_SYNC;
                }
+       } else {
+               WARN_ON_ONCE(!ci->i_prealloc_cap_flush);
        }
        BUG_ON(list_empty(&ci->i_dirty_item));
        if (((was | ci->i_flushing_caps) & CEPH_CAP_FILE_BUFFER) &&
@@ -1438,6 +1408,74 @@ int __ceph_mark_dirty_caps(struct ceph_inode_info *ci, int mask)
        return dirty;
 }
 
+static void __add_cap_flushing_to_inode(struct ceph_inode_info *ci,
+                                       struct ceph_cap_flush *cf)
+{
+       struct rb_node **p = &ci->i_cap_flush_tree.rb_node;
+       struct rb_node *parent = NULL;
+       struct ceph_cap_flush *other = NULL;
+
+       while (*p) {
+               parent = *p;
+               other = rb_entry(parent, struct ceph_cap_flush, i_node);
+
+               if (cf->tid < other->tid)
+                       p = &(*p)->rb_left;
+               else if (cf->tid > other->tid)
+                       p = &(*p)->rb_right;
+               else
+                       BUG();
+       }
+
+       rb_link_node(&cf->i_node, parent, p);
+       rb_insert_color(&cf->i_node, &ci->i_cap_flush_tree);
+}
+
+static void __add_cap_flushing_to_mdsc(struct ceph_mds_client *mdsc,
+                                      struct ceph_cap_flush *cf)
+{
+       struct rb_node **p = &mdsc->cap_flush_tree.rb_node;
+       struct rb_node *parent = NULL;
+       struct ceph_cap_flush *other = NULL;
+
+       while (*p) {
+               parent = *p;
+               other = rb_entry(parent, struct ceph_cap_flush, g_node);
+
+               if (cf->tid < other->tid)
+                       p = &(*p)->rb_left;
+               else if (cf->tid > other->tid)
+                       p = &(*p)->rb_right;
+               else
+                       BUG();
+       }
+
+       rb_link_node(&cf->g_node, parent, p);
+       rb_insert_color(&cf->g_node, &mdsc->cap_flush_tree);
+}
+
+struct ceph_cap_flush *ceph_alloc_cap_flush(void)
+{
+       return kmem_cache_alloc(ceph_cap_flush_cachep, GFP_KERNEL);
+}
+
+void ceph_free_cap_flush(struct ceph_cap_flush *cf)
+{
+       if (cf)
+               kmem_cache_free(ceph_cap_flush_cachep, cf);
+}
+
+static u64 __get_oldest_flush_tid(struct ceph_mds_client *mdsc)
+{
+       struct rb_node *n = rb_first(&mdsc->cap_flush_tree);
+       if (n) {
+               struct ceph_cap_flush *cf =
+                       rb_entry(n, struct ceph_cap_flush, g_node);
+               return cf->tid;
+       }
+       return 0;
+}
+
 /*
  * Add dirty inode to the flushing list.  Assigned a seq number so we
  * can wait for caps to flush without starving.
@@ -1445,14 +1483,17 @@ int __ceph_mark_dirty_caps(struct ceph_inode_info *ci, int mask)
  * Called under i_ceph_lock.
  */
 static int __mark_caps_flushing(struct inode *inode,
-                                struct ceph_mds_session *session)
+                               struct ceph_mds_session *session,
+                               u64 *flush_tid, u64 *oldest_flush_tid)
 {
        struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
        struct ceph_inode_info *ci = ceph_inode(inode);
+       struct ceph_cap_flush *cf = NULL;
        int flushing;
 
        BUG_ON(ci->i_dirty_caps == 0);
        BUG_ON(list_empty(&ci->i_dirty_item));
+       BUG_ON(!ci->i_prealloc_cap_flush);
 
        flushing = ci->i_dirty_caps;
        dout("__mark_caps_flushing flushing %s, flushing_caps %s -> %s\n",
@@ -1463,22 +1504,31 @@ static int __mark_caps_flushing(struct inode *inode,
        ci->i_dirty_caps = 0;
        dout(" inode %p now !dirty\n", inode);
 
+       swap(cf, ci->i_prealloc_cap_flush);
+       cf->caps = flushing;
+       cf->kick = false;
+
        spin_lock(&mdsc->cap_dirty_lock);
        list_del_init(&ci->i_dirty_item);
 
+       cf->tid = ++mdsc->last_cap_flush_tid;
+       __add_cap_flushing_to_mdsc(mdsc, cf);
+       *oldest_flush_tid = __get_oldest_flush_tid(mdsc);
+
        if (list_empty(&ci->i_flushing_item)) {
-               ci->i_cap_flush_seq = ++mdsc->cap_flush_seq;
                list_add_tail(&ci->i_flushing_item, &session->s_cap_flushing);
                mdsc->num_cap_flushing++;
-               dout(" inode %p now flushing seq %lld\n", inode,
-                    ci->i_cap_flush_seq);
+               dout(" inode %p now flushing tid %llu\n", inode, cf->tid);
        } else {
                list_move_tail(&ci->i_flushing_item, &session->s_cap_flushing);
-               dout(" inode %p now flushing (more) seq %lld\n", inode,
-                    ci->i_cap_flush_seq);
+               dout(" inode %p now flushing (more) tid %llu\n",
+                    inode, cf->tid);
        }
        spin_unlock(&mdsc->cap_dirty_lock);
 
+       __add_cap_flushing_to_inode(ci, cf);
+
+       *flush_tid = cf->tid;
        return flushing;
 }
 
@@ -1524,6 +1574,7 @@ void ceph_check_caps(struct ceph_inode_info *ci, int flags,
        struct ceph_mds_client *mdsc = fsc->mdsc;
        struct inode *inode = &ci->vfs_inode;
        struct ceph_cap *cap;
+       u64 flush_tid, oldest_flush_tid;
        int file_wanted, used, cap_used;
        int took_snap_rwsem = 0;             /* true if mdsc->snap_rwsem held */
        int issued, implemented, want, retain, revoking, flushing = 0;
@@ -1553,13 +1604,13 @@ retry:
 retry_locked:
        file_wanted = __ceph_caps_file_wanted(ci);
        used = __ceph_caps_used(ci);
-       want = file_wanted | used;
        issued = __ceph_caps_issued(ci, &implemented);
        revoking = implemented & ~issued;
 
-       retain = want | CEPH_CAP_PIN;
+       want = file_wanted;
+       retain = file_wanted | used | CEPH_CAP_PIN;
        if (!mdsc->stopping && inode->i_nlink > 0) {
-               if (want) {
+               if (file_wanted) {
                        retain |= CEPH_CAP_ANY;       /* be greedy */
                } else if (S_ISDIR(inode->i_mode) &&
                           (issued & CEPH_CAP_FILE_SHARED) &&
@@ -1602,9 +1653,10 @@ retry_locked:
         * If we fail, it's because pages are locked.... try again later.
         */
        if ((!is_delayed || mdsc->stopping) &&
-           ci->i_wrbuffer_ref == 0 &&               /* no dirty pages... */
-           inode->i_data.nrpages &&                 /* have cached pages */
-           (file_wanted == 0 ||                     /* no open files */
+           !S_ISDIR(inode->i_mode) &&          /* ignore readdir cache */
+           ci->i_wrbuffer_ref == 0 &&          /* no dirty pages... */
+           inode->i_data.nrpages &&            /* have cached pages */
+           (file_wanted == 0 ||                /* no open files */
             (revoking & (CEPH_CAP_FILE_CACHE|
                          CEPH_CAP_FILE_LAZYIO))) && /*  or revoking cache */
            !tried_invalidate) {
@@ -1742,17 +1794,25 @@ ack:
                        took_snap_rwsem = 1;
                }
 
-               if (cap == ci->i_auth_cap && ci->i_dirty_caps)
-                       flushing = __mark_caps_flushing(inode, session);
-               else
+               if (cap == ci->i_auth_cap && ci->i_dirty_caps) {
+                       flushing = __mark_caps_flushing(inode, session,
+                                                       &flush_tid,
+                                                       &oldest_flush_tid);
+               } else {
                        flushing = 0;
+                       flush_tid = 0;
+                       spin_lock(&mdsc->cap_dirty_lock);
+                       oldest_flush_tid = __get_oldest_flush_tid(mdsc);
+                       spin_unlock(&mdsc->cap_dirty_lock);
+               }
 
                mds = cap->mds;  /* remember mds, so we don't repeat */
                sent++;
 
                /* __send_cap drops i_ceph_lock */
                delayed += __send_cap(mdsc, cap, CEPH_CAP_OP_UPDATE, cap_used,
-                                     want, retain, flushing, NULL);
+                                     want, retain, flushing,
+                                     flush_tid, oldest_flush_tid);
                goto retry; /* retake i_ceph_lock and restart our cap scan. */
        }
 
@@ -1781,12 +1841,13 @@ ack:
 /*
  * Try to flush dirty caps back to the auth mds.
  */
-static int try_flush_caps(struct inode *inode, unsigned *flush_tid)
+static int try_flush_caps(struct inode *inode, u64 *ptid)
 {
        struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
        struct ceph_inode_info *ci = ceph_inode(inode);
-       int flushing = 0;
        struct ceph_mds_session *session = NULL;
+       int flushing = 0;
+       u64 flush_tid = 0, oldest_flush_tid = 0;
 
 retry:
        spin_lock(&ci->i_ceph_lock);
@@ -1811,42 +1872,54 @@ retry:
                if (cap->session->s_state < CEPH_MDS_SESSION_OPEN)
                        goto out;
 
-               flushing = __mark_caps_flushing(inode, session);
+               flushing = __mark_caps_flushing(inode, session, &flush_tid,
+                                               &oldest_flush_tid);
 
                /* __send_cap drops i_ceph_lock */
                delayed = __send_cap(mdsc, cap, CEPH_CAP_OP_FLUSH, used, want,
-                                    cap->issued | cap->implemented, flushing,
-                                    flush_tid);
-               if (!delayed)
-                       goto out_unlocked;
+                                    (cap->issued | cap->implemented),
+                                    flushing, flush_tid, oldest_flush_tid);
 
-               spin_lock(&ci->i_ceph_lock);
-               __cap_delay_requeue(mdsc, ci);
+               if (delayed) {
+                       spin_lock(&ci->i_ceph_lock);
+                       __cap_delay_requeue(mdsc, ci);
+                       spin_unlock(&ci->i_ceph_lock);
+               }
+       } else {
+               struct rb_node *n = rb_last(&ci->i_cap_flush_tree);
+               if (n) {
+                       struct ceph_cap_flush *cf =
+                               rb_entry(n, struct ceph_cap_flush, i_node);
+                       flush_tid = cf->tid;
+               }
+               flushing = ci->i_flushing_caps;
+               spin_unlock(&ci->i_ceph_lock);
        }
 out:
-       spin_unlock(&ci->i_ceph_lock);
-out_unlocked:
        if (session)
                mutex_unlock(&session->s_mutex);
+
+       *ptid = flush_tid;
        return flushing;
 }
 
 /*
  * Return true if we've flushed caps through the given flush_tid.
  */
-static int caps_are_flushed(struct inode *inode, unsigned tid)
+static int caps_are_flushed(struct inode *inode, u64 flush_tid)
 {
        struct ceph_inode_info *ci = ceph_inode(inode);
-       int i, ret = 1;
+       struct ceph_cap_flush *cf;
+       struct rb_node *n;
+       int ret = 1;
 
        spin_lock(&ci->i_ceph_lock);
-       for (i = 0; i < CEPH_CAP_BITS; i++)
-               if ((ci->i_flushing_caps & (1 << i)) &&
-                   ci->i_cap_flush_tid[i] <= tid) {
-                       /* still flushing this bit */
+       n = rb_first(&ci->i_cap_flush_tree);
+       if (n) {
+               cf = rb_entry(n, struct ceph_cap_flush, i_node);
+               if (cf->tid <= flush_tid)
                        ret = 0;
-                       break;
-               }
+       }
        spin_unlock(&ci->i_ceph_lock);
        return ret;
 }
@@ -1864,13 +1937,16 @@ static void sync_write_wait(struct inode *inode)
        struct ceph_osd_request *req;
        u64 last_tid;
 
+       if (!S_ISREG(inode->i_mode))
+               return;
+
        spin_lock(&ci->i_unsafe_lock);
        if (list_empty(head))
                goto out;
 
        /* set upper bound as _last_ entry in chain */
-       req = list_entry(head->prev, struct ceph_osd_request,
-                        r_unsafe_item);
+       req = list_last_entry(head, struct ceph_osd_request,
+                             r_unsafe_item);
        last_tid = req->r_tid;
 
        do {
@@ -1888,18 +1964,64 @@ static void sync_write_wait(struct inode *inode)
                 */
                if (list_empty(head))
                        break;
-               req = list_entry(head->next, struct ceph_osd_request,
-                                r_unsafe_item);
+               req = list_first_entry(head, struct ceph_osd_request,
+                                      r_unsafe_item);
        } while (req->r_tid < last_tid);
 out:
        spin_unlock(&ci->i_unsafe_lock);
 }
 
+/*
+ * wait for any uncommitted directory operations to commit.
+ */
+static int unsafe_dirop_wait(struct inode *inode)
+{
+       struct ceph_inode_info *ci = ceph_inode(inode);
+       struct list_head *head = &ci->i_unsafe_dirops;
+       struct ceph_mds_request *req;
+       u64 last_tid;
+       int ret = 0;
+
+       if (!S_ISDIR(inode->i_mode))
+               return 0;
+
+       spin_lock(&ci->i_unsafe_lock);
+       if (list_empty(head))
+               goto out;
+
+       req = list_last_entry(head, struct ceph_mds_request,
+                             r_unsafe_dir_item);
+       last_tid = req->r_tid;
+
+       do {
+               ceph_mdsc_get_request(req);
+               spin_unlock(&ci->i_unsafe_lock);
+
+               dout("unsafe_dirop_wait %p wait on tid %llu (until %llu)\n",
+                    inode, req->r_tid, last_tid);
+               ret = !wait_for_completion_timeout(&req->r_safe_completion,
+                                       ceph_timeout_jiffies(req->r_timeout));
+               if (ret)
+                       ret = -EIO;  /* timed out */
+
+               ceph_mdsc_put_request(req);
+
+               spin_lock(&ci->i_unsafe_lock);
+               if (ret || list_empty(head))
+                       break;
+               req = list_first_entry(head, struct ceph_mds_request,
+                                      r_unsafe_dir_item);
+       } while (req->r_tid < last_tid);
+out:
+       spin_unlock(&ci->i_unsafe_lock);
+       return ret;
+}
+
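unsafe_dirop_wait() leans on the return convention of wait_for_completion_timeout(): 0 means the wait timed out, a non-zero value is the remaining jiffies, so the negation above maps a timeout to -EIO. A tiny standalone sketch of that idiom (hypothetical helper name, kernel context assumed):

/* Illustrative only: turn "0 == timed out" into an errno. */
static int example_wait_committed(struct completion *done, unsigned long timeout)
{
	unsigned long left = wait_for_completion_timeout(done, timeout);

	return left ? 0 : -EIO;		/* timed out */
}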
 int ceph_fsync(struct file *file, loff_t start, loff_t end, int datasync)
 {
        struct inode *inode = file->f_mapping->host;
        struct ceph_inode_info *ci = ceph_inode(inode);
-       unsigned flush_tid;
+       u64 flush_tid;
        int ret;
        int dirty;
 
@@ -1908,25 +2030,30 @@ int ceph_fsync(struct file *file, loff_t start, loff_t end, int datasync)
 
        ret = filemap_write_and_wait_range(inode->i_mapping, start, end);
        if (ret < 0)
-               return ret;
+               goto out;
+
+       if (datasync)
+               goto out;
+
        mutex_lock(&inode->i_mutex);
 
        dirty = try_flush_caps(inode, &flush_tid);
        dout("fsync dirty caps are %s\n", ceph_cap_string(dirty));
 
+       ret = unsafe_dirop_wait(inode);
+
        /*
         * only wait on non-file metadata writeback (the mds
         * can recover size and mtime, so we don't need to
         * wait for that)
         */
-       if (!datasync && (dirty & ~CEPH_CAP_ANY_FILE_WR)) {
-               dout("fsync waiting for flush_tid %u\n", flush_tid);
+       if (!ret && (dirty & ~CEPH_CAP_ANY_FILE_WR)) {
                ret = wait_event_interruptible(ci->i_cap_wq,
-                                      caps_are_flushed(inode, flush_tid));
+                                       caps_are_flushed(inode, flush_tid));
        }
-
-       dout("fsync %p%s done\n", inode, datasync ? " datasync" : "");
        mutex_unlock(&inode->i_mutex);
+out:
+       dout("fsync %p%s result=%d\n", inode, datasync ? " datasync" : "", ret);
        return ret;
 }
 
@@ -1939,7 +2066,7 @@ int ceph_fsync(struct file *file, loff_t start, loff_t end, int datasync)
 int ceph_write_inode(struct inode *inode, struct writeback_control *wbc)
 {
        struct ceph_inode_info *ci = ceph_inode(inode);
-       unsigned flush_tid;
+       u64 flush_tid;
        int err = 0;
        int dirty;
        int wait = wbc->sync_mode == WB_SYNC_ALL;
@@ -1994,6 +2121,104 @@ static void kick_flushing_capsnaps(struct ceph_mds_client *mdsc,
        }
 }
 
+static int __kick_flushing_caps(struct ceph_mds_client *mdsc,
+                               struct ceph_mds_session *session,
+                               struct ceph_inode_info *ci,
+                               bool kick_all)
+{
+       struct inode *inode = &ci->vfs_inode;
+       struct ceph_cap *cap;
+       struct ceph_cap_flush *cf;
+       struct rb_node *n;
+       int delayed = 0;
+       u64 first_tid = 0;
+       u64 oldest_flush_tid;
+
+       spin_lock(&mdsc->cap_dirty_lock);
+       oldest_flush_tid = __get_oldest_flush_tid(mdsc);
+       spin_unlock(&mdsc->cap_dirty_lock);
+
+       while (true) {
+               spin_lock(&ci->i_ceph_lock);
+               cap = ci->i_auth_cap;
+               if (!(cap && cap->session == session)) {
+                       pr_err("%p auth cap %p not mds%d ???\n", inode,
+                                       cap, session->s_mds);
+                       spin_unlock(&ci->i_ceph_lock);
+                       break;
+               }
+
+               for (n = rb_first(&ci->i_cap_flush_tree); n; n = rb_next(n)) {
+                       cf = rb_entry(n, struct ceph_cap_flush, i_node);
+                       if (cf->tid < first_tid)
+                               continue;
+                       if (kick_all || cf->kick)
+                               break;
+               }
+               if (!n) {
+                       spin_unlock(&ci->i_ceph_lock);
+                       break;
+               }
+
+               cf = rb_entry(n, struct ceph_cap_flush, i_node);
+               cf->kick = false;
+
+               first_tid = cf->tid + 1;
+
+               dout("kick_flushing_caps %p cap %p tid %llu %s\n", inode,
+                    cap, cf->tid, ceph_cap_string(cf->caps));
+               delayed |= __send_cap(mdsc, cap, CEPH_CAP_OP_FLUSH,
+                                     __ceph_caps_used(ci),
+                                     __ceph_caps_wanted(ci),
+                                     cap->issued | cap->implemented,
+                                     cf->caps, cf->tid, oldest_flush_tid);
+       }
+       return delayed;
+}
+
+void ceph_early_kick_flushing_caps(struct ceph_mds_client *mdsc,
+                                  struct ceph_mds_session *session)
+{
+       struct ceph_inode_info *ci;
+       struct ceph_cap *cap;
+       struct ceph_cap_flush *cf;
+       struct rb_node *n;
+
+       dout("early_kick_flushing_caps mds%d\n", session->s_mds);
+       list_for_each_entry(ci, &session->s_cap_flushing, i_flushing_item) {
+               spin_lock(&ci->i_ceph_lock);
+               cap = ci->i_auth_cap;
+               if (!(cap && cap->session == session)) {
+                       pr_err("%p auth cap %p not mds%d ???\n",
+                               &ci->vfs_inode, cap, session->s_mds);
+                       spin_unlock(&ci->i_ceph_lock);
+                       continue;
+               }
+
+               /*
+                * if flushing caps were revoked, we re-send the cap flush
+                * in the client reconnect stage. This guarantees the MDS
+                * processes the cap flush message before issuing the
+                * flushing caps to other clients.
+                */
+               if ((cap->issued & ci->i_flushing_caps) !=
+                   ci->i_flushing_caps) {
+                       spin_unlock(&ci->i_ceph_lock);
+                       if (!__kick_flushing_caps(mdsc, session, ci, true))
+                               continue;
+                       spin_lock(&ci->i_ceph_lock);
+               }
+
+               for (n = rb_first(&ci->i_cap_flush_tree); n; n = rb_next(n)) {
+                       cf = rb_entry(n, struct ceph_cap_flush, i_node);
+                       cf->kick = true;
+               }
+
+               spin_unlock(&ci->i_ceph_lock);
+       }
+}
+
 void ceph_kick_flushing_caps(struct ceph_mds_client *mdsc,
                             struct ceph_mds_session *session)
 {
@@ -2003,28 +2228,10 @@ void ceph_kick_flushing_caps(struct ceph_mds_client *mdsc,
 
        dout("kick_flushing_caps mds%d\n", session->s_mds);
        list_for_each_entry(ci, &session->s_cap_flushing, i_flushing_item) {
-               struct inode *inode = &ci->vfs_inode;
-               struct ceph_cap *cap;
-               int delayed = 0;
-
-               spin_lock(&ci->i_ceph_lock);
-               cap = ci->i_auth_cap;
-               if (cap && cap->session == session) {
-                       dout("kick_flushing_caps %p cap %p %s\n", inode,
-                            cap, ceph_cap_string(ci->i_flushing_caps));
-                       delayed = __send_cap(mdsc, cap, CEPH_CAP_OP_FLUSH,
-                                            __ceph_caps_used(ci),
-                                            __ceph_caps_wanted(ci),
-                                            cap->issued | cap->implemented,
-                                            ci->i_flushing_caps, NULL);
-                       if (delayed) {
-                               spin_lock(&ci->i_ceph_lock);
-                               __cap_delay_requeue(mdsc, ci);
-                               spin_unlock(&ci->i_ceph_lock);
-                       }
-               } else {
-                       pr_err("%p auth cap %p not mds%d ???\n", inode,
-                              cap, session->s_mds);
+               int delayed = __kick_flushing_caps(mdsc, session, ci, false);
+               if (delayed) {
+                       spin_lock(&ci->i_ceph_lock);
+                       __cap_delay_requeue(mdsc, ci);
                        spin_unlock(&ci->i_ceph_lock);
                }
        }
@@ -2036,26 +2243,25 @@ static void kick_flushing_inode_caps(struct ceph_mds_client *mdsc,
 {
        struct ceph_inode_info *ci = ceph_inode(inode);
        struct ceph_cap *cap;
-       int delayed = 0;
 
        spin_lock(&ci->i_ceph_lock);
        cap = ci->i_auth_cap;
-       dout("kick_flushing_inode_caps %p flushing %s flush_seq %lld\n", inode,
-            ceph_cap_string(ci->i_flushing_caps), ci->i_cap_flush_seq);
+       dout("kick_flushing_inode_caps %p flushing %s\n", inode,
+            ceph_cap_string(ci->i_flushing_caps));
 
        __ceph_flush_snaps(ci, &session, 1);
 
        if (ci->i_flushing_caps) {
+               int delayed;
+
                spin_lock(&mdsc->cap_dirty_lock);
                list_move_tail(&ci->i_flushing_item,
                               &cap->session->s_cap_flushing);
                spin_unlock(&mdsc->cap_dirty_lock);
 
-               delayed = __send_cap(mdsc, cap, CEPH_CAP_OP_FLUSH,
-                                    __ceph_caps_used(ci),
-                                    __ceph_caps_wanted(ci),
-                                    cap->issued | cap->implemented,
-                                    ci->i_flushing_caps, NULL);
+               spin_unlock(&ci->i_ceph_lock);
+
+               delayed = __kick_flushing_caps(mdsc, session, ci, true);
                if (delayed) {
                        spin_lock(&ci->i_ceph_lock);
                        __cap_delay_requeue(mdsc, ci);
@@ -2073,7 +2279,8 @@ static void kick_flushing_inode_caps(struct ceph_mds_client *mdsc,
  *
  * Protected by i_ceph_lock.
  */
-static void __take_cap_refs(struct ceph_inode_info *ci, int got)
+static void __take_cap_refs(struct ceph_inode_info *ci, int got,
+                           bool snap_rwsem_locked)
 {
        if (got & CEPH_CAP_PIN)
                ci->i_pin_ref++;
@@ -2081,8 +2288,14 @@ static void __take_cap_refs(struct ceph_inode_info *ci, int got)
                ci->i_rd_ref++;
        if (got & CEPH_CAP_FILE_CACHE)
                ci->i_rdcache_ref++;
-       if (got & CEPH_CAP_FILE_WR)
+       if (got & CEPH_CAP_FILE_WR) {
+               if (ci->i_wr_ref == 0 && !ci->i_head_snapc) {
+                       BUG_ON(!snap_rwsem_locked);
+                       ci->i_head_snapc = ceph_get_snap_context(
+                                       ci->i_snap_realm->cached_context);
+               }
                ci->i_wr_ref++;
+       }
        if (got & CEPH_CAP_FILE_BUFFER) {
                if (ci->i_wb_ref == 0)
                        ihold(&ci->vfs_inode);
@@ -2100,16 +2313,19 @@ static void __take_cap_refs(struct ceph_inode_info *ci, int got)
  * requested from the MDS.
  */
 static int try_get_cap_refs(struct ceph_inode_info *ci, int need, int want,
-                           loff_t endoff, int *got, int *check_max, int *err)
+                           loff_t endoff, bool nonblock, int *got, int *err)
 {
        struct inode *inode = &ci->vfs_inode;
+       struct ceph_mds_client *mdsc = ceph_inode_to_client(inode)->mdsc;
        int ret = 0;
        int have, implemented;
        int file_wanted;
+       bool snap_rwsem_locked = false;
 
        dout("get_cap_refs %p need %s want %s\n", inode,
             ceph_cap_string(need), ceph_cap_string(want));
 
+again:
        spin_lock(&ci->i_ceph_lock);
 
        /* make sure file is actually open */
@@ -2125,6 +2341,10 @@ static int try_get_cap_refs(struct ceph_inode_info *ci, int need, int want,
        /* finish pending truncate */
        while (ci->i_truncate_pending) {
                spin_unlock(&ci->i_ceph_lock);
+               if (snap_rwsem_locked) {
+                       up_read(&mdsc->snap_rwsem);
+                       snap_rwsem_locked = false;
+               }
                __ceph_do_pending_vmtruncate(inode);
                spin_lock(&ci->i_ceph_lock);
        }
@@ -2136,7 +2356,7 @@ static int try_get_cap_refs(struct ceph_inode_info *ci, int need, int want,
                        dout("get_cap_refs %p endoff %llu > maxsize %llu\n",
                             inode, endoff, ci->i_max_size);
                        if (endoff > ci->i_requested_max_size) {
-                               *check_max = 1;
+                               *err = -EAGAIN;
                                ret = 1;
                        }
                        goto out_unlock;
@@ -2164,8 +2384,29 @@ static int try_get_cap_refs(struct ceph_inode_info *ci, int need, int want,
                     inode, ceph_cap_string(have), ceph_cap_string(not),
                     ceph_cap_string(revoking));
                if ((revoking & not) == 0) {
+                       if (!snap_rwsem_locked &&
+                           !ci->i_head_snapc &&
+                           (need & CEPH_CAP_FILE_WR)) {
+                               if (!down_read_trylock(&mdsc->snap_rwsem)) {
+                                       /*
+                                        * we cannot call down_read() when the
+                                        * task isn't in TASK_RUNNING state
+                                        */
+                                       if (nonblock) {
+                                               *err = -EAGAIN;
+                                               ret = 1;
+                                               goto out_unlock;
+                                       }
+
+                                       spin_unlock(&ci->i_ceph_lock);
+                                       down_read(&mdsc->snap_rwsem);
+                                       snap_rwsem_locked = true;
+                                       goto again;
+                               }
+                               snap_rwsem_locked = true;
+                       }
                        *got = need | (have & want);
-                       __take_cap_refs(ci, *got);
+                       __take_cap_refs(ci, *got, true);
                        ret = 1;
                }
        } else {
@@ -2189,6 +2430,8 @@ static int try_get_cap_refs(struct ceph_inode_info *ci, int need, int want,
        }
 out_unlock:
        spin_unlock(&ci->i_ceph_lock);
+       if (snap_rwsem_locked)
+               up_read(&mdsc->snap_rwsem);
 
        dout("get_cap_refs %p ret %d got %s\n", inode,
             ret, ceph_cap_string(*got));
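The snap_rwsem handling above is an instance of a common locking pattern: opportunistically trylock a sleeping lock while a spinlock is held, and on failure either bail out (when the caller must not sleep, e.g. inside wait_event_interruptible) or drop the spinlock, take the rwsem blocking, and restart the whole sequence. A generic sketch of the pattern with hypothetical lock arguments, not tied to Ceph's data structures:

/*
 * Illustrative only: trylock under a spinlock, fall back to a blocking
 * acquisition plus retry when sleeping is allowed.
 */
static int example_lock_both(spinlock_t *lock, struct rw_semaphore *sem,
			     bool nonblock)
{
	bool sem_held = false;

again:
	spin_lock(lock);
	if (!sem_held) {
		if (!down_read_trylock(sem)) {
			spin_unlock(lock);
			if (nonblock)
				return -EAGAIN;	/* caller may not sleep */
			down_read(sem);		/* may sleep */
			sem_held = true;
			goto again;
		}
		sem_held = true;
	}

	/* ... work with both locks held ... */

	spin_unlock(lock);
	up_read(sem);
	return 0;
}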
@@ -2231,50 +2474,70 @@ static void check_max_size(struct inode *inode, loff_t endoff)
 int ceph_get_caps(struct ceph_inode_info *ci, int need, int want,
                  loff_t endoff, int *got, struct page **pinned_page)
 {
-       int _got, check_max, ret, err = 0;
+       int _got, ret, err = 0;
 
-retry:
-       if (endoff > 0)
-               check_max_size(&ci->vfs_inode, endoff);
-       _got = 0;
-       check_max = 0;
-       ret = wait_event_interruptible(ci->i_cap_wq,
-                               try_get_cap_refs(ci, need, want, endoff,
-                                                &_got, &check_max, &err));
-       if (err)
-               ret = err;
+       ret = ceph_pool_perm_check(ci, need);
        if (ret < 0)
                return ret;
 
-       if (check_max)
-               goto retry;
+       while (true) {
+               if (endoff > 0)
+                       check_max_size(&ci->vfs_inode, endoff);
 
-       if (ci->i_inline_version != CEPH_INLINE_NONE &&
-           (_got & (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO)) &&
-           i_size_read(&ci->vfs_inode) > 0) {
-               struct page *page = find_get_page(ci->vfs_inode.i_mapping, 0);
-               if (page) {
-                       if (PageUptodate(page)) {
-                               *pinned_page = page;
-                               goto out;
-                       }
-                       page_cache_release(page);
-               }
-               /*
-                * drop cap refs first because getattr while holding
-                * caps refs can cause deadlock.
-                */
-               ceph_put_cap_refs(ci, _got);
+               err = 0;
                _got = 0;
+               ret = try_get_cap_refs(ci, need, want, endoff,
+                                      false, &_got, &err);
+               if (ret) {
+                       if (err == -EAGAIN)
+                               continue;
+                       if (err < 0)
+                               return err;
+               } else {
+                       ret = wait_event_interruptible(ci->i_cap_wq,
+                                       try_get_cap_refs(ci, need, want, endoff,
+                                                        true, &_got, &err));
+                       if (err == -EAGAIN)
+                               continue;
+                       if (err < 0)
+                               ret = err;
+                       if (ret < 0)
+                               return ret;
+               }
 
-               /* getattr request will bring inline data into page cache */
-               ret = __ceph_do_getattr(&ci->vfs_inode, NULL,
-                                       CEPH_STAT_CAP_INLINE_DATA, true);
-               if (ret < 0)
-                       return ret;
-               goto retry;
+               if (ci->i_inline_version != CEPH_INLINE_NONE &&
+                   (_got & (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO)) &&
+                   i_size_read(&ci->vfs_inode) > 0) {
+                       struct page *page =
+                               find_get_page(ci->vfs_inode.i_mapping, 0);
+                       if (page) {
+                               if (PageUptodate(page)) {
+                                       *pinned_page = page;
+                                       break;
+                               }
+                               page_cache_release(page);
+                       }
+                       /*
+                        * drop cap refs first because getattr while
+                        * holding caps refs can cause deadlock.
+                        */
+                       ceph_put_cap_refs(ci, _got);
+                       _got = 0;
+
+                       /*
+                        * getattr request will bring inline data into
+                        * page cache
+                        */
+                       ret = __ceph_do_getattr(&ci->vfs_inode, NULL,
+                                               CEPH_STAT_CAP_INLINE_DATA,
+                                               true);
+                       if (ret < 0)
+                               return ret;
+                       continue;
+               }
+               break;
        }
-out:
+
        *got = _got;
        return 0;
 }
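For reference, a hypothetical read-side caller of ceph_get_caps() showing the contract the rewritten loop provides: on success it returns 0 with *got filled in and possibly an inline-data page pinned, and the caller must release both. Everything named example_* below is an assumption, not code from this patch:

/*
 * Illustrative only: take FILE_RD (plus CACHE/LAZYIO if available), do the
 * read, then drop the cap refs and any pinned page.
 */
static ssize_t example_read(struct ceph_inode_info *ci)
{
	struct page *pinned_page = NULL;
	int got = 0;
	ssize_t ret;

	ret = ceph_get_caps(ci, CEPH_CAP_FILE_RD,
			    CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_LAZYIO,
			    0 /* endoff: only needed for writes */,
			    &got, &pinned_page);
	if (ret < 0)
		return ret;

	/* ... perform the read under the granted caps ... */
	ret = 0;

	if (pinned_page)
		page_cache_release(pinned_page);
	ceph_put_cap_refs(ci, got);
	return ret;
}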
@@ -2286,10 +2549,31 @@ out:
 void ceph_get_cap_refs(struct ceph_inode_info *ci, int caps)
 {
        spin_lock(&ci->i_ceph_lock);
-       __take_cap_refs(ci, caps);
+       __take_cap_refs(ci, caps, false);
        spin_unlock(&ci->i_ceph_lock);
 }
 
+/*
+ * drop cap_snap that is not associated with any snapshot.
+ * we don't need to send FLUSHSNAP message for it.
+ */
+static int ceph_try_drop_cap_snap(struct ceph_cap_snap *capsnap)
+{
+       if (!capsnap->need_flush &&
+           !capsnap->writing && !capsnap->dirty_pages) {
+
+               dout("dropping cap_snap %p follows %llu\n",
+                    capsnap, capsnap->follows);
+               ceph_put_snap_context(capsnap->context);
+               list_del(&capsnap->ci_item);
+               list_del(&capsnap->flushing_item);
+               ceph_put_cap_snap(capsnap);
+               return 1;
+       }
+       return 0;
+}
+
 /*
  * Release cap refs.
  *
@@ -2303,7 +2587,6 @@ void ceph_put_cap_refs(struct ceph_inode_info *ci, int had)
 {
        struct inode *inode = &ci->vfs_inode;
        int last = 0, put = 0, flushsnaps = 0, wake = 0;
-       struct ceph_cap_snap *capsnap;
 
        spin_lock(&ci->i_ceph_lock);
        if (had & CEPH_CAP_PIN)
@@ -2325,17 +2608,24 @@ void ceph_put_cap_refs(struct ceph_inode_info *ci, int had)
        if (had & CEPH_CAP_FILE_WR)
                if (--ci->i_wr_ref == 0) {
                        last++;
-                       if (!list_empty(&ci->i_cap_snaps)) {
-                               capsnap = list_first_entry(&ci->i_cap_snaps,
-                                                    struct ceph_cap_snap,
-                                                    ci_item);
-                               if (capsnap->writing) {
-                                       capsnap->writing = 0;
-                                       flushsnaps =
-                                               __ceph_finish_cap_snap(ci,
-                                                                      capsnap);
-                                       wake = 1;
-                               }
+                       if (__ceph_have_pending_cap_snap(ci)) {
+                               struct ceph_cap_snap *capsnap =
+                                       list_last_entry(&ci->i_cap_snaps,
+                                                       struct ceph_cap_snap,
+                                                       ci_item);
+                               capsnap->writing = 0;
+                               if (ceph_try_drop_cap_snap(capsnap))
+                                       put++;
+                               else if (__ceph_finish_cap_snap(ci, capsnap))
+                                       flushsnaps = 1;
+                               wake = 1;
+                       }
+                       if (ci->i_wrbuffer_ref_head == 0 &&
+                           ci->i_dirty_caps == 0 &&
+                           ci->i_flushing_caps == 0) {
+                               BUG_ON(!ci->i_head_snapc);
+                               ceph_put_snap_context(ci->i_head_snapc);
+                               ci->i_head_snapc = NULL;
                        }
                        /* see comment in __ceph_remove_cap() */
                        if (!__ceph_is_any_caps(ci) && ci->i_snap_realm)
@@ -2352,7 +2642,7 @@ void ceph_put_cap_refs(struct ceph_inode_info *ci, int had)
                ceph_flush_snaps(ci);
        if (wake)
                wake_up_all(&ci->i_cap_wq);
-       if (put)
+       while (put-- > 0)
                iput(inode);
 }
 
@@ -2380,7 +2670,9 @@ void ceph_put_wrbuffer_cap_refs(struct ceph_inode_info *ci, int nr,
        if (ci->i_head_snapc == snapc) {
                ci->i_wrbuffer_ref_head -= nr;
                if (ci->i_wrbuffer_ref_head == 0 &&
-                   ci->i_dirty_caps == 0 && ci->i_flushing_caps == 0) {
+                   ci->i_wr_ref == 0 &&
+                   ci->i_dirty_caps == 0 &&
+                   ci->i_flushing_caps == 0) {
                        BUG_ON(!ci->i_head_snapc);
                        ceph_put_snap_context(ci->i_head_snapc);
                        ci->i_head_snapc = NULL;
@@ -2401,25 +2693,15 @@ void ceph_put_wrbuffer_cap_refs(struct ceph_inode_info *ci, int nr,
                capsnap->dirty_pages -= nr;
                if (capsnap->dirty_pages == 0) {
                        complete_capsnap = 1;
-                       if (capsnap->dirty == 0)
-                               /* cap writeback completed before we created
-                                * the cap_snap; no FLUSHSNAP is needed */
-                               drop_capsnap = 1;
+                       drop_capsnap = ceph_try_drop_cap_snap(capsnap);
                }
                dout("put_wrbuffer_cap_refs on %p cap_snap %p "
-                    " snap %lld %d/%d -> %d/%d %s%s%s\n",
+                    " snap %lld %d/%d -> %d/%d %s%s\n",
                     inode, capsnap, capsnap->context->seq,
                     ci->i_wrbuffer_ref+nr, capsnap->dirty_pages + nr,
                     ci->i_wrbuffer_ref, capsnap->dirty_pages,
                     last ? " (wrbuffer last)" : "",
-                    complete_capsnap ? " (complete capsnap)" : "",
-                    drop_capsnap ? " (drop capsnap)" : "");
-               if (drop_capsnap) {
-                       ceph_put_snap_context(capsnap->context);
-                       list_del(&capsnap->ci_item);
-                       list_del(&capsnap->flushing_item);
-                       ceph_put_cap_snap(capsnap);
-               }
+                    complete_capsnap ? " (complete capsnap)" : "");
        }
 
        spin_unlock(&ci->i_ceph_lock);
@@ -2526,7 +2808,8 @@ static void handle_cap_grant(struct ceph_mds_client *mdsc,
         * try to invalidate (once).  (If there are dirty buffers, we
         * will invalidate _after_ writeback.)
         */
-       if (((cap->issued & ~newcaps) & CEPH_CAP_FILE_CACHE) &&
+       if (!S_ISDIR(inode->i_mode) && /* don't invalidate readdir cache */
+           ((cap->issued & ~newcaps) & CEPH_CAP_FILE_CACHE) &&
            (newcaps & CEPH_CAP_FILE_LAZYIO) == 0 &&
            !ci->i_wrbuffer_ref) {
                if (try_nonblocking_invalidate(inode)) {
@@ -2732,16 +3015,29 @@ static void handle_cap_flush_ack(struct inode *inode, u64 flush_tid,
 {
        struct ceph_inode_info *ci = ceph_inode(inode);
        struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
+       struct ceph_cap_flush *cf;
+       struct rb_node *n;
+       LIST_HEAD(to_remove);
        unsigned seq = le32_to_cpu(m->seq);
        int dirty = le32_to_cpu(m->dirty);
        int cleaned = 0;
        int drop = 0;
-       int i;
 
-       for (i = 0; i < CEPH_CAP_BITS; i++)
-               if ((dirty & (1 << i)) &&
-                   (u16)flush_tid == ci->i_cap_flush_tid[i])
-                       cleaned |= 1 << i;
+       n = rb_first(&ci->i_cap_flush_tree);
+       while (n) {
+               cf = rb_entry(n, struct ceph_cap_flush, i_node);
+               n = rb_next(&cf->i_node);
+               if (cf->tid == flush_tid)
+                       cleaned = cf->caps;
+               if (cf->tid <= flush_tid) {
+                       rb_erase(&cf->i_node, &ci->i_cap_flush_tree);
+                       list_add_tail(&cf->list, &to_remove);
+               } else {
+                       cleaned &= ~cf->caps;
+                       if (!cleaned)
+                               break;
+               }
+       }
 
        dout("handle_cap_flush_ack inode %p mds%d seq %d on %s cleaned %s,"
             " flushing %s -> %s\n",
@@ -2749,12 +3045,23 @@ static void handle_cap_flush_ack(struct inode *inode, u64 flush_tid,
             ceph_cap_string(cleaned), ceph_cap_string(ci->i_flushing_caps),
             ceph_cap_string(ci->i_flushing_caps & ~cleaned));
 
-       if (ci->i_flushing_caps == (ci->i_flushing_caps & ~cleaned))
+       if (list_empty(&to_remove) && !cleaned)
                goto out;
 
        ci->i_flushing_caps &= ~cleaned;
 
        spin_lock(&mdsc->cap_dirty_lock);
+
+       if (!list_empty(&to_remove)) {
+               list_for_each_entry(cf, &to_remove, list)
+                       rb_erase(&cf->g_node, &mdsc->cap_flush_tree);
+
+               n = rb_first(&mdsc->cap_flush_tree);
+               cf = n ? rb_entry(n, struct ceph_cap_flush, g_node) : NULL;
+               if (!cf || cf->tid > flush_tid)
+                       wake_up_all(&mdsc->cap_flushing_wq);
+       }
+
        if (ci->i_flushing_caps == 0) {
                list_del_init(&ci->i_flushing_item);
                if (!list_empty(&session->s_cap_flushing))
@@ -2764,14 +3071,14 @@ static void handle_cap_flush_ack(struct inode *inode, u64 flush_tid,
                                         struct ceph_inode_info,
                                         i_flushing_item)->vfs_inode);
                mdsc->num_cap_flushing--;
-               wake_up_all(&mdsc->cap_flushing_wq);
                dout(" inode %p now !flushing\n", inode);
 
                if (ci->i_dirty_caps == 0) {
                        dout(" inode %p now clean\n", inode);
                        BUG_ON(!list_empty(&ci->i_dirty_item));
                        drop = 1;
-                       if (ci->i_wrbuffer_ref_head == 0) {
+                       if (ci->i_wr_ref == 0 &&
+                           ci->i_wrbuffer_ref_head == 0) {
                                BUG_ON(!ci->i_head_snapc);
                                ceph_put_snap_context(ci->i_head_snapc);
                                ci->i_head_snapc = NULL;
@@ -2785,6 +3092,13 @@ static void handle_cap_flush_ack(struct inode *inode, u64 flush_tid,
 
 out:
        spin_unlock(&ci->i_ceph_lock);
+
+       while (!list_empty(&to_remove)) {
+               cf = list_first_entry(&to_remove,
+                                     struct ceph_cap_flush, list);
+               list_del(&cf->list);
+               ceph_free_cap_flush(cf);
+       }
        if (drop)
                iput(inode);
 }
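handle_cap_flush_ack() now unhooks completed ceph_cap_flush entries from both rb-trees while the locks are held, parks them on a local to_remove list, and frees them only after i_ceph_lock is dropped. A generic sketch of that collect-then-free pattern, with a hypothetical node type standing in for ceph_cap_flush:

/*
 * Illustrative only: detach nodes under the lock, free them afterwards so
 * the free path never runs with the spinlock held.
 */
struct example_node {
	struct list_head list;
	u64 tid;
};

static void example_reap_upto(spinlock_t *lock, struct list_head *pending,
			      u64 upto)
{
	struct example_node *n, *tmp;
	LIST_HEAD(to_remove);

	spin_lock(lock);
	list_for_each_entry_safe(n, tmp, pending, list) {
		if (n->tid <= upto)
			list_move_tail(&n->list, &to_remove);
	}
	spin_unlock(lock);

	while (!list_empty(&to_remove)) {
		n = list_first_entry(&to_remove, struct example_node, list);
		list_del(&n->list);
		kfree(n);	/* assumed kmalloc'ed */
	}
}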
@@ -2800,6 +3114,7 @@ static void handle_cap_flushsnap_ack(struct inode *inode, u64 flush_tid,
                                     struct ceph_mds_session *session)
 {
        struct ceph_inode_info *ci = ceph_inode(inode);
+       struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
        u64 follows = le64_to_cpu(m->snap_follows);
        struct ceph_cap_snap *capsnap;
        int drop = 0;
@@ -2823,6 +3138,7 @@ static void handle_cap_flushsnap_ack(struct inode *inode, u64 flush_tid,
                        list_del(&capsnap->ci_item);
                        list_del(&capsnap->flushing_item);
                        ceph_put_cap_snap(capsnap);
+                       wake_up_all(&mdsc->cap_flushing_wq);
                        drop = 1;
                        break;
                } else {
@@ -2971,7 +3287,6 @@ retry:
                        mutex_lock_nested(&session->s_mutex,
                                          SINGLE_DEPTH_NESTING);
                }
-               ceph_add_cap_releases(mdsc, tsession);
                new_cap = ceph_get_cap(mdsc, NULL);
        } else {
                WARN_ON(1);
@@ -3167,16 +3482,20 @@ void ceph_handle_caps(struct ceph_mds_session *session,
        dout(" mds%d seq %lld cap seq %u\n", session->s_mds, session->s_seq,
             (unsigned)seq);
 
-       if (op == CEPH_CAP_OP_IMPORT)
-               ceph_add_cap_releases(mdsc, session);
-
        if (!inode) {
                dout(" i don't have ino %llx\n", vino.ino);
 
                if (op == CEPH_CAP_OP_IMPORT) {
+                       cap = ceph_get_cap(mdsc, NULL);
+                       cap->cap_ino = vino.ino;
+                       cap->queue_release = 1;
+                       cap->cap_id = cap_id;
+                       cap->mseq = mseq;
+                       cap->seq = seq;
                        spin_lock(&session->s_cap_lock);
-                       __queue_cap_release(session, vino.ino, cap_id,
-                                           mseq, seq);
+                       list_add_tail(&cap->session_caps,
+                                       &session->s_cap_releases);
+                       session->s_num_cap_releases++;
                        spin_unlock(&session->s_cap_lock);
                }
                goto flush_cap_releases;
@@ -3252,11 +3571,10 @@ void ceph_handle_caps(struct ceph_mds_session *session,
 
 flush_cap_releases:
        /*
-        * send any full release message to try to move things
+        * send any cap release message to try to move things
         * along for the mds (who clearly thinks we still have this
         * cap).
         */
-       ceph_add_cap_releases(mdsc, session);
        ceph_send_cap_releases(mdsc, session);
 
 done:
index 4248307fea909c6f1555758e536a3f733ad5f95d..9314b4ea2375145647aa16446a1f33ae2c8106b2 100644 (file)
@@ -38,7 +38,7 @@ int ceph_init_dentry(struct dentry *dentry)
        if (dentry->d_fsdata)
                return 0;
 
-       di = kmem_cache_alloc(ceph_dentry_cachep, GFP_NOFS | __GFP_ZERO);
+       di = kmem_cache_alloc(ceph_dentry_cachep, GFP_KERNEL | __GFP_ZERO);
        if (!di)
                return -ENOMEM;          /* oh well */
 
@@ -106,6 +106,27 @@ static int fpos_cmp(loff_t l, loff_t r)
        return (int)(fpos_off(l) - fpos_off(r));
 }
 
+/*
+ * make note of the last dentry we read, so we can
+ * continue at the same lexicographical point,
+ * regardless of what dir changes take place on the
+ * server.
+ */
+static int note_last_dentry(struct ceph_file_info *fi, const char *name,
+                           int len, unsigned next_offset)
+{
+       char *buf = kmalloc(len+1, GFP_KERNEL);
+       if (!buf)
+               return -ENOMEM;
+       kfree(fi->last_name);
+       fi->last_name = buf;
+       memcpy(fi->last_name, name, len);
+       fi->last_name[len] = 0;
+       fi->next_offset = next_offset;
+       dout("note_last_dentry '%s'\n", fi->last_name);
+       return 0;
+}
+
 /*
  * When possible, we try to satisfy a readdir by peeking at the
  * dcache.  We make this work by carefully ordering dentries on
@@ -123,123 +144,113 @@ static int __dcache_readdir(struct file *file,  struct dir_context *ctx,
        struct ceph_file_info *fi = file->private_data;
        struct dentry *parent = file->f_path.dentry;
        struct inode *dir = d_inode(parent);
-       struct list_head *p;
-       struct dentry *dentry, *last;
+       struct dentry *dentry, *last = NULL;
        struct ceph_dentry_info *di;
+       unsigned nsize = PAGE_CACHE_SIZE / sizeof(struct dentry *);
        int err = 0;
+       loff_t ptr_pos = 0;
+       struct ceph_readdir_cache_control cache_ctl = {};
 
-       /* claim ref on last dentry we returned */
-       last = fi->dentry;
-       fi->dentry = NULL;
-
-       dout("__dcache_readdir %p v%u at %llu (last %p)\n",
-            dir, shared_gen, ctx->pos, last);
+       dout("__dcache_readdir %p v%u at %llu\n", dir, shared_gen, ctx->pos);
 
-       spin_lock(&parent->d_lock);
-
-       /* start at beginning? */
-       if (ctx->pos == 2 || last == NULL ||
-           fpos_cmp(ctx->pos, ceph_dentry(last)->offset) < 0) {
-               if (list_empty(&parent->d_subdirs))
-                       goto out_unlock;
-               p = parent->d_subdirs.prev;
-               dout(" initial p %p/%p\n", p->prev, p->next);
-       } else {
-               p = last->d_child.prev;
+       /* we can calculate cache index for the first dirfrag */
+       if (ceph_frag_is_leftmost(fpos_frag(ctx->pos))) {
+               cache_ctl.index = fpos_off(ctx->pos) - 2;
+               BUG_ON(cache_ctl.index < 0);
+               ptr_pos = cache_ctl.index * sizeof(struct dentry *);
        }
 
-more:
-       dentry = list_entry(p, struct dentry, d_child);
-       di = ceph_dentry(dentry);
-       while (1) {
-               dout(" p %p/%p %s d_subdirs %p/%p\n", p->prev, p->next,
-                    d_unhashed(dentry) ? "!hashed" : "hashed",
-                    parent->d_subdirs.prev, parent->d_subdirs.next);
-               if (p == &parent->d_subdirs) {
+       while (true) {
+               pgoff_t pgoff;
+               bool emit_dentry;
+
+               if (ptr_pos >= i_size_read(dir)) {
                        fi->flags |= CEPH_F_ATEND;
-                       goto out_unlock;
+                       err = 0;
+                       break;
+               }
+
+               err = -EAGAIN;
+               pgoff = ptr_pos >> PAGE_CACHE_SHIFT;
+               if (!cache_ctl.page || pgoff != page_index(cache_ctl.page)) {
+                       ceph_readdir_cache_release(&cache_ctl);
+                       cache_ctl.page = find_lock_page(&dir->i_data, pgoff);
+                       if (!cache_ctl.page) {
+                               dout(" page %lu not found\n", pgoff);
+                               break;
+                       }
+                       /* reading/filling the cache is serialized by
+                        * i_mutex, no need to take the page lock */
+                       unlock_page(cache_ctl.page);
+                       cache_ctl.dentries = kmap(cache_ctl.page);
                }
-               spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
+
+               rcu_read_lock();
+               spin_lock(&parent->d_lock);
+               /* check i_size again here, because an empty directory can be
+                * marked as complete while not holding the i_mutex. */
+               if (ceph_dir_is_complete_ordered(dir) &&
+                   ptr_pos < i_size_read(dir))
+                       dentry = cache_ctl.dentries[cache_ctl.index % nsize];
+               else
+                       dentry = NULL;
+               spin_unlock(&parent->d_lock);
+               if (dentry && !lockref_get_not_dead(&dentry->d_lockref))
+                       dentry = NULL;
+               rcu_read_unlock();
+               if (!dentry)
+                       break;
+
+               emit_dentry = false;
+               di = ceph_dentry(dentry);
+               spin_lock(&dentry->d_lock);
                if (di->lease_shared_gen == shared_gen &&
-                   !d_unhashed(dentry) && d_really_is_positive(dentry) &&
+                   d_really_is_positive(dentry) &&
                    ceph_snap(d_inode(dentry)) != CEPH_SNAPDIR &&
                    ceph_ino(d_inode(dentry)) != CEPH_INO_CEPH &&
-                   fpos_cmp(ctx->pos, di->offset) <= 0)
-                       break;
-               dout(" skipping %p %pd at %llu (%llu)%s%s\n", dentry,
-                    dentry, di->offset,
-                    ctx->pos, d_unhashed(dentry) ? " unhashed" : "",
-                    !d_inode(dentry) ? " null" : "");
+                   fpos_cmp(ctx->pos, di->offset) <= 0) {
+                       emit_dentry = true;
+               }
                spin_unlock(&dentry->d_lock);
-               p = p->prev;
-               dentry = list_entry(p, struct dentry, d_child);
-               di = ceph_dentry(dentry);
-       }
-
-       dget_dlock(dentry);
-       spin_unlock(&dentry->d_lock);
-       spin_unlock(&parent->d_lock);
 
-       /* make sure a dentry wasn't dropped while we didn't have parent lock */
-       if (!ceph_dir_is_complete_ordered(dir)) {
-               dout(" lost dir complete on %p; falling back to mds\n", dir);
-               dput(dentry);
-               err = -EAGAIN;
-               goto out;
-       }
+               if (emit_dentry) {
+                       dout(" %llu (%llu) dentry %p %pd %p\n", di->offset, ctx->pos,
+                            dentry, dentry, d_inode(dentry));
+                       ctx->pos = di->offset;
+                       if (!dir_emit(ctx, dentry->d_name.name,
+                                     dentry->d_name.len,
+                                     ceph_translate_ino(dentry->d_sb,
+                                                        d_inode(dentry)->i_ino),
+                                     d_inode(dentry)->i_mode >> 12)) {
+                               dput(dentry);
+                               err = 0;
+                               break;
+                       }
+                       ctx->pos++;
 
-       dout(" %llu (%llu) dentry %p %pd %p\n", di->offset, ctx->pos,
-            dentry, dentry, d_inode(dentry));
-       if (!dir_emit(ctx, dentry->d_name.name,
-                     dentry->d_name.len,
-                     ceph_translate_ino(dentry->d_sb, d_inode(dentry)->i_ino),
-                     d_inode(dentry)->i_mode >> 12)) {
-               if (last) {
-                       /* remember our position */
-                       fi->dentry = last;
-                       fi->next_offset = fpos_off(di->offset);
+                       if (last)
+                               dput(last);
+                       last = dentry;
+               } else {
+                       dput(dentry);
                }
-               dput(dentry);
-               return 0;
-       }
-
-       ctx->pos = di->offset + 1;
 
-       if (last)
-               dput(last);
-       last = dentry;
-
-       spin_lock(&parent->d_lock);
-       p = p->prev;    /* advance to next dentry */
-       goto more;
-
-out_unlock:
-       spin_unlock(&parent->d_lock);
-out:
-       if (last)
+               cache_ctl.index++;
+               ptr_pos += sizeof(struct dentry *);
+       }
+       ceph_readdir_cache_release(&cache_ctl);
+       if (last) {
+               int ret;
+               di = ceph_dentry(last);
+               ret = note_last_dentry(fi, last->d_name.name, last->d_name.len,
+                                      fpos_off(di->offset) + 1);
+               if (ret < 0)
+                       err = ret;
                dput(last);
+       }
        return err;
 }
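The rewritten __dcache_readdir() no longer walks parent->d_subdirs; it reads dentry pointers out of the directory inode's page cache, where entry i lives at byte offset i * sizeof(struct dentry *) and i_size records how many pointers are currently valid. A sketch of just the index arithmetic, assuming the page for index idx has already been looked up and that a reference on the returned dentry is taken separately (as the code above does with lockref_get_not_dead):

/* Illustrative only: map a readdir cache index to its page slot. */
static struct dentry *example_cache_slot(struct inode *dir, struct page *page,
					 unsigned idx)
{
	unsigned nsize = PAGE_CACHE_SIZE / sizeof(struct dentry *);
	struct dentry **slots;
	struct dentry *dentry;

	if ((loff_t)idx * sizeof(struct dentry *) >= i_size_read(dir))
		return NULL;		/* beyond the cached entries */

	slots = kmap(page);		/* page is assumed to be page idx / nsize
					 * of dir->i_data */
	dentry = slots[idx % nsize];
	kunmap(page);
	return dentry;
}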
 
-/*
- * make note of the last dentry we read, so we can
- * continue at the same lexicographical point,
- * regardless of what dir changes take place on the
- * server.
- */
-static int note_last_dentry(struct ceph_file_info *fi, const char *name,
-                           int len)
-{
-       kfree(fi->last_name);
-       fi->last_name = kmalloc(len+1, GFP_NOFS);
-       if (!fi->last_name)
-               return -ENOMEM;
-       memcpy(fi->last_name, name, len);
-       fi->last_name[len] = 0;
-       dout("note_last_dentry '%s'\n", fi->last_name);
-       return 0;
-}
-
 static int ceph_readdir(struct file *file, struct dir_context *ctx)
 {
        struct ceph_file_info *fi = file->private_data;
@@ -280,8 +291,7 @@ static int ceph_readdir(struct file *file, struct dir_context *ctx)
 
        /* can we use the dcache? */
        spin_lock(&ci->i_ceph_lock);
-       if ((ctx->pos == 2 || fi->dentry) &&
-           ceph_test_mount_opt(fsc, DCACHE) &&
+       if (ceph_test_mount_opt(fsc, DCACHE) &&
            !ceph_test_mount_opt(fsc, NOASYNCREADDIR) &&
            ceph_snap(inode) != CEPH_SNAPDIR &&
            __ceph_dir_is_complete_ordered(ci) &&
@@ -296,24 +306,8 @@ static int ceph_readdir(struct file *file, struct dir_context *ctx)
        } else {
                spin_unlock(&ci->i_ceph_lock);
        }
-       if (fi->dentry) {
-               err = note_last_dentry(fi, fi->dentry->d_name.name,
-                                      fi->dentry->d_name.len);
-               if (err)
-                       return err;
-               dput(fi->dentry);
-               fi->dentry = NULL;
-       }
 
        /* proceed with a normal readdir */
-
-       if (ctx->pos == 2) {
-               /* note dir version at start of readdir so we can tell
-                * if any dentries get dropped */
-               fi->dir_release_count = atomic_read(&ci->i_release_count);
-               fi->dir_ordered_count = ci->i_ordered_count;
-       }
-
 more:
        /* do we have the correct frag content buffered? */
        if (fi->frag != frag || fi->last_readdir == NULL) {
@@ -342,12 +336,15 @@ more:
                req->r_direct_hash = ceph_frag_value(frag);
                req->r_direct_is_hash = true;
                if (fi->last_name) {
-                       req->r_path2 = kstrdup(fi->last_name, GFP_NOFS);
+                       req->r_path2 = kstrdup(fi->last_name, GFP_KERNEL);
                        if (!req->r_path2) {
                                ceph_mdsc_put_request(req);
                                return -ENOMEM;
                        }
                }
+               req->r_dir_release_cnt = fi->dir_release_count;
+               req->r_dir_ordered_cnt = fi->dir_ordered_count;
+               req->r_readdir_cache_idx = fi->readdir_cache_idx;
                req->r_readdir_offset = fi->next_offset;
                req->r_args.readdir.frag = cpu_to_le32(frag);
 
@@ -364,26 +361,38 @@ more:
                     (int)req->r_reply_info.dir_end,
                     (int)req->r_reply_info.dir_complete);
 
-               if (!req->r_did_prepopulate) {
-                       dout("readdir !did_prepopulate");
-                       /* preclude from marking dir complete */
-                       fi->dir_release_count--;
-               }
 
                /* note next offset and last dentry name */
                rinfo = &req->r_reply_info;
                if (le32_to_cpu(rinfo->dir_dir->frag) != frag) {
                        frag = le32_to_cpu(rinfo->dir_dir->frag);
-                       if (ceph_frag_is_leftmost(frag))
-                               fi->next_offset = 2;
-                       else
-                               fi->next_offset = 0;
-                       off = fi->next_offset;
+                       off = req->r_readdir_offset;
+                       fi->next_offset = off;
                }
+
                fi->frag = frag;
                fi->offset = fi->next_offset;
                fi->last_readdir = req;
 
+               if (req->r_did_prepopulate) {
+                       fi->readdir_cache_idx = req->r_readdir_cache_idx;
+                       if (fi->readdir_cache_idx < 0) {
+                               /* preclude from marking dir ordered */
+                               fi->dir_ordered_count = 0;
+                       } else if (ceph_frag_is_leftmost(frag) && off == 2) {
+                               /* note dir version at start of readdir so
+                                * we can tell if any dentries get dropped */
+                               fi->dir_release_count = req->r_dir_release_cnt;
+                               fi->dir_ordered_count = req->r_dir_ordered_cnt;
+                       }
+               } else {
+                       dout("readdir !did_prepopulate");
+                       /* disable readdir cache */
+                       fi->readdir_cache_idx = -1;
+                       /* preclude from marking dir complete */
+                       fi->dir_release_count = 0;
+               }
+
                if (req->r_reply_info.dir_end) {
                        kfree(fi->last_name);
                        fi->last_name = NULL;
@@ -394,10 +403,10 @@ more:
                } else {
                        err = note_last_dentry(fi,
                                       rinfo->dir_dname[rinfo->dir_nr-1],
-                                      rinfo->dir_dname_len[rinfo->dir_nr-1]);
+                                      rinfo->dir_dname_len[rinfo->dir_nr-1],
+                                      fi->next_offset + rinfo->dir_nr);
                        if (err)
                                return err;
-                       fi->next_offset += rinfo->dir_nr;
                }
        }
 
@@ -453,16 +462,22 @@ more:
         * were released during the whole readdir, and we should have
         * the complete dir contents in our cache.
         */
-       spin_lock(&ci->i_ceph_lock);
-       if (atomic_read(&ci->i_release_count) == fi->dir_release_count) {
-               if (ci->i_ordered_count == fi->dir_ordered_count)
+       if (atomic64_read(&ci->i_release_count) == fi->dir_release_count) {
+               spin_lock(&ci->i_ceph_lock);
+               if (fi->dir_ordered_count == atomic64_read(&ci->i_ordered_count)) {
                        dout(" marking %p complete and ordered\n", inode);
-               else
+                       /* use i_size to track number of entries in
+                        * readdir cache */
+                       BUG_ON(fi->readdir_cache_idx < 0);
+                       i_size_write(inode, fi->readdir_cache_idx *
+                                    sizeof(struct dentry*));
+               } else {
                        dout(" marking %p complete\n", inode);
+               }
                __ceph_dir_set_complete(ci, fi->dir_release_count,
                                        fi->dir_ordered_count);
+               spin_unlock(&ci->i_ceph_lock);
        }
-       spin_unlock(&ci->i_ceph_lock);
 
        dout("readdir %p file %p done.\n", inode, file);
        return 0;
@@ -476,14 +491,12 @@ static void reset_readdir(struct ceph_file_info *fi, unsigned frag)
        }
        kfree(fi->last_name);
        fi->last_name = NULL;
+       fi->dir_release_count = 0;
+       fi->readdir_cache_idx = -1;
        if (ceph_frag_is_leftmost(frag))
                fi->next_offset = 2;  /* compensate for . and .. */
        else
                fi->next_offset = 0;
-       if (fi->dentry) {
-               dput(fi->dentry);
-               fi->dentry = NULL;
-       }
        fi->flags &= ~CEPH_F_ATEND;
 }
 
@@ -497,13 +510,12 @@ static loff_t ceph_dir_llseek(struct file *file, loff_t offset, int whence)
        mutex_lock(&inode->i_mutex);
        retval = -EINVAL;
        switch (whence) {
-       case SEEK_END:
-               offset += inode->i_size + 2;   /* FIXME */
-               break;
        case SEEK_CUR:
                offset += file->f_pos;
        case SEEK_SET:
                break;
+       case SEEK_END:
+               retval = -EOPNOTSUPP;
        default:
                goto out;
        }
@@ -516,20 +528,18 @@ static loff_t ceph_dir_llseek(struct file *file, loff_t offset, int whence)
                }
                retval = offset;
 
-               /*
-                * discard buffered readdir content on seekdir(0), or
-                * seek to new frag, or seek prior to current chunk.
-                */
                if (offset == 0 ||
                    fpos_frag(offset) != fi->frag ||
                    fpos_off(offset) < fi->offset) {
+                       /* discard buffered readdir content on seekdir(0), or
+                        * seek to new frag, or seek prior to current chunk */
                        dout("dir_llseek dropping %p content\n", file);
                        reset_readdir(fi, fpos_frag(offset));
+               } else if (fpos_cmp(offset, old_offset) > 0) {
+                       /* reset dir_release_count if we did a forward seek */
+                       fi->dir_release_count = 0;
+                       fi->readdir_cache_idx = -1;
                }
-
-               /* bump dir_release_count if we did a forward seek */
-               if (fpos_cmp(offset, old_offset) > 0)
-                       fi->dir_release_count--;
        }
 out:
        mutex_unlock(&inode->i_mutex);
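
For context on the llseek hunk above (not part of the patch): a ceph directory file position packs the fragment and the within-fragment offset into one 64-bit value, which is why the seek code compares fpos_frag() and fpos_off() separately before deciding whether to drop the buffered readdir content. A minimal userspace sketch of that split, assuming the usual layout of frag in the upper 32 bits and offset in the lower 32 bits:

/* Sketch only (not from the patch): split a 64-bit directory position
 * into fragment and offset parts, assuming frag occupies the upper
 * 32 bits and the offset the lower 32 bits. */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

static uint64_t make_fpos(uint32_t frag, uint32_t off)
{
	return ((uint64_t)frag << 32) | off;
}

static uint32_t fpos_frag(uint64_t pos) { return (uint32_t)(pos >> 32); }
static uint32_t fpos_off(uint64_t pos)  { return (uint32_t)pos; }

int main(void)
{
	uint64_t pos = make_fpos(0x01000000, 2); /* hypothetical frag, offset 2 */

	printf("frag 0x%08" PRIx32 " off %" PRIu32 "\n",
	       fpos_frag(pos), fpos_off(pos));
	return 0;
}
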
@@ -764,7 +774,7 @@ static int ceph_symlink(struct inode *dir, struct dentry *dentry,
                err = PTR_ERR(req);
                goto out;
        }
-       req->r_path2 = kstrdup(dest, GFP_NOFS);
+       req->r_path2 = kstrdup(dest, GFP_KERNEL);
        if (!req->r_path2) {
                err = -ENOMEM;
                ceph_mdsc_put_request(req);
@@ -985,16 +995,15 @@ static int ceph_rename(struct inode *old_dir, struct dentry *old_dentry,
                 * to do it here.
                 */
 
+               /* d_move screws up sibling dentries' offsets */
+               ceph_dir_clear_complete(old_dir);
+               ceph_dir_clear_complete(new_dir);
+
                d_move(old_dentry, new_dentry);
 
                /* ensure target dentry is invalidated, despite
                   rehashing bug in vfs_rename_dir */
                ceph_invalidate_dentry_lease(new_dentry);
-
-               /* d_move screws up sibling dentries' offsets */
-               ceph_dir_clear_complete(old_dir);
-               ceph_dir_clear_complete(new_dir);
-
        }
        ceph_mdsc_put_request(req);
        return err;
@@ -1189,7 +1198,7 @@ static ssize_t ceph_read_dir(struct file *file, char __user *buf, size_t size,
                return -EISDIR;
 
        if (!cf->dir_info) {
-               cf->dir_info = kmalloc(bufsize, GFP_NOFS);
+               cf->dir_info = kmalloc(bufsize, GFP_KERNEL);
                if (!cf->dir_info)
                        return -ENOMEM;
                cf->dir_info_len =
@@ -1223,66 +1232,6 @@ static ssize_t ceph_read_dir(struct file *file, char __user *buf, size_t size,
        return size - left;
 }
 
-/*
- * an fsync() on a dir will wait for any uncommitted directory
- * operations to commit.
- */
-static int ceph_dir_fsync(struct file *file, loff_t start, loff_t end,
-                         int datasync)
-{
-       struct inode *inode = file_inode(file);
-       struct ceph_inode_info *ci = ceph_inode(inode);
-       struct list_head *head = &ci->i_unsafe_dirops;
-       struct ceph_mds_request *req;
-       u64 last_tid;
-       int ret = 0;
-
-       dout("dir_fsync %p\n", inode);
-       ret = filemap_write_and_wait_range(inode->i_mapping, start, end);
-       if (ret)
-               return ret;
-       mutex_lock(&inode->i_mutex);
-
-       spin_lock(&ci->i_unsafe_lock);
-       if (list_empty(head))
-               goto out;
-
-       req = list_entry(head->prev,
-                        struct ceph_mds_request, r_unsafe_dir_item);
-       last_tid = req->r_tid;
-
-       do {
-               ceph_mdsc_get_request(req);
-               spin_unlock(&ci->i_unsafe_lock);
-
-               dout("dir_fsync %p wait on tid %llu (until %llu)\n",
-                    inode, req->r_tid, last_tid);
-               if (req->r_timeout) {
-                       unsigned long time_left = wait_for_completion_timeout(
-                                                       &req->r_safe_completion,
-                                                       req->r_timeout);
-                       if (time_left > 0)
-                               ret = 0;
-                       else
-                               ret = -EIO;  /* timed out */
-               } else {
-                       wait_for_completion(&req->r_safe_completion);
-               }
-               ceph_mdsc_put_request(req);
-
-               spin_lock(&ci->i_unsafe_lock);
-               if (ret || list_empty(head))
-                       break;
-               req = list_entry(head->next,
-                                struct ceph_mds_request, r_unsafe_dir_item);
-       } while (req->r_tid < last_tid);
-out:
-       spin_unlock(&ci->i_unsafe_lock);
-       mutex_unlock(&inode->i_mutex);
-
-       return ret;
-}
-
 /*
  * We maintain a private dentry LRU.
  *
@@ -1353,7 +1302,7 @@ const struct file_operations ceph_dir_fops = {
        .open = ceph_open,
        .release = ceph_release,
        .unlocked_ioctl = ceph_ioctl,
-       .fsync = ceph_dir_fsync,
+       .fsync = ceph_fsync,
 };
 
 const struct file_operations ceph_snapdir_fops = {
index 3b6b522b4b31ed9e2f7193c661894787bfe63627..8b79d87eaf4675ff91cf05c10a3fc53e70d5b313 100644 (file)
@@ -89,13 +89,14 @@ static int ceph_init_file(struct inode *inode, struct file *file, int fmode)
        case S_IFDIR:
                dout("init_file %p %p 0%o (regular)\n", inode, file,
                     inode->i_mode);
-               cf = kmem_cache_alloc(ceph_file_cachep, GFP_NOFS | __GFP_ZERO);
+               cf = kmem_cache_alloc(ceph_file_cachep, GFP_KERNEL | __GFP_ZERO);
                if (cf == NULL) {
                        ceph_put_fmode(ceph_inode(inode), fmode); /* clean up */
                        return -ENOMEM;
                }
                cf->fmode = fmode;
                cf->next_offset = 2;
+               cf->readdir_cache_idx = -1;
                file->private_data = cf;
                BUG_ON(inode->i_fop->release != ceph_release);
                break;
@@ -324,7 +325,6 @@ int ceph_release(struct inode *inode, struct file *file)
                ceph_mdsc_put_request(cf->last_readdir);
        kfree(cf->last_name);
        kfree(cf->dir_info);
-       dput(cf->dentry);
        kmem_cache_free(ceph_file_cachep, cf);
 
        /* wake up anyone waiting for caps on this inode */
@@ -483,7 +483,7 @@ static ssize_t ceph_sync_read(struct kiocb *iocb, struct iov_iter *i,
                }
        } else {
                num_pages = calc_pages_for(off, len);
-               pages = ceph_alloc_page_vector(num_pages, GFP_NOFS);
+               pages = ceph_alloc_page_vector(num_pages, GFP_KERNEL);
                if (IS_ERR(pages))
                        return PTR_ERR(pages);
                ret = striped_read(inode, off, len, pages,
@@ -557,13 +557,13 @@ static void ceph_sync_write_unsafe(struct ceph_osd_request *req, bool unsafe)
  * objects, rollback on failure, etc.)
  */
 static ssize_t
-ceph_sync_direct_write(struct kiocb *iocb, struct iov_iter *from, loff_t pos)
+ceph_sync_direct_write(struct kiocb *iocb, struct iov_iter *from, loff_t pos,
+                      struct ceph_snap_context *snapc)
 {
        struct file *file = iocb->ki_filp;
        struct inode *inode = file_inode(file);
        struct ceph_inode_info *ci = ceph_inode(inode);
        struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
-       struct ceph_snap_context *snapc;
        struct ceph_vino vino;
        struct ceph_osd_request *req;
        struct page **pages;
@@ -600,7 +600,6 @@ ceph_sync_direct_write(struct kiocb *iocb, struct iov_iter *from, loff_t pos)
                size_t start;
                ssize_t n;
 
-               snapc = ci->i_snap_realm->cached_context;
                vino = ceph_vino(inode);
                req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
                                            vino, pos, &len, 0,
@@ -614,7 +613,7 @@ ceph_sync_direct_write(struct kiocb *iocb, struct iov_iter *from, loff_t pos)
                        break;
                }
 
-               osd_req_op_init(req, 1, CEPH_OSD_OP_STARTSYNC);
+               osd_req_op_init(req, 1, CEPH_OSD_OP_STARTSYNC, 0);
 
                n = iov_iter_get_pages_alloc(from, &pages, len, &start);
                if (unlikely(n < 0)) {
@@ -674,13 +673,13 @@ ceph_sync_direct_write(struct kiocb *iocb, struct iov_iter *from, loff_t pos)
  * objects, rollback on failure, etc.)
  */
 static ssize_t
-ceph_sync_write(struct kiocb *iocb, struct iov_iter *from, loff_t pos)
+ceph_sync_write(struct kiocb *iocb, struct iov_iter *from, loff_t pos,
+               struct ceph_snap_context *snapc)
 {
        struct file *file = iocb->ki_filp;
        struct inode *inode = file_inode(file);
        struct ceph_inode_info *ci = ceph_inode(inode);
        struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
-       struct ceph_snap_context *snapc;
        struct ceph_vino vino;
        struct ceph_osd_request *req;
        struct page **pages;
@@ -717,7 +716,6 @@ ceph_sync_write(struct kiocb *iocb, struct iov_iter *from, loff_t pos)
                size_t left;
                int n;
 
-               snapc = ci->i_snap_realm->cached_context;
                vino = ceph_vino(inode);
                req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
                                            vino, pos, &len, 0, 1,
@@ -736,7 +734,7 @@ ceph_sync_write(struct kiocb *iocb, struct iov_iter *from, loff_t pos)
                 */
                num_pages = (len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
 
-               pages = ceph_alloc_page_vector(num_pages, GFP_NOFS);
+               pages = ceph_alloc_page_vector(num_pages, GFP_KERNEL);
                if (IS_ERR(pages)) {
                        ret = PTR_ERR(pages);
                        goto out;
@@ -860,7 +858,7 @@ again:
                struct page *page = NULL;
                loff_t i_size;
                if (retry_op == READ_INLINE) {
-                       page = __page_cache_alloc(GFP_NOFS);
+                       page = __page_cache_alloc(GFP_KERNEL);
                        if (!page)
                                return -ENOMEM;
                }
@@ -941,6 +939,7 @@ static ssize_t ceph_write_iter(struct kiocb *iocb, struct iov_iter *from)
        struct ceph_inode_info *ci = ceph_inode(inode);
        struct ceph_osd_client *osdc =
                &ceph_sb_to_client(inode->i_sb)->client->osdc;
+       struct ceph_cap_flush *prealloc_cf;
        ssize_t count, written = 0;
        int err, want, got;
        loff_t pos;
@@ -948,6 +947,10 @@ static ssize_t ceph_write_iter(struct kiocb *iocb, struct iov_iter *from)
        if (ceph_snap(inode) != CEPH_NOSNAP)
                return -EROFS;
 
+       prealloc_cf = ceph_alloc_cap_flush();
+       if (!prealloc_cf)
+               return -ENOMEM;
+
        mutex_lock(&inode->i_mutex);
 
        /* We can write back this queue in page reclaim */
@@ -959,7 +962,7 @@ static ssize_t ceph_write_iter(struct kiocb *iocb, struct iov_iter *from)
 
        pos = iocb->ki_pos;
        count = iov_iter_count(from);
-       err = file_remove_suid(file);
+       err = file_remove_privs(file);
        if (err)
                goto out;
 
@@ -996,14 +999,30 @@ retry_snap:
 
        if ((got & (CEPH_CAP_FILE_BUFFER|CEPH_CAP_FILE_LAZYIO)) == 0 ||
            (iocb->ki_flags & IOCB_DIRECT) || (fi->flags & CEPH_F_SYNC)) {
+               struct ceph_snap_context *snapc;
                struct iov_iter data;
                mutex_unlock(&inode->i_mutex);
+
+               spin_lock(&ci->i_ceph_lock);
+               if (__ceph_have_pending_cap_snap(ci)) {
+                       struct ceph_cap_snap *capsnap =
+                                       list_last_entry(&ci->i_cap_snaps,
+                                                       struct ceph_cap_snap,
+                                                       ci_item);
+                       snapc = ceph_get_snap_context(capsnap->context);
+               } else {
+                       BUG_ON(!ci->i_head_snapc);
+                       snapc = ceph_get_snap_context(ci->i_head_snapc);
+               }
+               spin_unlock(&ci->i_ceph_lock);
+
                /* we might need to revert back to that point */
                data = *from;
                if (iocb->ki_flags & IOCB_DIRECT)
-                       written = ceph_sync_direct_write(iocb, &data, pos);
+                       written = ceph_sync_direct_write(iocb, &data, pos,
+                                                        snapc);
                else
-                       written = ceph_sync_write(iocb, &data, pos);
+                       written = ceph_sync_write(iocb, &data, pos, snapc);
                if (written == -EOLDSNAPC) {
                        dout("aio_write %p %llx.%llx %llu~%u"
                                "got EOLDSNAPC, retrying\n",
@@ -1014,6 +1033,7 @@ retry_snap:
                }
                if (written > 0)
                        iov_iter_advance(from, written);
+               ceph_put_snap_context(snapc);
        } else {
                loff_t old_size = inode->i_size;
                /*
@@ -1035,7 +1055,8 @@ retry_snap:
                int dirty;
                spin_lock(&ci->i_ceph_lock);
                ci->i_inline_version = CEPH_INLINE_NONE;
-               dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR);
+               dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR,
+                                              &prealloc_cf);
                spin_unlock(&ci->i_ceph_lock);
                if (dirty)
                        __mark_inode_dirty(inode, dirty);
@@ -1059,6 +1080,7 @@ retry_snap:
 out:
        mutex_unlock(&inode->i_mutex);
 out_unlocked:
+       ceph_free_cap_flush(prealloc_cf);
        current->backing_dev_info = NULL;
        return written ? written : err;
 }
@@ -1255,6 +1277,7 @@ static long ceph_fallocate(struct file *file, int mode,
        struct ceph_inode_info *ci = ceph_inode(inode);
        struct ceph_osd_client *osdc =
                &ceph_inode_to_client(inode)->client->osdc;
+       struct ceph_cap_flush *prealloc_cf;
        int want, got = 0;
        int dirty;
        int ret = 0;
@@ -1267,6 +1290,10 @@ static long ceph_fallocate(struct file *file, int mode,
        if (!S_ISREG(inode->i_mode))
                return -EOPNOTSUPP;
 
+       prealloc_cf = ceph_alloc_cap_flush();
+       if (!prealloc_cf)
+               return -ENOMEM;
+
        mutex_lock(&inode->i_mutex);
 
        if (ceph_snap(inode) != CEPH_NOSNAP) {
@@ -1313,7 +1340,8 @@ static long ceph_fallocate(struct file *file, int mode,
        if (!ret) {
                spin_lock(&ci->i_ceph_lock);
                ci->i_inline_version = CEPH_INLINE_NONE;
-               dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR);
+               dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR,
+                                              &prealloc_cf);
                spin_unlock(&ci->i_ceph_lock);
                if (dirty)
                        __mark_inode_dirty(inode, dirty);
@@ -1322,6 +1350,7 @@ static long ceph_fallocate(struct file *file, int mode,
        ceph_put_cap_refs(ci, got);
 unlock:
        mutex_unlock(&inode->i_mutex);
+       ceph_free_cap_flush(prealloc_cf);
        return ret;
 }
 
index 571acd88606cfcec3d01fc4a6ef453f0b49e9713..96d2bd8299022e554c8bfdc0b7f1c759be8fc8cd 100644 (file)
@@ -389,9 +389,10 @@ struct inode *ceph_alloc_inode(struct super_block *sb)
        ci->i_inline_version = 0;
        ci->i_time_warp_seq = 0;
        ci->i_ceph_flags = 0;
-       ci->i_ordered_count = 0;
-       atomic_set(&ci->i_release_count, 1);
-       atomic_set(&ci->i_complete_count, 0);
+       atomic64_set(&ci->i_ordered_count, 1);
+       atomic64_set(&ci->i_release_count, 1);
+       atomic64_set(&ci->i_complete_seq[0], 0);
+       atomic64_set(&ci->i_complete_seq[1], 0);
        ci->i_symlink = NULL;
 
        memset(&ci->i_dir_layout, 0, sizeof(ci->i_dir_layout));
@@ -415,9 +416,8 @@ struct inode *ceph_alloc_inode(struct super_block *sb)
        ci->i_flushing_caps = 0;
        INIT_LIST_HEAD(&ci->i_dirty_item);
        INIT_LIST_HEAD(&ci->i_flushing_item);
-       ci->i_cap_flush_seq = 0;
-       ci->i_cap_flush_last_tid = 0;
-       memset(&ci->i_cap_flush_tid, 0, sizeof(ci->i_cap_flush_tid));
+       ci->i_prealloc_cap_flush = NULL;
+       ci->i_cap_flush_tree = RB_ROOT;
        init_waitqueue_head(&ci->i_cap_wq);
        ci->i_hold_caps_min = 0;
        ci->i_hold_caps_max = 0;
@@ -752,7 +752,10 @@ static int fill_inode(struct inode *inode, struct page *locked_page,
 
        if (new_version ||
            (new_issued & (CEPH_CAP_ANY_FILE_RD | CEPH_CAP_ANY_FILE_WR))) {
+               if (ci->i_layout.fl_pg_pool != info->layout.fl_pg_pool)
+                       ci->i_ceph_flags &= ~CEPH_I_POOL_PERM;
                ci->i_layout = info->layout;
+
                queue_trunc = ceph_fill_file_size(inode, issued,
                                        le32_to_cpu(info->truncate_seq),
                                        le64_to_cpu(info->truncate_size),
@@ -858,9 +861,10 @@ static int fill_inode(struct inode *inode, struct page *locked_page,
                            (issued & CEPH_CAP_FILE_EXCL) == 0 &&
                            !__ceph_dir_is_complete(ci)) {
                                dout(" marking %p complete (empty)\n", inode);
+                               i_size_write(inode, 0);
                                __ceph_dir_set_complete(ci,
-                                       atomic_read(&ci->i_release_count),
-                                       ci->i_ordered_count);
+                                       atomic64_read(&ci->i_release_count),
+                                       atomic64_read(&ci->i_ordered_count));
                        }
 
                        wake = true;
@@ -1212,6 +1216,10 @@ retry_lookup:
                        dout("fill_trace doing d_move %p -> %p\n",
                             req->r_old_dentry, dn);
 
+                       /* d_move screws up sibling dentries' offsets */
+                       ceph_dir_clear_ordered(dir);
+                       ceph_dir_clear_ordered(olddir);
+
                        d_move(req->r_old_dentry, dn);
                        dout(" src %p '%pd' dst %p '%pd'\n",
                             req->r_old_dentry,
@@ -1222,10 +1230,6 @@ retry_lookup:
                           rehashing bug in vfs_rename_dir */
                        ceph_invalidate_dentry_lease(dn);
 
-                       /* d_move screws up sibling dentries' offsets */
-                       ceph_dir_clear_ordered(dir);
-                       ceph_dir_clear_ordered(olddir);
-
                        dout("dn %p gets new offset %lld\n", req->r_old_dentry,
                             ceph_dentry(req->r_old_dentry)->offset);
 
@@ -1333,6 +1337,49 @@ static int readdir_prepopulate_inodes_only(struct ceph_mds_request *req,
        return err;
 }
 
+void ceph_readdir_cache_release(struct ceph_readdir_cache_control *ctl)
+{
+       if (ctl->page) {
+               kunmap(ctl->page);
+               page_cache_release(ctl->page);
+               ctl->page = NULL;
+       }
+}
+
+static int fill_readdir_cache(struct inode *dir, struct dentry *dn,
+                             struct ceph_readdir_cache_control *ctl,
+                             struct ceph_mds_request *req)
+{
+       struct ceph_inode_info *ci = ceph_inode(dir);
+       unsigned nsize = PAGE_CACHE_SIZE / sizeof(struct dentry*);
+       unsigned idx = ctl->index % nsize;
+       pgoff_t pgoff = ctl->index / nsize;
+
+       if (!ctl->page || pgoff != page_index(ctl->page)) {
+               ceph_readdir_cache_release(ctl);
+               ctl->page  = grab_cache_page(&dir->i_data, pgoff);
+               if (!ctl->page) {
+                       ctl->index = -1;
+                       return -ENOMEM;
+               }
+               /* reading/filling the cache are serialized by
+                * i_mutex, no need to use page lock */
+               unlock_page(ctl->page);
+               ctl->dentries = kmap(ctl->page);
+       }
+
+       if (req->r_dir_release_cnt == atomic64_read(&ci->i_release_count) &&
+           req->r_dir_ordered_cnt == atomic64_read(&ci->i_ordered_count)) {
+               dout("readdir cache dn %p idx %d\n", dn, ctl->index);
+               ctl->dentries[idx] = dn;
+               ctl->index++;
+       } else {
+               dout("disable readdir cache\n");
+               ctl->index = -1;
+       }
+       return 0;
+}
+
 int ceph_readdir_prepopulate(struct ceph_mds_request *req,
                             struct ceph_mds_session *session)
 {
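
For context on fill_readdir_cache() above (not part of the patch): the new readdir cache keeps dentry pointers in the directory inode's page cache, and a single linear index is split into a page number and a slot within that page. A tiny userspace sketch of the arithmetic, assuming a hypothetical 4096-byte page and 8-byte pointers:

/* Sketch only (not from the patch): how a linear readdir cache index
 * maps to a cache page and a slot inside that page. The page and
 * pointer sizes are illustrative stand-ins. */
#include <stdio.h>

#define PAGE_SIZE_SKETCH 4096UL
#define PTR_SIZE_SKETCH  8UL

int main(void)
{
	unsigned long nsize = PAGE_SIZE_SKETCH / PTR_SIZE_SKETCH; /* 512 slots per page */
	unsigned long index = 1000;                               /* example cache index */
	unsigned long pgoff = index / nsize;                      /* which cache page */
	unsigned long slot  = index % nsize;                      /* slot inside that page */

	printf("index %lu -> page %lu, slot %lu\n", index, pgoff, slot);
	return 0;
}
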
@@ -1345,8 +1392,11 @@ int ceph_readdir_prepopulate(struct ceph_mds_request *req,
        struct inode *snapdir = NULL;
        struct ceph_mds_request_head *rhead = req->r_request->front.iov_base;
        struct ceph_dentry_info *di;
-       u64 r_readdir_offset = req->r_readdir_offset;
        u32 frag = le32_to_cpu(rhead->args.readdir.frag);
+       struct ceph_readdir_cache_control cache_ctl = {};
+
+       if (req->r_aborted)
+               return readdir_prepopulate_inodes_only(req, session);
 
        if (rinfo->dir_dir &&
            le32_to_cpu(rinfo->dir_dir->frag) != frag) {
@@ -1354,14 +1404,11 @@ int ceph_readdir_prepopulate(struct ceph_mds_request *req,
                     frag, le32_to_cpu(rinfo->dir_dir->frag));
                frag = le32_to_cpu(rinfo->dir_dir->frag);
                if (ceph_frag_is_leftmost(frag))
-                       r_readdir_offset = 2;
+                       req->r_readdir_offset = 2;
                else
-                       r_readdir_offset = 0;
+                       req->r_readdir_offset = 0;
        }
 
-       if (req->r_aborted)
-               return readdir_prepopulate_inodes_only(req, session);
-
        if (le32_to_cpu(rinfo->head->op) == CEPH_MDS_OP_LSSNAP) {
                snapdir = ceph_get_snapdir(d_inode(parent));
                parent = d_find_alias(snapdir);
@@ -1374,6 +1421,17 @@ int ceph_readdir_prepopulate(struct ceph_mds_request *req,
                        ceph_fill_dirfrag(d_inode(parent), rinfo->dir_dir);
        }
 
+       if (ceph_frag_is_leftmost(frag) && req->r_readdir_offset == 2) {
+               /* note dir version at start of readdir so we can tell
+                * if any dentries get dropped */
+               struct ceph_inode_info *ci = ceph_inode(d_inode(parent));
+               req->r_dir_release_cnt = atomic64_read(&ci->i_release_count);
+               req->r_dir_ordered_cnt = atomic64_read(&ci->i_ordered_count);
+               req->r_readdir_cache_idx = 0;
+       }
+
+       cache_ctl.index = req->r_readdir_cache_idx;
+
        /* FIXME: release caps/leases if error occurs */
        for (i = 0; i < rinfo->dir_nr; i++) {
                struct ceph_vino vino;
@@ -1413,13 +1471,6 @@ retry_lookup:
                        d_delete(dn);
                        dput(dn);
                        goto retry_lookup;
-               } else {
-                       /* reorder parent's d_subdirs */
-                       spin_lock(&parent->d_lock);
-                       spin_lock_nested(&dn->d_lock, DENTRY_D_LOCK_NESTED);
-                       list_move(&dn->d_child, &parent->d_subdirs);
-                       spin_unlock(&dn->d_lock);
-                       spin_unlock(&parent->d_lock);
                }
 
                /* inode */
@@ -1436,13 +1487,15 @@ retry_lookup:
                        }
                }
 
-               if (fill_inode(in, NULL, &rinfo->dir_in[i], NULL, session,
-                              req->r_request_started, -1,
-                              &req->r_caps_reservation) < 0) {
+               ret = fill_inode(in, NULL, &rinfo->dir_in[i], NULL, session,
+                                req->r_request_started, -1,
+                                &req->r_caps_reservation);
+               if (ret < 0) {
                        pr_err("fill_inode badness on %p\n", in);
                        if (d_really_is_negative(dn))
                                iput(in);
                        d_drop(dn);
+                       err = ret;
                        goto next_item;
                }
 
@@ -1458,19 +1511,28 @@ retry_lookup:
                }
 
                di = dn->d_fsdata;
-               di->offset = ceph_make_fpos(frag, i + r_readdir_offset);
+               di->offset = ceph_make_fpos(frag, i + req->r_readdir_offset);
 
                update_dentry_lease(dn, rinfo->dir_dlease[i],
                                    req->r_session,
                                    req->r_request_started);
+
+               if (err == 0 && cache_ctl.index >= 0) {
+                       ret = fill_readdir_cache(d_inode(parent), dn,
+                                                &cache_ctl, req);
+                       if (ret < 0)
+                               err = ret;
+               }
 next_item:
                if (dn)
                        dput(dn);
        }
-       if (err == 0)
-               req->r_did_prepopulate = true;
-
 out:
+       if (err == 0) {
+               req->r_did_prepopulate = true;
+               req->r_readdir_cache_idx = cache_ctl.index;
+       }
+       ceph_readdir_cache_release(&cache_ctl);
        if (snapdir) {
                iput(snapdir);
                dput(parent);
@@ -1712,11 +1774,13 @@ int ceph_setattr(struct dentry *dentry, struct iattr *attr)
        const unsigned int ia_valid = attr->ia_valid;
        struct ceph_mds_request *req;
        struct ceph_mds_client *mdsc = ceph_sb_to_client(dentry->d_sb)->mdsc;
+       struct ceph_cap_flush *prealloc_cf;
        int issued;
        int release = 0, dirtied = 0;
        int mask = 0;
        int err = 0;
        int inode_dirty_flags = 0;
+       bool lock_snap_rwsem = false;
 
        if (ceph_snap(inode) != CEPH_NOSNAP)
                return -EROFS;
@@ -1725,13 +1789,31 @@ int ceph_setattr(struct dentry *dentry, struct iattr *attr)
        if (err != 0)
                return err;
 
+       prealloc_cf = ceph_alloc_cap_flush();
+       if (!prealloc_cf)
+               return -ENOMEM;
+
        req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_SETATTR,
                                       USE_AUTH_MDS);
-       if (IS_ERR(req))
+       if (IS_ERR(req)) {
+               ceph_free_cap_flush(prealloc_cf);
                return PTR_ERR(req);
+       }
 
        spin_lock(&ci->i_ceph_lock);
        issued = __ceph_caps_issued(ci, NULL);
+
+       if (!ci->i_head_snapc &&
+           (issued & (CEPH_CAP_ANY_EXCL | CEPH_CAP_FILE_WR))) {
+               lock_snap_rwsem = true;
+               if (!down_read_trylock(&mdsc->snap_rwsem)) {
+                       spin_unlock(&ci->i_ceph_lock);
+                       down_read(&mdsc->snap_rwsem);
+                       spin_lock(&ci->i_ceph_lock);
+                       issued = __ceph_caps_issued(ci, NULL);
+               }
+       }
+
        dout("setattr %p issued %s\n", inode, ceph_cap_string(issued));
 
        if (ia_valid & ATTR_UID) {
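
For context on the ceph_setattr() hunk above (not part of the patch): when snap_rwsem cannot be taken with a trylock while i_ceph_lock is held, the code drops the spinlock, sleeps on the rwsem, retakes the spinlock and re-reads the issued caps, since the state may have changed while unlocked. A rough userspace sketch of that retry pattern, with pthread primitives standing in for the kernel spinlock and rw_semaphore and a hypothetical read_issued() helper:

/* Sketch only (not from the patch): take a sleeping lock while holding a
 * spinlock by trying first, and falling back to drop/sleep/retake/re-read. */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t ceph_lock = PTHREAD_MUTEX_INITIALIZER;    /* ~ ci->i_ceph_lock */
static pthread_rwlock_t snap_rwsem = PTHREAD_RWLOCK_INITIALIZER; /* ~ mdsc->snap_rwsem */
static int issued_caps = 0x5;                                    /* hypothetical state */

static int read_issued(void) { return issued_caps; }

int main(void)
{
	int issued;

	pthread_mutex_lock(&ceph_lock);
	issued = read_issued();

	if (pthread_rwlock_tryrdlock(&snap_rwsem) != 0) {
		/* can't get the rwsem without sleeping: drop the spinlock first */
		pthread_mutex_unlock(&ceph_lock);
		pthread_rwlock_rdlock(&snap_rwsem);
		pthread_mutex_lock(&ceph_lock);
		issued = read_issued(); /* state may have changed while unlocked */
	}

	printf("issued caps now 0x%x (snap_rwsem held for read)\n", issued);

	pthread_mutex_unlock(&ceph_lock);
	pthread_rwlock_unlock(&snap_rwsem);
	return 0;
}
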
@@ -1874,12 +1956,15 @@ int ceph_setattr(struct dentry *dentry, struct iattr *attr)
                dout("setattr %p ATTR_FILE ... hrm!\n", inode);
 
        if (dirtied) {
-               inode_dirty_flags = __ceph_mark_dirty_caps(ci, dirtied);
+               inode_dirty_flags = __ceph_mark_dirty_caps(ci, dirtied,
+                                                          &prealloc_cf);
                inode->i_ctime = CURRENT_TIME;
        }
 
        release &= issued;
        spin_unlock(&ci->i_ceph_lock);
+       if (lock_snap_rwsem)
+               up_read(&mdsc->snap_rwsem);
 
        if (inode_dirty_flags)
                __mark_inode_dirty(inode, inode_dirty_flags);
@@ -1904,9 +1989,11 @@ int ceph_setattr(struct dentry *dentry, struct iattr *attr)
        ceph_mdsc_put_request(req);
        if (mask & CEPH_SETATTR_SIZE)
                __ceph_do_pending_vmtruncate(inode);
+       ceph_free_cap_flush(prealloc_cf);
        return err;
 out_put:
        ceph_mdsc_put_request(req);
+       ceph_free_cap_flush(prealloc_cf);
        return err;
 }
 
index 84f37f34f9aa663952a60e8a3440a81934c083b0..6aa07af67603ada211f49268d3845ea62b625720 100644 (file)
@@ -8,6 +8,7 @@
 #include <linux/debugfs.h>
 #include <linux/seq_file.h>
 #include <linux/utsname.h>
+#include <linux/ratelimit.h>
 
 #include "super.h"
 #include "mds_client.h"
@@ -458,7 +459,6 @@ static struct ceph_mds_session *register_session(struct ceph_mds_client *mdsc,
        s->s_cap_reconnect = 0;
        s->s_cap_iterator = NULL;
        INIT_LIST_HEAD(&s->s_cap_releases);
-       INIT_LIST_HEAD(&s->s_cap_releases_done);
        INIT_LIST_HEAD(&s->s_cap_flushing);
        INIT_LIST_HEAD(&s->s_cap_snaps_flushing);
 
@@ -629,6 +629,9 @@ static void __register_request(struct ceph_mds_client *mdsc,
        req->r_uid = current_fsuid();
        req->r_gid = current_fsgid();
 
+       if (mdsc->oldest_tid == 0 && req->r_op != CEPH_MDS_OP_SETFILELOCK)
+               mdsc->oldest_tid = req->r_tid;
+
        if (dir) {
                struct ceph_inode_info *ci = ceph_inode(dir);
 
@@ -644,6 +647,21 @@ static void __unregister_request(struct ceph_mds_client *mdsc,
                                 struct ceph_mds_request *req)
 {
        dout("__unregister_request %p tid %lld\n", req, req->r_tid);
+
+       if (req->r_tid == mdsc->oldest_tid) {
+               struct rb_node *p = rb_next(&req->r_node);
+               mdsc->oldest_tid = 0;
+               while (p) {
+                       struct ceph_mds_request *next_req =
+                               rb_entry(p, struct ceph_mds_request, r_node);
+                       if (next_req->r_op != CEPH_MDS_OP_SETFILELOCK) {
+                               mdsc->oldest_tid = next_req->r_tid;
+                               break;
+                       }
+                       p = rb_next(p);
+               }
+       }
+
        rb_erase(&req->r_node, &mdsc->request_tree);
        RB_CLEAR_NODE(&req->r_node);
 
@@ -998,27 +1016,25 @@ void ceph_mdsc_open_export_target_sessions(struct ceph_mds_client *mdsc,
  * session caps
  */
 
-/*
- * Free preallocated cap messages assigned to this session
- */
-static void cleanup_cap_releases(struct ceph_mds_session *session)
+/* caller holds s_cap_lock, we drop it */
+static void cleanup_cap_releases(struct ceph_mds_client *mdsc,
+                                struct ceph_mds_session *session)
+       __releases(session->s_cap_lock)
 {
-       struct ceph_msg *msg;
+       LIST_HEAD(tmp_list);
+       list_splice_init(&session->s_cap_releases, &tmp_list);
+       session->s_num_cap_releases = 0;
+       spin_unlock(&session->s_cap_lock);
 
-       spin_lock(&session->s_cap_lock);
-       while (!list_empty(&session->s_cap_releases)) {
-               msg = list_first_entry(&session->s_cap_releases,
-                                      struct ceph_msg, list_head);
-               list_del_init(&msg->list_head);
-               ceph_msg_put(msg);
-       }
-       while (!list_empty(&session->s_cap_releases_done)) {
-               msg = list_first_entry(&session->s_cap_releases_done,
-                                      struct ceph_msg, list_head);
-               list_del_init(&msg->list_head);
-               ceph_msg_put(msg);
+       dout("cleanup_cap_releases mds%d\n", session->s_mds);
+       while (!list_empty(&tmp_list)) {
+               struct ceph_cap *cap;
+               /* zero out the in-progress message */
+               cap = list_first_entry(&tmp_list,
+                                       struct ceph_cap, session_caps);
+               list_del(&cap->session_caps);
+               ceph_put_cap(mdsc, cap);
        }
-       spin_unlock(&session->s_cap_lock);
 }
 
 static void cleanup_session_requests(struct ceph_mds_client *mdsc,
@@ -1033,7 +1049,8 @@ static void cleanup_session_requests(struct ceph_mds_client *mdsc,
                req = list_first_entry(&session->s_unsafe,
                                       struct ceph_mds_request, r_unsafe_item);
                list_del_init(&req->r_unsafe_item);
-               pr_info(" dropping unsafe request %llu\n", req->r_tid);
+               pr_warn_ratelimited(" dropping unsafe request %llu\n",
+                                   req->r_tid);
                __unregister_request(mdsc, req);
        }
        /* zero r_attempts, so kick_requests() will re-send requests */
@@ -1095,10 +1112,16 @@ static int iterate_session_caps(struct ceph_mds_session *session,
                        dout("iterate_session_caps  finishing cap %p removal\n",
                             cap);
                        BUG_ON(cap->session != session);
+                       cap->session = NULL;
                        list_del_init(&cap->session_caps);
                        session->s_nr_caps--;
-                       cap->session = NULL;
-                       old_cap = cap;  /* put_cap it w/o locks held */
+                       if (cap->queue_release) {
+                               list_add_tail(&cap->session_caps,
+                                             &session->s_cap_releases);
+                               session->s_num_cap_releases++;
+                       } else {
+                               old_cap = cap;  /* put_cap it w/o locks held */
+                       }
                }
                if (ret < 0)
                        goto out;
@@ -1119,6 +1142,7 @@ static int remove_session_caps_cb(struct inode *inode, struct ceph_cap *cap,
                                  void *arg)
 {
        struct ceph_inode_info *ci = ceph_inode(inode);
+       LIST_HEAD(to_remove);
        int drop = 0;
 
        dout("removing cap %p, ci is %p, inode is %p\n",
@@ -1126,12 +1150,27 @@ static int remove_session_caps_cb(struct inode *inode, struct ceph_cap *cap,
        spin_lock(&ci->i_ceph_lock);
        __ceph_remove_cap(cap, false);
        if (!ci->i_auth_cap) {
+               struct ceph_cap_flush *cf;
                struct ceph_mds_client *mdsc =
                        ceph_sb_to_client(inode->i_sb)->mdsc;
 
+               while (true) {
+                       struct rb_node *n = rb_first(&ci->i_cap_flush_tree);
+                       if (!n)
+                               break;
+                       cf = rb_entry(n, struct ceph_cap_flush, i_node);
+                       rb_erase(&cf->i_node, &ci->i_cap_flush_tree);
+                       list_add(&cf->list, &to_remove);
+               }
+
                spin_lock(&mdsc->cap_dirty_lock);
+
+               list_for_each_entry(cf, &to_remove, list)
+                       rb_erase(&cf->g_node, &mdsc->cap_flush_tree);
+
                if (!list_empty(&ci->i_dirty_item)) {
-                       pr_info(" dropping dirty %s state for %p %lld\n",
+                       pr_warn_ratelimited(
+                               " dropping dirty %s state for %p %lld\n",
                                ceph_cap_string(ci->i_dirty_caps),
                                inode, ceph_ino(inode));
                        ci->i_dirty_caps = 0;
@@ -1139,7 +1178,8 @@ static int remove_session_caps_cb(struct inode *inode, struct ceph_cap *cap,
                        drop = 1;
                }
                if (!list_empty(&ci->i_flushing_item)) {
-                       pr_info(" dropping dirty+flushing %s state for %p %lld\n",
+                       pr_warn_ratelimited(
+                               " dropping dirty+flushing %s state for %p %lld\n",
                                ceph_cap_string(ci->i_flushing_caps),
                                inode, ceph_ino(inode));
                        ci->i_flushing_caps = 0;
@@ -1148,8 +1188,20 @@ static int remove_session_caps_cb(struct inode *inode, struct ceph_cap *cap,
                        drop = 1;
                }
                spin_unlock(&mdsc->cap_dirty_lock);
+
+               if (!ci->i_dirty_caps && ci->i_prealloc_cap_flush) {
+                       list_add(&ci->i_prealloc_cap_flush->list, &to_remove);
+                       ci->i_prealloc_cap_flush = NULL;
+               }
        }
        spin_unlock(&ci->i_ceph_lock);
+       while (!list_empty(&to_remove)) {
+               struct ceph_cap_flush *cf;
+               cf = list_first_entry(&to_remove,
+                                     struct ceph_cap_flush, list);
+               list_del(&cf->list);
+               ceph_free_cap_flush(cf);
+       }
        while (drop--)
                iput(inode);
        return 0;
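
For context on remove_session_caps_cb() above (not part of the patch): pending cap flushes are moved onto a local to_remove list while i_ceph_lock is held and only freed after the lock is dropped, so the free path never runs under the spinlock. A small userspace sketch of that splice-then-free pattern, using a plain singly linked list and a pthread mutex as stand-ins:

/* Sketch only (not from the patch): detach a whole list under the lock,
 * then free the entries after the lock is released. */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct node { struct node *next; int id; };

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static struct node *pending; /* protected by 'lock' */

static void remove_all(void)
{
	struct node *to_remove;

	pthread_mutex_lock(&lock);
	to_remove = pending;     /* splice the whole list under the lock */
	pending = NULL;
	pthread_mutex_unlock(&lock);

	while (to_remove) {      /* free outside the lock */
		struct node *n = to_remove;
		to_remove = n->next;
		printf("freeing entry %d\n", n->id);
		free(n);
	}
}

int main(void)
{
	for (int i = 0; i < 3; i++) {
		struct node *n = malloc(sizeof(*n));
		n->id = i;
		n->next = pending;
		pending = n;
	}
	remove_all();
	return 0;
}
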
@@ -1191,11 +1243,12 @@ static void remove_session_caps(struct ceph_mds_session *session)
                        spin_lock(&session->s_cap_lock);
                }
        }
-       spin_unlock(&session->s_cap_lock);
+
+       // drop cap expires and unlock s_cap_lock
+       cleanup_cap_releases(session->s_mdsc, session);
 
        BUG_ON(session->s_nr_caps > 0);
        BUG_ON(!list_empty(&session->s_cap_flushing));
-       cleanup_cap_releases(session);
 }
 
 /*
@@ -1371,7 +1424,8 @@ static int trim_caps_cb(struct inode *inode, struct ceph_cap *cap, void *arg)
             inode, cap, ceph_cap_string(mine), ceph_cap_string(oissued),
             ceph_cap_string(used), ceph_cap_string(wanted));
        if (cap == ci->i_auth_cap) {
-               if (ci->i_dirty_caps | ci->i_flushing_caps)
+               if (ci->i_dirty_caps || ci->i_flushing_caps ||
+                   !list_empty(&ci->i_cap_snaps))
                        goto out;
                if ((used | wanted) & CEPH_CAP_ANY_WR)
                        goto out;
@@ -1417,121 +1471,80 @@ static int trim_caps(struct ceph_mds_client *mdsc,
                session->s_trim_caps = 0;
        }
 
-       ceph_add_cap_releases(mdsc, session);
        ceph_send_cap_releases(mdsc, session);
        return 0;
 }
 
-/*
- * Allocate cap_release messages.  If there is a partially full message
- * in the queue, try to allocate enough to cover it's remainder, so that
- * we can send it immediately.
- *
- * Called under s_mutex.
- */
-int ceph_add_cap_releases(struct ceph_mds_client *mdsc,
-                         struct ceph_mds_session *session)
+static int check_capsnap_flush(struct ceph_inode_info *ci,
+                              u64 want_snap_seq)
 {
-       struct ceph_msg *msg, *partial = NULL;
-       struct ceph_mds_cap_release *head;
-       int err = -ENOMEM;
-       int extra = mdsc->fsc->mount_options->cap_release_safety;
-       int num;
-
-       dout("add_cap_releases %p mds%d extra %d\n", session, session->s_mds,
-            extra);
-
-       spin_lock(&session->s_cap_lock);
-
-       if (!list_empty(&session->s_cap_releases)) {
-               msg = list_first_entry(&session->s_cap_releases,
-                                      struct ceph_msg,
-                                list_head);
-               head = msg->front.iov_base;
-               num = le32_to_cpu(head->num);
-               if (num) {
-                       dout(" partial %p with (%d/%d)\n", msg, num,
-                            (int)CEPH_CAPS_PER_RELEASE);
-                       extra += CEPH_CAPS_PER_RELEASE - num;
-                       partial = msg;
-               }
-       }
-       while (session->s_num_cap_releases < session->s_nr_caps + extra) {
-               spin_unlock(&session->s_cap_lock);
-               msg = ceph_msg_new(CEPH_MSG_CLIENT_CAPRELEASE, PAGE_CACHE_SIZE,
-                                  GFP_NOFS, false);
-               if (!msg)
-                       goto out_unlocked;
-               dout("add_cap_releases %p msg %p now %d\n", session, msg,
-                    (int)msg->front.iov_len);
-               head = msg->front.iov_base;
-               head->num = cpu_to_le32(0);
-               msg->front.iov_len = sizeof(*head);
-               spin_lock(&session->s_cap_lock);
-               list_add(&msg->list_head, &session->s_cap_releases);
-               session->s_num_cap_releases += CEPH_CAPS_PER_RELEASE;
-       }
-
-       if (partial) {
-               head = partial->front.iov_base;
-               num = le32_to_cpu(head->num);
-               dout(" queueing partial %p with %d/%d\n", partial, num,
-                    (int)CEPH_CAPS_PER_RELEASE);
-               list_move_tail(&partial->list_head,
-                              &session->s_cap_releases_done);
-               session->s_num_cap_releases -= CEPH_CAPS_PER_RELEASE - num;
+       int ret = 1;
+       spin_lock(&ci->i_ceph_lock);
+       if (want_snap_seq > 0 && !list_empty(&ci->i_cap_snaps)) {
+               struct ceph_cap_snap *capsnap =
+                       list_first_entry(&ci->i_cap_snaps,
+                                        struct ceph_cap_snap, ci_item);
+               ret = capsnap->follows >= want_snap_seq;
        }
-       err = 0;
-       spin_unlock(&session->s_cap_lock);
-out_unlocked:
-       return err;
+       spin_unlock(&ci->i_ceph_lock);
+       return ret;
 }
 
-static int check_cap_flush(struct inode *inode, u64 want_flush_seq)
+static int check_caps_flush(struct ceph_mds_client *mdsc,
+                           u64 want_flush_tid)
 {
-       struct ceph_inode_info *ci = ceph_inode(inode);
-       int ret;
-       spin_lock(&ci->i_ceph_lock);
-       if (ci->i_flushing_caps)
-               ret = ci->i_cap_flush_seq >= want_flush_seq;
-       else
-               ret = 1;
-       spin_unlock(&ci->i_ceph_lock);
+       struct rb_node *n;
+       struct ceph_cap_flush *cf;
+       int ret = 1;
+
+       spin_lock(&mdsc->cap_dirty_lock);
+       n = rb_first(&mdsc->cap_flush_tree);
+       cf = n ? rb_entry(n, struct ceph_cap_flush, g_node) : NULL;
+       if (cf && cf->tid <= want_flush_tid) {
+               dout("check_caps_flush still flushing tid %llu <= %llu\n",
+                    cf->tid, want_flush_tid);
+               ret = 0;
+       }
+       spin_unlock(&mdsc->cap_dirty_lock);
        return ret;
 }
 
 /*
  * flush all dirty inode data to disk.
  *
- * returns true if we've flushed through want_flush_seq
+ * returns true if we've flushed through want_flush_tid
  */
-static void wait_caps_flush(struct ceph_mds_client *mdsc, u64 want_flush_seq)
+static void wait_caps_flush(struct ceph_mds_client *mdsc,
+                           u64 want_flush_tid, u64 want_snap_seq)
 {
        int mds;
 
-       dout("check_cap_flush want %lld\n", want_flush_seq);
+       dout("check_caps_flush want %llu snap want %llu\n",
+            want_flush_tid, want_snap_seq);
        mutex_lock(&mdsc->mutex);
-       for (mds = 0; mds < mdsc->max_sessions; mds++) {
+       for (mds = 0; mds < mdsc->max_sessions; ) {
                struct ceph_mds_session *session = mdsc->sessions[mds];
                struct inode *inode = NULL;
 
-               if (!session)
+               if (!session) {
+                       mds++;
                        continue;
+               }
                get_session(session);
                mutex_unlock(&mdsc->mutex);
 
                mutex_lock(&session->s_mutex);
-               if (!list_empty(&session->s_cap_flushing)) {
-                       struct ceph_inode_info *ci =
-                               list_entry(session->s_cap_flushing.next,
-                                          struct ceph_inode_info,
-                                          i_flushing_item);
-
-                       if (!check_cap_flush(&ci->vfs_inode, want_flush_seq)) {
-                               dout("check_cap_flush still flushing %p "
-                                    "seq %lld <= %lld to mds%d\n",
-                                    &ci->vfs_inode, ci->i_cap_flush_seq,
-                                    want_flush_seq, session->s_mds);
+               if (!list_empty(&session->s_cap_snaps_flushing)) {
+                       struct ceph_cap_snap *capsnap =
+                               list_first_entry(&session->s_cap_snaps_flushing,
+                                                struct ceph_cap_snap,
+                                                flushing_item);
+                       struct ceph_inode_info *ci = capsnap->ci;
+                       if (!check_capsnap_flush(ci, want_snap_seq)) {
+                               dout("check_cap_flush still flushing snap %p "
+                                    "follows %lld <= %lld to mds%d\n",
+                                    &ci->vfs_inode, capsnap->follows,
+                                    want_snap_seq, mds);
                                inode = igrab(&ci->vfs_inode);
                        }
                }
@@ -1540,15 +1553,21 @@ static void wait_caps_flush(struct ceph_mds_client *mdsc, u64 want_flush_seq)
 
                if (inode) {
                        wait_event(mdsc->cap_flushing_wq,
-                                  check_cap_flush(inode, want_flush_seq));
+                                  check_capsnap_flush(ceph_inode(inode),
+                                                      want_snap_seq));
                        iput(inode);
+               } else {
+                       mds++;
                }
 
                mutex_lock(&mdsc->mutex);
        }
-
        mutex_unlock(&mdsc->mutex);
-       dout("check_cap_flush ok, flushed thru %lld\n", want_flush_seq);
+
+       wait_event(mdsc->cap_flushing_wq,
+                  check_caps_flush(mdsc, want_flush_tid));
+
+       dout("check_caps_flush ok, flushed thru %llu\n", want_flush_tid);
 }
 
 /*
@@ -1557,60 +1576,74 @@ static void wait_caps_flush(struct ceph_mds_client *mdsc, u64 want_flush_seq)
 void ceph_send_cap_releases(struct ceph_mds_client *mdsc,
                            struct ceph_mds_session *session)
 {
-       struct ceph_msg *msg;
+       struct ceph_msg *msg = NULL;
+       struct ceph_mds_cap_release *head;
+       struct ceph_mds_cap_item *item;
+       struct ceph_cap *cap;
+       LIST_HEAD(tmp_list);
+       int num_cap_releases;
 
-       dout("send_cap_releases mds%d\n", session->s_mds);
        spin_lock(&session->s_cap_lock);
-       while (!list_empty(&session->s_cap_releases_done)) {
-               msg = list_first_entry(&session->s_cap_releases_done,
-                                struct ceph_msg, list_head);
-               list_del_init(&msg->list_head);
-               spin_unlock(&session->s_cap_lock);
-               msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);
-               dout("send_cap_releases mds%d %p\n", session->s_mds, msg);
-               ceph_con_send(&session->s_con, msg);
-               spin_lock(&session->s_cap_lock);
-       }
+again:
+       list_splice_init(&session->s_cap_releases, &tmp_list);
+       num_cap_releases = session->s_num_cap_releases;
+       session->s_num_cap_releases = 0;
        spin_unlock(&session->s_cap_lock);
-}
 
-static void discard_cap_releases(struct ceph_mds_client *mdsc,
-                                struct ceph_mds_session *session)
-{
-       struct ceph_msg *msg;
-       struct ceph_mds_cap_release *head;
-       unsigned num;
-
-       dout("discard_cap_releases mds%d\n", session->s_mds);
+       while (!list_empty(&tmp_list)) {
+               if (!msg) {
+                       msg = ceph_msg_new(CEPH_MSG_CLIENT_CAPRELEASE,
+                                       PAGE_CACHE_SIZE, GFP_NOFS, false);
+                       if (!msg)
+                               goto out_err;
+                       head = msg->front.iov_base;
+                       head->num = cpu_to_le32(0);
+                       msg->front.iov_len = sizeof(*head);
+               }
+               cap = list_first_entry(&tmp_list, struct ceph_cap,
+                                       session_caps);
+               list_del(&cap->session_caps);
+               num_cap_releases--;
 
-       if (!list_empty(&session->s_cap_releases)) {
-               /* zero out the in-progress message */
-               msg = list_first_entry(&session->s_cap_releases,
-                                       struct ceph_msg, list_head);
                head = msg->front.iov_base;
-               num = le32_to_cpu(head->num);
-               dout("discard_cap_releases mds%d %p %u\n",
-                    session->s_mds, msg, num);
-               head->num = cpu_to_le32(0);
-               msg->front.iov_len = sizeof(*head);
-               session->s_num_cap_releases += num;
+               le32_add_cpu(&head->num, 1);
+               item = msg->front.iov_base + msg->front.iov_len;
+               item->ino = cpu_to_le64(cap->cap_ino);
+               item->cap_id = cpu_to_le64(cap->cap_id);
+               item->migrate_seq = cpu_to_le32(cap->mseq);
+               item->seq = cpu_to_le32(cap->issue_seq);
+               msg->front.iov_len += sizeof(*item);
+
+               ceph_put_cap(mdsc, cap);
+
+               if (le32_to_cpu(head->num) == CEPH_CAPS_PER_RELEASE) {
+                       msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);
+                       dout("send_cap_releases mds%d %p\n", session->s_mds, msg);
+                       ceph_con_send(&session->s_con, msg);
+                       msg = NULL;
+               }
        }
 
-       /* requeue completed messages */
-       while (!list_empty(&session->s_cap_releases_done)) {
-               msg = list_first_entry(&session->s_cap_releases_done,
-                                struct ceph_msg, list_head);
-               list_del_init(&msg->list_head);
+       BUG_ON(num_cap_releases != 0);
 
-               head = msg->front.iov_base;
-               num = le32_to_cpu(head->num);
-               dout("discard_cap_releases mds%d %p %u\n", session->s_mds, msg,
-                    num);
-               session->s_num_cap_releases += num;
-               head->num = cpu_to_le32(0);
-               msg->front.iov_len = sizeof(*head);
-               list_add(&msg->list_head, &session->s_cap_releases);
+       spin_lock(&session->s_cap_lock);
+       if (!list_empty(&session->s_cap_releases))
+               goto again;
+       spin_unlock(&session->s_cap_lock);
+
+       if (msg) {
+               msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);
+               dout("send_cap_releases mds%d %p\n", session->s_mds, msg);
+               ceph_con_send(&session->s_con, msg);
        }
+       return;
+out_err:
+       pr_err("send_cap_releases mds%d, failed to allocate message\n",
+               session->s_mds);
+       spin_lock(&session->s_cap_lock);
+       list_splice(&tmp_list, &session->s_cap_releases);
+       session->s_num_cap_releases += num_cap_releases;
+       spin_unlock(&session->s_cap_lock);
 }
 
 /*
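
For context on the reworked ceph_send_cap_releases() above (not part of the patch): queued caps are packed into CAPRELEASE messages one item at a time, a message is sent as soon as it holds CEPH_CAPS_PER_RELEASE entries, and any final partially filled message is flushed at the end. A minimal userspace sketch of that batching, using a hypothetical batch size of 4 in place of CEPH_CAPS_PER_RELEASE:

/* Sketch only (not from the patch): batch queued releases into fixed-size
 * messages, sending each message as it fills and flushing the remainder. */
#include <stdio.h>

#define BATCH_SIZE 4 /* stand-in for CEPH_CAPS_PER_RELEASE */

static void send_batch(const int *batch, int n)
{
	printf("sending message with %d release(s):", n);
	for (int i = 0; i < n; i++)
		printf(" cap%d", batch[i]);
	printf("\n");
}

int main(void)
{
	int batch[BATCH_SIZE];
	int n = 0;

	/* pretend ten caps are queued on the session's release list */
	for (int cap = 0; cap < 10; cap++) {
		batch[n++] = cap;
		if (n == BATCH_SIZE) { /* message full: send and start a new one */
			send_batch(batch, n);
			n = 0;
		}
	}
	if (n)                         /* flush the final, partially filled message */
		send_batch(batch, n);
	return 0;
}
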
@@ -1635,7 +1668,8 @@ int ceph_alloc_readdir_reply_buffer(struct ceph_mds_request *req,
 
        order = get_order(size * num_entries);
        while (order >= 0) {
-               rinfo->dir_in = (void*)__get_free_pages(GFP_NOFS | __GFP_NOWARN,
+               rinfo->dir_in = (void*)__get_free_pages(GFP_KERNEL |
+                                                       __GFP_NOWARN,
                                                        order);
                if (rinfo->dir_in)
                        break;
@@ -1697,13 +1731,9 @@ static struct ceph_mds_request *__get_oldest_req(struct ceph_mds_client *mdsc)
                        struct ceph_mds_request, r_node);
 }
 
-static u64 __get_oldest_tid(struct ceph_mds_client *mdsc)
+static inline  u64 __get_oldest_tid(struct ceph_mds_client *mdsc)
 {
-       struct ceph_mds_request *req = __get_oldest_req(mdsc);
-
-       if (req)
-               return req->r_tid;
-       return 0;
+       return mdsc->oldest_tid;
 }
 
 /*
@@ -2267,15 +2297,18 @@ int ceph_mdsc_do_request(struct ceph_mds_client *mdsc,
        /* wait */
        mutex_unlock(&mdsc->mutex);
        dout("do_request waiting\n");
-       if (req->r_timeout) {
-               err = (long)wait_for_completion_killable_timeout(
-                       &req->r_completion, req->r_timeout);
-               if (err == 0)
-                       err = -EIO;
-       } else if (req->r_wait_for_completion) {
+       if (!req->r_timeout && req->r_wait_for_completion) {
                err = req->r_wait_for_completion(mdsc, req);
        } else {
-               err = wait_for_completion_killable(&req->r_completion);
+               long timeleft = wait_for_completion_killable_timeout(
+                                       &req->r_completion,
+                                       ceph_timeout_jiffies(req->r_timeout));
+               if (timeleft > 0)
+                       err = 0;
+               else if (!timeleft)
+                       err = -EIO;  /* timed out */
+               else
+                       err = timeleft;  /* killed */
        }
        dout("do_request waited, got %d\n", err);
        mutex_lock(&mdsc->mutex);
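
The reworked wait above folds the old three-way branch into a single wait_for_completion_killable_timeout() call, with a zero r_timeout now meaning "wait forever" instead of taking its own code path. ceph_timeout_jiffies() is not defined in this hunk, so the snippet below is only a userspace sketch of its assumed mapping (0 becomes the MAX_SCHEDULE_TIMEOUT sentinel, anything else is already a jiffies value and passes through). The same helper is presumably what lets wait_requests() and ceph_mdsc_close_sessions() further down drop their explicit "* HZ" conversions.

#include <assert.h>
#include <limits.h>

/* stand-in for the kernel's MAX_SCHEDULE_TIMEOUT sentinel */
#define MAX_SCHEDULE_TIMEOUT LONG_MAX

/* assumed behaviour of ceph_timeout_jiffies(): a zero timeout becomes
 * "wait forever", any non-zero value is passed through unchanged */
static long timeout_jiffies(unsigned long timeout)
{
	return timeout ? (long)timeout : MAX_SCHEDULE_TIMEOUT;
}

int main(void)
{
	assert(timeout_jiffies(0) == MAX_SCHEDULE_TIMEOUT); /* r_timeout == 0 */
	assert(timeout_jiffies(60 * 100) == 6000);          /* e.g. 60s at HZ=100 */
	return 0;
}
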
@@ -2496,7 +2529,6 @@ out_err:
        }
        mutex_unlock(&mdsc->mutex);
 
-       ceph_add_cap_releases(mdsc, req->r_session);
        mutex_unlock(&session->s_mutex);
 
        /* kick calling process */
@@ -2888,8 +2920,7 @@ static void send_mds_reconnect(struct ceph_mds_client *mdsc,
         */
        session->s_cap_reconnect = 1;
        /* drop old cap expires; we're about to reestablish that state */
-       discard_cap_releases(mdsc, session);
-       spin_unlock(&session->s_cap_lock);
+       cleanup_cap_releases(mdsc, session);
 
        /* trim unused caps to reduce MDS's cache rejoin time */
        if (mdsc->fsc->sb->s_root)
@@ -2956,6 +2987,9 @@ static void send_mds_reconnect(struct ceph_mds_client *mdsc,
 
        reply->hdr.data_len = cpu_to_le32(pagelist->length);
        ceph_msg_data_add_pagelist(reply, pagelist);
+
+       ceph_early_kick_flushing_caps(mdsc, session);
+
        ceph_con_send(&session->s_con, reply);
 
        mutex_unlock(&session->s_mutex);
@@ -3352,7 +3386,6 @@ static void delayed_work(struct work_struct *work)
                        send_renew_caps(mdsc, s);
                else
                        ceph_con_keepalive(&s->s_con);
-               ceph_add_cap_releases(mdsc, s);
                if (s->s_state == CEPH_MDS_SESSION_OPEN ||
                    s->s_state == CEPH_MDS_SESSION_HUNG)
                        ceph_send_cap_releases(mdsc, s);
@@ -3390,11 +3423,13 @@ int ceph_mdsc_init(struct ceph_fs_client *fsc)
        atomic_set(&mdsc->num_sessions, 0);
        mdsc->max_sessions = 0;
        mdsc->stopping = 0;
+       mdsc->last_snap_seq = 0;
        init_rwsem(&mdsc->snap_rwsem);
        mdsc->snap_realms = RB_ROOT;
        INIT_LIST_HEAD(&mdsc->snap_empty);
        spin_lock_init(&mdsc->snap_empty_lock);
        mdsc->last_tid = 0;
+       mdsc->oldest_tid = 0;
        mdsc->request_tree = RB_ROOT;
        INIT_DELAYED_WORK(&mdsc->delayed_work, delayed_work);
        mdsc->last_renew_caps = jiffies;
@@ -3402,7 +3437,8 @@ int ceph_mdsc_init(struct ceph_fs_client *fsc)
        spin_lock_init(&mdsc->cap_delay_lock);
        INIT_LIST_HEAD(&mdsc->snap_flush_list);
        spin_lock_init(&mdsc->snap_flush_lock);
-       mdsc->cap_flush_seq = 0;
+       mdsc->last_cap_flush_tid = 1;
+       mdsc->cap_flush_tree = RB_ROOT;
        INIT_LIST_HEAD(&mdsc->cap_dirty);
        INIT_LIST_HEAD(&mdsc->cap_dirty_migrating);
        mdsc->num_cap_flushing = 0;
@@ -3414,6 +3450,9 @@ int ceph_mdsc_init(struct ceph_fs_client *fsc)
        ceph_caps_init(mdsc);
        ceph_adjust_min_caps(mdsc, fsc->min_caps);
 
+       init_rwsem(&mdsc->pool_perm_rwsem);
+       mdsc->pool_perm_tree = RB_ROOT;
+
        return 0;
 }
 
@@ -3423,8 +3462,8 @@ int ceph_mdsc_init(struct ceph_fs_client *fsc)
  */
 static void wait_requests(struct ceph_mds_client *mdsc)
 {
+       struct ceph_options *opts = mdsc->fsc->client->options;
        struct ceph_mds_request *req;
-       struct ceph_fs_client *fsc = mdsc->fsc;
 
        mutex_lock(&mdsc->mutex);
        if (__get_oldest_req(mdsc)) {
@@ -3432,7 +3471,7 @@ static void wait_requests(struct ceph_mds_client *mdsc)
 
                dout("wait_requests waiting for requests\n");
                wait_for_completion_timeout(&mdsc->safe_umount_waiters,
-                                   fsc->client->options->mount_timeout * HZ);
+                                   ceph_timeout_jiffies(opts->mount_timeout));
 
                /* tear down remaining requests */
                mutex_lock(&mdsc->mutex);
@@ -3485,7 +3524,8 @@ restart:
                        nextreq = rb_entry(n, struct ceph_mds_request, r_node);
                else
                        nextreq = NULL;
-               if ((req->r_op & CEPH_MDS_OP_WRITE)) {
+               if (req->r_op != CEPH_MDS_OP_SETFILELOCK &&
+                   (req->r_op & CEPH_MDS_OP_WRITE)) {
                        /* write op */
                        ceph_mdsc_get_request(req);
                        if (nextreq)
@@ -3513,7 +3553,7 @@ restart:
 
 void ceph_mdsc_sync(struct ceph_mds_client *mdsc)
 {
-       u64 want_tid, want_flush;
+       u64 want_tid, want_flush, want_snap;
 
        if (mdsc->fsc->mount_state == CEPH_MOUNT_SHUTDOWN)
                return;
@@ -3525,13 +3565,18 @@ void ceph_mdsc_sync(struct ceph_mds_client *mdsc)
 
        ceph_flush_dirty_caps(mdsc);
        spin_lock(&mdsc->cap_dirty_lock);
-       want_flush = mdsc->cap_flush_seq;
+       want_flush = mdsc->last_cap_flush_tid;
        spin_unlock(&mdsc->cap_dirty_lock);
 
-       dout("sync want tid %lld flush_seq %lld\n", want_tid, want_flush);
+       down_read(&mdsc->snap_rwsem);
+       want_snap = mdsc->last_snap_seq;
+       up_read(&mdsc->snap_rwsem);
+
+       dout("sync want tid %lld flush_seq %lld snap_seq %lld\n",
+            want_tid, want_flush, want_snap);
 
        wait_unsafe_requests(mdsc, want_tid);
-       wait_caps_flush(mdsc, want_flush);
+       wait_caps_flush(mdsc, want_flush, want_snap);
 }
 
 /*
@@ -3549,10 +3594,9 @@ static bool done_closing_sessions(struct ceph_mds_client *mdsc)
  */
 void ceph_mdsc_close_sessions(struct ceph_mds_client *mdsc)
 {
+       struct ceph_options *opts = mdsc->fsc->client->options;
        struct ceph_mds_session *session;
        int i;
-       struct ceph_fs_client *fsc = mdsc->fsc;
-       unsigned long timeout = fsc->client->options->mount_timeout * HZ;
 
        dout("close_sessions\n");
 
@@ -3573,7 +3617,7 @@ void ceph_mdsc_close_sessions(struct ceph_mds_client *mdsc)
 
        dout("waiting for sessions to close\n");
        wait_event_timeout(mdsc->session_close_wq, done_closing_sessions(mdsc),
-                          timeout);
+                          ceph_timeout_jiffies(opts->mount_timeout));
 
        /* tear down remaining sessions */
        mutex_lock(&mdsc->mutex);
@@ -3607,6 +3651,7 @@ static void ceph_mdsc_stop(struct ceph_mds_client *mdsc)
                ceph_mdsmap_destroy(mdsc->mdsmap);
        kfree(mdsc->sessions);
        ceph_caps_finalize(mdsc);
+       ceph_pool_perm_destroy(mdsc);
 }
 
 void ceph_mdsc_destroy(struct ceph_fs_client *fsc)
index 1875b5d985c6b0df2fbb38e16f39a78ecc76750d..762757e6cebf95fff324894d1650b8271210acdd 100644 (file)
@@ -139,7 +139,6 @@ struct ceph_mds_session {
        int               s_cap_reconnect;
        int               s_readonly;
        struct list_head  s_cap_releases; /* waiting cap_release messages */
-       struct list_head  s_cap_releases_done; /* ready to send */
        struct ceph_cap  *s_cap_iterator;
 
        /* protected by mutex */
@@ -228,7 +227,7 @@ struct ceph_mds_request {
        int r_err;
        bool r_aborted;
 
-       unsigned long r_timeout;  /* optional.  jiffies */
+       unsigned long r_timeout;  /* optional.  jiffies, 0 is "wait forever" */
        unsigned long r_started;  /* start time to measure timeout against */
        unsigned long r_request_started; /* start time for mds request only,
                                            used to measure lease durations */
@@ -254,12 +253,21 @@ struct ceph_mds_request {
        bool              r_got_unsafe, r_got_safe, r_got_result;
 
        bool              r_did_prepopulate;
+       long long         r_dir_release_cnt;
+       long long         r_dir_ordered_cnt;
+       int               r_readdir_cache_idx;
        u32               r_readdir_offset;
 
        struct ceph_cap_reservation r_caps_reservation;
        int r_num_caps;
 };
 
+struct ceph_pool_perm {
+       struct rb_node node;
+       u32 pool;
+       int perm;
+};
+
 /*
  * mds client state
  */
@@ -284,12 +292,15 @@ struct ceph_mds_client {
         * references (implying they contain no inodes with caps) that
         * should be destroyed.
         */
+       u64                     last_snap_seq;
        struct rw_semaphore     snap_rwsem;
        struct rb_root          snap_realms;
        struct list_head        snap_empty;
        spinlock_t              snap_empty_lock;  /* protect snap_empty */
 
        u64                    last_tid;      /* most recent mds request */
+       u64                    oldest_tid;    /* oldest incomplete mds request,
+                                                excluding setfilelock requests */
        struct rb_root         request_tree;  /* pending mds requests */
        struct delayed_work    delayed_work;  /* delayed work */
        unsigned long    last_renew_caps;  /* last time we renewed our caps */
@@ -298,7 +309,8 @@ struct ceph_mds_client {
        struct list_head snap_flush_list;  /* cap_snaps ready to flush */
        spinlock_t       snap_flush_lock;
 
-       u64               cap_flush_seq;
+       u64               last_cap_flush_tid;
+       struct rb_root    cap_flush_tree;
        struct list_head  cap_dirty;        /* inodes with dirty caps */
        struct list_head  cap_dirty_migrating; /* ...that are migration... */
        int               num_cap_flushing; /* # caps we are flushing */
@@ -328,6 +340,9 @@ struct ceph_mds_client {
        spinlock_t        dentry_lru_lock;
        struct list_head  dentry_lru;
        int               num_dentry;
+
+       struct rw_semaphore     pool_perm_rwsem;
+       struct rb_root          pool_perm_tree;
 };
 
 extern const char *ceph_mds_op_name(int op);
@@ -379,8 +394,6 @@ static inline void ceph_mdsc_put_request(struct ceph_mds_request *req)
        kref_put(&req->r_kref, ceph_mdsc_release_request);
 }
 
-extern int ceph_add_cap_releases(struct ceph_mds_client *mdsc,
-                                struct ceph_mds_session *session);
 extern void ceph_send_cap_releases(struct ceph_mds_client *mdsc,
                                   struct ceph_mds_session *session);
 
index a97e39f09ba683349bb5f97e44f0d229b3a88936..233d906aec02b7c4508fd2488908bdb95a130aa4 100644 (file)
@@ -296,7 +296,7 @@ static int cmpu64_rev(const void *a, const void *b)
 }
 
 
-static struct ceph_snap_context *empty_snapc;
+struct ceph_snap_context *ceph_empty_snapc;
 
 /*
  * build the snap context for a given realm.
@@ -338,9 +338,9 @@ static int build_snap_context(struct ceph_snap_realm *realm)
                return 0;
        }
 
-       if (num == 0 && realm->seq == empty_snapc->seq) {
-               ceph_get_snap_context(empty_snapc);
-               snapc = empty_snapc;
+       if (num == 0 && realm->seq == ceph_empty_snapc->seq) {
+               ceph_get_snap_context(ceph_empty_snapc);
+               snapc = ceph_empty_snapc;
                goto done;
        }
 
@@ -436,6 +436,14 @@ static int dup_array(u64 **dst, __le64 *src, u32 num)
        return 0;
 }
 
+static bool has_new_snaps(struct ceph_snap_context *o,
+                         struct ceph_snap_context *n)
+{
+       if (n->num_snaps == 0)
+               return false;
+       /* snaps are in descending order */
+       return n->snaps[0] > o->seq;
+}
 
 /*
  * When a snapshot is applied, the size/mtime inode metadata is queued
@@ -455,6 +463,7 @@ void ceph_queue_cap_snap(struct ceph_inode_info *ci)
 {
        struct inode *inode = &ci->vfs_inode;
        struct ceph_cap_snap *capsnap;
+       struct ceph_snap_context *old_snapc, *new_snapc;
        int used, dirty;
 
        capsnap = kzalloc(sizeof(*capsnap), GFP_NOFS);
@@ -467,6 +476,9 @@ void ceph_queue_cap_snap(struct ceph_inode_info *ci)
        used = __ceph_caps_used(ci);
        dirty = __ceph_caps_dirty(ci);
 
+       old_snapc = ci->i_head_snapc;
+       new_snapc = ci->i_snap_realm->cached_context;
+
        /*
         * If there is a write in progress, treat that as a dirty Fw,
         * even though it hasn't completed yet; by the time we finish
@@ -481,76 +493,95 @@ void ceph_queue_cap_snap(struct ceph_inode_info *ci)
                   writes in progress now were started before the previous
                   cap_snap.  lucky us. */
                dout("queue_cap_snap %p already pending\n", inode);
-               kfree(capsnap);
-       } else if (ci->i_snap_realm->cached_context == empty_snapc) {
-               dout("queue_cap_snap %p empty snapc\n", inode);
-               kfree(capsnap);
-       } else if (dirty & (CEPH_CAP_AUTH_EXCL|CEPH_CAP_XATTR_EXCL|
-                           CEPH_CAP_FILE_EXCL|CEPH_CAP_FILE_WR)) {
-               struct ceph_snap_context *snapc = ci->i_head_snapc;
-
-               /*
-                * if we are a sync write, we may need to go to the snaprealm
-                * to get the current snapc.
-                */
-               if (!snapc)
-                       snapc = ci->i_snap_realm->cached_context;
+               goto update_snapc;
+       }
+       if (ci->i_wrbuffer_ref_head == 0 &&
+           !(dirty & (CEPH_CAP_ANY_EXCL|CEPH_CAP_FILE_WR))) {
+               dout("queue_cap_snap %p nothing dirty|writing\n", inode);
+               goto update_snapc;
+       }
 
-               dout("queue_cap_snap %p cap_snap %p queuing under %p %s\n",
-                    inode, capsnap, snapc, ceph_cap_string(dirty));
-               ihold(inode);
+       BUG_ON(!old_snapc);
 
-               atomic_set(&capsnap->nref, 1);
-               capsnap->ci = ci;
-               INIT_LIST_HEAD(&capsnap->ci_item);
-               INIT_LIST_HEAD(&capsnap->flushing_item);
-
-               capsnap->follows = snapc->seq;
-               capsnap->issued = __ceph_caps_issued(ci, NULL);
-               capsnap->dirty = dirty;
-
-               capsnap->mode = inode->i_mode;
-               capsnap->uid = inode->i_uid;
-               capsnap->gid = inode->i_gid;
-
-               if (dirty & CEPH_CAP_XATTR_EXCL) {
-                       __ceph_build_xattrs_blob(ci);
-                       capsnap->xattr_blob =
-                               ceph_buffer_get(ci->i_xattrs.blob);
-                       capsnap->xattr_version = ci->i_xattrs.version;
-               } else {
-                       capsnap->xattr_blob = NULL;
-                       capsnap->xattr_version = 0;
+       /*
+        * There is no need to send a FLUSHSNAP message to the MDS if there
+        * is no new snapshot. But when there are dirty pages or on-going
+        * writes, we still need to create cap_snap. cap_snap is needed
+        * by the write path and page writeback path.
+        *
+        * also see ceph_try_drop_cap_snap()
+        */
+       if (has_new_snaps(old_snapc, new_snapc)) {
+               if (dirty & (CEPH_CAP_ANY_EXCL|CEPH_CAP_FILE_WR))
+                       capsnap->need_flush = true;
+       } else {
+               if (!(used & CEPH_CAP_FILE_WR) &&
+                   ci->i_wrbuffer_ref_head == 0) {
+                       dout("queue_cap_snap %p "
+                            "no new_snap|dirty_page|writing\n", inode);
+                       goto update_snapc;
                }
+       }
 
-               capsnap->inline_data = ci->i_inline_version != CEPH_INLINE_NONE;
-
-               /* dirty page count moved from _head to this cap_snap;
-                  all subsequent writes page dirties occur _after_ this
-                  snapshot. */
-               capsnap->dirty_pages = ci->i_wrbuffer_ref_head;
-               ci->i_wrbuffer_ref_head = 0;
-               capsnap->context = snapc;
-               ci->i_head_snapc =
-                       ceph_get_snap_context(ci->i_snap_realm->cached_context);
-               dout(" new snapc is %p\n", ci->i_head_snapc);
-               list_add_tail(&capsnap->ci_item, &ci->i_cap_snaps);
-
-               if (used & CEPH_CAP_FILE_WR) {
-                       dout("queue_cap_snap %p cap_snap %p snapc %p"
-                            " seq %llu used WR, now pending\n", inode,
-                            capsnap, snapc, snapc->seq);
-                       capsnap->writing = 1;
-               } else {
-                       /* note mtime, size NOW. */
-                       __ceph_finish_cap_snap(ci, capsnap);
-               }
+       dout("queue_cap_snap %p cap_snap %p queuing under %p %s %s\n",
+            inode, capsnap, old_snapc, ceph_cap_string(dirty),
+            capsnap->need_flush ? "" : "no_flush");
+       ihold(inode);
+
+       atomic_set(&capsnap->nref, 1);
+       capsnap->ci = ci;
+       INIT_LIST_HEAD(&capsnap->ci_item);
+       INIT_LIST_HEAD(&capsnap->flushing_item);
+
+       capsnap->follows = old_snapc->seq;
+       capsnap->issued = __ceph_caps_issued(ci, NULL);
+       capsnap->dirty = dirty;
+
+       capsnap->mode = inode->i_mode;
+       capsnap->uid = inode->i_uid;
+       capsnap->gid = inode->i_gid;
+
+       if (dirty & CEPH_CAP_XATTR_EXCL) {
+               __ceph_build_xattrs_blob(ci);
+               capsnap->xattr_blob =
+                       ceph_buffer_get(ci->i_xattrs.blob);
+               capsnap->xattr_version = ci->i_xattrs.version;
        } else {
-               dout("queue_cap_snap %p nothing dirty|writing\n", inode);
-               kfree(capsnap);
+               capsnap->xattr_blob = NULL;
+               capsnap->xattr_version = 0;
        }
 
+       capsnap->inline_data = ci->i_inline_version != CEPH_INLINE_NONE;
+
+       /* dirty page count moved from _head to this cap_snap;
+          all subsequent writes page dirties occur _after_ this
+          snapshot. */
+       capsnap->dirty_pages = ci->i_wrbuffer_ref_head;
+       ci->i_wrbuffer_ref_head = 0;
+       capsnap->context = old_snapc;
+       list_add_tail(&capsnap->ci_item, &ci->i_cap_snaps);
+       old_snapc = NULL;
+
+       if (used & CEPH_CAP_FILE_WR) {
+               dout("queue_cap_snap %p cap_snap %p snapc %p"
+                    " seq %llu used WR, now pending\n", inode,
+                    capsnap, capsnap->context, capsnap->context->seq);
+               capsnap->writing = 1;
+       } else {
+               /* note mtime, size NOW. */
+               __ceph_finish_cap_snap(ci, capsnap);
+       }
+       capsnap = NULL;
+
+update_snapc:
+       if (ci->i_head_snapc) {
+               ci->i_head_snapc = ceph_get_snap_context(new_snapc);
+               dout(" new snapc is %p\n", new_snapc);
+       }
        spin_unlock(&ci->i_ceph_lock);
+
+       kfree(capsnap);
+       ceph_put_snap_context(old_snapc);
 }
 
 /*
@@ -699,6 +730,8 @@ more:
 
                /* queue realm for cap_snap creation */
                list_add(&realm->dirty_item, &dirty_realms);
+               if (realm->seq > mdsc->last_snap_seq)
+                       mdsc->last_snap_seq = realm->seq;
 
                invalidate = 1;
        } else if (!realm->cached_context) {
@@ -964,14 +997,14 @@ out:
 
 int __init ceph_snap_init(void)
 {
-       empty_snapc = ceph_create_snap_context(0, GFP_NOFS);
-       if (!empty_snapc)
+       ceph_empty_snapc = ceph_create_snap_context(0, GFP_NOFS);
+       if (!ceph_empty_snapc)
                return -ENOMEM;
-       empty_snapc->seq = 1;
+       ceph_empty_snapc->seq = 1;
        return 0;
 }
 
 void ceph_snap_exit(void)
 {
-       ceph_put_snap_context(empty_snapc);
+       ceph_put_snap_context(ceph_empty_snapc);
 }
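
The has_new_snaps() helper added above leans on snapshot ids in a snap context being stored in descending order: comparing the newest id of the new context against the old context's seq is enough to decide whether a FLUSHSNAP will be needed. Below is a self-contained replay of that comparison with a toy snap_context type (not the kernel struct):

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

/* toy snap context: "snaps" holds snapshot ids in descending order,
 * "seq" is the highest snapshot id the context was built against */
struct snap_context {
	uint64_t seq;
	size_t num_snaps;
	uint64_t snaps[8];
};

/* mirrors the check in the hunk above: anything newer than the old
 * context's seq means a snapshot was taken since it was captured */
static int has_new_snaps(const struct snap_context *o, const struct snap_context *n)
{
	if (n->num_snaps == 0)
		return 0;
	return n->snaps[0] > o->seq;
}

int main(void)
{
	struct snap_context old   = { .seq = 10, .num_snaps = 1, .snaps = { 10 } };
	struct snap_context same  = { .seq = 10, .num_snaps = 1, .snaps = { 10 } };
	struct snap_context newer = { .seq = 12, .num_snaps = 2, .snaps = { 12, 10 } };

	assert(!has_new_snaps(&old, &same));   /* nothing new, no flush needed */
	assert(has_new_snaps(&old, &newer));   /* snap 12 is new, flush needed */
	return 0;
}
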
index 4e9905374078a228d18762a46a782a925f8f1675..d1c833c321b92eff48d9f35bf7171ef0ac59e7bf 100644 (file)
@@ -134,10 +134,12 @@ enum {
        Opt_noino32,
        Opt_fscache,
        Opt_nofscache,
+       Opt_poolperm,
+       Opt_nopoolperm,
 #ifdef CONFIG_CEPH_FS_POSIX_ACL
        Opt_acl,
 #endif
-       Opt_noacl
+       Opt_noacl,
 };
 
 static match_table_t fsopt_tokens = {
@@ -165,6 +167,8 @@ static match_table_t fsopt_tokens = {
        {Opt_noino32, "noino32"},
        {Opt_fscache, "fsc"},
        {Opt_nofscache, "nofsc"},
+       {Opt_poolperm, "poolperm"},
+       {Opt_nopoolperm, "nopoolperm"},
 #ifdef CONFIG_CEPH_FS_POSIX_ACL
        {Opt_acl, "acl"},
 #endif
@@ -268,6 +272,13 @@ static int parse_fsopt_token(char *c, void *private)
        case Opt_nofscache:
                fsopt->flags &= ~CEPH_MOUNT_OPT_FSCACHE;
                break;
+       case Opt_poolperm:
+               fsopt->flags &= ~CEPH_MOUNT_OPT_NOPOOLPERM;
+               printk ("pool perm");
+               break;
+       case Opt_nopoolperm:
+               fsopt->flags |= CEPH_MOUNT_OPT_NOPOOLPERM;
+               break;
 #ifdef CONFIG_CEPH_FS_POSIX_ACL
        case Opt_acl:
                fsopt->sb_flags |= MS_POSIXACL;
@@ -436,6 +447,8 @@ static int ceph_show_options(struct seq_file *m, struct dentry *root)
                seq_puts(m, ",nodcache");
        if (fsopt->flags & CEPH_MOUNT_OPT_FSCACHE)
                seq_puts(m, ",fsc");
+       if (fsopt->flags & CEPH_MOUNT_OPT_NOPOOLPERM)
+               seq_puts(m, ",nopoolperm");
 
 #ifdef CONFIG_CEPH_FS_POSIX_ACL
        if (fsopt->sb_flags & MS_POSIXACL)
@@ -609,6 +622,7 @@ static void destroy_fs_client(struct ceph_fs_client *fsc)
  */
 struct kmem_cache *ceph_inode_cachep;
 struct kmem_cache *ceph_cap_cachep;
+struct kmem_cache *ceph_cap_flush_cachep;
 struct kmem_cache *ceph_dentry_cachep;
 struct kmem_cache *ceph_file_cachep;
 
@@ -634,6 +648,10 @@ static int __init init_caches(void)
                                     SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD);
        if (ceph_cap_cachep == NULL)
                goto bad_cap;
+       ceph_cap_flush_cachep = KMEM_CACHE(ceph_cap_flush,
+                                          SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD);
+       if (ceph_cap_flush_cachep == NULL)
+               goto bad_cap_flush;
 
        ceph_dentry_cachep = KMEM_CACHE(ceph_dentry_info,
                                        SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD);
@@ -652,6 +670,8 @@ static int __init init_caches(void)
 bad_file:
        kmem_cache_destroy(ceph_dentry_cachep);
 bad_dentry:
+       kmem_cache_destroy(ceph_cap_flush_cachep);
+bad_cap_flush:
        kmem_cache_destroy(ceph_cap_cachep);
 bad_cap:
        kmem_cache_destroy(ceph_inode_cachep);
@@ -668,6 +688,7 @@ static void destroy_caches(void)
 
        kmem_cache_destroy(ceph_inode_cachep);
        kmem_cache_destroy(ceph_cap_cachep);
+       kmem_cache_destroy(ceph_cap_flush_cachep);
        kmem_cache_destroy(ceph_dentry_cachep);
        kmem_cache_destroy(ceph_file_cachep);
 
@@ -729,7 +750,7 @@ static struct dentry *open_root_dentry(struct ceph_fs_client *fsc,
        req->r_ino1.ino = CEPH_INO_ROOT;
        req->r_ino1.snap = CEPH_NOSNAP;
        req->r_started = started;
-       req->r_timeout = fsc->client->options->mount_timeout * HZ;
+       req->r_timeout = fsc->client->options->mount_timeout;
        req->r_args.getattr.mask = cpu_to_le32(CEPH_STAT_CAP_INODE);
        req->r_num_caps = 2;
        err = ceph_mdsc_do_request(mdsc, NULL, req);
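
Adding ceph_cap_flush_cachep to init_caches() also inserts the matching bad_cap_flush label, so the error path keeps tearing caches down in exact reverse order of creation. The userspace sketch below shows the same goto-unwind shape, with malloc/free standing in for kmem_cache creation and destruction (names are illustrative only):

#include <stdio.h>
#include <stdlib.h>

static void *cache_a, *cache_b, *cache_c;

/* when a new allocation (cache_b here) is inserted in the middle of an
 * init sequence, a matching label is inserted so the error path still
 * unwinds everything created so far, newest first */
static int init_caches(void)
{
	cache_a = malloc(32);
	if (!cache_a)
		goto bad_a;
	cache_b = malloc(32);          /* the newly added cache */
	if (!cache_b)
		goto bad_b;
	cache_c = malloc(32);
	if (!cache_c)
		goto bad_c;
	return 0;

bad_c:
	free(cache_b);
bad_b:
	free(cache_a);
bad_a:
	return -1;
}

int main(void)
{
	if (init_caches() == 0)
		puts("caches initialised");
	return 0;
}
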
index fa20e131893956a5360b2f6c37cd1aa7fce7542e..860cc016e70d4ff463c1f7845fc648eaf58269c4 100644 (file)
@@ -35,6 +35,7 @@
 #define CEPH_MOUNT_OPT_INO32           (1<<8) /* 32 bit inos */
 #define CEPH_MOUNT_OPT_DCACHE          (1<<9) /* use dcache for readdir etc */
 #define CEPH_MOUNT_OPT_FSCACHE         (1<<10) /* use fscache */
+#define CEPH_MOUNT_OPT_NOPOOLPERM      (1<<11) /* no pool permission check */
 
 #define CEPH_MOUNT_OPT_DEFAULT    (CEPH_MOUNT_OPT_RBYTES | \
                                   CEPH_MOUNT_OPT_DCACHE)
@@ -121,11 +122,21 @@ struct ceph_cap {
        struct rb_node ci_node;          /* per-ci cap tree */
        struct ceph_mds_session *session;
        struct list_head session_caps;   /* per-session caplist */
-       int mds;
        u64 cap_id;       /* unique cap id (mds provided) */
-       int issued;       /* latest, from the mds */
-       int implemented;  /* implemented superset of issued (for revocation) */
-       int mds_wanted;
+       union {
+               /* in-use caps */
+               struct {
+                       int issued;       /* latest, from the mds */
+                       int implemented;  /* implemented superset of
+                                            issued (for revocation) */
+                       int mds, mds_wanted;
+               };
+               /* caps to release */
+               struct {
+                       u64 cap_ino;
+                       int queue_release;
+               };
+       };
        u32 seq, issue_seq, mseq;
        u32 cap_gen;      /* active/stale cycle */
        unsigned long last_used;
@@ -163,6 +174,7 @@ struct ceph_cap_snap {
        int writing;   /* a sync write is still in progress */
        int dirty_pages;     /* dirty pages awaiting writeback */
        bool inline_data;
+       bool need_flush;
 };
 
 static inline void ceph_put_cap_snap(struct ceph_cap_snap *capsnap)
@@ -174,6 +186,17 @@ static inline void ceph_put_cap_snap(struct ceph_cap_snap *capsnap)
        }
 }
 
+struct ceph_cap_flush {
+       u64 tid;
+       int caps;
+       bool kick;
+       struct rb_node g_node; // global
+       union {
+               struct rb_node i_node; // inode
+               struct list_head list;
+       };
+};
+
 /*
  * The frag tree describes how a directory is fragmented, potentially across
  * multiple metadata servers.  It is also used to indicate points where
@@ -259,9 +282,9 @@ struct ceph_inode_info {
        u32 i_time_warp_seq;
 
        unsigned i_ceph_flags;
-       int i_ordered_count;
-       atomic_t i_release_count;
-       atomic_t i_complete_count;
+       atomic64_t i_release_count;
+       atomic64_t i_ordered_count;
+       atomic64_t i_complete_seq[2];
 
        struct ceph_dir_layout i_dir_layout;
        struct ceph_file_layout i_layout;
@@ -283,11 +306,11 @@ struct ceph_inode_info {
        struct ceph_cap *i_auth_cap;     /* authoritative cap, if any */
        unsigned i_dirty_caps, i_flushing_caps;     /* mask of dirtied fields */
        struct list_head i_dirty_item, i_flushing_item;
-       u64 i_cap_flush_seq;
        /* we need to track cap writeback on a per-cap-bit basis, to allow
         * overlapping, pipelined cap flushes to the mds.  we can probably
         * reduce the tid to 8 bits if we're concerned about inode size. */
-       u16 i_cap_flush_last_tid, i_cap_flush_tid[CEPH_CAP_BITS];
+       struct ceph_cap_flush *i_prealloc_cap_flush;
+       struct rb_root i_cap_flush_tree;
        wait_queue_head_t i_cap_wq;      /* threads waiting on a capability */
        unsigned long i_hold_caps_min; /* jiffies */
        unsigned long i_hold_caps_max; /* jiffies */
@@ -438,36 +461,46 @@ static inline struct inode *ceph_find_inode(struct super_block *sb,
 /*
  * Ceph inode.
  */
-#define CEPH_I_DIR_ORDERED     1  /* dentries in dir are ordered */
-#define CEPH_I_NODELAY         4  /* do not delay cap release */
-#define CEPH_I_FLUSH           8  /* do not delay flush of dirty metadata */
-#define CEPH_I_NOFLUSH         16 /* do not flush dirty caps */
+#define CEPH_I_DIR_ORDERED     (1 << 0)  /* dentries in dir are ordered */
+#define CEPH_I_NODELAY         (1 << 1)  /* do not delay cap release */
+#define CEPH_I_FLUSH           (1 << 2)  /* do not delay flush of dirty metadata */
+#define CEPH_I_NOFLUSH         (1 << 3)  /* do not flush dirty caps */
+#define CEPH_I_POOL_PERM       (1 << 4)  /* pool rd/wr bits are valid */
+#define CEPH_I_POOL_RD         (1 << 5)  /* can read from pool */
+#define CEPH_I_POOL_WR         (1 << 6)  /* can write to pool */
+
 
 static inline void __ceph_dir_set_complete(struct ceph_inode_info *ci,
-                                          int release_count, int ordered_count)
+                                          long long release_count,
+                                          long long ordered_count)
 {
-       atomic_set(&ci->i_complete_count, release_count);
-       if (ci->i_ordered_count == ordered_count)
-               ci->i_ceph_flags |= CEPH_I_DIR_ORDERED;
-       else
-               ci->i_ceph_flags &= ~CEPH_I_DIR_ORDERED;
+       smp_mb__before_atomic();
+       atomic64_set(&ci->i_complete_seq[0], release_count);
+       atomic64_set(&ci->i_complete_seq[1], ordered_count);
 }
 
 static inline void __ceph_dir_clear_complete(struct ceph_inode_info *ci)
 {
-       atomic_inc(&ci->i_release_count);
+       atomic64_inc(&ci->i_release_count);
+}
+
+static inline void __ceph_dir_clear_ordered(struct ceph_inode_info *ci)
+{
+       atomic64_inc(&ci->i_ordered_count);
 }
 
 static inline bool __ceph_dir_is_complete(struct ceph_inode_info *ci)
 {
-       return atomic_read(&ci->i_complete_count) ==
-               atomic_read(&ci->i_release_count);
+       return atomic64_read(&ci->i_complete_seq[0]) ==
+               atomic64_read(&ci->i_release_count);
 }
 
 static inline bool __ceph_dir_is_complete_ordered(struct ceph_inode_info *ci)
 {
-       return __ceph_dir_is_complete(ci) &&
-               (ci->i_ceph_flags & CEPH_I_DIR_ORDERED);
+       return  atomic64_read(&ci->i_complete_seq[0]) ==
+               atomic64_read(&ci->i_release_count) &&
+               atomic64_read(&ci->i_complete_seq[1]) ==
+               atomic64_read(&ci->i_ordered_count);
 }
 
 static inline void ceph_dir_clear_complete(struct inode *inode)
@@ -477,20 +510,13 @@ static inline void ceph_dir_clear_complete(struct inode *inode)
 
 static inline void ceph_dir_clear_ordered(struct inode *inode)
 {
-       struct ceph_inode_info *ci = ceph_inode(inode);
-       spin_lock(&ci->i_ceph_lock);
-       ci->i_ordered_count++;
-       ci->i_ceph_flags &= ~CEPH_I_DIR_ORDERED;
-       spin_unlock(&ci->i_ceph_lock);
+       __ceph_dir_clear_ordered(ceph_inode(inode));
 }
 
 static inline bool ceph_dir_is_complete_ordered(struct inode *inode)
 {
-       struct ceph_inode_info *ci = ceph_inode(inode);
-       bool ret;
-       spin_lock(&ci->i_ceph_lock);
-       ret = __ceph_dir_is_complete_ordered(ci);
-       spin_unlock(&ci->i_ceph_lock);
+       bool ret = __ceph_dir_is_complete_ordered(ceph_inode(inode));
+       smp_rmb();
        return ret;
 }
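
The directory-completeness bookkeeping above now uses monotonically increasing 64-bit counters: i_release_count and i_ordered_count are bumped whenever the directory's contents or ordering change, and i_complete_seq[] records the values seen when the directory is marked complete, so "complete and ordered" simply means the recorded values still match the live counters. A minimal C11 stdatomic model of that scheme (toy names; the kernel additionally pairs it with the smp_mb__before_atomic()/smp_rmb() barriers shown in the hunk):

#include <assert.h>
#include <stdatomic.h>

static atomic_llong release_count, ordered_count;  /* bumped on any change */
static atomic_llong complete_seq[2];               /* values seen at "complete" */

static void dir_set_complete(long long rel, long long ord)
{
	atomic_store(&complete_seq[0], rel);
	atomic_store(&complete_seq[1], ord);
}

static int dir_is_complete_ordered(void)
{
	return atomic_load(&complete_seq[0]) == atomic_load(&release_count) &&
	       atomic_load(&complete_seq[1]) == atomic_load(&ordered_count);
}

int main(void)
{
	dir_set_complete(atomic_load(&release_count), atomic_load(&ordered_count));
	assert(dir_is_complete_ordered());

	atomic_fetch_add(&ordered_count, 1);   /* e.g. a rename reorders entries */
	assert(!dir_is_complete_ordered());    /* cache invalidated without a lock */
	return 0;
}
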
 
@@ -552,7 +578,10 @@ static inline int __ceph_caps_dirty(struct ceph_inode_info *ci)
 {
        return ci->i_dirty_caps | ci->i_flushing_caps;
 }
-extern int __ceph_mark_dirty_caps(struct ceph_inode_info *ci, int mask);
+extern struct ceph_cap_flush *ceph_alloc_cap_flush(void);
+extern void ceph_free_cap_flush(struct ceph_cap_flush *cf);
+extern int __ceph_mark_dirty_caps(struct ceph_inode_info *ci, int mask,
+                                 struct ceph_cap_flush **pcf);
 
 extern int __ceph_caps_revoking_other(struct ceph_inode_info *ci,
                                      struct ceph_cap *ocap, int mask);
@@ -606,16 +635,20 @@ struct ceph_file_info {
        unsigned offset;       /* offset of last chunk, adjusted for . and .. */
        unsigned next_offset;  /* offset of next chunk (last_name's + 1) */
        char *last_name;       /* last entry in previous chunk */
-       struct dentry *dentry; /* next dentry (for dcache readdir) */
-       int dir_release_count;
-       int dir_ordered_count;
+       long long dir_release_count;
+       long long dir_ordered_count;
+       int readdir_cache_idx;
 
        /* used for -o dirstat read() on directory thing */
        char *dir_info;
        int dir_info_len;
 };
 
-
+struct ceph_readdir_cache_control {
+       struct page  *page;
+       struct dentry **dentries;
+       int index;
+};
 
 /*
  * A "snap realm" describes a subset of the file hierarchy sharing
@@ -687,6 +720,7 @@ static inline int default_congestion_kb(void)
 
 
 /* snap.c */
+extern struct ceph_snap_context *ceph_empty_snapc;
 struct ceph_snap_realm *ceph_lookup_snap_realm(struct ceph_mds_client *mdsc,
                                               u64 ino);
 extern void ceph_get_snap_realm(struct ceph_mds_client *mdsc,
@@ -713,8 +747,8 @@ extern void ceph_snap_exit(void);
 static inline bool __ceph_have_pending_cap_snap(struct ceph_inode_info *ci)
 {
        return !list_empty(&ci->i_cap_snaps) &&
-               list_entry(ci->i_cap_snaps.prev, struct ceph_cap_snap,
-                          ci_item)->writing;
+              list_last_entry(&ci->i_cap_snaps, struct ceph_cap_snap,
+                              ci_item)->writing;
 }
 
 /* inode.c */
@@ -838,12 +872,12 @@ extern void ceph_put_cap(struct ceph_mds_client *mdsc,
                         struct ceph_cap *cap);
 extern int ceph_is_any_caps(struct inode *inode);
 
-extern void __queue_cap_release(struct ceph_mds_session *session, u64 ino,
-                               u64 cap_id, u32 migrate_seq, u32 issue_seq);
 extern void ceph_queue_caps_release(struct inode *inode);
 extern int ceph_write_inode(struct inode *inode, struct writeback_control *wbc);
 extern int ceph_fsync(struct file *file, loff_t start, loff_t end,
                      int datasync);
+extern void ceph_early_kick_flushing_caps(struct ceph_mds_client *mdsc,
+                                         struct ceph_mds_session *session);
 extern void ceph_kick_flushing_caps(struct ceph_mds_client *mdsc,
                                    struct ceph_mds_session *session);
 extern struct ceph_cap *ceph_get_cap_for_mds(struct ceph_inode_info *ci,
@@ -879,6 +913,9 @@ extern void ceph_put_fmode(struct ceph_inode_info *ci, int mode);
 /* addr.c */
 extern const struct address_space_operations ceph_aops;
 extern int ceph_mmap(struct file *file, struct vm_area_struct *vma);
+extern int ceph_uninline_data(struct file *filp, struct page *locked_page);
+extern int ceph_pool_perm_check(struct ceph_inode_info *ci, int need);
+extern void ceph_pool_perm_destroy(struct ceph_mds_client* mdsc);
 
 /* file.c */
 extern const struct file_operations ceph_file_fops;
@@ -890,7 +927,6 @@ extern int ceph_atomic_open(struct inode *dir, struct dentry *dentry,
 extern int ceph_release(struct inode *inode, struct file *filp);
 extern void ceph_fill_inline_data(struct inode *inode, struct page *locked_page,
                                  char *data, size_t len);
-int ceph_uninline_data(struct file *filp, struct page *locked_page);
 /* dir.c */
 extern const struct file_operations ceph_dir_fops;
 extern const struct file_operations ceph_snapdir_fops;
@@ -911,6 +947,7 @@ extern void ceph_dentry_lru_del(struct dentry *dn);
 extern void ceph_invalidate_dentry_lease(struct dentry *dentry);
 extern unsigned ceph_dentry_hash(struct inode *dir, struct dentry *dn);
 extern struct inode *ceph_get_dentry_parent_inode(struct dentry *dentry);
+extern void ceph_readdir_cache_release(struct ceph_readdir_cache_control *ctl);
 
 /*
  * our d_ops vary depending on whether the inode is live,
index cd7ffad4041d81b605bdfbc97a4bc0072c19ca99..819163d8313bb3748765b5c3313dfdfd32ac9472 100644 (file)
@@ -911,6 +911,8 @@ int __ceph_setxattr(struct dentry *dentry, const char *name,
        struct inode *inode = d_inode(dentry);
        struct ceph_vxattr *vxattr;
        struct ceph_inode_info *ci = ceph_inode(inode);
+       struct ceph_mds_client *mdsc = ceph_sb_to_client(dentry->d_sb)->mdsc;
+       struct ceph_cap_flush *prealloc_cf = NULL;
        int issued;
        int err;
        int dirty = 0;
@@ -920,6 +922,7 @@ int __ceph_setxattr(struct dentry *dentry, const char *name,
        char *newval = NULL;
        struct ceph_inode_xattr *xattr = NULL;
        int required_blob_size;
+       bool lock_snap_rwsem = false;
 
        if (!ceph_is_valid_xattr(name))
                return -EOPNOTSUPP;
@@ -948,12 +951,27 @@ int __ceph_setxattr(struct dentry *dentry, const char *name,
        if (!xattr)
                goto out;
 
+       prealloc_cf = ceph_alloc_cap_flush();
+       if (!prealloc_cf)
+               goto out;
+
        spin_lock(&ci->i_ceph_lock);
 retry:
        issued = __ceph_caps_issued(ci, NULL);
-       dout("setxattr %p issued %s\n", inode, ceph_cap_string(issued));
        if (ci->i_xattrs.version == 0 || !(issued & CEPH_CAP_XATTR_EXCL))
                goto do_sync;
+
+       if (!lock_snap_rwsem && !ci->i_head_snapc) {
+               lock_snap_rwsem = true;
+               if (!down_read_trylock(&mdsc->snap_rwsem)) {
+                       spin_unlock(&ci->i_ceph_lock);
+                       down_read(&mdsc->snap_rwsem);
+                       spin_lock(&ci->i_ceph_lock);
+                       goto retry;
+               }
+       }
+
+       dout("setxattr %p issued %s\n", inode, ceph_cap_string(issued));
        __build_xattrs(inode);
 
        required_blob_size = __get_required_blob_size(ci, name_len, val_len);
@@ -966,7 +984,7 @@ retry:
                dout(" preaallocating new blob size=%d\n", required_blob_size);
                blob = ceph_buffer_new(required_blob_size, GFP_NOFS);
                if (!blob)
-                       goto out;
+                       goto do_sync_unlocked;
                spin_lock(&ci->i_ceph_lock);
                if (ci->i_xattrs.prealloc_blob)
                        ceph_buffer_put(ci->i_xattrs.prealloc_blob);
@@ -978,21 +996,28 @@ retry:
                          flags, value ? 1 : -1, &xattr);
 
        if (!err) {
-               dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_XATTR_EXCL);
+               dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_XATTR_EXCL,
+                                              &prealloc_cf);
                ci->i_xattrs.dirty = true;
                inode->i_ctime = CURRENT_TIME;
        }
 
        spin_unlock(&ci->i_ceph_lock);
+       if (lock_snap_rwsem)
+               up_read(&mdsc->snap_rwsem);
        if (dirty)
                __mark_inode_dirty(inode, dirty);
+       ceph_free_cap_flush(prealloc_cf);
        return err;
 
 do_sync:
        spin_unlock(&ci->i_ceph_lock);
 do_sync_unlocked:
+       if (lock_snap_rwsem)
+               up_read(&mdsc->snap_rwsem);
        err = ceph_sync_setxattr(dentry, name, value, size, flags);
 out:
+       ceph_free_cap_flush(prealloc_cf);
        kfree(newname);
        kfree(newval);
        kfree(xattr);
@@ -1044,10 +1069,13 @@ int __ceph_removexattr(struct dentry *dentry, const char *name)
        struct inode *inode = d_inode(dentry);
        struct ceph_vxattr *vxattr;
        struct ceph_inode_info *ci = ceph_inode(inode);
+       struct ceph_mds_client *mdsc = ceph_sb_to_client(dentry->d_sb)->mdsc;
+       struct ceph_cap_flush *prealloc_cf = NULL;
        int issued;
        int err;
        int required_blob_size;
        int dirty;
+       bool lock_snap_rwsem = false;
 
        if (!ceph_is_valid_xattr(name))
                return -EOPNOTSUPP;
@@ -1060,14 +1088,29 @@ int __ceph_removexattr(struct dentry *dentry, const char *name)
        if (!strncmp(name, XATTR_CEPH_PREFIX, XATTR_CEPH_PREFIX_LEN))
                goto do_sync_unlocked;
 
+       prealloc_cf = ceph_alloc_cap_flush();
+       if (!prealloc_cf)
+               return -ENOMEM;
+
        err = -ENOMEM;
        spin_lock(&ci->i_ceph_lock);
 retry:
        issued = __ceph_caps_issued(ci, NULL);
-       dout("removexattr %p issued %s\n", inode, ceph_cap_string(issued));
-
        if (ci->i_xattrs.version == 0 || !(issued & CEPH_CAP_XATTR_EXCL))
                goto do_sync;
+
+       if (!lock_snap_rwsem && !ci->i_head_snapc) {
+               lock_snap_rwsem = true;
+               if (!down_read_trylock(&mdsc->snap_rwsem)) {
+                       spin_unlock(&ci->i_ceph_lock);
+                       down_read(&mdsc->snap_rwsem);
+                       spin_lock(&ci->i_ceph_lock);
+                       goto retry;
+               }
+       }
+
+       dout("removexattr %p issued %s\n", inode, ceph_cap_string(issued));
+
        __build_xattrs(inode);
 
        required_blob_size = __get_required_blob_size(ci, 0, 0);
@@ -1080,7 +1123,7 @@ retry:
                dout(" preaallocating new blob size=%d\n", required_blob_size);
                blob = ceph_buffer_new(required_blob_size, GFP_NOFS);
                if (!blob)
-                       goto out;
+                       goto do_sync_unlocked;
                spin_lock(&ci->i_ceph_lock);
                if (ci->i_xattrs.prealloc_blob)
                        ceph_buffer_put(ci->i_xattrs.prealloc_blob);
@@ -1090,18 +1133,24 @@ retry:
 
        err = __remove_xattr_by_name(ceph_inode(inode), name);
 
-       dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_XATTR_EXCL);
+       dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_XATTR_EXCL,
+                                      &prealloc_cf);
        ci->i_xattrs.dirty = true;
        inode->i_ctime = CURRENT_TIME;
        spin_unlock(&ci->i_ceph_lock);
+       if (lock_snap_rwsem)
+               up_read(&mdsc->snap_rwsem);
        if (dirty)
                __mark_inode_dirty(inode, dirty);
+       ceph_free_cap_flush(prealloc_cf);
        return err;
 do_sync:
        spin_unlock(&ci->i_ceph_lock);
 do_sync_unlocked:
+       if (lock_snap_rwsem)
+               up_read(&mdsc->snap_rwsem);
+       ceph_free_cap_flush(prealloc_cf);
        err = ceph_send_removexattr(dentry, name);
-out:
        return err;
 }
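
Both xattr paths above now allocate a ceph_cap_flush before taking i_ceph_lock, since __ceph_mark_dirty_caps() may need one while the spinlock is held and the allocation presumably may sleep; the preallocation is either handed over or freed on every exit path. The pthread sketch below mirrors that pre-allocate / hand-over-or-free pattern (toy names; the spinlock is only an analogue of i_ceph_lock, and ceph_free_cap_flush() is assumed to tolerate NULL the way free() does):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_spinlock_t lock;
static void *pending_flush;   /* "consumed" slot, akin to ci->i_prealloc_cap_flush */

static int mark_dirty(int need_flush)
{
	void *prealloc = malloc(64);   /* ceph_alloc_cap_flush() analogue */
	if (!prealloc)
		return -1;

	pthread_spin_lock(&lock);      /* no allocation inside the critical section */
	if (need_flush && !pending_flush) {
		pending_flush = prealloc;  /* ownership transferred */
		prealloc = NULL;
	}
	pthread_spin_unlock(&lock);

	free(prealloc);                /* ceph_free_cap_flush() analogue; NULL is fine */
	return 0;
}

int main(void)
{
	pthread_spin_init(&lock, PTHREAD_PROCESS_PRIVATE);
	mark_dirty(1);
	mark_dirty(0);
	printf("flush pending: %s\n", pending_flush ? "yes" : "no");
	pthread_spin_destroy(&lock);
	free(pending_flush);
	return 0;
}
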
 
index d6f7a76a1f5b06c3aeab8f07bb841e5c1629a32a..f829fe963f5bab9e1de4b81ec9195de90e5da77d 100644 (file)
@@ -79,7 +79,7 @@ void coda_sysctl_clean(void);
 
 static inline struct coda_inode_info *ITOC(struct inode *inode)
 {
-       return list_entry(inode, struct coda_inode_info, vfs_inode);
+       return container_of(inode, struct coda_inode_info, vfs_inode);
 }
 
 static __inline__ struct CodaFid *coda_i2f(struct inode *inode)
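
The ITOC() change swaps list_entry() for container_of(); list_entry() is itself defined in terms of container_of(), so behaviour is unchanged, but the intent (recover the enclosing coda_inode_info from its embedded vfs_inode) reads more clearly. A self-contained illustration using a userspace container_of():

#include <stddef.h>
#include <stdio.h>

/* userspace equivalent of the kernel's container_of() */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct vfs_inode { int ino; };

struct coda_inode_info {
	int c_flags;
	struct vfs_inode vfs_inode;   /* embedded, not the first member */
};

int main(void)
{
	struct coda_inode_info ci = { .c_flags = 42 };
	struct vfs_inode *inode = &ci.vfs_inode;

	/* recover the containing structure from the embedded member */
	struct coda_inode_info *back =
		container_of(inode, struct coda_inode_info, vfs_inode);

	printf("%d\n", back->c_flags);   /* prints 42 */
	return 0;
}
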
index 8d89f5fd0331c6a8b0ff9648251199248995aa50..eae87575e681bbbed9db2ca48eb91cffa7ed8e88 100644 (file)
@@ -236,7 +236,7 @@ void configfs_drop_dentry(struct configfs_dirent * sd, struct dentry * parent)
 
        if (dentry) {
                spin_lock(&dentry->d_lock);
-               if (!d_unhashed(dentry) && d_really_is_positive(dentry)) {
+               if (simple_positive(dentry)) {
                        dget_dlock(dentry);
                        __d_drop(dentry);
                        spin_unlock(&dentry->d_lock);
index 5373567420912c87b1d82f3c605e308bda94c68b..a8f3b589a2dfe5f030f4073aa46d79b965c77dc5 100644 (file)
@@ -129,8 +129,6 @@ void configfs_release_fs(void)
 }
 
 
-static struct kobject *config_kobj;
-
 static int __init configfs_init(void)
 {
        int err = -ENOMEM;
@@ -141,8 +139,8 @@ static int __init configfs_init(void)
        if (!configfs_dir_cachep)
                goto out;
 
-       config_kobj = kobject_create_and_add("config", kernel_kobj);
-       if (!config_kobj)
+       err = sysfs_create_mount_point(kernel_kobj, "config");
+       if (err)
                goto out2;
 
        err = register_filesystem(&configfs_fs_type);
@@ -152,7 +150,7 @@ static int __init configfs_init(void)
        return 0;
 out3:
        pr_err("Unable to register filesystem!\n");
-       kobject_put(config_kobj);
+       sysfs_remove_mount_point(kernel_kobj, "config");
 out2:
        kmem_cache_destroy(configfs_dir_cachep);
        configfs_dir_cachep = NULL;
@@ -163,7 +161,7 @@ out:
 static void __exit configfs_exit(void)
 {
        unregister_filesystem(&configfs_fs_type);
-       kobject_put(config_kobj);
+       sysfs_remove_mount_point(kernel_kobj, "config");
        kmem_cache_destroy(configfs_dir_cachep);
        configfs_dir_cachep = NULL;
 }
index e52e0064feac8d01c1447969e7e48189af5f2714..c5ecde6f3eed975af7756c17cec4f3b1748dbc83 100644 (file)
@@ -140,7 +140,7 @@ static int cn_print_exe_file(struct core_name *cn)
                goto put_exe_file;
        }
 
-       path = d_path(&exe_file->f_path, pathbuf, PATH_MAX);
+       path = file_path(exe_file, pathbuf, PATH_MAX);
        if (IS_ERR(path)) {
                ret = PTR_ERR(path);
                goto free_buf;
index 99b5fbc38992db1f88be1a1e48dad4fda584a0c1..c3e21ccfc358b2da1170c15f09a5946afad0ff16 100644 (file)
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -155,7 +155,7 @@ static ssize_t dax_io(struct inode *inode, struct iov_iter *iter,
                }
 
                if (iov_iter_rw(iter) == WRITE)
-                       len = copy_from_iter(addr, max - pos, iter);
+                       len = copy_from_iter_nocache(addr, max - pos, iter);
                else if (!hole)
                        len = copy_to_iter(addr, max - pos, iter);
                else
@@ -209,7 +209,8 @@ ssize_t dax_do_io(struct kiocb *iocb, struct inode *inode,
        }
 
        /* Protects against truncate */
-       inode_dio_begin(inode);
+       if (!(flags & DIO_SKIP_DIO_COUNT))
+               inode_dio_begin(inode);
 
        retval = dax_io(inode, iter, pos, end, get_block, &bh);
 
@@ -219,7 +220,8 @@ ssize_t dax_do_io(struct kiocb *iocb, struct inode *inode,
        if ((retval > 0) && end_io)
                end_io(iocb, pos, retval, bh.b_private);
 
-       inode_dio_end(inode);
+       if (!(flags & DIO_SKIP_DIO_COUNT))
+               inode_dio_end(inode);
  out:
        return retval;
 }
index 592c4b582495b515c52a2aa3458be3422953c111..7a3f3e5f9ceabfc4cad41a5d5478258f7e817dc1 100644 (file)
@@ -1673,7 +1673,8 @@ void d_set_d_op(struct dentry *dentry, const struct dentry_operations *op)
                                DCACHE_OP_COMPARE       |
                                DCACHE_OP_REVALIDATE    |
                                DCACHE_OP_WEAK_REVALIDATE       |
-                               DCACHE_OP_DELETE ));
+                               DCACHE_OP_DELETE        |
+                               DCACHE_OP_SELECT_INODE));
        dentry->d_op = op;
        if (!op)
                return;
@@ -1689,6 +1690,8 @@ void d_set_d_op(struct dentry *dentry, const struct dentry_operations *op)
                dentry->d_flags |= DCACHE_OP_DELETE;
        if (op->d_prune)
                dentry->d_flags |= DCACHE_OP_PRUNE;
+       if (op->d_select_inode)
+               dentry->d_flags |= DCACHE_OP_SELECT_INODE;
 
 }
 EXPORT_SYMBOL(d_set_d_op);
@@ -2927,17 +2930,6 @@ restart:
                                vfsmnt = &mnt->mnt;
                                continue;
                        }
-                       /*
-                        * Filesystems needing to implement special "root names"
-                        * should do so with ->d_dname()
-                        */
-                       if (IS_ROOT(dentry) &&
-                          (dentry->d_name.len != 1 ||
-                           dentry->d_name.name[0] != '/')) {
-                               WARN(1, "Root dentry has weird name <%.*s>\n",
-                                    (int) dentry->d_name.len,
-                                    dentry->d_name.name);
-                       }
                        if (!error)
                                error = is_mounted(vfsmnt) ? 1 : 2;
                        break;
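
The DCACHE_OP_SELECT_INODE addition above follows the existing d_set_d_op() pattern: the presence of each optional dentry operation is cached as a flag bit, so hot paths test one word instead of chasing d_op pointers. A toy version of that flag-caching pattern (hypothetical names, not the VFS API):

#include <assert.h>
#include <stddef.h>

#define OP_REVALIDATE   (1 << 0)
#define OP_DELETE       (1 << 1)
#define OP_SELECT_INODE (1 << 2)   /* the newly cached hook */

struct dentry_ops {
	int (*d_revalidate)(void);
	int (*d_delete)(void);
	int (*d_select_inode)(void);
};

/* when an ops table is attached, record which hooks it provides */
static unsigned int cache_op_flags(const struct dentry_ops *op)
{
	unsigned int flags = 0;

	if (!op)
		return 0;
	if (op->d_revalidate)
		flags |= OP_REVALIDATE;
	if (op->d_delete)
		flags |= OP_DELETE;
	if (op->d_select_inode)
		flags |= OP_SELECT_INODE;
	return flags;
}

static int select_inode(void) { return 0; }

int main(void)
{
	struct dentry_ops ops = { .d_select_inode = select_inode };

	assert(cache_op_flags(&ops) == OP_SELECT_INODE);
	assert(cache_op_flags(NULL) == 0);
	return 0;
}
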
index 7eaec88ea970d1a6ea8422465857deaefd2b7052..c711be8d6a3cc71a598a92a82026a59f05eccd90 100644 (file)
@@ -44,11 +44,6 @@ static struct inode *debugfs_get_inode(struct super_block *sb)
        return inode;
 }
 
-static inline int debugfs_positive(struct dentry *dentry)
-{
-       return d_really_is_positive(dentry) && !d_unhashed(dentry);
-}
-
 struct debugfs_mount_opts {
        kuid_t uid;
        kgid_t gid;
@@ -522,7 +517,7 @@ static int __debugfs_remove(struct dentry *dentry, struct dentry *parent)
 {
        int ret = 0;
 
-       if (debugfs_positive(dentry)) {
+       if (simple_positive(dentry)) {
                dget(dentry);
                if (d_is_dir(dentry))
                        ret = simple_rmdir(d_inode(parent), dentry);
@@ -602,7 +597,7 @@ void debugfs_remove_recursive(struct dentry *dentry)
         */
        spin_lock(&parent->d_lock);
        list_for_each_entry(child, &parent->d_subdirs, d_child) {
-               if (!debugfs_positive(child))
+               if (!simple_positive(child))
                        continue;
 
                /* perhaps simple_empty(child) makes more sense */
@@ -623,7 +618,7 @@ void debugfs_remove_recursive(struct dentry *dentry)
                 * from d_subdirs. When releasing the parent->d_lock we can
                 * no longer trust that the next pointer is valid.
                 * Restart the loop. We'll skip this one with the
-                * debugfs_positive() check.
+                * simple_positive() check.
                 */
                goto loop;
        }
@@ -716,20 +711,17 @@ bool debugfs_initialized(void)
 }
 EXPORT_SYMBOL_GPL(debugfs_initialized);
 
-
-static struct kobject *debug_kobj;
-
 static int __init debugfs_init(void)
 {
        int retval;
 
-       debug_kobj = kobject_create_and_add("debug", kernel_kobj);
-       if (!debug_kobj)
-               return -EINVAL;
+       retval = sysfs_create_mount_point(kernel_kobj, "debug");
+       if (retval)
+               return retval;
 
        retval = register_filesystem(&debug_fs_type);
        if (retval)
-               kobject_put(debug_kobj);
+               sysfs_remove_mount_point(kernel_kobj, "debug");
        else
                debugfs_registered = true;
 
index add566303c684336cce3d8f9c4fa0f98678adafc..c35ffdc12bbafd1874dcea43ec5094867daf3e0d 100644 (file)
@@ -142,6 +142,8 @@ static inline struct super_block *pts_sb_from_inode(struct inode *inode)
        if (inode->i_sb->s_magic == DEVPTS_SUPER_MAGIC)
                return inode->i_sb;
 #endif
+       if (!devpts_mnt)
+               return NULL;
        return devpts_mnt->mnt_sb;
 }
 
@@ -525,10 +527,14 @@ static struct file_system_type devpts_fs_type = {
 int devpts_new_index(struct inode *ptmx_inode)
 {
        struct super_block *sb = pts_sb_from_inode(ptmx_inode);
-       struct pts_fs_info *fsi = DEVPTS_SB(sb);
+       struct pts_fs_info *fsi;
        int index;
        int ida_ret;
 
+       if (!sb)
+               return -ENODEV;
+
+       fsi = DEVPTS_SB(sb);
 retry:
        if (!ida_pre_get(&fsi->allocated_ptys, GFP_KERNEL))
                return -ENOMEM;
@@ -584,11 +590,18 @@ struct inode *devpts_pty_new(struct inode *ptmx_inode, dev_t device, int index,
        struct dentry *dentry;
        struct super_block *sb = pts_sb_from_inode(ptmx_inode);
        struct inode *inode;
-       struct dentry *root = sb->s_root;
-       struct pts_fs_info *fsi = DEVPTS_SB(sb);
-       struct pts_mount_opts *opts = &fsi->mount_opts;
+       struct dentry *root;
+       struct pts_fs_info *fsi;
+       struct pts_mount_opts *opts;
        char s[12];
 
+       if (!sb)
+               return ERR_PTR(-ENODEV);
+
+       root = sb->s_root;
+       fsi = DEVPTS_SB(sb);
+       opts = &fsi->mount_opts;
+
        inode = new_inode(sb);
        if (!inode)
                return ERR_PTR(-ENOMEM);
@@ -676,12 +689,16 @@ static int __init init_devpts_fs(void)
        struct ctl_table_header *table;
 
        if (!err) {
+               struct vfsmount *mnt;
+
                table = register_sysctl_table(pty_root_table);
-               devpts_mnt = kern_mount(&devpts_fs_type);
-               if (IS_ERR(devpts_mnt)) {
-                       err = PTR_ERR(devpts_mnt);
+               mnt = kern_mount(&devpts_fs_type);
+               if (IS_ERR(mnt)) {
+                       err = PTR_ERR(mnt);
                        unregister_filesystem(&devpts_fs_type);
                        unregister_sysctl_table(table);
+               } else {
+                       devpts_mnt = mnt;
                }
        }
        return err;
index 4deb0b05b011266f344d7647a9cd004e085e4266..e5bb2abf77f9adb1e11508cec2a8fea65934bac4 100644 (file)
@@ -44,12 +44,6 @@ static inline void exofs_put_page(struct page *page)
        page_cache_release(page);
 }
 
-/* Accesses dir's inode->i_size must be called under inode lock */
-static inline unsigned long dir_pages(struct inode *inode)
-{
-       return (inode->i_size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
-}
-
 static unsigned exofs_last_byte(struct inode *inode, unsigned long page_nr)
 {
        loff_t last_byte = inode->i_size;
index 796b491e69785e4b703b7c3a0cbc7e97d3510a40..0c6638b40f2176b602f70ef465968b3a0dddc292 100644 (file)
@@ -70,11 +70,6 @@ static inline void ext2_put_page(struct page *page)
        page_cache_release(page);
 }
 
-static inline unsigned long dir_pages(struct inode *inode)
-{
-       return (inode->i_size+PAGE_CACHE_SIZE-1)>>PAGE_CACHE_SHIFT;
-}
-
 /*
  * Return the offset into page `page_nr' of the last valid
  * byte in that page, plus one.
index aadb7282883493597f8dae099f20c3e86694bea8..2553aa8b608d84d1673190ea634c5e84c86d9f0b 100644 (file)
@@ -504,7 +504,7 @@ __read_extent_tree_block(const char *function, unsigned int line,
        struct buffer_head              *bh;
        int                             err;
 
-       bh = sb_getblk(inode->i_sb, pblk);
+       bh = sb_getblk_gfp(inode->i_sb, pblk, __GFP_MOVABLE | GFP_NOFS);
        if (unlikely(!bh))
                return ERR_PTR(-ENOMEM);
 
@@ -1089,7 +1089,7 @@ static int ext4_ext_split(handle_t *handle, struct inode *inode,
                err = -EIO;
                goto cleanup;
        }
-       bh = sb_getblk(inode->i_sb, newblock);
+       bh = sb_getblk_gfp(inode->i_sb, newblock, __GFP_MOVABLE | GFP_NOFS);
        if (unlikely(!bh)) {
                err = -ENOMEM;
                goto cleanup;
@@ -1283,7 +1283,7 @@ static int ext4_ext_grow_indepth(handle_t *handle, struct inode *inode,
        if (newblock == 0)
                return err;
 
-       bh = sb_getblk(inode->i_sb, newblock);
+       bh = sb_getblk_gfp(inode->i_sb, newblock, __GFP_MOVABLE | GFP_NOFS);
        if (unlikely(!bh))
                return -ENOMEM;
        lock_buffer(bh);
index 41f8e55afcd11491c5f25bf0989f74c12b9baf69..cecf9aa1081134d255455ee329a4e118b8355216 100644 (file)
@@ -1323,7 +1323,7 @@ static void ext4_da_page_release_reservation(struct page *page,
                                             unsigned int offset,
                                             unsigned int length)
 {
-       int to_release = 0;
+       int to_release = 0, contiguous_blks = 0;
        struct buffer_head *head, *bh;
        unsigned int curr_off = 0;
        struct inode *inode = page->mapping->host;
@@ -1344,14 +1344,23 @@ static void ext4_da_page_release_reservation(struct page *page,
 
                if ((offset <= curr_off) && (buffer_delay(bh))) {
                        to_release++;
+                       contiguous_blks++;
                        clear_buffer_delay(bh);
+               } else if (contiguous_blks) {
+                       lblk = page->index <<
+                              (PAGE_CACHE_SHIFT - inode->i_blkbits);
+                       lblk += (curr_off >> inode->i_blkbits) -
+                               contiguous_blks;
+                       ext4_es_remove_extent(inode, lblk, contiguous_blks);
+                       contiguous_blks = 0;
                }
                curr_off = next_off;
        } while ((bh = bh->b_this_page) != head);
 
-       if (to_release) {
+       if (contiguous_blks) {
                lblk = page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
-               ext4_es_remove_extent(inode, lblk, to_release);
+               lblk += (curr_off >> inode->i_blkbits) - contiguous_blks;
+               ext4_es_remove_extent(inode, lblk, contiguous_blks);
        }
 
        /* If we have released all the blocks belonging to a cluster, then we
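
Rather than removing a single extent spanning every released block, the reworked loop above tracks runs of contiguous delayed buffers and calls ext4_es_remove_extent() once per run, each with its own start and length. A simplified userspace illustration of the run tracking (per-block flags only; the offset/curr_off bookkeeping of the real code is omitted):

#include <stdio.h>

/* walk per-block "delayed" flags for one page and report each contiguous
 * run as (first block, length), which is what the patched code hands to
 * ext4_es_remove_extent() per run */
static void report_runs(const int *delayed, int nblocks)
{
	int i, run = 0;

	for (i = 0; i < nblocks; i++) {
		if (delayed[i]) {
			run++;
		} else if (run) {
			printf("remove extent: start=%d len=%d\n", i - run, run);
			run = 0;
		}
	}
	if (run)
		printf("remove extent: start=%d len=%d\n", nblocks - run, run);
}

int main(void)
{
	/* blocks 1-2 and 5 are delayed; 0, 3, 4, 6, 7 are not */
	int delayed[8] = { 0, 1, 1, 0, 0, 1, 0, 0 };

	report_runs(delayed, 8);   /* prints runs (1,2) and (5,1) */
	return 0;
}
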
@@ -4344,7 +4353,12 @@ static void ext4_update_other_inodes_time(struct super_block *sb,
        int inode_size = EXT4_INODE_SIZE(sb);
 
        oi.orig_ino = orig_ino;
-       ino = (orig_ino & ~(inodes_per_block - 1)) + 1;
+       /*
+        * Calculate the first inode in the inode table block.  Inode
+        * numbers are one-based.  That is, the first inode in a block
+        * (assuming 4k blocks and 256 byte inodes) is (n*16 + 1).
+        */
+       ino = ((orig_ino - 1) & ~(inodes_per_block - 1)) + 1;
        for (i = 0; i < inodes_per_block; i++, ino++, buf += inode_size) {
                if (ino == orig_ino)
                        continue;
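
The corrected rounding accounts for inode numbers being one-based, as the new comment spells out. A tiny standalone check of the old formula against the fixed one, assuming 16 inodes per table block:

#include <assert.h>

int main(void)
{
	const unsigned inodes_per_block = 16;   /* 4k block / 256-byte inodes */
	unsigned orig_ino, old_first, new_first;

	/* inode 16 lives in the first table block (inodes 1..16) */
	orig_ino = 16;
	old_first = (orig_ino & ~(inodes_per_block - 1)) + 1;        /* old code */
	new_first = ((orig_ino - 1) & ~(inodes_per_block - 1)) + 1;  /* fixed code */
	assert(old_first == 17);   /* wrong: points at the next block */
	assert(new_first == 1);    /* correct first inode of the block */

	/* inode 17 starts the second block (inodes 17..32); both formulas agree */
	orig_ino = 17;
	assert(((orig_ino - 1) & ~(inodes_per_block - 1)) + 1 == 17);
	return 0;
}
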
index f6aedf88da437ee324c314bb1020baa51db0423c..34b610ea503053674b3b87ffd3503d6c641c573d 100644 (file)
@@ -4816,18 +4816,12 @@ do_more:
                /*
                 * blocks being freed are metadata. these blocks shouldn't
                 * be used until this transaction is committed
+                *
+                * We use __GFP_NOFAIL because ext4_free_blocks() is not allowed
+                * to fail.
                 */
-       retry:
-               new_entry = kmem_cache_alloc(ext4_free_data_cachep, GFP_NOFS);
-               if (!new_entry) {
-                       /*
-                        * We use a retry loop because
-                        * ext4_free_blocks() is not allowed to fail.
-                        */
-                       cond_resched();
-                       congestion_wait(BLK_RW_ASYNC, HZ/50);
-                       goto retry;
-               }
+               new_entry = kmem_cache_alloc(ext4_free_data_cachep,
+                               GFP_NOFS|__GFP_NOFAIL);
                new_entry->efd_start_cluster = bit;
                new_entry->efd_group = block_group;
                new_entry->efd_count = count_clusters;
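
The hunk above replaces an open-coded retry loop with __GFP_NOFAIL, which asks the allocator itself to retry until the allocation succeeds, since ext4_free_blocks() has no way to report failure. A userspace analogue of the same idea (plain libc, not kernel code): the "cannot fail" policy lives in the allocator wrapper rather than at every call site.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

/* Centralise the "must not fail" policy, roughly what __GFP_NOFAIL
 * asks of the kernel page allocator. */
static void *xmalloc_nofail(size_t size)
{
	void *p;

	while (!(p = malloc(size)))
		usleep(20 * 1000);	/* back off briefly, then retry */
	return p;
}

int main(void)
{
	char *buf = xmalloc_nofail(64);

	strcpy(buf, "caller may assume the allocation succeeded");
	puts(buf);
	free(buf);
	return 0;
}
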
index b52374e4210221bc02c9cb3ad23f8027f21e77ad..6163ad21cb0ef6b184bdfceb700c1dfd89317590 100644 (file)
@@ -620,6 +620,7 @@ int ext4_ind_migrate(struct inode *inode)
        struct ext4_inode_info          *ei = EXT4_I(inode);
        struct ext4_extent              *ex;
        unsigned int                    i, len;
+       ext4_lblk_t                     start, end;
        ext4_fsblk_t                    blk;
        handle_t                        *handle;
        int                             ret;
@@ -633,6 +634,14 @@ int ext4_ind_migrate(struct inode *inode)
                                       EXT4_FEATURE_RO_COMPAT_BIGALLOC))
                return -EOPNOTSUPP;
 
+       /*
+        * In order to get correct extent info, force all delayed allocation
+        * blocks to be allocated, otherwise delayed allocation blocks may not
+        * be reflected and bypass the checks on extent header.
+        */
+       if (test_opt(inode->i_sb, DELALLOC))
+               ext4_alloc_da_blocks(inode);
+
        handle = ext4_journal_start(inode, EXT4_HT_MIGRATE, 1);
        if (IS_ERR(handle))
                return PTR_ERR(handle);
@@ -650,11 +659,13 @@ int ext4_ind_migrate(struct inode *inode)
                goto errout;
        }
        if (eh->eh_entries == 0)
-               blk = len = 0;
+               blk = len = start = end = 0;
        else {
                len = le16_to_cpu(ex->ee_len);
                blk = ext4_ext_pblock(ex);
-               if (len > EXT4_NDIR_BLOCKS) {
+               start = le32_to_cpu(ex->ee_block);
+               end = start + len - 1;
+               if (end >= EXT4_NDIR_BLOCKS) {
                        ret = -EOPNOTSUPP;
                        goto errout;
                }
@@ -662,7 +673,7 @@ int ext4_ind_migrate(struct inode *inode)
 
        ext4_clear_inode_flag(inode, EXT4_INODE_EXTENTS);
        memset(ei->i_data, 0, sizeof(ei->i_data));
-       for (i=0; i < len; i++)
+       for (i = start; i <= end; i++)
                ei->i_data[i] = cpu_to_le32(blk++);
        ext4_mark_inode_dirty(handle, inode);
 errout:
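
The hunk above matters because the single extent being migrated need not start at logical block 0: the old code only looked at the extent length and then filled i_data[0..len-1], while the new code checks the last covered logical block and fills i_data[start..end]. A standalone C sketch of why the length check alone is insufficient (illustrative values, not ext4 code):

#include <stdio.h>

#define EXT4_NDIR_BLOCKS 12	/* number of direct block slots in i_data */

int main(void)
{
	unsigned int start = 10, len = 5;	/* extent covers logical blocks 10..14 */
	unsigned int end = start + len - 1;

	printf("old check rejects? %s (len %u > %u)\n",
	       len > EXT4_NDIR_BLOCKS ? "yes" : "no", len, EXT4_NDIR_BLOCKS);
	printf("new check rejects? %s (end %u >= %u)\n",
	       end >= EXT4_NDIR_BLOCKS ? "yes" : "no", end, EXT4_NDIR_BLOCKS);
	return 0;
}

With start = 10 and len = 5 the old check passes even though blocks 12..14 would spill past the direct-block slots; the new end-based check rejects the migration.
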
index 5c787647afe2a3817dfbf49949e195218199c8f6..58987b5c514b2baf433801dee0525d05c4525906 100644 (file)
@@ -452,7 +452,7 @@ void __ext4_error_file(struct file *file, const char *function,
        es = EXT4_SB(inode->i_sb)->s_es;
        es->s_last_error_ino = cpu_to_le32(inode->i_ino);
        if (ext4_error_ratelimit(inode->i_sb)) {
-               path = d_path(&(file->f_path), pathname, sizeof(pathname));
+               path = file_path(file, pathname, sizeof(pathname));
                if (IS_ERR(path))
                        path = "(unknown)";
                va_start(args, fmt);
index 93c5f89c248b07b6fba50fd31f4bd36ba9520fef..6c672ad329e9a8c6bf3b7925f9e344ea1c5991c4 100644 (file)
--- a/fs/file.c
+++ b/fs/file.c
@@ -147,6 +147,13 @@ static int expand_fdtable(struct files_struct *files, int nr)
 
        spin_unlock(&files->file_lock);
        new_fdt = alloc_fdtable(nr);
+
+       /* make sure all __fd_install() have seen resize_in_progress
+        * or have finished their rcu_read_lock_sched() section.
+        */
+       if (atomic_read(&files->count) > 1)
+               synchronize_sched();
+
        spin_lock(&files->file_lock);
        if (!new_fdt)
                return -ENOMEM;
@@ -158,21 +165,14 @@ static int expand_fdtable(struct files_struct *files, int nr)
                __free_fdtable(new_fdt);
                return -EMFILE;
        }
-       /*
-        * Check again since another task may have expanded the fd table while
-        * we dropped the lock
-        */
        cur_fdt = files_fdtable(files);
-       if (nr >= cur_fdt->max_fds) {
-               /* Continue as planned */
-               copy_fdtable(new_fdt, cur_fdt);
-               rcu_assign_pointer(files->fdt, new_fdt);
-               if (cur_fdt != &files->fdtab)
-                       call_rcu(&cur_fdt->rcu, free_fdtable_rcu);
-       } else {
-               /* Somebody else expanded, so undo our attempt */
-               __free_fdtable(new_fdt);
-       }
+       BUG_ON(nr < cur_fdt->max_fds);
+       copy_fdtable(new_fdt, cur_fdt);
+       rcu_assign_pointer(files->fdt, new_fdt);
+       if (cur_fdt != &files->fdtab)
+               call_rcu(&cur_fdt->rcu, free_fdtable_rcu);
+       /* coupled with smp_rmb() in __fd_install() */
+       smp_wmb();
        return 1;
 }
 
@@ -185,21 +185,38 @@ static int expand_fdtable(struct files_struct *files, int nr)
  * The files->file_lock should be held on entry, and will be held on exit.
  */
 static int expand_files(struct files_struct *files, int nr)
+       __releases(files->file_lock)
+       __acquires(files->file_lock)
 {
        struct fdtable *fdt;
+       int expanded = 0;
 
+repeat:
        fdt = files_fdtable(files);
 
        /* Do we need to expand? */
        if (nr < fdt->max_fds)
-               return 0;
+               return expanded;
 
        /* Can we expand? */
        if (nr >= sysctl_nr_open)
                return -EMFILE;
 
+       if (unlikely(files->resize_in_progress)) {
+               spin_unlock(&files->file_lock);
+               expanded = 1;
+               wait_event(files->resize_wait, !files->resize_in_progress);
+               spin_lock(&files->file_lock);
+               goto repeat;
+       }
+
        /* All good, so we try */
-       return expand_fdtable(files, nr);
+       files->resize_in_progress = true;
+       expanded = expand_fdtable(files, nr);
+       files->resize_in_progress = false;
+
+       wake_up_all(&files->resize_wait);
+       return expanded;
 }
 
 static inline void __set_close_on_exec(int fd, struct fdtable *fdt)
@@ -256,6 +273,8 @@ struct files_struct *dup_fd(struct files_struct *oldf, int *errorp)
        atomic_set(&newf->count, 1);
 
        spin_lock_init(&newf->file_lock);
+       newf->resize_in_progress = false;
+       init_waitqueue_head(&newf->resize_wait);
        newf->next_fd = 0;
        new_fdt = &newf->fdtab;
        new_fdt->max_fds = NR_OPEN_DEFAULT;
@@ -553,11 +572,21 @@ void __fd_install(struct files_struct *files, unsigned int fd,
                struct file *file)
 {
        struct fdtable *fdt;
-       spin_lock(&files->file_lock);
-       fdt = files_fdtable(files);
+
+       might_sleep();
+       rcu_read_lock_sched();
+
+       while (unlikely(files->resize_in_progress)) {
+               rcu_read_unlock_sched();
+               wait_event(files->resize_wait, !files->resize_in_progress);
+               rcu_read_lock_sched();
+       }
+       /* coupled with smp_wmb() in expand_fdtable() */
+       smp_rmb();
+       fdt = rcu_dereference_sched(files->fdt);
        BUG_ON(fdt->fd[fd] != NULL);
        rcu_assign_pointer(fdt->fd[fd], file);
-       spin_unlock(&files->file_lock);
+       rcu_read_unlock_sched();
 }
 
 void fd_install(unsigned int fd, struct file *file)
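
The expand_fdtable()/expand_files()/__fd_install() rework above introduces a resize_in_progress flag plus a resize_wait queue so that installers and concurrent expanders back off while a resize runs, instead of serialising every install on file_lock. A compact userspace analogue of that handshake using a pthread mutex and condition variable (illustrative names; the kernel uses file_lock, wait_event() and wake_up_all(), plus the RCU and memory-barrier machinery not modelled here). Build with -pthread.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  resize_wait = PTHREAD_COND_INITIALIZER;
static bool resize_in_progress;
static int  max_fds = 64;

/* Grow the table until it can hold descriptor nr, letting exactly one
 * thread resize at a time while the others wait and then re-check. */
static void expand_table(int nr)
{
	pthread_mutex_lock(&table_lock);
	while (nr >= max_fds) {
		if (resize_in_progress) {
			pthread_cond_wait(&resize_wait, &table_lock);
			continue;	/* re-check after being woken */
		}
		resize_in_progress = true;
		pthread_mutex_unlock(&table_lock);

		int new_max = max_fds * 2;	/* allocate/copy outside the lock */

		pthread_mutex_lock(&table_lock);
		max_fds = new_max;
		resize_in_progress = false;
		pthread_cond_broadcast(&resize_wait);
	}
	pthread_mutex_unlock(&table_lock);
}

int main(void)
{
	expand_table(200);
	printf("table now holds %d descriptors\n", max_fds);
	return 0;
}
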
@@ -635,11 +664,17 @@ static struct file *__fget(unsigned int fd, fmode_t mask)
        struct file *file;
 
        rcu_read_lock();
+loop:
        file = fcheck_files(files, fd);
        if (file) {
-               /* File object ref couldn't be taken */
-               if ((file->f_mode & mask) || !get_file_rcu(file))
+               /* File object ref couldn't be taken.
+                * dup2() atomicity guarantee is the reason
+                * we loop to catch the new file (or NULL pointer)
+                */
+               if (file->f_mode & mask)
                        file = NULL;
+               else if (!get_file_rcu(file))
+                       goto loop;
        }
        rcu_read_unlock();
 
index 294174dcc2261237fa900432c5c3a7134fda3976..7f9d407c759596f950335bd418ab0226f4a629f8 100644 (file)
@@ -20,7 +20,6 @@
 #include <linux/cdev.h>
 #include <linux/fsnotify.h>
 #include <linux/sysctl.h>
-#include <linux/lglock.h>
 #include <linux/percpu_counter.h>
 #include <linux/percpu.h>
 #include <linux/hardirq.h>
index 99c7f0a37af4435105064b1b1a1f82d6618130bb..484b32d3234ad9823394f55064e8b8e2ca237b02 100644 (file)
@@ -61,13 +61,6 @@ const struct file_operations vxfs_dir_operations = {
        .iterate =              vxfs_readdir,
 };
 
-static inline u_long
-dir_pages(struct inode *inode)
-{
-       return (inode->i_size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
-}
 static inline u_long
 dir_blocks(struct inode *ip)
 {
index 89acec742e0bfdb2b1aab30e31991655462f05d9..d403c69bee0829fba9187114bc318dd810e8d179 100644 (file)
@@ -327,7 +327,8 @@ static int fscache_alloc_object(struct fscache_cache *cache,
 
 object_already_extant:
        ret = -ENOBUFS;
-       if (fscache_object_is_dead(object)) {
+       if (fscache_object_is_dying(object) ||
+           fscache_cache_is_broken(object)) {
                spin_unlock(&cookie->lock);
                goto error;
        }
@@ -671,7 +672,7 @@ int __fscache_check_consistency(struct fscache_cookie *cookie)
        if (!op)
                return -ENOMEM;
 
-       fscache_operation_init(op, NULL, NULL);
+       fscache_operation_init(op, NULL, NULL, NULL);
        op->flags = FSCACHE_OP_MYTHREAD |
                (1 << FSCACHE_OP_WAITING) |
                (1 << FSCACHE_OP_UNUSE_COOKIE);
@@ -695,8 +696,7 @@ int __fscache_check_consistency(struct fscache_cookie *cookie)
        /* the work queue now carries its own ref on the object */
        spin_unlock(&cookie->lock);
 
-       ret = fscache_wait_for_operation_activation(object, op,
-                                                   NULL, NULL, NULL);
+       ret = fscache_wait_for_operation_activation(object, op, NULL, NULL);
        if (ret == 0) {
                /* ask the cache to honour the operation */
                ret = object->cache->ops->check_consistency(op);
index 7872a62ef30c1e019b8cc111143b722394a8b23f..97ec4511095765ffa96f98538aedc74cae8fbadf 100644 (file)
@@ -124,8 +124,7 @@ extern int fscache_submit_exclusive_op(struct fscache_object *,
                                       struct fscache_operation *);
 extern int fscache_submit_op(struct fscache_object *,
                             struct fscache_operation *);
-extern int fscache_cancel_op(struct fscache_operation *,
-                            void (*)(struct fscache_operation *));
+extern int fscache_cancel_op(struct fscache_operation *, bool);
 extern void fscache_cancel_all_ops(struct fscache_object *);
 extern void fscache_abort_object(struct fscache_object *);
 extern void fscache_start_operations(struct fscache_object *);
@@ -138,8 +137,7 @@ extern int fscache_wait_for_deferred_lookup(struct fscache_cookie *);
 extern int fscache_wait_for_operation_activation(struct fscache_object *,
                                                 struct fscache_operation *,
                                                 atomic_t *,
-                                                atomic_t *,
-                                                void (*)(struct fscache_operation *));
+                                                atomic_t *);
 extern void fscache_invalidate_writes(struct fscache_cookie *);
 
 /*
@@ -164,6 +162,7 @@ extern atomic_t fscache_n_op_pend;
 extern atomic_t fscache_n_op_run;
 extern atomic_t fscache_n_op_enqueue;
 extern atomic_t fscache_n_op_deferred_release;
+extern atomic_t fscache_n_op_initialised;
 extern atomic_t fscache_n_op_release;
 extern atomic_t fscache_n_op_gc;
 extern atomic_t fscache_n_op_cancelled;
@@ -271,6 +270,11 @@ extern atomic_t fscache_n_cop_write_page;
 extern atomic_t fscache_n_cop_uncache_page;
 extern atomic_t fscache_n_cop_dissociate_pages;
 
+extern atomic_t fscache_n_cache_no_space_reject;
+extern atomic_t fscache_n_cache_stale_objects;
+extern atomic_t fscache_n_cache_retired_objects;
+extern atomic_t fscache_n_cache_culled_objects;
+
 static inline void fscache_stat(atomic_t *stat)
 {
        atomic_inc(stat);
index da032daf0e0d7562f40e5a050685829af68869ac..9e792e30f4db47b38c6db644487c440a2e12febb 100644 (file)
@@ -327,6 +327,17 @@ void fscache_object_init(struct fscache_object *object,
 }
 EXPORT_SYMBOL(fscache_object_init);
 
+/*
+ * Mark the object as no longer being live, making sure that we synchronise
+ * against op submission.
+ */
+static inline void fscache_mark_object_dead(struct fscache_object *object)
+{
+       spin_lock(&object->lock);
+       clear_bit(FSCACHE_OBJECT_IS_LIVE, &object->flags);
+       spin_unlock(&object->lock);
+}
+
 /*
  * Abort object initialisation before we start it.
  */
@@ -610,6 +621,8 @@ static const struct fscache_state *fscache_lookup_failure(struct fscache_object
        object->cache->ops->lookup_complete(object);
        fscache_stat_d(&fscache_n_cop_lookup_complete);
 
+       set_bit(FSCACHE_OBJECT_KILLED_BY_CACHE, &object->flags);
+
        cookie = object->cookie;
        set_bit(FSCACHE_COOKIE_UNAVAILABLE, &cookie->flags);
        if (test_and_clear_bit(FSCACHE_COOKIE_LOOKING_UP, &cookie->flags))
@@ -629,7 +642,7 @@ static const struct fscache_state *fscache_kill_object(struct fscache_object *ob
        _enter("{OBJ%x,%d,%d},%d",
               object->debug_id, object->n_ops, object->n_children, event);
 
-       clear_bit(FSCACHE_OBJECT_IS_LIVE, &object->flags);
+       fscache_mark_object_dead(object);
        object->oob_event_mask = 0;
 
        if (list_empty(&object->dependents) &&
@@ -948,7 +961,8 @@ static const struct fscache_state *_fscache_invalidate_object(struct fscache_obj
        if (!op)
                goto nomem;
 
-       fscache_operation_init(op, object->cache->ops->invalidate_object, NULL);
+       fscache_operation_init(op, object->cache->ops->invalidate_object,
+                              NULL, NULL);
        op->flags = FSCACHE_OP_ASYNC |
                (1 << FSCACHE_OP_EXCLUSIVE) |
                (1 << FSCACHE_OP_UNUSE_COOKIE);
@@ -974,13 +988,13 @@ static const struct fscache_state *_fscache_invalidate_object(struct fscache_obj
        return transit_to(UPDATE_OBJECT);
 
 nomem:
-       clear_bit(FSCACHE_OBJECT_IS_LIVE, &object->flags);
+       fscache_mark_object_dead(object);
        fscache_unuse_cookie(object);
        _leave(" [ENOMEM]");
        return transit_to(KILL_OBJECT);
 
 submit_op_failed:
-       clear_bit(FSCACHE_OBJECT_IS_LIVE, &object->flags);
+       fscache_mark_object_dead(object);
        spin_unlock(&cookie->lock);
        fscache_unuse_cookie(object);
        kfree(op);
@@ -1016,3 +1030,50 @@ static const struct fscache_state *fscache_update_object(struct fscache_object *
        _leave("");
        return transit_to(WAIT_FOR_CMD);
 }
+
+/**
+ * fscache_object_retrying_stale - Note retrying stale object
+ * @object: The object that will be retried
+ *
+ * Note that an object lookup found an on-disk object that was adjudged to be
+ * stale and has been deleted.  The lookup will be retried.
+ */
+void fscache_object_retrying_stale(struct fscache_object *object)
+{
+       fscache_stat(&fscache_n_cache_no_space_reject);
+}
+EXPORT_SYMBOL(fscache_object_retrying_stale);
+
+/**
+ * fscache_object_mark_killed - Note that an object was killed
+ * @object: The object that was culled
+ * @why: The reason the object was killed.
+ *
+ * Note that an object was killed.  Returns true if the object was
+ * already marked killed, false if it wasn't.
+ */
+void fscache_object_mark_killed(struct fscache_object *object,
+                               enum fscache_why_object_killed why)
+{
+       if (test_and_set_bit(FSCACHE_OBJECT_KILLED_BY_CACHE, &object->flags)) {
+               pr_err("Error: Object already killed by cache [%s]\n",
+                      object->cache->identifier);
+               return;
+       }
+
+       switch (why) {
+       case FSCACHE_OBJECT_NO_SPACE:
+               fscache_stat(&fscache_n_cache_no_space_reject);
+               break;
+       case FSCACHE_OBJECT_IS_STALE:
+               fscache_stat(&fscache_n_cache_stale_objects);
+               break;
+       case FSCACHE_OBJECT_WAS_RETIRED:
+               fscache_stat(&fscache_n_cache_retired_objects);
+               break;
+       case FSCACHE_OBJECT_WAS_CULLED:
+               fscache_stat(&fscache_n_cache_culled_objects);
+               break;
+       }
+}
+EXPORT_SYMBOL(fscache_object_mark_killed);
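
fscache_object_mark_killed() above is deliberately one-shot: the test_and_set_bit() ensures only the first caller's reason is counted, and a second kill attempt is reported as an error. A small userspace model of that behaviour (C11 atomics, illustration only, not the kernel helper):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

enum why_killed { NO_SPACE, IS_STALE, WAS_RETIRED, WAS_CULLED };

static atomic_bool killed;
static int stats[4];

/* First caller records the reason; later callers only get a warning. */
static void mark_killed(enum why_killed why)
{
	if (atomic_exchange(&killed, true)) {
		fprintf(stderr, "object already killed\n");
		return;
	}
	stats[why]++;
}

int main(void)
{
	mark_killed(IS_STALE);
	mark_killed(WAS_CULLED);	/* ignored: reason already recorded */
	printf("stale=%d culled=%d\n", stats[IS_STALE], stats[WAS_CULLED]);
	return 0;
}
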
index e7b87a0e5185b53ac304d50b46daf14d8149356c..de67745e1cd7d3fdda98965c9a38d417deb4af66 100644 (file)
 atomic_t fscache_op_debug_id;
 EXPORT_SYMBOL(fscache_op_debug_id);
 
+static void fscache_operation_dummy_cancel(struct fscache_operation *op)
+{
+}
+
+/**
+ * fscache_operation_init - Do basic initialisation of an operation
+ * @op: The operation to initialise
+ * @release: The release function to assign
+ *
+ * Do basic initialisation of an operation.  The caller must still set flags,
+ * object and processor if needed.
+ */
+void fscache_operation_init(struct fscache_operation *op,
+                           fscache_operation_processor_t processor,
+                           fscache_operation_cancel_t cancel,
+                           fscache_operation_release_t release)
+{
+       INIT_WORK(&op->work, fscache_op_work_func);
+       atomic_set(&op->usage, 1);
+       op->state = FSCACHE_OP_ST_INITIALISED;
+       op->debug_id = atomic_inc_return(&fscache_op_debug_id);
+       op->processor = processor;
+       op->cancel = cancel ?: fscache_operation_dummy_cancel;
+       op->release = release;
+       INIT_LIST_HEAD(&op->pend_link);
+       fscache_stat(&fscache_n_op_initialised);
+}
+EXPORT_SYMBOL(fscache_operation_init);
+
 /**
  * fscache_enqueue_operation - Enqueue an operation for processing
  * @op: The operation to enqueue
@@ -75,6 +104,43 @@ static void fscache_run_op(struct fscache_object *object,
        fscache_stat(&fscache_n_op_run);
 }
 
+/*
+ * report an unexpected submission
+ */
+static void fscache_report_unexpected_submission(struct fscache_object *object,
+                                                struct fscache_operation *op,
+                                                const struct fscache_state *ostate)
+{
+       static bool once_only;
+       struct fscache_operation *p;
+       unsigned n;
+
+       if (once_only)
+               return;
+       once_only = true;
+
+       kdebug("unexpected submission OP%x [OBJ%x %s]",
+              op->debug_id, object->debug_id, object->state->name);
+       kdebug("objstate=%s [%s]", object->state->name, ostate->name);
+       kdebug("objflags=%lx", object->flags);
+       kdebug("objevent=%lx [%lx]", object->events, object->event_mask);
+       kdebug("ops=%u inp=%u exc=%u",
+              object->n_ops, object->n_in_progress, object->n_exclusive);
+
+       if (!list_empty(&object->pending_ops)) {
+               n = 0;
+               list_for_each_entry(p, &object->pending_ops, pend_link) {
+                       ASSERTCMP(p->object, ==, object);
+                       kdebug("%p %p", op->processor, op->release);
+                       n++;
+               }
+
+               kdebug("n=%u", n);
+       }
+
+       dump_stack();
+}
+
 /*
  * submit an exclusive operation for an object
  * - other ops are excluded from running simultaneously with this one
@@ -83,6 +149,8 @@ static void fscache_run_op(struct fscache_object *object,
 int fscache_submit_exclusive_op(struct fscache_object *object,
                                struct fscache_operation *op)
 {
+       const struct fscache_state *ostate;
+       unsigned long flags;
        int ret;
 
        _enter("{OBJ%x OP%x},", object->debug_id, op->debug_id);
@@ -95,8 +163,21 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
        ASSERTCMP(object->n_ops, >=, object->n_exclusive);
        ASSERT(list_empty(&op->pend_link));
 
+       ostate = object->state;
+       smp_rmb();
+
        op->state = FSCACHE_OP_ST_PENDING;
-       if (fscache_object_is_active(object)) {
+       flags = READ_ONCE(object->flags);
+       if (unlikely(!(flags & BIT(FSCACHE_OBJECT_IS_LIVE)))) {
+               fscache_stat(&fscache_n_op_rejected);
+               op->cancel(op);
+               op->state = FSCACHE_OP_ST_CANCELLED;
+               ret = -ENOBUFS;
+       } else if (unlikely(fscache_cache_is_broken(object))) {
+               op->cancel(op);
+               op->state = FSCACHE_OP_ST_CANCELLED;
+               ret = -EIO;
+       } else if (flags & BIT(FSCACHE_OBJECT_IS_AVAILABLE)) {
                op->object = object;
                object->n_ops++;
                object->n_exclusive++;  /* reads and writes must wait */
@@ -118,7 +199,7 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
                /* need to issue a new write op after this */
                clear_bit(FSCACHE_OBJECT_PENDING_WRITE, &object->flags);
                ret = 0;
-       } else if (test_bit(FSCACHE_OBJECT_IS_LOOKED_UP, &object->flags)) {
+       } else if (flags & BIT(FSCACHE_OBJECT_IS_LOOKED_UP)) {
                op->object = object;
                object->n_ops++;
                object->n_exclusive++;  /* reads and writes must wait */
@@ -126,55 +207,21 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
                list_add_tail(&op->pend_link, &object->pending_ops);
                fscache_stat(&fscache_n_op_pend);
                ret = 0;
+       } else if (flags & BIT(FSCACHE_OBJECT_KILLED_BY_CACHE)) {
+               op->cancel(op);
+               op->state = FSCACHE_OP_ST_CANCELLED;
+               ret = -ENOBUFS;
        } else {
-               /* If we're in any other state, there must have been an I/O
-                * error of some nature.
-                */
-               ASSERT(test_bit(FSCACHE_IOERROR, &object->cache->flags));
-               ret = -EIO;
+               fscache_report_unexpected_submission(object, op, ostate);
+               op->cancel(op);
+               op->state = FSCACHE_OP_ST_CANCELLED;
+               ret = -ENOBUFS;
        }
 
        spin_unlock(&object->lock);
        return ret;
 }
 
-/*
- * report an unexpected submission
- */
-static void fscache_report_unexpected_submission(struct fscache_object *object,
-                                                struct fscache_operation *op,
-                                                const struct fscache_state *ostate)
-{
-       static bool once_only;
-       struct fscache_operation *p;
-       unsigned n;
-
-       if (once_only)
-               return;
-       once_only = true;
-
-       kdebug("unexpected submission OP%x [OBJ%x %s]",
-              op->debug_id, object->debug_id, object->state->name);
-       kdebug("objstate=%s [%s]", object->state->name, ostate->name);
-       kdebug("objflags=%lx", object->flags);
-       kdebug("objevent=%lx [%lx]", object->events, object->event_mask);
-       kdebug("ops=%u inp=%u exc=%u",
-              object->n_ops, object->n_in_progress, object->n_exclusive);
-
-       if (!list_empty(&object->pending_ops)) {
-               n = 0;
-               list_for_each_entry(p, &object->pending_ops, pend_link) {
-                       ASSERTCMP(p->object, ==, object);
-                       kdebug("%p %p", op->processor, op->release);
-                       n++;
-               }
-
-               kdebug("n=%u", n);
-       }
-
-       dump_stack();
-}
-
 /*
  * submit an operation for an object
  * - objects may be submitted only in the following states:
@@ -187,6 +234,7 @@ int fscache_submit_op(struct fscache_object *object,
                      struct fscache_operation *op)
 {
        const struct fscache_state *ostate;
+       unsigned long flags;
        int ret;
 
        _enter("{OBJ%x OP%x},{%u}",
@@ -204,7 +252,17 @@ int fscache_submit_op(struct fscache_object *object,
        smp_rmb();
 
        op->state = FSCACHE_OP_ST_PENDING;
-       if (fscache_object_is_active(object)) {
+       flags = READ_ONCE(object->flags);
+       if (unlikely(!(flags & BIT(FSCACHE_OBJECT_IS_LIVE)))) {
+               fscache_stat(&fscache_n_op_rejected);
+               op->cancel(op);
+               op->state = FSCACHE_OP_ST_CANCELLED;
+               ret = -ENOBUFS;
+       } else if (unlikely(fscache_cache_is_broken(object))) {
+               op->cancel(op);
+               op->state = FSCACHE_OP_ST_CANCELLED;
+               ret = -EIO;
+       } else if (flags & BIT(FSCACHE_OBJECT_IS_AVAILABLE)) {
                op->object = object;
                object->n_ops++;
 
@@ -222,23 +280,21 @@ int fscache_submit_op(struct fscache_object *object,
                        fscache_run_op(object, op);
                }
                ret = 0;
-       } else if (test_bit(FSCACHE_OBJECT_IS_LOOKED_UP, &object->flags)) {
+       } else if (flags & BIT(FSCACHE_OBJECT_IS_LOOKED_UP)) {
                op->object = object;
                object->n_ops++;
                atomic_inc(&op->usage);
                list_add_tail(&op->pend_link, &object->pending_ops);
                fscache_stat(&fscache_n_op_pend);
                ret = 0;
-       } else if (fscache_object_is_dying(object)) {
-               fscache_stat(&fscache_n_op_rejected);
+       } else if (flags & BIT(FSCACHE_OBJECT_KILLED_BY_CACHE)) {
+               op->cancel(op);
                op->state = FSCACHE_OP_ST_CANCELLED;
                ret = -ENOBUFS;
-       } else if (!test_bit(FSCACHE_IOERROR, &object->cache->flags)) {
+       } else {
                fscache_report_unexpected_submission(object, op, ostate);
                ASSERT(!fscache_object_is_active(object));
-               op->state = FSCACHE_OP_ST_CANCELLED;
-               ret = -ENOBUFS;
-       } else {
+               op->cancel(op);
                op->state = FSCACHE_OP_ST_CANCELLED;
                ret = -ENOBUFS;
        }
@@ -293,9 +349,10 @@ void fscache_start_operations(struct fscache_object *object)
  * cancel an operation that's pending on an object
  */
 int fscache_cancel_op(struct fscache_operation *op,
-                     void (*do_cancel)(struct fscache_operation *))
+                     bool cancel_in_progress_op)
 {
        struct fscache_object *object = op->object;
+       bool put = false;
        int ret;
 
        _enter("OBJ%x OP%x}", op->object->debug_id, op->debug_id);
@@ -309,19 +366,37 @@ int fscache_cancel_op(struct fscache_operation *op,
        ret = -EBUSY;
        if (op->state == FSCACHE_OP_ST_PENDING) {
                ASSERT(!list_empty(&op->pend_link));
-               fscache_stat(&fscache_n_op_cancelled);
                list_del_init(&op->pend_link);
-               if (do_cancel)
-                       do_cancel(op);
+               put = true;
+
+               fscache_stat(&fscache_n_op_cancelled);
+               op->cancel(op);
+               op->state = FSCACHE_OP_ST_CANCELLED;
+               if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags))
+                       object->n_exclusive--;
+               if (test_and_clear_bit(FSCACHE_OP_WAITING, &op->flags))
+                       wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
+               ret = 0;
+       } else if (op->state == FSCACHE_OP_ST_IN_PROGRESS && cancel_in_progress_op) {
+               ASSERTCMP(object->n_in_progress, >, 0);
+               if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags))
+                       object->n_exclusive--;
+               object->n_in_progress--;
+               if (object->n_in_progress == 0)
+                       fscache_start_operations(object);
+
+               fscache_stat(&fscache_n_op_cancelled);
+               op->cancel(op);
                op->state = FSCACHE_OP_ST_CANCELLED;
                if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags))
                        object->n_exclusive--;
                if (test_and_clear_bit(FSCACHE_OP_WAITING, &op->flags))
                        wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
-               fscache_put_operation(op);
                ret = 0;
        }
 
+       if (put)
+               fscache_put_operation(op);
        spin_unlock(&object->lock);
        _leave(" = %d", ret);
        return ret;
@@ -345,6 +420,7 @@ void fscache_cancel_all_ops(struct fscache_object *object)
                list_del_init(&op->pend_link);
 
                ASSERTCMP(op->state, ==, FSCACHE_OP_ST_PENDING);
+               op->cancel(op);
                op->state = FSCACHE_OP_ST_CANCELLED;
 
                if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags))
@@ -377,8 +453,12 @@ void fscache_op_complete(struct fscache_operation *op, bool cancelled)
 
        spin_lock(&object->lock);
 
-       op->state = cancelled ?
-               FSCACHE_OP_ST_CANCELLED : FSCACHE_OP_ST_COMPLETE;
+       if (!cancelled) {
+               op->state = FSCACHE_OP_ST_COMPLETE;
+       } else {
+               op->cancel(op);
+               op->state = FSCACHE_OP_ST_CANCELLED;
+       }
 
        if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags))
                object->n_exclusive--;
@@ -409,9 +489,9 @@ void fscache_put_operation(struct fscache_operation *op)
                return;
 
        _debug("PUT OP");
-       ASSERTIFCMP(op->state != FSCACHE_OP_ST_COMPLETE,
+       ASSERTIFCMP(op->state != FSCACHE_OP_ST_INITIALISED &&
+                   op->state != FSCACHE_OP_ST_COMPLETE,
                    op->state, ==, FSCACHE_OP_ST_CANCELLED);
-       op->state = FSCACHE_OP_ST_DEAD;
 
        fscache_stat(&fscache_n_op_release);
 
@@ -419,37 +499,39 @@ void fscache_put_operation(struct fscache_operation *op)
                op->release(op);
                op->release = NULL;
        }
+       op->state = FSCACHE_OP_ST_DEAD;
 
        object = op->object;
+       if (likely(object)) {
+               if (test_bit(FSCACHE_OP_DEC_READ_CNT, &op->flags))
+                       atomic_dec(&object->n_reads);
+               if (test_bit(FSCACHE_OP_UNUSE_COOKIE, &op->flags))
+                       fscache_unuse_cookie(object);
+
+               /* now... we may get called with the object spinlock held, so we
+                * complete the cleanup here only if we can immediately acquire the
+                * lock, and defer it otherwise */
+               if (!spin_trylock(&object->lock)) {
+                       _debug("defer put");
+                       fscache_stat(&fscache_n_op_deferred_release);
+
+                       cache = object->cache;
+                       spin_lock(&cache->op_gc_list_lock);
+                       list_add_tail(&op->pend_link, &cache->op_gc_list);
+                       spin_unlock(&cache->op_gc_list_lock);
+                       schedule_work(&cache->op_gc);
+                       _leave(" [defer]");
+                       return;
+               }
 
-       if (test_bit(FSCACHE_OP_DEC_READ_CNT, &op->flags))
-               atomic_dec(&object->n_reads);
-       if (test_bit(FSCACHE_OP_UNUSE_COOKIE, &op->flags))
-               fscache_unuse_cookie(object);
-
-       /* now... we may get called with the object spinlock held, so we
-        * complete the cleanup here only if we can immediately acquire the
-        * lock, and defer it otherwise */
-       if (!spin_trylock(&object->lock)) {
-               _debug("defer put");
-               fscache_stat(&fscache_n_op_deferred_release);
+               ASSERTCMP(object->n_ops, >, 0);
+               object->n_ops--;
+               if (object->n_ops == 0)
+                       fscache_raise_event(object, FSCACHE_OBJECT_EV_CLEARED);
 
-               cache = object->cache;
-               spin_lock(&cache->op_gc_list_lock);
-               list_add_tail(&op->pend_link, &cache->op_gc_list);
-               spin_unlock(&cache->op_gc_list_lock);
-               schedule_work(&cache->op_gc);
-               _leave(" [defer]");
-               return;
+               spin_unlock(&object->lock);
        }
 
-       ASSERTCMP(object->n_ops, >, 0);
-       object->n_ops--;
-       if (object->n_ops == 0)
-               fscache_raise_event(object, FSCACHE_OBJECT_EV_CLEARED);
-
-       spin_unlock(&object->lock);
-
        kfree(op);
        _leave(" [done]");
 }
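
The common theme of the operation.c changes above is that every operation now carries its own cancellation handler, assigned once in fscache_operation_init() (with a dummy default), so code that cancels an op can always call op->cancel(op) instead of being handed a do_cancel callback at each cancellation site. A userspace model of that shape (illustrative types and names, not the fscache API):

#include <stdio.h>

struct operation {
	void (*processor)(struct operation *);
	void (*cancel)(struct operation *);
	void (*release)(struct operation *);
};

static void dummy_cancel(struct operation *op)
{
	(void)op;	/* nothing to undo */
}

static void operation_init(struct operation *op,
			   void (*processor)(struct operation *),
			   void (*cancel)(struct operation *),
			   void (*release)(struct operation *))
{
	op->processor = processor;
	op->cancel = cancel ? cancel : dummy_cancel;	/* never NULL */
	op->release = release;
}

static void my_cancel(struct operation *op)
{
	(void)op;
	puts("undoing per-operation bookkeeping");
}

/* Every cancellation site can now just invoke the attached handler. */
static void cancel_op(struct operation *op)
{
	op->cancel(op);
}

int main(void)
{
	struct operation op;

	operation_init(&op, NULL, my_cancel, NULL);
	cancel_op(&op);

	operation_init(&op, NULL, NULL, NULL);	/* falls back to the dummy */
	cancel_op(&op);
	return 0;
}
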
index de33b3fccca650da99ee850597d5aef437b3a6fe..483bbc613bf04528cbc66d5d54a5b54b6a8dc85b 100644 (file)
@@ -213,7 +213,7 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
                return -ENOMEM;
        }
 
-       fscache_operation_init(op, fscache_attr_changed_op, NULL);
+       fscache_operation_init(op, fscache_attr_changed_op, NULL, NULL);
        op->flags = FSCACHE_OP_ASYNC |
                (1 << FSCACHE_OP_EXCLUSIVE) |
                (1 << FSCACHE_OP_UNUSE_COOKIE);
@@ -239,7 +239,7 @@ nobufs_dec:
        wake_cookie = __fscache_unuse_cookie(cookie);
 nobufs:
        spin_unlock(&cookie->lock);
-       kfree(op);
+       fscache_put_operation(op);
        if (wake_cookie)
                __fscache_wake_unused_cookie(cookie);
        fscache_stat(&fscache_n_attr_changed_nobufs);
@@ -248,6 +248,17 @@ nobufs:
 }
 EXPORT_SYMBOL(__fscache_attr_changed);
 
+/*
+ * Handle cancellation of a pending retrieval op
+ */
+static void fscache_do_cancel_retrieval(struct fscache_operation *_op)
+{
+       struct fscache_retrieval *op =
+               container_of(_op, struct fscache_retrieval, op);
+
+       atomic_set(&op->n_pages, 0);
+}
+
 /*
  * release a retrieval op reference
  */
@@ -258,11 +269,12 @@ static void fscache_release_retrieval_op(struct fscache_operation *_op)
 
        _enter("{OP%x}", op->op.debug_id);
 
-       ASSERTCMP(atomic_read(&op->n_pages), ==, 0);
+       ASSERTIFCMP(op->op.state != FSCACHE_OP_ST_INITIALISED,
+                   atomic_read(&op->n_pages), ==, 0);
 
        fscache_hist(fscache_retrieval_histogram, op->start_time);
        if (op->context)
-               fscache_put_context(op->op.object->cookie, op->context);
+               fscache_put_context(op->cookie, op->context);
 
        _leave("");
 }
@@ -285,15 +297,24 @@ static struct fscache_retrieval *fscache_alloc_retrieval(
                return NULL;
        }
 
-       fscache_operation_init(&op->op, NULL, fscache_release_retrieval_op);
+       fscache_operation_init(&op->op, NULL,
+                              fscache_do_cancel_retrieval,
+                              fscache_release_retrieval_op);
        op->op.flags    = FSCACHE_OP_MYTHREAD |
                (1UL << FSCACHE_OP_WAITING) |
                (1UL << FSCACHE_OP_UNUSE_COOKIE);
+       op->cookie      = cookie;
        op->mapping     = mapping;
        op->end_io_func = end_io_func;
        op->context     = context;
        op->start_time  = jiffies;
        INIT_LIST_HEAD(&op->to_do);
+
+       /* Pin the netfs read context in case we need to do the actual netfs
+        * read because we've encountered a cache read failure.
+        */
+       if (context)
+               fscache_get_context(op->cookie, context);
        return op;
 }
 
@@ -329,25 +350,13 @@ int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie)
        return 0;
 }
 
-/*
- * Handle cancellation of a pending retrieval op
- */
-static void fscache_do_cancel_retrieval(struct fscache_operation *_op)
-{
-       struct fscache_retrieval *op =
-               container_of(_op, struct fscache_retrieval, op);
-
-       atomic_set(&op->n_pages, 0);
-}
-
 /*
  * wait for an object to become active (or dead)
  */
 int fscache_wait_for_operation_activation(struct fscache_object *object,
                                          struct fscache_operation *op,
                                          atomic_t *stat_op_waits,
-                                         atomic_t *stat_object_dead,
-                                         void (*do_cancel)(struct fscache_operation *))
+                                         atomic_t *stat_object_dead)
 {
        int ret;
 
@@ -359,7 +368,7 @@ int fscache_wait_for_operation_activation(struct fscache_object *object,
                fscache_stat(stat_op_waits);
        if (wait_on_bit(&op->flags, FSCACHE_OP_WAITING,
                        TASK_INTERRUPTIBLE) != 0) {
-               ret = fscache_cancel_op(op, do_cancel);
+               ret = fscache_cancel_op(op, false);
                if (ret == 0)
                        return -ERESTARTSYS;
 
@@ -377,11 +386,13 @@ check_if_dead:
                _leave(" = -ENOBUFS [cancelled]");
                return -ENOBUFS;
        }
-       if (unlikely(fscache_object_is_dead(object))) {
-               pr_err("%s() = -ENOBUFS [obj dead %d]\n", __func__, op->state);
-               fscache_cancel_op(op, do_cancel);
+       if (unlikely(fscache_object_is_dying(object) ||
+                    fscache_cache_is_broken(object))) {
+               enum fscache_operation_state state = op->state;
+               fscache_cancel_op(op, true);
                if (stat_object_dead)
                        fscache_stat(stat_object_dead);
+               _leave(" = -ENOBUFS [obj dead %d]", state);
                return -ENOBUFS;
        }
        return 0;
@@ -453,17 +464,12 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
 
        fscache_stat(&fscache_n_retrieval_ops);
 
-       /* pin the netfs read context in case we need to do the actual netfs
-        * read because we've encountered a cache read failure */
-       fscache_get_context(object->cookie, op->context);
-
        /* we wait for the operation to become active, and then process it
         * *here*, in this thread, and not in the thread pool */
        ret = fscache_wait_for_operation_activation(
                object, &op->op,
                __fscache_stat(&fscache_n_retrieval_op_waits),
-               __fscache_stat(&fscache_n_retrievals_object_dead),
-               fscache_do_cancel_retrieval);
+               __fscache_stat(&fscache_n_retrievals_object_dead));
        if (ret < 0)
                goto error;
 
@@ -503,7 +509,7 @@ nobufs_unlock:
        spin_unlock(&cookie->lock);
        if (wake_cookie)
                __fscache_wake_unused_cookie(cookie);
-       kfree(op);
+       fscache_put_retrieval(op);
 nobufs:
        fscache_stat(&fscache_n_retrievals_nobufs);
        _leave(" = -ENOBUFS");
@@ -584,17 +590,12 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
 
        fscache_stat(&fscache_n_retrieval_ops);
 
-       /* pin the netfs read context in case we need to do the actual netfs
-        * read because we've encountered a cache read failure */
-       fscache_get_context(object->cookie, op->context);
-
        /* we wait for the operation to become active, and then process it
         * *here*, in this thread, and not in the thread pool */
        ret = fscache_wait_for_operation_activation(
                object, &op->op,
                __fscache_stat(&fscache_n_retrieval_op_waits),
-               __fscache_stat(&fscache_n_retrievals_object_dead),
-               fscache_do_cancel_retrieval);
+               __fscache_stat(&fscache_n_retrievals_object_dead));
        if (ret < 0)
                goto error;
 
@@ -632,7 +633,7 @@ nobufs_unlock_dec:
        wake_cookie = __fscache_unuse_cookie(cookie);
 nobufs_unlock:
        spin_unlock(&cookie->lock);
-       kfree(op);
+       fscache_put_retrieval(op);
        if (wake_cookie)
                __fscache_wake_unused_cookie(cookie);
 nobufs:
@@ -700,8 +701,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
        ret = fscache_wait_for_operation_activation(
                object, &op->op,
                __fscache_stat(&fscache_n_alloc_op_waits),
-               __fscache_stat(&fscache_n_allocs_object_dead),
-               fscache_do_cancel_retrieval);
+               __fscache_stat(&fscache_n_allocs_object_dead));
        if (ret < 0)
                goto error;
 
@@ -726,7 +726,7 @@ nobufs_unlock_dec:
        wake_cookie = __fscache_unuse_cookie(cookie);
 nobufs_unlock:
        spin_unlock(&cookie->lock);
-       kfree(op);
+       fscache_put_retrieval(op);
        if (wake_cookie)
                __fscache_wake_unused_cookie(cookie);
 nobufs:
@@ -944,7 +944,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
        if (!op)
                goto nomem;
 
-       fscache_operation_init(&op->op, fscache_write_op,
+       fscache_operation_init(&op->op, fscache_write_op, NULL,
                               fscache_release_write_op);
        op->op.flags = FSCACHE_OP_ASYNC |
                (1 << FSCACHE_OP_WAITING) |
@@ -1016,7 +1016,7 @@ already_pending:
        spin_unlock(&object->lock);
        spin_unlock(&cookie->lock);
        radix_tree_preload_end();
-       kfree(op);
+       fscache_put_operation(&op->op);
        fscache_stat(&fscache_n_stores_ok);
        _leave(" = 0");
        return 0;
@@ -1036,7 +1036,7 @@ nobufs_unlock_obj:
 nobufs:
        spin_unlock(&cookie->lock);
        radix_tree_preload_end();
-       kfree(op);
+       fscache_put_operation(&op->op);
        if (wake_cookie)
                __fscache_wake_unused_cookie(cookie);
        fscache_stat(&fscache_n_stores_nobufs);
@@ -1044,7 +1044,7 @@ nobufs:
        return -ENOBUFS;
 
 nomem_free:
-       kfree(op);
+       fscache_put_operation(&op->op);
 nomem:
        fscache_stat(&fscache_n_stores_oom);
        _leave(" = -ENOMEM");
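
In the page.c hunks above the netfs read context is now pinned inside fscache_alloc_retrieval(), so every error path has to unwind through fscache_put_retrieval()/fscache_put_operation() rather than a bare kfree(), otherwise the pin taken at allocation time would leak. A userspace sketch of that error-path rule (illustrative, not the fscache code):

#include <stdio.h>
#include <stdlib.h>

struct retrieval {
	int refs;
	int context_pinned;
};

static struct retrieval *alloc_retrieval(void)
{
	struct retrieval *op = calloc(1, sizeof(*op));

	if (!op)
		return NULL;
	op->refs = 1;
	op->context_pinned = 1;		/* pinned at allocation, not at submission */
	return op;
}

/* All exit paths must come through here so the paired cleanup runs. */
static void put_retrieval(struct retrieval *op)
{
	if (--op->refs)
		return;
	if (op->context_pinned)
		puts("dropping pinned read context");
	free(op);
}

int main(void)
{
	struct retrieval *op = alloc_retrieval();

	if (!op)
		return 1;
	/* submission failed: unwind via put_retrieval(), not free() */
	put_retrieval(op);
	return 0;
}
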
index 40d13c70ef518e492565ae59a86ee758244cdd65..7cfa0aacdf6d53c41768150f80f27bc2b7cb18c0 100644 (file)
@@ -23,6 +23,7 @@ atomic_t fscache_n_op_run;
 atomic_t fscache_n_op_enqueue;
 atomic_t fscache_n_op_requeue;
 atomic_t fscache_n_op_deferred_release;
+atomic_t fscache_n_op_initialised;
 atomic_t fscache_n_op_release;
 atomic_t fscache_n_op_gc;
 atomic_t fscache_n_op_cancelled;
@@ -130,6 +131,11 @@ atomic_t fscache_n_cop_write_page;
 atomic_t fscache_n_cop_uncache_page;
 atomic_t fscache_n_cop_dissociate_pages;
 
+atomic_t fscache_n_cache_no_space_reject;
+atomic_t fscache_n_cache_stale_objects;
+atomic_t fscache_n_cache_retired_objects;
+atomic_t fscache_n_cache_culled_objects;
+
 /*
  * display the general statistics
  */
@@ -246,7 +252,8 @@ static int fscache_stats_show(struct seq_file *m, void *v)
                   atomic_read(&fscache_n_op_enqueue),
                   atomic_read(&fscache_n_op_cancelled),
                   atomic_read(&fscache_n_op_rejected));
-       seq_printf(m, "Ops    : dfr=%u rel=%u gc=%u\n",
+       seq_printf(m, "Ops    : ini=%u dfr=%u rel=%u gc=%u\n",
+                  atomic_read(&fscache_n_op_initialised),
                   atomic_read(&fscache_n_op_deferred_release),
                   atomic_read(&fscache_n_op_release),
                   atomic_read(&fscache_n_op_gc));
@@ -271,6 +278,11 @@ static int fscache_stats_show(struct seq_file *m, void *v)
                   atomic_read(&fscache_n_cop_write_page),
                   atomic_read(&fscache_n_cop_uncache_page),
                   atomic_read(&fscache_n_cop_dissociate_pages));
+       seq_printf(m, "CacheEv: nsp=%d stl=%d rtr=%d cul=%d\n",
+                  atomic_read(&fscache_n_cache_no_space_reject),
+                  atomic_read(&fscache_n_cache_stale_objects),
+                  atomic_read(&fscache_n_cache_retired_objects),
+                  atomic_read(&fscache_n_cache_culled_objects));
        return 0;
 }
 
index e5bbf748b6987a922fa211e3edb083346927f3b6..eae2c11268bcb484075cfd08482beeb172dd66bc 100644 (file)
@@ -489,6 +489,7 @@ static void cuse_fc_release(struct fuse_conn *fc)
  */
 static int cuse_channel_open(struct inode *inode, struct file *file)
 {
+       struct fuse_dev *fud;
        struct cuse_conn *cc;
        int rc;
 
@@ -499,17 +500,22 @@ static int cuse_channel_open(struct inode *inode, struct file *file)
 
        fuse_conn_init(&cc->fc);
 
+       fud = fuse_dev_alloc(&cc->fc);
+       if (!fud) {
+               kfree(cc);
+               return -ENOMEM;
+       }
+
        INIT_LIST_HEAD(&cc->list);
        cc->fc.release = cuse_fc_release;
 
-       cc->fc.connected = 1;
        cc->fc.initialized = 1;
        rc = cuse_send_init(cc);
        if (rc) {
-               fuse_conn_put(&cc->fc);
+               fuse_dev_free(fud);
                return rc;
        }
-       file->private_data = &cc->fc;   /* channel owns base reference to cc */
+       file->private_data = fud;
 
        return 0;
 }
@@ -527,7 +533,8 @@ static int cuse_channel_open(struct inode *inode, struct file *file)
  */
 static int cuse_channel_release(struct inode *inode, struct file *file)
 {
-       struct cuse_conn *cc = fc_to_cc(file->private_data);
+       struct fuse_dev *fud = file->private_data;
+       struct cuse_conn *cc = fc_to_cc(fud->fc);
        int rc;
 
        /* remove from the conntbl, no more access from this point on */
index c8b68ab2e574a86f13fab97f9ed47b14a4e139d6..80cc1b35d46043c16bc456e0cadf61e76c281d52 100644 (file)
@@ -25,13 +25,13 @@ MODULE_ALIAS("devname:fuse");
 
 static struct kmem_cache *fuse_req_cachep;
 
-static struct fuse_conn *fuse_get_conn(struct file *file)
+static struct fuse_dev *fuse_get_dev(struct file *file)
 {
        /*
         * Lockless access is OK, because file->private data is set
         * once during mount and is valid until the file is released.
         */
-       return file->private_data;
+       return ACCESS_ONCE(file->private_data);
 }
 
 static void fuse_request_init(struct fuse_req *req, struct page **pages,
@@ -48,6 +48,7 @@ static void fuse_request_init(struct fuse_req *req, struct page **pages,
        req->pages = pages;
        req->page_descs = page_descs;
        req->max_pages = npages;
+       __set_bit(FR_PENDING, &req->flags);
 }
 
 static struct fuse_req *__fuse_request_alloc(unsigned npages, gfp_t flags)
@@ -168,6 +169,10 @@ static struct fuse_req *__fuse_get_req(struct fuse_conn *fc, unsigned npages,
        if (!fc->connected)
                goto out;
 
+       err = -ECONNREFUSED;
+       if (fc->conn_error)
+               goto out;
+
        req = fuse_request_alloc(npages);
        err = -ENOMEM;
        if (!req) {
@@ -177,8 +182,10 @@ static struct fuse_req *__fuse_get_req(struct fuse_conn *fc, unsigned npages,
        }
 
        fuse_req_init_context(req);
-       req->waiting = 1;
-       req->background = for_background;
+       __set_bit(FR_WAITING, &req->flags);
+       if (for_background)
+               __set_bit(FR_BACKGROUND, &req->flags);
+
        return req;
 
  out:
@@ -268,15 +275,15 @@ struct fuse_req *fuse_get_req_nofail_nopages(struct fuse_conn *fc,
                req = get_reserved_req(fc, file);
 
        fuse_req_init_context(req);
-       req->waiting = 1;
-       req->background = 0;
+       __set_bit(FR_WAITING, &req->flags);
+       __clear_bit(FR_BACKGROUND, &req->flags);
        return req;
 }
 
 void fuse_put_request(struct fuse_conn *fc, struct fuse_req *req)
 {
        if (atomic_dec_and_test(&req->count)) {
-               if (unlikely(req->background)) {
+               if (test_bit(FR_BACKGROUND, &req->flags)) {
                        /*
                         * We get here in the unlikely case that a background
                         * request was allocated but not sent
@@ -287,8 +294,10 @@ void fuse_put_request(struct fuse_conn *fc, struct fuse_req *req)
                        spin_unlock(&fc->lock);
                }
 
-               if (req->waiting)
+               if (test_bit(FR_WAITING, &req->flags)) {
+                       __clear_bit(FR_WAITING, &req->flags);
                        atomic_dec(&fc->num_waiting);
+               }
 
                if (req->stolen_file)
                        put_reserved_req(fc, req);
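
The fuse hunks above replace per-request bool fields (req->waiting, req->background, ...) with FR_* bits in req->flags manipulated through set_bit()/test_bit(), so request state can be inspected and updated without holding fc->lock. A minimal userspace analogue of the flag-word idiom using C11 atomics (the kernel bitops differ in detail, e.g. memory ordering and per-word addressing):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

enum { FR_WAITING, FR_BACKGROUND, FR_FINISHED };

static _Atomic unsigned long flags;

static void set_flag(int bit)
{
	atomic_fetch_or(&flags, 1UL << bit);
}

static bool test_flag(int bit)
{
	return atomic_load(&flags) & (1UL << bit);
}

int main(void)
{
	set_flag(FR_WAITING);
	set_flag(FR_BACKGROUND);
	printf("waiting=%d background=%d finished=%d\n",
	       test_flag(FR_WAITING), test_flag(FR_BACKGROUND),
	       test_flag(FR_FINISHED));
	return 0;
}
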
@@ -309,46 +318,38 @@ static unsigned len_args(unsigned numargs, struct fuse_arg *args)
        return nbytes;
 }
 
-static u64 fuse_get_unique(struct fuse_conn *fc)
+static u64 fuse_get_unique(struct fuse_iqueue *fiq)
 {
-       fc->reqctr++;
-       /* zero is special */
-       if (fc->reqctr == 0)
-               fc->reqctr = 1;
-
-       return fc->reqctr;
+       return ++fiq->reqctr;
 }
 
-static void queue_request(struct fuse_conn *fc, struct fuse_req *req)
+static void queue_request(struct fuse_iqueue *fiq, struct fuse_req *req)
 {
        req->in.h.len = sizeof(struct fuse_in_header) +
                len_args(req->in.numargs, (struct fuse_arg *) req->in.args);
-       list_add_tail(&req->list, &fc->pending);
-       req->state = FUSE_REQ_PENDING;
-       if (!req->waiting) {
-               req->waiting = 1;
-               atomic_inc(&fc->num_waiting);
-       }
-       wake_up(&fc->waitq);
-       kill_fasync(&fc->fasync, SIGIO, POLL_IN);
+       list_add_tail(&req->list, &fiq->pending);
+       wake_up_locked(&fiq->waitq);
+       kill_fasync(&fiq->fasync, SIGIO, POLL_IN);
 }
 
 void fuse_queue_forget(struct fuse_conn *fc, struct fuse_forget_link *forget,
                       u64 nodeid, u64 nlookup)
 {
+       struct fuse_iqueue *fiq = &fc->iq;
+
        forget->forget_one.nodeid = nodeid;
        forget->forget_one.nlookup = nlookup;
 
-       spin_lock(&fc->lock);
-       if (fc->connected) {
-               fc->forget_list_tail->next = forget;
-               fc->forget_list_tail = forget;
-               wake_up(&fc->waitq);
-               kill_fasync(&fc->fasync, SIGIO, POLL_IN);
+       spin_lock(&fiq->waitq.lock);
+       if (fiq->connected) {
+               fiq->forget_list_tail->next = forget;
+               fiq->forget_list_tail = forget;
+               wake_up_locked(&fiq->waitq);
+               kill_fasync(&fiq->fasync, SIGIO, POLL_IN);
        } else {
                kfree(forget);
        }
-       spin_unlock(&fc->lock);
+       spin_unlock(&fiq->waitq.lock);
 }
 
 static void flush_bg_queue(struct fuse_conn *fc)
@@ -356,12 +357,15 @@ static void flush_bg_queue(struct fuse_conn *fc)
        while (fc->active_background < fc->max_background &&
               !list_empty(&fc->bg_queue)) {
                struct fuse_req *req;
+               struct fuse_iqueue *fiq = &fc->iq;
 
                req = list_entry(fc->bg_queue.next, struct fuse_req, list);
                list_del(&req->list);
                fc->active_background++;
-               req->in.h.unique = fuse_get_unique(fc);
-               queue_request(fc, req);
+               spin_lock(&fiq->waitq.lock);
+               req->in.h.unique = fuse_get_unique(fiq);
+               queue_request(fiq, req);
+               spin_unlock(&fiq->waitq.lock);
        }
 }
 
@@ -372,20 +376,22 @@ static void flush_bg_queue(struct fuse_conn *fc)
  * was closed.  The requester thread is woken up (if still waiting),
  * the 'end' callback is called if given, else the reference to the
  * request is released
- *
- * Called with fc->lock, unlocks it
  */
 static void request_end(struct fuse_conn *fc, struct fuse_req *req)
-__releases(fc->lock)
 {
-       void (*end) (struct fuse_conn *, struct fuse_req *) = req->end;
-       req->end = NULL;
-       list_del(&req->list);
-       list_del(&req->intr_entry);
-       req->state = FUSE_REQ_FINISHED;
-       if (req->background) {
-               req->background = 0;
+       struct fuse_iqueue *fiq = &fc->iq;
+
+       if (test_and_set_bit(FR_FINISHED, &req->flags))
+               return;
 
+       spin_lock(&fiq->waitq.lock);
+       list_del_init(&req->intr_entry);
+       spin_unlock(&fiq->waitq.lock);
+       WARN_ON(test_bit(FR_PENDING, &req->flags));
+       WARN_ON(test_bit(FR_SENT, &req->flags));
+       if (test_bit(FR_BACKGROUND, &req->flags)) {
+               spin_lock(&fc->lock);
+               clear_bit(FR_BACKGROUND, &req->flags);
                if (fc->num_background == fc->max_background)
                        fc->blocked = 0;
 
@@ -401,122 +407,105 @@ __releases(fc->lock)
                fc->num_background--;
                fc->active_background--;
                flush_bg_queue(fc);
+               spin_unlock(&fc->lock);
        }
-       spin_unlock(&fc->lock);
        wake_up(&req->waitq);
-       if (end)
-               end(fc, req);
+       if (req->end)
+               req->end(fc, req);
        fuse_put_request(fc, req);
 }
 
-static void wait_answer_interruptible(struct fuse_conn *fc,
-                                     struct fuse_req *req)
-__releases(fc->lock)
-__acquires(fc->lock)
-{
-       if (signal_pending(current))
-               return;
-
-       spin_unlock(&fc->lock);
-       wait_event_interruptible(req->waitq, req->state == FUSE_REQ_FINISHED);
-       spin_lock(&fc->lock);
-}
-
-static void queue_interrupt(struct fuse_conn *fc, struct fuse_req *req)
+static void queue_interrupt(struct fuse_iqueue *fiq, struct fuse_req *req)
 {
-       list_add_tail(&req->intr_entry, &fc->interrupts);
-       wake_up(&fc->waitq);
-       kill_fasync(&fc->fasync, SIGIO, POLL_IN);
+       spin_lock(&fiq->waitq.lock);
+       if (list_empty(&req->intr_entry)) {
+               list_add_tail(&req->intr_entry, &fiq->interrupts);
+               wake_up_locked(&fiq->waitq);
+       }
+       spin_unlock(&fiq->waitq.lock);
+       kill_fasync(&fiq->fasync, SIGIO, POLL_IN);
 }
 
 static void request_wait_answer(struct fuse_conn *fc, struct fuse_req *req)
-__releases(fc->lock)
-__acquires(fc->lock)
 {
+       struct fuse_iqueue *fiq = &fc->iq;
+       int err;
+
        if (!fc->no_interrupt) {
                /* Any signal may interrupt this */
-               wait_answer_interruptible(fc, req);
-
-               if (req->aborted)
-                       goto aborted;
-               if (req->state == FUSE_REQ_FINISHED)
+               err = wait_event_interruptible(req->waitq,
+                                       test_bit(FR_FINISHED, &req->flags));
+               if (!err)
                        return;
 
-               req->interrupted = 1;
-               if (req->state == FUSE_REQ_SENT)
-                       queue_interrupt(fc, req);
+               set_bit(FR_INTERRUPTED, &req->flags);
+               /* matches barrier in fuse_dev_do_read() */
+               smp_mb__after_atomic();
+               if (test_bit(FR_SENT, &req->flags))
+                       queue_interrupt(fiq, req);
        }
 
-       if (!req->force) {
+       if (!test_bit(FR_FORCE, &req->flags)) {
                sigset_t oldset;
 
                /* Only fatal signals may interrupt this */
                block_sigs(&oldset);
-               wait_answer_interruptible(fc, req);
+               err = wait_event_interruptible(req->waitq,
+                                       test_bit(FR_FINISHED, &req->flags));
                restore_sigs(&oldset);
 
-               if (req->aborted)
-                       goto aborted;
-               if (req->state == FUSE_REQ_FINISHED)
+               if (!err)
                        return;
 
+               spin_lock(&fiq->waitq.lock);
                /* Request is not yet in userspace, bail out */
-               if (req->state == FUSE_REQ_PENDING) {
+               if (test_bit(FR_PENDING, &req->flags)) {
                        list_del(&req->list);
+                       spin_unlock(&fiq->waitq.lock);
                        __fuse_put_request(req);
                        req->out.h.error = -EINTR;
                        return;
                }
+               spin_unlock(&fiq->waitq.lock);
        }
 
        /*
         * Either request is already in userspace, or it was forced.
         * Wait it out.
         */
-       spin_unlock(&fc->lock);
-       wait_event(req->waitq, req->state == FUSE_REQ_FINISHED);
-       spin_lock(&fc->lock);
-
-       if (!req->aborted)
-               return;
-
- aborted:
-       BUG_ON(req->state != FUSE_REQ_FINISHED);
-       if (req->locked) {
-               /* This is uninterruptible sleep, because data is
-                  being copied to/from the buffers of req.  During
-                  locked state, there mustn't be any filesystem
-                  operation (e.g. page fault), since that could lead
-                  to deadlock */
-               spin_unlock(&fc->lock);
-               wait_event(req->waitq, !req->locked);
-               spin_lock(&fc->lock);
-       }
+       wait_event(req->waitq, test_bit(FR_FINISHED, &req->flags));
 }
 
 static void __fuse_request_send(struct fuse_conn *fc, struct fuse_req *req)
 {
-       BUG_ON(req->background);
-       spin_lock(&fc->lock);
-       if (!fc->connected)
+       struct fuse_iqueue *fiq = &fc->iq;
+
+       BUG_ON(test_bit(FR_BACKGROUND, &req->flags));
+       spin_lock(&fiq->waitq.lock);
+       if (!fiq->connected) {
+               spin_unlock(&fiq->waitq.lock);
                req->out.h.error = -ENOTCONN;
-       else if (fc->conn_error)
-               req->out.h.error = -ECONNREFUSED;
-       else {
-               req->in.h.unique = fuse_get_unique(fc);
-               queue_request(fc, req);
+       } else {
+               req->in.h.unique = fuse_get_unique(fiq);
+               queue_request(fiq, req);
                /* acquire extra reference, since request is still needed
                   after request_end() */
                __fuse_get_request(req);
+               spin_unlock(&fiq->waitq.lock);
 
                request_wait_answer(fc, req);
+               /* Pairs with smp_wmb() in request_end() */
+               smp_rmb();
        }
-       spin_unlock(&fc->lock);
 }
 
 void fuse_request_send(struct fuse_conn *fc, struct fuse_req *req)
 {
-       req->isreply = 1;
+       __set_bit(FR_ISREPLY, &req->flags);
+       if (!test_bit(FR_WAITING, &req->flags)) {
+               __set_bit(FR_WAITING, &req->flags);
+               atomic_inc(&fc->num_waiting);
+       }
        __fuse_request_send(fc, req);
 }
 EXPORT_SYMBOL_GPL(fuse_request_send);
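The two "matches barrier" comments above and in fuse_dev_do_read() form a store/load handshake: the waiter sets FR_INTERRUPTED and then tests FR_SENT, the reader sets FR_SENT and then tests FR_INTERRUPTED, each with a full barrier after its store, so at least one side is guaranteed to observe the other and queue the interrupt. Below is a minimal, runnable userspace analogue of that pattern, using C11 seq_cst atomics in place of the kernel's set_bit()/smp_mb__after_atomic(); all names here are illustrative, not from the patch.

#include <stdatomic.h>
#include <stdbool.h>

/* Illustrative stand-ins for the FR_INTERRUPTED / FR_SENT bits. */
static atomic_bool interrupted;
static atomic_bool sent;
static _Atomic int interrupts_queued;

/* Waiter side, cf. request_wait_answer() above. */
static void waiter_side(void)
{
	atomic_store(&interrupted, true);        /* set_bit(FR_INTERRUPTED)  */
	if (atomic_load(&sent))                  /* test_bit(FR_SENT)        */
		atomic_fetch_add(&interrupts_queued, 1); /* queue_interrupt() */
}

/* Reader side, cf. fuse_dev_do_read() further down. */
static void reader_side(void)
{
	atomic_store(&sent, true);               /* set_bit(FR_SENT)         */
	if (atomic_load(&interrupted))           /* test_bit(FR_INTERRUPTED) */
		atomic_fetch_add(&interrupts_queued, 1); /* queue_interrupt() */
}

int main(void)
{
	/* In the kernel these run on different CPUs; sequential here only to
	 * keep the example self-contained. */
	waiter_side();
	reader_side();
	return interrupts_queued ? 0 : 1;
}

With sequentially consistent atomics the store-buffering outcome in which both loads miss is forbidden, which is the same guarantee the kernel barriers provide here.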
@@ -586,10 +575,20 @@ ssize_t fuse_simple_request(struct fuse_conn *fc, struct fuse_args *args)
        return ret;
 }
 
-static void fuse_request_send_nowait_locked(struct fuse_conn *fc,
-                                           struct fuse_req *req)
+/*
+ * Called under fc->lock
+ *
+ * fc->connected must have been checked previously
+ */
+void fuse_request_send_background_locked(struct fuse_conn *fc,
+                                        struct fuse_req *req)
 {
-       BUG_ON(!req->background);
+       BUG_ON(!test_bit(FR_BACKGROUND, &req->flags));
+       if (!test_bit(FR_WAITING, &req->flags)) {
+               __set_bit(FR_WAITING, &req->flags);
+               atomic_inc(&fc->num_waiting);
+       }
+       __set_bit(FR_ISREPLY, &req->flags);
        fc->num_background++;
        if (fc->num_background == fc->max_background)
                fc->blocked = 1;
@@ -602,54 +601,40 @@ static void fuse_request_send_nowait_locked(struct fuse_conn *fc,
        flush_bg_queue(fc);
 }
 
-static void fuse_request_send_nowait(struct fuse_conn *fc, struct fuse_req *req)
+void fuse_request_send_background(struct fuse_conn *fc, struct fuse_req *req)
 {
+       BUG_ON(!req->end);
        spin_lock(&fc->lock);
        if (fc->connected) {
-               fuse_request_send_nowait_locked(fc, req);
+               fuse_request_send_background_locked(fc, req);
                spin_unlock(&fc->lock);
        } else {
+               spin_unlock(&fc->lock);
                req->out.h.error = -ENOTCONN;
-               request_end(fc, req);
+               req->end(fc, req);
+               fuse_put_request(fc, req);
        }
 }
-
-void fuse_request_send_background(struct fuse_conn *fc, struct fuse_req *req)
-{
-       req->isreply = 1;
-       fuse_request_send_nowait(fc, req);
-}
 EXPORT_SYMBOL_GPL(fuse_request_send_background);
 
 static int fuse_request_send_notify_reply(struct fuse_conn *fc,
                                          struct fuse_req *req, u64 unique)
 {
        int err = -ENODEV;
+       struct fuse_iqueue *fiq = &fc->iq;
 
-       req->isreply = 0;
+       __clear_bit(FR_ISREPLY, &req->flags);
        req->in.h.unique = unique;
-       spin_lock(&fc->lock);
-       if (fc->connected) {
-               queue_request(fc, req);
+       spin_lock(&fiq->waitq.lock);
+       if (fiq->connected) {
+               queue_request(fiq, req);
                err = 0;
        }
-       spin_unlock(&fc->lock);
+       spin_unlock(&fiq->waitq.lock);
 
        return err;
 }
 
-/*
- * Called under fc->lock
- *
- * fc->connected must have been checked previously
- */
-void fuse_request_send_background_locked(struct fuse_conn *fc,
-                                        struct fuse_req *req)
-{
-       req->isreply = 1;
-       fuse_request_send_nowait_locked(fc, req);
-}
-
 void fuse_force_forget(struct file *file, u64 nodeid)
 {
        struct inode *inode = file_inode(file);
@@ -665,7 +650,7 @@ void fuse_force_forget(struct file *file, u64 nodeid)
        req->in.numargs = 1;
        req->in.args[0].size = sizeof(inarg);
        req->in.args[0].value = &inarg;
-       req->isreply = 0;
+       __clear_bit(FR_ISREPLY, &req->flags);
        __fuse_request_send(fc, req);
        /* ignore errors */
        fuse_put_request(fc, req);
@@ -676,38 +661,39 @@ void fuse_force_forget(struct file *file, u64 nodeid)
  * anything that could cause a page-fault.  If the request was already
  * aborted bail out.
  */
-static int lock_request(struct fuse_conn *fc, struct fuse_req *req)
+static int lock_request(struct fuse_req *req)
 {
        int err = 0;
        if (req) {
-               spin_lock(&fc->lock);
-               if (req->aborted)
+               spin_lock(&req->waitq.lock);
+               if (test_bit(FR_ABORTED, &req->flags))
                        err = -ENOENT;
                else
-                       req->locked = 1;
-               spin_unlock(&fc->lock);
+                       set_bit(FR_LOCKED, &req->flags);
+               spin_unlock(&req->waitq.lock);
        }
        return err;
 }
 
 /*
- * Unlock request.  If it was aborted during being locked, the
- * requester thread is currently waiting for it to be unlocked, so
- * wake it up.
+ * Unlock request.  If it was aborted while locked, caller is responsible
+ * for unlocking and ending the request.
  */
-static void unlock_request(struct fuse_conn *fc, struct fuse_req *req)
+static int unlock_request(struct fuse_req *req)
 {
+       int err = 0;
        if (req) {
-               spin_lock(&fc->lock);
-               req->locked = 0;
-               if (req->aborted)
-                       wake_up(&req->waitq);
-               spin_unlock(&fc->lock);
+               spin_lock(&req->waitq.lock);
+               if (test_bit(FR_ABORTED, &req->flags))
+                       err = -ENOENT;
+               else
+                       clear_bit(FR_LOCKED, &req->flags);
+               spin_unlock(&req->waitq.lock);
        }
+       return err;
 }
 
 struct fuse_copy_state {
-       struct fuse_conn *fc;
        int write;
        struct fuse_req *req;
        struct iov_iter *iter;
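To make the abort-versus-copy race handled by lock_request()/unlock_request() concrete: both sides serialize on the per-request req->waitq.lock, the copier refusing to mark the request FR_LOCKED once FR_ABORTED is set, and the aborter ending immediately only those requests that are not FR_LOCKED. A condensed kernel-style sketch of that protocol follows; the function names are invented for illustration and this is not the literal patch code.

/* Copier entering a copy phase, cf. lock_request() above. */
static int copier_lock(struct fuse_req *req)
{
	int err = 0;

	spin_lock(&req->waitq.lock);
	if (test_bit(FR_ABORTED, &req->flags))
		err = -ENOENT;                   /* abort won the race     */
	else
		set_bit(FR_LOCKED, &req->flags); /* request buffers in use */
	spin_unlock(&req->waitq.lock);
	return err;
}

/* Aborter deciding whether a request can be ended right away,
 * cf. the FR_ABORTED/FR_LOCKED handling in fuse_abort_conn() below. */
static bool aborter_can_end_now(struct fuse_req *req)
{
	bool unlocked;

	spin_lock(&req->waitq.lock);
	set_bit(FR_ABORTED, &req->flags);
	unlocked = !test_bit(FR_LOCKED, &req->flags);
	spin_unlock(&req->waitq.lock);
	/* If still locked, unlock_request() returns -ENOENT and the copier's
	 * error path ends the request instead. */
	return unlocked;
}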
@@ -721,13 +707,10 @@ struct fuse_copy_state {
        unsigned move_pages:1;
 };
 
-static void fuse_copy_init(struct fuse_copy_state *cs,
-                          struct fuse_conn *fc,
-                          int write,
+static void fuse_copy_init(struct fuse_copy_state *cs, int write,
                           struct iov_iter *iter)
 {
        memset(cs, 0, sizeof(*cs));
-       cs->fc = fc;
        cs->write = write;
        cs->iter = iter;
 }
@@ -760,7 +743,10 @@ static int fuse_copy_fill(struct fuse_copy_state *cs)
        struct page *page;
        int err;
 
-       unlock_request(cs->fc, cs->req);
+       err = unlock_request(cs->req);
+       if (err)
+               return err;
+
        fuse_copy_finish(cs);
        if (cs->pipebufs) {
                struct pipe_buffer *buf = cs->pipebufs;
@@ -809,7 +795,7 @@ static int fuse_copy_fill(struct fuse_copy_state *cs)
                iov_iter_advance(cs->iter, err);
        }
 
-       return lock_request(cs->fc, cs->req);
+       return lock_request(cs->req);
 }
 
 /* Do as much copy to/from userspace buffer as we can */
@@ -860,7 +846,10 @@ static int fuse_try_move_page(struct fuse_copy_state *cs, struct page **pagep)
        struct page *newpage;
        struct pipe_buffer *buf = cs->pipebufs;
 
-       unlock_request(cs->fc, cs->req);
+       err = unlock_request(cs->req);
+       if (err)
+               return err;
+
        fuse_copy_finish(cs);
 
        err = buf->ops->confirm(cs->pipe, buf);
@@ -914,12 +903,12 @@ static int fuse_try_move_page(struct fuse_copy_state *cs, struct page **pagep)
                lru_cache_add_file(newpage);
 
        err = 0;
-       spin_lock(&cs->fc->lock);
-       if (cs->req->aborted)
+       spin_lock(&cs->req->waitq.lock);
+       if (test_bit(FR_ABORTED, &cs->req->flags))
                err = -ENOENT;
        else
                *pagep = newpage;
-       spin_unlock(&cs->fc->lock);
+       spin_unlock(&cs->req->waitq.lock);
 
        if (err) {
                unlock_page(newpage);
@@ -939,7 +928,7 @@ out_fallback:
        cs->pg = buf->page;
        cs->offset = buf->offset;
 
-       err = lock_request(cs->fc, cs->req);
+       err = lock_request(cs->req);
        if (err)
                return err;
 
@@ -950,11 +939,15 @@ static int fuse_ref_page(struct fuse_copy_state *cs, struct page *page,
                         unsigned offset, unsigned count)
 {
        struct pipe_buffer *buf;
+       int err;
 
        if (cs->nr_segs == cs->pipe->buffers)
                return -EIO;
 
-       unlock_request(cs->fc, cs->req);
+       err = unlock_request(cs->req);
+       if (err)
+               return err;
+
        fuse_copy_finish(cs);
 
        buf = cs->pipebufs;
@@ -1065,36 +1058,15 @@ static int fuse_copy_args(struct fuse_copy_state *cs, unsigned numargs,
        return err;
 }
 
-static int forget_pending(struct fuse_conn *fc)
+static int forget_pending(struct fuse_iqueue *fiq)
 {
-       return fc->forget_list_head.next != NULL;
+       return fiq->forget_list_head.next != NULL;
 }
 
-static int request_pending(struct fuse_conn *fc)
+static int request_pending(struct fuse_iqueue *fiq)
 {
-       return !list_empty(&fc->pending) || !list_empty(&fc->interrupts) ||
-               forget_pending(fc);
-}
-
-/* Wait until a request is available on the pending list */
-static void request_wait(struct fuse_conn *fc)
-__releases(fc->lock)
-__acquires(fc->lock)
-{
-       DECLARE_WAITQUEUE(wait, current);
-
-       add_wait_queue_exclusive(&fc->waitq, &wait);
-       while (fc->connected && !request_pending(fc)) {
-               set_current_state(TASK_INTERRUPTIBLE);
-               if (signal_pending(current))
-                       break;
-
-               spin_unlock(&fc->lock);
-               schedule();
-               spin_lock(&fc->lock);
-       }
-       set_current_state(TASK_RUNNING);
-       remove_wait_queue(&fc->waitq, &wait);
+       return !list_empty(&fiq->pending) || !list_empty(&fiq->interrupts) ||
+               forget_pending(fiq);
 }
 
 /*
@@ -1103,11 +1075,12 @@ __acquires(fc->lock)
  * Unlike other requests this is assembled on demand, without a need
  * to allocate a separate fuse_req structure.
  *
- * Called with fc->lock held, releases it
+ * Called with fiq->waitq.lock held, releases it
  */
-static int fuse_read_interrupt(struct fuse_conn *fc, struct fuse_copy_state *cs,
+static int fuse_read_interrupt(struct fuse_iqueue *fiq,
+                              struct fuse_copy_state *cs,
                               size_t nbytes, struct fuse_req *req)
-__releases(fc->lock)
+__releases(fiq->waitq.lock)
 {
        struct fuse_in_header ih;
        struct fuse_interrupt_in arg;
@@ -1115,7 +1088,7 @@ __releases(fc->lock)
        int err;
 
        list_del_init(&req->intr_entry);
-       req->intr_unique = fuse_get_unique(fc);
+       req->intr_unique = fuse_get_unique(fiq);
        memset(&ih, 0, sizeof(ih));
        memset(&arg, 0, sizeof(arg));
        ih.len = reqsize;
@@ -1123,7 +1096,7 @@ __releases(fc->lock)
        ih.unique = req->intr_unique;
        arg.unique = req->in.h.unique;
 
-       spin_unlock(&fc->lock);
+       spin_unlock(&fiq->waitq.lock);
        if (nbytes < reqsize)
                return -EINVAL;
 
@@ -1135,21 +1108,21 @@ __releases(fc->lock)
        return err ? err : reqsize;
 }
 
-static struct fuse_forget_link *dequeue_forget(struct fuse_conn *fc,
+static struct fuse_forget_link *dequeue_forget(struct fuse_iqueue *fiq,
                                               unsigned max,
                                               unsigned *countp)
 {
-       struct fuse_forget_link *head = fc->forget_list_head.next;
+       struct fuse_forget_link *head = fiq->forget_list_head.next;
        struct fuse_forget_link **newhead = &head;
        unsigned count;
 
        for (count = 0; *newhead != NULL && count < max; count++)
                newhead = &(*newhead)->next;
 
-       fc->forget_list_head.next = *newhead;
+       fiq->forget_list_head.next = *newhead;
        *newhead = NULL;
-       if (fc->forget_list_head.next == NULL)
-               fc->forget_list_tail = &fc->forget_list_head;
+       if (fiq->forget_list_head.next == NULL)
+               fiq->forget_list_tail = &fiq->forget_list_head;
 
        if (countp != NULL)
                *countp = count;
@@ -1157,24 +1130,24 @@ static struct fuse_forget_link *dequeue_forget(struct fuse_conn *fc,
        return head;
 }
 
-static int fuse_read_single_forget(struct fuse_conn *fc,
+static int fuse_read_single_forget(struct fuse_iqueue *fiq,
                                   struct fuse_copy_state *cs,
                                   size_t nbytes)
-__releases(fc->lock)
+__releases(fiq->waitq.lock)
 {
        int err;
-       struct fuse_forget_link *forget = dequeue_forget(fc, 1, NULL);
+       struct fuse_forget_link *forget = dequeue_forget(fiq, 1, NULL);
        struct fuse_forget_in arg = {
                .nlookup = forget->forget_one.nlookup,
        };
        struct fuse_in_header ih = {
                .opcode = FUSE_FORGET,
                .nodeid = forget->forget_one.nodeid,
-               .unique = fuse_get_unique(fc),
+               .unique = fuse_get_unique(fiq),
                .len = sizeof(ih) + sizeof(arg),
        };
 
-       spin_unlock(&fc->lock);
+       spin_unlock(&fiq->waitq.lock);
        kfree(forget);
        if (nbytes < ih.len)
                return -EINVAL;
@@ -1190,9 +1163,9 @@ __releases(fc->lock)
        return ih.len;
 }
 
-static int fuse_read_batch_forget(struct fuse_conn *fc,
+static int fuse_read_batch_forget(struct fuse_iqueue *fiq,
                                   struct fuse_copy_state *cs, size_t nbytes)
-__releases(fc->lock)
+__releases(fiq->waitq.lock)
 {
        int err;
        unsigned max_forgets;
@@ -1201,18 +1174,18 @@ __releases(fc->lock)
        struct fuse_batch_forget_in arg = { .count = 0 };
        struct fuse_in_header ih = {
                .opcode = FUSE_BATCH_FORGET,
-               .unique = fuse_get_unique(fc),
+               .unique = fuse_get_unique(fiq),
                .len = sizeof(ih) + sizeof(arg),
        };
 
        if (nbytes < ih.len) {
-               spin_unlock(&fc->lock);
+               spin_unlock(&fiq->waitq.lock);
                return -EINVAL;
        }
 
        max_forgets = (nbytes - ih.len) / sizeof(struct fuse_forget_one);
-       head = dequeue_forget(fc, max_forgets, &count);
-       spin_unlock(&fc->lock);
+       head = dequeue_forget(fiq, max_forgets, &count);
+       spin_unlock(&fiq->waitq.lock);
 
        arg.count = count;
        ih.len += count * sizeof(struct fuse_forget_one);
@@ -1239,14 +1212,15 @@ __releases(fc->lock)
        return ih.len;
 }
 
-static int fuse_read_forget(struct fuse_conn *fc, struct fuse_copy_state *cs,
+static int fuse_read_forget(struct fuse_conn *fc, struct fuse_iqueue *fiq,
+                           struct fuse_copy_state *cs,
                            size_t nbytes)
-__releases(fc->lock)
+__releases(fiq->waitq.lock)
 {
-       if (fc->minor < 16 || fc->forget_list_head.next->next == NULL)
-               return fuse_read_single_forget(fc, cs, nbytes);
+       if (fc->minor < 16 || fiq->forget_list_head.next->next == NULL)
+               return fuse_read_single_forget(fiq, cs, nbytes);
        else
-               return fuse_read_batch_forget(fc, cs, nbytes);
+               return fuse_read_batch_forget(fiq, cs, nbytes);
 }
 
 /*
@@ -1258,46 +1232,51 @@ __releases(fc->lock)
  * request_end().  Otherwise add it to the processing list, and set
  * the 'sent' flag.
  */
-static ssize_t fuse_dev_do_read(struct fuse_conn *fc, struct file *file,
+static ssize_t fuse_dev_do_read(struct fuse_dev *fud, struct file *file,
                                struct fuse_copy_state *cs, size_t nbytes)
 {
-       int err;
+       ssize_t err;
+       struct fuse_conn *fc = fud->fc;
+       struct fuse_iqueue *fiq = &fc->iq;
+       struct fuse_pqueue *fpq = &fud->pq;
        struct fuse_req *req;
        struct fuse_in *in;
        unsigned reqsize;
 
  restart:
-       spin_lock(&fc->lock);
+       spin_lock(&fiq->waitq.lock);
        err = -EAGAIN;
-       if ((file->f_flags & O_NONBLOCK) && fc->connected &&
-           !request_pending(fc))
+       if ((file->f_flags & O_NONBLOCK) && fiq->connected &&
+           !request_pending(fiq))
                goto err_unlock;
 
-       request_wait(fc);
-       err = -ENODEV;
-       if (!fc->connected)
+       err = wait_event_interruptible_exclusive_locked(fiq->waitq,
+                               !fiq->connected || request_pending(fiq));
+       if (err)
                goto err_unlock;
-       err = -ERESTARTSYS;
-       if (!request_pending(fc))
+
+       err = -ENODEV;
+       if (!fiq->connected)
                goto err_unlock;
 
-       if (!list_empty(&fc->interrupts)) {
-               req = list_entry(fc->interrupts.next, struct fuse_req,
+       if (!list_empty(&fiq->interrupts)) {
+               req = list_entry(fiq->interrupts.next, struct fuse_req,
                                 intr_entry);
-               return fuse_read_interrupt(fc, cs, nbytes, req);
+               return fuse_read_interrupt(fiq, cs, nbytes, req);
        }
 
-       if (forget_pending(fc)) {
-               if (list_empty(&fc->pending) || fc->forget_batch-- > 0)
-                       return fuse_read_forget(fc, cs, nbytes);
+       if (forget_pending(fiq)) {
+               if (list_empty(&fiq->pending) || fiq->forget_batch-- > 0)
+                       return fuse_read_forget(fc, fiq, cs, nbytes);
 
-               if (fc->forget_batch <= -8)
-                       fc->forget_batch = 16;
+               if (fiq->forget_batch <= -8)
+                       fiq->forget_batch = 16;
        }
 
-       req = list_entry(fc->pending.next, struct fuse_req, list);
-       req->state = FUSE_REQ_READING;
-       list_move(&req->list, &fc->io);
+       req = list_entry(fiq->pending.next, struct fuse_req, list);
+       clear_bit(FR_PENDING, &req->flags);
+       list_del_init(&req->list);
+       spin_unlock(&fiq->waitq.lock);
 
        in = &req->in;
        reqsize = in->h.len;
@@ -1310,37 +1289,48 @@ static ssize_t fuse_dev_do_read(struct fuse_conn *fc, struct file *file,
                request_end(fc, req);
                goto restart;
        }
-       spin_unlock(&fc->lock);
+       spin_lock(&fpq->lock);
+       list_add(&req->list, &fpq->io);
+       spin_unlock(&fpq->lock);
        cs->req = req;
        err = fuse_copy_one(cs, &in->h, sizeof(in->h));
        if (!err)
                err = fuse_copy_args(cs, in->numargs, in->argpages,
                                     (struct fuse_arg *) in->args, 0);
        fuse_copy_finish(cs);
-       spin_lock(&fc->lock);
-       req->locked = 0;
-       if (req->aborted) {
-               request_end(fc, req);
-               return -ENODEV;
+       spin_lock(&fpq->lock);
+       clear_bit(FR_LOCKED, &req->flags);
+       if (!fpq->connected) {
+               err = -ENODEV;
+               goto out_end;
        }
        if (err) {
                req->out.h.error = -EIO;
-               request_end(fc, req);
-               return err;
+               goto out_end;
        }
-       if (!req->isreply)
-               request_end(fc, req);
-       else {
-               req->state = FUSE_REQ_SENT;
-               list_move_tail(&req->list, &fc->processing);
-               if (req->interrupted)
-                       queue_interrupt(fc, req);
-               spin_unlock(&fc->lock);
+       if (!test_bit(FR_ISREPLY, &req->flags)) {
+               err = reqsize;
+               goto out_end;
        }
+       list_move_tail(&req->list, &fpq->processing);
+       spin_unlock(&fpq->lock);
+       set_bit(FR_SENT, &req->flags);
+       /* matches barrier in request_wait_answer() */
+       smp_mb__after_atomic();
+       if (test_bit(FR_INTERRUPTED, &req->flags))
+               queue_interrupt(fiq, req);
+
        return reqsize;
 
+out_end:
+       if (!test_bit(FR_PRIVATE, &req->flags))
+               list_del_init(&req->list);
+       spin_unlock(&fpq->lock);
+       request_end(fc, req);
+       return err;
+
  err_unlock:
-       spin_unlock(&fc->lock);
+       spin_unlock(&fiq->waitq.lock);
        return err;
 }
 
@@ -1359,16 +1349,17 @@ static ssize_t fuse_dev_read(struct kiocb *iocb, struct iov_iter *to)
 {
        struct fuse_copy_state cs;
        struct file *file = iocb->ki_filp;
-       struct fuse_conn *fc = fuse_get_conn(file);
-       if (!fc)
+       struct fuse_dev *fud = fuse_get_dev(file);
+
+       if (!fud)
                return -EPERM;
 
        if (!iter_is_iovec(to))
                return -EINVAL;
 
-       fuse_copy_init(&cs, fc, 1, to);
+       fuse_copy_init(&cs, 1, to);
 
-       return fuse_dev_do_read(fc, file, &cs, iov_iter_count(to));
+       return fuse_dev_do_read(fud, file, &cs, iov_iter_count(to));
 }
 
 static ssize_t fuse_dev_splice_read(struct file *in, loff_t *ppos,
@@ -1380,18 +1371,19 @@ static ssize_t fuse_dev_splice_read(struct file *in, loff_t *ppos,
        int do_wakeup = 0;
        struct pipe_buffer *bufs;
        struct fuse_copy_state cs;
-       struct fuse_conn *fc = fuse_get_conn(in);
-       if (!fc)
+       struct fuse_dev *fud = fuse_get_dev(in);
+
+       if (!fud)
                return -EPERM;
 
        bufs = kmalloc(pipe->buffers * sizeof(struct pipe_buffer), GFP_KERNEL);
        if (!bufs)
                return -ENOMEM;
 
-       fuse_copy_init(&cs, fc, 1, NULL);
+       fuse_copy_init(&cs, 1, NULL);
        cs.pipebufs = bufs;
        cs.pipe = pipe;
-       ret = fuse_dev_do_read(fc, in, &cs, len);
+       ret = fuse_dev_do_read(fud, in, &cs, len);
        if (ret < 0)
                goto out;
 
@@ -1830,11 +1822,11 @@ static int fuse_notify(struct fuse_conn *fc, enum fuse_notify_code code,
 }
 
 /* Look up request on processing list by unique ID */
-static struct fuse_req *request_find(struct fuse_conn *fc, u64 unique)
+static struct fuse_req *request_find(struct fuse_pqueue *fpq, u64 unique)
 {
        struct fuse_req *req;
 
-       list_for_each_entry(req, &fc->processing, list) {
+       list_for_each_entry(req, &fpq->processing, list) {
                if (req->in.h.unique == unique || req->intr_unique == unique)
                        return req;
        }
@@ -1871,10 +1863,12 @@ static int copy_out_args(struct fuse_copy_state *cs, struct fuse_out *out,
  * it from the list and copy the rest of the buffer to the request.
  * The request is finished by calling request_end()
  */
-static ssize_t fuse_dev_do_write(struct fuse_conn *fc,
+static ssize_t fuse_dev_do_write(struct fuse_dev *fud,
                                 struct fuse_copy_state *cs, size_t nbytes)
 {
        int err;
+       struct fuse_conn *fc = fud->fc;
+       struct fuse_pqueue *fpq = &fud->pq;
        struct fuse_req *req;
        struct fuse_out_header oh;
 
@@ -1902,63 +1896,60 @@ static ssize_t fuse_dev_do_write(struct fuse_conn *fc,
        if (oh.error <= -1000 || oh.error > 0)
                goto err_finish;
 
-       spin_lock(&fc->lock);
+       spin_lock(&fpq->lock);
        err = -ENOENT;
-       if (!fc->connected)
-               goto err_unlock;
+       if (!fpq->connected)
+               goto err_unlock_pq;
 
-       req = request_find(fc, oh.unique);
+       req = request_find(fpq, oh.unique);
        if (!req)
-               goto err_unlock;
+               goto err_unlock_pq;
 
-       if (req->aborted) {
-               spin_unlock(&fc->lock);
-               fuse_copy_finish(cs);
-               spin_lock(&fc->lock);
-               request_end(fc, req);
-               return -ENOENT;
-       }
        /* Is it an interrupt reply? */
        if (req->intr_unique == oh.unique) {
+               spin_unlock(&fpq->lock);
+
                err = -EINVAL;
                if (nbytes != sizeof(struct fuse_out_header))
-                       goto err_unlock;
+                       goto err_finish;
 
                if (oh.error == -ENOSYS)
                        fc->no_interrupt = 1;
                else if (oh.error == -EAGAIN)
-                       queue_interrupt(fc, req);
+                       queue_interrupt(&fc->iq, req);
 
-               spin_unlock(&fc->lock);
                fuse_copy_finish(cs);
                return nbytes;
        }
 
-       req->state = FUSE_REQ_WRITING;
-       list_move(&req->list, &fc->io);
+       clear_bit(FR_SENT, &req->flags);
+       list_move(&req->list, &fpq->io);
        req->out.h = oh;
-       req->locked = 1;
+       set_bit(FR_LOCKED, &req->flags);
+       spin_unlock(&fpq->lock);
        cs->req = req;
        if (!req->out.page_replace)
                cs->move_pages = 0;
-       spin_unlock(&fc->lock);
 
        err = copy_out_args(cs, &req->out, nbytes);
        fuse_copy_finish(cs);
 
-       spin_lock(&fc->lock);
-       req->locked = 0;
-       if (!err) {
-               if (req->aborted)
-                       err = -ENOENT;
-       } else if (!req->aborted)
+       spin_lock(&fpq->lock);
+       clear_bit(FR_LOCKED, &req->flags);
+       if (!fpq->connected)
+               err = -ENOENT;
+       else if (err)
                req->out.h.error = -EIO;
+       if (!test_bit(FR_PRIVATE, &req->flags))
+               list_del_init(&req->list);
+       spin_unlock(&fpq->lock);
+
        request_end(fc, req);
 
        return err ? err : nbytes;
 
- err_unlock:
-       spin_unlock(&fc->lock);
+ err_unlock_pq:
+       spin_unlock(&fpq->lock);
  err_finish:
        fuse_copy_finish(cs);
        return err;
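The interrupt-reply branch above gives the daemon two escape hatches: answering the INTERRUPT request with -ENOSYS disables interrupts for the connection, while -EAGAIN asks the kernel to requeue it because the original request has not been seen yet. A hypothetical daemon-side helper showing what such a header-only reply looks like on the wire; the struct fuse_out_header layout comes from <linux/fuse.h>, while the helper name and fd handling are made up for illustration.

#include <errno.h>
#include <stdint.h>
#include <unistd.h>
#include <linux/fuse.h>

/* Ask the kernel to requeue an INTERRUPT we cannot match yet. */
int requeue_interrupt(int fuse_fd, uint64_t intr_unique)
{
	struct fuse_out_header out = {
		.len    = sizeof(out),   /* header-only reply                 */
		.error  = -EAGAIN,       /* or -ENOSYS: never send interrupts */
		.unique = intr_unique,   /* unique id of the INTERRUPT itself */
	};

	if (write(fuse_fd, &out, sizeof(out)) != (ssize_t)sizeof(out))
		return -errno;
	return 0;
}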
@@ -1967,16 +1958,17 @@ static ssize_t fuse_dev_do_write(struct fuse_conn *fc,
 static ssize_t fuse_dev_write(struct kiocb *iocb, struct iov_iter *from)
 {
        struct fuse_copy_state cs;
-       struct fuse_conn *fc = fuse_get_conn(iocb->ki_filp);
-       if (!fc)
+       struct fuse_dev *fud = fuse_get_dev(iocb->ki_filp);
+
+       if (!fud)
                return -EPERM;
 
        if (!iter_is_iovec(from))
                return -EINVAL;
 
-       fuse_copy_init(&cs, fc, 0, from);
+       fuse_copy_init(&cs, 0, from);
 
-       return fuse_dev_do_write(fc, &cs, iov_iter_count(from));
+       return fuse_dev_do_write(fud, &cs, iov_iter_count(from));
 }
 
 static ssize_t fuse_dev_splice_write(struct pipe_inode_info *pipe,
@@ -1987,12 +1979,12 @@ static ssize_t fuse_dev_splice_write(struct pipe_inode_info *pipe,
        unsigned idx;
        struct pipe_buffer *bufs;
        struct fuse_copy_state cs;
-       struct fuse_conn *fc;
+       struct fuse_dev *fud;
        size_t rem;
        ssize_t ret;
 
-       fc = fuse_get_conn(out);
-       if (!fc)
+       fud = fuse_get_dev(out);
+       if (!fud)
                return -EPERM;
 
        bufs = kmalloc(pipe->buffers * sizeof(struct pipe_buffer), GFP_KERNEL);
@@ -2039,7 +2031,7 @@ static ssize_t fuse_dev_splice_write(struct pipe_inode_info *pipe,
        }
        pipe_unlock(pipe);
 
-       fuse_copy_init(&cs, fc, 0, NULL);
+       fuse_copy_init(&cs, 0, NULL);
        cs.pipebufs = bufs;
        cs.nr_segs = nbuf;
        cs.pipe = pipe;
@@ -2047,7 +2039,7 @@ static ssize_t fuse_dev_splice_write(struct pipe_inode_info *pipe,
        if (flags & SPLICE_F_MOVE)
                cs.move_pages = 1;
 
-       ret = fuse_dev_do_write(fc, &cs, len);
+       ret = fuse_dev_do_write(fud, &cs, len);
 
        for (idx = 0; idx < nbuf; idx++) {
                struct pipe_buffer *buf = &bufs[idx];
@@ -2061,18 +2053,21 @@ out:
 static unsigned fuse_dev_poll(struct file *file, poll_table *wait)
 {
        unsigned mask = POLLOUT | POLLWRNORM;
-       struct fuse_conn *fc = fuse_get_conn(file);
-       if (!fc)
+       struct fuse_iqueue *fiq;
+       struct fuse_dev *fud = fuse_get_dev(file);
+
+       if (!fud)
                return POLLERR;
 
-       poll_wait(file, &fc->waitq, wait);
+       fiq = &fud->fc->iq;
+       poll_wait(file, &fiq->waitq, wait);
 
-       spin_lock(&fc->lock);
-       if (!fc->connected)
+       spin_lock(&fiq->waitq.lock);
+       if (!fiq->connected)
                mask = POLLERR;
-       else if (request_pending(fc))
+       else if (request_pending(fiq))
                mask |= POLLIN | POLLRDNORM;
-       spin_unlock(&fc->lock);
+       spin_unlock(&fiq->waitq.lock);
 
        return mask;
 }
@@ -2083,67 +2078,18 @@ static unsigned fuse_dev_poll(struct file *file, poll_table *wait)
  * This function releases and reacquires fc->lock
  */
 static void end_requests(struct fuse_conn *fc, struct list_head *head)
-__releases(fc->lock)
-__acquires(fc->lock)
 {
        while (!list_empty(head)) {
                struct fuse_req *req;
                req = list_entry(head->next, struct fuse_req, list);
                req->out.h.error = -ECONNABORTED;
-               request_end(fc, req);
-               spin_lock(&fc->lock);
-       }
-}
-
-/*
- * Abort requests under I/O
- *
- * The requests are set to aborted and finished, and the request
- * waiter is woken up.  This will make request_wait_answer() wait
- * until the request is unlocked and then return.
- *
- * If the request is asynchronous, then the end function needs to be
- * called after waiting for the request to be unlocked (if it was
- * locked).
- */
-static void end_io_requests(struct fuse_conn *fc)
-__releases(fc->lock)
-__acquires(fc->lock)
-{
-       while (!list_empty(&fc->io)) {
-               struct fuse_req *req =
-                       list_entry(fc->io.next, struct fuse_req, list);
-               void (*end) (struct fuse_conn *, struct fuse_req *) = req->end;
-
-               req->aborted = 1;
-               req->out.h.error = -ECONNABORTED;
-               req->state = FUSE_REQ_FINISHED;
+               clear_bit(FR_PENDING, &req->flags);
+               clear_bit(FR_SENT, &req->flags);
                list_del_init(&req->list);
-               wake_up(&req->waitq);
-               if (end) {
-                       req->end = NULL;
-                       __fuse_get_request(req);
-                       spin_unlock(&fc->lock);
-                       wait_event(req->waitq, !req->locked);
-                       end(fc, req);
-                       fuse_put_request(fc, req);
-                       spin_lock(&fc->lock);
-               }
+               request_end(fc, req);
        }
 }
 
-static void end_queued_requests(struct fuse_conn *fc)
-__releases(fc->lock)
-__acquires(fc->lock)
-{
-       fc->max_background = UINT_MAX;
-       flush_bg_queue(fc);
-       end_requests(fc, &fc->pending);
-       end_requests(fc, &fc->processing);
-       while (forget_pending(fc))
-               kfree(dequeue_forget(fc, 1, NULL));
-}
-
 static void end_polls(struct fuse_conn *fc)
 {
        struct rb_node *p;
@@ -2162,67 +2108,156 @@ static void end_polls(struct fuse_conn *fc)
 /*
  * Abort all requests.
  *
- * Emergency exit in case of a malicious or accidental deadlock, or
- * just a hung filesystem.
+ * Emergency exit in case of a malicious or accidental deadlock, or just a hung
+ * filesystem.
  *
- * The same effect is usually achievable through killing the
- * filesystem daemon and all users of the filesystem.  The exception
- * is the combination of an asynchronous request and the tricky
- * deadlock (see Documentation/filesystems/fuse.txt).
+ * The same effect is usually achievable through killing the filesystem daemon
+ * and all users of the filesystem.  The exception is the combination of an
+ * asynchronous request and the tricky deadlock (see
+ * Documentation/filesystems/fuse.txt).
  *
- * During the aborting, progression of requests from the pending and
- * processing lists onto the io list, and progression of new requests
- * onto the pending list is prevented by req->connected being false.
- *
- * Progression of requests under I/O to the processing list is
- * prevented by the req->aborted flag being true for these requests.
- * For this reason requests on the io list must be aborted first.
+ * Aborting requests under I/O goes as follows: 1: Separate out the unlocked
+ * requests; they can be finished off immediately, while locked requests will
+ * be finished after unlock (see unlock_request()). 2: Finish off the unlocked
+ * requests.  It is possible that some request will finish before we can get
+ * to it; this is OK, since such a request is removed from the list before we
+ * touch it.
  */
 void fuse_abort_conn(struct fuse_conn *fc)
 {
+       struct fuse_iqueue *fiq = &fc->iq;
+
        spin_lock(&fc->lock);
        if (fc->connected) {
+               struct fuse_dev *fud;
+               struct fuse_req *req, *next;
+               LIST_HEAD(to_end1);
+               LIST_HEAD(to_end2);
+
                fc->connected = 0;
                fc->blocked = 0;
                fuse_set_initialized(fc);
-               end_io_requests(fc);
-               end_queued_requests(fc);
+               list_for_each_entry(fud, &fc->devices, entry) {
+                       struct fuse_pqueue *fpq = &fud->pq;
+
+                       spin_lock(&fpq->lock);
+                       fpq->connected = 0;
+                       list_for_each_entry_safe(req, next, &fpq->io, list) {
+                               req->out.h.error = -ECONNABORTED;
+                               spin_lock(&req->waitq.lock);
+                               set_bit(FR_ABORTED, &req->flags);
+                               if (!test_bit(FR_LOCKED, &req->flags)) {
+                                       set_bit(FR_PRIVATE, &req->flags);
+                                       list_move(&req->list, &to_end1);
+                               }
+                               spin_unlock(&req->waitq.lock);
+                       }
+                       list_splice_init(&fpq->processing, &to_end2);
+                       spin_unlock(&fpq->lock);
+               }
+               fc->max_background = UINT_MAX;
+               flush_bg_queue(fc);
+
+               spin_lock(&fiq->waitq.lock);
+               fiq->connected = 0;
+               list_splice_init(&fiq->pending, &to_end2);
+               while (forget_pending(fiq))
+                       kfree(dequeue_forget(fiq, 1, NULL));
+               wake_up_all_locked(&fiq->waitq);
+               spin_unlock(&fiq->waitq.lock);
+               kill_fasync(&fiq->fasync, SIGIO, POLL_IN);
                end_polls(fc);
-               wake_up_all(&fc->waitq);
                wake_up_all(&fc->blocked_waitq);
-               kill_fasync(&fc->fasync, SIGIO, POLL_IN);
+               spin_unlock(&fc->lock);
+
+               while (!list_empty(&to_end1)) {
+                       req = list_first_entry(&to_end1, struct fuse_req, list);
+                       __fuse_get_request(req);
+                       list_del_init(&req->list);
+                       request_end(fc, req);
+               }
+               end_requests(fc, &to_end2);
+       } else {
+               spin_unlock(&fc->lock);
        }
-       spin_unlock(&fc->lock);
 }
 EXPORT_SYMBOL_GPL(fuse_abort_conn);
 
 int fuse_dev_release(struct inode *inode, struct file *file)
 {
-       struct fuse_conn *fc = fuse_get_conn(file);
-       if (fc) {
-               spin_lock(&fc->lock);
-               fc->connected = 0;
-               fc->blocked = 0;
-               fuse_set_initialized(fc);
-               end_queued_requests(fc);
-               end_polls(fc);
-               wake_up_all(&fc->blocked_waitq);
-               spin_unlock(&fc->lock);
-               fuse_conn_put(fc);
-       }
+       struct fuse_dev *fud = fuse_get_dev(file);
 
+       if (fud) {
+               struct fuse_conn *fc = fud->fc;
+               struct fuse_pqueue *fpq = &fud->pq;
+
+               WARN_ON(!list_empty(&fpq->io));
+               end_requests(fc, &fpq->processing);
+               /* Are we the last open device? */
+               if (atomic_dec_and_test(&fc->dev_count)) {
+                       WARN_ON(fc->iq.fasync != NULL);
+                       fuse_abort_conn(fc);
+               }
+               fuse_dev_free(fud);
+       }
        return 0;
 }
 EXPORT_SYMBOL_GPL(fuse_dev_release);
 
 static int fuse_dev_fasync(int fd, struct file *file, int on)
 {
-       struct fuse_conn *fc = fuse_get_conn(file);
-       if (!fc)
+       struct fuse_dev *fud = fuse_get_dev(file);
+
+       if (!fud)
                return -EPERM;
 
        /* No locking - fasync_helper does its own locking */
-       return fasync_helper(fd, file, on, &fc->fasync);
+       return fasync_helper(fd, file, on, &fud->fc->iq.fasync);
+}
+
+static int fuse_device_clone(struct fuse_conn *fc, struct file *new)
+{
+       struct fuse_dev *fud;
+
+       if (new->private_data)
+               return -EINVAL;
+
+       fud = fuse_dev_alloc(fc);
+       if (!fud)
+               return -ENOMEM;
+
+       new->private_data = fud;
+       atomic_inc(&fc->dev_count);
+
+       return 0;
+}
+
+static long fuse_dev_ioctl(struct file *file, unsigned int cmd,
+                          unsigned long arg)
+{
+       int err = -ENOTTY;
+
+       if (cmd == FUSE_DEV_IOC_CLONE) {
+               int oldfd;
+
+               err = -EFAULT;
+               if (!get_user(oldfd, (__u32 __user *) arg)) {
+                       struct file *old = fget(oldfd);
+
+                       err = -EINVAL;
+                       if (old) {
+                               struct fuse_dev *fud = fuse_get_dev(old);
+
+                               if (fud) {
+                                       mutex_lock(&fuse_mutex);
+                                       err = fuse_device_clone(fud->fc, file);
+                                       mutex_unlock(&fuse_mutex);
+                               }
+                               fput(old);
+                       }
+               }
+       }
+       return err;
 }
 
 const struct file_operations fuse_dev_operations = {
@@ -2236,6 +2271,8 @@ const struct file_operations fuse_dev_operations = {
        .poll           = fuse_dev_poll,
        .release        = fuse_dev_release,
        .fasync         = fuse_dev_fasync,
+       .unlocked_ioctl = fuse_dev_ioctl,
+       .compat_ioctl   = fuse_dev_ioctl,
 };
 EXPORT_SYMBOL_GPL(fuse_dev_operations);
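The new ioctl is the userspace-visible half of the fuse_dev split introduced above: a daemon opens another /dev/fuse fd and clones its existing session onto it, after which each fd has its own processing queue and can be read and written by a separate worker thread. A hypothetical usage sketch; FUSE_DEV_IOC_CLONE is the ioctl added to <linux/fuse.h> alongside this series, and error handling is kept minimal.

#include <fcntl.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/fuse.h>

/*
 * Clone an existing FUSE session fd onto a fresh /dev/fuse fd, so another
 * worker thread can read and answer requests for the same mount in parallel.
 * Returns the new fd, or -1 with errno set.
 */
int fuse_clone_session_fd(int session_fd)
{
	uint32_t oldfd = (uint32_t)session_fd;
	int clone_fd = open("/dev/fuse", O_RDWR | O_CLOEXEC);

	if (clone_fd < 0)
		return -1;
	if (ioctl(clone_fd, FUSE_DEV_IOC_CLONE, &oldfd) < 0) {
		close(clone_fd);
		return -1;
	}
	return clone_fd;
}

This is the mechanism libfuse later exposes as its clone_fd option for the multi-threaded request loop.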
 
index 8c5e2fa68835a07216d250dc9536aafaa1063dc2..f523f2f04c196db5b1201a38a6e3222ae40d1724 100644 (file)
@@ -96,17 +96,17 @@ static void fuse_file_put(struct fuse_file *ff, bool sync)
                         * Drop the release request when client does not
                         * implement 'open'
                         */
-                       req->background = 0;
+                       __clear_bit(FR_BACKGROUND, &req->flags);
                        iput(req->misc.release.inode);
                        fuse_put_request(ff->fc, req);
                } else if (sync) {
-                       req->background = 0;
+                       __clear_bit(FR_BACKGROUND, &req->flags);
                        fuse_request_send(ff->fc, req);
                        iput(req->misc.release.inode);
                        fuse_put_request(ff->fc, req);
                } else {
                        req->end = fuse_release_end;
-                       req->background = 1;
+                       __set_bit(FR_BACKGROUND, &req->flags);
                        fuse_request_send_background(ff->fc, req);
                }
                kfree(ff);
@@ -299,8 +299,8 @@ void fuse_sync_release(struct fuse_file *ff, int flags)
 {
        WARN_ON(atomic_read(&ff->count) > 1);
        fuse_prepare_release(ff, flags, FUSE_RELEASE);
-       ff->reserved_req->force = 1;
-       ff->reserved_req->background = 0;
+       __set_bit(FR_FORCE, &ff->reserved_req->flags);
+       __clear_bit(FR_BACKGROUND, &ff->reserved_req->flags);
        fuse_request_send(ff->fc, ff->reserved_req);
        fuse_put_request(ff->fc, ff->reserved_req);
        kfree(ff);
@@ -426,7 +426,7 @@ static int fuse_flush(struct file *file, fl_owner_t id)
        req->in.numargs = 1;
        req->in.args[0].size = sizeof(inarg);
        req->in.args[0].value = &inarg;
-       req->force = 1;
+       __set_bit(FR_FORCE, &req->flags);
        fuse_request_send(fc, req);
        err = req->out.h.error;
        fuse_put_request(fc, req);
@@ -1169,7 +1169,7 @@ static ssize_t fuse_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
        if (err <= 0)
                goto out;
 
-       err = file_remove_suid(file);
+       err = file_remove_privs(file);
        if (err)
                goto out;
 
@@ -1611,7 +1611,8 @@ static int fuse_writepage_locked(struct page *page)
        if (!req)
                goto err;
 
-       req->background = 1; /* writeback always goes to bg_queue */
+       /* writeback always goes to bg_queue */
+       __set_bit(FR_BACKGROUND, &req->flags);
        tmp_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
        if (!tmp_page)
                goto err_free;
@@ -1742,8 +1743,7 @@ static bool fuse_writepage_in_flight(struct fuse_req *new_req,
                }
        }
 
-       if (old_req->num_pages == 1 && (old_req->state == FUSE_REQ_INIT ||
-                                       old_req->state == FUSE_REQ_PENDING)) {
+       if (old_req->num_pages == 1 && test_bit(FR_PENDING, &old_req->flags)) {
                struct backing_dev_info *bdi = inode_to_bdi(page->mapping->host);
 
                copy_highpage(old_req->pages[0], page);
@@ -1830,7 +1830,7 @@ static int fuse_writepages_fill(struct page *page,
                req->misc.write.in.write_flags |= FUSE_WRITE_CACHE;
                req->misc.write.next = NULL;
                req->in.argpages = 1;
-               req->background = 1;
+               __set_bit(FR_BACKGROUND, &req->flags);
                req->num_pages = 0;
                req->end = fuse_writepage_end;
                req->inode = inode;
index 7354dc142a50845a62e9a413d82d185afc1f5b0d..405113101db8d868fcb40c34199978be576d0961 100644 (file)
@@ -241,16 +241,6 @@ struct fuse_args {
 
 #define FUSE_ARGS(args) struct fuse_args args = {}
 
-/** The request state */
-enum fuse_req_state {
-       FUSE_REQ_INIT = 0,
-       FUSE_REQ_PENDING,
-       FUSE_REQ_READING,
-       FUSE_REQ_SENT,
-       FUSE_REQ_WRITING,
-       FUSE_REQ_FINISHED
-};
-
 /** The request IO state (for asynchronous processing) */
 struct fuse_io_priv {
        int async;
@@ -266,8 +256,41 @@ struct fuse_io_priv {
        struct completion *done;
 };
 
+/**
+ * Request flags
+ *
+ * FR_ISREPLY:         set if the request has a reply
+ * FR_FORCE:           force sending of the request even if interrupted
+ * FR_BACKGROUND:      request is sent in the background
+ * FR_WAITING:         request is counted as "waiting"
+ * FR_ABORTED:         the request was aborted
+ * FR_INTERRUPTED:     the request has been interrupted
+ * FR_LOCKED:          data is being copied to/from the request
+ * FR_PENDING:         request is not yet in userspace
+ * FR_SENT:            request is in userspace, waiting for an answer
+ * FR_FINISHED:                request is finished
+ * FR_PRIVATE:         request is on private list
+ */
+enum fuse_req_flag {
+       FR_ISREPLY,
+       FR_FORCE,
+       FR_BACKGROUND,
+       FR_WAITING,
+       FR_ABORTED,
+       FR_INTERRUPTED,
+       FR_LOCKED,
+       FR_PENDING,
+       FR_SENT,
+       FR_FINISHED,
+       FR_PRIVATE,
+};
+
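Since req->flags is now a plain unsigned long driven by the generic bitops, each flag can be flipped atomically without holding fc->lock; the double-underscore variants (__set_bit()/__clear_bit()) used throughout the patch are the cheaper non-atomic forms, safe only while the request is not yet visible to other threads. A tiny illustrative helper (not part of the patch) showing that split:

/* Illustrative helper (not from the patch): choosing the right bitop. */
static void mark_background(struct fuse_req *req, bool already_queued)
{
	if (!already_queued)
		__set_bit(FR_BACKGROUND, &req->flags); /* req still private */
	else
		set_bit(FR_BACKGROUND, &req->flags);   /* atomic, lock-free */
}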
 /**
  * A request to the client
+ *
+ * .waitq.lock protects the following fields:
+ *   - FR_ABORTED
+ *   - FR_LOCKED (may also be modified under fc->lock, tested under both)
  */
 struct fuse_req {
        /** This can be on either pending processing or io lists in
@@ -283,35 +306,8 @@ struct fuse_req {
        /** Unique ID for the interrupt request */
        u64 intr_unique;
 
-       /*
-        * The following bitfields are either set once before the
-        * request is queued or setting/clearing them is protected by
-        * fuse_conn->lock
-        */
-
-       /** True if the request has reply */
-       unsigned isreply:1;
-
-       /** Force sending of the request even if interrupted */
-       unsigned force:1;
-
-       /** The request was aborted */
-       unsigned aborted:1;
-
-       /** Request is sent in the background */
-       unsigned background:1;
-
-       /** The request has been interrupted */
-       unsigned interrupted:1;
-
-       /** Data is being copied to/from the request */
-       unsigned locked:1;
-
-       /** Request is counted as "waiting" */
-       unsigned waiting:1;
-
-       /** State of the request */
-       enum fuse_req_state state;
+       /* Request flags, updated with test/set/clear_bit() */
+       unsigned long flags;
 
        /** The request input */
        struct fuse_in in;
@@ -380,6 +376,61 @@ struct fuse_req {
        struct file *stolen_file;
 };
 
+struct fuse_iqueue {
+       /** Connection established */
+       unsigned connected;
+
+       /** Readers of the connection are waiting on this */
+       wait_queue_head_t waitq;
+
+       /** The next unique request id */
+       u64 reqctr;
+
+       /** The list of pending requests */
+       struct list_head pending;
+
+       /** Pending interrupts */
+       struct list_head interrupts;
+
+       /** Queue of pending forgets */
+       struct fuse_forget_link forget_list_head;
+       struct fuse_forget_link *forget_list_tail;
+
+       /** Batching of FORGET requests (positive indicates FORGET batch) */
+       int forget_batch;
+
+       /** O_ASYNC requests */
+       struct fasync_struct *fasync;
+};
+
+struct fuse_pqueue {
+       /** Connection established */
+       unsigned connected;
+
+       /** Lock protecting accesses to members of this structure */
+       spinlock_t lock;
+
+       /** The list of requests being processed */
+       struct list_head processing;
+
+       /** The list of requests under I/O */
+       struct list_head io;
+};
+
+/**
+ * Fuse device instance
+ */
+struct fuse_dev {
+       /** Fuse connection for this device */
+       struct fuse_conn *fc;
+
+       /** Processing queue */
+       struct fuse_pqueue pq;
+
+       /** list entry on fc->devices */
+       struct list_head entry;
+};
+
 /**
  * A Fuse connection.
  *
@@ -394,6 +445,9 @@ struct fuse_conn {
        /** Refcount */
        atomic_t count;
 
+       /** Number of fuse_dev's */
+       atomic_t dev_count;
+
        struct rcu_head rcu;
 
        /** The user id for this mount */
@@ -411,17 +465,8 @@ struct fuse_conn {
        /** Maximum write size */
        unsigned max_write;
 
-       /** Readers of the connection are waiting on this */
-       wait_queue_head_t waitq;
-
-       /** The list of pending requests */
-       struct list_head pending;
-
-       /** The list of requests being processed */
-       struct list_head processing;
-
-       /** The list of requests under I/O */
-       struct list_head io;
+       /** Input queue */
+       struct fuse_iqueue iq;
 
        /** The next unique kernel file handle */
        u64 khctr;
@@ -444,16 +489,6 @@ struct fuse_conn {
        /** The list of background requests set aside for later queuing */
        struct list_head bg_queue;
 
-       /** Pending interrupts */
-       struct list_head interrupts;
-
-       /** Queue of pending forgets */
-       struct fuse_forget_link forget_list_head;
-       struct fuse_forget_link *forget_list_tail;
-
-       /** Batching of FORGET requests (positive indicates FORGET batch) */
-       int forget_batch;
-
        /** Flag indicating that INIT reply has been received. Allocating
         * any fuse request will be suspended until the flag is set */
        int initialized;
@@ -469,9 +504,6 @@ struct fuse_conn {
        /** waitq for reserved requests */
        wait_queue_head_t reserved_req_waitq;
 
-       /** The next unique request id */
-       u64 reqctr;
-
        /** Connection established, cleared on umount, connection
            abort and device release */
        unsigned connected;
@@ -594,9 +626,6 @@ struct fuse_conn {
        /** number of dentries used in the above array */
        int ctl_ndents;
 
-       /** O_ASYNC requests */
-       struct fasync_struct *fasync;
-
        /** Key for lock owner ID scrambling */
        u32 scramble_key[4];
 
@@ -614,6 +643,9 @@ struct fuse_conn {
 
        /** Read/write semaphore to hold when accessing sb. */
        struct rw_semaphore killsb;
+
+       /** List of device instances belonging to this connection */
+       struct list_head devices;
 };
 
 static inline struct fuse_conn *get_fuse_conn_super(struct super_block *sb)
@@ -826,6 +858,9 @@ void fuse_conn_init(struct fuse_conn *fc);
  */
 void fuse_conn_put(struct fuse_conn *fc);
 
+struct fuse_dev *fuse_dev_alloc(struct fuse_conn *fc);
+void fuse_dev_free(struct fuse_dev *fud);
+
 /**
  * Add connection to control filesystem
  */
index 082ac1c97f397f7f8014c7896123f8c7a5eebe8f..2913db2a5b99bee2b01b79d07b1451f752f0ec01 100644 (file)
@@ -362,8 +362,8 @@ static void fuse_send_destroy(struct fuse_conn *fc)
        if (req && fc->conn_init) {
                fc->destroy_req = NULL;
                req->in.h.opcode = FUSE_DESTROY;
-               req->force = 1;
-               req->background = 0;
+               __set_bit(FR_FORCE, &req->flags);
+               __clear_bit(FR_BACKGROUND, &req->flags);
                fuse_request_send(fc, req);
                fuse_put_request(fc, req);
        }
@@ -567,30 +567,46 @@ static int fuse_show_options(struct seq_file *m, struct dentry *root)
        return 0;
 }
 
+static void fuse_iqueue_init(struct fuse_iqueue *fiq)
+{
+       memset(fiq, 0, sizeof(struct fuse_iqueue));
+       init_waitqueue_head(&fiq->waitq);
+       INIT_LIST_HEAD(&fiq->pending);
+       INIT_LIST_HEAD(&fiq->interrupts);
+       fiq->forget_list_tail = &fiq->forget_list_head;
+       fiq->connected = 1;
+}
+
+static void fuse_pqueue_init(struct fuse_pqueue *fpq)
+{
+       memset(fpq, 0, sizeof(struct fuse_pqueue));
+       spin_lock_init(&fpq->lock);
+       INIT_LIST_HEAD(&fpq->processing);
+       INIT_LIST_HEAD(&fpq->io);
+       fpq->connected = 1;
+}
+
 void fuse_conn_init(struct fuse_conn *fc)
 {
        memset(fc, 0, sizeof(*fc));
        spin_lock_init(&fc->lock);
        init_rwsem(&fc->killsb);
        atomic_set(&fc->count, 1);
-       init_waitqueue_head(&fc->waitq);
+       atomic_set(&fc->dev_count, 1);
        init_waitqueue_head(&fc->blocked_waitq);
        init_waitqueue_head(&fc->reserved_req_waitq);
-       INIT_LIST_HEAD(&fc->pending);
-       INIT_LIST_HEAD(&fc->processing);
-       INIT_LIST_HEAD(&fc->io);
-       INIT_LIST_HEAD(&fc->interrupts);
+       fuse_iqueue_init(&fc->iq);
        INIT_LIST_HEAD(&fc->bg_queue);
        INIT_LIST_HEAD(&fc->entry);
-       fc->forget_list_tail = &fc->forget_list_head;
+       INIT_LIST_HEAD(&fc->devices);
        atomic_set(&fc->num_waiting, 0);
        fc->max_background = FUSE_DEFAULT_MAX_BACKGROUND;
        fc->congestion_threshold = FUSE_DEFAULT_CONGESTION_THRESHOLD;
        fc->khctr = 0;
        fc->polled_files = RB_ROOT;
-       fc->reqctr = 0;
        fc->blocked = 0;
        fc->initialized = 0;
+       fc->connected = 1;
        fc->attr_version = 1;
        get_random_bytes(&fc->scramble_key, sizeof(fc->scramble_key));
 }
@@ -930,6 +946,7 @@ static void fuse_send_init(struct fuse_conn *fc, struct fuse_req *req)
 
 static void fuse_free_conn(struct fuse_conn *fc)
 {
+       WARN_ON(!list_empty(&fc->devices));
        kfree_rcu(fc, rcu);
 }
 
@@ -975,8 +992,42 @@ static int fuse_bdi_init(struct fuse_conn *fc, struct super_block *sb)
        return 0;
 }
 
+struct fuse_dev *fuse_dev_alloc(struct fuse_conn *fc)
+{
+       struct fuse_dev *fud;
+
+       fud = kzalloc(sizeof(struct fuse_dev), GFP_KERNEL);
+       if (fud) {
+               fud->fc = fuse_conn_get(fc);
+               fuse_pqueue_init(&fud->pq);
+
+               spin_lock(&fc->lock);
+               list_add_tail(&fud->entry, &fc->devices);
+               spin_unlock(&fc->lock);
+       }
+
+       return fud;
+}
+EXPORT_SYMBOL_GPL(fuse_dev_alloc);
+
+void fuse_dev_free(struct fuse_dev *fud)
+{
+       struct fuse_conn *fc = fud->fc;
+
+       if (fc) {
+               spin_lock(&fc->lock);
+               list_del(&fud->entry);
+               spin_unlock(&fc->lock);
+
+               fuse_conn_put(fc);
+       }
+       kfree(fud);
+}
+EXPORT_SYMBOL_GPL(fuse_dev_free);
+
 static int fuse_fill_super(struct super_block *sb, void *data, int silent)
 {
+       struct fuse_dev *fud;
        struct fuse_conn *fc;
        struct inode *root;
        struct fuse_mount_data d;
@@ -1026,12 +1077,17 @@ static int fuse_fill_super(struct super_block *sb, void *data, int silent)
                goto err_fput;
 
        fuse_conn_init(fc);
+       fc->release = fuse_free_conn;
+
+       fud = fuse_dev_alloc(fc);
+       if (!fud)
+               goto err_put_conn;
 
        fc->dev = sb->s_dev;
        fc->sb = sb;
        err = fuse_bdi_init(fc, sb);
        if (err)
-               goto err_put_conn;
+               goto err_dev_free;
 
        sb->s_bdi = &fc->bdi;
 
@@ -1040,7 +1096,6 @@ static int fuse_fill_super(struct super_block *sb, void *data, int silent)
                fc->dont_mask = 1;
        sb->s_flags |= MS_POSIXACL;
 
-       fc->release = fuse_free_conn;
        fc->flags = d.flags;
        fc->user_id = d.user_id;
        fc->group_id = d.group_id;
@@ -1053,14 +1108,14 @@ static int fuse_fill_super(struct super_block *sb, void *data, int silent)
        root = fuse_get_root_inode(sb, d.rootmode);
        root_dentry = d_make_root(root);
        if (!root_dentry)
-               goto err_put_conn;
+               goto err_dev_free;
        /* only now - we want root dentry with NULL ->d_op */
        sb->s_d_op = &fuse_dentry_operations;
 
        init_req = fuse_request_alloc(0);
        if (!init_req)
                goto err_put_root;
-       init_req->background = 1;
+       __set_bit(FR_BACKGROUND, &init_req->flags);
 
        if (is_bdev) {
                fc->destroy_req = fuse_request_alloc(0);
@@ -1079,8 +1134,7 @@ static int fuse_fill_super(struct super_block *sb, void *data, int silent)
 
        list_add_tail(&fc->entry, &fuse_conn_list);
        sb->s_root = root_dentry;
-       fc->connected = 1;
-       file->private_data = fuse_conn_get(fc);
+       file->private_data = fud;
        mutex_unlock(&fuse_mutex);
        /*
         * atomic_dec_and_test() in fput() provides the necessary
@@ -1099,6 +1153,8 @@ static int fuse_fill_super(struct super_block *sb, void *data, int silent)
        fuse_request_free(init_req);
  err_put_root:
        dput(root_dentry);
+ err_dev_free:
+       fuse_dev_free(fud);
  err_put_conn:
        fuse_bdi_destroy(fc);
        fuse_conn_put(fc);
@@ -1238,7 +1294,6 @@ static void fuse_fs_cleanup(void)
 }
 
 static struct kobject *fuse_kobj;
-static struct kobject *connections_kobj;
 
 static int fuse_sysfs_init(void)
 {
@@ -1250,11 +1305,9 @@ static int fuse_sysfs_init(void)
                goto out_err;
        }
 
-       connections_kobj = kobject_create_and_add("connections", fuse_kobj);
-       if (!connections_kobj) {
-               err = -ENOMEM;
+       err = sysfs_create_mount_point(fuse_kobj, "connections");
+       if (err)
                goto out_fuse_unregister;
-       }
 
        return 0;
 
@@ -1266,7 +1319,7 @@ static int fuse_sysfs_init(void)
 
 static void fuse_sysfs_cleanup(void)
 {
-       kobject_put(connections_kobj);
+       sysfs_remove_mount_point(fuse_kobj, "connections");
        kobject_put(fuse_kobj);
 }
 
index 95d255219b1eb89c68db91d85037a19af20cf1a6..1f1c7dcbcc2ff4066fef329828af148f7c058662 100644 (file)
@@ -252,7 +252,7 @@ extern void hfs_mark_mdb_dirty(struct super_block *sb);
 #define __hfs_u_to_mtime(sec)  cpu_to_be32(sec + 2082844800U - sys_tz.tz_minuteswest * 60)
 #define __hfs_m_to_utime(sec)  (be32_to_cpu(sec) - 2082844800U  + sys_tz.tz_minuteswest * 60)
 
-#define HFS_I(inode)   (list_entry(inode, struct hfs_inode_info, vfs_inode))
+#define HFS_I(inode)   (container_of(inode, struct hfs_inode_info, vfs_inode))
 #define HFS_SB(sb)     ((struct hfs_sb_info *)(sb)->s_fs_info)
 
 #define hfs_m_to_utime(time)   (struct timespec){ .tv_sec = __hfs_m_to_utime(time) }
index b0441d65fa54ec405520e62207d724f71cc61411..f91a1faf819e9f06ae31b74f157e2c5627f4ebea 100644 (file)
@@ -263,7 +263,7 @@ struct hfsplus_inode_info {
 
 static inline struct hfsplus_inode_info *HFSPLUS_I(struct inode *inode)
 {
-       return list_entry(inode, struct hfsplus_inode_info, vfs_inode);
+       return container_of(inode, struct hfsplus_inode_info, vfs_inode);
 }
 
 /*
index b63b75fa00e7f1e26dcedf6884fb33f760ee6253..bb04b58d1d698486ec31c4fd3e91dab567dbb517 100644 (file)
@@ -304,7 +304,7 @@ extern const struct address_space_operations hpfs_symlink_aops;
 
 static inline struct hpfs_inode_info *hpfs_i(struct inode *inode)
 {
-       return list_entry(inode, struct hpfs_inode_info, vfs_inode);
+       return container_of(inode, struct hpfs_inode_info, vfs_inode);
 }
 
 static inline struct hpfs_sb_info *hpfs_sb(struct super_block *sb)
index 069721f0cc0e0b733bb659fb0d7836cd71499690..d30640f7a193879d07f4ff2c12efe3b817a386f3 100644 (file)
@@ -841,7 +841,11 @@ unsigned int get_next_ino(void)
        }
 #endif
 
-       *p = ++res;
+       res++;
+       /* get_next_ino should not provide a 0 inode number */
+       if (unlikely(!res))
+               res++;
+       *p = res;
        put_cpu_var(last_ino);
        return res;
 }
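
The get_next_ino() change makes the shared counter skip 0 when it wraps, since userspace commonly treats inode number 0 as "no inode". A single-threaded model of just the wrap handling (the real function hands out per-CPU batches, omitted here):

    #include <stdio.h>

    static unsigned int last_ino;

    static unsigned int get_next_ino_demo(void)
    {
            unsigned int res = last_ino;

            res++;
            /* never hand out inode number 0 */
            if (res == 0)
                    res++;
            last_ino = res;
            return res;
    }

    int main(void)
    {
            last_ino = 0xFFFFFFFFu;                 /* force the wrap */
            printf("%u\n", get_next_ino_demo());    /* prints 1, not 0 */
            return 0;
    }
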
@@ -1674,7 +1678,31 @@ int should_remove_suid(struct dentry *dentry)
 }
 EXPORT_SYMBOL(should_remove_suid);
 
-static int __remove_suid(struct dentry *dentry, int kill)
+/*
+ * Return mask of changes for notify_change() that need to be done as a
+ * response to write or truncate. Return 0 if nothing has to be changed.
+ * Negative value on error (change should be denied).
+ */
+int dentry_needs_remove_privs(struct dentry *dentry)
+{
+       struct inode *inode = d_inode(dentry);
+       int mask = 0;
+       int ret;
+
+       if (IS_NOSEC(inode))
+               return 0;
+
+       mask = should_remove_suid(dentry);
+       ret = security_inode_need_killpriv(dentry);
+       if (ret < 0)
+               return ret;
+       if (ret)
+               mask |= ATTR_KILL_PRIV;
+       return mask;
+}
+EXPORT_SYMBOL(dentry_needs_remove_privs);
+
+static int __remove_privs(struct dentry *dentry, int kill)
 {
        struct iattr newattrs;
 
@@ -1686,33 +1714,32 @@ static int __remove_suid(struct dentry *dentry, int kill)
        return notify_change(dentry, &newattrs, NULL);
 }
 
-int file_remove_suid(struct file *file)
+/*
+ * Remove special file privileges (suid, capabilities) when file is written
+ * to or truncated.
+ */
+int file_remove_privs(struct file *file)
 {
        struct dentry *dentry = file->f_path.dentry;
        struct inode *inode = d_inode(dentry);
-       int killsuid;
-       int killpriv;
+       int kill;
        int error = 0;
 
        /* Fast path for nothing security related */
        if (IS_NOSEC(inode))
                return 0;
 
-       killsuid = should_remove_suid(dentry);
-       killpriv = security_inode_need_killpriv(dentry);
-
-       if (killpriv < 0)
-               return killpriv;
-       if (killpriv)
-               error = security_inode_killpriv(dentry);
-       if (!error && killsuid)
-               error = __remove_suid(dentry, killsuid);
-       if (!error && (inode->i_sb->s_flags & MS_NOSEC))
-               inode->i_flags |= S_NOSEC;
+       kill = file_needs_remove_privs(file);
+       if (kill < 0)
+               return kill;
+       if (kill)
+               error = __remove_privs(dentry, kill);
+       if (!error)
+               inode_has_no_xattr(inode);
 
        return error;
 }
-EXPORT_SYMBOL(file_remove_suid);
+EXPORT_SYMBOL(file_remove_privs);
 
 /**
  *     file_update_time        -       update mtime and ctime time
@@ -1967,9 +1994,8 @@ EXPORT_SYMBOL(inode_dio_wait);
  * inode is being instantiated).  The reason for the cmpxchg() loop
  * --- which wouldn't be necessary if all code paths which modify
  * i_flags actually followed this rule, is that there is at least one
- * code path which doesn't today --- for example,
- * __generic_file_aio_write() calls file_remove_suid() without holding
- * i_mutex --- so we use cmpxchg() out of an abundance of caution.
+ * code path which doesn't today so we use cmpxchg() out of an abundance
+ * of caution.
  *
  * In the long run, i_mutex is overkill, and we should probably look
  * at using the i_lock spinlock to protect i_flags, and then make sure
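
file_remove_suid() becomes file_remove_privs(), split so that dentry_needs_remove_privs() only computes the mask of attributes to clear while __remove_privs() applies it through notify_change(). A hedged sketch of how a write path typically calls the new helper under i_mutex; the example_* names are invented for illustration and error handling is trimmed:

    #include <linux/fs.h>

    static ssize_t example_write(struct file *file, const char __user *buf,
                                 size_t len, loff_t *ppos)
    {
            struct inode *inode = file_inode(file);
            ssize_t ret;

            mutex_lock(&inode->i_mutex);
            /* drop suid/sgid and security capabilities before modifying data */
            ret = file_remove_privs(file);
            if (!ret)
                    ret = example_do_write(file, buf, len, ppos); /* hypothetical */
            mutex_unlock(&inode->i_mutex);
            return ret;
    }
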
index 01dce1d1476b7bc93633f787e989c464c5f2ef58..4d5af583ab031964f5fd427953940e26ec671a78 100644 (file)
@@ -107,6 +107,7 @@ extern struct file *do_file_open_root(struct dentry *, struct vfsmount *,
 extern long do_handle_open(int mountdirfd,
                           struct file_handle __user *ufh, int open_flag);
 extern int open_check_o_direct(struct file *f);
+extern int vfs_open(const struct path *, struct file *, const struct cred *);
 
 /*
  * inode.c
index d200a9b8fd5efc86e71113bcb8090d0c87674a90..824e61ede465fd6ec4c433785dcdfb2da96dc848 100644 (file)
@@ -19,7 +19,7 @@
 struct kstatfs;
 struct kvec;
 
-#define JFFS2_INODE_INFO(i) (list_entry(i, struct jffs2_inode_info, vfs_inode))
+#define JFFS2_INODE_INFO(i) (container_of(i, struct jffs2_inode_info, vfs_inode))
 #define OFNI_EDONI_2SFFJ(f)  (&(f)->vfs_inode)
 #define JFFS2_SB_INFO(sb) (sb->s_fs_info)
 #define OFNI_BS_2SFFJ(c)  ((struct super_block *)c->os_priv)
index fa7e795bd8aec0a421656a82c22e2a2ccfe18ea4..1f26d1910409afb8d3de7ba4455b7b195ed5c4bd 100644 (file)
@@ -206,7 +206,7 @@ struct jfs_sb_info {
 
 static inline struct jfs_inode_info *JFS_IP(struct inode *inode)
 {
-       return list_entry(inode, struct jfs_inode_info, vfs_inode);
+       return container_of(inode, struct jfs_inode_info, vfs_inode);
 }
 
 static inline int jfs_dirtable_inline(struct inode *inode)
index fffca9517321c88ee1e0128b864b257c56e2350d..2d48d28e164015668dcc8d04ff72148f07f28e38 100644 (file)
@@ -592,6 +592,9 @@ int kernfs_add_one(struct kernfs_node *kn)
                goto out_unlock;
 
        ret = -ENOENT;
+       if (parent->flags & KERNFS_EMPTY_DIR)
+               goto out_unlock;
+
        if ((parent->flags & KERNFS_ACTIVATED) && !kernfs_active(parent))
                goto out_unlock;
 
@@ -783,6 +786,38 @@ struct kernfs_node *kernfs_create_dir_ns(struct kernfs_node *parent,
        return ERR_PTR(rc);
 }
 
+/**
+ * kernfs_create_empty_dir - create an always empty directory
+ * @parent: parent in which to create a new directory
+ * @name: name of the new directory
+ *
+ * Returns the created node on success, ERR_PTR() value on failure.
+ */
+struct kernfs_node *kernfs_create_empty_dir(struct kernfs_node *parent,
+                                           const char *name)
+{
+       struct kernfs_node *kn;
+       int rc;
+
+       /* allocate */
+       kn = kernfs_new_node(parent, name, S_IRUGO|S_IXUGO|S_IFDIR, KERNFS_DIR);
+       if (!kn)
+               return ERR_PTR(-ENOMEM);
+
+       kn->flags |= KERNFS_EMPTY_DIR;
+       kn->dir.root = parent->dir.root;
+       kn->ns = NULL;
+       kn->priv = NULL;
+
+       /* link in */
+       rc = kernfs_add_one(kn);
+       if (!rc)
+               return kn;
+
+       kernfs_put(kn);
+       return ERR_PTR(rc);
+}
+
 static struct dentry *kernfs_iop_lookup(struct inode *dir,
                                        struct dentry *dentry,
                                        unsigned int flags)
@@ -1254,7 +1289,8 @@ int kernfs_rename_ns(struct kernfs_node *kn, struct kernfs_node *new_parent,
        mutex_lock(&kernfs_mutex);
 
        error = -ENOENT;
-       if (!kernfs_active(kn) || !kernfs_active(new_parent))
+       if (!kernfs_active(kn) || !kernfs_active(new_parent) ||
+           (new_parent->flags & KERNFS_EMPTY_DIR))
                goto out;
 
        error = 0;
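
kernfs_create_empty_dir() creates a directory that can never gain children: kernfs_add_one() now refuses to add nodes under a KERNFS_EMPTY_DIR parent and kernfs_rename_ns() refuses to move nodes into one. This is the building block for sysfs mount points. Roughly how a wrapper such as sysfs_create_mount_point() can sit on top of it (a sketch of the idea, not code shown on this page):

    #include <linux/err.h>
    #include <linux/kernfs.h>
    #include <linux/kobject.h>

    int example_create_mount_point(struct kobject *parent_kobj, const char *name)
    {
            struct kernfs_node *parent = parent_kobj->sd;
            struct kernfs_node *kn;

            /* an always-empty directory; safe for userspace to mount over */
            kn = kernfs_create_empty_dir(parent, name);
            if (IS_ERR(kn))
                    return PTR_ERR(kn);
            return 0;
    }
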
index 2da8493a380b8a43a8eca65c04f4de245c4b0219..756dd56aaf60acd337fb251fb287cbd382d57740 100644 (file)
@@ -296,6 +296,8 @@ static void kernfs_init_inode(struct kernfs_node *kn, struct inode *inode)
        case KERNFS_DIR:
                inode->i_op = &kernfs_dir_iops;
                inode->i_fop = &kernfs_dir_fops;
+               if (kn->flags & KERNFS_EMPTY_DIR)
+                       make_empty_dir_inode(inode);
                break;
        case KERNFS_FILE:
                inode->i_size = kn->attr.size;
index 65e1feca8b982c55bff37e5a85529f8cb0d4121e..102edfd39000c15f14594a47b6236ba8edb5ccc8 100644 (file)
 
 #include "internal.h"
 
-static inline int simple_positive(struct dentry *dentry)
-{
-       return d_really_is_positive(dentry) && !d_unhashed(dentry);
-}
-
 int simple_getattr(struct vfsmount *mnt, struct dentry *dentry,
                   struct kstat *stat)
 {
@@ -1108,3 +1103,98 @@ const struct inode_operations simple_symlink_inode_operations = {
        .readlink = generic_readlink
 };
 EXPORT_SYMBOL(simple_symlink_inode_operations);
+
+/*
+ * Operations for a permanently empty directory.
+ */
+static struct dentry *empty_dir_lookup(struct inode *dir, struct dentry *dentry, unsigned int flags)
+{
+       return ERR_PTR(-ENOENT);
+}
+
+static int empty_dir_getattr(struct vfsmount *mnt, struct dentry *dentry,
+                                struct kstat *stat)
+{
+       struct inode *inode = d_inode(dentry);
+       generic_fillattr(inode, stat);
+       return 0;
+}
+
+static int empty_dir_setattr(struct dentry *dentry, struct iattr *attr)
+{
+       return -EPERM;
+}
+
+static int empty_dir_setxattr(struct dentry *dentry, const char *name,
+                             const void *value, size_t size, int flags)
+{
+       return -EOPNOTSUPP;
+}
+
+static ssize_t empty_dir_getxattr(struct dentry *dentry, const char *name,
+                                 void *value, size_t size)
+{
+       return -EOPNOTSUPP;
+}
+
+static int empty_dir_removexattr(struct dentry *dentry, const char *name)
+{
+       return -EOPNOTSUPP;
+}
+
+static ssize_t empty_dir_listxattr(struct dentry *dentry, char *list, size_t size)
+{
+       return -EOPNOTSUPP;
+}
+
+static const struct inode_operations empty_dir_inode_operations = {
+       .lookup         = empty_dir_lookup,
+       .permission     = generic_permission,
+       .setattr        = empty_dir_setattr,
+       .getattr        = empty_dir_getattr,
+       .setxattr       = empty_dir_setxattr,
+       .getxattr       = empty_dir_getxattr,
+       .removexattr    = empty_dir_removexattr,
+       .listxattr      = empty_dir_listxattr,
+};
+
+static loff_t empty_dir_llseek(struct file *file, loff_t offset, int whence)
+{
+       /* An empty directory has two entries . and .. at offsets 0 and 1 */
+       return generic_file_llseek_size(file, offset, whence, 2, 2);
+}
+
+static int empty_dir_readdir(struct file *file, struct dir_context *ctx)
+{
+       dir_emit_dots(file, ctx);
+       return 0;
+}
+
+static const struct file_operations empty_dir_operations = {
+       .llseek         = empty_dir_llseek,
+       .read           = generic_read_dir,
+       .iterate        = empty_dir_readdir,
+       .fsync          = noop_fsync,
+};
+
+
+void make_empty_dir_inode(struct inode *inode)
+{
+       set_nlink(inode, 2);
+       inode->i_mode = S_IFDIR | S_IRUGO | S_IXUGO;
+       inode->i_uid = GLOBAL_ROOT_UID;
+       inode->i_gid = GLOBAL_ROOT_GID;
+       inode->i_rdev = 0;
+       inode->i_size = 2;
+       inode->i_blkbits = PAGE_SHIFT;
+       inode->i_blocks = 0;
+
+       inode->i_op = &empty_dir_inode_operations;
+       inode->i_fop = &empty_dir_operations;
+}
+
+bool is_empty_dir_inode(struct inode *inode)
+{
+       return (inode->i_fop == &empty_dir_operations) &&
+               (inode->i_op == &empty_dir_inode_operations);
+}
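
make_empty_dir_inode() turns an inode into a permanently empty directory: readdir emits only "." and "..", lookups return -ENOENT, and setattr/xattr operations are refused, while is_empty_dir_inode() lets the mount code recognise such inodes later. A sketch of a pseudo-filesystem marking one of its directories empty while building the inode; example_get_inode() is an invented name:

    #include <linux/fs.h>

    static struct inode *example_get_inode(struct super_block *sb, umode_t mode,
                                           bool always_empty)
    {
            struct inode *inode = new_inode(sb);

            if (!inode)
                    return NULL;

            inode->i_ino = get_next_ino();
            inode->i_mode = mode;

            if (S_ISDIR(mode) && always_empty)
                    make_empty_dir_inode(inode);    /* "." and ".." only */

            return inode;
    }
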
index 118e4e7bc9351cb3563a0ce85798301c83f0213e..d19ac258105aadb44382650de7b50504b6eaf237 100644 (file)
@@ -45,11 +45,6 @@ minix_last_byte(struct inode *inode, unsigned long page_nr)
        return last_byte;
 }
 
-static inline unsigned long dir_pages(struct inode *inode)
-{
-       return (inode->i_size+PAGE_CACHE_SIZE-1)>>PAGE_CACHE_SHIFT;
-}
-
 static int dir_commit_chunk(struct page *page, loff_t pos, unsigned len)
 {
        struct address_space *mapping = page->mapping;
index 1ebd11854622946e91633726b919aacfee0198ae..01ad81dcacc5a45e1f15bbce6cc61508380e4c27 100644 (file)
@@ -84,7 +84,7 @@ static inline struct minix_sb_info *minix_sb(struct super_block *sb)
 
 static inline struct minix_inode_info *minix_i(struct inode *inode)
 {
-       return list_entry(inode, struct minix_inode_info, vfs_inode);
+       return container_of(inode, struct minix_inode_info, vfs_inode);
 }
 
 static inline unsigned minix_blocks_needed(unsigned bits, unsigned blocksize)
index b5b8082bfa4208086a7ee06741cb76670d51bab9..14db05d424f7059d9bd035f88f9c50b719447ee8 100644 (file)
@@ -118,7 +118,6 @@ static inline void unlock_mount_hash(void)
 }
 
 struct proc_mounts {
-       struct seq_file m;
        struct mnt_namespace *ns;
        struct path root;
        int (*show)(struct seq_file *, struct vfsmount *);
@@ -127,8 +126,6 @@ struct proc_mounts {
        loff_t cached_index;
 };
 
-#define proc_mounts(p) (container_of((p), struct proc_mounts, m))
-
 extern const struct seq_operations mounts_op;
 
 extern bool __is_local_mountpoint(struct dentry *dentry);
index 2dad0eaf91d34d8f47d3cc525eafd45107d429bd..ae4e4c18b2ac0b2c366f893f7ffa4d898446c0f7 100644 (file)
@@ -792,7 +792,7 @@ static void set_root(struct nameidata *nd)
        get_fs_root(current->fs, &nd->root);
 }
 
-static unsigned set_root_rcu(struct nameidata *nd)
+static void set_root_rcu(struct nameidata *nd)
 {
        struct fs_struct *fs = current->fs;
        unsigned seq;
@@ -802,7 +802,6 @@ static unsigned set_root_rcu(struct nameidata *nd)
                nd->root = fs->root;
                nd->root_seq = __read_seqcount_begin(&nd->root.dentry->d_seq);
        } while (read_seqcount_retry(&fs->seq, seq));
-       return nd->root_seq;
 }
 
 static void path_put_conditional(struct path *path, struct nameidata *nd)
@@ -1998,7 +1997,8 @@ static const char *path_init(struct nameidata *nd, unsigned flags)
        if (*s == '/') {
                if (flags & LOOKUP_RCU) {
                        rcu_read_lock();
-                       nd->seq = set_root_rcu(nd);
+                       set_root_rcu(nd);
+                       nd->seq = nd->root_seq;
                } else {
                        set_root(nd);
                        path_get(&nd->root);
index 9c1c43d0d4f10112bcf711068873a70e2a057200..c7cb8a526c05fbaa5ba18934cd04eda1eb2a6e60 100644 (file)
@@ -1226,7 +1226,7 @@ EXPORT_SYMBOL(replace_mount_options);
 /* iterator; we want it to have access to namespace_sem, thus here... */
 static void *m_start(struct seq_file *m, loff_t *pos)
 {
-       struct proc_mounts *p = proc_mounts(m);
+       struct proc_mounts *p = m->private;
 
        down_read(&namespace_sem);
        if (p->cached_event == p->ns->event) {
@@ -1247,7 +1247,7 @@ static void *m_start(struct seq_file *m, loff_t *pos)
 
 static void *m_next(struct seq_file *m, void *v, loff_t *pos)
 {
-       struct proc_mounts *p = proc_mounts(m);
+       struct proc_mounts *p = m->private;
 
        p->cached_mount = seq_list_next(v, &p->ns->list, pos);
        p->cached_index = *pos;
@@ -1261,7 +1261,7 @@ static void m_stop(struct seq_file *m, void *v)
 
 static int m_show(struct seq_file *m, void *v)
 {
-       struct proc_mounts *p = proc_mounts(m);
+       struct proc_mounts *p = m->private;
        struct mount *r = list_entry(v, struct mount, mnt_list);
        return p->show(m, &r->mnt);
 }
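
With struct proc_mounts no longer embedding the seq_file (see the fs/mount.h hunk above), the iterators reach their per-open state through m->private, the standard seq_file mechanism: whatever pointer was stored at open time comes back in every start/next/show callback. A minimal sketch of that pattern with invented example_* names:

    #include <linux/seq_file.h>

    struct example_state {
            int value;
    };

    static int example_show(struct seq_file *m, void *v)
    {
            /* the pointer stashed at open time, e.g. by seq_open_private() */
            struct example_state *state = m->private;

            seq_printf(m, "value=%d\n", state->value);
            return 0;
    }
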
@@ -2343,6 +2343,8 @@ unlock:
        return err;
 }
 
+static bool fs_fully_visible(struct file_system_type *fs_type, int *new_mnt_flags);
+
 /*
  * create a new mount for userspace and request it to be added into the
  * namespace's tree
@@ -2374,6 +2376,10 @@ static int do_new_mount(struct path *path, const char *fstype, int flags,
                        flags |= MS_NODEV;
                        mnt_flags |= MNT_NODEV | MNT_LOCK_NODEV;
                }
+               if (type->fs_flags & FS_USERNS_VISIBLE) {
+                       if (!fs_fully_visible(type, &mnt_flags))
+                               return -EPERM;
+               }
        }
 
        mnt = vfs_kern_mount(type, flags, name, data);
@@ -3175,9 +3181,10 @@ bool current_chrooted(void)
        return chrooted;
 }
 
-bool fs_fully_visible(struct file_system_type *type)
+static bool fs_fully_visible(struct file_system_type *type, int *new_mnt_flags)
 {
        struct mnt_namespace *ns = current->nsproxy->mnt_ns;
+       int new_flags = *new_mnt_flags;
        struct mount *mnt;
        bool visible = false;
 
@@ -3196,16 +3203,36 @@ bool fs_fully_visible(struct file_system_type *type)
                if (mnt->mnt.mnt_root != mnt->mnt.mnt_sb->s_root)
                        continue;
 
-               /* This mount is not fully visible if there are any child mounts
-                * that cover anything except for empty directories.
+               /* Verify the mount flags are equal to or more permissive
+                * than the proposed new mount.
+                */
+               if ((mnt->mnt.mnt_flags & MNT_LOCK_READONLY) &&
+                   !(new_flags & MNT_READONLY))
+                       continue;
+               if ((mnt->mnt.mnt_flags & MNT_LOCK_NODEV) &&
+                   !(new_flags & MNT_NODEV))
+                       continue;
+               if ((mnt->mnt.mnt_flags & MNT_LOCK_ATIME) &&
+                   ((mnt->mnt.mnt_flags & MNT_ATIME_MASK) != (new_flags & MNT_ATIME_MASK)))
+                       continue;
+
+               /* This mount is not fully visible if there are any
+                * locked child mounts that cover anything except for
+                * empty directories.
                 */
                list_for_each_entry(child, &mnt->mnt_mounts, mnt_child) {
                        struct inode *inode = child->mnt_mountpoint->d_inode;
-                       if (!S_ISDIR(inode->i_mode))
-                               goto next;
-                       if (inode->i_nlink > 2)
+                       /* Only worry about locked mounts */
+                       if (!(mnt->mnt.mnt_flags & MNT_LOCKED))
+                               continue;
+                       /* Is the directory permanently empty? */
+                       if (!is_empty_dir_inode(inode))
                                goto next;
                }
+               /* Preserve the locked attributes */
+               *new_mnt_flags |= mnt->mnt.mnt_flags & (MNT_LOCK_READONLY | \
+                                                       MNT_LOCK_NODEV    | \
+                                                       MNT_LOCK_ATIME);
                visible = true;
                goto found;
        next:   ;
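
The reworked fs_fully_visible() only treats an existing mount as making the filesystem visible if the proposed new mount is at least as restrictive as that mount's locked flags, and only if any locked child mounts cover permanently empty directories; the locked flags are then copied onto the new mount. The flag comparison can be restated as a standalone predicate (this helper does not exist in the kernel, it just mirrors the three checks above):

    #include <linux/mount.h>
    #include <linux/types.h>

    static bool at_least_as_restrictive(int existing_flags, int proposed_flags)
    {
            if ((existing_flags & MNT_LOCK_READONLY) &&
                !(proposed_flags & MNT_READONLY))
                    return false;
            if ((existing_flags & MNT_LOCK_NODEV) &&
                !(proposed_flags & MNT_NODEV))
                    return false;
            if ((existing_flags & MNT_LOCK_ATIME) &&
                ((existing_flags & MNT_ATIME_MASK) !=
                 (proposed_flags & MNT_ATIME_MASK)))
                    return false;
            return true;
    }
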
index 80021c709af9cc02dc2d3a41a0f14fd7b6409e89..93575e91a7aa7b2993afb16b781c657fa2d6a7af 100644 (file)
@@ -1145,6 +1145,8 @@ static int ncp_rename(struct inode *old_dir, struct dentry *old_dentry,
                case 0x00:
                        ncp_dbg(1, "renamed %pd -> %pd\n",
                                old_dentry, new_dentry);
+                       ncp_d_prune(old_dentry);
+                       ncp_d_prune(new_dentry);
                        break;
                case 0x9E:
                        error = -ENAMETOOLONG;
index 8d129bb7355afbb2ca7f1904ff0263f604e86dd6..682529c009966b85f986955c04d2b48fb645e981 100644 (file)
@@ -458,7 +458,7 @@ check_gss_callback_principal(struct nfs_client *clp, struct svc_rqst *rqstp)
  * pg_authenticate method for nfsv4 callback threads.
  *
  * The authflavor has been negotiated, so an incorrect flavor is a server
- * bug. Drop packets with incorrect authflavor.
+ * bug. Deny packets with incorrect authflavor.
  *
  * All other checking done after NFS decoding where the nfs_client can be
  * found in nfs4_callback_compound
@@ -468,12 +468,12 @@ static int nfs_callback_authenticate(struct svc_rqst *rqstp)
        switch (rqstp->rq_authop->flavour) {
        case RPC_AUTH_NULL:
                if (rqstp->rq_proc != CB_NULL)
-                       return SVC_DROP;
+                       return SVC_DENIED;
                break;
        case RPC_AUTH_GSS:
                /* No RPC_AUTH_GSS support yet in NFSv4.1 */
                 if (svc_is_backchannel(rqstp))
-                       return SVC_DROP;
+                       return SVC_DENIED;
        }
        return SVC_OK;
 }
index 197806fb87ffb459c19f3c4bbc8da50c58c870dc..29e3c1b011b73e4661f4deb1ef200e2f8d27792b 100644 (file)
@@ -327,10 +327,8 @@ validate_seqid(struct nfs4_slot_table *tbl, struct cb_sequenceargs * args)
        dprintk("%s slot table seqid: %u\n", __func__, slot->seq_nr);
 
        /* Normal */
-       if (likely(args->csa_sequenceid == slot->seq_nr + 1)) {
-               slot->seq_nr++;
+       if (likely(args->csa_sequenceid == slot->seq_nr + 1))
                goto out_ok;
-       }
 
        /* Replay */
        if (args->csa_sequenceid == slot->seq_nr) {
@@ -418,6 +416,7 @@ __be32 nfs4_callback_sequence(struct cb_sequenceargs *args,
                              struct cb_process_state *cps)
 {
        struct nfs4_slot_table *tbl;
+       struct nfs4_slot *slot;
        struct nfs_client *clp;
        int i;
        __be32 status = htonl(NFS4ERR_BADSESSION);
@@ -429,25 +428,32 @@ __be32 nfs4_callback_sequence(struct cb_sequenceargs *args,
 
        if (!(clp->cl_session->flags & SESSION4_BACK_CHAN))
                goto out;
+
        tbl = &clp->cl_session->bc_slot_table;
+       slot = tbl->slots + args->csa_slotid;
 
        spin_lock(&tbl->slot_tbl_lock);
        /* state manager is resetting the session */
        if (test_bit(NFS4_SLOT_TBL_DRAINING, &tbl->slot_tbl_state)) {
-               spin_unlock(&tbl->slot_tbl_lock);
                status = htonl(NFS4ERR_DELAY);
                /* Return NFS4ERR_BADSESSION if we're draining the session
                 * in order to reset it.
                 */
                if (test_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state))
                        status = htonl(NFS4ERR_BADSESSION);
-               goto out;
+               goto out_unlock;
        }
 
-       status = validate_seqid(&clp->cl_session->bc_slot_table, args);
-       spin_unlock(&tbl->slot_tbl_lock);
+       memcpy(&res->csr_sessionid, &args->csa_sessionid,
+              sizeof(res->csr_sessionid));
+       res->csr_sequenceid = args->csa_sequenceid;
+       res->csr_slotid = args->csa_slotid;
+       res->csr_highestslotid = NFS41_BC_MAX_CALLBACKS - 1;
+       res->csr_target_highestslotid = NFS41_BC_MAX_CALLBACKS - 1;
+
+       status = validate_seqid(tbl, args);
        if (status)
-               goto out;
+               goto out_unlock;
 
        cps->slotid = args->csa_slotid;
 
@@ -458,15 +464,17 @@ __be32 nfs4_callback_sequence(struct cb_sequenceargs *args,
         */
        if (referring_call_exists(clp, args->csa_nrclists, args->csa_rclists)) {
                status = htonl(NFS4ERR_DELAY);
-               goto out;
+               goto out_unlock;
        }
 
-       memcpy(&res->csr_sessionid, &args->csa_sessionid,
-              sizeof(res->csr_sessionid));
-       res->csr_sequenceid = args->csa_sequenceid;
-       res->csr_slotid = args->csa_slotid;
-       res->csr_highestslotid = NFS41_BC_MAX_CALLBACKS - 1;
-       res->csr_target_highestslotid = NFS41_BC_MAX_CALLBACKS - 1;
+       /*
+        * RFC5661 20.9.3
+        * If CB_SEQUENCE returns an error, then the state of the slot
+        * (sequence ID, cached reply) MUST NOT change.
+        */
+       slot->seq_nr++;
+out_unlock:
+       spin_unlock(&tbl->slot_tbl_lock);
 
 out:
        cps->clp = clp; /* put in nfs4_callback_compound */
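
The nfs4_callback_sequence() rework fills in the reply early but, per RFC 5661 section 20.9.3, only bumps the slot's sequence number once every check has passed, so a failed CB_SEQUENCE leaves the slot state untouched. The sequencing rule itself boils down to something like the following simplified model (not kernel code):

    enum cb_seq_status { CB_SEQ_OK, CB_SEQ_REPLAY, CB_SEQ_MISORDERED };

    static enum cb_seq_status check_slot_seqid(unsigned int *slot_seq_nr,
                                               unsigned int incoming)
    {
            if (incoming == *slot_seq_nr + 1) {
                    (*slot_seq_nr)++;          /* advance only on full success */
                    return CB_SEQ_OK;
            }
            if (incoming == *slot_seq_nr)
                    return CB_SEQ_REPLAY;      /* resend of the last request */
            return CB_SEQ_MISORDERED;          /* error: slot state unchanged */
    }
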
index 19ca95cdfd9b0f26aedbbc23f036babf2aeca67a..6b1697a01dde35e1384d72a4a7cd2e02d1afbf84 100644 (file)
@@ -909,7 +909,7 @@ static __be32 nfs4_callback_compound(struct svc_rqst *rqstp, void *argp, void *r
        xdr_init_encode(&xdr_out, &rqstp->rq_res, p);
 
        status = decode_compound_hdr_arg(&xdr_in, &hdr_arg);
-       if (status == __constant_htonl(NFS4ERR_RESOURCE))
+       if (status == htonl(NFS4ERR_RESOURCE))
                return rpc_garbage_args;
 
        if (hdr_arg.minorversion == 0) {
index 892aefff36300a0861f9bf5d43cb7a4b5069d873..ecebb406cc1aec554ce780f2c3680eddc351e8da 100644 (file)
@@ -825,7 +825,6 @@ error:
  * Load up the server record from information gained in an fsinfo record
  */
 static void nfs_server_set_fsinfo(struct nfs_server *server,
-                                 struct nfs_fh *mntfh,
                                  struct nfs_fsinfo *fsinfo)
 {
        unsigned long max_rpc_payload;
@@ -901,7 +900,7 @@ int nfs_probe_fsinfo(struct nfs_server *server, struct nfs_fh *mntfh, struct nfs
        if (error < 0)
                goto out_error;
 
-       nfs_server_set_fsinfo(server, mntfh, &fsinfo);
+       nfs_server_set_fsinfo(server, &fsinfo);
 
        /* Get some general file system info */
        if (server->namelen == 0) {
@@ -1193,8 +1192,6 @@ void nfs_clients_init(struct net *net)
 }
 
 #ifdef CONFIG_PROC_FS
-static struct proc_dir_entry *proc_fs_nfs;
-
 static int nfs_server_list_open(struct inode *inode, struct file *file);
 static void *nfs_server_list_start(struct seq_file *p, loff_t *pos);
 static void *nfs_server_list_next(struct seq_file *p, void *v, loff_t *pos);
@@ -1364,27 +1361,29 @@ static int nfs_volume_list_show(struct seq_file *m, void *v)
 {
        struct nfs_server *server;
        struct nfs_client *clp;
-       char dev[8], fsid[17];
+       char dev[13];   // 8 for 2^24, 1 for ':', 3 for 2^8, 1 for '\0'
+       char fsid[34];  // 2 * 16 for %llx, 1 for ':', 1 for '\0'
        struct nfs_net *nn = net_generic(seq_file_net(m), nfs_net_id);
 
        /* display header on line 1 */
        if (v == &nn->nfs_volume_list) {
-               seq_puts(m, "NV SERVER   PORT DEV     FSID              FSC\n");
+               seq_puts(m, "NV SERVER   PORT DEV          FSID"
+                           "                              FSC\n");
                return 0;
        }
        /* display one transport per line on subsequent lines */
        server = list_entry(v, struct nfs_server, master_link);
        clp = server->nfs_client;
 
-       snprintf(dev, 8, "%u:%u",
+       snprintf(dev, sizeof(dev), "%u:%u",
                 MAJOR(server->s_dev), MINOR(server->s_dev));
 
-       snprintf(fsid, 17, "%llx:%llx",
+       snprintf(fsid, sizeof(fsid), "%llx:%llx",
                 (unsigned long long) server->fsid.major,
                 (unsigned long long) server->fsid.minor);
 
        rcu_read_lock();
-       seq_printf(m, "v%u %s %s %-7s %-17s %s\n",
+       seq_printf(m, "v%u %s %s %-12s %-33s %s\n",
                   clp->rpc_ops->version,
                   rpc_peeraddr2str(clp->cl_rpcclient, RPC_DISPLAY_HEX_ADDR),
                   rpc_peeraddr2str(clp->cl_rpcclient, RPC_DISPLAY_HEX_PORT),
@@ -1434,27 +1433,20 @@ void nfs_fs_proc_net_exit(struct net *net)
  */
 int __init nfs_fs_proc_init(void)
 {
-       struct proc_dir_entry *p;
-
-       proc_fs_nfs = proc_mkdir("fs/nfsfs", NULL);
-       if (!proc_fs_nfs)
+       if (!proc_mkdir("fs/nfsfs", NULL))
                goto error_0;
 
        /* a file of servers with which we're dealing */
-       p = proc_symlink("servers", proc_fs_nfs, "../../net/nfsfs/servers");
-       if (!p)
+       if (!proc_symlink("fs/nfsfs/servers", NULL, "../../net/nfsfs/servers"))
                goto error_1;
 
        /* a file of volumes that we have mounted */
-       p = proc_symlink("volumes", proc_fs_nfs, "../../net/nfsfs/volumes");
-       if (!p)
-               goto error_2;
-       return 0;
+       if (!proc_symlink("fs/nfsfs/volumes", NULL, "../../net/nfsfs/volumes"))
+               goto error_1;
 
-error_2:
-       remove_proc_entry("servers", proc_fs_nfs);
+       return 0;
 error_1:
-       remove_proc_entry("fs/nfsfs", NULL);
+       remove_proc_subtree("fs/nfsfs", NULL);
 error_0:
        return -ENOMEM;
 }
@@ -1464,9 +1456,7 @@ error_0:
  */
 void nfs_fs_proc_exit(void)
 {
-       remove_proc_entry("volumes", proc_fs_nfs);
-       remove_proc_entry("servers", proc_fs_nfs);
-       remove_proc_entry("fs/nfsfs", NULL);
+       remove_proc_subtree("fs/nfsfs", NULL);
 }
 
 #endif /* CONFIG_PROC_FS */
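
The /proc/fs/nfsfs setup no longer caches a proc_dir_entry pointer: entries are created by full path and the whole subtree is removed with a single remove_proc_subtree() call on both the error path and at exit. The same lifecycle for a hypothetical "fs/example" directory would look like this (names invented):

    #include <linux/init.h>
    #include <linux/proc_fs.h>

    static int __init example_proc_init(void)
    {
            if (!proc_mkdir("fs/example", NULL))
                    return -ENOMEM;

            if (!proc_symlink("fs/example/servers", NULL,
                              "../../net/example/servers"))
                    goto err;

            return 0;
    err:
            remove_proc_subtree("fs/example", NULL);
            return -ENOMEM;
    }

    static void example_proc_exit(void)
    {
            remove_proc_subtree("fs/example", NULL);
    }
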
index b2c8b31b2be77d9a1d524b230ed2b66e479ad3fe..547308a5ec6f4a738006370e523c751c90927e1b 100644 (file)
@@ -1470,9 +1470,6 @@ static int nfs_finish_open(struct nfs_open_context *ctx,
 {
        int err;
 
-       if ((open_flags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL))
-               *opened |= FILE_CREATED;
-
        err = finish_open(file, dentry, do_open, opened);
        if (err)
                goto out;
@@ -1771,7 +1768,7 @@ EXPORT_SYMBOL_GPL(nfs_mkdir);
 
 static void nfs_dentry_handle_enoent(struct dentry *dentry)
 {
-       if (d_really_is_positive(dentry) && !d_unhashed(dentry))
+       if (simple_positive(dentry))
                d_delete(dentry);
 }
 
index 8b8d83a526ce2366ae974a87761c9c62c22da7f5..cc4fa1ed61fc5bdfe04d1afcaa5f081bb3ba0470 100644 (file)
@@ -555,31 +555,22 @@ static int nfs_launder_page(struct page *page)
        return nfs_wb_page(inode, page);
 }
 
-#ifdef CONFIG_NFS_SWAP
 static int nfs_swap_activate(struct swap_info_struct *sis, struct file *file,
                                                sector_t *span)
 {
-       int ret;
        struct rpc_clnt *clnt = NFS_CLIENT(file->f_mapping->host);
 
        *span = sis->pages;
 
-       rcu_read_lock();
-       ret = xs_swapper(rcu_dereference(clnt->cl_xprt), 1);
-       rcu_read_unlock();
-
-       return ret;
+       return rpc_clnt_swap_activate(clnt);
 }
 
 static void nfs_swap_deactivate(struct file *file)
 {
        struct rpc_clnt *clnt = NFS_CLIENT(file->f_mapping->host);
 
-       rcu_read_lock();
-       xs_swapper(rcu_dereference(clnt->cl_xprt), 0);
-       rcu_read_unlock();
+       rpc_clnt_swap_deactivate(clnt);
 }
-#endif
 
 const struct address_space_operations nfs_file_aops = {
        .readpage = nfs_readpage,
@@ -596,10 +587,8 @@ const struct address_space_operations nfs_file_aops = {
        .launder_page = nfs_launder_page,
        .is_dirty_writeback = nfs_check_dirty_writeback,
        .error_remove_page = generic_error_remove_page,
-#ifdef CONFIG_NFS_SWAP
        .swap_activate = nfs_swap_activate,
        .swap_deactivate = nfs_swap_deactivate,
-#endif
 };
 
 /*
index 7d05089e52d6c8b7a29e92e80011bdee0e06a32c..c12951b9551eab8b0394ed2aa218cf15ce6f2c39 100644 (file)
@@ -20,6 +20,7 @@
 #include "../nfs4trace.h"
 #include "../iostat.h"
 #include "../nfs.h"
+#include "../nfs42.h"
 
 #define NFSDBG_FACILITY         NFSDBG_PNFS_LD
 
@@ -182,17 +183,14 @@ static void _ff_layout_free_lseg(struct nfs4_ff_layout_segment *fls)
 
 static void ff_layout_sort_mirrors(struct nfs4_ff_layout_segment *fls)
 {
-       struct nfs4_ff_layout_mirror *tmp;
        int i, j;
 
        for (i = 0; i < fls->mirror_array_cnt - 1; i++) {
                for (j = i + 1; j < fls->mirror_array_cnt; j++)
                        if (fls->mirror_array[i]->efficiency <
-                           fls->mirror_array[j]->efficiency) {
-                               tmp = fls->mirror_array[i];
-                               fls->mirror_array[i] = fls->mirror_array[j];
-                               fls->mirror_array[j] = tmp;
-                       }
+                           fls->mirror_array[j]->efficiency)
+                               swap(fls->mirror_array[i],
+                                    fls->mirror_array[j]);
        }
 }
 
@@ -274,6 +272,7 @@ ff_layout_alloc_lseg(struct pnfs_layout_hdr *lh,
 
                spin_lock_init(&fls->mirror_array[i]->lock);
                fls->mirror_array[i]->ds_count = ds_count;
+               fls->mirror_array[i]->lseg = &fls->generic_hdr;
 
                /* deviceid */
                rc = decode_deviceid(&stream, &devid);
@@ -344,6 +343,10 @@ ff_layout_alloc_lseg(struct pnfs_layout_hdr *lh,
                        fls->mirror_array[i]->gid);
        }
 
+       p = xdr_inline_decode(&stream, 4);
+       if (p)
+               fls->flags = be32_to_cpup(p);
+
        ff_layout_sort_mirrors(fls);
        rc = ff_layout_check_layout(lgr);
        if (rc)
@@ -415,6 +418,146 @@ ff_layout_get_lseg_count(struct nfs4_ff_layout_segment *fls)
        return 1;
 }
 
+static void
+nfs4_ff_start_busy_timer(struct nfs4_ff_busy_timer *timer)
+{
+       /* first IO request? */
+       if (atomic_inc_return(&timer->n_ops) == 1) {
+               timer->start_time = ktime_get();
+       }
+}
+
+static ktime_t
+nfs4_ff_end_busy_timer(struct nfs4_ff_busy_timer *timer)
+{
+       ktime_t start, now;
+
+       if (atomic_dec_return(&timer->n_ops) < 0)
+               WARN_ON_ONCE(1);
+
+       now = ktime_get();
+       start = timer->start_time;
+       timer->start_time = now;
+       return ktime_sub(now, start);
+}
+
+static ktime_t
+nfs4_ff_layout_calc_completion_time(struct rpc_task *task)
+{
+       return ktime_sub(ktime_get(), task->tk_start);
+}
+
+static bool
+nfs4_ff_layoutstat_start_io(struct nfs4_ff_layout_mirror *mirror,
+                           struct nfs4_ff_layoutstat *layoutstat)
+{
+       static const ktime_t notime = {0};
+       ktime_t now = ktime_get();
+
+       nfs4_ff_start_busy_timer(&layoutstat->busy_timer);
+       if (ktime_equal(mirror->start_time, notime))
+               mirror->start_time = now;
+       if (ktime_equal(mirror->last_report_time, notime))
+               mirror->last_report_time = now;
+       if (ktime_to_ms(ktime_sub(now, mirror->last_report_time)) >=
+                       FF_LAYOUTSTATS_REPORT_INTERVAL) {
+               mirror->last_report_time = now;
+               return true;
+       }
+
+       return false;
+}
+
+static void
+nfs4_ff_layout_stat_io_update_requested(struct nfs4_ff_layoutstat *layoutstat,
+               __u64 requested)
+{
+       struct nfs4_ff_io_stat *iostat = &layoutstat->io_stat;
+
+       iostat->ops_requested++;
+       iostat->bytes_requested += requested;
+}
+
+static void
+nfs4_ff_layout_stat_io_update_completed(struct nfs4_ff_layoutstat *layoutstat,
+               __u64 requested,
+               __u64 completed,
+               ktime_t time_completed)
+{
+       struct nfs4_ff_io_stat *iostat = &layoutstat->io_stat;
+       ktime_t timer;
+
+       iostat->ops_completed++;
+       iostat->bytes_completed += completed;
+       iostat->bytes_not_delivered += requested - completed;
+
+       timer = nfs4_ff_end_busy_timer(&layoutstat->busy_timer);
+       iostat->total_busy_time =
+                       ktime_add(iostat->total_busy_time, timer);
+       iostat->aggregate_completion_time =
+                       ktime_add(iostat->aggregate_completion_time, time_completed);
+}
+
+static void
+nfs4_ff_layout_stat_io_start_read(struct nfs4_ff_layout_mirror *mirror,
+               __u64 requested)
+{
+       bool report;
+
+       spin_lock(&mirror->lock);
+       report = nfs4_ff_layoutstat_start_io(mirror, &mirror->read_stat);
+       nfs4_ff_layout_stat_io_update_requested(&mirror->read_stat, requested);
+       spin_unlock(&mirror->lock);
+
+       if (report)
+               pnfs_report_layoutstat(mirror->lseg->pls_layout->plh_inode);
+}
+
+static void
+nfs4_ff_layout_stat_io_end_read(struct rpc_task *task,
+               struct nfs4_ff_layout_mirror *mirror,
+               __u64 requested,
+               __u64 completed)
+{
+       spin_lock(&mirror->lock);
+       nfs4_ff_layout_stat_io_update_completed(&mirror->read_stat,
+                       requested, completed,
+                       nfs4_ff_layout_calc_completion_time(task));
+       spin_unlock(&mirror->lock);
+}
+
+static void
+nfs4_ff_layout_stat_io_start_write(struct nfs4_ff_layout_mirror *mirror,
+               __u64 requested)
+{
+       bool report;
+
+       spin_lock(&mirror->lock);
+       report = nfs4_ff_layoutstat_start_io(mirror , &mirror->write_stat);
+       nfs4_ff_layout_stat_io_update_requested(&mirror->write_stat, requested);
+       spin_unlock(&mirror->lock);
+
+       if (report)
+               pnfs_report_layoutstat(mirror->lseg->pls_layout->plh_inode);
+}
+
+static void
+nfs4_ff_layout_stat_io_end_write(struct rpc_task *task,
+               struct nfs4_ff_layout_mirror *mirror,
+               __u64 requested,
+               __u64 completed,
+               enum nfs3_stable_how committed)
+{
+       if (committed == NFS_UNSTABLE)
+               requested = completed = 0;
+
+       spin_lock(&mirror->lock);
+       nfs4_ff_layout_stat_io_update_completed(&mirror->write_stat,
+                       requested, completed,
+                       nfs4_ff_layout_calc_completion_time(task));
+       spin_unlock(&mirror->lock);
+}
+
 static int
 ff_layout_alloc_commit_info(struct pnfs_layout_segment *lseg,
                            struct nfs_commit_info *cinfo,
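
The new layoutstat helpers keep a per-mirror busy timer: an atomic in-flight count starts the clock when it goes from 0 to 1, and each completion adds the time since the previous sample and resets the start point. A single-threaded userspace model of that accounting, using plain ints instead of atomic_t/ktime_t:

    #include <stdio.h>

    struct busy_timer { int n_ops; long long start_ms; };

    static void op_start(struct busy_timer *t, long long now_ms)
    {
            if (++t->n_ops == 1)
                    t->start_ms = now_ms;       /* first outstanding op */
    }

    static long long op_end(struct busy_timer *t, long long now_ms)
    {
            long long busy = now_ms - t->start_ms;

            t->n_ops--;
            t->start_ms = now_ms;               /* next interval starts here */
            return busy;    /* caller adds this to total_busy_time */
    }

    int main(void)
    {
            struct busy_timer t = { 0, 0 };
            long long total = 0;

            op_start(&t, 100);
            op_start(&t, 150);
            total += op_end(&t, 200);   /* 100 ms busy */
            total += op_end(&t, 230);   /* +30 ms */
            printf("%lld\n", total);    /* prints 130 */
            return 0;
    }
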
@@ -631,7 +774,7 @@ static void ff_layout_reset_write(struct nfs_pgio_header *hdr, bool retry_pnfs)
                        nfs_direct_set_resched_writes(hdr->dreq);
                        /* fake unstable write to let common nfs resend pages */
                        hdr->verf.committed = NFS_UNSTABLE;
-                       hdr->good_bytes = 0;
+                       hdr->good_bytes = hdr->args.count;
                }
                return;
        }
@@ -879,6 +1022,12 @@ static int ff_layout_read_done_cb(struct rpc_task *task,
        return 0;
 }
 
+static bool
+ff_layout_need_layoutcommit(struct pnfs_layout_segment *lseg)
+{
+       return !(FF_LAYOUT_LSEG(lseg)->flags & FF_FLAGS_NO_LAYOUTCOMMIT);
+}
+
 /*
  * We reference the rpc_cred of the first WRITE that triggers the need for
  * a LAYOUTCOMMIT, and use it to send the layoutcommit compound.
@@ -891,6 +1040,9 @@ static int ff_layout_read_done_cb(struct rpc_task *task,
 static void
 ff_layout_set_layoutcommit(struct nfs_pgio_header *hdr)
 {
+       if (!ff_layout_need_layoutcommit(hdr->lseg))
+               return;
+
        pnfs_set_layoutcommit(hdr->inode, hdr->lseg,
                        hdr->mds_offset + hdr->res.count);
        dprintk("%s inode %lu pls_end_pos %lu\n", __func__, hdr->inode->i_ino,
@@ -909,6 +1061,10 @@ ff_layout_reset_to_mds(struct pnfs_layout_segment *lseg, int idx)
 static int ff_layout_read_prepare_common(struct rpc_task *task,
                                         struct nfs_pgio_header *hdr)
 {
+       nfs4_ff_layout_stat_io_start_read(
+                       FF_LAYOUT_COMP(hdr->lseg, hdr->pgio_mirror_idx),
+                       hdr->args.count);
+
        if (unlikely(test_bit(NFS_CONTEXT_BAD, &hdr->args.context->flags))) {
                rpc_exit(task, -EIO);
                return -EIO;
@@ -962,15 +1118,15 @@ static void ff_layout_read_prepare_v4(struct rpc_task *task, void *data)
 {
        struct nfs_pgio_header *hdr = data;
 
-       if (ff_layout_read_prepare_common(task, hdr))
-               return;
-
        if (ff_layout_setup_sequence(hdr->ds_clp,
                                     &hdr->args.seq_args,
                                     &hdr->res.seq_res,
                                     task))
                return;
 
+       if (ff_layout_read_prepare_common(task, hdr))
+               return;
+
        if (nfs4_set_rw_stateid(&hdr->args.stateid, hdr->args.context,
                        hdr->args.lock_context, FMODE_READ) == -EIO)
                rpc_exit(task, -EIO); /* lost lock, terminate I/O */
@@ -982,6 +1138,10 @@ static void ff_layout_read_call_done(struct rpc_task *task, void *data)
 
        dprintk("--> %s task->tk_status %d\n", __func__, task->tk_status);
 
+       nfs4_ff_layout_stat_io_end_read(task,
+                       FF_LAYOUT_COMP(hdr->lseg, hdr->pgio_mirror_idx),
+                       hdr->args.count, hdr->res.count);
+
        if (test_bit(NFS_IOHDR_REDO, &hdr->flags) &&
            task->tk_status == 0) {
                nfs4_sequence_done(task, &hdr->res.seq_res);
@@ -1074,7 +1234,8 @@ static int ff_layout_commit_done_cb(struct rpc_task *task,
                return -EAGAIN;
        }
 
-       if (data->verf.committed == NFS_UNSTABLE)
+       if (data->verf.committed == NFS_UNSTABLE
+           && ff_layout_need_layoutcommit(data->lseg))
                pnfs_set_layoutcommit(data->inode, data->lseg, data->lwb);
 
        return 0;
@@ -1083,6 +1244,10 @@ static int ff_layout_commit_done_cb(struct rpc_task *task,
 static int ff_layout_write_prepare_common(struct rpc_task *task,
                                          struct nfs_pgio_header *hdr)
 {
+       nfs4_ff_layout_stat_io_start_write(
+                       FF_LAYOUT_COMP(hdr->lseg, hdr->pgio_mirror_idx),
+                       hdr->args.count);
+
        if (unlikely(test_bit(NFS_CONTEXT_BAD, &hdr->args.context->flags))) {
                rpc_exit(task, -EIO);
                return -EIO;
@@ -1116,15 +1281,15 @@ static void ff_layout_write_prepare_v4(struct rpc_task *task, void *data)
 {
        struct nfs_pgio_header *hdr = data;
 
-       if (ff_layout_write_prepare_common(task, hdr))
-               return;
-
        if (ff_layout_setup_sequence(hdr->ds_clp,
                                     &hdr->args.seq_args,
                                     &hdr->res.seq_res,
                                     task))
                return;
 
+       if (ff_layout_write_prepare_common(task, hdr))
+               return;
+
        if (nfs4_set_rw_stateid(&hdr->args.stateid, hdr->args.context,
                        hdr->args.lock_context, FMODE_WRITE) == -EIO)
                rpc_exit(task, -EIO); /* lost lock, terminate I/O */
@@ -1134,6 +1299,11 @@ static void ff_layout_write_call_done(struct rpc_task *task, void *data)
 {
        struct nfs_pgio_header *hdr = data;
 
+       nfs4_ff_layout_stat_io_end_write(task,
+                       FF_LAYOUT_COMP(hdr->lseg, hdr->pgio_mirror_idx),
+                       hdr->args.count, hdr->res.count,
+                       hdr->res.verf->committed);
+
        if (test_bit(NFS_IOHDR_REDO, &hdr->flags) &&
            task->tk_status == 0) {
                nfs4_sequence_done(task, &hdr->res.seq_res);
@@ -1152,8 +1322,17 @@ static void ff_layout_write_count_stats(struct rpc_task *task, void *data)
            &NFS_CLIENT(hdr->inode)->cl_metrics[NFSPROC4_CLNT_WRITE]);
 }
 
+static void ff_layout_commit_prepare_common(struct rpc_task *task,
+               struct nfs_commit_data *cdata)
+{
+       nfs4_ff_layout_stat_io_start_write(
+                       FF_LAYOUT_COMP(cdata->lseg, cdata->ds_commit_index),
+                       0);
+}
+
 static void ff_layout_commit_prepare_v3(struct rpc_task *task, void *data)
 {
+       ff_layout_commit_prepare_common(task, data);
        rpc_call_start(task);
 }
 
@@ -1161,10 +1340,30 @@ static void ff_layout_commit_prepare_v4(struct rpc_task *task, void *data)
 {
        struct nfs_commit_data *wdata = data;
 
-       ff_layout_setup_sequence(wdata->ds_clp,
+       if (ff_layout_setup_sequence(wdata->ds_clp,
                                 &wdata->args.seq_args,
                                 &wdata->res.seq_res,
-                                task);
+                                task))
+               return;
+       ff_layout_commit_prepare_common(task, data);
+}
+
+static void ff_layout_commit_done(struct rpc_task *task, void *data)
+{
+       struct nfs_commit_data *cdata = data;
+       struct nfs_page *req;
+       __u64 count = 0;
+
+       if (task->tk_status == 0) {
+               list_for_each_entry(req, &cdata->pages, wb_list)
+                       count += req->wb_bytes;
+       }
+
+       nfs4_ff_layout_stat_io_end_write(task,
+                       FF_LAYOUT_COMP(cdata->lseg, cdata->ds_commit_index),
+                       count, count, NFS_FILE_SYNC);
+
+       pnfs_generic_write_commit_done(task, data);
 }
 
 static void ff_layout_commit_count_stats(struct rpc_task *task, void *data)
@@ -1205,14 +1404,14 @@ static const struct rpc_call_ops ff_layout_write_call_ops_v4 = {
 
 static const struct rpc_call_ops ff_layout_commit_call_ops_v3 = {
        .rpc_call_prepare = ff_layout_commit_prepare_v3,
-       .rpc_call_done = pnfs_generic_write_commit_done,
+       .rpc_call_done = ff_layout_commit_done,
        .rpc_count_stats = ff_layout_commit_count_stats,
        .rpc_release = pnfs_generic_commit_release,
 };
 
 static const struct rpc_call_ops ff_layout_commit_call_ops_v4 = {
        .rpc_call_prepare = ff_layout_commit_prepare_v4,
-       .rpc_call_done = pnfs_generic_write_commit_done,
+       .rpc_call_done = ff_layout_commit_done,
        .rpc_count_stats = ff_layout_commit_count_stats,
        .rpc_release = pnfs_generic_commit_release,
 };
@@ -1256,7 +1455,6 @@ ff_layout_read_pagelist(struct nfs_pgio_header *hdr)
        fh = nfs4_ff_layout_select_ds_fh(lseg, idx);
        if (fh)
                hdr->args.fh = fh;
-
        /*
         * Note that if we ever decide to split across DSes,
         * then we may need to handle dense-like offsets.
@@ -1385,6 +1583,7 @@ static int ff_layout_initiate_commit(struct nfs_commit_data *data, int how)
        fh = select_ds_fh_from_commit(lseg, data->ds_commit_index);
        if (fh)
                data->args.fh = fh;
+
        return nfs_initiate_commit(ds_clnt, data, ds->ds_clp->rpc_ops,
                                   vers == 3 ? &ff_layout_commit_call_ops_v3 :
                                               &ff_layout_commit_call_ops_v4,
@@ -1488,6 +1687,247 @@ out:
        dprintk("%s: Return\n", __func__);
 }
 
+static int
+ff_layout_ntop4(const struct sockaddr *sap, char *buf, const size_t buflen)
+{
+       const struct sockaddr_in *sin = (struct sockaddr_in *)sap;
+
+       return snprintf(buf, buflen, "%pI4", &sin->sin_addr);
+}
+
+static size_t
+ff_layout_ntop6_noscopeid(const struct sockaddr *sap, char *buf,
+                         const int buflen)
+{
+       const struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)sap;
+       const struct in6_addr *addr = &sin6->sin6_addr;
+
+       /*
+        * RFC 4291, Section 2.2.2
+        *
+        * Shorthanded ANY address
+        */
+       if (ipv6_addr_any(addr))
+               return snprintf(buf, buflen, "::");
+
+       /*
+        * RFC 4291, Section 2.2.2
+        *
+        * Shorthanded loopback address
+        */
+       if (ipv6_addr_loopback(addr))
+               return snprintf(buf, buflen, "::1");
+
+       /*
+        * RFC 4291, Section 2.2.3
+        *
+        * Special presentation address format for mapped v4
+        * addresses.
+        */
+       if (ipv6_addr_v4mapped(addr))
+               return snprintf(buf, buflen, "::ffff:%pI4",
+                                       &addr->s6_addr32[3]);
+
+       /*
+        * RFC 4291, Section 2.2.1
+        */
+       return snprintf(buf, buflen, "%pI6c", addr);
+}
+
+/* Derived from rpc_sockaddr2uaddr */
+static void
+ff_layout_encode_netaddr(struct xdr_stream *xdr, struct nfs4_pnfs_ds_addr *da)
+{
+       struct sockaddr *sap = (struct sockaddr *)&da->da_addr;
+       char portbuf[RPCBIND_MAXUADDRPLEN];
+       char addrbuf[RPCBIND_MAXUADDRLEN];
+       char *netid;
+       unsigned short port;
+       int len, netid_len;
+       __be32 *p;
+
+       switch (sap->sa_family) {
+       case AF_INET:
+               if (ff_layout_ntop4(sap, addrbuf, sizeof(addrbuf)) == 0)
+                       return;
+               port = ntohs(((struct sockaddr_in *)sap)->sin_port);
+               netid = "tcp";
+               netid_len = 3;
+               break;
+       case AF_INET6:
+               if (ff_layout_ntop6_noscopeid(sap, addrbuf, sizeof(addrbuf)) == 0)
+                       return;
+               port = ntohs(((struct sockaddr_in6 *)sap)->sin6_port);
+               netid = "tcp6";
+               netid_len = 4;
+               break;
+       default:
+               /* we only support tcp and tcp6 */
+               WARN_ON_ONCE(1);
+               return;
+       }
+
+       snprintf(portbuf, sizeof(portbuf), ".%u.%u", port >> 8, port & 0xff);
+       len = strlcat(addrbuf, portbuf, sizeof(addrbuf));
+
+       p = xdr_reserve_space(xdr, 4 + netid_len);
+       xdr_encode_opaque(p, netid, netid_len);
+
+       p = xdr_reserve_space(xdr, 4 + len);
+       xdr_encode_opaque(p, addrbuf, len);
+}
+
+static void
+ff_layout_encode_nfstime(struct xdr_stream *xdr,
+                        ktime_t t)
+{
+       struct timespec64 ts;
+       __be32 *p;
+
+       p = xdr_reserve_space(xdr, 12);
+       ts = ktime_to_timespec64(t);
+       p = xdr_encode_hyper(p, ts.tv_sec);
+       *p++ = cpu_to_be32(ts.tv_nsec);
+}
+
+static void
+ff_layout_encode_io_latency(struct xdr_stream *xdr,
+                           struct nfs4_ff_io_stat *stat)
+{
+       __be32 *p;
+
+       p = xdr_reserve_space(xdr, 5 * 8);
+       p = xdr_encode_hyper(p, stat->ops_requested);
+       p = xdr_encode_hyper(p, stat->bytes_requested);
+       p = xdr_encode_hyper(p, stat->ops_completed);
+       p = xdr_encode_hyper(p, stat->bytes_completed);
+       p = xdr_encode_hyper(p, stat->bytes_not_delivered);
+       ff_layout_encode_nfstime(xdr, stat->total_busy_time);
+       ff_layout_encode_nfstime(xdr, stat->aggregate_completion_time);
+}
+
+static void
+ff_layout_encode_layoutstats(struct xdr_stream *xdr,
+                            struct nfs42_layoutstat_args *args,
+                            struct nfs42_layoutstat_devinfo *devinfo)
+{
+       struct nfs4_ff_layout_mirror *mirror = devinfo->layout_private;
+       struct nfs4_pnfs_ds_addr *da;
+       struct nfs4_pnfs_ds *ds = mirror->mirror_ds->ds;
+       struct nfs_fh *fh = &mirror->fh_versions[0];
+       __be32 *p, *start;
+
+       da = list_first_entry(&ds->ds_addrs, struct nfs4_pnfs_ds_addr, da_node);
+       dprintk("%s: DS %s: encoding address %s\n",
+               __func__, ds->ds_remotestr, da->da_remotestr);
+       /* layoutupdate length */
+       start = xdr_reserve_space(xdr, 4);
+       /* netaddr4 */
+       ff_layout_encode_netaddr(xdr, da);
+       /* nfs_fh4 */
+       p = xdr_reserve_space(xdr, 4 + fh->size);
+       xdr_encode_opaque(p, fh->data, fh->size);
+       /* ff_io_latency4 read */
+       spin_lock(&mirror->lock);
+       ff_layout_encode_io_latency(xdr, &mirror->read_stat.io_stat);
+       /* ff_io_latency4 write */
+       ff_layout_encode_io_latency(xdr, &mirror->write_stat.io_stat);
+       spin_unlock(&mirror->lock);
+       /* nfstime4 */
+       ff_layout_encode_nfstime(xdr, ktime_sub(ktime_get(), mirror->start_time));
+       /* bool */
+       p = xdr_reserve_space(xdr, 4);
+       *p = cpu_to_be32(false);
+
+       *start = cpu_to_be32((xdr->p - start - 1) * 4);
+}
+
+static bool
+ff_layout_mirror_prepare_stats(struct nfs42_layoutstat_args *args,
+                              struct pnfs_layout_segment *pls,
+                              int *dev_count, int dev_limit)
+{
+       struct nfs4_ff_layout_mirror *mirror;
+       struct nfs4_deviceid_node *dev;
+       struct nfs42_layoutstat_devinfo *devinfo;
+       int i;
+
+       for (i = 0; i <= FF_LAYOUT_MIRROR_COUNT(pls); i++) {
+               if (*dev_count >= dev_limit)
+                       break;
+               mirror = FF_LAYOUT_COMP(pls, i);
+               if (!mirror || !mirror->mirror_ds)
+                       continue;
+               dev = FF_LAYOUT_DEVID_NODE(pls, i);
+               devinfo = &args->devinfo[*dev_count];
+               memcpy(&devinfo->dev_id, &dev->deviceid, NFS4_DEVICEID4_SIZE);
+               devinfo->offset = pls->pls_range.offset;
+               devinfo->length = pls->pls_range.length;
+               /* well, we don't really know if IO is continuous or not! */
+               devinfo->read_count = mirror->read_stat.io_stat.bytes_completed;
+               devinfo->read_bytes = mirror->read_stat.io_stat.bytes_completed;
+               devinfo->write_count = mirror->write_stat.io_stat.bytes_completed;
+               devinfo->write_bytes = mirror->write_stat.io_stat.bytes_completed;
+               devinfo->layout_type = LAYOUT_FLEX_FILES;
+               devinfo->layoutstats_encode = ff_layout_encode_layoutstats;
+               devinfo->layout_private = mirror;
+               /* lseg refcount put in cleanup_layoutstats */
+               pnfs_get_lseg(pls);
+
+               ++(*dev_count);
+       }
+
+       return *dev_count < dev_limit;
+}
+
+static int
+ff_layout_prepare_layoutstats(struct nfs42_layoutstat_args *args)
+{
+       struct pnfs_layout_segment *pls;
+       int dev_count = 0;
+
+       spin_lock(&args->inode->i_lock);
+       list_for_each_entry(pls, &NFS_I(args->inode)->layout->plh_segs, pls_list) {
+               dev_count += FF_LAYOUT_MIRROR_COUNT(pls);
+       }
+       spin_unlock(&args->inode->i_lock);
+       /* For now, send at most PNFS_LAYOUTSTATS_MAXDEV statistics */
+       if (dev_count > PNFS_LAYOUTSTATS_MAXDEV) {
+               dprintk("%s: truncating devinfo to limit (%d:%d)\n",
+                       __func__, dev_count, PNFS_LAYOUTSTATS_MAXDEV);
+               dev_count = PNFS_LAYOUTSTATS_MAXDEV;
+       }
+       args->devinfo = kmalloc(dev_count * sizeof(*args->devinfo), GFP_KERNEL);
+       if (!args->devinfo)
+               return -ENOMEM;
+
+       dev_count = 0;
+       spin_lock(&args->inode->i_lock);
+       list_for_each_entry(pls, &NFS_I(args->inode)->layout->plh_segs, pls_list) {
+               if (!ff_layout_mirror_prepare_stats(args, pls, &dev_count,
+                                                   PNFS_LAYOUTSTATS_MAXDEV)) {
+                       break;
+               }
+       }
+       spin_unlock(&args->inode->i_lock);
+       args->num_dev = dev_count;
+
+       return 0;
+}
+
+static void
+ff_layout_cleanup_layoutstats(struct nfs42_layoutstat_data *data)
+{
+       struct nfs4_ff_layout_mirror *mirror;
+       int i;
+
+       for (i = 0; i < data->args.num_dev; i++) {
+               mirror = data->args.devinfo[i].layout_private;
+               data->args.devinfo[i].layout_private = NULL;
+               pnfs_put_lseg(mirror->lseg);
+       }
+}
+
 static struct pnfs_layoutdriver_type flexfilelayout_type = {
        .id                     = LAYOUT_FLEX_FILES,
        .name                   = "LAYOUT_FLEX_FILES",
@@ -1510,6 +1950,8 @@ static struct pnfs_layoutdriver_type flexfilelayout_type = {
        .alloc_deviceid_node    = ff_layout_alloc_deviceid_node,
        .encode_layoutreturn    = ff_layout_encode_layoutreturn,
        .sync                   = pnfs_nfs_generic_sync,
+       .prepare_layoutstats    = ff_layout_prepare_layoutstats,
+       .cleanup_layoutstats    = ff_layout_cleanup_layoutstats,
 };
 
 static int __init nfs4flexfilelayout_init(void)
index 070f20445b2d33883445038d4888538845555198..f92f9a0a856b3e698c8859923438549d1bffed37 100644 (file)
@@ -9,12 +9,17 @@
 #ifndef FS_NFS_NFS4FLEXFILELAYOUT_H
 #define FS_NFS_NFS4FLEXFILELAYOUT_H
 
+#define FF_FLAGS_NO_LAYOUTCOMMIT 1
+
 #include "../pnfs.h"
 
 /* XXX: Let's filter out insanely large mirror count for now to avoid oom
  * due to network error etc. */
 #define NFS4_FLEXFILE_LAYOUT_MAX_MIRROR_CNT 4096
 
+/* LAYOUTSTATS report interval in ms */
+#define FF_LAYOUTSTATS_REPORT_INTERVAL (60000L)
+
 struct nfs4_ff_ds_version {
        u32                             version;
        u32                             minor_version;
@@ -41,24 +46,48 @@ struct nfs4_ff_layout_ds_err {
        struct nfs4_deviceid            deviceid;
 };
 
+struct nfs4_ff_io_stat {
+       __u64                           ops_requested;
+       __u64                           bytes_requested;
+       __u64                           ops_completed;
+       __u64                           bytes_completed;
+       __u64                           bytes_not_delivered;
+       ktime_t                         total_busy_time;
+       ktime_t                         aggregate_completion_time;
+};
+
+struct nfs4_ff_busy_timer {
+       ktime_t start_time;
+       atomic_t n_ops;
+};
+
+struct nfs4_ff_layoutstat {
+       struct nfs4_ff_io_stat io_stat;
+       struct nfs4_ff_busy_timer busy_timer;
+};
+
 struct nfs4_ff_layout_mirror {
+       struct pnfs_layout_segment      *lseg; /* back pointer */
        u32                             ds_count;
        u32                             efficiency;
        struct nfs4_ff_layout_ds        *mirror_ds;
        u32                             fh_versions_cnt;
        struct nfs_fh                   *fh_versions;
        nfs4_stateid                    stateid;
-       struct nfs4_string              user_name;
-       struct nfs4_string              group_name;
        u32                             uid;
        u32                             gid;
        struct rpc_cred                 *cred;
        spinlock_t                      lock;
+       struct nfs4_ff_layoutstat       read_stat;
+       struct nfs4_ff_layoutstat       write_stat;
+       ktime_t                         start_time;
+       ktime_t                         last_report_time;
 };
 
 struct nfs4_ff_layout_segment {
        struct pnfs_layout_segment      generic_hdr;
        u64                             stripe_unit;
+       u32                             flags;
        u32                             mirror_array_cnt;
        struct nfs4_ff_layout_mirror    **mirror_array;
 };
index 77a2d026aa12b62bdc29dac2345cff0b3237e9c4..f13e1969eedd911bf6a5d9be6af6e4ae403f6c1e 100644 (file)
@@ -324,7 +324,8 @@ static int ff_layout_update_mirror_cred(struct nfs4_ff_layout_mirror *mirror,
                                __func__, PTR_ERR(cred));
                        return PTR_ERR(cred);
                } else {
-                       mirror->cred = cred;
+                       if (cmpxchg(&mirror->cred, NULL, cred))
+                               put_rpccred(cred);
                }
        }
        return 0;
@@ -386,7 +387,7 @@ nfs4_ff_layout_prepare_ds(struct pnfs_layout_segment *lseg, u32 ds_idx,
        /* matching smp_wmb() in _nfs4_pnfs_v3/4_ds_connect */
        smp_rmb();
        if (ds->ds_clp)
-               goto out;
+               goto out_update_creds;
 
        flavor = nfs4_ff_layout_choose_authflavor(mirror);
 
@@ -430,7 +431,7 @@ nfs4_ff_layout_prepare_ds(struct pnfs_layout_segment *lseg, u32 ds_idx,
                        }
                }
        }
-
+out_update_creds:
        if (ff_layout_update_mirror_cred(mirror, ds))
                ds = NULL;
 out:
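
The cmpxchg() above makes credential installation first-writer-wins: whoever loses the race releases its duplicate with put_rpccred(). Here is a self-contained C11 sketch of the same publish-once pattern, using userspace atomics rather than the kernel's cmpxchg; names are hypothetical.

/* Illustrative sketch (plain C11, not kernel code) of the "publish once,
 * drop the loser" pattern used above for mirror->cred: the first caller
 * to install a credential wins, any later duplicate is released. */
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct cred { int id; };

static _Atomic(struct cred *) installed_cred;

static void put_cred(struct cred *c)
{
        free(c);
}

/* Returns the credential that ended up installed. */
static struct cred *install_cred_once(struct cred *new_cred)
{
        struct cred *expected = NULL;

        if (atomic_compare_exchange_strong(&installed_cred, &expected, new_cred))
                return new_cred;        /* we won the race */
        put_cred(new_cred);             /* somebody beat us; drop ours */
        return expected;                /* the winner's credential */
}

int main(void)
{
        struct cred *a = malloc(sizeof(*a)), *b = malloc(sizeof(*b));

        a->id = 1;
        b->id = 2;
        printf("kept id=%d\n", install_cred_once(a)->id);  /* 1 */
        printf("kept id=%d\n", install_cred_once(b)->id);  /* still 1 */
        put_cred(atomic_load(&installed_cred));
        return 0;
}
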
index f734562c6d244034cb5036fee8ab0b7d69cc90c5..b77b328a06d74f0124d2a65b51fac0fc21fbd692 100644 (file)
@@ -678,6 +678,8 @@ int nfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
        if (!err) {
                generic_fillattr(inode, stat);
                stat->ino = nfs_compat_user_ino64(NFS_FILEID(inode));
+               if (S_ISDIR(inode->i_mode))
+                       stat->blksize = NFS_SERVER(inode)->dtsize;
        }
 out:
        trace_nfs_getattr_exit(inode, err);
@@ -2008,17 +2010,15 @@ static int __init init_nfs_fs(void)
        if (err)
                goto out1;
 
-#ifdef CONFIG_PROC_FS
        rpc_proc_register(&init_net, &nfs_rpcstat);
-#endif
-       if ((err = register_nfs_fs()) != 0)
+
+       err = register_nfs_fs();
+       if (err)
                goto out0;
 
        return 0;
 out0:
-#ifdef CONFIG_PROC_FS
        rpc_proc_unregister(&init_net, "nfs");
-#endif
        nfs_destroy_directcache();
 out1:
        nfs_destroy_writepagecache();
@@ -2049,9 +2049,7 @@ static void __exit exit_nfs_fs(void)
        nfs_destroy_nfspagecache();
        nfs_fscache_unregister();
        unregister_pernet_subsys(&nfs_net_ops);
-#ifdef CONFIG_PROC_FS
        rpc_proc_unregister(&init_net, "nfs");
-#endif
        unregister_nfs_fs();
        nfs_fs_proc_exit();
        nfsiod_stop();
index 53852a4bd88be68781bb9dd8a39760fd497d1d61..9b04c2e6fffc3f306f3c598b7c4557beff653c8e 100644 (file)
@@ -1342,7 +1342,7 @@ static void nfs3_xdr_enc_setacl3args(struct rpc_rqst *req,
        if (args->npages != 0)
                xdr_write_pages(xdr, args->pages, 0, args->len);
        else
-               xdr_reserve_space(xdr, NFS_ACL_INLINE_BUFSIZE);
+               xdr_reserve_space(xdr, args->len);
 
        error = nfsacl_encode(xdr->buf, base, args->inode,
                            (args->mask & NFS_ACL) ?
index 7afb8947dfdf3e299ee39c6490640f40567a70b7..ff66ae700b8991eeed513e210397f6639c0e5f70 100644 (file)
@@ -5,11 +5,18 @@
 #ifndef __LINUX_FS_NFS_NFS4_2_H
 #define __LINUX_FS_NFS_NFS4_2_H
 
+/*
+ * FIXME: at most four LAYOUTSTATS calls per compound! Do we need to support
+ * more? We need to be careful not to pre-allocate too much for a compound.
 * FIXME: at most four LAYOUTSTATS calls per compound! Do we need to support
+ */
+#define PNFS_LAYOUTSTATS_MAXDEV (4)
+
 /* nfs4.2proc.c */
 int nfs42_proc_allocate(struct file *, loff_t, loff_t);
 int nfs42_proc_deallocate(struct file *, loff_t, loff_t);
 loff_t nfs42_proc_llseek(struct file *, loff_t, int);
-
+int nfs42_proc_layoutstats_generic(struct nfs_server *,
+                                  struct nfs42_layoutstat_data *);
 /* nfs4.2xdr.h */
 extern struct rpc_procinfo nfs4_2_procedures[];
 
index 3a9e75235f30e60e5a4ae974864c63fb003b969d..f486b80f927ab7204159852a9740900a6c73aec6 100644 (file)
 #include <linux/nfs_fs.h>
 #include "nfs4_fs.h"
 #include "nfs42.h"
+#include "iostat.h"
+#include "pnfs.h"
+#include "internal.h"
+
+#define NFSDBG_FACILITY NFSDBG_PNFS
 
 static int nfs42_set_rw_stateid(nfs4_stateid *dst, struct file *file,
                                fmode_t fmode)
@@ -165,3 +170,85 @@ loff_t nfs42_proc_llseek(struct file *filep, loff_t offset, int whence)
 
        return vfs_setpos(filep, res.sr_offset, inode->i_sb->s_maxbytes);
 }
+
+static void
+nfs42_layoutstat_prepare(struct rpc_task *task, void *calldata)
+{
+       struct nfs42_layoutstat_data *data = calldata;
+       struct nfs_server *server = NFS_SERVER(data->args.inode);
+
+       nfs41_setup_sequence(nfs4_get_session(server), &data->args.seq_args,
+                            &data->res.seq_res, task);
+}
+
+static void
+nfs42_layoutstat_done(struct rpc_task *task, void *calldata)
+{
+       struct nfs42_layoutstat_data *data = calldata;
+
+       if (!nfs4_sequence_done(task, &data->res.seq_res))
+               return;
+
+       switch (task->tk_status) {
+       case 0:
+               break;
+       case -ENOTSUPP:
+       case -EOPNOTSUPP:
+               NFS_SERVER(data->inode)->caps &= ~NFS_CAP_LAYOUTSTATS;
+       default:
+               dprintk("%s server returns %d\n", __func__, task->tk_status);
+       }
+}
+
+static void
+nfs42_layoutstat_release(void *calldata)
+{
+       struct nfs42_layoutstat_data *data = calldata;
+       struct nfs_server *nfss = NFS_SERVER(data->args.inode);
+
+       if (nfss->pnfs_curr_ld->cleanup_layoutstats)
+               nfss->pnfs_curr_ld->cleanup_layoutstats(data);
+
+       pnfs_put_layout_hdr(NFS_I(data->args.inode)->layout);
+       smp_mb__before_atomic();
+       clear_bit(NFS_INO_LAYOUTSTATS, &NFS_I(data->args.inode)->flags);
+       smp_mb__after_atomic();
+       nfs_iput_and_deactive(data->inode);
+       kfree(data->args.devinfo);
+       kfree(data);
+}
+
+static const struct rpc_call_ops nfs42_layoutstat_ops = {
+       .rpc_call_prepare = nfs42_layoutstat_prepare,
+       .rpc_call_done = nfs42_layoutstat_done,
+       .rpc_release = nfs42_layoutstat_release,
+};
+
+int nfs42_proc_layoutstats_generic(struct nfs_server *server,
+                                  struct nfs42_layoutstat_data *data)
+{
+       struct rpc_message msg = {
+               .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTSTATS],
+               .rpc_argp = &data->args,
+               .rpc_resp = &data->res,
+       };
+       struct rpc_task_setup task_setup = {
+               .rpc_client = server->client,
+               .rpc_message = &msg,
+               .callback_ops = &nfs42_layoutstat_ops,
+               .callback_data = data,
+               .flags = RPC_TASK_ASYNC,
+       };
+       struct rpc_task *task;
+
+       data->inode = nfs_igrab_and_active(data->args.inode);
+       if (!data->inode) {
+               nfs42_layoutstat_release(data);
+               return -EAGAIN;
+       }
+       nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 0);
+       task = rpc_run_task(&task_setup);
+       if (IS_ERR(task))
+               return PTR_ERR(task);
+       return 0;
+}
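
nfs42_proc_layoutstats_generic() above dispatches the RPC asynchronously and relies on the three callbacks: prepare sets up the session slot, done interprets the status (dropping NFS_CAP_LAYOUTSTATS on -EOPNOTSUPP), and release frees the per-call data. The toy sketch below shows that ops-table shape in plain C, run synchronously so the control flow is visible; every name is hypothetical and none of the real RPC machinery is modelled.

/* Minimal sketch (not the kernel RPC machinery): an ops table with
 * prepare/done/release hooks, invoked in order by a toy "run_task". */
#include <stdio.h>
#include <stdlib.h>

struct task {
        int status;
        void *calldata;
};

struct call_ops {
        void (*prepare)(struct task *t, void *calldata);
        void (*done)(struct task *t, void *calldata);
        void (*release)(void *calldata);
};

/* In the kernel the task runs asynchronously; here we simply call the
 * hooks back to back. */
static int run_task(const struct call_ops *ops, void *calldata)
{
        struct task t = { .calldata = calldata };

        ops->prepare(&t, calldata);
        ops->done(&t, calldata);
        ops->release(calldata);
        return t.status;
}

struct stats_data { const char *what; };

static void stats_prepare(struct task *t, void *calldata)
{
        (void)t;
        printf("prepare: %s\n", ((struct stats_data *)calldata)->what);
}

static void stats_done(struct task *t, void *calldata)
{
        (void)calldata;
        t->status = 0;
        printf("done\n");
}

static void stats_release(void *calldata)
{
        free(calldata);     /* mirrors freeing the per-call data in release */
        printf("release\n");
}

static const struct call_ops stats_ops = {
        .prepare = stats_prepare,
        .done    = stats_done,
        .release = stats_release,
};

int main(void)
{
        struct stats_data *d = malloc(sizeof(*d));

        d->what = "layoutstats";
        return run_task(&stats_ops, d);
}
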
index 1a25b27248f2ff5fd0026b00a3032589ee8cdff5..a6bd27da6286f9fee14f0b087eddcc1ec437cdde 100644 (file)
@@ -4,6 +4,8 @@
 #ifndef __LINUX_FS_NFS_NFS4_2XDR_H
 #define __LINUX_FS_NFS_NFS4_2XDR_H
 
+#include "nfs42.h"
+
 #define encode_fallocate_maxsz         (encode_stateid_maxsz + \
                                         2 /* offset */ + \
                                         2 /* length */)
                                         1 /* whence */ + \
                                         2 /* offset */ + \
                                         2 /* length */)
+#define encode_io_info_maxsz           4
+#define encode_layoutstats_maxsz       (op_decode_hdr_maxsz + \
+                                       2 /* offset */ + \
+                                       2 /* length */ + \
+                                       encode_stateid_maxsz + \
+                                       encode_io_info_maxsz + \
+                                       encode_io_info_maxsz + \
+                                       1 /* opaque devaddr4 length */ + \
+                                       XDR_QUADLEN(PNFS_LAYOUTSTATS_MAXSIZE))
+#define decode_layoutstats_maxsz       (op_decode_hdr_maxsz)
 
 #define NFS4_enc_allocate_sz           (compound_encode_hdr_maxsz + \
                                         encode_putfh_maxsz + \
 #define NFS4_dec_seek_sz               (compound_decode_hdr_maxsz + \
                                         decode_putfh_maxsz + \
                                         decode_seek_maxsz)
+#define NFS4_enc_layoutstats_sz                (compound_encode_hdr_maxsz + \
+                                        encode_sequence_maxsz + \
+                                        encode_putfh_maxsz + \
+                                        PNFS_LAYOUTSTATS_MAXDEV * encode_layoutstats_maxsz)
+#define NFS4_dec_layoutstats_sz                (compound_decode_hdr_maxsz + \
+                                        decode_sequence_maxsz + \
+                                        decode_putfh_maxsz + \
+                                        PNFS_LAYOUTSTATS_MAXDEV * decode_layoutstats_maxsz)
 
 
 static void encode_fallocate(struct xdr_stream *xdr,
@@ -81,6 +101,33 @@ static void encode_seek(struct xdr_stream *xdr,
        encode_uint32(xdr, args->sa_what);
 }
 
+static void encode_layoutstats(struct xdr_stream *xdr,
+                              struct nfs42_layoutstat_args *args,
+                              struct nfs42_layoutstat_devinfo *devinfo,
+                              struct compound_hdr *hdr)
+{
+       __be32 *p;
+
+       encode_op_hdr(xdr, OP_LAYOUTSTATS, decode_layoutstats_maxsz, hdr);
+       p = reserve_space(xdr, 8 + 8);
+       p = xdr_encode_hyper(p, devinfo->offset);
+       p = xdr_encode_hyper(p, devinfo->length);
+       encode_nfs4_stateid(xdr, &args->stateid);
+       p = reserve_space(xdr, 4*8 + NFS4_DEVICEID4_SIZE + 4);
+       p = xdr_encode_hyper(p, devinfo->read_count);
+       p = xdr_encode_hyper(p, devinfo->read_bytes);
+       p = xdr_encode_hyper(p, devinfo->write_count);
+       p = xdr_encode_hyper(p, devinfo->write_bytes);
+       p = xdr_encode_opaque_fixed(p, devinfo->dev_id.data,
+                       NFS4_DEVICEID4_SIZE);
+       /* Encode layoutupdate4 */
+       *p++ = cpu_to_be32(devinfo->layout_type);
+       if (devinfo->layoutstats_encode != NULL)
+               devinfo->layoutstats_encode(xdr, args, devinfo);
+       else
+               encode_uint32(xdr, 0);
+}
+
 /*
  * Encode ALLOCATE request
  */
@@ -137,6 +184,28 @@ static void nfs4_xdr_enc_seek(struct rpc_rqst *req,
        encode_nops(&hdr);
 }
 
+/*
+ * Encode LAYOUTSTATS request
+ */
+static void nfs4_xdr_enc_layoutstats(struct rpc_rqst *req,
+                                    struct xdr_stream *xdr,
+                                    struct nfs42_layoutstat_args *args)
+{
+       int i;
+
+       struct compound_hdr hdr = {
+               .minorversion = nfs4_xdr_minorversion(&args->seq_args),
+       };
+
+       encode_compound_hdr(xdr, req, &hdr);
+       encode_sequence(xdr, &args->seq_args, &hdr);
+       encode_putfh(xdr, args->fh, &hdr);
+       WARN_ON(args->num_dev > PNFS_LAYOUTSTATS_MAXDEV);
+       for (i = 0; i < args->num_dev; i++)
+               encode_layoutstats(xdr, args, &args->devinfo[i], &hdr);
+       encode_nops(&hdr);
+}
+
 static int decode_allocate(struct xdr_stream *xdr, struct nfs42_falloc_res *res)
 {
        return decode_op_hdr(xdr, OP_ALLOCATE);
@@ -169,6 +238,12 @@ out_overflow:
        return -EIO;
 }
 
+static int decode_layoutstats(struct xdr_stream *xdr,
+                             struct nfs42_layoutstat_res *res)
+{
+       return decode_op_hdr(xdr, OP_LAYOUTSTATS);
+}
+
 /*
  * Decode ALLOCATE request
  */
@@ -246,4 +321,35 @@ static int nfs4_xdr_dec_seek(struct rpc_rqst *rqstp,
 out:
        return status;
 }
+
+/*
+ * Decode LAYOUTSTATS request
+ */
+static int nfs4_xdr_dec_layoutstats(struct rpc_rqst *rqstp,
+                                   struct xdr_stream *xdr,
+                                   struct nfs42_layoutstat_res *res)
+{
+       struct compound_hdr hdr;
+       int status, i;
+
+       status = decode_compound_hdr(xdr, &hdr);
+       if (status)
+               goto out;
+       status = decode_sequence(xdr, &res->seq_res, rqstp);
+       if (status)
+               goto out;
+       status = decode_putfh(xdr);
+       if (status)
+               goto out;
+       WARN_ON(res->num_dev > PNFS_LAYOUTSTATS_MAXDEV);
+       for (i = 0; i < res->num_dev; i++) {
+               status = decode_layoutstats(xdr, res);
+               if (status)
+                       goto out;
+       }
+out:
+       res->rpc_status = status;
+       return status;
+}
+
 #endif /* __LINUX_FS_NFS_NFS4_2XDR_H */
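
The encoder above writes offsets, lengths and byte counts as XDR hypers (8-byte big-endian integers) and the device ID as a fixed-length opaque. The standalone sketch below shows those two primitives under plain RFC 4506 XDR rules, not the kernel's xdr_stream helpers; buffer sizes and sample values are arbitrary.

/* Illustrative XDR-style encoding: a "hyper" is an 8-byte big-endian
 * integer, and fixed-length opaques are copied then padded to a 4-byte
 * multiple. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static unsigned char *encode_hyper(unsigned char *p, uint64_t v)
{
        for (int i = 7; i >= 0; i--)
                *p++ = (unsigned char)(v >> (i * 8));
        return p;
}

static unsigned char *encode_opaque_fixed(unsigned char *p,
                                          const void *data, size_t len)
{
        memcpy(p, data, len);
        p += len;
        while (len++ & 3)           /* pad to a 4-byte boundary */
                *p++ = 0;
        return p;
}

int main(void)
{
        unsigned char buf[32], *p = buf;
        const unsigned char devid[6] = { 0xde, 0xad, 0xbe, 0xef, 0x00, 0x01 };

        p = encode_hyper(p, 0x1122334455667788ULL);   /* e.g. an offset */
        p = encode_opaque_fixed(p, devid, sizeof(devid));

        for (unsigned char *q = buf; q < p; q++)
                printf("%02x ", *q);
        printf("\n");
        return 0;
}
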
index fdef424b0cd3c6120ddc7d9e379e49647b7b24fb..ea3bee919a765840a267f8fc59ccdec4ef61f676 100644 (file)
@@ -233,6 +233,7 @@ extern int nfs4_handle_exception(struct nfs_server *, int, struct nfs4_exception
 extern int nfs4_call_sync(struct rpc_clnt *, struct nfs_server *,
                          struct rpc_message *, struct nfs4_sequence_args *,
                          struct nfs4_sequence_res *, int);
+extern void nfs4_init_sequence(struct nfs4_sequence_args *, struct nfs4_sequence_res *, int);
 extern int nfs4_proc_setclientid(struct nfs_client *, u32, unsigned short, struct rpc_cred *, struct nfs4_setclientid_res *);
 extern int nfs4_proc_setclientid_confirm(struct nfs_client *, struct nfs4_setclientid_res *arg, struct rpc_cred *);
 extern int nfs4_proc_get_rootfh(struct nfs_server *, struct nfs_fh *, struct nfs_fsinfo *, bool);
index e42be52a8c18d8121c934a5ac79483e77166206c..3aa6a9ba51136f31f30dea29d60dded106b05241 100644 (file)
@@ -676,7 +676,6 @@ found:
                break;
        }
 
-       /* No matching nfs_client found. */
        spin_unlock(&nn->nfs_client_lock);
        dprintk("NFS: <-- %s status = %d\n", __func__, status);
        nfs_put_client(prev);
index f58c17b3b480367c6322359ae7ca4b33b3695348..dcd39d4e2efebd78eed64d4df00fd2745f747027 100644 (file)
@@ -41,6 +41,10 @@ nfs4_file_open(struct inode *inode, struct file *filp)
 
        dprintk("NFS: open file(%pd2)\n", dentry);
 
+       err = nfs_check_flags(openflags);
+       if (err)
+               return err;
+
        if ((openflags & O_ACCMODE) == 3)
                openflags--;
 
index c0b3a16b4a00806f79ea9eb28b6933a8d94bcc52..039b3eb6d83404f33224961465406d52203ff570 100644 (file)
@@ -35,13 +35,6 @@ int nfs4_get_rootfh(struct nfs_server *server, struct nfs_fh *mntfh, bool auth_p
                goto out;
        }
 
-       if (fsinfo.fattr->valid & NFS_ATTR_FATTR_V4_REFERRAL) {
-               printk(KERN_ERR "nfs4_get_rootfh:"
-                      " getroot obtained referral\n");
-               ret = -EREMOTE;
-               goto out;
-       }
-
        memcpy(&server->fsid, &fsinfo.fattr->fsid, sizeof(server->fsid));
 out:
        nfs_free_fattr(fsinfo.fattr);
index 2e1737c40a29837488823e04f27db975e0a51b9b..535dfc69c628f825cc4422339406b1365f66e8d4 100644 (file)
@@ -494,12 +494,7 @@ nfs_idmap_delete(struct nfs_client *clp)
 
 int nfs_idmap_init(void)
 {
-       int ret;
-       ret = nfs_idmap_init_keyring();
-       if (ret != 0)
-               goto out;
-out:
-       return ret;
+       return nfs_idmap_init_keyring();
 }
 
 void nfs_idmap_quit(void)
index 55e1e3af23a3d3f2313f977b185eb8c3f8ccbc6d..6f228b5af819ea576240c40869c1da74d823e460 100644 (file)
@@ -356,6 +356,9 @@ int nfs4_handle_exception(struct nfs_server *server, int errorcode, struct nfs4_
                case 0:
                        return 0;
                case -NFS4ERR_OPENMODE:
+               case -NFS4ERR_DELEG_REVOKED:
+               case -NFS4ERR_ADMIN_REVOKED:
+               case -NFS4ERR_BAD_STATEID:
                        if (inode && nfs4_have_delegation(inode, FMODE_READ)) {
                                nfs4_inode_return_delegation(inode);
                                exception->retry = 1;
@@ -367,15 +370,6 @@ int nfs4_handle_exception(struct nfs_server *server, int errorcode, struct nfs4_
                        if (ret < 0)
                                break;
                        goto wait_on_recovery;
-               case -NFS4ERR_DELEG_REVOKED:
-               case -NFS4ERR_ADMIN_REVOKED:
-               case -NFS4ERR_BAD_STATEID:
-                       if (state == NULL)
-                               break;
-                       ret = nfs4_schedule_stateid_recovery(server, state);
-                       if (ret < 0)
-                               break;
-                       goto wait_on_recovery;
                case -NFS4ERR_EXPIRED:
                        if (state != NULL) {
                                ret = nfs4_schedule_stateid_recovery(server, state);
@@ -482,8 +476,8 @@ struct nfs4_call_sync_data {
        struct nfs4_sequence_res *seq_res;
 };
 
-static void nfs4_init_sequence(struct nfs4_sequence_args *args,
-                              struct nfs4_sequence_res *res, int cache_reply)
+void nfs4_init_sequence(struct nfs4_sequence_args *args,
+                       struct nfs4_sequence_res *res, int cache_reply)
 {
        args->sa_slot = NULL;
        args->sa_cache_this = cache_reply;
@@ -1553,6 +1547,13 @@ static int nfs4_open_recover_helper(struct nfs4_opendata *opendata, fmode_t fmod
        struct nfs4_state *newstate;
        int ret;
 
+       if ((opendata->o_arg.claim == NFS4_OPEN_CLAIM_DELEGATE_CUR ||
+            opendata->o_arg.claim == NFS4_OPEN_CLAIM_DELEG_CUR_FH) &&
+           (opendata->o_arg.u.delegation_type & fmode) != fmode)
+               /* This mode can't have been delegated, so we must have
+                * a valid open_stateid to cover it - no need to reclaim.
+                */
+               return 0;
        opendata->o_arg.open_flags = 0;
        opendata->o_arg.fmode = fmode;
        opendata->o_arg.share_access = nfs4_map_atomic_open_share(
@@ -1684,6 +1685,7 @@ static int nfs4_handle_delegation_recall_error(struct nfs_server *server, struct
                                        "%d.\n", __func__, err);
                case 0:
                case -ENOENT:
+               case -EAGAIN:
                case -ESTALE:
                        break;
                case -NFS4ERR_BADSESSION:
@@ -3355,6 +3357,8 @@ static int nfs4_proc_lookup_common(struct rpc_clnt **clnt, struct inode *dir,
                        goto out;
                case -NFS4ERR_MOVED:
                        err = nfs4_get_referral(client, dir, name, fattr, fhandle);
+                       if (err == -NFS4ERR_MOVED)
+                               err = nfs4_handle_exception(NFS_SERVER(dir), err, &exception);
                        goto out;
                case -NFS4ERR_WRONGSEC:
                        err = -EPERM;
@@ -4955,49 +4959,128 @@ static void nfs4_init_boot_verifier(const struct nfs_client *clp,
        memcpy(bootverf->data, verf, sizeof(bootverf->data));
 }
 
-static unsigned int
-nfs4_init_nonuniform_client_string(struct nfs_client *clp,
-                                  char *buf, size_t len)
+static int
+nfs4_init_nonuniform_client_string(struct nfs_client *clp)
 {
-       unsigned int result;
+       int result;
+       size_t len;
+       char *str;
+       bool retried = false;
 
        if (clp->cl_owner_id != NULL)
-               return strlcpy(buf, clp->cl_owner_id, len);
+               return 0;
+retry:
+       rcu_read_lock();
+       len = 10 + strlen(clp->cl_ipaddr) + 1 +
+               strlen(rpc_peeraddr2str(clp->cl_rpcclient, RPC_DISPLAY_ADDR)) +
+               1 +
+               strlen(rpc_peeraddr2str(clp->cl_rpcclient, RPC_DISPLAY_PROTO)) +
+               1;
+       rcu_read_unlock();
+
+       if (len > NFS4_OPAQUE_LIMIT + 1)
+               return -EINVAL;
+
+       /*
+        * Since this string is allocated at mount time, and held until the
+        * nfs_client is destroyed, we can use GFP_KERNEL here w/o worrying
+        * about a memory-reclaim deadlock.
+        */
+       str = kmalloc(len, GFP_KERNEL);
+       if (!str)
+               return -ENOMEM;
 
        rcu_read_lock();
-       result = scnprintf(buf, len, "Linux NFSv4.0 %s/%s %s",
-                               clp->cl_ipaddr,
-                               rpc_peeraddr2str(clp->cl_rpcclient,
-                                                       RPC_DISPLAY_ADDR),
-                               rpc_peeraddr2str(clp->cl_rpcclient,
-                                                       RPC_DISPLAY_PROTO));
+       result = scnprintf(str, len, "Linux NFSv4.0 %s/%s %s",
+                       clp->cl_ipaddr,
+                       rpc_peeraddr2str(clp->cl_rpcclient, RPC_DISPLAY_ADDR),
+                       rpc_peeraddr2str(clp->cl_rpcclient, RPC_DISPLAY_PROTO));
        rcu_read_unlock();
-       clp->cl_owner_id = kstrdup(buf, GFP_KERNEL);
-       return result;
+
+       /* Did something change? */
+       if (result >= len) {
+               kfree(str);
+               if (retried)
+                       return -EINVAL;
+               retried = true;
+               goto retry;
+       }
+       clp->cl_owner_id = str;
+       return 0;
 }
 
-static unsigned int
-nfs4_init_uniform_client_string(struct nfs_client *clp,
-                               char *buf, size_t len)
+static int
+nfs4_init_uniquifier_client_string(struct nfs_client *clp)
+{
+       int result;
+       size_t len;
+       char *str;
+
+       len = 10 + 10 + 1 + 10 + 1 +
+               strlen(nfs4_client_id_uniquifier) + 1 +
+               strlen(clp->cl_rpcclient->cl_nodename) + 1;
+
+       if (len > NFS4_OPAQUE_LIMIT + 1)
+               return -EINVAL;
+
+       /*
+        * Since this string is allocated at mount time, and held until the
+        * nfs_client is destroyed, we can use GFP_KERNEL here w/o worrying
+        * about a memory-reclaim deadlock.
+        */
+       str = kmalloc(len, GFP_KERNEL);
+       if (!str)
+               return -ENOMEM;
+
+       result = scnprintf(str, len, "Linux NFSv%u.%u %s/%s",
+                       clp->rpc_ops->version, clp->cl_minorversion,
+                       nfs4_client_id_uniquifier,
+                       clp->cl_rpcclient->cl_nodename);
+       if (result >= len) {
+               kfree(str);
+               return -EINVAL;
+       }
+       clp->cl_owner_id = str;
+       return 0;
+}
+
+static int
+nfs4_init_uniform_client_string(struct nfs_client *clp)
 {
-       const char *nodename = clp->cl_rpcclient->cl_nodename;
-       unsigned int result;
+       int result;
+       size_t len;
+       char *str;
 
        if (clp->cl_owner_id != NULL)
-               return strlcpy(buf, clp->cl_owner_id, len);
+               return 0;
 
        if (nfs4_client_id_uniquifier[0] != '\0')
-               result = scnprintf(buf, len, "Linux NFSv%u.%u %s/%s",
-                               clp->rpc_ops->version,
-                               clp->cl_minorversion,
-                               nfs4_client_id_uniquifier,
-                               nodename);
-       else
-               result = scnprintf(buf, len, "Linux NFSv%u.%u %s",
-                               clp->rpc_ops->version, clp->cl_minorversion,
-                               nodename);
-       clp->cl_owner_id = kstrdup(buf, GFP_KERNEL);
-       return result;
+               return nfs4_init_uniquifier_client_string(clp);
+
+       len = 10 + 10 + 1 + 10 + 1 +
+               strlen(clp->cl_rpcclient->cl_nodename) + 1;
+
+       if (len > NFS4_OPAQUE_LIMIT + 1)
+               return -EINVAL;
+
+       /*
+        * Since this string is allocated at mount time, and held until the
+        * nfs_client is destroyed, we can use GFP_KERNEL here w/o worrying
+        * about a memory-reclaim deadlock.
+        */
+       str = kmalloc(len, GFP_KERNEL);
+       if (!str)
+               return -ENOMEM;
+
+       result = scnprintf(str, len, "Linux NFSv%u.%u %s",
+                       clp->rpc_ops->version, clp->cl_minorversion,
+                       clp->cl_rpcclient->cl_nodename);
+       if (result >= len) {
+               kfree(str);
+               return -EINVAL;
+       }
+       clp->cl_owner_id = str;
+       return 0;
 }
 
 /*
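
The rewritten client-identifier helpers above size the string first, allocate it with GFP_KERNEL, format it, and treat a result that does not fit as "the peer address changed underneath us" (retry once in the nonuniform case, fail otherwise). Below is a sketch of the same size-format-verify pattern in standard C, using snprintf() truncation semantics rather than the kernel's scnprintf(); the helper name and inputs are made up.

/* Sketch only: compute the needed length, allocate, format, and treat
 * truncation as "inputs changed, retry once". */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static char *build_client_id(const char *ipaddr, const char *nodename)
{
        for (int attempt = 0; attempt < 2; attempt++) {
                /* "Linux NFSv4.0 " + ipaddr + '/' + nodename + NUL */
                size_t len = strlen("Linux NFSv4.0 ") + strlen(ipaddr) + 1 +
                             strlen(nodename) + 1;
                char *str = malloc(len);
                int result;

                if (!str)
                        return NULL;
                result = snprintf(str, len, "Linux NFSv4.0 %s/%s",
                                  ipaddr, nodename);
                if (result >= 0 && (size_t)result < len)
                        return str;         /* fits, keep it */
                free(str);                  /* truncated: retry */
        }
        return NULL;
}

int main(void)
{
        char *id = build_client_id("192.0.2.1", "client.example");

        if (id) {
                puts(id);
                free(id);
        }
        return 0;
}
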
@@ -5044,7 +5127,7 @@ int nfs4_proc_setclientid(struct nfs_client *clp, u32 program,
        struct nfs4_setclientid setclientid = {
                .sc_verifier = &sc_verifier,
                .sc_prog = program,
-               .sc_cb_ident = clp->cl_cb_ident,
+               .sc_clnt = clp,
        };
        struct rpc_message msg = {
                .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETCLIENTID],
@@ -5064,16 +5147,15 @@ int nfs4_proc_setclientid(struct nfs_client *clp, u32 program,
 
        /* nfs_client_id4 */
        nfs4_init_boot_verifier(clp, &sc_verifier);
+
        if (test_bit(NFS_CS_MIGRATION, &clp->cl_flags))
-               setclientid.sc_name_len =
-                               nfs4_init_uniform_client_string(clp,
-                                               setclientid.sc_name,
-                                               sizeof(setclientid.sc_name));
+               status = nfs4_init_uniform_client_string(clp);
        else
-               setclientid.sc_name_len =
-                               nfs4_init_nonuniform_client_string(clp,
-                                               setclientid.sc_name,
-                                               sizeof(setclientid.sc_name));
+               status = nfs4_init_nonuniform_client_string(clp);
+
+       if (status)
+               goto out;
+
        /* cb_client4 */
        setclientid.sc_netid_len =
                                nfs4_init_callback_netid(clp,
@@ -5083,9 +5165,9 @@ int nfs4_proc_setclientid(struct nfs_client *clp, u32 program,
                                sizeof(setclientid.sc_uaddr), "%s.%u.%u",
                                clp->cl_ipaddr, port >> 8, port & 255);
 
-       dprintk("NFS call  setclientid auth=%s, '%.*s'\n",
+       dprintk("NFS call  setclientid auth=%s, '%s'\n",
                clp->cl_rpcclient->cl_auth->au_ops->au_name,
-               setclientid.sc_name_len, setclientid.sc_name);
+               clp->cl_owner_id);
        task = rpc_run_task(&task_setup_data);
        if (IS_ERR(task)) {
                status = PTR_ERR(task);
@@ -5402,6 +5484,7 @@ static struct nfs4_unlockdata *nfs4_alloc_unlockdata(struct file_lock *fl,
        atomic_inc(&lsp->ls_count);
        /* Ensure we don't close file until we're done freeing locks! */
        p->ctx = get_nfs_open_context(ctx);
+       get_file(fl->fl_file);
        memcpy(&p->fl, fl, sizeof(p->fl));
        p->server = NFS_SERVER(inode);
        return p;
@@ -5413,6 +5496,7 @@ static void nfs4_locku_release_calldata(void *data)
        nfs_free_seqid(calldata->arg.seqid);
        nfs4_put_lock_state(calldata->lsp);
        put_nfs_open_context(calldata->ctx);
+       fput(calldata->fl.fl_file);
        kfree(calldata);
 }
 
@@ -6846,11 +6930,14 @@ static int _nfs4_proc_exchange_id(struct nfs_client *clp, struct rpc_cred *cred,
        };
 
        nfs4_init_boot_verifier(clp, &verifier);
-       args.id_len = nfs4_init_uniform_client_string(clp, args.id,
-                                                       sizeof(args.id));
-       dprintk("NFS call  exchange_id auth=%s, '%.*s'\n",
+
+       status = nfs4_init_uniform_client_string(clp);
+       if (status)
+               goto out;
+
+       dprintk("NFS call  exchange_id auth=%s, '%s'\n",
                clp->cl_rpcclient->cl_auth->au_ops->au_name,
-               args.id_len, args.id);
+               clp->cl_owner_id);
 
        res.server_owner = kzalloc(sizeof(struct nfs41_server_owner),
                                        GFP_NOFS);
@@ -6885,7 +6972,7 @@ static int _nfs4_proc_exchange_id(struct nfs_client *clp, struct rpc_cred *cred,
                /* unsupported! */
                WARN_ON_ONCE(1);
                status = -EINVAL;
-               goto out_server_scope;
+               goto out_impl_id;
        }
 
        status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
@@ -6913,6 +7000,7 @@ static int _nfs4_proc_exchange_id(struct nfs_client *clp, struct rpc_cred *cred,
                /* use the most recent implementation id */
                kfree(clp->cl_implid);
                clp->cl_implid = res.impl_id;
+               res.impl_id = NULL;
 
                if (clp->cl_serverscope != NULL &&
                    !nfs41_same_server_scope(clp->cl_serverscope,
@@ -6926,15 +7014,16 @@ static int _nfs4_proc_exchange_id(struct nfs_client *clp, struct rpc_cred *cred,
 
                if (clp->cl_serverscope == NULL) {
                        clp->cl_serverscope = res.server_scope;
-                       goto out;
+                       res.server_scope = NULL;
                }
-       } else
-               kfree(res.impl_id);
+       }
 
-out_server_owner:
-       kfree(res.server_owner);
+out_impl_id:
+       kfree(res.impl_id);
 out_server_scope:
        kfree(res.server_scope);
+out_server_owner:
+       kfree(res.server_owner);
 out:
        if (clp->cl_implid != NULL)
                dprintk("NFS reply exchange_id: Server Implementation ID: "
@@ -8061,9 +8150,8 @@ nfs4_proc_layoutcommit(struct nfs4_layoutcommit_data *data, bool sync)
        struct rpc_task *task;
        int status = 0;
 
-       dprintk("NFS: %4d initiating layoutcommit call. sync %d "
-               "lbw: %llu inode %lu\n",
-               data->task.tk_pid, sync,
+       dprintk("NFS: initiating layoutcommit call. sync %d "
+               "lbw: %llu inode %lu\n", sync,
                data->args.lastbytewritten,
                data->args.inode->i_ino);
 
@@ -8557,7 +8645,8 @@ static const struct nfs4_minor_version_ops nfs_v4_2_minor_ops = {
                | NFS_CAP_ATOMIC_OPEN_V1
                | NFS_CAP_ALLOCATE
                | NFS_CAP_DEALLOCATE
-               | NFS_CAP_SEEK,
+               | NFS_CAP_SEEK
+               | NFS_CAP_LAYOUTSTATS,
        .init_client = nfs41_init_client,
        .shutdown_client = nfs41_shutdown_client,
        .match_stateid = nfs41_match_stateid,
index 2782cfca22650922e012a4f86a1755e3cca68243..605840dc89cf9e28c173659af201aab109f9328d 100644 (file)
@@ -309,7 +309,6 @@ int nfs41_init_clientid(struct nfs_client *clp, struct rpc_cred *cred)
 
        if (test_bit(NFS4CLNT_LEASE_CONFIRM, &clp->cl_state))
                goto do_confirm;
-       nfs4_begin_drain_session(clp);
        status = nfs4_proc_exchange_id(clp, cred);
        if (status != 0)
                goto out;
@@ -1482,6 +1481,8 @@ restart:
                                        spin_unlock(&state->state_lock);
                                }
                                nfs4_put_open_state(state);
+                               clear_bit(NFS4CLNT_RECLAIM_NOGRACE,
+                                       &state->flags);
                                spin_lock(&sp->so_lock);
                                goto restart;
                        }
@@ -1830,6 +1831,7 @@ static int nfs4_establish_lease(struct nfs_client *clp)
                clp->cl_mvops->reboot_recovery_ops;
        int status;
 
+       nfs4_begin_drain_session(clp);
        cred = nfs4_get_clid_cred(clp);
        if (cred == NULL)
                return -ENOENT;
index 0aea97841d3038b56056d0d7fcd0dcddb11f584e..558cd65dbdb752d111b5b85649b72bae36fdf040 100644 (file)
@@ -139,7 +139,8 @@ static int nfs4_stat_to_errno(int);
 #define encode_setclientid_maxsz \
                                (op_encode_hdr_maxsz + \
                                XDR_QUADLEN(NFS4_VERIFIER_SIZE) + \
-                               XDR_QUADLEN(NFS4_SETCLIENTID_NAMELEN) + \
+                               /* client name */ \
+                               1 + XDR_QUADLEN(NFS4_OPAQUE_LIMIT) + \
                                1 /* sc_prog */ + \
                                1 + XDR_QUADLEN(RPCBIND_MAXNETIDLEN) + \
                                1 + XDR_QUADLEN(RPCBIND_MAXUADDRLEN) + \
@@ -288,7 +289,8 @@ static int nfs4_stat_to_errno(int);
 #define encode_exchange_id_maxsz (op_encode_hdr_maxsz + \
                                encode_verifier_maxsz + \
                                1 /* co_ownerid.len */ + \
-                               XDR_QUADLEN(NFS4_EXCHANGE_ID_LEN) + \
+                               /* eia_clientowner */ \
+                               1 + XDR_QUADLEN(NFS4_OPAQUE_LIMIT) + \
                                1 /* flags */ + \
                                1 /* spa_how */ + \
                                /* max is SP4_MACH_CRED (for now) */ + \
@@ -1667,13 +1669,14 @@ static void encode_setclientid(struct xdr_stream *xdr, const struct nfs4_setclie
        encode_op_hdr(xdr, OP_SETCLIENTID, decode_setclientid_maxsz, hdr);
        encode_nfs4_verifier(xdr, setclientid->sc_verifier);
 
-       encode_string(xdr, setclientid->sc_name_len, setclientid->sc_name);
+       encode_string(xdr, strlen(setclientid->sc_clnt->cl_owner_id),
+                       setclientid->sc_clnt->cl_owner_id);
        p = reserve_space(xdr, 4);
        *p = cpu_to_be32(setclientid->sc_prog);
        encode_string(xdr, setclientid->sc_netid_len, setclientid->sc_netid);
        encode_string(xdr, setclientid->sc_uaddr_len, setclientid->sc_uaddr);
        p = reserve_space(xdr, 4);
-       *p = cpu_to_be32(setclientid->sc_cb_ident);
+       *p = cpu_to_be32(setclientid->sc_clnt->cl_cb_ident);
 }
 
 static void encode_setclientid_confirm(struct xdr_stream *xdr, const struct nfs4_setclientid_res *arg, struct compound_hdr *hdr)
@@ -1747,7 +1750,8 @@ static void encode_exchange_id(struct xdr_stream *xdr,
        encode_op_hdr(xdr, OP_EXCHANGE_ID, decode_exchange_id_maxsz, hdr);
        encode_nfs4_verifier(xdr, args->verifier);
 
-       encode_string(xdr, args->id_len, args->id);
+       encode_string(xdr, strlen(args->client->cl_owner_id),
+                       args->client->cl_owner_id);
 
        encode_uint32(xdr, args->flags);
        encode_uint32(xdr, args->state_protect.how);
@@ -7427,6 +7431,7 @@ struct rpc_procinfo       nfs4_procedures[] = {
        PROC(SEEK,              enc_seek,               dec_seek),
        PROC(ALLOCATE,          enc_allocate,           dec_allocate),
        PROC(DEALLOCATE,        enc_deallocate,         dec_deallocate),
+       PROC(LAYOUTSTATS,       enc_layoutstats,        dec_layoutstats),
 #endif /* CONFIG_NFS_V4_2 */
 };
 
index 282b3936951060a2c8a6216c4c690e166a5fcb8d..1da68d3b1edabdb78c60527f502af5f40d6cf69b 100644 (file)
@@ -636,9 +636,8 @@ int nfs_initiate_pgio(struct rpc_clnt *clnt, struct nfs_pgio_header *hdr,
 
        hdr->rw_ops->rw_initiate(hdr, &msg, rpc_ops, &task_setup_data, how);
 
-       dprintk("NFS: %5u initiated pgio call "
+       dprintk("NFS: initiated pgio call "
                "(req %s/%llu, %u bytes @ offset %llu)\n",
-               hdr->task.tk_pid,
                hdr->inode->i_sb->s_id,
                (unsigned long long)NFS_FILEID(hdr->inode),
                hdr->args.count,
@@ -690,8 +689,6 @@ static int nfs_pgio_error(struct nfs_pageio_descriptor *desc,
 static void nfs_pgio_release(void *calldata)
 {
        struct nfs_pgio_header *hdr = calldata;
-       if (hdr->rw_ops->rw_release)
-               hdr->rw_ops->rw_release(hdr);
        nfs_pgio_data_destroy(hdr);
        hdr->completion_ops->completion(hdr);
 }
@@ -711,7 +708,9 @@ static void nfs_pageio_mirror_init(struct nfs_pgio_mirror *mirror,
  * nfs_pageio_init - initialise a page io descriptor
  * @desc: pointer to descriptor
  * @inode: pointer to inode
- * @doio: pointer to io function
+ * @pg_ops: pointer to pageio operations
+ * @compl_ops: pointer to pageio completion operations
+ * @rw_ops: pointer to nfs read/write operations
  * @bsize: io block size
  * @io_flags: extra parameters for the io function
  */
@@ -1186,6 +1185,7 @@ int nfs_pageio_add_request(struct nfs_pageio_descriptor *desc,
  * nfs_pageio_complete_mirror - Complete I/O on the current mirror of an
  *                             nfs_pageio_descriptor
  * @desc: pointer to io descriptor
+ * @mirror_idx: pointer to mirror index
  */
 static void nfs_pageio_complete_mirror(struct nfs_pageio_descriptor *desc,
                                       u32 mirror_idx)
index 230606243be6ad079733e583d173b14d3baeda55..0ba9a02c95664960f8c0f46ea97249bd8653fe16 100644 (file)
@@ -35,6 +35,7 @@
 #include "iostat.h"
 #include "nfs4trace.h"
 #include "delegation.h"
+#include "nfs42.h"
 
 #define NFSDBG_FACILITY                NFSDBG_PNFS
 #define PNFS_LAYOUTGET_RETRY_TIMEOUT (120*HZ)
@@ -1821,6 +1822,7 @@ int pnfs_write_done_resend_to_mds(struct nfs_pgio_header *hdr)
        /* Resend all requests through the MDS */
        nfs_pageio_init_write(&pgio, hdr->inode, FLUSH_STABLE, true,
                              hdr->completion_ops);
+       set_bit(NFS_CONTEXT_RESEND_WRITES, &hdr->args.context->flags);
        return nfs_pageio_resend(&pgio, hdr);
 }
 EXPORT_SYMBOL_GPL(pnfs_write_done_resend_to_mds);
@@ -1865,6 +1867,7 @@ pnfs_write_through_mds(struct nfs_pageio_descriptor *desc,
                mirror->pg_recoalesce = 1;
        }
        nfs_pgio_data_destroy(hdr);
+       hdr->release(hdr);
 }
 
 static enum pnfs_try_status
@@ -1979,6 +1982,7 @@ pnfs_read_through_mds(struct nfs_pageio_descriptor *desc,
                mirror->pg_recoalesce = 1;
        }
        nfs_pgio_data_destroy(hdr);
+       hdr->release(hdr);
 }
 
 /*
@@ -2247,3 +2251,63 @@ struct nfs4_threshold *pnfs_mdsthreshold_alloc(void)
        }
        return thp;
 }
+
+#if IS_ENABLED(CONFIG_NFS_V4_2)
+int
+pnfs_report_layoutstat(struct inode *inode)
+{
+       struct pnfs_layoutdriver_type *ld = NFS_SERVER(inode)->pnfs_curr_ld;
+       struct nfs_server *server = NFS_SERVER(inode);
+       struct nfs_inode *nfsi = NFS_I(inode);
+       struct nfs42_layoutstat_data *data;
+       struct pnfs_layout_hdr *hdr;
+       int status = 0;
+
+       if (!pnfs_enabled_sb(server) || !ld->prepare_layoutstats)
+               goto out;
+
+       if (!nfs_server_capable(inode, NFS_CAP_LAYOUTSTATS))
+               goto out;
+
+       if (test_and_set_bit(NFS_INO_LAYOUTSTATS, &nfsi->flags))
+               goto out;
+
+       spin_lock(&inode->i_lock);
+       if (!NFS_I(inode)->layout) {
+               spin_unlock(&inode->i_lock);
+               goto out;
+       }
+       hdr = NFS_I(inode)->layout;
+       pnfs_get_layout_hdr(hdr);
+       spin_unlock(&inode->i_lock);
+
+       data = kzalloc(sizeof(*data), GFP_KERNEL);
+       if (!data) {
+               status = -ENOMEM;
+               goto out_put;
+       }
+
+       data->args.fh = NFS_FH(inode);
+       data->args.inode = inode;
+       nfs4_stateid_copy(&data->args.stateid, &hdr->plh_stateid);
+       status = ld->prepare_layoutstats(&data->args);
+       if (status)
+               goto out_free;
+
+       status = nfs42_proc_layoutstats_generic(NFS_SERVER(inode), data);
+
+out:
+       dprintk("%s returns %d\n", __func__, status);
+       return status;
+
+out_free:
+       kfree(data);
+out_put:
+       pnfs_put_layout_hdr(hdr);
+       smp_mb__before_atomic();
+       clear_bit(NFS_INO_LAYOUTSTATS, &nfsi->flags);
+       smp_mb__after_atomic();
+       goto out;
+}
+EXPORT_SYMBOL_GPL(pnfs_report_layoutstat);
+#endif
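
pnfs_report_layoutstat() above uses test_and_set_bit(NFS_INO_LAYOUTSTATS, ...) so that at most one LAYOUTSTATS call is in flight per inode; the bit is cleared again on the error paths and in nfs42_layoutstat_release(). A small C11 sketch of that single-in-flight gate follows; it is illustrative only and ignores the kernel's explicit memory barriers.

/* Allow at most one report in flight; later callers bail out until the
 * first one clears the flag on completion or error. */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_flag report_in_flight = ATOMIC_FLAG_INIT;

static bool start_report(void)
{
        /* test-and-set returns the previous value */
        return !atomic_flag_test_and_set(&report_in_flight);
}

static void finish_report(void)
{
        atomic_flag_clear(&report_in_flight);
}

int main(void)
{
        printf("first caller starts: %s\n", start_report() ? "yes" : "no");
        printf("second caller starts: %s\n", start_report() ? "yes" : "no");
        finish_report();
        printf("after completion, next caller starts: %s\n",
               start_report() ? "yes" : "no");
        finish_report();
        return 0;
}
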
index 1e6308f82fc3d5887de850e72226233e4f2138d2..3e6ab7bfbabd428425227b6f9d2a94711edd371e 100644 (file)
@@ -178,6 +178,8 @@ struct pnfs_layoutdriver_type {
        void (*encode_layoutcommit) (struct pnfs_layout_hdr *lo,
                                     struct xdr_stream *xdr,
                                     const struct nfs4_layoutcommit_args *args);
+       int (*prepare_layoutstats) (struct nfs42_layoutstat_args *args);
+       void (*cleanup_layoutstats) (struct nfs42_layoutstat_data *data);
 };
 
 struct pnfs_layout_hdr {
@@ -290,7 +292,6 @@ int pnfs_write_done_resend_to_mds(struct nfs_pgio_header *);
 struct nfs4_threshold *pnfs_mdsthreshold_alloc(void);
 void pnfs_error_mark_layout_for_return(struct inode *inode,
                                       struct pnfs_layout_segment *lseg);
-
 /* nfs4_deviceid_flags */
 enum {
        NFS_DEVICEID_INVALID = 0,       /* set when MDS clientid recalled */
@@ -689,4 +690,14 @@ static inline void nfs4_pnfs_v3_ds_connect_unload(void)
 
 #endif /* CONFIG_NFS_V4_1 */
 
+#if IS_ENABLED(CONFIG_NFS_V4_2)
+int pnfs_report_layoutstat(struct inode *inode);
+#else
+static inline int
+pnfs_report_layoutstat(struct inode *inode)
+{
+       return 0;
+}
+#endif
+
 #endif /* FS_NFS_PNFS_H */
index f175b833b6ba75b22bced1f9d013ad670caed82d..aa62004f1706f9c685b368379ce021c1f3474c72 100644 (file)
@@ -2847,7 +2847,7 @@ static int param_set_portnr(const char *val, const struct kernel_param *kp)
        *((unsigned int *)kp->arg) = num;
        return 0;
 }
-static struct kernel_param_ops param_ops_portnr = {
+static const struct kernel_param_ops param_ops_portnr = {
        .set = param_set_portnr,
        .get = param_get_uint,
 };
index e6c262555e08a62aff65ef3baa04e9666e9f18c2..65869ca9c851dbf4f0b289ca84865a018c2b6e57 100644 (file)
@@ -1290,6 +1290,7 @@ static void nfs_initiate_write(struct nfs_pgio_header *hdr,
 static void nfs_redirty_request(struct nfs_page *req)
 {
        nfs_mark_request_dirty(req);
+       set_bit(NFS_CONTEXT_RESEND_WRITES, &req->wb_context->flags);
        nfs_unlock_request(req);
        nfs_end_page_writeback(req);
        nfs_release_request(req);
@@ -1348,11 +1349,6 @@ void nfs_commit_prepare(struct rpc_task *task, void *calldata)
        NFS_PROTO(data->inode)->commit_rpc_prepare(task, data);
 }
 
-static void nfs_writeback_release_common(struct nfs_pgio_header *hdr)
-{
-       /* do nothing! */
-}
-
 /*
  * Special version of should_remove_suid() that ignores capabilities.
  */
@@ -1556,7 +1552,7 @@ int nfs_initiate_commit(struct rpc_clnt *clnt, struct nfs_commit_data *data,
        /* Set up the initial task struct.  */
        nfs_ops->commit_setup(data, &msg);
 
-       dprintk("NFS: %5u initiated commit call\n", data->task.tk_pid);
+       dprintk("NFS: initiated commit call\n");
 
        nfs4_state_protect(NFS_SERVER(data->inode)->nfs_client,
                NFS_SP4_MACH_CRED_COMMIT, &task_setup_data.rpc_client, &msg);
@@ -2013,7 +2009,6 @@ static const struct nfs_rw_ops nfs_rw_write_ops = {
        .rw_mode                = FMODE_WRITE,
        .rw_alloc_header        = nfs_writehdr_alloc,
        .rw_free_header         = nfs_writehdr_free,
-       .rw_release             = nfs_writeback_release_common,
        .rw_done                = nfs_writeback_done,
        .rw_result              = nfs_writeback_result,
        .rw_initiate            = nfs_initiate_write,
index 0ee0bed3649baf1f8974c15e42916294ab55ff6c..6b8b92b19cec9c868992fd089a00fc74f669fbdc 100644 (file)
@@ -61,11 +61,6 @@ static inline void nilfs_put_page(struct page *page)
        page_cache_release(page);
 }
 
-static inline unsigned long dir_pages(struct inode *inode)
-{
-       return (inode->i_size+PAGE_CACHE_SIZE-1)>>PAGE_CACHE_SHIFT;
-}
-
 /*
  * Return the offset into page `page_nr' of the last valid
  * byte in that page, plus one.
index 258d9fe2521a52f1a6d96d310d90bd0890434634..4a73d6dffabf696198f9ce892b2f93d7008e3266 100644 (file)
@@ -307,31 +307,13 @@ static int nilfs_write_end(struct file *file, struct address_space *mapping,
 static ssize_t
 nilfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter, loff_t offset)
 {
-       struct file *file = iocb->ki_filp;
-       struct address_space *mapping = file->f_mapping;
-       struct inode *inode = file->f_mapping->host;
-       size_t count = iov_iter_count(iter);
-       ssize_t size;
+       struct inode *inode = file_inode(iocb->ki_filp);
 
        if (iov_iter_rw(iter) == WRITE)
                return 0;
 
        /* Needs synchronization with the cleaner */
-       size = blockdev_direct_IO(iocb, inode, iter, offset, nilfs_get_block);
-
-       /*
-        * In case of error extending write may have instantiated a few
-        * blocks outside i_size. Trim these off again.
-        */
-       if (unlikely(iov_iter_rw(iter) == WRITE && size < 0)) {
-               loff_t isize = i_size_read(inode);
-               loff_t end = offset + count;
-
-               if (end > isize)
-                       nilfs_write_failed(mapping, end);
-       }
-
-       return size;
+       return blockdev_direct_IO(iocb, inode, iter, offset, nilfs_get_block);
 }
 
 const struct address_space_operations nilfs_aops = {
index 4506486974336d6451abc0658de78c0c06bdec9a..5b1e2a497e5114c26e556830f9f891c36130ffc7 100644 (file)
@@ -26,7 +26,7 @@
 #include <linux/fs.h> /* struct inode */
 #include <linux/fsnotify_backend.h>
 #include <linux/idr.h>
-#include <linux/init.h> /* module_init */
+#include <linux/init.h> /* fs_initcall */
 #include <linux/inotify.h>
 #include <linux/kernel.h> /* roundup() */
 #include <linux/namei.h> /* LOOKUP_FOLLOW */
@@ -812,4 +812,4 @@ static int __init inotify_user_setup(void)
 
        return 0;
 }
-module_init(inotify_user_setup);
+fs_initcall(inotify_user_setup);
index 2cd65367076458e84532eebcb67869944e3aa327..262561fea923aa2315cffe91af91d12b399ded8c 100644 (file)
@@ -382,7 +382,7 @@ static ssize_t ntfs_prepare_file_for_write(struct kiocb *iocb,
        base_ni = ni;
        if (NInoAttr(ni))
                base_ni = ni->ext.base_ntfs_ino;
-       err = file_remove_suid(file);
+       err = file_remove_privs(file);
        if (unlikely(err))
                goto out;
        /*
index 76b6cfb579d73944fff234df3999a19a9e002e51..b3c3469de6cb050f3422d70fe0fc0867003a408c 100644 (file)
@@ -239,7 +239,7 @@ typedef struct {
  */
 static inline ntfs_inode *NTFS_I(struct inode *inode)
 {
-       return (ntfs_inode *)list_entry(inode, big_ntfs_inode, vfs_inode);
+       return (ntfs_inode *)container_of(inode, big_ntfs_inode, vfs_inode);
 }
 
 static inline struct inode *VFS_I(ntfs_inode *ni)
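
The NTFS_I() fix above replaces list_entry() with container_of(), which is what the operation really is: recovering the enclosing structure from a pointer to an embedded member (list_entry() is itself defined in terms of container_of()). Below is a standalone sketch of the idiom with a local macro; the real one lives in the kernel headers, and the struct names here are invented.

/* Standalone illustration of the container_of() idiom. */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct vfs_inode { long ino; };

struct sample_inode {
        int state;
        struct vfs_inode vfs_inode;   /* embedded member */
};

int main(void)
{
        struct sample_inode ni = { .state = 1, .vfs_inode = { .ino = 42 } };
        struct vfs_inode *inode = &ni.vfs_inode;

        /* Recover the enclosing sample_inode from the embedded member. */
        struct sample_inode *back =
                container_of(inode, struct sample_inode, vfs_inode);

        printf("state=%d ino=%ld\n", back->state, back->vfs_inode.ino);
        return 0;
}
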
index e0250bdcc44005db6510ac516a53923282db5d12..e33dab287fa00a3d1d657e4b2d23c1f74529ae72 100644 (file)
--- a/fs/open.c
+++ b/fs/open.c
@@ -51,8 +51,10 @@ int do_truncate(struct dentry *dentry, loff_t length, unsigned int time_attrs,
                newattrs.ia_valid |= ATTR_FILE;
        }
 
-       /* Remove suid/sgid on truncate too */
-       ret = should_remove_suid(dentry);
+       /* Remove suid, sgid, and file capabilities on truncate too */
+       ret = dentry_needs_remove_privs(dentry);
+       if (ret < 0)
+               return ret;
        if (ret)
                newattrs.ia_valid |= ret | ATTR_FORCE;
 
@@ -678,18 +680,18 @@ int open_check_o_direct(struct file *f)
 }
 
 static int do_dentry_open(struct file *f,
+                         struct inode *inode,
                          int (*open)(struct inode *, struct file *),
                          const struct cred *cred)
 {
        static const struct file_operations empty_fops = {};
-       struct inode *inode;
        int error;
 
        f->f_mode = OPEN_FMODE(f->f_flags) | FMODE_LSEEK |
                                FMODE_PREAD | FMODE_PWRITE;
 
        path_get(&f->f_path);
-       inode = f->f_inode = f->f_path.dentry->d_inode;
+       f->f_inode = inode;
        f->f_mapping = inode->i_mapping;
 
        if (unlikely(f->f_flags & O_PATH)) {
@@ -793,7 +795,8 @@ int finish_open(struct file *file, struct dentry *dentry,
        BUG_ON(*opened & FILE_OPENED); /* once it's opened, it's opened */
 
        file->f_path.dentry = dentry;
-       error = do_dentry_open(file, open, current_cred());
+       error = do_dentry_open(file, d_backing_inode(dentry), open,
+                              current_cred());
        if (!error)
                *opened |= FILE_OPENED;
 
@@ -822,6 +825,34 @@ int finish_no_open(struct file *file, struct dentry *dentry)
 }
 EXPORT_SYMBOL(finish_no_open);
 
+char *file_path(struct file *filp, char *buf, int buflen)
+{
+       return d_path(&filp->f_path, buf, buflen);
+}
+EXPORT_SYMBOL(file_path);
+
+/**
+ * vfs_open - open the file at the given path
+ * @path: path to open
+ * @file: newly allocated file with f_flag initialized
+ * @cred: credentials to use
+ */
+int vfs_open(const struct path *path, struct file *file,
+            const struct cred *cred)
+{
+       struct dentry *dentry = path->dentry;
+       struct inode *inode = dentry->d_inode;
+
+       file->f_path = *path;
+       if (dentry->d_flags & DCACHE_OP_SELECT_INODE) {
+               inode = dentry->d_op->d_select_inode(dentry, file->f_flags);
+               if (IS_ERR(inode))
+                       return PTR_ERR(inode);
+       }
+
+       return do_dentry_open(file, inode, NULL, cred);
+}
+
 struct file *dentry_open(const struct path *path, int flags,
                         const struct cred *cred)
 {
@@ -853,26 +884,6 @@ struct file *dentry_open(const struct path *path, int flags,
 }
 EXPORT_SYMBOL(dentry_open);
 
-/**
- * vfs_open - open the file at the given path
- * @path: path to open
- * @filp: newly allocated file with f_flag initialized
- * @cred: credentials to use
- */
-int vfs_open(const struct path *path, struct file *filp,
-            const struct cred *cred)
-{
-       struct inode *inode = path->dentry->d_inode;
-
-       if (inode->i_op->dentry_open)
-               return inode->i_op->dentry_open(path->dentry, filp, cred);
-       else {
-               filp->f_path = *path;
-               return do_dentry_open(filp, NULL, cred);
-       }
-}
-EXPORT_SYMBOL(vfs_open);
-
 static inline int build_open_flags(int flags, umode_t mode, struct open_flags *op)
 {
        int lookup_flags = 0;
index 308379b2d0b2cb82b6ad505755484a19b4042fa0..f140e3dbfb7bdeb967cad4639c9e32c236a3cced 100644 (file)
@@ -337,37 +337,30 @@ static bool ovl_open_need_copy_up(int flags, enum ovl_path_type type,
        return true;
 }
 
-static int ovl_dentry_open(struct dentry *dentry, struct file *file,
-                   const struct cred *cred)
+struct inode *ovl_d_select_inode(struct dentry *dentry, unsigned file_flags)
 {
        int err;
        struct path realpath;
        enum ovl_path_type type;
-       bool want_write = false;
 
        type = ovl_path_real(dentry, &realpath);
-       if (ovl_open_need_copy_up(file->f_flags, type, realpath.dentry)) {
-               want_write = true;
+       if (ovl_open_need_copy_up(file_flags, type, realpath.dentry)) {
                err = ovl_want_write(dentry);
                if (err)
-                       goto out;
+                       return ERR_PTR(err);
 
-               if (file->f_flags & O_TRUNC)
+               if (file_flags & O_TRUNC)
                        err = ovl_copy_up_last(dentry, NULL, true);
                else
                        err = ovl_copy_up(dentry);
+               ovl_drop_write(dentry);
                if (err)
-                       goto out_drop_write;
+                       return ERR_PTR(err);
 
                ovl_path_upper(dentry, &realpath);
        }
 
-       err = vfs_open(&realpath, file, cred);
-out_drop_write:
-       if (want_write)
-               ovl_drop_write(dentry);
-out:
-       return err;
+       return d_backing_inode(realpath.dentry);
 }
 
 static const struct inode_operations ovl_file_inode_operations = {
@@ -378,7 +371,6 @@ static const struct inode_operations ovl_file_inode_operations = {
        .getxattr       = ovl_getxattr,
        .listxattr      = ovl_listxattr,
        .removexattr    = ovl_removexattr,
-       .dentry_open    = ovl_dentry_open,
 };
 
 static const struct inode_operations ovl_symlink_inode_operations = {
index 17ac5afc9ffbce150d03e352fd7aaa99f101aa0c..ea5a40b06e3ad3f9e114bd7d3aab8b931b567051 100644 (file)
@@ -173,6 +173,7 @@ ssize_t ovl_getxattr(struct dentry *dentry, const char *name,
                     void *value, size_t size);
 ssize_t ovl_listxattr(struct dentry *dentry, char *list, size_t size);
 int ovl_removexattr(struct dentry *dentry, const char *name);
+struct inode *ovl_d_select_inode(struct dentry *dentry, unsigned file_flags);
 
 struct inode *ovl_new_inode(struct super_block *sb, umode_t mode,
                            struct ovl_entry *oe);
index 907870e81a72e36f4c5abb29fd34e23b4307efc5..70e9af5516004d20188ca0757110fe5bf203d19f 100644 (file)
@@ -23,6 +23,7 @@ struct ovl_cache_entry {
        u64 ino;
        struct list_head l_node;
        struct rb_node node;
+       struct ovl_cache_entry *next_maybe_whiteout;
        bool is_whiteout;
        char name[];
 };
@@ -39,7 +40,7 @@ struct ovl_readdir_data {
        struct rb_root root;
        struct list_head *list;
        struct list_head middle;
-       struct dentry *dir;
+       struct ovl_cache_entry *first_maybe_whiteout;
        int count;
        int err;
 };
@@ -79,7 +80,7 @@ static struct ovl_cache_entry *ovl_cache_entry_find(struct rb_root *root,
        return NULL;
 }
 
-static struct ovl_cache_entry *ovl_cache_entry_new(struct dentry *dir,
+static struct ovl_cache_entry *ovl_cache_entry_new(struct ovl_readdir_data *rdd,
                                                   const char *name, int len,
                                                   u64 ino, unsigned int d_type)
 {
@@ -98,29 +99,8 @@ static struct ovl_cache_entry *ovl_cache_entry_new(struct dentry *dir,
        p->is_whiteout = false;
 
        if (d_type == DT_CHR) {
-               struct dentry *dentry;
-               const struct cred *old_cred;
-               struct cred *override_cred;
-
-               override_cred = prepare_creds();
-               if (!override_cred) {
-                       kfree(p);
-                       return NULL;
-               }
-
-               /*
-                * CAP_DAC_OVERRIDE for lookup
-                */
-               cap_raise(override_cred->cap_effective, CAP_DAC_OVERRIDE);
-               old_cred = override_creds(override_cred);
-
-               dentry = lookup_one_len(name, dir, len);
-               if (!IS_ERR(dentry)) {
-                       p->is_whiteout = ovl_is_whiteout(dentry);
-                       dput(dentry);
-               }
-               revert_creds(old_cred);
-               put_cred(override_cred);
+               p->next_maybe_whiteout = rdd->first_maybe_whiteout;
+               rdd->first_maybe_whiteout = p;
        }
        return p;
 }
@@ -148,7 +128,7 @@ static int ovl_cache_entry_add_rb(struct ovl_readdir_data *rdd,
                        return 0;
        }
 
-       p = ovl_cache_entry_new(rdd->dir, name, len, ino, d_type);
+       p = ovl_cache_entry_new(rdd, name, len, ino, d_type);
        if (p == NULL)
                return -ENOMEM;
 
@@ -169,7 +149,7 @@ static int ovl_fill_lower(struct ovl_readdir_data *rdd,
        if (p) {
                list_move_tail(&p->l_node, &rdd->middle);
        } else {
-               p = ovl_cache_entry_new(rdd->dir, name, namelen, ino, d_type);
+               p = ovl_cache_entry_new(rdd, name, namelen, ino, d_type);
                if (p == NULL)
                        rdd->err = -ENOMEM;
                else
@@ -219,6 +199,43 @@ static int ovl_fill_merge(struct dir_context *ctx, const char *name,
                return ovl_fill_lower(rdd, name, namelen, offset, ino, d_type);
 }
 
+static int ovl_check_whiteouts(struct dentry *dir, struct ovl_readdir_data *rdd)
+{
+       int err;
+       struct ovl_cache_entry *p;
+       struct dentry *dentry;
+       const struct cred *old_cred;
+       struct cred *override_cred;
+
+       override_cred = prepare_creds();
+       if (!override_cred)
+               return -ENOMEM;
+
+       /*
+        * CAP_DAC_OVERRIDE for lookup
+        */
+       cap_raise(override_cred->cap_effective, CAP_DAC_OVERRIDE);
+       old_cred = override_creds(override_cred);
+
+       err = mutex_lock_killable(&dir->d_inode->i_mutex);
+       if (!err) {
+               while (rdd->first_maybe_whiteout) {
+                       p = rdd->first_maybe_whiteout;
+                       rdd->first_maybe_whiteout = p->next_maybe_whiteout;
+                       dentry = lookup_one_len(p->name, dir, p->len);
+                       if (!IS_ERR(dentry)) {
+                               p->is_whiteout = ovl_is_whiteout(dentry);
+                               dput(dentry);
+                       }
+               }
+               mutex_unlock(&dir->d_inode->i_mutex);
+       }
+       revert_creds(old_cred);
+       put_cred(override_cred);
+
+       return err;
+}
+
 static inline int ovl_dir_read(struct path *realpath,
                               struct ovl_readdir_data *rdd)
 {
@@ -229,7 +246,7 @@ static inline int ovl_dir_read(struct path *realpath,
        if (IS_ERR(realfile))
                return PTR_ERR(realfile);
 
-       rdd->dir = realpath->dentry;
+       rdd->first_maybe_whiteout = NULL;
        rdd->ctx.pos = 0;
        do {
                rdd->count = 0;
@@ -238,6 +255,10 @@ static inline int ovl_dir_read(struct path *realpath,
                if (err >= 0)
                        err = rdd->err;
        } while (!err && rdd->count);
+
+       if (!err && rdd->first_maybe_whiteout)
+               err = ovl_check_whiteouts(realpath->dentry, rdd);
+
        fput(realfile);
 
        return err;
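
The readdir change above stops checking each DT_CHR entry for a whiteout inline; candidates are instead chained via next_maybe_whiteout and resolved in one later pass, so the credential override and the directory lock are taken once per directory rather than once per entry. The plain-C sketch below shows that defer-then-drain shape; the entry names and the cheap "looks special" hint are illustrative stand-ins for the real whiteout lookup.

/* Defer expensive per-entry checks by chaining "maybe" candidates during
 * the scan and resolving them all in one later pass. */
#include <stdio.h>
#include <string.h>

struct entry {
        char name[32];
        struct entry *next_maybe;   /* chain of deferred candidates */
        int is_special;
};

struct scan_state {
        struct entry *first_maybe;  /* head of the deferred chain */
};

static void scan_entry(struct scan_state *s, struct entry *e, int looks_special)
{
        if (looks_special) {        /* cheap hint; the real check is deferred */
                e->next_maybe = s->first_maybe;
                s->first_maybe = e;
        }
}

/* One pass over the deferred chain; in the kernel this is where the
 * credential override and the directory lock are taken just once. */
static void resolve_deferred(struct scan_state *s)
{
        for (struct entry *e = s->first_maybe; e; e = e->next_maybe) {
                e->is_special = (strchr(e->name, '~') != NULL); /* stand-in check */
                printf("%s -> %s\n", e->name,
                       e->is_special ? "special" : "regular");
        }
        s->first_maybe = NULL;
}

int main(void)
{
        struct scan_state s = { 0 };
        struct entry a = { .name = "deleted~" }, b = { .name = "file.txt" };

        scan_entry(&s, &a, 1);
        scan_entry(&s, &b, 1);
        resolve_deferred(&s);
        return 0;
}
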
index bf8537c7f455207830046a50d67d394f86d37f4a..7466ff339c667ea63ead6bf04f18d5662ef3d142 100644 (file)
@@ -273,8 +273,56 @@ static void ovl_dentry_release(struct dentry *dentry)
        }
 }
 
+static int ovl_dentry_revalidate(struct dentry *dentry, unsigned int flags)
+{
+       struct ovl_entry *oe = dentry->d_fsdata;
+       unsigned int i;
+       int ret = 1;
+
+       for (i = 0; i < oe->numlower; i++) {
+               struct dentry *d = oe->lowerstack[i].dentry;
+
+               if (d->d_flags & DCACHE_OP_REVALIDATE) {
+                       ret = d->d_op->d_revalidate(d, flags);
+                       if (ret < 0)
+                               return ret;
+                       if (!ret) {
+                               if (!(flags & LOOKUP_RCU))
+                                       d_invalidate(d);
+                               return -ESTALE;
+                       }
+               }
+       }
+       return 1;
+}
+
+static int ovl_dentry_weak_revalidate(struct dentry *dentry, unsigned int flags)
+{
+       struct ovl_entry *oe = dentry->d_fsdata;
+       unsigned int i;
+       int ret = 1;
+
+       for (i = 0; i < oe->numlower; i++) {
+               struct dentry *d = oe->lowerstack[i].dentry;
+
+               if (d->d_flags & DCACHE_OP_WEAK_REVALIDATE) {
+                       ret = d->d_op->d_weak_revalidate(d, flags);
+                       if (ret <= 0)
+                               break;
+               }
+       }
+       return ret;
+}
+
 static const struct dentry_operations ovl_dentry_operations = {
        .d_release = ovl_dentry_release,
+       .d_select_inode = ovl_d_select_inode,
+};
+
+static const struct dentry_operations ovl_reval_dentry_operations = {
+       .d_release = ovl_dentry_release,
+       .d_revalidate = ovl_dentry_revalidate,
+       .d_weak_revalidate = ovl_dentry_weak_revalidate,
 };
 
 static struct ovl_entry *ovl_alloc_entry(unsigned int numlower)
@@ -288,6 +336,20 @@ static struct ovl_entry *ovl_alloc_entry(unsigned int numlower)
        return oe;
 }
 
+static bool ovl_dentry_remote(struct dentry *dentry)
+{
+       return dentry->d_flags &
+               (DCACHE_OP_REVALIDATE | DCACHE_OP_WEAK_REVALIDATE);
+}
+
+static bool ovl_dentry_weird(struct dentry *dentry)
+{
+       return dentry->d_flags & (DCACHE_NEED_AUTOMOUNT |
+                                 DCACHE_MANAGE_TRANSIT |
+                                 DCACHE_OP_HASH |
+                                 DCACHE_OP_COMPARE);
+}
+
 static inline struct dentry *ovl_lookup_real(struct dentry *dir,
                                             struct qstr *name)
 {
@@ -303,6 +365,10 @@ static inline struct dentry *ovl_lookup_real(struct dentry *dir,
        } else if (!dentry->d_inode) {
                dput(dentry);
                dentry = NULL;
+       } else if (ovl_dentry_weird(dentry)) {
+               dput(dentry);
+               /* Don't support traversing automounts and other weirdness */
+               dentry = ERR_PTR(-EREMOTE);
        }
        return dentry;
 }
@@ -350,6 +416,11 @@ struct dentry *ovl_lookup(struct inode *dir, struct dentry *dentry,
                        goto out;
 
                if (this) {
+                       if (unlikely(ovl_dentry_remote(this))) {
+                               dput(this);
+                               err = -EREMOTE;
+                               goto out;
+                       }
                        if (ovl_is_whiteout(this)) {
                                dput(this);
                                this = NULL;
@@ -694,25 +765,6 @@ static void ovl_unescape(char *s)
        }
 }
 
-static bool ovl_is_allowed_fs_type(struct dentry *root)
-{
-       const struct dentry_operations *dop = root->d_op;
-
-       /*
-        * We don't support:
-        *  - automount filesystems
-        *  - filesystems with revalidate (FIXME for lower layer)
-        *  - filesystems with case insensitive names
-        */
-       if (dop &&
-           (dop->d_manage || dop->d_automount ||
-            dop->d_revalidate || dop->d_weak_revalidate ||
-            dop->d_compare || dop->d_hash)) {
-               return false;
-       }
-       return true;
-}
-
 static int ovl_mount_dir_noesc(const char *name, struct path *path)
 {
        int err = -EINVAL;
@@ -727,7 +779,7 @@ static int ovl_mount_dir_noesc(const char *name, struct path *path)
                goto out;
        }
        err = -EINVAL;
-       if (!ovl_is_allowed_fs_type(path->dentry)) {
+       if (ovl_dentry_weird(path->dentry)) {
                pr_err("overlayfs: filesystem on '%s' not supported\n", name);
                goto out_put;
        }
@@ -751,13 +803,21 @@ static int ovl_mount_dir(const char *name, struct path *path)
        if (tmp) {
                ovl_unescape(tmp);
                err = ovl_mount_dir_noesc(tmp, path);
+
+               if (!err)
+                       if (ovl_dentry_remote(path->dentry)) {
+                               pr_err("overlayfs: filesystem on '%s' not supported as upperdir\n",
+                                      tmp);
+                               path_put(path);
+                               err = -EINVAL;
+                       }
                kfree(tmp);
        }
        return err;
 }
 
 static int ovl_lower_dir(const char *name, struct path *path, long *namelen,
-                        int *stack_depth)
+                        int *stack_depth, bool *remote)
 {
        int err;
        struct kstatfs statfs;
@@ -774,6 +834,9 @@ static int ovl_lower_dir(const char *name, struct path *path, long *namelen,
        *namelen = max(*namelen, statfs.f_namelen);
        *stack_depth = max(*stack_depth, path->mnt->mnt_sb->s_stack_depth);
 
+       if (ovl_dentry_remote(path->dentry))
+               *remote = true;
+
        return 0;
 
 out_put:
@@ -827,6 +890,7 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent)
        unsigned int numlower;
        unsigned int stacklen = 0;
        unsigned int i;
+       bool remote = false;
        int err;
 
        err = -ENOMEM;
@@ -900,7 +964,8 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent)
        lower = lowertmp;
        for (numlower = 0; numlower < stacklen; numlower++) {
                err = ovl_lower_dir(lower, &stack[numlower],
-                                   &ufs->lower_namelen, &sb->s_stack_depth);
+                                   &ufs->lower_namelen, &sb->s_stack_depth,
+                                   &remote);
                if (err)
                        goto out_put_lowerpath;
 
@@ -958,7 +1023,10 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent)
        if (!ufs->upper_mnt)
                sb->s_flags |= MS_RDONLY;
 
-       sb->s_d_op = &ovl_dentry_operations;
+       if (remote)
+               sb->s_d_op = &ovl_reval_dentry_operations;
+       else
+               sb->s_d_op = &ovl_dentry_operations;
 
        err = -ENOMEM;
        oe = ovl_alloc_entry(numlower);
index 84bb65b835701365126833a9e74d9e5d7a8a6c00..4fb17ded7d4780d8d19e7c68cb1c7cedfeb79cf7 100644 (file)
@@ -547,51 +547,45 @@ posix_acl_create(struct inode *dir, umode_t *mode,
                struct posix_acl **default_acl, struct posix_acl **acl)
 {
        struct posix_acl *p;
+       struct posix_acl *clone;
        int ret;
 
+       *acl = NULL;
+       *default_acl = NULL;
+
        if (S_ISLNK(*mode) || !IS_POSIXACL(dir))
-               goto no_acl;
+               return 0;
 
        p = get_acl(dir, ACL_TYPE_DEFAULT);
-       if (IS_ERR(p)) {
-               if (p == ERR_PTR(-EOPNOTSUPP))
-                       goto apply_umask;
-               return PTR_ERR(p);
+       if (!p || p == ERR_PTR(-EOPNOTSUPP)) {
+               *mode &= ~current_umask();
+               return 0;
        }
+       if (IS_ERR(p))
+               return PTR_ERR(p);
 
-       if (!p)
-               goto apply_umask;
-
-       *acl = posix_acl_clone(p, GFP_NOFS);
-       if (!*acl)
+       clone = posix_acl_clone(p, GFP_NOFS);
+       if (!clone)
                goto no_mem;
 
-       ret = posix_acl_create_masq(*acl, mode);
+       ret = posix_acl_create_masq(clone, mode);
        if (ret < 0)
                goto no_mem_clone;
 
-       if (ret == 0) {
-               posix_acl_release(*acl);
-               *acl = NULL;
-       }
+       if (ret == 0)
+               posix_acl_release(clone);
+       else
+               *acl = clone;
 
-       if (!S_ISDIR(*mode)) {
+       if (!S_ISDIR(*mode))
                posix_acl_release(p);
-               *default_acl = NULL;
-       } else {
+       else
                *default_acl = p;
-       }
-       return 0;
 
-apply_umask:
-       *mode &= ~current_umask();
-no_acl:
-       *default_acl = NULL;
-       *acl = NULL;
        return 0;
 
 no_mem_clone:
-       posix_acl_release(*acl);
+       posix_acl_release(clone);
 no_mem:
        posix_acl_release(p);
        return -ENOMEM;
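
The reworked posix_acl_create() above keeps a simple caller contract: *acl and *default_acl are set to NULL up front and, on success, only ever point to ACLs the caller owns and must release. A hedged sketch of a filesystem create path consuming it; fs_set_acl() is a hypothetical per-filesystem setter, not an API this patch adds.

static int example_init_acl(struct inode *inode, struct inode *dir)
{
        struct posix_acl *default_acl, *acl;
        int err;

        err = posix_acl_create(dir, &inode->i_mode, &default_acl, &acl);
        if (err)
                return err;

        if (default_acl) {
                if (!err)
                        err = fs_set_acl(inode, default_acl, ACL_TYPE_DEFAULT); /* hypothetical */
                posix_acl_release(default_acl);
        }
        if (acl) {
                if (!err)
                        err = fs_set_acl(inode, acl, ACL_TYPE_ACCESS);          /* hypothetical */
                posix_acl_release(acl);
        }
        return err;
}
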
index 1d540b3f226fe3bba3ac423816dfdf0f8603810b..87782e874b6af4523adff3e7e721046d5f857ee0 100644 (file)
@@ -491,14 +491,17 @@ static int proc_pid_stack(struct seq_file *m, struct pid_namespace *ns,
 }
 #endif
 
-#ifdef CONFIG_SCHEDSTATS
+#ifdef CONFIG_SCHED_INFO
 /*
  * Provides /proc/PID/schedstat
  */
 static int proc_pid_schedstat(struct seq_file *m, struct pid_namespace *ns,
                              struct pid *pid, struct task_struct *task)
 {
-       seq_printf(m, "%llu %llu %lu\n",
+       if (unlikely(!sched_info_on()))
+               seq_printf(m, "0 0 0\n");
+       else
+               seq_printf(m, "%llu %llu %lu\n",
                   (unsigned long long)task->se.sum_exec_runtime,
                   (unsigned long long)task->sched_info.run_delay,
                   task->sched_info.pcount);
@@ -2787,7 +2790,7 @@ static const struct pid_entry tgid_base_stuff[] = {
 #ifdef CONFIG_STACKTRACE
        ONE("stack",      S_IRUSR, proc_pid_stack),
 #endif
-#ifdef CONFIG_SCHEDSTATS
+#ifdef CONFIG_SCHED_INFO
        ONE("schedstat",  S_IRUGO, proc_pid_schedstat),
 #endif
 #ifdef CONFIG_LATENCYTOP
@@ -3135,7 +3138,7 @@ static const struct pid_entry tid_base_stuff[] = {
 #ifdef CONFIG_STACKTRACE
        ONE("stack",      S_IRUSR, proc_pid_stack),
 #endif
-#ifdef CONFIG_SCHEDSTATS
+#ifdef CONFIG_SCHED_INFO
        ONE("schedstat", S_IRUGO, proc_pid_schedstat),
 #endif
 #ifdef CONFIG_LATENCYTOP
index df6327a2b86507b371608a752f65bf12241bf38c..e5dee5c3188eb10e94742fbb57bb3b3564fa61bb 100644 (file)
@@ -373,6 +373,10 @@ static struct proc_dir_entry *__proc_create(struct proc_dir_entry **parent,
                WARN(1, "create '/proc/%s' by hand\n", qstr.name);
                return NULL;
        }
+       if (is_empty_pde(*parent)) {
+               WARN(1, "attempt to add to permanently empty directory");
+               return NULL;
+       }
 
        ent = kzalloc(sizeof(struct proc_dir_entry) + qstr.len + 1, GFP_KERNEL);
        if (!ent)
@@ -455,6 +459,25 @@ struct proc_dir_entry *proc_mkdir(const char *name,
 }
 EXPORT_SYMBOL(proc_mkdir);
 
+struct proc_dir_entry *proc_create_mount_point(const char *name)
+{
+       umode_t mode = S_IFDIR | S_IRUGO | S_IXUGO;
+       struct proc_dir_entry *ent, *parent = NULL;
+
+       ent = __proc_create(&parent, name, mode, 2);
+       if (ent) {
+               ent->data = NULL;
+               ent->proc_fops = NULL;
+               ent->proc_iops = NULL;
+               if (proc_register(parent, ent) < 0) {
+                       kfree(ent);
+                       parent->nlink--;
+                       ent = NULL;
+               }
+       }
+       return ent;
+}
+
 struct proc_dir_entry *proc_create_data(const char *name, umode_t mode,
                                        struct proc_dir_entry *parent,
                                        const struct file_operations *proc_fops,
index afe232b9df6e5b6c83779712c8cd068169992a55..bd95b9fdebb005cd9912c3b80027cfc1659fc530 100644 (file)
@@ -422,6 +422,10 @@ struct inode *proc_get_inode(struct super_block *sb, struct proc_dir_entry *de)
                inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
                PROC_I(inode)->pde = de;
 
+               if (is_empty_pde(de)) {
+                       make_empty_dir_inode(inode);
+                       return inode;
+               }
                if (de->mode) {
                        inode->i_mode = de->mode;
                        inode->i_uid = de->uid;
index c835b94c0cd3afec0bea4017ca8bacd63b32ff8e..aa2781095bd15f4d9e98b5bcf58fe77ba5576f09 100644 (file)
@@ -191,6 +191,12 @@ static inline struct proc_dir_entry *pde_get(struct proc_dir_entry *pde)
 }
 extern void pde_put(struct proc_dir_entry *);
 
+static inline bool is_empty_pde(const struct proc_dir_entry *pde)
+{
+       return S_ISDIR(pde->mode) && !pde->proc_iops;
+}
+struct proc_dir_entry *proc_create_mount_point(const char *name);
+
 /*
  * inode.c
  */
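
Together with proc_create_mount_point() and the is_empty_pde() check added to __proc_create(), procfs can now mark a directory as permanently empty: proc_get_inode() gives it an empty-dir inode and any attempt to register children is refused. A hedged illustration of what that means for code that used to drop files under /proc/fs/nfsd; the entry name and example_fops are hypothetical.

static const struct file_operations example_fops;       /* placeholder */

static int __init example_init(void)
{
        struct proc_dir_entry *pde;

        /*
         * "fs/nfsd" is now a permanently empty mount point, so this is
         * expected to trip the new WARN in __proc_create() and return NULL
         * instead of creating an entry that the mounted nfsd filesystem
         * would then shadow.
         */
        pde = proc_create_data("fs/nfsd/example", 0444, NULL, &example_fops, NULL);
        if (!pde)
                return -ENOMEM;

        return 0;
}
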
index d4a35746cab91f8967dc83b7466fd572a77ae8d4..f8595e8b5cd067e474d1ca2c9a20d60fd0ec903d 100644 (file)
@@ -64,7 +64,7 @@ static int nommu_region_show(struct seq_file *m, struct vm_region *region)
 
        if (file) {
                seq_pad(m, ' ');
-               seq_path(m, &file->f_path, "");
+               seq_file_path(m, file, "");
        }
 
        seq_putc(m, '\n');
index fea2561d773bbce01c9858f91ec5fec58f9f7481..fdda62e6115e1c4584cc88d360c27de7a26e1793 100644 (file)
@@ -19,6 +19,28 @@ static const struct inode_operations proc_sys_inode_operations;
 static const struct file_operations proc_sys_dir_file_operations;
 static const struct inode_operations proc_sys_dir_operations;
 
+/* Support for permanently empty directories */
+
+struct ctl_table sysctl_mount_point[] = {
+       { }
+};
+
+static bool is_empty_dir(struct ctl_table_header *head)
+{
+       return head->ctl_table[0].child == sysctl_mount_point;
+}
+
+static void set_empty_dir(struct ctl_dir *dir)
+{
+       dir->header.ctl_table[0].child = sysctl_mount_point;
+}
+
+static void clear_empty_dir(struct ctl_dir *dir)
+
+{
+       dir->header.ctl_table[0].child = NULL;
+}
+
 void proc_sys_poll_notify(struct ctl_table_poll *poll)
 {
        if (!poll)
@@ -187,6 +209,17 @@ static int insert_header(struct ctl_dir *dir, struct ctl_table_header *header)
        struct ctl_table *entry;
        int err;
 
+       /* Is this a permanently empty directory? */
+       if (is_empty_dir(&dir->header))
+               return -EROFS;
+
+       /* Am I creating a permanently empty directory? */
+       if (header->ctl_table == sysctl_mount_point) {
+               if (!RB_EMPTY_ROOT(&dir->root))
+                       return -EINVAL;
+               set_empty_dir(dir);
+       }
+
        dir->header.nreg++;
        header->parent = dir;
        err = insert_links(header);
@@ -202,6 +235,8 @@ fail:
        erase_header(header);
        put_links(header);
 fail_links:
+       if (header->ctl_table == sysctl_mount_point)
+               clear_empty_dir(dir);
        header->parent = NULL;
        drop_sysctl_table(&dir->header);
        return err;
@@ -419,6 +454,8 @@ static struct inode *proc_sys_make_inode(struct super_block *sb,
                inode->i_mode |= S_IFDIR;
                inode->i_op = &proc_sys_dir_operations;
                inode->i_fop = &proc_sys_dir_file_operations;
+               if (is_empty_dir(head))
+                       make_empty_dir_inode(inode);
        }
 out:
        return inode;
index b7fa4bfe896a2c17f05dd97add756bed889fd2b6..68feb0f70e6358f83f5a5281bf3c64d16cad2015 100644 (file)
@@ -112,9 +112,6 @@ static struct dentry *proc_mount(struct file_system_type *fs_type,
                ns = task_active_pid_ns(current);
                options = data;
 
-               if (!capable(CAP_SYS_ADMIN) && !fs_fully_visible(fs_type))
-                       return ERR_PTR(-EPERM);
-
                /* Does the mounter have privilege over the pid namespace? */
                if (!ns_capable(ns->user_ns, CAP_SYS_ADMIN))
                        return ERR_PTR(-EPERM);
@@ -159,7 +156,7 @@ static struct file_system_type proc_fs_type = {
        .name           = "proc",
        .mount          = proc_mount,
        .kill_sb        = proc_kill_sb,
-       .fs_flags       = FS_USERNS_MOUNT,
+       .fs_flags       = FS_USERNS_VISIBLE | FS_USERNS_MOUNT,
 };
 
 void __init proc_root_init(void)
@@ -182,10 +179,10 @@ void __init proc_root_init(void)
 #endif
        proc_mkdir("fs", NULL);
        proc_mkdir("driver", NULL);
-       proc_mkdir("fs/nfsd", NULL); /* somewhere for the nfsd filesystem to be mounted */
+       proc_create_mount_point("fs/nfsd"); /* somewhere for the nfsd filesystem to be mounted */
 #if defined(CONFIG_SUN_OPENPROMFS) || defined(CONFIG_SUN_OPENPROMFS_MODULE)
        /* just give it a mountpoint */
-       proc_mkdir("openprom", NULL);
+       proc_create_mount_point("openprom");
 #endif
        proc_tty_init();
        proc_mkdir("bus", NULL);
index 6dee68d013ffa69f1f6c9d49873f17f19eb3874d..ca1e091881d44fe5797924d5a34daca41f8166e9 100644 (file)
@@ -310,7 +310,7 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
         */
        if (file) {
                seq_pad(m, ' ');
-               seq_path(m, &file->f_path, "\n");
+               seq_file_path(m, file, "\n");
                goto done;
        }
 
@@ -1509,7 +1509,7 @@ static int show_numa_map(struct seq_file *m, void *v, int is_pid)
 
        if (file) {
                seq_puts(m, " file=");
-               seq_path(m, &file->f_path, "\n\t= ");
+               seq_file_path(m, file, "\n\t= ");
        } else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
                seq_puts(m, " heap");
        } else {
index 599ec2e201043ea8f210a7c15a13c4e7eab22440..e0d64c92e4f6576c38a8a4a7cc9b8d13a2b3362e 100644 (file)
@@ -180,7 +180,7 @@ static int nommu_vma_show(struct seq_file *m, struct vm_area_struct *vma,
 
        if (file) {
                seq_pad(m, ' ');
-               seq_path(m, &file->f_path, "");
+               seq_file_path(m, file, "");
        } else if (mm) {
                pid_t tid = pid_of_stack(priv, vma, is_pid);
 
index 8db932da40091b44f2a8df49e921d942f2291908..8ebd9a3340852823b17f61af59f4dd6a65f1b35f 100644 (file)
@@ -17,7 +17,8 @@
 
 static unsigned mounts_poll(struct file *file, poll_table *wait)
 {
-       struct proc_mounts *p = proc_mounts(file->private_data);
+       struct seq_file *m = file->private_data;
+       struct proc_mounts *p = m->private;
        struct mnt_namespace *ns = p->ns;
        unsigned res = POLLIN | POLLRDNORM;
        int event;
@@ -25,8 +26,8 @@ static unsigned mounts_poll(struct file *file, poll_table *wait)
        poll_wait(file, &p->ns->poll, wait);
 
        event = ACCESS_ONCE(ns->event);
-       if (p->m.poll_event != event) {
-               p->m.poll_event = event;
+       if (m->poll_event != event) {
+               m->poll_event = event;
                res |= POLLERR | POLLPRI;
        }
 
@@ -92,7 +93,7 @@ static void show_type(struct seq_file *m, struct super_block *sb)
 
 static int show_vfsmnt(struct seq_file *m, struct vfsmount *mnt)
 {
-       struct proc_mounts *p = proc_mounts(m);
+       struct proc_mounts *p = m->private;
        struct mount *r = real_mount(mnt);
        int err = 0;
        struct path mnt_path = { .dentry = mnt->mnt_root, .mnt = mnt };
@@ -126,7 +127,7 @@ out:
 
 static int show_mountinfo(struct seq_file *m, struct vfsmount *mnt)
 {
-       struct proc_mounts *p = proc_mounts(m);
+       struct proc_mounts *p = m->private;
        struct mount *r = real_mount(mnt);
        struct super_block *sb = mnt->mnt_sb;
        struct path mnt_path = { .dentry = mnt->mnt_root, .mnt = mnt };
@@ -186,7 +187,7 @@ out:
 
 static int show_vfsstat(struct seq_file *m, struct vfsmount *mnt)
 {
-       struct proc_mounts *p = proc_mounts(m);
+       struct proc_mounts *p = m->private;
        struct mount *r = real_mount(mnt);
        struct path mnt_path = { .dentry = mnt->mnt_root, .mnt = mnt };
        struct super_block *sb = mnt_path.dentry->d_sb;
@@ -236,6 +237,7 @@ static int mounts_open_common(struct inode *inode, struct file *file,
        struct mnt_namespace *ns = NULL;
        struct path root;
        struct proc_mounts *p;
+       struct seq_file *m;
        int ret = -EINVAL;
 
        if (!task)
@@ -260,26 +262,21 @@ static int mounts_open_common(struct inode *inode, struct file *file,
        task_unlock(task);
        put_task_struct(task);
 
-       ret = -ENOMEM;
-       p = kmalloc(sizeof(struct proc_mounts), GFP_KERNEL);
-       if (!p)
+       ret = seq_open_private(file, &mounts_op, sizeof(struct proc_mounts));
+       if (ret)
                goto err_put_path;
 
-       file->private_data = &p->m;
-       ret = seq_open(file, &mounts_op);
-       if (ret)
-               goto err_free;
+       m = file->private_data;
+       m->poll_event = ns->event;
 
+       p = m->private;
        p->ns = ns;
        p->root = root;
-       p->m.poll_event = ns->event;
        p->show = show;
        p->cached_event = ~0ULL;
 
        return 0;
 
- err_free:
-       kfree(p);
  err_put_path:
        path_put(&root);
  err_put_ns:
@@ -290,10 +287,11 @@ static int mounts_open_common(struct inode *inode, struct file *file,
 
 static int mounts_release(struct inode *inode, struct file *file)
 {
-       struct proc_mounts *p = proc_mounts(file->private_data);
+       struct seq_file *m = file->private_data;
+       struct proc_mounts *p = m->private;
        path_put(&p->root);
        put_mnt_ns(p->ns);
-       return seq_release(inode, file);
+       return seq_release_private(inode, file);
 }
 
 static int mounts_open(struct inode *inode, struct file *file)
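
The conversion above stops embedding a struct seq_file inside struct proc_mounts and instead lets seq_open_private() allocate both objects, with the per-open data reachable through m->private and freed again by seq_release_private(). A hedged sketch of that open/release pairing for some other seq_file user; example_state, example_seq_ops and the token field are assumptions, not part of this patch.

struct example_state {
        int token;
};

static const struct seq_operations example_seq_ops;    /* hypothetical ->start/->next/->stop/->show */

static int example_open(struct inode *inode, struct file *file)
{
        struct example_state *p;
        int ret;

        /* Allocates the seq_file plus a zeroed private blob of the given size. */
        ret = seq_open_private(file, &example_seq_ops, sizeof(*p));
        if (ret)
                return ret;

        p = ((struct seq_file *)file->private_data)->private;
        p->token = 42;                                  /* per-open state */

        return 0;
}

static int example_release(struct inode *inode, struct file *file)
{
        /* Frees m->private first, then the seq_file itself. */
        return seq_release_private(inode, file);
}
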
index dc43b5f29305efdd7ee94b72035b3e5b3ba11d17..3adcc4669faca29785a663f63e39de22a65ae9c1 100644 (file)
@@ -461,22 +461,18 @@ static struct file_system_type pstore_fs_type = {
        .kill_sb        = pstore_kill_sb,
 };
 
-static struct kobject *pstore_kobj;
-
 static int __init init_pstore_fs(void)
 {
-       int err = 0;
+       int err;
 
        /* Create a convenient mount point for people to access pstore */
-       pstore_kobj = kobject_create_and_add("pstore", fs_kobj);
-       if (!pstore_kobj) {
-               err = -ENOMEM;
+       err = sysfs_create_mount_point(fs_kobj, "pstore");
+       if (err)
                goto out;
-       }
 
        err = register_filesystem(&pstore_fs_type);
        if (err < 0)
-               kobject_put(pstore_kobj);
+               sysfs_remove_mount_point(fs_kobj, "pstore");
 
 out:
        return err;
index 8d64bb5366bf0721bcde9416d5ff3d356c8a0d9b..e1f37278cf97bfc0e7973c2f860062cc02c70cbe 100644 (file)
@@ -32,11 +32,6 @@ static struct page *qnx6_get_page(struct inode *dir, unsigned long n)
        return page;
 }
 
-static inline unsigned long dir_pages(struct inode *inode)
-{
-       return (inode->i_size+PAGE_CACHE_SIZE-1)>>PAGE_CACHE_SHIFT;
-}
-
 static unsigned last_entry(struct inode *inode, unsigned long page_nr)
 {
        unsigned long last_byte = inode->i_size;
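
The private dir_pages() copy deleted here (and the identical ones removed from sysv and ufs further down) is replaced by a single shared helper; its body, reconstructed from the removed code, is simply the inode size rounded up to whole page-cache pages. Presumably it now lives in a common pagemap/VFS header rather than in each filesystem.

static inline unsigned long dir_pages(struct inode *inode)
{
        return (inode->i_size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
}
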
index 52b492721603db1e37a8fb424a7f112a4932da2f..ce9e39fd5dafc768c27b2ceaa4e69a02c3ed1e6e 100644 (file)
@@ -48,18 +48,21 @@ static void *seq_buf_alloc(unsigned long size)
  *     ERR_PTR(error).  In the end of sequence they return %NULL. ->show()
  *     returns 0 in case of success and negative number in case of error.
  *     Returning SEQ_SKIP means "discard this element and move on".
+ *     Note: seq_open() will allocate a struct seq_file and store its
+ *     pointer in @file->private_data. This pointer should not be modified.
  */
 int seq_open(struct file *file, const struct seq_operations *op)
 {
-       struct seq_file *p = file->private_data;
+       struct seq_file *p;
+
+       WARN_ON(file->private_data);
+
+       p = kzalloc(sizeof(*p), GFP_KERNEL);
+       if (!p)
+               return -ENOMEM;
+
+       file->private_data = p;
 
-       if (!p) {
-               p = kmalloc(sizeof(*p), GFP_KERNEL);
-               if (!p)
-                       return -ENOMEM;
-               file->private_data = p;
-       }
-       memset(p, 0, sizeof(*p));
        mutex_init(&p->lock);
        p->op = op;
 #ifdef CONFIG_USER_NS
@@ -487,6 +490,20 @@ int seq_path(struct seq_file *m, const struct path *path, const char *esc)
 }
 EXPORT_SYMBOL(seq_path);
 
+/**
+ * seq_file_path - seq_file interface to print a pathname of a file
+ * @m: the seq_file handle
+ * @file: the struct file to print
+ * @esc: set of characters to escape in the output
+ *
+ * return the absolute path to the file.
+ */
+int seq_file_path(struct seq_file *m, struct file *file, const char *esc)
+{
+       return seq_path(m, &file->f_path, esc);
+}
+EXPORT_SYMBOL(seq_file_path);
+
 /*
  * Same as seq_path, but relative to supplied root.
  */
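
Two things fall out of the seq_file changes above: seq_open() now always allocates the struct seq_file itself (callers must no longer pre-populate file->private_data), and the new seq_file_path() helper saves callers from reaching into file->f_path by hand, as the proc map hunks earlier in this patch show. A hedged sketch of a ->show() callback using the helper; example_show and the stashed file pointer are assumptions.

static int example_show(struct seq_file *m, void *v)
{
        struct file *file = m->private;         /* assumed: stashed by the open routine */

        if (file) {
                seq_puts(m, "path=");
                seq_file_path(m, file, "\n");   /* escape newlines embedded in the name */
        }
        seq_putc(m, '\n');

        return 0;
}
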
index 73588e7700ed8a38aa98b5765d0d331df9c83193..d09fcd6fb85d0d0ebb8c270ea1e85574e64b2f9b 100644 (file)
@@ -49,6 +49,6 @@ struct squashfs_inode_info {
 
 static inline struct squashfs_inode_info *squashfs_i(struct inode *inode)
 {
-       return list_entry(inode, struct squashfs_inode_info, vfs_inode);
+       return container_of(inode, struct squashfs_inode_info, vfs_inode);
 }
 #endif
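
This hunk, like the matching sysv and udf ones below, switches the inode-container accessor from list_entry() to container_of(). list_entry() is just an alias for container_of(), so the generated code is unchanged; spelling it container_of() simply states the intent, since no list is involved. The general shape of the pattern, with a hypothetical myfs prefix:

struct myfs_inode_info {
        unsigned long private_state;    /* whatever the filesystem tracks per inode */
        struct inode vfs_inode;         /* embedded VFS inode */
};

static inline struct myfs_inode_info *MYFS_I(struct inode *inode)
{
        return container_of(inode, struct myfs_inode_info, vfs_inode);
}
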
index 928c20f47af9c1e26906e7e3be7dfa7705e8ca15..b61372354f2bd1cd104d140003859a4333456c79 100644 (file)
@@ -842,7 +842,7 @@ int get_anon_bdev(dev_t *p)
        else if (error)
                return -EAGAIN;
 
-       if (dev == (1 << MINORBITS)) {
+       if (dev >= (1 << MINORBITS)) {
                spin_lock(&unnamed_dev_lock);
                ida_remove(&unnamed_dev_ida, dev);
                if (unnamed_dev_start > dev)
index 0b45ff42f3741123a15f58d426e201a78d20b3d0..94374e43502599c466153858476bd42652a7ea1b 100644 (file)
@@ -121,3 +121,37 @@ int sysfs_move_dir_ns(struct kobject *kobj, struct kobject *new_parent_kobj,
 
        return kernfs_rename_ns(kn, new_parent, kn->name, new_ns);
 }
+
+/**
+ * sysfs_create_mount_point - create an always empty directory
+ * @parent_kobj:  kobject that will contain this always empty directory
+ * @name: The name of the always empty directory to add
+ */
+int sysfs_create_mount_point(struct kobject *parent_kobj, const char *name)
+{
+       struct kernfs_node *kn, *parent = parent_kobj->sd;
+
+       kn = kernfs_create_empty_dir(parent, name);
+       if (IS_ERR(kn)) {
+               if (PTR_ERR(kn) == -EEXIST)
+                       sysfs_warn_dup(parent, name);
+               return PTR_ERR(kn);
+       }
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(sysfs_create_mount_point);
+
+/**
+ *     sysfs_remove_mount_point - remove an always empty directory.
+ *     @parent_kobj: kobject that will contain this always empty directory
+ *     @name: The name of the always empty directory to remove
+ *
+ */
+void sysfs_remove_mount_point(struct kobject *parent_kobj, const char *name)
+{
+       struct kernfs_node *parent = parent_kobj->sd;
+
+       kernfs_remove_by_name_ns(parent, name, NULL);
+}
+EXPORT_SYMBOL_GPL(sysfs_remove_mount_point);
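
sysfs_create_mount_point() and sysfs_remove_mount_point() give subsystems a permanently empty sysfs directory to mount on, replacing the ad-hoc kobject_create_and_add() calls that the pstore and tracefs hunks in this patch convert. A sketch of a module init using the pair, mirroring the pstore conversion; the examplefs names are hypothetical.

static int __init examplefs_init(void)
{
        int err;

        /* Create /sys/fs/examplefs as an always-empty mount point. */
        err = sysfs_create_mount_point(fs_kobj, "examplefs");
        if (err)
                return err;

        err = register_filesystem(&examplefs_fs_type);  /* hypothetical fs type */
        if (err)
                sysfs_remove_mount_point(fs_kobj, "examplefs");

        return err;
}
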
index 8a49486bf30c9859a5474f3cc5055bc7b9917de4..1c6ac6fcee9fb15c869ef80fc5947ba9117d77ea 100644 (file)
@@ -31,9 +31,6 @@ static struct dentry *sysfs_mount(struct file_system_type *fs_type,
        bool new_sb;
 
        if (!(flags & MS_KERNMOUNT)) {
-               if (!capable(CAP_SYS_ADMIN) && !fs_fully_visible(fs_type))
-                       return ERR_PTR(-EPERM);
-
                if (!kobj_ns_current_may_mount(KOBJ_NS_TYPE_NET))
                        return ERR_PTR(-EPERM);
        }
@@ -58,7 +55,7 @@ static struct file_system_type sysfs_fs_type = {
        .name           = "sysfs",
        .mount          = sysfs_mount,
        .kill_sb        = sysfs_kill_sb,
-       .fs_flags       = FS_USERNS_MOUNT,
+       .fs_flags       = FS_USERNS_VISIBLE | FS_USERNS_MOUNT,
 };
 
 int __init sysfs_init(void)
index 8f3555f00c54276aa1f0e1d443b7dd375850bf23..63c1bcb224ee886e5261b5b3a9867a581f872f01 100644 (file)
@@ -33,11 +33,6 @@ static inline void dir_put_page(struct page *page)
        page_cache_release(page);
 }
 
-static inline unsigned long dir_pages(struct inode *inode)
-{
-       return (inode->i_size+PAGE_CACHE_SIZE-1)>>PAGE_CACHE_SHIFT;
-}
-
 static int dir_commit_chunk(struct page *page, loff_t pos, unsigned len)
 {
        struct address_space *mapping = page->mapping;
index 2c13525131cd8146dd19d6a07086a70afc97b25a..6c212288adcb095391e9557f246c927daa2d0663 100644 (file)
@@ -73,7 +73,7 @@ struct sysv_inode_info {
 
 static inline struct sysv_inode_info *SYSV_I(struct inode *inode)
 {
-       return list_entry(inode, struct sysv_inode_info, vfs_inode);
+       return container_of(inode, struct sysv_inode_info, vfs_inode);
 }
 
 static inline struct sysv_sb_info *SYSV_SB(struct super_block *sb)
index d92bdf3b079a79d5a5ff88bd41207894a11cc483..cbc8d5d2755a691a560c46f7105e85ca6220d835 100644 (file)
@@ -496,16 +496,11 @@ struct dentry *tracefs_create_instance_dir(const char *name, struct dentry *pare
        return dentry;
 }
 
-static inline int tracefs_positive(struct dentry *dentry)
-{
-       return dentry->d_inode && !d_unhashed(dentry);
-}
-
 static int __tracefs_remove(struct dentry *dentry, struct dentry *parent)
 {
        int ret = 0;
 
-       if (tracefs_positive(dentry)) {
+       if (simple_positive(dentry)) {
                if (dentry->d_inode) {
                        dget(dentry);
                        switch (dentry->d_inode->i_mode & S_IFMT) {
@@ -582,7 +577,7 @@ void tracefs_remove_recursive(struct dentry *dentry)
         */
        spin_lock(&parent->d_lock);
        list_for_each_entry(child, &parent->d_subdirs, d_child) {
-               if (!tracefs_positive(child))
+               if (!simple_positive(child))
                        continue;
 
                /* perhaps simple_empty(child) makes more sense */
@@ -603,7 +598,7 @@ void tracefs_remove_recursive(struct dentry *dentry)
                 * from d_subdirs. When releasing the parent->d_lock we can
                 * no longer trust that the next pointer is valid.
                 * Restart the loop. We'll skip this one with the
-                * tracefs_positive() check.
+                * simple_positive() check.
                 */
                goto loop;
        }
@@ -631,14 +626,12 @@ bool tracefs_initialized(void)
        return tracefs_registered;
 }
 
-static struct kobject *trace_kobj;
-
 static int __init tracefs_init(void)
 {
        int retval;
 
-       trace_kobj = kobject_create_and_add("tracing", kernel_kobj);
-       if (!trace_kobj)
+       retval = sysfs_create_mount_point(kernel_kobj, "tracing");
+       if (retval)
                return -EINVAL;
 
        retval = register_filesystem(&trace_fs_type);
index b5cd8ed2aa12ff3e27a8b3681886152943bf3af4..b1b9a63d8cf3e603754fa21e0037854ea5c8b630 100644 (file)
@@ -56,7 +56,7 @@ struct udf_inode_info {
 
 static inline struct udf_inode_info *UDF_I(struct inode *inode)
 {
-       return list_entry(inode, struct udf_inode_info, vfs_inode);
+       return container_of(inode, struct udf_inode_info, vfs_inode);
 }
 
 #endif /* _UDF_I_H) */
index 2c1036080d5276bcb51314e649e45f0d6f0990a5..a7106eda50241bfd28ae002986335b4d0cf8f0f6 100644 (file)
@@ -51,8 +51,8 @@ void ufs_free_fragments(struct inode *inode, u64 fragment, unsigned count)
        
        if (ufs_fragnum(fragment) + count > uspi->s_fpg)
                ufs_error (sb, "ufs_free_fragments", "internal error");
-       
-       lock_ufs(sb);
+
+       mutex_lock(&UFS_SB(sb)->s_lock);
        
        cgno = ufs_dtog(uspi, fragment);
        bit = ufs_dtogd(uspi, fragment);
@@ -115,13 +115,13 @@ void ufs_free_fragments(struct inode *inode, u64 fragment, unsigned count)
        if (sb->s_flags & MS_SYNCHRONOUS)
                ubh_sync_block(UCPI_UBH(ucpi));
        ufs_mark_sb_dirty(sb);
-       
-       unlock_ufs(sb);
+
+       mutex_unlock(&UFS_SB(sb)->s_lock);
        UFSD("EXIT\n");
        return;
 
 failed:
-       unlock_ufs(sb);
+       mutex_unlock(&UFS_SB(sb)->s_lock);
        UFSD("EXIT (FAILED)\n");
        return;
 }
@@ -151,7 +151,7 @@ void ufs_free_blocks(struct inode *inode, u64 fragment, unsigned count)
                goto failed;
        }
 
-       lock_ufs(sb);
+       mutex_lock(&UFS_SB(sb)->s_lock);
        
 do_more:
        overflow = 0;
@@ -211,12 +211,12 @@ do_more:
        }
 
        ufs_mark_sb_dirty(sb);
-       unlock_ufs(sb);
+       mutex_unlock(&UFS_SB(sb)->s_lock);
        UFSD("EXIT\n");
        return;
 
 failed_unlock:
-       unlock_ufs(sb);
+       mutex_unlock(&UFS_SB(sb)->s_lock);
 failed:
        UFSD("EXIT (FAILED)\n");
        return;
@@ -357,7 +357,7 @@ u64 ufs_new_fragments(struct inode *inode, void *p, u64 fragment,
        usb1 = ubh_get_usb_first(uspi);
        *err = -ENOSPC;
 
-       lock_ufs(sb);
+       mutex_lock(&UFS_SB(sb)->s_lock);
        tmp = ufs_data_ptr_to_cpu(sb, p);
 
        if (count + ufs_fragnum(fragment) > uspi->s_fpb) {
@@ -378,19 +378,19 @@ u64 ufs_new_fragments(struct inode *inode, void *p, u64 fragment,
                                  "fragment %llu, tmp %llu\n",
                                  (unsigned long long)fragment,
                                  (unsigned long long)tmp);
-                       unlock_ufs(sb);
+                       mutex_unlock(&UFS_SB(sb)->s_lock);
                        return INVBLOCK;
                }
                if (fragment < UFS_I(inode)->i_lastfrag) {
                        UFSD("EXIT (ALREADY ALLOCATED)\n");
-                       unlock_ufs(sb);
+                       mutex_unlock(&UFS_SB(sb)->s_lock);
                        return 0;
                }
        }
        else {
                if (tmp) {
                        UFSD("EXIT (ALREADY ALLOCATED)\n");
-                       unlock_ufs(sb);
+                       mutex_unlock(&UFS_SB(sb)->s_lock);
                        return 0;
                }
        }
@@ -399,7 +399,7 @@ u64 ufs_new_fragments(struct inode *inode, void *p, u64 fragment,
         * There is not enough space for user on the device
         */
        if (!capable(CAP_SYS_RESOURCE) && ufs_freespace(uspi, UFS_MINFREE) <= 0) {
-               unlock_ufs(sb);
+               mutex_unlock(&UFS_SB(sb)->s_lock);
                UFSD("EXIT (FAILED)\n");
                return 0;
        }
@@ -424,7 +424,7 @@ u64 ufs_new_fragments(struct inode *inode, void *p, u64 fragment,
                        ufs_clear_frags(inode, result + oldcount,
                                        newcount - oldcount, locked_page != NULL);
                }
-               unlock_ufs(sb);
+               mutex_unlock(&UFS_SB(sb)->s_lock);
                UFSD("EXIT, result %llu\n", (unsigned long long)result);
                return result;
        }
@@ -439,7 +439,7 @@ u64 ufs_new_fragments(struct inode *inode, void *p, u64 fragment,
                                                fragment + count);
                ufs_clear_frags(inode, result + oldcount, newcount - oldcount,
                                locked_page != NULL);
-               unlock_ufs(sb);
+               mutex_unlock(&UFS_SB(sb)->s_lock);
                UFSD("EXIT, result %llu\n", (unsigned long long)result);
                return result;
        }
@@ -477,7 +477,7 @@ u64 ufs_new_fragments(struct inode *inode, void *p, u64 fragment,
                *err = 0;
                UFS_I(inode)->i_lastfrag = max(UFS_I(inode)->i_lastfrag,
                                                fragment + count);
-               unlock_ufs(sb);
+               mutex_unlock(&UFS_SB(sb)->s_lock);
                if (newcount < request)
                        ufs_free_fragments (inode, result + newcount, request - newcount);
                ufs_free_fragments (inode, tmp, oldcount);
@@ -485,7 +485,7 @@ u64 ufs_new_fragments(struct inode *inode, void *p, u64 fragment,
                return result;
        }
 
-       unlock_ufs(sb);
+       mutex_unlock(&UFS_SB(sb)->s_lock);
        UFSD("EXIT (FAILED)\n");
        return 0;
 }              
index 1bfe8cabff0f660d107c6bc97446a0cceaf940eb..74f2e80288bfad7824961373891fad9bea30a158 100644 (file)
@@ -65,11 +65,6 @@ static inline void ufs_put_page(struct page *page)
        page_cache_release(page);
 }
 
-static inline unsigned long ufs_dir_pages(struct inode *inode)
-{
-       return (inode->i_size+PAGE_CACHE_SIZE-1)>>PAGE_CACHE_SHIFT;
-}
-
 ino_t ufs_inode_by_name(struct inode *dir, const struct qstr *qstr)
 {
        ino_t res = 0;
@@ -87,7 +82,8 @@ ino_t ufs_inode_by_name(struct inode *dir, const struct qstr *qstr)
 
 /* Releases the page */
 void ufs_set_link(struct inode *dir, struct ufs_dir_entry *de,
-                 struct page *page, struct inode *inode)
+                 struct page *page, struct inode *inode,
+                 bool update_times)
 {
        loff_t pos = page_offset(page) +
                        (char *) de - (char *) page_address(page);
@@ -103,7 +99,8 @@ void ufs_set_link(struct inode *dir, struct ufs_dir_entry *de,
 
        err = ufs_commit_chunk(page, pos, len);
        ufs_put_page(page);
-       dir->i_mtime = dir->i_ctime = CURRENT_TIME_SEC;
+       if (update_times)
+               dir->i_mtime = dir->i_ctime = CURRENT_TIME_SEC;
        mark_inode_dirty(dir);
 }
 
@@ -256,7 +253,7 @@ struct ufs_dir_entry *ufs_find_entry(struct inode *dir, const struct qstr *qstr,
        int namelen = qstr->len;
        unsigned reclen = UFS_DIR_REC_LEN(namelen);
        unsigned long start, n;
-       unsigned long npages = ufs_dir_pages(dir);
+       unsigned long npages = dir_pages(dir);
        struct page *page = NULL;
        struct ufs_inode_info *ui = UFS_I(dir);
        struct ufs_dir_entry *de;
@@ -320,7 +317,7 @@ int ufs_add_link(struct dentry *dentry, struct inode *inode)
        unsigned short rec_len, name_len;
        struct page *page = NULL;
        struct ufs_dir_entry *de;
-       unsigned long npages = ufs_dir_pages(dir);
+       unsigned long npages = dir_pages(dir);
        unsigned long n;
        char *kaddr;
        loff_t pos;
@@ -437,7 +434,7 @@ ufs_readdir(struct file *file, struct dir_context *ctx)
        struct super_block *sb = inode->i_sb;
        unsigned int offset = pos & ~PAGE_CACHE_MASK;
        unsigned long n = pos >> PAGE_CACHE_SHIFT;
-       unsigned long npages = ufs_dir_pages(inode);
+       unsigned long npages = dir_pages(inode);
        unsigned chunk_mask = ~(UFS_SB(sb)->s_uspi->s_dirblksize - 1);
        int need_revalidate = file->f_version != inode->i_version;
        unsigned flags = UFS_SB(sb)->s_flags;
@@ -608,7 +605,7 @@ int ufs_empty_dir(struct inode * inode)
 {
        struct super_block *sb = inode->i_sb;
        struct page *page = NULL;
-       unsigned long i, npages = ufs_dir_pages(inode);
+       unsigned long i, npages = dir_pages(inode);
 
        for (i = 0; i < npages; i++) {
                char *kaddr;
index 7caa016528883c6d8b8e7234ef0adc7ce23c6044..fd0203ce1f7fde1595d463ef4ae67b8d9482cfcb 100644 (file)
@@ -69,11 +69,11 @@ void ufs_free_inode (struct inode * inode)
        
        ino = inode->i_ino;
 
-       lock_ufs(sb);
+       mutex_lock(&UFS_SB(sb)->s_lock);
 
        if (!((ino > 1) && (ino < (uspi->s_ncg * uspi->s_ipg )))) {
                ufs_warning(sb, "ufs_free_inode", "reserved inode or nonexistent inode %u\n", ino);
-               unlock_ufs(sb);
+               mutex_unlock(&UFS_SB(sb)->s_lock);
                return;
        }
        
@@ -81,7 +81,7 @@ void ufs_free_inode (struct inode * inode)
        bit = ufs_inotocgoff (ino);
        ucpi = ufs_load_cylinder (sb, cg);
        if (!ucpi) {
-               unlock_ufs(sb);
+               mutex_unlock(&UFS_SB(sb)->s_lock);
                return;
        }
        ucg = ubh_get_ucg(UCPI_UBH(ucpi));
@@ -115,7 +115,7 @@ void ufs_free_inode (struct inode * inode)
                ubh_sync_block(UCPI_UBH(ucpi));
        
        ufs_mark_sb_dirty(sb);
-       unlock_ufs(sb);
+       mutex_unlock(&UFS_SB(sb)->s_lock);
        UFSD("EXIT\n");
 }
 
@@ -193,7 +193,7 @@ struct inode *ufs_new_inode(struct inode *dir, umode_t mode)
        sbi = UFS_SB(sb);
        uspi = sbi->s_uspi;
 
-       lock_ufs(sb);
+       mutex_lock(&sbi->s_lock);
 
        /*
         * Try to place the inode in its parent directory
@@ -331,21 +331,21 @@ cg_found:
                        sync_dirty_buffer(bh);
                brelse(bh);
        }
-       unlock_ufs(sb);
+       mutex_unlock(&sbi->s_lock);
 
        UFSD("allocating inode %lu\n", inode->i_ino);
        UFSD("EXIT\n");
        return inode;
 
 fail_remove_inode:
-       unlock_ufs(sb);
+       mutex_unlock(&sbi->s_lock);
        clear_nlink(inode);
        unlock_new_inode(inode);
        iput(inode);
        UFSD("EXIT (FAILED): err %d\n", err);
        return ERR_PTR(err);
 failed:
-       unlock_ufs(sb);
+       mutex_unlock(&sbi->s_lock);
        make_bad_inode(inode);
        iput (inode);
        UFSD("EXIT (FAILED): err %d\n", err);
index 99aaf5c9bf4d83f0f5ec6469d6d6cb20827750d0..f913a6924b23814852e03f2fce8acb911dfc6635 100644 (file)
@@ -903,6 +903,9 @@ void ufs_evict_inode(struct inode * inode)
        invalidate_inode_buffers(inode);
        clear_inode(inode);
 
-       if (want_delete)
+       if (want_delete) {
+               lock_ufs(inode->i_sb);
                ufs_free_inode(inode);
+               unlock_ufs(inode->i_sb);
+       }
 }
index f773deb1d2e3fd561b906a0dd2a050d16161a5be..47966554317c922da9c73d904f0ea104ec0392f2 100644 (file)
@@ -56,11 +56,9 @@ static struct dentry *ufs_lookup(struct inode * dir, struct dentry *dentry, unsi
        if (dentry->d_name.len > UFS_MAXNAMLEN)
                return ERR_PTR(-ENAMETOOLONG);
 
-       lock_ufs(dir->i_sb);
        ino = ufs_inode_by_name(dir, &dentry->d_name);
        if (ino)
                inode = ufs_iget(dir->i_sb, ino);
-       unlock_ufs(dir->i_sb);
        return d_splice_alias(inode, dentry);
 }
 
@@ -76,24 +74,16 @@ static int ufs_create (struct inode * dir, struct dentry * dentry, umode_t mode,
                bool excl)
 {
        struct inode *inode;
-       int err;
-
-       UFSD("BEGIN\n");
 
        inode = ufs_new_inode(dir, mode);
-       err = PTR_ERR(inode);
+       if (IS_ERR(inode))
+               return PTR_ERR(inode);
 
-       if (!IS_ERR(inode)) {
-               inode->i_op = &ufs_file_inode_operations;
-               inode->i_fop = &ufs_file_operations;
-               inode->i_mapping->a_ops = &ufs_aops;
-               mark_inode_dirty(inode);
-               lock_ufs(dir->i_sb);
-               err = ufs_add_nondir(dentry, inode);
-               unlock_ufs(dir->i_sb);
-       }
-       UFSD("END: err=%d\n", err);
-       return err;
+       inode->i_op = &ufs_file_inode_operations;
+       inode->i_fop = &ufs_file_operations;
+       inode->i_mapping->a_ops = &ufs_aops;
+       mark_inode_dirty(inode);
+       return ufs_add_nondir(dentry, inode);
 }
 
 static int ufs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t rdev)
@@ -110,9 +100,7 @@ static int ufs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev
                init_special_inode(inode, mode, rdev);
                ufs_set_inode_dev(inode->i_sb, UFS_I(inode), rdev);
                mark_inode_dirty(inode);
-               lock_ufs(dir->i_sb);
                err = ufs_add_nondir(dentry, inode);
-               unlock_ufs(dir->i_sb);
        }
        return err;
 }
@@ -121,19 +109,18 @@ static int ufs_symlink (struct inode * dir, struct dentry * dentry,
        const char * symname)
 {
        struct super_block * sb = dir->i_sb;
-       int err = -ENAMETOOLONG;
+       int err;
        unsigned l = strlen(symname)+1;
        struct inode * inode;
 
        if (l > sb->s_blocksize)
-               goto out_notlocked;
+               return -ENAMETOOLONG;
 
        inode = ufs_new_inode(dir, S_IFLNK | S_IRWXUGO);
        err = PTR_ERR(inode);
        if (IS_ERR(inode))
-               goto out_notlocked;
+               return err;
 
-       lock_ufs(dir->i_sb);
        if (l > UFS_SB(sb)->s_uspi->s_maxsymlinklen) {
                /* slow symlink */
                inode->i_op = &ufs_symlink_inode_operations;
@@ -150,17 +137,13 @@ static int ufs_symlink (struct inode * dir, struct dentry * dentry,
        }
        mark_inode_dirty(inode);
 
-       err = ufs_add_nondir(dentry, inode);
-out:
-       unlock_ufs(dir->i_sb);
-out_notlocked:
-       return err;
+       return ufs_add_nondir(dentry, inode);
 
 out_fail:
        inode_dec_link_count(inode);
        unlock_new_inode(inode);
        iput(inode);
-       goto out;
+       return err;
 }
 
 static int ufs_link (struct dentry * old_dentry, struct inode * dir,
@@ -169,14 +152,16 @@ static int ufs_link (struct dentry * old_dentry, struct inode * dir,
        struct inode *inode = d_inode(old_dentry);
        int error;
 
-       lock_ufs(dir->i_sb);
-
        inode->i_ctime = CURRENT_TIME_SEC;
        inode_inc_link_count(inode);
        ihold(inode);
 
-       error = ufs_add_nondir(dentry, inode);
-       unlock_ufs(dir->i_sb);
+       error = ufs_add_link(dentry, inode);
+       if (error) {
+               inode_dec_link_count(inode);
+               iput(inode);
+       } else
+               d_instantiate(dentry, inode);
        return error;
 }
 
@@ -185,9 +170,12 @@ static int ufs_mkdir(struct inode * dir, struct dentry * dentry, umode_t mode)
        struct inode * inode;
        int err;
 
+       inode_inc_link_count(dir);
+
        inode = ufs_new_inode(dir, S_IFDIR|mode);
+       err = PTR_ERR(inode);
        if (IS_ERR(inode))
-               return PTR_ERR(inode);
+               goto out_dir;
 
        inode->i_op = &ufs_dir_inode_operations;
        inode->i_fop = &ufs_dir_operations;
@@ -195,9 +183,6 @@ static int ufs_mkdir(struct inode * dir, struct dentry * dentry, umode_t mode)
 
        inode_inc_link_count(inode);
 
-       lock_ufs(dir->i_sb);
-       inode_inc_link_count(dir);
-
        err = ufs_make_empty(inode, dir);
        if (err)
                goto out_fail;
@@ -205,20 +190,19 @@ static int ufs_mkdir(struct inode * dir, struct dentry * dentry, umode_t mode)
        err = ufs_add_link(dentry, inode);
        if (err)
                goto out_fail;
-       unlock_ufs(dir->i_sb);
 
+       unlock_new_inode(inode);
        d_instantiate(dentry, inode);
-out:
-       return err;
+       return 0;
 
 out_fail:
        inode_dec_link_count(inode);
        inode_dec_link_count(inode);
        unlock_new_inode(inode);
        iput (inode);
+out_dir:
        inode_dec_link_count(dir);
-       unlock_ufs(dir->i_sb);
-       goto out;
+       return err;
 }
 
 static int ufs_unlink(struct inode *dir, struct dentry *dentry)
@@ -248,7 +232,6 @@ static int ufs_rmdir (struct inode * dir, struct dentry *dentry)
        struct inode * inode = d_inode(dentry);
        int err= -ENOTEMPTY;
 
-       lock_ufs(dir->i_sb);
        if (ufs_empty_dir (inode)) {
                err = ufs_unlink(dir, dentry);
                if (!err) {
@@ -257,7 +240,6 @@ static int ufs_rmdir (struct inode * dir, struct dentry *dentry)
                        inode_dec_link_count(dir);
                }
        }
-       unlock_ufs(dir->i_sb);
        return err;
 }
 
@@ -295,7 +277,7 @@ static int ufs_rename(struct inode *old_dir, struct dentry *old_dentry,
                new_de = ufs_find_entry(new_dir, &new_dentry->d_name, &new_page);
                if (!new_de)
                        goto out_dir;
-               ufs_set_link(new_dir, new_de, new_page, old_inode);
+               ufs_set_link(new_dir, new_de, new_page, old_inode, 1);
                new_inode->i_ctime = CURRENT_TIME_SEC;
                if (dir_de)
                        drop_nlink(new_inode);
@@ -318,7 +300,12 @@ static int ufs_rename(struct inode *old_dir, struct dentry *old_dentry,
        mark_inode_dirty(old_inode);
 
        if (dir_de) {
-               ufs_set_link(old_inode, dir_de, dir_page, new_dir);
+               if (old_dir != new_dir)
+                       ufs_set_link(old_inode, dir_de, dir_page, new_dir, 0);
+               else {
+                       kunmap(dir_page);
+                       page_cache_release(dir_page);
+               }
                inode_dec_link_count(old_dir);
        }
        return 0;
index 098508a93c7b302fe8e6ab65ec7cd753d2515634..250579a80d90bd379caee1b7aeaf252dda97c34d 100644 (file)
@@ -695,6 +695,7 @@ static int ufs_sync_fs(struct super_block *sb, int wait)
        unsigned flags;
 
        lock_ufs(sb);
+       mutex_lock(&UFS_SB(sb)->s_lock);
 
        UFSD("ENTER\n");
 
@@ -712,6 +713,7 @@ static int ufs_sync_fs(struct super_block *sb, int wait)
        ufs_put_cstotal(sb);
 
        UFSD("EXIT\n");
+       mutex_unlock(&UFS_SB(sb)->s_lock);
        unlock_ufs(sb);
 
        return 0;
@@ -800,6 +802,7 @@ static int ufs_fill_super(struct super_block *sb, void *data, int silent)
        UFSD("flag %u\n", (int)(sb->s_flags & MS_RDONLY));
        
        mutex_init(&sbi->mutex);
+       mutex_init(&sbi->s_lock);
        spin_lock_init(&sbi->work_lock);
        INIT_DELAYED_WORK(&sbi->sync_work, delayed_sync_fs);
        /*
@@ -1278,6 +1281,7 @@ static int ufs_remount (struct super_block *sb, int *mount_flags, char *data)
 
        sync_filesystem(sb);
        lock_ufs(sb);
+       mutex_lock(&UFS_SB(sb)->s_lock);
        uspi = UFS_SB(sb)->s_uspi;
        flags = UFS_SB(sb)->s_flags;
        usb1 = ubh_get_usb_first(uspi);
@@ -1291,6 +1295,7 @@ static int ufs_remount (struct super_block *sb, int *mount_flags, char *data)
        new_mount_opt = 0;
        ufs_set_opt (new_mount_opt, ONERROR_LOCK);
        if (!ufs_parse_options (data, &new_mount_opt)) {
+               mutex_unlock(&UFS_SB(sb)->s_lock);
                unlock_ufs(sb);
                return -EINVAL;
        }
@@ -1298,12 +1303,14 @@ static int ufs_remount (struct super_block *sb, int *mount_flags, char *data)
                new_mount_opt |= ufstype;
        } else if ((new_mount_opt & UFS_MOUNT_UFSTYPE) != ufstype) {
                pr_err("ufstype can't be changed during remount\n");
+               mutex_unlock(&UFS_SB(sb)->s_lock);
                unlock_ufs(sb);
                return -EINVAL;
        }
 
        if ((*mount_flags & MS_RDONLY) == (sb->s_flags & MS_RDONLY)) {
                UFS_SB(sb)->s_mount_opt = new_mount_opt;
+               mutex_unlock(&UFS_SB(sb)->s_lock);
                unlock_ufs(sb);
                return 0;
        }
@@ -1327,6 +1334,7 @@ static int ufs_remount (struct super_block *sb, int *mount_flags, char *data)
         */
 #ifndef CONFIG_UFS_FS_WRITE
                pr_err("ufs was compiled with read-only support, can't be mounted as read-write\n");
+               mutex_unlock(&UFS_SB(sb)->s_lock);
                unlock_ufs(sb);
                return -EINVAL;
 #else
@@ -1336,11 +1344,13 @@ static int ufs_remount (struct super_block *sb, int *mount_flags, char *data)
                    ufstype != UFS_MOUNT_UFSTYPE_SUNx86 &&
                    ufstype != UFS_MOUNT_UFSTYPE_UFS2) {
                        pr_err("this ufstype is read-only supported\n");
+                       mutex_unlock(&UFS_SB(sb)->s_lock);
                        unlock_ufs(sb);
                        return -EINVAL;
                }
                if (!ufs_read_cylinder_structures(sb)) {
                        pr_err("failed during remounting\n");
+                       mutex_unlock(&UFS_SB(sb)->s_lock);
                        unlock_ufs(sb);
                        return -EPERM;
                }
@@ -1348,6 +1358,7 @@ static int ufs_remount (struct super_block *sb, int *mount_flags, char *data)
 #endif
        }
        UFS_SB(sb)->s_mount_opt = new_mount_opt;
+       mutex_unlock(&UFS_SB(sb)->s_lock);
        unlock_ufs(sb);
        return 0;
 }
index 2a07396d5f9eb623625f238d0d8b61c70301ae19..2e31ea2e35a3b9bee06af97f03aa553d6f65082c 100644 (file)
@@ -30,6 +30,7 @@ struct ufs_sb_info {
        int work_queued; /* non-zero if the delayed work is queued */
        struct delayed_work sync_work; /* FS sync delayed work */
        spinlock_t work_lock; /* protects sync_work and work_queued */
+       struct mutex s_lock;
 };
 
 struct ufs_inode_info {
@@ -105,7 +106,7 @@ extern int ufs_delete_entry(struct inode *, struct ufs_dir_entry *, struct page
 extern int ufs_empty_dir (struct inode *);
 extern struct ufs_dir_entry *ufs_dotdot(struct inode *, struct page **);
 extern void ufs_set_link(struct inode *dir, struct ufs_dir_entry *de,
-                        struct page *page, struct inode *inode);
+                        struct page *page, struct inode *inode, bool update_times);
 
 /* file.c */
 extern const struct inode_operations ufs_file_inode_operations;
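
Across the ufs hunks above, the block and inode allocators drop lock_ufs()/unlock_ufs() in favour of the per-superblock mutex declared here (UFS_SB(sb)->s_lock, initialised in ufs_fill_super()), while sync and remount currently take both. A condensed sketch of the resulting call shape; do_bitmap_work() is a placeholder, not a function in this patch.

static int do_bitmap_work(struct super_block *sb)       /* stand-in for the real cg/bitmap updates */
{
        return 0;
}

static int example_alloc_path(struct super_block *sb)
{
        struct ufs_sb_info *sbi = UFS_SB(sb);
        int err;

        mutex_lock(&sbi->s_lock);       /* serializes on-disk allocation state per superblock */
        err = do_bitmap_work(sb);
        mutex_unlock(&sbi->s_lock);

        return err;
}
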
index 874507de3485b818e94bfcd0348f79e9747fdce6..f0e8249722d40a0dcaf9f31bc25effaba142b0a9 100644 (file)
@@ -577,6 +577,13 @@ restart:
        if (error)
                return error;
 
+       /* For changing security info in file_remove_privs() we need i_mutex */
+       if (*iolock == XFS_IOLOCK_SHARED && !IS_NOSEC(inode)) {
+               xfs_rw_iunlock(ip, *iolock);
+               *iolock = XFS_IOLOCK_EXCL;
+               xfs_rw_ilock(ip, *iolock);
+               goto restart;
+       }
        /*
         * If the offset is beyond the size of the file, we need to zero any
         * blocks that fall between the existing EOF and the start of this
@@ -637,7 +644,9 @@ restart:
         * setgid bits if the process is not being run by root.  This keeps
         * people from modifying setuid and setgid binaries.
         */
-       return file_remove_suid(file);
+       if (!IS_NOSEC(inode))
+               return file_remove_privs(file);
+       return 0;
 }
 
 /*
index 273de709495c1db522d46dad7f339677fc8df756..b52c0dc4b4925a1d29c6c8b2d55a1ed0c40b2b75 100644 (file)
@@ -51,6 +51,7 @@
 #define METHOD_NAME__BBN        "_BBN"
 #define METHOD_NAME__CBA        "_CBA"
 #define METHOD_NAME__CID        "_CID"
+#define METHOD_NAME__CLS        "_CLS"
 #define METHOD_NAME__CRS        "_CRS"
 #define METHOD_NAME__DDN        "_DDN"
 #define METHOD_NAME__HID        "_HID"
index a8f344363e7737d056bebc7ec7f4d45fa2e170f0..f56de8c5d844de3019d2e8fb8b04f5fedab2d58f 100644 (file)
 
 /* DEBUG_PRINT functions */
 
-#define ACPI_DEBUG_PRINT(plist)         ACPI_ACTUAL_DEBUG plist
-#define ACPI_DEBUG_PRINT_RAW(plist)     ACPI_ACTUAL_DEBUG_RAW plist
+#ifndef COMPILER_VA_MACRO
+
+#define ACPI_DEBUG_PRINT(plist)         acpi_debug_print plist
+#define ACPI_DEBUG_PRINT_RAW(plist)     acpi_debug_print_raw plist
+
+#else
 
 /* Helper macros for DEBUG_PRINT */
 
        ACPI_DO_DEBUG_PRINT (acpi_debug_print_raw, level, line, \
                filename, modulename, component, __VA_ARGS__)
 
+#define ACPI_DEBUG_PRINT(plist)         ACPI_ACTUAL_DEBUG plist
+#define ACPI_DEBUG_PRINT_RAW(plist)     ACPI_ACTUAL_DEBUG_RAW plist
+
+#endif
+
 /*
  * Function entry tracing
  *
index b43276f339efd0114400ab6396ba7efd651a44c3..83061cac719bce830608daac2ed45e1760f5bd76 100644 (file)
@@ -420,7 +420,7 @@ static inline bool is_acpi_node(struct fwnode_handle *fwnode)
        return fwnode && fwnode->type == FWNODE_ACPI;
 }
 
-static inline struct acpi_device *acpi_node(struct fwnode_handle *fwnode)
+static inline struct acpi_device *to_acpi_node(struct fwnode_handle *fwnode)
 {
        return is_acpi_node(fwnode) ?
                container_of(fwnode, struct acpi_device, fwnode) : NULL;
index d68f1cd39c495f732b66c048d64cbe384162cd6d..e8ec18a4a634d8d5679a49757fcb616233ec5a9d 100644 (file)
@@ -46,7 +46,7 @@
 
 /* Current ACPICA subsystem version in YYYYMMDD format */
 
-#define ACPI_CA_VERSION                 0x20150515
+#define ACPI_CA_VERSION                 0x20150619
 
 #include <acpi/acconfig.h>
 #include <acpi/actypes.h>
@@ -195,9 +195,18 @@ ACPI_INIT_GLOBAL(u8, acpi_gbl_do_not_use_xsdt, FALSE);
  * address. Although ACPICA adheres to the ACPI specification which
  * requires the use of the corresponding 64-bit address if it is non-zero,
  * some machines have been found to have a corrupted non-zero 64-bit
- * address. Default is TRUE, favor the 32-bit addresses.
+ * address. Default is FALSE, do not favor the 32-bit addresses.
  */
-ACPI_INIT_GLOBAL(u8, acpi_gbl_use32_bit_fadt_addresses, TRUE);
+ACPI_INIT_GLOBAL(u8, acpi_gbl_use32_bit_fadt_addresses, FALSE);
+
+/*
+ * Optionally use 32-bit FACS table addresses.
+ * It is reported that some platforms fail to resume from system suspending
+ * if 64-bit FACS table address is selected:
+ * https://bugzilla.kernel.org/show_bug.cgi?id=74021
+ * Default is TRUE, favor the 32-bit addresses.
+ */
+ACPI_INIT_GLOBAL(u8, acpi_gbl_use32_bit_facs_addresses, TRUE);
 
 /*
  * Optionally truncate I/O addresses to 16 bits. Provides compatibility
@@ -219,6 +228,11 @@ ACPI_INIT_GLOBAL(u8, acpi_gbl_disable_auto_repair, FALSE);
  */
 ACPI_INIT_GLOBAL(u8, acpi_gbl_disable_ssdt_table_install, FALSE);
 
+/*
+ * Optionally enable runtime namespace override.
+ */
+ACPI_INIT_GLOBAL(u8, acpi_gbl_runtime_namespace_override, TRUE);
+
 /*
  * We keep track of the latest version of Windows that has been requested by
  * the BIOS. ACPI 5.0.
@@ -814,8 +828,12 @@ ACPI_EXTERNAL_RETURN_STATUS(acpi_status
 ACPI_EXTERNAL_RETURN_STATUS(acpi_status acpi_leave_sleep_state(u8 sleep_state))
 
 ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
-                               acpi_set_firmware_waking_vector(u32
-                                                               physical_address))
+                               acpi_set_firmware_waking_vectors
+                               (acpi_physical_address physical_address,
+                                acpi_physical_address physical_address64))
+ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
+                                acpi_set_firmware_waking_vector(u32
+                                                                physical_address))
 #if ACPI_MACHINE_WIDTH == 64
 ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
                                acpi_set_firmware_waking_vector64(u64
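The plural acpi_set_firmware_waking_vectors() is new here and takes both the 32-bit and the 64-bit FACS vector, while the older single-argument forms are kept. A hedged sketch of a call site; the wrapper name and the meaning of wakeup_pa are assumptions, and the exact handling of a zero 64-bit argument is ACPICA's.

    static acpi_status example_set_wake_vector(acpi_physical_address wakeup_pa)
    {
            /* wakeup_pa: assumed physical address of the OS wakeup stub */
            return acpi_set_firmware_waking_vectors(wakeup_pa, 0);
    }
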
index cb8a6b97cedae07b239fbc5eb671e1ddfe6d38ea..2d5faf508cadfd159a5b1213bc2bc16d66ae5535 100644 (file)
@@ -65,6 +65,7 @@
 #define ACPI_SIG_DSDT           "DSDT" /* Differentiated System Description Table */
 #define ACPI_SIG_FADT           "FACP" /* Fixed ACPI Description Table */
 #define ACPI_SIG_FACS           "FACS" /* Firmware ACPI Control Structure */
+#define ACPI_SIG_OSDT           "OSDT" /* Override System Description Table */
 #define ACPI_SIG_PSDT           "PSDT" /* Persistent System Description Table */
 #define ACPI_SIG_RSDP           "RSD PTR "     /* Root System Description Pointer */
 #define ACPI_SIG_RSDT           "RSDT" /* Root System Description Table */
index 06b61f01ea599177c0cba9b3172c89672d7aa63b..fcd570999f354247c9dbba73163797ff38cbd1a6 100644 (file)
@@ -835,6 +835,17 @@ struct acpi_madt_generic_distributor {
        u8 reserved2[3];        /* reserved - must be zero */
 };
 
+/* Values for Version field above */
+
+enum acpi_madt_gic_version {
+       ACPI_MADT_GIC_VERSION_NONE = 0,
+       ACPI_MADT_GIC_VERSION_V1 = 1,
+       ACPI_MADT_GIC_VERSION_V2 = 2,
+       ACPI_MADT_GIC_VERSION_V3 = 3,
+       ACPI_MADT_GIC_VERSION_V4 = 4,
+       ACPI_MADT_GIC_VERSION_RESERVED = 5      /* 5 and greater are reserved */
+};
+
 /* 13: Generic MSI Frame (ACPI 5.1) */
 
 struct acpi_madt_generic_msi_frame {
index 370d69d871a0d19054c6fd81bf33789670493f08..a948fc586b9b8406d44b5e35f4e50c1ed436830e 100644 (file)
@@ -51,8 +51,8 @@
  * These tables are not consumed directly by the ACPICA subsystem, but are
  * included here to support device drivers and the AML disassembler.
  *
- * The tables in this file are defined by third-party specifications, and are
- * not defined directly by the ACPI specification itself.
+ * Generally, the tables in this file are defined by third-party specifications,
+ * and are not defined directly by the ACPI specification itself.
  *
  ******************************************************************************/
 
@@ -80,6 +80,7 @@
 #define ACPI_SIG_SPCR           "SPCR" /* Serial Port Console Redirection table */
 #define ACPI_SIG_SPMI           "SPMI" /* Server Platform Management Interface table */
 #define ACPI_SIG_TCPA           "TCPA" /* Trusted Computing Platform Alliance table */
+#define ACPI_SIG_TPM2           "TPM2" /* Trusted Platform Module 2.0 H/W interface table */
 #define ACPI_SIG_UEFI           "UEFI" /* Uefi Boot Optimization Table */
 #define ACPI_SIG_VRTC           "VRTC" /* Virtual Real Time Clock Table */
 #define ACPI_SIG_WAET           "WAET" /* Windows ACPI Emulated devices Table */
@@ -1179,20 +1180,85 @@ enum acpi_spmi_interface_types {
 /*******************************************************************************
  *
  * TCPA - Trusted Computing Platform Alliance table
- *        Version 1
+ *        Version 2
+ *
+ * Conforms to "TCG ACPI Specification, Family 1.2 and 2.0",
+ * December 19, 2014
  *
- * Conforms to "TCG PC Specific Implementation Specification",
- * Version 1.1, August 18, 2003
+ * NOTE: There are two versions of the table with the same signature --
+ * the client version and the server version.
  *
  ******************************************************************************/
 
-struct acpi_table_tcpa {
+struct acpi_table_tcpa_client {
        struct acpi_table_header header;        /* Common ACPI table header */
+       u16 platform_class;
+       u32 minimum_log_length; /* Minimum length for the event log area */
+       u64 log_address;        /* Address of the event log area */
+};
+
+struct acpi_table_tcpa_server {
+       struct acpi_table_header header;        /* Common ACPI table header */
+       u16 platform_class;
        u16 reserved;
-       u32 max_log_length;     /* Maximum length for the event log area */
+       u64 minimum_log_length; /* Minimum length for the event log area */
        u64 log_address;        /* Address of the event log area */
+       u16 spec_revision;
+       u8 device_flags;
+       u8 interrupt_flags;
+       u8 gpe_number;
+       u8 reserved2[3];
+       u32 global_interrupt;
+       struct acpi_generic_address address;
+       u32 reserved3;
+       struct acpi_generic_address config_address;
+       u8 group;
+       u8 bus;                 /* PCI Bus/Segment/Function numbers */
+       u8 device;
+       u8 function;
+};
+
+/* Values for device_flags above */
+
+#define ACPI_TCPA_PCI_DEVICE            (1)
+#define ACPI_TCPA_BUS_PNP               (1<<1)
+#define ACPI_TCPA_ADDRESS_VALID         (1<<2)
+
+/* Values for interrupt_flags above */
+
+#define ACPI_TCPA_INTERRUPT_MODE        (1)
+#define ACPI_TCPA_INTERRUPT_POLARITY    (1<<1)
+#define ACPI_TCPA_SCI_VIA_GPE           (1<<2)
+#define ACPI_TCPA_GLOBAL_INTERRUPT      (1<<3)
+
+/*******************************************************************************
+ *
+ * TPM2 - Trusted Platform Module (TPM) 2.0 Hardware Interface Table
+ *        Version 4
+ *
+ * Conforms to "TCG ACPI Specification, Family 1.2 and 2.0",
+ * December 19, 2014
+ *
+ ******************************************************************************/
+
+struct acpi_table_tpm2 {
+       struct acpi_table_header header;        /* Common ACPI table header */
+       u16 platform_class;
+       u16 reserved;
+       u64 control_address;
+       u32 start_method;
+
+       /* Platform-specific data follows */
 };
 
+/* Values for start_method above */
+
+#define ACPI_TPM2_NOT_ALLOWED                       0
+#define ACPI_TPM2_START_METHOD                      2
+#define ACPI_TPM2_MEMORY_MAPPED                     6
+#define ACPI_TPM2_COMMAND_BUFFER                    7
+#define ACPI_TPM2_COMMAND_BUFFER_WITH_START_METHOD  8
+
 /*******************************************************************************
  *
  * UEFI - UEFI Boot optimization Table
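An illustrative sketch (not from the commit) of how a driver might consume the relocated TPM2 definition through the existing acpi_get_table() interface; the probe helper name is invented.

    static bool example_tpm2_present(void)
    {
            struct acpi_table_tpm2 *tpm2;
            acpi_status status;

            status = acpi_get_table(ACPI_SIG_TPM2, 1,
                                    (struct acpi_table_header **)&tpm2);
            if (ACPI_FAILURE(status))
                    return false;

            /* for these start methods the control area is at control_address */
            return tpm2->start_method == ACPI_TPM2_COMMAND_BUFFER ||
                   tpm2->start_method == ACPI_TPM2_MEMORY_MAPPED;
    }
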
index 4018986d2a2e2b96f5bf2f493078561666b59e41..1df891660f4394e5f805e57455cbb662b9fac920 100644 (file)
@@ -51,7 +51,8 @@
  * These tables are not consumed directly by the ACPICA subsystem, but are
  * included here to support device drivers and the AML disassembler.
  *
- * The tables in this file are fully defined within the ACPI specification.
+ * In general, the tables in this file are fully defined within the ACPI
+ * specification.
  *
  ******************************************************************************/
 
@@ -69,7 +70,6 @@
 #define ACPI_SIG_PMTT           "PMTT" /* Platform Memory Topology Table */
 #define ACPI_SIG_RASF           "RASF" /* RAS Feature table */
 #define ACPI_SIG_STAO           "STAO" /* Status Override table */
-#define ACPI_SIG_TPM2           "TPM2" /* Trusted Platform Module 2.0 H/W interface table */
 #define ACPI_SIG_WPBT           "WPBT" /* Windows Platform Binary Table */
 #define ACPI_SIG_XENV           "XENV" /* Xen Environment table */
 
@@ -720,36 +720,6 @@ struct acpi_table_stao {
        u8 ignore_uart;
 };
 
-/*******************************************************************************
- *
- * TPM2 - Trusted Platform Module (TPM) 2.0 Hardware Interface Table
- *        Version 3
- *
- * Conforms to "TPM 2.0 Hardware Interface Table (TPM2)" 29 November 2011
- *
- ******************************************************************************/
-
-struct acpi_table_tpm2 {
-       struct acpi_table_header header;        /* Common ACPI table header */
-       u32 flags;
-       u64 control_address;
-       u32 start_method;
-};
-
-/* Control area structure (not part of table, pointed to by control_address) */
-
-struct acpi_tpm2_control {
-       u32 reserved;
-       u32 error;
-       u32 cancel;
-       u32 start;
-       u64 interrupt_control;
-       u32 command_size;
-       u64 command_address;
-       u32 response_size;
-       u64 response_address;
-};
-
 /*******************************************************************************
  *
  * WPBT - Windows Platform Environment Table (ACPI 6.0)
index 63fd7f5e9fb3495198e659c8b04a16db2e489877..c2a41d223162a3ef3936d40c111e89fd5a4c6600 100644 (file)
@@ -542,14 +542,14 @@ typedef u64 acpi_integer;
 #define ACPI_COMPARE_NAME(a,b)          (*ACPI_CAST_PTR (u32, (a)) == *ACPI_CAST_PTR (u32, (b)))
 #define ACPI_MOVE_NAME(dest,src)        (*ACPI_CAST_PTR (u32, (dest)) = *ACPI_CAST_PTR (u32, (src)))
 #else
-#define ACPI_COMPARE_NAME(a,b)          (!ACPI_STRNCMP (ACPI_CAST_PTR (char, (a)), ACPI_CAST_PTR (char, (b)), ACPI_NAME_SIZE))
-#define ACPI_MOVE_NAME(dest,src)        (ACPI_STRNCPY (ACPI_CAST_PTR (char, (dest)), ACPI_CAST_PTR (char, (src)), ACPI_NAME_SIZE))
+#define ACPI_COMPARE_NAME(a,b)          (!strncmp (ACPI_CAST_PTR (char, (a)), ACPI_CAST_PTR (char, (b)), ACPI_NAME_SIZE))
+#define ACPI_MOVE_NAME(dest,src)        (strncpy (ACPI_CAST_PTR (char, (dest)), ACPI_CAST_PTR (char, (src)), ACPI_NAME_SIZE))
 #endif
 
 /* Support for the special RSDP signature (8 characters) */
 
-#define ACPI_VALIDATE_RSDP_SIG(a)       (!ACPI_STRNCMP (ACPI_CAST_PTR (char, (a)), ACPI_SIG_RSDP, 8))
-#define ACPI_MAKE_RSDP_SIG(dest)        (ACPI_MEMCPY (ACPI_CAST_PTR (char, (dest)), ACPI_SIG_RSDP, 8))
+#define ACPI_VALIDATE_RSDP_SIG(a)       (!strncmp (ACPI_CAST_PTR (char, (a)), ACPI_SIG_RSDP, 8))
+#define ACPI_MAKE_RSDP_SIG(dest)        (memcpy (ACPI_CAST_PTR (char, (dest)), ACPI_SIG_RSDP, 8))
 
 /*******************************************************************************
  *
@@ -568,6 +568,7 @@ typedef u64 acpi_integer;
 #define ACPI_NO_ACPI_ENABLE             0x10
 #define ACPI_NO_DEVICE_INIT             0x20
 #define ACPI_NO_OBJECT_INIT             0x40
+#define ACPI_NO_FACS_INIT               0x80
 
 /*
  * Initialization state
@@ -1140,6 +1141,10 @@ u32 (*acpi_interface_handler) (acpi_string interface_name, u32 supported);
 
 #define ACPI_UUID_LENGTH                16
 
+/* Length of 3-byte PCI class code values when converted back to a string */
+
+#define ACPI_PCICLS_STRING_SIZE         7      /* Includes null terminator */
+
 /* Structures used for device/processor HID, UID, CID, and SUB */
 
 struct acpi_pnp_device_id {
@@ -1162,7 +1167,7 @@ struct acpi_device_info {
        u32 name;               /* ACPI object Name */
        acpi_object_type type;  /* ACPI object Type */
        u8 param_count;         /* If a method, required parameter count */
-       u8 valid;               /* Indicates which optional fields are valid */
+       u16 valid;              /* Indicates which optional fields are valid */
        u8 flags;               /* Miscellaneous info */
        u8 highest_dstates[4];  /* _sx_d values: 0xFF indicates not valid */
        u8 lowest_dstates[5];   /* _sx_w values: 0xFF indicates not valid */
@@ -1171,6 +1176,7 @@ struct acpi_device_info {
        struct acpi_pnp_device_id hardware_id;  /* _HID value */
        struct acpi_pnp_device_id unique_id;    /* _UID value */
        struct acpi_pnp_device_id subsystem_id; /* _SUB value */
+       struct acpi_pnp_device_id class_code;   /* _CLS value */
        struct acpi_pnp_device_id_list compatible_id_list;      /* _CID list <must be last> */
 };
 
@@ -1180,14 +1186,15 @@ struct acpi_device_info {
 
 /* Flags for Valid field above (acpi_get_object_info) */
 
-#define ACPI_VALID_STA                  0x01
-#define ACPI_VALID_ADR                  0x02
-#define ACPI_VALID_HID                  0x04
-#define ACPI_VALID_UID                  0x08
-#define ACPI_VALID_SUB                  0x10
-#define ACPI_VALID_CID                  0x20
-#define ACPI_VALID_SXDS                 0x40
-#define ACPI_VALID_SXWS                 0x80
+#define ACPI_VALID_STA                  0x0001
+#define ACPI_VALID_ADR                  0x0002
+#define ACPI_VALID_HID                  0x0004
+#define ACPI_VALID_UID                  0x0008
+#define ACPI_VALID_SUB                  0x0010
+#define ACPI_VALID_CID                  0x0020
+#define ACPI_VALID_CLS                  0x0040
+#define ACPI_VALID_SXDS                 0x0100
+#define ACPI_VALID_SXWS                 0x0200
 
 /* Flags for _STA return value (current_status above) */
 
index 073997d729e9c9710c10656ff3f34c6e30527c34..3cedd43943f42a8465772def8b68f3f93f322676 100644 (file)
 
 /* We will be linking to the standard Clib functions */
 
-#define ACPI_STRSTR(s1,s2)      strstr((s1), (s2))
-#define ACPI_STRCHR(s1,c)       strchr((s1), (c))
-#define ACPI_STRLEN(s)          (acpi_size) strlen((s))
-#define ACPI_STRCPY(d,s)        (void) strcpy((d), (s))
-#define ACPI_STRNCPY(d,s,n)     (void) strncpy((d), (s), (acpi_size)(n))
-#define ACPI_STRNCMP(d,s,n)     strncmp((d), (s), (acpi_size)(n))
-#define ACPI_STRCMP(d,s)        strcmp((d), (s))
-#define ACPI_STRCAT(d,s)        (void) strcat((d), (s))
-#define ACPI_STRNCAT(d,s,n)     strncat((d), (s), (acpi_size)(n))
-#define ACPI_STRTOUL(d,s,n)     strtoul((d), (s), (acpi_size)(n))
-#define ACPI_MEMCMP(s1,s2,n)    memcmp((const char *)(s1), (const char *)(s2), (acpi_size)(n))
-#define ACPI_MEMCPY(d,s,n)      (void) memcpy((d), (s), (acpi_size)(n))
-#define ACPI_MEMSET(d,s,n)      (void) memset((d), (s), (acpi_size)(n))
-
-#define ACPI_TOUPPER(i)         toupper((int) (i))
-#define ACPI_TOLOWER(i)         tolower((int) (i))
-#define ACPI_IS_XDIGIT(i)       isxdigit((int) (i))
-#define ACPI_IS_DIGIT(i)        isdigit((int) (i))
-#define ACPI_IS_SPACE(i)        isspace((int) (i))
-#define ACPI_IS_UPPER(i)        isupper((int) (i))
-#define ACPI_IS_PRINT(i)        isprint((int) (i))
-#define ACPI_IS_ALPHA(i)        isalpha((int) (i))
-
 #else
 
 /******************************************************************************
@@ -406,22 +383,6 @@ typedef char *va_list;
 
 /* Use the local (ACPICA) definitions of the clib functions */
 
-#define ACPI_STRSTR(s1,s2)      acpi_ut_strstr ((s1), (s2))
-#define ACPI_STRCHR(s1,c)       acpi_ut_strchr ((s1), (c))
-#define ACPI_STRLEN(s)          (acpi_size) acpi_ut_strlen ((s))
-#define ACPI_STRCPY(d,s)        (void) acpi_ut_strcpy ((d), (s))
-#define ACPI_STRNCPY(d,s,n)     (void) acpi_ut_strncpy ((d), (s), (acpi_size)(n))
-#define ACPI_STRNCMP(d,s,n)     acpi_ut_strncmp ((d), (s), (acpi_size)(n))
-#define ACPI_STRCMP(d,s)        acpi_ut_strcmp ((d), (s))
-#define ACPI_STRCAT(d,s)        (void) acpi_ut_strcat ((d), (s))
-#define ACPI_STRNCAT(d,s,n)     acpi_ut_strncat ((d), (s), (acpi_size)(n))
-#define ACPI_STRTOUL(d,s,n)     acpi_ut_strtoul ((d), (s), (acpi_size)(n))
-#define ACPI_MEMCMP(s1,s2,n)    acpi_ut_memcmp((const char *)(s1), (const char *)(s2), (acpi_size)(n))
-#define ACPI_MEMCPY(d,s,n)      (void) acpi_ut_memcpy ((d), (s), (acpi_size)(n))
-#define ACPI_MEMSET(d,v,n)      (void) acpi_ut_memset ((d), (v), (acpi_size)(n))
-#define ACPI_TOUPPER(c)         acpi_ut_to_upper ((int) (c))
-#define ACPI_TOLOWER(c)         acpi_ut_to_lower ((int) (c))
-
 #endif                         /* ACPI_USE_SYSTEM_CLIBRARY */
 
 #ifndef ACPI_FILE
index 14dc6f68ca1854de0095e1d572d5e6de36abf7b5..0a7dc8e583b1c2d742e691cf6e4c00ccc0154f5a 100644 (file)
 #if defined(_LINUX) || defined(__linux__)
 #include <acpi/platform/aclinuxex.h>
 
+#elif defined(_AED_EFI)
+#include "acefiex.h"
+
+#elif defined(_GNU_EFI)
+#include "acefiex.h"
+
 #elif defined(__DragonFly__)
 #include "acdragonflyex.h"
 
index f54de0a635582d45b3aca6bb1977313d431e0549..5457a06cb52879f2775ed64630f63025ca3fbb9c 100644 (file)
@@ -75,4 +75,8 @@
 #undef strchr
 #endif
 
+/* GCC supports __VA_ARGS__ in macros */
+
+#define COMPILER_VA_MACRO               1
+
 #endif                         /* __ACGCC_H__ */
index a7d7f1043e9c124f84dbd1130259c343a754c9db..e840b294c6f5beb2f8a178aa0d3bbb8e92040e40 100644 (file)
@@ -43,7 +43,7 @@ static inline enum acpi_backlight_type acpi_video_get_backlight_type(void)
 {
        return acpi_backlight_vendor;
 }
-static void acpi_video_set_dmi_backlight_type(enum acpi_backlight_type type)
+static inline void acpi_video_set_dmi_backlight_type(enum acpi_backlight_type type)
 {
 }
 #endif
index e6a83d712ef6772ac7265e914837da7806e21441..55e3abc2d027270b0e6e1c554b9f376d3cd355f1 100644 (file)
 #endif
 
 #ifdef CONFIG_SMP
+
+#ifndef smp_mb
 #define smp_mb()       mb()
+#endif
+
+#ifndef smp_rmb
 #define smp_rmb()      rmb()
+#endif
+
+#ifndef smp_wmb
 #define smp_wmb()      wmb()
+#endif
+
+#ifndef smp_read_barrier_depends
 #define smp_read_barrier_depends()     read_barrier_depends()
-#else
+#endif
+
+#else  /* !CONFIG_SMP */
+
+#ifndef smp_mb
 #define smp_mb()       barrier()
+#endif
+
+#ifndef smp_rmb
 #define smp_rmb()      barrier()
+#endif
+
+#ifndef smp_wmb
 #define smp_wmb()      barrier()
+#endif
+
+#ifndef smp_read_barrier_depends
 #define smp_read_barrier_depends()     do { } while (0)
 #endif
 
+#endif /* CONFIG_SMP */
+
 #ifndef smp_store_mb
 #define smp_store_mb(var, value)  do { WRITE_ONCE(var, value); mb(); } while (0)
 #endif
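With every smp_* definition now wrapped in #ifndef, an architecture's <asm/barrier.h> can override a single barrier and inherit the generic fallbacks for the rest. A sketch of that usage; the architecture and barrier instruction below are invented for the example.

    /* hypothetical arch/foo/include/asm/barrier.h */
    #define mb()            __asm__ __volatile__("dsb sy" : : : "memory")
    #define smp_mb()        mb()    /* arch-specific SMP barrier */

    #include <asm-generic/barrier.h>        /* supplies smp_rmb(), smp_wmb(), ... */
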
index 19a240446fca657e9928e93defa3ead34905ee56..e42495ad813632002d5d313da5351e615240f274 100644 (file)
@@ -56,10 +56,7 @@ static __inline__ void *drm_malloc_ab(size_t nmemb, size_t size)
 
 static __inline void drm_free_large(void *ptr)
 {
-       if (!is_vmalloc_addr(ptr))
-               return kfree(ptr);
-
-       vfree(ptr);
+       kvfree(ptr);
 }
 
 #endif
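kvfree() accepts either a kmalloc'd or a vmalloc'd pointer, which is exactly what drm_malloc_ab() may hand back, so the open-coded is_vmalloc_addr() test above becomes redundant. A hypothetical pairing for illustration:

    static u32 *example_alloc_relocs(size_t count)
    {
            /* drm_malloc_ab() falls back to vmalloc() for large allocations */
            return drm_malloc_ab(count, sizeof(u32));
    }

    static void example_free_relocs(u32 *relocs)
    {
            drm_free_large(relocs);         /* now simply kvfree(relocs) */
    }
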
diff --git a/include/dt-bindings/clock/bcm-cygnus.h b/include/dt-bindings/clock/bcm-cygnus.h
new file mode 100644 (file)
index 0000000..32fbc47
--- /dev/null
@@ -0,0 +1,68 @@
+/*
+ *  BSD LICENSE
+ *
+ *  Copyright(c) 2014 Broadcom Corporation.  All rights reserved.
+ *
+ *  Redistribution and use in source and binary forms, with or without
+ *  modification, are permitted provided that the following conditions
+ *  are met:
+ *
+ *    * Redistributions of source code must retain the above copyright
+ *      notice, this list of conditions and the following disclaimer.
+ *    * Redistributions in binary form must reproduce the above copyright
+ *      notice, this list of conditions and the following disclaimer in
+ *      the documentation and/or other materials provided with the
+ *      distribution.
+ *    * Neither the name of Broadcom Corporation nor the names of its
+ *      contributors may be used to endorse or promote products derived
+ *      from this software without specific prior written permission.
+ *
+ *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _CLOCK_BCM_CYGNUS_H
+#define _CLOCK_BCM_CYGNUS_H
+
+/* GENPLL clock ID */
+#define BCM_CYGNUS_GENPLL                     0
+#define BCM_CYGNUS_GENPLL_AXI21_CLK           1
+#define BCM_CYGNUS_GENPLL_250MHZ_CLK          2
+#define BCM_CYGNUS_GENPLL_IHOST_SYS_CLK       3
+#define BCM_CYGNUS_GENPLL_ENET_SW_CLK         4
+#define BCM_CYGNUS_GENPLL_AUDIO_125_CLK       5
+#define BCM_CYGNUS_GENPLL_CAN_CLK             6
+
+/* LCPLL0 clock ID */
+#define BCM_CYGNUS_LCPLL0                     0
+#define BCM_CYGNUS_LCPLL0_PCIE_PHY_REF_CLK    1
+#define BCM_CYGNUS_LCPLL0_DDR_PHY_CLK         2
+#define BCM_CYGNUS_LCPLL0_SDIO_CLK            3
+#define BCM_CYGNUS_LCPLL0_USB_PHY_REF_CLK     4
+#define BCM_CYGNUS_LCPLL0_SMART_CARD_CLK      5
+#define BCM_CYGNUS_LCPLL0_CH5_UNUSED          6
+
+/* MIPI PLL clock ID */
+#define BCM_CYGNUS_MIPIPLL                    0
+#define BCM_CYGNUS_MIPIPLL_CH0_UNUSED         1
+#define BCM_CYGNUS_MIPIPLL_CH1_LCD            2
+#define BCM_CYGNUS_MIPIPLL_CH2_V3D            3
+#define BCM_CYGNUS_MIPIPLL_CH3_UNUSED         4
+#define BCM_CYGNUS_MIPIPLL_CH4_UNUSED         5
+#define BCM_CYGNUS_MIPIPLL_CH5_UNUSED         6
+
+/* ASIU clock ID */
+#define BCM_CYGNUS_ASIU_KEYPAD_CLK    0
+#define BCM_CYGNUS_ASIU_ADC_CLK       1
+#define BCM_CYGNUS_ASIU_PWM_CLK       2
+
+#endif /* _CLOCK_BCM_CYGNUS_H */
diff --git a/include/dt-bindings/clock/hi6220-clock.h b/include/dt-bindings/clock/hi6220-clock.h
new file mode 100644 (file)
index 0000000..70ee383
--- /dev/null
@@ -0,0 +1,173 @@
+/*
+ * Copyright (c) 2015 Hisilicon Limited.
+ *
+ * Author: Bintian Wang <bintian.wang@huawei.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __DT_BINDINGS_CLOCK_HI6220_H
+#define __DT_BINDINGS_CLOCK_HI6220_H
+
+/* clk in Hi6220 AO (always on) controller */
+#define HI6220_NONE_CLOCK      0
+
+/* fixed rate clocks */
+#define HI6220_REF32K          1
+#define HI6220_CLK_TCXO                2
+#define HI6220_MMC1_PAD                3
+#define HI6220_MMC2_PAD                4
+#define HI6220_MMC0_PAD                5
+#define HI6220_PLL_BBP         6
+#define HI6220_PLL_GPU         7
+#define HI6220_PLL1_DDR                8
+#define HI6220_PLL_SYS         9
+#define HI6220_PLL_SYS_MEDIA   10
+#define HI6220_DDR_SRC         11
+#define HI6220_PLL_MEDIA       12
+#define HI6220_PLL_DDR         13
+
+/* fixed factor clocks */
+#define HI6220_300M            14
+#define HI6220_150M            15
+#define HI6220_PICOPHY_SRC     16
+#define HI6220_MMC0_SRC_SEL    17
+#define HI6220_MMC1_SRC_SEL    18
+#define HI6220_MMC2_SRC_SEL    19
+#define HI6220_VPU_CODEC       20
+#define HI6220_MMC0_SMP                21
+#define HI6220_MMC1_SMP                22
+#define HI6220_MMC2_SMP                23
+
+/* gate clocks */
+#define HI6220_WDT0_PCLK       24
+#define HI6220_WDT1_PCLK       25
+#define HI6220_WDT2_PCLK       26
+#define HI6220_TIMER0_PCLK     27
+#define HI6220_TIMER1_PCLK     28
+#define HI6220_TIMER2_PCLK     29
+#define HI6220_TIMER3_PCLK     30
+#define HI6220_TIMER4_PCLK     31
+#define HI6220_TIMER5_PCLK     32
+#define HI6220_TIMER6_PCLK     33
+#define HI6220_TIMER7_PCLK     34
+#define HI6220_TIMER8_PCLK     35
+#define HI6220_UART0_PCLK      36
+
+#define HI6220_AO_NR_CLKS      37
+
+/* clk in Hi6220 systrl */
+/* gate clock */
+#define HI6220_MMC0_CLK                1
+#define HI6220_MMC0_CIUCLK     2
+#define HI6220_MMC1_CLK                3
+#define HI6220_MMC1_CIUCLK     4
+#define HI6220_MMC2_CLK                5
+#define HI6220_MMC2_CIUCLK     6
+#define HI6220_USBOTG_HCLK     7
+#define HI6220_CLK_PICOPHY     8
+#define HI6220_HIFI            9
+#define HI6220_DACODEC_PCLK    10
+#define HI6220_EDMAC_ACLK      11
+#define HI6220_CS_ATB          12
+#define HI6220_I2C0_CLK                13
+#define HI6220_I2C1_CLK                14
+#define HI6220_I2C2_CLK                15
+#define HI6220_I2C3_CLK                16
+#define HI6220_UART1_PCLK      17
+#define HI6220_UART2_PCLK      18
+#define HI6220_UART3_PCLK      19
+#define HI6220_UART4_PCLK      20
+#define HI6220_SPI_CLK         21
+#define HI6220_TSENSOR_CLK     22
+#define HI6220_MMU_CLK         23
+#define HI6220_HIFI_SEL                24
+#define HI6220_MMC0_SYSPLL     25
+#define HI6220_MMC1_SYSPLL     26
+#define HI6220_MMC2_SYSPLL     27
+#define HI6220_MMC0_SEL                28
+#define HI6220_MMC1_SEL                29
+#define HI6220_BBPPLL_SEL      30
+#define HI6220_MEDIA_PLL_SRC   31
+#define HI6220_MMC2_SEL                32
+#define HI6220_CS_ATB_SYSPLL   33
+
+/* mux clocks */
+#define HI6220_MMC0_SRC                34
+#define HI6220_MMC0_SMP_IN     35
+#define HI6220_MMC1_SRC                36
+#define HI6220_MMC1_SMP_IN     37
+#define HI6220_MMC2_SRC                38
+#define HI6220_MMC2_SMP_IN     39
+#define HI6220_HIFI_SRC                40
+#define HI6220_UART1_SRC       41
+#define HI6220_UART2_SRC       42
+#define HI6220_UART3_SRC       43
+#define HI6220_UART4_SRC       44
+#define HI6220_MMC0_MUX0       45
+#define HI6220_MMC1_MUX0       46
+#define HI6220_MMC2_MUX0       47
+#define HI6220_MMC0_MUX1       48
+#define HI6220_MMC1_MUX1       49
+#define HI6220_MMC2_MUX1       50
+
+/* divider clocks */
+#define HI6220_CLK_BUS         51
+#define HI6220_MMC0_DIV                52
+#define HI6220_MMC1_DIV                53
+#define HI6220_MMC2_DIV                54
+#define HI6220_HIFI_DIV                55
+#define HI6220_BBPPLL0_DIV     56
+#define HI6220_CS_DAPB         57
+#define HI6220_CS_ATB_DIV      58
+
+#define HI6220_SYS_NR_CLKS     59
+
+/* clk in Hi6220 media controller */
+/* gate clocks */
+#define HI6220_DSI_PCLK                1
+#define HI6220_G3D_PCLK                2
+#define HI6220_ACLK_CODEC_VPU  3
+#define HI6220_ISP_SCLK                4
+#define HI6220_ADE_CORE                5
+#define HI6220_MED_MMU         6
+#define HI6220_CFG_CSI4PHY     7
+#define HI6220_CFG_CSI2PHY     8
+#define HI6220_ISP_SCLK_GATE   9
+#define HI6220_ISP_SCLK_GATE1  10
+#define HI6220_ADE_CORE_GATE   11
+#define HI6220_CODEC_VPU_GATE  12
+#define HI6220_MED_SYSPLL      13
+
+/* mux clocks */
+#define HI6220_1440_1200       14
+#define HI6220_1000_1200       15
+#define HI6220_1000_1440       16
+
+/* divider clocks */
+#define HI6220_CODEC_JPEG      17
+#define HI6220_ISP_SCLK_SRC    18
+#define HI6220_ISP_SCLK1       19
+#define HI6220_ADE_CORE_SRC    20
+#define HI6220_ADE_PIX_SRC     21
+#define HI6220_G3D_CLK         22
+#define HI6220_CODEC_VPU_SRC   23
+
+#define HI6220_MEDIA_NR_CLKS   24
+
+/* clk in Hi6220 power controller */
+/* gate clocks */
+#define HI6220_PLL_GPU_GATE    1
+#define HI6220_PLL1_DDR_GATE   2
+#define HI6220_PLL_DDR_GATE    3
+#define HI6220_PLL_MEDIA_GATE  4
+#define HI6220_PLL0_BBP_GATE   5
+
+/* divider clocks */
+#define HI6220_DDRC_SRC                6
+#define HI6220_DDRC_AXI1       7
+
+#define HI6220_POWER_NR_CLKS   8
+#endif
diff --git a/include/dt-bindings/clock/lpc18xx-ccu.h b/include/dt-bindings/clock/lpc18xx-ccu.h
new file mode 100644 (file)
index 0000000..bbfe00b
--- /dev/null
@@ -0,0 +1,74 @@
+/*
+ * Copyright (c) 2015 Joachim Eastwood <manabian@gmail.com>
+ *
+ * This code is released using a dual license strategy: BSD/GPL
+ * You can choose the licence that better fits your requirements.
+ *
+ * Released under the terms of 3-clause BSD License
+ * Released under the terms of GNU General Public License Version 2.0
+ *
+ */
+
+/* Clock Control Unit 1 (CCU1) clock offsets */
+#define CLK_APB3_BUS           0x100
+#define CLK_APB3_I2C1          0x108
+#define CLK_APB3_DAC           0x110
+#define CLK_APB3_ADC0          0x118
+#define CLK_APB3_ADC1          0x120
+#define CLK_APB3_CAN0          0x128
+#define CLK_APB1_BUS           0x200
+#define CLK_APB1_MOTOCON_PWM   0x208
+#define CLK_APB1_I2C0          0x210
+#define CLK_APB1_I2S           0x218
+#define CLK_APB1_CAN1          0x220
+#define CLK_SPIFI              0x300
+#define CLK_CPU_BUS            0x400
+#define CLK_CPU_SPIFI          0x408
+#define CLK_CPU_GPIO           0x410
+#define CLK_CPU_LCD            0x418
+#define CLK_CPU_ETHERNET       0x420
+#define CLK_CPU_USB0           0x428
+#define CLK_CPU_EMC            0x430
+#define CLK_CPU_SDIO           0x438
+#define CLK_CPU_DMA            0x440
+#define CLK_CPU_CORE           0x448
+#define CLK_CPU_SCT            0x468
+#define CLK_CPU_USB1           0x470
+#define CLK_CPU_EMCDIV         0x478
+#define CLK_CPU_FLASHA         0x480
+#define CLK_CPU_FLASHB         0x488
+#define CLK_CPU_M0APP          0x490
+#define CLK_CPU_ADCHS          0x498
+#define CLK_CPU_EEPROM         0x4a0
+#define CLK_CPU_WWDT           0x500
+#define CLK_CPU_UART0          0x508
+#define CLK_CPU_UART1          0x510
+#define CLK_CPU_SSP0           0x518
+#define CLK_CPU_TIMER0         0x520
+#define CLK_CPU_TIMER1         0x528
+#define CLK_CPU_SCU            0x530
+#define CLK_CPU_CREG           0x538
+#define CLK_CPU_RITIMER                0x600
+#define CLK_CPU_UART2          0x608
+#define CLK_CPU_UART3          0x610
+#define CLK_CPU_TIMER2         0x618
+#define CLK_CPU_TIMER3         0x620
+#define CLK_CPU_SSP1           0x628
+#define CLK_CPU_QEI            0x630
+#define CLK_PERIPH_BUS         0x700
+#define CLK_PERIPH_CORE                0x710
+#define CLK_PERIPH_SGPIO       0x718
+#define CLK_USB0               0x800
+#define CLK_USB1               0x900
+#define CLK_SPI                        0xA00
+#define CLK_ADCHS              0xB00
+
+/* Clock Control Unit 2 (CCU2) clock offsets */
+#define CLK_AUDIO              0x100
+#define CLK_APB2_UART3         0x200
+#define CLK_APB2_UART2         0x300
+#define CLK_APB0_UART1         0x400
+#define CLK_APB0_UART0         0x500
+#define CLK_APB2_SSP1          0x600
+#define CLK_APB0_SSP0          0x700
+#define CLK_SDIO               0x800
diff --git a/include/dt-bindings/clock/lpc18xx-cgu.h b/include/dt-bindings/clock/lpc18xx-cgu.h
new file mode 100644 (file)
index 0000000..6e57c6d
--- /dev/null
@@ -0,0 +1,41 @@
+/*
+ * Copyright (c) 2015 Joachim Eastwood <manabian@gmail.com>
+ *
+ * This code is released using a dual license strategy: BSD/GPL
+ * You can choose the licence that better fits your requirements.
+ *
+ * Released under the terms of 3-clause BSD License
+ * Released under the terms of GNU General Public License Version 2.0
+ *
+ */
+
+/* LPC18xx/43xx base clock ids */
+#define BASE_SAFE_CLK          0
+#define BASE_USB0_CLK          1
+#define BASE_PERIPH_CLK                2
+#define BASE_USB1_CLK          3
+#define BASE_CPU_CLK           4
+#define BASE_SPIFI_CLK         5
+#define BASE_SPI_CLK           6
+#define BASE_PHY_RX_CLK                7
+#define BASE_PHY_TX_CLK                8
+#define BASE_APB1_CLK          9
+#define BASE_APB3_CLK          10
+#define BASE_LCD_CLK           11
+#define BASE_ADCHS_CLK         12
+#define BASE_SDIO_CLK          13
+#define BASE_SSP0_CLK          14
+#define BASE_SSP1_CLK          15
+#define BASE_UART0_CLK         16
+#define BASE_UART1_CLK         17
+#define BASE_UART2_CLK         18
+#define BASE_UART3_CLK         19
+#define BASE_OUT_CLK           20
+#define BASE_RES1_CLK          21
+#define BASE_RES2_CLK          22
+#define BASE_RES3_CLK          23
+#define BASE_RES4_CLK          24
+#define BASE_AUDIO_CLK         25
+#define BASE_CGU_OUT0_CLK      26
+#define BASE_CGU_OUT1_CLK      27
+#define BASE_CLK_MAX           (BASE_CGU_OUT1_CLK + 1)
index 591f7fba89e2ea89d57c2dd6b02c4aace50d83d8..7a510384a82ae19e1776bfc771b6a6626c987a9e 100644 (file)
@@ -48,6 +48,7 @@
 #define MMP2_CLK_SSP1                  78
 #define MMP2_CLK_SSP2                  79
 #define MMP2_CLK_SSP3                  80
+#define MMP2_CLK_TIMER                 81
 
 /* axi periphrals */
 #define MMP2_CLK_SDH0                  101
index 79630b9d74b81b4c81582d74fd5b74bbe037a455..3e45bdfe1aa45f60a6cb976304053202bda4ab8a 100644 (file)
@@ -18,7 +18,9 @@
 #define PXA168_CLK_PLL1_13_1_5         18
 #define PXA168_CLK_PLL1_2_1_5          19
 #define PXA168_CLK_PLL1_3_16           20
+#define PXA168_CLK_PLL1_192            21
 #define PXA168_CLK_UART_PLL            27
+#define PXA168_CLK_USB_PLL             28
 
 /* apb periphrals */
 #define PXA168_CLK_TWSI0               60
@@ -40,6 +42,7 @@
 #define PXA168_CLK_SSP2                        76
 #define PXA168_CLK_SSP3                        77
 #define PXA168_CLK_SSP4                        78
+#define PXA168_CLK_TIMER               79
 
 /* axi periphrals */
 #define PXA168_CLK_DFC                 100
diff --git a/include/dt-bindings/clock/marvell,pxa1928.h b/include/dt-bindings/clock/marvell,pxa1928.h
new file mode 100644 (file)
index 0000000..d4f2e18
--- /dev/null
@@ -0,0 +1,57 @@
+#ifndef __DTS_MARVELL_PXA1928_CLOCK_H
+#define __DTS_MARVELL_PXA1928_CLOCK_H
+
+/*
+ * Clock ID values here correspond to the control register offset/4.
+ */
+
+/* apb peripherals */
+#define PXA1928_CLK_RTC                        0x00
+#define PXA1928_CLK_TWSI0              0x01
+#define PXA1928_CLK_TWSI1              0x02
+#define PXA1928_CLK_TWSI2              0x03
+#define PXA1928_CLK_TWSI3              0x04
+#define PXA1928_CLK_OWIRE              0x05
+#define PXA1928_CLK_KPC                        0x06
+#define PXA1928_CLK_TB_ROTARY          0x07
+#define PXA1928_CLK_SW_JTAG            0x08
+#define PXA1928_CLK_TIMER1             0x09
+#define PXA1928_CLK_UART0              0x0b
+#define PXA1928_CLK_UART1              0x0c
+#define PXA1928_CLK_UART2              0x0d
+#define PXA1928_CLK_GPIO               0x0e
+#define PXA1928_CLK_PWM0               0x0f
+#define PXA1928_CLK_PWM1               0x10
+#define PXA1928_CLK_PWM2               0x11
+#define PXA1928_CLK_PWM3               0x12
+#define PXA1928_CLK_SSP0               0x13
+#define PXA1928_CLK_SSP1               0x14
+#define PXA1928_CLK_SSP2               0x15
+
+#define PXA1928_CLK_TWSI4              0x1f
+#define PXA1928_CLK_TWSI5              0x20
+#define PXA1928_CLK_UART3              0x22
+#define PXA1928_CLK_THSENS_GLOB                0x24
+#define PXA1928_CLK_THSENS_CPU         0x26
+#define PXA1928_CLK_THSENS_VPU         0x27
+#define PXA1928_CLK_THSENS_GC          0x28
+#define PXA1928_APBC_NR_CLKS           0x30
+
+
+/* axi peripherals */
+#define PXA1928_CLK_SDH0               0x15
+#define PXA1928_CLK_SDH1               0x16
+#define PXA1928_CLK_USB                        0x17
+#define PXA1928_CLK_NAND               0x18
+#define PXA1928_CLK_DMA                        0x19
+
+#define PXA1928_CLK_SDH2               0x3a
+#define PXA1928_CLK_SDH3               0x3b
+#define PXA1928_CLK_HSIC               0x3e
+#define PXA1928_CLK_SDH4               0x57
+#define PXA1928_CLK_GC3D               0x5d
+#define PXA1928_CLK_GC2D               0x5f
+
+#define PXA1928_APMU_NR_CLKS           0x60
+
+#endif
index 719cffb2bea207e43b9fe40498401e14bb7d9151..135082a0b62f6c472421f9dbcb568d9d92de8554 100644 (file)
@@ -18,7 +18,9 @@
 #define PXA910_CLK_PLL1_13_1_5         18
 #define PXA910_CLK_PLL1_2_1_5          19
 #define PXA910_CLK_PLL1_3_16           20
+#define PXA910_CLK_PLL1_192            21
 #define PXA910_CLK_UART_PLL            27
+#define PXA910_CLK_USB_PLL             28
 
 /* apb periphrals */
 #define PXA910_CLK_TWSI0               60
@@ -37,6 +39,8 @@
 #define PXA910_CLK_UART2               73
 #define PXA910_CLK_SSP0                        74
 #define PXA910_CLK_SSP1                        75
+#define PXA910_CLK_TIMER0              76
+#define PXA910_CLK_TIMER1              77
 
 /* axi periphrals */
 #define PXA910_CLK_DFC                 100
diff --git a/include/dt-bindings/clock/meson8b-clkc.h b/include/dt-bindings/clock/meson8b-clkc.h
new file mode 100644 (file)
index 0000000..bd2720d
--- /dev/null
@@ -0,0 +1,25 @@
+/*
+ * Meson8b clock tree IDs
+ */
+
+#ifndef __MESON8B_CLKC_H
+#define __MESON8B_CLKC_H
+
+#define CLKID_UNUSED           0
+#define CLKID_XTAL             1
+#define CLKID_PLL_FIXED                2
+#define CLKID_PLL_VID          3
+#define CLKID_PLL_SYS          4
+#define CLKID_FCLK_DIV2                5
+#define CLKID_FCLK_DIV3                6
+#define CLKID_FCLK_DIV4                7
+#define CLKID_FCLK_DIV5                8
+#define CLKID_FCLK_DIV7                9
+#define CLKID_CLK81            10
+#define CLKID_MALI             11
+#define CLKID_CPUCLK           12
+#define CLKID_ZERO             13
+
+#define CLK_NR_CLKS            (CLKID_ZERO + 1)
+
+#endif /* __MESON8B_CLKC_H */
diff --git a/include/dt-bindings/clock/mt8135-clk.h b/include/dt-bindings/clock/mt8135-clk.h
new file mode 100644 (file)
index 0000000..6dac6c0
--- /dev/null
@@ -0,0 +1,194 @@
+/*
+ * Copyright (c) 2014 MediaTek Inc.
+ * Author: James Liao <jamesjj.liao@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DT_BINDINGS_CLK_MT8135_H
+#define _DT_BINDINGS_CLK_MT8135_H
+
+/* TOPCKGEN */
+
+#define CLK_TOP_DSI0_LNTC_DSICLK       1
+#define CLK_TOP_HDMITX_CLKDIG_CTS      2
+#define CLK_TOP_CLKPH_MCK              3
+#define CLK_TOP_CPUM_TCK_IN            4
+#define CLK_TOP_MAINPLL_806M           5
+#define CLK_TOP_MAINPLL_537P3M         6
+#define CLK_TOP_MAINPLL_322P4M         7
+#define CLK_TOP_MAINPLL_230P3M         8
+#define CLK_TOP_UNIVPLL_624M           9
+#define CLK_TOP_UNIVPLL_416M           10
+#define CLK_TOP_UNIVPLL_249P6M         11
+#define CLK_TOP_UNIVPLL_178P3M         12
+#define CLK_TOP_UNIVPLL_48M            13
+#define CLK_TOP_MMPLL_D2               14
+#define CLK_TOP_MMPLL_D3               15
+#define CLK_TOP_MMPLL_D5               16
+#define CLK_TOP_MMPLL_D7               17
+#define CLK_TOP_MMPLL_D4               18
+#define CLK_TOP_MMPLL_D6               19
+#define CLK_TOP_SYSPLL_D2              20
+#define CLK_TOP_SYSPLL_D4              21
+#define CLK_TOP_SYSPLL_D6              22
+#define CLK_TOP_SYSPLL_D8              23
+#define CLK_TOP_SYSPLL_D10             24
+#define CLK_TOP_SYSPLL_D12             25
+#define CLK_TOP_SYSPLL_D16             26
+#define CLK_TOP_SYSPLL_D24             27
+#define CLK_TOP_SYSPLL_D3              28
+#define CLK_TOP_SYSPLL_D2P5            29
+#define CLK_TOP_SYSPLL_D5              30
+#define CLK_TOP_SYSPLL_D3P5            31
+#define CLK_TOP_UNIVPLL1_D2            32
+#define CLK_TOP_UNIVPLL1_D4            33
+#define CLK_TOP_UNIVPLL1_D6            34
+#define CLK_TOP_UNIVPLL1_D8            35
+#define CLK_TOP_UNIVPLL1_D10           36
+#define CLK_TOP_UNIVPLL2_D2            37
+#define CLK_TOP_UNIVPLL2_D4            38
+#define CLK_TOP_UNIVPLL2_D6            39
+#define CLK_TOP_UNIVPLL2_D8            40
+#define CLK_TOP_UNIVPLL_D3             41
+#define CLK_TOP_UNIVPLL_D5             42
+#define CLK_TOP_UNIVPLL_D7             43
+#define CLK_TOP_UNIVPLL_D10            44
+#define CLK_TOP_UNIVPLL_D26            45
+#define CLK_TOP_APLL                   46
+#define CLK_TOP_APLL_D4                        47
+#define CLK_TOP_APLL_D8                        48
+#define CLK_TOP_APLL_D16               49
+#define CLK_TOP_APLL_D24               50
+#define CLK_TOP_LVDSPLL_D2             51
+#define CLK_TOP_LVDSPLL_D4             52
+#define CLK_TOP_LVDSPLL_D8             53
+#define CLK_TOP_LVDSTX_CLKDIG_CT       54
+#define CLK_TOP_VPLL_DPIX              55
+#define CLK_TOP_TVHDMI_H               56
+#define CLK_TOP_HDMITX_CLKDIG_D2       57
+#define CLK_TOP_HDMITX_CLKDIG_D3       58
+#define CLK_TOP_TVHDMI_D2              59
+#define CLK_TOP_TVHDMI_D4              60
+#define CLK_TOP_MEMPLL_MCK_D4          61
+#define CLK_TOP_AXI_SEL                        62
+#define CLK_TOP_SMI_SEL                        63
+#define CLK_TOP_MFG_SEL                        64
+#define CLK_TOP_IRDA_SEL               65
+#define CLK_TOP_CAM_SEL                        66
+#define CLK_TOP_AUD_INTBUS_SEL         67
+#define CLK_TOP_JPG_SEL                        68
+#define CLK_TOP_DISP_SEL               69
+#define CLK_TOP_MSDC30_1_SEL           70
+#define CLK_TOP_MSDC30_2_SEL           71
+#define CLK_TOP_MSDC30_3_SEL           72
+#define CLK_TOP_MSDC30_4_SEL           73
+#define CLK_TOP_USB20_SEL              74
+#define CLK_TOP_VENC_SEL               75
+#define CLK_TOP_SPI_SEL                        76
+#define CLK_TOP_UART_SEL               77
+#define CLK_TOP_MEM_SEL                        78
+#define CLK_TOP_CAMTG_SEL              79
+#define CLK_TOP_AUDIO_SEL              80
+#define CLK_TOP_FIX_SEL                        81
+#define CLK_TOP_VDEC_SEL               82
+#define CLK_TOP_DDRPHYCFG_SEL          83
+#define CLK_TOP_DPILVDS_SEL            84
+#define CLK_TOP_PMICSPI_SEL            85
+#define CLK_TOP_MSDC30_0_SEL           86
+#define CLK_TOP_SMI_MFG_AS_SEL         87
+#define CLK_TOP_GCPU_SEL               88
+#define CLK_TOP_DPI1_SEL               89
+#define CLK_TOP_CCI_SEL                        90
+#define CLK_TOP_APLL_SEL               91
+#define CLK_TOP_HDMIPLL_SEL            92
+#define CLK_TOP_NR_CLK                 93
+
+/* APMIXED_SYS */
+
+#define CLK_APMIXED_ARMPLL1            1
+#define CLK_APMIXED_ARMPLL2            2
+#define CLK_APMIXED_MAINPLL            3
+#define CLK_APMIXED_UNIVPLL            4
+#define CLK_APMIXED_MMPLL              5
+#define CLK_APMIXED_MSDCPLL            6
+#define CLK_APMIXED_TVDPLL             7
+#define CLK_APMIXED_LVDSPLL            8
+#define CLK_APMIXED_AUDPLL             9
+#define CLK_APMIXED_VDECPLL            10
+#define CLK_APMIXED_NR_CLK             11
+
+/* INFRA_SYS */
+
+#define CLK_INFRA_PMIC_WRAP            1
+#define CLK_INFRA_PMICSPI              2
+#define CLK_INFRA_CCIF1_AP_CTRL                3
+#define CLK_INFRA_CCIF0_AP_CTRL                4
+#define CLK_INFRA_KP                   5
+#define CLK_INFRA_CPUM                 6
+#define CLK_INFRA_M4U                  7
+#define CLK_INFRA_MFGAXI               8
+#define CLK_INFRA_DEVAPC               9
+#define CLK_INFRA_AUDIO                        10
+#define CLK_INFRA_MFG_BUS              11
+#define CLK_INFRA_SMI                  12
+#define CLK_INFRA_DBGCLK               13
+#define CLK_INFRA_NR_CLK               14
+
+/* PERI_SYS */
+
+#define CLK_PERI_I2C5                  1
+#define CLK_PERI_I2C4                  2
+#define CLK_PERI_I2C3                  3
+#define CLK_PERI_I2C2                  4
+#define CLK_PERI_I2C1                  5
+#define CLK_PERI_I2C0                  6
+#define CLK_PERI_UART3                 7
+#define CLK_PERI_UART2                 8
+#define CLK_PERI_UART1                 9
+#define CLK_PERI_UART0                 10
+#define CLK_PERI_IRDA                  11
+#define CLK_PERI_NLI                   12
+#define CLK_PERI_MD_HIF                        13
+#define CLK_PERI_AP_HIF                        14
+#define CLK_PERI_MSDC30_3              15
+#define CLK_PERI_MSDC30_2              16
+#define CLK_PERI_MSDC30_1              17
+#define CLK_PERI_MSDC20_2              18
+#define CLK_PERI_MSDC20_1              19
+#define CLK_PERI_AP_DMA                        20
+#define CLK_PERI_USB1                  21
+#define CLK_PERI_USB0                  22
+#define CLK_PERI_PWM                   23
+#define CLK_PERI_PWM7                  24
+#define CLK_PERI_PWM6                  25
+#define CLK_PERI_PWM5                  26
+#define CLK_PERI_PWM4                  27
+#define CLK_PERI_PWM3                  28
+#define CLK_PERI_PWM2                  29
+#define CLK_PERI_PWM1                  30
+#define CLK_PERI_THERM                 31
+#define CLK_PERI_NFI                   32
+#define CLK_PERI_USBSLV                        33
+#define CLK_PERI_USB1_MCU              34
+#define CLK_PERI_USB0_MCU              35
+#define CLK_PERI_GCPU                  36
+#define CLK_PERI_FHCTL                 37
+#define CLK_PERI_SPI1                  38
+#define CLK_PERI_AUXADC                        39
+#define CLK_PERI_PERI_PWRAP            40
+#define CLK_PERI_I2C6                  41
+#define CLK_PERI_UART0_SEL             42
+#define CLK_PERI_UART1_SEL             43
+#define CLK_PERI_UART2_SEL             44
+#define CLK_PERI_UART3_SEL             45
+#define CLK_PERI_NR_CLK                        46
+
+#endif /* _DT_BINDINGS_CLK_MT8135_H */
diff --git a/include/dt-bindings/clock/mt8173-clk.h b/include/dt-bindings/clock/mt8173-clk.h
new file mode 100644 (file)
index 0000000..4ad76ed
--- /dev/null
@@ -0,0 +1,235 @@
+/*
+ * Copyright (c) 2014 MediaTek Inc.
+ * Author: James Liao <jamesjj.liao@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DT_BINDINGS_CLK_MT8173_H
+#define _DT_BINDINGS_CLK_MT8173_H
+
+/* TOPCKGEN */
+
+#define CLK_TOP_CLKPH_MCK_O            1
+#define CLK_TOP_DPI                    2
+#define CLK_TOP_USB_SYSPLL_125M                3
+#define CLK_TOP_HDMITX_DIG_CTS         4
+#define CLK_TOP_ARMCA7PLL_754M         5
+#define CLK_TOP_ARMCA7PLL_502M         6
+#define CLK_TOP_MAIN_H546M             7
+#define CLK_TOP_MAIN_H364M             8
+#define CLK_TOP_MAIN_H218P4M           9
+#define CLK_TOP_MAIN_H156M             10
+#define CLK_TOP_TVDPLL_445P5M          11
+#define CLK_TOP_TVDPLL_594M            12
+#define CLK_TOP_UNIV_624M              13
+#define CLK_TOP_UNIV_416M              14
+#define CLK_TOP_UNIV_249P6M            15
+#define CLK_TOP_UNIV_178P3M            16
+#define CLK_TOP_UNIV_48M               17
+#define CLK_TOP_CLKRTC_EXT             18
+#define CLK_TOP_CLKRTC_INT             19
+#define CLK_TOP_FPC                    20
+#define CLK_TOP_HDMITXPLL_D2           21
+#define CLK_TOP_HDMITXPLL_D3           22
+#define CLK_TOP_ARMCA7PLL_D2           23
+#define CLK_TOP_ARMCA7PLL_D3           24
+#define CLK_TOP_APLL1                  25
+#define CLK_TOP_APLL2                  26
+#define CLK_TOP_DMPLL                  27
+#define CLK_TOP_DMPLL_D2               28
+#define CLK_TOP_DMPLL_D4               29
+#define CLK_TOP_DMPLL_D8               30
+#define CLK_TOP_DMPLL_D16              31
+#define CLK_TOP_LVDSPLL_D2             32
+#define CLK_TOP_LVDSPLL_D4             33
+#define CLK_TOP_LVDSPLL_D8             34
+#define CLK_TOP_MMPLL                  35
+#define CLK_TOP_MMPLL_D2               36
+#define CLK_TOP_MSDCPLL                        37
+#define CLK_TOP_MSDCPLL_D2             38
+#define CLK_TOP_MSDCPLL_D4             39
+#define CLK_TOP_MSDCPLL2               40
+#define CLK_TOP_MSDCPLL2_D2            41
+#define CLK_TOP_MSDCPLL2_D4            42
+#define CLK_TOP_SYSPLL_D2              43
+#define CLK_TOP_SYSPLL1_D2             44
+#define CLK_TOP_SYSPLL1_D4             45
+#define CLK_TOP_SYSPLL1_D8             46
+#define CLK_TOP_SYSPLL1_D16            47
+#define CLK_TOP_SYSPLL_D3              48
+#define CLK_TOP_SYSPLL2_D2             49
+#define CLK_TOP_SYSPLL2_D4             50
+#define CLK_TOP_SYSPLL_D5              51
+#define CLK_TOP_SYSPLL3_D2             52
+#define CLK_TOP_SYSPLL3_D4             53
+#define CLK_TOP_SYSPLL_D7              54
+#define CLK_TOP_SYSPLL4_D2             55
+#define CLK_TOP_SYSPLL4_D4             56
+#define CLK_TOP_TVDPLL                 57
+#define CLK_TOP_TVDPLL_D2              58
+#define CLK_TOP_TVDPLL_D4              59
+#define CLK_TOP_TVDPLL_D8              60
+#define CLK_TOP_TVDPLL_D16             61
+#define CLK_TOP_UNIVPLL_D2             62
+#define CLK_TOP_UNIVPLL1_D2            63
+#define CLK_TOP_UNIVPLL1_D4            64
+#define CLK_TOP_UNIVPLL1_D8            65
+#define CLK_TOP_UNIVPLL_D3             66
+#define CLK_TOP_UNIVPLL2_D2            67
+#define CLK_TOP_UNIVPLL2_D4            68
+#define CLK_TOP_UNIVPLL2_D8            69
+#define CLK_TOP_UNIVPLL_D5             70
+#define CLK_TOP_UNIVPLL3_D2            71
+#define CLK_TOP_UNIVPLL3_D4            72
+#define CLK_TOP_UNIVPLL3_D8            73
+#define CLK_TOP_UNIVPLL_D7             74
+#define CLK_TOP_UNIVPLL_D26            75
+#define CLK_TOP_UNIVPLL_D52            76
+#define CLK_TOP_VCODECPLL              77
+#define CLK_TOP_VCODECPLL_370P5                78
+#define CLK_TOP_VENCPLL                        79
+#define CLK_TOP_VENCPLL_D2             80
+#define CLK_TOP_VENCPLL_D4             81
+#define CLK_TOP_AXI_SEL                        82
+#define CLK_TOP_MEM_SEL                        83
+#define CLK_TOP_DDRPHYCFG_SEL          84
+#define CLK_TOP_MM_SEL                 85
+#define CLK_TOP_PWM_SEL                        86
+#define CLK_TOP_VDEC_SEL               87
+#define CLK_TOP_VENC_SEL               88
+#define CLK_TOP_MFG_SEL                        89
+#define CLK_TOP_CAMTG_SEL              90
+#define CLK_TOP_UART_SEL               91
+#define CLK_TOP_SPI_SEL                        92
+#define CLK_TOP_USB20_SEL              93
+#define CLK_TOP_USB30_SEL              94
+#define CLK_TOP_MSDC50_0_H_SEL         95
+#define CLK_TOP_MSDC50_0_SEL           96
+#define CLK_TOP_MSDC30_1_SEL           97
+#define CLK_TOP_MSDC30_2_SEL           98
+#define CLK_TOP_MSDC30_3_SEL           99
+#define CLK_TOP_AUDIO_SEL              100
+#define CLK_TOP_AUD_INTBUS_SEL         101
+#define CLK_TOP_PMICSPI_SEL            102
+#define CLK_TOP_SCP_SEL                        103
+#define CLK_TOP_ATB_SEL                        104
+#define CLK_TOP_VENC_LT_SEL            105
+#define CLK_TOP_DPI0_SEL               106
+#define CLK_TOP_IRDA_SEL               107
+#define CLK_TOP_CCI400_SEL             108
+#define CLK_TOP_AUD_1_SEL              109
+#define CLK_TOP_AUD_2_SEL              110
+#define CLK_TOP_MEM_MFG_IN_SEL         111
+#define CLK_TOP_AXI_MFG_IN_SEL         112
+#define CLK_TOP_SCAM_SEL               113
+#define CLK_TOP_SPINFI_IFR_SEL         114
+#define CLK_TOP_HDMI_SEL               115
+#define CLK_TOP_DPILVDS_SEL            116
+#define CLK_TOP_MSDC50_2_H_SEL         117
+#define CLK_TOP_HDCP_SEL               118
+#define CLK_TOP_HDCP_24M_SEL           119
+#define CLK_TOP_RTC_SEL                        120
+#define CLK_TOP_APLL1_DIV0             121
+#define CLK_TOP_APLL1_DIV1             122
+#define CLK_TOP_APLL1_DIV2             123
+#define CLK_TOP_APLL1_DIV3             124
+#define CLK_TOP_APLL1_DIV4             125
+#define CLK_TOP_APLL1_DIV5             126
+#define CLK_TOP_APLL2_DIV0             127
+#define CLK_TOP_APLL2_DIV1             128
+#define CLK_TOP_APLL2_DIV2             129
+#define CLK_TOP_APLL2_DIV3             130
+#define CLK_TOP_APLL2_DIV4             131
+#define CLK_TOP_APLL2_DIV5             132
+#define CLK_TOP_I2S0_M_SEL             133
+#define CLK_TOP_I2S1_M_SEL             134
+#define CLK_TOP_I2S2_M_SEL             135
+#define CLK_TOP_I2S3_M_SEL             136
+#define CLK_TOP_I2S3_B_SEL             137
+#define CLK_TOP_NR_CLK                 138
+
+/* APMIXED_SYS */
+
+#define CLK_APMIXED_ARMCA15PLL 1
+#define CLK_APMIXED_ARMCA7PLL  2
+#define CLK_APMIXED_MAINPLL            3
+#define CLK_APMIXED_UNIVPLL            4
+#define CLK_APMIXED_MMPLL              5
+#define CLK_APMIXED_MSDCPLL            6
+#define CLK_APMIXED_VENCPLL            7
+#define CLK_APMIXED_TVDPLL             8
+#define CLK_APMIXED_MPLL               9
+#define CLK_APMIXED_VCODECPLL          10
+#define CLK_APMIXED_APLL1              11
+#define CLK_APMIXED_APLL2              12
+#define CLK_APMIXED_LVDSPLL            13
+#define CLK_APMIXED_MSDCPLL2           14
+#define CLK_APMIXED_NR_CLK             15
+
+/* INFRA_SYS */
+
+#define CLK_INFRA_DBGCLK               1
+#define CLK_INFRA_SMI                  2
+#define CLK_INFRA_AUDIO                        3
+#define CLK_INFRA_GCE                  4
+#define CLK_INFRA_L2C_SRAM             5
+#define CLK_INFRA_M4U                  6
+#define CLK_INFRA_CPUM                 7
+#define CLK_INFRA_KP                   8
+#define CLK_INFRA_CEC                  9
+#define CLK_INFRA_PMICSPI              10
+#define CLK_INFRA_PMICWRAP             11
+#define CLK_INFRA_NR_CLK               12
+
+/* PERI_SYS */
+
+#define CLK_PERI_NFI                   1
+#define CLK_PERI_THERM                 2
+#define CLK_PERI_PWM1                  3
+#define CLK_PERI_PWM2                  4
+#define CLK_PERI_PWM3                  5
+#define CLK_PERI_PWM4                  6
+#define CLK_PERI_PWM5                  7
+#define CLK_PERI_PWM6                  8
+#define CLK_PERI_PWM7                  9
+#define CLK_PERI_PWM                   10
+#define CLK_PERI_USB0                  11
+#define CLK_PERI_USB1                  12
+#define CLK_PERI_AP_DMA                        13
+#define CLK_PERI_MSDC30_0              14
+#define CLK_PERI_MSDC30_1              15
+#define CLK_PERI_MSDC30_2              16
+#define CLK_PERI_MSDC30_3              17
+#define CLK_PERI_NLI_ARB               18
+#define CLK_PERI_IRDA                  19
+#define CLK_PERI_UART0                 20
+#define CLK_PERI_UART1                 21
+#define CLK_PERI_UART2                 22
+#define CLK_PERI_UART3                 23
+#define CLK_PERI_I2C0                  24
+#define CLK_PERI_I2C1                  25
+#define CLK_PERI_I2C2                  26
+#define CLK_PERI_I2C3                  27
+#define CLK_PERI_I2C4                  28
+#define CLK_PERI_AUXADC                        29
+#define CLK_PERI_SPI0                  30
+#define CLK_PERI_I2C5                  31
+#define CLK_PERI_NFIECC                        32
+#define CLK_PERI_SPI                   33
+#define CLK_PERI_IRRX                  34
+#define CLK_PERI_I2C6                  35
+#define CLK_PERI_UART0_SEL             36
+#define CLK_PERI_UART1_SEL             37
+#define CLK_PERI_UART2_SEL             38
+#define CLK_PERI_UART3_SEL             39
+#define CLK_PERI_NR_CLK                        40
+
+#endif /* _DT_BINDINGS_CLK_MT8173_H */
diff --git a/include/dt-bindings/reset-controller/mt8135-resets.h b/include/dt-bindings/reset-controller/mt8135-resets.h
new file mode 100644 (file)
index 0000000..1fb6295
--- /dev/null
@@ -0,0 +1,64 @@
+/*
+ * Copyright (c) 2014 MediaTek Inc.
+ * Author: Flora Fu, MediaTek
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DT_BINDINGS_RESET_CONTROLLER_MT8135
+#define _DT_BINDINGS_RESET_CONTROLLER_MT8135
+
+/* INFRACFG resets */
+#define MT8135_INFRA_EMI_REG_RST        0
+#define MT8135_INFRA_DRAMC0_A0_RST      1
+#define MT8135_INFRA_CCIF0_RST          2
+#define MT8135_INFRA_APCIRQ_EINT_RST    3
+#define MT8135_INFRA_APXGPT_RST         4
+#define MT8135_INFRA_SCPSYS_RST         5
+#define MT8135_INFRA_CCIF1_RST          6
+#define MT8135_INFRA_PMIC_WRAP_RST      7
+#define MT8135_INFRA_KP_RST             8
+#define MT8135_INFRA_EMI_RST            32
+#define MT8135_INFRA_DRAMC0_RST         34
+#define MT8135_INFRA_SMI_RST            35
+#define MT8135_INFRA_M4U_RST            36
+
+/*  PERICFG resets */
+#define MT8135_PERI_UART0_SW_RST        0
+#define MT8135_PERI_UART1_SW_RST        1
+#define MT8135_PERI_UART2_SW_RST        2
+#define MT8135_PERI_UART3_SW_RST        3
+#define MT8135_PERI_IRDA_SW_RST         4
+#define MT8135_PERI_PTP_SW_RST          5
+#define MT8135_PERI_AP_HIF_SW_RST       6
+#define MT8135_PERI_GPCU_SW_RST         7
+#define MT8135_PERI_MD_HIF_SW_RST       8
+#define MT8135_PERI_NLI_SW_RST          9
+#define MT8135_PERI_AUXADC_SW_RST       10
+#define MT8135_PERI_DMA_SW_RST          11
+#define MT8135_PERI_NFI_SW_RST          14
+#define MT8135_PERI_PWM_SW_RST          15
+#define MT8135_PERI_THERM_SW_RST        16
+#define MT8135_PERI_MSDC0_SW_RST        17
+#define MT8135_PERI_MSDC1_SW_RST        18
+#define MT8135_PERI_MSDC2_SW_RST        19
+#define MT8135_PERI_MSDC3_SW_RST        20
+#define MT8135_PERI_I2C0_SW_RST         22
+#define MT8135_PERI_I2C1_SW_RST         23
+#define MT8135_PERI_I2C2_SW_RST         24
+#define MT8135_PERI_I2C3_SW_RST         25
+#define MT8135_PERI_I2C4_SW_RST         26
+#define MT8135_PERI_I2C5_SW_RST         27
+#define MT8135_PERI_I2C6_SW_RST         28
+#define MT8135_PERI_USB_SW_RST          29
+#define MT8135_PERI_SPI1_SW_RST         33
+#define MT8135_PERI_PWRAP_BRIDGE_SW_RST 34
+
+#endif  /* _DT_BINDINGS_RESET_CONTROLLER_MT8135 */
diff --git a/include/dt-bindings/reset-controller/mt8173-resets.h b/include/dt-bindings/reset-controller/mt8173-resets.h
new file mode 100644 (file)
index 0000000..9464b37
--- /dev/null
@@ -0,0 +1,63 @@
+/*
+ * Copyright (c) 2014 MediaTek Inc.
+ * Author: Flora Fu, MediaTek
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DT_BINDINGS_RESET_CONTROLLER_MT8173
+#define _DT_BINDINGS_RESET_CONTROLLER_MT8173
+
+/* INFRACFG resets */
+#define MT8173_INFRA_EMI_REG_RST        0
+#define MT8173_INFRA_DRAMC0_A0_RST      1
+#define MT8173_INFRA_APCIRQ_EINT_RST    3
+#define MT8173_INFRA_APXGPT_RST         4
+#define MT8173_INFRA_SCPSYS_RST         5
+#define MT8173_INFRA_KP_RST             6
+#define MT8173_INFRA_PMIC_WRAP_RST      7
+#define MT8173_INFRA_MPIP_RST           8
+#define MT8173_INFRA_CEC_RST            9
+#define MT8173_INFRA_EMI_RST            32
+#define MT8173_INFRA_DRAMC0_RST         34
+#define MT8173_INFRA_APMIXEDSYS_RST     35
+#define MT8173_INFRA_MIPI_DSI_RST       36
+#define MT8173_INFRA_TRNG_RST           37
+#define MT8173_INFRA_SYSIRQ_RST         38
+#define MT8173_INFRA_MIPI_CSI_RST       39
+#define MT8173_INFRA_GCE_FAXI_RST       40
+#define MT8173_INFRA_MMIOMMURST         47
+
+
+/*  PERICFG resets */
+#define MT8173_PERI_UART0_SW_RST        0
+#define MT8173_PERI_UART1_SW_RST        1
+#define MT8173_PERI_UART2_SW_RST        2
+#define MT8173_PERI_UART3_SW_RST        3
+#define MT8173_PERI_IRRX_SW_RST         4
+#define MT8173_PERI_PWM_SW_RST          8
+#define MT8173_PERI_AUXADC_SW_RST       10
+#define MT8173_PERI_DMA_SW_RST          11
+#define MT8173_PERI_I2C6_SW_RST         13
+#define MT8173_PERI_NFI_SW_RST          14
+#define MT8173_PERI_THERM_SW_RST        16
+#define MT8173_PERI_MSDC2_SW_RST        17
+#define MT8173_PERI_MSDC3_SW_RST        18
+#define MT8173_PERI_MSDC0_SW_RST        19
+#define MT8173_PERI_MSDC1_SW_RST        20
+#define MT8173_PERI_I2C0_SW_RST         22
+#define MT8173_PERI_I2C1_SW_RST         23
+#define MT8173_PERI_I2C2_SW_RST         24
+#define MT8173_PERI_I2C3_SW_RST         25
+#define MT8173_PERI_I2C4_SW_RST         26
+#define MT8173_PERI_HDMI_SW_RST         29
+#define MT8173_PERI_SPI0_SW_RST         33
+
+#endif  /* _DT_BINDINGS_RESET_CONTROLLER_MT8173 */
index 1618cdfb38c7b8e312f4a39b412593d4c480bc07..c471dfc93b716e162815709a25e5a705c4b45159 100644 (file)
@@ -53,7 +53,7 @@ static inline acpi_handle acpi_device_handle(struct acpi_device *adev)
        return adev ? adev->handle : NULL;
 }
 
-#define ACPI_COMPANION(dev)            acpi_node((dev)->fwnode)
+#define ACPI_COMPANION(dev)            to_acpi_node((dev)->fwnode)
 #define ACPI_COMPANION_SET(dev, adev)  set_primary_fwnode(dev, (adev) ? \
        acpi_fwnode_handle(adev) : NULL)
 #define ACPI_HANDLE(dev)               acpi_device_handle(ACPI_COMPANION(dev))
@@ -454,7 +454,7 @@ static inline bool is_acpi_node(struct fwnode_handle *fwnode)
        return false;
 }
 
-static inline struct acpi_device *acpi_node(struct fwnode_handle *fwnode)
+static inline struct acpi_device *to_acpi_node(struct fwnode_handle *fwnode)
 {
        return NULL;
 }
index a48d90e3bcbb86d98d911f21d248897c3c563730..a23209b43842106c6a74de8a51d6b4b752613157 100644 (file)
@@ -50,10 +50,10 @@ enum wb_stat_item {
  */
 struct bdi_writeback_congested {
        unsigned long state;            /* WB_[a]sync_congested flags */
+       atomic_t refcnt;                /* nr of attached wb's and blkg */
 
 #ifdef CONFIG_CGROUP_WRITEBACK
        struct backing_dev_info *bdi;   /* the associated bdi */
-       atomic_t refcnt;                /* nr of attached wb's and blkg */
        int blkcg_id;                   /* ID of the associated blkcg */
        struct rb_node rb_node;         /* on bdi->cgwb_congestion_tree */
 #endif
@@ -150,11 +150,12 @@ struct backing_dev_info {
        atomic_long_t tot_write_bandwidth;
 
        struct bdi_writeback wb;  /* the root writeback info for this bdi */
-       struct bdi_writeback_congested wb_congested; /* its congested state */
 #ifdef CONFIG_CGROUP_WRITEBACK
        struct radix_tree_root cgwb_tree; /* radix tree of active cgroup wbs */
        struct rb_root cgwb_congested_tree; /* their congested states */
        atomic_t usage_cnt; /* counts both cgwbs and cgwb_congested's */
+#else
+       struct bdi_writeback_congested *wb_congested;
 #endif
        wait_queue_head_t wb_waitq;
 
index 0e6d4828a77a358edd3c77ef7d14eecc6f6001b3..0fe9df983ab7410c67143ecdc7a26bac958b4597 100644 (file)
@@ -15,6 +15,7 @@
 #include <linux/writeback.h>
 #include <linux/blk-cgroup.h>
 #include <linux/backing-dev-defs.h>
+#include <linux/slab.h>
 
 int __must_check bdi_init(struct backing_dev_info *bdi);
 void bdi_destroy(struct backing_dev_info *bdi);
@@ -465,11 +466,14 @@ static inline bool inode_cgwb_enabled(struct inode *inode)
 static inline struct bdi_writeback_congested *
 wb_congested_get_create(struct backing_dev_info *bdi, int blkcg_id, gfp_t gfp)
 {
-       return bdi->wb.congested;
+       atomic_inc(&bdi->wb_congested->refcnt);
+       return bdi->wb_congested;
 }
 
 static inline void wb_congested_put(struct bdi_writeback_congested *congested)
 {
+       if (atomic_dec_and_test(&congested->refcnt))
+               kfree(congested);
 }
 
 static inline struct bdi_writeback *wb_find_current(struct backing_dev_info *bdi)
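
In the !CONFIG_CGROUP_WRITEBACK case above the get/put pair now genuinely counts references on bdi->wb_congested instead of being a no-op. A minimal pairing sketch, assuming bdi and blkcg_id come from the caller:

        struct bdi_writeback_congested *congested;

        congested = wb_congested_get_create(bdi, blkcg_id, GFP_KERNEL);
        if (congested) {
                /* ... test or set congestion state ... */
                wb_congested_put(congested);    /* may kfree() the last reference */
        }
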
index 73b45225a7ca1bbe749ea3b62e38a35add22936b..e6797ded700ec8f7ef6a8fccccddf71212715077 100644 (file)
@@ -317,6 +317,13 @@ sb_getblk(struct super_block *sb, sector_t block)
        return __getblk_gfp(sb->s_bdev, block, sb->s_blocksize, __GFP_MOVABLE);
 }
 
+
+static inline struct buffer_head *
+sb_getblk_gfp(struct super_block *sb, sector_t block, gfp_t gfp)
+{
+       return __getblk_gfp(sb->s_bdev, block, sb->s_blocksize, gfp);
+}
+
 static inline struct buffer_head *
 sb_find_get_block(struct super_block *sb, sector_t block)
 {
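
The new sb_getblk_gfp() lets a caller pick the allocation mask instead of the __GFP_MOVABLE default used by sb_getblk(). A hedged sketch (the exact mask is the caller's choice; buffer handling abbreviated):

        struct buffer_head *bh;

        /* e.g. avoid filesystem recursion while holding transaction state */
        bh = sb_getblk_gfp(sb, block, GFP_NOFS);
        if (unlikely(!bh))
                return -ENOMEM;
        /* ... lock_buffer()/submit I/O as usual ... */
        brelse(bh);
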
index 30f92cefaa721d040d5f1e2d92ec16429f8c00e3..9ebee53d3bf586ef80690fa778c0a3e030e410f1 100644 (file)
@@ -43,9 +43,9 @@ struct ceph_options {
        int flags;
        struct ceph_fsid fsid;
        struct ceph_entity_addr my_addr;
-       int mount_timeout;
-       int osd_idle_ttl;
-       int osd_keepalive_timeout;
+       unsigned long mount_timeout;            /* jiffies */
+       unsigned long osd_idle_ttl;             /* jiffies */
+       unsigned long osd_keepalive_timeout;    /* jiffies */
 
        /*
         * any type that can't be simply compared or doesn't need
@@ -63,9 +63,9 @@ struct ceph_options {
 /*
  * defaults
  */
-#define CEPH_MOUNT_TIMEOUT_DEFAULT  60
-#define CEPH_OSD_KEEPALIVE_DEFAULT  5
-#define CEPH_OSD_IDLE_TTL_DEFAULT    60
+#define CEPH_MOUNT_TIMEOUT_DEFAULT     msecs_to_jiffies(60 * 1000)
+#define CEPH_OSD_KEEPALIVE_DEFAULT     msecs_to_jiffies(5 * 1000)
+#define CEPH_OSD_IDLE_TTL_DEFAULT      msecs_to_jiffies(60 * 1000)
 
 #define CEPH_MSG_MAX_FRONT_LEN (16*1024*1024)
 #define CEPH_MSG_MAX_MIDDLE_LEN        (16*1024*1024)
@@ -93,13 +93,9 @@ enum {
        CEPH_MOUNT_SHUTDOWN,
 };
 
-/*
- * subtract jiffies
- */
-static inline unsigned long time_sub(unsigned long a, unsigned long b)
+static inline unsigned long ceph_timeout_jiffies(unsigned long timeout)
 {
-       BUG_ON(time_after(b, a));
-       return (long)a - (long)b;
+       return timeout ?: MAX_SCHEDULE_TIMEOUT;
 }
 
 struct ceph_mds_client;
@@ -178,6 +174,7 @@ static inline int calc_pages_for(u64 off, u64 len)
 
 extern struct kmem_cache *ceph_inode_cachep;
 extern struct kmem_cache *ceph_cap_cachep;
+extern struct kmem_cache *ceph_cap_flush_cachep;
 extern struct kmem_cache *ceph_dentry_cachep;
 extern struct kmem_cache *ceph_file_cachep;
 
index 61b19c46bdb33d5fc2f4752df0c345350fa739e8..7506b485bb6d1d4cee0aff00cbe1132d55396071 100644 (file)
@@ -249,7 +249,7 @@ extern void ceph_osdc_handle_map(struct ceph_osd_client *osdc,
                                 struct ceph_msg *msg);
 
 extern void osd_req_op_init(struct ceph_osd_request *osd_req,
-                                       unsigned int which, u16 opcode);
+                           unsigned int which, u16 opcode, u32 flags);
 
 extern void osd_req_op_raw_data_in_pages(struct ceph_osd_request *,
                                        unsigned int which,
index df695313f975769b6cfc740760cbc8c33b6a8d5c..78842f46f152694906e0b73f99fd2aa93611e2f7 100644 (file)
@@ -31,6 +31,7 @@
 #define CLK_GET_RATE_NOCACHE   BIT(6) /* do not use the cached clk rate */
 #define CLK_SET_RATE_NO_REPARENT BIT(7) /* don't re-parent on rate change */
 #define CLK_GET_ACCURACY_NOCACHE BIT(8) /* do not use the cached clk accuracy */
+#define CLK_RECALC_NEW_RATES   BIT(9) /* recalc rates after notifications */
 
 struct clk_hw;
 struct clk_core;
@@ -209,7 +210,7 @@ struct clk_ops {
 struct clk_init_data {
        const char              *name;
        const struct clk_ops    *ops;
-       const char              **parent_names;
+       const char              * const *parent_names;
        u8                      num_parents;
        unsigned long           flags;
 };
@@ -426,12 +427,14 @@ extern const struct clk_ops clk_mux_ops;
 extern const struct clk_ops clk_mux_ro_ops;
 
 struct clk *clk_register_mux(struct device *dev, const char *name,
-               const char **parent_names, u8 num_parents, unsigned long flags,
+               const char * const *parent_names, u8 num_parents,
+               unsigned long flags,
                void __iomem *reg, u8 shift, u8 width,
                u8 clk_mux_flags, spinlock_t *lock);
 
 struct clk *clk_register_mux_table(struct device *dev, const char *name,
-               const char **parent_names, u8 num_parents, unsigned long flags,
+               const char * const *parent_names, u8 num_parents,
+               unsigned long flags,
                void __iomem *reg, u8 shift, u32 mask,
                u8 clk_mux_flags, u32 *table, spinlock_t *lock);
 
@@ -457,7 +460,7 @@ struct clk_fixed_factor {
        unsigned int    div;
 };
 
-extern struct clk_ops clk_fixed_factor_ops;
+extern const struct clk_ops clk_fixed_factor_ops;
 struct clk *clk_register_fixed_factor(struct device *dev, const char *name,
                const char *parent_name, unsigned long flags,
                unsigned int mult, unsigned int div);
@@ -518,7 +521,7 @@ struct clk_composite {
 };
 
 struct clk *clk_register_composite(struct device *dev, const char *name,
-               const char **parent_names, int num_parents,
+               const char * const *parent_names, int num_parents,
                struct clk_hw *mux_hw, const struct clk_ops *mux_ops,
                struct clk_hw *rate_hw, const struct clk_ops *rate_ops,
                struct clk_hw *gate_hw, const struct clk_ops *gate_ops,
@@ -589,6 +592,7 @@ long __clk_mux_determine_rate_closest(struct clk_hw *hw, unsigned long rate,
                              unsigned long max_rate,
                              unsigned long *best_parent_rate,
                              struct clk_hw **best_parent_p);
+void clk_hw_reparent(struct clk_hw *hw, struct clk_hw *new_parent);
 
 static inline void __clk_hw_set_clk(struct clk_hw *dst, struct clk_hw *src)
 {
@@ -624,6 +628,8 @@ struct clk *of_clk_src_simple_get(struct of_phandle_args *clkspec,
                                  void *data);
 struct clk *of_clk_src_onecell_get(struct of_phandle_args *clkspec, void *data);
 int of_clk_get_parent_count(struct device_node *np);
+int of_clk_parent_fill(struct device_node *np, const char **parents,
+                      unsigned int size);
 const char *of_clk_get_parent_name(struct device_node *np, int index);
 
 void of_clk_init(const struct of_device_id *matches);
index 26fc8bc77f85644adf8cb955579252b1eebcc423..7f8ad9593da725438aaefb397b99d9fea4109f90 100644 (file)
@@ -475,6 +475,21 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s
        (volatile typeof(x) *)&(x); })
 #define ACCESS_ONCE(x) (*__ACCESS_ONCE(x))
 
+/**
+ * lockless_dereference() - safely load a pointer for later dereference
+ * @p: The pointer to load
+ *
+ * Similar to rcu_dereference(), but for situations where the pointed-to
+ * object's lifetime is managed by something other than RCU.  That
+ * "something other" might be reference counting or simple immortality.
+ */
+#define lockless_dereference(p) \
+({ \
+       typeof(p) _________p1 = READ_ONCE(p); \
+       smp_read_barrier_depends(); /* Dependency order vs. p above. */ \
+       (_________p1); \
+})
+
 /* Ignore/forbid kprobes attach on very low level functions marked by this attribute: */
 #ifdef CONFIG_KPROBES
 # define __kprobes     __attribute__((__section__(".kprobes.text")))
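
A minimal usage sketch of the new lockless_dereference() helper: the reader's load pairs with a release-style publication on the writer side (e.g. smp_store_release()); gbl_foo and do_something() are illustrative names only.

        struct foo *f = lockless_dereference(gbl_foo);

        if (f)
                do_something(f->field);    /* f's lifetime is refcounted, not RCU-managed */
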
index cf53d0773ce3a24623354fd16dee2b3b17597a9b..d81961e9e37daf04456f36a1cedd607ca0dcdf8a 100644 (file)
@@ -9,5 +9,6 @@
 extern __u16 crc_t10dif_generic(__u16 crc, const unsigned char *buffer,
                                size_t len);
 extern __u16 crc_t10dif(unsigned char const *, size_t);
+extern __u16 crc_t10dif_update(__u16 crc, unsigned char const *, size_t);
 
 #endif
index 48a1a7d100f190efae067afa7866b75e10eff9fe..48b49305716bd728e69477db6b430c34e7781746 100644 (file)
@@ -1,7 +1,11 @@
 #ifndef CEPH_CRUSH_CRUSH_H
 #define CEPH_CRUSH_CRUSH_H
 
-#include <linux/types.h>
+#ifdef __KERNEL__
+# include <linux/types.h>
+#else
+# include "crush_compat.h"
+#endif
 
 /*
  * CRUSH is a pseudo-random data distribution algorithm that
 #define CRUSH_MAGIC 0x00010000ul   /* for detecting algorithm revisions */
 
 #define CRUSH_MAX_DEPTH 10  /* max crush hierarchy depth */
+#define CRUSH_MAX_RULESET (1<<8)  /* max crush ruleset number */
+#define CRUSH_MAX_RULES CRUSH_MAX_RULESET  /* should be the same as max rulesets */
 
+#define CRUSH_MAX_DEVICE_WEIGHT (100u * 0x10000u)
+#define CRUSH_MAX_BUCKET_WEIGHT (65535u * 0x10000u)
 
 #define CRUSH_ITEM_UNDEF  0x7ffffffe  /* undefined result (internal use only) */
 #define CRUSH_ITEM_NONE   0x7fffffff  /* no result */
@@ -108,6 +116,15 @@ enum {
 };
 extern const char *crush_bucket_alg_name(int alg);
 
+/*
+ * although tree was a legacy algorithm, it has been buggy, so
+ * exclude it.
+ */
+#define CRUSH_LEGACY_ALLOWED_BUCKET_ALGS (     \
+               (1 << CRUSH_BUCKET_UNIFORM) |   \
+               (1 << CRUSH_BUCKET_LIST) |      \
+               (1 << CRUSH_BUCKET_STRAW))
+
 struct crush_bucket {
        __s32 id;        /* this'll be negative */
        __u16 type;      /* non-zero; type=0 is reserved for devices */
@@ -174,7 +191,7 @@ struct crush_map {
        /* choose local attempts using a fallback permutation before
         * re-descent */
        __u32 choose_local_fallback_tries;
-       /* choose attempts before giving up */ 
+       /* choose attempts before giving up */
        __u32 choose_total_tries;
        /* attempt chooseleaf inner descent once for firstn mode; on
         * reject retry outer descent.  Note that this does *not*
@@ -187,6 +204,25 @@ struct crush_map {
         * that want to limit reshuffling, a value of 3 or 4 will make the
         * mappings line up a bit better with previous mappings. */
        __u8 chooseleaf_vary_r;
+
+#ifndef __KERNEL__
+       /*
+        * version 0 (original) of straw_calc has various flaws.  version 1
+        * fixes a few of them.
+        */
+       __u8 straw_calc_version;
+
+       /*
+        * allowed bucket algs is a bitmask, here the bit positions
+        * are CRUSH_BUCKET_*.  note that these are *bits* and
+        * CRUSH_BUCKET_* values are not, so we need to or together (1
+        * << CRUSH_BUCKET_WHATEVER).  The 0th bit is not used to
+        * minimize confusion (bucket type values start at 1).
+        */
+       __u32 allowed_bucket_algs;
+
+       __u32 *choose_tries;
+#endif
 };
 
 
index 91e884230d5db99cbfacc478bf8acbe2d4533908..d1d90258242eef2965eb3e1e1e399132e85c8c34 100644 (file)
@@ -1,6 +1,12 @@
 #ifndef CEPH_CRUSH_HASH_H
 #define CEPH_CRUSH_HASH_H
 
+#ifdef __KERNEL__
+# include <linux/types.h>
+#else
+# include "crush_compat.h"
+#endif
+
 #define CRUSH_HASH_RJENKINS1   0
 
 #define CRUSH_HASH_DEFAULT CRUSH_HASH_RJENKINS1
index eab367446eea7fa683cb4fd15e74ad3822bb35c8..5dfd5b1125d2b257a4a00d1e77661613ca2227ec 100644 (file)
@@ -8,7 +8,7 @@
  * LGPL2
  */
 
-#include <linux/crush/crush.h>
+#include "crush.h"
 
 extern int crush_find_rule(const struct crush_map *map, int ruleset, int type, int size);
 extern int crush_do_rule(const struct crush_map *map,
index df334cbacc6d0b8c2702c28e13e9a804314bb754..d2d50249b7b2a16bd08b3c6f82ea3748836c7afc 100644 (file)
@@ -160,6 +160,7 @@ struct dentry_operations {
        char *(*d_dname)(struct dentry *, char *, int);
        struct vfsmount *(*d_automount)(struct path *);
        int (*d_manage)(struct dentry *, bool);
+       struct inode *(*d_select_inode)(struct dentry *, unsigned);
 } ____cacheline_aligned;
 
 /*
@@ -225,6 +226,7 @@ struct dentry_operations {
 
 #define DCACHE_MAY_FREE                        0x00800000
 #define DCACHE_FALLTHRU                        0x01000000 /* Fall through to lower layer */
+#define DCACHE_OP_SELECT_INODE         0x02000000 /* Unioned entry: dcache op selects inode */
 
 extern seqlock_t rename_lock;
 
@@ -505,6 +507,11 @@ static inline bool d_really_is_positive(const struct dentry *dentry)
        return dentry->d_inode != NULL;
 }
 
+static inline int simple_positive(struct dentry *dentry)
+{
+       return d_really_is_positive(dentry) && !d_unhashed(dentry);
+}
+
 extern void d_set_fallthru(struct dentry *dentry);
 
 static inline bool d_is_fallthru(const struct dentry *dentry)
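
simple_positive() replaces the open-coded "has an inode and is still hashed" test that several simple filesystems duplicated. A sketch of the usual pattern when walking a directory's children (per-child locking elided):

        struct dentry *child;

        spin_lock(&parent->d_lock);
        list_for_each_entry(child, &parent->d_subdirs, d_child) {
                if (!simple_positive(child))
                        continue;          /* negative or already unhashed */
                /* ... act on child->d_inode ... */
        }
        spin_unlock(&parent->d_lock);
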
index 00ac57c26615103b0983040981cac8012c9e2f99..5a31bf3a40243e1faff078fdc509361568143f01 100644 (file)
@@ -1300,4 +1300,26 @@ static void __exit __driver##_exit(void) \
 } \
 module_exit(__driver##_exit);
 
+/**
+ * builtin_driver() - Helper macro for drivers that don't do anything
+ * special in init and have no exit. This eliminates some boilerplate.
+ * Each driver may only use this macro once, and calling it replaces
+ * device_initcall (or in some cases, the legacy __initcall).  This is
+ * meant to be a direct parallel of module_driver() above but without
+ * the __exit stuff that is not used for builtin cases.
+ *
+ * @__driver: driver name
+ * @__register: register function for this driver type
+ * @...: Additional arguments to be passed to __register
+ *
+ * Use this macro to construct bus specific macros for registering
+ * drivers, and do not use it on its own.
+ */
+#define builtin_driver(__driver, __register, ...) \
+static int __init __driver##_init(void) \
+{ \
+       return __register(&(__driver) , ##__VA_ARGS__); \
+} \
+device_initcall(__driver##_init);
+
 #endif /* _DEVICE_H_ */
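
A sketch of builtin_driver() for a builtin-only platform driver; foo_probe and the "foo" name are illustrative:

        static struct platform_driver foo_driver = {
                .probe  = foo_probe,
                .driver = {
                        .name = "foo",
                },
        };
        builtin_driver(foo_driver, platform_driver_register);
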
index 230f87bdf5ad02008ff622e65bc761e41d4b22e0..fbb88740634af011af6cc1be098f0a6d20aba07f 100644 (file)
@@ -47,6 +47,9 @@ struct files_struct {
    * read mostly part
    */
        atomic_t count;
+       bool resize_in_progress;
+       wait_queue_head_t resize_wait;
+
        struct fdtable __rcu *fdt;
        struct fdtable fdtab;
   /*
index 3f1a84635da896c5c4da944a9c29c428fab2774b..a0653e560c2679a2eea870035a55cd3282e47894 100644 (file)
@@ -1654,7 +1654,6 @@ struct inode_operations {
        int (*set_acl)(struct inode *, struct posix_acl *, int);
 
        /* WARNING: probably going away soon, do not use! */
-       int (*dentry_open)(struct dentry *, struct file *, const struct cred *);
 } ____cacheline_aligned;
 
 ssize_t rw_copy_check_uvector(int type, const struct iovec __user * uvector,
@@ -1917,6 +1916,7 @@ struct file_system_type {
 #define FS_HAS_SUBTYPE         4
 #define FS_USERNS_MOUNT                8       /* Can be mounted by userns root */
 #define FS_USERNS_DEV_MOUNT    16 /* A userns mount does not imply MNT_NODEV */
+#define FS_USERNS_VISIBLE      32      /* FS must already be visible */
 #define FS_RENAME_DOES_D_MOVE  32768   /* FS will handle d_move() during rename() internally. */
        struct dentry *(*mount) (struct file_system_type *, int,
                       const char *, void *);
@@ -2004,7 +2004,6 @@ extern int vfs_ustat(dev_t, struct kstatfs *);
 extern int freeze_super(struct super_block *super);
 extern int thaw_super(struct super_block *super);
 extern bool our_mnt(struct vfsmount *mnt);
-extern bool fs_fully_visible(struct file_system_type *);
 
 extern int current_umask(void);
 
@@ -2213,7 +2212,6 @@ extern struct file *file_open_name(struct filename *, int, umode_t);
 extern struct file *filp_open(const char *, int, umode_t);
 extern struct file *file_open_root(struct dentry *, struct vfsmount *,
                                   const char *, int);
-extern int vfs_open(const struct path *, struct file *, const struct cred *);
 extern struct file * dentry_open(const struct path *, int, const struct cred *);
 extern int filp_close(struct file *, fl_owner_t id);
 
@@ -2530,6 +2528,8 @@ extern struct file * open_exec(const char *);
 extern int is_subdir(struct dentry *, struct dentry *);
 extern int path_is_under(struct path *, struct path *);
 
+extern char *file_path(struct file *, char *, int);
+
 #include <linux/err.h>
 
 /* needed for stackable file system support */
@@ -2581,7 +2581,12 @@ extern struct inode *new_inode_pseudo(struct super_block *sb);
 extern struct inode *new_inode(struct super_block *sb);
 extern void free_inode_nonrcu(struct inode *inode);
 extern int should_remove_suid(struct dentry *);
-extern int file_remove_suid(struct file *);
+extern int file_remove_privs(struct file *);
+extern int dentry_needs_remove_privs(struct dentry *dentry);
+static inline int file_needs_remove_privs(struct file *file)
+{
+       return dentry_needs_remove_privs(file->f_path.dentry);
+}
 
 extern void __insert_inode_hash(struct inode *, unsigned long hashval);
 static inline void insert_inode_hash(struct inode *inode)
@@ -2816,6 +2821,8 @@ extern struct dentry *simple_lookup(struct inode *, struct dentry *, unsigned in
 extern ssize_t generic_read_dir(struct file *, char __user *, size_t, loff_t *);
 extern const struct file_operations simple_dir_operations;
 extern const struct inode_operations simple_dir_inode_operations;
+extern void make_empty_dir_inode(struct inode *inode);
+extern bool is_empty_dir_inode(struct inode *inode);
 struct tree_descr { char *name; const struct file_operations *ops; int mode; };
 struct dentry *d_alloc_name(struct dentry *, const char *);
 extern int simple_fill_super(struct super_block *, unsigned long, struct tree_descr *);
index 771484993ca7c662e6dc07c115e421050459256a..604e1526cd00a23e27d6a426c8baa6a71bc309c2 100644 (file)
@@ -74,6 +74,7 @@ extern wait_queue_head_t fscache_cache_cleared_wq;
  */
 typedef void (*fscache_operation_release_t)(struct fscache_operation *op);
 typedef void (*fscache_operation_processor_t)(struct fscache_operation *op);
+typedef void (*fscache_operation_cancel_t)(struct fscache_operation *op);
 
 enum fscache_operation_state {
        FSCACHE_OP_ST_BLANK,            /* Op is not yet submitted */
@@ -109,6 +110,9 @@ struct fscache_operation {
         *   the op in a non-pool thread */
        fscache_operation_processor_t processor;
 
+       /* Operation cancellation cleanup (optional) */
+       fscache_operation_cancel_t cancel;
+
        /* operation releaser */
        fscache_operation_release_t release;
 };
@@ -119,33 +123,17 @@ extern void fscache_op_work_func(struct work_struct *work);
 extern void fscache_enqueue_operation(struct fscache_operation *);
 extern void fscache_op_complete(struct fscache_operation *, bool);
 extern void fscache_put_operation(struct fscache_operation *);
-
-/**
- * fscache_operation_init - Do basic initialisation of an operation
- * @op: The operation to initialise
- * @release: The release function to assign
- *
- * Do basic initialisation of an operation.  The caller must still set flags,
- * object and processor if needed.
- */
-static inline void fscache_operation_init(struct fscache_operation *op,
-                                       fscache_operation_processor_t processor,
-                                       fscache_operation_release_t release)
-{
-       INIT_WORK(&op->work, fscache_op_work_func);
-       atomic_set(&op->usage, 1);
-       op->state = FSCACHE_OP_ST_INITIALISED;
-       op->debug_id = atomic_inc_return(&fscache_op_debug_id);
-       op->processor = processor;
-       op->release = release;
-       INIT_LIST_HEAD(&op->pend_link);
-}
+extern void fscache_operation_init(struct fscache_operation *,
+                                  fscache_operation_processor_t,
+                                  fscache_operation_cancel_t,
+                                  fscache_operation_release_t);
 
 /*
  * data read operation
  */
 struct fscache_retrieval {
        struct fscache_operation op;
+       struct fscache_cookie   *cookie;        /* The netfs cookie */
        struct address_space    *mapping;       /* netfs pages */
        fscache_rw_complete_t   end_io_func;    /* function to call on I/O completion */
        void                    *context;       /* netfs read context (pinned) */
@@ -371,6 +359,7 @@ struct fscache_object {
 #define FSCACHE_OBJECT_IS_LOOKED_UP    4       /* T if object has been looked up */
 #define FSCACHE_OBJECT_IS_AVAILABLE    5       /* T if object has become active */
 #define FSCACHE_OBJECT_RETIRED         6       /* T if object was retired on relinquishment */
+#define FSCACHE_OBJECT_KILLED_BY_CACHE 7       /* T if object was killed by the cache */
 
        struct list_head        cache_link;     /* link in cache->object_list */
        struct hlist_node       cookie_link;    /* link in cookie->backing_objects */
@@ -410,17 +399,16 @@ static inline bool fscache_object_is_available(struct fscache_object *object)
        return test_bit(FSCACHE_OBJECT_IS_AVAILABLE, &object->flags);
 }
 
-static inline bool fscache_object_is_active(struct fscache_object *object)
+static inline bool fscache_cache_is_broken(struct fscache_object *object)
 {
-       return fscache_object_is_available(object) &&
-               fscache_object_is_live(object) &&
-               !test_bit(FSCACHE_IOERROR, &object->cache->flags);
+       return test_bit(FSCACHE_IOERROR, &object->cache->flags);
 }
 
-static inline bool fscache_object_is_dead(struct fscache_object *object)
+static inline bool fscache_object_is_active(struct fscache_object *object)
 {
-       return fscache_object_is_dying(object) &&
-               test_bit(FSCACHE_IOERROR, &object->cache->flags);
+       return fscache_object_is_available(object) &&
+               fscache_object_is_live(object) &&
+               !fscache_cache_is_broken(object);
 }
 
 /**
@@ -551,4 +539,15 @@ extern enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
                                               const void *data,
                                               uint16_t datalen);
 
+extern void fscache_object_retrying_stale(struct fscache_object *object);
+
+enum fscache_why_object_killed {
+       FSCACHE_OBJECT_IS_STALE,
+       FSCACHE_OBJECT_NO_SPACE,
+       FSCACHE_OBJECT_WAS_RETIRED,
+       FSCACHE_OBJECT_WAS_CULLED,
+};
+extern void fscache_object_mark_killed(struct fscache_object *object,
+                                      enum fscache_why_object_killed why);
+
 #endif /* _LINUX_FSCACHE_CACHE_H */
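
fscache_operation_init() is now out of line and takes the new, optional cancellation callback; a sketch of a call site (the callbacks are illustrative, and cancel may be NULL when no cleanup is needed):

        fscache_operation_init(&op->op, foo_op_processor, foo_op_cancel,
                               foo_op_release);
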
index 1ccaab44abcc80541a1aebc024b62e650cb3982c..5383bb1394a1a75abc932c3a0fece2fc58f8a076 100644 (file)
@@ -119,16 +119,16 @@ extern unsigned long gen_pool_best_fit(unsigned long *map, unsigned long size,
 
 extern struct gen_pool *devm_gen_pool_create(struct device *dev,
                int min_alloc_order, int nid);
-extern struct gen_pool *dev_get_gen_pool(struct device *dev);
+extern struct gen_pool *gen_pool_get(struct device *dev);
 
 bool addr_in_gen_pool(struct gen_pool *pool, unsigned long start,
                        size_t size);
 
 #ifdef CONFIG_OF
-extern struct gen_pool *of_get_named_gen_pool(struct device_node *np,
+extern struct gen_pool *of_gen_pool_get(struct device_node *np,
        const char *propname, int index);
 #else
-static inline struct gen_pool *of_get_named_gen_pool(struct device_node *np,
+static inline struct gen_pool *of_gen_pool_get(struct device_node *np,
        const char *propname, int index)
 {
        return NULL;
index 6ba7cf23748fe90f354e50c7514c230ee7e14bd7..ad35f300b9a46019b43eee1b49c5c0fb281ada10 100644 (file)
@@ -384,6 +384,14 @@ void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp);
 void drain_all_pages(struct zone *zone);
 void drain_local_pages(struct zone *zone);
 
+#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
+void page_alloc_init_late(void);
+#else
+static inline void page_alloc_init_late(void)
+{
+}
+#endif
+
 /*
  * gfp_allowed_mask is set to GFP_BOOT_MASK during early boot to restrict what
  * GFP flags are used before interrupts are enabled. Once interrupts are
index fd098169fe87ed0b92aecd23b46e7a9a790bafad..adac255aee86e359fb83085ba2c41edfc6f1991d 100644 (file)
@@ -407,6 +407,21 @@ static inline int desc_to_gpio(const struct gpio_desc *desc)
        return -EINVAL;
 }
 
+/* Child properties interface */
+struct fwnode_handle;
+
+static inline struct gpio_desc *fwnode_get_named_gpiod(
+       struct fwnode_handle *fwnode, const char *propname)
+{
+       return ERR_PTR(-ENOSYS);
+}
+
+static inline struct gpio_desc *devm_get_gpiod_from_child(
+       struct device *dev, const char *con_id, struct fwnode_handle *child)
+{
+       return ERR_PTR(-ENOSYS);
+}
+
 #endif /* CONFIG_GPIOLIB */
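
The !CONFIG_GPIOLIB stubs above mirror the real child-node helpers; a probe-path sketch using them (the "reset" con_id and the child fwnode are assumptions):

        struct gpio_desc *reset;

        reset = devm_get_gpiod_from_child(dev, "reset", child);
        if (IS_ERR(reset))
                return PTR_ERR(reset);
        gpiod_direction_output(reset, 0);
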
 
 /*
index 3343298e40e83453e84dad4d8704af95ef57b5f7..859d673d98c80239715df9ef56c32672647e47a1 100644 (file)
@@ -26,6 +26,7 @@
 #define HWLOCK_IRQ     0x02    /* Disable interrupts, don't save state */
 
 struct device;
+struct device_node;
 struct hwspinlock;
 struct hwspinlock_device;
 struct hwspinlock_ops;
@@ -66,6 +67,7 @@ int hwspin_lock_unregister(struct hwspinlock_device *bank);
 struct hwspinlock *hwspin_lock_request(void);
 struct hwspinlock *hwspin_lock_request_specific(unsigned int id);
 int hwspin_lock_free(struct hwspinlock *hwlock);
+int of_hwspin_lock_get_id(struct device_node *np, int index);
 int hwspin_lock_get_id(struct hwspinlock *hwlock);
 int __hwspin_lock_timeout(struct hwspinlock *, unsigned int, int,
                                                        unsigned long *);
@@ -120,6 +122,11 @@ void __hwspin_unlock(struct hwspinlock *hwlock, int mode, unsigned long *flags)
 {
 }
 
+static inline int of_hwspin_lock_get_id(struct device_node *np, int index)
+{
+       return 0;
+}
+
 static inline int hwspin_lock_get_id(struct hwspinlock *hwlock)
 {
        return 0;
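
of_hwspin_lock_get_id() resolves a lock index from a device-tree phandle so a client can claim a specific lock; a short sketch:

        struct hwspinlock *lock;
        int id;

        id = of_hwspin_lock_get_id(dev->of_node, 0);
        if (id < 0)
                return id;
        lock = hwspin_lock_request_specific(id);
        if (!lock)
                return -EBUSY;
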
index 21b6d768edd7a4e0f1aee98ed9f437000fa1b996..7c68c36d3fd88788f043447c5bed889cdd408bb6 100644 (file)
 
 #define __exit          __section(.exit.text) __exitused __cold notrace
 
-/* temporary, until all users are removed */
-#define __cpuinit
-#define __cpuinitdata
-#define __cpuinitconst
-#define __cpuexit
-#define __cpuexitdata
-#define __cpuexitconst
-
 /* Used for MEMORY_HOTPLUG */
 #define __meminit        __section(.meminit.text) __cold notrace
 #define __meminitdata    __section(.meminit.data)
 #define __INITRODATA   .section        ".init.rodata","a",%progbits
 #define __FINITDATA    .previous
 
-/* temporary, until all users are removed */
-#define __CPUINIT
-
 #define __MEMINIT        .section      ".meminit.text", "ax"
 #define __MEMINITDATA    .section      ".meminit.data", "aw"
 #define __MEMINITRODATA  .section      ".meminit.rodata", "a"
index 08a5ef6e8f25a485f0af16b35e3ba1d066ccc6d2..eecc9ea6cd58716f7e4d2597f4f5699ad818d546 100644 (file)
 #include <linux/input.h>
 
 #ifdef CONFIG_OF
-void touchscreen_parse_of_params(struct input_dev *dev);
+void touchscreen_parse_of_params(struct input_dev *dev, bool multitouch);
 #else
-static inline void touchscreen_parse_of_params(struct input_dev *dev)
+static inline void touchscreen_parse_of_params(struct input_dev *dev,
+                                              bool multitouch)
 {
 }
 #endif
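
touchscreen_parse_of_params() now takes a multitouch flag so the DT properties can also be applied to the ABS_MT_* axes; a probe-time sketch (max_x/max_y are illustrative defaults):

        input_set_abs_params(input, ABS_MT_POSITION_X, 0, max_x, 0, 0);
        input_set_abs_params(input, ABS_MT_POSITION_Y, 0, max_y, 0, 0);
        touchscreen_parse_of_params(input, true);   /* true: device reports MT axes */
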
index 812149160d3bc5829750d5f2790ae23c4791092f..92188b0225bb31f33eba9deacc4b9b88036c1d8e 100644 (file)
@@ -407,7 +407,6 @@ enum {
        IRQCHIP_EOI_THREADED            = (1 <<  6),
 };
 
-/* This include will go away once we isolated irq_desc usage to core code */
 #include <linux/irqdesc.h>
 
 /*
index 14d79131f53dda6e6a03fe1d1e375b5b0ba45ed9..638887376e582c83531ea663269ddfc1ef625a42 100644 (file)
 #ifndef _LINUX_IRQCHIP_H
 #define _LINUX_IRQCHIP_H
 
+#include <linux/of.h>
+
+/*
+ * This macro must be used by the different irqchip drivers to declare
+ * the association between their DT compatible string and their
+ * initialization function.
+ *
+ * @name: name that must be unique across all IRQCHIP_DECLARE of the
+ * same file.
+ * @compstr: compatible string of the irqchip driver
+ * @fn: initialization function
+ */
+#define IRQCHIP_DECLARE(name, compat, fn) OF_DECLARE_2(irqchip, name, compat, fn)
+
 #ifdef CONFIG_IRQCHIP
 void irqchip_init(void);
 #else
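
A sketch of the IRQCHIP_DECLARE() pattern described above; the compatible string and init function are illustrative:

        static int __init foo_intc_of_init(struct device_node *node,
                                           struct device_node *parent)
        {
                /* map registers, create the irq domain, set the chained handler */
                return 0;
        }
        IRQCHIP_DECLARE(foo_intc, "vendor,foo-intc", foo_intc_of_init);
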
index c52d1480f272448a18ae5ea8400328c82f0c76da..624a668e61f1a6c5096e2afa78bd403655c11e59 100644 (file)
@@ -3,9 +3,6 @@
 
 /*
  * Core internal functions to deal with irq descriptors
- *
- * This include will move to kernel/irq once we cleaned up the tree.
- * For now it's included from <linux/irq.h>
  */
 
 struct irq_affinity_notify;
@@ -103,6 +100,11 @@ static inline struct irq_desc *irq_data_to_desc(struct irq_data *data)
 #endif
 }
 
+static inline unsigned int irq_desc_get_irq(struct irq_desc *desc)
+{
+       return desc->irq_data.irq;
+}
+
 static inline struct irq_data *irq_desc_get_irq_data(struct irq_desc *desc)
 {
        return &desc->irq_data;
@@ -188,6 +190,47 @@ __irq_set_chip_handler_name_locked(unsigned int irq, struct irq_chip *chip,
        desc->name = name;
 }
 
+/**
+ * irq_set_handler_locked - Set irq handler from a locked region
+ * @data:      Pointer to the irq_data structure which identifies the irq
+ * @handler:   Flow control handler function for this interrupt
+ *
+ * Sets the handler in the irq descriptor associated to @data.
+ *
+ * Must be called with irq_desc locked and valid parameters. Typical
+ * call site is the irq_set_type() callback.
+ */
+static inline void irq_set_handler_locked(struct irq_data *data,
+                                         irq_flow_handler_t handler)
+{
+       struct irq_desc *desc = irq_data_to_desc(data);
+
+       desc->handle_irq = handler;
+}
+
+/**
+ * irq_set_chip_handler_name_locked - Set chip, handler and name from a locked region
+ * @data:      Pointer to the irq_data structure for which the chip is set
+ * @chip:      Pointer to the new irq chip
+ * @handler:   Flow control handler function for this interrupt
+ * @name:      Name of the interrupt
+ *
+ * Replace the irq chip at the proper hierarchy level in @data and
+ * sets the handler and name in the associated irq descriptor.
+ *
+ * Must be called with irq_desc locked and valid parameters.
+ */
+static inline void
+irq_set_chip_handler_name_locked(struct irq_data *data, struct irq_chip *chip,
+                                irq_flow_handler_t handler, const char *name)
+{
+       struct irq_desc *desc = irq_data_to_desc(data);
+
+       desc->handle_irq = handler;
+       desc->name = name;
+       data->chip = chip;
+}
+
 static inline int irq_balancing_disabled(unsigned int irq)
 {
        struct irq_desc *desc;
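
A sketch of the documented call site for irq_set_handler_locked(): an irq_set_type() callback switching flow handlers while the descriptor lock is already held.

        static int foo_irq_set_type(struct irq_data *d, unsigned int type)
        {
                if (type & IRQ_TYPE_LEVEL_MASK)
                        irq_set_handler_locked(d, handle_level_irq);
                else
                        irq_set_handler_locked(d, handle_edge_irq);
                /* ... program the trigger in hardware ... */
                return 0;
        }
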
index fdd5cc16c9c43bcf4bb72cbd7c18d2047af7dd7c..9669bf9d4f48fa38381f6c5f931b6579fc10a800 100644 (file)
@@ -23,12 +23,6 @@ unsigned int irq_get_next_irq(unsigned int offset);
                        ;                                               \
                else
 
-#ifdef CONFIG_SMP
-#define irq_node(irq)  (irq_get_irq_data(irq)->node)
-#else
-#define irq_node(irq)  0
-#endif
-
 # define for_each_active_irq(irq)                      \
        for (irq = irq_get_next_irq(0); irq < nr_irqs;  \
             irq = irq_get_next_irq(irq + 1))
index 5acf5b70866d42d5fabd74180193327c5fd7c7bb..5f0be58640ea6e73f88dd02cb3ac3bc7cdb6a0e6 100644 (file)
@@ -439,6 +439,9 @@ extern int panic_on_unrecovered_nmi;
 extern int panic_on_io_nmi;
 extern int panic_on_warn;
 extern int sysctl_panic_on_stackoverflow;
+
+extern bool crash_kexec_post_notifiers;
+
 /*
  * Only to be used by arch init code. If the user over-wrote the default
  * CONFIG_PANIC_TIMEOUT, honor it.
@@ -813,13 +816,15 @@ static inline void ftrace_dump(enum ftrace_dump_mode oops_dump_mode) { }
 #endif
 
 /* Permissions on a sysfs file: you didn't miss the 0 prefix did you? */
-#define VERIFY_OCTAL_PERMISSIONS(perms)                                        \
-       (BUILD_BUG_ON_ZERO((perms) < 0) +                               \
-        BUILD_BUG_ON_ZERO((perms) > 0777) +                            \
-        /* User perms >= group perms >= other perms */                 \
-        BUILD_BUG_ON_ZERO(((perms) >> 6) < (((perms) >> 3) & 7)) +     \
-        BUILD_BUG_ON_ZERO((((perms) >> 3) & 7) < ((perms) & 7)) +      \
-        /* Other writable?  Generally considered a bad idea. */        \
-        BUILD_BUG_ON_ZERO((perms) & 2) +                               \
+#define VERIFY_OCTAL_PERMISSIONS(perms)                                                \
+       (BUILD_BUG_ON_ZERO((perms) < 0) +                                       \
+        BUILD_BUG_ON_ZERO((perms) > 0777) +                                    \
+        /* USER_READABLE >= GROUP_READABLE >= OTHER_READABLE */                \
+        BUILD_BUG_ON_ZERO((((perms) >> 6) & 4) < (((perms) >> 3) & 4)) +       \
+        BUILD_BUG_ON_ZERO((((perms) >> 3) & 4) < ((perms) & 4)) +              \
+        /* USER_WRITABLE >= GROUP_WRITABLE */                                  \
+        BUILD_BUG_ON_ZERO((((perms) >> 6) & 2) < (((perms) >> 3) & 2)) +       \
+        /* OTHER_WRITABLE?  Generally considered a bad idea. */                \
+        BUILD_BUG_ON_ZERO((perms) & 2) +                                       \
         (perms))
 #endif
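
The rewritten VERIFY_OCTAL_PERMISSIONS() compares the read and write bits of each class separately (execute bits no longer take part in the ordering) and still rejects other-writable modes at build time; for example:

        static int debug;
        module_param(debug, int, 0644);  /* ok: world-readable, root-writable        */
        /* module_param(debug, int, 0666);   would not build: other-writable bit set */
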
index e6b2f7db9c0ceb7873aee3fc4a7666452d8f9b8a..123be25ea15a6b37ae15fcc93219950d4d357380 100644 (file)
@@ -45,6 +45,7 @@ enum kernfs_node_flag {
        KERNFS_LOCKDEP          = 0x0100,
        KERNFS_SUICIDAL         = 0x0400,
        KERNFS_SUICIDED         = 0x0800,
+       KERNFS_EMPTY_DIR        = 0x1000,
 };
 
 /* @flags for kernfs_create_root() */
@@ -286,6 +287,8 @@ void kernfs_destroy_root(struct kernfs_root *root);
 struct kernfs_node *kernfs_create_dir_ns(struct kernfs_node *parent,
                                         const char *name, umode_t mode,
                                         void *priv, const void *ns);
+struct kernfs_node *kernfs_create_empty_dir(struct kernfs_node *parent,
+                                           const char *name);
 struct kernfs_node *__kernfs_create_file(struct kernfs_node *parent,
                                         const char *name,
                                         umode_t mode, loff_t size,
index 9a2b000094cf98e5758e2a3b463bae69b8febef8..b122eeafb5dc17b8a8b1a1852dc1c420ecf0f8d2 100644 (file)
@@ -12,6 +12,7 @@
 #ifndef __LINUX_LEDS_H_INCLUDED
 #define __LINUX_LEDS_H_INCLUDED
 
+#include <linux/device.h>
 #include <linux/list.h>
 #include <linux/mutex.h>
 #include <linux/rwsem.h>
@@ -222,6 +223,11 @@ struct led_trigger {
        struct list_head  next_trig;
 };
 
+ssize_t led_trigger_store(struct device *dev, struct device_attribute *attr,
+                       const char *buf, size_t count);
+ssize_t led_trigger_show(struct device *dev, struct device_attribute *attr,
+                       char *buf);
+
 /* Registration functions for complex triggers */
 extern int led_trigger_register(struct led_trigger *trigger);
 extern void led_trigger_unregister(struct led_trigger *trigger);
@@ -238,6 +244,16 @@ extern void led_trigger_blink_oneshot(struct led_trigger *trigger,
                                      unsigned long *delay_on,
                                      unsigned long *delay_off,
                                      int invert);
+extern void led_trigger_set_default(struct led_classdev *led_cdev);
+extern void led_trigger_set(struct led_classdev *led_cdev,
+                       struct led_trigger *trigger);
+extern void led_trigger_remove(struct led_classdev *led_cdev);
+
+static inline void *led_get_trigger_data(struct led_classdev *led_cdev)
+{
+       return led_cdev->trigger_data;
+}
+
 /**
  * led_trigger_rename_static - rename a trigger
  * @name: the new trigger name
@@ -267,6 +283,15 @@ static inline void led_trigger_register_simple(const char *name,
 static inline void led_trigger_unregister_simple(struct led_trigger *trigger) {}
 static inline void led_trigger_event(struct led_trigger *trigger,
                                enum led_brightness event) {}
+static inline void led_trigger_set_default(struct led_classdev *led_cdev) {}
+static inline void led_trigger_set(struct led_classdev *led_cdev,
+                               struct led_trigger *trigger) {}
+static inline void led_trigger_remove(struct led_classdev *led_cdev) {}
+static inline void *led_get_trigger_data(struct led_classdev *led_cdev)
+{
+       return NULL;
+}
+
 #endif /* CONFIG_LEDS_TRIGGERS */
 
 /* Trigger specific functions */
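
led_get_trigger_data() gives trigger code a stub-safe accessor for led_cdev->trigger_data; a sketch of an activate/deactivate pair using it (foo_trig_data is an illustrative type):

        static void foo_trig_activate(struct led_classdev *led_cdev)
        {
                led_cdev->trigger_data = kzalloc(sizeof(struct foo_trig_data),
                                                 GFP_KERNEL);
        }

        static void foo_trig_deactivate(struct led_classdev *led_cdev)
        {
                kfree(led_get_trigger_data(led_cdev));
                led_cdev->trigger_data = NULL;
        }
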
index 01508c7b8c812b1965c7d382ed996eada23280b5..2a663c6bb4285851a7a0270728ad7c2ddcab33ef 100644 (file)
@@ -5,6 +5,10 @@
 
 #include <asm/byteorder.h>
 
+typedef __be16 fdt16_t;
+typedef __be32 fdt32_t;
+typedef __be64 fdt64_t;
+
 #define fdt32_to_cpu(x) be32_to_cpu(x)
 #define cpu_to_fdt32(x) cpu_to_be32(x)
 #define fdt64_to_cpu(x) be64_to_cpu(x)
index 0215ffd630690b35016dce6df131ea5f81902a27..cc4b019720600617ebb9447f60320e6bb4d2aa2a 100644 (file)
@@ -101,6 +101,9 @@ void __next_mem_range_rev(u64 *idx, int nid, ulong flags,
                          struct memblock_type *type_b, phys_addr_t *out_start,
                          phys_addr_t *out_end, int *out_nid);
 
+void __next_reserved_mem_region(u64 *idx, phys_addr_t *out_start,
+                              phys_addr_t *out_end);
+
 /**
  * for_each_mem_range - iterate through memblock areas from type_a and not
  * included in type_b. Or just type_a if type_b is NULL.
@@ -142,6 +145,21 @@ void __next_mem_range_rev(u64 *idx, int nid, ulong flags,
             __next_mem_range_rev(&i, nid, flags, type_a, type_b,       \
                                  p_start, p_end, p_nid))
 
+/**
+ * for_each_reserved_mem_region - iterate over all reserved memblock areas
+ * @i: u64 used as loop variable
+ * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
+ * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
+ *
+ * Walks over reserved areas of memblock. Available as soon as memblock
+ * is initialized.
+ */
+#define for_each_reserved_mem_region(i, p_start, p_end)                        \
+       for (i = 0UL,                                                   \
+            __next_reserved_mem_region(&i, p_start, p_end);            \
+            i != (u64)ULLONG_MAX;                                      \
+            __next_reserved_mem_region(&i, p_start, p_end))
+
 #ifdef CONFIG_MOVABLE_NODE
 static inline bool memblock_is_hotpluggable(struct memblock_region *m)
 {
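
A sketch of the new reserved-region iterator; the loop variable is initialised by the macro itself:

        phys_addr_t start, end;
        u64 i;

        for_each_reserved_mem_region(i, &start, &end)
                pr_info("reserved: %pa..%pa\n", &start, &end);
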
index 99959a34f4f15e6d66b8a6681b256134634164ee..2e872f92dbac0cecc2c5b3a65fbe7ff8678e48d5 100644 (file)
@@ -1635,6 +1635,8 @@ extern void free_highmem_page(struct page *page);
 extern void adjust_managed_page_count(struct page *page, long count);
 extern void mem_init_print_info(const char *str);
 
+extern void reserve_bootmem_region(unsigned long start, unsigned long end);
+
 /* Free the reserved page into the buddy system, so it gets managed. */
 static inline void __free_reserved_page(struct page *page)
 {
@@ -1724,7 +1726,8 @@ extern void sparse_memory_present_with_active_regions(int nid);
 
 #if !defined(CONFIG_HAVE_MEMBLOCK_NODE_MAP) && \
     !defined(CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID)
-static inline int __early_pfn_to_nid(unsigned long pfn)
+static inline int __early_pfn_to_nid(unsigned long pfn,
+                                       struct mminit_pfnnid_cache *state)
 {
        return 0;
 }
@@ -1732,7 +1735,8 @@ static inline int __early_pfn_to_nid(unsigned long pfn)
 /* please see mm/page_alloc.c */
 extern int __meminit early_pfn_to_nid(unsigned long pfn);
 /* there is a per-arch backend function. */
-extern int __meminit __early_pfn_to_nid(unsigned long pfn);
+extern int __meminit __early_pfn_to_nid(unsigned long pfn,
+                                       struct mminit_pfnnid_cache *state);
 #endif
 
 extern void set_dma_reserve(unsigned long new_dma_reserve);
index 54d74f6eb233521d6cb84b2720a15c3cb2e6b734..754c25966a0a7828901deaf2c69dc1d5243736f4 100644 (file)
@@ -762,6 +762,14 @@ typedef struct pglist_data {
        /* Number of pages migrated during the rate limiting time interval */
        unsigned long numabalancing_migrate_nr_pages;
 #endif
+
+#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
+       /*
+        * If memory initialisation on large machines is deferred then this
+        * is the first PFN that needs to be initialised.
+        */
+       unsigned long first_deferred_pfn;
+#endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */
 } pg_data_t;
 
 #define node_present_pages(nid)        (NODE_DATA(nid)->node_present_pages)
@@ -1216,11 +1224,16 @@ void sparse_init(void);
 #define sparse_index_init(_sec, _nid)  do {} while (0)
 #endif /* CONFIG_SPARSEMEM */
 
-#ifdef CONFIG_NODES_SPAN_OTHER_NODES
-bool early_pfn_in_nid(unsigned long pfn, int nid);
-#else
-#define early_pfn_in_nid(pfn, nid)     (1)
-#endif
+/*
+ * During memory init memblocks map pfns to nids. The search is expensive and
+ * this caches recent lookups. The implementation of __early_pfn_to_nid
+ * may treat start/end as pfns or sections.
+ */
+struct mminit_pfnnid_cache {
+       unsigned long last_start;
+       unsigned long last_end;
+       int last_nid;
+};
 
 #ifndef early_pfn_valid
 #define early_pfn_valid(pfn)   (1)
index 7ffe0851d24438876faa9d756178d1654c7e825e..d67b1932cc59869cd5c3dc5d24efa5994b368386 100644 (file)
@@ -17,6 +17,7 @@
 #include <linux/moduleparam.h>
 #include <linux/jump_label.h>
 #include <linux/export.h>
+#include <linux/rbtree_latch.h>
 
 #include <linux/percpu.h>
 #include <asm/module.h>
@@ -210,6 +211,13 @@ enum module_state {
        MODULE_STATE_UNFORMED,  /* Still setting it up. */
 };
 
+struct module;
+
+struct mod_tree_node {
+       struct module *mod;
+       struct latch_tree_node node;
+};
+
 struct module {
        enum module_state state;
 
@@ -232,6 +240,9 @@ struct module {
        unsigned int num_syms;
 
        /* Kernel parameters. */
+#ifdef CONFIG_SYSFS
+       struct mutex param_lock;
+#endif
        struct kernel_param *kp;
        unsigned int num_kp;
 
@@ -271,8 +282,15 @@ struct module {
        /* Startup function. */
        int (*init)(void);
 
-       /* If this is non-NULL, vfree after init() returns */
-       void *module_init;
+       /*
+        * If this is non-NULL, vfree() after init() returns.
+        *
+        * Cacheline align here, such that:
+        *   module_init, module_core, init_size, core_size,
+        *   init_text_size, core_text_size and mtn_core::{mod,node[0]}
+        * are on the same cacheline.
+        */
+       void *module_init       ____cacheline_aligned;
 
        /* Here is the actual code + data, vfree'd on unload. */
        void *module_core;
@@ -283,6 +301,16 @@ struct module {
        /* The size of the executable code in each section.  */
        unsigned int init_text_size, core_text_size;
 
+#ifdef CONFIG_MODULES_TREE_LOOKUP
+       /*
+        * We want mtn_core::{mod,node[0]} to be in the same cacheline as the
+        * above entries such that a regular lookup will only touch one
+        * cacheline.
+        */
+       struct mod_tree_node    mtn_core;
+       struct mod_tree_node    mtn_init;
+#endif
+
        /* Size of RO sections of the module (text+rodata) */
        unsigned int init_ro_size, core_ro_size;
 
@@ -369,7 +397,7 @@ struct module {
        ctor_fn_t *ctors;
        unsigned int num_ctors;
 #endif
-};
+} ____cacheline_aligned;
 #ifndef MODULE_ARCH_INIT
 #define MODULE_ARCH_INIT {}
 #endif
@@ -423,14 +451,22 @@ struct symsearch {
        bool unused;
 };
 
-/* Search for an exported symbol by name. */
+/*
+ * Search for an exported symbol by name.
+ *
+ * Must be called with module_mutex held or preemption disabled.
+ */
 const struct kernel_symbol *find_symbol(const char *name,
                                        struct module **owner,
                                        const unsigned long **crc,
                                        bool gplok,
                                        bool warn);
 
-/* Walk the exported symbol table */
+/*
+ * Walk the exported symbol table
+ *
+ * Must be called with module_mutex held or preemption disabled.
+ */
 bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
                                    struct module *owner,
                                    void *data), void *data);
index 6480dcaca275b62541964ebfe390f0c0da8d371f..c12f2147c350593fb827b7b29aebb33e8f9f0b4a 100644 (file)
@@ -67,8 +67,9 @@ enum {
 
 struct kernel_param {
        const char *name;
+       struct module *mod;
        const struct kernel_param_ops *ops;
-       u16 perm;
+       const u16 perm;
        s8 level;
        u8 flags;
        union {
@@ -108,7 +109,7 @@ struct kparam_array
  *
  * @perm is 0 if the variable is not to appear in sysfs, or 0444
  * for world-readable, 0644 for root-writable, etc.  Note that if it
- * is writable, you may need to use kparam_block_sysfs_write() around
+ * is writable, you may need to use kernel_param_lock() around
  * accesses (esp. charp, which can be kfreed when it changes).
  *
  * The @type is simply pasted to refer to a param_ops_##type and a
@@ -216,16 +217,16 @@ struct kparam_array
    parameters. */
 #define __module_param_call(prefix, name, ops, arg, perm, level, flags)        \
        /* Default value instead of permissions? */                     \
-       static const char __param_str_##name[] = prefix #name; \
+       static const char __param_str_##name[] = prefix #name;          \
        static struct kernel_param __moduleparam_const __param_##name   \
        __used                                                          \
     __attribute__ ((unused,__section__ ("__param"),aligned(sizeof(void *)))) \
-       = { __param_str_##name, ops, VERIFY_OCTAL_PERMISSIONS(perm),    \
-           level, flags, { arg } }
+       = { __param_str_##name, THIS_MODULE, ops,                       \
+           VERIFY_OCTAL_PERMISSIONS(perm), level, flags, { arg } }
 
 /* Obsolete - use module_param_cb() */
 #define module_param_call(name, set, get, arg, perm)                   \
-       static struct kernel_param_ops __param_ops_##name =             \
+       static const struct kernel_param_ops __param_ops_##name =               \
                { .flags = 0, (void *)set, (void *)get };               \
        __module_param_call(MODULE_PARAM_PREFIX,                        \
                            name, &__param_ops_##name, arg,             \
@@ -238,58 +239,14 @@ __check_old_set_param(int (*oldset)(const char *, struct kernel_param *))
        return 0;
 }
 
-/**
- * kparam_block_sysfs_write - make sure a parameter isn't written via sysfs.
- * @name: the name of the parameter
- *
- * There's no point blocking write on a paramter that isn't writable via sysfs!
- */
-#define kparam_block_sysfs_write(name)                 \
-       do {                                            \
-               BUG_ON(!(__param_##name.perm & 0222));  \
-               __kernel_param_lock();                  \
-       } while (0)
-
-/**
- * kparam_unblock_sysfs_write - allows sysfs to write to a parameter again.
- * @name: the name of the parameter
- */
-#define kparam_unblock_sysfs_write(name)               \
-       do {                                            \
-               BUG_ON(!(__param_##name.perm & 0222));  \
-               __kernel_param_unlock();                \
-       } while (0)
-
-/**
- * kparam_block_sysfs_read - make sure a parameter isn't read via sysfs.
- * @name: the name of the parameter
- *
- * This also blocks sysfs writes.
- */
-#define kparam_block_sysfs_read(name)                  \
-       do {                                            \
-               BUG_ON(!(__param_##name.perm & 0444));  \
-               __kernel_param_lock();                  \
-       } while (0)
-
-/**
- * kparam_unblock_sysfs_read - allows sysfs to read a parameter again.
- * @name: the name of the parameter
- */
-#define kparam_unblock_sysfs_read(name)                        \
-       do {                                            \
-               BUG_ON(!(__param_##name.perm & 0444));  \
-               __kernel_param_unlock();                \
-       } while (0)
-
 #ifdef CONFIG_SYSFS
-extern void __kernel_param_lock(void);
-extern void __kernel_param_unlock(void);
+extern void kernel_param_lock(struct module *mod);
+extern void kernel_param_unlock(struct module *mod);
 #else
-static inline void __kernel_param_lock(void)
+static inline void kernel_param_lock(struct module *mod)
 {
 }
-static inline void __kernel_param_unlock(void)
+static inline void kernel_param_unlock(struct module *mod)
 {
 }
 #endif
@@ -386,64 +343,70 @@ static inline void destroy_params(const struct kernel_param *params,
 #define __param_check(name, p, type) \
        static inline type __always_unused *__check_##name(void) { return(p); }
 
-extern struct kernel_param_ops param_ops_byte;
+extern const struct kernel_param_ops param_ops_byte;
 extern int param_set_byte(const char *val, const struct kernel_param *kp);
 extern int param_get_byte(char *buffer, const struct kernel_param *kp);
 #define param_check_byte(name, p) __param_check(name, p, unsigned char)
 
-extern struct kernel_param_ops param_ops_short;
+extern const struct kernel_param_ops param_ops_short;
 extern int param_set_short(const char *val, const struct kernel_param *kp);
 extern int param_get_short(char *buffer, const struct kernel_param *kp);
 #define param_check_short(name, p) __param_check(name, p, short)
 
-extern struct kernel_param_ops param_ops_ushort;
+extern const struct kernel_param_ops param_ops_ushort;
 extern int param_set_ushort(const char *val, const struct kernel_param *kp);
 extern int param_get_ushort(char *buffer, const struct kernel_param *kp);
 #define param_check_ushort(name, p) __param_check(name, p, unsigned short)
 
-extern struct kernel_param_ops param_ops_int;
+extern const struct kernel_param_ops param_ops_int;
 extern int param_set_int(const char *val, const struct kernel_param *kp);
 extern int param_get_int(char *buffer, const struct kernel_param *kp);
 #define param_check_int(name, p) __param_check(name, p, int)
 
-extern struct kernel_param_ops param_ops_uint;
+extern const struct kernel_param_ops param_ops_uint;
 extern int param_set_uint(const char *val, const struct kernel_param *kp);
 extern int param_get_uint(char *buffer, const struct kernel_param *kp);
 #define param_check_uint(name, p) __param_check(name, p, unsigned int)
 
-extern struct kernel_param_ops param_ops_long;
+extern const struct kernel_param_ops param_ops_long;
 extern int param_set_long(const char *val, const struct kernel_param *kp);
 extern int param_get_long(char *buffer, const struct kernel_param *kp);
 #define param_check_long(name, p) __param_check(name, p, long)
 
-extern struct kernel_param_ops param_ops_ulong;
+extern const struct kernel_param_ops param_ops_ulong;
 extern int param_set_ulong(const char *val, const struct kernel_param *kp);
 extern int param_get_ulong(char *buffer, const struct kernel_param *kp);
 #define param_check_ulong(name, p) __param_check(name, p, unsigned long)
 
-extern struct kernel_param_ops param_ops_ullong;
+extern const struct kernel_param_ops param_ops_ullong;
 extern int param_set_ullong(const char *val, const struct kernel_param *kp);
 extern int param_get_ullong(char *buffer, const struct kernel_param *kp);
 #define param_check_ullong(name, p) __param_check(name, p, unsigned long long)
 
-extern struct kernel_param_ops param_ops_charp;
+extern const struct kernel_param_ops param_ops_charp;
 extern int param_set_charp(const char *val, const struct kernel_param *kp);
 extern int param_get_charp(char *buffer, const struct kernel_param *kp);
 #define param_check_charp(name, p) __param_check(name, p, char *)
 
 /* We used to allow int as well as bool.  We're taking that away! */
-extern struct kernel_param_ops param_ops_bool;
+extern const struct kernel_param_ops param_ops_bool;
 extern int param_set_bool(const char *val, const struct kernel_param *kp);
 extern int param_get_bool(char *buffer, const struct kernel_param *kp);
 #define param_check_bool(name, p) __param_check(name, p, bool)
 
-extern struct kernel_param_ops param_ops_invbool;
+extern const struct kernel_param_ops param_ops_bool_enable_only;
+extern int param_set_bool_enable_only(const char *val,
+                                     const struct kernel_param *kp);
+/* getter is the same as for the regular bool */
+#define param_check_bool_enable_only param_check_bool
+
+extern const struct kernel_param_ops param_ops_invbool;
 extern int param_set_invbool(const char *val, const struct kernel_param *kp);
 extern int param_get_invbool(char *buffer, const struct kernel_param *kp);
 #define param_check_invbool(name, p) __param_check(name, p, bool)
 
 /* An int, which can only be set like a bool (though it shows as an int). */
-extern struct kernel_param_ops param_ops_bint;
+extern const struct kernel_param_ops param_ops_bint;
 extern int param_set_bint(const char *val, const struct kernel_param *kp);
 #define param_get_bint param_get_int
 #define param_check_bint param_check_int
@@ -487,9 +450,9 @@ extern int param_set_bint(const char *val, const struct kernel_param *kp);
                            perm, -1, 0);                               \
        __MODULE_PARM_TYPE(name, "array of " #type)
 
-extern struct kernel_param_ops param_array_ops;
+extern const struct kernel_param_ops param_array_ops;
 
-extern struct kernel_param_ops param_ops_string;
+extern const struct kernel_param_ops param_ops_string;
 extern int param_set_copystring(const char *val, const struct kernel_param *);
 extern int param_get_string(char *buffer, const struct kernel_param *kp);
 
index 32201c269890433817f1fac83026d95fa760b996..b8e72aad919cfc72ea6710ac3786dc35bbe47201 100644 (file)
@@ -500,6 +500,7 @@ enum {
        NFSPROC4_CLNT_SEEK,
        NFSPROC4_CLNT_ALLOCATE,
        NFSPROC4_CLNT_DEALLOCATE,
+       NFSPROC4_CLNT_LAYOUTSTATS,
 };
 
 /* nfs41 types */
index b95f914ce083891325b6b8defa3f9995851cf76b..f91b5ade30c98fe8b03d06bc40deb0c434edb633 100644 (file)
@@ -219,6 +219,7 @@ struct nfs_inode {
 #define NFS_INO_COMMIT         (7)             /* inode is committing unstable writes */
 #define NFS_INO_LAYOUTCOMMIT   (9)             /* layoutcommit required */
 #define NFS_INO_LAYOUTCOMMITTING (10)          /* layoutcommit inflight */
+#define NFS_INO_LAYOUTSTATS    (11)            /* layoutstats inflight */
 
 static inline struct nfs_inode *NFS_I(const struct inode *inode)
 {
index 5e1273d4de14064198489a7aaccb73a88f36e7b8..a2ea1491d3dfc487611445490fb10adf9972d777 100644 (file)
@@ -237,5 +237,6 @@ struct nfs_server {
 #define NFS_CAP_SEEK           (1U << 19)
 #define NFS_CAP_ALLOCATE       (1U << 20)
 #define NFS_CAP_DEALLOCATE     (1U << 21)
+#define NFS_CAP_LAYOUTSTATS    (1U << 22)
 
 #endif
index 3eb072dbce833dd268b0189b9ff98a2aa85cc7bc..f2f650f136ee6fe181dfa27b71d7b944fc1f9357 100644 (file)
@@ -67,7 +67,6 @@ struct nfs_rw_ops {
        const fmode_t rw_mode;
        struct nfs_pgio_header *(*rw_alloc_header)(void);
        void (*rw_free_header)(struct nfs_pgio_header *);
-       void (*rw_release)(struct nfs_pgio_header *);
        int  (*rw_done)(struct rpc_task *, struct nfs_pgio_header *,
                        struct inode *);
        void (*rw_result)(struct rpc_task *, struct nfs_pgio_header *);
index 93ab6071bbe967b56ea44c56111157f8a9135e3f..7bbe50504211d65cc096baa8bc6d45e2e9449125 100644 (file)
@@ -316,6 +316,49 @@ struct nfs4_layoutreturn {
        int rpc_status;
 };
 
+#define PNFS_LAYOUTSTATS_MAXSIZE 256
+
+struct nfs42_layoutstat_args;
+struct nfs42_layoutstat_devinfo;
+typedef        void (*layoutstats_encode_t)(struct xdr_stream *,
+               struct nfs42_layoutstat_args *,
+               struct nfs42_layoutstat_devinfo *);
+
+/* Per file per deviceid layoutstats */
+struct nfs42_layoutstat_devinfo {
+       struct nfs4_deviceid dev_id;
+       __u64 offset;
+       __u64 length;
+       __u64 read_count;
+       __u64 read_bytes;
+       __u64 write_count;
+       __u64 write_bytes;
+       __u32 layout_type;
+       layoutstats_encode_t layoutstats_encode;
+       void *layout_private;
+};
+
+struct nfs42_layoutstat_args {
+       struct nfs4_sequence_args seq_args;
+       struct nfs_fh *fh;
+       struct inode *inode;
+       nfs4_stateid stateid;
+       int num_dev;
+       struct nfs42_layoutstat_devinfo *devinfo;
+};
+
+struct nfs42_layoutstat_res {
+       struct nfs4_sequence_res seq_res;
+       int num_dev;
+       int rpc_status;
+};
+
+struct nfs42_layoutstat_data {
+       struct inode *inode;
+       struct nfs42_layoutstat_args args;
+       struct nfs42_layoutstat_res res;
+};
+
 struct stateowner_id {
        __u64   create_time;
        __u32   uniquifier;
@@ -984,17 +1027,14 @@ struct nfs4_readlink_res {
        struct nfs4_sequence_res        seq_res;
 };
 
-#define NFS4_SETCLIENTID_NAMELEN       (127)
 struct nfs4_setclientid {
        const nfs4_verifier *           sc_verifier;
-       unsigned int                    sc_name_len;
-       char                            sc_name[NFS4_SETCLIENTID_NAMELEN + 1];
        u32                             sc_prog;
        unsigned int                    sc_netid_len;
        char                            sc_netid[RPCBIND_MAXNETIDLEN + 1];
        unsigned int                    sc_uaddr_len;
        char                            sc_uaddr[RPCBIND_MAXUADDRLEN + 1];
-       u32                             sc_cb_ident;
+       struct nfs_client               *sc_clnt;
        struct rpc_cred                 *sc_cred;
 };
 
@@ -1142,12 +1182,9 @@ struct nfs41_state_protection {
        struct nfs4_op_map allow;
 };
 
-#define NFS4_EXCHANGE_ID_LEN   (48)
 struct nfs41_exchange_id_args {
        struct nfs_client               *client;
        nfs4_verifier                   *verifier;
-       unsigned int                    id_len;
-       char                            id[NFS4_EXCHANGE_ID_LEN];
        u32                             flags;
        struct nfs41_state_protection   state_protect;
 };
index 9ac1a62fc6f5a1085b69c0acd717d6231a628ac3..b02f72bb8e325bb5f17f7b9c2cad8f1f266379a8 100644 (file)
@@ -4,15 +4,20 @@
  *
  *   GPL LICENSE SUMMARY
  *
- *   Copyright(c) 2012 Intel Corporation. All rights reserved.
+ *   Copyright (C) 2015 EMC Corporation. All Rights Reserved.
  *
  *   This program is free software; you can redistribute it and/or modify
  *   it under the terms of version 2 of the GNU General Public License as
  *   published by the Free Software Foundation.
  *
+ *   This program is distributed in the hope that it will be useful, but
+ *   WITHOUT ANY WARRANTY; without even the implied warranty of
+ *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ *   General Public License for more details.
+ *
  *   BSD LICENSE
  *
- *   Copyright(c) 2012 Intel Corporation. All rights reserved.
+ *   Copyright (C) 2015 EMC Corporation. All Rights Reserved.
  *
  *   Redistribution and use in source and binary forms, with or without
  *   modification, are permitted provided that the following conditions
  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  *
- * Intel PCIe NTB Linux driver
+ * PCIe NTB Linux driver
  *
  * Contact Information:
- * Jon Mason <jon.mason@intel.com>
+ * Allen Hubbe <Allen.Hubbe@emc.com>
  */
 
-struct ntb_transport_qp;
+#ifndef _NTB_H_
+#define _NTB_H_
 
-struct ntb_client {
-       struct device_driver driver;
-       int (*probe)(struct pci_dev *pdev);
-       void (*remove)(struct pci_dev *pdev);
+#include <linux/completion.h>
+#include <linux/device.h>
+
+struct ntb_client;
+struct ntb_dev;
+struct pci_dev;
+
+/**
+ * enum ntb_topo - NTB connection topology
+ * @NTB_TOPO_NONE:     Topology is unknown or invalid.
+ * @NTB_TOPO_PRI:      On primary side of local ntb.
+ * @NTB_TOPO_SEC:      On secondary side of remote ntb.
+ * @NTB_TOPO_B2B_USD:  On primary side of local ntb upstream of remote ntb.
+ * @NTB_TOPO_B2B_DSD:  On primary side of local ntb downstream of remote ntb.
+ */
+enum ntb_topo {
+       NTB_TOPO_NONE = -1,
+       NTB_TOPO_PRI,
+       NTB_TOPO_SEC,
+       NTB_TOPO_B2B_USD,
+       NTB_TOPO_B2B_DSD,
+};
+
+static inline int ntb_topo_is_b2b(enum ntb_topo topo)
+{
+       switch ((int)topo) {
+       case NTB_TOPO_B2B_USD:
+       case NTB_TOPO_B2B_DSD:
+               return 1;
+       }
+       return 0;
+}
+
+static inline char *ntb_topo_string(enum ntb_topo topo)
+{
+       switch (topo) {
+       case NTB_TOPO_NONE:     return "NTB_TOPO_NONE";
+       case NTB_TOPO_PRI:      return "NTB_TOPO_PRI";
+       case NTB_TOPO_SEC:      return "NTB_TOPO_SEC";
+       case NTB_TOPO_B2B_USD:  return "NTB_TOPO_B2B_USD";
+       case NTB_TOPO_B2B_DSD:  return "NTB_TOPO_B2B_DSD";
+       }
+       return "NTB_TOPO_INVALID";
+}
+
+/**
+ * enum ntb_speed - NTB link training speed
+ * @NTB_SPEED_AUTO:    Request the max supported speed.
+ * @NTB_SPEED_NONE:    Link is not trained to any speed.
+ * @NTB_SPEED_GEN1:    Link is trained to gen1 speed.
+ * @NTB_SPEED_GEN2:    Link is trained to gen2 speed.
+ * @NTB_SPEED_GEN3:    Link is trained to gen3 speed.
+ */
+enum ntb_speed {
+       NTB_SPEED_AUTO = -1,
+       NTB_SPEED_NONE = 0,
+       NTB_SPEED_GEN1 = 1,
+       NTB_SPEED_GEN2 = 2,
+       NTB_SPEED_GEN3 = 3,
+};
+
+/**
+ * enum ntb_width - NTB link training width
+ * @NTB_WIDTH_AUTO:    Request the max supported width.
+ * @NTB_WIDTH_NONE:    Link is not trained to any width.
+ * @NTB_WIDTH_1:       Link is trained to 1 lane width.
+ * @NTB_WIDTH_2:       Link is trained to 2 lane width.
+ * @NTB_WIDTH_4:       Link is trained to 4 lane width.
+ * @NTB_WIDTH_8:       Link is trained to 8 lane width.
+ * @NTB_WIDTH_12:      Link is trained to 12 lane width.
+ * @NTB_WIDTH_16:      Link is trained to 16 lane width.
+ * @NTB_WIDTH_32:      Link is trained to 32 lane width.
+ */
+enum ntb_width {
+       NTB_WIDTH_AUTO = -1,
+       NTB_WIDTH_NONE = 0,
+       NTB_WIDTH_1 = 1,
+       NTB_WIDTH_2 = 2,
+       NTB_WIDTH_4 = 4,
+       NTB_WIDTH_8 = 8,
+       NTB_WIDTH_12 = 12,
+       NTB_WIDTH_16 = 16,
+       NTB_WIDTH_32 = 32,
+};
+
+/**
+ * struct ntb_client_ops - ntb client operations
+ * @probe:             Notify client of a new device.
+ * @remove:            Notify client to remove a device.
+ */
+struct ntb_client_ops {
+       int (*probe)(struct ntb_client *client, struct ntb_dev *ntb);
+       void (*remove)(struct ntb_client *client, struct ntb_dev *ntb);
+};
+
+static inline int ntb_client_ops_is_valid(const struct ntb_client_ops *ops)
+{
+       /* commented callbacks are not required: */
+       return
+               ops->probe                      &&
+               ops->remove                     &&
+               1;
+}
+
+/**
+ * struct ntb_ctx_ops - ntb driver context operations
+ * @link_event:                See ntb_link_event().
+ * @db_event:          See ntb_db_event().
+ */
+struct ntb_ctx_ops {
+       void (*link_event)(void *ctx);
+       void (*db_event)(void *ctx, int db_vector);
+};
+
+static inline int ntb_ctx_ops_is_valid(const struct ntb_ctx_ops *ops)
+{
+       /* commented callbacks are not required: */
+       return
+               /* ops->link_event              && */
+               /* ops->db_event                && */
+               1;
+}
+
+/**
+ * struct ntb_dev_ops - ntb device operations
+ * @mw_count:          See ntb_mw_count().
+ * @mw_get_range:      See ntb_mw_get_range().
+ * @mw_set_trans:      See ntb_mw_set_trans().
+ * @mw_clear_trans:    See ntb_mw_clear_trans().
+ * @link_is_up:                See ntb_link_is_up().
+ * @link_enable:       See ntb_link_enable().
+ * @link_disable:      See ntb_link_disable().
+ * @db_is_unsafe:      See ntb_db_is_unsafe().
+ * @db_valid_mask:     See ntb_db_valid_mask().
+ * @db_vector_count:   See ntb_db_vector_count().
+ * @db_vector_mask:    See ntb_db_vector_mask().
+ * @db_read:           See ntb_db_read().
+ * @db_set:            See ntb_db_set().
+ * @db_clear:          See ntb_db_clear().
+ * @db_read_mask:      See ntb_db_read_mask().
+ * @db_set_mask:       See ntb_db_set_mask().
+ * @db_clear_mask:     See ntb_db_clear_mask().
+ * @peer_db_addr:      See ntb_peer_db_addr().
+ * @peer_db_read:      See ntb_peer_db_read().
+ * @peer_db_set:       See ntb_peer_db_set().
+ * @peer_db_clear:     See ntb_peer_db_clear().
+ * @peer_db_read_mask: See ntb_peer_db_read_mask().
+ * @peer_db_set_mask:  See ntb_peer_db_set_mask().
+ * @peer_db_clear_mask:        See ntb_peer_db_clear_mask().
+ * @spad_is_unsafe:    See ntb_spad_is_unsafe().
+ * @spad_count:                See ntb_spad_count().
+ * @spad_read:         See ntb_spad_read().
+ * @spad_write:                See ntb_spad_write().
+ * @peer_spad_addr:    See ntb_peer_spad_addr().
+ * @peer_spad_read:    See ntb_peer_spad_read().
+ * @peer_spad_write:   See ntb_peer_spad_write().
+ */
+struct ntb_dev_ops {
+       int (*mw_count)(struct ntb_dev *ntb);
+       int (*mw_get_range)(struct ntb_dev *ntb, int idx,
+                           phys_addr_t *base, resource_size_t *size,
+                       resource_size_t *align, resource_size_t *align_size);
+       int (*mw_set_trans)(struct ntb_dev *ntb, int idx,
+                           dma_addr_t addr, resource_size_t size);
+       int (*mw_clear_trans)(struct ntb_dev *ntb, int idx);
+
+       int (*link_is_up)(struct ntb_dev *ntb,
+                         enum ntb_speed *speed, enum ntb_width *width);
+       int (*link_enable)(struct ntb_dev *ntb,
+                          enum ntb_speed max_speed, enum ntb_width max_width);
+       int (*link_disable)(struct ntb_dev *ntb);
+
+       int (*db_is_unsafe)(struct ntb_dev *ntb);
+       u64 (*db_valid_mask)(struct ntb_dev *ntb);
+       int (*db_vector_count)(struct ntb_dev *ntb);
+       u64 (*db_vector_mask)(struct ntb_dev *ntb, int db_vector);
+
+       u64 (*db_read)(struct ntb_dev *ntb);
+       int (*db_set)(struct ntb_dev *ntb, u64 db_bits);
+       int (*db_clear)(struct ntb_dev *ntb, u64 db_bits);
+
+       u64 (*db_read_mask)(struct ntb_dev *ntb);
+       int (*db_set_mask)(struct ntb_dev *ntb, u64 db_bits);
+       int (*db_clear_mask)(struct ntb_dev *ntb, u64 db_bits);
+
+       int (*peer_db_addr)(struct ntb_dev *ntb,
+                           phys_addr_t *db_addr, resource_size_t *db_size);
+       u64 (*peer_db_read)(struct ntb_dev *ntb);
+       int (*peer_db_set)(struct ntb_dev *ntb, u64 db_bits);
+       int (*peer_db_clear)(struct ntb_dev *ntb, u64 db_bits);
+
+       u64 (*peer_db_read_mask)(struct ntb_dev *ntb);
+       int (*peer_db_set_mask)(struct ntb_dev *ntb, u64 db_bits);
+       int (*peer_db_clear_mask)(struct ntb_dev *ntb, u64 db_bits);
+
+       int (*spad_is_unsafe)(struct ntb_dev *ntb);
+       int (*spad_count)(struct ntb_dev *ntb);
+
+       u32 (*spad_read)(struct ntb_dev *ntb, int idx);
+       int (*spad_write)(struct ntb_dev *ntb, int idx, u32 val);
+
+       int (*peer_spad_addr)(struct ntb_dev *ntb, int idx,
+                             phys_addr_t *spad_addr);
+       u32 (*peer_spad_read)(struct ntb_dev *ntb, int idx);
+       int (*peer_spad_write)(struct ntb_dev *ntb, int idx, u32 val);
 };
 
-enum {
-       NTB_LINK_DOWN = 0,
-       NTB_LINK_UP,
+static inline int ntb_dev_ops_is_valid(const struct ntb_dev_ops *ops)
+{
+       /* commented callbacks are not required: */
+       return
+               ops->mw_count                           &&
+               ops->mw_get_range                       &&
+               ops->mw_set_trans                       &&
+               /* ops->mw_clear_trans                  && */
+               ops->link_is_up                         &&
+               ops->link_enable                        &&
+               ops->link_disable                       &&
+               /* ops->db_is_unsafe                    && */
+               ops->db_valid_mask                      &&
+
+               /* both set, or both unset */
+               (!ops->db_vector_count == !ops->db_vector_mask) &&
+
+               ops->db_read                            &&
+               /* ops->db_set                          && */
+               ops->db_clear                           &&
+               /* ops->db_read_mask                    && */
+               ops->db_set_mask                        &&
+               ops->db_clear_mask                      &&
+               ops->peer_db_addr                       &&
+               /* ops->peer_db_read                    && */
+               ops->peer_db_set                        &&
+               /* ops->peer_db_clear                   && */
+               /* ops->peer_db_read_mask               && */
+               /* ops->peer_db_set_mask                && */
+               /* ops->peer_db_clear_mask              && */
+               /* ops->spad_is_unsafe                  && */
+               ops->spad_count                         &&
+               ops->spad_read                          &&
+               ops->spad_write                         &&
+               ops->peer_spad_addr                     &&
+               /* ops->peer_spad_read                  && */
+               ops->peer_spad_write                    &&
+               1;
+}
+
+/**
+ * struct ntb_client - client interested in ntb devices
+ * @drv:               Linux driver object.
+ * @ops:               See &ntb_client_ops.
+ */
+struct ntb_client {
+       struct device_driver            drv;
+       const struct ntb_client_ops     ops;
 };
 
-int ntb_register_client(struct ntb_client *drvr);
-void ntb_unregister_client(struct ntb_client *drvr);
-int ntb_register_client_dev(char *device_name);
-void ntb_unregister_client_dev(char *device_name);
-
-struct ntb_queue_handlers {
-       void (*rx_handler)(struct ntb_transport_qp *qp, void *qp_data,
-                          void *data, int len);
-       void (*tx_handler)(struct ntb_transport_qp *qp, void *qp_data,
-                          void *data, int len);
-       void (*event_handler)(void *data, int status);
+#define drv_ntb_client(__drv) container_of((__drv), struct ntb_client, drv)
+
+/**
+ * struct ntb_dev - ntb device
+ * @dev:               Linux device object.
+ * @pdev:              Pci device entry of the ntb.
+ * @topo:              Detected topology of the ntb.
+ * @ops:               See &ntb_dev_ops.
+ * @ctx:		Driver context, set via ntb_set_ctx().
+ * @ctx_ops:           See &ntb_ctx_ops.
+ */
+struct ntb_dev {
+       struct device                   dev;
+       struct pci_dev                  *pdev;
+       enum ntb_topo                   topo;
+       const struct ntb_dev_ops        *ops;
+       void                            *ctx;
+       const struct ntb_ctx_ops        *ctx_ops;
+
+       /* private: */
+
+       /* synchronize setting, clearing, and calling ctx_ops */
+       spinlock_t                      ctx_lock;
+       /* block unregister until device is fully released */
+       struct completion               released;
 };
 
-unsigned char ntb_transport_qp_num(struct ntb_transport_qp *qp);
-unsigned int ntb_transport_max_size(struct ntb_transport_qp *qp);
-struct ntb_transport_qp *
-ntb_transport_create_queue(void *data, struct pci_dev *pdev,
-                          const struct ntb_queue_handlers *handlers);
-void ntb_transport_free_queue(struct ntb_transport_qp *qp);
-int ntb_transport_rx_enqueue(struct ntb_transport_qp *qp, void *cb, void *data,
-                            unsigned int len);
-int ntb_transport_tx_enqueue(struct ntb_transport_qp *qp, void *cb, void *data,
-                            unsigned int len);
-void *ntb_transport_rx_remove(struct ntb_transport_qp *qp, unsigned int *len);
-void ntb_transport_link_up(struct ntb_transport_qp *qp);
-void ntb_transport_link_down(struct ntb_transport_qp *qp);
-bool ntb_transport_link_query(struct ntb_transport_qp *qp);
+#define dev_ntb(__dev) container_of((__dev), struct ntb_dev, dev)
+
+/**
+ * ntb_register_client() - register a client for interest in ntb devices
+ * @client:    Client context.
+ *
+ * The client will be added to the list of clients interested in ntb devices.
+ * The client will be notified of any ntb devices that are not already
+ * associated with a client, or if ntb devices are registered later.
+ *
+ * Return: Zero if the client is registered, otherwise an error number.
+ */
+#define ntb_register_client(client) \
+       __ntb_register_client((client), THIS_MODULE, KBUILD_MODNAME)
+
+int __ntb_register_client(struct ntb_client *client, struct module *mod,
+                         const char *mod_name);
+
+/**
+ * ntb_unregister_client() - unregister a client for interest in ntb devices
+ * @client:    Client context.
+ *
+ * The client will be removed from the list of clients interested in ntb
+ * devices.  If any ntb devices are associated with the client, the client will
+ * be notified to remove those devices.
+ */
+void ntb_unregister_client(struct ntb_client *client);
+
+#define module_ntb_client(__ntb_client) \
+       module_driver(__ntb_client, ntb_register_client, \
+                       ntb_unregister_client)
+
+/**
+ * ntb_register_device() - register a ntb device
+ * @ntb:       NTB device context.
+ *
+ * The device will be added to the list of ntb devices.  If any clients are
+ * interested in ntb devices, each client will be notified of the ntb device,
+ * until at most one client accepts the device.
+ *
+ * Return: Zero if the device is registered, otherwise an error number.
+ */
+int ntb_register_device(struct ntb_dev *ntb);
+
+/**
+ * ntb_unregister_device() - unregister a ntb device
+ * @ntb:       NTB device context.
+ *
+ * The device will be removed from the list of ntb devices.  If the ntb device
+ * is associated with a client, the client will be notified to remove the
+ * device.
+ */
+void ntb_unregister_device(struct ntb_dev *ntb);
+
+/**
+ * ntb_set_ctx() - associate a driver context with an ntb device
+ * @ntb:       NTB device context.
+ * @ctx:       Driver context.
+ * @ctx_ops:   Driver context operations.
+ *
+ * Associate a driver context and operations with a ntb device.  The context is
+ * provided by the client driver, and the driver may associate a different
+ * context with each ntb device.
+ *
+ * Return: Zero if the context is associated, otherwise an error number.
+ */
+int ntb_set_ctx(struct ntb_dev *ntb, void *ctx,
+               const struct ntb_ctx_ops *ctx_ops);
+
+/**
+ * ntb_clear_ctx() - disassociate any driver context from an ntb device
+ * @ntb:       NTB device context.
+ *
+ * Clear any association that may exist between a driver context and the ntb
+ * device.
+ */
+void ntb_clear_ctx(struct ntb_dev *ntb);
+
+/**
+ * ntb_link_event() - notify driver context of a change in link status
+ * @ntb:       NTB device context.
+ *
+ * Notify the driver context that the link status may have changed.  The driver
+ * should call ntb_link_is_up() to get the current status.
+ */
+void ntb_link_event(struct ntb_dev *ntb);
+
+/**
+ * ntb_db_event() - notify driver context of a doorbell event
+ * @ntb:       NTB device context.
+ * @vector:    Interrupt vector number.
+ *
+ * Notify the driver context of a doorbell event.  If hardware supports
+ * multiple interrupt vectors for doorbells, the vector number indicates which
+ * vector received the interrupt.  The vector number is relative to the first
+ * vector used for doorbells, starting at zero, and must be less than
+ * ntb_db_vector_count().  The driver may call ntb_db_read() to check which
+ * doorbell bits need service, and ntb_db_vector_mask() to determine which of
+ * those bits are associated with the vector number.
+ */
+void ntb_db_event(struct ntb_dev *ntb, int vector);
+
+/**
+ * ntb_mw_count() - get the number of memory windows
+ * @ntb:       NTB device context.
+ *
+ * Hardware and topology may support a different number of memory windows.
+ *
+ * Return: the number of memory windows.
+ */
+static inline int ntb_mw_count(struct ntb_dev *ntb)
+{
+       return ntb->ops->mw_count(ntb);
+}
+
+/**
+ * ntb_mw_get_range() - get the range of a memory window
+ * @ntb:       NTB device context.
+ * @idx:       Memory window number.
+ * @base:      OUT - the base address for mapping the memory window
+ * @size:      OUT - the size for mapping the memory window
+ * @align:     OUT - the base alignment for translating the memory window
+ * @align_size:        OUT - the size alignment for translating the memory window
+ *
+ * Get the range of a memory window.  NULL may be given for any output
+ * parameter if the value is not needed.  The base and size may be used for
+ * mapping the memory window, to access the peer memory.  The alignment and
+ * size may be used for translating the memory window, for the peer to access
+ * memory on the local system.
+ *
+ * Return: Zero on success, otherwise an error number.
+ */
+static inline int ntb_mw_get_range(struct ntb_dev *ntb, int idx,
+                                  phys_addr_t *base, resource_size_t *size,
+               resource_size_t *align, resource_size_t *align_size)
+{
+       return ntb->ops->mw_get_range(ntb, idx, base, size,
+                       align, align_size);
+}
+
+/**
+ * ntb_mw_set_trans() - set the translation of a memory window
+ * @ntb:       NTB device context.
+ * @idx:       Memory window number.
+ * @addr:      The dma address local memory to expose to the peer.
+ * @size:      The size of the local memory to expose to the peer.
+ *
+ * Set the translation of a memory window.  The peer may access local memory
+ * through the window starting at the address, up to the size.  The address
+ * must be aligned to the alignment specified by ntb_mw_get_range().  The size
+ * must be aligned to the size alignment specified by ntb_mw_get_range().
+ *
+ * Return: Zero on success, otherwise an error number.
+ */
+static inline int ntb_mw_set_trans(struct ntb_dev *ntb, int idx,
+                                  dma_addr_t addr, resource_size_t size)
+{
+       return ntb->ops->mw_set_trans(ntb, idx, addr, size);
+}
+
+/**
+ * ntb_mw_clear_trans() - clear the translation of a memory window
+ * @ntb:       NTB device context.
+ * @idx:       Memory window number.
+ *
+ * Clear the translation of a memory window.  The peer may no longer access
+ * local memory through the window.
+ *
+ * Return: Zero on success, otherwise an error number.
+ */
+static inline int ntb_mw_clear_trans(struct ntb_dev *ntb, int idx)
+{
+       if (!ntb->ops->mw_clear_trans)
+               return ntb->ops->mw_set_trans(ntb, idx, 0, 0);
+
+       return ntb->ops->mw_clear_trans(ntb, idx);
+}
+
+/**
+ * ntb_link_is_up() - get the current ntb link state
+ * @ntb:       NTB device context.
+ * @speed:     OUT - The link speed expressed as PCIe generation number.
+ * @width:     OUT - The link width expressed as the number of PCIe lanes.
+ *
+ * Get the current state of the ntb link.  If the link is up, the optional
+ * speed and width output parameters return what the link trained to.
+ *
+ * Return: One if the link is up, zero if the link is down, otherwise a
+ *             negative value indicating the error number.
+ */
+static inline int ntb_link_is_up(struct ntb_dev *ntb,
+                                enum ntb_speed *speed, enum ntb_width *width)
+{
+       return ntb->ops->link_is_up(ntb, speed, width);
+}
+
+/**
+ * ntb_link_enable() - enable the link on the secondary side of the ntb
+ * @ntb:       NTB device context.
+ * @max_speed: The maximum link speed expressed as PCIe generation number.
+ * @max_width: The maximum link width expressed as the number of PCIe lanes.
+ *
+ * Enable the link on the secondary side of the ntb.  This can only be done
+ * from the primary side of the ntb in primary or b2b topology.  The ntb device
+ * should train the link to its maximum speed and width, or the requested speed
+ * and width, whichever is smaller, if supported.
+ *
+ * Return: Zero on success, otherwise an error number.
+ */
+static inline int ntb_link_enable(struct ntb_dev *ntb,
+                                 enum ntb_speed max_speed,
+                                 enum ntb_width max_width)
+{
+       return ntb->ops->link_enable(ntb, max_speed, max_width);
+}
+
+/**
+ * ntb_link_disable() - disable the link on the secondary side of the ntb
+ * @ntb:       NTB device context.
+ *
+ * Disable the link on the secondary side of the ntb.  This can only be
+ * done from the primary side of the ntb in primary or b2b topology.  The ntb
+ * device should disable the link.  Returning from this call must indicate that
+ * a barrier has passed: no more writes may pass in either direction across
+ * the link once this call returns, unless this call returns an error number.
+ *
+ * Return: Zero on success, otherwise an error number.
+ */
+static inline int ntb_link_disable(struct ntb_dev *ntb)
+{
+       return ntb->ops->link_disable(ntb);
+}
+
+/**
+ * ntb_db_is_unsafe() - check if it is safe to use hardware doorbell
+ * @ntb:       NTB device context.
+ *
+ * It is possible for some ntb hardware to be affected by errata.  Hardware
+ * drivers can advise clients to avoid using doorbells.  Clients may ignore
+ * this advice, though caution is recommended.
+ *
+ * Return: Zero if it is safe to use doorbells, or one if it is not safe.
+ */
+static inline int ntb_db_is_unsafe(struct ntb_dev *ntb)
+{
+       if (!ntb->ops->db_is_unsafe)
+               return 0;
+
+       return ntb->ops->db_is_unsafe(ntb);
+}
+
+/**
+ * ntb_db_valid_mask() - get a mask of doorbell bits supported by the ntb
+ * @ntb:       NTB device context.
+ *
+ * Hardware may support different number or arrangement of doorbell bits.
+ *
+ * Return: A mask of doorbell bits supported by the ntb.
+ */
+static inline u64 ntb_db_valid_mask(struct ntb_dev *ntb)
+{
+       return ntb->ops->db_valid_mask(ntb);
+}
+
+/**
+ * ntb_db_vector_count() - get the number of doorbell interrupt vectors
+ * @ntb:       NTB device context.
+ *
+ * Hardware may support different number of interrupt vectors.
+ *
+ * Return: The number of doorbell interrupt vectors.
+ */
+static inline int ntb_db_vector_count(struct ntb_dev *ntb)
+{
+       if (!ntb->ops->db_vector_count)
+               return 1;
+
+       return ntb->ops->db_vector_count(ntb);
+}
+
+/**
+ * ntb_db_vector_mask() - get a mask of doorbell bits serviced by a vector
+ * @ntb:       NTB device context.
+ * @vector:    Doorbell vector number.
+ *
+ * Each interrupt vector may have a different number or arrangement of bits.
+ *
+ * Return: A mask of doorbell bits serviced by a vector.
+ */
+static inline u64 ntb_db_vector_mask(struct ntb_dev *ntb, int vector)
+{
+       if (!ntb->ops->db_vector_mask)
+               return ntb_db_valid_mask(ntb);
+
+       return ntb->ops->db_vector_mask(ntb, vector);
+}
+
+/**
+ * ntb_db_read() - read the local doorbell register
+ * @ntb:       NTB device context.
+ *
+ * Read the local doorbell register, and return the bits that are set.
+ *
+ * Return: The bits currently set in the local doorbell register.
+ */
+static inline u64 ntb_db_read(struct ntb_dev *ntb)
+{
+       return ntb->ops->db_read(ntb);
+}
+
+/**
+ * ntb_db_set() - set bits in the local doorbell register
+ * @ntb:       NTB device context.
+ * @db_bits:   Doorbell bits to set.
+ *
+ * Set bits in the local doorbell register, which may generate a local doorbell
+ * interrupt.  Bits that were already set must remain set.
+ *
+ * This is unusual, and hardware may not support it.
+ *
+ * Return: Zero on success, otherwise an error number.
+ */
+static inline int ntb_db_set(struct ntb_dev *ntb, u64 db_bits)
+{
+       if (!ntb->ops->db_set)
+               return -EINVAL;
+
+       return ntb->ops->db_set(ntb, db_bits);
+}
+
+/**
+ * ntb_db_clear() - clear bits in the local doorbell register
+ * @ntb:       NTB device context.
+ * @db_bits:   Doorbell bits to clear.
+ *
+ * Clear bits in the local doorbell register, arming the bits for the next
+ * doorbell.
+ *
+ * Return: Zero on success, otherwise an error number.
+ */
+static inline int ntb_db_clear(struct ntb_dev *ntb, u64 db_bits)
+{
+       return ntb->ops->db_clear(ntb, db_bits);
+}
+
+/**
+ * ntb_db_read_mask() - read the local doorbell mask
+ * @ntb:       NTB device context.
+ *
+ * Read the local doorbell mask register, and return the bits that are set.
+ *
+ * This is unusual, though hardware is likely to support it.
+ *
+ * Return: The bits currently set in the local doorbell mask register.
+ */
+static inline u64 ntb_db_read_mask(struct ntb_dev *ntb)
+{
+       if (!ntb->ops->db_read_mask)
+               return 0;
+
+       return ntb->ops->db_read_mask(ntb);
+}
+
+/**
+ * ntb_db_set_mask() - set bits in the local doorbell mask
+ * @ntb:       NTB device context.
+ * @db_bits:   Doorbell mask bits to set.
+ *
+ * Set bits in the local doorbell mask register, preventing doorbell interrupts
+ * from being generated for those doorbell bits.  Bits that were already set
+ * must remain set.
+ *
+ * Return: Zero on success, otherwise an error number.
+ */
+static inline int ntb_db_set_mask(struct ntb_dev *ntb, u64 db_bits)
+{
+       return ntb->ops->db_set_mask(ntb, db_bits);
+}
+
+/**
+ * ntb_db_clear_mask() - clear bits in the local doorbell mask
+ * @ntb:       NTB device context.
+ * @db_bits:   Doorbell bits to clear.
+ *
+ * Clear bits in the local doorbell mask register, allowing doorbell interrupts
+ * to be generated for those doorbell bits.  If a doorbell bit is already
+ * set at the time the mask is cleared, and the corresponding mask bit is
+ * changed from set to clear, then the ntb driver must ensure that
+ * ntb_db_event() is called.  If the hardware does not generate the interrupt
+ * on clearing the mask bit, then the driver must call ntb_db_event() anyway.
+ *
+ * Return: Zero on success, otherwise an error number.
+ */
+static inline int ntb_db_clear_mask(struct ntb_dev *ntb, u64 db_bits)
+{
+       return ntb->ops->db_clear_mask(ntb, db_bits);
+}
+
+/**
+ * ntb_peer_db_addr() - address and size of the peer doorbell register
+ * @ntb:       NTB device context.
+ * @db_addr:   OUT - The address of the peer doorbell register.
+ * @db_size:	OUT - The number of bytes to write to the peer doorbell register.
+ *
+ * Return the address of the peer doorbell register.  This may be used, for
+ * example, by drivers that offload memory copy operations to a dma engine.
+ * The drivers may wish to ring the peer doorbell at the completion of memory
+ * copy operations.  For efficiency, and to simplify ordering of operations
+ * between the dma memory copies and the ringing doorbell, the driver may
+ * append one additional dma memory copy with the doorbell register as the
+ * destination, after the memory copy operations.
+ *
+ * Return: Zero on success, otherwise an error number.
+ */
+static inline int ntb_peer_db_addr(struct ntb_dev *ntb,
+                                  phys_addr_t *db_addr,
+                                  resource_size_t *db_size)
+{
+       return ntb->ops->peer_db_addr(ntb, db_addr, db_size);
+}
+
+/**
+ * ntb_peer_db_read() - read the peer doorbell register
+ * @ntb:       NTB device context.
+ *
+ * Read the peer doorbell register, and return the bits that are set.
+ *
+ * This is unusual, and hardware may not support it.
+ *
+ * Return: The bits currently set in the peer doorbell register.
+ */
+static inline u64 ntb_peer_db_read(struct ntb_dev *ntb)
+{
+       if (!ntb->ops->peer_db_read)
+               return 0;
+
+       return ntb->ops->peer_db_read(ntb);
+}
+
+/**
+ * ntb_peer_db_set() - set bits in the peer doorbell register
+ * @ntb:       NTB device context.
+ * @db_bits:   Doorbell bits to set.
+ *
+ * Set bits in the peer doorbell register, which may generate a peer doorbell
+ * interrupt.  Bits that were already set must remain set.
+ *
+ * Return: Zero on success, otherwise an error number.
+ */
+static inline int ntb_peer_db_set(struct ntb_dev *ntb, u64 db_bits)
+{
+       return ntb->ops->peer_db_set(ntb, db_bits);
+}
+
+/**
+ * ntb_peer_db_clear() - clear bits in the peer doorbell register
+ * @ntb:       NTB device context.
+ * @db_bits:   Doorbell bits to clear.
+ *
+ * Clear bits in the peer doorbell register, arming the bits for the next
+ * doorbell.
+ *
+ * This is unusual, and hardware may not support it.
+ *
+ * Return: Zero on success, otherwise an error number.
+ */
+static inline int ntb_peer_db_clear(struct ntb_dev *ntb, u64 db_bits)
+{
+	if (!ntb->ops->peer_db_clear)
+               return -EINVAL;
+
+       return ntb->ops->peer_db_clear(ntb, db_bits);
+}
+
+/**
+ * ntb_peer_db_read_mask() - read the peer doorbell mask
+ * @ntb:       NTB device context.
+ *
+ * Read the peer doorbell mask register, and return the bits that are set.
+ *
+ * This is unusual, and hardware may not support it.
+ *
+ * Return: The bits currently set in the peer doorbell mask register.
+ */
+static inline u64 ntb_peer_db_read_mask(struct ntb_dev *ntb)
+{
+	if (!ntb->ops->peer_db_read_mask)
+               return 0;
+
+       return ntb->ops->peer_db_read_mask(ntb);
+}
+
+/**
+ * ntb_peer_db_set_mask() - set bits in the peer doorbell mask
+ * @ntb:       NTB device context.
+ * @db_bits:   Doorbell mask bits to set.
+ *
+ * Set bits in the peer doorbell mask register, preventing doorbell interrupts
+ * from being generated for those doorbell bits.  Bits that were already set
+ * must remain set.
+ *
+ * This is unusual, and hardware may not support it.
+ *
+ * Return: Zero on success, otherwise an error number.
+ */
+static inline int ntb_peer_db_set_mask(struct ntb_dev *ntb, u64 db_bits)
+{
+	if (!ntb->ops->peer_db_set_mask)
+               return -EINVAL;
+
+       return ntb->ops->peer_db_set_mask(ntb, db_bits);
+}
+
+/**
+ * ntb_peer_db_clear_mask() - clear bits in the peer doorbell mask
+ * @ntb:       NTB device context.
+ * @db_bits:   Doorbell bits to clear.
+ *
+ * Clear bits in the peer doorbell mask register, allowing doorbell interrupts
+ * to be generated for those doorbell bits.  If the hardware does not
+ * generate the interrupt on clearing the mask bit, then the driver should not
+ * implement this function!
+ *
+ * This is unusual, and hardware may not support it.
+ *
+ * Return: Zero on success, otherwise an error number.
+ */
+static inline int ntb_peer_db_clear_mask(struct ntb_dev *ntb, u64 db_bits)
+{
+	if (!ntb->ops->peer_db_clear_mask)
+               return -EINVAL;
+
+       return ntb->ops->peer_db_clear_mask(ntb, db_bits);
+}
+
+/**
+ * ntb_spad_is_unsafe() - check if it is safe to use the hardware scratchpads
+ * @ntb:       NTB device context.
+ *
+ * It is possible for some ntb hardware to be affected by errata.  Hardware
+ * drivers can advise clients to avoid using scratchpads.  Clients may ignore
+ * this advice, though caution is recommended.
+ *
+ * Return: Zero if it is safe to use scratchpads, or one if it is not safe.
+ */
+static inline int ntb_spad_is_unsafe(struct ntb_dev *ntb)
+{
+       if (!ntb->ops->spad_is_unsafe)
+               return 0;
+
+       return ntb->ops->spad_is_unsafe(ntb);
+}
+
+/**
+ * ntb_spad_count() - get the number of scratchpads
+ * @ntb:       NTB device context.
+ *
+ * Hardware and topology may support a different number of scratchpads.
+ *
+ * Return: the number of scratchpads.
+ */
+static inline int ntb_spad_count(struct ntb_dev *ntb)
+{
+       return ntb->ops->spad_count(ntb);
+}
+
+/**
+ * ntb_spad_read() - read the local scratchpad register
+ * @ntb:       NTB device context.
+ * @idx:       Scratchpad index.
+ *
+ * Read the local scratchpad register, and return the value.
+ *
+ * Return: The value of the local scratchpad register.
+ */
+static inline u32 ntb_spad_read(struct ntb_dev *ntb, int idx)
+{
+       return ntb->ops->spad_read(ntb, idx);
+}
+
+/**
+ * ntb_spad_write() - write the local scratchpad register
+ * @ntb:       NTB device context.
+ * @idx:       Scratchpad index.
+ * @val:       Scratchpad value.
+ *
+ * Write the value to the local scratchpad register.
+ *
+ * Return: Zero on success, otherwise an error number.
+ */
+static inline int ntb_spad_write(struct ntb_dev *ntb, int idx, u32 val)
+{
+       return ntb->ops->spad_write(ntb, idx, val);
+}
+
+/**
+ * ntb_peer_spad_addr() - address of the peer scratchpad register
+ * @ntb:       NTB device context.
+ * @idx:       Scratchpad index.
+ * @spad_addr: OUT - The address of the peer scratchpad register.
+ *
+ * Return the address of the peer scratchpad register.  This may be used, for
+ * example, by drivers that offload memory copy operations to a dma engine.
+ *
+ * Return: Zero on success, otherwise an error number.
+ */
+static inline int ntb_peer_spad_addr(struct ntb_dev *ntb, int idx,
+                                    phys_addr_t *spad_addr)
+{
+       return ntb->ops->peer_spad_addr(ntb, idx, spad_addr);
+}
+
+/**
+ * ntb_peer_spad_read() - read the peer scratchpad register
+ * @ntb:       NTB device context.
+ * @idx:       Scratchpad index.
+ *
+ * Read the peer scratchpad register, and return the value.
+ *
+ * Return: The value of the peer scratchpad register.
+ */
+static inline u32 ntb_peer_spad_read(struct ntb_dev *ntb, int idx)
+{
+       return ntb->ops->peer_spad_read(ntb, idx);
+}
+
+/**
+ * ntb_peer_spad_write() - write the peer scratchpad register
+ * @ntb:       NTB device context.
+ * @idx:       Scratchpad index.
+ * @val:       Scratchpad value.
+ *
+ * Write the value to the peer scratchpad register.
+ *
+ * Return: Zero on success, otherwise an error number.
+ */
+static inline int ntb_peer_spad_write(struct ntb_dev *ntb, int idx, u32 val)
+{
+       return ntb->ops->peer_spad_write(ntb, idx, val);
+}
+
+#endif
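
Illustrative sketch, not part of this series: a minimal client of the abstract NTB API declared above. The demo_* names are made up, and a real client would also manage memory windows and scratchpads.

    #include <linux/module.h>
    #include <linux/ntb.h>

    static void demo_link_event(void *ctx)
    {
            struct ntb_dev *ntb = ctx;
            enum ntb_speed speed;
            enum ntb_width width;

            if (ntb_link_is_up(ntb, &speed, &width) > 0)
                    dev_info(&ntb->dev, "link up, gen%d x%d\n", speed, width);
    }

    static void demo_db_event(void *ctx, int vector)
    {
            struct ntb_dev *ntb = ctx;
            u64 db_bits = ntb_db_read(ntb) & ntb_db_vector_mask(ntb, vector);

            /* Service the doorbell bits for this vector, then re-arm them. */
            ntb_db_clear(ntb, db_bits);
    }

    static const struct ntb_ctx_ops demo_ctx_ops = {
            .link_event = demo_link_event,
            .db_event = demo_db_event,
    };

    static int demo_probe(struct ntb_client *client, struct ntb_dev *ntb)
    {
            int rc;

            rc = ntb_set_ctx(ntb, ntb, &demo_ctx_ops);
            if (rc)
                    return rc;

            ntb_link_enable(ntb, NTB_SPEED_AUTO, NTB_WIDTH_AUTO);
            return 0;
    }

    static void demo_remove(struct ntb_client *client, struct ntb_dev *ntb)
    {
            ntb_link_disable(ntb);
            ntb_clear_ctx(ntb);
    }

    static struct ntb_client demo_client = {
            .ops = {
                    .probe = demo_probe,
                    .remove = demo_remove,
            },
    };
    module_ntb_client(demo_client);

    MODULE_LICENSE("GPL");
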
diff --git a/include/linux/ntb_transport.h b/include/linux/ntb_transport.h
new file mode 100644 (file)
index 0000000..2862861
--- /dev/null
@@ -0,0 +1,85 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ *   redistributing this file, you may do so under either license.
+ *
+ *   GPL LICENSE SUMMARY
+ *
+ *   Copyright(c) 2012 Intel Corporation. All rights reserved.
+ *   Copyright (C) 2015 EMC Corporation. All Rights Reserved.
+ *
+ *   This program is free software; you can redistribute it and/or modify
+ *   it under the terms of version 2 of the GNU General Public License as
+ *   published by the Free Software Foundation.
+ *
+ *   BSD LICENSE
+ *
+ *   Copyright(c) 2012 Intel Corporation. All rights reserved.
+ *   Copyright (C) 2015 EMC Corporation. All Rights Reserved.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copy
+ *       notice, this list of conditions and the following disclaimer in
+ *       the documentation and/or other materials provided with the
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * PCIe NTB Transport Linux driver
+ *
+ * Contact Information:
+ * Jon Mason <jon.mason@intel.com>
+ */
+
+struct ntb_transport_qp;
+
+struct ntb_transport_client {
+       struct device_driver driver;
+       int (*probe)(struct device *client_dev);
+       void (*remove)(struct device *client_dev);
+};
+
+int ntb_transport_register_client(struct ntb_transport_client *drvr);
+void ntb_transport_unregister_client(struct ntb_transport_client *drvr);
+int ntb_transport_register_client_dev(char *device_name);
+void ntb_transport_unregister_client_dev(char *device_name);
+
+struct ntb_queue_handlers {
+       void (*rx_handler)(struct ntb_transport_qp *qp, void *qp_data,
+                          void *data, int len);
+       void (*tx_handler)(struct ntb_transport_qp *qp, void *qp_data,
+                          void *data, int len);
+       void (*event_handler)(void *data, int status);
+};
+
+unsigned char ntb_transport_qp_num(struct ntb_transport_qp *qp);
+unsigned int ntb_transport_max_size(struct ntb_transport_qp *qp);
+struct ntb_transport_qp *
+ntb_transport_create_queue(void *data, struct device *client_dev,
+                          const struct ntb_queue_handlers *handlers);
+void ntb_transport_free_queue(struct ntb_transport_qp *qp);
+int ntb_transport_rx_enqueue(struct ntb_transport_qp *qp, void *cb, void *data,
+                            unsigned int len);
+int ntb_transport_tx_enqueue(struct ntb_transport_qp *qp, void *cb, void *data,
+                            unsigned int len);
+void *ntb_transport_rx_remove(struct ntb_transport_qp *qp, unsigned int *len);
+void ntb_transport_link_up(struct ntb_transport_qp *qp);
+void ntb_transport_link_down(struct ntb_transport_qp *qp);
+bool ntb_transport_link_query(struct ntb_transport_qp *qp);
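
Illustrative sketch, not part of this series: a client of the relocated ntb_transport API above. The demo_* names are made up and error handling is trimmed.

    #include <linux/device.h>
    #include <linux/module.h>
    #include <linux/ntb_transport.h>

    static void demo_rx(struct ntb_transport_qp *qp, void *qp_data,
                        void *data, int len)
    {
            /* Consume the data, then re-post a receive buffer with
             * ntb_transport_rx_enqueue() so the queue does not run dry. */
    }

    static void demo_tx(struct ntb_transport_qp *qp, void *qp_data,
                        void *data, int len)
    {
            /* The buffer passed to ntb_transport_tx_enqueue() is free again. */
    }

    static void demo_event(void *data, int status)
    {
            /* Transport link to the peer went up (status != 0) or down. */
    }

    static const struct ntb_queue_handlers demo_handlers = {
            .rx_handler = demo_rx,
            .tx_handler = demo_tx,
            .event_handler = demo_event,
    };

    static int demo_probe(struct device *client_dev)
    {
            struct ntb_transport_qp *qp;

            qp = ntb_transport_create_queue(NULL, client_dev, &demo_handlers);
            if (!qp)
                    return -EIO;

            dev_set_drvdata(client_dev, qp);
            ntb_transport_link_up(qp);
            return 0;
    }

    static void demo_remove(struct device *client_dev)
    {
            struct ntb_transport_qp *qp = dev_get_drvdata(client_dev);

            ntb_transport_link_down(qp);
            ntb_transport_free_queue(qp);
    }

    static struct ntb_transport_client demo_client = {
            .driver = {
                    .name = "demo_transport",
            },
            .probe = demo_probe,
            .remove = demo_remove,
    };

    static int __init demo_init(void)
    {
            int rc;

            rc = ntb_transport_register_client_dev("demo_transport");
            if (rc)
                    return rc;

            rc = ntb_transport_register_client(&demo_client);
            if (rc)
                    ntb_transport_unregister_client_dev("demo_transport");
            return rc;
    }
    module_init(demo_init);

    static void __exit demo_exit(void)
    {
            ntb_transport_unregister_client(&demo_client);
            ntb_transport_unregister_client_dev("demo_transport");
    }
    module_exit(demo_exit);

    MODULE_LICENSE("GPL");
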
index b871ff9d81d7207333fa021e6a95cb6bdbcf34ac..edc068d19c79248564239e3823982a819dad7411 100644 (file)
@@ -120,6 +120,12 @@ extern struct device_node *of_aliases;
 extern struct device_node *of_stdout;
 extern raw_spinlock_t devtree_lock;
 
+/* flag descriptions (need to be visible even when !CONFIG_OF) */
+#define OF_DYNAMIC     1 /* node and properties were allocated via kmalloc */
+#define OF_DETACHED    2 /* node has been detached from the device tree */
+#define OF_POPULATED   3 /* device already created for the node */
+#define OF_POPULATED_BUS       4 /* of_platform_populate recursed to children of this node */
+
 #ifdef CONFIG_OF
 void of_core_init(void);
 
@@ -128,7 +134,7 @@ static inline bool is_of_node(struct fwnode_handle *fwnode)
        return fwnode && fwnode->type == FWNODE_OF;
 }
 
-static inline struct device_node *of_node(struct fwnode_handle *fwnode)
+static inline struct device_node *to_of_node(struct fwnode_handle *fwnode)
 {
        return fwnode ? container_of(fwnode, struct device_node, fwnode) : NULL;
 }
@@ -219,12 +225,6 @@ static inline unsigned long of_read_ulong(const __be32 *cell, int size)
 #define of_node_cmp(s1, s2)            strcasecmp((s1), (s2))
 #endif
 
-/* flag descriptions */
-#define OF_DYNAMIC     1 /* node and properties were allocated via kmalloc */
-#define OF_DETACHED    2 /* node has been detached from the device tree */
-#define OF_POPULATED   3 /* device already created for the node */
-#define OF_POPULATED_BUS       4 /* of_platform_populate recursed to children of this node */
-
 #define OF_IS_DYNAMIC(x) test_bit(OF_DYNAMIC, &x->_flags)
 #define OF_MARK_DYNAMIC(x) set_bit(OF_DYNAMIC, &x->_flags)
 
@@ -387,7 +387,7 @@ static inline bool is_of_node(struct fwnode_handle *fwnode)
        return false;
 }
 
-static inline struct device_node *of_node(struct fwnode_handle *fwnode)
+static inline struct device_node *to_of_node(struct fwnode_handle *fwnode)
 {
        return NULL;
 }
@@ -428,6 +428,11 @@ static inline struct device_node *of_find_node_opts_by_path(const char *path,
        return NULL;
 }
 
+static inline struct device_node *of_find_node_by_phandle(phandle handle)
+{
+       return NULL;
+}
+
 static inline struct device_node *of_get_parent(const struct device_node *node)
 {
        return NULL;
@@ -673,7 +678,10 @@ static inline void of_property_clear_flag(struct property *p, unsigned long flag
 #if defined(CONFIG_OF) && defined(CONFIG_NUMA)
 extern int of_node_to_nid(struct device_node *np);
 #else
-static inline int of_node_to_nid(struct device_node *device) { return 0; }
+static inline int of_node_to_nid(struct device_node *device)
+{
+       return NUMA_NO_NODE;
+}
 #endif
 
 static inline struct device_node *of_find_matching_node(
@@ -821,7 +829,7 @@ static inline int of_property_read_string_index(struct device_node *np,
  * @propname:  name of the property to be searched.
  *
  * Search for a property in a device node.
- * Returns true if the property exist false otherwise.
+ * Returns true if the property exists, false otherwise.
  */
 static inline bool of_property_read_bool(const struct device_node *np,
                                         const char *propname)
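
Illustrative sketch, not part of this series: the !CONFIG_NUMA of_node_to_nid() stub above now returns NUMA_NO_NODE, which is what node-aware allocators expect. A made-up example of the kind of caller this helps:

    #include <linux/of.h>
    #include <linux/slab.h>

    /* Allocate near the memory node of a DT device; with !CONFIG_NUMA the
     * NUMA_NO_NODE return lets the allocator fall back to any node. */
    static void *demo_alloc_near(struct device_node *np, size_t size)
    {
            return kzalloc_node(size, GFP_KERNEL, of_node_to_nid(np));
    }
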
index 22801b10cef5dd279ade089b9c10e9aa4f134aeb..4c508549833a53fc76b59ba6a0bda354fc4d051f 100644 (file)
@@ -33,6 +33,8 @@ extern int of_device_add(struct platform_device *pdev);
 extern int of_device_register(struct platform_device *ofdev);
 extern void of_device_unregister(struct platform_device *ofdev);
 
+extern const void *of_device_get_match_data(const struct device *dev);
+
 extern ssize_t of_device_get_modalias(struct device *dev,
                                        char *str, ssize_t len);
 
@@ -65,6 +67,11 @@ static inline int of_driver_match_device(struct device *dev,
 static inline void of_device_uevent(struct device *dev,
                        struct kobj_uevent_env *env) { }
 
+static inline const void *of_device_get_match_data(const struct device *dev)
+{
+       return NULL;
+}
+
 static inline int of_device_get_modalias(struct device *dev,
                                   char *str, ssize_t len)
 {
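
Illustrative sketch, not part of this series: a platform driver using the new of_device_get_match_data() helper instead of open-coding of_match_device() plus the ->data dereference. The demo_* names and the compatible string are made up.

    #include <linux/module.h>
    #include <linux/of.h>
    #include <linux/of_device.h>
    #include <linux/platform_device.h>

    struct demo_variant {
            unsigned int fifo_depth;
    };

    static const struct demo_variant demo_v1 = { .fifo_depth = 16 };

    static const struct of_device_id demo_of_match[] = {
            { .compatible = "vendor,demo-v1", .data = &demo_v1 },
            { }
    };

    static int demo_probe(struct platform_device *pdev)
    {
            const struct demo_variant *variant;

            variant = of_device_get_match_data(&pdev->dev);
            if (!variant)
                    return -ENODEV;

            dev_info(&pdev->dev, "fifo depth %u\n", variant->fifo_depth);
            return 0;
    }

    static struct platform_driver demo_driver = {
            .probe = demo_probe,
            .driver = {
                    .name = "demo",
                    .of_match_table = demo_of_match,
            },
    };
    module_platform_driver(demo_driver);

    MODULE_LICENSE("GPL");
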
index fd627a58068f3473a58240064efd7ed7171ebfad..df9ef380181285a5b196b18a925942346c836d63 100644 (file)
@@ -37,7 +37,7 @@ extern bool of_fdt_is_big_endian(const void *blob,
                                 unsigned long node);
 extern int of_fdt_match(const void *blob, unsigned long node,
                        const char *const *compat);
-extern void of_fdt_unflatten_tree(unsigned long *blob,
+extern void of_fdt_unflatten_tree(const unsigned long *blob,
                               struct device_node **mynodes);
 
 /* TBD: Temporary export of fdt globals - remove when code fully merged */
index fb0814ca65c7328b0bb2bcf9be958dbff2a05c04..a6c78e00ea9684a784938ed39229c2018ffd8e75 100644 (file)
@@ -671,4 +671,10 @@ static inline int add_to_page_cache(struct page *page,
        return error;
 }
 
+static inline unsigned long dir_pages(struct inode *inode)
+{
+       return (unsigned long)(inode->i_size + PAGE_CACHE_SIZE - 1) >>
+                              PAGE_CACHE_SHIFT;
+}
+
 #endif /* _LINUX_PAGEMAP_H */
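
Illustrative sketch, not part of this series: the directory-walk pattern the new dir_pages() helper factors out of the simple filesystems. demo_scan_dir() is made up.

    #include <linux/err.h>
    #include <linux/fs.h>
    #include <linux/pagemap.h>

    static void demo_scan_dir(struct inode *inode)
    {
            unsigned long n, npages = dir_pages(inode);

            for (n = 0; n < npages; n++) {
                    struct page *page = read_mapping_page(inode->i_mapping, n, NULL);

                    if (IS_ERR(page))
                            continue;
                    /* ... walk the directory entries held in this page ... */
                    page_cache_release(page);
            }
    }
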
diff --git a/include/linux/platform_data/wkup_m3.h b/include/linux/platform_data/wkup_m3.h
new file mode 100644 (file)
index 0000000..3f1d77e
--- /dev/null
@@ -0,0 +1,30 @@
+/*
+ * TI Wakeup M3 remote processor platform data
+ *
+ * Copyright (C) 2014-2015 Texas Instruments, Inc.
+ *
+ * Dave Gerlach <d-gerlach@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _LINUX_PLATFORM_DATA_WKUP_M3_H
+#define _LINUX_PLATFORM_DATA_WKUP_M3_H
+
+struct platform_device;
+
+struct wkup_m3_platform_data {
+       const char *reset_name;
+
+       int (*assert_reset)(struct platform_device *pdev, const char *name);
+       int (*deassert_reset)(struct platform_device *pdev, const char *name);
+};
+
+#endif /* _LINUX_PLATFORM_DATA_WKUP_M3_H */
index 58f1e75ba105ca9096f8fbf1d60b808e08087c03..bba08f44cc97da8f881fba53a9d20c4e80ee2969 100644 (file)
@@ -222,6 +222,15 @@ static inline void platform_set_drvdata(struct platform_device *pdev,
        module_driver(__platform_driver, platform_driver_register, \
                        platform_driver_unregister)
 
+/* builtin_platform_driver() - Helper macro for builtin drivers that
+ * don't do anything special in driver init.  This eliminates some
+ * boilerplate.  Each driver may only use this macro once, and
+ * calling it replaces device_initcall().  Note this is meant to be
+ * a parallel of module_platform_driver() above, but w/o _exit stuff.
+ */
+#define builtin_platform_driver(__platform_driver) \
+       builtin_driver(__platform_driver, platform_driver_register)
+
 /* module_platform_driver_probe() - Helper macro for drivers that don't do
  * anything special in module init/exit.  This eliminates a lot of
  * boilerplate.  Each module may only use this macro once, and
@@ -240,6 +249,20 @@ static void __exit __platform_driver##_exit(void) \
 } \
 module_exit(__platform_driver##_exit);
 
+/* builtin_platform_driver_probe() - Helper macro for drivers that don't do
+ * anything special in device init.  This eliminates some boilerplate.  Each
+ * driver may only use this macro once, and using it replaces device_initcall.
+ * This is meant to be a parallel of module_platform_driver_probe above, but
+ * without the __exit parts.
+ */
+#define builtin_platform_driver_probe(__platform_driver, __platform_probe) \
+static int __init __platform_driver##_init(void) \
+{ \
+       return platform_driver_probe(&(__platform_driver), \
+                                    __platform_probe);    \
+} \
+device_initcall(__platform_driver##_init);
+
 #define platform_create_bundle(driver, probe, res, n_res, data, size) \
        __platform_create_bundle(driver, probe, res, n_res, data, size, THIS_MODULE)
 extern struct platform_device *__platform_create_bundle(
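
Illustrative sketch, not part of this series: a non-modular driver using the new builtin_platform_driver() helper. The demo_* names are made up.

    #include <linux/init.h>
    #include <linux/platform_device.h>

    static int demo_probe(struct platform_device *pdev)
    {
            dev_info(&pdev->dev, "probed\n");
            return 0;
    }

    static struct platform_driver demo_driver = {
            .probe = demo_probe,
            .driver = {
                    .name = "demo-builtin",
            },
    };

    /* Expands to a device_initcall(); no module_exit() counterpart is emitted. */
    builtin_platform_driver(demo_driver);
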
index 0f1534acaf60983da1cc84548659ccc1398d79e9..84991f1851739490774436726e49b5c43f16db75 100644 (file)
@@ -293,6 +293,8 @@ struct preempt_notifier {
        struct preempt_ops *ops;
 };
 
+void preempt_notifier_inc(void);
+void preempt_notifier_dec(void);
 void preempt_notifier_register(struct preempt_notifier *notifier);
 void preempt_notifier_unregister(struct preempt_notifier *notifier);
 
index fb31765e935a0590ae0a6d82dda16a4a89dc36a4..830c4992088d5806125577723c6a7c1272161689 100644 (file)
@@ -31,6 +31,7 @@
 
 #include <linux/kernel.h>
 #include <linux/stddef.h>
+#include <linux/rcupdate.h>
 
 struct rb_node {
        unsigned long  __rb_parent_color;
@@ -73,11 +74,11 @@ extern struct rb_node *rb_first_postorder(const struct rb_root *);
 extern struct rb_node *rb_next_postorder(const struct rb_node *);
 
 /* Fast replacement of a single node without remove/rebalance/add/rebalance */
-extern void rb_replace_node(struct rb_node *victim, struct rb_node *new, 
+extern void rb_replace_node(struct rb_node *victim, struct rb_node *new,
                            struct rb_root *root);
 
-static inline void rb_link_node(struct rb_node * node, struct rb_node * parent,
-                               struct rb_node ** rb_link)
+static inline void rb_link_node(struct rb_node *node, struct rb_node *parent,
+                               struct rb_node **rb_link)
 {
        node->__rb_parent_color = (unsigned long)parent;
        node->rb_left = node->rb_right = NULL;
@@ -85,6 +86,15 @@ static inline void rb_link_node(struct rb_node * node, struct rb_node * parent,
        *rb_link = node;
 }
 
+static inline void rb_link_node_rcu(struct rb_node *node, struct rb_node *parent,
+                                   struct rb_node **rb_link)
+{
+       node->__rb_parent_color = (unsigned long)parent;
+       node->rb_left = node->rb_right = NULL;
+
+       rcu_assign_pointer(*rb_link, node);
+}
+
 #define rb_entry_safe(ptr, type, member) \
        ({ typeof(ptr) ____ptr = (ptr); \
           ____ptr ? rb_entry(____ptr, type, member) : NULL; \
index 378c5ee75f78f5e1a521d87afeaf13fe21999af5..14d7b831b63a8377dce8161f3bf0be55bf385258 100644 (file)
@@ -123,11 +123,11 @@ __rb_change_child(struct rb_node *old, struct rb_node *new,
 {
        if (parent) {
                if (parent->rb_left == old)
-                       parent->rb_left = new;
+                       WRITE_ONCE(parent->rb_left, new);
                else
-                       parent->rb_right = new;
+                       WRITE_ONCE(parent->rb_right, new);
        } else
-               root->rb_node = new;
+               WRITE_ONCE(root->rb_node, new);
 }
 
 extern void __rb_erase_color(struct rb_node *parent, struct rb_root *root,
@@ -137,7 +137,8 @@ static __always_inline struct rb_node *
 __rb_erase_augmented(struct rb_node *node, struct rb_root *root,
                     const struct rb_augment_callbacks *augment)
 {
-       struct rb_node *child = node->rb_right, *tmp = node->rb_left;
+       struct rb_node *child = node->rb_right;
+       struct rb_node *tmp = node->rb_left;
        struct rb_node *parent, *rebalance;
        unsigned long pc;
 
@@ -167,6 +168,7 @@ __rb_erase_augmented(struct rb_node *node, struct rb_root *root,
                tmp = parent;
        } else {
                struct rb_node *successor = child, *child2;
+
                tmp = child->rb_left;
                if (!tmp) {
                        /*
@@ -180,6 +182,7 @@ __rb_erase_augmented(struct rb_node *node, struct rb_root *root,
                         */
                        parent = successor;
                        child2 = successor->rb_right;
+
                        augment->copy(node, successor);
                } else {
                        /*
@@ -201,19 +204,23 @@ __rb_erase_augmented(struct rb_node *node, struct rb_root *root,
                                successor = tmp;
                                tmp = tmp->rb_left;
                        } while (tmp);
-                       parent->rb_left = child2 = successor->rb_right;
-                       successor->rb_right = child;
+                       child2 = successor->rb_right;
+                       WRITE_ONCE(parent->rb_left, child2);
+                       WRITE_ONCE(successor->rb_right, child);
                        rb_set_parent(child, successor);
+
                        augment->copy(node, successor);
                        augment->propagate(parent, successor);
                }
 
-               successor->rb_left = tmp = node->rb_left;
+               tmp = node->rb_left;
+               WRITE_ONCE(successor->rb_left, tmp);
                rb_set_parent(tmp, successor);
 
                pc = node->__rb_parent_color;
                tmp = __rb_parent(pc);
                __rb_change_child(node, successor, tmp, root);
+
                if (child2) {
                        successor->__rb_parent_color = pc;
                        rb_set_parent_color(child2, parent, RB_BLACK);
diff --git a/include/linux/rbtree_latch.h b/include/linux/rbtree_latch.h
new file mode 100644 (file)
index 0000000..4f3432c
--- /dev/null
@@ -0,0 +1,212 @@
+/*
+ * Latched RB-trees
+ *
+ * Copyright (C) 2015 Intel Corp., Peter Zijlstra <peterz@infradead.org>
+ *
+ * Since RB-trees have non-atomic modifications, they're not immediately suited
+ * for RCU/lockless queries. Even though we made RB-tree lookups non-fatal for
+ * lockless lookups, we cannot guarantee they return a correct result.
+ *
+ * The simplest solution is a seqlock + RB-tree; this allows lockless
+ * lookups, but has the constraint (inherent to the seqlock) that read sides
+ * cannot nest in write sides.
+ *
+ * If we need to allow unconditional lookups (say, as required for NMI context
+ * usage), we need a more complex setup; this data structure provides this by
+ * employing the latch technique -- see @raw_write_seqcount_latch -- to
+ * implement a latched RB-tree which does allow for unconditional lookups by
+ * virtue of always having (at least) one stable copy of the tree.
+ *
+ * However, while we have the guarantee that there is at all times one stable
+ * copy, this does not guarantee an iteration will not observe modifications.
+ * What might have been a stable copy at the start of the iteration need not
+ * remain so for the duration of the iteration.
+ *
+ * Therefore, this does require a lockless RB-tree iteration to be non-fatal;
+ * see the comment in lib/rbtree.c. Note however that we only require the first
+ * condition -- not seeing partial stores -- because the latch thing isolates
+ * us from loops. If we were to interrupt a modification, the lookup would be
+ * pointed at the stable tree and complete while the modification was halted.
+ */
+
+#ifndef RB_TREE_LATCH_H
+#define RB_TREE_LATCH_H
+
+#include <linux/rbtree.h>
+#include <linux/seqlock.h>
+
+struct latch_tree_node {
+       struct rb_node node[2];
+};
+
+struct latch_tree_root {
+       seqcount_t      seq;
+       struct rb_root  tree[2];
+};
+
+/**
+ * latch_tree_ops - operators to define the tree order
+ * @less: used for insertion; provides the (partial) order between two elements.
+ * @comp: used for lookups; provides the order between the search key and an element.
+ *
+ * The operators are related like:
+ *
+ *     comp(a->key,b) < 0  := less(a,b)
+ *     comp(a->key,b) > 0  := less(b,a)
+ *     comp(a->key,b) == 0 := !less(a,b) && !less(b,a)
+ *
+ * If these operators define a partial order on the elements, we make no
+ * guarantee which of the elements matching the key is found. See
+ * latch_tree_find().
+ */
+struct latch_tree_ops {
+       bool (*less)(struct latch_tree_node *a, struct latch_tree_node *b);
+       int  (*comp)(void *key,                 struct latch_tree_node *b);
+};
+
+static __always_inline struct latch_tree_node *
+__lt_from_rb(struct rb_node *node, int idx)
+{
+       return container_of(node, struct latch_tree_node, node[idx]);
+}
+
+static __always_inline void
+__lt_insert(struct latch_tree_node *ltn, struct latch_tree_root *ltr, int idx,
+           bool (*less)(struct latch_tree_node *a, struct latch_tree_node *b))
+{
+       struct rb_root *root = &ltr->tree[idx];
+       struct rb_node **link = &root->rb_node;
+       struct rb_node *node = &ltn->node[idx];
+       struct rb_node *parent = NULL;
+       struct latch_tree_node *ltp;
+
+       while (*link) {
+               parent = *link;
+               ltp = __lt_from_rb(parent, idx);
+
+               if (less(ltn, ltp))
+                       link = &parent->rb_left;
+               else
+                       link = &parent->rb_right;
+       }
+
+       rb_link_node_rcu(node, parent, link);
+       rb_insert_color(node, root);
+}
+
+static __always_inline void
+__lt_erase(struct latch_tree_node *ltn, struct latch_tree_root *ltr, int idx)
+{
+       rb_erase(&ltn->node[idx], &ltr->tree[idx]);
+}
+
+static __always_inline struct latch_tree_node *
+__lt_find(void *key, struct latch_tree_root *ltr, int idx,
+         int (*comp)(void *key, struct latch_tree_node *node))
+{
+       struct rb_node *node = rcu_dereference_raw(ltr->tree[idx].rb_node);
+       struct latch_tree_node *ltn;
+       int c;
+
+       while (node) {
+               ltn = __lt_from_rb(node, idx);
+               c = comp(key, ltn);
+
+               if (c < 0)
+                       node = rcu_dereference_raw(node->rb_left);
+               else if (c > 0)
+                       node = rcu_dereference_raw(node->rb_right);
+               else
+                       return ltn;
+       }
+
+       return NULL;
+}
+
+/**
+ * latch_tree_insert() - insert @node into the trees @root
+ * @node: node to insert
+ * @root: trees to insert @node into
+ * @ops: operators defining the node order
+ *
+ * It inserts @node into @root in an ordered fashion such that we can always
+ * observe one complete tree. See the comment for raw_write_seqcount_latch().
+ *
+ * The inserts use rcu_assign_pointer() to publish the element such that the
+ * tree structure is stored before we can observe the new @node.
+ *
+ * All modifications (latch_tree_insert, latch_tree_remove) are assumed to be
+ * serialized.
+ */
+static __always_inline void
+latch_tree_insert(struct latch_tree_node *node,
+                 struct latch_tree_root *root,
+                 const struct latch_tree_ops *ops)
+{
+       raw_write_seqcount_latch(&root->seq);
+       __lt_insert(node, root, 0, ops->less);
+       raw_write_seqcount_latch(&root->seq);
+       __lt_insert(node, root, 1, ops->less);
+}
+
+/**
+ * latch_tree_erase() - removes @node from the trees @root
+ * @node: node to remove
+ * @root: trees to remove @node from
+ * @ops: operators defining the node order
+ *
+ * Removes @node from the trees @root in an ordered fashion such that we can
+ * always observe one complete tree. See the comment for
+ * raw_write_seqcount_latch().
+ *
+ * It is assumed that @node will observe one RCU quiescent state before being
+ * reused or freed.
+ *
+ * All modifications (latch_tree_insert, latch_tree_remove) are assumed to be
+ * serialized.
+ */
+static __always_inline void
+latch_tree_erase(struct latch_tree_node *node,
+                struct latch_tree_root *root,
+                const struct latch_tree_ops *ops)
+{
+       raw_write_seqcount_latch(&root->seq);
+       __lt_erase(node, root, 0);
+       raw_write_seqcount_latch(&root->seq);
+       __lt_erase(node, root, 1);
+}
+
+/**
+ * latch_tree_find() - find the node matching @key in the trees @root
+ * @key: search key
+ * @root: trees to search for @key
+ * @ops: operators defining the node order
+ *
+ * Does a lockless lookup in the trees @root for the node matching @key.
+ *
+ * It is assumed that this is called while holding the appropriate RCU read
+ * side lock.
+ *
+ * If the operators define a partial order on the elements (there are multiple
+ * elements which have the same key value), it is undefined which of these
+ * elements will be found. Nor is it possible to iterate the tree to find
+ * further elements with the same key value.
+ *
+ * Returns: a pointer to the node matching @key or NULL.
+ */
+static __always_inline struct latch_tree_node *
+latch_tree_find(void *key, struct latch_tree_root *root,
+               const struct latch_tree_ops *ops)
+{
+       struct latch_tree_node *node;
+       unsigned int seq;
+
+       do {
+               seq = raw_read_seqcount_latch(&root->seq);
+               node = __lt_find(key, root, seq & 1, ops->comp);
+       } while (read_seqcount_retry(&root->seq, seq));
+
+       return node;
+}
+
+#endif /* RB_TREE_LATCH_H */
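
A minimal usage sketch of the latched RB-tree API above (illustrative, not part of this diff), loosely modelled on the address lookups done in kernel/module.c; the foo_* names, the address-range keying, and the mutex serializing writers are all assumptions of the sketch.

#include <linux/mutex.h>
#include <linux/rbtree_latch.h>

struct foo_region {
	unsigned long addr;
	unsigned long size;
	struct latch_tree_node ltn;
};

static __always_inline struct foo_region *ltn_to_region(struct latch_tree_node *n)
{
	return container_of(n, struct foo_region, ltn);
}

/* less(): order used for insertion, by start address. */
static bool foo_less(struct latch_tree_node *a, struct latch_tree_node *b)
{
	return ltn_to_region(a)->addr < ltn_to_region(b)->addr;
}

/* comp(): relates a search address to a region; 0 means "inside the region". */
static int foo_comp(void *key, struct latch_tree_node *n)
{
	unsigned long addr = (unsigned long)key;
	struct foo_region *r = ltn_to_region(n);

	if (addr < r->addr)
		return -1;
	if (addr >= r->addr + r->size)
		return 1;
	return 0;
}

static const struct latch_tree_ops foo_tree_ops = {
	.less = foo_less,
	.comp = foo_comp,
};

static struct latch_tree_root foo_tree;
static DEFINE_MUTEX(foo_mutex);		/* serializes insert/erase */

static void foo_region_add(struct foo_region *r)
{
	mutex_lock(&foo_mutex);
	latch_tree_insert(&r->ltn, &foo_tree, &foo_tree_ops);
	mutex_unlock(&foo_mutex);
}

/*
 * Lockless lookup; the caller is expected to be in an RCU read-side section
 * (see latch_tree_find()), and the lookup works even from NMI context.
 */
static struct foo_region *foo_region_find(unsigned long addr)
{
	struct latch_tree_node *n;

	n = latch_tree_find((void *)addr, &foo_tree, &foo_tree_ops);
	return n ? ltn_to_region(n) : NULL;
}
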
index 33a056bb886faeedeb9690faefd3a4adeeedd14b..4cf5f51b4c9c43c2900d8fd5fad2ea93f873b33b 100644 (file)
@@ -632,21 +632,6 @@ static inline void rcu_preempt_sleep_check(void)
  */
 #define RCU_INITIALIZER(v) (typeof(*(v)) __force __rcu *)(v)
 
-/**
- * lockless_dereference() - safely load a pointer for later dereference
- * @p: The pointer to load
- *
- * Similar to rcu_dereference(), but for situations where the pointed-to
- * object's lifetime is managed by something other than RCU.  That
- * "something other" might be reference counting or simple immortality.
- */
-#define lockless_dereference(p) \
-({ \
-       typeof(p) _________p1 = READ_ONCE(p); \
-       smp_read_barrier_depends(); /* Dependency order vs. p above. */ \
-       (_________p1); \
-})
-
 /**
  * rcu_assign_pointer() - assign to RCU-protected pointer
  * @p: pointer to assign to
index 78b8a9b9d40a2ad70fe11986e8e6ffb3b04fe6c4..9c4e1384f63602f3eba04b46e22e323fa4a5c90f 100644 (file)
 #define REMOTEPROC_H
 
 #include <linux/types.h>
-#include <linux/klist.h>
 #include <linux/mutex.h>
 #include <linux/virtio.h>
 #include <linux/completion.h>
 #include <linux/idr.h>
+#include <linux/of.h>
 
 /**
  * struct resource_table - firmware resource table header
@@ -330,11 +330,13 @@ struct rproc;
  * @start:     power on the device and boot it
  * @stop:      power off the device
  * @kick:      kick a virtqueue (virtqueue id given as a parameter)
+ * @da_to_va:  optional platform hook to perform address translations
  */
 struct rproc_ops {
        int (*start)(struct rproc *rproc);
        int (*stop)(struct rproc *rproc);
        void (*kick)(struct rproc *rproc, int vqid);
+       void * (*da_to_va)(struct rproc *rproc, u64 da, int len);
 };
 
 /**
@@ -375,7 +377,7 @@ enum rproc_crash_type {
 
 /**
  * struct rproc - represents a physical remote processor device
- * @node: klist node of this rproc object
+ * @node: list node of this rproc object
  * @domain: iommu domain
  * @name: human readable name of the rproc
  * @firmware: name of firmware file to be loaded
@@ -407,7 +409,7 @@ enum rproc_crash_type {
  * @has_iommu: flag to indicate if remote processor is behind an MMU
  */
 struct rproc {
-       struct klist_node node;
+       struct list_head node;
        struct iommu_domain *domain;
        const char *name;
        const char *firmware;
@@ -481,6 +483,7 @@ struct rproc_vdev {
        u32 rsc_offset;
 };
 
+struct rproc *rproc_get_by_phandle(phandle phandle);
 struct rproc *rproc_alloc(struct device *dev, const char *name,
                                const struct rproc_ops *ops,
                                const char *firmware, int len);
index 50a8486c524bb29491bd4540afa4781596abdfdd..9b1ef0c820a72dab0fb8dea4658f26d9bc4ad6ab 100644 (file)
@@ -265,13 +265,16 @@ int sg_alloc_table_from_pages(struct sg_table *sgt,
        unsigned long offset, unsigned long size,
        gfp_t gfp_mask);
 
+size_t sg_copy_buffer(struct scatterlist *sgl, unsigned int nents, void *buf,
+                     size_t buflen, off_t skip, bool to_buffer);
+
 size_t sg_copy_from_buffer(struct scatterlist *sgl, unsigned int nents,
-                          void *buf, size_t buflen);
+                          const void *buf, size_t buflen);
 size_t sg_copy_to_buffer(struct scatterlist *sgl, unsigned int nents,
                         void *buf, size_t buflen);
 
 size_t sg_pcopy_from_buffer(struct scatterlist *sgl, unsigned int nents,
-                           void *buf, size_t buflen, off_t skip);
+                           const void *buf, size_t buflen, off_t skip);
 size_t sg_pcopy_to_buffer(struct scatterlist *sgl, unsigned int nents,
                          void *buf, size_t buflen, off_t skip);
 
index 8aa4a251742f1220cfdc129d706cd3b6c8cb19b6..ae21f1591615e06cec2115563c3f821fe36c868e 100644 (file)
@@ -192,8 +192,6 @@ struct task_group;
 #ifdef CONFIG_SCHED_DEBUG
 extern void proc_sched_show_task(struct task_struct *p, struct seq_file *m);
 extern void proc_sched_set_task(struct task_struct *p);
-extern void
-print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq);
 #endif
 
 /*
@@ -838,7 +836,7 @@ extern struct user_struct root_user;
 struct backing_dev_info;
 struct reclaim_state;
 
-#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
+#ifdef CONFIG_SCHED_INFO
 struct sched_info {
        /* cumulative counters */
        unsigned long pcount;         /* # of times run on this cpu */
@@ -848,7 +846,7 @@ struct sched_info {
        unsigned long long last_arrival,/* when we last ran on a cpu */
                           last_queued; /* when we were last queued to run */
 };
-#endif /* defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT) */
+#endif /* CONFIG_SCHED_INFO */
 
 #ifdef CONFIG_TASK_DELAY_ACCT
 struct task_delay_info {
@@ -1397,7 +1395,7 @@ struct task_struct {
        int rcu_tasks_idle_cpu;
 #endif /* #ifdef CONFIG_TASKS_RCU */
 
-#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
+#ifdef CONFIG_SCHED_INFO
        struct sched_info sched_info;
 #endif
 
index afbb1fd77c7722a0f71ed6701582ac58065ece6a..912a7c482649e63bc3232ddd5a88461c20e01e49 100644 (file)
@@ -123,6 +123,7 @@ __printf(2, 3) int seq_printf(struct seq_file *, const char *, ...);
 __printf(2, 0) int seq_vprintf(struct seq_file *, const char *, va_list args);
 
 int seq_path(struct seq_file *, const struct path *, const char *);
+int seq_file_path(struct seq_file *, struct file *, const char *);
 int seq_dentry(struct seq_file *, struct dentry *, const char *);
 int seq_path_root(struct seq_file *m, const struct path *path,
                  const struct path *root, const char *esc);
index 486e685a226a82d5cb841e61fb2ebf1562c5adb7..e0582106ef4faba81db1ff7912246623c3f7f1c2 100644 (file)
@@ -35,6 +35,7 @@
 #include <linux/spinlock.h>
 #include <linux/preempt.h>
 #include <linux/lockdep.h>
+#include <linux/compiler.h>
 #include <asm/processor.h>
 
 /*
@@ -274,9 +275,87 @@ static inline void raw_write_seqcount_barrier(seqcount_t *s)
        s->sequence++;
 }
 
-/*
+static inline int raw_read_seqcount_latch(seqcount_t *s)
+{
+       return lockless_dereference(s->sequence);
+}
+
+/**
  * raw_write_seqcount_latch - redirect readers to even/odd copy
  * @s: pointer to seqcount_t
+ *
+ * The latch technique is a multiversion concurrency control method that allows
+ * queries during non-atomic modifications. If you can guarantee queries never
+ * interrupt the modification -- e.g. the concurrency is strictly between CPUs
+ * -- you most likely do not need this.
+ *
+ * Where the traditional RCU/lockless data structures rely on atomic
+ * modifications to ensure queries observe either the old or the new state, the
+ * latch allows the same for non-atomic updates. The trade-off is doubling the
+ * cost of storage; we have to maintain two copies of the entire data
+ * structure.
+ *
+ * Very simply put: we first modify one copy and then the other. This ensures
+ * there is always one copy in a stable state, ready to give us an answer.
+ *
+ * The basic form is a data structure like:
+ *
+ * struct latch_struct {
+ *     seqcount_t              seq;
+ *     struct data_struct      data[2];
+ * };
+ *
+ * Where a modification, which is assumed to be externally serialized, does the
+ * following:
+ *
+ * void latch_modify(struct latch_struct *latch, ...)
+ * {
+ *     smp_wmb();      <- Ensure that the last data[1] update is visible
+ *     latch->seq++;
+ *     smp_wmb();      <- Ensure that the seqcount update is visible
+ *
+ *     modify(latch->data[0], ...);
+ *
+ *     smp_wmb();      <- Ensure that the data[0] update is visible
+ *     latch->seq++;
+ *     smp_wmb();      <- Ensure that the seqcount update is visible
+ *
+ *     modify(latch->data[1], ...);
+ * }
+ *
+ * The query will have a form like:
+ *
+ * struct entry *latch_query(struct latch_struct *latch, ...)
+ * {
+ *     struct entry *entry;
+ *     unsigned seq, idx;
+ *
+ *     do {
+ *             seq = lockless_dereference(latch->seq);
+ *
+ *             idx = seq & 0x01;
+ *             entry = data_query(latch->data[idx], ...);
+ *
+ *             smp_rmb();
+ *     } while (seq != latch->seq);
+ *
+ *     return entry;
+ * }
+ *
+ * So during the modification, queries are first redirected to data[1]. Then we
+ * modify data[0]. When that is complete, we redirect queries back to data[0]
+ * and we can modify data[1].
+ *
+ * NOTE: The non-requirement for atomic modifications does _NOT_ include
+ *       the publishing of new entries in the case where data is a dynamic
+ *       data structure.
+ *
+ *       An iteration might start in data[0] and get suspended long enough
+ *       to miss an entire modification sequence; once it resumes, it might
+ *       observe the new entry.
+ *
+ * NOTE: When data is a dynamic data structure, one should use regular RCU
+ *       patterns to manage the lifetimes of the objects within.
  */
 static inline void raw_write_seqcount_latch(seqcount_t *s)
 {
index 2ca67b55e0fe2f0abf7e432ee8402608b6d57201..8df43c9f11dc295889639364101f324150d769ff 100644 (file)
@@ -37,7 +37,6 @@ void xprt_complete_bc_request(struct rpc_rqst *req, uint32_t copied);
 void xprt_free_bc_request(struct rpc_rqst *req);
 int xprt_setup_backchannel(struct rpc_xprt *, unsigned int min_reqs);
 void xprt_destroy_backchannel(struct rpc_xprt *, unsigned int max_reqs);
-int bc_send(struct rpc_rqst *req);
 
 /*
  * Determine if a shared backchannel is in use
index 598ba80ec30c974f02477a216a21077213a25aa5..131032f15cc187e0c7cbd0df80a08f4c44220800 100644 (file)
@@ -56,6 +56,7 @@ struct rpc_clnt {
        struct rpc_rtt *        cl_rtt;         /* RTO estimator data */
        const struct rpc_timeout *cl_timeout;   /* Timeout strategy */
 
+       atomic_t                cl_swapper;     /* swapfile count */
        int                     cl_nodelen;     /* nodename length */
        char                    cl_nodename[UNX_MAXNODENAME+1];
        struct rpc_pipe_dir_head cl_pipedir_objects;
index 5f1e6bd4c316d143751d19aac15af77aa7c1ac21..d703f0ef37d8f87436310247c19ca37f60ea692b 100644 (file)
@@ -205,8 +205,7 @@ struct rpc_wait_queue {
  */
 struct rpc_task *rpc_new_task(const struct rpc_task_setup *);
 struct rpc_task *rpc_run_task(const struct rpc_task_setup *);
-struct rpc_task *rpc_run_bc_task(struct rpc_rqst *req,
-                               const struct rpc_call_ops *ops);
+struct rpc_task *rpc_run_bc_task(struct rpc_rqst *req);
 void           rpc_put_task(struct rpc_task *);
 void           rpc_put_task_async(struct rpc_task *);
 void           rpc_exit_task(struct rpc_task *);
@@ -269,4 +268,20 @@ static inline void rpc_assign_waitqueue_name(struct rpc_wait_queue *q,
 }
 #endif
 
+#if IS_ENABLED(CONFIG_SUNRPC_SWAP)
+int rpc_clnt_swap_activate(struct rpc_clnt *clnt);
+void rpc_clnt_swap_deactivate(struct rpc_clnt *clnt);
+#else
+static inline int
+rpc_clnt_swap_activate(struct rpc_clnt *clnt)
+{
+       return -EINVAL;
+}
+
+static inline void
+rpc_clnt_swap_deactivate(struct rpc_clnt *clnt)
+{
+}
+#endif /* CONFIG_SUNRPC_SWAP */
+
 #endif /* _LINUX_SUNRPC_SCHED_H_ */
index 8b93ef53df3c95df08625bef6ba58341d583e1e0..0fb9acbb478095b576445d74d264ad5e5193a55e 100644 (file)
@@ -133,6 +133,9 @@ struct rpc_xprt_ops {
        void            (*close)(struct rpc_xprt *xprt);
        void            (*destroy)(struct rpc_xprt *xprt);
        void            (*print_stats)(struct rpc_xprt *xprt, struct seq_file *seq);
+       int             (*enable_swap)(struct rpc_xprt *xprt);
+       void            (*disable_swap)(struct rpc_xprt *xprt);
+       void            (*inject_disconnect)(struct rpc_xprt *xprt);
 };
 
 /*
@@ -180,7 +183,7 @@ struct rpc_xprt {
        atomic_t                num_reqs;       /* total slots */
        unsigned long           state;          /* transport state */
        unsigned char           resvport   : 1; /* use a reserved port */
-       unsigned int            swapper;        /* we're swapping over this
+       atomic_t                swapper;        /* we're swapping over this
                                                   transport */
        unsigned int            bind_index;     /* bind function index */
 
@@ -212,7 +215,8 @@ struct rpc_xprt {
 #if defined(CONFIG_SUNRPC_BACKCHANNEL)
        struct svc_serv         *bc_serv;       /* The RPC service which will */
                                                /* process the callback */
-       unsigned int            bc_alloc_count; /* Total number of preallocs */
+       int                     bc_alloc_count; /* Total number of preallocs */
+       atomic_t                bc_free_slots;
        spinlock_t              bc_pa_lock;     /* Protects the preallocated
                                                 * items */
        struct list_head        bc_pa_list;     /* List of preallocated
@@ -241,6 +245,7 @@ struct rpc_xprt {
        const char              *address_strings[RPC_DISPLAY_MAX];
 #if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
        struct dentry           *debugfs;               /* debugfs directory */
+       atomic_t                inject_disconnect;
 #endif
 };
 
@@ -327,6 +332,18 @@ static inline __be32 *xprt_skip_transport_header(struct rpc_xprt *xprt, __be32 *
        return p + xprt->tsh_size;
 }
 
+static inline int
+xprt_enable_swap(struct rpc_xprt *xprt)
+{
+       return xprt->ops->enable_swap(xprt);
+}
+
+static inline void
+xprt_disable_swap(struct rpc_xprt *xprt)
+{
+       xprt->ops->disable_swap(xprt);
+}
+
 /*
  * Transport switch helper functions
  */
@@ -345,7 +362,6 @@ void                        xprt_release_rqst_cong(struct rpc_task *task);
 void                   xprt_disconnect_done(struct rpc_xprt *xprt);
 void                   xprt_force_disconnect(struct rpc_xprt *xprt);
 void                   xprt_conditional_disconnect(struct rpc_xprt *xprt, unsigned int cookie);
-int                    xs_swapper(struct rpc_xprt *xprt, int enable);
 
 bool                   xprt_lock_connect(struct rpc_xprt *, struct rpc_task *, void *);
 void                   xprt_unlock_connect(struct rpc_xprt *, void *);
@@ -431,6 +447,23 @@ static inline int xprt_test_and_set_binding(struct rpc_xprt *xprt)
        return test_and_set_bit(XPRT_BINDING, &xprt->state);
 }
 
+#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
+extern unsigned int rpc_inject_disconnect;
+static inline void xprt_inject_disconnect(struct rpc_xprt *xprt)
+{
+       if (!rpc_inject_disconnect)
+               return;
+       if (atomic_dec_return(&xprt->inject_disconnect))
+               return;
+       atomic_set(&xprt->inject_disconnect, rpc_inject_disconnect);
+       xprt->ops->inject_disconnect(xprt);
+}
+#else
+static inline void xprt_inject_disconnect(struct rpc_xprt *xprt)
+{
+}
+#endif
+
 #endif /* __KERNEL__*/
 
 #endif /* _LINUX_SUNRPC_XPRT_H */
index c984c85981eae2881ebdac43ffdbfba9b55b70b4..b17613052cc3fd9d8827ede1944d3489bdd2285d 100644 (file)
@@ -56,7 +56,8 @@
 
 #define RPCRDMA_INLINE_PAD_THRESH  (512)/* payload threshold to pad (bytes) */
 
-/* memory registration strategies */
+/* Memory registration strategies, by number.
+ * This is part of a kernel / user space API. Do not remove. */
 enum rpcrdma_memreg {
        RPCRDMA_BOUNCEBUFFERS = 0,
        RPCRDMA_REGISTER,
index 795d5fea569777f12842d243cc8714bcdcd40701..fa7bc29925c929a36e0f1e53a3a7efc1a18cc9e6 100644 (file)
@@ -188,6 +188,9 @@ struct ctl_table_header *register_sysctl_paths(const struct ctl_path *path,
 void unregister_sysctl_table(struct ctl_table_header * table);
 
 extern int sysctl_init(void);
+
+extern struct ctl_table sysctl_mount_point[];
+
 #else /* CONFIG_SYSCTL */
 static inline struct ctl_table_header *register_sysctl_table(struct ctl_table * table)
 {
index 99382c0df17eb3fc367e3b3eafd5dca3cb63975a..9f65758311a4efff6a325f0baeb33f86ec68e06f 100644 (file)
@@ -210,6 +210,10 @@ int __must_check sysfs_rename_dir_ns(struct kobject *kobj, const char *new_name,
 int __must_check sysfs_move_dir_ns(struct kobject *kobj,
                                   struct kobject *new_parent_kobj,
                                   const void *new_ns);
+int __must_check sysfs_create_mount_point(struct kobject *parent_kobj,
+                                         const char *name);
+void sysfs_remove_mount_point(struct kobject *parent_kobj,
+                             const char *name);
 
 int __must_check sysfs_create_file_ns(struct kobject *kobj,
                                      const struct attribute *attr,
@@ -298,6 +302,17 @@ static inline int sysfs_move_dir_ns(struct kobject *kobj,
        return 0;
 }
 
+static inline int sysfs_create_mount_point(struct kobject *parent_kobj,
+                                          const char *name)
+{
+       return 0;
+}
+
+static inline void sysfs_remove_mount_point(struct kobject *parent_kobj,
+                                           const char *name)
+{
+}
+
 static inline int sysfs_create_file_ns(struct kobject *kobj,
                                       const struct attribute *attr,
                                       const void *ns)
index 51865d05b267002bd4303a358b2fa4fbd909d47d..ce63a2c3a612936737ae14236294e93f44a2474b 100644 (file)
@@ -3,17 +3,21 @@
 #include <linux/types.h>
 #include <uapi/linux/virtio_types.h>
 
-/*
- * Low-level memory accessors for handling virtio in modern little endian and in
- * compatibility native endian format.
- */
+static inline bool virtio_legacy_is_little_endian(void)
+{
+#ifdef __LITTLE_ENDIAN
+       return true;
+#else
+       return false;
+#endif
+}
 
 static inline u16 __virtio16_to_cpu(bool little_endian, __virtio16 val)
 {
        if (little_endian)
                return le16_to_cpu((__force __le16)val);
        else
-               return (__force u16)val;
+               return be16_to_cpu((__force __be16)val);
 }
 
 static inline __virtio16 __cpu_to_virtio16(bool little_endian, u16 val)
@@ -21,7 +25,7 @@ static inline __virtio16 __cpu_to_virtio16(bool little_endian, u16 val)
        if (little_endian)
                return (__force __virtio16)cpu_to_le16(val);
        else
-               return (__force __virtio16)val;
+               return (__force __virtio16)cpu_to_be16(val);
 }
 
 static inline u32 __virtio32_to_cpu(bool little_endian, __virtio32 val)
@@ -29,7 +33,7 @@ static inline u32 __virtio32_to_cpu(bool little_endian, __virtio32 val)
        if (little_endian)
                return le32_to_cpu((__force __le32)val);
        else
-               return (__force u32)val;
+               return be32_to_cpu((__force __be32)val);
 }
 
 static inline __virtio32 __cpu_to_virtio32(bool little_endian, u32 val)
@@ -37,7 +41,7 @@ static inline __virtio32 __cpu_to_virtio32(bool little_endian, u32 val)
        if (little_endian)
                return (__force __virtio32)cpu_to_le32(val);
        else
-               return (__force __virtio32)val;
+               return (__force __virtio32)cpu_to_be32(val);
 }
 
 static inline u64 __virtio64_to_cpu(bool little_endian, __virtio64 val)
@@ -45,7 +49,7 @@ static inline u64 __virtio64_to_cpu(bool little_endian, __virtio64 val)
        if (little_endian)
                return le64_to_cpu((__force __le64)val);
        else
-               return (__force u64)val;
+               return be64_to_cpu((__force __be64)val);
 }
 
 static inline __virtio64 __cpu_to_virtio64(bool little_endian, u64 val)
@@ -53,7 +57,7 @@ static inline __virtio64 __cpu_to_virtio64(bool little_endian, u64 val)
        if (little_endian)
                return (__force __virtio64)cpu_to_le64(val);
        else
-               return (__force __virtio64)val;
+               return (__force __virtio64)cpu_to_be64(val);
 }
 
 #endif /* _LINUX_VIRTIO_BYTEORDER */
index 1e306f727edcfcecc528e81483998f947ea46459..e5ce8ab0b8b01a4e4d8bc17d3efd67f9c0b2d517 100644 (file)
@@ -205,35 +205,41 @@ int virtqueue_set_affinity(struct virtqueue *vq, int cpu)
        return 0;
 }
 
+static inline bool virtio_is_little_endian(struct virtio_device *vdev)
+{
+       return virtio_has_feature(vdev, VIRTIO_F_VERSION_1) ||
+               virtio_legacy_is_little_endian();
+}
+
 /* Memory accessors */
 static inline u16 virtio16_to_cpu(struct virtio_device *vdev, __virtio16 val)
 {
-       return __virtio16_to_cpu(virtio_has_feature(vdev, VIRTIO_F_VERSION_1), val);
+       return __virtio16_to_cpu(virtio_is_little_endian(vdev), val);
 }
 
 static inline __virtio16 cpu_to_virtio16(struct virtio_device *vdev, u16 val)
 {
-       return __cpu_to_virtio16(virtio_has_feature(vdev, VIRTIO_F_VERSION_1), val);
+       return __cpu_to_virtio16(virtio_is_little_endian(vdev), val);
 }
 
 static inline u32 virtio32_to_cpu(struct virtio_device *vdev, __virtio32 val)
 {
-       return __virtio32_to_cpu(virtio_has_feature(vdev, VIRTIO_F_VERSION_1), val);
+       return __virtio32_to_cpu(virtio_is_little_endian(vdev), val);
 }
 
 static inline __virtio32 cpu_to_virtio32(struct virtio_device *vdev, u32 val)
 {
-       return __cpu_to_virtio32(virtio_has_feature(vdev, VIRTIO_F_VERSION_1), val);
+       return __cpu_to_virtio32(virtio_is_little_endian(vdev), val);
 }
 
 static inline u64 virtio64_to_cpu(struct virtio_device *vdev, __virtio64 val)
 {
-       return __virtio64_to_cpu(virtio_has_feature(vdev, VIRTIO_F_VERSION_1), val);
+       return __virtio64_to_cpu(virtio_is_little_endian(vdev), val);
 }
 
 static inline __virtio64 cpu_to_virtio64(struct virtio_device *vdev, u64 val)
 {
-       return __cpu_to_virtio64(virtio_has_feature(vdev, VIRTIO_F_VERSION_1), val);
+       return __cpu_to_virtio64(virtio_is_little_endian(vdev), val);
 }
 
 /* Config space accessors. */
index a3fa537e717a5f714fcb719aa6f56474270dcffe..bc6c28d0426339cc3814ec09d4da3db830c869d9 100644 (file)
@@ -226,33 +226,39 @@ static inline void vringh_notify(struct vringh *vrh)
                vrh->notify(vrh);
 }
 
+static inline bool vringh_is_little_endian(const struct vringh *vrh)
+{
+       return vrh->little_endian ||
+               virtio_legacy_is_little_endian();
+}
+
 static inline u16 vringh16_to_cpu(const struct vringh *vrh, __virtio16 val)
 {
-       return __virtio16_to_cpu(vrh->little_endian, val);
+       return __virtio16_to_cpu(vringh_is_little_endian(vrh), val);
 }
 
 static inline __virtio16 cpu_to_vringh16(const struct vringh *vrh, u16 val)
 {
-       return __cpu_to_virtio16(vrh->little_endian, val);
+       return __cpu_to_virtio16(vringh_is_little_endian(vrh), val);
 }
 
 static inline u32 vringh32_to_cpu(const struct vringh *vrh, __virtio32 val)
 {
-       return __virtio32_to_cpu(vrh->little_endian, val);
+       return __virtio32_to_cpu(vringh_is_little_endian(vrh), val);
 }
 
 static inline __virtio32 cpu_to_vringh32(const struct vringh *vrh, u32 val)
 {
-       return __cpu_to_virtio32(vrh->little_endian, val);
+       return __cpu_to_virtio32(vringh_is_little_endian(vrh), val);
 }
 
 static inline u64 vringh64_to_cpu(const struct vringh *vrh, __virtio64 val)
 {
-       return __virtio64_to_cpu(vrh->little_endian, val);
+       return __virtio64_to_cpu(vringh_is_little_endian(vrh), val);
 }
 
 static inline __virtio64 cpu_to_vringh64(const struct vringh *vrh, u64 val)
 {
-       return __cpu_to_virtio64(vrh->little_endian, val);
+       return __cpu_to_virtio64(vringh_is_little_endian(vrh), val);
 }
 #endif /* _LINUX_VRINGH_H */
index a746bf5216f896c82e191b90078d7ffa92aaadf4..f47feada5b42c99da7d17aefb9b0e24fc5a06e1d 100644 (file)
@@ -65,6 +65,8 @@ struct watchdog_ops {
  * @driver-data:Pointer to the drivers private data.
  * @lock:      Lock for watchdog core internal use only.
  * @status:    Field that contains the devices internal status bits.
+ * @deferred: entry in wtd_deferred_reg_list which is used to
+ *                        register early initialized watchdogs.
  *
  * The watchdog_device structure contains all information about a
  * watchdog timer device.
@@ -95,6 +97,7 @@ struct watchdog_device {
 #define WDOG_ALLOW_RELEASE     2       /* Did we receive the magic char ? */
 #define WDOG_NO_WAY_OUT                3       /* Is 'nowayout' feature set ? */
 #define WDOG_UNREGISTERED      4       /* Has the device been unregistered */
+       struct list_head deferred;
 };
 
 #define WATCHDOG_NOWAYOUT              IS_BUILTIN(CONFIG_WATCHDOG_NOWAYOUT)
diff --git a/include/media/v4l2-flash-led-class.h b/include/media/v4l2-flash-led-class.h
new file mode 100644 (file)
index 0000000..098236c
--- /dev/null
@@ -0,0 +1,148 @@
+/*
+ * V4L2 flash LED sub-device registration helpers.
+ *
+ *     Copyright (C) 2015 Samsung Electronics Co., Ltd
+ *     Author: Jacek Anaszewski <j.anaszewski@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _V4L2_FLASH_H
+#define _V4L2_FLASH_H
+
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-subdev.h>
+
+struct led_classdev_flash;
+struct led_classdev;
+struct v4l2_flash;
+enum led_brightness;
+
+/*
+ * struct v4l2_flash_ctrl_data - flash control initialization data, filled
+ *                             based on the features declared by the LED flash
+ *                             class driver in the v4l2_flash_config
+ * @config:    initialization data for a control
+ * @cid:       contains v4l2 flash control id if the config
+ *             field was initialized, 0 otherwise
+ */
+struct v4l2_flash_ctrl_data {
+       struct v4l2_ctrl_config config;
+       u32 cid;
+};
+
+struct v4l2_flash_ops {
+       /* setup strobing the flash by hardware pin state assertion */
+       int (*external_strobe_set)(struct v4l2_flash *v4l2_flash,
+                                       bool enable);
+       /* convert intensity to brightness in a device specific manner */
+       enum led_brightness (*intensity_to_led_brightness)
+               (struct v4l2_flash *v4l2_flash, s32 intensity);
+       /* convert brightness to intensity in a device specific manner */
+       s32 (*led_brightness_to_intensity)
+               (struct v4l2_flash *v4l2_flash, enum led_brightness);
+};
+
+/**
+ * struct v4l2_flash_config - V4L2 Flash sub-device initialization data
+ * @dev_name:                  the name of the media entity,
+ *                             unique in the system
+ * @torch_intensity:           constraints for the LED in torch mode
+ * @indicator_intensity:       constraints for the indicator LED
+ * @flash_faults:              bitmask of flash faults that the LED flash class
+ *                             device can report; corresponding LED_FAULT* bit
+ *                             definitions are available in the header file
+ *                             <linux/led-class-flash.h>
+ * @has_external_strobe:       external strobe capability
+ */
+struct v4l2_flash_config {
+       char dev_name[32];
+       struct led_flash_setting torch_intensity;
+       struct led_flash_setting indicator_intensity;
+       u32 flash_faults;
+       unsigned int has_external_strobe:1;
+};
+
+/**
+ * struct v4l2_flash - Flash sub-device context
+ * @fled_cdev:         LED flash class device controlled by this sub-device
+ * @iled_cdev:         LED class device representing indicator LED associated
+ *                     with the LED flash class device
+ * @ops:               V4L2 specific flash ops
+ * @sd:                        V4L2 sub-device
+ * @hdl:               flash controls handler
+ * @ctrls:             array of pointers to controls, whose values define
+ *                     the sub-device state
+ */
+struct v4l2_flash {
+       struct led_classdev_flash *fled_cdev;
+       struct led_classdev_flash *iled_cdev;
+       const struct v4l2_flash_ops *ops;
+
+       struct v4l2_subdev sd;
+       struct v4l2_ctrl_handler hdl;
+       struct v4l2_ctrl **ctrls;
+};
+
+static inline struct v4l2_flash *v4l2_subdev_to_v4l2_flash(
+                                                       struct v4l2_subdev *sd)
+{
+       return container_of(sd, struct v4l2_flash, sd);
+}
+
+static inline struct v4l2_flash *v4l2_ctrl_to_v4l2_flash(struct v4l2_ctrl *c)
+{
+       return container_of(c->handler, struct v4l2_flash, hdl);
+}
+
+#if IS_ENABLED(CONFIG_V4L2_FLASH_LED_CLASS)
+/**
+ * v4l2_flash_init - initialize V4L2 flash led sub-device
+ * @dev:       flash device, e.g. an I2C device
+ * @of_node:   of_node of the LED, may be NULL if the same as device's
+ * @fled_cdev: LED flash class device to wrap
+ * @iled_cdev: LED flash class device representing indicator LED associated
+ *             with fled_cdev, may be NULL
+ * @flash_ops: V4L2 Flash device ops
+ * @config:    initialization data for V4L2 Flash sub-device
+ *
+ * Create V4L2 Flash sub-device wrapping given LED subsystem device.
+ *
+ * Returns: A valid pointer, or, when an error occurs, the return
+ * value is encoded using ERR_PTR(). Use IS_ERR() to check and
+ * PTR_ERR() to obtain the numeric return value.
+ */
+struct v4l2_flash *v4l2_flash_init(
+       struct device *dev, struct device_node *of_node,
+       struct led_classdev_flash *fled_cdev,
+       struct led_classdev_flash *iled_cdev,
+       const struct v4l2_flash_ops *ops,
+       struct v4l2_flash_config *config);
+
+/**
+ * v4l2_flash_release - release V4L2 Flash sub-device
+ * @flash: the V4L2 Flash sub-device to release
+ *
+ * Release V4L2 Flash sub-device.
+ */
+void v4l2_flash_release(struct v4l2_flash *v4l2_flash);
+
+#else
+static inline struct v4l2_flash *v4l2_flash_init(
+       struct device *dev, struct device_node *of_node,
+       struct led_classdev_flash *fled_cdev,
+       struct led_classdev_flash *iled_cdev,
+       const struct v4l2_flash_ops *ops,
+       struct v4l2_flash_config *config)
+{
+       return NULL;
+}
+
+static inline void v4l2_flash_release(struct v4l2_flash *v4l2_flash)
+{
+}
+#endif /* CONFIG_V4L2_FLASH_LED_CLASS */
+
+#endif /* _V4L2_FLASH_H */
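
A rough usage sketch of v4l2_flash_init() (illustrative, not part of this diff); the foo_* names, the 15625 uA scaling, and the constraint values are invented for the example, and a real driver derives its v4l2_flash_ops and v4l2_flash_config from the hardware's properties.

#include <linux/err.h>
#include <linux/leds.h>
#include <linux/led-class-flash.h>
#include <linux/string.h>
#include <media/v4l2-flash-led-class.h>

/* Assumed device-specific conversion: one brightness step == 15625 uA. */
static enum led_brightness foo_intensity_to_brightness(struct v4l2_flash *v4l2_flash,
							s32 intensity)
{
	return intensity / 15625;
}

static s32 foo_brightness_to_intensity(struct v4l2_flash *v4l2_flash,
				       enum led_brightness brightness)
{
	return brightness * 15625;
}

static const struct v4l2_flash_ops foo_v4l2_flash_ops = {
	.intensity_to_led_brightness = foo_intensity_to_brightness,
	.led_brightness_to_intensity = foo_brightness_to_intensity,
};

static int foo_register_v4l2_flash(struct device *dev,
				   struct led_classdev_flash *fled_cdev,
				   struct v4l2_flash **out)
{
	struct v4l2_flash_config cfg = {
		.torch_intensity = {
			.min	= 15625,	/* uA */
			.max	= 250000,
			.step	= 15625,
			.val	= 15625,
		},
	};
	struct v4l2_flash *v4l2_flash;

	strlcpy(cfg.dev_name, "foo-flash", sizeof(cfg.dev_name));

	v4l2_flash = v4l2_flash_init(dev, NULL, fled_cdev, NULL,
				     &foo_v4l2_flash_ops, &cfg);
	if (IS_ERR(v4l2_flash))
		return PTR_ERR(v4l2_flash);

	*out = v4l2_flash;
	return 0;
}
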
index dc20102ff600bc9834ed6d83963cfc09816a8c0e..4e18318eb425f27e696d08ce926867ee13459a49 100644 (file)
@@ -605,6 +605,8 @@ struct v4l2_subdev {
        struct video_device *devnode;
        /* pointer to the physical device, if any */
        struct device *dev;
+       /* The device_node of the subdev, usually the same as dev->of_node. */
+       struct device_node *of_node;
        /* Links this subdev to a global subdev_list or @notifier->done list. */
        struct list_head async_list;
        /* Pointer to respective struct v4l2_async_subdev. */
index 16a923a3a43a8825d167b75025baca0b1b5d741c..e602f8177ebfbf3148bce0734a87ecac1b0eb590 100644 (file)
@@ -13,6 +13,7 @@
 #include <linux/slab.h>
 #include <linux/atomic.h>
 #include <net/neighbour.h>
+#include <net/sock.h>
 
 #define        AX25_T1CLAMPLO                  1
 #define        AX25_T1CLAMPHI                  (30 * HZ)
@@ -246,7 +247,20 @@ typedef struct ax25_cb {
        atomic_t                refcount;
 } ax25_cb;
 
-#define ax25_sk(__sk) ((ax25_cb *)(__sk)->sk_protinfo)
+struct ax25_sock {
+       struct sock             sk;
+       struct ax25_cb          *cb;
+};
+
+static inline struct ax25_sock *ax25_sk(const struct sock *sk)
+{
+       return (struct ax25_sock *) sk;
+}
+
+static inline struct ax25_cb *sk_to_ax25(const struct sock *sk)
+{
+       return ax25_sk(sk)->cb;
+}
 
 #define ax25_for_each(__ax25, list) \
        hlist_for_each_entry(__ax25, list, ax25_node)
index 14d539c040d70dfebaa5dae16312fc48536559a7..05a8c1aea25187c1692efcb1e350bb2c7f75a30b 100644 (file)
@@ -277,7 +277,6 @@ struct cg_proto;
   *    @sk_incoming_cpu: record cpu processing incoming packets
   *    @sk_txhash: computed flow hash for use on transmit
   *    @sk_filter: socket filtering instructions
-  *    @sk_protinfo: private area, net family specific, when not using slab
   *    @sk_timer: sock cleanup timer
   *    @sk_stamp: time stamp of last packet received
   *    @sk_tsflags: SO_TIMESTAMPING socket options
@@ -416,7 +415,6 @@ struct sock {
        const struct cred       *sk_peer_cred;
        long                    sk_rcvtimeo;
        long                    sk_sndtimeo;
-       void                    *sk_protinfo;
        struct timer_list       sk_timer;
        ktime_t                 sk_stamp;
        u16                     sk_tsflags;
index 006983b296dd6da3afa466e43f40ec0699cad36f..34117b8b72e49d84fb9477326ad10a490de1060a 100644 (file)
@@ -247,10 +247,6 @@ struct iscsi_conn_ops {
        u8      DataDigest;                     /* [0,1] == [None,CRC32C] */
        u32     MaxRecvDataSegmentLength;       /* [512..2**24-1] */
        u32     MaxXmitDataSegmentLength;       /* [512..2**24-1] */
-       u8      OFMarker;                       /* [0,1] == [No,Yes] */
-       u8      IFMarker;                       /* [0,1] == [No,Yes] */
-       u32     OFMarkInt;                      /* [1..65535] */
-       u32     IFMarkInt;                      /* [1..65535] */
        /*
         * iSER specific connection parameters
         */
@@ -531,12 +527,6 @@ struct iscsi_conn {
        u32                     exp_statsn;
        /* Per connection status sequence number */
        u32                     stat_sn;
-       /* IFMarkInt's Current Value */
-       u32                     if_marker;
-       /* OFMarkInt's Current Value */
-       u32                     of_marker;
-       /* Used for calculating OFMarker offset to next PDU */
-       u32                     of_marker_offset;
 #define IPV6_ADDRESS_SPACE                             48
        unsigned char           login_ip[IPV6_ADDRESS_SPACE];
        unsigned char           local_ip[IPV6_ADDRESS_SPACE];
@@ -754,10 +744,10 @@ struct iscsi_node_stat_grps {
 };
 
 struct iscsi_node_acl {
+       struct se_node_acl      se_node_acl;
        struct iscsi_node_attrib node_attrib;
        struct iscsi_node_auth  node_auth;
        struct iscsi_node_stat_grps node_stat_grps;
-       struct se_node_acl      se_node_acl;
 };
 
 struct iscsi_tpg_attrib {
index 5f122570699339f5f5cf85fb14a90e2ad9fc51b7..1e5c8f949bae4947b8645bd1cf9d7d8966708713 100644 (file)
@@ -3,18 +3,7 @@
 
 #define TRANSPORT_FLAG_PASSTHROUGH             1
 
-struct target_backend_cits {
-       struct config_item_type tb_dev_cit;
-       struct config_item_type tb_dev_attrib_cit;
-       struct config_item_type tb_dev_pr_cit;
-       struct config_item_type tb_dev_wwn_cit;
-       struct config_item_type tb_dev_alua_tg_pt_gps_cit;
-       struct config_item_type tb_dev_stat_cit;
-};
-
-struct se_subsystem_api {
-       struct list_head sub_api_list;
-
+struct target_backend_ops {
        char name[16];
        char inquiry_prod[16];
        char inquiry_rev[4];
@@ -52,7 +41,7 @@ struct se_subsystem_api {
        int (*format_prot)(struct se_device *);
        void (*free_prot)(struct se_device *);
 
-       struct target_backend_cits tb_cits;
+       struct configfs_attribute **tb_dev_attrib_attrs;
 };
 
 struct sbc_ops {
@@ -60,12 +49,12 @@ struct sbc_ops {
                                     u32, enum dma_data_direction);
        sense_reason_t (*execute_sync_cache)(struct se_cmd *cmd);
        sense_reason_t (*execute_write_same)(struct se_cmd *cmd);
-       sense_reason_t (*execute_write_same_unmap)(struct se_cmd *cmd);
-       sense_reason_t (*execute_unmap)(struct se_cmd *cmd);
+       sense_reason_t (*execute_unmap)(struct se_cmd *cmd,
+                               sector_t lba, sector_t nolb);
 };
 
-int    transport_subsystem_register(struct se_subsystem_api *);
-void   transport_subsystem_release(struct se_subsystem_api *);
+int    transport_backend_register(const struct target_backend_ops *);
+void   target_backend_unregister(const struct target_backend_ops *);
 
 void   target_complete_cmd(struct se_cmd *, u8);
 void   target_complete_cmd_with_length(struct se_cmd *, u8, int);
@@ -79,22 +68,19 @@ sense_reason_t      sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops);
 u32    sbc_get_device_rev(struct se_device *dev);
 u32    sbc_get_device_type(struct se_device *dev);
 sector_t       sbc_get_write_same_sectors(struct se_cmd *cmd);
-sense_reason_t sbc_execute_unmap(struct se_cmd *cmd,
-       sense_reason_t (*do_unmap_fn)(struct se_cmd *cmd, void *priv,
-                                     sector_t lba, sector_t nolb),
-       void *priv);
 void   sbc_dif_generate(struct se_cmd *);
-sense_reason_t sbc_dif_verify_write(struct se_cmd *, sector_t, unsigned int,
+sense_reason_t sbc_dif_verify(struct se_cmd *, sector_t, unsigned int,
                                     unsigned int, struct scatterlist *, int);
-sense_reason_t sbc_dif_verify_read(struct se_cmd *, sector_t, unsigned int,
-                                   unsigned int, struct scatterlist *, int);
-sense_reason_t sbc_dif_read_strip(struct se_cmd *);
-
+void sbc_dif_copy_prot(struct se_cmd *, unsigned int, bool,
+                      struct scatterlist *, int);
 void   transport_set_vpd_proto_id(struct t10_vpd *, unsigned char *);
 int    transport_set_vpd_assoc(struct t10_vpd *, unsigned char *);
 int    transport_set_vpd_ident_type(struct t10_vpd *, unsigned char *);
 int    transport_set_vpd_ident(struct t10_vpd *, unsigned char *);
 
+extern struct configfs_attribute *sbc_attrib_attrs[];
+extern struct configfs_attribute *passthrough_attrib_attrs[];
+
 /* core helpers also used by command snooping in pscsi */
 void   *transport_kmap_data_sg(struct se_cmd *);
 void   transport_kunmap_data_sg(struct se_cmd *);
@@ -103,39 +89,7 @@ int target_alloc_sgl(struct scatterlist **, unsigned int *, u32, bool);
 sense_reason_t transport_generic_map_mem_to_cmd(struct se_cmd *,
                struct scatterlist *, u32, struct scatterlist *, u32);
 
-void   array_free(void *array, int n);
-
-/* From target_core_configfs.c to setup default backend config_item_types */
-void   target_core_setup_sub_cits(struct se_subsystem_api *);
-
-/* attribute helpers from target_core_device.c for backend drivers */
-bool   se_dev_check_wce(struct se_device *);
-int    se_dev_set_max_unmap_lba_count(struct se_device *, u32);
-int    se_dev_set_max_unmap_block_desc_count(struct se_device *, u32);
-int    se_dev_set_unmap_granularity(struct se_device *, u32);
-int    se_dev_set_unmap_granularity_alignment(struct se_device *, u32);
-int    se_dev_set_max_write_same_len(struct se_device *, u32);
-int    se_dev_set_emulate_model_alias(struct se_device *, int);
-int    se_dev_set_emulate_dpo(struct se_device *, int);
-int    se_dev_set_emulate_fua_write(struct se_device *, int);
-int    se_dev_set_emulate_fua_read(struct se_device *, int);
-int    se_dev_set_emulate_write_cache(struct se_device *, int);
-int    se_dev_set_emulate_ua_intlck_ctrl(struct se_device *, int);
-int    se_dev_set_emulate_tas(struct se_device *, int);
-int    se_dev_set_emulate_tpu(struct se_device *, int);
-int    se_dev_set_emulate_tpws(struct se_device *, int);
-int    se_dev_set_emulate_caw(struct se_device *, int);
-int    se_dev_set_emulate_3pc(struct se_device *, int);
-int    se_dev_set_pi_prot_type(struct se_device *, int);
-int    se_dev_set_pi_prot_format(struct se_device *, int);
-int    se_dev_set_enforce_pr_isids(struct se_device *, int);
-int    se_dev_set_force_pr_aptpl(struct se_device *, int);
-int    se_dev_set_is_nonrot(struct se_device *, int);
-int    se_dev_set_emulate_rest_reord(struct se_device *dev, int);
-int    se_dev_set_queue_depth(struct se_device *, u32);
-int    se_dev_set_max_sectors(struct se_device *, u32);
-int    se_dev_set_optimal_sectors(struct se_device *, u32);
-int    se_dev_set_block_size(struct se_device *, u32);
+bool   target_lun_is_rdonly(struct se_cmd *);
 sense_reason_t passthrough_parse_cdb(struct se_cmd *cmd,
        sense_reason_t (*exec_cmd)(struct se_cmd *cmd));
 
diff --git a/include/target/target_core_backend_configfs.h b/include/target/target_core_backend_configfs.h
deleted file mode 100644 (file)
index 186f7a9..0000000
+++ /dev/null
@@ -1,118 +0,0 @@
-#ifndef TARGET_CORE_BACKEND_CONFIGFS_H
-#define TARGET_CORE_BACKEND_CONFIGFS_H
-
-#include <target/configfs_macros.h>
-
-#define DEF_TB_DEV_ATTRIB_SHOW(_backend, _name)                                \
-static ssize_t _backend##_dev_show_attr_##_name(                       \
-       struct se_dev_attrib *da,                                       \
-       char *page)                                                     \
-{                                                                      \
-       return snprintf(page, PAGE_SIZE, "%u\n",                        \
-                       (u32)da->da_dev->dev_attrib._name);             \
-}
-
-#define DEF_TB_DEV_ATTRIB_STORE(_backend, _name)                       \
-static ssize_t _backend##_dev_store_attr_##_name(                      \
-       struct se_dev_attrib *da,                                       \
-       const char *page,                                               \
-       size_t count)                                                   \
-{                                                                      \
-       unsigned long val;                                              \
-       int ret;                                                        \
-                                                                       \
-       ret = kstrtoul(page, 0, &val);                                  \
-       if (ret < 0) {                                                  \
-               pr_err("kstrtoul() failed with ret: %d\n", ret);        \
-               return -EINVAL;                                         \
-       }                                                               \
-       ret = se_dev_set_##_name(da->da_dev, (u32)val);                 \
-                                                                       \
-       return (!ret) ? count : -EINVAL;                                \
-}
-
-#define DEF_TB_DEV_ATTRIB(_backend, _name)                             \
-DEF_TB_DEV_ATTRIB_SHOW(_backend, _name);                               \
-DEF_TB_DEV_ATTRIB_STORE(_backend, _name);
-
-#define DEF_TB_DEV_ATTRIB_RO(_backend, name)                           \
-DEF_TB_DEV_ATTRIB_SHOW(_backend, name);
-
-CONFIGFS_EATTR_STRUCT(target_backend_dev_attrib, se_dev_attrib);
-#define TB_DEV_ATTR(_backend, _name, _mode)                            \
-static struct target_backend_dev_attrib_attribute _backend##_dev_attrib_##_name = \
-               __CONFIGFS_EATTR(_name, _mode,                          \
-               _backend##_dev_show_attr_##_name,                       \
-               _backend##_dev_store_attr_##_name);
-
-#define TB_DEV_ATTR_RO(_backend, _name)                                                \
-static struct target_backend_dev_attrib_attribute _backend##_dev_attrib_##_name = \
-       __CONFIGFS_EATTR_RO(_name,                                      \
-       _backend##_dev_show_attr_##_name);
-
-/*
- * Default list of target backend device attributes as defined by
- * struct se_dev_attrib
- */
-
-#define DEF_TB_DEFAULT_ATTRIBS(_backend)                               \
-       DEF_TB_DEV_ATTRIB(_backend, emulate_model_alias);               \
-       TB_DEV_ATTR(_backend, emulate_model_alias, S_IRUGO | S_IWUSR);  \
-       DEF_TB_DEV_ATTRIB(_backend, emulate_dpo);                       \
-       TB_DEV_ATTR(_backend, emulate_dpo, S_IRUGO | S_IWUSR);          \
-       DEF_TB_DEV_ATTRIB(_backend, emulate_fua_write);                 \
-       TB_DEV_ATTR(_backend, emulate_fua_write, S_IRUGO | S_IWUSR);    \
-       DEF_TB_DEV_ATTRIB(_backend, emulate_fua_read);                  \
-       TB_DEV_ATTR(_backend, emulate_fua_read, S_IRUGO | S_IWUSR);     \
-       DEF_TB_DEV_ATTRIB(_backend, emulate_write_cache);               \
-       TB_DEV_ATTR(_backend, emulate_write_cache, S_IRUGO | S_IWUSR);  \
-       DEF_TB_DEV_ATTRIB(_backend, emulate_ua_intlck_ctrl);            \
-       TB_DEV_ATTR(_backend, emulate_ua_intlck_ctrl, S_IRUGO | S_IWUSR); \
-       DEF_TB_DEV_ATTRIB(_backend, emulate_tas);                       \
-       TB_DEV_ATTR(_backend, emulate_tas, S_IRUGO | S_IWUSR);          \
-       DEF_TB_DEV_ATTRIB(_backend, emulate_tpu);                       \
-       TB_DEV_ATTR(_backend, emulate_tpu, S_IRUGO | S_IWUSR);          \
-       DEF_TB_DEV_ATTRIB(_backend, emulate_tpws);                      \
-       TB_DEV_ATTR(_backend, emulate_tpws, S_IRUGO | S_IWUSR);         \
-       DEF_TB_DEV_ATTRIB(_backend, emulate_caw);                       \
-       TB_DEV_ATTR(_backend, emulate_caw, S_IRUGO | S_IWUSR);          \
-       DEF_TB_DEV_ATTRIB(_backend, emulate_3pc);                       \
-       TB_DEV_ATTR(_backend, emulate_3pc, S_IRUGO | S_IWUSR);          \
-       DEF_TB_DEV_ATTRIB(_backend, pi_prot_type);                      \
-       TB_DEV_ATTR(_backend, pi_prot_type, S_IRUGO | S_IWUSR);         \
-       DEF_TB_DEV_ATTRIB_RO(_backend, hw_pi_prot_type);                \
-       TB_DEV_ATTR_RO(_backend, hw_pi_prot_type);                      \
-       DEF_TB_DEV_ATTRIB(_backend, pi_prot_format);                    \
-       TB_DEV_ATTR(_backend, pi_prot_format, S_IRUGO | S_IWUSR);       \
-       DEF_TB_DEV_ATTRIB(_backend, enforce_pr_isids);                  \
-       TB_DEV_ATTR(_backend, enforce_pr_isids, S_IRUGO | S_IWUSR);     \
-       DEF_TB_DEV_ATTRIB(_backend, is_nonrot);                         \
-       TB_DEV_ATTR(_backend, is_nonrot, S_IRUGO | S_IWUSR);            \
-       DEF_TB_DEV_ATTRIB(_backend, emulate_rest_reord);                \
-       TB_DEV_ATTR(_backend, emulate_rest_reord, S_IRUGO | S_IWUSR);   \
-       DEF_TB_DEV_ATTRIB(_backend, force_pr_aptpl);                    \
-       TB_DEV_ATTR(_backend, force_pr_aptpl, S_IRUGO | S_IWUSR);       \
-       DEF_TB_DEV_ATTRIB_RO(_backend, hw_block_size);                  \
-       TB_DEV_ATTR_RO(_backend, hw_block_size);                        \
-       DEF_TB_DEV_ATTRIB(_backend, block_size);                        \
-       TB_DEV_ATTR(_backend, block_size, S_IRUGO | S_IWUSR);           \
-       DEF_TB_DEV_ATTRIB_RO(_backend, hw_max_sectors);                 \
-       TB_DEV_ATTR_RO(_backend, hw_max_sectors);                       \
-       DEF_TB_DEV_ATTRIB(_backend, optimal_sectors);                   \
-       TB_DEV_ATTR(_backend, optimal_sectors, S_IRUGO | S_IWUSR);      \
-       DEF_TB_DEV_ATTRIB_RO(_backend, hw_queue_depth);                 \
-       TB_DEV_ATTR_RO(_backend, hw_queue_depth);                       \
-       DEF_TB_DEV_ATTRIB(_backend, queue_depth);                       \
-       TB_DEV_ATTR(_backend, queue_depth, S_IRUGO | S_IWUSR);          \
-       DEF_TB_DEV_ATTRIB(_backend, max_unmap_lba_count);               \
-       TB_DEV_ATTR(_backend, max_unmap_lba_count, S_IRUGO | S_IWUSR);  \
-       DEF_TB_DEV_ATTRIB(_backend, max_unmap_block_desc_count);        \
-       TB_DEV_ATTR(_backend, max_unmap_block_desc_count, S_IRUGO | S_IWUSR); \
-       DEF_TB_DEV_ATTRIB(_backend, unmap_granularity);                 \
-       TB_DEV_ATTR(_backend, unmap_granularity, S_IRUGO | S_IWUSR);    \
-       DEF_TB_DEV_ATTRIB(_backend, unmap_granularity_alignment);       \
-       TB_DEV_ATTR(_backend, unmap_granularity_alignment, S_IRUGO | S_IWUSR); \
-       DEF_TB_DEV_ATTRIB(_backend, max_write_same_len);                \
-       TB_DEV_ATTR(_backend, max_write_same_len, S_IRUGO | S_IWUSR);
-
-#endif /* TARGET_CORE_BACKEND_CONFIGFS_H */
index aec6f6a4477c79454af758817537dd8d3a3300b8..17ae2d6a4891e57245c16fbeb4e2a8a6462d32d0 100644 (file)
@@ -9,12 +9,8 @@
 #include <net/sock.h>
 #include <net/tcp.h>
 
-#define TARGET_CORE_MOD_VERSION                "v4.1.0"
-#define TARGET_CORE_VERSION            TARGET_CORE_MOD_VERSION
+#define TARGET_CORE_VERSION            "v5.0"
 
-/* Maximum Number of LUNs per Target Portal Group */
-/* Don't raise above 511 or REPORT_LUNS needs to handle >1 page */
-#define TRANSPORT_MAX_LUNS_PER_TPG             256
 /*
  * Maximum size of a CDB that can be stored in se_cmd without allocating
  * memory dynamically for the CDB.
 #define DA_MAX_WRITE_SAME_LEN                  0
 /* Use a model alias based on the configfs backend device name */
 #define DA_EMULATE_MODEL_ALIAS                 0
-/* Emulation for Direct Page Out */
-#define DA_EMULATE_DPO                         0
-/* Emulation for Forced Unit Access WRITEs */
-#define DA_EMULATE_FUA_WRITE                   1
-/* Emulation for Forced Unit Access READs */
-#define DA_EMULATE_FUA_READ                    0
 /* Emulation for WriteCache and SYNCHRONIZE_CACHE */
 #define DA_EMULATE_WRITE_CACHE                 0
 /* Emulation for UNIT ATTENTION Interlock Control */
@@ -116,18 +106,6 @@ enum hba_flags_table {
        HBA_FLAGS_PSCSI_MODE    = 0x02,
 };
 
-/* struct se_lun->lun_status */
-enum transport_lun_status_table {
-       TRANSPORT_LUN_STATUS_FREE = 0,
-       TRANSPORT_LUN_STATUS_ACTIVE = 1,
-};
-
-/* struct se_portal_group->se_tpg_type */
-enum transport_tpg_type_table {
-       TRANSPORT_TPG_TYPE_NORMAL = 0,
-       TRANSPORT_TPG_TYPE_DISCOVERY = 1,
-};
-
 /* Special transport agnostic struct se_cmd->t_states */
 enum transport_state_table {
        TRANSPORT_NO_STATE      = 0,
@@ -158,14 +136,13 @@ enum se_cmd_flags_table {
        SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC = 0x00020000,
        SCF_COMPARE_AND_WRITE           = 0x00080000,
        SCF_COMPARE_AND_WRITE_POST      = 0x00100000,
+       SCF_PASSTHROUGH_PROT_SG_TO_MEM_NOALLOC = 0x00200000,
 };
 
 /* struct se_dev_entry->lun_flags and struct se_lun->lun_access */
 enum transport_lunflags_table {
-       TRANSPORT_LUNFLAGS_NO_ACCESS            = 0x00,
-       TRANSPORT_LUNFLAGS_INITIATOR_ACCESS     = 0x01,
-       TRANSPORT_LUNFLAGS_READ_ONLY            = 0x02,
-       TRANSPORT_LUNFLAGS_READ_WRITE           = 0x04,
+       TRANSPORT_LUNFLAGS_READ_ONLY            = 0x01,
+       TRANSPORT_LUNFLAGS_READ_WRITE           = 0x02,
 };
 
 /*
@@ -314,22 +291,13 @@ struct t10_alua_tg_pt_gp {
        struct se_device *tg_pt_gp_dev;
        struct config_group tg_pt_gp_group;
        struct list_head tg_pt_gp_list;
-       struct list_head tg_pt_gp_mem_list;
-       struct se_port *tg_pt_gp_alua_port;
+       struct list_head tg_pt_gp_lun_list;
+       struct se_lun *tg_pt_gp_alua_lun;
        struct se_node_acl *tg_pt_gp_alua_nacl;
        struct delayed_work tg_pt_gp_transition_work;
        struct completion *tg_pt_gp_transition_complete;
 };
 
-struct t10_alua_tg_pt_gp_member {
-       bool tg_pt_gp_assoc;
-       atomic_t tg_pt_gp_mem_ref_cnt;
-       spinlock_t tg_pt_gp_mem_lock;
-       struct t10_alua_tg_pt_gp *tg_pt_gp;
-       struct se_port *tg_pt;
-       struct list_head tg_pt_gp_mem_list;
-};
-
 struct t10_vpd {
        unsigned char device_identifier[INQUIRY_VPD_DEVICE_IDENTIFIER_LEN];
        int protocol_identifier_set;
@@ -374,15 +342,16 @@ struct t10_pr_registration {
        int pr_res_scope;
        /* Used for fabric initiator WWPNs using a ISID */
        bool isid_present_at_reg;
-       u32 pr_res_mapped_lun;
-       u32 pr_aptpl_target_lun;
+       u64 pr_res_mapped_lun;
+       u64 pr_aptpl_target_lun;
+       u16 tg_pt_sep_rtpi;
        u32 pr_res_generation;
        u64 pr_reg_bin_isid;
        u64 pr_res_key;
        atomic_t pr_res_holders;
        struct se_node_acl *pr_reg_nacl;
+       /* Used by ALL_TG_PT=1 registration with deve->pr_ref taken */
        struct se_dev_entry *pr_reg_deve;
-       struct se_lun *pr_reg_tg_pt_lun;
        struct list_head pr_reg_list;
        struct list_head pr_reg_abort_list;
        struct list_head pr_reg_aptpl_list;
@@ -422,7 +391,7 @@ struct se_tmr_req {
        u8                      response;
        int                     call_transport;
        /* Reference to ITT that Task Mgmt should be performed */
-       u32                     ref_task_tag;
+       u64                     ref_task_tag;
        void                    *fabric_tmr_ptr;
        struct se_cmd           *task_cmd;
        struct se_device        *tmr_dev;
@@ -475,6 +444,7 @@ struct se_cmd {
        u8                      scsi_asc;
        u8                      scsi_ascq;
        u16                     scsi_sense_length;
+       u64                     tag; /* SAM command identifier aka task tag */
        /* Delay for ALUA Active/NonOptimized state access in milliseconds */
        int                     alua_nonop_delay;
        /* See include/linux/dma-mapping.h */
@@ -493,7 +463,7 @@ struct se_cmd {
        /* Total size in bytes associated with command */
        u32                     data_length;
        u32                     residual_count;
-       u32                     orig_fe_lun;
+       u64                     orig_fe_lun;
        /* Persistent Reservation key */
        u64                     pr_res_key;
        /* Used for sense data */
@@ -501,7 +471,6 @@ struct se_cmd {
        struct list_head        se_delayed_node;
        struct list_head        se_qf_node;
        struct se_device      *se_dev;
-       struct se_dev_entry   *se_deve;
        struct se_lun           *se_lun;
        /* Only used for internal passthrough and legacy TCM fabric modules */
        struct se_session       *se_sess;
@@ -511,9 +480,8 @@ struct se_cmd {
        struct kref             cmd_kref;
        const struct target_core_fabric_ops *se_tfo;
        sense_reason_t          (*execute_cmd)(struct se_cmd *);
-       sense_reason_t          (*execute_rw)(struct se_cmd *, struct scatterlist *,
-                                             u32, enum dma_data_direction);
        sense_reason_t (*transport_complete_callback)(struct se_cmd *, bool);
+       void                    *protocol_data;
 
        unsigned char           *t_task_cdb;
        unsigned char           __t_task_cdb[TCM_MAX_COMMAND_SIZE];
@@ -569,7 +537,6 @@ struct se_cmd {
 struct se_ua {
        u8                      ua_asc;
        u8                      ua_ascq;
-       struct se_node_acl      *ua_nacl;
        struct list_head        ua_nacl_list;
 };
 
@@ -585,10 +552,10 @@ struct se_node_acl {
        char                    acl_tag[MAX_ACL_TAG_SIZE];
        /* Used for PR SPEC_I_PT=1 and REGISTER_AND_MOVE */
        atomic_t                acl_pr_ref_count;
-       struct se_dev_entry     **device_list;
+       struct hlist_head       lun_entry_hlist;
        struct se_session       *nacl_sess;
        struct se_portal_group *se_tpg;
-       spinlock_t              device_list_lock;
+       struct mutex            lun_entry_mutex;
        spinlock_t              nacl_sess_lock;
        struct config_group     acl_group;
        struct config_group     acl_attrib_group;
@@ -632,33 +599,37 @@ struct se_ml_stat_grps {
 
 struct se_lun_acl {
        char                    initiatorname[TRANSPORT_IQN_LEN];
-       u32                     mapped_lun;
+       u64                     mapped_lun;
        struct se_node_acl      *se_lun_nacl;
        struct se_lun           *se_lun;
-       struct list_head        lacl_list;
        struct config_group     se_lun_group;
        struct se_ml_stat_grps  ml_stat_grps;
 };
 
 struct se_dev_entry {
-       bool                    def_pr_registered;
        /* See transport_lunflags_table */
-       u32                     lun_flags;
-       u32                     mapped_lun;
-       u32                     total_cmds;
+       u64                     mapped_lun;
        u64                     pr_res_key;
        u64                     creation_time;
+       u32                     lun_flags;
        u32                     attach_count;
-       u64                     read_bytes;
-       u64                     write_bytes;
+       atomic_long_t           total_cmds;
+       atomic_long_t           read_bytes;
+       atomic_long_t           write_bytes;
        atomic_t                ua_count;
        /* Used for PR SPEC_I_PT=1 and REGISTER_AND_MOVE */
-       atomic_t                pr_ref_count;
-       struct se_lun_acl       *se_lun_acl;
+       struct kref             pr_kref;
+       struct completion       pr_comp;
+       struct se_lun_acl __rcu *se_lun_acl;
        spinlock_t              ua_lock;
-       struct se_lun           *se_lun;
+       struct se_lun __rcu     *se_lun;
+#define DEF_PR_REG_ACTIVE              1
+       unsigned long           deve_flags;
        struct list_head        alua_port_list;
+       struct list_head        lun_link;
        struct list_head        ua_list;
+       struct hlist_node       link;
+       struct rcu_head         rcu_head;
 };
 
 struct se_dev_attrib {
@@ -703,25 +674,48 @@ struct se_port_stat_grps {
        struct config_group scsi_transport_group;
 };
 
+struct scsi_port_stats {
+       atomic_long_t   cmd_pdus;
+       atomic_long_t   tx_data_octets;
+       atomic_long_t   rx_data_octets;
+};
+
 struct se_lun {
+       u64                     unpacked_lun;
 #define SE_LUN_LINK_MAGIC                      0xffff7771
        u32                     lun_link_magic;
-       /* See transport_lun_status_table */
-       enum transport_lun_status_table lun_status;
        u32                     lun_access;
        u32                     lun_flags;
-       u32                     unpacked_lun;
+       u32                     lun_index;
+
+       /* RELATIVE TARGET PORT IDENTIFIER */
+       u16                     lun_rtpi;
        atomic_t                lun_acl_count;
-       spinlock_t              lun_acl_lock;
-       spinlock_t              lun_sep_lock;
-       struct completion       lun_shutdown_comp;
-       struct list_head        lun_acl_list;
-       struct se_device        *lun_se_dev;
-       struct se_port          *lun_sep;
+       struct se_device __rcu  *lun_se_dev;
+
+       struct list_head        lun_deve_list;
+       spinlock_t              lun_deve_lock;
+
+       /* ALUA state */
+       int                     lun_tg_pt_secondary_stat;
+       int                     lun_tg_pt_secondary_write_md;
+       atomic_t                lun_tg_pt_secondary_offline;
+       struct mutex            lun_tg_pt_md_mutex;
+
+       /* ALUA target port group linkage */
+       struct list_head        lun_tg_pt_gp_link;
+       struct t10_alua_tg_pt_gp *lun_tg_pt_gp;
+       spinlock_t              lun_tg_pt_gp_lock;
+
+       struct se_portal_group  *lun_tpg;
+       struct scsi_port_stats  lun_stats;
        struct config_group     lun_group;
        struct se_port_stat_grps port_stat_grps;
        struct completion       lun_ref_comp;
        struct percpu_ref       lun_ref;
+       struct list_head        lun_dev_link;
+       struct hlist_node       link;
+       struct rcu_head         rcu_head;
 };
 
 struct se_dev_stat_grps {
@@ -744,7 +738,6 @@ struct se_device {
 #define DF_EMULATED_VPD_UNIT_SERIAL            0x00000004
 #define DF_USING_UDEV_PATH                     0x00000008
 #define DF_USING_ALIAS                         0x00000010
-       u32                     dev_port_count;
        /* Physical device queue depth */
        u32                     queue_depth;
        /* Used for SPC-2 reservations enforce of ISIDs */
@@ -761,7 +754,7 @@ struct se_device {
        atomic_t                dev_ordered_id;
        atomic_t                dev_ordered_sync;
        atomic_t                dev_qf_count;
-       int                     export_count;
+       u32                     export_count;
        spinlock_t              delayed_cmd_lock;
        spinlock_t              execute_task_lock;
        spinlock_t              dev_reservation_lock;
@@ -803,12 +796,15 @@ struct se_device {
 #define SE_UDEV_PATH_LEN 512           /* must be less than PAGE_SIZE */
        unsigned char           udev_path[SE_UDEV_PATH_LEN];
        /* Pointer to template of function pointers for transport */
-       struct se_subsystem_api *transport;
+       const struct target_backend_ops *transport;
        /* Linked list for struct se_hba struct se_device list */
        struct list_head        dev_list;
        struct se_lun           xcopy_lun;
        /* Protection Information */
        int                     prot_length;
+       /* For se_lun->lun_se_dev RCU read-side critical access */
+       u32                     hba_index;
+       struct rcu_head         rcu_head;
 };
 
 struct se_hba {
@@ -825,33 +821,7 @@ struct se_hba {
        spinlock_t              device_lock;
        struct config_group     hba_group;
        struct mutex            hba_access_mutex;
-       struct se_subsystem_api *transport;
-};
-
-struct scsi_port_stats {
-       u64     cmd_pdus;
-       u64     tx_data_octets;
-       u64     rx_data_octets;
-};
-
-struct se_port {
-       /* RELATIVE TARGET PORT IDENTIFER */
-       u16             sep_rtpi;
-       int             sep_tg_pt_secondary_stat;
-       int             sep_tg_pt_secondary_write_md;
-       u32             sep_index;
-       struct scsi_port_stats sep_stats;
-       /* Used for ALUA Target Port Groups membership */
-       atomic_t        sep_tg_pt_secondary_offline;
-       /* Used for PR ALL_TG_PT=1 */
-       atomic_t        sep_tg_pt_ref_cnt;
-       spinlock_t      sep_alua_lock;
-       struct mutex    sep_tg_pt_md_mutex;
-       struct t10_alua_tg_pt_gp_member *sep_alua_tg_pt_gp_mem;
-       struct se_lun *sep_lun;
-       struct se_portal_group *sep_tpg;
-       struct list_head sep_alua_list;
-       struct list_head sep_list;
+       struct target_backend   *backend;
 };
 
 struct se_tpg_np {
@@ -860,24 +830,26 @@ struct se_tpg_np {
 };
 
 struct se_portal_group {
-       /* Type of target portal group, see transport_tpg_type_table */
-       enum transport_tpg_type_table se_tpg_type;
+       /*
+        * PROTOCOL IDENTIFIER value per SPC4, 7.5.1.
+        *
+        * Negative values can be used by fabric drivers for internal use TPGs.
+        */
+       int                     proto_id;
        /* Number of ACLed Initiator Nodes for this TPG */
        u32                     num_node_acls;
        /* Used for PR SPEC_I_PT=1 and REGISTER_AND_MOVE */
        atomic_t                tpg_pr_ref_count;
        /* Spinlock for adding/removing ACLed Nodes */
-       spinlock_t              acl_node_lock;
+       struct mutex            acl_node_mutex;
        /* Spinlock for adding/removing sessions */
        spinlock_t              session_lock;
-       spinlock_t              tpg_lun_lock;
-       /* Pointer to $FABRIC_MOD portal group */
-       void                    *se_tpg_fabric_ptr;
+       struct mutex            tpg_lun_mutex;
        struct list_head        se_tpg_node;
        /* linked list for initiator ACL list */
        struct list_head        acl_node_list;
-       struct se_lun           **tpg_lun_list;
-       struct se_lun           tpg_virt_lun0;
+       struct hlist_head       tpg_lun_hlist;
+       struct se_lun           *tpg_virt_lun0;
        /* List of TCM sessions associated wth this TPG */
        struct list_head        tpg_sess_list;
        /* Pointer to $FABRIC_MOD dependent code */
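The hunks above replace the old se_tpg_type enum with an SPC-4 protocol identifier stored directly in se_portal_group, and the core_tpg_register() prototype further down in target_core_fabric.h now takes a single int in place of the old fabric_ops and fabric-pointer arguments. A minimal sketch of what a fabric's TPG setup might look like under that reading; struct example_tpg and SCSI_PROTOCOL_ISCSI are illustrative assumptions that do not appear in this diff, and treating the int argument as the new proto_id is an inference from the two hunks:

/*
 * Hedged sketch: registering a TPG after the proto_id conversion.
 * example_tpg is a hypothetical fabric-private wrapper around se_portal_group.
 */
static int example_make_tpg(struct example_tpg *tpg, struct se_wwn *wwn)
{
	/* the protocol identifier replaces the old TRANSPORT_TPG_TYPE_* enum */
	return core_tpg_register(wwn, &tpg->se_tpg, SCSI_PROTOCOL_ISCSI);
}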
diff --git a/include/target/target_core_configfs.h b/include/target/target_core_configfs.h
deleted file mode 100644 (file)
index b99c011..0000000
+++ /dev/null
@@ -1,48 +0,0 @@
-#define TARGET_CORE_CONFIGFS_VERSION TARGET_CORE_MOD_VERSION
-
-#define TARGET_CORE_CONFIG_ROOT        "/sys/kernel/config"
-
-#define TARGET_CORE_NAME_MAX_LEN       64
-#define TARGET_FABRIC_NAME_SIZE                32
-
-struct target_fabric_configfs_template {
-       struct config_item_type tfc_discovery_cit;
-       struct config_item_type tfc_wwn_cit;
-       struct config_item_type tfc_wwn_fabric_stats_cit;
-       struct config_item_type tfc_tpg_cit;
-       struct config_item_type tfc_tpg_base_cit;
-       struct config_item_type tfc_tpg_lun_cit;
-       struct config_item_type tfc_tpg_port_cit;
-       struct config_item_type tfc_tpg_port_stat_cit;
-       struct config_item_type tfc_tpg_np_cit;
-       struct config_item_type tfc_tpg_np_base_cit;
-       struct config_item_type tfc_tpg_attrib_cit;
-       struct config_item_type tfc_tpg_auth_cit;
-       struct config_item_type tfc_tpg_param_cit;
-       struct config_item_type tfc_tpg_nacl_cit;
-       struct config_item_type tfc_tpg_nacl_base_cit;
-       struct config_item_type tfc_tpg_nacl_attrib_cit;
-       struct config_item_type tfc_tpg_nacl_auth_cit;
-       struct config_item_type tfc_tpg_nacl_param_cit;
-       struct config_item_type tfc_tpg_nacl_stat_cit;
-       struct config_item_type tfc_tpg_mappedlun_cit;
-       struct config_item_type tfc_tpg_mappedlun_stat_cit;
-};
-
-struct target_fabric_configfs {
-       char                    tf_name[TARGET_FABRIC_NAME_SIZE];
-       atomic_t                tf_access_cnt;
-       struct list_head        tf_list;
-       struct config_group     tf_group;
-       struct config_group     tf_disc_group;
-       struct config_group     *tf_default_groups[2];
-       /* Pointer to fabric's config_item */
-       struct config_item      *tf_fabric;
-       /* Passed from fabric modules */
-       struct config_item_type *tf_fabric_cit;
-       /* Pointer to fabric's struct module */
-       struct module *tf_module;
-       struct target_core_fabric_ops tf_ops;
-       struct target_fabric_configfs_template tf_cit_tmpl;
-};
-
index 0f4dc3768587bc2d41370d015c69502757322324..18afef91b447f950cb5f78b022355a1db6664af0 100644 (file)
@@ -4,20 +4,11 @@
 struct target_core_fabric_ops {
        struct module *module;
        const char *name;
+       size_t node_acl_size;
        char *(*get_fabric_name)(void);
-       u8 (*get_fabric_proto_ident)(struct se_portal_group *);
        char *(*tpg_get_wwn)(struct se_portal_group *);
        u16 (*tpg_get_tag)(struct se_portal_group *);
        u32 (*tpg_get_default_depth)(struct se_portal_group *);
-       u32 (*tpg_get_pr_transport_id)(struct se_portal_group *,
-                               struct se_node_acl *,
-                               struct t10_pr_registration *, int *,
-                               unsigned char *);
-       u32 (*tpg_get_pr_transport_id_len)(struct se_portal_group *,
-                               struct se_node_acl *,
-                               struct t10_pr_registration *, int *);
-       char *(*tpg_parse_pr_out_transport_id)(struct se_portal_group *,
-                               const char *, u32 *, char **);
        int (*tpg_check_demo_mode)(struct se_portal_group *);
        int (*tpg_check_demo_mode_cache)(struct se_portal_group *);
        int (*tpg_check_demo_mode_write_protect)(struct se_portal_group *);
@@ -36,10 +27,6 @@ struct target_core_fabric_ops {
         * WRITE_STRIP and READ_INSERT operations.
         */
        int (*tpg_check_prot_fabric_only)(struct se_portal_group *);
-       struct se_node_acl *(*tpg_alloc_fabric_acl)(
-                                       struct se_portal_group *);
-       void (*tpg_release_fabric_acl)(struct se_portal_group *,
-                                       struct se_node_acl *);
        u32 (*tpg_get_inst_index)(struct se_portal_group *);
        /*
         * Optional to release struct se_cmd and fabric dependent allocated
@@ -50,7 +37,6 @@ struct target_core_fabric_ops {
         */
        int (*check_stop_free)(struct se_cmd *);
        void (*release_cmd)(struct se_cmd *);
-       void (*put_session)(struct se_session *);
        /*
         * Called with spin_lock_bh(struct se_portal_group->session_lock held.
         */
@@ -66,7 +52,6 @@ struct target_core_fabric_ops {
        int (*write_pending)(struct se_cmd *);
        int (*write_pending_status)(struct se_cmd *);
        void (*set_default_node_attributes)(struct se_node_acl *);
-       u32 (*get_task_tag)(struct se_cmd *);
        int (*get_cmd_state)(struct se_cmd *);
        int (*queue_data_in)(struct se_cmd *);
        int (*queue_status)(struct se_cmd *);
@@ -88,9 +73,8 @@ struct target_core_fabric_ops {
        struct se_tpg_np *(*fabric_make_np)(struct se_portal_group *,
                                struct config_group *, const char *);
        void (*fabric_drop_np)(struct se_tpg_np *);
-       struct se_node_acl *(*fabric_make_nodeacl)(struct se_portal_group *,
-                               struct config_group *, const char *);
-       void (*fabric_drop_nodeacl)(struct se_node_acl *);
+       int (*fabric_init_nodeacl)(struct se_node_acl *, const char *);
+       void (*fabric_cleanup_nodeacl)(struct se_node_acl *);
 
        struct configfs_attribute **tfc_discovery_attrs;
        struct configfs_attribute **tfc_wwn_attrs;
@@ -132,16 +116,16 @@ void      transport_deregister_session(struct se_session *);
 void   transport_init_se_cmd(struct se_cmd *,
                const struct target_core_fabric_ops *,
                struct se_session *, u32, int, int, unsigned char *);
-sense_reason_t transport_lookup_cmd_lun(struct se_cmd *, u32);
+sense_reason_t transport_lookup_cmd_lun(struct se_cmd *, u64);
 sense_reason_t target_setup_cmd_from_cdb(struct se_cmd *, unsigned char *);
 int    target_submit_cmd_map_sgls(struct se_cmd *, struct se_session *,
-               unsigned char *, unsigned char *, u32, u32, int, int, int,
+               unsigned char *, unsigned char *, u64, u32, int, int, int,
                struct scatterlist *, u32, struct scatterlist *, u32,
                struct scatterlist *, u32);
 int    target_submit_cmd(struct se_cmd *, struct se_session *, unsigned char *,
-               unsigned char *, u32, u32, int, int, int);
+               unsigned char *, u64, u32, int, int, int);
 int    target_submit_tmr(struct se_cmd *se_cmd, struct se_session *se_sess,
-               unsigned char *sense, u32 unpacked_lun,
+               unsigned char *sense, u64 unpacked_lun,
                void *fabric_tmr_ptr, unsigned char tm_type,
                gfp_t, unsigned int, int);
 int    transport_handle_cdb_direct(struct se_cmd *);
@@ -155,8 +139,8 @@ bool        transport_wait_for_tasks(struct se_cmd *);
 int    transport_check_aborted_status(struct se_cmd *, int);
 int    transport_send_check_condition_and_sense(struct se_cmd *,
                sense_reason_t, int);
-int    target_get_sess_cmd(struct se_session *, struct se_cmd *, bool);
-int    target_put_sess_cmd(struct se_session *, struct se_cmd *);
+int    target_get_sess_cmd(struct se_cmd *, bool);
+int    target_put_sess_cmd(struct se_cmd *);
 void   target_sess_cmd_list_set_waiting(struct se_session *);
 void   target_wait_for_sess_cmds(struct se_session *);
 
@@ -167,52 +151,19 @@ void      core_tmr_release_req(struct se_tmr_req *);
 int    transport_generic_handle_tmr(struct se_cmd *);
 void   transport_generic_request_failure(struct se_cmd *, sense_reason_t);
 void   __target_execute_cmd(struct se_cmd *);
-int    transport_lookup_tmr_lun(struct se_cmd *, u32);
+int    transport_lookup_tmr_lun(struct se_cmd *, u64);
 
 struct se_node_acl *core_tpg_get_initiator_node_acl(struct se_portal_group *tpg,
                unsigned char *);
 struct se_node_acl *core_tpg_check_initiator_node_acl(struct se_portal_group *,
                unsigned char *);
-void   core_tpg_clear_object_luns(struct se_portal_group *);
-struct se_node_acl *core_tpg_add_initiator_node_acl(struct se_portal_group *,
-               struct se_node_acl *, const char *, u32);
-int    core_tpg_del_initiator_node_acl(struct se_portal_group *,
-               struct se_node_acl *, int);
 int    core_tpg_set_initiator_node_queue_depth(struct se_portal_group *,
                unsigned char *, u32, int);
 int    core_tpg_set_initiator_node_tag(struct se_portal_group *,
                struct se_node_acl *, const char *);
-int    core_tpg_register(const struct target_core_fabric_ops *,
-               struct se_wwn *, struct se_portal_group *, void *, int);
+int    core_tpg_register(struct se_wwn *, struct se_portal_group *, int);
 int    core_tpg_deregister(struct se_portal_group *);
 
-/* SAS helpers */
-u8     sas_get_fabric_proto_ident(struct se_portal_group *);
-u32    sas_get_pr_transport_id(struct se_portal_group *, struct se_node_acl *,
-               struct t10_pr_registration *, int *, unsigned char *);
-u32    sas_get_pr_transport_id_len(struct se_portal_group *, struct se_node_acl *,
-               struct t10_pr_registration *, int *);
-char   *sas_parse_pr_out_transport_id(struct se_portal_group *, const char *,
-               u32 *, char **);
-
-/* FC helpers */
-u8     fc_get_fabric_proto_ident(struct se_portal_group *);
-u32    fc_get_pr_transport_id(struct se_portal_group *, struct se_node_acl *,
-               struct t10_pr_registration *, int *, unsigned char *);
-u32    fc_get_pr_transport_id_len(struct se_portal_group *, struct se_node_acl *,
-               struct t10_pr_registration *, int *);
-char   *fc_parse_pr_out_transport_id(struct se_portal_group *, const char *,
-               u32 *, char **);
-
-/* iSCSI helpers */
-u8     iscsi_get_fabric_proto_ident(struct se_portal_group *);
-u32    iscsi_get_pr_transport_id(struct se_portal_group *, struct se_node_acl *,
-               struct t10_pr_registration *, int *, unsigned char *);
-u32    iscsi_get_pr_transport_id_len(struct se_portal_group *, struct se_node_acl *,
-               struct t10_pr_registration *, int *);
-char   *iscsi_parse_pr_out_transport_id(struct se_portal_group *, const char *,
-               u32 *, char **);
-
 /*
  * The LIO target core uses DMA_TO_DEVICE to mean that data is going
  * to the target (eg handling a WRITE) and DMA_FROM_DEVICE to mean
index d3f4832db289b5da2787427360e7b933677d9fd5..b6fce900a8334613f45f169a0c28802327222f56 100644 (file)
@@ -313,6 +313,9 @@ struct drm_amdgpu_gem_op {
 #define AMDGPU_VA_OP_MAP                       1
 #define AMDGPU_VA_OP_UNMAP                     2
 
+/* Delay the page table update till the next CS */
+#define AMDGPU_VM_DELAY_UPDATE         (1 << 0)
+
 /* Mapping flags */
 /* readable mapping */
 #define AMDGPU_VM_PAGE_READABLE                (1 << 1)
@@ -348,6 +351,7 @@ struct drm_amdgpu_gem_va {
 
 #define AMDGPU_CHUNK_ID_IB             0x01
 #define AMDGPU_CHUNK_ID_FENCE          0x02
+#define AMDGPU_CHUNK_ID_DEPENDENCIES   0x03
 
 struct drm_amdgpu_cs_chunk {
        uint32_t                chunk_id;
@@ -399,6 +403,14 @@ struct drm_amdgpu_cs_chunk_ib {
        uint32_t ring;
 };
 
+struct drm_amdgpu_cs_chunk_dep {
+       uint32_t ip_type;
+       uint32_t ip_instance;
+       uint32_t ring;
+       uint32_t ctx_id;
+       uint64_t handle;
+};
+
 struct drm_amdgpu_cs_chunk_fence {
        uint32_t handle;
        uint32_t offset;
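The new AMDGPU_CHUNK_ID_DEPENDENCIES chunk and drm_amdgpu_cs_chunk_dep struct let userspace ask a command submission to wait for a fence produced by another context. A hedged userspace sketch of filling such a chunk; AMDGPU_HW_IP_GFX and the length_dw/chunk_data members of drm_amdgpu_cs_chunk come from the full header rather than this excerpt, and prereq_ctx_id/prereq_seq are placeholders for the submission being waited on:

#include <stdint.h>
#include <drm/amdgpu_drm.h>

/* Build a dependency chunk that makes the next CS wait for a prior fence. */
static struct drm_amdgpu_cs_chunk make_dep_chunk(struct drm_amdgpu_cs_chunk_dep *dep,
						 uint32_t prereq_ctx_id, uint64_t prereq_seq)
{
	dep->ip_type     = AMDGPU_HW_IP_GFX;	/* engine the prerequisite ran on */
	dep->ip_instance = 0;
	dep->ring        = 0;
	dep->ctx_id      = prereq_ctx_id;	/* context that produced the fence */
	dep->handle      = prereq_seq;		/* fence sequence number to wait for */

	return (struct drm_amdgpu_cs_chunk){
		.chunk_id   = AMDGPU_CHUNK_ID_DEPENDENCIES,
		.length_dw  = sizeof(*dep) / 4,	/* assumed to be counted in dwords */
		.chunk_data = (uintptr_t)dep,
	};
}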
index 25084a052a1eff964d19a9683d5d6470590e4c7e..c9aca042e61d197927409531af4f8cdd88218adc 100644 (file)
@@ -755,4 +755,7 @@ struct fuse_notify_retrieve_in {
        uint64_t        dummy4;
 };
 
+/* Device ioctls: */
+#define FUSE_DEV_IOC_CLONE     _IOR(229, 0, uint32_t)
+
 #endif /* _LINUX_FUSE_H */
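The new FUSE_DEV_IOC_CLONE ioctl attaches a freshly opened /dev/fuse fd to an existing session, so that, for example, each worker thread can read requests on its own descriptor. A hedged userspace sketch of the expected call sequence; clone_fuse_fd() is illustrative and error handling is kept minimal:

#include <stdint.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/fuse.h>

static int clone_fuse_fd(int session_fd)
{
	uint32_t src = session_fd;	/* fd obtained when the filesystem was mounted */
	int clone_fd = open("/dev/fuse", O_RDWR | O_CLOEXEC);

	if (clone_fd < 0)
		return -1;
	if (ioctl(clone_fd, FUSE_DEV_IOC_CLONE, &src) < 0) {
		close(clone_fd);	/* kernel lacks the ioctl; fall back to sharing session_fd */
		return -1;
	}
	return clone_fd;
}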
index 50ae24335444f3f0188bdbafb7e409ead40a0975..3cb5e1d85ddd1894fa3561de99157a25662ea612 100644 (file)
 #define TUNGETFILTER _IOR('T', 219, struct sock_fprog)
 #define TUNSETVNETLE _IOW('T', 220, int)
 #define TUNGETVNETLE _IOR('T', 221, int)
+/* The TUNSETVNETBE and TUNGETVNETBE ioctls are for cross-endian support on
+ * little-endian hosts. Not all kernel configurations support them, but all
+ * configurations that support SET also support GET.
+ */
+#define TUNSETVNETBE _IOW('T', 222, int)
+#define TUNGETVNETBE _IOR('T', 223, int)
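A hedged sketch of how userspace would use the pair just added; tap_set_vnet_be() is illustrative, and per the comment above, a failure here simply means the running kernel was not built with the cross-endian support:

#include <sys/ioctl.h>
#include <linux/if_tun.h>

/* Request big-endian vnet headers on a tap fd (e.g. for a big-endian guest). */
static int tap_set_vnet_be(int tap_fd, int enable)
{
	if (ioctl(tap_fd, TUNSETVNETBE, &enable) < 0)
		return -1;	/* cross-endian support not available */
	return 0;
}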
 
 /* TUNSETIFF ifr flags */
 #define IFF_TUN                0x0001
index 83d6236a2f083d787f4ed887b71aa41c76330d9e..eaf94919291aaf78419a5f43b17d66eb07f12600 100644 (file)
 #define _UAPI_LINUX_IN_H
 
 #include <linux/types.h>
+#include <linux/libc-compat.h>
 #include <linux/socket.h>
 
+#if __UAPI_DEF_IN_IPPROTO
 /* Standard well-defined IP protocols.  */
 enum {
   IPPROTO_IP = 0,              /* Dummy protocol for TCP               */
@@ -75,12 +77,14 @@ enum {
 #define IPPROTO_RAW            IPPROTO_RAW
   IPPROTO_MAX
 };
+#endif
 
-
+#if __UAPI_DEF_IN_ADDR
 /* Internet address. */
 struct in_addr {
        __be32  s_addr;
 };
+#endif
 
 #define IP_TOS         1
 #define IP_TTL         2
@@ -158,6 +162,7 @@ struct in_addr {
 
 /* Request struct for multicast socket ops */
 
+#if __UAPI_DEF_IP_MREQ
 struct ip_mreq  {
        struct in_addr imr_multiaddr;   /* IP multicast address of group */
        struct in_addr imr_interface;   /* local IP address of interface */
@@ -209,14 +214,18 @@ struct group_filter {
 #define GROUP_FILTER_SIZE(numsrc) \
        (sizeof(struct group_filter) - sizeof(struct __kernel_sockaddr_storage) \
        + (numsrc) * sizeof(struct __kernel_sockaddr_storage))
+#endif
 
+#if __UAPI_DEF_IN_PKTINFO
 struct in_pktinfo {
        int             ipi_ifindex;
        struct in_addr  ipi_spec_dst;
        struct in_addr  ipi_addr;
 };
+#endif
 
 /* Structure describing an Internet (IP) socket address. */
+#if  __UAPI_DEF_SOCKADDR_IN
 #define __SOCK_SIZE__  16              /* sizeof(struct sockaddr)      */
 struct sockaddr_in {
   __kernel_sa_family_t sin_family;     /* Address family               */
@@ -228,8 +237,9 @@ struct sockaddr_in {
                        sizeof(unsigned short int) - sizeof(struct in_addr)];
 };
 #define sin_zero       __pad           /* for BSD UNIX comp. -FvK      */
+#endif
 
-
+#if __UAPI_DEF_IN_CLASS
 /*
  * Definitions of the bits in an Internet address integer.
  * On subnets, host and network parts are found according
@@ -280,7 +290,7 @@ struct sockaddr_in {
 #define INADDR_ALLHOSTS_GROUP  0xe0000001U     /* 224.0.0.1   */
 #define INADDR_ALLRTRS_GROUP    0xe0000002U    /* 224.0.0.2 */
 #define INADDR_MAX_LOCAL_GROUP  0xe00000ffU    /* 224.0.0.255 */
-
+#endif
 
 /* <asm/byteorder.h> contains the htonl type stuff.. */
 #include <asm/byteorder.h> 
index fa673e9cc040aefcee4e96ee3e4bd6892c5be562..7d024ceb075d8d4cd657c1c25db37a748940fc89 100644 (file)
 
 /* GLIBC headers included first so don't define anything
  * that would already be defined. */
+#define __UAPI_DEF_IN_ADDR             0
+#define __UAPI_DEF_IN_IPPROTO          0
+#define __UAPI_DEF_IN_PKTINFO          0
+#define __UAPI_DEF_IP_MREQ             0
+#define __UAPI_DEF_SOCKADDR_IN         0
+#define __UAPI_DEF_IN_CLASS            0
+
 #define __UAPI_DEF_IN6_ADDR            0
 /* The exception is the in6_addr macros which must be defined
  * if the glibc code didn't define them. This guard matches
 /* Linux headers included first, and we must define everything
  * we need. The expectation is that glibc will check the
  * __UAPI_DEF_* defines and adjust appropriately. */
+#define __UAPI_DEF_IN_ADDR             1
+#define __UAPI_DEF_IN_IPPROTO          1
+#define __UAPI_DEF_IN_PKTINFO          1
+#define __UAPI_DEF_IP_MREQ             1
+#define __UAPI_DEF_SOCKADDR_IN         1
+#define __UAPI_DEF_IN_CLASS            1
+
 #define __UAPI_DEF_IN6_ADDR            1
 /* We unconditionally define the in6_addr macros and glibc must
  * coordinate. */
  * that we need. */
 #else /* !defined(__GLIBC__) */
 
+/* Definitions for in.h */
+#define __UAPI_DEF_IN_ADDR             1
+#define __UAPI_DEF_IN_IPPROTO          1
+#define __UAPI_DEF_IN_PKTINFO          1
+#define __UAPI_DEF_IP_MREQ             1
+#define __UAPI_DEF_SOCKADDR_IN         1
+#define __UAPI_DEF_IN_CLASS            1
+
 /* Definitions for in6.h */
 #define __UAPI_DEF_IN6_ADDR            1
 #define __UAPI_DEF_IN6_ADDR_ALT                1
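The practical effect of the new __UAPI_DEF_IN_* guards is that the kernel's <linux/in.h> no longer re-defines the IPv4 types when glibc's <netinet/in.h> was included first. A hedged illustration of the include order this fixes; whether the reverse order also works depends on glibc honoring the same guards, which the comments above only state as an expectation:

#include <sys/socket.h>
#include <netinet/in.h>		/* glibc definitions first */
#include <linux/in.h>		/* guards now suppress in_addr, sockaddr_in, ip_mreq, ... */

static struct sockaddr_in example_addr = { .sin_family = AF_INET };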
index bb6a5b4cb3c558eb7d82fce2ab2a9b8b0fadf0ca..ab3731917bac326a3a94ac1baa174bdcb35ceaab 100644 (file)
@@ -103,6 +103,20 @@ struct vhost_memory {
 /* Get accessor: reads index, writes value in num */
 #define VHOST_GET_VRING_BASE _IOWR(VHOST_VIRTIO, 0x12, struct vhost_vring_state)
 
+/* Set the vring byte order in num. Valid values are VHOST_VRING_LITTLE_ENDIAN
+ * or VHOST_VRING_BIG_ENDIAN (other values return -EINVAL).
+ * The byte order cannot be changed while the device is active: trying to do so
+ * returns -EBUSY.
+ * This is a legacy only API that is simply ignored when VIRTIO_F_VERSION_1 is
+ * set.
+ * Not all kernel configurations support this ioctl, but all configurations that
+ * support SET also support GET.
+ */
+#define VHOST_VRING_LITTLE_ENDIAN 0
+#define VHOST_VRING_BIG_ENDIAN 1
+#define VHOST_SET_VRING_ENDIAN _IOW(VHOST_VIRTIO, 0x13, struct vhost_vring_state)
+#define VHOST_GET_VRING_ENDIAN _IOW(VHOST_VIRTIO, 0x14, struct vhost_vring_state)
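A hedged sketch of driving the new ioctl from userspace, for example when emulating a big-endian guest on a little-endian host; the index/num layout of struct vhost_vring_state is assumed from the existing header rather than shown in this hunk, and per the comment the call must happen while the device is stopped:

#include <sys/ioctl.h>
#include <linux/vhost.h>

static int vhost_set_vring_big_endian(int vhost_fd, unsigned int vq_index)
{
	struct vhost_vring_state state = {
		.index = vq_index,
		.num   = VHOST_VRING_BIG_ENDIAN,	/* legacy (pre VIRTIO_F_VERSION_1) rings only */
	};

	return ioctl(vhost_fd, VHOST_SET_VRING_ENDIAN, &state);	/* -EBUSY if the device is active */
}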
+
 /* The following ioctls use eventfd file descriptors to signal and poll
  * for events. */
 
index 7d1ffd2ae536996e45a8ae5a88981d7e32c65e15..af09b4fb43d291856708eb78197adb74dd1b4f48 100644 (file)
@@ -435,6 +435,7 @@ config TASKSTATS
 config TASK_DELAY_ACCT
        bool "Enable per-task delay accounting"
        depends on TASKSTATS
+       select SCHED_INFO
        help
          Collect information on time spent by a task waiting for system
          resources like cpu, synchronous block I/O completion and swapping
@@ -820,7 +821,7 @@ config IKCONFIG_PROC
 
 config LOG_BUF_SHIFT
        int "Kernel log buffer size (16 => 64KB, 17 => 128KB)"
-       range 12 21
+       range 12 25
        default 17
        depends on PRINTK
        help
@@ -1941,26 +1942,21 @@ config MODULE_COMPRESS
        bool "Compress modules on installation"
        depends on MODULES
        help
-         This option compresses the kernel modules when 'make
-         modules_install' is run.
 
-         The modules will be compressed either using gzip or xz depend on the
-         choice made in "Compression algorithm".
+         Compresses kernel modules when 'make modules_install' is run; gzip or
+         xz depending on "Compression algorithm" below.
 
-         module-init-tools has support for gzip format while kmod handle gzip
-         and xz compressed modules.
+         module-init-tools MAY support gzip, and kmod MAY support gzip and xz.
 
-         When a kernel module is installed from outside of the main kernel
-         source and uses the Kbuild system for installing modules then that
-         kernel module will also be compressed when it is installed.
+         Out-of-tree kernel modules installed using Kbuild will also be
+         compressed upon installation.
 
-         This option provides little benefit when the modules are to be used inside
-         an initrd or initramfs, it generally is more efficient to compress the whole
-         initrd or initramfs instead.
+         Note: for modules inside an initrd or initramfs, it's more efficient
+         to compress the whole initrd or initramfs instead.
 
-         This is fully compatible with signed modules while the signed module is
-         compressed. module-init-tools or kmod handles decompression and provide to
-         other layer the uncompressed but signed payload.
+         Note: This is fully compatible with signed modules.
+
+         If in doubt, say N.
 
 choice
        prompt "Compression algorithm"
@@ -1982,6 +1978,10 @@ endchoice
 
 endif # MODULES
 
+config MODULES_TREE_LOOKUP
+       def_bool y
+       depends on PERF_EVENTS || TRACING
+
 config INIT_ALL_POSSIBLE
        bool
        help
index c599aea23bb1cbaec3eeb55ec2a181fcd735fd2c..c5d5626289cee3a46f7e1b7d2441ca0496ba4471 100644 (file)
@@ -1004,6 +1004,8 @@ static noinline void __init kernel_init_freeable(void)
        smp_init();
        sched_init_smp();
 
+       page_alloc_init_late();
+
        do_basic_setup();
 
        /* Open the /dev/console on the rootfs, this should never fail */
index 2b6fdbb9e0e9aeee2ae962ddd19a3dd1ba83174b..66c4f567eb7368d21ff11377f629c53cc169bc8b 100644 (file)
--- a/ipc/msg.c
+++ b/ipc/msg.c
@@ -76,7 +76,7 @@ struct msg_sender {
 
 static inline struct msg_queue *msq_obtain_object(struct ipc_namespace *ns, int id)
 {
-       struct kern_ipc_perm *ipcp = ipc_obtain_object(&msg_ids(ns), id);
+       struct kern_ipc_perm *ipcp = ipc_obtain_object_idr(&msg_ids(ns), id);
 
        if (IS_ERR(ipcp))
                return ERR_CAST(ipcp);
@@ -196,7 +196,7 @@ static void expunge_all(struct msg_queue *msq, int res)
                 * or dealing with -EAGAIN cases. See lockless receive part 1
                 * and 2 in do_msgrcv().
                 */
-               smp_mb();
+               smp_wmb(); /* barrier (B) */
                msr->r_msg = ERR_PTR(res);
        }
 }
@@ -580,7 +580,8 @@ static inline int pipelined_send(struct msg_queue *msq, struct msg_msg *msg)
                                /* initialize pipelined send ordering */
                                msr->r_msg = NULL;
                                wake_up_process(msr->r_tsk);
-                               smp_mb(); /* see barrier comment below */
+                               /* barrier (B) see barrier comment below */
+                               smp_wmb();
                                msr->r_msg = ERR_PTR(-E2BIG);
                        } else {
                                msr->r_msg = NULL;
@@ -589,11 +590,12 @@ static inline int pipelined_send(struct msg_queue *msq, struct msg_msg *msg)
                                wake_up_process(msr->r_tsk);
                                /*
                                 * Ensure that the wakeup is visible before
-                                * setting r_msg, as the receiving end depends
-                                * on it. See lockless receive part 1 and 2 in
-                                * do_msgrcv().
+                                * setting r_msg, as the receiving can otherwise
+                                * exit - once r_msg is set, the receiver can
+                                * continue. See lockless receive part 1 and 2
+                                * in do_msgrcv(). Barrier (B).
                                 */
-                               smp_mb();
+                               smp_wmb();
                                msr->r_msg = msg;
 
                                return 1;
@@ -932,12 +934,38 @@ long do_msgrcv(int msqid, void __user *buf, size_t bufsz, long msgtyp, int msgfl
                /* Lockless receive, part 2:
                 * Wait until pipelined_send or expunge_all are outside of
                 * wake_up_process(). There is a race with exit(), see
-                * ipc/mqueue.c for the details.
+                * ipc/mqueue.c for the details. The correct serialization
+                * ensures that a receiver cannot continue without the wakeup
+                * being visible _before_ setting r_msg:
+                *
+                * CPU 0                             CPU 1
+                * <loop receiver>
+                *   smp_rmb(); (A) <-- pair -.      <waker thread>
+                *   <load ->r_msg>           |        msr->r_msg = NULL;
+                *                            |        wake_up_process();
+                * <continue>                 `------> smp_wmb(); (B)
+                *                                     msr->r_msg = msg;
+                *
+                * Where (A) orders the message value read and where (B) orders
+                * the write to the r_msg -- done in both pipelined_send and
+                * expunge_all.
                 */
-               msg = (struct msg_msg *)msr_d.r_msg;
-               while (msg == NULL) {
-                       cpu_relax();
+               for (;;) {
+                       /*
+                        * Pairs with writer barrier in pipelined_send
+                        * or expunge_all.
+                        */
+                       smp_rmb(); /* barrier (A) */
                        msg = (struct msg_msg *)msr_d.r_msg;
+                       if (msg)
+                               break;
+
+                       /*
+                        * The cpu_relax() call is a compiler barrier
+                        * which forces everything in this loop to be
+                        * re-loaded.
+                        */
+                       cpu_relax();
                }
 
                /* Lockless receive, part 3:
index d1a6edd17eba2ce2f1e3bf83cfead51a6e6117a5..bc3d530cb23efacb2e5695ad85a9bd3898524fa2 100644 (file)
--- a/ipc/sem.c
+++ b/ipc/sem.c
@@ -391,7 +391,7 @@ static inline struct sem_array *sem_obtain_lock(struct ipc_namespace *ns,
        struct kern_ipc_perm *ipcp;
        struct sem_array *sma;
 
-       ipcp = ipc_obtain_object(&sem_ids(ns), id);
+       ipcp = ipc_obtain_object_idr(&sem_ids(ns), id);
        if (IS_ERR(ipcp))
                return ERR_CAST(ipcp);
 
@@ -410,7 +410,7 @@ static inline struct sem_array *sem_obtain_lock(struct ipc_namespace *ns,
 
 static inline struct sem_array *sem_obtain_object(struct ipc_namespace *ns, int id)
 {
-       struct kern_ipc_perm *ipcp = ipc_obtain_object(&sem_ids(ns), id);
+       struct kern_ipc_perm *ipcp = ipc_obtain_object_idr(&sem_ids(ns), id);
 
        if (IS_ERR(ipcp))
                return ERR_CAST(ipcp);
index 6d767071c3673dce09984933c3a444d78880bd10..06e5cf2fe019faee43aa9f8ca9f17cad4973b74d 100644 (file)
--- a/ipc/shm.c
+++ b/ipc/shm.c
@@ -129,7 +129,7 @@ void __init shm_init(void)
 
 static inline struct shmid_kernel *shm_obtain_object(struct ipc_namespace *ns, int id)
 {
-       struct kern_ipc_perm *ipcp = ipc_obtain_object(&shm_ids(ns), id);
+       struct kern_ipc_perm *ipcp = ipc_obtain_object_idr(&shm_ids(ns), id);
 
        if (IS_ERR(ipcp))
                return ERR_CAST(ipcp);
@@ -155,8 +155,11 @@ static inline struct shmid_kernel *shm_lock(struct ipc_namespace *ns, int id)
 {
        struct kern_ipc_perm *ipcp = ipc_lock(&shm_ids(ns), id);
 
-       if (IS_ERR(ipcp))
-               return (struct shmid_kernel *)ipcp;
+       /*
+        * We raced in the idr lookup or with shm_destroy().  Either way, the
+        * ID is busted.
+        */
+       BUG_ON(IS_ERR(ipcp));
 
        return container_of(ipcp, struct shmid_kernel, shm_perm);
 }
@@ -191,7 +194,6 @@ static void shm_open(struct vm_area_struct *vma)
        struct shmid_kernel *shp;
 
        shp = shm_lock(sfd->ns, sfd->id);
-       BUG_ON(IS_ERR(shp));
        shp->shm_atim = get_seconds();
        shp->shm_lprid = task_tgid_vnr(current);
        shp->shm_nattch++;
@@ -258,7 +260,6 @@ static void shm_close(struct vm_area_struct *vma)
        down_write(&shm_ids(ns).rwsem);
        /* remove from the list of attaches of the shm segment */
        shp = shm_lock(ns, sfd->id);
-       BUG_ON(IS_ERR(shp));
        shp->shm_lprid = task_tgid_vnr(current);
        shp->shm_dtim = get_seconds();
        shp->shm_nattch--;
@@ -1191,7 +1192,6 @@ out_fput:
 out_nattch:
        down_write(&shm_ids(ns).rwsem);
        shp = shm_lock(ns, shmid);
-       BUG_ON(IS_ERR(shp));
        shp->shm_nattch--;
        if (shm_may_destroy(ns, shp))
                shm_destroy(ns, shp);
index ff3323ef8d8b4ea62f04306748e76cebd2f573e8..be4230020a1f718c31b02012554600c710b928b9 100644 (file)
@@ -467,10 +467,7 @@ void ipc_rcu_free(struct rcu_head *head)
 {
        struct ipc_rcu *p = container_of(head, struct ipc_rcu, rcu);
 
-       if (is_vmalloc_addr(p))
-               vfree(p);
-       else
-               kfree(p);
+       kvfree(p);
 }
 
 /**
@@ -558,7 +555,7 @@ void ipc64_perm_to_ipc_perm(struct ipc64_perm *in, struct ipc_perm *out)
  * Call inside the RCU critical section.
  * The ipc object is *not* locked on exit.
  */
-struct kern_ipc_perm *ipc_obtain_object(struct ipc_ids *ids, int id)
+struct kern_ipc_perm *ipc_obtain_object_idr(struct ipc_ids *ids, int id)
 {
        struct kern_ipc_perm *out;
        int lid = ipcid_to_idx(id);
@@ -584,21 +581,24 @@ struct kern_ipc_perm *ipc_lock(struct ipc_ids *ids, int id)
        struct kern_ipc_perm *out;
 
        rcu_read_lock();
-       out = ipc_obtain_object(ids, id);
+       out = ipc_obtain_object_idr(ids, id);
        if (IS_ERR(out))
-               goto err1;
+               goto err;
 
        spin_lock(&out->lock);
 
-       /* ipc_rmid() may have already freed the ID while ipc_lock
-        * was spinning: here verify that the structure is still valid
+       /*
+        * ipc_rmid() may have already freed the ID while ipc_lock()
+        * was spinning: here verify that the structure is still valid.
+        * Upon races with RMID, return -EIDRM, thus indicating that
+        * the ID points to a removed identifier.
         */
        if (ipc_valid_object(out))
                return out;
 
        spin_unlock(&out->lock);
-       out = ERR_PTR(-EINVAL);
-err1:
+       out = ERR_PTR(-EIDRM);
+err:
        rcu_read_unlock();
        return out;
 }
@@ -608,7 +608,7 @@ err1:
  * @ids: ipc identifier set
  * @id: ipc id to look for
  *
- * Similar to ipc_obtain_object() but also checks
+ * Similar to ipc_obtain_object_idr() but also checks
  * the ipc object reference counter.
  *
  * Call inside the RCU critical section.
@@ -616,13 +616,13 @@ err1:
  */
 struct kern_ipc_perm *ipc_obtain_object_check(struct ipc_ids *ids, int id)
 {
-       struct kern_ipc_perm *out = ipc_obtain_object(ids, id);
+       struct kern_ipc_perm *out = ipc_obtain_object_idr(ids, id);
 
        if (IS_ERR(out))
                goto out;
 
        if (ipc_checkid(out, id))
-               return ERR_PTR(-EIDRM);
+               return ERR_PTR(-EINVAL);
 out:
        return out;
 }
index 1a5a0fcd099ce1ac0594062a68aa2c092778b175..3a8a5a0eca6252f04cf188fa6921291f9ec6fc31 100644 (file)
@@ -132,7 +132,7 @@ void ipc_rcu_putref(void *ptr, void (*func)(struct rcu_head *head));
 void ipc_rcu_free(struct rcu_head *head);
 
 struct kern_ipc_perm *ipc_lock(struct ipc_ids *, int);
-struct kern_ipc_perm *ipc_obtain_object(struct ipc_ids *ids, int id);
+struct kern_ipc_perm *ipc_obtain_object_idr(struct ipc_ids *ids, int id);
 
 void kernel_to_ipc64_perm(struct kern_ipc_perm *in, struct ipc64_perm *out);
 void ipc64_perm_to_ipc_perm(struct ipc64_perm *in, struct ipc_perm *out);
index 60c302cfb4d3cbb976f0b2d69f28a96899b1d4ce..43c4c920f30a92ca73e16d8ac3c9249b9a4ae4ca 100644 (file)
@@ -137,7 +137,7 @@ endif
 
 ifneq ($(wildcard $(obj)/.x509.list),)
 ifneq ($(shell cat $(obj)/.x509.list),$(X509_CERTIFICATES))
-$(info X.509 certificate list changed)
+$(warning X.509 certificate list changed to "$(X509_CERTIFICATES)" from "$(shell cat $(obj)/.x509.list)")
 $(shell rm $(obj)/.x509.list)
 endif
 endif
index 9ef9fc8a774b08820a0dea814ecc4456dd83376c..f89d9292eee62540ff1c2f81ed0a658cbd279642 100644 (file)
@@ -1939,8 +1939,6 @@ static struct file_system_type cgroup_fs_type = {
        .kill_sb = cgroup_kill_sb,
 };
 
-static struct kobject *cgroup_kobj;
-
 /**
  * task_cgroup_path - cgroup path of a task in the first cgroup hierarchy
  * @task: target task
@@ -5070,13 +5068,13 @@ int __init cgroup_init(void)
                        ss->bind(init_css_set.subsys[ssid]);
        }
 
-       cgroup_kobj = kobject_create_and_add("cgroup", fs_kobj);
-       if (!cgroup_kobj)
-               return -ENOMEM;
+       err = sysfs_create_mount_point(fs_kobj, "cgroup");
+       if (err)
+               return err;
 
        err = register_filesystem(&cgroup_fs_type);
        if (err < 0) {
-               kobject_put(cgroup_kobj);
+               sysfs_remove_mount_point(fs_kobj, "cgroup");
                return err;
        }
 
diff --git a/kernel/configs/xen.config b/kernel/configs/xen.config
new file mode 100644 (file)
index 0000000..ff75622
--- /dev/null
@@ -0,0 +1,48 @@
+# global stuff - these enable us to allow some
+# of the not so generic stuff below for xen
+CONFIG_PARAVIRT=y
+CONFIG_NET=y
+CONFIG_NET_CORE=y
+CONFIG_NETDEVICES=y
+CONFIG_BLOCK=y
+CONFIG_WATCHDOG=y
+CONFIG_TARGET_CORE=y
+CONFIG_SCSI=y
+CONFIG_FB=y
+CONFIG_INPUT_MISC=y
+CONFIG_MEMORY_HOTPLUG=y
+CONFIG_TTY=y
+# Technically not required but otherwise produces
+# pretty useless systems starting from allnoconfig
+# You want TCP/IP and ELF binaries right?
+CONFIG_INET=y
+CONFIG_BINFMT_ELF=y
+# generic config
+CONFIG_XEN=y
+CONFIG_XEN_DOM0=y
+# backend drivers
+CONFIG_XEN_BACKEND=y
+CONFIG_XEN_BLKDEV_BACKEND=m
+CONFIG_XEN_NETDEV_BACKEND=m
+CONFIG_HVC_XEN=y
+CONFIG_XEN_WDT=m
+CONFIG_XEN_SCSI_BACKEND=m
+# frontend drivers
+CONFIG_XEN_FBDEV_FRONTEND=m
+CONFIG_HVC_XEN_FRONTEND=y
+CONFIG_INPUT_XEN_KBDDEV_FRONTEND=m
+CONFIG_XEN_SCSI_FRONTEND=m
+# others
+CONFIG_XEN_BALLOON=y
+CONFIG_XEN_SCRUB_PAGES=y
+CONFIG_XEN_DEV_EVTCHN=m
+CONFIG_XEN_BLKDEV_FRONTEND=m
+CONFIG_XEN_NETDEV_FRONTEND=m
+CONFIG_XENFS=m
+CONFIG_XEN_COMPAT_XENFS=y
+CONFIG_XEN_SYS_HYPERVISOR=y
+CONFIG_XEN_XENBUS_FRONTEND=y
+CONFIG_XEN_GNTDEV=m
+CONFIG_XEN_GRANT_DEV_ALLOC=m
+CONFIG_SWIOTLB_XEN=y
+CONFIG_XEN_PRIVCMD=m
index d1f37ddd19608d26a32dd130d491cf7c98c41d9c..d3dae3419b99566c127f1682b29f39bb184bbdb1 100644 (file)
@@ -4358,14 +4358,6 @@ static void ring_buffer_wakeup(struct perf_event *event)
        rcu_read_unlock();
 }
 
-static void rb_free_rcu(struct rcu_head *rcu_head)
-{
-       struct ring_buffer *rb;
-
-       rb = container_of(rcu_head, struct ring_buffer, rcu_head);
-       rb_free(rb);
-}
-
 struct ring_buffer *ring_buffer_get(struct perf_event *event)
 {
        struct ring_buffer *rb;
@@ -5794,7 +5786,7 @@ static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
                 * need to add enough zero bytes after the string to handle
                 * the 64bit alignment we do later.
                 */
-               name = d_path(&file->f_path, buf, PATH_MAX - sizeof(u64));
+               name = file_path(file, buf, PATH_MAX - sizeof(u64));
                if (IS_ERR(name)) {
                        name = "//toolong";
                        goto cpy_name;
index 2deb24c7a40dd979313eb1fcff58044d2d948888..2bbad9c1274c3199338e653bbb5c8bd640815fa7 100644 (file)
@@ -11,6 +11,7 @@
 struct ring_buffer {
        atomic_t                        refcount;
        struct rcu_head                 rcu_head;
+       struct irq_work                 irq_work;
 #ifdef CONFIG_PERF_USE_VMALLOC
        struct work_struct              work;
        int                             page_order;     /* allocation order  */
@@ -55,6 +56,15 @@ struct ring_buffer {
 };
 
 extern void rb_free(struct ring_buffer *rb);
+
+static inline void rb_free_rcu(struct rcu_head *rcu_head)
+{
+       struct ring_buffer *rb;
+
+       rb = container_of(rcu_head, struct ring_buffer, rcu_head);
+       rb_free(rb);
+}
+
 extern struct ring_buffer *
 rb_alloc(int nr_pages, long watermark, int cpu, int flags);
 extern void perf_event_wakeup(struct perf_event *event);
index 96472824a752f76fe651ec1bfb7ab7a52411a12c..b2be01b1aa9dcb7a70792fa381c264b229a106d0 100644 (file)
@@ -221,6 +221,8 @@ void perf_output_end(struct perf_output_handle *handle)
        rcu_read_unlock();
 }
 
+static void rb_irq_work(struct irq_work *work);
+
 static void
 ring_buffer_init(struct ring_buffer *rb, long watermark, int flags)
 {
@@ -241,6 +243,16 @@ ring_buffer_init(struct ring_buffer *rb, long watermark, int flags)
 
        INIT_LIST_HEAD(&rb->event_list);
        spin_lock_init(&rb->event_lock);
+       init_irq_work(&rb->irq_work, rb_irq_work);
+}
+
+static void ring_buffer_put_async(struct ring_buffer *rb)
+{
+       if (!atomic_dec_and_test(&rb->refcount))
+               return;
+
+       rb->rcu_head.next = (void *)rb;
+       irq_work_queue(&rb->irq_work);
 }
 
 /*
@@ -319,7 +331,7 @@ err_put:
        rb_free_aux(rb);
 
 err:
-       ring_buffer_put(rb);
+       ring_buffer_put_async(rb);
        handle->event = NULL;
 
        return NULL;
@@ -370,7 +382,7 @@ void perf_aux_output_end(struct perf_output_handle *handle, unsigned long size,
 
        local_set(&rb->aux_nest, 0);
        rb_free_aux(rb);
-       ring_buffer_put(rb);
+       ring_buffer_put_async(rb);
 }
 
 /*
@@ -557,7 +569,18 @@ static void __rb_free_aux(struct ring_buffer *rb)
 void rb_free_aux(struct ring_buffer *rb)
 {
        if (atomic_dec_and_test(&rb->aux_refcount))
+               irq_work_queue(&rb->irq_work);
+}
+
+static void rb_irq_work(struct irq_work *work)
+{
+       struct ring_buffer *rb = container_of(work, struct ring_buffer, irq_work);
+
+       if (!atomic_read(&rb->aux_refcount))
                __rb_free_aux(rb);
+
+       if (rb->rcu_head.next == (void *)rb)
+               call_rcu(&rb->rcu_head, rb_free_rcu);
 }
 
 #ifndef CONFIG_PERF_USE_VMALLOC
index a744098e4eb76f16624bdf7090676b864e413028..7080ae1eb6c10dbaabe670d6dd038f2aba6b8c31 100644 (file)
@@ -92,6 +92,12 @@ void __gcov_merge_time_profile(gcov_type *counters, unsigned int n_counters)
 }
 EXPORT_SYMBOL(__gcov_merge_time_profile);
 
+void __gcov_merge_icall_topn(gcov_type *counters, unsigned int n_counters)
+{
+       /* Unused. */
+}
+EXPORT_SYMBOL(__gcov_merge_icall_topn);
+
 /**
  * gcov_enable_events - enable event reporting through gcov_event()
  *
index 826ba9fb5e3277292d38ef4c95ba9713378b4543..e25e92fb44face315265d43d981b71e193693f2a 100644 (file)
@@ -18,7 +18,9 @@
 #include <linux/vmalloc.h>
 #include "gcov.h"
 
-#if __GNUC__ == 4 && __GNUC_MINOR__ >= 9
+#if __GNUC__ == 5 && __GNUC_MINOR__ >= 1
+#define GCOV_COUNTERS                  10
+#elif __GNUC__ == 4 && __GNUC_MINOR__ >= 9
 #define GCOV_COUNTERS                  9
 #else
 #define GCOV_COUNTERS                  8
index 9019f15deab201127065e4d6987677fdcdd63676..52ebaca1b9fc16aaeba97873fadc9dc158f43f39 100644 (file)
@@ -302,7 +302,7 @@ static int jump_label_add_module(struct module *mod)
                        continue;
 
                key = iterk;
-               if (__module_address(iter->key) == mod) {
+               if (within_module(iter->key, mod)) {
                        /*
                         * Set key->entries to iter, but preserve JUMP_LABEL_TRUE_BRANCH.
                         */
@@ -339,7 +339,7 @@ static void jump_label_del_module(struct module *mod)
 
                key = (struct static_key *)(unsigned long)iter->key;
 
-               if (__module_address(iter->key) == mod)
+               if (within_module(iter->key, mod))
                        continue;
 
                prev = &key->next;
@@ -443,14 +443,16 @@ static void jump_label_update(struct static_key *key, int enable)
 {
        struct jump_entry *stop = __stop___jump_table;
        struct jump_entry *entry = jump_label_get_entries(key);
-
 #ifdef CONFIG_MODULES
-       struct module *mod = __module_address((unsigned long)key);
+       struct module *mod;
 
        __jump_label_mod_update(key, enable);
 
+       preempt_disable();
+       mod = __module_address((unsigned long)key);
        if (mod)
                stop = mod->jump_entries + mod->num_jump_entries;
+       preempt_enable();
 #endif
        /* if there are no users, entry can be NULL */
        if (entry)
index 7a36fdcca5bfb064a6709021782c98bd2a6de179..a785c1015e25bf1ecacd3a6d92956e3e630e7f37 100644 (file)
@@ -84,6 +84,17 @@ struct resource crashk_low_res = {
 
 int kexec_should_crash(struct task_struct *p)
 {
+       /*
+        * If crash_kexec_post_notifiers is enabled, don't run
+        * crash_kexec() here yet, which must be run after panic
+        * notifiers in panic().
+        */
+       if (crash_kexec_post_notifiers)
+               return 0;
+       /*
+        * There are 4 panic() calls in do_exit() path, each of which
+        * corresponds to each of these 4 conditions.
+        */
        if (in_interrupt() || !p->pid || is_global_init(p) || panic_on_oops)
                return 1;
        return 0;
index f80a97f7da1f14b96c87efaf82f9f8c88612ab54..3e0e19763d246a998acb109c6979e4df593e3958 100644 (file)
 DEFINE_MUTEX(module_mutex);
 EXPORT_SYMBOL_GPL(module_mutex);
 static LIST_HEAD(modules);
-#ifdef CONFIG_KGDB_KDB
-struct list_head *kdb_modules = &modules; /* kdb needs the list of modules */
-#endif /* CONFIG_KGDB_KDB */
 
-#ifdef CONFIG_MODULE_SIG
-#ifdef CONFIG_MODULE_SIG_FORCE
-static bool sig_enforce = true;
-#else
-static bool sig_enforce = false;
+#ifdef CONFIG_MODULES_TREE_LOOKUP
+
+/*
+ * Use a latched RB-tree for __module_address(); this allows us to use
+ * RCU-sched lookups of the address from any context.
+ *
+ * Because modules have two address ranges: init and core, we need two
+ * latch_tree_node entries. Therefore we need the back-pointer from
+ * mod_tree_node.
+ *
+ * Because init ranges are short lived we mark them unlikely and have placed
+ * them outside the critical cacheline in struct module.
+ *
+ * This is conditional on PERF_EVENTS || TRACING because those can really hit
+ * __module_address() hard by doing a lot of stack unwinding; potentially from
+ * NMI context.
+ */
 
-static int param_set_bool_enable_only(const char *val,
-                                     const struct kernel_param *kp)
+static __always_inline unsigned long __mod_tree_val(struct latch_tree_node *n)
 {
-       int err;
-       bool test;
-       struct kernel_param dummy_kp = *kp;
+       struct mod_tree_node *mtn = container_of(n, struct mod_tree_node, node);
+       struct module *mod = mtn->mod;
 
-       dummy_kp.arg = &test;
+       if (unlikely(mtn == &mod->mtn_init))
+               return (unsigned long)mod->module_init;
 
-       err = param_set_bool(val, &dummy_kp);
-       if (err)
-               return err;
+       return (unsigned long)mod->module_core;
+}
+
+static __always_inline unsigned long __mod_tree_size(struct latch_tree_node *n)
+{
+       struct mod_tree_node *mtn = container_of(n, struct mod_tree_node, node);
+       struct module *mod = mtn->mod;
 
-       /* Don't let them unset it once it's set! */
-       if (!test && sig_enforce)
-               return -EROFS;
+       if (unlikely(mtn == &mod->mtn_init))
+               return (unsigned long)mod->init_size;
+
+       return (unsigned long)mod->core_size;
+}
+
+static __always_inline bool
+mod_tree_less(struct latch_tree_node *a, struct latch_tree_node *b)
+{
+       return __mod_tree_val(a) < __mod_tree_val(b);
+}
+
+static __always_inline int
+mod_tree_comp(void *key, struct latch_tree_node *n)
+{
+       unsigned long val = (unsigned long)key;
+       unsigned long start, end;
+
+       start = __mod_tree_val(n);
+       if (val < start)
+               return -1;
+
+       end = start + __mod_tree_size(n);
+       if (val >= end)
+               return 1;
 
-       if (test)
-               sig_enforce = true;
        return 0;
 }
 
-static const struct kernel_param_ops param_ops_bool_enable_only = {
-       .flags = KERNEL_PARAM_OPS_FL_NOARG,
-       .set = param_set_bool_enable_only,
-       .get = param_get_bool,
+static const struct latch_tree_ops mod_tree_ops = {
+       .less = mod_tree_less,
+       .comp = mod_tree_comp,
 };
-#define param_check_bool_enable_only param_check_bool
 
+static struct mod_tree_root {
+       struct latch_tree_root root;
+       unsigned long addr_min;
+       unsigned long addr_max;
+} mod_tree __cacheline_aligned = {
+       .addr_min = -1UL,
+};
+
+#define module_addr_min mod_tree.addr_min
+#define module_addr_max mod_tree.addr_max
+
+static noinline void __mod_tree_insert(struct mod_tree_node *node)
+{
+       latch_tree_insert(&node->node, &mod_tree.root, &mod_tree_ops);
+}
+
+static void __mod_tree_remove(struct mod_tree_node *node)
+{
+       latch_tree_erase(&node->node, &mod_tree.root, &mod_tree_ops);
+}
+
+/*
+ * These modifications: insert, remove_init and remove; are serialized by the
+ * module_mutex.
+ */
+static void mod_tree_insert(struct module *mod)
+{
+       mod->mtn_core.mod = mod;
+       mod->mtn_init.mod = mod;
+
+       __mod_tree_insert(&mod->mtn_core);
+       if (mod->init_size)
+               __mod_tree_insert(&mod->mtn_init);
+}
+
+static void mod_tree_remove_init(struct module *mod)
+{
+       if (mod->init_size)
+               __mod_tree_remove(&mod->mtn_init);
+}
+
+static void mod_tree_remove(struct module *mod)
+{
+       __mod_tree_remove(&mod->mtn_core);
+       mod_tree_remove_init(mod);
+}
+
+static struct module *mod_find(unsigned long addr)
+{
+       struct latch_tree_node *ltn;
+
+       ltn = latch_tree_find((void *)addr, &mod_tree.root, &mod_tree_ops);
+       if (!ltn)
+               return NULL;
+
+       return container_of(ltn, struct mod_tree_node, node)->mod;
+}
+
+#else /* MODULES_TREE_LOOKUP */
+
+static unsigned long module_addr_min = -1UL, module_addr_max = 0;
+
+static void mod_tree_insert(struct module *mod) { }
+static void mod_tree_remove_init(struct module *mod) { }
+static void mod_tree_remove(struct module *mod) { }
+
+static struct module *mod_find(unsigned long addr)
+{
+       struct module *mod;
+
+       list_for_each_entry_rcu(mod, &modules, list) {
+               if (within_module(addr, mod))
+                       return mod;
+       }
+
+       return NULL;
+}
+
+#endif /* MODULES_TREE_LOOKUP */
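With MODULES_TREE_LOOKUP, every module contributes its core and (while it exists) init range to a latched RB-tree, so mod_find() and the __module_address() built on it (updated further down in this file) can be called from any context that holds an RCU-sched read side, including the NMI-driven stack unwinding mentioned in the comment above. A hypothetical caller would look like this (the helper name is an assumption, not part of the patch):

#include <linux/module.h>

static void report_owner(unsigned long addr)
{
        struct module *mod;

        preempt_disable();              /* RCU-sched read-side critical section */
        mod = __module_address(addr);
        if (mod)
                pr_debug("%#lx lies in module %s\n", addr, mod->name);
        preempt_enable();
}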
+
+/*
+ * Bounds of module text, for speeding up __module_address.
+ * Protected by module_mutex.
+ */
+static void __mod_update_bounds(void *base, unsigned int size)
+{
+       unsigned long min = (unsigned long)base;
+       unsigned long max = min + size;
+
+       if (min < module_addr_min)
+               module_addr_min = min;
+       if (max > module_addr_max)
+               module_addr_max = max;
+}
+
+static void mod_update_bounds(struct module *mod)
+{
+       __mod_update_bounds(mod->module_core, mod->core_size);
+       if (mod->init_size)
+               __mod_update_bounds(mod->module_init, mod->init_size);
+}
+
+#ifdef CONFIG_KGDB_KDB
+struct list_head *kdb_modules = &modules; /* kdb needs the list of modules */
+#endif /* CONFIG_KGDB_KDB */
+
+static void module_assert_mutex(void)
+{
+       lockdep_assert_held(&module_mutex);
+}
+
+static void module_assert_mutex_or_preempt(void)
+{
+#ifdef CONFIG_LOCKDEP
+       if (unlikely(!debug_locks))
+               return;
+
+       WARN_ON(!rcu_read_lock_sched_held() &&
+               !lockdep_is_held(&module_mutex));
+#endif
+}
+
+static bool sig_enforce = IS_ENABLED(CONFIG_MODULE_SIG_FORCE);
+#ifndef CONFIG_MODULE_SIG_FORCE
 module_param(sig_enforce, bool_enable_only, 0644);
 #endif /* !CONFIG_MODULE_SIG_FORCE */
-#endif /* CONFIG_MODULE_SIG */
 
 /* Block module loading/unloading? */
 int modules_disabled = 0;
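The sig_enforce rework above leans on IS_ENABLED(), which evaluates to 1 when a config symbol is 'y' or 'm' and to 0 otherwise, so a conditional default no longer needs an #ifdef/#else pair. A generic sketch (CONFIG_MY_FEATURE_DEFAULT is a made-up symbol):

/* replaces: #ifdef CONFIG_MY_FEATURE_DEFAULT ... = true; #else ...; #endif */
static bool my_feature = IS_ENABLED(CONFIG_MY_FEATURE_DEFAULT);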
@@ -153,10 +306,6 @@ static DECLARE_WAIT_QUEUE_HEAD(module_wq);
 
 static BLOCKING_NOTIFIER_HEAD(module_notify_list);
 
-/* Bounds of module allocation, for speeding __module_address.
- * Protected by module_mutex. */
-static unsigned long module_addr_min = -1UL, module_addr_max = 0;
-
 int register_module_notifier(struct notifier_block *nb)
 {
        return blocking_notifier_chain_register(&module_notify_list, nb);
@@ -318,6 +467,8 @@ bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
 #endif
        };
 
+       module_assert_mutex_or_preempt();
+
        if (each_symbol_in_section(arr, ARRAY_SIZE(arr), NULL, fn, data))
                return true;
 
@@ -457,6 +608,8 @@ static struct module *find_module_all(const char *name, size_t len,
 {
        struct module *mod;
 
+       module_assert_mutex();
+
        list_for_each_entry(mod, &modules, list) {
                if (!even_unformed && mod->state == MODULE_STATE_UNFORMED)
                        continue;
@@ -1169,11 +1322,17 @@ static inline int check_modstruct_version(Elf_Shdr *sechdrs,
 {
        const unsigned long *crc;
 
-       /* Since this should be found in kernel (which can't be removed),
-        * no locking is necessary. */
+       /*
+        * Since this should be found in kernel (which can't be removed), no
+        * locking is necessary -- use preempt_disable() to placate lockdep.
+        */
+       preempt_disable();
        if (!find_symbol(VMLINUX_SYMBOL_STR(module_layout), NULL,
-                        &crc, true, false))
+                        &crc, true, false)) {
+               preempt_enable();
                BUG();
+       }
+       preempt_enable();
        return check_version(sechdrs, versindex,
                             VMLINUX_SYMBOL_STR(module_layout), mod, crc,
                             NULL);
@@ -1661,6 +1820,10 @@ static void mod_sysfs_fini(struct module *mod)
        mod_kobject_put(mod);
 }
 
+static void init_param_lock(struct module *mod)
+{
+       mutex_init(&mod->param_lock);
+}
 #else /* !CONFIG_SYSFS */
 
 static int mod_sysfs_setup(struct module *mod,
@@ -1683,6 +1846,9 @@ static void del_usage_links(struct module *mod)
 {
 }
 
+static void init_param_lock(struct module *mod)
+{
+}
 #endif /* CONFIG_SYSFS */
 
 static void mod_sysfs_teardown(struct module *mod)
@@ -1852,10 +2018,11 @@ static void free_module(struct module *mod)
        mutex_lock(&module_mutex);
        /* Unlink carefully: kallsyms could be walking list. */
        list_del_rcu(&mod->list);
+       mod_tree_remove(mod);
        /* Remove this module from bug list, this uses list_del_rcu */
        module_bug_cleanup(mod);
-       /* Wait for RCU synchronizing before releasing mod->list and buglist. */
-       synchronize_rcu();
+       /* Wait for RCU-sched synchronizing before releasing mod->list and buglist. */
+       synchronize_sched();
        mutex_unlock(&module_mutex);
 
        /* This may be NULL, but that's OK */
@@ -2384,22 +2551,6 @@ void * __weak module_alloc(unsigned long size)
        return vmalloc_exec(size);
 }
 
-static void *module_alloc_update_bounds(unsigned long size)
-{
-       void *ret = module_alloc(size);
-
-       if (ret) {
-               mutex_lock(&module_mutex);
-               /* Update module bounds. */
-               if ((unsigned long)ret < module_addr_min)
-                       module_addr_min = (unsigned long)ret;
-               if ((unsigned long)ret + size > module_addr_max)
-                       module_addr_max = (unsigned long)ret + size;
-               mutex_unlock(&module_mutex);
-       }
-       return ret;
-}
-
 #ifdef CONFIG_DEBUG_KMEMLEAK
 static void kmemleak_load_module(const struct module *mod,
                                 const struct load_info *info)
@@ -2805,7 +2956,7 @@ static int move_module(struct module *mod, struct load_info *info)
        void *ptr;
 
        /* Do the allocs. */
-       ptr = module_alloc_update_bounds(mod->core_size);
+       ptr = module_alloc(mod->core_size);
        /*
         * The pointer to this block is stored in the module structure
         * which is inside the block. Just mark it as not being a
@@ -2819,7 +2970,7 @@ static int move_module(struct module *mod, struct load_info *info)
        mod->module_core = ptr;
 
        if (mod->init_size) {
-               ptr = module_alloc_update_bounds(mod->init_size);
+               ptr = module_alloc(mod->init_size);
                /*
                 * The pointer to this block is stored in the module structure
                 * which is inside the block. This block doesn't need to be
@@ -3119,6 +3270,7 @@ static noinline int do_init_module(struct module *mod)
        mod->symtab = mod->core_symtab;
        mod->strtab = mod->core_strtab;
 #endif
+       mod_tree_remove_init(mod);
        unset_module_init_ro_nx(mod);
        module_arch_freeing_init(mod);
        mod->module_init = NULL;
@@ -3127,11 +3279,11 @@ static noinline int do_init_module(struct module *mod)
        mod->init_text_size = 0;
        /*
         * We want to free module_init, but be aware that kallsyms may be
-        * walking this with preempt disabled.  In all the failure paths,
-        * we call synchronize_rcu/synchronize_sched, but we don't want
-        * to slow down the success path, so use actual RCU here.
+        * walking this with preempt disabled.  In all the failure paths, we
+        * call synchronize_sched(), but we don't want to slow down the success
+        * path, so use actual RCU here.
         */
-       call_rcu(&freeinit->rcu, do_free_init);
+       call_rcu_sched(&freeinit->rcu, do_free_init);
        mutex_unlock(&module_mutex);
        wake_up_all(&module_wq);
 
@@ -3188,7 +3340,9 @@ again:
                err = -EEXIST;
                goto out;
        }
+       mod_update_bounds(mod);
        list_add_rcu(&mod->list, &modules);
+       mod_tree_insert(mod);
        err = 0;
 
 out:
@@ -3304,6 +3458,8 @@ static int load_module(struct load_info *info, const char __user *uargs,
        if (err)
                goto unlink_mod;
 
+       init_param_lock(mod);
+
        /* Now we've got everything in the final locations, we can
         * find optional sections. */
        err = find_module_sections(mod, info);
@@ -3402,8 +3558,8 @@ static int load_module(struct load_info *info, const char __user *uargs,
        /* Unlink carefully: kallsyms could be walking list. */
        list_del_rcu(&mod->list);
        wake_up_all(&module_wq);
-       /* Wait for RCU synchronizing before releasing mod->list. */
-       synchronize_rcu();
+       /* Wait for RCU-sched synchronizing before releasing mod->list. */
+       synchronize_sched();
        mutex_unlock(&module_mutex);
  free_module:
        /* Free lock-classes; relies on the preceding sync_rcu() */
@@ -3527,19 +3683,15 @@ const char *module_address_lookup(unsigned long addr,
                            char **modname,
                            char *namebuf)
 {
-       struct module *mod;
        const char *ret = NULL;
+       struct module *mod;
 
        preempt_disable();
-       list_for_each_entry_rcu(mod, &modules, list) {
-               if (mod->state == MODULE_STATE_UNFORMED)
-                       continue;
-               if (within_module(addr, mod)) {
-                       if (modname)
-                               *modname = mod->name;
-                       ret = get_ksymbol(mod, addr, size, offset);
-                       break;
-               }
+       mod = __module_address(addr);
+       if (mod) {
+               if (modname)
+                       *modname = mod->name;
+               ret = get_ksymbol(mod, addr, size, offset);
        }
        /* Make a copy in here where it's safe */
        if (ret) {
@@ -3547,6 +3699,7 @@ const char *module_address_lookup(unsigned long addr,
                ret = namebuf;
        }
        preempt_enable();
+
        return ret;
 }
 
@@ -3670,6 +3823,8 @@ int module_kallsyms_on_each_symbol(int (*fn)(void *, const char *,
        unsigned int i;
        int ret;
 
+       module_assert_mutex();
+
        list_for_each_entry(mod, &modules, list) {
                if (mod->state == MODULE_STATE_UNFORMED)
                        continue;
@@ -3844,13 +3999,15 @@ struct module *__module_address(unsigned long addr)
        if (addr < module_addr_min || addr > module_addr_max)
                return NULL;
 
-       list_for_each_entry_rcu(mod, &modules, list) {
+       module_assert_mutex_or_preempt();
+
+       mod = mod_find(addr);
+       if (mod) {
+               BUG_ON(!within_module(addr, mod));
                if (mod->state == MODULE_STATE_UNFORMED)
-                       continue;
-               if (within_module(addr, mod))
-                       return mod;
+                       mod = NULL;
        }
-       return NULL;
+       return mod;
 }
 EXPORT_SYMBOL_GPL(__module_address);
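The list and tree modifications in this file are now paired with synchronize_sched() instead of synchronize_rcu(), so readers are expected to hold an RCU-sched read side (rcu_read_lock_sched() or disabled preemption) while walking the modules list or the mod_tree. A sketch of that internal reader convention inside kernel/module.c (the helper name is hypothetical):

static bool any_module_loaded(void)
{
        struct module *mod;
        bool found = false;

        rcu_read_lock_sched();          /* pairs with synchronize_sched() above */
        list_for_each_entry_rcu(mod, &modules, list) {
                if (mod->state == MODULE_STATE_UNFORMED)
                        continue;
                found = true;
                break;
        }
        rcu_read_unlock_sched();

        return found;
}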
 
index 8136ad76e5fd3ea2bcc6d556a9508e86c49761aa..04e91ff7560b3a35445f006a5be6e1d786bde245 100644 (file)
@@ -32,7 +32,7 @@ static unsigned long tainted_mask;
 static int pause_on_oops;
 static int pause_on_oops_flag;
 static DEFINE_SPINLOCK(pause_on_oops_lock);
-static bool crash_kexec_post_notifiers;
+bool crash_kexec_post_notifiers;
 int panic_on_warn __read_mostly;
 
 int panic_timeout = CONFIG_PANIC_TIMEOUT;
@@ -142,7 +142,8 @@ void panic(const char *fmt, ...)
         * Note: since some panic_notifiers can make the crashed kernel
         * more unstable, this can also increase the risk of kdump failure.
         */
-       crash_kexec(NULL);
+       if (crash_kexec_post_notifiers)
+               crash_kexec(NULL);
 
        bust_spinlocks(0);
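Together with the kexec.c hunk earlier, this makes the reordering opt-in: by default crash_kexec() still runs as early as possible, but when the crash_kexec_post_notifiers option is set, the panic notifiers and kmsg dumpers run first. A simplified sketch of the resulting control flow in panic() (paraphrased, not the complete function):

static void panic_flow_sketch(char *msg)
{
        if (!crash_kexec_post_notifiers)
                crash_kexec(NULL);      /* default: enter the kdump kernel first */

        atomic_notifier_call_chain(&panic_notifier_list, 0, msg);
        kmsg_dump(KMSG_DUMP_PANIC);

        if (crash_kexec_post_notifiers)
                crash_kexec(NULL);      /* opt-in: kdump only after the notifiers */
}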
 
index 30288c1e15dd1afe441f7d4dd0cd8f8b288eed28..b6554aa71094473094a97010ec4e3f739f92b5c8 100644 (file)
 #include <linux/slab.h>
 #include <linux/ctype.h>
 
-/* Protects all parameters, and incidentally kmalloced_param list. */
+#ifdef CONFIG_SYSFS
+/* Protects all built-in parameters, modules use their own param_lock */
 static DEFINE_MUTEX(param_lock);
 
+/* Use the module's mutex, or if built-in use the built-in mutex */
+#ifdef CONFIG_MODULES
+#define KPARAM_MUTEX(mod)      ((mod) ? &(mod)->param_lock : &param_lock)
+#else
+#define KPARAM_MUTEX(mod)      (&param_lock)
+#endif
+
+static inline void check_kparam_locked(struct module *mod)
+{
+       BUG_ON(!mutex_is_locked(KPARAM_MUTEX(mod)));
+}
+#else
+static inline void check_kparam_locked(struct module *mod)
+{
+}
+#endif /* !CONFIG_SYSFS */
+
 /* This just allows us to keep track of which parameters are kmalloced. */
 struct kmalloced_param {
        struct list_head list;
        char val[];
 };
 static LIST_HEAD(kmalloced_params);
+static DEFINE_SPINLOCK(kmalloced_params_lock);
 
 static void *kmalloc_parameter(unsigned int size)
 {
@@ -43,7 +62,10 @@ static void *kmalloc_parameter(unsigned int size)
        if (!p)
                return NULL;
 
+       spin_lock(&kmalloced_params_lock);
        list_add(&p->list, &kmalloced_params);
+       spin_unlock(&kmalloced_params_lock);
+
        return p->val;
 }
 
@@ -52,6 +74,7 @@ static void maybe_kfree_parameter(void *param)
 {
        struct kmalloced_param *p;
 
+       spin_lock(&kmalloced_params_lock);
        list_for_each_entry(p, &kmalloced_params, list) {
                if (p->val == param) {
                        list_del(&p->list);
@@ -59,6 +82,7 @@ static void maybe_kfree_parameter(void *param)
                        break;
                }
        }
+       spin_unlock(&kmalloced_params_lock);
 }
 
 static char dash2underscore(char c)
@@ -119,10 +143,10 @@ static int parse_one(char *param,
                                return -EINVAL;
                        pr_debug("handling %s with %p\n", param,
                                params[i].ops->set);
-                       mutex_lock(&param_lock);
+                       kernel_param_lock(params[i].mod);
                        param_check_unsafe(&params[i]);
                        err = params[i].ops->set(val, &params[i]);
-                       mutex_unlock(&param_lock);
+                       kernel_param_unlock(params[i].mod);
                        return err;
                }
        }
@@ -254,7 +278,7 @@ char *parse_args(const char *doing,
                return scnprintf(buffer, PAGE_SIZE, format,             \
                                *((type *)kp->arg));                    \
        }                                                               \
-       struct kernel_param_ops param_ops_##name = {                    \
+       const struct kernel_param_ops param_ops_##name = {                      \
                .set = param_set_##name,                                \
                .get = param_get_##name,                                \
        };                                                              \
@@ -306,7 +330,7 @@ static void param_free_charp(void *arg)
        maybe_kfree_parameter(*((char **)arg));
 }
 
-struct kernel_param_ops param_ops_charp = {
+const struct kernel_param_ops param_ops_charp = {
        .set = param_set_charp,
        .get = param_get_charp,
        .free = param_free_charp,
@@ -331,13 +355,44 @@ int param_get_bool(char *buffer, const struct kernel_param *kp)
 }
 EXPORT_SYMBOL(param_get_bool);
 
-struct kernel_param_ops param_ops_bool = {
+const struct kernel_param_ops param_ops_bool = {
        .flags = KERNEL_PARAM_OPS_FL_NOARG,
        .set = param_set_bool,
        .get = param_get_bool,
 };
 EXPORT_SYMBOL(param_ops_bool);
 
+int param_set_bool_enable_only(const char *val, const struct kernel_param *kp)
+{
+       int err = 0;
+       bool new_value;
+       bool orig_value = *(bool *)kp->arg;
+       struct kernel_param dummy_kp = *kp;
+
+       dummy_kp.arg = &new_value;
+
+       err = param_set_bool(val, &dummy_kp);
+       if (err)
+               return err;
+
+       /* Don't let them unset it once it's set! */
+       if (!new_value && orig_value)
+               return -EROFS;
+
+       if (new_value)
+               err = param_set_bool(val, kp);
+
+       return err;
+}
+EXPORT_SYMBOL_GPL(param_set_bool_enable_only);
+
+const struct kernel_param_ops param_ops_bool_enable_only = {
+       .flags = KERNEL_PARAM_OPS_FL_NOARG,
+       .set = param_set_bool_enable_only,
+       .get = param_get_bool,
+};
+EXPORT_SYMBOL_GPL(param_ops_bool_enable_only);
+
 /* This one must be bool. */
 int param_set_invbool(const char *val, const struct kernel_param *kp)
 {
@@ -359,7 +414,7 @@ int param_get_invbool(char *buffer, const struct kernel_param *kp)
 }
 EXPORT_SYMBOL(param_get_invbool);
 
-struct kernel_param_ops param_ops_invbool = {
+const struct kernel_param_ops param_ops_invbool = {
        .set = param_set_invbool,
        .get = param_get_invbool,
 };
@@ -367,12 +422,11 @@ EXPORT_SYMBOL(param_ops_invbool);
 
 int param_set_bint(const char *val, const struct kernel_param *kp)
 {
-       struct kernel_param boolkp;
+       /* Match bool exactly, by re-using it. */
+       struct kernel_param boolkp = *kp;
        bool v;
        int ret;
 
-       /* Match bool exactly, by re-using it. */
-       boolkp = *kp;
        boolkp.arg = &v;
 
        ret = param_set_bool(val, &boolkp);
@@ -382,7 +436,7 @@ int param_set_bint(const char *val, const struct kernel_param *kp)
 }
 EXPORT_SYMBOL(param_set_bint);
 
-struct kernel_param_ops param_ops_bint = {
+const struct kernel_param_ops param_ops_bint = {
        .flags = KERNEL_PARAM_OPS_FL_NOARG,
        .set = param_set_bint,
        .get = param_get_int,
@@ -390,7 +444,8 @@ struct kernel_param_ops param_ops_bint = {
 EXPORT_SYMBOL(param_ops_bint);
 
 /* We break the rule and mangle the string. */
-static int param_array(const char *name,
+static int param_array(struct module *mod,
+                      const char *name,
                       const char *val,
                       unsigned int min, unsigned int max,
                       void *elem, int elemsize,
@@ -421,7 +476,7 @@ static int param_array(const char *name,
                /* nul-terminate and parse */
                save = val[len];
                ((char *)val)[len] = '\0';
-               BUG_ON(!mutex_is_locked(&param_lock));
+               check_kparam_locked(mod);
                ret = set(val, &kp);
 
                if (ret != 0)
@@ -443,7 +498,7 @@ static int param_array_set(const char *val, const struct kernel_param *kp)
        const struct kparam_array *arr = kp->arr;
        unsigned int temp_num;
 
-       return param_array(kp->name, val, 1, arr->max, arr->elem,
+       return param_array(kp->mod, kp->name, val, 1, arr->max, arr->elem,
                           arr->elemsize, arr->ops->set, kp->level,
                           arr->num ?: &temp_num);
 }
@@ -452,14 +507,13 @@ static int param_array_get(char *buffer, const struct kernel_param *kp)
 {
        int i, off, ret;
        const struct kparam_array *arr = kp->arr;
-       struct kernel_param p;
+       struct kernel_param p = *kp;
 
-       p = *kp;
        for (i = off = 0; i < (arr->num ? *arr->num : arr->max); i++) {
                if (i)
                        buffer[off++] = ',';
                p.arg = arr->elem + arr->elemsize * i;
-               BUG_ON(!mutex_is_locked(&param_lock));
+               check_kparam_locked(p.mod);
                ret = arr->ops->get(buffer + off, &p);
                if (ret < 0)
                        return ret;
@@ -479,7 +533,7 @@ static void param_array_free(void *arg)
                        arr->ops->free(arr->elem + arr->elemsize * i);
 }
 
-struct kernel_param_ops param_array_ops = {
+const struct kernel_param_ops param_array_ops = {
        .set = param_array_set,
        .get = param_array_get,
        .free = param_array_free,
@@ -507,7 +561,7 @@ int param_get_string(char *buffer, const struct kernel_param *kp)
 }
 EXPORT_SYMBOL(param_get_string);
 
-struct kernel_param_ops param_ops_string = {
+const struct kernel_param_ops param_ops_string = {
        .set = param_set_copystring,
        .get = param_get_string,
 };
@@ -542,9 +596,9 @@ static ssize_t param_attr_show(struct module_attribute *mattr,
        if (!attribute->param->ops->get)
                return -EPERM;
 
-       mutex_lock(&param_lock);
+       kernel_param_lock(mk->mod);
        count = attribute->param->ops->get(buf, attribute->param);
-       mutex_unlock(&param_lock);
+       kernel_param_unlock(mk->mod);
        if (count > 0) {
                strcat(buf, "\n");
                ++count;
@@ -554,7 +608,7 @@ static ssize_t param_attr_show(struct module_attribute *mattr,
 
 /* sysfs always hands a nul-terminated string in buf.  We rely on that. */
 static ssize_t param_attr_store(struct module_attribute *mattr,
-                               struct module_kobject *km,
+                               struct module_kobject *mk,
                                const char *buf, size_t len)
 {
        int err;
@@ -563,10 +617,10 @@ static ssize_t param_attr_store(struct module_attribute *mattr,
        if (!attribute->param->ops->set)
                return -EPERM;
 
-       mutex_lock(&param_lock);
+       kernel_param_lock(mk->mod);
        param_check_unsafe(attribute->param);
        err = attribute->param->ops->set(buf, attribute->param);
-       mutex_unlock(&param_lock);
+       kernel_param_unlock(mk->mod);
        if (!err)
                return len;
        return err;
@@ -580,17 +634,18 @@ static ssize_t param_attr_store(struct module_attribute *mattr,
 #endif
 
 #ifdef CONFIG_SYSFS
-void __kernel_param_lock(void)
+void kernel_param_lock(struct module *mod)
 {
-       mutex_lock(&param_lock);
+       mutex_lock(KPARAM_MUTEX(mod));
 }
-EXPORT_SYMBOL(__kernel_param_lock);
 
-void __kernel_param_unlock(void)
+void kernel_param_unlock(struct module *mod)
 {
-       mutex_unlock(&param_lock);
+       mutex_unlock(KPARAM_MUTEX(mod));
 }
-EXPORT_SYMBOL(__kernel_param_unlock);
+
+EXPORT_SYMBOL(kernel_param_lock);
+EXPORT_SYMBOL(kernel_param_unlock);
 
 /*
  * add_sysfs_param - add a parameter to sysfs
@@ -856,6 +911,7 @@ static void __init version_sysfs_builtin(void)
                mk = locate_module_kobject(vattr->module_name);
                if (mk) {
                        err = sysfs_create_file(&mk->kobj, &vattr->mattr.attr);
+                       WARN_ON_ONCE(err);
                        kobject_uevent(&mk->kobj, KOBJ_ADD);
                        kobject_put(&mk->kobj);
                }
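With the parameter mutex now split per module (see the module.c and params.c hunks above), the old global __kernel_param_lock()/__kernel_param_unlock() pair becomes kernel_param_lock(mod)/kernel_param_unlock(mod). A hypothetical module updating two of its own parameters consistently would do:

#include <linux/module.h>
#include <linux/moduleparam.h>

static int my_param_a, my_param_b;              /* hypothetical parameters */
module_param(my_param_a, int, 0644);
module_param(my_param_b, int, 0644);

static void update_my_params(int a, int b)
{
        kernel_param_lock(THIS_MODULE);         /* same lock sysfs writes take */
        my_param_a = a;
        my_param_b = b;
        kernel_param_unlock(THIS_MODULE);
}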
index 7e01f78f041778abe405c9115c15e10a77f64d03..9e302315e33db1b1d57227d55a43b19266e428ce 100644 (file)
@@ -187,7 +187,7 @@ config DPM_WATCHDOG
 config DPM_WATCHDOG_TIMEOUT
        int "Watchdog timeout in seconds"
        range 1 120
-       default 12
+       default 60
        depends on DPM_WATCHDOG
 
 config PM_TRACE
index 2329daae5255374ca8db8619d577ed8e8274fccb..690f78f210f2cf4ec9436c9f37a3bbf5eb876397 100644 (file)
@@ -552,7 +552,7 @@ int hibernation_platform_enter(void)
 
        error = disable_nonboot_cpus();
        if (error)
-               goto Platform_finish;
+               goto Enable_cpus;
 
        local_irq_disable();
        syscore_suspend();
@@ -568,6 +568,8 @@ int hibernation_platform_enter(void)
  Power_up:
        syscore_resume();
        local_irq_enable();
+
+ Enable_cpus:
        enable_nonboot_cpus();
 
  Platform_finish:
index de553849f3ac91378acae6b981140763faa9530f..cf8c24203368651af417eba7525a053e9cc8ff93 100644 (file)
@@ -207,14 +207,14 @@ static int console_may_schedule;
  * need to be changed in the future, when the requirements change.
  *
  * /dev/kmsg exports the structured data in the following line format:
- *   "<level>,<sequnum>,<timestamp>,<contflag>;<message text>\n"
+ *   "<level>,<sequnum>,<timestamp>,<contflag>[,additional_values, ... ];<message text>\n"
+ *
+ * Users of the export format should ignore possible additional values
+ * separated by ',', and find the message after the ';' character.
  *
  * The optional key/value pairs are attached as continuation lines starting
  * with a space character and terminated by a newline. All possible
  * non-printable characters are escaped in the "\xff" notation.
- *
- * Users of the export format should ignore possible additional values
- * separated by ',', and find the message after the ';' character.
  */
 
 enum log_flags {
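The reordered comment spells out the forward-compatibility rule for /dev/kmsg consumers: treat the prefix before the first ';' as a ','-separated list whose tail may grow, and take everything after the ';' as the message text. A tolerant userspace parser sketch (hypothetical, not part of the patch):

#include <stdlib.h>
#include <string.h>

static const char *kmsg_text(const char *line, int *level)
{
        const char *semi = strchr(line, ';');

        *level = atoi(line);            /* first ','-separated prefix field */
        return semi ? semi + 1 : line;  /* message text follows the ';' */
}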
index e9dbaeb8fd65c7d0af385043cddf2b7d7ef7fa8c..0b4570cfacaeb2f5290d8f4b98d256943d22333c 100644 (file)
@@ -81,10 +81,7 @@ static struct page **relay_alloc_page_array(unsigned int n_pages)
  */
 static void relay_free_page_array(struct page **array)
 {
-       if (is_vmalloc_addr(array))
-               vfree(array);
-       else
-               kfree(array);
+       kvfree(array);
 }
 
 /**
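kvfree() frees a pointer regardless of whether it came from the kmalloc or vmalloc family, which is why the is_vmalloc_addr() branch above becomes redundant. The allocation side it pairs with in this era of the kernel (no kvmalloc() helper yet) typically looks like:

#include <linux/slab.h>
#include <linux/vmalloc.h>

static struct page **alloc_page_array_sketch(unsigned int n_pages)
{
        size_t size = n_pages * sizeof(struct page *);

        if (size <= PAGE_SIZE)
                return kzalloc(size, GFP_KERNEL);
        return vzalloc(size);           /* either way, kvfree() can release it */
}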
index b803e1b8ab0cf7eb381600a32855b822be3c8c89..78b4bad10081c6b23894ac1d5d7b6900ab32362a 100644 (file)
@@ -2164,7 +2164,7 @@ int sched_fork(unsigned long clone_flags, struct task_struct *p)
        set_task_cpu(p, cpu);
        raw_spin_unlock_irqrestore(&p->pi_lock, flags);
 
-#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
+#ifdef CONFIG_SCHED_INFO
        if (likely(sched_info_on()))
                memset(&p->sched_info, 0, sizeof(p->sched_info));
 #endif
@@ -2320,13 +2320,27 @@ void wake_up_new_task(struct task_struct *p)
 
 static struct static_key preempt_notifier_key = STATIC_KEY_INIT_FALSE;
 
+void preempt_notifier_inc(void)
+{
+       static_key_slow_inc(&preempt_notifier_key);
+}
+EXPORT_SYMBOL_GPL(preempt_notifier_inc);
+
+void preempt_notifier_dec(void)
+{
+       static_key_slow_dec(&preempt_notifier_key);
+}
+EXPORT_SYMBOL_GPL(preempt_notifier_dec);
+
 /**
  * preempt_notifier_register - tell me when current is being preempted & rescheduled
  * @notifier: notifier struct to register
  */
 void preempt_notifier_register(struct preempt_notifier *notifier)
 {
-       static_key_slow_inc(&preempt_notifier_key);
+       if (!static_key_false(&preempt_notifier_key))
+               WARN(1, "registering preempt_notifier while notifiers disabled\n");
+
        hlist_add_head(&notifier->link, &current->preempt_notifiers);
 }
 EXPORT_SYMBOL_GPL(preempt_notifier_register);
@@ -2340,7 +2354,6 @@ EXPORT_SYMBOL_GPL(preempt_notifier_register);
 void preempt_notifier_unregister(struct preempt_notifier *notifier)
 {
        hlist_del(&notifier->link);
-       static_key_slow_dec(&preempt_notifier_key);
 }
 EXPORT_SYMBOL_GPL(preempt_notifier_unregister);
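preempt_notifier_register() no longer bumps the static key itself; a subsystem that uses preempt notifiers is expected to call preempt_notifier_inc() once up front and preempt_notifier_dec() when it is done, then register/unregister per-task notifiers as before. A hypothetical user (the callbacks and ops structure are assumptions):

#include <linux/preempt.h>
#include <linux/sched.h>

static void my_sched_in(struct preempt_notifier *n, int cpu) { }
static void my_sched_out(struct preempt_notifier *n, struct task_struct *next) { }

static struct preempt_ops my_preempt_ops = {
        .sched_in  = my_sched_in,
        .sched_out = my_sched_out,
};

static int my_subsys_init(void)
{
        preempt_notifier_inc();                 /* enable the static key once */
        return 0;
}

static void my_task_attach(struct preempt_notifier *n)
{
        preempt_notifier_init(n, &my_preempt_ops);
        preempt_notifier_register(n);           /* affects the current task */
}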
 
index 315c68e015d955d6227a83b6b951482cffd8a68e..4222ec50ab88451d187b38fa12019a3cd15f8f57 100644 (file)
@@ -142,7 +142,7 @@ print_task(struct seq_file *m, struct rq *rq, struct task_struct *p)
                0LL, 0L);
 #endif
 #ifdef CONFIG_NUMA_BALANCING
-       SEQ_printf(m, " %d", task_node(p));
+       SEQ_printf(m, " %d %d", task_node(p), task_numa_group_id(p));
 #endif
 #ifdef CONFIG_CGROUP_SCHED
        SEQ_printf(m, " %s", task_group_path(task_group(p)));
@@ -517,11 +517,21 @@ __initcall(init_sched_debug_procfs);
        SEQ_printf(m, "%-45s:%14Ld.%06ld\n", #F, SPLIT_NS((long long)p->F))
 
 
+#ifdef CONFIG_NUMA_BALANCING
+void print_numa_stats(struct seq_file *m, int node, unsigned long tsf,
+               unsigned long tpf, unsigned long gsf, unsigned long gpf)
+{
+       SEQ_printf(m, "numa_faults node=%d ", node);
+       SEQ_printf(m, "task_private=%lu task_shared=%lu ", tsf, tpf);
+       SEQ_printf(m, "group_private=%lu group_shared=%lu\n", gsf, gpf);
+}
+#endif
+
+
 static void sched_show_numa(struct task_struct *p, struct seq_file *m)
 {
 #ifdef CONFIG_NUMA_BALANCING
        struct mempolicy *pol;
-       int node, i;
 
        if (p->mm)
                P(mm->numa_scan_seq);
@@ -533,26 +543,12 @@ static void sched_show_numa(struct task_struct *p, struct seq_file *m)
        mpol_get(pol);
        task_unlock(p);
 
-       SEQ_printf(m, "numa_migrations, %ld\n", xchg(&p->numa_pages_migrated, 0));
-
-       for_each_online_node(node) {
-               for (i = 0; i < 2; i++) {
-                       unsigned long nr_faults = -1;
-                       int cpu_current, home_node;
-
-                       if (p->numa_faults)
-                               nr_faults = p->numa_faults[2*node + i];
-
-                       cpu_current = !i ? (task_node(p) == node) :
-                               (pol && node_isset(node, pol->v.nodes));
-
-                       home_node = (p->numa_preferred_nid == node);
-
-                       SEQ_printf(m, "numa_faults_memory, %d, %d, %d, %d, %ld\n",
-                               i, node, cpu_current, home_node, nr_faults);
-               }
-       }
-
+       P(numa_pages_migrated);
+       P(numa_preferred_nid);
+       P(total_numa_faults);
+       SEQ_printf(m, "current_node=%d, numa_group_id=%d\n",
+                       task_node(p), task_numa_group_id(p));
+       show_numa_stats(p, m);
        mpol_put(pol);
 #endif
 }
index 3d57cc0ca0a6b831cb6c394d528b483f6daaaf50..65c8f3ebdc3c5d58d148780c84c90fe54dc6e4dc 100644 (file)
@@ -8473,7 +8473,27 @@ void print_cfs_stats(struct seq_file *m, int cpu)
                print_cfs_rq(m, cpu, cfs_rq);
        rcu_read_unlock();
 }
-#endif
+
+#ifdef CONFIG_NUMA_BALANCING
+void show_numa_stats(struct task_struct *p, struct seq_file *m)
+{
+       int node;
+       unsigned long tsf = 0, tpf = 0, gsf = 0, gpf = 0;
+
+       for_each_online_node(node) {
+               if (p->numa_faults) {
+                       tsf = p->numa_faults[task_faults_idx(NUMA_MEM, node, 0)];
+                       tpf = p->numa_faults[task_faults_idx(NUMA_MEM, node, 1)];
+               }
+               if (p->numa_group) {
+                       gsf = p->numa_group->faults[task_faults_idx(NUMA_MEM, node, 0)];
+                       gpf = p->numa_group->faults[task_faults_idx(NUMA_MEM, node, 1)];
+               }
+               print_numa_stats(m, node, tsf, tpf, gsf, gpf);
+       }
+}
+#endif /* CONFIG_NUMA_BALANCING */
+#endif /* CONFIG_SCHED_DEBUG */
 
 __init void init_sched_fair_class(void)
 {
index 885889190a1f65e410ba4fc7ae2d4fd523976e58..84d48790bb6d0607e5dd67dd449f0353c51b0136 100644 (file)
@@ -1689,9 +1689,22 @@ static inline void double_rq_unlock(struct rq *rq1, struct rq *rq2)
 
 extern struct sched_entity *__pick_first_entity(struct cfs_rq *cfs_rq);
 extern struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq);
+
+#ifdef CONFIG_SCHED_DEBUG
 extern void print_cfs_stats(struct seq_file *m, int cpu);
 extern void print_rt_stats(struct seq_file *m, int cpu);
 extern void print_dl_stats(struct seq_file *m, int cpu);
+extern void
+print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq);
+
+#ifdef CONFIG_NUMA_BALANCING
+extern void
+show_numa_stats(struct task_struct *p, struct seq_file *m);
+extern void
+print_numa_stats(struct seq_file *m, int node, unsigned long tsf,
+       unsigned long tpf, unsigned long gsf, unsigned long gpf);
+#endif /* CONFIG_NUMA_BALANCING */
+#endif /* CONFIG_SCHED_DEBUG */
 
 extern void init_cfs_rq(struct cfs_rq *cfs_rq);
 extern void init_rt_rq(struct rt_rq *rt_rq);
index 077ebbd5e10f14dc646148aae9231acf007e8a4d..b0fbc7632de5f9b13d8ccd2c42d73560c347669a 100644 (file)
@@ -47,7 +47,7 @@ rq_sched_info_depart(struct rq *rq, unsigned long long delta)
 # define schedstat_set(var, val)       do { } while (0)
 #endif
 
-#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
+#ifdef CONFIG_SCHED_INFO
 static inline void sched_info_reset_dequeued(struct task_struct *t)
 {
        t->sched_info.last_queued = 0;
@@ -156,7 +156,7 @@ sched_info_switch(struct rq *rq,
 #define sched_info_depart(rq, t)               do { } while (0)
 #define sched_info_arrive(rq, next)            do { } while (0)
 #define sched_info_switch(rq, t, next)         do { } while (0)
-#endif /* CONFIG_SCHEDSTATS || CONFIG_TASK_DELAY_ACCT */
+#endif /* CONFIG_SCHED_INFO */
 
 /*
  * The following are functions that support scheduler-internal time accounting.
index 812fcc3fd3906f7a095888a3fde0fe4893027cb6..19b62b522158acb6414cd7440b25e64bd16add35 100644 (file)
@@ -1538,12 +1538,6 @@ static struct ctl_table vm_table[] = {
        { }
 };
 
-#if defined(CONFIG_BINFMT_MISC) || defined(CONFIG_BINFMT_MISC_MODULE)
-static struct ctl_table binfmt_misc_table[] = {
-       { }
-};
-#endif
-
 static struct ctl_table fs_table[] = {
        {
                .procname       = "inode-nr",
@@ -1697,7 +1691,7 @@ static struct ctl_table fs_table[] = {
        {
                .procname       = "binfmt_misc",
                .mode           = 0555,
-               .child          = binfmt_misc_table,
+               .child          = sysctl_mount_point,
        },
 #endif
        {
index ffc4cc3dcd47b4e277df56735e11d51abdd87f6d..49eca0beed32ebc2972cdd6ccdb1b2b65bbd769f 100644 (file)
@@ -12,5 +12,3 @@ obj-$(CONFIG_TICK_ONESHOT)                    += tick-oneshot.o tick-sched.o
 obj-$(CONFIG_TIMER_STATS)                      += timer_stats.o
 obj-$(CONFIG_DEBUG_FS)                         += timekeeping_debug.o
 obj-$(CONFIG_TEST_UDELAY)                      += test_udelay.o
-
-$(obj)/time.o: $(objtree)/include/config/
index 30b7a409bf1ea19001e3eeb966679137bfe74029..bca3667a2de1f1a221d0fffe0f282137d82fa90e 100644 (file)
@@ -319,32 +319,7 @@ static inline s64 timekeeping_get_ns(struct tk_read_base *tkr)
  * We want to use this from any context including NMI and tracing /
  * instrumenting the timekeeping code itself.
  *
- * So we handle this differently than the other timekeeping accessor
- * functions which retry when the sequence count has changed. The
- * update side does:
- *
- * smp_wmb();  <- Ensure that the last base[1] update is visible
- * tkf->seq++;
- * smp_wmb();  <- Ensure that the seqcount update is visible
- * update(tkf->base[0], tkr);
- * smp_wmb();  <- Ensure that the base[0] update is visible
- * tkf->seq++;
- * smp_wmb();  <- Ensure that the seqcount update is visible
- * update(tkf->base[1], tkr);
- *
- * The reader side does:
- *
- * do {
- *     seq = tkf->seq;
- *     smp_rmb();
- *     idx = seq & 0x01;
- *     now = now(tkf->base[idx]);
- *     smp_rmb();
- * } while (seq != tkf->seq)
- *
- * As long as we update base[0] readers are forced off to
- * base[1]. Once base[0] is updated readers are redirected to base[0]
- * and the base[1] update takes place.
+ * Employ the latch technique; see @raw_write_seqcount_latch.
  *
  * So if a NMI hits the update of base[0] then it will use base[1]
  * which is still consistent. In the worst case this can result in a
@@ -407,7 +382,7 @@ static __always_inline u64 __ktime_get_fast_ns(struct tk_fast *tkf)
        u64 now;
 
        do {
-               seq = raw_read_seqcount(&tkf->seq);
+               seq = raw_read_seqcount_latch(&tkf->seq);
                tkr = tkf->base + (seq & 0x01);
                now = ktime_to_ns(tkr->base) + timekeeping_get_ns(tkr);
        } while (read_seqcount_retry(&tkf->seq, seq));
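The reader above now uses the generic latch helper: the writer increments the sequence before updating each half of base[], so a reader, even one running in NMI context, is always steered to the half that is not being modified. The matching write side is roughly as follows (sketch; the real update lives in update_fast_timekeeper() in this file):

static void update_fast_timekeeper_sketch(struct tk_read_base *tkr,
                                          struct tk_fast *tkf)
{
        struct tk_read_base *base = tkf->base;

        raw_write_seqcount_latch(&tkf->seq);    /* readers switch to base[1] */
        memcpy(base, tkr, sizeof(*base));

        raw_write_seqcount_latch(&tkf->seq);    /* readers move back to base[0] */
        memcpy(base + 1, tkr, sizeof(*base));
}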
index 520499dd85af42e96b2bbd8c729df36d238ad27a..5e097fa9faf7016470b8283931023a15d20ed97d 100644 (file)
@@ -1566,7 +1566,7 @@ static void migrate_timers(int cpu)
 
        BUG_ON(cpu_online(cpu));
        old_base = per_cpu_ptr(&tvec_bases, cpu);
-       new_base = this_cpu_ptr(&tvec_bases);
+       new_base = get_cpu_ptr(&tvec_bases);
        /*
         * The caller is globally serialized and nobody else
         * takes two locks at once, deadlock is not possible.
@@ -1590,6 +1590,7 @@ static void migrate_timers(int cpu)
 
        spin_unlock(&old_base->lock);
        spin_unlock_irq(&new_base->lock);
+       put_cpu_ptr(&tvec_bases);
 }
 
 static int timer_cpu_notify(struct notifier_block *self,
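migrate_timers() switches from this_cpu_ptr() to get_cpu_ptr() so preemption stays disabled while the local CPU's timer base is used as the migration target; put_cpu_ptr() re-enables it afterwards. The general shape of that pattern, with a made-up per-CPU variable:

#include <linux/percpu.h>

struct my_state { int counter; };                        /* hypothetical */
static DEFINE_PER_CPU(struct my_state, my_state);

static void poke_local_state(void)
{
        struct my_state *s = get_cpu_ptr(&my_state);     /* disables preemption */

        s->counter++;   /* safe: the task cannot migrate to another CPU here */

        put_cpu_ptr(&my_state);                          /* re-enables preemption */
}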
index 5243d4b030876bc558f34f392d96805710a719c6..4c4f06176f748616b180254e94eda6bbed7dde25 100644 (file)
@@ -285,12 +285,7 @@ static bool wq_disable_numa;
 module_param_named(disable_numa, wq_disable_numa, bool, 0444);
 
 /* see the comment above the definition of WQ_POWER_EFFICIENT */
-#ifdef CONFIG_WQ_POWER_EFFICIENT_DEFAULT
-static bool wq_power_efficient = true;
-#else
-static bool wq_power_efficient;
-#endif
-
+static bool wq_power_efficient = IS_ENABLED(CONFIG_WQ_POWER_EFFICIENT_DEFAULT);
 module_param_named(power_efficient, wq_power_efficient, bool, 0444);
 
 static bool wq_numa_enabled;           /* unbound NUMA affinity enabled */
index b908048f8d6a8e2b33723b222e1a4a88b2841774..e2894b23efb60eeffef04b028a5a27a76e41e0d4 100644 (file)
@@ -841,9 +841,14 @@ config SCHED_DEBUG
          that can help debug the scheduler. The runtime overhead of this
          option is minimal.
 
+config SCHED_INFO
+       bool
+       default n
+
 config SCHEDSTATS
        bool "Collect scheduler statistics"
        depends on DEBUG_KERNEL && PROC_FS
+       select SCHED_INFO
        help
          If you say Y here, additional code will be inserted into the
          scheduler and related routines to collect statistics about
index ff37c8c2f7b24fbaa0bb6e04faa8aef5edd3dbbc..6897b527581a8d9fcb65b4a2e01815d3d34498ed 100644 (file)
@@ -45,6 +45,9 @@ CFLAGS_kobject.o += -DDEBUG
 CFLAGS_kobject_uevent.o += -DDEBUG
 endif
 
+obj-$(CONFIG_DEBUG_INFO_REDUCED) += debug_info.o
+CFLAGS_debug_info.o += $(call cc-option, -femit-struct-debug-detailed=any)
+
 obj-$(CONFIG_GENERIC_IOMAP) += iomap.o
 obj-$(CONFIG_GENERIC_PCI_IOMAP) += pci_iomap.o
 obj-$(CONFIG_HAS_IOMEM) += iomap_copy.o devres.o
index 0c3bd9552b6fc4fa5e380ac013caf5ac618b1faf..cff145f032a550ff1703208cbadc033e2a6336b6 100644 (file)
--- a/lib/bug.c
+++ b/lib/bug.c
@@ -66,7 +66,7 @@ static const struct bug_entry *module_find_bug(unsigned long bugaddr)
        struct module *mod;
        const struct bug_entry *bug = NULL;
 
-       rcu_read_lock();
+       rcu_read_lock_sched();
        list_for_each_entry_rcu(mod, &module_bug_list, bug_list) {
                unsigned i;
 
@@ -77,7 +77,7 @@ static const struct bug_entry *module_find_bug(unsigned long bugaddr)
        }
        bug = NULL;
 out:
-       rcu_read_unlock();
+       rcu_read_unlock_sched();
 
        return bug;
 }
@@ -88,6 +88,8 @@ void module_bug_finalize(const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs,
        char *secstrings;
        unsigned int i;
 
+       lockdep_assert_held(&module_mutex);
+
        mod->bug_table = NULL;
        mod->num_bugs = 0;
 
@@ -113,6 +115,7 @@ void module_bug_finalize(const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs,
 
 void module_bug_cleanup(struct module *mod)
 {
+       lockdep_assert_held(&module_mutex);
        list_del_rcu(&mod->bug_list);
 }
 
index dfe6ec17c0a5fa774ec34d23de696be7b99a6182..1ad33e555805a22cc901d4ed029826eea0b78250 100644 (file)
@@ -19,7 +19,7 @@
 static struct crypto_shash *crct10dif_tfm;
 static struct static_key crct10dif_fallback __read_mostly;
 
-__u16 crc_t10dif(const unsigned char *buffer, size_t len)
+__u16 crc_t10dif_update(__u16 crc, const unsigned char *buffer, size_t len)
 {
        struct {
                struct shash_desc shash;
@@ -28,17 +28,23 @@ __u16 crc_t10dif(const unsigned char *buffer, size_t len)
        int err;
 
        if (static_key_false(&crct10dif_fallback))
-               return crc_t10dif_generic(0, buffer, len);
+               return crc_t10dif_generic(crc, buffer, len);
 
        desc.shash.tfm = crct10dif_tfm;
        desc.shash.flags = 0;
-       *(__u16 *)desc.ctx = 0;
+       *(__u16 *)desc.ctx = crc;
 
        err = crypto_shash_update(&desc.shash, buffer, len);
        BUG_ON(err);
 
        return *(__u16 *)desc.ctx;
 }
+EXPORT_SYMBOL(crc_t10dif_update);
+
+__u16 crc_t10dif(const unsigned char *buffer, size_t len)
+{
+       return crc_t10dif_update(0, buffer, len);
+}
 EXPORT_SYMBOL(crc_t10dif);
 
 static int __init crc_t10dif_mod_init(void)
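crc_t10dif() is now a thin wrapper around the new crc_t10dif_update(), which lets callers feed data in pieces by passing the running CRC back in. A usage sketch with hypothetical buffers:

#include <linux/crc-t10dif.h>
#include <linux/types.h>

static __u16 crc_over_two_buffers(const u8 *hdr, size_t hdr_len,
                                  const u8 *payload, size_t payload_len)
{
        __u16 crc = crc_t10dif_update(0, hdr, hdr_len);   /* seed 0 == crc_t10dif() */

        return crc_t10dif_update(crc, payload, payload_len);
}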
diff --git a/lib/debug_info.c b/lib/debug_info.c
new file mode 100644 (file)
index 0000000..2edbe27
--- /dev/null
@@ -0,0 +1,27 @@
+/*
+ * This file exists solely to ensure debug information for some core
+ * data structures is included in the final image even for
+ * CONFIG_DEBUG_INFO_REDUCED. Please do not add actual code. However,
+ * adding appropriate #includes is fine.
+ */
+#include <stdarg.h>
+
+#include <linux/cred.h>
+#include <linux/crypto.h>
+#include <linux/dcache.h>
+#include <linux/device.h>
+#include <linux/fs.h>
+#include <linux/fscache-cache.h>
+#include <linux/io.h>
+#include <linux/kallsyms.h>
+#include <linux/kernel.h>
+#include <linux/kobject.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/net.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <net/addrconf.h>
+#include <net/sock.h>
+#include <net/tcp.h>
index d214866eeea2cff341cbe76ec0b09c15b05f6ad2..daf0afb6d979e1074cdcee38cd69d2de3a94e162 100644 (file)
@@ -602,12 +602,12 @@ struct gen_pool *devm_gen_pool_create(struct device *dev, int min_alloc_order,
 EXPORT_SYMBOL(devm_gen_pool_create);
 
 /**
- * dev_get_gen_pool - Obtain the gen_pool (if any) for a device
+ * gen_pool_get - Obtain the gen_pool (if any) for a device
  * @dev: device to retrieve the gen_pool from
  *
  * Returns the gen_pool for the device if one is present, or NULL.
  */
-struct gen_pool *dev_get_gen_pool(struct device *dev)
+struct gen_pool *gen_pool_get(struct device *dev)
 {
        struct gen_pool **p = devres_find(dev, devm_gen_pool_release, NULL,
                                        NULL);
@@ -616,11 +616,11 @@ struct gen_pool *dev_get_gen_pool(struct device *dev)
                return NULL;
        return *p;
 }
-EXPORT_SYMBOL_GPL(dev_get_gen_pool);
+EXPORT_SYMBOL_GPL(gen_pool_get);
 
 #ifdef CONFIG_OF
 /**
- * of_get_named_gen_pool - find a pool by phandle property
+ * of_gen_pool_get - find a pool by phandle property
  * @np: device node
  * @propname: property name containing phandle(s)
  * @index: index into the phandle array
@@ -629,7 +629,7 @@ EXPORT_SYMBOL_GPL(dev_get_gen_pool);
  * address of the device tree node pointed at by the phandle property,
  * or NULL if not found.
  */
-struct gen_pool *of_get_named_gen_pool(struct device_node *np,
+struct gen_pool *of_gen_pool_get(struct device_node *np,
        const char *propname, int index)
 {
        struct platform_device *pdev;
@@ -642,7 +642,7 @@ struct gen_pool *of_get_named_gen_pool(struct device_node *np,
        of_node_put(np_pool);
        if (!pdev)
                return NULL;
-       return dev_get_gen_pool(&pdev->dev);
+       return gen_pool_get(&pdev->dev);
 }
-EXPORT_SYMBOL_GPL(of_get_named_gen_pool);
+EXPORT_SYMBOL_GPL(of_gen_pool_get);
 #endif /* CONFIG_OF */
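dev_get_gen_pool() and of_get_named_gen_pool() are renamed to gen_pool_get() and of_gen_pool_get() with their bodies left unchanged. A hypothetical consumer after the rename (the "sram" phandle property name is an assumption):

#include <linux/genalloc.h>
#include <linux/platform_device.h>

static struct gen_pool *find_pool(struct platform_device *pdev)
{
        struct gen_pool *pool;

        pool = of_gen_pool_get(pdev->dev.of_node, "sram", 0);
        if (!pool)
                pool = gen_pool_get(&pdev->dev);

        return pool;
}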
index b29015102698b393e0fdb84e2f7e6170d1e2a0a9..3fe401067e20ba81c762fb00ae00730de6b9502f 100644 (file)
@@ -289,5 +289,5 @@ exit:
        kfree(elts);
        return err;
 }
-module_init(list_sort_test);
+late_initcall(list_sort_test);
 #endif /* CONFIG_TEST_LIST_SORT */
index c16c81a3d430e84a8084de93e97900715a8e6b8e..1356454e36de9f1c083b2c84f297ba4878498c0a 100644 (file)
  *  parentheses and have some accompanying text comment.
  */
 
+/*
+ * Notes on lockless lookups:
+ *
+ * All stores to the tree structure (rb_left and rb_right) must be done using
+ * WRITE_ONCE(). And we must not inadvertently cause (temporary) loops in the
+ * tree structure as seen in program order.
+ *
+ * These two requirements will allow lockless iteration of the tree -- not
+ * correct iteration mind you, tree rotations are not atomic so a lookup might
+ * miss entire subtrees.
+ *
+ * But they do guarantee that any such traversal will only see valid elements
+ * and that it will indeed complete -- does not get stuck in a loop.
+ *
+ * It also guarantees that if the lookup returns an element it is the 'correct'
+ * one. But not returning an element does _NOT_ mean it's not present.
+ *
+ * NOTE:
+ *
+ * Stores to __rb_parent_color are not important for simple lookups so those
+ * are left undone as of now. Nor did I check for loops involving parent
+ * pointers.
+ */
+
 static inline void rb_set_black(struct rb_node *rb)
 {
        rb->__rb_parent_color |= RB_BLACK;
@@ -129,8 +153,9 @@ __rb_insert(struct rb_node *node, struct rb_root *root,
                                 * This still leaves us in violation of 4), the
                                 * continuation into Case 3 will fix that.
                                 */
-                               parent->rb_right = tmp = node->rb_left;
-                               node->rb_left = parent;
+                               tmp = node->rb_left;
+                               WRITE_ONCE(parent->rb_right, tmp);
+                               WRITE_ONCE(node->rb_left, parent);
                                if (tmp)
                                        rb_set_parent_color(tmp, parent,
                                                            RB_BLACK);
@@ -149,8 +174,8 @@ __rb_insert(struct rb_node *node, struct rb_root *root,
                         *     /                 \
                         *    n                   U
                         */
-                       gparent->rb_left = tmp;  /* == parent->rb_right */
-                       parent->rb_right = gparent;
+                       WRITE_ONCE(gparent->rb_left, tmp); /* == parent->rb_right */
+                       WRITE_ONCE(parent->rb_right, gparent);
                        if (tmp)
                                rb_set_parent_color(tmp, gparent, RB_BLACK);
                        __rb_rotate_set_parents(gparent, parent, root, RB_RED);
@@ -171,8 +196,9 @@ __rb_insert(struct rb_node *node, struct rb_root *root,
                        tmp = parent->rb_left;
                        if (node == tmp) {
                                /* Case 2 - right rotate at parent */
-                               parent->rb_left = tmp = node->rb_right;
-                               node->rb_right = parent;
+                               tmp = node->rb_right;
+                               WRITE_ONCE(parent->rb_left, tmp);
+                               WRITE_ONCE(node->rb_right, parent);
                                if (tmp)
                                        rb_set_parent_color(tmp, parent,
                                                            RB_BLACK);
@@ -183,8 +209,8 @@ __rb_insert(struct rb_node *node, struct rb_root *root,
                        }
 
                        /* Case 3 - left rotate at gparent */
-                       gparent->rb_right = tmp;  /* == parent->rb_left */
-                       parent->rb_left = gparent;
+                       WRITE_ONCE(gparent->rb_right, tmp); /* == parent->rb_left */
+                       WRITE_ONCE(parent->rb_left, gparent);
                        if (tmp)
                                rb_set_parent_color(tmp, gparent, RB_BLACK);
                        __rb_rotate_set_parents(gparent, parent, root, RB_RED);
@@ -224,8 +250,9 @@ ____rb_erase_color(struct rb_node *parent, struct rb_root *root,
                                 *      / \         / \
                                 *     Sl  Sr      N   Sl
                                 */
-                               parent->rb_right = tmp1 = sibling->rb_left;
-                               sibling->rb_left = parent;
+                               tmp1 = sibling->rb_left;
+                               WRITE_ONCE(parent->rb_right, tmp1);
+                               WRITE_ONCE(sibling->rb_left, parent);
                                rb_set_parent_color(tmp1, parent, RB_BLACK);
                                __rb_rotate_set_parents(parent, sibling, root,
                                                        RB_RED);
@@ -275,9 +302,10 @@ ____rb_erase_color(struct rb_node *parent, struct rb_root *root,
                                 *                       \
                                 *                        Sr
                                 */
-                               sibling->rb_left = tmp1 = tmp2->rb_right;
-                               tmp2->rb_right = sibling;
-                               parent->rb_right = tmp2;
+                               tmp1 = tmp2->rb_right;
+                               WRITE_ONCE(sibling->rb_left, tmp1);
+                               WRITE_ONCE(tmp2->rb_right, sibling);
+                               WRITE_ONCE(parent->rb_right, tmp2);
                                if (tmp1)
                                        rb_set_parent_color(tmp1, sibling,
                                                            RB_BLACK);
@@ -297,8 +325,9 @@ ____rb_erase_color(struct rb_node *parent, struct rb_root *root,
                         *        / \         / \
                         *      (sl) sr      N  (sl)
                         */
-                       parent->rb_right = tmp2 = sibling->rb_left;
-                       sibling->rb_left = parent;
+                       tmp2 = sibling->rb_left;
+                       WRITE_ONCE(parent->rb_right, tmp2);
+                       WRITE_ONCE(sibling->rb_left, parent);
                        rb_set_parent_color(tmp1, sibling, RB_BLACK);
                        if (tmp2)
                                rb_set_parent(tmp2, parent);
@@ -310,8 +339,9 @@ ____rb_erase_color(struct rb_node *parent, struct rb_root *root,
                        sibling = parent->rb_left;
                        if (rb_is_red(sibling)) {
                                /* Case 1 - right rotate at parent */
-                               parent->rb_left = tmp1 = sibling->rb_right;
-                               sibling->rb_right = parent;
+                               tmp1 = sibling->rb_right;
+                               WRITE_ONCE(parent->rb_left, tmp1);
+                               WRITE_ONCE(sibling->rb_right, parent);
                                rb_set_parent_color(tmp1, parent, RB_BLACK);
                                __rb_rotate_set_parents(parent, sibling, root,
                                                        RB_RED);
@@ -336,9 +366,10 @@ ____rb_erase_color(struct rb_node *parent, struct rb_root *root,
                                        break;
                                }
                                /* Case 3 - right rotate at sibling */
-                               sibling->rb_right = tmp1 = tmp2->rb_left;
-                               tmp2->rb_left = sibling;
-                               parent->rb_left = tmp2;
+                               tmp1 = tmp2->rb_left;
+                               WRITE_ONCE(sibling->rb_right, tmp1);
+                               WRITE_ONCE(tmp2->rb_left, sibling);
+                               WRITE_ONCE(parent->rb_left, tmp2);
                                if (tmp1)
                                        rb_set_parent_color(tmp1, sibling,
                                                            RB_BLACK);
@@ -347,8 +378,9 @@ ____rb_erase_color(struct rb_node *parent, struct rb_root *root,
                                sibling = tmp2;
                        }
                        /* Case 4 - left rotate at parent + color flips */
-                       parent->rb_left = tmp2 = sibling->rb_right;
-                       sibling->rb_right = parent;
+                       tmp2 = sibling->rb_right;
+                       WRITE_ONCE(parent->rb_left, tmp2);
+                       WRITE_ONCE(sibling->rb_right, parent);
                        rb_set_parent_color(tmp1, sibling, RB_BLACK);
                        if (tmp2)
                                rb_set_parent(tmp2, parent);
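Given the WRITE_ONCE() stores above, a reader that pairs them with READ_ONCE() can walk the tree without holding the lock, under the rules from the "lockless lookups" note: a hit is always a valid element, but a concurrent rotation may hide nodes, so a miss proves nothing. A reader-side sketch with a hypothetical node type:

#include <linux/compiler.h>
#include <linux/rbtree.h>

struct thing {
        struct rb_node rb;
        unsigned long key;
};

static struct thing *thing_lookup_lockless(struct rb_root *root, unsigned long key)
{
        struct rb_node *node = READ_ONCE(root->rb_node);

        while (node) {
                struct thing *t = rb_entry(node, struct thing, rb);

                if (key < t->key)
                        node = READ_ONCE(node->rb_left);
                else if (key > t->key)
                        node = READ_ONCE(node->rb_right);
                else
                        return t;       /* a hit is always a valid element */
        }

        return NULL;                    /* absence is NOT guaranteed */
}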
index 99fbc2f238c4976a1b95df715d22d35f739b4cc7..d105a9f56878e57e2587f2704121c0210b8fb941 100644 (file)
@@ -650,9 +650,8 @@ EXPORT_SYMBOL(sg_miter_stop);
  * Returns the number of copied bytes.
  *
  **/
-static size_t sg_copy_buffer(struct scatterlist *sgl, unsigned int nents,
-                            void *buf, size_t buflen, off_t skip,
-                            bool to_buffer)
+size_t sg_copy_buffer(struct scatterlist *sgl, unsigned int nents, void *buf,
+                     size_t buflen, off_t skip, bool to_buffer)
 {
        unsigned int offset = 0;
        struct sg_mapping_iter miter;
@@ -689,6 +688,7 @@ static size_t sg_copy_buffer(struct scatterlist *sgl, unsigned int nents,
        local_irq_restore(flags);
        return offset;
 }
+EXPORT_SYMBOL(sg_copy_buffer);
 
 /**
  * sg_copy_from_buffer - Copy from a linear buffer to an SG list
@@ -701,9 +701,9 @@ static size_t sg_copy_buffer(struct scatterlist *sgl, unsigned int nents,
  *
  **/
 size_t sg_copy_from_buffer(struct scatterlist *sgl, unsigned int nents,
-                          void *buf, size_t buflen)
+                          const void *buf, size_t buflen)
 {
-       return sg_copy_buffer(sgl, nents, buf, buflen, 0, false);
+       return sg_copy_buffer(sgl, nents, (void *)buf, buflen, 0, false);
 }
 EXPORT_SYMBOL(sg_copy_from_buffer);
 
@@ -729,16 +729,16 @@ EXPORT_SYMBOL(sg_copy_to_buffer);
  * @sgl:                The SG list
  * @nents:              Number of SG entries
  * @buf:                Where to copy from
- * @skip:               Number of bytes to skip before copying
  * @buflen:             The number of bytes to copy
+ * @skip:               Number of bytes to skip before copying
  *
  * Returns the number of copied bytes.
  *
  **/
 size_t sg_pcopy_from_buffer(struct scatterlist *sgl, unsigned int nents,
-                           void *buf, size_t buflen, off_t skip)
+                           const void *buf, size_t buflen, off_t skip)
 {
-       return sg_copy_buffer(sgl, nents, buf, buflen, skip, false);
+       return sg_copy_buffer(sgl, nents, (void *)buf, buflen, skip, false);
 }
 EXPORT_SYMBOL(sg_pcopy_from_buffer);
 
@@ -747,8 +747,8 @@ EXPORT_SYMBOL(sg_pcopy_from_buffer);
  * @sgl:                The SG list
  * @nents:              Number of SG entries
  * @buf:                Where to copy to
- * @skip:               Number of bytes to skip before copying
  * @buflen:             The number of bytes to copy
+ * @skip:               Number of bytes to skip before copying
  *
  * Returns the number of copied bytes.
  *
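
Note: with sg_copy_buffer() exported and the *_from_buffer() helpers taking a const source, read-only data can be fed into an SG list without a cast at the call site. A hedged usage sketch (hypothetical caller, not part of this patch):

    #include <linux/scatterlist.h>

    static size_t fill_sg_from_rodata(struct scatterlist *sgl, unsigned int nents)
    {
            static const char banner[] = "example payload";

            /* skip = 0: start copying at the beginning of the SG list */
            return sg_pcopy_from_buffer(sgl, nents, banner, sizeof(banner), 0);
    }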
index c180af880ed5169cdbf1799c0650b61d68130990..e79de2bd12cd046c416537b8c6550ca3aea95a01 100644 (file)
@@ -636,3 +636,21 @@ config MAX_STACK_SIZE_MB
          changed to a smaller value in which case that is used.
 
          A sane initial value is 80 MB.
+
+# For architectures that support deferred memory initialisation
+config ARCH_SUPPORTS_DEFERRED_STRUCT_PAGE_INIT
+       bool
+
+config DEFERRED_STRUCT_PAGE_INIT
+       bool "Defer initialisation of struct pages to kswapd"
+       default n
+       depends on ARCH_SUPPORTS_DEFERRED_STRUCT_PAGE_INIT
+       depends on MEMORY_HOTPLUG
+       help
+         Ordinarily all struct pages are initialised during early boot in a
+         single thread. On very large machines this can take a considerable
+         amount of time. If this option is set, large machines will bring up
+         a subset of memmap at boot and then initialise the rest in parallel
+         when kswapd starts. This has a potential performance impact on
+         processes running early in the lifetime of the system until kswapd
+         finishes the initialisation.
index 7756da31b02bcbb2a7f7036a4bbbdd093883ad6c..dac5bf59309d04d119b699ba47d849993edbec09 100644 (file)
@@ -287,7 +287,7 @@ void wb_wakeup_delayed(struct bdi_writeback *wb)
 #define INIT_BW                (100 << (20 - PAGE_SHIFT))
 
 static int wb_init(struct bdi_writeback *wb, struct backing_dev_info *bdi,
-                  gfp_t gfp)
+                  int blkcg_id, gfp_t gfp)
 {
        int i, err;
 
@@ -311,21 +311,29 @@ static int wb_init(struct bdi_writeback *wb, struct backing_dev_info *bdi,
        INIT_LIST_HEAD(&wb->work_list);
        INIT_DELAYED_WORK(&wb->dwork, wb_workfn);
 
+       wb->congested = wb_congested_get_create(bdi, blkcg_id, gfp);
+       if (!wb->congested)
+               return -ENOMEM;
+
        err = fprop_local_init_percpu(&wb->completions, gfp);
        if (err)
-               return err;
+               goto out_put_cong;
 
        for (i = 0; i < NR_WB_STAT_ITEMS; i++) {
                err = percpu_counter_init(&wb->stat[i], 0, gfp);
-               if (err) {
-                       while (--i)
-                               percpu_counter_destroy(&wb->stat[i]);
-                       fprop_local_destroy_percpu(&wb->completions);
-                       return err;
-               }
+               if (err)
+                       goto out_destroy_stat;
        }
 
        return 0;
+
+out_destroy_stat:
+       while (--i)
+               percpu_counter_destroy(&wb->stat[i]);
+       fprop_local_destroy_percpu(&wb->completions);
+out_put_cong:
+       wb_congested_put(wb->congested);
+       return err;
 }
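
Note: the reworked wb_init() unwinds in reverse order of acquisition through labelled exits, so a failure at any step jumps to the label that releases everything taken before it. A stand-alone sketch of that idiom with hypothetical resources (not code from this file):

    #include <linux/slab.h>

    static int acquire_pair(void **a, void **b)
    {
            *a = kmalloc(64, GFP_KERNEL);
            if (!*a)
                    return -ENOMEM;

            *b = kmalloc(64, GFP_KERNEL);
            if (!*b)
                    goto out_free_a;        /* undo only what succeeded */

            return 0;

    out_free_a:
            kfree(*a);
            return -ENOMEM;
    }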
 
 /*
@@ -361,6 +369,7 @@ static void wb_exit(struct bdi_writeback *wb)
                percpu_counter_destroy(&wb->stat[i]);
 
        fprop_local_destroy_percpu(&wb->completions);
+       wb_congested_put(wb->congested);
 }
 
 #ifdef CONFIG_CGROUP_WRITEBACK
@@ -392,9 +401,6 @@ wb_congested_get_create(struct backing_dev_info *bdi, int blkcg_id, gfp_t gfp)
        struct bdi_writeback_congested *new_congested = NULL, *congested;
        struct rb_node **node, *parent;
        unsigned long flags;
-
-       if (blkcg_id == 1)
-               return &bdi->wb_congested;
 retry:
        spin_lock_irqsave(&cgwb_lock, flags);
 
@@ -419,7 +425,6 @@ retry:
                new_congested = NULL;
                rb_link_node(&congested->rb_node, parent, node);
                rb_insert_color(&congested->rb_node, &bdi->cgwb_congested_tree);
-               atomic_inc(&bdi->usage_cnt);
                goto found;
        }
 
@@ -450,24 +455,23 @@ found:
  */
 void wb_congested_put(struct bdi_writeback_congested *congested)
 {
-       struct backing_dev_info *bdi = congested->bdi;
        unsigned long flags;
 
-       if (congested->blkcg_id == 1)
-               return;
-
        local_irq_save(flags);
        if (!atomic_dec_and_lock(&congested->refcnt, &cgwb_lock)) {
                local_irq_restore(flags);
                return;
        }
 
-       rb_erase(&congested->rb_node, &congested->bdi->cgwb_congested_tree);
+       /* bdi might already have been destroyed leaving @congested unlinked */
+       if (congested->bdi) {
+               rb_erase(&congested->rb_node,
+                        &congested->bdi->cgwb_congested_tree);
+               congested->bdi = NULL;
+       }
+
        spin_unlock_irqrestore(&cgwb_lock, flags);
        kfree(congested);
-
-       if (atomic_dec_and_test(&bdi->usage_cnt))
-               wake_up_all(&cgwb_release_wait);
 }
 
 static void cgwb_release_workfn(struct work_struct *work)
@@ -480,7 +484,6 @@ static void cgwb_release_workfn(struct work_struct *work)
 
        css_put(wb->memcg_css);
        css_put(wb->blkcg_css);
-       wb_congested_put(wb->congested);
 
        fprop_local_destroy_percpu(&wb->memcg_completions);
        percpu_ref_exit(&wb->refcnt);
@@ -541,7 +544,7 @@ static int cgwb_create(struct backing_dev_info *bdi,
        if (!wb)
                return -ENOMEM;
 
-       ret = wb_init(wb, bdi, gfp);
+       ret = wb_init(wb, bdi, blkcg_css->id, gfp);
        if (ret)
                goto err_free;
 
@@ -553,12 +556,6 @@ static int cgwb_create(struct backing_dev_info *bdi,
        if (ret)
                goto err_ref_exit;
 
-       wb->congested = wb_congested_get_create(bdi, blkcg_css->id, gfp);
-       if (!wb->congested) {
-               ret = -ENOMEM;
-               goto err_fprop_exit;
-       }
-
        wb->memcg_css = memcg_css;
        wb->blkcg_css = blkcg_css;
        INIT_WORK(&wb->release_work, cgwb_release_workfn);
@@ -588,12 +585,10 @@ static int cgwb_create(struct backing_dev_info *bdi,
        if (ret) {
                if (ret == -EEXIST)
                        ret = 0;
-               goto err_put_congested;
+               goto err_fprop_exit;
        }
        goto out_put;
 
-err_put_congested:
-       wb_congested_put(wb->congested);
 err_fprop_exit:
        fprop_local_destroy_percpu(&wb->memcg_completions);
 err_ref_exit:
@@ -662,26 +657,41 @@ struct bdi_writeback *wb_get_create(struct backing_dev_info *bdi,
        return wb;
 }
 
-static void cgwb_bdi_init(struct backing_dev_info *bdi)
+static int cgwb_bdi_init(struct backing_dev_info *bdi)
 {
-       bdi->wb.memcg_css = mem_cgroup_root_css;
-       bdi->wb.blkcg_css = blkcg_root_css;
-       bdi->wb_congested.blkcg_id = 1;
+       int ret;
+
        INIT_RADIX_TREE(&bdi->cgwb_tree, GFP_ATOMIC);
        bdi->cgwb_congested_tree = RB_ROOT;
        atomic_set(&bdi->usage_cnt, 1);
+
+       ret = wb_init(&bdi->wb, bdi, 1, GFP_KERNEL);
+       if (!ret) {
+               bdi->wb.memcg_css = mem_cgroup_root_css;
+               bdi->wb.blkcg_css = blkcg_root_css;
+       }
+       return ret;
 }
 
 static void cgwb_bdi_destroy(struct backing_dev_info *bdi)
 {
        struct radix_tree_iter iter;
+       struct bdi_writeback_congested *congested, *congested_n;
        void **slot;
 
        WARN_ON(test_bit(WB_registered, &bdi->wb.state));
 
        spin_lock_irq(&cgwb_lock);
+
        radix_tree_for_each_slot(slot, &bdi->cgwb_tree, &iter, 0)
                cgwb_kill(*slot);
+
+       rbtree_postorder_for_each_entry_safe(congested, congested_n,
+                                       &bdi->cgwb_congested_tree, rb_node) {
+               rb_erase(&congested->rb_node, &bdi->cgwb_congested_tree);
+               congested->bdi = NULL;  /* mark @congested unlinked */
+       }
+
        spin_unlock_irq(&cgwb_lock);
 
        /*
@@ -732,15 +742,28 @@ void wb_blkcg_offline(struct blkcg *blkcg)
 
 #else  /* CONFIG_CGROUP_WRITEBACK */
 
-static void cgwb_bdi_init(struct backing_dev_info *bdi) { }
+static int cgwb_bdi_init(struct backing_dev_info *bdi)
+{
+       int err;
+
+       bdi->wb_congested = kzalloc(sizeof(*bdi->wb_congested), GFP_KERNEL);
+       if (!bdi->wb_congested)
+               return -ENOMEM;
+
+       err = wb_init(&bdi->wb, bdi, 1, GFP_KERNEL);
+       if (err) {
+               kfree(bdi->wb_congested);
+               return err;
+       }
+       return 0;
+}
+
 static void cgwb_bdi_destroy(struct backing_dev_info *bdi) { }
 
 #endif /* CONFIG_CGROUP_WRITEBACK */
 
 int bdi_init(struct backing_dev_info *bdi)
 {
-       int err;
-
        bdi->dev = NULL;
 
        bdi->min_ratio = 0;
@@ -749,15 +772,7 @@ int bdi_init(struct backing_dev_info *bdi)
        INIT_LIST_HEAD(&bdi->bdi_list);
        init_waitqueue_head(&bdi->wb_waitq);
 
-       err = wb_init(&bdi->wb, bdi, GFP_KERNEL);
-       if (err)
-               return err;
-
-       bdi->wb_congested.state = 0;
-       bdi->wb.congested = &bdi->wb_congested;
-
-       cgwb_bdi_init(bdi);
-       return 0;
+       return cgwb_bdi_init(bdi);
 }
 EXPORT_SYMBOL(bdi_init);
 
index 477be696511d669230b47c73d52a8b3c1836c457..a23dd19346548223152a4922a0e2e7d90cb56b10 100644 (file)
@@ -164,7 +164,7 @@ void __init free_bootmem_late(unsigned long physaddr, unsigned long size)
        end = PFN_DOWN(physaddr + size);
 
        for (; cursor < end; cursor++) {
-               __free_pages_bootmem(pfn_to_page(cursor), 0);
+               __free_pages_bootmem(pfn_to_page(cursor), cursor, 0);
                totalram_pages++;
        }
 }
@@ -172,7 +172,7 @@ void __init free_bootmem_late(unsigned long physaddr, unsigned long size)
 static unsigned long __init free_all_bootmem_core(bootmem_data_t *bdata)
 {
        struct page *page;
-       unsigned long *map, start, end, pages, count = 0;
+       unsigned long *map, start, end, pages, cur, count = 0;
 
        if (!bdata->node_bootmem_map)
                return 0;
@@ -210,17 +210,17 @@ static unsigned long __init free_all_bootmem_core(bootmem_data_t *bdata)
                if (IS_ALIGNED(start, BITS_PER_LONG) && vec == ~0UL) {
                        int order = ilog2(BITS_PER_LONG);
 
-                       __free_pages_bootmem(pfn_to_page(start), order);
+                       __free_pages_bootmem(pfn_to_page(start), start, order);
                        count += BITS_PER_LONG;
                        start += BITS_PER_LONG;
                } else {
-                       unsigned long cur = start;
+                       cur = start;
 
                        start = ALIGN(start + 1, BITS_PER_LONG);
                        while (vec && cur != start) {
                                if (vec & 1) {
                                        page = pfn_to_page(cur);
-                                       __free_pages_bootmem(page, 0);
+                                       __free_pages_bootmem(page, cur, 0);
                                        count++;
                                }
                                vec >>= 1;
@@ -229,12 +229,13 @@ static unsigned long __init free_all_bootmem_core(bootmem_data_t *bdata)
                }
        }
 
+       cur = bdata->node_min_pfn;
        page = virt_to_page(bdata->node_bootmem_map);
        pages = bdata->node_low_pfn - bdata->node_min_pfn;
        pages = bootmem_bootmap_pages(pages);
        count += pages;
        while (pages--)
-               __free_pages_bootmem(page++, 0);
+               __free_pages_bootmem(page++, cur++, 0);
 
        bdebug("nid=%td released=%lx\n", bdata - bootmem_node_data, count);
 
index 11f10efd637c2d67e071c482951e2bb38a105d6b..1283fc82545861d155c4eef7013bf8e285040fed 100644 (file)
@@ -2563,7 +2563,7 @@ ssize_t __generic_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
 
        /* We can write back this queue in page reclaim */
        current->backing_dev_info = inode_to_bdi(inode);
-       err = file_remove_suid(file);
+       err = file_remove_privs(file);
        if (err)
                goto out;
 
index a25e359a40396c9db36042e632b96fd912e99ded..36b23f1e2ca62612e6e1d1b2b9d74c3cd7e87db7 100644 (file)
@@ -155,7 +155,8 @@ __find_buddy_index(unsigned long page_idx, unsigned int order)
 }
 
 extern int __isolate_free_page(struct page *page, unsigned int order);
-extern void __free_pages_bootmem(struct page *page, unsigned int order);
+extern void __free_pages_bootmem(struct page *page, unsigned long pfn,
+                                       unsigned int order);
 extern void prep_compound_page(struct page *page, unsigned long order);
 #ifdef CONFIG_MEMORY_FAILURE
 extern bool is_free_buddy_page(struct page *page);
@@ -361,10 +362,7 @@ do { \
 } while (0)
 
 extern void mminit_verify_pageflags_layout(void);
-extern void mminit_verify_page_links(struct page *page,
-               enum zone_type zone, unsigned long nid, unsigned long pfn);
 extern void mminit_verify_zonelist(void);
-
 #else
 
 static inline void mminit_dprintk(enum mminit_level level,
@@ -376,11 +374,6 @@ static inline void mminit_verify_pageflags_layout(void)
 {
 }
 
-static inline void mminit_verify_page_links(struct page *page,
-               enum zone_type zone, unsigned long nid, unsigned long pfn)
-{
-}
-
 static inline void mminit_verify_zonelist(void)
 {
 }
index 1b444c730846ddf59d9ce591aacce123f975bc13..87108e77e476a326d69ec748f4e300d713fbd192 100644 (file)
@@ -819,6 +819,38 @@ int __init_memblock memblock_mark_mirror(phys_addr_t base, phys_addr_t size)
 }
 
 
+/**
+ * __next_reserved_mem_region - next function for for_each_reserved_mem_region()
+ * @idx: pointer to u64 loop variable
+ * @out_start: ptr to phys_addr_t for start address of the region, can be %NULL
+ * @out_end: ptr to phys_addr_t for end address of the region, can be %NULL
+ *
+ * Iterate over all reserved memory regions.
+ */
+void __init_memblock __next_reserved_mem_region(u64 *idx,
+                                          phys_addr_t *out_start,
+                                          phys_addr_t *out_end)
+{
+       struct memblock_type *rsv = &memblock.reserved;
+
+       if (*idx >= 0 && *idx < rsv->cnt) {
+               struct memblock_region *r = &rsv->regions[*idx];
+               phys_addr_t base = r->base;
+               phys_addr_t size = r->size;
+
+               if (out_start)
+                       *out_start = base;
+               if (out_end)
+                       *out_end = base + size - 1;
+
+               *idx += 1;
+               return;
+       }
+
+       /* signal end of iteration */
+       *idx = ULLONG_MAX;
+}
+
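
Note: this walker is consumed through the for_each_reserved_mem_region() iterator; free_low_memory_core_early() below uses it to mark every reserved range via reserve_bootmem_region() before the free ranges are handed to the buddy allocator. A minimal usage sketch (hypothetical helper, not part of this patch):

    #include <linux/memblock.h>

    static void __init dump_reserved_regions(void)
    {
            phys_addr_t start, end;
            u64 i;

            /* Visit every memblock.reserved range in index order. */
            for_each_reserved_mem_region(i, &start, &end)
                    pr_debug("reserved range: %pa..%pa\n", &start, &end);
    }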
 /**
  * __next__mem_range - next function for for_each_free_mem_range() etc.
  * @idx: pointer to u64 loop variable
@@ -1387,7 +1419,7 @@ void __init __memblock_free_late(phys_addr_t base, phys_addr_t size)
        end = PFN_DOWN(base + size);
 
        for (; cursor < end; cursor++) {
-               __free_pages_bootmem(pfn_to_page(cursor), 0);
+               __free_pages_bootmem(pfn_to_page(cursor), cursor, 0);
                totalram_pages++;
        }
 }
index 11b9ca1767408dddb147c4b225de0aa31b8f17e7..a84fbb772034f2e73eac300e254bd54f2a36ce03 100644 (file)
@@ -3726,7 +3726,7 @@ void print_vma_addr(char *prefix, unsigned long ip)
                if (buf) {
                        char *p;
 
-                       p = d_path(&f->f_path, buf, PAGE_SIZE);
+                       p = file_path(f, buf, PAGE_SIZE);
                        if (IS_ERR(p))
                                p = "?";
                        printk("%s%s[%lx+%lx]", prefix, kbasename(p),
index 5f420f7fafa1c6b6bd74cdf3b6e9cfb5e7874f87..fdadf918de7691a57b3611cd7c45dec250b7c732 100644 (file)
@@ -11,6 +11,7 @@
 #include <linux/export.h>
 #include <linux/memory.h>
 #include <linux/notifier.h>
+#include <linux/sched.h>
 #include "internal.h"
 
 #ifdef CONFIG_DEBUG_MEMORY_INIT
@@ -130,14 +131,6 @@ void __init mminit_verify_pageflags_layout(void)
        BUG_ON(or_mask != add_mask);
 }
 
-void __meminit mminit_verify_page_links(struct page *page, enum zone_type zone,
-                       unsigned long nid, unsigned long pfn)
-{
-       BUG_ON(page_to_nid(page) != nid);
-       BUG_ON(page_zonenum(page) != zone);
-       BUG_ON(page_to_pfn(page) != pfn);
-}
-
 static __init int set_mminit_loglevel(char *str)
 {
        get_option(&str, &mminit_loglevel);
index 5258386fa1beb44842dbbf9a7b85de07d54d83e1..e57cf24babd671c0757eaf320673777774a0658c 100644 (file)
@@ -86,7 +86,7 @@ void __init free_bootmem_late(unsigned long addr, unsigned long size)
        end = PFN_DOWN(addr + size);
 
        for (; cursor < end; cursor++) {
-               __free_pages_bootmem(pfn_to_page(cursor), 0);
+               __free_pages_bootmem(pfn_to_page(cursor), cursor, 0);
                totalram_pages++;
        }
 }
@@ -101,7 +101,7 @@ static void __init __free_pages_memory(unsigned long start, unsigned long end)
                while (start + (1UL << order) > end)
                        order--;
 
-               __free_pages_bootmem(pfn_to_page(start), order);
+               __free_pages_bootmem(pfn_to_page(start), start, order);
 
                start += (1UL << order);
        }
@@ -130,6 +130,9 @@ static unsigned long __init free_low_memory_core_early(void)
 
        memblock_clear_hotplug(0, -1);
 
+       for_each_reserved_mem_region(i, &start, &end)
+               reserve_bootmem_region(start, end);
+
        for_each_free_mem_range(i, NUMA_NO_NODE, MEMBLOCK_NONE, &start, &end,
                                NULL)
                count += __free_memory_core(start, end);
index 05e7447d960b0628d9dea7ea22a02a0f15488b9d..58ea3643b9e9968a723f498d7df55b34c51a179e 100644 (file)
@@ -2085,7 +2085,7 @@ static int __meminit init_user_reserve(void)
        sysctl_user_reserve_kbytes = min(free_kbytes / 32, 1UL << 17);
        return 0;
 }
-module_init(init_user_reserve)
+subsys_initcall(init_user_reserve);
 
 /*
  * Initialise sysctl_admin_reserve_kbytes.
@@ -2106,4 +2106,4 @@ static int __meminit init_admin_reserve(void)
        sysctl_admin_reserve_kbytes = min(free_kbytes / 32, 1UL << 13);
        return 0;
 }
-module_init(init_admin_reserve)
+subsys_initcall(init_admin_reserve);
index 5e6fa06f2784c8cf9b2defd40120c0be32ca444e..506eac8b38afb2cdbeb481dcebad098aac984618 100644 (file)
@@ -18,6 +18,7 @@
 #include <linux/mm.h>
 #include <linux/swap.h>
 #include <linux/interrupt.h>
+#include <linux/rwsem.h>
 #include <linux/pagemap.h>
 #include <linux/jiffies.h>
 #include <linux/bootmem.h>
@@ -61,6 +62,7 @@
 #include <linux/hugetlb.h>
 #include <linux/sched/rt.h>
 #include <linux/page_owner.h>
+#include <linux/kthread.h>
 
 #include <asm/sections.h>
 #include <asm/tlbflush.h>
@@ -235,6 +237,77 @@ EXPORT_SYMBOL(nr_online_nodes);
 
 int page_group_by_mobility_disabled __read_mostly;
 
+#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
+static inline void reset_deferred_meminit(pg_data_t *pgdat)
+{
+       pgdat->first_deferred_pfn = ULONG_MAX;
+}
+
+/* Returns true if the struct page for the pfn is uninitialised */
+static inline bool __meminit early_page_uninitialised(unsigned long pfn)
+{
+       int nid = early_pfn_to_nid(pfn);
+
+       if (pfn >= NODE_DATA(nid)->first_deferred_pfn)
+               return true;
+
+       return false;
+}
+
+static inline bool early_page_nid_uninitialised(unsigned long pfn, int nid)
+{
+       if (pfn >= NODE_DATA(nid)->first_deferred_pfn)
+               return true;
+
+       return false;
+}
+
+/*
+ * Returns false when the remaining initialisation should be deferred until
+ * later in the boot cycle when it can be parallelised.
+ */
+static inline bool update_defer_init(pg_data_t *pgdat,
+                               unsigned long pfn, unsigned long zone_end,
+                               unsigned long *nr_initialised)
+{
+       /* Always populate low zones for address-constrained allocations */
+       if (zone_end < pgdat_end_pfn(pgdat))
+               return true;
+
+       /* Initialise at least 2G of the highest zone */
+       (*nr_initialised)++;
+       if (*nr_initialised > (2UL << (30 - PAGE_SHIFT)) &&
+           (pfn & (PAGES_PER_SECTION - 1)) == 0) {
+               pgdat->first_deferred_pfn = pfn;
+               return false;
+       }
+
+       return true;
+}
+#else
+static inline void reset_deferred_meminit(pg_data_t *pgdat)
+{
+}
+
+static inline bool early_page_uninitialised(unsigned long pfn)
+{
+       return false;
+}
+
+static inline bool early_page_nid_uninitialised(unsigned long pfn, int nid)
+{
+       return false;
+}
+
+static inline bool update_defer_init(pg_data_t *pgdat,
+                               unsigned long pfn, unsigned long zone_end,
+                               unsigned long *nr_initialised)
+{
+       return true;
+}
+#endif
+
+
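
Note: the threshold checked in update_defer_init() is simply the page count of 2 GiB. With 4 KiB pages (PAGE_SHIFT = 12):

    2UL << (30 - PAGE_SHIFT) = 2 << 18 = 524288 pages = 524288 * 4 KiB = 2 GiB

so each node always initialises at least 2 GiB of its highest zone at boot, and deferral only begins on a PAGES_PER_SECTION boundary so the deferred region starts section-aligned.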
 void set_pageblock_migratetype(struct page *page, int migratetype)
 {
        if (unlikely(page_group_by_mobility_disabled &&
@@ -764,6 +837,75 @@ static int free_tail_pages_check(struct page *head_page, struct page *page)
        return 0;
 }
 
+static void __meminit __init_single_page(struct page *page, unsigned long pfn,
+                               unsigned long zone, int nid)
+{
+       set_page_links(page, zone, nid, pfn);
+       init_page_count(page);
+       page_mapcount_reset(page);
+       page_cpupid_reset_last(page);
+
+       INIT_LIST_HEAD(&page->lru);
+#ifdef WANT_PAGE_VIRTUAL
+       /* The shift won't overflow because ZONE_NORMAL is below 4G. */
+       if (!is_highmem_idx(zone))
+               set_page_address(page, __va(pfn << PAGE_SHIFT));
+#endif
+}
+
+static void __meminit __init_single_pfn(unsigned long pfn, unsigned long zone,
+                                       int nid)
+{
+       return __init_single_page(pfn_to_page(pfn), pfn, zone, nid);
+}
+
+#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
+static void init_reserved_page(unsigned long pfn)
+{
+       pg_data_t *pgdat;
+       int nid, zid;
+
+       if (!early_page_uninitialised(pfn))
+               return;
+
+       nid = early_pfn_to_nid(pfn);
+       pgdat = NODE_DATA(nid);
+
+       for (zid = 0; zid < MAX_NR_ZONES; zid++) {
+               struct zone *zone = &pgdat->node_zones[zid];
+
+               if (pfn >= zone->zone_start_pfn && pfn < zone_end_pfn(zone))
+                       break;
+       }
+       __init_single_pfn(pfn, zid, nid);
+}
+#else
+static inline void init_reserved_page(unsigned long pfn)
+{
+}
+#endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */
+
+/*
+ * Initialised pages do not have PageReserved set. This function is
+ * called for each range allocated by the bootmem allocator and
+ * marks the pages PageReserved. The remaining valid pages are later
+ * sent to the buddy page allocator.
+ */
+void __meminit reserve_bootmem_region(unsigned long start, unsigned long end)
+{
+       unsigned long start_pfn = PFN_DOWN(start);
+       unsigned long end_pfn = PFN_UP(end);
+
+       for (; start_pfn < end_pfn; start_pfn++) {
+               if (pfn_valid(start_pfn)) {
+                       struct page *page = pfn_to_page(start_pfn);
+
+                       init_reserved_page(start_pfn);
+                       SetPageReserved(page);
+               }
+       }
+}
+
 static bool free_pages_prepare(struct page *page, unsigned int order)
 {
        bool compound = PageCompound(page);
@@ -818,7 +960,8 @@ static void __free_pages_ok(struct page *page, unsigned int order)
        local_irq_restore(flags);
 }
 
-void __init __free_pages_bootmem(struct page *page, unsigned int order)
+static void __init __free_pages_boot_core(struct page *page,
+                                       unsigned long pfn, unsigned int order)
 {
        unsigned int nr_pages = 1 << order;
        struct page *p = page;
@@ -838,6 +981,223 @@ void __init __free_pages_bootmem(struct page *page, unsigned int order)
        __free_pages(page, order);
 }
 
+#if defined(CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID) || \
+       defined(CONFIG_HAVE_MEMBLOCK_NODE_MAP)
+/* Only safe to use early in boot when initialisation is single-threaded */
+static struct mminit_pfnnid_cache early_pfnnid_cache __meminitdata;
+
+int __meminit early_pfn_to_nid(unsigned long pfn)
+{
+       int nid;
+
+       /* The system will behave unpredictably otherwise */
+       BUG_ON(system_state != SYSTEM_BOOTING);
+
+       nid = __early_pfn_to_nid(pfn, &early_pfnnid_cache);
+       if (nid >= 0)
+               return nid;
+       /* just returns 0 */
+       return 0;
+}
+#endif
+
+#ifdef CONFIG_NODES_SPAN_OTHER_NODES
+static inline bool __meminit meminit_pfn_in_nid(unsigned long pfn, int node,
+                                       struct mminit_pfnnid_cache *state)
+{
+       int nid;
+
+       nid = __early_pfn_to_nid(pfn, state);
+       if (nid >= 0 && nid != node)
+               return false;
+       return true;
+}
+
+/* Only safe to use early in boot when initialisation is single-threaded */
+static inline bool __meminit early_pfn_in_nid(unsigned long pfn, int node)
+{
+       return meminit_pfn_in_nid(pfn, node, &early_pfnnid_cache);
+}
+
+#else
+
+static inline bool __meminit early_pfn_in_nid(unsigned long pfn, int node)
+{
+       return true;
+}
+static inline bool __meminit meminit_pfn_in_nid(unsigned long pfn, int node,
+                                       struct mminit_pfnnid_cache *state)
+{
+       return true;
+}
+#endif
+
+
+void __init __free_pages_bootmem(struct page *page, unsigned long pfn,
+                                                       unsigned int order)
+{
+       if (early_page_uninitialised(pfn))
+               return;
+       return __free_pages_boot_core(page, pfn, order);
+}
+
+#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
+static void __init deferred_free_range(struct page *page,
+                                       unsigned long pfn, int nr_pages)
+{
+       int i;
+
+       if (!page)
+               return;
+
+       /* Free a large naturally-aligned chunk if possible */
+       if (nr_pages == MAX_ORDER_NR_PAGES &&
+           (pfn & (MAX_ORDER_NR_PAGES-1)) == 0) {
+               set_pageblock_migratetype(page, MIGRATE_MOVABLE);
+               __free_pages_boot_core(page, pfn, MAX_ORDER-1);
+               return;
+       }
+
+       for (i = 0; i < nr_pages; i++, page++, pfn++)
+               __free_pages_boot_core(page, pfn, 0);
+}
+
+static __initdata DECLARE_RWSEM(pgdat_init_rwsem);
+
+/* Initialise remaining memory on a node */
+static int __init deferred_init_memmap(void *data)
+{
+       pg_data_t *pgdat = data;
+       int nid = pgdat->node_id;
+       struct mminit_pfnnid_cache nid_init_state = { };
+       unsigned long start = jiffies;
+       unsigned long nr_pages = 0;
+       unsigned long walk_start, walk_end;
+       int i, zid;
+       struct zone *zone;
+       unsigned long first_init_pfn = pgdat->first_deferred_pfn;
+       const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id);
+
+       if (first_init_pfn == ULONG_MAX) {
+               up_read(&pgdat_init_rwsem);
+               return 0;
+       }
+
+       /* Bind memory initialisation thread to a local node if possible */
+       if (!cpumask_empty(cpumask))
+               set_cpus_allowed_ptr(current, cpumask);
+
+       /* Sanity check boundaries */
+       BUG_ON(pgdat->first_deferred_pfn < pgdat->node_start_pfn);
+       BUG_ON(pgdat->first_deferred_pfn > pgdat_end_pfn(pgdat));
+       pgdat->first_deferred_pfn = ULONG_MAX;
+
+       /* Only the highest zone is deferred so find it */
+       for (zid = 0; zid < MAX_NR_ZONES; zid++) {
+               zone = pgdat->node_zones + zid;
+               if (first_init_pfn < zone_end_pfn(zone))
+                       break;
+       }
+
+       for_each_mem_pfn_range(i, nid, &walk_start, &walk_end, NULL) {
+               unsigned long pfn, end_pfn;
+               struct page *page = NULL;
+               struct page *free_base_page = NULL;
+               unsigned long free_base_pfn = 0;
+               int nr_to_free = 0;
+
+               end_pfn = min(walk_end, zone_end_pfn(zone));
+               pfn = first_init_pfn;
+               if (pfn < walk_start)
+                       pfn = walk_start;
+               if (pfn < zone->zone_start_pfn)
+                       pfn = zone->zone_start_pfn;
+
+               for (; pfn < end_pfn; pfn++) {
+                       if (!pfn_valid_within(pfn))
+                               goto free_range;
+
+                       /*
+                        * Ensure pfn_valid is checked every
+                        * MAX_ORDER_NR_PAGES for memory holes
+                        */
+                       if ((pfn & (MAX_ORDER_NR_PAGES - 1)) == 0) {
+                               if (!pfn_valid(pfn)) {
+                                       page = NULL;
+                                       goto free_range;
+                               }
+                       }
+
+                       if (!meminit_pfn_in_nid(pfn, nid, &nid_init_state)) {
+                               page = NULL;
+                               goto free_range;
+                       }
+
+                       /* Minimise pfn page lookups and scheduler checks */
+                       if (page && (pfn & (MAX_ORDER_NR_PAGES - 1)) != 0) {
+                               page++;
+                       } else {
+                               nr_pages += nr_to_free;
+                               deferred_free_range(free_base_page,
+                                               free_base_pfn, nr_to_free);
+                               free_base_page = NULL;
+                               free_base_pfn = nr_to_free = 0;
+
+                               page = pfn_to_page(pfn);
+                               cond_resched();
+                       }
+
+                       if (page->flags) {
+                               VM_BUG_ON(page_zone(page) != zone);
+                               goto free_range;
+                       }
+
+                       __init_single_page(page, pfn, zid, nid);
+                       if (!free_base_page) {
+                               free_base_page = page;
+                               free_base_pfn = pfn;
+                               nr_to_free = 0;
+                       }
+                       nr_to_free++;
+
+                       /* Where possible, batch up pages for a single free */
+                       continue;
+free_range:
+                       /* Free the current block of pages to allocator */
+                       nr_pages += nr_to_free;
+                       deferred_free_range(free_base_page, free_base_pfn,
+                                                               nr_to_free);
+                       free_base_page = NULL;
+                       free_base_pfn = nr_to_free = 0;
+               }
+
+               first_init_pfn = max(end_pfn, first_init_pfn);
+       }
+
+       /* Sanity check that the next zone really is unpopulated */
+       WARN_ON(++zid < MAX_NR_ZONES && populated_zone(++zone));
+
+       pr_info("node %d initialised, %lu pages in %ums\n", nid, nr_pages,
+                                       jiffies_to_msecs(jiffies - start));
+       up_read(&pgdat_init_rwsem);
+       return 0;
+}
+
+void __init page_alloc_init_late(void)
+{
+       int nid;
+
+       for_each_node_state(nid, N_MEMORY) {
+               down_read(&pgdat_init_rwsem);
+               kthread_run(deferred_init_memmap, NODE_DATA(nid), "pgdatinit%d", nid);
+       }
+
+       /* Block until all are initialised */
+       down_write(&pgdat_init_rwsem);
+       up_write(&pgdat_init_rwsem);
+}
+#endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */
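
Note: page_alloc_init_late() uses pgdat_init_rwsem as a wait-for-all barrier. Each per-node kthread effectively owns one read hold (taken by the spawner, released by the thread when it finishes), so the subsequent down_write() only succeeds once every node is done. A hedged stand-alone sketch of the same idiom with a hypothetical worker; as in the code above, the read hold is released by a different task than the one that took it:

    #include <linux/kthread.h>
    #include <linux/rwsem.h>

    static DECLARE_RWSEM(done_sem);

    static int one_worker(void *data)
    {
            /* per-node work would go here */
            up_read(&done_sem);             /* drop the hold taken on our behalf */
            return 0;
    }

    static void run_and_wait(int nr_workers)
    {
            int i;

            for (i = 0; i < nr_workers; i++) {
                    down_read(&done_sem);   /* one read hold per worker */
                    kthread_run(one_worker, NULL, "worker/%d", i);
            }

            down_write(&done_sem);          /* blocks until every up_read() ran */
            up_write(&done_sem);
    }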
+
 #ifdef CONFIG_CMA
 /* Free whole pageblock and set its migration type to MIGRATE_CMA. */
 void __init init_cma_reserved_pageblock(struct page *page)
@@ -4150,6 +4510,9 @@ static void setup_zone_migrate_reserve(struct zone *zone)
        zone->nr_migrate_reserve_block = reserve;
 
        for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
+               if (!early_page_nid_uninitialised(pfn, zone_to_nid(zone)))
+                       return;
+
                if (!pfn_valid(pfn))
                        continue;
                page = pfn_to_page(pfn);
@@ -4212,15 +4575,16 @@ static void setup_zone_migrate_reserve(struct zone *zone)
 void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
                unsigned long start_pfn, enum memmap_context context)
 {
-       struct page *page;
+       pg_data_t *pgdat = NODE_DATA(nid);
        unsigned long end_pfn = start_pfn + size;
        unsigned long pfn;
        struct zone *z;
+       unsigned long nr_initialised = 0;
 
        if (highest_memmap_pfn < end_pfn - 1)
                highest_memmap_pfn = end_pfn - 1;
 
-       z = &NODE_DATA(nid)->node_zones[zone];
+       z = &pgdat->node_zones[zone];
        for (pfn = start_pfn; pfn < end_pfn; pfn++) {
                /*
                 * There can be holes in boot-time mem_map[]s
@@ -4232,14 +4596,11 @@ void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
                                continue;
                        if (!early_pfn_in_nid(pfn, nid))
                                continue;
+                       if (!update_defer_init(pgdat, pfn, end_pfn,
+                                               &nr_initialised))
+                               break;
                }
-               page = pfn_to_page(pfn);
-               set_page_links(page, zone, nid, pfn);
-               mminit_verify_page_links(page, zone, nid, pfn);
-               init_page_count(page);
-               page_mapcount_reset(page);
-               page_cpupid_reset_last(page);
-               SetPageReserved(page);
+
                /*
                 * Mark the block movable so that blocks are reserved for
                 * movable at startup. This will force kernel allocations
@@ -4254,17 +4615,14 @@ void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
                 * check here not to call set_pageblock_migratetype() against
                 * pfn out of zone.
                 */
-               if ((z->zone_start_pfn <= pfn)
-                   && (pfn < zone_end_pfn(z))
-                   && !(pfn & (pageblock_nr_pages - 1)))
-                       set_pageblock_migratetype(page, MIGRATE_MOVABLE);
+               if (!(pfn & (pageblock_nr_pages - 1))) {
+                       struct page *page = pfn_to_page(pfn);
 
-               INIT_LIST_HEAD(&page->lru);
-#ifdef WANT_PAGE_VIRTUAL
-               /* The shift won't overflow because ZONE_NORMAL is below 4G. */
-               if (!is_highmem_idx(zone))
-                       set_page_address(page, __va(pfn << PAGE_SHIFT));
-#endif
+                       __init_single_page(page, pfn, zone, nid);
+                       set_pageblock_migratetype(page, MIGRATE_MOVABLE);
+               } else {
+                       __init_single_pfn(pfn, zone, nid);
+               }
        }
 }
 
@@ -4522,57 +4880,30 @@ int __meminit init_currently_empty_zone(struct zone *zone,
 
 #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
 #ifndef CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID
+
 /*
  * Required by SPARSEMEM. Given a PFN, return what node the PFN is on.
  */
-int __meminit __early_pfn_to_nid(unsigned long pfn)
+int __meminit __early_pfn_to_nid(unsigned long pfn,
+                                       struct mminit_pfnnid_cache *state)
 {
        unsigned long start_pfn, end_pfn;
        int nid;
-       /*
-        * NOTE: The following SMP-unsafe globals are only used early in boot
-        * when the kernel is running single-threaded.
-        */
-       static unsigned long __meminitdata last_start_pfn, last_end_pfn;
-       static int __meminitdata last_nid;
 
-       if (last_start_pfn <= pfn && pfn < last_end_pfn)
-               return last_nid;
+       if (state->last_start <= pfn && pfn < state->last_end)
+               return state->last_nid;
 
        nid = memblock_search_pfn_nid(pfn, &start_pfn, &end_pfn);
        if (nid != -1) {
-               last_start_pfn = start_pfn;
-               last_end_pfn = end_pfn;
-               last_nid = nid;
+               state->last_start = start_pfn;
+               state->last_end = end_pfn;
+               state->last_nid = nid;
        }
 
        return nid;
 }
 #endif /* CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID */
 
-int __meminit early_pfn_to_nid(unsigned long pfn)
-{
-       int nid;
-
-       nid = __early_pfn_to_nid(pfn);
-       if (nid >= 0)
-               return nid;
-       /* just returns 0 */
-       return 0;
-}
-
-#ifdef CONFIG_NODES_SPAN_OTHER_NODES
-bool __meminit early_pfn_in_nid(unsigned long pfn, int node)
-{
-       int nid;
-
-       nid = __early_pfn_to_nid(pfn);
-       if (nid >= 0 && nid != node)
-               return false;
-       return true;
-}
-#endif
-
 /**
  * free_bootmem_with_active_regions - Call memblock_free_early_nid for each active range
  * @nid: The node to free memory on. If MAX_NUMNODES, all nodes are freed.
@@ -5090,6 +5421,7 @@ void __paginginit free_area_init_node(int nid, unsigned long *zones_size,
        /* pg_data_t should be reset to zero when it's allocated */
        WARN_ON(pgdat->nr_zones || pgdat->classzone_idx);
 
+       reset_deferred_meminit(pgdat);
        pgdat->node_id = nid;
        pgdat->node_start_pfn = node_start_pfn;
 #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
index 0993f5f36b011ec567e3b559f50bb4dafde90479..bd5f842b56d26aca3cf24225884d5a2eacd1580c 100644 (file)
@@ -310,4 +310,4 @@ static int __init pageowner_init(void)
 
        return 0;
 }
-module_init(pageowner_init)
+late_initcall(pageowner_init)
index 983b78694c4637adcd617de8085498acecfbd288..3e5f8f29c28640e44af5f5f9d1c3553986064588 100644 (file)
@@ -855,7 +855,7 @@ void __init setup_kmalloc_cache_index_table(void)
        }
 }
 
-static void new_kmalloc_cache(int idx, unsigned long flags)
+static void __init new_kmalloc_cache(int idx, unsigned long flags)
 {
        kmalloc_caches[idx] = create_kmalloc_cache(kmalloc_info[idx].name,
                                        kmalloc_info[idx].size, flags);
index a7e72103f23bfbf81aad771343dac77c4f4d1958..41e4581af7c512fe49ad8a1677a501f290e14807 100644 (file)
@@ -2032,7 +2032,7 @@ static int swap_show(struct seq_file *swap, void *v)
        }
 
        file = si->swap_file;
-       len = seq_path(swap, &file->f_path, " \t\n\\");
+       len = seq_file_path(swap, file, " \t\n\\");
        seq_printf(swap, "%*s%s\t%u\t%u\t%d\n",
                        len < 40 ? 40 - len : 1, " ",
                        S_ISBLK(file_inode(file)->i_mode) ?
index 6f4c4c88db84ecb084ca5553113d9ad2963e4985..498454b3c06c3ddf0e8c3989e23bef9360587544 100644 (file)
@@ -843,7 +843,8 @@ static struct p9_req_t *p9_client_zc_rpc(struct p9_client *c, int8_t type,
        if (err < 0) {
                if (err == -EIO)
                        c->status = Disconnected;
-               goto reterr;
+               if (err != -ERESTARTSYS)
+                       goto reterr;
        }
        if (req->status == REQ_STATUS_ERROR) {
                p9_debug(P9_DEBUG_ERROR, "req_status error %d\n", req->t_err);
@@ -1582,6 +1583,10 @@ p9_client_read(struct p9_fid *fid, u64 offset, struct iov_iter *to, int *err)
                        p9_free_req(clnt, req);
                        break;
                }
+               if (rsize < count) {
+                       pr_err("bogus RREAD count (%d > %d)\n", count, rsize);
+                       count = rsize;
+               }
 
                p9_debug(P9_DEBUG_9P, "<<< RREAD count %d\n", count);
                if (!count) {
@@ -1647,6 +1652,11 @@ p9_client_write(struct p9_fid *fid, u64 offset, struct iov_iter *from, int *err)
                if (*err) {
                        trace_9p_protocol_dump(clnt, req->rc);
                        p9_free_req(clnt, req);
+                       break;
+               }
+               if (rsize < count) {
+                       pr_err("bogus RWRITE count (%d > %d)\n", count, rsize);
+                       count = rsize;
                }
 
                p9_debug(P9_DEBUG_9P, "<<< RWRITE count %d\n", count);
index 9c891d0412a298428e7abd306347beee749f5b0d..ae3a47f9d1d5298406ca33900624fe49c9c85719 100644 (file)
@@ -57,7 +57,7 @@ static const struct proto_ops ax25_proto_ops;
 
 static void ax25_free_sock(struct sock *sk)
 {
-       ax25_cb_put(ax25_sk(sk));
+       ax25_cb_put(sk_to_ax25(sk));
 }
 
 /*
@@ -306,7 +306,7 @@ void ax25_destroy_socket(ax25_cb *ax25)
                while ((skb = skb_dequeue(&ax25->sk->sk_receive_queue)) != NULL) {
                        if (skb->sk != ax25->sk) {
                                /* A pending connection */
-                               ax25_cb *sax25 = ax25_sk(skb->sk);
+                               ax25_cb *sax25 = sk_to_ax25(skb->sk);
 
                                /* Queue the unaccepted socket for death */
                                sock_orphan(skb->sk);
@@ -551,7 +551,7 @@ static int ax25_setsockopt(struct socket *sock, int level, int optname,
                return -EFAULT;
 
        lock_sock(sk);
-       ax25 = ax25_sk(sk);
+       ax25 = sk_to_ax25(sk);
 
        switch (optname) {
        case AX25_WINDOW:
@@ -697,7 +697,7 @@ static int ax25_getsockopt(struct socket *sock, int level, int optname,
        length = min_t(unsigned int, maxlen, sizeof(int));
 
        lock_sock(sk);
-       ax25 = ax25_sk(sk);
+       ax25 = sk_to_ax25(sk);
 
        switch (optname) {
        case AX25_WINDOW:
@@ -796,7 +796,7 @@ out:
 static struct proto ax25_proto = {
        .name     = "AX25",
        .owner    = THIS_MODULE,
-       .obj_size = sizeof(struct sock),
+       .obj_size = sizeof(struct ax25_sock),
 };
 
 static int ax25_create(struct net *net, struct socket *sock, int protocol,
@@ -858,7 +858,7 @@ static int ax25_create(struct net *net, struct socket *sock, int protocol,
        if (sk == NULL)
                return -ENOMEM;
 
-       ax25 = sk->sk_protinfo = ax25_create_cb();
+       ax25 = ax25_sk(sk)->cb = ax25_create_cb();
        if (!ax25) {
                sk_free(sk);
                return -ENOMEM;
@@ -910,7 +910,7 @@ struct sock *ax25_make_new(struct sock *osk, struct ax25_dev *ax25_dev)
        sk->sk_state    = TCP_ESTABLISHED;
        sock_copy_flags(sk, osk);
 
-       oax25 = ax25_sk(osk);
+       oax25 = sk_to_ax25(osk);
 
        ax25->modulus = oax25->modulus;
        ax25->backoff = oax25->backoff;
@@ -938,7 +938,7 @@ struct sock *ax25_make_new(struct sock *osk, struct ax25_dev *ax25_dev)
                }
        }
 
-       sk->sk_protinfo = ax25;
+       ax25_sk(sk)->cb = ax25;
        sk->sk_destruct = ax25_free_sock;
        ax25->sk    = sk;
 
@@ -956,7 +956,7 @@ static int ax25_release(struct socket *sock)
        sock_hold(sk);
        sock_orphan(sk);
        lock_sock(sk);
-       ax25 = ax25_sk(sk);
+       ax25 = sk_to_ax25(sk);
 
        if (sk->sk_type == SOCK_SEQPACKET) {
                switch (ax25->state) {
@@ -1066,7 +1066,7 @@ static int ax25_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
 
        lock_sock(sk);
 
-       ax25 = ax25_sk(sk);
+       ax25 = sk_to_ax25(sk);
        if (!sock_flag(sk, SOCK_ZAPPED)) {
                err = -EINVAL;
                goto out;
@@ -1113,7 +1113,7 @@ static int __must_check ax25_connect(struct socket *sock,
        struct sockaddr *uaddr, int addr_len, int flags)
 {
        struct sock *sk = sock->sk;
-       ax25_cb *ax25 = ax25_sk(sk), *ax25t;
+       ax25_cb *ax25 = sk_to_ax25(sk), *ax25t;
        struct full_sockaddr_ax25 *fsa = (struct full_sockaddr_ax25 *)uaddr;
        ax25_digi *digi = NULL;
        int ct = 0, err = 0;
@@ -1394,7 +1394,7 @@ static int ax25_getname(struct socket *sock, struct sockaddr *uaddr,
 
        memset(fsa, 0, sizeof(*fsa));
        lock_sock(sk);
-       ax25 = ax25_sk(sk);
+       ax25 = sk_to_ax25(sk);
 
        if (peer != 0) {
                if (sk->sk_state != TCP_ESTABLISHED) {
@@ -1446,7 +1446,7 @@ static int ax25_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
                return -EINVAL;
 
        lock_sock(sk);
-       ax25 = ax25_sk(sk);
+       ax25 = sk_to_ax25(sk);
 
        if (sock_flag(sk, SOCK_ZAPPED)) {
                err = -EADDRNOTAVAIL;
@@ -1621,7 +1621,7 @@ static int ax25_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
        if (skb == NULL)
                goto out;
 
-       if (!ax25_sk(sk)->pidincl)
+       if (!sk_to_ax25(sk)->pidincl)
                skb_pull(skb, 1);               /* Remove PID */
 
        skb_reset_transport_header(skb);
@@ -1762,7 +1762,7 @@ static int ax25_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
 
        case SIOCAX25GETINFO:
        case SIOCAX25GETINFOOLD: {
-               ax25_cb *ax25 = ax25_sk(sk);
+               ax25_cb *ax25 = sk_to_ax25(sk);
                struct ax25_info_struct ax25_info;
 
                ax25_info.t1        = ax25->t1   / HZ;
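
Note: throughout this file the ax25 control block is no longer reached through sock->sk_protinfo; the socket allocation is enlarged (see the .obj_size change above) and the cb pointer lives in the socket object itself, accessed via ax25_sk()/sk_to_ax25(). A hedged reconstruction of the accessor shape these call sites imply; the real definitions belong to the net/ax25.h hunk, which is not shown in this excerpt:

    /* Hypothetical reconstruction, for illustration only. */
    struct ax25_sock {
            struct sock     sk;             /* must be first */
            struct ax25_cb  *cb;
    };

    static inline struct ax25_sock *ax25_sk(const struct sock *sk)
    {
            return (struct ax25_sock *)sk;
    }

    static inline struct ax25_cb *sk_to_ax25(const struct sock *sk)
    {
            return ax25_sk(sk)->cb;
    }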
index 29a3687237aa4288ed078e222c80b8fa85021a8f..bb5a0e4e98d9df09ec535a20ea73253817fe64db 100644 (file)
@@ -353,7 +353,7 @@ static int ax25_rcv(struct sk_buff *skb, struct net_device *dev,
                        return 0;
                }
 
-               ax25 = ax25_sk(make);
+               ax25 = sk_to_ax25(make);
                skb_set_owner_r(skb, make);
                skb_queue_head(&sk->sk_receive_queue, skb);
 
index 9070dfd6b4adcd247c482589c27d5d4519302b7c..f1a117f8cad22a245044014eae118e09bcdfe271 100644 (file)
@@ -915,6 +915,7 @@ static int hidp_session_new(struct hidp_session **out, const bdaddr_t *bdaddr,
        session->conn = l2cap_conn_get(conn);
        session->user.probe = hidp_session_probe;
        session->user.remove = hidp_session_remove;
+       INIT_LIST_HEAD(&session->user.list);
        session->ctrl_sock = ctrl_sock;
        session->intr_sock = intr_sock;
        skb_queue_head_init(&session->ctrl_transmit);
index 51594fb7b9e72f80c06af1f04e9fd89b01aba9ba..45fffa4136421b8dd5ab301cf0a4ec529b36ff8e 100644 (file)
@@ -1634,7 +1634,7 @@ void l2cap_unregister_user(struct l2cap_conn *conn, struct l2cap_user *user)
        if (list_empty(&user->list))
                goto out_unlock;
 
-       list_del(&user->list);
+       list_del_init(&user->list);
        user->remove(conn, user);
 
 out_unlock:
@@ -1648,7 +1648,7 @@ static void l2cap_unregister_all_users(struct l2cap_conn *conn)
 
        while (!list_empty(&conn->users)) {
                user = list_first_entry(&conn->users, struct l2cap_user, list);
-               list_del(&user->list);
+               list_del_init(&user->list);
                user->remove(conn, user);
        }
 }
index 79e8f71aef5be312ab7aa547293fd047cf42e97c..cb7db320dd276aa0107d8e484270e3e8fb61dd6a 100644 (file)
@@ -352,8 +352,8 @@ ceph_parse_options(char *options, const char *dev_name,
        /* start with defaults */
        opt->flags = CEPH_OPT_DEFAULT;
        opt->osd_keepalive_timeout = CEPH_OSD_KEEPALIVE_DEFAULT;
-       opt->mount_timeout = CEPH_MOUNT_TIMEOUT_DEFAULT; /* seconds */
-       opt->osd_idle_ttl = CEPH_OSD_IDLE_TTL_DEFAULT;   /* seconds */
+       opt->mount_timeout = CEPH_MOUNT_TIMEOUT_DEFAULT;
+       opt->osd_idle_ttl = CEPH_OSD_IDLE_TTL_DEFAULT;
 
        /* get mon ip(s) */
        /* ip1[:port1][,ip2[:port2]...] */
@@ -439,13 +439,32 @@ ceph_parse_options(char *options, const char *dev_name,
                        pr_warn("ignoring deprecated osdtimeout option\n");
                        break;
                case Opt_osdkeepalivetimeout:
-                       opt->osd_keepalive_timeout = intval;
+                       /* 0 isn't well defined right now, reject it */
+                       if (intval < 1 || intval > INT_MAX / 1000) {
+                               pr_err("osdkeepalive out of range\n");
+                               err = -EINVAL;
+                               goto out;
+                       }
+                       opt->osd_keepalive_timeout =
+                                       msecs_to_jiffies(intval * 1000);
                        break;
                case Opt_osd_idle_ttl:
-                       opt->osd_idle_ttl = intval;
+                       /* 0 isn't well defined right now, reject it */
+                       if (intval < 1 || intval > INT_MAX / 1000) {
+                               pr_err("osd_idle_ttl out of range\n");
+                               err = -EINVAL;
+                               goto out;
+                       }
+                       opt->osd_idle_ttl = msecs_to_jiffies(intval * 1000);
                        break;
                case Opt_mount_timeout:
-                       opt->mount_timeout = intval;
+                       /* 0 is "wait forever" (i.e. infinite timeout) */
+                       if (intval < 0 || intval > INT_MAX / 1000) {
+                               pr_err("mount_timeout out of range\n");
+                               err = -EINVAL;
+                               goto out;
+                       }
+                       opt->mount_timeout = msecs_to_jiffies(intval * 1000);
                        break;
 
                case Opt_share:
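
Note: the new bounds checks exist because the option value is given in seconds but converted via msecs_to_jiffies(intval * 1000), so intval must stay at or below INT_MAX / 1000 to keep the multiplication from overflowing:

    INT_MAX / 1000 = 2147483647 / 1000 = 2147483 s  (roughly 24.8 days)

Anything larger is rejected with -EINVAL instead of silently wrapping, and mount_timeout additionally keeps 0 as "wait forever".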
@@ -512,12 +531,14 @@ int ceph_print_client_options(struct seq_file *m, struct ceph_client *client)
                seq_puts(m, "notcp_nodelay,");
 
        if (opt->mount_timeout != CEPH_MOUNT_TIMEOUT_DEFAULT)
-               seq_printf(m, "mount_timeout=%d,", opt->mount_timeout);
+               seq_printf(m, "mount_timeout=%d,",
+                          jiffies_to_msecs(opt->mount_timeout) / 1000);
        if (opt->osd_idle_ttl != CEPH_OSD_IDLE_TTL_DEFAULT)
-               seq_printf(m, "osd_idle_ttl=%d,", opt->osd_idle_ttl);
+               seq_printf(m, "osd_idle_ttl=%d,",
+                          jiffies_to_msecs(opt->osd_idle_ttl) / 1000);
        if (opt->osd_keepalive_timeout != CEPH_OSD_KEEPALIVE_DEFAULT)
                seq_printf(m, "osdkeepalivetimeout=%d,",
-                          opt->osd_keepalive_timeout);
+                   jiffies_to_msecs(opt->osd_keepalive_timeout) / 1000);
 
        /* drop redundant comma */
        if (m->count != pos)
@@ -626,8 +647,8 @@ static int have_mon_and_osd_map(struct ceph_client *client)
  */
 int __ceph_open_session(struct ceph_client *client, unsigned long started)
 {
-       int err;
-       unsigned long timeout = client->options->mount_timeout * HZ;
+       unsigned long timeout = client->options->mount_timeout;
+       long err;
 
        /* open session, and wait for mon and osd maps */
        err = ceph_monc_open_session(&client->monc);
@@ -635,16 +656,15 @@ int __ceph_open_session(struct ceph_client *client, unsigned long started)
                return err;
 
        while (!have_mon_and_osd_map(client)) {
-               err = -EIO;
                if (timeout && time_after_eq(jiffies, started + timeout))
-                       return err;
+                       return -ETIMEDOUT;
 
                /* wait */
                dout("mount waiting for mon_map\n");
                err = wait_event_interruptible_timeout(client->auth_wq,
                        have_mon_and_osd_map(client) || (client->auth_err < 0),
-                       timeout);
-               if (err == -EINTR || err == -ERESTARTSYS)
+                       ceph_timeout_jiffies(timeout));
+               if (err < 0)
                        return err;
                if (client->auth_err < 0)
                        return client->auth_err;
@@ -721,5 +741,5 @@ module_exit(exit_ceph_lib);
 MODULE_AUTHOR("Sage Weil <sage@newdream.net>");
 MODULE_AUTHOR("Yehuda Sadeh <yehuda@hq.newdream.net>");
 MODULE_AUTHOR("Patience Warnick <patience@newdream.net>");
-MODULE_DESCRIPTION("Ceph filesystem for Linux");
+MODULE_DESCRIPTION("Ceph core library");
 MODULE_LICENSE("GPL");
index 9d84ce4ea0dfa8928cc498b9f0515735666b7857..80d7c3a97cb84355e82e9d8f4c83fbf5b0d82893 100644 (file)
@@ -1,15 +1,11 @@
-
 #ifdef __KERNEL__
 # include <linux/slab.h>
+# include <linux/crush/crush.h>
 #else
-# include <stdlib.h>
-# include <assert.h>
-# define kfree(x) do { if (x) free(x); } while (0)
-# define BUG_ON(x) assert(!(x))
+# include "crush_compat.h"
+# include "crush.h"
 #endif
 
-#include <linux/crush/crush.h>
-
 const char *crush_bucket_alg_name(int alg)
 {
        switch (alg) {
@@ -134,6 +130,9 @@ void crush_destroy(struct crush_map *map)
                kfree(map->rules);
        }
 
+#ifndef __KERNEL__
+       kfree(map->choose_tries);
+#endif
        kfree(map);
 }
 
index 6192c7fc958ce84f6dc19b4005cf3580add5f88a..aae534c901a43169d73a8dc8c043dab47cc69941 100644 (file)
  *
  */
 
-#if defined(__linux__)
-#include <linux/types.h>
-#elif defined(__FreeBSD__)
-#include <sys/types.h>
-#endif
-
 #ifndef CEPH_CRUSH_LN_H
 #define CEPH_CRUSH_LN_H
 
+#ifdef __KERNEL__
+# include <linux/types.h>
+#else
+# include "crush_compat.h"
+#endif
 
-// RH_LH_tbl[2*k] = 2^48/(1.0+k/128.0)
-// RH_LH_tbl[2*k+1] = 2^48*log2(1.0+k/128.0)
-
-static int64_t __RH_LH_tbl[128*2+2] = {
+/*
+ * RH_LH_tbl[2*k] = 2^48/(1.0+k/128.0)
+ * RH_LH_tbl[2*k+1] = 2^48*log2(1.0+k/128.0)
+ */
+static __s64 __RH_LH_tbl[128*2+2] = {
   0x0001000000000000ll, 0x0000000000000000ll, 0x0000fe03f80fe040ll, 0x000002dfca16dde1ll,
   0x0000fc0fc0fc0fc1ll, 0x000005b9e5a170b4ll, 0x0000fa232cf25214ll, 0x0000088e68ea899all,
   0x0000f83e0f83e0f9ll, 0x00000b5d69bac77ell, 0x0000f6603d980f67ll, 0x00000e26fd5c8555ll,
@@ -89,11 +89,12 @@ static int64_t __RH_LH_tbl[128*2+2] = {
   0x0000820820820821ll, 0x0000fa2f045e7832ll, 0x000081848da8faf1ll, 0x0000fba577877d7dll,
   0x0000810204081021ll, 0x0000fd1a708bbe11ll, 0x0000808080808081ll, 0x0000fe8df263f957ll,
   0x0000800000000000ll, 0x0000ffff00000000ll,
-  };
-
+};
 
-    // LL_tbl[k] = 2^48*log2(1.0+k/2^15);
-static int64_t __LL_tbl[256] = {
+/*
+ * LL_tbl[k] = 2^48*log2(1.0+k/2^15)
+ */
+static __s64 __LL_tbl[256] = {
   0x0000000000000000ull, 0x00000002e2a60a00ull, 0x000000070cb64ec5ull, 0x00000009ef50ce67ull,
   0x0000000cd1e588fdull, 0x0000000fb4747e9cull, 0x0000001296fdaf5eull, 0x0000001579811b58ull,
   0x000000185bfec2a1ull, 0x0000001b3e76a552ull, 0x0000001e20e8c380ull, 0x0000002103551d43ull,
@@ -160,7 +161,4 @@ static int64_t __LL_tbl[256] = {
   0x000002d4562d2ec6ull, 0x000002d73330209dull, 0x000002da102d63b0ull, 0x000002dced24f814ull,
 };
 
-
-
-
 #endif
index 5bb63e37a8a10f3419a399b32339cf5baf279762..ed123af49eba563a004d5b69e8b08c648480cb59 100644 (file)
@@ -1,6 +1,8 @@
-
-#include <linux/types.h>
-#include <linux/crush/hash.h>
+#ifdef __KERNEL__
+# include <linux/crush/hash.h>
+#else
+# include "hash.h"
+#endif
 
 /*
  * Robert Jenkins' function for mixing 32-bit values
index 5b47736d27d94584bf33b2e60af56b5e76fc141c..393bfb22d5bbafd83c20f5953b98c6709b637452 100644 (file)
@@ -1,27 +1,31 @@
+/*
+ * Ceph - scalable distributed file system
+ *
+ * Copyright (C) 2015 Intel Corporation All Rights Reserved
+ *
+ * This is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License version 2.1, as published by the Free Software
+ * Foundation.  See file COPYING.
+ *
+ */
 
 #ifdef __KERNEL__
 # include <linux/string.h>
 # include <linux/slab.h>
 # include <linux/bug.h>
 # include <linux/kernel.h>
-# ifndef dprintk
-#  define dprintk(args...)
-# endif
+# include <linux/crush/crush.h>
+# include <linux/crush/hash.h>
 #else
-# include <string.h>
-# include <stdio.h>
-# include <stdlib.h>
-# include <assert.h>
-# define BUG_ON(x) assert(!(x))
-# define dprintk(args...) /* printf(args) */
-# define kmalloc(x, f) malloc(x)
-# define kfree(x) free(x)
+# include "crush_compat.h"
+# include "crush.h"
+# include "hash.h"
 #endif
-
-#include <linux/crush/crush.h>
-#include <linux/crush/hash.h>
 #include "crush_ln_table.h"
 
+#define dprintk(args...) /* printf(args) */
+
 /*
  * Implement the core CRUSH mapping algorithm.
  */
@@ -139,7 +143,7 @@ static int bucket_list_choose(struct crush_bucket_list *bucket,
        int i;
 
        for (i = bucket->h.size-1; i >= 0; i--) {
-               __u64 w = crush_hash32_4(bucket->h.hash,x, bucket->h.items[i],
+               __u64 w = crush_hash32_4(bucket->h.hash, x, bucket->h.items[i],
                                         r, bucket->h.id);
                w &= 0xffff;
                dprintk("list_choose i=%d x=%d r=%d item %d weight %x "
@@ -238,43 +242,46 @@ static int bucket_straw_choose(struct crush_bucket_straw *bucket,
        return bucket->h.items[high];
 }
 
-// compute 2^44*log2(input+1)
-uint64_t crush_ln(unsigned xin)
+/* compute 2^44*log2(input+1) */
+static __u64 crush_ln(unsigned int xin)
 {
-    unsigned x=xin, x1;
-    int iexpon, index1, index2;
-    uint64_t RH, LH, LL, xl64, result;
+       unsigned int x = xin, x1;
+       int iexpon, index1, index2;
+       __u64 RH, LH, LL, xl64, result;
 
-    x++;
+       x++;
 
-    // normalize input
-    iexpon = 15;
-    while(!(x&0x18000)) { x<<=1; iexpon--; }
+       /* normalize input */
+       iexpon = 15;
+       while (!(x & 0x18000)) {
+               x <<= 1;
+               iexpon--;
+       }
 
-    index1 = (x>>8)<<1;
-    // RH ~ 2^56/index1
-    RH = __RH_LH_tbl[index1 - 256];
-    // LH ~ 2^48 * log2(index1/256)
-    LH = __RH_LH_tbl[index1 + 1 - 256];
+       index1 = (x >> 8) << 1;
+       /* RH ~ 2^56/index1 */
+       RH = __RH_LH_tbl[index1 - 256];
+       /* LH ~ 2^48 * log2(index1/256) */
+       LH = __RH_LH_tbl[index1 + 1 - 256];
 
-    // RH*x ~ 2^48 * (2^15 + xf), xf<2^8
-    xl64 = (int64_t)x * RH;
-    xl64 >>= 48;
-    x1 = xl64;
+       /* RH*x ~ 2^48 * (2^15 + xf), xf<2^8 */
+       xl64 = (__s64)x * RH;
+       xl64 >>= 48;
+       x1 = xl64;
 
-    result = iexpon;
-    result <<= (12 + 32);
+       result = iexpon;
+       result <<= (12 + 32);
 
-    index2 = x1 & 0xff;
-    // LL ~ 2^48*log2(1.0+index2/2^15)
-    LL = __LL_tbl[index2];
+       index2 = x1 & 0xff;
+       /* LL ~ 2^48*log2(1.0+index2/2^15) */
+       LL = __LL_tbl[index2];
 
-    LH = LH + LL;
+       LH = LH + LL;
 
-    LH >>= (48-12 - 32);
-    result += LH;
+       LH >>= (48 - 12 - 32);
+       result += LH;
 
-    return result;
+       return result;
 }
 
 
@@ -290,9 +297,9 @@ uint64_t crush_ln(unsigned xin)
 static int bucket_straw2_choose(struct crush_bucket_straw2 *bucket,
                                int x, int r)
 {
-       unsigned i, high = 0;
-       unsigned u;
-       unsigned w;
+       unsigned int i, high = 0;
+       unsigned int u;
+       unsigned int w;
        __s64 ln, draw, high_draw = 0;
 
        for (i = 0; i < bucket->h.size; i++) {
@@ -567,6 +574,10 @@ reject:
                out[outpos] = item;
                outpos++;
                count--;
+#ifndef __KERNEL__
+               if (map->choose_tries && ftotal <= map->choose_total_tries)
+                       map->choose_tries[ftotal]++;
+#endif
        }
 
        dprintk("CHOOSE returns %d\n", outpos);
@@ -610,6 +621,20 @@ static void crush_choose_indep(const struct crush_map *map,
        }
 
        for (ftotal = 0; left > 0 && ftotal < tries; ftotal++) {
+#ifdef DEBUG_INDEP
+               if (out2 && ftotal) {
+                       dprintk("%u %d a: ", ftotal, left);
+                       for (rep = outpos; rep < endpos; rep++) {
+                               dprintk(" %d", out[rep]);
+                       }
+                       dprintk("\n");
+                       dprintk("%u %d b: ", ftotal, left);
+                       for (rep = outpos; rep < endpos; rep++) {
+                               dprintk(" %d", out2[rep]);
+                       }
+                       dprintk("\n");
+               }
+#endif
                for (rep = outpos; rep < endpos; rep++) {
                        if (out[rep] != CRUSH_ITEM_UNDEF)
                                continue;
@@ -726,6 +751,24 @@ static void crush_choose_indep(const struct crush_map *map,
                        out2[rep] = CRUSH_ITEM_NONE;
                }
        }
+#ifndef __KERNEL__
+       if (map->choose_tries && ftotal <= map->choose_total_tries)
+               map->choose_tries[ftotal]++;
+#endif
+#ifdef DEBUG_INDEP
+       if (out2) {
+               dprintk("%u %d a: ", ftotal, left);
+               for (rep = outpos; rep < endpos; rep++) {
+                       dprintk(" %d", out[rep]);
+               }
+               dprintk("\n");
+               dprintk("%u %d b: ", ftotal, left);
+               for (rep = outpos; rep < endpos; rep++) {
+                       dprintk(" %d", out2[rep]);
+               }
+               dprintk("\n");
+       }
+#endif
 }
 
 /**
@@ -790,8 +833,15 @@ int crush_do_rule(const struct crush_map *map,
 
                switch (curstep->op) {
                case CRUSH_RULE_TAKE:
-                       w[0] = curstep->arg1;
-                       wsize = 1;
+                       if ((curstep->arg1 >= 0 &&
+                            curstep->arg1 < map->max_devices) ||
+                           (-1-curstep->arg1 < map->max_buckets &&
+                            map->buckets[-1-curstep->arg1])) {
+                               w[0] = curstep->arg1;
+                               wsize = 1;
+                       } else {
+                               dprintk(" bad take value %d\n", curstep->arg1);
+                       }
                        break;
 
                case CRUSH_RULE_SET_CHOOSE_TRIES:
@@ -877,7 +927,7 @@ int crush_do_rule(const struct crush_map *map,
                                                0);
                                } else {
                                        out_size = ((numrep < (result_max-osize)) ?
-                                                    numrep : (result_max-osize));
+                                                   numrep : (result_max-osize));
                                        crush_choose_indep(
                                                map,
                                                map->buckets[-1-w[i]],
@@ -923,5 +973,3 @@ int crush_do_rule(const struct crush_map *map,
        }
        return result_len;
 }
-
-
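
For readers following the straw2 changes above: bucket_straw2_choose() gives every item a draw proportional to log(u)/weight, where u is a hashed 16-bit value, and keeps the item with the largest draw; crush_ln() is only a fixed-point stand-in for the logarithm. A rough user-space illustration of that selection rule, using ordinary libm floating point instead of the table-driven crush_ln() (the function below is illustrative only and assumes strictly positive weights):

#include <math.h>
#include <stdlib.h>

/* Pick an index with probability proportional to weights[i]:
 * the largest log(u)/w wins, mirroring bucket_straw2_choose(). */
static int straw2_pick(const double *weights, int n)
{
	double best_draw = -INFINITY;
	int best = -1;
	int i;

	for (i = 0; i < n; i++) {
		/* uniform in (0, 1); the kernel derives u from a hash */
		double u = (rand() + 1.0) / ((double)RAND_MAX + 2.0);
		double draw = log(u) / weights[i];

		if (i == 0 || draw > best_draw) {
			best_draw = draw;
			best = i;
		}
	}
	return best;
}
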
index 073262fea6ddab4acb4cd128ee402ab54b870552..1679f47280e2678202238e667c1b617c7665d5fa 100644 (file)
@@ -278,7 +278,6 @@ static void _ceph_msgr_exit(void)
        ceph_msgr_slab_exit();
 
        BUG_ON(zero_page == NULL);
-       kunmap(zero_page);
        page_cache_release(zero_page);
        zero_page = NULL;
 }
@@ -1545,7 +1544,7 @@ static int write_partial_message_data(struct ceph_connection *con)
                page = ceph_msg_data_next(&msg->cursor, &page_offset, &length,
                                                        &last_piece);
                ret = ceph_tcp_sendpage(con->sock, page, page_offset,
-                                     length, last_piece);
+                                       length, !last_piece);
                if (ret <= 0) {
                        if (do_datacrc)
                                msg->footer.data_crc = cpu_to_le32(crc);
index 2b3cf05e87b0fc44a150f1c314f2db98f1ab7dfc..9d6ff1215928cb69787a85421fdbab9e2a1c8bf5 100644 (file)
@@ -298,21 +298,28 @@ void ceph_monc_request_next_osdmap(struct ceph_mon_client *monc)
 }
 EXPORT_SYMBOL(ceph_monc_request_next_osdmap);
 
+/*
+ * Wait for an osdmap with a given epoch.
+ *
+ * @epoch: epoch to wait for
+ * @timeout: in jiffies, 0 means "wait forever"
+ */
 int ceph_monc_wait_osdmap(struct ceph_mon_client *monc, u32 epoch,
                          unsigned long timeout)
 {
        unsigned long started = jiffies;
-       int ret;
+       long ret;
 
        mutex_lock(&monc->mutex);
        while (monc->have_osdmap < epoch) {
                mutex_unlock(&monc->mutex);
 
-               if (timeout != 0 && time_after_eq(jiffies, started + timeout))
+               if (timeout && time_after_eq(jiffies, started + timeout))
                        return -ETIMEDOUT;
 
                ret = wait_event_interruptible_timeout(monc->client->auth_wq,
-                                        monc->have_osdmap >= epoch, timeout);
+                                               monc->have_osdmap >= epoch,
+                                               ceph_timeout_jiffies(timeout));
                if (ret < 0)
                        return ret;
 
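
The hunk above leans on ceph_timeout_jiffies(), which is not part of this diff; presumably it maps a zero timeout to MAX_SCHEDULE_TIMEOUT so that "wait forever" still composes with wait_event_interruptible_timeout(). A sketch of that assumption (not the actual libceph definition):

static inline unsigned long ceph_timeout_jiffies(unsigned long timeout)
{
	/* 0 means "no timeout": sleep until woken or interrupted */
	return timeout ?: MAX_SCHEDULE_TIMEOUT;
}

The ret variable also becomes long because wait_event_interruptible_timeout() returns the remaining time as a long, and MAX_SCHEDULE_TIMEOUT (LONG_MAX) would be truncated by an int on 64-bit builds.
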
index c4ec9239249ae6541a8ee378f95230a42c2f3a3d..50033677c0fa5134d540fba82cc8e298ce1367a0 100644 (file)
@@ -296,6 +296,9 @@ static void osd_req_op_data_release(struct ceph_osd_request *osd_req,
        case CEPH_OSD_OP_CMPXATTR:
                ceph_osd_data_release(&op->xattr.osd_data);
                break;
+       case CEPH_OSD_OP_STAT:
+               ceph_osd_data_release(&op->raw_data_in);
+               break;
        default:
                break;
        }
@@ -450,7 +453,7 @@ __CEPH_FORALL_OSD_OPS(GENERATE_CASE)
  */
 static struct ceph_osd_req_op *
 _osd_req_op_init(struct ceph_osd_request *osd_req, unsigned int which,
-                               u16 opcode)
+                u16 opcode, u32 flags)
 {
        struct ceph_osd_req_op *op;
 
@@ -460,14 +463,15 @@ _osd_req_op_init(struct ceph_osd_request *osd_req, unsigned int which,
        op = &osd_req->r_ops[which];
        memset(op, 0, sizeof (*op));
        op->op = opcode;
+       op->flags = flags;
 
        return op;
 }
 
 void osd_req_op_init(struct ceph_osd_request *osd_req,
-                               unsigned int which, u16 opcode)
+                    unsigned int which, u16 opcode, u32 flags)
 {
-       (void)_osd_req_op_init(osd_req, which, opcode);
+       (void)_osd_req_op_init(osd_req, which, opcode, flags);
 }
 EXPORT_SYMBOL(osd_req_op_init);
 
@@ -476,7 +480,8 @@ void osd_req_op_extent_init(struct ceph_osd_request *osd_req,
                                u64 offset, u64 length,
                                u64 truncate_size, u32 truncate_seq)
 {
-       struct ceph_osd_req_op *op = _osd_req_op_init(osd_req, which, opcode);
+       struct ceph_osd_req_op *op = _osd_req_op_init(osd_req, which,
+                                                     opcode, 0);
        size_t payload_len = 0;
 
        BUG_ON(opcode != CEPH_OSD_OP_READ && opcode != CEPH_OSD_OP_WRITE &&
@@ -515,7 +520,8 @@ EXPORT_SYMBOL(osd_req_op_extent_update);
 void osd_req_op_cls_init(struct ceph_osd_request *osd_req, unsigned int which,
                        u16 opcode, const char *class, const char *method)
 {
-       struct ceph_osd_req_op *op = _osd_req_op_init(osd_req, which, opcode);
+       struct ceph_osd_req_op *op = _osd_req_op_init(osd_req, which,
+                                                     opcode, 0);
        struct ceph_pagelist *pagelist;
        size_t payload_len = 0;
        size_t size;
@@ -552,7 +558,8 @@ int osd_req_op_xattr_init(struct ceph_osd_request *osd_req, unsigned int which,
                          u16 opcode, const char *name, const void *value,
                          size_t size, u8 cmp_op, u8 cmp_mode)
 {
-       struct ceph_osd_req_op *op = _osd_req_op_init(osd_req, which, opcode);
+       struct ceph_osd_req_op *op = _osd_req_op_init(osd_req, which,
+                                                     opcode, 0);
        struct ceph_pagelist *pagelist;
        size_t payload_len;
 
@@ -585,7 +592,8 @@ void osd_req_op_watch_init(struct ceph_osd_request *osd_req,
                                unsigned int which, u16 opcode,
                                u64 cookie, u64 version, int flag)
 {
-       struct ceph_osd_req_op *op = _osd_req_op_init(osd_req, which, opcode);
+       struct ceph_osd_req_op *op = _osd_req_op_init(osd_req, which,
+                                                     opcode, 0);
 
        BUG_ON(opcode != CEPH_OSD_OP_NOTIFY_ACK && opcode != CEPH_OSD_OP_WATCH);
 
@@ -602,7 +610,8 @@ void osd_req_op_alloc_hint_init(struct ceph_osd_request *osd_req,
                                u64 expected_write_size)
 {
        struct ceph_osd_req_op *op = _osd_req_op_init(osd_req, which,
-                                                     CEPH_OSD_OP_SETALLOCHINT);
+                                                     CEPH_OSD_OP_SETALLOCHINT,
+                                                     0);
 
        op->alloc_hint.expected_object_size = expected_object_size;
        op->alloc_hint.expected_write_size = expected_write_size;
@@ -786,7 +795,7 @@ struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *osdc,
        }
 
        if (opcode == CEPH_OSD_OP_CREATE || opcode == CEPH_OSD_OP_DELETE) {
-               osd_req_op_init(req, which, opcode);
+               osd_req_op_init(req, which, opcode, 0);
        } else {
                u32 object_size = le32_to_cpu(layout->fl_object_size);
                u32 object_base = off - objoff;
@@ -1088,7 +1097,7 @@ static void __move_osd_to_lru(struct ceph_osd_client *osdc,
        BUG_ON(!list_empty(&osd->o_osd_lru));
 
        list_add_tail(&osd->o_osd_lru, &osdc->osd_lru);
-       osd->lru_ttl = jiffies + osdc->client->options->osd_idle_ttl * HZ;
+       osd->lru_ttl = jiffies + osdc->client->options->osd_idle_ttl;
 }
 
 static void maybe_move_osd_to_lru(struct ceph_osd_client *osdc,
@@ -1199,7 +1208,7 @@ static struct ceph_osd *__lookup_osd(struct ceph_osd_client *osdc, int o)
 static void __schedule_osd_timeout(struct ceph_osd_client *osdc)
 {
        schedule_delayed_work(&osdc->timeout_work,
-                       osdc->client->options->osd_keepalive_timeout * HZ);
+                             osdc->client->options->osd_keepalive_timeout);
 }
 
 static void __cancel_osd_timeout(struct ceph_osd_client *osdc)
@@ -1567,10 +1576,9 @@ static void handle_timeout(struct work_struct *work)
 {
        struct ceph_osd_client *osdc =
                container_of(work, struct ceph_osd_client, timeout_work.work);
+       struct ceph_options *opts = osdc->client->options;
        struct ceph_osd_request *req;
        struct ceph_osd *osd;
-       unsigned long keepalive =
-               osdc->client->options->osd_keepalive_timeout * HZ;
        struct list_head slow_osds;
        dout("timeout\n");
        down_read(&osdc->map_sem);
@@ -1586,7 +1594,8 @@ static void handle_timeout(struct work_struct *work)
         */
        INIT_LIST_HEAD(&slow_osds);
        list_for_each_entry(req, &osdc->req_lru, r_req_lru_item) {
-               if (time_before(jiffies, req->r_stamp + keepalive))
+               if (time_before(jiffies,
+                               req->r_stamp + opts->osd_keepalive_timeout))
                        break;
 
                osd = req->r_osd;
@@ -1613,8 +1622,7 @@ static void handle_osds_timeout(struct work_struct *work)
        struct ceph_osd_client *osdc =
                container_of(work, struct ceph_osd_client,
                             osds_timeout_work.work);
-       unsigned long delay =
-               osdc->client->options->osd_idle_ttl * HZ >> 2;
+       unsigned long delay = osdc->client->options->osd_idle_ttl / 4;
 
        dout("osds timeout\n");
        down_read(&osdc->map_sem);
@@ -2619,7 +2627,7 @@ int ceph_osdc_init(struct ceph_osd_client *osdc, struct ceph_client *client)
        osdc->event_count = 0;
 
        schedule_delayed_work(&osdc->osds_timeout_work,
-          round_jiffies_relative(osdc->client->options->osd_idle_ttl * HZ));
+           round_jiffies_relative(osdc->client->options->osd_idle_ttl));
 
        err = -ENOMEM;
        osdc->req_mempool = mempool_create_kmalloc_pool(10,
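
With the hunks above, osd_idle_ttl and osd_keepalive_timeout are evidently stored in jiffies rather than seconds, so the "* HZ" scaling disappears from the call sites and the conversion has to happen once at option-parse time instead. A hedged sketch of that parse-time step (not shown in this diff; the helper name is a placeholder):

static unsigned long ceph_secs_to_jiffies(unsigned int secs)
{
	/* e.g. opt->osd_idle_ttl = ceph_secs_to_jiffies(parsed_value); */
	return msecs_to_jiffies(secs * MSEC_PER_SEC);
}
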
index 15796696d64ede6b6a3118f5077972bed0c64350..4a3125836b64a0e5264e005badb7d108ddf9c47b 100644 (file)
@@ -89,7 +89,7 @@ static int crush_decode_tree_bucket(void **p, void *end,
 {
        int j;
        dout("crush_decode_tree_bucket %p to %p\n", *p, end);
-       ceph_decode_32_safe(p, end, b->num_nodes, bad);
+       ceph_decode_8_safe(p, end, b->num_nodes, bad);
        b->node_weights = kcalloc(b->num_nodes, sizeof(u32), GFP_NOFS);
        if (b->node_weights == NULL)
                return -ENOMEM;
index 096d91447e06e8a759ab8daf5283ddd4d27922b9..d4f5f220a8e55e063db6d3f3923a32be1ac33223 100644 (file)
@@ -51,10 +51,7 @@ void ceph_put_page_vector(struct page **pages, int num_pages, bool dirty)
                        set_page_dirty_lock(pages[i]);
                put_page(pages[i]);
        }
-       if (is_vmalloc_addr(pages))
-               vfree(pages);
-       else
-               kfree(pages);
+       kvfree(pages);
 }
 EXPORT_SYMBOL(ceph_put_page_vector);
 
index 476e5dda59e19822dba98a931369ff2666c59c0d..2a834c6179b9973e45274d793e7d744939e5f49e 100644 (file)
@@ -129,7 +129,7 @@ bool __skb_flow_dissect(const struct sk_buff *skb,
        struct flow_dissector_key_ports *key_ports;
        struct flow_dissector_key_tags *key_tags;
        struct flow_dissector_key_keyid *key_keyid;
-       u8 ip_proto;
+       u8 ip_proto = 0;
 
        if (!data) {
                data = skb->data;
index 1e1fe9a68d835983d760d50f9ef6a11309ffcfc1..08f16db46070a1520fcdd6892477093e9474af4f 100644 (file)
@@ -1454,7 +1454,7 @@ void sk_destruct(struct sock *sk)
 
 static void __sk_free(struct sock *sk)
 {
-       if (unlikely(sock_diag_has_destroy_listeners(sk)))
+       if (unlikely(sock_diag_has_destroy_listeners(sk) && sk->sk_net_refcnt))
                sock_diag_broadcast_destroy(sk);
        else
                sk_destruct(sk);
@@ -2269,7 +2269,6 @@ static void sock_def_write_space(struct sock *sk)
 
 static void sock_def_destruct(struct sock *sk)
 {
-       kfree(sk->sk_protinfo);
 }
 
 void sk_send_sigurg(struct sock *sk)
index 04ffad311704852a5d2c35c99eea2f1c4293f5e1..0917123790eaf09b001c97a733039185fdb0a800 100644 (file)
@@ -112,7 +112,7 @@ static int dsa_slave_open(struct net_device *dev)
 
 clear_promisc:
        if (dev->flags & IFF_PROMISC)
-               dev_set_promiscuity(master, 0);
+               dev_set_promiscuity(master, -1);
 clear_allmulti:
        if (dev->flags & IFF_ALLMULTI)
                dev_set_allmulti(master, -1);
index 3bfccd83551ce71cc2b69efe233426f3e70ea854..c7358ea4ae93530a7f6ef110a2dc204f19ac830e 100644 (file)
@@ -1045,7 +1045,7 @@ int fib_dump_info(struct sk_buff *skb, u32 portid, u32 seq, int event,
                    nla_put_u32(skb, RTA_OIF, fi->fib_nh->nh_oif))
                        goto nla_put_failure;
                if (fi->fib_nh->nh_flags & RTNH_F_LINKDOWN) {
-                       in_dev = __in_dev_get_rcu(fi->fib_nh->nh_dev);
+                       in_dev = __in_dev_get_rtnl(fi->fib_nh->nh_dev);
                        if (in_dev &&
                            IN_DEV_IGNORE_ROUTES_WITH_LINKDOWN(in_dev))
                                rtm->rtm_flags |= RTNH_F_DEAD;
@@ -1074,7 +1074,7 @@ int fib_dump_info(struct sk_buff *skb, u32 portid, u32 seq, int event,
 
                        rtnh->rtnh_flags = nh->nh_flags & 0xFF;
                        if (nh->nh_flags & RTNH_F_LINKDOWN) {
-                               in_dev = __in_dev_get_rcu(nh->nh_dev);
+                               in_dev = __in_dev_get_rtnl(nh->nh_dev);
                                if (in_dev &&
                                    IN_DEV_IGNORE_ROUTES_WITH_LINKDOWN(in_dev))
                                        rtnh->rtnh_flags |= RTNH_F_DEAD;
index 65de0684e22a17862663096da407eee16bc33a31..61eafc9b4545d5a9e1ea405e196753b3608c5fa0 100644 (file)
@@ -197,11 +197,4 @@ static int __init ipv4_netfilter_init(void)
 {
        return nf_register_afinfo(&nf_ip_afinfo);
 }
-
-static void __exit ipv4_netfilter_fini(void)
-{
-       nf_unregister_afinfo(&nf_ip_afinfo);
-}
-
-module_init(ipv4_netfilter_init);
-module_exit(ipv4_netfilter_fini);
+subsys_initcall(ipv4_netfilter_init);
index 36ba7c4f028305ec05d055e1575f5a857ac3d110..fda33f961d83ce44f05ab3298113877d01768fb2 100644 (file)
@@ -103,7 +103,7 @@ ieee80211_rate_control_ops_get(const char *name)
        const struct rate_control_ops *ops;
        const char *alg_name;
 
-       kparam_block_sysfs_write(ieee80211_default_rc_algo);
+       kernel_param_lock(THIS_MODULE);
        if (!name)
                alg_name = ieee80211_default_rc_algo;
        else
@@ -117,7 +117,7 @@ ieee80211_rate_control_ops_get(const char *name)
        /* try built-in one if specific alg requested but not found */
        if (!ops && strlen(CONFIG_MAC80211_RC_DEFAULT))
                ops = ieee80211_try_rate_control_ops_get(CONFIG_MAC80211_RC_DEFAULT);
-       kparam_unblock_sysfs_write(ieee80211_default_rc_algo);
+       kernel_param_unlock(THIS_MODULE);
 
        return ops;
 }
index b92d3f49c23e0dd93ac2ff0766bb97f1add15baa..9d37ccd95062a6840d1bb1e140b173dd1fe0b9d0 100644 (file)
@@ -216,8 +216,8 @@ static const struct nla_policy fl_policy[TCA_FLOWER_MAX + 1] = {
        [TCA_FLOWER_KEY_IPV6_DST_MASK]  = { .len = sizeof(struct in6_addr) },
        [TCA_FLOWER_KEY_TCP_SRC]        = { .type = NLA_U16 },
        [TCA_FLOWER_KEY_TCP_DST]        = { .type = NLA_U16 },
-       [TCA_FLOWER_KEY_TCP_SRC]        = { .type = NLA_U16 },
-       [TCA_FLOWER_KEY_TCP_DST]        = { .type = NLA_U16 },
+       [TCA_FLOWER_KEY_UDP_SRC]        = { .type = NLA_U16 },
+       [TCA_FLOWER_KEY_UDP_DST]        = { .type = NLA_U16 },
 };
 
 static void fl_set_key_val(struct nlattr **tb,
index fc5e45b8a832d94367178536a341001021a19c44..abe7c2db24120a13992131e2be9a19c70a297de0 100644 (file)
@@ -599,7 +599,9 @@ out:
        return err;
 no_route:
        kfree_skb(nskb);
-       IP_INC_STATS(sock_net(asoc->base.sk), IPSTATS_MIB_OUTNOROUTES);
+
+       if (asoc)
+               IP_INC_STATS(sock_net(asoc->base.sk), IPSTATS_MIB_OUTNOROUTES);
 
        /* FIXME: Returning the 'err' will effect all the associations
         * associated with a socket, although only one of the paths of the
index 5f6c4e61325b65822be525d75ebe3bb7357b97e7..1425ec2bbd5ae359a8e0408a89a6da6bb60bd87e 100644 (file)
@@ -2121,12 +2121,6 @@ static int sctp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
        if (sp->subscribe.sctp_data_io_event)
                sctp_ulpevent_read_sndrcvinfo(event, msg);
 
-#if 0
-       /* FIXME: we should be calling IP/IPv6 layers.  */
-       if (sk->sk_protinfo.af_inet.cmsg_flags)
-               ip_cmsg_recv(msg, skb);
-#endif
-
        err = copied;
 
        /* If skb's length exceeds the user's buffer, update the skb and
index 936ad0a15371ac4f1e49b8785ef645fa65583e83..b512fbd9d79a403ee980d40c1a21fc3fde47f215 100644 (file)
@@ -14,6 +14,6 @@ sunrpc-y := clnt.o xprt.o socklib.o xprtsock.o sched.o \
            sunrpc_syms.o cache.o rpc_pipe.o \
            svc_xprt.o
 sunrpc-$(CONFIG_SUNRPC_DEBUG) += debugfs.o
-sunrpc-$(CONFIG_SUNRPC_BACKCHANNEL) += backchannel_rqst.o bc_svc.o
+sunrpc-$(CONFIG_SUNRPC_BACKCHANNEL) += backchannel_rqst.o
 sunrpc-$(CONFIG_PROC_FS) += stats.o
 sunrpc-$(CONFIG_SYSCTL) += sysctl.o
index 47f38be4155fa9e6c3dbb98147027407645b2e91..02f53674dc39d6973d374f18b43bd0b1b93952c7 100644 (file)
@@ -72,7 +72,7 @@ static int param_get_hashtbl_sz(char *buffer, const struct kernel_param *kp)
 
 #define param_check_hashtbl_sz(name, p) __param_check(name, p, unsigned int);
 
-static struct kernel_param_ops param_ops_hashtbl_sz = {
+static const struct kernel_param_ops param_ops_hashtbl_sz = {
        .set = param_set_hashtbl_sz,
        .get = param_get_hashtbl_sz,
 };
index 9dd0ea8db463acc9daba0c51be89b1f17ec8f17d..9825ff0f91d6c0bde819105f639cae21883bbfad 100644 (file)
@@ -37,16 +37,18 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */
 static inline int xprt_need_to_requeue(struct rpc_xprt *xprt)
 {
-       return xprt->bc_alloc_count > 0;
+       return xprt->bc_alloc_count < atomic_read(&xprt->bc_free_slots);
 }
 
 static inline void xprt_inc_alloc_count(struct rpc_xprt *xprt, unsigned int n)
 {
+       atomic_add(n, &xprt->bc_free_slots);
        xprt->bc_alloc_count += n;
 }
 
 static inline int xprt_dec_alloc_count(struct rpc_xprt *xprt, unsigned int n)
 {
+       atomic_sub(n, &xprt->bc_free_slots);
        return xprt->bc_alloc_count -= n;
 }
 
@@ -60,13 +62,62 @@ static void xprt_free_allocation(struct rpc_rqst *req)
 
        dprintk("RPC:        free allocations for req= %p\n", req);
        WARN_ON_ONCE(test_bit(RPC_BC_PA_IN_USE, &req->rq_bc_pa_state));
-       xbufp = &req->rq_private_buf;
+       xbufp = &req->rq_rcv_buf;
        free_page((unsigned long)xbufp->head[0].iov_base);
        xbufp = &req->rq_snd_buf;
        free_page((unsigned long)xbufp->head[0].iov_base);
        kfree(req);
 }
 
+static int xprt_alloc_xdr_buf(struct xdr_buf *buf, gfp_t gfp_flags)
+{
+       struct page *page;
+       /* Preallocate one XDR receive buffer */
+       page = alloc_page(gfp_flags);
+       if (page == NULL)
+               return -ENOMEM;
+       buf->head[0].iov_base = page_address(page);
+       buf->head[0].iov_len = PAGE_SIZE;
+       buf->tail[0].iov_base = NULL;
+       buf->tail[0].iov_len = 0;
+       buf->page_len = 0;
+       buf->len = 0;
+       buf->buflen = PAGE_SIZE;
+       return 0;
+}
+
+static
+struct rpc_rqst *xprt_alloc_bc_req(struct rpc_xprt *xprt, gfp_t gfp_flags)
+{
+       struct rpc_rqst *req;
+
+       /* Pre-allocate one backchannel rpc_rqst */
+       req = kzalloc(sizeof(*req), gfp_flags);
+       if (req == NULL)
+               return NULL;
+
+       req->rq_xprt = xprt;
+       INIT_LIST_HEAD(&req->rq_list);
+       INIT_LIST_HEAD(&req->rq_bc_list);
+
+       /* Preallocate one XDR receive buffer */
+       if (xprt_alloc_xdr_buf(&req->rq_rcv_buf, gfp_flags) < 0) {
+               printk(KERN_ERR "Failed to create bc receive xbuf\n");
+               goto out_free;
+       }
+       req->rq_rcv_buf.len = PAGE_SIZE;
+
+       /* Preallocate one XDR send buffer */
+       if (xprt_alloc_xdr_buf(&req->rq_snd_buf, gfp_flags) < 0) {
+               printk(KERN_ERR "Failed to create bc snd xbuf\n");
+               goto out_free;
+       }
+       return req;
+out_free:
+       xprt_free_allocation(req);
+       return NULL;
+}
+
 /*
  * Preallocate up to min_reqs structures and related buffers for use
  * by the backchannel.  This function can be called multiple times
@@ -87,9 +138,7 @@ static void xprt_free_allocation(struct rpc_rqst *req)
  */
 int xprt_setup_backchannel(struct rpc_xprt *xprt, unsigned int min_reqs)
 {
-       struct page *page_rcv = NULL, *page_snd = NULL;
-       struct xdr_buf *xbufp = NULL;
-       struct rpc_rqst *req, *tmp;
+       struct rpc_rqst *req;
        struct list_head tmp_list;
        int i;
 
@@ -106,7 +155,7 @@ int xprt_setup_backchannel(struct rpc_xprt *xprt, unsigned int min_reqs)
        INIT_LIST_HEAD(&tmp_list);
        for (i = 0; i < min_reqs; i++) {
                /* Pre-allocate one backchannel rpc_rqst */
-               req = kzalloc(sizeof(struct rpc_rqst), GFP_KERNEL);
+               req = xprt_alloc_bc_req(xprt, GFP_KERNEL);
                if (req == NULL) {
                        printk(KERN_ERR "Failed to create bc rpc_rqst\n");
                        goto out_free;
@@ -115,41 +164,6 @@ int xprt_setup_backchannel(struct rpc_xprt *xprt, unsigned int min_reqs)
                /* Add the allocated buffer to the tmp list */
                dprintk("RPC:       adding req= %p\n", req);
                list_add(&req->rq_bc_pa_list, &tmp_list);
-
-               req->rq_xprt = xprt;
-               INIT_LIST_HEAD(&req->rq_list);
-               INIT_LIST_HEAD(&req->rq_bc_list);
-
-               /* Preallocate one XDR receive buffer */
-               page_rcv = alloc_page(GFP_KERNEL);
-               if (page_rcv == NULL) {
-                       printk(KERN_ERR "Failed to create bc receive xbuf\n");
-                       goto out_free;
-               }
-               xbufp = &req->rq_rcv_buf;
-               xbufp->head[0].iov_base = page_address(page_rcv);
-               xbufp->head[0].iov_len = PAGE_SIZE;
-               xbufp->tail[0].iov_base = NULL;
-               xbufp->tail[0].iov_len = 0;
-               xbufp->page_len = 0;
-               xbufp->len = PAGE_SIZE;
-               xbufp->buflen = PAGE_SIZE;
-
-               /* Preallocate one XDR send buffer */
-               page_snd = alloc_page(GFP_KERNEL);
-               if (page_snd == NULL) {
-                       printk(KERN_ERR "Failed to create bc snd xbuf\n");
-                       goto out_free;
-               }
-
-               xbufp = &req->rq_snd_buf;
-               xbufp->head[0].iov_base = page_address(page_snd);
-               xbufp->head[0].iov_len = 0;
-               xbufp->tail[0].iov_base = NULL;
-               xbufp->tail[0].iov_len = 0;
-               xbufp->page_len = 0;
-               xbufp->len = 0;
-               xbufp->buflen = PAGE_SIZE;
        }
 
        /*
@@ -167,7 +181,10 @@ out_free:
        /*
         * Memory allocation failed, free the temporary list
         */
-       list_for_each_entry_safe(req, tmp, &tmp_list, rq_bc_pa_list) {
+       while (!list_empty(&tmp_list)) {
+               req = list_first_entry(&tmp_list,
+                               struct rpc_rqst,
+                               rq_bc_pa_list);
                list_del(&req->rq_bc_pa_list);
                xprt_free_allocation(req);
        }
@@ -217,9 +234,15 @@ static struct rpc_rqst *xprt_alloc_bc_request(struct rpc_xprt *xprt, __be32 xid)
        struct rpc_rqst *req = NULL;
 
        dprintk("RPC:       allocate a backchannel request\n");
-       if (list_empty(&xprt->bc_pa_list))
+       if (atomic_read(&xprt->bc_free_slots) <= 0)
                goto not_found;
-
+       if (list_empty(&xprt->bc_pa_list)) {
+               req = xprt_alloc_bc_req(xprt, GFP_ATOMIC);
+               if (!req)
+                       goto not_found;
+               /* Note: this 'free' request adds it to xprt->bc_pa_list */
+               xprt_free_bc_request(req);
+       }
        req = list_first_entry(&xprt->bc_pa_list, struct rpc_rqst,
                                rq_bc_pa_list);
        req->rq_reply_bytes_recvd = 0;
@@ -245,11 +268,21 @@ void xprt_free_bc_request(struct rpc_rqst *req)
 
        req->rq_connect_cookie = xprt->connect_cookie - 1;
        smp_mb__before_atomic();
-       WARN_ON_ONCE(!test_bit(RPC_BC_PA_IN_USE, &req->rq_bc_pa_state));
        clear_bit(RPC_BC_PA_IN_USE, &req->rq_bc_pa_state);
        smp_mb__after_atomic();
 
-       if (!xprt_need_to_requeue(xprt)) {
+       /*
+        * Return it to the list of preallocations so that it
+        * may be reused by a new callback request.
+        */
+       spin_lock_bh(&xprt->bc_pa_lock);
+       if (xprt_need_to_requeue(xprt)) {
+               list_add_tail(&req->rq_bc_pa_list, &xprt->bc_pa_list);
+               xprt->bc_alloc_count++;
+               req = NULL;
+       }
+       spin_unlock_bh(&xprt->bc_pa_lock);
+       if (req != NULL) {
                /*
                 * The last remaining session was destroyed while this
                 * entry was in use.  Free the entry and don't attempt
@@ -260,14 +293,6 @@ void xprt_free_bc_request(struct rpc_rqst *req)
                xprt_free_allocation(req);
                return;
        }
-
-       /*
-        * Return it to the list of preallocations so that it
-        * may be reused by a new callback request.
-        */
-       spin_lock_bh(&xprt->bc_pa_lock);
-       list_add_tail(&req->rq_bc_pa_list, &xprt->bc_pa_list);
-       spin_unlock_bh(&xprt->bc_pa_lock);
 }
 
 /*
@@ -311,6 +336,7 @@ void xprt_complete_bc_request(struct rpc_rqst *req, uint32_t copied)
 
        spin_lock(&xprt->bc_pa_lock);
        list_del(&req->rq_bc_pa_list);
+       xprt->bc_alloc_count--;
        spin_unlock(&xprt->bc_pa_lock);
 
        req->rq_private_buf.len = copied;
diff --git a/net/sunrpc/bc_svc.c b/net/sunrpc/bc_svc.c
deleted file mode 100644 (file)
index 15c7a8a..0000000
+++ /dev/null
@@ -1,63 +0,0 @@
-/******************************************************************************
-
-(c) 2007 Network Appliance, Inc.  All Rights Reserved.
-(c) 2009 NetApp.  All Rights Reserved.
-
-NetApp provides this source code under the GPL v2 License.
-The GPL v2 license is available at
-http://opensource.org/licenses/gpl-license.php.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
-CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-******************************************************************************/
-
-/*
- * The NFSv4.1 callback service helper routines.
- * They implement the transport level processing required to send the
- * reply over an existing open connection previously established by the client.
- */
-
-#include <linux/module.h>
-
-#include <linux/sunrpc/xprt.h>
-#include <linux/sunrpc/sched.h>
-#include <linux/sunrpc/bc_xprt.h>
-
-#define RPCDBG_FACILITY        RPCDBG_SVCDSP
-
-/* Empty callback ops */
-static const struct rpc_call_ops nfs41_callback_ops = {
-};
-
-
-/*
- * Send the callback reply
- */
-int bc_send(struct rpc_rqst *req)
-{
-       struct rpc_task *task;
-       int ret;
-
-       dprintk("RPC:       bc_send req= %p\n", req);
-       task = rpc_run_bc_task(req, &nfs41_callback_ops);
-       if (IS_ERR(task))
-               ret = PTR_ERR(task);
-       else {
-               WARN_ON_ONCE(atomic_read(&task->tk_count) != 1);
-               ret = task->tk_status;
-               rpc_put_task(task);
-       }
-       dprintk("RPC:       bc_send ret= %d\n", ret);
-       return ret;
-}
-
index e6ce1517367f884608640b2532080ab6566b9379..cbc6af923dd1cb0baabc95161989133150269d4f 100644 (file)
@@ -891,15 +891,8 @@ void rpc_task_set_client(struct rpc_task *task, struct rpc_clnt *clnt)
                        task->tk_flags |= RPC_TASK_SOFT;
                if (clnt->cl_noretranstimeo)
                        task->tk_flags |= RPC_TASK_NO_RETRANS_TIMEOUT;
-               if (sk_memalloc_socks()) {
-                       struct rpc_xprt *xprt;
-
-                       rcu_read_lock();
-                       xprt = rcu_dereference(clnt->cl_xprt);
-                       if (xprt->swapper)
-                               task->tk_flags |= RPC_TASK_SWAPPER;
-                       rcu_read_unlock();
-               }
+               if (atomic_read(&clnt->cl_swapper))
+                       task->tk_flags |= RPC_TASK_SWAPPER;
                /* Add to the client's list of all tasks */
                spin_lock(&clnt->cl_lock);
                list_add_tail(&task->tk_task, &clnt->cl_tasks);
@@ -1031,15 +1024,14 @@ EXPORT_SYMBOL_GPL(rpc_call_async);
  * rpc_run_bc_task - Allocate a new RPC task for backchannel use, then run
  * rpc_execute against it
  * @req: RPC request
- * @tk_ops: RPC call ops
  */
-struct rpc_task *rpc_run_bc_task(struct rpc_rqst *req,
-                               const struct rpc_call_ops *tk_ops)
+struct rpc_task *rpc_run_bc_task(struct rpc_rqst *req)
 {
        struct rpc_task *task;
        struct xdr_buf *xbufp = &req->rq_snd_buf;
        struct rpc_task_setup task_setup_data = {
-               .callback_ops = tk_ops,
+               .callback_ops = &rpc_default_ops,
+               .flags = RPC_TASK_SOFTCONN,
        };
 
        dprintk("RPC: rpc_run_bc_task req= %p\n", req);
@@ -1614,6 +1606,7 @@ call_allocate(struct rpc_task *task)
                                        req->rq_callsize + req->rq_rcvsize);
        if (req->rq_buffer != NULL)
                return;
+       xprt_inject_disconnect(xprt);
 
        dprintk("RPC: %5u rpc_buffer allocation failed\n", task->tk_pid);
 
@@ -1951,33 +1944,36 @@ call_bc_transmit(struct rpc_task *task)
 {
        struct rpc_rqst *req = task->tk_rqstp;
 
-       if (!xprt_prepare_transmit(task)) {
-               /*
-                * Could not reserve the transport. Try again after the
-                * transport is released.
-                */
-               task->tk_status = 0;
-               task->tk_action = call_bc_transmit;
-               return;
-       }
+       if (!xprt_prepare_transmit(task))
+               goto out_retry;
 
-       task->tk_action = rpc_exit_task;
        if (task->tk_status < 0) {
                printk(KERN_NOTICE "RPC: Could not send backchannel reply "
                        "error: %d\n", task->tk_status);
-               return;
+               goto out_done;
        }
+       if (req->rq_connect_cookie != req->rq_xprt->connect_cookie)
+               req->rq_bytes_sent = 0;
 
        xprt_transmit(task);
+
+       if (task->tk_status == -EAGAIN)
+               goto out_nospace;
+
        xprt_end_transmit(task);
        dprint_status(task);
        switch (task->tk_status) {
        case 0:
                /* Success */
-               break;
        case -EHOSTDOWN:
        case -EHOSTUNREACH:
        case -ENETUNREACH:
+       case -ECONNRESET:
+       case -ECONNREFUSED:
+       case -EADDRINUSE:
+       case -ENOTCONN:
+       case -EPIPE:
+               break;
        case -ETIMEDOUT:
                /*
                 * Problem reaching the server.  Disconnect and let the
@@ -2002,6 +1998,13 @@ call_bc_transmit(struct rpc_task *task)
                break;
        }
        rpc_wake_up_queued_task(&req->rq_xprt->pending, task);
+out_done:
+       task->tk_action = rpc_exit_task;
+       return;
+out_nospace:
+       req->rq_connect_cookie = req->rq_xprt->connect_cookie;
+out_retry:
+       task->tk_status = 0;
 }
 #endif /* CONFIG_SUNRPC_BACKCHANNEL */
 
@@ -2476,3 +2479,59 @@ void rpc_show_tasks(struct net *net)
        spin_unlock(&sn->rpc_client_lock);
 }
 #endif
+
+#if IS_ENABLED(CONFIG_SUNRPC_SWAP)
+int
+rpc_clnt_swap_activate(struct rpc_clnt *clnt)
+{
+       int ret = 0;
+       struct rpc_xprt *xprt;
+
+       if (atomic_inc_return(&clnt->cl_swapper) == 1) {
+retry:
+               rcu_read_lock();
+               xprt = xprt_get(rcu_dereference(clnt->cl_xprt));
+               rcu_read_unlock();
+               if (!xprt) {
+                       /*
+                        * If we didn't get a reference, then we likely are
+                        * racing with a migration event. Wait for a grace
+                        * period and try again.
+                        */
+                       synchronize_rcu();
+                       goto retry;
+               }
+
+               ret = xprt_enable_swap(xprt);
+               xprt_put(xprt);
+       }
+       return ret;
+}
+EXPORT_SYMBOL_GPL(rpc_clnt_swap_activate);
+
+void
+rpc_clnt_swap_deactivate(struct rpc_clnt *clnt)
+{
+       struct rpc_xprt *xprt;
+
+       if (atomic_dec_if_positive(&clnt->cl_swapper) == 0) {
+retry:
+               rcu_read_lock();
+               xprt = xprt_get(rcu_dereference(clnt->cl_xprt));
+               rcu_read_unlock();
+               if (!xprt) {
+                       /*
+                        * If we didn't get a reference, then we likely are
+                        * racing with a migration event. Wait for a grace
+                        * period and try again.
+                        */
+                       synchronize_rcu();
+                       goto retry;
+               }
+
+               xprt_disable_swap(xprt);
+               xprt_put(xprt);
+       }
+}
+EXPORT_SYMBOL_GPL(rpc_clnt_swap_deactivate);
+#endif /* CONFIG_SUNRPC_SWAP */
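
The new swap hooks use clnt->cl_swapper as a first-user/last-user count: atomic_inc_return() == 1 enables swap support on the transport exactly once, and atomic_dec_if_positive() == 0 disables it when the last user goes away. xprt_enable_swap()/xprt_disable_swap() are not shown here; for a socket transport they presumably come down to marking the underlying socket SOCK_MEMALLOC so its allocations may dip into reserves while memory is being reclaimed, conceptually:

#include <net/sock.h>

static int example_enable_swap_on_socket(struct sock *sk)
{
	/* hypothetical reduction of xprt_enable_swap() for socket
	 * transports: allow allocations from memory reserves */
	sk_set_memalloc(sk);
	return 0;
}
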
index 82962f7e6e888f619ad79754f038732d5d5b6333..e7b4d93566df42dfa5ecf985152235e539ed9933 100644 (file)
 #include "netns.h"
 
 static struct dentry *topdir;
+static struct dentry *rpc_fault_dir;
 static struct dentry *rpc_clnt_dir;
 static struct dentry *rpc_xprt_dir;
 
+unsigned int rpc_inject_disconnect;
+
 struct rpc_clnt_iter {
        struct rpc_clnt *clnt;
        loff_t          pos;
@@ -257,6 +260,8 @@ rpc_xprt_debugfs_register(struct rpc_xprt *xprt)
                debugfs_remove_recursive(xprt->debugfs);
                xprt->debugfs = NULL;
        }
+
+       atomic_set(&xprt->inject_disconnect, rpc_inject_disconnect);
 }
 
 void
@@ -266,11 +271,79 @@ rpc_xprt_debugfs_unregister(struct rpc_xprt *xprt)
        xprt->debugfs = NULL;
 }
 
+static int
+fault_open(struct inode *inode, struct file *filp)
+{
+       filp->private_data = kmalloc(128, GFP_KERNEL);
+       if (!filp->private_data)
+               return -ENOMEM;
+       return 0;
+}
+
+static int
+fault_release(struct inode *inode, struct file *filp)
+{
+       kfree(filp->private_data);
+       return 0;
+}
+
+static ssize_t
+fault_disconnect_read(struct file *filp, char __user *user_buf,
+                     size_t len, loff_t *offset)
+{
+       char *buffer = (char *)filp->private_data;
+       size_t size;
+
+       size = sprintf(buffer, "%u\n", rpc_inject_disconnect);
+       return simple_read_from_buffer(user_buf, len, offset, buffer, size);
+}
+
+static ssize_t
+fault_disconnect_write(struct file *filp, const char __user *user_buf,
+                      size_t len, loff_t *offset)
+{
+       char buffer[16];
+
+       if (len >= sizeof(buffer))
+               len = sizeof(buffer) - 1;
+       if (copy_from_user(buffer, user_buf, len))
+               return -EFAULT;
+       buffer[len] = '\0';
+       if (kstrtouint(buffer, 10, &rpc_inject_disconnect))
+               return -EINVAL;
+       return len;
+}
+
+static const struct file_operations fault_disconnect_fops = {
+       .owner          = THIS_MODULE,
+       .open           = fault_open,
+       .read           = fault_disconnect_read,
+       .write          = fault_disconnect_write,
+       .release        = fault_release,
+};
+
+static struct dentry *
+inject_fault_dir(struct dentry *topdir)
+{
+       struct dentry *faultdir;
+
+       faultdir = debugfs_create_dir("inject_fault", topdir);
+       if (!faultdir)
+               return NULL;
+
+       if (!debugfs_create_file("disconnect", S_IFREG | S_IRUSR, faultdir,
+                                NULL, &fault_disconnect_fops))
+               return NULL;
+
+       return faultdir;
+}
+
 void __exit
 sunrpc_debugfs_exit(void)
 {
        debugfs_remove_recursive(topdir);
        topdir = NULL;
+       rpc_fault_dir = NULL;
        rpc_clnt_dir = NULL;
        rpc_xprt_dir = NULL;
 }
@@ -282,6 +355,10 @@ sunrpc_debugfs_init(void)
        if (!topdir)
                return;
 
+       rpc_fault_dir = inject_fault_dir(topdir);
+       if (!rpc_fault_dir)
+               goto out_remove;
+
        rpc_clnt_dir = debugfs_create_dir("rpc_clnt", topdir);
        if (!rpc_clnt_dir)
                goto out_remove;
@@ -294,5 +371,6 @@ sunrpc_debugfs_init(void)
 out_remove:
        debugfs_remove_recursive(topdir);
        topdir = NULL;
+       rpc_fault_dir = NULL;
        rpc_clnt_dir = NULL;
 }
index 852ae606b02a37760a5a4dc1fd1860b8a45b89a6..5a16d8d8c831c4ad2805f5958b9ccef63449af82 100644 (file)
@@ -1350,6 +1350,11 @@ bc_svc_process(struct svc_serv *serv, struct rpc_rqst *req,
 {
        struct kvec     *argv = &rqstp->rq_arg.head[0];
        struct kvec     *resv = &rqstp->rq_res.head[0];
+       struct rpc_task *task;
+       int proc_error;
+       int error;
+
+       dprintk("svc: %s(%p)\n", __func__, req);
 
        /* Build the svc_rqst used by the common processing routine */
        rqstp->rq_xprt = serv->sv_bc_xprt;
@@ -1372,21 +1377,36 @@ bc_svc_process(struct svc_serv *serv, struct rpc_rqst *req,
 
        /*
         * Skip the next two words because they've already been
-        * processed in the trasport
+        * processed in the transport
         */
        svc_getu32(argv);       /* XID */
        svc_getnl(argv);        /* CALLDIR */
 
-       /* Returns 1 for send, 0 for drop */
-       if (svc_process_common(rqstp, argv, resv)) {
-               memcpy(&req->rq_snd_buf, &rqstp->rq_res,
-                                               sizeof(req->rq_snd_buf));
-               return bc_send(req);
-       } else {
-               /* drop request */
+       /* Parse and execute the bc call */
+       proc_error = svc_process_common(rqstp, argv, resv);
+
+       atomic_inc(&req->rq_xprt->bc_free_slots);
+       if (!proc_error) {
+               /* Processing error: drop the request */
                xprt_free_bc_request(req);
                return 0;
        }
+
+       /* Finally, send the reply synchronously */
+       memcpy(&req->rq_snd_buf, &rqstp->rq_res, sizeof(req->rq_snd_buf));
+       task = rpc_run_bc_task(req);
+       if (IS_ERR(task)) {
+               error = PTR_ERR(task);
+               goto out;
+       }
+
+       WARN_ON_ONCE(atomic_read(&task->tk_count) != 1);
+       error = task->tk_status;
+       rpc_put_task(task);
+
+out:
+       dprintk("svc: %s(), error=%d\n", __func__, error);
+       return error;
 }
 EXPORT_SYMBOL_GPL(bc_svc_process);
 #endif /* CONFIG_SUNRPC_BACKCHANNEL */
index 1d4fe24af06a1115bd80538c5346ae2f843f1eb8..ab5dd621ae0c0795a0d86e1a9fb83c5cc2812486 100644 (file)
@@ -68,6 +68,7 @@ static void    xprt_init(struct rpc_xprt *xprt, struct net *net);
 static void    xprt_request_init(struct rpc_task *, struct rpc_xprt *);
 static void    xprt_connect_status(struct rpc_task *task);
 static int      __xprt_get_cong(struct rpc_xprt *, struct rpc_task *);
+static void     __xprt_put_cong(struct rpc_xprt *, struct rpc_rqst *);
 static void     xprt_destroy(struct rpc_xprt *xprt);
 
 static DEFINE_SPINLOCK(xprt_list_lock);
@@ -250,6 +251,8 @@ int xprt_reserve_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task)
        }
        xprt_clear_locked(xprt);
 out_sleep:
+       if (req)
+               __xprt_put_cong(xprt, req);
        dprintk("RPC: %5u failed to lock transport %p\n", task->tk_pid, xprt);
        task->tk_timeout = 0;
        task->tk_status = -EAGAIN;
@@ -608,8 +611,8 @@ static void xprt_autoclose(struct work_struct *work)
        struct rpc_xprt *xprt =
                container_of(work, struct rpc_xprt, task_cleanup);
 
-       xprt->ops->close(xprt);
        clear_bit(XPRT_CLOSE_WAIT, &xprt->state);
+       xprt->ops->close(xprt);
        xprt_release_write(xprt, NULL);
 }
 
@@ -967,6 +970,7 @@ void xprt_transmit(struct rpc_task *task)
                task->tk_status = status;
                return;
        }
+       xprt_inject_disconnect(xprt);
 
        dprintk("RPC: %5u xmit complete\n", task->tk_pid);
        task->tk_flags |= RPC_TASK_SENT;
@@ -1285,6 +1289,7 @@ void xprt_release(struct rpc_task *task)
        spin_unlock_bh(&xprt->transport_lock);
        if (req->rq_buffer)
                xprt->ops->buf_free(req->rq_buffer);
+       xprt_inject_disconnect(xprt);
        if (req->rq_cred != NULL)
                put_rpccred(req->rq_cred);
        task->tk_rqstp = NULL;
index 302d4ebf6fbfb2a2c15780ef966b52bb473b91e4..f1e8dafbd5079b3406a769ba4854ecba229edca6 100644 (file)
  * can take tens of usecs to complete.
  */
 
+/* Normal operation
+ *
+ * A Memory Region is prepared for RDMA READ or WRITE using the
+ * ib_map_phys_fmr verb (fmr_op_map). When the RDMA operation is
+ * finished, the Memory Region is unmapped using the ib_unmap_fmr
+ * verb (fmr_op_unmap).
+ */
+
+/* Transport recovery
+ *
+ * After a transport reconnect, fmr_op_map re-uses the MR already
+ * allocated for the RPC, but generates a fresh rkey then maps the
+ * MR again. This process is synchronous.
+ */
+
 #include "xprt_rdma.h"
 
 #if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
@@ -50,19 +65,28 @@ fmr_op_init(struct rpcrdma_xprt *r_xprt)
        struct rpcrdma_mw *r;
        int i, rc;
 
+       spin_lock_init(&buf->rb_mwlock);
        INIT_LIST_HEAD(&buf->rb_mws);
        INIT_LIST_HEAD(&buf->rb_all);
 
-       i = (buf->rb_max_requests + 1) * RPCRDMA_MAX_SEGS;
-       dprintk("RPC:       %s: initializing %d FMRs\n", __func__, i);
+       i = max_t(int, RPCRDMA_MAX_DATA_SEGS / RPCRDMA_MAX_FMR_SGES, 1);
+       i += 2;                         /* head + tail */
+       i *= buf->rb_max_requests;      /* one set for each RPC slot */
+       dprintk("RPC:       %s: initializing %d FMRs\n", __func__, i);
 
+       rc = -ENOMEM;
        while (i--) {
                r = kzalloc(sizeof(*r), GFP_KERNEL);
                if (!r)
-                       return -ENOMEM;
+                       goto out;
 
-               r->r.fmr = ib_alloc_fmr(pd, mr_access_flags, &fmr_attr);
-               if (IS_ERR(r->r.fmr))
+               r->r.fmr.physaddrs = kmalloc(RPCRDMA_MAX_FMR_SGES *
+                                            sizeof(u64), GFP_KERNEL);
+               if (!r->r.fmr.physaddrs)
+                       goto out_free;
+
+               r->r.fmr.fmr = ib_alloc_fmr(pd, mr_access_flags, &fmr_attr);
+               if (IS_ERR(r->r.fmr.fmr))
                        goto out_fmr_err;
 
                list_add(&r->mw_list, &buf->rb_mws);
@@ -71,12 +95,24 @@ fmr_op_init(struct rpcrdma_xprt *r_xprt)
        return 0;
 
 out_fmr_err:
-       rc = PTR_ERR(r->r.fmr);
+       rc = PTR_ERR(r->r.fmr.fmr);
        dprintk("RPC:       %s: ib_alloc_fmr status %i\n", __func__, rc);
+       kfree(r->r.fmr.physaddrs);
+out_free:
        kfree(r);
+out:
        return rc;
 }
 
+static int
+__fmr_unmap(struct rpcrdma_mw *r)
+{
+       LIST_HEAD(l);
+
+       list_add(&r->r.fmr.fmr->list, &l);
+       return ib_unmap_fmr(&l);
+}
+
 /* Use the ib_map_phys_fmr() verb to register a memory region
  * for remote access via RDMA READ or RDMA WRITE.
  */
@@ -85,12 +121,24 @@ fmr_op_map(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg,
           int nsegs, bool writing)
 {
        struct rpcrdma_ia *ia = &r_xprt->rx_ia;
-       struct ib_device *device = ia->ri_id->device;
+       struct ib_device *device = ia->ri_device;
        enum dma_data_direction direction = rpcrdma_data_dir(writing);
        struct rpcrdma_mr_seg *seg1 = seg;
-       struct rpcrdma_mw *mw = seg1->rl_mw;
-       u64 physaddrs[RPCRDMA_MAX_DATA_SEGS];
        int len, pageoff, i, rc;
+       struct rpcrdma_mw *mw;
+
+       mw = seg1->rl_mw;
+       seg1->rl_mw = NULL;
+       if (!mw) {
+               mw = rpcrdma_get_mw(r_xprt);
+               if (!mw)
+                       return -ENOMEM;
+       } else {
+               /* this is a retransmit; generate a fresh rkey */
+               rc = __fmr_unmap(mw);
+               if (rc)
+                       return rc;
+       }
 
        pageoff = offset_in_page(seg1->mr_offset);
        seg1->mr_offset -= pageoff;     /* start of page */
@@ -100,7 +148,7 @@ fmr_op_map(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg,
                nsegs = RPCRDMA_MAX_FMR_SGES;
        for (i = 0; i < nsegs;) {
                rpcrdma_map_one(device, seg, direction);
-               physaddrs[i] = seg->mr_dma;
+               mw->r.fmr.physaddrs[i] = seg->mr_dma;
                len += seg->mr_len;
                ++seg;
                ++i;
@@ -110,11 +158,13 @@ fmr_op_map(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg,
                        break;
        }
 
-       rc = ib_map_phys_fmr(mw->r.fmr, physaddrs, i, seg1->mr_dma);
+       rc = ib_map_phys_fmr(mw->r.fmr.fmr, mw->r.fmr.physaddrs,
+                            i, seg1->mr_dma);
        if (rc)
                goto out_maperr;
 
-       seg1->mr_rkey = mw->r.fmr->rkey;
+       seg1->rl_mw = mw;
+       seg1->mr_rkey = mw->r.fmr.fmr->rkey;
        seg1->mr_base = seg1->mr_dma + pageoff;
        seg1->mr_nsegs = i;
        seg1->mr_len = len;
@@ -137,48 +187,28 @@ fmr_op_unmap(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg)
 {
        struct rpcrdma_ia *ia = &r_xprt->rx_ia;
        struct rpcrdma_mr_seg *seg1 = seg;
-       struct ib_device *device;
+       struct rpcrdma_mw *mw = seg1->rl_mw;
        int rc, nsegs = seg->mr_nsegs;
-       LIST_HEAD(l);
 
-       list_add(&seg1->rl_mw->r.fmr->list, &l);
-       rc = ib_unmap_fmr(&l);
-       read_lock(&ia->ri_qplock);
-       device = ia->ri_id->device;
+       dprintk("RPC:       %s: FMR %p\n", __func__, mw);
+
+       seg1->rl_mw = NULL;
        while (seg1->mr_nsegs--)
-               rpcrdma_unmap_one(device, seg++);
-       read_unlock(&ia->ri_qplock);
+               rpcrdma_unmap_one(ia->ri_device, seg++);
+       rc = __fmr_unmap(mw);
        if (rc)
                goto out_err;
+       rpcrdma_put_mw(r_xprt, mw);
        return nsegs;
 
 out_err:
+       /* The FMR is abandoned, but remains in rb_all. fmr_op_destroy
+        * will attempt to release it when the transport is destroyed.
+        */
        dprintk("RPC:       %s: ib_unmap_fmr status %i\n", __func__, rc);
        return nsegs;
 }
 
-/* After a disconnect, unmap all FMRs.
- *
- * This is invoked only in the transport connect worker in order
- * to serialize with rpcrdma_register_fmr_external().
- */
-static void
-fmr_op_reset(struct rpcrdma_xprt *r_xprt)
-{
-       struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
-       struct rpcrdma_mw *r;
-       LIST_HEAD(list);
-       int rc;
-
-       list_for_each_entry(r, &buf->rb_all, mw_all)
-               list_add(&r->r.fmr->list, &list);
-
-       rc = ib_unmap_fmr(&list);
-       if (rc)
-               dprintk("RPC:       %s: ib_unmap_fmr failed %i\n",
-                       __func__, rc);
-}
-
 static void
 fmr_op_destroy(struct rpcrdma_buffer *buf)
 {
@@ -188,10 +218,13 @@ fmr_op_destroy(struct rpcrdma_buffer *buf)
        while (!list_empty(&buf->rb_all)) {
                r = list_entry(buf->rb_all.next, struct rpcrdma_mw, mw_all);
                list_del(&r->mw_all);
-               rc = ib_dealloc_fmr(r->r.fmr);
+               kfree(r->r.fmr.physaddrs);
+
+               rc = ib_dealloc_fmr(r->r.fmr.fmr);
                if (rc)
                        dprintk("RPC:       %s: ib_dealloc_fmr failed %i\n",
                                __func__, rc);
+
                kfree(r);
        }
 }
@@ -202,7 +235,6 @@ const struct rpcrdma_memreg_ops rpcrdma_fmr_memreg_ops = {
        .ro_open                        = fmr_op_open,
        .ro_maxpages                    = fmr_op_maxpages,
        .ro_init                        = fmr_op_init,
-       .ro_reset                       = fmr_op_reset,
        .ro_destroy                     = fmr_op_destroy,
        .ro_displayname                 = "fmr",
 };
index d234521320a4bb45a9bbecd40e5e58bbb42d77d8..04ea914201b237cc6f42ce68caa6b5dbc7b29d59 100644 (file)
  * but most complex memory registration mode.
  */
 
+/* Normal operation
+ *
+ * A Memory Region is prepared for RDMA READ or WRITE using a FAST_REG
+ * Work Request (frmr_op_map). When the RDMA operation is finished, this
+ * Memory Region is invalidated using a LOCAL_INV Work Request
+ * (frmr_op_unmap).
+ *
+ * Typically these Work Requests are not signaled, and neither are RDMA
+ * SEND Work Requests (with the exception of signaling occasionally to
+ * prevent provider work queue overflows). This greatly reduces HCA
+ * interrupt workload.
+ *
+ * As an optimization, frwr_op_unmap marks MRs INVALID before the
+ * LOCAL_INV WR is posted. If posting succeeds, the MR is placed on
+ * rb_mws immediately so that no work (like managing a linked list
+ * under a spinlock) is needed in the completion upcall.
+ *
+ * But this means that frwr_op_map() can occasionally encounter an MR
+ * that is INVALID but the LOCAL_INV WR has not completed. Work Queue
+ * ordering prevents a subsequent FAST_REG WR from executing against
+ * that MR while it is still being invalidated.
+ */
+
+/* Transport recovery
+ *
+ * ->op_map and the transport connect worker cannot run at the same
+ * time, but ->op_unmap can fire while the transport connect worker
+ * is running. Thus MR recovery is handled in ->op_map, to guarantee
+ * that recovered MRs are owned by a sending RPC, and not one where
+ * ->op_unmap could fire at the same time transport reconnect is
+ * being done.
+ *
+ * When the underlying transport disconnects, MRs are left in one of
+ * three states:
+ *
+ * INVALID:    The MR was not in use before the QP entered ERROR state.
+ *             (Or, the LOCAL_INV WR has not completed or flushed yet).
+ *
+ * STALE:      The MR was being registered or unregistered when the QP
+ *             entered ERROR state, and the pending WR was flushed.
+ *
+ * VALID:      The MR was registered before the QP entered ERROR state.
+ *
+ * When frwr_op_map encounters STALE and VALID MRs, they are recovered
+ * with ib_dereg_mr and then are re-initialized. Because MR recovery
+ * allocates fresh resources, it is deferred to a workqueue, and the
+ * recovered MRs are placed back on the rb_mws list when recovery is
+ * complete. frwr_op_map allocates another MR for the current RPC while
+ * the broken MR is reset.
+ *
+ * To ensure that frwr_op_map doesn't encounter an MR that is marked
+ * INVALID but that is about to be flushed due to a previous transport
+ * disconnect, the transport connect worker attempts to drain all
+ * pending send queue WRs before the transport is reconnected.
+ */
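The three MR states above map directly onto what frwr_op_map does with an MR it pulls off the free list. Here is a minimal, userspace-only sketch of that triage; the enum values echo the driver's names, everything else is illustrative.

#include <stdio.h>

enum frmr_state { FRMR_IS_INVALID, FRMR_IS_VALID, FRMR_IS_STALE };

struct mr {
	enum frmr_state state;
	const char *name;
};

/* frwr_op_map reuses INVALID MRs immediately; STALE and VALID MRs are
 * handed to the recovery workqueue and another MR is tried instead. */
static int mr_is_usable(const struct mr *m)
{
	return m->state == FRMR_IS_INVALID;
}

int main(void)
{
	struct mr mrs[] = {
		{ FRMR_IS_INVALID, "mr0" },
		{ FRMR_IS_STALE,   "mr1" },
		{ FRMR_IS_VALID,   "mr2" },
	};
	unsigned int i;

	for (i = 0; i < sizeof(mrs) / sizeof(mrs[0]); i++)
		printf("%s: %s\n", mrs[i].name,
		       mr_is_usable(&mrs[i]) ? "reuse" : "queue for recovery");
	return 0;
}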
+
 #include "xprt_rdma.h"
 
 #if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
 # define RPCDBG_FACILITY       RPCDBG_TRANS
 #endif
 
+static struct workqueue_struct *frwr_recovery_wq;
+
+#define FRWR_RECOVERY_WQ_FLAGS         (WQ_UNBOUND | WQ_MEM_RECLAIM)
+
+int
+frwr_alloc_recovery_wq(void)
+{
+       frwr_recovery_wq = alloc_workqueue("frwr_recovery",
+                                          FRWR_RECOVERY_WQ_FLAGS, 0);
+       return !frwr_recovery_wq ? -ENOMEM : 0;
+}
+
+void
+frwr_destroy_recovery_wq(void)
+{
+       struct workqueue_struct *wq;
+
+       if (!frwr_recovery_wq)
+               return;
+
+       wq = frwr_recovery_wq;
+       frwr_recovery_wq = NULL;
+       destroy_workqueue(wq);
+}
+
+/* Deferred reset of a single FRMR. Generate a fresh rkey by
+ * replacing the MR.
+ *
+ * There's no recovery if this fails. The FRMR is abandoned, but
+ * remains in rb_all. It will be cleaned up when the transport is
+ * destroyed.
+ */
+static void
+__frwr_recovery_worker(struct work_struct *work)
+{
+       struct rpcrdma_mw *r = container_of(work, struct rpcrdma_mw,
+                                           r.frmr.fr_work);
+       struct rpcrdma_xprt *r_xprt = r->r.frmr.fr_xprt;
+       unsigned int depth = r_xprt->rx_ia.ri_max_frmr_depth;
+       struct ib_pd *pd = r_xprt->rx_ia.ri_pd;
+
+       if (ib_dereg_mr(r->r.frmr.fr_mr))
+               goto out_fail;
+
+       r->r.frmr.fr_mr = ib_alloc_fast_reg_mr(pd, depth);
+       if (IS_ERR(r->r.frmr.fr_mr))
+               goto out_fail;
+
+       dprintk("RPC:       %s: recovered FRMR %p\n", __func__, r);
+       r->r.frmr.fr_state = FRMR_IS_INVALID;
+       rpcrdma_put_mw(r_xprt, r);
+       return;
+
+out_fail:
+       pr_warn("RPC:       %s: FRMR %p unrecovered\n",
+               __func__, r);
+}
+
+/* A broken MR was discovered in a context that can't sleep.
+ * Defer recovery to the recovery worker.
+ */
+static void
+__frwr_queue_recovery(struct rpcrdma_mw *r)
+{
+       INIT_WORK(&r->r.frmr.fr_work, __frwr_recovery_worker);
+       queue_work(frwr_recovery_wq, &r->r.frmr.fr_work);
+}
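__frwr_queue_recovery exists because its caller may not sleep, so the ib_dereg_mr/ib_alloc_fast_reg_mr work is pushed to process context. A rough userspace analogue of that hand-off, using pthreads instead of the kernel workqueue API (purely illustrative, not kernel code):

#include <pthread.h>
#include <stdio.h>

struct mw {
	int rkey;
};

static struct mw *pending;
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t kick = PTHREAD_COND_INITIALIZER;

/* The "recovery worker": waits until an MW is queued, then resets it. */
static void *recovery_worker(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&lock);
	while (!pending)
		pthread_cond_wait(&kick, &lock);
	printf("recovered MW, fresh rkey %d\n", ++pending->rkey);
	pending = NULL;
	pthread_mutex_unlock(&lock);
	return NULL;
}

/* Fast path: hand the broken MW to the worker and return right away. */
static void queue_recovery(struct mw *mw)
{
	pthread_mutex_lock(&lock);
	pending = mw;
	pthread_cond_signal(&kick);
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	pthread_t worker;
	struct mw broken = { 41 };

	pthread_create(&worker, NULL, recovery_worker, NULL);
	queue_recovery(&broken);
	pthread_join(worker, NULL);
	return 0;
}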
+
 static int
 __frwr_init(struct rpcrdma_mw *r, struct ib_pd *pd, struct ib_device *device,
            unsigned int depth)
@@ -128,7 +252,7 @@ frwr_sendcompletion(struct ib_wc *wc)
 
        /* WARNING: Only wr_id and status are reliable at this point */
        r = (struct rpcrdma_mw *)(unsigned long)wc->wr_id;
-       dprintk("RPC:       %s: frmr %p (stale), status %s (%d)\n",
+       pr_warn("RPC:       %s: frmr %p flushed, status %s (%d)\n",
                __func__, r, ib_wc_status_msg(wc->status), wc->status);
        r->r.frmr.fr_state = FRMR_IS_STALE;
 }
@@ -137,16 +261,19 @@ static int
 frwr_op_init(struct rpcrdma_xprt *r_xprt)
 {
        struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
-       struct ib_device *device = r_xprt->rx_ia.ri_id->device;
+       struct ib_device *device = r_xprt->rx_ia.ri_device;
        unsigned int depth = r_xprt->rx_ia.ri_max_frmr_depth;
        struct ib_pd *pd = r_xprt->rx_ia.ri_pd;
        int i;
 
+       spin_lock_init(&buf->rb_mwlock);
        INIT_LIST_HEAD(&buf->rb_mws);
        INIT_LIST_HEAD(&buf->rb_all);
 
-       i = (buf->rb_max_requests + 1) * RPCRDMA_MAX_SEGS;
-       dprintk("RPC:       %s: initializing %d FRMRs\n", __func__, i);
+       i = max_t(int, RPCRDMA_MAX_DATA_SEGS / depth, 1);
+       i += 2;                         /* head + tail */
+       i *= buf->rb_max_requests;      /* one set for each RPC slot */
+       dprintk("RPC:       %s: initalizing %d FRMRs\n", __func__, i);
 
        while (i--) {
                struct rpcrdma_mw *r;
@@ -165,6 +292,7 @@ frwr_op_init(struct rpcrdma_xprt *r_xprt)
                list_add(&r->mw_list, &buf->rb_mws);
                list_add(&r->mw_all, &buf->rb_all);
                r->mw_sendcompletion = frwr_sendcompletion;
+               r->r.frmr.fr_xprt = r_xprt;
        }
 
        return 0;
@@ -178,12 +306,12 @@ frwr_op_map(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg,
            int nsegs, bool writing)
 {
        struct rpcrdma_ia *ia = &r_xprt->rx_ia;
-       struct ib_device *device = ia->ri_id->device;
+       struct ib_device *device = ia->ri_device;
        enum dma_data_direction direction = rpcrdma_data_dir(writing);
        struct rpcrdma_mr_seg *seg1 = seg;
-       struct rpcrdma_mw *mw = seg1->rl_mw;
-       struct rpcrdma_frmr *frmr = &mw->r.frmr;
-       struct ib_mr *mr = frmr->fr_mr;
+       struct rpcrdma_mw *mw;
+       struct rpcrdma_frmr *frmr;
+       struct ib_mr *mr;
        struct ib_send_wr fastreg_wr, *bad_wr;
        u8 key;
        int len, pageoff;
@@ -192,12 +320,25 @@ frwr_op_map(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg,
        u64 pa;
        int page_no;
 
+       mw = seg1->rl_mw;
+       seg1->rl_mw = NULL;
+       do {
+               if (mw)
+                       __frwr_queue_recovery(mw);
+               mw = rpcrdma_get_mw(r_xprt);
+               if (!mw)
+                       return -ENOMEM;
+       } while (mw->r.frmr.fr_state != FRMR_IS_INVALID);
+       frmr = &mw->r.frmr;
+       frmr->fr_state = FRMR_IS_VALID;
+
        pageoff = offset_in_page(seg1->mr_offset);
        seg1->mr_offset -= pageoff;     /* start of page */
        seg1->mr_len += pageoff;
        len = -pageoff;
        if (nsegs > ia->ri_max_frmr_depth)
                nsegs = ia->ri_max_frmr_depth;
+
        for (page_no = i = 0; i < nsegs;) {
                rpcrdma_map_one(device, seg, direction);
                pa = seg->mr_dma;
@@ -216,8 +357,6 @@ frwr_op_map(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg,
        dprintk("RPC:       %s: Using frmr %p to map %d segments (%d bytes)\n",
                __func__, mw, i, len);
 
-       frmr->fr_state = FRMR_IS_VALID;
-
        memset(&fastreg_wr, 0, sizeof(fastreg_wr));
        fastreg_wr.wr_id = (unsigned long)(void *)mw;
        fastreg_wr.opcode = IB_WR_FAST_REG_MR;
@@ -229,6 +368,7 @@ frwr_op_map(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg,
        fastreg_wr.wr.fast_reg.access_flags = writing ?
                                IB_ACCESS_REMOTE_WRITE | IB_ACCESS_LOCAL_WRITE :
                                IB_ACCESS_REMOTE_READ;
+       mr = frmr->fr_mr;
        key = (u8)(mr->rkey & 0x000000FF);
        ib_update_fast_reg_key(mr, ++key);
        fastreg_wr.wr.fast_reg.rkey = mr->rkey;
@@ -238,6 +378,7 @@ frwr_op_map(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg,
        if (rc)
                goto out_senderr;
 
+       seg1->rl_mw = mw;
        seg1->mr_rkey = mr->rkey;
        seg1->mr_base = seg1->mr_dma + pageoff;
        seg1->mr_nsegs = i;
@@ -246,10 +387,9 @@ frwr_op_map(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg,
 
 out_senderr:
        dprintk("RPC:       %s: ib_post_send status %i\n", __func__, rc);
-       ib_update_fast_reg_key(mr, --key);
-       frmr->fr_state = FRMR_IS_INVALID;
        while (i--)
                rpcrdma_unmap_one(device, --seg);
+       __frwr_queue_recovery(mw);
        return rc;
 }
 
@@ -261,78 +401,46 @@ frwr_op_unmap(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg)
 {
        struct rpcrdma_mr_seg *seg1 = seg;
        struct rpcrdma_ia *ia = &r_xprt->rx_ia;
+       struct rpcrdma_mw *mw = seg1->rl_mw;
        struct ib_send_wr invalidate_wr, *bad_wr;
        int rc, nsegs = seg->mr_nsegs;
-       struct ib_device *device;
 
-       seg1->rl_mw->r.frmr.fr_state = FRMR_IS_INVALID;
+       dprintk("RPC:       %s: FRMR %p\n", __func__, mw);
+
+       seg1->rl_mw = NULL;
+       mw->r.frmr.fr_state = FRMR_IS_INVALID;
 
        memset(&invalidate_wr, 0, sizeof(invalidate_wr));
-       invalidate_wr.wr_id = (unsigned long)(void *)seg1->rl_mw;
+       invalidate_wr.wr_id = (unsigned long)(void *)mw;
        invalidate_wr.opcode = IB_WR_LOCAL_INV;
-       invalidate_wr.ex.invalidate_rkey = seg1->rl_mw->r.frmr.fr_mr->rkey;
+       invalidate_wr.ex.invalidate_rkey = mw->r.frmr.fr_mr->rkey;
        DECR_CQCOUNT(&r_xprt->rx_ep);
 
-       read_lock(&ia->ri_qplock);
-       device = ia->ri_id->device;
        while (seg1->mr_nsegs--)
-               rpcrdma_unmap_one(device, seg++);
+               rpcrdma_unmap_one(ia->ri_device, seg++);
+       read_lock(&ia->ri_qplock);
        rc = ib_post_send(ia->ri_id->qp, &invalidate_wr, &bad_wr);
        read_unlock(&ia->ri_qplock);
        if (rc)
                goto out_err;
+
+       rpcrdma_put_mw(r_xprt, mw);
        return nsegs;
 
 out_err:
-       /* Force rpcrdma_buffer_get() to retry */
-       seg1->rl_mw->r.frmr.fr_state = FRMR_IS_STALE;
        dprintk("RPC:       %s: ib_post_send status %i\n", __func__, rc);
+       __frwr_queue_recovery(mw);
        return nsegs;
 }
 
-/* After a disconnect, a flushed FAST_REG_MR can leave an FRMR in
- * an unusable state. Find FRMRs in this state and dereg / reg
- * each.  FRMRs that are VALID and attached to an rpcrdma_req are
- * also torn down.
- *
- * This gives all in-use FRMRs a fresh rkey and leaves them INVALID.
- *
- * This is invoked only in the transport connect worker in order
- * to serialize with rpcrdma_register_frmr_external().
- */
-static void
-frwr_op_reset(struct rpcrdma_xprt *r_xprt)
-{
-       struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
-       struct ib_device *device = r_xprt->rx_ia.ri_id->device;
-       unsigned int depth = r_xprt->rx_ia.ri_max_frmr_depth;
-       struct ib_pd *pd = r_xprt->rx_ia.ri_pd;
-       struct rpcrdma_mw *r;
-       int rc;
-
-       list_for_each_entry(r, &buf->rb_all, mw_all) {
-               if (r->r.frmr.fr_state == FRMR_IS_INVALID)
-                       continue;
-
-               __frwr_release(r);
-               rc = __frwr_init(r, pd, device, depth);
-               if (rc) {
-                       dprintk("RPC:       %s: mw %p left %s\n",
-                               __func__, r,
-                               (r->r.frmr.fr_state == FRMR_IS_STALE ?
-                                       "stale" : "valid"));
-                       continue;
-               }
-
-               r->r.frmr.fr_state = FRMR_IS_INVALID;
-       }
-}
-
 static void
 frwr_op_destroy(struct rpcrdma_buffer *buf)
 {
        struct rpcrdma_mw *r;
 
+       /* Ensure stale MWs for "buf" are no longer in flight */
+       flush_workqueue(frwr_recovery_wq);
+
        while (!list_empty(&buf->rb_all)) {
                r = list_entry(buf->rb_all.next, struct rpcrdma_mw, mw_all);
                list_del(&r->mw_all);
@@ -347,7 +455,6 @@ const struct rpcrdma_memreg_ops rpcrdma_frwr_memreg_ops = {
        .ro_open                        = frwr_op_open,
        .ro_maxpages                    = frwr_op_maxpages,
        .ro_init                        = frwr_op_init,
-       .ro_reset                       = frwr_op_reset,
        .ro_destroy                     = frwr_op_destroy,
        .ro_displayname                 = "frwr",
 };
index ba518af167873dfe2e9c1f5f6723665bda2bd2e7..41985d07fdb744b5d9523b7c34af93c30f70522d 100644 (file)
@@ -50,8 +50,7 @@ physical_op_map(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg,
 {
        struct rpcrdma_ia *ia = &r_xprt->rx_ia;
 
-       rpcrdma_map_one(ia->ri_id->device, seg,
-                       rpcrdma_data_dir(writing));
+       rpcrdma_map_one(ia->ri_device, seg, rpcrdma_data_dir(writing));
        seg->mr_rkey = ia->ri_bind_mem->rkey;
        seg->mr_base = seg->mr_dma;
        seg->mr_nsegs = 1;
@@ -65,18 +64,10 @@ physical_op_unmap(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg)
 {
        struct rpcrdma_ia *ia = &r_xprt->rx_ia;
 
-       read_lock(&ia->ri_qplock);
-       rpcrdma_unmap_one(ia->ri_id->device, seg);
-       read_unlock(&ia->ri_qplock);
-
+       rpcrdma_unmap_one(ia->ri_device, seg);
        return 1;
 }
 
-static void
-physical_op_reset(struct rpcrdma_xprt *r_xprt)
-{
-}
-
 static void
 physical_op_destroy(struct rpcrdma_buffer *buf)
 {
@@ -88,7 +79,6 @@ const struct rpcrdma_memreg_ops rpcrdma_physical_memreg_ops = {
        .ro_open                        = physical_op_open,
        .ro_maxpages                    = physical_op_maxpages,
        .ro_init                        = physical_op_init,
-       .ro_reset                       = physical_op_reset,
        .ro_destroy                     = physical_op_destroy,
        .ro_displayname                 = "physical",
 };
index 2c53ea9e1b83dae01ebdd1aa22d256174dfbae08..84ea37daef36b0aa885c27e5eda950dda818949a 100644 (file)
@@ -284,9 +284,6 @@ rpcrdma_create_chunks(struct rpc_rqst *rqst, struct xdr_buf *target,
        return (unsigned char *)iptr - (unsigned char *)headerp;
 
 out:
-       if (r_xprt->rx_ia.ri_memreg_strategy == RPCRDMA_FRMR)
-               return n;
-
        for (pos = 0; nchunks--;)
                pos += r_xprt->rx_ia.ri_ops->ro_unmap(r_xprt,
                                                      &req->rl_segments[pos]);
@@ -732,8 +729,8 @@ rpcrdma_reply_handler(struct rpcrdma_rep *rep)
        struct rpcrdma_msg *headerp;
        struct rpcrdma_req *req;
        struct rpc_rqst *rqst;
-       struct rpc_xprt *xprt = rep->rr_xprt;
-       struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
+       struct rpcrdma_xprt *r_xprt = rep->rr_rxprt;
+       struct rpc_xprt *xprt = &r_xprt->rx_xprt;
        __be32 *iptr;
        int rdmalen, status;
        unsigned long cwnd;
@@ -770,7 +767,6 @@ rpcrdma_reply_handler(struct rpcrdma_rep *rep)
                        rep->rr_len);
 repost:
                r_xprt->rx_stats.bad_reply_count++;
-               rep->rr_func = rpcrdma_reply_handler;
                if (rpcrdma_ep_post_recv(&r_xprt->rx_ia, &r_xprt->rx_ep, rep))
                        rpcrdma_recv_buffer_put(rep);
 
index 436da2caec955ded2b2f4f6a2056cb1191594373..680f888a9ddd045314b305ef772385c7c6d5624e 100644 (file)
@@ -240,6 +240,16 @@ xprt_rdma_connect_worker(struct work_struct *work)
        xprt_clear_connecting(xprt);
 }
 
+static void
+xprt_rdma_inject_disconnect(struct rpc_xprt *xprt)
+{
+       struct rpcrdma_xprt *r_xprt = container_of(xprt, struct rpcrdma_xprt,
+                                                  rx_xprt);
+
+       pr_info("rpcrdma: injecting transport disconnect on xprt=%p\n", xprt);
+       rdma_disconnect(r_xprt->rx_ia.ri_id);
+}
+
 /*
  * xprt_rdma_destroy
  *
@@ -612,12 +622,6 @@ xprt_rdma_send_request(struct rpc_task *task)
        if (req->rl_reply == NULL)              /* e.g. reconnection */
                rpcrdma_recv_buffer_get(req);
 
-       if (req->rl_reply) {
-               req->rl_reply->rr_func = rpcrdma_reply_handler;
-               /* this need only be done once, but... */
-               req->rl_reply->rr_xprt = xprt;
-       }
-
        /* Must suppress retransmit to maintain credits */
        if (req->rl_connect_cookie == xprt->connect_cookie)
                goto drop_connection;
@@ -676,6 +680,17 @@ static void xprt_rdma_print_stats(struct rpc_xprt *xprt, struct seq_file *seq)
           r_xprt->rx_stats.bad_reply_count);
 }
 
+static int
+xprt_rdma_enable_swap(struct rpc_xprt *xprt)
+{
+       return -EINVAL;
+}
+
+static void
+xprt_rdma_disable_swap(struct rpc_xprt *xprt)
+{
+}
+
 /*
  * Plumbing for rpc transport switch and kernel module
  */
@@ -694,7 +709,10 @@ static struct rpc_xprt_ops xprt_rdma_procs = {
        .send_request           = xprt_rdma_send_request,
        .close                  = xprt_rdma_close,
        .destroy                = xprt_rdma_destroy,
-       .print_stats            = xprt_rdma_print_stats
+       .print_stats            = xprt_rdma_print_stats,
+       .enable_swap            = xprt_rdma_enable_swap,
+       .disable_swap           = xprt_rdma_disable_swap,
+       .inject_disconnect      = xprt_rdma_inject_disconnect
 };
 
 static struct xprt_class xprt_rdma = {
@@ -720,17 +738,24 @@ void xprt_rdma_cleanup(void)
        if (rc)
                dprintk("RPC:       %s: xprt_unregister returned %i\n",
                        __func__, rc);
+
+       frwr_destroy_recovery_wq();
 }
 
 int xprt_rdma_init(void)
 {
        int rc;
 
-       rc = xprt_register_transport(&xprt_rdma);
-
+       rc = frwr_alloc_recovery_wq();
        if (rc)
                return rc;
 
+       rc = xprt_register_transport(&xprt_rdma);
+       if (rc) {
+               frwr_destroy_recovery_wq();
+               return rc;
+       }
+
        dprintk("RPCRDMA Module Init, register RPC RDMA transport\n");
 
        dprintk("Defaults:\n");
index 52df265b472a9b2b79574c7d9363acba26ea5d8b..891c4ede2c20ea8d8c6bc79ee080f353d4df13d7 100644 (file)
@@ -80,7 +80,6 @@ static void
 rpcrdma_run_tasklet(unsigned long data)
 {
        struct rpcrdma_rep *rep;
-       void (*func)(struct rpcrdma_rep *);
        unsigned long flags;
 
        data = data;
@@ -89,14 +88,9 @@ rpcrdma_run_tasklet(unsigned long data)
                rep = list_entry(rpcrdma_tasklets_g.next,
                                 struct rpcrdma_rep, rr_list);
                list_del(&rep->rr_list);
-               func = rep->rr_func;
-               rep->rr_func = NULL;
                spin_unlock_irqrestore(&rpcrdma_tk_lock_g, flags);
 
-               if (func)
-                       func(rep);
-               else
-                       rpcrdma_recv_buffer_put(rep);
+               rpcrdma_reply_handler(rep);
 
                spin_lock_irqsave(&rpcrdma_tk_lock_g, flags);
        }
@@ -236,7 +230,7 @@ rpcrdma_recvcq_process_wc(struct ib_wc *wc, struct list_head *sched_list)
                __func__, rep, wc->byte_len);
 
        rep->rr_len = wc->byte_len;
-       ib_dma_sync_single_for_cpu(rdmab_to_ia(rep->rr_buffer)->ri_id->device,
+       ib_dma_sync_single_for_cpu(rep->rr_device,
                                   rdmab_addr(rep->rr_rdmabuf),
                                   rep->rr_len, DMA_FROM_DEVICE);
        prefetch(rdmab_to_msg(rep->rr_rdmabuf));
@@ -407,7 +401,7 @@ connected:
 
                pr_info("rpcrdma: connection to %pIS:%u on %s, memreg '%s', %d credits, %d responders%s\n",
                        sap, rpc_get_port(sap),
-                       ia->ri_id->device->name,
+                       ia->ri_device->name,
                        ia->ri_ops->ro_displayname,
                        xprt->rx_buf.rb_max_requests,
                        ird, ird < 4 && ird < tird / 2 ? " (low!)" : "");
@@ -508,8 +502,9 @@ rpcrdma_ia_open(struct rpcrdma_xprt *xprt, struct sockaddr *addr, int memreg)
                rc = PTR_ERR(ia->ri_id);
                goto out1;
        }
+       ia->ri_device = ia->ri_id->device;
 
-       ia->ri_pd = ib_alloc_pd(ia->ri_id->device);
+       ia->ri_pd = ib_alloc_pd(ia->ri_device);
        if (IS_ERR(ia->ri_pd)) {
                rc = PTR_ERR(ia->ri_pd);
                dprintk("RPC:       %s: ib_alloc_pd() failed %i\n",
@@ -517,7 +512,7 @@ rpcrdma_ia_open(struct rpcrdma_xprt *xprt, struct sockaddr *addr, int memreg)
                goto out2;
        }
 
-       rc = ib_query_device(ia->ri_id->device, devattr);
+       rc = ib_query_device(ia->ri_device, devattr);
        if (rc) {
                dprintk("RPC:       %s: ib_query_device failed %d\n",
                        __func__, rc);
@@ -526,7 +521,7 @@ rpcrdma_ia_open(struct rpcrdma_xprt *xprt, struct sockaddr *addr, int memreg)
 
        if (devattr->device_cap_flags & IB_DEVICE_LOCAL_DMA_LKEY) {
                ia->ri_have_dma_lkey = 1;
-               ia->ri_dma_lkey = ia->ri_id->device->local_dma_lkey;
+               ia->ri_dma_lkey = ia->ri_device->local_dma_lkey;
        }
 
        if (memreg == RPCRDMA_FRMR) {
@@ -541,7 +536,7 @@ rpcrdma_ia_open(struct rpcrdma_xprt *xprt, struct sockaddr *addr, int memreg)
                }
        }
        if (memreg == RPCRDMA_MTHCAFMR) {
-               if (!ia->ri_id->device->alloc_fmr) {
+               if (!ia->ri_device->alloc_fmr) {
                        dprintk("RPC:       %s: MTHCAFMR registration "
                                "not supported by HCA\n", __func__);
                        memreg = RPCRDMA_ALLPHYSICAL;
@@ -590,9 +585,6 @@ rpcrdma_ia_open(struct rpcrdma_xprt *xprt, struct sockaddr *addr, int memreg)
        dprintk("RPC:       %s: memory registration strategy is '%s'\n",
                __func__, ia->ri_ops->ro_displayname);
 
-       /* Else will do memory reg/dereg for each chunk */
-       ia->ri_memreg_strategy = memreg;
-
        rwlock_init(&ia->ri_qplock);
        return 0;
 
@@ -622,17 +614,17 @@ rpcrdma_ia_close(struct rpcrdma_ia *ia)
                dprintk("RPC:       %s: ib_dereg_mr returned %i\n",
                        __func__, rc);
        }
+
        if (ia->ri_id != NULL && !IS_ERR(ia->ri_id)) {
                if (ia->ri_id->qp)
                        rdma_destroy_qp(ia->ri_id);
                rdma_destroy_id(ia->ri_id);
                ia->ri_id = NULL;
        }
-       if (ia->ri_pd != NULL && !IS_ERR(ia->ri_pd)) {
-               rc = ib_dealloc_pd(ia->ri_pd);
-               dprintk("RPC:       %s: ib_dealloc_pd returned %i\n",
-                       __func__, rc);
-       }
+
+       /* If the pd is still busy, xprtrdma missed freeing a resource */
+       if (ia->ri_pd && !IS_ERR(ia->ri_pd))
+               WARN_ON(ib_dealloc_pd(ia->ri_pd));
 }
 
 /*
@@ -693,8 +685,8 @@ rpcrdma_ep_create(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia,
        INIT_DELAYED_WORK(&ep->rep_connect_worker, rpcrdma_connect_worker);
 
        cq_attr.cqe = ep->rep_attr.cap.max_send_wr + 1;
-       sendcq = ib_create_cq(ia->ri_id->device, rpcrdma_sendcq_upcall,
-                                 rpcrdma_cq_async_error_upcall, ep, &cq_attr);
+       sendcq = ib_create_cq(ia->ri_device, rpcrdma_sendcq_upcall,
+                             rpcrdma_cq_async_error_upcall, ep, &cq_attr);
        if (IS_ERR(sendcq)) {
                rc = PTR_ERR(sendcq);
                dprintk("RPC:       %s: failed to create send CQ: %i\n",
@@ -710,8 +702,8 @@ rpcrdma_ep_create(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia,
        }
 
        cq_attr.cqe = ep->rep_attr.cap.max_recv_wr + 1;
-       recvcq = ib_create_cq(ia->ri_id->device, rpcrdma_recvcq_upcall,
-                                 rpcrdma_cq_async_error_upcall, ep, &cq_attr);
+       recvcq = ib_create_cq(ia->ri_device, rpcrdma_recvcq_upcall,
+                             rpcrdma_cq_async_error_upcall, ep, &cq_attr);
        if (IS_ERR(recvcq)) {
                rc = PTR_ERR(recvcq);
                dprintk("RPC:       %s: failed to create recv CQ: %i\n",
@@ -817,8 +809,6 @@ retry:
                rpcrdma_flush_cqs(ep);
 
                xprt = container_of(ia, struct rpcrdma_xprt, rx_ia);
-               ia->ri_ops->ro_reset(xprt);
-
                id = rpcrdma_create_id(xprt, ia,
                                (struct sockaddr *)&xprt->rx_data.addr);
                if (IS_ERR(id)) {
@@ -832,7 +822,7 @@ retry:
                 * More stuff I haven't thought of!
                 * Rrrgh!
                 */
-               if (ia->ri_id->device != id->device) {
+               if (ia->ri_device != id->device) {
                        printk("RPC:       %s: can't reconnect on "
                                "different device!\n", __func__);
                        rdma_destroy_id(id);
@@ -974,7 +964,8 @@ rpcrdma_create_rep(struct rpcrdma_xprt *r_xprt)
                goto out_free;
        }
 
-       rep->rr_buffer = &r_xprt->rx_buf;
+       rep->rr_device = ia->ri_device;
+       rep->rr_rxprt = r_xprt;
        return rep;
 
 out_free:
@@ -1098,31 +1089,33 @@ rpcrdma_buffer_destroy(struct rpcrdma_buffer *buf)
        kfree(buf->rb_pool);
 }
 
-/* "*mw" can be NULL when rpcrdma_buffer_get_mrs() fails, leaving
- * some req segments uninitialized.
- */
-static void
-rpcrdma_buffer_put_mr(struct rpcrdma_mw **mw, struct rpcrdma_buffer *buf)
+struct rpcrdma_mw *
+rpcrdma_get_mw(struct rpcrdma_xprt *r_xprt)
 {
-       if (*mw) {
-               list_add_tail(&(*mw)->mw_list, &buf->rb_mws);
-               *mw = NULL;
+       struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
+       struct rpcrdma_mw *mw = NULL;
+
+       spin_lock(&buf->rb_mwlock);
+       if (!list_empty(&buf->rb_mws)) {
+               mw = list_first_entry(&buf->rb_mws,
+                                     struct rpcrdma_mw, mw_list);
+               list_del_init(&mw->mw_list);
        }
+       spin_unlock(&buf->rb_mwlock);
+
+       if (!mw)
+               pr_err("RPC:       %s: no MWs available\n", __func__);
+       return mw;
 }
 
-/* Cycle mw's back in reverse order, and "spin" them.
- * This delays and scrambles reuse as much as possible.
- */
-static void
-rpcrdma_buffer_put_mrs(struct rpcrdma_req *req, struct rpcrdma_buffer *buf)
+void
+rpcrdma_put_mw(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mw *mw)
 {
-       struct rpcrdma_mr_seg *seg = req->rl_segments;
-       struct rpcrdma_mr_seg *seg1 = seg;
-       int i;
+       struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
 
-       for (i = 1, seg++; i < RPCRDMA_MAX_SEGS; seg++, i++)
-               rpcrdma_buffer_put_mr(&seg->rl_mw, buf);
-       rpcrdma_buffer_put_mr(&seg1->rl_mw, buf);
+       spin_lock(&buf->rb_mwlock);
+       list_add_tail(&mw->mw_list, &buf->rb_mws);
+       spin_unlock(&buf->rb_mwlock);
 }
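rpcrdma_get_mw and rpcrdma_put_mw above replace the old per-request MR hoarding with one shared free list guarded by rb_mwlock. A simplified userspace sketch of the same idea follows; it uses a LIFO stack under a pthread mutex, whereas the kernel code keeps a FIFO list under a spinlock, and all names are illustrative.

#include <pthread.h>
#include <stddef.h>
#include <stdio.h>

struct mw {
	struct mw *next;
	int id;
};

static pthread_mutex_t mw_lock = PTHREAD_MUTEX_INITIALIZER;
static struct mw *mw_free;

/* Pop one MW off the shared free list, or return NULL if it is empty. */
static struct mw *get_mw(void)
{
	struct mw *mw;

	pthread_mutex_lock(&mw_lock);
	mw = mw_free;
	if (mw)
		mw_free = mw->next;
	pthread_mutex_unlock(&mw_lock);
	return mw;
}

/* Return an MW to the free list once its registration is retired. */
static void put_mw(struct mw *mw)
{
	pthread_mutex_lock(&mw_lock);
	mw->next = mw_free;
	mw_free = mw;
	pthread_mutex_unlock(&mw_lock);
}

int main(void)
{
	struct mw a = { NULL, 1 };

	put_mw(&a);
	printf("got mw %d\n", get_mw()->id);
	return 0;
}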
 
 static void
@@ -1132,115 +1125,10 @@ rpcrdma_buffer_put_sendbuf(struct rpcrdma_req *req, struct rpcrdma_buffer *buf)
        req->rl_niovs = 0;
        if (req->rl_reply) {
                buf->rb_recv_bufs[--buf->rb_recv_index] = req->rl_reply;
-               req->rl_reply->rr_func = NULL;
                req->rl_reply = NULL;
        }
 }
 
-/* rpcrdma_unmap_one() was already done during deregistration.
- * Redo only the ib_post_send().
- */
-static void
-rpcrdma_retry_local_inv(struct rpcrdma_mw *r, struct rpcrdma_ia *ia)
-{
-       struct rpcrdma_xprt *r_xprt =
-                               container_of(ia, struct rpcrdma_xprt, rx_ia);
-       struct ib_send_wr invalidate_wr, *bad_wr;
-       int rc;
-
-       dprintk("RPC:       %s: FRMR %p is stale\n", __func__, r);
-
-       /* When this FRMR is re-inserted into rb_mws, it is no longer stale */
-       r->r.frmr.fr_state = FRMR_IS_INVALID;
-
-       memset(&invalidate_wr, 0, sizeof(invalidate_wr));
-       invalidate_wr.wr_id = (unsigned long)(void *)r;
-       invalidate_wr.opcode = IB_WR_LOCAL_INV;
-       invalidate_wr.ex.invalidate_rkey = r->r.frmr.fr_mr->rkey;
-       DECR_CQCOUNT(&r_xprt->rx_ep);
-
-       dprintk("RPC:       %s: frmr %p invalidating rkey %08x\n",
-               __func__, r, r->r.frmr.fr_mr->rkey);
-
-       read_lock(&ia->ri_qplock);
-       rc = ib_post_send(ia->ri_id->qp, &invalidate_wr, &bad_wr);
-       read_unlock(&ia->ri_qplock);
-       if (rc) {
-               /* Force rpcrdma_buffer_get() to retry */
-               r->r.frmr.fr_state = FRMR_IS_STALE;
-               dprintk("RPC:       %s: ib_post_send failed, %i\n",
-                       __func__, rc);
-       }
-}
-
-static void
-rpcrdma_retry_flushed_linv(struct list_head *stale,
-                          struct rpcrdma_buffer *buf)
-{
-       struct rpcrdma_ia *ia = rdmab_to_ia(buf);
-       struct list_head *pos;
-       struct rpcrdma_mw *r;
-       unsigned long flags;
-
-       list_for_each(pos, stale) {
-               r = list_entry(pos, struct rpcrdma_mw, mw_list);
-               rpcrdma_retry_local_inv(r, ia);
-       }
-
-       spin_lock_irqsave(&buf->rb_lock, flags);
-       list_splice_tail(stale, &buf->rb_mws);
-       spin_unlock_irqrestore(&buf->rb_lock, flags);
-}
-
-static struct rpcrdma_req *
-rpcrdma_buffer_get_frmrs(struct rpcrdma_req *req, struct rpcrdma_buffer *buf,
-                        struct list_head *stale)
-{
-       struct rpcrdma_mw *r;
-       int i;
-
-       i = RPCRDMA_MAX_SEGS - 1;
-       while (!list_empty(&buf->rb_mws)) {
-               r = list_entry(buf->rb_mws.next,
-                              struct rpcrdma_mw, mw_list);
-               list_del(&r->mw_list);
-               if (r->r.frmr.fr_state == FRMR_IS_STALE) {
-                       list_add(&r->mw_list, stale);
-                       continue;
-               }
-               req->rl_segments[i].rl_mw = r;
-               if (unlikely(i-- == 0))
-                       return req;     /* Success */
-       }
-
-       /* Not enough entries on rb_mws for this req */
-       rpcrdma_buffer_put_sendbuf(req, buf);
-       rpcrdma_buffer_put_mrs(req, buf);
-       return NULL;
-}
-
-static struct rpcrdma_req *
-rpcrdma_buffer_get_fmrs(struct rpcrdma_req *req, struct rpcrdma_buffer *buf)
-{
-       struct rpcrdma_mw *r;
-       int i;
-
-       i = RPCRDMA_MAX_SEGS - 1;
-       while (!list_empty(&buf->rb_mws)) {
-               r = list_entry(buf->rb_mws.next,
-                              struct rpcrdma_mw, mw_list);
-               list_del(&r->mw_list);
-               req->rl_segments[i].rl_mw = r;
-               if (unlikely(i-- == 0))
-                       return req;     /* Success */
-       }
-
-       /* Not enough entries on rb_mws for this req */
-       rpcrdma_buffer_put_sendbuf(req, buf);
-       rpcrdma_buffer_put_mrs(req, buf);
-       return NULL;
-}
-
 /*
  * Get a set of request/reply buffers.
  *
@@ -1253,12 +1141,11 @@ rpcrdma_buffer_get_fmrs(struct rpcrdma_req *req, struct rpcrdma_buffer *buf)
 struct rpcrdma_req *
 rpcrdma_buffer_get(struct rpcrdma_buffer *buffers)
 {
-       struct rpcrdma_ia *ia = rdmab_to_ia(buffers);
-       struct list_head stale;
        struct rpcrdma_req *req;
        unsigned long flags;
 
        spin_lock_irqsave(&buffers->rb_lock, flags);
+
        if (buffers->rb_send_index == buffers->rb_max_requests) {
                spin_unlock_irqrestore(&buffers->rb_lock, flags);
                dprintk("RPC:       %s: out of request buffers\n", __func__);
@@ -1277,20 +1164,7 @@ rpcrdma_buffer_get(struct rpcrdma_buffer *buffers)
        }
        buffers->rb_send_bufs[buffers->rb_send_index++] = NULL;
 
-       INIT_LIST_HEAD(&stale);
-       switch (ia->ri_memreg_strategy) {
-       case RPCRDMA_FRMR:
-               req = rpcrdma_buffer_get_frmrs(req, buffers, &stale);
-               break;
-       case RPCRDMA_MTHCAFMR:
-               req = rpcrdma_buffer_get_fmrs(req, buffers);
-               break;
-       default:
-               break;
-       }
        spin_unlock_irqrestore(&buffers->rb_lock, flags);
-       if (!list_empty(&stale))
-               rpcrdma_retry_flushed_linv(&stale, buffers);
        return req;
 }
 
@@ -1302,19 +1176,10 @@ void
 rpcrdma_buffer_put(struct rpcrdma_req *req)
 {
        struct rpcrdma_buffer *buffers = req->rl_buffer;
-       struct rpcrdma_ia *ia = rdmab_to_ia(buffers);
        unsigned long flags;
 
        spin_lock_irqsave(&buffers->rb_lock, flags);
        rpcrdma_buffer_put_sendbuf(req, buffers);
-       switch (ia->ri_memreg_strategy) {
-       case RPCRDMA_FRMR:
-       case RPCRDMA_MTHCAFMR:
-               rpcrdma_buffer_put_mrs(req, buffers);
-               break;
-       default:
-               break;
-       }
        spin_unlock_irqrestore(&buffers->rb_lock, flags);
 }
 
@@ -1344,10 +1209,9 @@ rpcrdma_recv_buffer_get(struct rpcrdma_req *req)
 void
 rpcrdma_recv_buffer_put(struct rpcrdma_rep *rep)
 {
-       struct rpcrdma_buffer *buffers = rep->rr_buffer;
+       struct rpcrdma_buffer *buffers = &rep->rr_rxprt->rx_buf;
        unsigned long flags;
 
-       rep->rr_func = NULL;
        spin_lock_irqsave(&buffers->rb_lock, flags);
        buffers->rb_recv_bufs[--buffers->rb_recv_index] = rep;
        spin_unlock_irqrestore(&buffers->rb_lock, flags);
@@ -1376,9 +1240,9 @@ rpcrdma_register_internal(struct rpcrdma_ia *ia, void *va, int len,
        /*
         * All memory passed here was kmalloc'ed, therefore phys-contiguous.
         */
-       iov->addr = ib_dma_map_single(ia->ri_id->device,
+       iov->addr = ib_dma_map_single(ia->ri_device,
                        va, len, DMA_BIDIRECTIONAL);
-       if (ib_dma_mapping_error(ia->ri_id->device, iov->addr))
+       if (ib_dma_mapping_error(ia->ri_device, iov->addr))
                return -ENOMEM;
 
        iov->length = len;
@@ -1422,8 +1286,8 @@ rpcrdma_deregister_internal(struct rpcrdma_ia *ia,
 {
        int rc;
 
-       ib_dma_unmap_single(ia->ri_id->device,
-                       iov->addr, iov->length, DMA_BIDIRECTIONAL);
+       ib_dma_unmap_single(ia->ri_device,
+                           iov->addr, iov->length, DMA_BIDIRECTIONAL);
 
        if (NULL == mr)
                return 0;
@@ -1516,15 +1380,18 @@ rpcrdma_ep_post(struct rpcrdma_ia *ia,
        send_wr.num_sge = req->rl_niovs;
        send_wr.opcode = IB_WR_SEND;
        if (send_wr.num_sge == 4)       /* no need to sync any pad (constant) */
-               ib_dma_sync_single_for_device(ia->ri_id->device,
-                       req->rl_send_iov[3].addr, req->rl_send_iov[3].length,
-                       DMA_TO_DEVICE);
-       ib_dma_sync_single_for_device(ia->ri_id->device,
-               req->rl_send_iov[1].addr, req->rl_send_iov[1].length,
-               DMA_TO_DEVICE);
-       ib_dma_sync_single_for_device(ia->ri_id->device,
-               req->rl_send_iov[0].addr, req->rl_send_iov[0].length,
-               DMA_TO_DEVICE);
+               ib_dma_sync_single_for_device(ia->ri_device,
+                                             req->rl_send_iov[3].addr,
+                                             req->rl_send_iov[3].length,
+                                             DMA_TO_DEVICE);
+       ib_dma_sync_single_for_device(ia->ri_device,
+                                     req->rl_send_iov[1].addr,
+                                     req->rl_send_iov[1].length,
+                                     DMA_TO_DEVICE);
+       ib_dma_sync_single_for_device(ia->ri_device,
+                                     req->rl_send_iov[0].addr,
+                                     req->rl_send_iov[0].length,
+                                     DMA_TO_DEVICE);
 
        if (DECR_CQCOUNT(ep) > 0)
                send_wr.send_flags = 0;
@@ -1557,7 +1424,7 @@ rpcrdma_ep_post_recv(struct rpcrdma_ia *ia,
        recv_wr.sg_list = &rep->rr_rdmabuf->rg_iov;
        recv_wr.num_sge = 1;
 
-       ib_dma_sync_single_for_cpu(ia->ri_id->device,
+       ib_dma_sync_single_for_cpu(ia->ri_device,
                                   rdmab_addr(rep->rr_rdmabuf),
                                   rdmab_length(rep->rr_rdmabuf),
                                   DMA_BIDIRECTIONAL);
index 58163b88738c2363c559be28d3f3807043065826..f49dd8b381221dceaef4847e4ae28d397ebcdf27 100644 (file)
@@ -62,6 +62,7 @@
 struct rpcrdma_ia {
        const struct rpcrdma_memreg_ops *ri_ops;
        rwlock_t                ri_qplock;
+       struct ib_device        *ri_device;
        struct rdma_cm_id       *ri_id;
        struct ib_pd            *ri_pd;
        struct ib_mr            *ri_bind_mem;
@@ -69,7 +70,6 @@ struct rpcrdma_ia {
        int                     ri_have_dma_lkey;
        struct completion       ri_done;
        int                     ri_async_rc;
-       enum rpcrdma_memreg     ri_memreg_strategy;
        unsigned int            ri_max_frmr_depth;
        struct ib_device_attr   ri_devattr;
        struct ib_qp_attr       ri_qp_attr;
@@ -173,9 +173,8 @@ struct rpcrdma_buffer;
 
 struct rpcrdma_rep {
        unsigned int            rr_len;
-       struct rpcrdma_buffer   *rr_buffer;
-       struct rpc_xprt         *rr_xprt;
-       void                    (*rr_func)(struct rpcrdma_rep *);
+       struct ib_device        *rr_device;
+       struct rpcrdma_xprt     *rr_rxprt;
        struct list_head        rr_list;
        struct rpcrdma_regbuf   *rr_rdmabuf;
 };
@@ -203,11 +202,18 @@ struct rpcrdma_frmr {
        struct ib_fast_reg_page_list    *fr_pgl;
        struct ib_mr                    *fr_mr;
        enum rpcrdma_frmr_state         fr_state;
+       struct work_struct              fr_work;
+       struct rpcrdma_xprt             *fr_xprt;
+};
+
+struct rpcrdma_fmr {
+       struct ib_fmr           *fmr;
+       u64                     *physaddrs;
 };
 
 struct rpcrdma_mw {
        union {
-               struct ib_fmr           *fmr;
+               struct rpcrdma_fmr      fmr;
                struct rpcrdma_frmr     frmr;
        } r;
        void                    (*mw_sendcompletion)(struct ib_wc *);
@@ -281,15 +287,17 @@ rpcr_to_rdmar(struct rpc_rqst *rqst)
  * One of these is associated with a transport instance
  */
 struct rpcrdma_buffer {
-       spinlock_t      rb_lock;        /* protects indexes */
-       u32             rb_max_requests;/* client max requests */
-       struct list_head rb_mws;        /* optional memory windows/fmrs/frmrs */
-       struct list_head rb_all;
-       int             rb_send_index;
+       spinlock_t              rb_mwlock;      /* protect rb_mws list */
+       struct list_head        rb_mws;
+       struct list_head        rb_all;
+       char                    *rb_pool;
+
+       spinlock_t              rb_lock;        /* protect buf arrays */
+       u32                     rb_max_requests;
+       int                     rb_send_index;
+       int                     rb_recv_index;
        struct rpcrdma_req      **rb_send_bufs;
-       int             rb_recv_index;
        struct rpcrdma_rep      **rb_recv_bufs;
-       char            *rb_pool;
 };
 #define rdmab_to_ia(b) (&container_of((b), struct rpcrdma_xprt, rx_buf)->rx_ia)
 
@@ -350,7 +358,6 @@ struct rpcrdma_memreg_ops {
                                   struct rpcrdma_create_data_internal *);
        size_t          (*ro_maxpages)(struct rpcrdma_xprt *);
        int             (*ro_init)(struct rpcrdma_xprt *);
-       void            (*ro_reset)(struct rpcrdma_xprt *);
        void            (*ro_destroy)(struct rpcrdma_buffer *);
        const char      *ro_displayname;
 };
@@ -413,6 +420,8 @@ int rpcrdma_ep_post_recv(struct rpcrdma_ia *, struct rpcrdma_ep *,
 int rpcrdma_buffer_create(struct rpcrdma_xprt *);
 void rpcrdma_buffer_destroy(struct rpcrdma_buffer *);
 
+struct rpcrdma_mw *rpcrdma_get_mw(struct rpcrdma_xprt *);
+void rpcrdma_put_mw(struct rpcrdma_xprt *, struct rpcrdma_mw *);
 struct rpcrdma_req *rpcrdma_buffer_get(struct rpcrdma_buffer *);
 void rpcrdma_buffer_put(struct rpcrdma_req *);
 void rpcrdma_recv_buffer_get(struct rpcrdma_req *);
@@ -425,6 +434,9 @@ void rpcrdma_free_regbuf(struct rpcrdma_ia *,
 
 unsigned int rpcrdma_max_segments(struct rpcrdma_xprt *);
 
+int frwr_alloc_recovery_wq(void);
+void frwr_destroy_recovery_wq(void);
+
 /*
  * Wrappers for chunk registration, shared by read/write chunk code.
  */
index 66891e32c5e311b386c9afa25b2cf4cd25325427..e193c2b5476b3a83973e9799e2e826fdcd2b842c 100644 (file)
@@ -622,24 +622,6 @@ process_status:
        return status;
 }
 
-/**
- * xs_tcp_shutdown - gracefully shut down a TCP socket
- * @xprt: transport
- *
- * Initiates a graceful shutdown of the TCP socket by calling the
- * equivalent of shutdown(SHUT_RDWR);
- */
-static void xs_tcp_shutdown(struct rpc_xprt *xprt)
-{
-       struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
-       struct socket *sock = transport->sock;
-
-       if (sock != NULL) {
-               kernel_sock_shutdown(sock, SHUT_RDWR);
-               trace_rpc_socket_shutdown(xprt, sock);
-       }
-}
-
 /**
  * xs_tcp_send_request - write an RPC request to a TCP socket
  * @task: address of RPC task that manages the state of an RPC request
@@ -786,6 +768,7 @@ static void xs_sock_mark_closed(struct rpc_xprt *xprt)
        xs_sock_reset_connection_flags(xprt);
        /* Mark transport as closed and wake up all pending tasks */
        xprt_disconnect_done(xprt);
+       xprt_force_disconnect(xprt);
 }
 
 /**
@@ -827,6 +810,9 @@ static void xs_reset_transport(struct sock_xprt *transport)
        if (sk == NULL)
                return;
 
+       if (atomic_read(&transport->xprt.swapper))
+               sk_clear_memalloc(sk);
+
        write_lock_bh(&sk->sk_callback_lock);
        transport->inet = NULL;
        transport->sock = NULL;
@@ -863,6 +849,13 @@ static void xs_close(struct rpc_xprt *xprt)
        xprt_disconnect_done(xprt);
 }
 
+static void xs_inject_disconnect(struct rpc_xprt *xprt)
+{
+       dprintk("RPC:       injecting transport disconnect on xprt=%p\n",
+               xprt);
+       xprt_disconnect_done(xprt);
+}
+
 static void xs_xprt_free(struct rpc_xprt *xprt)
 {
        xs_free_peer_addresses(xprt);
@@ -901,7 +894,6 @@ static int xs_local_copy_to_xdr(struct xdr_buf *xdr, struct sk_buff *skb)
 /**
  * xs_local_data_ready - "data ready" callback for AF_LOCAL sockets
  * @sk: socket with data to read
- * @len: how much data to read
  *
  * Currently this assumes we can read the whole reply in a single gulp.
  */
@@ -965,7 +957,6 @@ static void xs_local_data_ready(struct sock *sk)
 /**
  * xs_udp_data_ready - "data ready" callback for UDP sockets
  * @sk: socket with data to read
- * @len: how much data to read
  *
  */
 static void xs_udp_data_ready(struct sock *sk)
@@ -1389,7 +1380,6 @@ static int xs_tcp_data_recv(read_descriptor_t *rd_desc, struct sk_buff *skb, uns
 /**
  * xs_tcp_data_ready - "data ready" callback for TCP sockets
  * @sk: socket with data to read
- * @bytes: how much data to read
  *
  */
 static void xs_tcp_data_ready(struct sock *sk)
@@ -1886,9 +1876,7 @@ static int xs_local_finish_connecting(struct rpc_xprt *xprt,
 
 /**
  * xs_local_setup_socket - create AF_LOCAL socket, connect to a local endpoint
- * @xprt: RPC transport to connect
  * @transport: socket transport to connect
- * @create_sock: function to create a socket of the correct type
  */
 static int xs_local_setup_socket(struct sock_xprt *transport)
 {
@@ -1960,43 +1948,84 @@ static void xs_local_connect(struct rpc_xprt *xprt, struct rpc_task *task)
                msleep_interruptible(15000);
 }
 
-#ifdef CONFIG_SUNRPC_SWAP
+#if IS_ENABLED(CONFIG_SUNRPC_SWAP)
+/*
+ * Note that this should be called with XPRT_LOCKED held (or when we otherwise
+ * know that we have exclusive access to the socket), to guard against
+ * races with xs_reset_transport.
+ */
 static void xs_set_memalloc(struct rpc_xprt *xprt)
 {
        struct sock_xprt *transport = container_of(xprt, struct sock_xprt,
                        xprt);
 
-       if (xprt->swapper)
+       /*
+        * If there's no sock, then we have nothing to set. The
+        * reconnecting process will get it for us.
+        */
+       if (!transport->inet)
+               return;
+       if (atomic_read(&xprt->swapper))
                sk_set_memalloc(transport->inet);
 }
 
 /**
- * xs_swapper - Tag this transport as being used for swap.
+ * xs_enable_swap - Tag this transport as being used for swap.
  * @xprt: transport to tag
- * @enable: enable/disable
  *
+ * Take a reference to this transport on behalf of the rpc_clnt, and
+ * mark it for swapping if it wasn't already.
  */
-int xs_swapper(struct rpc_xprt *xprt, int enable)
+static int
+xs_enable_swap(struct rpc_xprt *xprt)
 {
-       struct sock_xprt *transport = container_of(xprt, struct sock_xprt,
-                       xprt);
-       int err = 0;
+       struct sock_xprt *xs = container_of(xprt, struct sock_xprt, xprt);
 
-       if (enable) {
-               xprt->swapper++;
-               xs_set_memalloc(xprt);
-       } else if (xprt->swapper) {
-               xprt->swapper--;
-               sk_clear_memalloc(transport->inet);
-       }
+       if (atomic_inc_return(&xprt->swapper) != 1)
+               return 0;
+       if (wait_on_bit_lock(&xprt->state, XPRT_LOCKED, TASK_KILLABLE))
+               return -ERESTARTSYS;
+       if (xs->inet)
+               sk_set_memalloc(xs->inet);
+       xprt_release_xprt(xprt, NULL);
+       return 0;
+}
 
-       return err;
+/**
+ * xs_disable_swap - Untag this transport as being used for swap.
+ * @xprt: transport to untag
+ *
+ * Drop a "swapper" reference to this xprt on behalf of the rpc_clnt. If the
+ * swapper refcount goes to 0, untag the socket as a memalloc socket.
+ */
+static void
+xs_disable_swap(struct rpc_xprt *xprt)
+{
+       struct sock_xprt *xs = container_of(xprt, struct sock_xprt, xprt);
+
+       if (!atomic_dec_and_test(&xprt->swapper))
+               return;
+       if (wait_on_bit_lock(&xprt->state, XPRT_LOCKED, TASK_KILLABLE))
+               return;
+       if (xs->inet)
+               sk_clear_memalloc(xs->inet);
+       xprt_release_xprt(xprt, NULL);
 }
-EXPORT_SYMBOL_GPL(xs_swapper);
 #else
 static void xs_set_memalloc(struct rpc_xprt *xprt)
 {
 }
+
+static int
+xs_enable_swap(struct rpc_xprt *xprt)
+{
+       return -EINVAL;
+}
+
+static void
+xs_disable_swap(struct rpc_xprt *xprt)
+{
+}
 #endif
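xs_enable_swap and xs_disable_swap turn the old xs_swapper(xprt, enable) call into a refcount in which only the first enable and the last disable touch the socket's memalloc state. A compact userspace sketch of that counting scheme, with C11 atomics standing in for the kernel's atomic_t (illustrative only):

#include <stdatomic.h>
#include <stdio.h>

static atomic_int swapper;
static int memalloc;		/* stands in for sk_set_memalloc() state */

static void enable_swap(void)
{
	if (atomic_fetch_add(&swapper, 1) + 1 != 1)
		return;		/* not the first user */
	memalloc = 1;
}

static void disable_swap(void)
{
	if (atomic_fetch_sub(&swapper, 1) - 1 != 0)
		return;		/* other users remain */
	memalloc = 0;
}

int main(void)
{
	enable_swap();
	enable_swap();
	disable_swap();
	printf("memalloc=%d\n", memalloc);	/* still 1 */
	disable_swap();
	printf("memalloc=%d\n", memalloc);	/* back to 0 */
	return 0;
}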
 
 static void xs_udp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock)
@@ -2057,6 +2086,27 @@ out:
        xprt_wake_pending_tasks(xprt, status);
 }
 
+/**
+ * xs_tcp_shutdown - gracefully shut down a TCP socket
+ * @xprt: transport
+ *
+ * Initiates a graceful shutdown of the TCP socket by calling the
+ * equivalent of shutdown(SHUT_RDWR);
+ */
+static void xs_tcp_shutdown(struct rpc_xprt *xprt)
+{
+       struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
+       struct socket *sock = transport->sock;
+
+       if (sock == NULL)
+               return;
+       if (xprt_connected(xprt)) {
+               kernel_sock_shutdown(sock, SHUT_RDWR);
+               trace_rpc_socket_shutdown(xprt, sock);
+       } else
+               xs_reset_transport(transport);
+}
+
 static int xs_tcp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock)
 {
        struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
@@ -2067,6 +2117,7 @@ static int xs_tcp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock)
                unsigned int keepidle = xprt->timeout->to_initval / HZ;
                unsigned int keepcnt = xprt->timeout->to_retries + 1;
                unsigned int opt_on = 1;
+               unsigned int timeo;
 
                /* TCP Keepalive options */
                kernel_setsockopt(sock, SOL_SOCKET, SO_KEEPALIVE,
@@ -2078,6 +2129,12 @@ static int xs_tcp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock)
                kernel_setsockopt(sock, SOL_TCP, TCP_KEEPCNT,
                                (char *)&keepcnt, sizeof(keepcnt));
 
+               /* TCP user timeout (see RFC5482) */
+               timeo = jiffies_to_msecs(xprt->timeout->to_initval) *
+                       (xprt->timeout->to_retries + 1);
+               kernel_setsockopt(sock, SOL_TCP, TCP_USER_TIMEOUT,
+                               (char *)&timeo, sizeof(timeo));
+
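For readers unfamiliar with the RFC 5482 option being set just above, here is a minimal userspace example of TCP_USER_TIMEOUT. The 60-second initial timeout and 2 retries are made-up figures, but the arithmetic mirrors the to_initval * (to_retries + 1) budget computed in the patch.

#include <netinet/in.h>
#include <netinet/tcp.h>
#include <stdio.h>
#include <sys/socket.h>

int main(void)
{
	int fd = socket(AF_INET, SOCK_STREAM, 0);
	/* e.g. 60 s initial timeout, 2 retries => 180000 ms budget */
	unsigned int timeo = 60000 * (2 + 1);

	if (fd < 0)
		return 1;
	if (setsockopt(fd, IPPROTO_TCP, TCP_USER_TIMEOUT,
		       &timeo, sizeof(timeo)))
		perror("TCP_USER_TIMEOUT");
	else
		printf("unacked data may stall for at most %u ms\n", timeo);
	return 0;
}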
                write_lock_bh(&sk->sk_callback_lock);
 
                xs_save_old_callbacks(transport, sk);
@@ -2125,9 +2182,6 @@ out:
 
 /**
  * xs_tcp_setup_socket - create a TCP socket and connect to a remote endpoint
- * @xprt: RPC transport to connect
- * @transport: socket transport to connect
- * @create_sock: function to create a socket of the correct type
  *
  * Invoked by a work queue tasklet.
  */
@@ -2463,6 +2517,8 @@ static struct rpc_xprt_ops xs_local_ops = {
        .close                  = xs_close,
        .destroy                = xs_destroy,
        .print_stats            = xs_local_print_stats,
+       .enable_swap            = xs_enable_swap,
+       .disable_swap           = xs_disable_swap,
 };
 
 static struct rpc_xprt_ops xs_udp_ops = {
@@ -2482,6 +2538,9 @@ static struct rpc_xprt_ops xs_udp_ops = {
        .close                  = xs_close,
        .destroy                = xs_destroy,
        .print_stats            = xs_udp_print_stats,
+       .enable_swap            = xs_enable_swap,
+       .disable_swap           = xs_disable_swap,
+       .inject_disconnect      = xs_inject_disconnect,
 };
 
 static struct rpc_xprt_ops xs_tcp_ops = {
@@ -2498,6 +2557,9 @@ static struct rpc_xprt_ops xs_tcp_ops = {
        .close                  = xs_tcp_shutdown,
        .destroy                = xs_destroy,
        .print_stats            = xs_tcp_print_stats,
+       .enable_swap            = xs_enable_swap,
+       .disable_swap           = xs_disable_swap,
+       .inject_disconnect      = xs_inject_disconnect,
 };
 
 /*
@@ -2515,6 +2577,9 @@ static struct rpc_xprt_ops bc_tcp_ops = {
        .close                  = bc_close,
        .destroy                = bc_destroy,
        .print_stats            = xs_tcp_print_stats,
+       .enable_swap            = xs_enable_swap,
+       .disable_swap           = xs_disable_swap,
+       .inject_disconnect      = xs_inject_disconnect,
 };
 
 static int xs_init_anyaddr(const int family, struct sockaddr *sap)
@@ -2982,7 +3047,7 @@ static int param_set_portnr(const char *val, const struct kernel_param *kp)
                        RPC_MAX_RESVPORT);
 }
 
-static struct kernel_param_ops param_ops_portnr = {
+static const struct kernel_param_ops param_ops_portnr = {
        .set = param_set_portnr,
        .get = param_get_uint,
 };
@@ -3001,7 +3066,7 @@ static int param_set_slot_table_size(const char *val,
                        RPC_MAX_SLOT_TABLE);
 }
 
-static struct kernel_param_ops param_ops_slot_table_size = {
+static const struct kernel_param_ops param_ops_slot_table_size = {
        .set = param_set_slot_table_size,
        .get = param_get_uint,
 };
@@ -3017,7 +3082,7 @@ static int param_set_max_slot_table_size(const char *val,
                        RPC_MAX_SLOT_TABLE_LIMIT);
 }
 
-static struct kernel_param_ops param_ops_max_slot_table_size = {
+static const struct kernel_param_ops param_ops_max_slot_table_size = {
        .set = param_set_max_slot_table_size,
        .get = param_get_uint,
 };
index 4906ca3c0f3a576a529eacb26631f8585291ae40..a816382fc8af1b9efb016f888493ca4dcc65fe3b 100644 (file)
@@ -108,6 +108,11 @@ void tipc_bclink_remove_node(struct net *net, u32 addr)
 
        tipc_bclink_lock(net);
        tipc_nmap_remove(&tn->bclink->bcast_nodes, addr);
+
+       /* Last node? => reset backlog queue */
+       if (!tn->bclink->bcast_nodes.count)
+               tipc_link_purge_backlog(&tn->bclink->link);
+
        tipc_bclink_unlock(net);
 }
 
index ca8b8e0f49b526ebbf7a87e2cc89491ecfedf988..eaa9fe54b4aebfb531610611637915dc1b0c7256 100644 (file)
@@ -404,7 +404,7 @@ void tipc_link_reset_fragments(struct tipc_link *l_ptr)
        l_ptr->reasm_buf = NULL;
 }
 
-static void tipc_link_purge_backlog(struct tipc_link *l)
+void tipc_link_purge_backlog(struct tipc_link *l)
 {
        __skb_queue_purge(&l->backlogq);
        l->backlog[TIPC_LOW_IMPORTANCE].len = 0;
index 0c02c973e98558c699f006cce06b81768891b08b..ae0a0ea572f2961aca2617f9244ea74ebba15c6a 100644 (file)
@@ -218,6 +218,7 @@ void tipc_link_reset_fragments(struct tipc_link *l_ptr);
 int tipc_link_is_up(struct tipc_link *l_ptr);
 int tipc_link_is_active(struct tipc_link *l_ptr);
 void tipc_link_purge_queues(struct tipc_link *l_ptr);
+void tipc_link_purge_backlog(struct tipc_link *l);
 void tipc_link_reset_all(struct tipc_node *node);
 void tipc_link_reset(struct tipc_link *l_ptr);
 int tipc_link_xmit_skb(struct net *net, struct sk_buff *skb, u32 dest,
index ee96a2519eff6f39d2f093ae88774c612d71906e..e81a8c74b8d2802003c14584b8326bb5bfb174be 100644 (file)
@@ -53,7 +53,7 @@ struct check {
        void *data;
        bool warn, error;
        enum checkstatus status;
-       int inprogress;
+       bool inprogress;
        int num_prereqs;
        struct check **prereq;
 };
@@ -113,6 +113,7 @@ static inline void check_msg(struct check *c, const char *fmt, ...)
                vfprintf(stderr, fmt, ap);
                fprintf(stderr, "\n");
        }
+       va_end(ap);
 }
 
 #define FAIL(c, ...) \
@@ -141,9 +142,9 @@ static void check_nodes_props(struct check *c, struct node *dt, struct node *nod
                check_nodes_props(c, dt, child);
 }
 
-static int run_check(struct check *c, struct node *dt)
+static bool run_check(struct check *c, struct node *dt)
 {
-       int error = 0;
+       bool error = false;
        int i;
 
        assert(!c->inprogress);
@@ -151,11 +152,11 @@ static int run_check(struct check *c, struct node *dt)
        if (c->status != UNCHECKED)
                goto out;
 
-       c->inprogress = 1;
+       c->inprogress = true;
 
        for (i = 0; i < c->num_prereqs; i++) {
                struct check *prq = c->prereq[i];
-               error |= run_check(prq, dt);
+               error = error || run_check(prq, dt);
                if (prq->status != PASSED) {
                        c->status = PREREQ;
                        check_msg(c, "Failed prerequisite '%s'",
@@ -177,9 +178,9 @@ static int run_check(struct check *c, struct node *dt)
        TRACE(c, "\tCompleted, status %d", c->status);
 
 out:
-       c->inprogress = 0;
+       c->inprogress = false;
        if ((c->status != PASSED) && (c->error))
-               error = 1;
+               error = true;
        return error;
 }
 
@@ -624,11 +625,11 @@ static void check_avoid_default_addr_size(struct check *c, struct node *dt,
        if (!reg && !ranges)
                return;
 
-       if ((node->parent->addr_cells == -1))
+       if (node->parent->addr_cells == -1)
                FAIL(c, "Relying on default #address-cells value for %s",
                     node->fullpath);
 
-       if ((node->parent->size_cells == -1))
+       if (node->parent->size_cells == -1)
                FAIL(c, "Relying on default #size-cells value for %s",
                     node->fullpath);
 }
@@ -706,15 +707,15 @@ static void disable_warning_error(struct check *c, bool warn, bool error)
        c->error = c->error && !error;
 }
 
-void parse_checks_option(bool warn, bool error, const char *optarg)
+void parse_checks_option(bool warn, bool error, const char *arg)
 {
        int i;
-       const char *name = optarg;
+       const char *name = arg;
        bool enable = true;
 
-       if ((strncmp(optarg, "no-", 3) == 0)
-           || (strncmp(optarg, "no_", 3) == 0)) {
-               name = optarg + 3;
+       if ((strncmp(arg, "no-", 3) == 0)
+           || (strncmp(arg, "no_", 3) == 0)) {
+               name = arg + 3;
                enable = false;
        }
 
@@ -733,7 +734,7 @@ void parse_checks_option(bool warn, bool error, const char *optarg)
        die("Unrecognized check name \"%s\"\n", name);
 }
 
-void process_checks(int force, struct boot_info *bi)
+void process_checks(bool force, struct boot_info *bi)
 {
        struct node *dt = bi->dt;
        int i;
index 4a40c5b92474fede88049a2de4876ad641e9e063..8cae23746882e4747ea5d13e300484452fc48eee 100644 (file)
@@ -74,7 +74,7 @@ struct data data_copy_escape_string(const char *s, int len)
        struct data d;
        char *q;
 
-       d = data_grow_for(empty_data, strlen(s)+1);
+       d = data_grow_for(empty_data, len + 1);
 
        q = d.val;
        while (i < len) {
@@ -250,20 +250,20 @@ struct data data_add_marker(struct data d, enum markertype type, char *ref)
        return data_append_markers(d, m);
 }
 
-int data_is_one_string(struct data d)
+bool data_is_one_string(struct data d)
 {
        int i;
        int len = d.len;
 
        if (len == 0)
-               return 0;
+               return false;
 
        for (i = 0; i < len-1; i++)
                if (d.val[i] == '\0')
-                       return 0;
+                       return false;
 
        if (d.val[len-1] != '\0')
-               return 0;
+               return false;
 
-       return 1;
+       return true;
 }
index 3b41bfca636cea743e575cf3c7dfcb95967e915e..0ee1caf03dd057bee8c212002568941f0658edd7 100644 (file)
@@ -20,7 +20,6 @@
 
 %option noyywrap nounput noinput never-interactive
 
-%x INCLUDE
 %x BYTESTRING
 %x PROPNODENAME
 %s V1
@@ -40,6 +39,7 @@ LINECOMMENT   "//".*\n
 #include "dtc-parser.tab.h"
 
 YYLTYPE yylloc;
+extern bool treesource_error;
 
 /* CAUTION: this will stop working if we ever use yyless() or yyunput() */
 #define        YY_USER_ACTION \
@@ -61,7 +61,8 @@ static int dts_version = 1;
                                BEGIN(V1); \
 
 static void push_input_file(const char *filename);
-static int pop_input_file(void);
+static bool pop_input_file(void);
+static void lexical_error(const char *fmt, ...);
 %}
 
 %%
@@ -75,11 +76,11 @@ static int pop_input_file(void);
                        char *line, *tmp, *fn;
                        /* skip text before line # */
                        line = yytext;
-                       while (!isdigit(*line))
+                       while (!isdigit((unsigned char)*line))
                                line++;
                        /* skip digits in line # */
                        tmp = line;
-                       while (!isspace(*tmp))
+                       while (!isspace((unsigned char)*tmp))
                                tmp++;
                        /* "NULL"-terminate line # */
                        *tmp = '\0';
@@ -146,15 +147,42 @@ static int pop_input_file(void);
                }
 
 <V1>([0-9]+|0[xX][0-9a-fA-F]+)(U|L|UL|LL|ULL)? {
-                       yylval.literal = xstrdup(yytext);
-                       DPRINT("Literal: '%s'\n", yylval.literal);
+                       char *e;
+                       DPRINT("Integer Literal: '%s'\n", yytext);
+
+                       errno = 0;
+                       yylval.integer = strtoull(yytext, &e, 0);
+
+                       assert(!(*e) || !e[strspn(e, "UL")]);
+
+                       if (errno == ERANGE)
+                               lexical_error("Integer literal '%s' out of range",
+                                             yytext);
+                       else
+                               /* ERANGE is the only strtoull error triggerable
+                                *  by strings matching the pattern */
+                               assert(errno == 0);
                        return DT_LITERAL;
                }
 
 <*>{CHAR_LITERAL}      {
-                       yytext[yyleng-1] = '\0';
-                       yylval.literal = xstrdup(yytext+1);
-                       DPRINT("Character literal: %s\n", yylval.literal);
+                       struct data d;
+                       DPRINT("Character literal: %s\n", yytext);
+
+                       d = data_copy_escape_string(yytext+1, yyleng-2);
+                       if (d.len == 1) {
+                               lexical_error("Empty character literal");
+                               yylval.integer = 0;
+                               return DT_CHAR_LITERAL;
+                       }
+
+                       yylval.integer = (unsigned char)d.val[0];
+
+                       if (d.len > 2)
+                               lexical_error("Character literal has %d"
+                                             " characters instead of 1",
+                                             d.len - 1);
+
                        return DT_CHAR_LITERAL;
                }
 
@@ -164,7 +192,7 @@ static int pop_input_file(void);
                        return DT_REF;
                }
 
-<*>"&{/"{PATHCHAR}+\}  {       /* new-style path reference */
+<*>"&{/"{PATHCHAR}*\}  {       /* new-style path reference */
                        yytext[yyleng-1] = '\0';
                        DPRINT("Ref: %s\n", yytext+2);
                        yylval.labelref = xstrdup(yytext+2);
@@ -238,13 +266,24 @@ static void push_input_file(const char *filename)
 }
 
 
-static int pop_input_file(void)
+static bool pop_input_file(void)
 {
        if (srcfile_pop() == 0)
-               return 0;
+               return false;
 
        yypop_buffer_state();
        yyin = current_srcfile->f;
 
-       return 1;
+       return true;
+}
+
+static void lexical_error(const char *fmt, ...)
+{
+       va_list ap;
+
+       va_start(ap, fmt);
+       srcpos_verror(&yylloc, "Lexical error", fmt, ap);
+       va_end(ap);
+
+       treesource_error = true;
 }
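
With the lexer changes above, integer literals are now converted in the scanner itself: the rule calls strtoull() and reports overflow through lexical_error() rather than handing the raw string to the parser. A stand-alone sketch of that ERANGE check, with plain fprintf() standing in for the srcpos_verror()-based reporting:

    #include <errno.h>
    #include <inttypes.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Parse a C-style integer literal, flagging out-of-range values the same
     * way the new <V1> literal rule does via ERANGE from strtoull(). */
    static uint64_t parse_literal(const char *text, int *err)
    {
            char *end;
            uint64_t val;

            errno = 0;
            val = strtoull(text, &end, 0);  /* base 0: decimal, octal, or 0x hex */

            if (errno == ERANGE) {
                    fprintf(stderr, "Integer literal '%s' out of range\n", text);
                    *err = 1;
            } else {
                    *err = 0;
            }
            return val;
    }

    int main(void)
    {
            int err;
            uint64_t v = parse_literal("0xdeadbeef", &err);
            printf("0x%" PRIx64 " err=%d\n", v, err);

            parse_literal("0xffffffffffffffffff", &err);  /* too wide: ERANGE */
            return 0;
    }
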
index 2d30f41778b7270b074798b4439952dca28d9c53..11cd78e723050e56d00709ef63164bd83cccff9e 100644 (file)
@@ -9,7 +9,7 @@
 #define FLEX_SCANNER
 #define YY_FLEX_MAJOR_VERSION 2
 #define YY_FLEX_MINOR_VERSION 5
-#define YY_FLEX_SUBMINOR_VERSION 35
+#define YY_FLEX_SUBMINOR_VERSION 39
 #if YY_FLEX_SUBMINOR_VERSION > 0
 #define FLEX_BETA
 #endif
@@ -162,7 +162,12 @@ typedef unsigned int flex_uint32_t;
 typedef struct yy_buffer_state *YY_BUFFER_STATE;
 #endif
 
-extern int yyleng;
+#ifndef YY_TYPEDEF_YY_SIZE_T
+#define YY_TYPEDEF_YY_SIZE_T
+typedef size_t yy_size_t;
+#endif
+
+extern yy_size_t yyleng;
 
 extern FILE *yyin, *yyout;
 
@@ -171,6 +176,7 @@ extern FILE *yyin, *yyout;
 #define EOB_ACT_LAST_MATCH 2
 
     #define YY_LESS_LINENO(n)
+    #define YY_LINENO_REWIND_TO(ptr)
     
 /* Return all but the first "n" matched characters back to the input stream. */
 #define yyless(n) \
@@ -188,11 +194,6 @@ extern FILE *yyin, *yyout;
 
 #define unput(c) yyunput( c, (yytext_ptr)  )
 
-#ifndef YY_TYPEDEF_YY_SIZE_T
-#define YY_TYPEDEF_YY_SIZE_T
-typedef size_t yy_size_t;
-#endif
-
 #ifndef YY_STRUCT_YY_BUFFER_STATE
 #define YY_STRUCT_YY_BUFFER_STATE
 struct yy_buffer_state
@@ -210,7 +211,7 @@ struct yy_buffer_state
        /* Number of characters read into yy_ch_buf, not including EOB
         * characters.
         */
-       int yy_n_chars;
+       yy_size_t yy_n_chars;
 
        /* Whether we "own" the buffer - i.e., we know we created it,
         * and can realloc() it to grow it, and should free() it to
@@ -280,8 +281,8 @@ static YY_BUFFER_STATE * yy_buffer_stack = 0; /**< Stack as an array. */
 
 /* yy_hold_char holds the character lost when yytext is formed. */
 static char yy_hold_char;
-static int yy_n_chars;         /* number of characters read into yy_ch_buf */
-int yyleng;
+static yy_size_t yy_n_chars;           /* number of characters read into yy_ch_buf */
+yy_size_t yyleng;
 
 /* Points to current character in buffer. */
 static char *yy_c_buf_p = (char *) 0;
@@ -309,7 +310,7 @@ static void yy_init_buffer (YY_BUFFER_STATE b,FILE *file  );
 
 YY_BUFFER_STATE yy_scan_buffer (char *base,yy_size_t size  );
 YY_BUFFER_STATE yy_scan_string (yyconst char *yy_str  );
-YY_BUFFER_STATE yy_scan_bytes (yyconst char *bytes,int len  );
+YY_BUFFER_STATE yy_scan_bytes (yyconst char *bytes,yy_size_t len  );
 
 void *yyalloc (yy_size_t  );
 void *yyrealloc (void *,yy_size_t  );
@@ -341,7 +342,7 @@ void yyfree (void *  );
 
 /* Begin user sect3 */
 
-#define yywrap(n) 1
+#define yywrap() 1
 #define YY_SKIP_YYWRAP
 
 typedef unsigned char YY_CHAR;
@@ -381,25 +382,25 @@ struct yy_trans_info
        flex_int32_t yy_verify;
        flex_int32_t yy_nxt;
        };
-static yyconst flex_int16_t yy_accept[161] =
+static yyconst flex_int16_t yy_accept[159] =
     {   0,
-        0,    0,    0,    0,    0,    0,    0,    0,    0,    0,
-       31,   29,   18,   18,   29,   29,   29,   29,   29,   29,
-       29,   29,   29,   29,   29,   29,   29,   29,   15,   16,
-       16,   29,   16,   10,   10,   18,   26,    0,    3,    0,
-       27,   12,    0,    0,   11,    0,    0,    0,    0,    0,
-        0,    0,   21,   23,   25,   24,   22,    0,    9,   28,
-        0,    0,    0,   14,   14,   16,   16,   16,   10,   10,
-       10,    0,   12,    0,   11,    0,    0,    0,   20,    0,
-        0,    0,    0,    0,    0,    0,    0,   16,   10,   10,
-       10,    0,   19,    0,    0,    0,    0,    0,    0,    0,
-
-        0,    0,   16,   13,    0,    0,    0,    0,    0,    0,
-        0,    0,    0,   16,    6,    0,    0,    0,    0,    0,
-        0,    2,    0,    0,    0,    0,    0,    0,    0,    0,
-        4,   17,    0,    0,    2,    0,    0,    0,    0,    0,
-        0,    0,    0,    0,    0,    0,    0,    1,    0,    0,
-        0,    0,    5,    8,    0,    0,    0,    0,    7,    0
+        0,    0,    0,    0,    0,    0,    0,    0,   31,   29,
+       18,   18,   29,   29,   29,   29,   29,   29,   29,   29,
+       29,   29,   29,   29,   29,   29,   15,   16,   16,   29,
+       16,   10,   10,   18,   26,    0,    3,    0,   27,   12,
+        0,    0,   11,    0,    0,    0,    0,    0,    0,    0,
+       21,   23,   25,   24,   22,    0,    9,   28,    0,    0,
+        0,   14,   14,   16,   16,   16,   10,   10,   10,    0,
+       12,    0,   11,    0,    0,    0,   20,    0,    0,    0,
+        0,    0,    0,    0,    0,   16,   10,   10,   10,    0,
+       13,   19,    0,    0,    0,    0,    0,    0,    0,    0,
+
+        0,   16,    0,    0,    0,    0,    0,    0,    0,    0,
+        0,   16,    6,    0,    0,    0,    0,    0,    0,    2,
+        0,    0,    0,    0,    0,    0,    0,    0,    4,   17,
+        0,    0,    2,    0,    0,    0,    0,    0,    0,    0,
+        0,    0,    0,    0,    0,    1,    0,    0,    0,    0,
+        5,    8,    0,    0,    0,    0,    7,    0
     } ;
 
 static yyconst flex_int32_t yy_ec[256] =
@@ -440,157 +441,157 @@ static yyconst flex_int32_t yy_meta[47] =
         2,    2,    4,    5,    5,    5,    6,    1,    1,    1,
         7,    8,    8,    8,    8,    1,    1,    7,    7,    7,
         7,    8,    8,    8,    8,    8,    8,    8,    8,    8,
-        8,    8,    8,    3,    1,    1
+        8,    8,    8,    3,    1,    4
     } ;
 
-static yyconst flex_int16_t yy_base[175] =
+static yyconst flex_int16_t yy_base[173] =
     {   0,
-        0,  385,  378,   40,   41,  383,   72,  382,   34,   44,
-      388,  393,   61,  117,  368,  116,  115,  115,  115,   48,
-      367,  107,  368,  339,  127,  120,    0,  147,  393,    0,
-      127,    0,  133,  156,  168,  153,  393,  125,  393,  380,
-      393,    0,  369,  127,  393,  160,  371,  377,  347,   21,
-      343,  346,  393,  393,  393,  393,  393,  359,  393,  393,
-      183,  343,  339,  393,  356,    0,  183,  340,  187,  348,
-      347,    0,    0,    0,  178,  359,  195,  365,  354,  326,
-      332,  325,  334,  328,  204,  326,  331,  324,  393,  335,
-      150,  311,  343,  342,  315,  322,  340,  179,  313,  207,
-
-      319,  316,  317,  393,  337,  333,  305,  302,  311,  301,
-      310,  190,  338,  337,  393,  307,  322,  301,  305,  277,
-      208,  311,  307,  278,  271,  270,  248,  246,  213,  130,
-      393,  393,  263,  235,  207,  221,  218,  229,  213,  213,
-      206,  234,  218,  210,  208,  193,  219,  393,  223,  204,
-      176,  157,  393,  393,  120,  106,   97,  119,  393,  393,
-      245,  251,  259,  263,  267,  273,  280,  284,  292,  300,
-      304,  310,  318,  326
+        0,  383,   34,  382,   65,  381,   37,  105,  387,  391,
+       54,  111,  367,  110,  109,  109,  112,   41,  366,  104,
+      367,  338,  124,  117,    0,  144,  391,    0,  121,    0,
+      135,  155,  140,  179,  391,  160,  391,  379,  391,    0,
+      368,  141,  391,  167,  370,  376,  346,  103,  342,  345,
+      391,  391,  391,  391,  391,  358,  391,  391,  175,  342,
+      338,  391,  355,    0,  185,  339,  184,  347,  346,    0,
+        0,  322,  175,  357,  175,  363,  352,  324,  330,  323,
+      332,  326,  201,  324,  329,  322,  391,  333,  181,  309,
+      391,  341,  340,  313,  320,  338,  178,  311,  146,  317,
+
+      314,  315,  335,  331,  303,  300,  309,  299,  308,  188,
+      336,  335,  391,  305,  320,  281,  283,  271,  203,  288,
+      281,  271,  266,  264,  245,  242,  208,  104,  391,  391,
+      244,  218,  204,  219,  206,  224,  201,  212,  204,  229,
+      215,  208,  207,  200,  219,  391,  233,  221,  200,  181,
+      391,  391,  149,  122,   86,   41,  391,  391,  245,  251,
+      259,  263,  267,  273,  280,  284,  292,  300,  304,  310,
+      318,  326
     } ;
 
-static yyconst flex_int16_t yy_def[175] =
+static yyconst flex_int16_t yy_def[173] =
     {   0,
-      160,    1,    1,    1,    1,    5,  160,    7,    1,    1,
-      160,  160,  160,  160,  160,  161,  162,  163,  160,  160,
-      160,  160,  164,  160,  160,  160,  165,  164,  160,  166,
-      167,  166,  166,  160,  160,  160,  160,  161,  160,  161,
-      160,  168,  160,  163,  160,  163,  169,  170,  160,  160,
-      160,  160,  160,  160,  160,  160,  160,  164,  160,  160,
-      160,  160,  160,  160,  164,  166,  167,  166,  160,  160,
-      160,  171,  168,  172,  163,  169,  169,  170,  160,  160,
-      160,  160,  160,  160,  160,  160,  160,  166,  160,  160,
-      171,  172,  160,  160,  160,  160,  160,  160,  160,  160,
-
-      160,  160,  166,  160,  160,  160,  160,  160,  160,  160,
-      160,  173,  160,  166,  160,  160,  160,  160,  160,  160,
-      173,  160,  173,  160,  160,  160,  160,  160,  160,  160,
-      160,  160,  160,  160,  160,  160,  160,  160,  160,  160,
-      160,  160,  174,  160,  160,  160,  174,  160,  174,  160,
-      160,  160,  160,  160,  160,  160,  160,  160,  160,    0,
-      160,  160,  160,  160,  160,  160,  160,  160,  160,  160,
-      160,  160,  160,  160
+      158,    1,    1,    3,  158,    5,    1,    1,  158,  158,
+      158,  158,  158,  159,  160,  161,  158,  158,  158,  158,
+      162,  158,  158,  158,  163,  162,  158,  164,  165,  164,
+      164,  158,  158,  158,  158,  159,  158,  159,  158,  166,
+      158,  161,  158,  161,  167,  168,  158,  158,  158,  158,
+      158,  158,  158,  158,  158,  162,  158,  158,  158,  158,
+      158,  158,  162,  164,  165,  164,  158,  158,  158,  169,
+      166,  170,  161,  167,  167,  168,  158,  158,  158,  158,
+      158,  158,  158,  158,  158,  164,  158,  158,  169,  170,
+      158,  158,  158,  158,  158,  158,  158,  158,  158,  158,
+
+      158,  164,  158,  158,  158,  158,  158,  158,  158,  171,
+      158,  164,  158,  158,  158,  158,  158,  158,  171,  158,
+      171,  158,  158,  158,  158,  158,  158,  158,  158,  158,
+      158,  158,  158,  158,  158,  158,  158,  158,  158,  158,
+      172,  158,  158,  158,  172,  158,  172,  158,  158,  158,
+      158,  158,  158,  158,  158,  158,  158,    0,  158,  158,
+      158,  158,  158,  158,  158,  158,  158,  158,  158,  158,
+      158,  158
     } ;
 
-static yyconst flex_int16_t yy_nxt[440] =
+static yyconst flex_int16_t yy_nxt[438] =
     {   0,
-       12,   13,   14,   13,   15,   16,   12,   17,   18,   12,
-       12,   12,   19,   12,   12,   12,   12,   20,   21,   22,
-       23,   23,   23,   23,   23,   12,   12,   23,   23,   23,
-       23,   23,   23,   23,   23,   23,   23,   23,   23,   23,
-       23,   23,   23,   12,   24,   12,   25,   34,   35,   35,
-       25,   81,   26,   26,   27,   27,   27,   34,   35,   35,
-       82,   28,   36,   36,   36,   53,   54,   29,   28,   28,
-       28,   28,   12,   13,   14,   13,   15,   16,   30,   17,
-       18,   30,   30,   30,   26,   30,   30,   30,   12,   20,
-       21,   22,   31,   31,   31,   31,   31,   32,   12,   31,
-
-       31,   31,   31,   31,   31,   31,   31,   31,   31,   31,
-       31,   31,   31,   31,   31,   12,   24,   12,   36,   36,
-       36,   39,   41,   45,   47,   56,   57,   48,   61,   47,
-       39,  159,   48,   66,   61,   45,   66,   66,   66,  158,
-       46,   40,   49,   59,   50,  157,   51,   49,   52,   50,
-       40,   63,   46,   52,   36,   36,   36,  156,   43,   62,
-       65,   65,   65,   59,  136,   68,  137,   65,   75,   69,
-       69,   69,   70,   71,   65,   65,   65,   65,   70,   71,
-       72,   69,   69,   69,   61,   46,   45,  155,  154,   66,
-       70,   71,   66,   66,   66,  122,   85,   85,   85,   59,
-
-       69,   69,   69,   46,   77,  100,  109,   93,  100,   70,
-       71,  110,  112,  122,  129,  123,  153,   85,   85,   85,
-      135,  135,  135,  148,  148,  160,  135,  135,  135,  152,
-      142,  142,  142,  123,  143,  142,  142,  142,  151,  143,
-      150,  146,  145,  149,  149,   38,   38,   38,   38,   38,
-       38,   38,   38,   42,  144,  141,  140,   42,   42,   44,
-       44,   44,   44,   44,   44,   44,   44,   58,   58,   58,
-       58,   64,  139,   64,   66,  138,  134,   66,  133,   66,
-       66,   67,  132,  131,   67,   67,   67,   67,   73,  130,
-       73,   73,   76,   76,   76,   76,   76,   76,   76,   76,
-
-       78,   78,   78,   78,   78,   78,   78,   78,   91,  160,
-       91,   92,  129,   92,   92,  128,   92,   92,  121,  121,
-      121,  121,  121,  121,  121,  121,  147,  147,  147,  147,
-      147,  147,  147,  147,  127,  126,  125,  124,   61,   61,
-      120,  119,  118,  117,  116,  115,   47,  114,  110,  113,
-      111,  108,  107,  106,   48,  105,  104,   89,  103,  102,
-      101,   99,   98,   97,   96,   95,   94,   79,   77,   90,
-       89,   88,   59,   87,   86,   59,   84,   83,   80,   79,
-       77,   74,  160,   60,   59,   55,   37,  160,   33,   25,
-       26,   25,   11,  160,  160,  160,  160,  160,  160,  160,
-
-      160,  160,  160,  160,  160,  160,  160,  160,  160,  160,
-      160,  160,  160,  160,  160,  160,  160,  160,  160,  160,
-      160,  160,  160,  160,  160,  160,  160,  160,  160,  160,
-      160,  160,  160,  160,  160,  160,  160,  160,  160
+       10,   11,   12,   11,   13,   14,   10,   15,   16,   10,
+       10,   10,   17,   10,   10,   10,   10,   18,   19,   20,
+       21,   21,   21,   21,   21,   10,   10,   21,   21,   21,
+       21,   21,   21,   21,   21,   21,   21,   21,   21,   21,
+       21,   21,   21,   10,   22,   10,   24,   25,   25,   25,
+       32,   33,   33,  157,   26,   34,   34,   34,   51,   52,
+       27,   26,   26,   26,   26,   10,   11,   12,   11,   13,
+       14,   28,   15,   16,   28,   28,   28,   24,   28,   28,
+       28,   10,   18,   19,   20,   29,   29,   29,   29,   29,
+       30,   10,   29,   29,   29,   29,   29,   29,   29,   29,
+
+       29,   29,   29,   29,   29,   29,   29,   29,   10,   22,
+       10,   23,   34,   34,   34,   37,   39,   43,   32,   33,
+       33,   45,   54,   55,   46,   59,   45,   64,  156,   46,
+       64,   64,   64,   79,   44,   38,   59,   57,  134,   47,
+      135,   48,   80,   49,   47,   50,   48,   99,   61,   43,
+       50,  110,   41,   67,   67,   67,   60,   63,   63,   63,
+       57,  155,   68,   69,   63,   37,   44,   66,   67,   67,
+       67,   63,   63,   63,   63,   73,   59,   68,   69,   70,
+       34,   34,   34,   43,   75,   38,  154,   92,   83,   83,
+       83,   64,   44,  120,   64,   64,   64,   67,   67,   67,
+
+       44,   57,   99,   68,   69,  107,   68,   69,  120,  127,
+      108,  153,  152,  121,   83,   83,   83,  133,  133,  133,
+      146,  133,  133,  133,  146,  140,  140,  140,  121,  141,
+      140,  140,  140,  151,  141,  158,  150,  149,  148,  144,
+      147,  143,  142,  139,  147,   36,   36,   36,   36,   36,
+       36,   36,   36,   40,  138,  137,  136,   40,   40,   42,
+       42,   42,   42,   42,   42,   42,   42,   56,   56,   56,
+       56,   62,  132,   62,   64,  131,  130,   64,  129,   64,
+       64,   65,  128,  158,   65,   65,   65,   65,   71,  127,
+       71,   71,   74,   74,   74,   74,   74,   74,   74,   74,
+
+       76,   76,   76,   76,   76,   76,   76,   76,   89,  126,
+       89,   90,  125,   90,   90,  124,   90,   90,  119,  119,
+      119,  119,  119,  119,  119,  119,  145,  145,  145,  145,
+      145,  145,  145,  145,  123,  122,   59,   59,  118,  117,
+      116,  115,  114,  113,   45,  112,  108,  111,  109,  106,
+      105,  104,   46,  103,   91,   87,  102,  101,  100,   98,
+       97,   96,   95,   94,   93,   77,   75,   91,   88,   87,
+       86,   57,   85,   84,   57,   82,   81,   78,   77,   75,
+       72,  158,   58,   57,   53,   35,  158,   31,   23,   23,
+        9,  158,  158,  158,  158,  158,  158,  158,  158,  158,
+
+      158,  158,  158,  158,  158,  158,  158,  158,  158,  158,
+      158,  158,  158,  158,  158,  158,  158,  158,  158,  158,
+      158,  158,  158,  158,  158,  158,  158,  158,  158,  158,
+      158,  158,  158,  158,  158,  158,  158
     } ;
 
-static yyconst flex_int16_t yy_chk[440] =
+static yyconst flex_int16_t yy_chk[438] =
     {   0,
         1,    1,    1,    1,    1,    1,    1,    1,    1,    1,
         1,    1,    1,    1,    1,    1,    1,    1,    1,    1,
         1,    1,    1,    1,    1,    1,    1,    1,    1,    1,
         1,    1,    1,    1,    1,    1,    1,    1,    1,    1,
-        1,    1,    1,    1,    1,    1,    4,    9,    9,    9,
-       10,   50,    4,    5,    5,    5,    5,   10,   10,   10,
-       50,    5,   13,   13,   13,   20,   20,    5,    5,    5,
-        5,    5,    7,    7,    7,    7,    7,    7,    7,    7,
-        7,    7,    7,    7,    7,    7,    7,    7,    7,    7,
-        7,    7,    7,    7,    7,    7,    7,    7,    7,    7,
-
-        7,    7,    7,    7,    7,    7,    7,    7,    7,    7,
-        7,    7,    7,    7,    7,    7,    7,    7,   14,   14,
-       14,   16,   17,   18,   19,   22,   22,   19,   25,   26,
-       38,  158,   26,   31,   33,   44,   31,   31,   31,  157,
-       18,   16,   19,   31,   19,  156,   19,   26,   19,   26,
-       38,   26,   44,   26,   36,   36,   36,  155,   17,   25,
-       28,   28,   28,   28,  130,   33,  130,   28,   46,   34,
-       34,   34,   91,   91,   28,   28,   28,   28,   34,   34,
-       34,   35,   35,   35,   61,   46,   75,  152,  151,   67,
-       35,   35,   67,   67,   67,  112,   61,   61,   61,   67,
-
-       69,   69,   69,   75,   77,   85,   98,   77,  100,   69,
-       69,   98,  100,  121,  129,  112,  150,   85,   85,   85,
-      135,  135,  135,  143,  147,  149,  129,  129,  129,  146,
-      138,  138,  138,  121,  138,  142,  142,  142,  145,  142,
-      144,  141,  140,  143,  147,  161,  161,  161,  161,  161,
-      161,  161,  161,  162,  139,  137,  136,  162,  162,  163,
-      163,  163,  163,  163,  163,  163,  163,  164,  164,  164,
-      164,  165,  134,  165,  166,  133,  128,  166,  127,  166,
-      166,  167,  126,  125,  167,  167,  167,  167,  168,  124,
-      168,  168,  169,  169,  169,  169,  169,  169,  169,  169,
-
-      170,  170,  170,  170,  170,  170,  170,  170,  171,  123,
-      171,  172,  122,  172,  172,  120,  172,  172,  173,  173,
-      173,  173,  173,  173,  173,  173,  174,  174,  174,  174,
-      174,  174,  174,  174,  119,  118,  117,  116,  114,  113,
-      111,  110,  109,  108,  107,  106,  105,  103,  102,  101,
-       99,   97,   96,   95,   94,   93,   92,   90,   88,   87,
-       86,   84,   83,   82,   81,   80,   79,   78,   76,   71,
-       70,   68,   65,   63,   62,   58,   52,   51,   49,   48,
-       47,   43,   40,   24,   23,   21,   15,   11,    8,    6,
-        3,    2,  160,  160,  160,  160,  160,  160,  160,  160,
-
-      160,  160,  160,  160,  160,  160,  160,  160,  160,  160,
-      160,  160,  160,  160,  160,  160,  160,  160,  160,  160,
-      160,  160,  160,  160,  160,  160,  160,  160,  160,  160,
-      160,  160,  160,  160,  160,  160,  160,  160,  160
+        1,    1,    1,    1,    1,    1,    3,    3,    3,    3,
+        7,    7,    7,  156,    3,   11,   11,   11,   18,   18,
+        3,    3,    3,    3,    3,    5,    5,    5,    5,    5,
+        5,    5,    5,    5,    5,    5,    5,    5,    5,    5,
+        5,    5,    5,    5,    5,    5,    5,    5,    5,    5,
+        5,    5,    5,    5,    5,    5,    5,    5,    5,    5,
+
+        5,    5,    5,    5,    5,    5,    5,    5,    5,    5,
+        5,    8,   12,   12,   12,   14,   15,   16,    8,    8,
+        8,   17,   20,   20,   17,   23,   24,   29,  155,   24,
+       29,   29,   29,   48,   16,   14,   31,   29,  128,   17,
+      128,   17,   48,   17,   24,   17,   24,   99,   24,   42,
+       24,   99,   15,   33,   33,   33,   23,   26,   26,   26,
+       26,  154,   33,   33,   26,   36,   42,   31,   32,   32,
+       32,   26,   26,   26,   26,   44,   59,   32,   32,   32,
+       34,   34,   34,   73,   75,   36,  153,   75,   59,   59,
+       59,   65,   44,  110,   65,   65,   65,   67,   67,   67,
+
+       73,   65,   83,   89,   89,   97,   67,   67,  119,  127,
+       97,  150,  149,  110,   83,   83,   83,  133,  133,  133,
+      141,  127,  127,  127,  145,  136,  136,  136,  119,  136,
+      140,  140,  140,  148,  140,  147,  144,  143,  142,  139,
+      141,  138,  137,  135,  145,  159,  159,  159,  159,  159,
+      159,  159,  159,  160,  134,  132,  131,  160,  160,  161,
+      161,  161,  161,  161,  161,  161,  161,  162,  162,  162,
+      162,  163,  126,  163,  164,  125,  124,  164,  123,  164,
+      164,  165,  122,  121,  165,  165,  165,  165,  166,  120,
+      166,  166,  167,  167,  167,  167,  167,  167,  167,  167,
+
+      168,  168,  168,  168,  168,  168,  168,  168,  169,  118,
+      169,  170,  117,  170,  170,  116,  170,  170,  171,  171,
+      171,  171,  171,  171,  171,  171,  172,  172,  172,  172,
+      172,  172,  172,  172,  115,  114,  112,  111,  109,  108,
+      107,  106,  105,  104,  103,  102,  101,  100,   98,   96,
+       95,   94,   93,   92,   90,   88,   86,   85,   84,   82,
+       81,   80,   79,   78,   77,   76,   74,   72,   69,   68,
+       66,   63,   61,   60,   56,   50,   49,   47,   46,   45,
+       41,   38,   22,   21,   19,   13,    9,    6,    4,    2,
+      158,  158,  158,  158,  158,  158,  158,  158,  158,  158,
+
+      158,  158,  158,  158,  158,  158,  158,  158,  158,  158,
+      158,  158,  158,  158,  158,  158,  158,  158,  158,  158,
+      158,  158,  158,  158,  158,  158,  158,  158,  158,  158,
+      158,  158,  158,  158,  158,  158,  158
     } ;
 
 static yy_state_type yy_last_accepting_state;
@@ -631,13 +632,13 @@ char *yytext;
 
 
 
-
-#line 38 "dtc-lexer.l"
+#line 37 "dtc-lexer.l"
 #include "dtc.h"
 #include "srcpos.h"
 #include "dtc-parser.tab.h"
 
 YYLTYPE yylloc;
+extern bool treesource_error;
 
 /* CAUTION: this will stop working if we ever use yyless() or yyunput() */
 #define        YY_USER_ACTION \
@@ -659,14 +660,14 @@ static int dts_version = 1;
                                BEGIN(V1); \
 
 static void push_input_file(const char *filename);
-static int pop_input_file(void);
-#line 664 "dtc-lexer.lex.c"
+static bool pop_input_file(void);
+static void lexical_error(const char *fmt, ...);
+#line 666 "dtc-lexer.lex.c"
 
 #define INITIAL 0
-#define INCLUDE 1
-#define BYTESTRING 2
-#define PROPNODENAME 3
-#define V1 4
+#define BYTESTRING 1
+#define PROPNODENAME 2
+#define V1 3
 
 #ifndef YY_NO_UNISTD_H
 /* Special case for "unistd.h", since it is non-ANSI. We include it way
@@ -703,7 +704,7 @@ FILE *yyget_out (void );
 
 void yyset_out  (FILE * out_str  );
 
-int yyget_leng (void );
+yy_size_t yyget_leng (void );
 
 char *yyget_text (void );
 
@@ -852,10 +853,6 @@ YY_DECL
        register char *yy_cp, *yy_bp;
        register int yy_act;
     
-#line 67 "dtc-lexer.l"
-
-#line 858 "dtc-lexer.lex.c"
-
        if ( !(yy_init) )
                {
                (yy_init) = 1;
@@ -882,6 +879,11 @@ YY_DECL
                yy_load_buffer_state( );
                }
 
+       {
+#line 68 "dtc-lexer.l"
+
+#line 886 "dtc-lexer.lex.c"
+
        while ( 1 )             /* loops until end-of-file is reached */
                {
                yy_cp = (yy_c_buf_p);
@@ -899,7 +901,7 @@ YY_DECL
 yy_match:
                do
                        {
-                       register YY_CHAR yy_c = yy_ec[YY_SC_TO_UI(*yy_cp)];
+                       register YY_CHAR yy_c = yy_ec[YY_SC_TO_UI(*yy_cp)] ;
                        if ( yy_accept[yy_current_state] )
                                {
                                (yy_last_accepting_state) = yy_current_state;
@@ -908,13 +910,13 @@ yy_match:
                        while ( yy_chk[yy_base[yy_current_state] + yy_c] != yy_current_state )
                                {
                                yy_current_state = (int) yy_def[yy_current_state];
-                               if ( yy_current_state >= 161 )
+                               if ( yy_current_state >= 159 )
                                        yy_c = yy_meta[(unsigned int) yy_c];
                                }
                        yy_current_state = yy_nxt[yy_base[yy_current_state] + (unsigned int) yy_c];
                        ++yy_cp;
                        }
-               while ( yy_current_state != 160 );
+               while ( yy_current_state != 158 );
                yy_cp = (yy_last_accepting_cpos);
                yy_current_state = (yy_last_accepting_state);
 
@@ -937,7 +939,7 @@ do_action:  /* This label is used only to access EOF actions. */
 case 1:
 /* rule 1 can match eol */
 YY_RULE_SETUP
-#line 68 "dtc-lexer.l"
+#line 69 "dtc-lexer.l"
 {
                        char *name = strchr(yytext, '\"') + 1;
                        yytext[yyleng-1] = '\0';
@@ -947,16 +949,16 @@ YY_RULE_SETUP
 case 2:
 /* rule 2 can match eol */
 YY_RULE_SETUP
-#line 74 "dtc-lexer.l"
+#line 75 "dtc-lexer.l"
 {
                        char *line, *tmp, *fn;
                        /* skip text before line # */
                        line = yytext;
-                       while (!isdigit(*line))
+                       while (!isdigit((unsigned char)*line))
                                line++;
                        /* skip digits in line # */
                        tmp = line;
-                       while (!isspace(*tmp))
+                       while (!isspace((unsigned char)*tmp))
                                tmp++;
                        /* "NULL"-terminate line # */
                        *tmp = '\0';
@@ -970,11 +972,10 @@ YY_RULE_SETUP
                }
        YY_BREAK
 case YY_STATE_EOF(INITIAL):
-case YY_STATE_EOF(INCLUDE):
 case YY_STATE_EOF(BYTESTRING):
 case YY_STATE_EOF(PROPNODENAME):
 case YY_STATE_EOF(V1):
-#line 95 "dtc-lexer.l"
+#line 96 "dtc-lexer.l"
 {
                        if (!pop_input_file()) {
                                yyterminate();
@@ -984,7 +985,7 @@ case YY_STATE_EOF(V1):
 case 3:
 /* rule 3 can match eol */
 YY_RULE_SETUP
-#line 101 "dtc-lexer.l"
+#line 102 "dtc-lexer.l"
 {
                        DPRINT("String: %s\n", yytext);
                        yylval.data = data_copy_escape_string(yytext+1,
@@ -994,7 +995,7 @@ YY_RULE_SETUP
        YY_BREAK
 case 4:
 YY_RULE_SETUP
-#line 108 "dtc-lexer.l"
+#line 109 "dtc-lexer.l"
 {
                        DPRINT("Keyword: /dts-v1/\n");
                        dts_version = 1;
@@ -1004,7 +1005,7 @@ YY_RULE_SETUP
        YY_BREAK
 case 5:
 YY_RULE_SETUP
-#line 115 "dtc-lexer.l"
+#line 116 "dtc-lexer.l"
 {
                        DPRINT("Keyword: /memreserve/\n");
                        BEGIN_DEFAULT();
@@ -1013,7 +1014,7 @@ YY_RULE_SETUP
        YY_BREAK
 case 6:
 YY_RULE_SETUP
-#line 121 "dtc-lexer.l"
+#line 122 "dtc-lexer.l"
 {
                        DPRINT("Keyword: /bits/\n");
                        BEGIN_DEFAULT();
@@ -1022,7 +1023,7 @@ YY_RULE_SETUP
        YY_BREAK
 case 7:
 YY_RULE_SETUP
-#line 127 "dtc-lexer.l"
+#line 128 "dtc-lexer.l"
 {
                        DPRINT("Keyword: /delete-property/\n");
                        DPRINT("<PROPNODENAME>\n");
@@ -1032,7 +1033,7 @@ YY_RULE_SETUP
        YY_BREAK
 case 8:
 YY_RULE_SETUP
-#line 134 "dtc-lexer.l"
+#line 135 "dtc-lexer.l"
 {
                        DPRINT("Keyword: /delete-node/\n");
                        DPRINT("<PROPNODENAME>\n");
@@ -1042,7 +1043,7 @@ YY_RULE_SETUP
        YY_BREAK
 case 9:
 YY_RULE_SETUP
-#line 141 "dtc-lexer.l"
+#line 142 "dtc-lexer.l"
 {
                        DPRINT("Label: %s\n", yytext);
                        yylval.labelref = xstrdup(yytext);
@@ -1052,27 +1053,54 @@ YY_RULE_SETUP
        YY_BREAK
 case 10:
 YY_RULE_SETUP
-#line 148 "dtc-lexer.l"
+#line 149 "dtc-lexer.l"
 {
-                       yylval.literal = xstrdup(yytext);
-                       DPRINT("Literal: '%s'\n", yylval.literal);
+                       char *e;
+                       DPRINT("Integer Literal: '%s'\n", yytext);
+
+                       errno = 0;
+                       yylval.integer = strtoull(yytext, &e, 0);
+
+                       assert(!(*e) || !e[strspn(e, "UL")]);
+
+                       if (errno == ERANGE)
+                               lexical_error("Integer literal '%s' out of range",
+                                             yytext);
+                       else
+                               /* ERANGE is the only strtoull error triggerable
+                                *  by strings matching the pattern */
+                               assert(errno == 0);
                        return DT_LITERAL;
                }
        YY_BREAK
 case 11:
 /* rule 11 can match eol */
 YY_RULE_SETUP
-#line 154 "dtc-lexer.l"
+#line 168 "dtc-lexer.l"
 {
-                       yytext[yyleng-1] = '\0';
-                       yylval.literal = xstrdup(yytext+1);
-                       DPRINT("Character literal: %s\n", yylval.literal);
+                       struct data d;
+                       DPRINT("Character literal: %s\n", yytext);
+
+                       d = data_copy_escape_string(yytext+1, yyleng-2);
+                       if (d.len == 1) {
+                               lexical_error("Empty character literal");
+                               yylval.integer = 0;
+                               return DT_CHAR_LITERAL;
+                       }
+
+                       yylval.integer = (unsigned char)d.val[0];
+
+                       if (d.len > 2)
+                               lexical_error("Character literal has %d"
+                                             " characters instead of 1",
+                                             d.len - 1);
+
                        return DT_CHAR_LITERAL;
                }
        YY_BREAK
 case 12:
 YY_RULE_SETUP
-#line 161 "dtc-lexer.l"
+#line 189 "dtc-lexer.l"
 {      /* label reference */
                        DPRINT("Ref: %s\n", yytext+1);
                        yylval.labelref = xstrdup(yytext+1);
@@ -1081,7 +1109,7 @@ YY_RULE_SETUP
        YY_BREAK
 case 13:
 YY_RULE_SETUP
-#line 167 "dtc-lexer.l"
+#line 195 "dtc-lexer.l"
 {      /* new-style path reference */
                        yytext[yyleng-1] = '\0';
                        DPRINT("Ref: %s\n", yytext+2);
@@ -1091,7 +1119,7 @@ YY_RULE_SETUP
        YY_BREAK
 case 14:
 YY_RULE_SETUP
-#line 174 "dtc-lexer.l"
+#line 202 "dtc-lexer.l"
 {
                        yylval.byte = strtol(yytext, NULL, 16);
                        DPRINT("Byte: %02x\n", (int)yylval.byte);
@@ -1100,7 +1128,7 @@ YY_RULE_SETUP
        YY_BREAK
 case 15:
 YY_RULE_SETUP
-#line 180 "dtc-lexer.l"
+#line 208 "dtc-lexer.l"
 {
                        DPRINT("/BYTESTRING\n");
                        BEGIN_DEFAULT();
@@ -1109,7 +1137,7 @@ YY_RULE_SETUP
        YY_BREAK
 case 16:
 YY_RULE_SETUP
-#line 186 "dtc-lexer.l"
+#line 214 "dtc-lexer.l"
 {
                        DPRINT("PropNodeName: %s\n", yytext);
                        yylval.propnodename = xstrdup((yytext[0] == '\\') ?
@@ -1120,7 +1148,7 @@ YY_RULE_SETUP
        YY_BREAK
 case 17:
 YY_RULE_SETUP
-#line 194 "dtc-lexer.l"
+#line 222 "dtc-lexer.l"
 {
                        DPRINT("Binary Include\n");
                        return DT_INCBIN;
@@ -1129,64 +1157,64 @@ YY_RULE_SETUP
 case 18:
 /* rule 18 can match eol */
 YY_RULE_SETUP
-#line 199 "dtc-lexer.l"
+#line 227 "dtc-lexer.l"
 /* eat whitespace */
        YY_BREAK
 case 19:
 /* rule 19 can match eol */
 YY_RULE_SETUP
-#line 200 "dtc-lexer.l"
+#line 228 "dtc-lexer.l"
 /* eat C-style comments */
        YY_BREAK
 case 20:
 /* rule 20 can match eol */
 YY_RULE_SETUP
-#line 201 "dtc-lexer.l"
+#line 229 "dtc-lexer.l"
 /* eat C++-style comments */
        YY_BREAK
 case 21:
 YY_RULE_SETUP
-#line 203 "dtc-lexer.l"
+#line 231 "dtc-lexer.l"
 { return DT_LSHIFT; };
        YY_BREAK
 case 22:
 YY_RULE_SETUP
-#line 204 "dtc-lexer.l"
+#line 232 "dtc-lexer.l"
 { return DT_RSHIFT; };
        YY_BREAK
 case 23:
 YY_RULE_SETUP
-#line 205 "dtc-lexer.l"
+#line 233 "dtc-lexer.l"
 { return DT_LE; };
        YY_BREAK
 case 24:
 YY_RULE_SETUP
-#line 206 "dtc-lexer.l"
+#line 234 "dtc-lexer.l"
 { return DT_GE; };
        YY_BREAK
 case 25:
 YY_RULE_SETUP
-#line 207 "dtc-lexer.l"
+#line 235 "dtc-lexer.l"
 { return DT_EQ; };
        YY_BREAK
 case 26:
 YY_RULE_SETUP
-#line 208 "dtc-lexer.l"
+#line 236 "dtc-lexer.l"
 { return DT_NE; };
        YY_BREAK
 case 27:
 YY_RULE_SETUP
-#line 209 "dtc-lexer.l"
+#line 237 "dtc-lexer.l"
 { return DT_AND; };
        YY_BREAK
 case 28:
 YY_RULE_SETUP
-#line 210 "dtc-lexer.l"
+#line 238 "dtc-lexer.l"
 { return DT_OR; };
        YY_BREAK
 case 29:
 YY_RULE_SETUP
-#line 212 "dtc-lexer.l"
+#line 240 "dtc-lexer.l"
 {
                        DPRINT("Char: %c (\\x%02x)\n", yytext[0],
                                (unsigned)yytext[0]);
@@ -1204,10 +1232,10 @@ YY_RULE_SETUP
        YY_BREAK
 case 30:
 YY_RULE_SETUP
-#line 227 "dtc-lexer.l"
+#line 255 "dtc-lexer.l"
 ECHO;
        YY_BREAK
-#line 1211 "dtc-lexer.lex.c"
+#line 1239 "dtc-lexer.lex.c"
 
        case YY_END_OF_BUFFER:
                {
@@ -1337,6 +1365,7 @@ ECHO;
                        "fatal flex scanner internal error--no action found" );
        } /* end of action switch */
                } /* end of scanning one token */
+       } /* end of user's declarations */
 } /* end of yylex */
 
 /* yy_get_next_buffer - try to read in a new buffer
@@ -1392,21 +1421,21 @@ static int yy_get_next_buffer (void)
 
        else
                {
-                       int num_to_read =
+                       yy_size_t num_to_read =
                        YY_CURRENT_BUFFER_LVALUE->yy_buf_size - number_to_move - 1;
 
                while ( num_to_read <= 0 )
                        { /* Not enough room in the buffer - grow it. */
 
                        /* just a shorter name for the current buffer */
-                       YY_BUFFER_STATE b = YY_CURRENT_BUFFER;
+                       YY_BUFFER_STATE b = YY_CURRENT_BUFFER_LVALUE;
 
                        int yy_c_buf_p_offset =
                                (int) ((yy_c_buf_p) - b->yy_ch_buf);
 
                        if ( b->yy_is_our_buffer )
                                {
-                               int new_size = b->yy_buf_size * 2;
+                               yy_size_t new_size = b->yy_buf_size * 2;
 
                                if ( new_size <= 0 )
                                        b->yy_buf_size += b->yy_buf_size / 8;
@@ -1437,7 +1466,7 @@ static int yy_get_next_buffer (void)
 
                /* Read in more data. */
                YY_INPUT( (&YY_CURRENT_BUFFER_LVALUE->yy_ch_buf[number_to_move]),
-                       (yy_n_chars), (size_t) num_to_read );
+                       (yy_n_chars), num_to_read );
 
                YY_CURRENT_BUFFER_LVALUE->yy_n_chars = (yy_n_chars);
                }
@@ -1499,7 +1528,7 @@ static int yy_get_next_buffer (void)
                while ( yy_chk[yy_base[yy_current_state] + yy_c] != yy_current_state )
                        {
                        yy_current_state = (int) yy_def[yy_current_state];
-                       if ( yy_current_state >= 161 )
+                       if ( yy_current_state >= 159 )
                                yy_c = yy_meta[(unsigned int) yy_c];
                        }
                yy_current_state = yy_nxt[yy_base[yy_current_state] + (unsigned int) yy_c];
@@ -1527,13 +1556,13 @@ static int yy_get_next_buffer (void)
        while ( yy_chk[yy_base[yy_current_state] + yy_c] != yy_current_state )
                {
                yy_current_state = (int) yy_def[yy_current_state];
-               if ( yy_current_state >= 161 )
+               if ( yy_current_state >= 159 )
                        yy_c = yy_meta[(unsigned int) yy_c];
                }
        yy_current_state = yy_nxt[yy_base[yy_current_state] + (unsigned int) yy_c];
-       yy_is_jam = (yy_current_state == 160);
+       yy_is_jam = (yy_current_state == 158);
 
-       return yy_is_jam ? 0 : yy_current_state;
+               return yy_is_jam ? 0 : yy_current_state;
 }
 
 #ifndef YY_NO_INPUT
@@ -1560,7 +1589,7 @@ static int yy_get_next_buffer (void)
 
                else
                        { /* need more input */
-                       int offset = (yy_c_buf_p) - (yytext_ptr);
+                       yy_size_t offset = (yy_c_buf_p) - (yytext_ptr);
                        ++(yy_c_buf_p);
 
                        switch ( yy_get_next_buffer(  ) )
@@ -1834,7 +1863,7 @@ void yypop_buffer_state (void)
  */
 static void yyensure_buffer_stack (void)
 {
-       int num_to_alloc;
+       yy_size_t num_to_alloc;
     
        if (!(yy_buffer_stack)) {
 
@@ -1931,12 +1960,12 @@ YY_BUFFER_STATE yy_scan_string (yyconst char * yystr )
  * 
  * @return the newly allocated buffer state object.
  */
-YY_BUFFER_STATE yy_scan_bytes  (yyconst char * yybytes, int  _yybytes_len )
+YY_BUFFER_STATE yy_scan_bytes  (yyconst char * yybytes, yy_size_t  _yybytes_len )
 {
        YY_BUFFER_STATE b;
        char *buf;
        yy_size_t n;
-       int i;
+       yy_size_t i;
     
        /* Get memory for full buffer, including space for trailing EOB's. */
        n = _yybytes_len + 2;
@@ -2018,7 +2047,7 @@ FILE *yyget_out  (void)
 /** Get the length of the current token.
  * 
  */
-int yyget_leng  (void)
+yy_size_t yyget_leng  (void)
 {
         return yyleng;
 }
@@ -2166,7 +2195,7 @@ void yyfree (void * ptr )
 
 #define YYTABLES_NAME "yytables"
 
-#line 227 "dtc-lexer.l"
+#line 254 "dtc-lexer.l"
 
 
 
@@ -2182,14 +2211,25 @@ static void push_input_file(const char *filename)
 }
 
 
-static int pop_input_file(void)
+static bool pop_input_file(void)
 {
        if (srcfile_pop() == 0)
-               return 0;
+               return false;
 
        yypop_buffer_state();
        yyin = current_srcfile->f;
 
-       return 1;
+       return true;
+}
+
+static void lexical_error(const char *fmt, ...)
+{
+       va_list ap;
+
+       va_start(ap, fmt);
+       srcpos_verror(&yylloc, "Lexical error", fmt, ap);
+       va_end(ap);
+
+       treesource_error = true;
 }
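
Both the .l source and the regenerated scanner above now cast to unsigned char before calling isdigit()/isspace(); passing a plain char that may be negative to the <ctype.h> classifiers is undefined behaviour. A small illustration of the safe pattern used in the line-control rule (the guard against a missing digit is an addition for this sketch):

    #include <ctype.h>
    #include <stdio.h>

    /* Skip leading non-digits and return the digit span that follows,
     * mirroring the line-number scan in the lexer's line-control rule. */
    static const char *skip_to_digits(const char *s)
    {
            while (*s && !isdigit((unsigned char)*s))  /* cast avoids UB on negative chars */
                    s++;
            return s;
    }

    int main(void)
    {
            const char *line = "# 42 \"board.dts\"";
            printf("line number starts at: %s\n", skip_to_digits(line));
            return 0;
    }
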
 
index c8769d550cfbf322c64207438dd16ec9b700a1c9..116458c8dfc4cbd728db8fc348bb254a9bae7208 100644 (file)
@@ -1,19 +1,19 @@
-/* A Bison parser, made by GNU Bison 2.7.12-4996.  */
+/* A Bison parser, made by GNU Bison 3.0.2.  */
 
 /* Bison implementation for Yacc-like parsers in C
-   
-      Copyright (C) 1984, 1989-1990, 2000-2013 Free Software Foundation, Inc.
-   
+
+   Copyright (C) 1984, 1989-1990, 2000-2013 Free Software Foundation, Inc.
+
    This program is free software: you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation, either version 3 of the License, or
    (at your option) any later version.
-   
+
    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.
-   
+
    You should have received a copy of the GNU General Public License
    along with this program.  If not, see <http://www.gnu.org/licenses/>.  */
 
@@ -26,7 +26,7 @@
    special exception, which will cause the skeleton and the resulting
    Bison output files to be licensed under the GNU General Public
    License without this special exception.
-   
+
    This special exception was added by the Free Software Foundation in
    version 2.2 of Bison.  */
 
@@ -44,7 +44,7 @@
 #define YYBISON 1
 
 /* Bison version.  */
-#define YYBISON_VERSION "2.7.12-4996"
+#define YYBISON_VERSION "3.0.2"
 
 /* Skeleton name.  */
 #define YYSKELETON_NAME "yacc.c"
 
 
 /* Copy the first part of user declarations.  */
-/* Line 371 of yacc.c  */
-#line 21 "dtc-parser.y"
+#line 20 "dtc-parser.y" /* yacc.c:339  */
 
 #include <stdio.h>
 
 #include "dtc.h"
 #include "srcpos.h"
 
-YYLTYPE yylloc;
-
 extern int yylex(void);
-extern void print_error(char const *fmt, ...);
 extern void yyerror(char const *s);
+#define ERROR(loc, ...) \
+       do { \
+               srcpos_error((loc), "Error", __VA_ARGS__); \
+               treesource_error = true; \
+       } while (0)
 
 extern struct boot_info *the_boot_info;
-extern int treesource_error;
+extern bool treesource_error;
 
-static unsigned long long eval_literal(const char *s, int base, int bits);
-static unsigned char eval_char_literal(const char *s);
+#line 84 "dtc-parser.tab.c" /* yacc.c:339  */
 
-/* Line 371 of yacc.c  */
-#line 87 "dtc-parser.tab.c"
-
-# ifndef YY_NULL
+# ifndef YY_NULLPTR
 #  if defined __cplusplus && 201103L <= __cplusplus
-#   define YY_NULL nullptr
+#   define YY_NULLPTR nullptr
 #  else
-#   define YY_NULL 0
+#   define YY_NULLPTR 0
 #  endif
 # endif
 
@@ -105,7 +102,7 @@ static unsigned char eval_char_literal(const char *s);
    by #include "dtc-parser.tab.h".  */
 #ifndef YY_YY_DTC_PARSER_TAB_H_INCLUDED
 # define YY_YY_DTC_PARSER_TAB_H_INCLUDED
-/* Enabling traces.  */
+/* Debug traces.  */
 #ifndef YYDEBUG
 # define YYDEBUG 0
 #endif
@@ -113,48 +110,44 @@ static unsigned char eval_char_literal(const char *s);
 extern int yydebug;
 #endif
 
-/* Tokens.  */
+/* Token type.  */
 #ifndef YYTOKENTYPE
 # define YYTOKENTYPE
-   /* Put the tokens into the symbol table, so that GDB and other debuggers
-      know about them.  */
-   enum yytokentype {
-     DT_V1 = 258,
-     DT_MEMRESERVE = 259,
-     DT_LSHIFT = 260,
-     DT_RSHIFT = 261,
-     DT_LE = 262,
-     DT_GE = 263,
-     DT_EQ = 264,
-     DT_NE = 265,
-     DT_AND = 266,
-     DT_OR = 267,
-     DT_BITS = 268,
-     DT_DEL_PROP = 269,
-     DT_DEL_NODE = 270,
-     DT_PROPNODENAME = 271,
-     DT_LITERAL = 272,
-     DT_CHAR_LITERAL = 273,
-     DT_BASE = 274,
-     DT_BYTE = 275,
-     DT_STRING = 276,
-     DT_LABEL = 277,
-     DT_REF = 278,
-     DT_INCBIN = 279
-   };
+  enum yytokentype
+  {
+    DT_V1 = 258,
+    DT_MEMRESERVE = 259,
+    DT_LSHIFT = 260,
+    DT_RSHIFT = 261,
+    DT_LE = 262,
+    DT_GE = 263,
+    DT_EQ = 264,
+    DT_NE = 265,
+    DT_AND = 266,
+    DT_OR = 267,
+    DT_BITS = 268,
+    DT_DEL_PROP = 269,
+    DT_DEL_NODE = 270,
+    DT_PROPNODENAME = 271,
+    DT_LITERAL = 272,
+    DT_CHAR_LITERAL = 273,
+    DT_BYTE = 274,
+    DT_STRING = 275,
+    DT_LABEL = 276,
+    DT_REF = 277,
+    DT_INCBIN = 278
+  };
 #endif
 
-
+/* Value type.  */
 #if ! defined YYSTYPE && ! defined YYSTYPE_IS_DECLARED
-typedef union YYSTYPE
+typedef union YYSTYPE YYSTYPE;
+union YYSTYPE
 {
-/* Line 387 of yacc.c  */
-#line 40 "dtc-parser.y"
+#line 38 "dtc-parser.y" /* yacc.c:355  */
 
        char *propnodename;
-       char *literal;
        char *labelref;
-       unsigned int cbase;
        uint8_t byte;
        struct data data;
 
@@ -170,37 +163,36 @@ typedef union YYSTYPE
        struct reserve_info *re;
        uint64_t integer;
 
-
-/* Line 387 of yacc.c  */
-#line 176 "dtc-parser.tab.c"
-} YYSTYPE;
+#line 167 "dtc-parser.tab.c" /* yacc.c:355  */
+};
 # define YYSTYPE_IS_TRIVIAL 1
-# define yystype YYSTYPE /* obsolescent; will be withdrawn */
 # define YYSTYPE_IS_DECLARED 1
 #endif
 
-extern YYSTYPE yylval;
-
-#ifdef YYPARSE_PARAM
-#if defined __STDC__ || defined __cplusplus
-int yyparse (void *YYPARSE_PARAM);
-#else
-int yyparse ();
+/* Location type.  */
+#if ! defined YYLTYPE && ! defined YYLTYPE_IS_DECLARED
+typedef struct YYLTYPE YYLTYPE;
+struct YYLTYPE
+{
+  int first_line;
+  int first_column;
+  int last_line;
+  int last_column;
+};
+# define YYLTYPE_IS_DECLARED 1
+# define YYLTYPE_IS_TRIVIAL 1
 #endif
-#else /* ! YYPARSE_PARAM */
-#if defined __STDC__ || defined __cplusplus
+
+
+extern YYSTYPE yylval;
+extern YYLTYPE yylloc;
 int yyparse (void);
-#else
-int yyparse ();
-#endif
-#endif /* ! YYPARSE_PARAM */
 
 #endif /* !YY_YY_DTC_PARSER_TAB_H_INCLUDED  */
 
 /* Copy the second part of user declarations.  */
 
-/* Line 390 of yacc.c  */
-#line 204 "dtc-parser.tab.c"
+#line 196 "dtc-parser.tab.c" /* yacc.c:358  */
 
 #ifdef short
 # undef short
@@ -214,11 +206,8 @@ typedef unsigned char yytype_uint8;
 
 #ifdef YYTYPE_INT8
 typedef YYTYPE_INT8 yytype_int8;
-#elif (defined __STDC__ || defined __C99__FUNC__ \
-     || defined __cplusplus || defined _MSC_VER)
-typedef signed char yytype_int8;
 #else
-typedef short int yytype_int8;
+typedef signed char yytype_int8;
 #endif
 
 #ifdef YYTYPE_UINT16
@@ -238,8 +227,7 @@ typedef short int yytype_int16;
 #  define YYSIZE_T __SIZE_TYPE__
 # elif defined size_t
 #  define YYSIZE_T size_t
-# elif ! defined YYSIZE_T && (defined __STDC__ || defined __C99__FUNC__ \
-     || defined __cplusplus || defined _MSC_VER)
+# elif ! defined YYSIZE_T
 #  include <stddef.h> /* INFRINGES ON USER NAME SPACE */
 #  define YYSIZE_T size_t
 # else
@@ -261,11 +249,30 @@ typedef short int yytype_int16;
 # endif
 #endif
 
-#ifndef __attribute__
-/* This feature is available in gcc versions 2.5 and later.  */
-# if (! defined __GNUC__ || __GNUC__ < 2 \
-      || (__GNUC__ == 2 && __GNUC_MINOR__ < 5))
-#  define __attribute__(Spec) /* empty */
+#ifndef YY_ATTRIBUTE
+# if (defined __GNUC__                                               \
+      && (2 < __GNUC__ || (__GNUC__ == 2 && 96 <= __GNUC_MINOR__)))  \
+     || defined __SUNPRO_C && 0x5110 <= __SUNPRO_C
+#  define YY_ATTRIBUTE(Spec) __attribute__(Spec)
+# else
+#  define YY_ATTRIBUTE(Spec) /* empty */
+# endif
+#endif
+
+#ifndef YY_ATTRIBUTE_PURE
+# define YY_ATTRIBUTE_PURE   YY_ATTRIBUTE ((__pure__))
+#endif
+
+#ifndef YY_ATTRIBUTE_UNUSED
+# define YY_ATTRIBUTE_UNUSED YY_ATTRIBUTE ((__unused__))
+#endif
+
+#if !defined _Noreturn \
+     && (!defined __STDC_VERSION__ || __STDC_VERSION__ < 201112)
+# if defined _MSC_VER && 1200 <= _MSC_VER
+#  define _Noreturn __declspec (noreturn)
+# else
+#  define _Noreturn YY_ATTRIBUTE ((__noreturn__))
 # endif
 #endif
 
@@ -276,25 +283,26 @@ typedef short int yytype_int16;
 # define YYUSE(E) /* empty */
 #endif
 
-
-/* Identity function, used to suppress warnings about constant conditions.  */
-#ifndef lint
-# define YYID(N) (N)
-#else
-#if (defined __STDC__ || defined __C99__FUNC__ \
-     || defined __cplusplus || defined _MSC_VER)
-static int
-YYID (int yyi)
+#if defined __GNUC__ && 407 <= __GNUC__ * 100 + __GNUC_MINOR__
+/* Suppress an incorrect diagnostic about yylval being uninitialized.  */
+# define YY_IGNORE_MAYBE_UNINITIALIZED_BEGIN \
+    _Pragma ("GCC diagnostic push") \
+    _Pragma ("GCC diagnostic ignored \"-Wuninitialized\"")\
+    _Pragma ("GCC diagnostic ignored \"-Wmaybe-uninitialized\"")
+# define YY_IGNORE_MAYBE_UNINITIALIZED_END \
+    _Pragma ("GCC diagnostic pop")
 #else
-static int
-YYID (yyi)
-    int yyi;
+# define YY_INITIAL_VALUE(Value) Value
 #endif
-{
-  return yyi;
-}
+#ifndef YY_IGNORE_MAYBE_UNINITIALIZED_BEGIN
+# define YY_IGNORE_MAYBE_UNINITIALIZED_BEGIN
+# define YY_IGNORE_MAYBE_UNINITIALIZED_END
+#endif
+#ifndef YY_INITIAL_VALUE
+# define YY_INITIAL_VALUE(Value) /* Nothing. */
 #endif
 
+
 #if ! defined yyoverflow || YYERROR_VERBOSE
 
 /* The parser invokes alloca or malloc; define the necessary symbols.  */
@@ -312,8 +320,7 @@ YYID (yyi)
 #    define alloca _alloca
 #   else
 #    define YYSTACK_ALLOC alloca
-#    if ! defined _ALLOCA_H && ! defined EXIT_SUCCESS && (defined __STDC__ || defined __C99__FUNC__ \
-     || defined __cplusplus || defined _MSC_VER)
+#    if ! defined _ALLOCA_H && ! defined EXIT_SUCCESS
 #     include <stdlib.h> /* INFRINGES ON USER NAME SPACE */
       /* Use EXIT_SUCCESS as a witness for stdlib.h.  */
 #     ifndef EXIT_SUCCESS
@@ -325,8 +332,8 @@ YYID (yyi)
 # endif
 
 # ifdef YYSTACK_ALLOC
-   /* Pacify GCC's `empty if-body' warning.  */
-#  define YYSTACK_FREE(Ptr) do { /* empty */; } while (YYID (0))
+   /* Pacify GCC's 'empty if-body' warning.  */
+#  define YYSTACK_FREE(Ptr) do { /* empty */; } while (0)
 #  ifndef YYSTACK_ALLOC_MAXIMUM
     /* The OS might guarantee only one guard page at the bottom of the stack,
        and a page size can be as small as 4096 bytes.  So we cannot safely
@@ -342,7 +349,7 @@ YYID (yyi)
 #  endif
 #  if (defined __cplusplus && ! defined EXIT_SUCCESS \
        && ! ((defined YYMALLOC || defined malloc) \
-            && (defined YYFREE || defined free)))
+             && (defined YYFREE || defined free)))
 #   include <stdlib.h> /* INFRINGES ON USER NAME SPACE */
 #   ifndef EXIT_SUCCESS
 #    define EXIT_SUCCESS 0
@@ -350,15 +357,13 @@ YYID (yyi)
 #  endif
 #  ifndef YYMALLOC
 #   define YYMALLOC malloc
-#   if ! defined malloc && ! defined EXIT_SUCCESS && (defined __STDC__ || defined __C99__FUNC__ \
-     || defined __cplusplus || defined _MSC_VER)
+#   if ! defined malloc && ! defined EXIT_SUCCESS
 void *malloc (YYSIZE_T); /* INFRINGES ON USER NAME SPACE */
 #   endif
 #  endif
 #  ifndef YYFREE
 #   define YYFREE free
-#   if ! defined free && ! defined EXIT_SUCCESS && (defined __STDC__ || defined __C99__FUNC__ \
-     || defined __cplusplus || defined _MSC_VER)
+#   if ! defined free && ! defined EXIT_SUCCESS
 void free (void *); /* INFRINGES ON USER NAME SPACE */
 #   endif
 #  endif
@@ -368,13 +373,15 @@ void free (void *); /* INFRINGES ON USER NAME SPACE */
 
 #if (! defined yyoverflow \
      && (! defined __cplusplus \
-        || (defined YYSTYPE_IS_TRIVIAL && YYSTYPE_IS_TRIVIAL)))
+         || (defined YYLTYPE_IS_TRIVIAL && YYLTYPE_IS_TRIVIAL \
+             && defined YYSTYPE_IS_TRIVIAL && YYSTYPE_IS_TRIVIAL)))
 
 /* A type that is properly aligned for any stack member.  */
 union yyalloc
 {
   yytype_int16 yyss_alloc;
   YYSTYPE yyvs_alloc;
+  YYLTYPE yyls_alloc;
 };
 
 /* The size of the maximum gap between one aligned stack and the next.  */
@@ -383,8 +390,8 @@ union yyalloc
 /* The size of an array large to enough to hold all stacks, each with
    N elements.  */
 # define YYSTACK_BYTES(N) \
-     ((N) * (sizeof (yytype_int16) + sizeof (YYSTYPE)) \
-      + YYSTACK_GAP_MAXIMUM)
+     ((N) * (sizeof (yytype_int16) + sizeof (YYSTYPE) + sizeof (YYLTYPE)) \
+      + 2 * YYSTACK_GAP_MAXIMUM)
 
 # define YYCOPY_NEEDED 1
 
@@ -393,16 +400,16 @@ union yyalloc
    elements in the stack, and YYPTR gives the new location of the
    stack.  Advance YYPTR to a properly aligned location for the next
    stack.  */
-# define YYSTACK_RELOCATE(Stack_alloc, Stack)                          \
-    do                                                                 \
-      {                                                                        \
-       YYSIZE_T yynewbytes;                                            \
-       YYCOPY (&yyptr->Stack_alloc, Stack, yysize);                    \
-       Stack = &yyptr->Stack_alloc;                                    \
-       yynewbytes = yystacksize * sizeof (*Stack) + YYSTACK_GAP_MAXIMUM; \
-       yyptr += yynewbytes / sizeof (*yyptr);                          \
-      }                                                                        \
-    while (YYID (0))
+# define YYSTACK_RELOCATE(Stack_alloc, Stack)                           \
+    do                                                                  \
+      {                                                                 \
+        YYSIZE_T yynewbytes;                                            \
+        YYCOPY (&yyptr->Stack_alloc, Stack, yysize);                    \
+        Stack = &yyptr->Stack_alloc;                                    \
+        yynewbytes = yystacksize * sizeof (*Stack) + YYSTACK_GAP_MAXIMUM; \
+        yyptr += yynewbytes / sizeof (*yyptr);                          \
+      }                                                                 \
+    while (0)
 
 #endif
 
@@ -421,7 +428,7 @@ union yyalloc
           for (yyi = 0; yyi < (Count); yyi++)   \
             (Dst)[yyi] = (Src)[yyi];            \
         }                                       \
-      while (YYID (0))
+      while (0)
 #  endif
 # endif
 #endif /* !YYCOPY_NEEDED */
@@ -429,40 +436,42 @@ union yyalloc
 /* YYFINAL -- State number of the termination state.  */
 #define YYFINAL  4
 /* YYLAST -- Last index in YYTABLE.  */
-#define YYLAST   133
+#define YYLAST   136
 
 /* YYNTOKENS -- Number of terminals.  */
-#define YYNTOKENS  48
+#define YYNTOKENS  47
 /* YYNNTS -- Number of nonterminals.  */
 #define YYNNTS  28
 /* YYNRULES -- Number of rules.  */
-#define YYNRULES  79
-/* YYNRULES -- Number of states.  */
-#define YYNSTATES  141
+#define YYNRULES  80
+/* YYNSTATES -- Number of states.  */
+#define YYNSTATES  144
 
-/* YYTRANSLATE(YYLEX) -- Bison symbol number corresponding to YYLEX.  */
+/* YYTRANSLATE[YYX] -- Symbol number corresponding to YYX as returned
+   by yylex, with out-of-bounds checking.  */
 #define YYUNDEFTOK  2
-#define YYMAXUTOK   279
+#define YYMAXUTOK   278
 
-#define YYTRANSLATE(YYX)                                               \
+#define YYTRANSLATE(YYX)                                                \
   ((unsigned int) (YYX) <= YYMAXUTOK ? yytranslate[YYX] : YYUNDEFTOK)
 
-/* YYTRANSLATE[YYLEX] -- Bison symbol number corresponding to YYLEX.  */
+/* YYTRANSLATE[TOKEN-NUM] -- Symbol number corresponding to TOKEN-NUM
+   as returned by yylex, without out-of-bounds checking.  */
 static const yytype_uint8 yytranslate[] =
 {
        0,     2,     2,     2,     2,     2,     2,     2,     2,     2,
        2,     2,     2,     2,     2,     2,     2,     2,     2,     2,
        2,     2,     2,     2,     2,     2,     2,     2,     2,     2,
-       2,     2,     2,    47,     2,     2,     2,    45,    41,     2,
-      33,    35,    44,    42,    34,    43,     2,    26,     2,     2,
-       2,     2,     2,     2,     2,     2,     2,     2,    38,    25,
-      36,    29,    30,    37,     2,     2,     2,     2,     2,     2,
+       2,     2,     2,    46,     2,     2,     2,    44,    40,     2,
+      32,    34,    43,    41,    33,    42,     2,    25,     2,     2,
+       2,     2,     2,     2,     2,     2,     2,     2,    37,    24,
+      35,    28,    29,    36,     2,     2,     2,     2,     2,     2,
        2,     2,     2,     2,     2,     2,     2,     2,     2,     2,
        2,     2,     2,     2,     2,     2,     2,     2,     2,     2,
-       2,    31,     2,    32,    40,     2,     2,     2,     2,     2,
+       2,    30,     2,    31,    39,     2,     2,     2,     2,     2,
        2,     2,     2,     2,     2,     2,     2,     2,     2,     2,
        2,     2,     2,     2,     2,     2,     2,     2,     2,     2,
-       2,     2,     2,    27,    39,    28,    46,     2,     2,     2,
+       2,     2,     2,    26,    38,    27,    45,     2,     2,     2,
        2,     2,     2,     2,     2,     2,     2,     2,     2,     2,
        2,     2,     2,     2,     2,     2,     2,     2,     2,     2,
        2,     2,     2,     2,     2,     2,     2,     2,     2,     2,
@@ -477,67 +486,22 @@ static const yytype_uint8 yytranslate[] =
        2,     2,     2,     2,     2,     2,     2,     2,     2,     2,
        2,     2,     2,     2,     2,     2,     1,     2,     3,     4,
        5,     6,     7,     8,     9,    10,    11,    12,    13,    14,
-      15,    16,    17,    18,    19,    20,    21,    22,    23,    24
+      15,    16,    17,    18,    19,    20,    21,    22,    23
 };
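
The YYTRANSLATE change above is mostly renumbering, but the mechanism is worth spelling out: token codes returned by yylex are mapped through yytranslate[], and anything above YYMAXUTOK collapses to the "undefined token" symbol. A tiny sketch with a made-up four-entry table (dtc's real table is the one printed above):

    #include <stdio.h>

    static const unsigned char demo_translate[] = { 0, 1, 2, 3 };

    #define DEMO_UNDEFTOK 2
    #define DEMO_MAXUTOK  3
    #define DEMO_TRANSLATE(x) \
      ((unsigned int) (x) <= DEMO_MAXUTOK ? demo_translate[x] : DEMO_UNDEFTOK)

    int main(void)
    {
        printf("in range  -> %d\n", DEMO_TRANSLATE(1));    /* table lookup: 1          */
        printf("too large -> %d\n", DEMO_TRANSLATE(999));  /* clamped to undefined: 2  */
        return 0;
    }
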
 
 #if YYDEBUG
-/* YYPRHS[YYN] -- Index of the first RHS symbol of rule number YYN in
-   YYRHS.  */
-static const yytype_uint16 yyprhs[] =
-{
-       0,     0,     3,     8,     9,    12,    17,    20,    23,    27,
-      31,    36,    42,    43,    46,    51,    54,    58,    61,    64,
-      68,    73,    76,    86,    92,    95,    96,    99,   102,   106,
-     108,   111,   114,   117,   119,   121,   125,   127,   129,   135,
-     137,   141,   143,   147,   149,   153,   155,   159,   161,   165,
-     167,   171,   175,   177,   181,   185,   189,   193,   197,   201,
-     203,   207,   211,   213,   217,   221,   225,   227,   229,   232,
-     235,   238,   239,   242,   245,   246,   249,   252,   255,   259
-};
-
-/* YYRHS -- A `-1'-separated list of the rules' RHS.  */
-static const yytype_int8 yyrhs[] =
-{
-      49,     0,    -1,     3,    25,    50,    52,    -1,    -1,    51,
-      50,    -1,     4,    59,    59,    25,    -1,    22,    51,    -1,
-      26,    53,    -1,    52,    26,    53,    -1,    52,    23,    53,
-      -1,    52,    15,    23,    25,    -1,    27,    54,    74,    28,
-      25,    -1,    -1,    54,    55,    -1,    16,    29,    56,    25,
-      -1,    16,    25,    -1,    14,    16,    25,    -1,    22,    55,
-      -1,    57,    21,    -1,    57,    58,    30,    -1,    57,    31,
-      73,    32,    -1,    57,    23,    -1,    57,    24,    33,    21,
-      34,    59,    34,    59,    35,    -1,    57,    24,    33,    21,
-      35,    -1,    56,    22,    -1,    -1,    56,    34,    -1,    57,
-      22,    -1,    13,    17,    36,    -1,    36,    -1,    58,    59,
-      -1,    58,    23,    -1,    58,    22,    -1,    17,    -1,    18,
-      -1,    33,    60,    35,    -1,    61,    -1,    62,    -1,    62,
-      37,    60,    38,    61,    -1,    63,    -1,    62,    12,    63,
-      -1,    64,    -1,    63,    11,    64,    -1,    65,    -1,    64,
-      39,    65,    -1,    66,    -1,    65,    40,    66,    -1,    67,
-      -1,    66,    41,    67,    -1,    68,    -1,    67,     9,    68,
-      -1,    67,    10,    68,    -1,    69,    -1,    68,    36,    69,
-      -1,    68,    30,    69,    -1,    68,     7,    69,    -1,    68,
-       8,    69,    -1,    69,     5,    70,    -1,    69,     6,    70,
-      -1,    70,    -1,    70,    42,    71,    -1,    70,    43,    71,
-      -1,    71,    -1,    71,    44,    72,    -1,    71,    26,    72,
-      -1,    71,    45,    72,    -1,    72,    -1,    59,    -1,    43,
-      72,    -1,    46,    72,    -1,    47,    72,    -1,    -1,    73,
-      20,    -1,    73,    22,    -1,    -1,    75,    74,    -1,    75,
-      55,    -1,    16,    53,    -1,    15,    16,    25,    -1,    22,
-      75,    -1
-};
-
-/* YYRLINE[YYN] -- source line where rule number YYN was defined.  */
+  /* YYRLINE[YYN] -- Source line where rule number YYN was defined.  */
 static const yytype_uint16 yyrline[] =
 {
-       0,   109,   109,   118,   121,   128,   132,   140,   144,   148,
-     158,   172,   180,   183,   190,   194,   198,   202,   210,   214,
-     218,   222,   226,   243,   253,   261,   264,   268,   275,   290,
-     295,   315,   329,   336,   340,   344,   351,   355,   356,   360,
-     361,   365,   366,   370,   371,   375,   376,   380,   381,   385,
-     386,   387,   391,   392,   393,   394,   395,   399,   400,   401,
-     405,   406,   407,   411,   412,   413,   414,   418,   419,   420,
-     421,   426,   429,   433,   441,   444,   448,   456,   460,   464
+       0,   104,   104,   113,   116,   123,   127,   135,   139,   144,
+     155,   165,   180,   188,   191,   198,   202,   206,   210,   218,
+     222,   226,   230,   234,   250,   260,   268,   271,   275,   282,
+     298,   303,   322,   336,   343,   344,   345,   352,   356,   357,
+     361,   362,   366,   367,   371,   372,   376,   377,   381,   382,
+     386,   387,   388,   392,   393,   394,   395,   396,   400,   401,
+     402,   406,   407,   408,   412,   413,   414,   415,   419,   420,
+     421,   422,   427,   430,   434,   442,   445,   449,   457,   461,
+     465
 };
 #endif
 
@@ -549,209 +513,199 @@ static const char *const yytname[] =
   "$end", "error", "$undefined", "DT_V1", "DT_MEMRESERVE", "DT_LSHIFT",
   "DT_RSHIFT", "DT_LE", "DT_GE", "DT_EQ", "DT_NE", "DT_AND", "DT_OR",
   "DT_BITS", "DT_DEL_PROP", "DT_DEL_NODE", "DT_PROPNODENAME", "DT_LITERAL",
-  "DT_CHAR_LITERAL", "DT_BASE", "DT_BYTE", "DT_STRING", "DT_LABEL",
-  "DT_REF", "DT_INCBIN", "';'", "'/'", "'{'", "'}'", "'='", "'>'", "'['",
-  "']'", "'('", "','", "')'", "'<'", "'?'", "':'", "'|'", "'^'", "'&'",
-  "'+'", "'-'", "'*'", "'%'", "'~'", "'!'", "$accept", "sourcefile",
+  "DT_CHAR_LITERAL", "DT_BYTE", "DT_STRING", "DT_LABEL", "DT_REF",
+  "DT_INCBIN", "';'", "'/'", "'{'", "'}'", "'='", "'>'", "'['", "']'",
+  "'('", "','", "')'", "'<'", "'?'", "':'", "'|'", "'^'", "'&'", "'+'",
+  "'-'", "'*'", "'%'", "'~'", "'!'", "$accept", "sourcefile",
   "memreserves", "memreserve", "devicetree", "nodedef", "proplist",
   "propdef", "propdata", "propdataprefix", "arrayprefix", "integer_prim",
   "integer_expr", "integer_trinary", "integer_or", "integer_and",
   "integer_bitor", "integer_bitxor", "integer_bitand", "integer_eq",
   "integer_rela", "integer_shift", "integer_add", "integer_mul",
-  "integer_unary", "bytestring", "subnodes", "subnode", YY_NULL
+  "integer_unary", "bytestring", "subnodes", "subnode", YY_NULLPTR
 };
 #endif
 
 # ifdef YYPRINT
-/* YYTOKNUM[YYLEX-NUM] -- Internal token number corresponding to
-   token YYLEX-NUM.  */
+/* YYTOKNUM[NUM] -- (External) token number corresponding to the
+   (internal) symbol number NUM (which must be that of a token).  */
 static const yytype_uint16 yytoknum[] =
 {
        0,   256,   257,   258,   259,   260,   261,   262,   263,   264,
      265,   266,   267,   268,   269,   270,   271,   272,   273,   274,
-     275,   276,   277,   278,   279,    59,    47,   123,   125,    61,
-      62,    91,    93,    40,    44,    41,    60,    63,    58,   124,
-      94,    38,    43,    45,    42,    37,   126,    33
+     275,   276,   277,   278,    59,    47,   123,   125,    61,    62,
+      91,    93,    40,    44,    41,    60,    63,    58,   124,    94,
+      38,    43,    45,    42,    37,   126,    33
 };
 # endif
 
-/* YYR1[YYN] -- Symbol number of symbol that rule YYN derives.  */
-static const yytype_uint8 yyr1[] =
-{
-       0,    48,    49,    50,    50,    51,    51,    52,    52,    52,
-      52,    53,    54,    54,    55,    55,    55,    55,    56,    56,
-      56,    56,    56,    56,    56,    57,    57,    57,    58,    58,
-      58,    58,    58,    59,    59,    59,    60,    61,    61,    62,
-      62,    63,    63,    64,    64,    65,    65,    66,    66,    67,
-      67,    67,    68,    68,    68,    68,    68,    69,    69,    69,
-      70,    70,    70,    71,    71,    71,    71,    72,    72,    72,
-      72,    73,    73,    73,    74,    74,    74,    75,    75,    75
-};
+#define YYPACT_NINF -81
 
-/* YYR2[YYN] -- Number of symbols composing right hand side of rule YYN.  */
-static const yytype_uint8 yyr2[] =
+#define yypact_value_is_default(Yystate) \
+  (!!((Yystate) == (-81)))
+
+#define YYTABLE_NINF -1
+
+#define yytable_value_is_error(Yytable_value) \
+  0
+
+  /* YYPACT[STATE-NUM] -- Index in YYTABLE of the portion describing
+     STATE-NUM.  */
+static const yytype_int8 yypact[] =
 {
-       0,     2,     4,     0,     2,     4,     2,     2,     3,     3,
-       4,     5,     0,     2,     4,     2,     3,     2,     2,     3,
-       4,     2,     9,     5,     2,     0,     2,     2,     3,     1,
-       2,     2,     2,     1,     1,     3,     1,     1,     5,     1,
-       3,     1,     3,     1,     3,     1,     3,     1,     3,     1,
-       3,     3,     1,     3,     3,     3,     3,     3,     3,     1,
-       3,     3,     1,     3,     3,     3,     1,     1,     2,     2,
-       2,     0,     2,     2,     0,     2,     2,     2,     3,     2
+      16,   -11,    21,    10,   -81,    25,    10,    19,    10,   -81,
+     -81,    -9,    25,   -81,     2,    51,   -81,    -9,    -9,    -9,
+     -81,     1,   -81,    -6,    50,    14,    28,    29,    36,     3,
+      58,    44,    -3,   -81,    47,   -81,   -81,    65,    68,     2,
+       2,   -81,   -81,   -81,   -81,    -9,    -9,    -9,    -9,    -9,
+      -9,    -9,    -9,    -9,    -9,    -9,    -9,    -9,    -9,    -9,
+      -9,    -9,    -9,    -9,   -81,    63,    69,     2,   -81,   -81,
+      50,    57,    14,    28,    29,    36,     3,     3,    58,    58,
+      58,    58,    44,    44,    -3,    -3,   -81,   -81,   -81,    79,
+      80,    -8,    63,   -81,    72,    63,   -81,   -81,    -9,    76,
+      77,   -81,   -81,   -81,   -81,   -81,    78,   -81,   -81,   -81,
+     -81,   -81,    35,     4,   -81,   -81,   -81,   -81,    86,   -81,
+     -81,   -81,    73,   -81,   -81,    33,    71,    84,    39,   -81,
+     -81,   -81,   -81,   -81,    41,   -81,   -81,   -81,    25,   -81,
+      74,    25,    75,   -81
 };
 
-/* YYDEFACT[STATE-NAME] -- Default reduction number in state STATE-NUM.
-   Performed when YYTABLE doesn't specify something else to do.  Zero
-   means the default is an error.  */
+  /* YYDEFACT[STATE-NUM] -- Default reduction number in state STATE-NUM.
+     Performed when YYTABLE does not specify something else to do.  Zero
+     means the default is an error.  */
 static const yytype_uint8 yydefact[] =
 {
-       0,     0,     0,     3,     1,     0,     0,     0,     3,    33,
-      34,     0,     0,     6,     0,     2,     4,     0,     0,     0,
-      67,     0,    36,    37,    39,    41,    43,    45,    47,    49,
-      52,    59,    62,    66,     0,    12,     7,     0,     0,     0,
-      68,    69,    70,    35,     0,     0,     0,     0,     0,     0,
+       0,     0,     0,     3,     1,     0,     0,     0,     3,    34,
+      35,     0,     0,     6,     0,     2,     4,     0,     0,     0,
+      68,     0,    37,    38,    40,    42,    44,    46,    48,    50,
+      53,    60,    63,    67,     0,    13,     7,     0,     0,     0,
+       0,    69,    70,    71,    36,     0,     0,     0,     0,     0,
        0,     0,     0,     0,     0,     0,     0,     0,     0,     0,
-       0,     0,     0,     5,    74,     0,     9,     8,    40,     0,
-      42,    44,    46,    48,    50,    51,    55,    56,    54,    53,
-      57,    58,    60,    61,    64,    63,    65,     0,     0,     0,
-       0,    13,     0,    74,    10,     0,     0,     0,    15,    25,
-      77,    17,    79,     0,    76,    75,    38,    16,    78,     0,
-       0,    11,    24,    14,    26,     0,    18,    27,    21,     0,
-      71,    29,     0,     0,     0,     0,    32,    31,    19,    30,
-      28,     0,    72,    73,    20,     0,    23,     0,     0,     0,
-      22
+       0,     0,     0,     0,     5,    75,     0,     0,    10,     8,
+      41,     0,    43,    45,    47,    49,    51,    52,    56,    57,
+      55,    54,    58,    59,    61,    62,    65,    64,    66,     0,
+       0,     0,     0,    14,     0,    75,    11,     9,     0,     0,
+       0,    16,    26,    78,    18,    80,     0,    77,    76,    39,
+      17,    79,     0,     0,    12,    25,    15,    27,     0,    19,
+      28,    22,     0,    72,    30,     0,     0,     0,     0,    33,
+      32,    20,    31,    29,     0,    73,    74,    21,     0,    24,
+       0,     0,     0,    23
 };
 
-/* YYDEFGOTO[NTERM-NUM].  */
-static const yytype_int8 yydefgoto[] =
+  /* YYPGOTO[NTERM-NUM].  */
+static const yytype_int8 yypgoto[] =
 {
-      -1,     2,     7,     8,    15,    36,    64,    91,   109,   110,
-     122,    20,    21,    22,    23,    24,    25,    26,    27,    28,
-      29,    30,    31,    32,    33,   125,    92,    93
+     -81,   -81,   100,   104,   -81,   -38,   -81,   -80,   -81,   -81,
+     -81,    -5,    66,    13,   -81,    70,    67,    81,    64,    82,
+      37,    27,    34,    38,   -14,   -81,    22,    24
 };
 
-/* YYPACT[STATE-NUM] -- Index in YYTABLE of the portion describing
-   STATE-NUM.  */
-#define YYPACT_NINF -78
-static const yytype_int8 yypact[] =
+  /* YYDEFGOTO[NTERM-NUM].  */
+static const yytype_int16 yydefgoto[] =
 {
-      22,    11,    51,    10,   -78,    23,    10,     2,    10,   -78,
-     -78,    -9,    23,   -78,    30,    38,   -78,    -9,    -9,    -9,
-     -78,    35,   -78,    -6,    52,    29,    48,    49,    33,     3,
-      71,    36,     0,   -78,    64,   -78,   -78,    68,    30,    30,
-     -78,   -78,   -78,   -78,    -9,    -9,    -9,    -9,    -9,    -9,
-      -9,    -9,    -9,    -9,    -9,    -9,    -9,    -9,    -9,    -9,
-      -9,    -9,    -9,   -78,    44,    67,   -78,   -78,    52,    55,
-      29,    48,    49,    33,     3,     3,    71,    71,    71,    71,
-      36,    36,     0,     0,   -78,   -78,   -78,    78,    79,    42,
-      44,   -78,    69,    44,   -78,    -9,    73,    74,   -78,   -78,
-     -78,   -78,   -78,    75,   -78,   -78,   -78,   -78,   -78,    -7,
-      -1,   -78,   -78,   -78,   -78,    84,   -78,   -78,   -78,    63,
-     -78,   -78,    32,    66,    82,    -3,   -78,   -78,   -78,   -78,
-     -78,    46,   -78,   -78,   -78,    23,   -78,    70,    23,    72,
-     -78
+      -1,     2,     7,     8,    15,    36,    65,    93,   112,   113,
+     125,    20,    21,    22,    23,    24,    25,    26,    27,    28,
+      29,    30,    31,    32,    33,   128,    94,    95
 };
 
-/* YYPGOTO[NTERM-NUM].  */
-static const yytype_int8 yypgoto[] =
+  /* YYTABLE[YYPACT[STATE-NUM]] -- What to do in state STATE-NUM.  If
+     positive, shift that token.  If negative, reduce the rule whose
+     number is the opposite.  If YYTABLE_NINF, syntax error.  */
+static const yytype_uint8 yytable[] =
 {
-     -78,   -78,    97,   100,   -78,   -37,   -78,   -77,   -78,   -78,
-     -78,    -5,    65,    13,   -78,    76,    77,    62,    80,    83,
-      34,    20,    26,    28,   -14,   -78,    18,    24
+      12,    68,    69,    41,    42,    43,    45,    34,     9,    10,
+      53,    54,   104,     3,     5,   107,   101,   118,    35,     1,
+     102,     4,    61,    11,   119,   120,   121,   122,    35,    97,
+      46,     6,    55,    17,   123,    44,    18,    19,    56,   124,
+      62,    63,     9,    10,    14,    51,    52,    86,    87,    88,
+       9,    10,    48,   103,   129,   130,   115,    11,   135,   116,
+     136,    47,   131,    57,    58,    11,    37,    49,   117,    50,
+     137,    64,    38,    39,   138,   139,    40,    89,    90,    91,
+      78,    79,    80,    81,    92,    59,    60,    66,    76,    77,
+      67,    82,    83,    96,    98,    99,   100,    84,    85,   106,
+     110,   111,   114,   126,   134,   127,   133,   141,    16,   143,
+      13,   109,    71,    74,    72,    70,   105,   108,     0,     0,
+     132,     0,     0,     0,     0,     0,     0,     0,     0,    73,
+       0,     0,    75,   140,     0,     0,   142
 };
 
-/* YYTABLE[YYPACT[STATE-NUM]].  What to do in state STATE-NUM.  If
-   positive, shift that token.  If negative, reduce the rule which
-   number is the opposite.  If YYTABLE_NINF, syntax error.  */
-#define YYTABLE_NINF -1
-static const yytype_uint8 yytable[] =
+static const yytype_int16 yycheck[] =
 {
-      12,    66,    67,    40,    41,    42,    44,    34,     9,    10,
-      52,    53,   115,   101,     5,   112,   104,   132,   113,   133,
-     116,   117,   118,   119,    11,     1,    60,   114,    14,   134,
-     120,    45,     6,    54,    17,   121,     3,    18,    19,    55,
-       9,    10,    50,    51,    61,    62,    84,    85,    86,     9,
-      10,     4,   100,    37,   126,   127,    11,    35,    87,    88,
-      89,    38,   128,    46,    39,    11,    90,    98,    47,    35,
-      43,    99,    76,    77,    78,    79,    56,    57,    58,    59,
-     135,   136,    80,    81,    74,    75,    82,    83,    48,    63,
-      49,    65,    94,    95,    96,    97,   124,   103,   107,   108,
-     111,   123,   130,   131,   138,    16,    13,   140,   106,    71,
-      69,   105,     0,     0,   102,     0,     0,   129,     0,     0,
-      68,     0,     0,    70,     0,     0,     0,     0,    72,     0,
-     137,     0,    73,   139
+       5,    39,    40,    17,    18,    19,    12,    12,    17,    18,
+       7,     8,    92,    24,     4,    95,    24,    13,    26,     3,
+      28,     0,    25,    32,    20,    21,    22,    23,    26,    67,
+      36,    21,    29,    42,    30,    34,    45,    46,    35,    35,
+      43,    44,    17,    18,    25,     9,    10,    61,    62,    63,
+      17,    18,    38,    91,    21,    22,    21,    32,    19,    24,
+      21,    11,    29,     5,     6,    32,    15,    39,    33,    40,
+      31,    24,    21,    22,    33,    34,    25,    14,    15,    16,
+      53,    54,    55,    56,    21,    41,    42,    22,    51,    52,
+      22,    57,    58,    24,    37,    16,    16,    59,    60,    27,
+      24,    24,    24,    17,    20,    32,    35,    33,     8,    34,
+       6,    98,    46,    49,    47,    45,    92,    95,    -1,    -1,
+     125,    -1,    -1,    -1,    -1,    -1,    -1,    -1,    -1,    48,
+      -1,    -1,    50,   138,    -1,    -1,   141
 };
 
-#define yypact_value_is_default(Yystate) \
-  (!!((Yystate) == (-78)))
-
-#define yytable_value_is_error(Yytable_value) \
-  YYID (0)
+  /* YYSTOS[STATE-NUM] -- The (internal number of the) accessing
+     symbol of state STATE-NUM.  */
+static const yytype_uint8 yystos[] =
+{
+       0,     3,    48,    24,     0,     4,    21,    49,    50,    17,
+      18,    32,    58,    50,    25,    51,    49,    42,    45,    46,
+      58,    59,    60,    61,    62,    63,    64,    65,    66,    67,
+      68,    69,    70,    71,    58,    26,    52,    15,    21,    22,
+      25,    71,    71,    71,    34,    12,    36,    11,    38,    39,
+      40,     9,    10,     7,     8,    29,    35,     5,     6,    41,
+      42,    25,    43,    44,    24,    53,    22,    22,    52,    52,
+      62,    59,    63,    64,    65,    66,    67,    67,    68,    68,
+      68,    68,    69,    69,    70,    70,    71,    71,    71,    14,
+      15,    16,    21,    54,    73,    74,    24,    52,    37,    16,
+      16,    24,    28,    52,    54,    74,    27,    54,    73,    60,
+      24,    24,    55,    56,    24,    21,    24,    33,    13,    20,
+      21,    22,    23,    30,    35,    57,    17,    32,    72,    21,
+      22,    29,    58,    35,    20,    19,    21,    31,    33,    34,
+      58,    33,    58,    34
+};
 
-static const yytype_int16 yycheck[] =
+  /* YYR1[YYN] -- Symbol number of symbol that rule YYN derives.  */
+static const yytype_uint8 yyr1[] =
 {
-       5,    38,    39,    17,    18,    19,    12,    12,    17,    18,
-       7,     8,    13,    90,     4,    22,    93,    20,    25,    22,
-      21,    22,    23,    24,    33,     3,    26,    34,    26,    32,
-      31,    37,    22,    30,    43,    36,    25,    46,    47,    36,
-      17,    18,     9,    10,    44,    45,    60,    61,    62,    17,
-      18,     0,    89,    15,    22,    23,    33,    27,    14,    15,
-      16,    23,    30,    11,    26,    33,    22,    25,    39,    27,
-      35,    29,    52,    53,    54,    55,     5,     6,    42,    43,
-      34,    35,    56,    57,    50,    51,    58,    59,    40,    25,
-      41,    23,    25,    38,    16,    16,    33,    28,    25,    25,
-      25,    17,    36,    21,    34,     8,     6,    35,    95,    47,
-      45,    93,    -1,    -1,    90,    -1,    -1,   122,    -1,    -1,
-      44,    -1,    -1,    46,    -1,    -1,    -1,    -1,    48,    -1,
-     135,    -1,    49,   138
+       0,    47,    48,    49,    49,    50,    50,    51,    51,    51,
+      51,    51,    52,    53,    53,    54,    54,    54,    54,    55,
+      55,    55,    55,    55,    55,    55,    56,    56,    56,    57,
+      57,    57,    57,    57,    58,    58,    58,    59,    60,    60,
+      61,    61,    62,    62,    63,    63,    64,    64,    65,    65,
+      66,    66,    66,    67,    67,    67,    67,    67,    68,    68,
+      68,    69,    69,    69,    70,    70,    70,    70,    71,    71,
+      71,    71,    72,    72,    72,    73,    73,    73,    74,    74,
+      74
 };
 
-/* YYSTOS[STATE-NUM] -- The (internal number of the) accessing
-   symbol of state STATE-NUM.  */
-static const yytype_uint8 yystos[] =
+  /* YYR2[YYN] -- Number of symbols on the right hand side of rule YYN.  */
+static const yytype_uint8 yyr2[] =
 {
-       0,     3,    49,    25,     0,     4,    22,    50,    51,    17,
-      18,    33,    59,    51,    26,    52,    50,    43,    46,    47,
-      59,    60,    61,    62,    63,    64,    65,    66,    67,    68,
-      69,    70,    71,    72,    59,    27,    53,    15,    23,    26,
-      72,    72,    72,    35,    12,    37,    11,    39,    40,    41,
-       9,    10,     7,     8,    30,    36,     5,     6,    42,    43,
-      26,    44,    45,    25,    54,    23,    53,    53,    63,    60,
-      64,    65,    66,    67,    68,    68,    69,    69,    69,    69,
-      70,    70,    71,    71,    72,    72,    72,    14,    15,    16,
-      22,    55,    74,    75,    25,    38,    16,    16,    25,    29,
-      53,    55,    75,    28,    55,    74,    61,    25,    25,    56,
-      57,    25,    22,    25,    34,    13,    21,    22,    23,    24,
-      31,    36,    58,    17,    33,    73,    22,    23,    30,    59,
-      36,    21,    20,    22,    32,    34,    35,    59,    34,    59,
-      35
+       0,     2,     4,     0,     2,     4,     2,     2,     3,     4,
+       3,     4,     5,     0,     2,     4,     2,     3,     2,     2,
+       3,     4,     2,     9,     5,     2,     0,     2,     2,     3,
+       1,     2,     2,     2,     1,     1,     3,     1,     1,     5,
+       1,     3,     1,     3,     1,     3,     1,     3,     1,     3,
+       1,     3,     3,     1,     3,     3,     3,     3,     3,     3,
+       1,     3,     3,     1,     3,     3,     3,     1,     1,     2,
+       2,     2,     0,     2,     2,     0,     2,     2,     2,     3,
+       2
 };
 
-#define yyerrok                (yyerrstatus = 0)
-#define yyclearin      (yychar = YYEMPTY)
-#define YYEMPTY                (-2)
-#define YYEOF          0
-
-#define YYACCEPT       goto yyacceptlab
-#define YYABORT                goto yyabortlab
-#define YYERROR                goto yyerrorlab
-
-
-/* Like YYERROR except do call yyerror.  This remains here temporarily
-   to ease the transition to the new meaning of YYERROR, for GCC.
-   Once GCC version 2 has supplanted version 1, this can go.  However,
-   YYFAIL appears to be in use.  Nevertheless, it is formally deprecated
-   in Bison 2.4.2's NEWS entry, where a plan to phase it out is
-   discussed.  */
-
-#define YYFAIL         goto yyerrlab
-#if defined YYFAIL
-  /* This is here to suppress warnings from the GCC cpp's
-     -Wunused-macros.  Normally we don't worry about that warning, but
-     some users do, and we want to make it easy for users to remove
-     YYFAIL uses, which will produce warnings from Bison 2.5.  */
-#endif
+
+#define yyerrok         (yyerrstatus = 0)
+#define yyclearin       (yychar = YYEMPTY)
+#define YYEMPTY         (-2)
+#define YYEOF           0
+
+#define YYACCEPT        goto yyacceptlab
+#define YYABORT         goto yyabortlab
+#define YYERROR         goto yyerrorlab
+
 
 #define YYRECOVERING()  (!!yyerrstatus)
 
@@ -768,27 +722,41 @@ do                                                              \
   else                                                          \
     {                                                           \
       yyerror (YY_("syntax error: cannot back up")); \
-      YYERROR;                                                 \
-    }                                                          \
-while (YYID (0))
+      YYERROR;                                                  \
+    }                                                           \
+while (0)
 
 /* Error token number */
-#define YYTERROR       1
-#define YYERRCODE      256
-
-
-/* This macro is provided for backward compatibility. */
-#ifndef YY_LOCATION_PRINT
-# define YY_LOCATION_PRINT(File, Loc) ((void) 0)
+#define YYTERROR        1
+#define YYERRCODE       256
+
+
+/* YYLLOC_DEFAULT -- Set CURRENT to span from RHS[1] to RHS[N].
+   If N is 0, then set CURRENT to the empty location which ends
+   the previous symbol: RHS[0] (always defined).  */
+
+#ifndef YYLLOC_DEFAULT
+# define YYLLOC_DEFAULT(Current, Rhs, N)                                \
+    do                                                                  \
+      if (N)                                                            \
+        {                                                               \
+          (Current).first_line   = YYRHSLOC (Rhs, 1).first_line;        \
+          (Current).first_column = YYRHSLOC (Rhs, 1).first_column;      \
+          (Current).last_line    = YYRHSLOC (Rhs, N).last_line;         \
+          (Current).last_column  = YYRHSLOC (Rhs, N).last_column;       \
+        }                                                               \
+      else                                                              \
+        {                                                               \
+          (Current).first_line   = (Current).last_line   =              \
+            YYRHSLOC (Rhs, 0).last_line;                                \
+          (Current).first_column = (Current).last_column =              \
+            YYRHSLOC (Rhs, 0).last_column;                              \
+        }                                                               \
+    while (0)
 #endif
 
+#define YYRHSLOC(Rhs, K) ((Rhs)[K])
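
The newly generated YYLLOC_DEFAULT above computes the location of a reduced rule by spanning from the first right-hand-side symbol to the last, or, for an empty rule, collapsing to the end of the previous symbol. A standalone sketch of the same computation, using a hypothetical four-field location struct in place of the parser's YYLTYPE:

    #include <stdio.h>

    typedef struct {
        int first_line, first_column;
        int last_line, last_column;
    } LOC;   /* same four fields YYLLOC_DEFAULT reads and writes */

    #define RHSLOC(rhs, k) ((rhs)[k])
    #define LLOC_DEFAULT(cur, rhs, n)                                      \
        do                                                                 \
          if (n)                                                           \
            {                                                              \
              (cur).first_line   = RHSLOC (rhs, 1).first_line;             \
              (cur).first_column = RHSLOC (rhs, 1).first_column;           \
              (cur).last_line    = RHSLOC (rhs, n).last_line;              \
              (cur).last_column  = RHSLOC (rhs, n).last_column;            \
            }                                                              \
          else                                                             \
            {                                                              \
              (cur).first_line   = (cur).last_line   = RHSLOC (rhs, 0).last_line;   \
              (cur).first_column = (cur).last_column = RHSLOC (rhs, 0).last_column; \
            }                                                              \
        while (0)

    int main(void)
    {
        /* rhs[0] is the symbol just before the rule; rhs[1..3] are $1..$3. */
        LOC rhs[4] = {
            { 1,  1, 1,  5 },
            { 2,  1, 2, 10 },   /* $1 */
            { 2, 12, 2, 14 },   /* $2 */
            { 3,  1, 3,  7 },   /* $3 */
        };
        LOC span;

        LLOC_DEFAULT (span, rhs, 3);   /* $$ spans from $1 to $3 */
        printf("$$ covers %d.%d-%d.%d\n",
               span.first_line, span.first_column,
               span.last_line, span.last_column);
        return 0;
    }
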
 
-/* YYLEX -- calling `yylex' with the right arguments.  */
-#ifdef YYLEX_PARAM
-# define YYLEX yylex (YYLEX_PARAM)
-#else
-# define YYLEX yylex ()
-#endif
 
 /* Enable debugging if requested.  */
 #if YYDEBUG
@@ -798,50 +766,84 @@ while (YYID (0))
 #  define YYFPRINTF fprintf
 # endif
 
-# define YYDPRINTF(Args)                       \
-do {                                           \
-  if (yydebug)                                 \
-    YYFPRINTF Args;                            \
-} while (YYID (0))
+# define YYDPRINTF(Args)                        \
+do {                                            \
+  if (yydebug)                                  \
+    YYFPRINTF Args;                             \
+} while (0)
 
-# define YY_SYMBOL_PRINT(Title, Type, Value, Location)                   \
-do {                                                                     \
-  if (yydebug)                                                           \
-    {                                                                    \
-      YYFPRINTF (stderr, "%s ", Title);                                          \
-      yy_symbol_print (stderr,                                           \
-                 Type, Value); \
-      YYFPRINTF (stderr, "\n");                                                  \
-    }                                                                    \
-} while (YYID (0))
 
+/* YY_LOCATION_PRINT -- Print the location on the stream.
+   This macro was not mandated originally: define only if we know
+   we won't break user code: when these are the locations we know.  */
 
-/*--------------------------------.
-| Print this symbol on YYOUTPUT.  |
-`--------------------------------*/
+#ifndef YY_LOCATION_PRINT
+# if defined YYLTYPE_IS_TRIVIAL && YYLTYPE_IS_TRIVIAL
 
-/*ARGSUSED*/
-#if (defined __STDC__ || defined __C99__FUNC__ \
-     || defined __cplusplus || defined _MSC_VER)
-static void
-yy_symbol_value_print (FILE *yyoutput, int yytype, YYSTYPE const * const yyvaluep)
-#else
-static void
-yy_symbol_value_print (yyoutput, yytype, yyvaluep)
-    FILE *yyoutput;
-    int yytype;
-    YYSTYPE const * const yyvaluep;
+/* Print *YYLOCP on YYO.  Private, do not rely on its existence. */
+
+YY_ATTRIBUTE_UNUSED
+static unsigned
+yy_location_print_ (FILE *yyo, YYLTYPE const * const yylocp)
+{
+  unsigned res = 0;
+  int end_col = 0 != yylocp->last_column ? yylocp->last_column - 1 : 0;
+  if (0 <= yylocp->first_line)
+    {
+      res += YYFPRINTF (yyo, "%d", yylocp->first_line);
+      if (0 <= yylocp->first_column)
+        res += YYFPRINTF (yyo, ".%d", yylocp->first_column);
+    }
+  if (0 <= yylocp->last_line)
+    {
+      if (yylocp->first_line < yylocp->last_line)
+        {
+          res += YYFPRINTF (yyo, "-%d", yylocp->last_line);
+          if (0 <= end_col)
+            res += YYFPRINTF (yyo, ".%d", end_col);
+        }
+      else if (0 <= end_col && yylocp->first_column < end_col)
+        res += YYFPRINTF (yyo, "-%d", end_col);
+    }
+  return res;
+ }
+
+#  define YY_LOCATION_PRINT(File, Loc)          \
+  yy_location_print_ (File, &(Loc))
+
+# else
+#  define YY_LOCATION_PRINT(File, Loc) ((void) 0)
+# endif
 #endif
+
+
+# define YY_SYMBOL_PRINT(Title, Type, Value, Location)                    \
+do {                                                                      \
+  if (yydebug)                                                            \
+    {                                                                     \
+      YYFPRINTF (stderr, "%s ", Title);                                   \
+      yy_symbol_print (stderr,                                            \
+                  Type, Value, Location); \
+      YYFPRINTF (stderr, "\n");                                           \
+    }                                                                     \
+} while (0)
+
+
+/*----------------------------------------.
+| Print this symbol's value on YYOUTPUT.  |
+`----------------------------------------*/
+
+static void
+yy_symbol_value_print (FILE *yyoutput, int yytype, YYSTYPE const * const yyvaluep, YYLTYPE const * const yylocationp)
 {
   FILE *yyo = yyoutput;
   YYUSE (yyo);
+  YYUSE (yylocationp);
   if (!yyvaluep)
     return;
 # ifdef YYPRINT
   if (yytype < YYNTOKENS)
     YYPRINT (yyoutput, yytoknum[yytype], *yyvaluep);
-# else
-  YYUSE (yyoutput);
 # endif
   YYUSE (yytype);
 }
@@ -851,24 +853,15 @@ yy_symbol_value_print (yyoutput, yytype, yyvaluep)
 | Print this symbol on YYOUTPUT.  |
 `--------------------------------*/
 
-#if (defined __STDC__ || defined __C99__FUNC__ \
-     || defined __cplusplus || defined _MSC_VER)
-static void
-yy_symbol_print (FILE *yyoutput, int yytype, YYSTYPE const * const yyvaluep)
-#else
 static void
-yy_symbol_print (yyoutput, yytype, yyvaluep)
-    FILE *yyoutput;
-    int yytype;
-    YYSTYPE const * const yyvaluep;
-#endif
+yy_symbol_print (FILE *yyoutput, int yytype, YYSTYPE const * const yyvaluep, YYLTYPE const * const yylocationp)
 {
-  if (yytype < YYNTOKENS)
-    YYFPRINTF (yyoutput, "token %s (", yytname[yytype]);
-  else
-    YYFPRINTF (yyoutput, "nterm %s (", yytname[yytype]);
+  YYFPRINTF (yyoutput, "%s %s (",
+             yytype < YYNTOKENS ? "token" : "nterm", yytname[yytype]);
 
-  yy_symbol_value_print (yyoutput, yytype, yyvaluep);
+  YY_LOCATION_PRINT (yyoutput, *yylocationp);
+  YYFPRINTF (yyoutput, ": ");
+  yy_symbol_value_print (yyoutput, yytype, yyvaluep, yylocationp);
   YYFPRINTF (yyoutput, ")");
 }
 
@@ -877,16 +870,8 @@ yy_symbol_print (yyoutput, yytype, yyvaluep)
 | TOP (included).                                                   |
 `------------------------------------------------------------------*/
 
-#if (defined __STDC__ || defined __C99__FUNC__ \
-     || defined __cplusplus || defined _MSC_VER)
 static void
 yy_stack_print (yytype_int16 *yybottom, yytype_int16 *yytop)
-#else
-static void
-yy_stack_print (yybottom, yytop)
-    yytype_int16 *yybottom;
-    yytype_int16 *yytop;
-#endif
 {
   YYFPRINTF (stderr, "Stack now");
   for (; yybottom <= yytop; yybottom++)
@@ -897,49 +882,42 @@ yy_stack_print (yybottom, yytop)
   YYFPRINTF (stderr, "\n");
 }
 
-# define YY_STACK_PRINT(Bottom, Top)                           \
-do {                                                           \
-  if (yydebug)                                                 \
-    yy_stack_print ((Bottom), (Top));                          \
-} while (YYID (0))
+# define YY_STACK_PRINT(Bottom, Top)                            \
+do {                                                            \
+  if (yydebug)                                                  \
+    yy_stack_print ((Bottom), (Top));                           \
+} while (0)
 
 
 /*------------------------------------------------.
 | Report that the YYRULE is going to be reduced.  |
 `------------------------------------------------*/
 
-#if (defined __STDC__ || defined __C99__FUNC__ \
-     || defined __cplusplus || defined _MSC_VER)
-static void
-yy_reduce_print (YYSTYPE *yyvsp, int yyrule)
-#else
 static void
-yy_reduce_print (yyvsp, yyrule)
-    YYSTYPE *yyvsp;
-    int yyrule;
-#endif
+yy_reduce_print (yytype_int16 *yyssp, YYSTYPE *yyvsp, YYLTYPE *yylsp, int yyrule)
 {
+  unsigned long int yylno = yyrline[yyrule];
   int yynrhs = yyr2[yyrule];
   int yyi;
-  unsigned long int yylno = yyrline[yyrule];
   YYFPRINTF (stderr, "Reducing stack by rule %d (line %lu):\n",
-            yyrule - 1, yylno);
+             yyrule - 1, yylno);
   /* The symbols being reduced.  */
   for (yyi = 0; yyi < yynrhs; yyi++)
     {
       YYFPRINTF (stderr, "   $%d = ", yyi + 1);
-      yy_symbol_print (stderr, yyrhs[yyprhs[yyrule] + yyi],
-                      &(yyvsp[(yyi + 1) - (yynrhs)])
-                                      );
+      yy_symbol_print (stderr,
+                       yystos[yyssp[yyi + 1 - yynrhs]],
+                       &(yyvsp[(yyi + 1) - (yynrhs)])
+                       , &(yylsp[(yyi + 1) - (yynrhs)])                       );
       YYFPRINTF (stderr, "\n");
     }
 }
 
-# define YY_REDUCE_PRINT(Rule)         \
-do {                                   \
-  if (yydebug)                         \
-    yy_reduce_print (yyvsp, Rule); \
-} while (YYID (0))
+# define YY_REDUCE_PRINT(Rule)          \
+do {                                    \
+  if (yydebug)                          \
+    yy_reduce_print (yyssp, yyvsp, yylsp, Rule); \
+} while (0)
 
 /* Nonzero means print parse trace.  It is left uninitialized so that
    multiple parsers can coexist.  */
@@ -953,7 +931,7 @@ int yydebug;
 
 
 /* YYINITDEPTH -- initial size of the parser's stacks.  */
-#ifndef        YYINITDEPTH
+#ifndef YYINITDEPTH
 # define YYINITDEPTH 200
 #endif
 
@@ -976,15 +954,8 @@ int yydebug;
 #   define yystrlen strlen
 #  else
 /* Return the length of YYSTR.  */
-#if (defined __STDC__ || defined __C99__FUNC__ \
-     || defined __cplusplus || defined _MSC_VER)
 static YYSIZE_T
 yystrlen (const char *yystr)
-#else
-static YYSIZE_T
-yystrlen (yystr)
-    const char *yystr;
-#endif
 {
   YYSIZE_T yylen;
   for (yylen = 0; yystr[yylen]; yylen++)
@@ -1000,16 +971,8 @@ yystrlen (yystr)
 #  else
 /* Copy YYSRC to YYDEST, returning the address of the terminating '\0' in
    YYDEST.  */
-#if (defined __STDC__ || defined __C99__FUNC__ \
-     || defined __cplusplus || defined _MSC_VER)
 static char *
 yystpcpy (char *yydest, const char *yysrc)
-#else
-static char *
-yystpcpy (yydest, yysrc)
-    char *yydest;
-    const char *yysrc;
-#endif
 {
   char *yyd = yydest;
   const char *yys = yysrc;
@@ -1039,27 +1002,27 @@ yytnamerr (char *yyres, const char *yystr)
       char const *yyp = yystr;
 
       for (;;)
-       switch (*++yyp)
-         {
-         case '\'':
-         case ',':
-           goto do_not_strip_quotes;
-
-         case '\\':
-           if (*++yyp != '\\')
-             goto do_not_strip_quotes;
-           /* Fall through.  */
-         default:
-           if (yyres)
-             yyres[yyn] = *yyp;
-           yyn++;
-           break;
-
-         case '"':
-           if (yyres)
-             yyres[yyn] = '\0';
-           return yyn;
-         }
+        switch (*++yyp)
+          {
+          case '\'':
+          case ',':
+            goto do_not_strip_quotes;
+
+          case '\\':
+            if (*++yyp != '\\')
+              goto do_not_strip_quotes;
+            /* Fall through.  */
+          default:
+            if (yyres)
+              yyres[yyn] = *yyp;
+            yyn++;
+            break;
+
+          case '"':
+            if (yyres)
+              yyres[yyn] = '\0';
+            return yyn;
+          }
     do_not_strip_quotes: ;
     }
 
@@ -1082,11 +1045,11 @@ static int
 yysyntax_error (YYSIZE_T *yymsg_alloc, char **yymsg,
                 yytype_int16 *yyssp, int yytoken)
 {
-  YYSIZE_T yysize0 = yytnamerr (YY_NULL, yytname[yytoken]);
+  YYSIZE_T yysize0 = yytnamerr (YY_NULLPTR, yytname[yytoken]);
   YYSIZE_T yysize = yysize0;
   enum { YYERROR_VERBOSE_ARGS_MAXIMUM = 5 };
   /* Internationalized format string. */
-  const char *yyformat = YY_NULL;
+  const char *yyformat = YY_NULLPTR;
   /* Arguments of yyformat. */
   char const *yyarg[YYERROR_VERBOSE_ARGS_MAXIMUM];
   /* Number of reported tokens (one for the "unexpected", one per
@@ -1094,10 +1057,6 @@ yysyntax_error (YYSIZE_T *yymsg_alloc, char **yymsg,
   int yycount = 0;
 
   /* There are many possibilities here to consider:
-     - Assume YYFAIL is not used.  It's too flawed to consider.  See
-       <http://lists.gnu.org/archive/html/bison-patches/2009-12/msg00024.html>
-       for details.  YYERROR is fine as it does not invoke this
-       function.
      - If this state is a consistent state with a default action, then
        the only way this function was invoked is if the default action
        is an error action.  In that case, don't check for expected
@@ -1147,7 +1106,7 @@ yysyntax_error (YYSIZE_T *yymsg_alloc, char **yymsg,
                   }
                 yyarg[yycount++] = yytname[yyx];
                 {
-                  YYSIZE_T yysize1 = yysize + yytnamerr (YY_NULL, yytname[yyx]);
+                  YYSIZE_T yysize1 = yysize + yytnamerr (YY_NULLPTR, yytname[yyx]);
                   if (! (yysize <= yysize1
                          && yysize1 <= YYSTACK_ALLOC_MAXIMUM))
                     return 2;
@@ -1214,26 +1173,18 @@ yysyntax_error (YYSIZE_T *yymsg_alloc, char **yymsg,
 | Release the memory associated to this symbol.  |
 `-----------------------------------------------*/
 
-/*ARGSUSED*/
-#if (defined __STDC__ || defined __C99__FUNC__ \
-     || defined __cplusplus || defined _MSC_VER)
-static void
-yydestruct (const char *yymsg, int yytype, YYSTYPE *yyvaluep)
-#else
 static void
-yydestruct (yymsg, yytype, yyvaluep)
-    const char *yymsg;
-    int yytype;
-    YYSTYPE *yyvaluep;
-#endif
+yydestruct (const char *yymsg, int yytype, YYSTYPE *yyvaluep, YYLTYPE *yylocationp)
 {
   YYUSE (yyvaluep);
-
+  YYUSE (yylocationp);
   if (!yymsg)
     yymsg = "Deleting";
   YY_SYMBOL_PRINT (yymsg, yytype, yyvaluep, yylocationp);
 
+  YY_IGNORE_MAYBE_UNINITIALIZED_BEGIN
   YYUSE (yytype);
+  YY_IGNORE_MAYBE_UNINITIALIZED_END
 }
 
 
@@ -1242,18 +1193,14 @@ yydestruct (yymsg, yytype, yyvaluep)
 /* The lookahead symbol.  */
 int yychar;
 
-
-#ifndef YY_IGNORE_MAYBE_UNINITIALIZED_BEGIN
-# define YY_IGNORE_MAYBE_UNINITIALIZED_BEGIN
-# define YY_IGNORE_MAYBE_UNINITIALIZED_END
-#endif
-#ifndef YY_INITIAL_VALUE
-# define YY_INITIAL_VALUE(Value) /* Nothing. */
-#endif
-
 /* The semantic value of the lookahead symbol.  */
-YYSTYPE yylval YY_INITIAL_VALUE(yyval_default);
-
+YYSTYPE yylval;
+/* Location data for the lookahead symbol.  */
+YYLTYPE yylloc
+# if defined YYLTYPE_IS_TRIVIAL && YYLTYPE_IS_TRIVIAL
+  = { 1, 1, 1, 1 }
+# endif
+;
 /* Number of syntax errors so far.  */
 int yynerrs;
 
@@ -1262,35 +1209,17 @@ int yynerrs;
 | yyparse.  |
 `----------*/
 
-#ifdef YYPARSE_PARAM
-#if (defined __STDC__ || defined __C99__FUNC__ \
-     || defined __cplusplus || defined _MSC_VER)
-int
-yyparse (void *YYPARSE_PARAM)
-#else
-int
-yyparse (YYPARSE_PARAM)
-    void *YYPARSE_PARAM;
-#endif
-#else /* ! YYPARSE_PARAM */
-#if (defined __STDC__ || defined __C99__FUNC__ \
-     || defined __cplusplus || defined _MSC_VER)
 int
 yyparse (void)
-#else
-int
-yyparse ()
-
-#endif
-#endif
 {
     int yystate;
     /* Number of tokens to shift before error messages enabled.  */
     int yyerrstatus;
 
     /* The stacks and their tools:
-       `yyss': related to states.
-       `yyvs': related to semantic values.
+       'yyss': related to states.
+       'yyvs': related to semantic values.
+       'yyls': related to locations.
 
        Refer to the stacks through separate pointers, to allow yyoverflow
        to reallocate them elsewhere.  */
@@ -1305,6 +1234,14 @@ yyparse ()
     YYSTYPE *yyvs;
     YYSTYPE *yyvsp;
 
+    /* The location stack.  */
+    YYLTYPE yylsa[YYINITDEPTH];
+    YYLTYPE *yyls;
+    YYLTYPE *yylsp;
+
+    /* The locations where the error started and ended.  */
+    YYLTYPE yyerror_range[3];
+
     YYSIZE_T yystacksize;
 
   int yyn;
@@ -1314,6 +1251,7 @@ yyparse ()
   /* The variables used to return semantic value and location from the
      action routines.  */
   YYSTYPE yyval;
+  YYLTYPE yyloc;
 
 #if YYERROR_VERBOSE
   /* Buffer for error messages, and its allocated size.  */
@@ -1322,7 +1260,7 @@ yyparse ()
   YYSIZE_T yymsg_alloc = sizeof yymsgbuf;
 #endif
 
-#define YYPOPSTACK(N)   (yyvsp -= (N), yyssp -= (N))
+#define YYPOPSTACK(N)   (yyvsp -= (N), yyssp -= (N), yylsp -= (N))
 
   /* The number of symbols on the RHS of the reduced rule.
      Keep to zero when no symbol should be popped.  */
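
With location tracking enabled, the parser now maintains three parallel stacks (states, semantic values, locations), and the updated YYPOPSTACK pops them in lock-step. A small sketch of that invariant with plain arrays and made-up values:

    #include <stdio.h>

    int main(void)
    {
        /* Three parallel "stacks": one entry per shifted symbol. */
        int  states[8] = { 0, 3, 7, 12 };
        long values[8] = { 0, 10, 20, 30 };
        int  locs  [8] = { 0, 101, 102, 103 };

        int  *ssp = &states[3];
        long *vsp = &values[3];
        int  *lsp = &locs[3];

        /* Reducing a 2-symbol rule pops all three together,
           as the updated YYPOPSTACK(N) does. */
        int n = 2;
        ssp -= n; vsp -= n; lsp -= n;

        printf("state %d, value %ld, loc %d now on top\n", *ssp, *vsp, *lsp);
        return 0;
    }
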
@@ -1330,6 +1268,7 @@ yyparse ()
 
   yyssp = yyss = yyssa;
   yyvsp = yyvs = yyvsa;
+  yylsp = yyls = yylsa;
   yystacksize = YYINITDEPTH;
 
   YYDPRINTF ((stderr, "Starting parse\n"));
@@ -1338,6 +1277,7 @@ yyparse ()
   yyerrstatus = 0;
   yynerrs = 0;
   yychar = YYEMPTY; /* Cause a token to be read.  */
+  yylsp[0] = yylloc;
   goto yysetstate;
 
 /*------------------------------------------------------------.
@@ -1358,23 +1298,26 @@ yyparse ()
 
 #ifdef yyoverflow
       {
-       /* Give user a chance to reallocate the stack.  Use copies of
-          these so that the &'s don't force the real ones into
-          memory.  */
-       YYSTYPE *yyvs1 = yyvs;
-       yytype_int16 *yyss1 = yyss;
-
-       /* Each stack pointer address is followed by the size of the
-          data in use in that stack, in bytes.  This used to be a
-          conditional around just the two extra args, but that might
-          be undefined if yyoverflow is a macro.  */
-       yyoverflow (YY_("memory exhausted"),
-                   &yyss1, yysize * sizeof (*yyssp),
-                   &yyvs1, yysize * sizeof (*yyvsp),
-                   &yystacksize);
-
-       yyss = yyss1;
-       yyvs = yyvs1;
+        /* Give user a chance to reallocate the stack.  Use copies of
+           these so that the &'s don't force the real ones into
+           memory.  */
+        YYSTYPE *yyvs1 = yyvs;
+        yytype_int16 *yyss1 = yyss;
+        YYLTYPE *yyls1 = yyls;
+
+        /* Each stack pointer address is followed by the size of the
+           data in use in that stack, in bytes.  This used to be a
+           conditional around just the two extra args, but that might
+           be undefined if yyoverflow is a macro.  */
+        yyoverflow (YY_("memory exhausted"),
+                    &yyss1, yysize * sizeof (*yyssp),
+                    &yyvs1, yysize * sizeof (*yyvsp),
+                    &yyls1, yysize * sizeof (*yylsp),
+                    &yystacksize);
+
+        yyls = yyls1;
+        yyss = yyss1;
+        yyvs = yyvs1;
       }
 #else /* no yyoverflow */
 # ifndef YYSTACK_RELOCATE
@@ -1382,34 +1325,36 @@ yyparse ()
 # else
       /* Extend the stack our own way.  */
       if (YYMAXDEPTH <= yystacksize)
-       goto yyexhaustedlab;
+        goto yyexhaustedlab;
       yystacksize *= 2;
       if (YYMAXDEPTH < yystacksize)
-       yystacksize = YYMAXDEPTH;
+        yystacksize = YYMAXDEPTH;
 
       {
-       yytype_int16 *yyss1 = yyss;
-       union yyalloc *yyptr =
-         (union yyalloc *) YYSTACK_ALLOC (YYSTACK_BYTES (yystacksize));
-       if (! yyptr)
-         goto yyexhaustedlab;
-       YYSTACK_RELOCATE (yyss_alloc, yyss);
-       YYSTACK_RELOCATE (yyvs_alloc, yyvs);
+        yytype_int16 *yyss1 = yyss;
+        union yyalloc *yyptr =
+          (union yyalloc *) YYSTACK_ALLOC (YYSTACK_BYTES (yystacksize));
+        if (! yyptr)
+          goto yyexhaustedlab;
+        YYSTACK_RELOCATE (yyss_alloc, yyss);
+        YYSTACK_RELOCATE (yyvs_alloc, yyvs);
+        YYSTACK_RELOCATE (yyls_alloc, yyls);
 #  undef YYSTACK_RELOCATE
-       if (yyss1 != yyssa)
-         YYSTACK_FREE (yyss1);
+        if (yyss1 != yyssa)
+          YYSTACK_FREE (yyss1);
       }
 # endif
 #endif /* no yyoverflow */
 
       yyssp = yyss + yysize - 1;
       yyvsp = yyvs + yysize - 1;
+      yylsp = yyls + yysize - 1;
 
       YYDPRINTF ((stderr, "Stack size increased to %lu\n",
-                 (unsigned long int) yystacksize));
+                  (unsigned long int) yystacksize));
 
       if (yyss + yystacksize - 1 <= yyssp)
-       YYABORT;
+        YYABORT;
     }
 
   YYDPRINTF ((stderr, "Entering state %d\n", yystate));
@@ -1438,7 +1383,7 @@ yybackup:
   if (yychar == YYEMPTY)
     {
       YYDPRINTF ((stderr, "Reading a token: "));
-      yychar = YYLEX;
+      yychar = yylex ();
     }
 
   if (yychar <= YYEOF)
@@ -1481,7 +1426,7 @@ yybackup:
   YY_IGNORE_MAYBE_UNINITIALIZED_BEGIN
   *++yyvsp = yylval;
   YY_IGNORE_MAYBE_UNINITIALIZED_END
-
+  *++yylsp = yylloc;
   goto yynewstate;
 
 
@@ -1503,7 +1448,7 @@ yyreduce:
   yylen = yyr2[yyn];
 
   /* If YYLEN is nonzero, implement the default value of the action:
-     `$$ = $1'.
+     '$$ = $1'.
 
      Otherwise, the following line sets YYVAL to garbage.
      This behavior is undocumented and Bison
@@ -1512,287 +1457,303 @@ yyreduce:
      GCC warning that YYVAL may be used uninitialized.  */
   yyval = yyvsp[1-yylen];
 
-
+  /* Default location.  */
+  YYLLOC_DEFAULT (yyloc, (yylsp - yylen), yylen);
   YY_REDUCE_PRINT (yyn);
   switch (yyn)
     {
         case 2:
-/* Line 1787 of yacc.c  */
-#line 110 "dtc-parser.y"
+#line 105 "dtc-parser.y" /* yacc.c:1646  */
     {
-                       the_boot_info = build_boot_info((yyvsp[(3) - (4)].re), (yyvsp[(4) - (4)].node),
-                                                       guess_boot_cpuid((yyvsp[(4) - (4)].node)));
+                       the_boot_info = build_boot_info((yyvsp[-1].re), (yyvsp[0].node),
+                                                       guess_boot_cpuid((yyvsp[0].node)));
                }
+#line 1472 "dtc-parser.tab.c" /* yacc.c:1646  */
     break;
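
A readability note on the regenerated actions in this diff: newer Bison emits $n references as fixed negative offsets from the top of the value stack (yyvsp[-1], yyvsp[0]) instead of the older yyvsp[(n) - (N)] spelling; the arithmetic is identical. A tiny sketch of the equivalence with made-up values:

    #include <stdio.h>

    int main(void)
    {
        /* Value stack for a rule with 4 right-hand-side symbols;
           yyvsp points at the value of the last symbol ($4). */
        int stack[] = { 11, 22, 33, 44 };
        int *yyvsp = &stack[3];
        int n = 4;

        /* Old generated spelling vs. new one: same element ($3). */
        printf("$3 old style: %d\n", yyvsp[(3) - (n)]);  /* yyvsp[-1] */
        printf("$3 new style: %d\n", yyvsp[-1]);
        return 0;
    }
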
 
   case 3:
-/* Line 1787 of yacc.c  */
-#line 118 "dtc-parser.y"
+#line 113 "dtc-parser.y" /* yacc.c:1646  */
     {
                        (yyval.re) = NULL;
                }
+#line 1480 "dtc-parser.tab.c" /* yacc.c:1646  */
     break;
 
   case 4:
-/* Line 1787 of yacc.c  */
-#line 122 "dtc-parser.y"
+#line 117 "dtc-parser.y" /* yacc.c:1646  */
     {
-                       (yyval.re) = chain_reserve_entry((yyvsp[(1) - (2)].re), (yyvsp[(2) - (2)].re));
+                       (yyval.re) = chain_reserve_entry((yyvsp[-1].re), (yyvsp[0].re));
                }
+#line 1488 "dtc-parser.tab.c" /* yacc.c:1646  */
     break;
 
   case 5:
-/* Line 1787 of yacc.c  */
-#line 129 "dtc-parser.y"
+#line 124 "dtc-parser.y" /* yacc.c:1646  */
     {
-                       (yyval.re) = build_reserve_entry((yyvsp[(2) - (4)].integer), (yyvsp[(3) - (4)].integer));
+                       (yyval.re) = build_reserve_entry((yyvsp[-2].integer), (yyvsp[-1].integer));
                }
+#line 1496 "dtc-parser.tab.c" /* yacc.c:1646  */
     break;
 
   case 6:
-/* Line 1787 of yacc.c  */
-#line 133 "dtc-parser.y"
+#line 128 "dtc-parser.y" /* yacc.c:1646  */
     {
-                       add_label(&(yyvsp[(2) - (2)].re)->labels, (yyvsp[(1) - (2)].labelref));
-                       (yyval.re) = (yyvsp[(2) - (2)].re);
+                       add_label(&(yyvsp[0].re)->labels, (yyvsp[-1].labelref));
+                       (yyval.re) = (yyvsp[0].re);
                }
+#line 1505 "dtc-parser.tab.c" /* yacc.c:1646  */
     break;
 
   case 7:
-/* Line 1787 of yacc.c  */
-#line 141 "dtc-parser.y"
+#line 136 "dtc-parser.y" /* yacc.c:1646  */
     {
-                       (yyval.node) = name_node((yyvsp[(2) - (2)].node), "");
+                       (yyval.node) = name_node((yyvsp[0].node), "");
                }
+#line 1513 "dtc-parser.tab.c" /* yacc.c:1646  */
     break;
 
   case 8:
-/* Line 1787 of yacc.c  */
-#line 145 "dtc-parser.y"
+#line 140 "dtc-parser.y" /* yacc.c:1646  */
     {
-                       (yyval.node) = merge_nodes((yyvsp[(1) - (3)].node), (yyvsp[(3) - (3)].node));
+                       (yyval.node) = merge_nodes((yyvsp[-2].node), (yyvsp[0].node));
                }
+#line 1521 "dtc-parser.tab.c" /* yacc.c:1646  */
     break;
 
   case 9:
-/* Line 1787 of yacc.c  */
-#line 149 "dtc-parser.y"
+#line 145 "dtc-parser.y" /* yacc.c:1646  */
     {
-                       struct node *target = get_node_by_ref((yyvsp[(1) - (3)].node), (yyvsp[(2) - (3)].labelref));
+                       struct node *target = get_node_by_ref((yyvsp[-3].node), (yyvsp[-1].labelref));
 
+                       add_label(&target->labels, (yyvsp[-2].labelref));
                        if (target)
-                               merge_nodes(target, (yyvsp[(3) - (3)].node));
+                               merge_nodes(target, (yyvsp[0].node));
                        else
-                               print_error("label or path, '%s', not found", (yyvsp[(2) - (3)].labelref));
-                       (yyval.node) = (yyvsp[(1) - (3)].node);
+                               ERROR(&(yylsp[-1]), "Label or path %s not found", (yyvsp[-1].labelref));
+                       (yyval.node) = (yyvsp[-3].node);
                }
+#line 1536 "dtc-parser.tab.c" /* yacc.c:1646  */
     break;
 
   case 10:
-/* Line 1787 of yacc.c  */
-#line 159 "dtc-parser.y"
+#line 156 "dtc-parser.y" /* yacc.c:1646  */
     {
-                       struct node *target = get_node_by_ref((yyvsp[(1) - (4)].node), (yyvsp[(3) - (4)].labelref));
+                       struct node *target = get_node_by_ref((yyvsp[-2].node), (yyvsp[-1].labelref));
 
-                       if (!target)
-                               print_error("label or path, '%s', not found", (yyvsp[(3) - (4)].labelref));
+                       if (target)
+                               merge_nodes(target, (yyvsp[0].node));
                        else
-                               delete_node(target);
-
-                       (yyval.node) = (yyvsp[(1) - (4)].node);
+                               ERROR(&(yylsp[-1]), "Label or path %s not found", (yyvsp[-1].labelref));
+                       (yyval.node) = (yyvsp[-2].node);
                }
+#line 1550 "dtc-parser.tab.c" /* yacc.c:1646  */
     break;
 
   case 11:
-/* Line 1787 of yacc.c  */
-#line 173 "dtc-parser.y"
+#line 166 "dtc-parser.y" /* yacc.c:1646  */
     {
-                       (yyval.node) = build_node((yyvsp[(2) - (5)].proplist), (yyvsp[(3) - (5)].nodelist));
+                       struct node *target = get_node_by_ref((yyvsp[-3].node), (yyvsp[-1].labelref));
+
+                       if (target)
+                               delete_node(target);
+                       else
+                               ERROR(&(yylsp[-1]), "Label or path %s not found", (yyvsp[-1].labelref));
+
+
+                       (yyval.node) = (yyvsp[-3].node);
                }
+#line 1566 "dtc-parser.tab.c" /* yacc.c:1646  */
     break;
 
   case 12:
-/* Line 1787 of yacc.c  */
-#line 180 "dtc-parser.y"
+#line 181 "dtc-parser.y" /* yacc.c:1646  */
     {
-                       (yyval.proplist) = NULL;
+                       (yyval.node) = build_node((yyvsp[-3].proplist), (yyvsp[-2].nodelist));
                }
+#line 1574 "dtc-parser.tab.c" /* yacc.c:1646  */
     break;
 
   case 13:
-/* Line 1787 of yacc.c  */
-#line 184 "dtc-parser.y"
+#line 188 "dtc-parser.y" /* yacc.c:1646  */
     {
-                       (yyval.proplist) = chain_property((yyvsp[(2) - (2)].prop), (yyvsp[(1) - (2)].proplist));
+                       (yyval.proplist) = NULL;
                }
+#line 1582 "dtc-parser.tab.c" /* yacc.c:1646  */
     break;
 
   case 14:
-/* Line 1787 of yacc.c  */
-#line 191 "dtc-parser.y"
+#line 192 "dtc-parser.y" /* yacc.c:1646  */
     {
-                       (yyval.prop) = build_property((yyvsp[(1) - (4)].propnodename), (yyvsp[(3) - (4)].data));
+                       (yyval.proplist) = chain_property((yyvsp[0].prop), (yyvsp[-1].proplist));
                }
+#line 1590 "dtc-parser.tab.c" /* yacc.c:1646  */
     break;
 
   case 15:
-/* Line 1787 of yacc.c  */
-#line 195 "dtc-parser.y"
+#line 199 "dtc-parser.y" /* yacc.c:1646  */
     {
-                       (yyval.prop) = build_property((yyvsp[(1) - (2)].propnodename), empty_data);
+                       (yyval.prop) = build_property((yyvsp[-3].propnodename), (yyvsp[-1].data));
                }
+#line 1598 "dtc-parser.tab.c" /* yacc.c:1646  */
     break;
 
   case 16:
-/* Line 1787 of yacc.c  */
-#line 199 "dtc-parser.y"
+#line 203 "dtc-parser.y" /* yacc.c:1646  */
     {
-                       (yyval.prop) = build_property_delete((yyvsp[(2) - (3)].propnodename));
+                       (yyval.prop) = build_property((yyvsp[-1].propnodename), empty_data);
                }
+#line 1606 "dtc-parser.tab.c" /* yacc.c:1646  */
     break;
 
   case 17:
-/* Line 1787 of yacc.c  */
-#line 203 "dtc-parser.y"
+#line 207 "dtc-parser.y" /* yacc.c:1646  */
     {
-                       add_label(&(yyvsp[(2) - (2)].prop)->labels, (yyvsp[(1) - (2)].labelref));
-                       (yyval.prop) = (yyvsp[(2) - (2)].prop);
+                       (yyval.prop) = build_property_delete((yyvsp[-1].propnodename));
                }
+#line 1614 "dtc-parser.tab.c" /* yacc.c:1646  */
     break;
 
   case 18:
-/* Line 1787 of yacc.c  */
-#line 211 "dtc-parser.y"
+#line 211 "dtc-parser.y" /* yacc.c:1646  */
     {
-                       (yyval.data) = data_merge((yyvsp[(1) - (2)].data), (yyvsp[(2) - (2)].data));
+                       add_label(&(yyvsp[0].prop)->labels, (yyvsp[-1].labelref));
+                       (yyval.prop) = (yyvsp[0].prop);
                }
+#line 1623 "dtc-parser.tab.c" /* yacc.c:1646  */
     break;
 
   case 19:
-/* Line 1787 of yacc.c  */
-#line 215 "dtc-parser.y"
+#line 219 "dtc-parser.y" /* yacc.c:1646  */
     {
-                       (yyval.data) = data_merge((yyvsp[(1) - (3)].data), (yyvsp[(2) - (3)].array).data);
+                       (yyval.data) = data_merge((yyvsp[-1].data), (yyvsp[0].data));
                }
+#line 1631 "dtc-parser.tab.c" /* yacc.c:1646  */
     break;
 
   case 20:
-/* Line 1787 of yacc.c  */
-#line 219 "dtc-parser.y"
+#line 223 "dtc-parser.y" /* yacc.c:1646  */
     {
-                       (yyval.data) = data_merge((yyvsp[(1) - (4)].data), (yyvsp[(3) - (4)].data));
+                       (yyval.data) = data_merge((yyvsp[-2].data), (yyvsp[-1].array).data);
                }
+#line 1639 "dtc-parser.tab.c" /* yacc.c:1646  */
     break;
 
   case 21:
-/* Line 1787 of yacc.c  */
-#line 223 "dtc-parser.y"
+#line 227 "dtc-parser.y" /* yacc.c:1646  */
     {
-                       (yyval.data) = data_add_marker((yyvsp[(1) - (2)].data), REF_PATH, (yyvsp[(2) - (2)].labelref));
+                       (yyval.data) = data_merge((yyvsp[-3].data), (yyvsp[-1].data));
                }
+#line 1647 "dtc-parser.tab.c" /* yacc.c:1646  */
     break;
 
   case 22:
-/* Line 1787 of yacc.c  */
-#line 227 "dtc-parser.y"
+#line 231 "dtc-parser.y" /* yacc.c:1646  */
     {
-                       FILE *f = srcfile_relative_open((yyvsp[(4) - (9)].data).val, NULL);
+                       (yyval.data) = data_add_marker((yyvsp[-1].data), REF_PATH, (yyvsp[0].labelref));
+               }
+#line 1655 "dtc-parser.tab.c" /* yacc.c:1646  */
+    break;
+
+  case 23:
+#line 235 "dtc-parser.y" /* yacc.c:1646  */
+    {
+                       FILE *f = srcfile_relative_open((yyvsp[-5].data).val, NULL);
                        struct data d;
 
-                       if ((yyvsp[(6) - (9)].integer) != 0)
-                               if (fseek(f, (yyvsp[(6) - (9)].integer), SEEK_SET) != 0)
-                                       print_error("Couldn't seek to offset %llu in \"%s\": %s",
-                                                    (unsigned long long)(yyvsp[(6) - (9)].integer),
-                                                    (yyvsp[(4) - (9)].data).val,
-                                                    strerror(errno));
+                       if ((yyvsp[-3].integer) != 0)
+                               if (fseek(f, (yyvsp[-3].integer), SEEK_SET) != 0)
+                                       die("Couldn't seek to offset %llu in \"%s\": %s",
+                                           (unsigned long long)(yyvsp[-3].integer), (yyvsp[-5].data).val,
+                                           strerror(errno));
 
-                       d = data_copy_file(f, (yyvsp[(8) - (9)].integer));
+                       d = data_copy_file(f, (yyvsp[-1].integer));
 
-                       (yyval.data) = data_merge((yyvsp[(1) - (9)].data), d);
+                       (yyval.data) = data_merge((yyvsp[-8].data), d);
                        fclose(f);
                }
+#line 1675 "dtc-parser.tab.c" /* yacc.c:1646  */
     break;
 
-  case 23:
-/* Line 1787 of yacc.c  */
-#line 244 "dtc-parser.y"
+  case 24:
+#line 251 "dtc-parser.y" /* yacc.c:1646  */
     {
-                       FILE *f = srcfile_relative_open((yyvsp[(4) - (5)].data).val, NULL);
+                       FILE *f = srcfile_relative_open((yyvsp[-1].data).val, NULL);
                        struct data d = empty_data;
 
                        d = data_copy_file(f, -1);
 
-                       (yyval.data) = data_merge((yyvsp[(1) - (5)].data), d);
+                       (yyval.data) = data_merge((yyvsp[-4].data), d);
                        fclose(f);
                }
+#line 1689 "dtc-parser.tab.c" /* yacc.c:1646  */
     break;
 
-  case 24:
-/* Line 1787 of yacc.c  */
-#line 254 "dtc-parser.y"
+  case 25:
+#line 261 "dtc-parser.y" /* yacc.c:1646  */
     {
-                       (yyval.data) = data_add_marker((yyvsp[(1) - (2)].data), LABEL, (yyvsp[(2) - (2)].labelref));
+                       (yyval.data) = data_add_marker((yyvsp[-1].data), LABEL, (yyvsp[0].labelref));
                }
+#line 1697 "dtc-parser.tab.c" /* yacc.c:1646  */
     break;
 
-  case 25:
-/* Line 1787 of yacc.c  */
-#line 261 "dtc-parser.y"
+  case 26:
+#line 268 "dtc-parser.y" /* yacc.c:1646  */
     {
                        (yyval.data) = empty_data;
                }
+#line 1705 "dtc-parser.tab.c" /* yacc.c:1646  */
     break;
 
-  case 26:
-/* Line 1787 of yacc.c  */
-#line 265 "dtc-parser.y"
+  case 27:
+#line 272 "dtc-parser.y" /* yacc.c:1646  */
     {
-                       (yyval.data) = (yyvsp[(1) - (2)].data);
+                       (yyval.data) = (yyvsp[-1].data);
                }
+#line 1713 "dtc-parser.tab.c" /* yacc.c:1646  */
     break;
 
-  case 27:
-/* Line 1787 of yacc.c  */
-#line 269 "dtc-parser.y"
+  case 28:
+#line 276 "dtc-parser.y" /* yacc.c:1646  */
     {
-                       (yyval.data) = data_add_marker((yyvsp[(1) - (2)].data), LABEL, (yyvsp[(2) - (2)].labelref));
+                       (yyval.data) = data_add_marker((yyvsp[-1].data), LABEL, (yyvsp[0].labelref));
                }
+#line 1721 "dtc-parser.tab.c" /* yacc.c:1646  */
     break;
 
-  case 28:
-/* Line 1787 of yacc.c  */
-#line 276 "dtc-parser.y"
+  case 29:
+#line 283 "dtc-parser.y" /* yacc.c:1646  */
     {
-                       (yyval.array).data = empty_data;
-                       (yyval.array).bits = eval_literal((yyvsp[(2) - (3)].literal), 0, 7);
-
-                       if (((yyval.array).bits !=  8) &&
-                           ((yyval.array).bits != 16) &&
-                           ((yyval.array).bits != 32) &&
-                           ((yyval.array).bits != 64))
-                       {
-                               print_error("Only 8, 16, 32 and 64-bit elements"
-                                           " are currently supported");
-                               (yyval.array).bits = 32;
+                       unsigned long long bits;
+
+                       bits = (yyvsp[-1].integer);
+
+                       if ((bits !=  8) && (bits != 16) &&
+                           (bits != 32) && (bits != 64)) {
+                               ERROR(&(yylsp[-1]), "Array elements must be"
+                                     " 8, 16, 32 or 64-bits");
+                               bits = 32;
                        }
+
+                       (yyval.array).data = empty_data;
+                       (yyval.array).bits = bits;
                }
+#line 1741 "dtc-parser.tab.c" /* yacc.c:1646  */
     break;
 
-  case 29:
-/* Line 1787 of yacc.c  */
-#line 291 "dtc-parser.y"
+  case 30:
+#line 299 "dtc-parser.y" /* yacc.c:1646  */
     {
                        (yyval.array).data = empty_data;
                        (yyval.array).bits = 32;
                }
+#line 1750 "dtc-parser.tab.c" /* yacc.c:1646  */
     break;
 
-  case 30:
-/* Line 1787 of yacc.c  */
-#line 296 "dtc-parser.y"
+  case 31:
+#line 304 "dtc-parser.y" /* yacc.c:1646  */
     {
-                       if ((yyvsp[(1) - (2)].array).bits < 64) {
-                               uint64_t mask = (1ULL << (yyvsp[(1) - (2)].array).bits) - 1;
+                       if ((yyvsp[-1].array).bits < 64) {
+                               uint64_t mask = (1ULL << (yyvsp[-1].array).bits) - 1;
                                /*
                                 * Bits above mask must either be all zero
                                 * (positive within range of mask) or all one
@@ -1801,275 +1762,258 @@ yyreduce:
                                 * within the mask to one (i.e. | in the
                                 * mask), all bits are one.
                                 */
-                               if (((yyvsp[(2) - (2)].integer) > mask) && (((yyvsp[(2) - (2)].integer) | mask) != -1ULL))
-                                       print_error(
-                                               "integer value out of range "
-                                               "%016lx (%d bits)", (yyvsp[(1) - (2)].array).bits);
+                               if (((yyvsp[0].integer) > mask) && (((yyvsp[0].integer) | mask) != -1ULL))
+                                       ERROR(&(yylsp[0]), "Value out of range for"
+                                             " %d-bit array element", (yyvsp[-1].array).bits);
                        }
 
-                       (yyval.array).data = data_append_integer((yyvsp[(1) - (2)].array).data, (yyvsp[(2) - (2)].integer), (yyvsp[(1) - (2)].array).bits);
+                       (yyval.array).data = data_append_integer((yyvsp[-1].array).data, (yyvsp[0].integer), (yyvsp[-1].array).bits);
                }
+#line 1773 "dtc-parser.tab.c" /* yacc.c:1646  */
     break;
 
-  case 31:
-/* Line 1787 of yacc.c  */
-#line 316 "dtc-parser.y"
+  case 32:
+#line 323 "dtc-parser.y" /* yacc.c:1646  */
     {
-                       uint64_t val = ~0ULL >> (64 - (yyvsp[(1) - (2)].array).bits);
+                       uint64_t val = ~0ULL >> (64 - (yyvsp[-1].array).bits);
 
-                       if ((yyvsp[(1) - (2)].array).bits == 32)
-                               (yyvsp[(1) - (2)].array).data = data_add_marker((yyvsp[(1) - (2)].array).data,
+                       if ((yyvsp[-1].array).bits == 32)
+                               (yyvsp[-1].array).data = data_add_marker((yyvsp[-1].array).data,
                                                          REF_PHANDLE,
-                                                         (yyvsp[(2) - (2)].labelref));
+                                                         (yyvsp[0].labelref));
                        else
-                               print_error("References are only allowed in "
+                               ERROR(&(yylsp[0]), "References are only allowed in "
                                            "arrays with 32-bit elements.");
 
-                       (yyval.array).data = data_append_integer((yyvsp[(1) - (2)].array).data, val, (yyvsp[(1) - (2)].array).bits);
-               }
-    break;
-
-  case 32:
-/* Line 1787 of yacc.c  */
-#line 330 "dtc-parser.y"
-    {
-                       (yyval.array).data = data_add_marker((yyvsp[(1) - (2)].array).data, LABEL, (yyvsp[(2) - (2)].labelref));
+                       (yyval.array).data = data_append_integer((yyvsp[-1].array).data, val, (yyvsp[-1].array).bits);
                }
+#line 1791 "dtc-parser.tab.c" /* yacc.c:1646  */
     break;
 
   case 33:
-/* Line 1787 of yacc.c  */
-#line 337 "dtc-parser.y"
-    {
-                       (yyval.integer) = eval_literal((yyvsp[(1) - (1)].literal), 0, 64);
-               }
-    break;
-
-  case 34:
-/* Line 1787 of yacc.c  */
-#line 341 "dtc-parser.y"
+#line 337 "dtc-parser.y" /* yacc.c:1646  */
     {
-                       (yyval.integer) = eval_char_literal((yyvsp[(1) - (1)].literal));
+                       (yyval.array).data = data_add_marker((yyvsp[-1].array).data, LABEL, (yyvsp[0].labelref));
                }
+#line 1799 "dtc-parser.tab.c" /* yacc.c:1646  */
     break;
 
-  case 35:
-/* Line 1787 of yacc.c  */
-#line 345 "dtc-parser.y"
+  case 36:
+#line 346 "dtc-parser.y" /* yacc.c:1646  */
     {
-                       (yyval.integer) = (yyvsp[(2) - (3)].integer);
+                       (yyval.integer) = (yyvsp[-1].integer);
                }
+#line 1807 "dtc-parser.tab.c" /* yacc.c:1646  */
     break;
 
-  case 38:
-/* Line 1787 of yacc.c  */
-#line 356 "dtc-parser.y"
-    { (yyval.integer) = (yyvsp[(1) - (5)].integer) ? (yyvsp[(3) - (5)].integer) : (yyvsp[(5) - (5)].integer); }
-    break;
-
-  case 40:
-/* Line 1787 of yacc.c  */
-#line 361 "dtc-parser.y"
-    { (yyval.integer) = (yyvsp[(1) - (3)].integer) || (yyvsp[(3) - (3)].integer); }
+  case 39:
+#line 357 "dtc-parser.y" /* yacc.c:1646  */
+    { (yyval.integer) = (yyvsp[-4].integer) ? (yyvsp[-2].integer) : (yyvsp[0].integer); }
+#line 1813 "dtc-parser.tab.c" /* yacc.c:1646  */
     break;
 
-  case 42:
-/* Line 1787 of yacc.c  */
-#line 366 "dtc-parser.y"
-    { (yyval.integer) = (yyvsp[(1) - (3)].integer) && (yyvsp[(3) - (3)].integer); }
+  case 41:
+#line 362 "dtc-parser.y" /* yacc.c:1646  */
+    { (yyval.integer) = (yyvsp[-2].integer) || (yyvsp[0].integer); }
+#line 1819 "dtc-parser.tab.c" /* yacc.c:1646  */
     break;
 
-  case 44:
-/* Line 1787 of yacc.c  */
-#line 371 "dtc-parser.y"
-    { (yyval.integer) = (yyvsp[(1) - (3)].integer) | (yyvsp[(3) - (3)].integer); }
+  case 43:
+#line 367 "dtc-parser.y" /* yacc.c:1646  */
+    { (yyval.integer) = (yyvsp[-2].integer) && (yyvsp[0].integer); }
+#line 1825 "dtc-parser.tab.c" /* yacc.c:1646  */
     break;
 
-  case 46:
-/* Line 1787 of yacc.c  */
-#line 376 "dtc-parser.y"
-    { (yyval.integer) = (yyvsp[(1) - (3)].integer) ^ (yyvsp[(3) - (3)].integer); }
+  case 45:
+#line 372 "dtc-parser.y" /* yacc.c:1646  */
+    { (yyval.integer) = (yyvsp[-2].integer) | (yyvsp[0].integer); }
+#line 1831 "dtc-parser.tab.c" /* yacc.c:1646  */
     break;
 
-  case 48:
-/* Line 1787 of yacc.c  */
-#line 381 "dtc-parser.y"
-    { (yyval.integer) = (yyvsp[(1) - (3)].integer) & (yyvsp[(3) - (3)].integer); }
+  case 47:
+#line 377 "dtc-parser.y" /* yacc.c:1646  */
+    { (yyval.integer) = (yyvsp[-2].integer) ^ (yyvsp[0].integer); }
+#line 1837 "dtc-parser.tab.c" /* yacc.c:1646  */
     break;
 
-  case 50:
-/* Line 1787 of yacc.c  */
-#line 386 "dtc-parser.y"
-    { (yyval.integer) = (yyvsp[(1) - (3)].integer) == (yyvsp[(3) - (3)].integer); }
+  case 49:
+#line 382 "dtc-parser.y" /* yacc.c:1646  */
+    { (yyval.integer) = (yyvsp[-2].integer) & (yyvsp[0].integer); }
+#line 1843 "dtc-parser.tab.c" /* yacc.c:1646  */
     break;
 
   case 51:
-/* Line 1787 of yacc.c  */
-#line 387 "dtc-parser.y"
-    { (yyval.integer) = (yyvsp[(1) - (3)].integer) != (yyvsp[(3) - (3)].integer); }
+#line 387 "dtc-parser.y" /* yacc.c:1646  */
+    { (yyval.integer) = (yyvsp[-2].integer) == (yyvsp[0].integer); }
+#line 1849 "dtc-parser.tab.c" /* yacc.c:1646  */
     break;
 
-  case 53:
-/* Line 1787 of yacc.c  */
-#line 392 "dtc-parser.y"
-    { (yyval.integer) = (yyvsp[(1) - (3)].integer) < (yyvsp[(3) - (3)].integer); }
+  case 52:
+#line 388 "dtc-parser.y" /* yacc.c:1646  */
+    { (yyval.integer) = (yyvsp[-2].integer) != (yyvsp[0].integer); }
+#line 1855 "dtc-parser.tab.c" /* yacc.c:1646  */
     break;
 
   case 54:
-/* Line 1787 of yacc.c  */
-#line 393 "dtc-parser.y"
-    { (yyval.integer) = (yyvsp[(1) - (3)].integer) > (yyvsp[(3) - (3)].integer); }
+#line 393 "dtc-parser.y" /* yacc.c:1646  */
+    { (yyval.integer) = (yyvsp[-2].integer) < (yyvsp[0].integer); }
+#line 1861 "dtc-parser.tab.c" /* yacc.c:1646  */
     break;
 
   case 55:
-/* Line 1787 of yacc.c  */
-#line 394 "dtc-parser.y"
-    { (yyval.integer) = (yyvsp[(1) - (3)].integer) <= (yyvsp[(3) - (3)].integer); }
+#line 394 "dtc-parser.y" /* yacc.c:1646  */
+    { (yyval.integer) = (yyvsp[-2].integer) > (yyvsp[0].integer); }
+#line 1867 "dtc-parser.tab.c" /* yacc.c:1646  */
     break;
 
   case 56:
-/* Line 1787 of yacc.c  */
-#line 395 "dtc-parser.y"
-    { (yyval.integer) = (yyvsp[(1) - (3)].integer) >= (yyvsp[(3) - (3)].integer); }
+#line 395 "dtc-parser.y" /* yacc.c:1646  */
+    { (yyval.integer) = (yyvsp[-2].integer) <= (yyvsp[0].integer); }
+#line 1873 "dtc-parser.tab.c" /* yacc.c:1646  */
     break;
 
   case 57:
-/* Line 1787 of yacc.c  */
-#line 399 "dtc-parser.y"
-    { (yyval.integer) = (yyvsp[(1) - (3)].integer) << (yyvsp[(3) - (3)].integer); }
+#line 396 "dtc-parser.y" /* yacc.c:1646  */
+    { (yyval.integer) = (yyvsp[-2].integer) >= (yyvsp[0].integer); }
+#line 1879 "dtc-parser.tab.c" /* yacc.c:1646  */
     break;
 
   case 58:
-/* Line 1787 of yacc.c  */
-#line 400 "dtc-parser.y"
-    { (yyval.integer) = (yyvsp[(1) - (3)].integer) >> (yyvsp[(3) - (3)].integer); }
+#line 400 "dtc-parser.y" /* yacc.c:1646  */
+    { (yyval.integer) = (yyvsp[-2].integer) << (yyvsp[0].integer); }
+#line 1885 "dtc-parser.tab.c" /* yacc.c:1646  */
     break;
 
-  case 60:
-/* Line 1787 of yacc.c  */
-#line 405 "dtc-parser.y"
-    { (yyval.integer) = (yyvsp[(1) - (3)].integer) + (yyvsp[(3) - (3)].integer); }
+  case 59:
+#line 401 "dtc-parser.y" /* yacc.c:1646  */
+    { (yyval.integer) = (yyvsp[-2].integer) >> (yyvsp[0].integer); }
+#line 1891 "dtc-parser.tab.c" /* yacc.c:1646  */
     break;
 
   case 61:
-/* Line 1787 of yacc.c  */
-#line 406 "dtc-parser.y"
-    { (yyval.integer) = (yyvsp[(1) - (3)].integer) - (yyvsp[(3) - (3)].integer); }
+#line 406 "dtc-parser.y" /* yacc.c:1646  */
+    { (yyval.integer) = (yyvsp[-2].integer) + (yyvsp[0].integer); }
+#line 1897 "dtc-parser.tab.c" /* yacc.c:1646  */
     break;
 
-  case 63:
-/* Line 1787 of yacc.c  */
-#line 411 "dtc-parser.y"
-    { (yyval.integer) = (yyvsp[(1) - (3)].integer) * (yyvsp[(3) - (3)].integer); }
+  case 62:
+#line 407 "dtc-parser.y" /* yacc.c:1646  */
+    { (yyval.integer) = (yyvsp[-2].integer) - (yyvsp[0].integer); }
+#line 1903 "dtc-parser.tab.c" /* yacc.c:1646  */
     break;
 
   case 64:
-/* Line 1787 of yacc.c  */
-#line 412 "dtc-parser.y"
-    { (yyval.integer) = (yyvsp[(1) - (3)].integer) / (yyvsp[(3) - (3)].integer); }
+#line 412 "dtc-parser.y" /* yacc.c:1646  */
+    { (yyval.integer) = (yyvsp[-2].integer) * (yyvsp[0].integer); }
+#line 1909 "dtc-parser.tab.c" /* yacc.c:1646  */
     break;
 
   case 65:
-/* Line 1787 of yacc.c  */
-#line 413 "dtc-parser.y"
-    { (yyval.integer) = (yyvsp[(1) - (3)].integer) % (yyvsp[(3) - (3)].integer); }
+#line 413 "dtc-parser.y" /* yacc.c:1646  */
+    { (yyval.integer) = (yyvsp[-2].integer) / (yyvsp[0].integer); }
+#line 1915 "dtc-parser.tab.c" /* yacc.c:1646  */
     break;
 
-  case 68:
-/* Line 1787 of yacc.c  */
-#line 419 "dtc-parser.y"
-    { (yyval.integer) = -(yyvsp[(2) - (2)].integer); }
+  case 66:
+#line 414 "dtc-parser.y" /* yacc.c:1646  */
+    { (yyval.integer) = (yyvsp[-2].integer) % (yyvsp[0].integer); }
+#line 1921 "dtc-parser.tab.c" /* yacc.c:1646  */
     break;
 
   case 69:
-/* Line 1787 of yacc.c  */
-#line 420 "dtc-parser.y"
-    { (yyval.integer) = ~(yyvsp[(2) - (2)].integer); }
+#line 420 "dtc-parser.y" /* yacc.c:1646  */
+    { (yyval.integer) = -(yyvsp[0].integer); }
+#line 1927 "dtc-parser.tab.c" /* yacc.c:1646  */
     break;
 
   case 70:
-/* Line 1787 of yacc.c  */
-#line 421 "dtc-parser.y"
-    { (yyval.integer) = !(yyvsp[(2) - (2)].integer); }
+#line 421 "dtc-parser.y" /* yacc.c:1646  */
+    { (yyval.integer) = ~(yyvsp[0].integer); }
+#line 1933 "dtc-parser.tab.c" /* yacc.c:1646  */
     break;
 
   case 71:
-/* Line 1787 of yacc.c  */
-#line 426 "dtc-parser.y"
-    {
-                       (yyval.data) = empty_data;
-               }
+#line 422 "dtc-parser.y" /* yacc.c:1646  */
+    { (yyval.integer) = !(yyvsp[0].integer); }
+#line 1939 "dtc-parser.tab.c" /* yacc.c:1646  */
     break;
 
   case 72:
-/* Line 1787 of yacc.c  */
-#line 430 "dtc-parser.y"
+#line 427 "dtc-parser.y" /* yacc.c:1646  */
     {
-                       (yyval.data) = data_append_byte((yyvsp[(1) - (2)].data), (yyvsp[(2) - (2)].byte));
+                       (yyval.data) = empty_data;
                }
+#line 1947 "dtc-parser.tab.c" /* yacc.c:1646  */
     break;
 
   case 73:
-/* Line 1787 of yacc.c  */
-#line 434 "dtc-parser.y"
+#line 431 "dtc-parser.y" /* yacc.c:1646  */
     {
-                       (yyval.data) = data_add_marker((yyvsp[(1) - (2)].data), LABEL, (yyvsp[(2) - (2)].labelref));
+                       (yyval.data) = data_append_byte((yyvsp[-1].data), (yyvsp[0].byte));
                }
+#line 1955 "dtc-parser.tab.c" /* yacc.c:1646  */
     break;
 
   case 74:
-/* Line 1787 of yacc.c  */
-#line 441 "dtc-parser.y"
+#line 435 "dtc-parser.y" /* yacc.c:1646  */
     {
-                       (yyval.nodelist) = NULL;
+                       (yyval.data) = data_add_marker((yyvsp[-1].data), LABEL, (yyvsp[0].labelref));
                }
+#line 1963 "dtc-parser.tab.c" /* yacc.c:1646  */
     break;
 
   case 75:
-/* Line 1787 of yacc.c  */
-#line 445 "dtc-parser.y"
+#line 442 "dtc-parser.y" /* yacc.c:1646  */
     {
-                       (yyval.nodelist) = chain_node((yyvsp[(1) - (2)].node), (yyvsp[(2) - (2)].nodelist));
+                       (yyval.nodelist) = NULL;
                }
+#line 1971 "dtc-parser.tab.c" /* yacc.c:1646  */
     break;
 
   case 76:
-/* Line 1787 of yacc.c  */
-#line 449 "dtc-parser.y"
+#line 446 "dtc-parser.y" /* yacc.c:1646  */
     {
-                       print_error("syntax error: properties must precede subnodes");
-                       YYERROR;
+                       (yyval.nodelist) = chain_node((yyvsp[-1].node), (yyvsp[0].nodelist));
                }
+#line 1979 "dtc-parser.tab.c" /* yacc.c:1646  */
     break;
 
   case 77:
-/* Line 1787 of yacc.c  */
-#line 457 "dtc-parser.y"
+#line 450 "dtc-parser.y" /* yacc.c:1646  */
     {
-                       (yyval.node) = name_node((yyvsp[(2) - (2)].node), (yyvsp[(1) - (2)].propnodename));
+                       ERROR(&(yylsp[0]), "Properties must precede subnodes");
+                       YYERROR;
                }
+#line 1988 "dtc-parser.tab.c" /* yacc.c:1646  */
     break;
 
   case 78:
-/* Line 1787 of yacc.c  */
-#line 461 "dtc-parser.y"
+#line 458 "dtc-parser.y" /* yacc.c:1646  */
     {
-                       (yyval.node) = name_node(build_node_delete(), (yyvsp[(2) - (3)].propnodename));
+                       (yyval.node) = name_node((yyvsp[0].node), (yyvsp[-1].propnodename));
                }
+#line 1996 "dtc-parser.tab.c" /* yacc.c:1646  */
     break;
 
   case 79:
-/* Line 1787 of yacc.c  */
-#line 465 "dtc-parser.y"
+#line 462 "dtc-parser.y" /* yacc.c:1646  */
+    {
+                       (yyval.node) = name_node(build_node_delete(), (yyvsp[-1].propnodename));
+               }
+#line 2004 "dtc-parser.tab.c" /* yacc.c:1646  */
+    break;
+
+  case 80:
+#line 466 "dtc-parser.y" /* yacc.c:1646  */
     {
-                       add_label(&(yyvsp[(2) - (2)].node)->labels, (yyvsp[(1) - (2)].labelref));
-                       (yyval.node) = (yyvsp[(2) - (2)].node);
+                       add_label(&(yyvsp[0].node)->labels, (yyvsp[-1].labelref));
+                       (yyval.node) = (yyvsp[0].node);
                }
+#line 2013 "dtc-parser.tab.c" /* yacc.c:1646  */
     break;
 
 
-/* Line 1787 of yacc.c  */
-#line 2073 "dtc-parser.tab.c"
+#line 2017 "dtc-parser.tab.c" /* yacc.c:1646  */
       default: break;
     }
   /* User semantic actions sometimes alter yychar, and that requires
@@ -2090,8 +2034,9 @@ yyreduce:
   YY_STACK_PRINT (yyss, yyssp);
 
   *++yyvsp = yyval;
+  *++yylsp = yyloc;
 
-  /* Now `shift' the result of the reduction.  Determine what state
+  /* Now 'shift' the result of the reduction.  Determine what state
      that goes to, based on the state we popped back to and the rule
      number reduced by.  */
 
@@ -2106,9 +2051,9 @@ yyreduce:
   goto yynewstate;
 
 
-/*------------------------------------.
-| yyerrlab -- here on detecting error |
-`------------------------------------*/
+/*--------------------------------------.
+| yyerrlab -- here on detecting error |
+`--------------------------------------*/
 yyerrlab:
   /* Make sure we have latest lookahead translation.  See comments at
      user semantic actions for why this is necessary.  */
@@ -2154,25 +2099,25 @@ yyerrlab:
 #endif
     }
 
-
+  yyerror_range[1] = yylloc;
 
   if (yyerrstatus == 3)
     {
       /* If just tried and failed to reuse lookahead token after an
-        error, discard it.  */
+         error, discard it.  */
 
       if (yychar <= YYEOF)
-       {
-         /* Return failure if at end of input.  */
-         if (yychar == YYEOF)
-           YYABORT;
-       }
+        {
+          /* Return failure if at end of input.  */
+          if (yychar == YYEOF)
+            YYABORT;
+        }
       else
-       {
-         yydestruct ("Error: discarding",
-                     yytoken, &yylval);
-         yychar = YYEMPTY;
-       }
+        {
+          yydestruct ("Error: discarding",
+                      yytoken, &yylval, &yylloc);
+          yychar = YYEMPTY;
+        }
     }
 
   /* Else will try to reuse lookahead token after shifting the error
@@ -2191,7 +2136,8 @@ yyerrorlab:
   if (/*CONSTCOND*/ 0)
      goto yyerrorlab;
 
-  /* Do not reclaim the symbols of the rule which action triggered
+  yyerror_range[1] = yylsp[1-yylen];
+  /* Do not reclaim the symbols of the rule whose action triggered
      this YYERROR.  */
   YYPOPSTACK (yylen);
   yylen = 0;
@@ -2204,29 +2150,29 @@ yyerrorlab:
 | yyerrlab1 -- common code for both syntax error and YYERROR.  |
 `-------------------------------------------------------------*/
 yyerrlab1:
-  yyerrstatus = 3;     /* Each real token shifted decrements this.  */
+  yyerrstatus = 3;      /* Each real token shifted decrements this.  */
 
   for (;;)
     {
       yyn = yypact[yystate];
       if (!yypact_value_is_default (yyn))
-       {
-         yyn += YYTERROR;
-         if (0 <= yyn && yyn <= YYLAST && yycheck[yyn] == YYTERROR)
-           {
-             yyn = yytable[yyn];
-             if (0 < yyn)
-               break;
-           }
-       }
+        {
+          yyn += YYTERROR;
+          if (0 <= yyn && yyn <= YYLAST && yycheck[yyn] == YYTERROR)
+            {
+              yyn = yytable[yyn];
+              if (0 < yyn)
+                break;
+            }
+        }
 
       /* Pop the current state because it cannot handle the error token.  */
       if (yyssp == yyss)
-       YYABORT;
-
+        YYABORT;
 
+      yyerror_range[1] = *yylsp;
       yydestruct ("Error: popping",
-                 yystos[yystate], yyvsp);
+                  yystos[yystate], yyvsp, yylsp);
       YYPOPSTACK (1);
       yystate = *yyssp;
       YY_STACK_PRINT (yyss, yyssp);
@@ -2236,6 +2182,11 @@ yyerrlab1:
   *++yyvsp = yylval;
   YY_IGNORE_MAYBE_UNINITIALIZED_END
 
+  yyerror_range[2] = yylloc;
+  /* Using YYLLOC is tempting, but would change the location of
+     the lookahead.  YYLOC is available though.  */
+  YYLLOC_DEFAULT (yyloc, yyerror_range, 2);
+  *++yylsp = yyloc;
 
   /* Shift the error token.  */
   YY_SYMBOL_PRINT ("Shifting", yystos[yyn], yyvsp, yylsp);
@@ -2275,16 +2226,16 @@ yyreturn:
          user semantic actions for why this is necessary.  */
       yytoken = YYTRANSLATE (yychar);
       yydestruct ("Cleanup: discarding lookahead",
-                  yytoken, &yylval);
+                  yytoken, &yylval, &yylloc);
     }
-  /* Do not reclaim the symbols of the rule which action triggered
+  /* Do not reclaim the symbols of the rule whose action triggered
      this YYABORT or YYACCEPT.  */
   YYPOPSTACK (yylen);
   YY_STACK_PRINT (yyss, yyssp);
   while (yyssp != yyss)
     {
       yydestruct ("Cleanup: popping",
-                 yystos[*yyssp], yyvsp);
+                  yystos[*yyssp], yyvsp, yylsp);
       YYPOPSTACK (1);
     }
 #ifndef yyoverflow
@@ -2295,72 +2246,12 @@ yyreturn:
   if (yymsg != yymsgbuf)
     YYSTACK_FREE (yymsg);
 #endif
-  /* Make sure YYID is used.  */
-  return YYID (yyresult);
+  return yyresult;
 }
+#line 472 "dtc-parser.y" /* yacc.c:1906  */
 
 
-/* Line 2050 of yacc.c  */
-#line 471 "dtc-parser.y"
-
-
-void print_error(char const *fmt, ...)
-{
-       va_list va;
-
-       va_start(va, fmt);
-       srcpos_verror(&yylloc, fmt, va);
-       va_end(va);
-
-       treesource_error = 1;
-}
-
-void yyerror(char const *s) {
-       print_error("%s", s);
-}
-
-static unsigned long long eval_literal(const char *s, int base, int bits)
-{
-       unsigned long long val;
-       char *e;
-
-       errno = 0;
-       val = strtoull(s, &e, base);
-       if (*e) {
-               size_t uls = strspn(e, "UL");
-               if (e[uls])
-                       print_error("bad characters in literal");
-       }
-       if ((errno == ERANGE)
-                || ((bits < 64) && (val >= (1ULL << bits))))
-               print_error("literal out of range");
-       else if (errno != 0)
-               print_error("bad literal");
-       return val;
-}
-
-static unsigned char eval_char_literal(const char *s)
+void yyerror(char const *s)
 {
-       int i = 1;
-       char c = s[0];
-
-       if (c == '\0')
-       {
-               print_error("empty character literal");
-               return 0;
-       }
-
-       /*
-        * If the first character in the character literal is a \ then process
-        * the remaining characters as an escape encoding. If the first
-        * character is neither an escape or a terminator it should be the only
-        * character in the literal and will be returned.
-        */
-       if (c == '\\')
-               c = get_escape_char(s, &i);
-
-       if (s[i] != '\0')
-               print_error("malformed character literal");
-
-       return c;
+       ERROR(&yylloc, "%s", s);
 }
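
The regenerated parser above drops print_error() in favour of an ERROR() macro that carries an explicit source location (yylloc / yylsp[n]) and sets a flag instead of aborting. A minimal standalone sketch of that pattern follows; struct loc, report() and parse_error are illustrative names, not dtc symbols.

/* Sketch of location-carrying error reporting, assuming nothing beyond C99. */
#include <stdarg.h>
#include <stdbool.h>
#include <stdio.h>

struct loc { int first_line, first_column; };
static bool parse_error;

static void report(const struct loc *where, const char *fmt, ...)
{
	va_list ap;

	fprintf(stderr, "%d:%d: error: ", where->first_line, where->first_column);
	va_start(ap, fmt);
	vfprintf(stderr, fmt, ap);
	va_end(ap);
	fputc('\n', stderr);
}

/* Record the error and keep parsing, mirroring the ERROR() usage above. */
#define ERROR(loc, ...) \
	do { \
		report((loc), __VA_ARGS__); \
		parse_error = true; \
	} while (0)

int main(void)
{
	struct loc l = { 12, 3 };

	ERROR(&l, "Label or path %s not found", "&foo");
	return parse_error ? 1 : 0;
}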
index b2e7a86cd85e6fe406b1b2b26dfee52bf83095bf..30867c688300e38333877360e3b05475d5b4a2a3 100644 (file)
@@ -1,19 +1,19 @@
-/* A Bison parser, made by GNU Bison 2.7.12-4996.  */
+/* A Bison parser, made by GNU Bison 3.0.2.  */
 
 /* Bison interface for Yacc-like parsers in C
-   
-      Copyright (C) 1984, 1989-1990, 2000-2013 Free Software Foundation, Inc.
-   
+
+   Copyright (C) 1984, 1989-1990, 2000-2013 Free Software Foundation, Inc.
+
    This program is free software: you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation, either version 3 of the License, or
    (at your option) any later version.
-   
+
    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.
-   
+
    You should have received a copy of the GNU General Public License
    along with this program.  If not, see <http://www.gnu.org/licenses/>.  */
 
    special exception, which will cause the skeleton and the resulting
    Bison output files to be licensed under the GNU General Public
    License without this special exception.
-   
+
    This special exception was added by the Free Software Foundation in
    version 2.2 of Bison.  */
 
 #ifndef YY_YY_DTC_PARSER_TAB_H_INCLUDED
 # define YY_YY_DTC_PARSER_TAB_H_INCLUDED
-/* Enabling traces.  */
+/* Debug traces.  */
 #ifndef YYDEBUG
 # define YYDEBUG 0
 #endif
 extern int yydebug;
 #endif
 
-/* Tokens.  */
+/* Token type.  */
 #ifndef YYTOKENTYPE
 # define YYTOKENTYPE
-   /* Put the tokens into the symbol table, so that GDB and other debuggers
-      know about them.  */
-   enum yytokentype {
-     DT_V1 = 258,
-     DT_MEMRESERVE = 259,
-     DT_LSHIFT = 260,
-     DT_RSHIFT = 261,
-     DT_LE = 262,
-     DT_GE = 263,
-     DT_EQ = 264,
-     DT_NE = 265,
-     DT_AND = 266,
-     DT_OR = 267,
-     DT_BITS = 268,
-     DT_DEL_PROP = 269,
-     DT_DEL_NODE = 270,
-     DT_PROPNODENAME = 271,
-     DT_LITERAL = 272,
-     DT_CHAR_LITERAL = 273,
-     DT_BASE = 274,
-     DT_BYTE = 275,
-     DT_STRING = 276,
-     DT_LABEL = 277,
-     DT_REF = 278,
-     DT_INCBIN = 279
-   };
+  enum yytokentype
+  {
+    DT_V1 = 258,
+    DT_MEMRESERVE = 259,
+    DT_LSHIFT = 260,
+    DT_RSHIFT = 261,
+    DT_LE = 262,
+    DT_GE = 263,
+    DT_EQ = 264,
+    DT_NE = 265,
+    DT_AND = 266,
+    DT_OR = 267,
+    DT_BITS = 268,
+    DT_DEL_PROP = 269,
+    DT_DEL_NODE = 270,
+    DT_PROPNODENAME = 271,
+    DT_LITERAL = 272,
+    DT_CHAR_LITERAL = 273,
+    DT_BYTE = 274,
+    DT_STRING = 275,
+    DT_LABEL = 276,
+    DT_REF = 277,
+    DT_INCBIN = 278
+  };
 #endif
 
-
+/* Value type.  */
 #if ! defined YYSTYPE && ! defined YYSTYPE_IS_DECLARED
-typedef union YYSTYPE
+typedef union YYSTYPE YYSTYPE;
+union YYSTYPE
 {
-/* Line 2053 of yacc.c  */
-#line 40 "dtc-parser.y"
+#line 38 "dtc-parser.y" /* yacc.c:1909  */
 
        char *propnodename;
-       char *literal;
        char *labelref;
-       unsigned int cbase;
        uint8_t byte;
        struct data data;
 
@@ -97,29 +93,29 @@ typedef union YYSTYPE
        struct reserve_info *re;
        uint64_t integer;
 
-
-/* Line 2053 of yacc.c  */
-#line 103 "dtc-parser.tab.h"
-} YYSTYPE;
+#line 97 "dtc-parser.tab.h" /* yacc.c:1909  */
+};
 # define YYSTYPE_IS_TRIVIAL 1
-# define yystype YYSTYPE /* obsolescent; will be withdrawn */
 # define YYSTYPE_IS_DECLARED 1
 #endif
 
-extern YYSTYPE yylval;
-
-#ifdef YYPARSE_PARAM
-#if defined __STDC__ || defined __cplusplus
-int yyparse (void *YYPARSE_PARAM);
-#else
-int yyparse ();
+/* Location type.  */
+#if ! defined YYLTYPE && ! defined YYLTYPE_IS_DECLARED
+typedef struct YYLTYPE YYLTYPE;
+struct YYLTYPE
+{
+  int first_line;
+  int first_column;
+  int last_line;
+  int last_column;
+};
+# define YYLTYPE_IS_DECLARED 1
+# define YYLTYPE_IS_TRIVIAL 1
 #endif
-#else /* ! YYPARSE_PARAM */
-#if defined __STDC__ || defined __cplusplus
+
+
+extern YYSTYPE yylval;
+extern YYLTYPE yylloc;
 int yyparse (void);
-#else
-int yyparse ();
-#endif
-#endif /* ! YYPARSE_PARAM */
 
 #endif /* !YY_YY_DTC_PARSER_TAB_H_INCLUDED  */
index f412460f94d7478ad3fb9a81951f5c6a5e60c4ad..5a897e36562d67107dba44fa91274253b34152dc 100644 (file)
  *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307
  *                                                                   USA
  */
-
 %{
 #include <stdio.h>
 
 #include "dtc.h"
 #include "srcpos.h"
 
-YYLTYPE yylloc;
-
 extern int yylex(void);
-extern void print_error(char const *fmt, ...);
 extern void yyerror(char const *s);
+#define ERROR(loc, ...) \
+       do { \
+               srcpos_error((loc), "Error", __VA_ARGS__); \
+               treesource_error = true; \
+       } while (0)
 
 extern struct boot_info *the_boot_info;
-extern int treesource_error;
-
-static unsigned long long eval_literal(const char *s, int base, int bits);
-static unsigned char eval_char_literal(const char *s);
+extern bool treesource_error;
 %}
 
 %union {
        char *propnodename;
-       char *literal;
        char *labelref;
-       unsigned int cbase;
        uint8_t byte;
        struct data data;
 
@@ -65,9 +61,8 @@ static unsigned char eval_char_literal(const char *s);
 %token DT_DEL_PROP
 %token DT_DEL_NODE
 %token <propnodename> DT_PROPNODENAME
-%token <literal> DT_LITERAL
-%token <literal> DT_CHAR_LITERAL
-%token <cbase> DT_BASE
+%token <integer> DT_LITERAL
+%token <integer> DT_CHAR_LITERAL
 %token <byte> DT_BYTE
 %token <data> DT_STRING
 %token <labelref> DT_LABEL
@@ -145,6 +140,18 @@ devicetree:
                {
                        $$ = merge_nodes($1, $3);
                }
+
+       | devicetree DT_LABEL DT_REF nodedef
+               {
+                       struct node *target = get_node_by_ref($1, $3);
+
+                       add_label(&target->labels, $2);
+                       if (target)
+                               merge_nodes(target, $4);
+                       else
+                               ERROR(&@3, "Label or path %s not found", $3);
+                       $$ = $1;
+               }
        | devicetree DT_REF nodedef
                {
                        struct node *target = get_node_by_ref($1, $2);
@@ -152,17 +159,18 @@ devicetree:
                        if (target)
                                merge_nodes(target, $3);
                        else
-                               print_error("label or path, '%s', not found", $2);
+                               ERROR(&@2, "Label or path %s not found", $2);
                        $$ = $1;
                }
        | devicetree DT_DEL_NODE DT_REF ';'
                {
                        struct node *target = get_node_by_ref($1, $3);
 
-                       if (!target)
-                               print_error("label or path, '%s', not found", $3);
-                       else
+                       if (target)
                                delete_node(target);
+                       else
+                               ERROR(&@3, "Label or path %s not found", $3);
+
 
                        $$ = $1;
                }
@@ -230,10 +238,9 @@ propdata:
 
                        if ($6 != 0)
                                if (fseek(f, $6, SEEK_SET) != 0)
-                                       print_error("Couldn't seek to offset %llu in \"%s\": %s",
-                                                    (unsigned long long)$6,
-                                                    $4.val,
-                                                    strerror(errno));
+                                       die("Couldn't seek to offset %llu in \"%s\": %s",
+                                           (unsigned long long)$6, $4.val,
+                                           strerror(errno));
 
                        d = data_copy_file(f, $8);
 
@@ -274,18 +281,19 @@ propdataprefix:
 arrayprefix:
        DT_BITS DT_LITERAL '<'
                {
-                       $$.data = empty_data;
-                       $$.bits = eval_literal($2, 0, 7);
-
-                       if (($$.bits !=  8) &&
-                           ($$.bits != 16) &&
-                           ($$.bits != 32) &&
-                           ($$.bits != 64))
-                       {
-                               print_error("Only 8, 16, 32 and 64-bit elements"
-                                           " are currently supported");
-                               $$.bits = 32;
+                       unsigned long long bits;
+
+                       bits = $2;
+
+                       if ((bits !=  8) && (bits != 16) &&
+                           (bits != 32) && (bits != 64)) {
+                               ERROR(&@2, "Array elements must be"
+                                     " 8, 16, 32 or 64-bits");
+                               bits = 32;
                        }
+
+                       $$.data = empty_data;
+                       $$.bits = bits;
                }
        | '<'
                {
@@ -305,9 +313,8 @@ arrayprefix:
                                 * mask), all bits are one.
                                 */
                                if (($2 > mask) && (($2 | mask) != -1ULL))
-                                       print_error(
-                                               "integer value out of range "
-                                               "%016lx (%d bits)", $1.bits);
+                                       ERROR(&@2, "Value out of range for"
+                                             " %d-bit array element", $1.bits);
                        }
 
                        $$.data = data_append_integer($1.data, $2, $1.bits);
@@ -321,7 +328,7 @@ arrayprefix:
                                                          REF_PHANDLE,
                                                          $2);
                        else
-                               print_error("References are only allowed in "
+                               ERROR(&@2, "References are only allowed in "
                                            "arrays with 32-bit elements.");
 
                        $$.data = data_append_integer($1.data, val, $1.bits);
@@ -334,13 +341,7 @@ arrayprefix:
 
 integer_prim:
          DT_LITERAL
-               {
-                       $$ = eval_literal($1, 0, 64);
-               }
        | DT_CHAR_LITERAL
-               {
-                       $$ = eval_char_literal($1);
-               }
        | '(' integer_expr ')'
                {
                        $$ = $2;
@@ -447,7 +448,7 @@ subnodes:
                }
        | subnode propdef
                {
-                       print_error("syntax error: properties must precede subnodes");
+                       ERROR(&@2, "Properties must precede subnodes");
                        YYERROR;
                }
        ;
@@ -470,63 +471,7 @@ subnode:
 
 %%
 
-void print_error(char const *fmt, ...)
-{
-       va_list va;
-
-       va_start(va, fmt);
-       srcpos_verror(&yylloc, fmt, va);
-       va_end(va);
-
-       treesource_error = 1;
-}
-
-void yyerror(char const *s) {
-       print_error("%s", s);
-}
-
-static unsigned long long eval_literal(const char *s, int base, int bits)
-{
-       unsigned long long val;
-       char *e;
-
-       errno = 0;
-       val = strtoull(s, &e, base);
-       if (*e) {
-               size_t uls = strspn(e, "UL");
-               if (e[uls])
-                       print_error("bad characters in literal");
-       }
-       if ((errno == ERANGE)
-                || ((bits < 64) && (val >= (1ULL << bits))))
-               print_error("literal out of range");
-       else if (errno != 0)
-               print_error("bad literal");
-       return val;
-}
-
-static unsigned char eval_char_literal(const char *s)
+void yyerror(char const *s)
 {
-       int i = 1;
-       char c = s[0];
-
-       if (c == '\0')
-       {
-               print_error("empty character literal");
-               return 0;
-       }
-
-       /*
-        * If the first character in the character literal is a \ then process
-        * the remaining characters as an escape encoding. If the first
-        * character is neither an escape or a terminator it should be the only
-        * character in the literal and will be returned.
-        */
-       if (c == '\\')
-               c = get_escape_char(s, &i);
-
-       if (s[i] != '\0')
-               print_error("malformed character literal");
-
-       return c;
+       ERROR(&yylloc, "%s", s);
 }
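
The arrayprefix rules above keep the sized-cell overflow test: a value fits an n-bit element (n < 64) when every bit above the cell mask is either all zero or all one, so sign-extended negative constants still pass. A self-contained sketch of that check, with a hypothetical helper name:

/* Sketch of the n-bit range test used for sized array elements. */
#include <inttypes.h>
#include <stdbool.h>
#include <stdio.h>

static bool fits_bits(uint64_t val, unsigned bits)
{
	uint64_t mask;

	if (bits >= 64)
		return true;
	mask = (1ULL << bits) - 1;
	/* In range if non-negative and within the mask, or all-ones above it. */
	return (val <= mask) || ((val | mask) == ~0ULL);
}

int main(void)
{
	printf("%d\n", fits_bits(0xff, 8));          /* 1: fits in 8 bits */
	printf("%d\n", fits_bits(0x100, 8));         /* 0: too large */
	printf("%d\n", fits_bits((uint64_t)-1, 16)); /* 1: sign-extended -1 */
	return 0;
}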
index e3c96536fd9db090c184a5d6f3a83ee8226b621b..8c4add69a76578839de910292b165b4504f47cf2 100644 (file)
@@ -48,6 +48,8 @@ static void fill_fullpaths(struct node *tree, const char *prefix)
 }
 
 /* Usage related data. */
+#define FDT_VERSION(version)   _FDT_VERSION(version)
+#define _FDT_VERSION(version)  #version
 static const char usage_synopsis[] = "dtc [options] <input file>";
 static const char usage_short_opts[] = "qI:O:o:V:d:R:S:p:fb:i:H:sW:E:hv";
 static struct option const usage_long_opts[] = {
@@ -82,9 +84,9 @@ static const char * const usage_opts_help[] = {
         "\t\tdts - device tree source text\n"
         "\t\tdtb - device tree blob\n"
         "\t\tasm - assembler source",
-       "\n\tBlob version to produce, defaults to %d (for dtb and asm output)", //, DEFAULT_FDT_VERSION);
+       "\n\tBlob version to produce, defaults to "FDT_VERSION(DEFAULT_FDT_VERSION)" (for dtb and asm output)",
        "\n\tOutput dependency file",
-       "\n\ttMake space for <number> reserve map entries (for dtb and asm output)",
+       "\n\tMake space for <number> reserve map entries (for dtb and asm output)",
        "\n\tMake the blob at least <bytes> long (extra space)",
        "\n\tAdd padding to the blob of <bytes> long (extra space)",
        "\n\tSet the physical boot cpu",
@@ -109,7 +111,7 @@ int main(int argc, char *argv[])
        const char *outform = "dts";
        const char *outname = "-";
        const char *depname = NULL;
-       int force = 0, sort = 0;
+       bool force = false, sort = false;
        const char *arg;
        int opt;
        FILE *outf = NULL;
@@ -148,7 +150,7 @@ int main(int argc, char *argv[])
                        padsize = strtol(optarg, NULL, 0);
                        break;
                case 'f':
-                       force = 1;
+                       force = true;
                        break;
                case 'q':
                        quiet++;
@@ -174,7 +176,7 @@ int main(int argc, char *argv[])
                        break;
 
                case 's':
-                       sort = 1;
+                       sort = true;
                        break;
 
                case 'W':
@@ -237,7 +239,7 @@ int main(int argc, char *argv[])
        if (streq(outname, "-")) {
                outf = stdout;
        } else {
-               outf = fopen(outname, "w");
+               outf = fopen(outname, "wb");
                if (! outf)
                        die("Couldn't open output file %s: %s\n",
                            outname, strerror(errno));
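
The FDT_VERSION()/_FDT_VERSION() pair added above is the usual two-level stringification idiom: the outer macro forces its argument to be expanded before the inner # turns it into a string, so the help text gets the numeric default rather than the literal macro name. A small illustration with generic macro names (the value 17 is only an example):

/* Sketch of two-level stringification; STR_/STR are illustrative names. */
#include <stdio.h>

#define STR_(x)  #x
#define STR(x)   STR_(x)

#define DEFAULT_FDT_VERSION 17

int main(void)
{
	/* Prints: without expansion: DEFAULT_FDT_VERSION, with expansion: 17 */
	printf("without expansion: %s, with expansion: %s\n",
	       STR_(DEFAULT_FDT_VERSION), STR(DEFAULT_FDT_VERSION));
	return 0;
}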
index 264a20cf66a8c6548c2f41b7266dac953937fd38..56212c8df660396b1d9bf6752e1121990e0789ab 100644 (file)
@@ -38,9 +38,9 @@
 #include "util.h"
 
 #ifdef DEBUG
-#define debug(fmt,args...)     printf(fmt, ##args)
+#define debug(...)     printf(__VA_ARGS__)
 #else
-#define debug(fmt,args...)
+#define debug(...)
 #endif
 
 
@@ -88,7 +88,7 @@ struct data {
 };
 
 
-#define empty_data ((struct data){ /* all .members = 0 or NULL */ })
+#define empty_data ((struct data){ /* all .members = 0 or NULL */ })
 
 #define for_each_marker(m) \
        for (; (m); (m) = (m)->next)
@@ -118,7 +118,7 @@ struct data data_append_align(struct data d, int align);
 
 struct data data_add_marker(struct data d, enum markertype type, char *ref);
 
-int data_is_one_string(struct data d);
+bool data_is_one_string(struct data d);
 
 /* DT constraints */
 
@@ -127,13 +127,13 @@ int data_is_one_string(struct data d);
 
 /* Live trees */
 struct label {
-       int deleted;
+       bool deleted;
        char *label;
        struct label *next;
 };
 
 struct property {
-       int deleted;
+       bool deleted;
        char *name;
        struct data val;
 
@@ -143,7 +143,7 @@ struct property {
 };
 
 struct node {
-       int deleted;
+       bool deleted;
        char *name;
        struct property *proplist;
        struct node *children;
@@ -247,8 +247,8 @@ void sort_tree(struct boot_info *bi);
 
 /* Checks */
 
-void parse_checks_option(bool warn, bool error, const char *optarg);
-void process_checks(int force, struct boot_info *bi);
+void parse_checks_option(bool warn, bool error, const char *arg);
+void process_checks(bool force, struct boot_info *bi);
 
 /* Flattened trees */
 
index 665dad7bb465b474387af9996e1d36e96a579d6b..bd99fa2d33b85e873bd00178d6390d70f4afaa0d 100644 (file)
@@ -261,7 +261,7 @@ static void flatten_tree(struct node *tree, struct emitter *emit,
 {
        struct property *prop;
        struct node *child;
-       int seen_name_prop = 0;
+       bool seen_name_prop = false;
 
        if (tree->deleted)
                return;
@@ -279,7 +279,7 @@ static void flatten_tree(struct node *tree, struct emitter *emit,
                int nameoff;
 
                if (streq(prop->name, "name"))
-                       seen_name_prop = 1;
+                       seen_name_prop = true;
 
                nameoff = stringtable_insert(strbuf, prop->name);
 
index e464727c8808cbcd06e184b004fb7261c4472288..6d1beec9581d559539773215a66f6eb0e8b51d77 100644 (file)
@@ -37,26 +37,26 @@ static struct node *read_fstree(const char *dirname)
        tree = build_node(NULL, NULL);
 
        while ((de = readdir(d)) != NULL) {
-               char *tmpnam;
+               char *tmpname;
 
                if (streq(de->d_name, ".")
                    || streq(de->d_name, ".."))
                        continue;
 
-               tmpnam = join_path(dirname, de->d_name);
+               tmpname = join_path(dirname, de->d_name);
 
-               if (lstat(tmpnam, &st) < 0)
-                       die("stat(%s): %s\n", tmpnam, strerror(errno));
+               if (lstat(tmpname, &st) < 0)
+                       die("stat(%s): %s\n", tmpname, strerror(errno));
 
                if (S_ISREG(st.st_mode)) {
                        struct property *prop;
                        FILE *pfile;
 
-                       pfile = fopen(tmpnam, "r");
+                       pfile = fopen(tmpname, "rb");
                        if (! pfile) {
                                fprintf(stderr,
                                        "WARNING: Cannot open %s: %s\n",
-                                       tmpnam, strerror(errno));
+                                       tmpname, strerror(errno));
                        } else {
                                prop = build_property(xstrdup(de->d_name),
                                                      data_copy_file(pfile,
@@ -67,12 +67,12 @@ static struct node *read_fstree(const char *dirname)
                } else if (S_ISDIR(st.st_mode)) {
                        struct node *newchild;
 
-                       newchild = read_fstree(tmpnam);
+                       newchild = read_fstree(tmpname);
                        newchild = name_node(newchild, xstrdup(de->d_name));
                        add_child(tree, newchild);
                }
 
-               free(tmpnam);
+               free(tmpname);
        }
 
        closedir(d);
@@ -88,3 +88,4 @@ struct boot_info *dt_from_fs(const char *dirname)
 
        return build_boot_info(NULL, tree, guess_boot_cpuid(tree));
 }
+
index 91126c000a1ed82dbf58d1d2c7cd9ff59a4e96ae..09c322ed82ba3eaba8dfb888a17650dd80c0b649 100644 (file)
@@ -6,5 +6,6 @@
 LIBFDT_soname = libfdt.$(SHAREDLIB_EXT).1
 LIBFDT_INCLUDES = fdt.h libfdt.h libfdt_env.h
 LIBFDT_VERSION = version.lds
-LIBFDT_SRCS = fdt.c fdt_ro.c fdt_wip.c fdt_sw.c fdt_rw.c fdt_strerror.c fdt_empty_tree.c
+LIBFDT_SRCS = fdt.c fdt_ro.c fdt_wip.c fdt_sw.c fdt_rw.c fdt_strerror.c fdt_empty_tree.c \
+       fdt_addresses.c
 LIBFDT_OBJS = $(LIBFDT_SRCS:%.c=%.o)
index e56833ae9b6ffaee67281dc959b0c263ccc24c15..2ce6a44179deca2feb1ec5b484878f1965d257f6 100644 (file)
@@ -92,7 +92,7 @@ const void *fdt_offset_ptr(const void *fdt, int offset, unsigned int len)
 
 uint32_t fdt_next_tag(const void *fdt, int startoffset, int *nextoffset)
 {
-       const uint32_t *tagp, *lenp;
+       const fdt32_t *tagp, *lenp;
        uint32_t tag;
        int offset = startoffset;
        const char *p;
@@ -198,6 +198,34 @@ int fdt_next_node(const void *fdt, int offset, int *depth)
        return offset;
 }
 
+int fdt_first_subnode(const void *fdt, int offset)
+{
+       int depth = 0;
+
+       offset = fdt_next_node(fdt, offset, &depth);
+       if (offset < 0 || depth != 1)
+               return -FDT_ERR_NOTFOUND;
+
+       return offset;
+}
+
+int fdt_next_subnode(const void *fdt, int offset)
+{
+       int depth = 1;
+
+       /*
+        * With respect to the parent, the depth of the next subnode will be
+        * the same as the last.
+        */
+       do {
+               offset = fdt_next_node(fdt, offset, &depth);
+               if (offset < 0 || depth < 1)
+                       return -FDT_ERR_NOTFOUND;
+       } while (depth > 1);
+
+       return offset;
+}
+
 const char *_fdt_find_string(const char *strtab, int tabsize, const char *s)
 {
        int len = strlen(s) + 1;
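
fdt_first_subnode() and fdt_next_subnode(), added above, return the next child offset or -FDT_ERR_NOTFOUND when no child is left, which makes direct-child iteration a plain for loop. A possible usage sketch, assuming fdt already points at a valid, checked blob:

/* Sketch: list the direct children of a node using the new helpers. */
#include <libfdt.h>
#include <stdio.h>

static void list_children(const void *fdt, int parent)
{
	int node;

	for (node = fdt_first_subnode(fdt, parent);
	     node >= 0;
	     node = fdt_next_subnode(fdt, node)) {
		const char *name = fdt_get_name(fdt, node, NULL);

		printf("  child: %s\n", name ? name : "(unnamed)");
	}
}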
index 48ccfd9100002deb3aff35ee4f17bc7d232b25b2..526aedb51556b60efb35960a4a2012af170a8e78 100644 (file)
@@ -1,48 +1,99 @@
 #ifndef _FDT_H
 #define _FDT_H
+/*
+ * libfdt - Flat Device Tree manipulation
+ * Copyright (C) 2006 David Gibson, IBM Corporation.
+ * Copyright 2012 Kim Phillips, Freescale Semiconductor.
+ *
+ * libfdt is dual licensed: you can use it either under the terms of
+ * the GPL, or the BSD license, at your option.
+ *
+ *  a) This library is free software; you can redistribute it and/or
+ *     modify it under the terms of the GNU General Public License as
+ *     published by the Free Software Foundation; either version 2 of the
+ *     License, or (at your option) any later version.
+ *
+ *     This library is distributed in the hope that it will be useful,
+ *     but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *     GNU General Public License for more details.
+ *
+ *     You should have received a copy of the GNU General Public
+ *     License along with this library; if not, write to the Free
+ *     Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston,
+ *     MA 02110-1301 USA
+ *
+ * Alternatively,
+ *
+ *  b) Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *     1. Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *     2. Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ *     THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
+ *     CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
+ *     INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ *     MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ *     DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ *     CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *     SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ *     NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ *     LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ *     HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ *     CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ *     OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ *     EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
 
 #ifndef __ASSEMBLY__
 
 struct fdt_header {
-       uint32_t magic;                  /* magic word FDT_MAGIC */
-       uint32_t totalsize;              /* total size of DT block */
-       uint32_t off_dt_struct;          /* offset to structure */
-       uint32_t off_dt_strings;         /* offset to strings */
-       uint32_t off_mem_rsvmap;         /* offset to memory reserve map */
-       uint32_t version;                /* format version */
-       uint32_t last_comp_version;      /* last compatible version */
+       fdt32_t magic;                   /* magic word FDT_MAGIC */
+       fdt32_t totalsize;               /* total size of DT block */
+       fdt32_t off_dt_struct;           /* offset to structure */
+       fdt32_t off_dt_strings;          /* offset to strings */
+       fdt32_t off_mem_rsvmap;          /* offset to memory reserve map */
+       fdt32_t version;                 /* format version */
+       fdt32_t last_comp_version;       /* last compatible version */
 
        /* version 2 fields below */
-       uint32_t boot_cpuid_phys;        /* Which physical CPU id we're
+       fdt32_t boot_cpuid_phys;         /* Which physical CPU id we're
                                            booting on */
        /* version 3 fields below */
-       uint32_t size_dt_strings;        /* size of the strings block */
+       fdt32_t size_dt_strings;         /* size of the strings block */
 
        /* version 17 fields below */
-       uint32_t size_dt_struct;         /* size of the structure block */
+       fdt32_t size_dt_struct;          /* size of the structure block */
 };
 
 struct fdt_reserve_entry {
-       uint64_t address;
-       uint64_t size;
+       fdt64_t address;
+       fdt64_t size;
 };
 
 struct fdt_node_header {
-       uint32_t tag;
+       fdt32_t tag;
        char name[0];
 };
 
 struct fdt_property {
-       uint32_t tag;
-       uint32_t len;
-       uint32_t nameoff;
+       fdt32_t tag;
+       fdt32_t len;
+       fdt32_t nameoff;
        char data[0];
 };
 
 #endif /* !__ASSEMBLY */
 
 #define FDT_MAGIC      0xd00dfeed      /* 4: version, 4: total size */
-#define FDT_TAGSIZE    sizeof(uint32_t)
+#define FDT_TAGSIZE    sizeof(fdt32_t)
 
 #define FDT_BEGIN_NODE 0x1             /* Start node: full name */
 #define FDT_END_NODE   0x2             /* End node */
@@ -51,10 +102,10 @@ struct fdt_property {
 #define FDT_NOP                0x4             /* nop */
 #define FDT_END                0x9
 
-#define FDT_V1_SIZE    (7*sizeof(uint32_t))
-#define FDT_V2_SIZE    (FDT_V1_SIZE + sizeof(uint32_t))
-#define FDT_V3_SIZE    (FDT_V2_SIZE + sizeof(uint32_t))
+#define FDT_V1_SIZE    (7*sizeof(fdt32_t))
+#define FDT_V2_SIZE    (FDT_V1_SIZE + sizeof(fdt32_t))
+#define FDT_V3_SIZE    (FDT_V2_SIZE + sizeof(fdt32_t))
 #define FDT_V16_SIZE   FDT_V3_SIZE
-#define FDT_V17_SIZE   (FDT_V16_SIZE + sizeof(uint32_t))
+#define FDT_V17_SIZE   (FDT_V16_SIZE + sizeof(fdt32_t))
 
 #endif /* _FDT_H */
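
Switching struct fdt_header and friends to fdt32_t/fdt64_t above makes the blob's big-endian layout explicit in the types, so host code converts through fdt32_to_cpu()/cpu_to_fdt32() rather than loading plain uint32_t values. A short sketch dumping a few header fields, assuming blob points at a valid tree:

/* Sketch: reading big-endian header fields via fdt32_to_cpu(). */
#include <libfdt.h>
#include <stdio.h>

static void show_header(const void *blob)
{
	const struct fdt_header *h = blob;

	printf("magic     0x%08x\n", fdt32_to_cpu(h->magic));
	printf("totalsize %u bytes\n", fdt32_to_cpu(h->totalsize));
	printf("version   %u\n", fdt32_to_cpu(h->version));
}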
index f2ae9b77c285733e50b4b496407471149650d290..f72d13b1d19c0bce7a27658e18a5f9fe26b2453c 100644 (file)
@@ -81,3 +81,4 @@ int fdt_create_empty_tree(void *buf, int bufsize)
 
        return fdt_open_into(buf, buf, bufsize);
 }
+
index 02b6d687537fac11e13a835502b1c16c98fb7603..a65e4b5b72b69ee92e6a54f5c60f0f2041f975af 100644 (file)
@@ -154,9 +154,9 @@ int fdt_subnode_offset(const void *fdt, int parentoffset,
        return fdt_subnode_offset_namelen(fdt, parentoffset, name, strlen(name));
 }
 
-int fdt_path_offset(const void *fdt, const char *path)
+int fdt_path_offset_namelen(const void *fdt, const char *path, int namelen)
 {
-       const char *end = path + strlen(path);
+       const char *end = path + namelen;
        const char *p = path;
        int offset = 0;
 
@@ -164,7 +164,7 @@ int fdt_path_offset(const void *fdt, const char *path)
 
        /* see if we have an alias */
        if (*path != '/') {
-               const char *q = strchr(path, '/');
+               const char *q = memchr(path, '/', end - p);
 
                if (!q)
                        q = end;
@@ -177,14 +177,15 @@ int fdt_path_offset(const void *fdt, const char *path)
                p = q;
        }
 
-       while (*p) {
+       while (p < end) {
                const char *q;
 
-               while (*p == '/')
+               while (*p == '/') {
                        p++;
-               if (! *p)
-                       return offset;
-               q = strchr(p, '/');
+                       if (p == end)
+                               return offset;
+               }
+               q = memchr(p, '/', end - p);
                if (! q)
                        q = end;
 
@@ -198,6 +199,11 @@ int fdt_path_offset(const void *fdt, const char *path)
        return offset;
 }
 
+int fdt_path_offset(const void *fdt, const char *path)
+{
+       return fdt_path_offset_namelen(fdt, path, strlen(path));
+}
+
 const char *fdt_get_name(const void *fdt, int nodeoffset, int *len)
 {
        const struct fdt_node_header *nh = _fdt_offset_ptr(fdt, nodeoffset);
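
The refactor above makes fdt_path_offset() a thin wrapper around the new fdt_path_offset_namelen(), which only looks at the first namelen characters of the path. A hedged sketch of why that is useful - looking up a node's parent without copying or NUL-terminating the prefix (offset_of_parent is an invented name):

/* Illustrative only: resolve the parent of a node given its full path. */
#include <string.h>
#include <libfdt.h>

static int offset_of_parent(const void *fdt, const char *path)
{
        const char *last = strrchr(path, '/');

        if (!last)
                return -FDT_ERR_BADPATH;
        if (last == path)               /* direct child of the root node */
                return fdt_path_offset(fdt, "/");
        /* Only the characters before the final component are considered. */
        return fdt_path_offset_namelen(fdt, path, last - path);
}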
@@ -322,7 +328,7 @@ const void *fdt_getprop(const void *fdt, int nodeoffset,
 
 uint32_t fdt_get_phandle(const void *fdt, int nodeoffset)
 {
-       const uint32_t *php;
+       const fdt32_t *php;
        int len;
 
        /* FIXME: This is a bit sub-optimal, since we potentially scan
@@ -515,8 +521,7 @@ int fdt_node_offset_by_phandle(const void *fdt, uint32_t phandle)
        return offset; /* error from fdt_next_node() */
 }
 
-static int _fdt_stringlist_contains(const char *strlist, int listlen,
-                                   const char *str)
+int fdt_stringlist_contains(const char *strlist, int listlen, const char *str)
 {
        int len = strlen(str);
        const char *p;
@@ -542,7 +547,7 @@ int fdt_node_check_compatible(const void *fdt, int nodeoffset,
        prop = fdt_getprop(fdt, nodeoffset, "compatible", &len);
        if (!prop)
                return len;
-       if (_fdt_stringlist_contains(prop, len, compatible))
+       if (fdt_stringlist_contains(prop, len, compatible))
                return 0;
        else
                return 1;
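
With _fdt_stringlist_contains() renamed and exported as fdt_stringlist_contains(), callers can test any string-list property the same way "compatible" is checked above. A small sketch under that assumption; "clock-names" and the helper name are illustrative choices, not something this commit adds:

/* Illustrative only: check whether a string-list property contains a name. */
#include <libfdt.h>

static int node_has_clock_name(const void *fdt, int nodeoffset,
                               const char *name)
{
        int len;
        const char *prop = fdt_getprop(fdt, nodeoffset, "clock-names", &len);

        if (!prop)
                return 0;
        return fdt_stringlist_contains(prop, len, name);
}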
index 24437dfc32b8843f22ddc6f123867f8ee0f3987c..70adec6c371b73131d5617dde569aa5244d6dee4 100644 (file)
@@ -84,9 +84,9 @@ static int _fdt_rw_check_header(void *fdt)
 
 #define FDT_RW_CHECK_HEADER(fdt) \
        { \
-               int err; \
-               if ((err = _fdt_rw_check_header(fdt)) != 0) \
-                       return err; \
+               int __err; \
+               if ((__err = _fdt_rw_check_header(fdt)) != 0) \
+                       return __err; \
        }
 
 static inline int _fdt_data_size(void *fdt)
@@ -339,7 +339,7 @@ int fdt_add_subnode_namelen(void *fdt, int parentoffset,
        int nodelen;
        int err;
        uint32_t tag;
-       uint32_t *endtag;
+       fdt32_t *endtag;
 
        FDT_RW_CHECK_HEADER(fdt);
 
@@ -366,7 +366,7 @@ int fdt_add_subnode_namelen(void *fdt, int parentoffset,
        nh->tag = cpu_to_fdt32(FDT_BEGIN_NODE);
        memset(nh->name, 0, FDT_TAGALIGN(namelen+1));
        memcpy(nh->name, name, namelen);
-       endtag = (uint32_t *)((char *)nh + nodelen - FDT_TAGSIZE);
+       endtag = (fdt32_t *)((char *)nh + nodelen - FDT_TAGSIZE);
        *endtag = cpu_to_fdt32(FDT_END_NODE);
 
        return offset;
index 55ebebf1eb20e8c7f9200881f99ecf03632db9a3..6a804859fd0c189636677b80f1c20c9cff9b30af 100644 (file)
@@ -107,6 +107,38 @@ int fdt_create(void *buf, int bufsize)
        return 0;
 }
 
+int fdt_resize(void *fdt, void *buf, int bufsize)
+{
+       size_t headsize, tailsize;
+       char *oldtail, *newtail;
+
+       FDT_SW_CHECK_HEADER(fdt);
+
+       headsize = fdt_off_dt_struct(fdt);
+       tailsize = fdt_size_dt_strings(fdt);
+
+       if ((headsize + tailsize) > bufsize)
+               return -FDT_ERR_NOSPACE;
+
+       oldtail = (char *)fdt + fdt_totalsize(fdt) - tailsize;
+       newtail = (char *)buf + bufsize - tailsize;
+
+       /* Two cases to avoid clobbering data if the old and new
+        * buffers partially overlap */
+       if (buf <= fdt) {
+               memmove(buf, fdt, headsize);
+               memmove(newtail, oldtail, tailsize);
+       } else {
+               memmove(newtail, oldtail, tailsize);
+               memmove(buf, fdt, headsize);
+       }
+
+       fdt_set_off_dt_strings(buf, bufsize);
+       fdt_set_totalsize(buf, bufsize);
+
+       return 0;
+}
+
 int fdt_add_reservemap_entry(void *fdt, uint64_t addr, uint64_t size)
 {
        struct fdt_reserve_entry *re;
@@ -153,7 +185,7 @@ int fdt_begin_node(void *fdt, const char *name)
 
 int fdt_end_node(void *fdt)
 {
-       uint32_t *en;
+       fdt32_t *en;
 
        FDT_SW_CHECK_HEADER(fdt);
 
@@ -213,7 +245,7 @@ int fdt_property(void *fdt, const char *name, const void *val, int len)
 int fdt_finish(void *fdt)
 {
        char *p = (char *)fdt;
-       uint32_t *end;
+       fdt32_t *end;
        int oldstroffset, newstroffset;
        uint32_t tag;
        int offset, nextoffset;
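
The new fdt_resize() above lets a sequential-write client move a partially built blob into a larger (or differently placed) buffer without restarting. A minimal sketch of that flow, with error handling abbreviated and all names/buffers illustrative:

/* Illustrative only: grow the working buffer mid-way through building. */
#include <libfdt.h>

static int build_tree(void *small, int small_size, void *big, int big_size)
{
        int err;

        err = fdt_create(small, small_size);
        if (err)
                return err;
        fdt_finish_reservemap(small);
        fdt_begin_node(small, "");
        fdt_property_u32(small, "#address-cells", 2);

        /* Running low on room: carry the in-progress blob over to 'big'. */
        err = fdt_resize(small, big, big_size);
        if (err)
                return err;

        fdt_end_node(big);
        return fdt_finish(big);
}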
index 6025fa1fe8feeb046a68605776d0d1ecac1efe9f..c5bbb68d3273dd1f711d10530be3bb9dbc1161fd 100644 (file)
@@ -74,7 +74,7 @@ int fdt_setprop_inplace(void *fdt, int nodeoffset, const char *name,
 
 static void _fdt_nop_region(void *start, int len)
 {
-       uint32_t *p;
+       fdt32_t *p;
 
        for (p = start; (char *)p < ((char *)start + len); p++)
                *p = cpu_to_fdt32(FDT_NOP);
index 73f49759a5e71b79aa5bb4ae172681ce329cd294..ea35ac3c9be4d08f6fc424ca1355170136fbb2e3 100644 (file)
@@ -51,8 +51,8 @@
  *     EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */
 
-#include <libfdt_env.h>
-#include <fdt.h>
+#include "libfdt_env.h"
+#include "fdt.h"
 
 #define FDT_FIRST_SUPPORTED_VERSION    0x10
 #define FDT_LAST_SUPPORTED_VERSION     0x11
         * Should never be returned, if it is, it indicates a bug in
         * libfdt itself. */
 
-#define FDT_ERR_MAX            13
+/* Errors in device tree content */
+#define FDT_ERR_BADNCELLS      14
+       /* FDT_ERR_BADNCELLS: Device tree has a #address-cells, #size-cells
+        * or similar property with a bad format or value */
+
+#define FDT_ERR_MAX            14
 
 /**********************************************************************/
 /* Low-level functions (you probably don't need these)                */
@@ -136,6 +141,28 @@ uint32_t fdt_next_tag(const void *fdt, int offset, int *nextoffset);
 
 int fdt_next_node(const void *fdt, int offset, int *depth);
 
+/**
+ * fdt_first_subnode() - get offset of first direct subnode
+ *
+ * @fdt:       FDT blob
+ * @offset:    Offset of node to check
+ * @return offset of first subnode, or -FDT_ERR_NOTFOUND if there is none
+ */
+int fdt_first_subnode(const void *fdt, int offset);
+
+/**
+ * fdt_next_subnode() - get offset of next direct subnode
+ *
+ * After first calling fdt_first_subnode(), call this function repeatedly to
+ * get direct subnodes of a parent node.
+ *
+ * @fdt:       FDT blob
+ * @offset:    Offset of previous subnode
+ * @return offset of next subnode, or -FDT_ERR_NOTFOUND if there are no more
+ * subnodes
+ */
+int fdt_next_subnode(const void *fdt, int offset);
+
 /**********************************************************************/
 /* General functions                                                  */
 /**********************************************************************/
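
The two declarations added above give libfdt a direct way to iterate a node's children. A short sketch of the intended loop shape, in the usual negative-error-code style (count_subnodes is an invented helper):

/* Illustrative only: count the direct children of a node. */
#include <libfdt.h>

static int count_subnodes(const void *fdt, int parent)
{
        int node, count = 0;

        for (node = fdt_first_subnode(fdt, parent);
             node >= 0;
             node = fdt_next_subnode(fdt, node))
                count++;

        return count;                   /* -FDT_ERR_* are filtered out above */
}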
@@ -295,6 +322,17 @@ int fdt_subnode_offset_namelen(const void *fdt, int parentoffset,
  */
 int fdt_subnode_offset(const void *fdt, int parentoffset, const char *name);
 
+/**
+ * fdt_path_offset_namelen - find a tree node by its full path
+ * @fdt: pointer to the device tree blob
+ * @path: full path of the node to locate
+ * @namelen: number of characters of path to consider
+ *
+ * Identical to fdt_path_offset(), but only consider the first namelen
+ * characters of path as the path name.
+ */
+int fdt_path_offset_namelen(const void *fdt, const char *path, int namelen);
+
 /**
  * fdt_path_offset - find a tree node by its full path
  * @fdt: pointer to the device tree blob
@@ -582,7 +620,7 @@ const char *fdt_get_alias_namelen(const void *fdt,
  * value of the property named 'name' in the node /aliases.
  *
  * returns:
- *     a pointer to the expansion of the alias named 'name', of it exists
+ *     a pointer to the expansion of the alias named 'name', if it exists
  *     NULL, if the given alias or the /aliases node does not exist
  */
 const char *fdt_get_alias(const void *fdt, const char *name);
@@ -816,6 +854,75 @@ int fdt_node_check_compatible(const void *fdt, int nodeoffset,
 int fdt_node_offset_by_compatible(const void *fdt, int startoffset,
                                  const char *compatible);
 
+/**
+ * fdt_stringlist_contains - check a string list property for a string
+ * @strlist: Property containing a list of strings to check
+ * @listlen: Length of property
+ * @str: String to search for
+ *
+ * This is a utility function provided for convenience. The list contains
+ * one or more strings, each terminated by \0, as is found in a device tree
+ * "compatible" property.
+ *
+ * @return: 1 if the string is found in the list, 0 not found, or invalid list
+ */
+int fdt_stringlist_contains(const char *strlist, int listlen, const char *str);
+
+/**********************************************************************/
+/* Read-only functions (addressing related)                           */
+/**********************************************************************/
+
+/**
+ * FDT_MAX_NCELLS - maximum value for #address-cells and #size-cells
+ *
+ * This is the maximum value for #address-cells, #size-cells and
+ * similar properties that will be processed by libfdt.  IEEE1275
+ * requires that OF implementations handle values up to 4.
+ * Implementations may support larger values, but in practice higher
+ * values aren't used.
+ */
+#define FDT_MAX_NCELLS         4
+
+/**
+ * fdt_address_cells - retrieve address size for a bus represented in the tree
+ * @fdt: pointer to the device tree blob
+ * @nodeoffset: offset of the node to find the address size for
+ *
+ * When the node has a valid #address-cells property, returns its value.
+ *
+ * returns:
+ *     0 <= n < FDT_MAX_NCELLS, on success
+ *      2, if the node has no #address-cells property
+ *      -FDT_ERR_BADNCELLS, if the node has a badly formatted or invalid #address-cells property
+ *     -FDT_ERR_BADMAGIC,
+ *     -FDT_ERR_BADVERSION,
+ *     -FDT_ERR_BADSTATE,
+ *     -FDT_ERR_BADSTRUCTURE,
+ *     -FDT_ERR_TRUNCATED, standard meanings
+ */
+int fdt_address_cells(const void *fdt, int nodeoffset);
+
+/**
+ * fdt_size_cells - retrieve address range size for a bus represented in the
+ *                  tree
+ * @fdt: pointer to the device tree blob
+ * @nodeoffset: offset of the node to find the address range size for
+ *
+ * When the node has a valid #size-cells property, returns its value.
+ *
+ * returns:
+ *     0 <= n < FDT_MAX_NCELLS, on success
+ *      2, if the node has no #size-cells property
+ *      -FDT_ERR_BADNCELLS, if the node has a badly formatted or invalid #size-cells property
+ *     -FDT_ERR_BADMAGIC,
+ *     -FDT_ERR_BADVERSION,
+ *     -FDT_ERR_BADSTATE,
+ *     -FDT_ERR_BADSTRUCTURE,
+ *     -FDT_ERR_TRUNCATED, standard meanings
+ */
+int fdt_size_cells(const void *fdt, int nodeoffset);
+
+
 /**********************************************************************/
 /* Write-in-place functions                                           */
 /**********************************************************************/
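
Per the comments above, fdt_address_cells() and fdt_size_cells() return the property value when present (bounded by FDT_MAX_NCELLS), a default of 2 when it is absent, or a negative error. A hedged sketch of a typical caller; reg_entry_cells is an invented name:

/* Illustrative only: how many cells one "reg" entry of a bus node occupies. */
#include <libfdt.h>

static int reg_entry_cells(const void *fdt, int busoffset)
{
        int ac = fdt_address_cells(fdt, busoffset);
        int sc = fdt_size_cells(fdt, busoffset);

        if (ac < 0)
                return ac;
        if (sc < 0)
                return sc;
        return ac + sc;
}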
@@ -882,8 +989,8 @@ int fdt_setprop_inplace(void *fdt, int nodeoffset, const char *name,
 static inline int fdt_setprop_inplace_u32(void *fdt, int nodeoffset,
                                          const char *name, uint32_t val)
 {
-       val = cpu_to_fdt32(val);
-       return fdt_setprop_inplace(fdt, nodeoffset, name, &val, sizeof(val));
+       fdt32_t tmp = cpu_to_fdt32(val);
+       return fdt_setprop_inplace(fdt, nodeoffset, name, &tmp, sizeof(tmp));
 }
 
 /**
@@ -917,8 +1024,8 @@ static inline int fdt_setprop_inplace_u32(void *fdt, int nodeoffset,
 static inline int fdt_setprop_inplace_u64(void *fdt, int nodeoffset,
                                          const char *name, uint64_t val)
 {
-       val = cpu_to_fdt64(val);
-       return fdt_setprop_inplace(fdt, nodeoffset, name, &val, sizeof(val));
+       fdt64_t tmp = cpu_to_fdt64(val);
+       return fdt_setprop_inplace(fdt, nodeoffset, name, &tmp, sizeof(tmp));
 }
 
 /**
@@ -987,19 +1094,20 @@ int fdt_nop_node(void *fdt, int nodeoffset);
 /**********************************************************************/
 
 int fdt_create(void *buf, int bufsize);
+int fdt_resize(void *fdt, void *buf, int bufsize);
 int fdt_add_reservemap_entry(void *fdt, uint64_t addr, uint64_t size);
 int fdt_finish_reservemap(void *fdt);
 int fdt_begin_node(void *fdt, const char *name);
 int fdt_property(void *fdt, const char *name, const void *val, int len);
 static inline int fdt_property_u32(void *fdt, const char *name, uint32_t val)
 {
-       val = cpu_to_fdt32(val);
-       return fdt_property(fdt, name, &val, sizeof(val));
+       fdt32_t tmp = cpu_to_fdt32(val);
+       return fdt_property(fdt, name, &tmp, sizeof(tmp));
 }
 static inline int fdt_property_u64(void *fdt, const char *name, uint64_t val)
 {
-       val = cpu_to_fdt64(val);
-       return fdt_property(fdt, name, &val, sizeof(val));
+       fdt64_t tmp = cpu_to_fdt64(val);
+       return fdt_property(fdt, name, &tmp, sizeof(tmp));
 }
 static inline int fdt_property_cell(void *fdt, const char *name, uint32_t val)
 {
@@ -1154,8 +1262,8 @@ int fdt_setprop(void *fdt, int nodeoffset, const char *name,
 static inline int fdt_setprop_u32(void *fdt, int nodeoffset, const char *name,
                                  uint32_t val)
 {
-       val = cpu_to_fdt32(val);
-       return fdt_setprop(fdt, nodeoffset, name, &val, sizeof(val));
+       fdt32_t tmp = cpu_to_fdt32(val);
+       return fdt_setprop(fdt, nodeoffset, name, &tmp, sizeof(tmp));
 }
 
 /**
@@ -1189,8 +1297,8 @@ static inline int fdt_setprop_u32(void *fdt, int nodeoffset, const char *name,
 static inline int fdt_setprop_u64(void *fdt, int nodeoffset, const char *name,
                                  uint64_t val)
 {
-       val = cpu_to_fdt64(val);
-       return fdt_setprop(fdt, nodeoffset, name, &val, sizeof(val));
+       fdt64_t tmp = cpu_to_fdt64(val);
+       return fdt_setprop(fdt, nodeoffset, name, &tmp, sizeof(tmp));
 }
 
 /**
@@ -1296,8 +1404,8 @@ int fdt_appendprop(void *fdt, int nodeoffset, const char *name,
 static inline int fdt_appendprop_u32(void *fdt, int nodeoffset,
                                     const char *name, uint32_t val)
 {
-       val = cpu_to_fdt32(val);
-       return fdt_appendprop(fdt, nodeoffset, name, &val, sizeof(val));
+       fdt32_t tmp = cpu_to_fdt32(val);
+       return fdt_appendprop(fdt, nodeoffset, name, &tmp, sizeof(tmp));
 }
 
 /**
@@ -1331,8 +1439,8 @@ static inline int fdt_appendprop_u32(void *fdt, int nodeoffset,
 static inline int fdt_appendprop_u64(void *fdt, int nodeoffset,
                                     const char *name, uint64_t val)
 {
-       val = cpu_to_fdt64(val);
-       return fdt_appendprop(fdt, nodeoffset, name, &val, sizeof(val));
+       fdt64_t tmp = cpu_to_fdt64(val);
+       return fdt_appendprop(fdt, nodeoffset, name, &tmp, sizeof(tmp));
 }
 
 /**
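
The inline setters above now keep the caller's value in CPU order and store the byte-swapped copy in a typed fdt32_t/fdt64_t temporary. The same pattern applies when open-coding a property that has no typed helper; this sketch and the "phandle" example are illustrative, not part of the commit:

/* Illustrative only: convert into a typed temporary, then store it. */
#include <libfdt.h>

static int set_phandle(void *fdt, int nodeoffset, uint32_t phandle)
{
        fdt32_t tmp = cpu_to_fdt32(phandle);

        return fdt_setprop(fdt, nodeoffset, "phandle", &tmp, sizeof(tmp));
}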
index 213d7fb81c4218ba516bbbf59fa35b1f36d1b6ad..9dea97dfff818e6c96d0f7ef278de87f9384b9ff 100644 (file)
 #ifndef _LIBFDT_ENV_H
 #define _LIBFDT_ENV_H
+/*
+ * libfdt - Flat Device Tree manipulation
+ * Copyright (C) 2006 David Gibson, IBM Corporation.
+ * Copyright 2012 Kim Phillips, Freescale Semiconductor.
+ *
+ * libfdt is dual licensed: you can use it either under the terms of
+ * the GPL, or the BSD license, at your option.
+ *
+ *  a) This library is free software; you can redistribute it and/or
+ *     modify it under the terms of the GNU General Public License as
+ *     published by the Free Software Foundation; either version 2 of the
+ *     License, or (at your option) any later version.
+ *
+ *     This library is distributed in the hope that it will be useful,
+ *     but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *     GNU General Public License for more details.
+ *
+ *     You should have received a copy of the GNU General Public
+ *     License along with this library; if not, write to the Free
+ *     Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston,
+ *     MA 02110-1301 USA
+ *
+ * Alternatively,
+ *
+ *  b) Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *     1. Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *     2. Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ *     THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
+ *     CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
+ *     INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ *     MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ *     DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ *     CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *     SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ *     NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ *     LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ *     HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ *     CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ *     OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ *     EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
 
 #include <stddef.h>
 #include <stdint.h>
 #include <string.h>
 
-#define EXTRACT_BYTE(n)        ((unsigned long long)((uint8_t *)&x)[n])
-static inline uint16_t fdt16_to_cpu(uint16_t x)
+#ifdef __CHECKER__
+#define __force __attribute__((force))
+#define __bitwise __attribute__((bitwise))
+#else
+#define __force
+#define __bitwise
+#endif
+
+typedef uint16_t __bitwise fdt16_t;
+typedef uint32_t __bitwise fdt32_t;
+typedef uint64_t __bitwise fdt64_t;
+
+#define EXTRACT_BYTE(x, n)     ((unsigned long long)((uint8_t *)&x)[n])
+#define CPU_TO_FDT16(x) ((EXTRACT_BYTE(x, 0) << 8) | EXTRACT_BYTE(x, 1))
+#define CPU_TO_FDT32(x) ((EXTRACT_BYTE(x, 0) << 24) | (EXTRACT_BYTE(x, 1) << 16) | \
+                        (EXTRACT_BYTE(x, 2) << 8) | EXTRACT_BYTE(x, 3))
+#define CPU_TO_FDT64(x) ((EXTRACT_BYTE(x, 0) << 56) | (EXTRACT_BYTE(x, 1) << 48) | \
+                        (EXTRACT_BYTE(x, 2) << 40) | (EXTRACT_BYTE(x, 3) << 32) | \
+                        (EXTRACT_BYTE(x, 4) << 24) | (EXTRACT_BYTE(x, 5) << 16) | \
+                        (EXTRACT_BYTE(x, 6) << 8) | EXTRACT_BYTE(x, 7))
+
+static inline uint16_t fdt16_to_cpu(fdt16_t x)
+{
+       return (__force uint16_t)CPU_TO_FDT16(x);
+}
+static inline fdt16_t cpu_to_fdt16(uint16_t x)
 {
-       return (EXTRACT_BYTE(0) << 8) | EXTRACT_BYTE(1);
+       return (__force fdt16_t)CPU_TO_FDT16(x);
 }
-#define cpu_to_fdt16(x) fdt16_to_cpu(x)
 
-static inline uint32_t fdt32_to_cpu(uint32_t x)
+static inline uint32_t fdt32_to_cpu(fdt32_t x)
 {
-       return (EXTRACT_BYTE(0) << 24) | (EXTRACT_BYTE(1) << 16) | (EXTRACT_BYTE(2) << 8) | EXTRACT_BYTE(3);
+       return (__force uint32_t)CPU_TO_FDT32(x);
+}
+static inline fdt32_t cpu_to_fdt32(uint32_t x)
+{
+       return (__force fdt32_t)CPU_TO_FDT32(x);
 }
-#define cpu_to_fdt32(x) fdt32_to_cpu(x)
 
-static inline uint64_t fdt64_to_cpu(uint64_t x)
+static inline uint64_t fdt64_to_cpu(fdt64_t x)
+{
+       return (__force uint64_t)CPU_TO_FDT64(x);
+}
+static inline fdt64_t cpu_to_fdt64(uint64_t x)
 {
-       return (EXTRACT_BYTE(0) << 56) | (EXTRACT_BYTE(1) << 48) | (EXTRACT_BYTE(2) << 40) | (EXTRACT_BYTE(3) << 32)
-               | (EXTRACT_BYTE(4) << 24) | (EXTRACT_BYTE(5) << 16) | (EXTRACT_BYTE(6) << 8) | EXTRACT_BYTE(7);
+       return (__force fdt64_t)CPU_TO_FDT64(x);
 }
-#define cpu_to_fdt64(x) fdt64_to_cpu(x)
+#undef CPU_TO_FDT64
+#undef CPU_TO_FDT32
+#undef CPU_TO_FDT16
 #undef EXTRACT_BYTE
 
 #endif /* _LIBFDT_ENV_H */
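
After this rework the conversion helpers take and return the __bitwise fdt16_t/fdt32_t/fdt64_t types, so mixing host-order and blob-order integers is a sparse warning rather than a silent bug. A quick round-trip sketch (function name illustrative):

/* Illustrative only: CPU order -> big-endian blob order -> CPU order. */
#include <assert.h>
#include <libfdt_env.h>

static void roundtrip(void)
{
        fdt32_t wire = cpu_to_fdt32(0xd00dfeed);   /* big-endian in memory */
        uint32_t host = fdt32_to_cpu(wire);        /* back to CPU order */

        assert(host == 0xd00dfeed);
}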
index 381133ba81df7d02c375d7a1462d650136abd6d1..02cfa6fb612db834c6baad27d96b003d78ab49b0 100644 (file)
@@ -57,9 +57,9 @@
 
 #define FDT_CHECK_HEADER(fdt) \
        { \
-               int err; \
-               if ((err = fdt_check_header(fdt)) != 0) \
-                       return err; \
+               int __err; \
+               if ((__err = fdt_check_header(fdt)) != 0) \
+                       return __err; \
        }
 
 int _fdt_check_node_offset(const void *fdt, int offset);
index b61465fb2f33813ac78959c22fb3ded4452c9efa..e229b84432f99216189e92d6e61bc505aa4d6b9f 100644 (file)
@@ -511,7 +511,9 @@ struct node *get_node_by_phandle(struct node *tree, cell_t phandle)
 
 struct node *get_node_by_ref(struct node *tree, const char *ref)
 {
-       if (ref[0] == '/')
+       if (streq(ref, "/"))
+               return tree;
+       else if (ref[0] == '/')
                return get_node_by_path(tree, ref);
        else
                return get_node_by_label(tree, ref);
index c20bc5315bc1b039bdc76507350d888df180e91f..f534c22a888d71e9b321d4eadd1430749e171ac0 100644 (file)
@@ -34,7 +34,7 @@ struct search_path {
 static struct search_path *search_path_head, **search_path_tail;
 
 
-static char *dirname(const char *path)
+static char *get_dirname(const char *path)
 {
        const char *slash = strrchr(path, '/');
 
@@ -77,7 +77,7 @@ static char *try_open(const char *dirname, const char *fname, FILE **fp)
        else
                fullname = join_path(dirname, fname);
 
-       *fp = fopen(fullname, "r");
+       *fp = fopen(fullname, "rb");
        if (!*fp) {
                free(fullname);
                fullname = NULL;
@@ -150,7 +150,7 @@ void srcfile_push(const char *fname)
        srcfile = xmalloc(sizeof(*srcfile));
 
        srcfile->f = srcfile_relative_open(fname, &srcfile->name);
-       srcfile->dir = dirname(srcfile->name);
+       srcfile->dir = get_dirname(srcfile->name);
        srcfile->prev = current_srcfile;
 
        srcfile->lineno = 1;
@@ -159,7 +159,7 @@ void srcfile_push(const char *fname)
        current_srcfile = srcfile;
 }
 
-int srcfile_pop(void)
+bool srcfile_pop(void)
 {
        struct srcfile_state *srcfile = current_srcfile;
 
@@ -177,7 +177,7 @@ int srcfile_pop(void)
         * fix this we could either allocate all the files from a
         * table, or use a pool allocator. */
 
-       return current_srcfile ? 1 : 0;
+       return current_srcfile ? true : false;
 }
 
 void srcfile_add_search_path(const char *dirname)
@@ -290,42 +290,27 @@ srcpos_string(struct srcpos *pos)
        return pos_str;
 }
 
-void
-srcpos_verror(struct srcpos *pos, char const *fmt, va_list va)
+void srcpos_verror(struct srcpos *pos, const char *prefix,
+                  const char *fmt, va_list va)
 {
-       const char *srcstr;
-
-       srcstr = srcpos_string(pos);
+       char *srcstr;
 
-       fprintf(stderr, "Error: %s ", srcstr);
-       vfprintf(stderr, fmt, va);
-       fprintf(stderr, "\n");
-}
+       srcstr = srcpos_string(pos);
 
-void
-srcpos_error(struct srcpos *pos, char const *fmt, ...)
-{
-       va_list va;
+       fprintf(stderr, "%s: %s ", prefix, srcstr);
+       vfprintf(stderr, fmt, va);
+       fprintf(stderr, "\n");
 
-       va_start(va, fmt);
-       srcpos_verror(pos, fmt, va);
-       va_end(va);
+       free(srcstr);
 }
 
-
-void
-srcpos_warn(struct srcpos *pos, char const *fmt, ...)
+void srcpos_error(struct srcpos *pos, const char *prefix,
+                 const char *fmt, ...)
 {
-       const char *srcstr;
        va_list va;
-       va_start(va, fmt);
-
-       srcstr = srcpos_string(pos);
-
-       fprintf(stderr, "Warning: %s ", srcstr);
-       vfprintf(stderr, fmt, va);
-       fprintf(stderr, "\n");
 
+       va_start(va, fmt);
+       srcpos_verror(pos, prefix, fmt, va);
        va_end(va);
 }
 
index 93a27123c2e9c17ec27c728243860121447feb7e..f81827bd684a767da1d62f6f31020284821f5045 100644 (file)
@@ -21,6 +21,7 @@
 #define _SRCPOS_H_
 
 #include <stdio.h>
+#include <stdbool.h>
 
 struct srcfile_state {
        FILE *f;
@@ -55,7 +56,7 @@ extern struct srcfile_state *current_srcfile; /* = NULL */
 FILE *srcfile_relative_open(const char *fname, char **fullnamep);
 
 void srcfile_push(const char *fname);
-int srcfile_pop(void);
+bool srcfile_pop(void);
 
 /**
  * Add a new directory to the search path for input files
@@ -106,12 +107,12 @@ extern struct srcpos *srcpos_copy(struct srcpos *pos);
 extern char *srcpos_string(struct srcpos *pos);
 extern void srcpos_dump(struct srcpos *pos);
 
-extern void srcpos_verror(struct srcpos *pos, char const *, va_list va)
-     __attribute__((format(printf, 2, 0)));
-extern void srcpos_error(struct srcpos *pos, char const *, ...)
-     __attribute__((format(printf, 2, 3)));
-extern void srcpos_warn(struct srcpos *pos, char const *, ...)
-     __attribute__((format(printf, 2, 3)));
+extern void srcpos_verror(struct srcpos *pos, const char *prefix,
+                         const char *fmt, va_list va)
+       __attribute__((format(printf, 3, 0)));
+extern void srcpos_error(struct srcpos *pos, const char *prefix,
+                        const char *fmt, ...)
+       __attribute__((format(printf, 3, 4)));
 
 extern void srcpos_set_line(char *f, int l);
 
index 5740e6992d37c157d5395572341a8d53cd5d10d8..a55d1d128cce7fe03808691fd06428f9f30da2ae 100644 (file)
@@ -26,12 +26,12 @@ extern int yyparse(void);
 extern YYLTYPE yylloc;
 
 struct boot_info *the_boot_info;
-int treesource_error;
+bool treesource_error;
 
 struct boot_info *dt_from_source(const char *fname)
 {
        the_boot_info = NULL;
-       treesource_error = 0;
+       treesource_error = false;
 
        srcfile_push(fname);
        yyin = current_srcfile->f;
@@ -54,9 +54,9 @@ static void write_prefix(FILE *f, int level)
                fputc('\t', f);
 }
 
-static int isstring(char c)
+static bool isstring(char c)
 {
-       return (isprint(c)
+       return (isprint((unsigned char)c)
                || (c == '\0')
                || strchr("\a\b\t\n\v\f\r", c));
 }
@@ -109,7 +109,7 @@ static void write_propval_string(FILE *f, struct data val)
                        break;
                case '\0':
                        fprintf(f, "\", ");
-                       while (m && (m->offset < i)) {
+                       while (m && (m->offset <= (i + 1))) {
                                if (m->type == LABEL) {
                                        assert(m->offset == (i+1));
                                        fprintf(f, "%s: ", m->ref);
@@ -119,7 +119,7 @@ static void write_propval_string(FILE *f, struct data val)
                        fprintf(f, "\"");
                        break;
                default:
-                       if (isprint(c))
+                       if (isprint((unsigned char)c))
                                fprintf(f, "%c", c);
                        else
                                fprintf(f, "\\x%02hhx", c);
@@ -178,7 +178,7 @@ static void write_propval_bytes(FILE *f, struct data val)
                        m = m->next;
                }
 
-               fprintf(f, "%02hhx", *bp++);
+               fprintf(f, "%02hhx", (unsigned char)(*bp++));
                if ((const void *)bp >= propend)
                        break;
                fprintf(f, " ");
@@ -281,3 +281,4 @@ void dt_to_source(FILE *f, struct boot_info *bi)
 
        write_tree_source_node(f, bi->dt, 0);
 }
+
index feb01ef26be4f8b1766e4e8a6b9e21d977c41fd4..f5cde799db03f3154cf4fe93f0c7a715e5a617f4 100755 (executable)
@@ -34,6 +34,7 @@ DTC_SOURCE="checks.c data.c dtc.c dtc.h flattree.c fstree.c livetree.c srcpos.c
                srcpos.h treesource.c util.c util.h version_gen.h Makefile.dtc \
                dtc-lexer.l dtc-parser.y"
 DTC_GENERATED="dtc-lexer.lex.c dtc-parser.tab.c dtc-parser.tab.h"
+LIBFDT_SOURCE="Makefile.libfdt fdt.c fdt.h fdt_empty_tree.c fdt_ro.c fdt_rw.c fdt_strerror.c fdt_sw.c fdt_wip.c libfdt.h libfdt_env.h libfdt_internal.h"
 
 # Build DTC
 cd $DTC_UPSTREAM_PATH
@@ -50,5 +51,13 @@ for f in $DTC_GENERATED; do
        cp ${DTC_UPSTREAM_PATH}/$f ${f}_shipped
        git add ${f}_shipped
 done
+for f in $LIBFDT_SOURCE; do
+       cp ${DTC_UPSTREAM_PATH}/libfdt/${f} libfdt/${f}
+       git add libfdt/${f}
+done
+
+sed -i -- 's/#include <libfdt_env.h>/#include "libfdt_env.h"/g' ./libfdt/libfdt.h
+sed -i -- 's/#include <fdt.h>/#include "fdt.h"/g' ./libfdt/libfdt.h
+git add ./libfdt/libfdt.h
 
 git commit -e -v -m "scripts/dtc: Update to upstream version [CHANGEME]"
index 3055c16e980dc6fa5347c8d7fac4a6f6f674b821..9d65226df9e404e7a33ecdbb3f715a3a5a13856e 100644 (file)
 char *xstrdup(const char *s)
 {
        int len = strlen(s) + 1;
-       char *dup = xmalloc(len);
+       char *d = xmalloc(len);
 
-       memcpy(dup, s, len);
+       memcpy(d, s, len);
 
-       return dup;
+       return d;
 }
 
 char *join_path(const char *path, const char *name)
@@ -70,7 +70,7 @@ char *join_path(const char *path, const char *name)
        return str;
 }
 
-int util_is_printable_string(const void *data, int len)
+bool util_is_printable_string(const void *data, int len)
 {
        const char *s = data;
        const char *ss, *se;
@@ -87,7 +87,7 @@ int util_is_printable_string(const void *data, int len)
 
        while (s < se) {
                ss = s;
-               while (s < se && *s && isprint(*s))
+               while (s < se && *s && isprint((unsigned char)*s))
                        s++;
 
                /* not zero, or not done yet */
@@ -219,10 +219,6 @@ int utilfdt_read_err_len(const char *filename, char **buffp, off_t *len)
                if (offset == bufsize) {
                        bufsize *= 2;
                        buf = xrealloc(buf, bufsize);
-                       if (!buf) {
-                               ret = ENOMEM;
-                               break;
-                       }
                }
 
                ret = read(fd, &buf[offset], bufsize - offset);
@@ -375,9 +371,9 @@ void utilfdt_print_data(const char *data, int len)
                const uint32_t *cell = (const uint32_t *)data;
 
                printf(" = <");
-               for (i = 0; i < len; i += 4)
+               for (i = 0, len /= 4; i < len; i++)
                        printf("0x%08x%s", fdt32_to_cpu(cell[i]),
-                              i < (len - 4) ? " " : "");
+                              i < (len - 1) ? " " : "");
                printf(">");
        } else {
                printf(" = [");
index 8f40b4499359d7486262419466e49239b4f658cb..f800b6011fb1444ed5a8bd8471c7793b1f40c030 100644 (file)
@@ -2,6 +2,7 @@
 #define _UTIL_H
 
 #include <stdarg.h>
+#include <stdbool.h>
 #include <getopt.h>
 
 /*
@@ -33,6 +34,7 @@ static inline void __attribute__((noreturn)) die(const char *str, ...)
        va_start(ap, str);
        fprintf(stderr, "FATAL ERROR: ");
        vfprintf(stderr, str, ap);
+       va_end(ap);
        exit(1);
 }
 
@@ -68,7 +70,7 @@ extern char *join_path(const char *path, const char *name);
  * @param len  The string length including terminator
  * @return 1 if a valid printable string, 0 if not
  */
-int util_is_printable_string(const void *data, int len);
+bool util_is_printable_string(const void *data, int len);
 
 /*
  * Parse an escaped character starting at index i in string s.  The resulting
index 54d4e904433a36f129e70d45d5cf520d090db8ec..5b8c7d53d608bdbb60776c97732e44cb310cc4c1 100644 (file)
@@ -1 +1 @@
-#define DTC_VERSION "DTC 1.4.0-dirty"
+#define DTC_VERSION "DTC 1.4.1-g9d3649bd"
index 3c947f0c5dad13d1c364f4960c0724727fba6c91..927d0d2a3145177dcd0b42d1cee9dbc15554c05d 100644 (file)
@@ -12,7 +12,6 @@
 #
 
 import gdb
-import string
 
 from linux import utils
 
diff --git a/scripts/gdb/linux/lists.py b/scripts/gdb/linux/lists.py
new file mode 100644 (file)
index 0000000..3a3775b
--- /dev/null
@@ -0,0 +1,92 @@
+#
+# gdb helper commands and functions for Linux kernel debugging
+#
+#  list tools
+#
+# Copyright (c) Thiebaud Weksteen, 2015
+#
+# Authors:
+#  Thiebaud Weksteen <thiebaud@weksteen.fr>
+#
+# This work is licensed under the terms of the GNU GPL version 2.
+#
+
+import gdb
+
+from linux import utils
+
+list_head = utils.CachedType("struct list_head")
+
+
+def list_check(head):
+    nb = 0
+    if (head.type == list_head.get_type().pointer()):
+        head = head.dereference()
+    elif (head.type != list_head.get_type()):
+        raise gdb.GdbError('argument must be of type (struct list_head [*])')
+    c = head
+    try:
+        gdb.write("Starting with: {}\n".format(c))
+    except gdb.MemoryError:
+        gdb.write('head is not accessible\n')
+        return
+    while True:
+        p = c['prev'].dereference()
+        n = c['next'].dereference()
+        try:
+            if p['next'] != c.address:
+                gdb.write('prev.next != current: '
+                          'current@{current_addr}={current} '
+                          'prev@{p_addr}={p}\n'.format(
+                              current_addr=c.address,
+                              current=c,
+                              p_addr=p.address,
+                              p=p,
+                          ))
+                return
+        except gdb.MemoryError:
+            gdb.write('prev is not accessible: '
+                      'current@{current_addr}={current}\n'.format(
+                          current_addr=c.address,
+                          current=c
+                      ))
+            return
+        try:
+            if n['prev'] != c.address:
+                gdb.write('next.prev != current: '
+                          'current@{current_addr}={current} '
+                          'next@{n_addr}={n}\n'.format(
+                              current_addr=c.address,
+                              current=c,
+                              n_addr=n.address,
+                              n=n,
+                          ))
+                return
+        except gdb.MemoryError:
+            gdb.write('next is not accessible: '
+                      'current@{current_addr}={current}\n'.format(
+                          current_addr=c.address,
+                          current=c
+                      ))
+            return
+        c = n
+        nb += 1
+        if c == head:
+            gdb.write("list is consistent: {} node(s)\n".format(nb))
+            return
+
+
+class LxListChk(gdb.Command):
+    """Verify a list consistency"""
+
+    def __init__(self):
+        super(LxListChk, self).__init__("lx-list-check", gdb.COMMAND_DATA,
+                                        gdb.COMPLETE_EXPRESSION)
+
+    def invoke(self, arg, from_tty):
+        argv = gdb.string_to_argv(arg)
+        if len(argv) != 1:
+            raise gdb.GdbError("lx-list-check takes one argument")
+        list_check(gdb.parse_and_eval(argv[0]))
+
+LxListChk()
index cd5bea965d4eb015f57b0baec1b9d4b75ac44964..627750cb420d08792868ce5b6c85b88d071e54f5 100644 (file)
@@ -14,9 +14,8 @@
 import gdb
 import os
 import re
-import string
 
-from linux import modules, utils
+from linux import modules
 
 
 if hasattr(gdb, 'Breakpoint'):
@@ -97,7 +96,7 @@ lx-symbols command."""
             return ""
         attrs = sect_attrs['attrs']
         section_name_to_address = {
-            attrs[n]['name'].string() : attrs[n]['address']
+            attrs[n]['name'].string(): attrs[n]['address']
             for n in range(int(sect_attrs['nsections']))}
         args = []
         for section_name in [".data", ".data..read_mostly", ".rodata", ".bss"]:
@@ -124,7 +123,7 @@ lx-symbols command."""
                 addr=module_addr,
                 sections=self._section_arguments(module))
             gdb.execute(cmdline, to_string=True)
-            if not module_name in self.loaded_modules:
+            if module_name not in self.loaded_modules:
                 self.loaded_modules.append(module_name)
         else:
             gdb.write("no module object found for '{0}'\n".format(module_name))
@@ -164,7 +163,7 @@ lx-symbols command."""
         self.load_all_symbols()
 
         if hasattr(gdb, 'Breakpoint'):
-            if not self.breakpoint is None:
+            if self.breakpoint is not None:
                 self.breakpoint.delete()
                 self.breakpoint = None
             self.breakpoint = LoadModuleBreakpoint(
index e2037d9bb7eb89dc8fa5b884313b7c55ed3bfb14..862a4ae24d4996caec315ddbe10a859456a04cf4 100644 (file)
@@ -18,8 +18,8 @@ from linux import utils
 
 task_type = utils.CachedType("struct task_struct")
 
+
 def task_lists():
-    global task_type
     task_ptr_type = task_type.get_type().pointer()
     init_task = gdb.parse_and_eval("init_task").address
     t = g = init_task
@@ -38,6 +38,7 @@ def task_lists():
         if t == init_task:
             return
 
+
 def get_task_by_pid(pid):
     for task in task_lists():
         if int(task['pid']) == pid:
@@ -65,13 +66,28 @@ return that task_struct variable which PID matches."""
 LxTaskByPidFunc()
 
 
+class LxPs(gdb.Command):
+    """Dump Linux tasks."""
+
+    def __init__(self):
+        super(LxPs, self).__init__("lx-ps", gdb.COMMAND_DATA)
+
+    def invoke(self, arg, from_tty):
+        for task in task_lists():
+            gdb.write("{address} {pid} {comm}\n".format(
+                address=task,
+                pid=task["pid"],
+                comm=task["comm"].string()))
+
+LxPs()
+
+
 thread_info_type = utils.CachedType("struct thread_info")
 
 ia64_task_size = None
 
 
 def get_thread_info(task):
-    global thread_info_type
     thread_info_ptr_type = thread_info_type.get_type().pointer()
     if utils.is_target_arch("ia64"):
         global ia64_task_size
index 128c306db3ee892b862c2eeee71164ee908d5759..0893b326a28b8fc938f705edc93f76a479166d86 100644 (file)
@@ -83,7 +83,7 @@ def get_target_endianness():
         elif "big endian" in endian:
             target_endianness = BIG_ENDIAN
         else:
-            raise gdb.GdgError("unknown endianness '{0}'".format(str(endian)))
+            raise gdb.GdbError("unknown endianness '{0}'".format(str(endian)))
     return target_endianness
 
 
@@ -151,6 +151,6 @@ def get_gdbserver_type():
             gdbserver_type = GDBSERVER_QEMU
         elif probe_kgdb():
             gdbserver_type = GDBSERVER_KGDB
-        if not gdbserver_type is None and hasattr(gdb, 'events'):
+        if gdbserver_type is not None and hasattr(gdb, 'events'):
             gdb.events.exited.connect(exit_handler)
     return gdbserver_type
index 48489285f1192cda0c192828d4ca86d3c193eecb..ce82bf5c394321674d40d1cb247ea2a0afcf10bd 100644 (file)
@@ -28,3 +28,4 @@ else:
     import linux.dmesg
     import linux.tasks
     import linux.cpus
+    import linux.lists
index d9b1fef0c67e3cb42e29f64e661496a50af02e53..aceaaed098112dbc73a6f8e0c526e5e9edd55a99 100644 (file)
@@ -86,7 +86,7 @@ $(simple-targets): $(obj)/conf
 PHONY += oldnoconfig savedefconfig defconfig
 
 # oldnoconfig is an alias of olddefconfig, because people already are dependent
-# on its behavior(sets new symbols to their default value but not 'n') with the
+# on its behavior (sets new symbols to their default value but not 'n') with the
 # counter-intuitive name.
 oldnoconfig: olddefconfig
 
@@ -115,6 +115,10 @@ PHONY += kvmconfig
 kvmconfig: kvm_guest.config
        @:
 
+PHONY += xenconfig
+xenconfig: xen.config
+       @:
+
 PHONY += tinyconfig
 tinyconfig:
        $(Q)$(MAKE) -f $(srctree)/Makefile allnoconfig tiny.config
@@ -122,10 +126,11 @@ tinyconfig:
 # Help text used by make help
 help:
        @echo  '  config          - Update current config utilising a line-oriented program'
-       @echo  '  nconfig         - Update current config utilising a ncurses menu based program'
+       @echo  '  nconfig         - Update current config utilising a ncurses menu based'
+       @echo  '                    program'
        @echo  '  menuconfig      - Update current config utilising a menu based program'
-       @echo  '  xconfig         - Update current config utilising a QT based front-end'
-       @echo  '  gconfig         - Update current config utilising a GTK based front-end'
+       @echo  '  xconfig         - Update current config utilising a Qt based front-end'
+       @echo  '  gconfig         - Update current config utilising a GTK+ based front-end'
        @echo  '  oldconfig       - Update current config utilising a provided .config as base'
        @echo  '  localmodconfig  - Update current config disabling modules not loaded'
        @echo  '  localyesconfig  - Update current config converting local mods to core'
@@ -138,8 +143,10 @@ help:
        @echo  '  alldefconfig    - New config with all symbols set to default'
        @echo  '  randconfig      - New config with random answer to all options'
        @echo  '  listnewconfig   - List new options'
-       @echo  '  olddefconfig    - Same as silentoldconfig but sets new symbols to their default value'
-       @echo  '  kvmconfig       - Enable additional options for guest kernel support'
+       @echo  '  olddefconfig    - Same as silentoldconfig but sets new symbols to their'
+       @echo  '                    default value'
+       @echo  '  kvmconfig       - Enable additional options for kvm guest kernel support'
+       @echo  '  xenconfig       - Enable additional options for xen dom0 and guest kernel support'
        @echo  '  tinyconfig      - Configure the tiniest possible kernel'
 
 # lxdialog stuff
@@ -158,9 +165,9 @@ HOST_EXTRACFLAGS += $(shell $(CONFIG_SHELL) $(check-lxdialog) -ccflags) \
 # mconf:  Used for the menuconfig target
 #         Utilizes the lxdialog package
 # qconf:  Used for the xconfig target
-#         Based on QT which needs to be installed to compile it
+#         Based on Qt which needs to be installed to compile it
 # gconf:  Used for the gconfig target
-#         Based on GTK which needs to be installed to compile it
+#         Based on GTK+ which needs to be installed to compile it
 # object files used by all kconfig flavours
 
 lxdialog := lxdialog/checklist.o lxdialog/util.o lxdialog/inputbox.o
@@ -217,11 +224,11 @@ ifeq ($(MAKECMDGOALS),xconfig)
 $(obj)/.tmp_qtcheck: $(src)/Makefile
 -include $(obj)/.tmp_qtcheck
 
-# QT needs some extra effort...
+# Qt needs some extra effort...
 $(obj)/.tmp_qtcheck:
        @set -e; $(kecho) "  CHECK   qt"; dir=""; pkg=""; \
        if ! pkg-config --exists QtCore 2> /dev/null; then \
-           echo "* Unable to find the QT4 tool qmake. Trying to use QT3"; \
+           echo "* Unable to find the Qt4 tool qmake. Trying to use Qt3"; \
            pkg-config --exists qt 2> /dev/null && pkg=qt; \
            pkg-config --exists qt-mt 2> /dev/null && pkg=qt-mt; \
            if [ -n "$$pkg" ]; then \
@@ -235,8 +242,8 @@ $(obj)/.tmp_qtcheck:
              done; \
              if [ -z "$$dir" ]; then \
                echo >&2 "*"; \
-               echo >&2 "* Unable to find any QT installation. Please make sure that"; \
-               echo >&2 "* the QT4 or QT3 development package is correctly installed and"; \
+               echo >&2 "* Unable to find any Qt installation. Please make sure that"; \
+               echo >&2 "* the Qt4 or Qt3 development package is correctly installed and"; \
                echo >&2 "* either qmake can be found or install pkg-config or set"; \
                echo >&2 "* the QTDIR environment variable to the correct location."; \
                echo >&2 "*"; \
@@ -273,7 +280,7 @@ $(obj)/gconf.o: $(obj)/.tmp_gtkcheck
 ifeq ($(MAKECMDGOALS),gconfig)
 -include $(obj)/.tmp_gtkcheck
 
-# GTK needs some extra effort, too...
+# GTK+ needs some extra effort, too...
 $(obj)/.tmp_gtkcheck:
        @if `pkg-config --exists gtk+-2.0 gmodule-2.0 libglade-2.0`; then               \
                if `pkg-config --atleast-version=2.0.0 gtk+-2.0`; then                  \
@@ -304,7 +311,7 @@ quiet_cmd_moc = MOC     $@
 $(obj)/%.moc: $(src)/%.h $(obj)/.tmp_qtcheck
        $(call cmd,moc)
 
-# Extract gconf menu items for I18N support
+# Extract gconf menu items for i18n support
 $(obj)/gconf.glade.h: $(obj)/gconf.glade
        $(Q)intltool-extract --type=gettext/glade --srcdir=$(srctree) \
        $(obj)/gconf.glade
index fb0a2a286dca6a8b0cc0c74deb5d23096be096c3..667d1aa237114453c28bafac618e9552405a19c4 100644 (file)
@@ -13,9 +13,6 @@
 
 static int expr_eq(struct expr *e1, struct expr *e2);
 static struct expr *expr_eliminate_yn(struct expr *e);
-static struct expr *expr_extract_eq_and(struct expr **ep1, struct expr **ep2);
-static struct expr *expr_extract_eq_or(struct expr **ep1, struct expr **ep2);
-static void expr_extract_eq(enum expr_type type, struct expr **ep, struct expr **ep1, struct expr **ep2);
 
 struct expr *expr_alloc_symbol(struct symbol *sym)
 {
@@ -82,6 +79,10 @@ struct expr *expr_copy(const struct expr *org)
                e->left.expr = expr_copy(org->left.expr);
                break;
        case E_EQUAL:
+       case E_GEQ:
+       case E_GTH:
+       case E_LEQ:
+       case E_LTH:
        case E_UNEQUAL:
                e->left.sym = org->left.sym;
                e->right.sym = org->right.sym;
@@ -114,6 +115,10 @@ void expr_free(struct expr *e)
                expr_free(e->left.expr);
                return;
        case E_EQUAL:
+       case E_GEQ:
+       case E_GTH:
+       case E_LEQ:
+       case E_LTH:
        case E_UNEQUAL:
                break;
        case E_OR:
@@ -200,6 +205,10 @@ static int expr_eq(struct expr *e1, struct expr *e2)
                return 0;
        switch (e1->type) {
        case E_EQUAL:
+       case E_GEQ:
+       case E_GTH:
+       case E_LEQ:
+       case E_LTH:
        case E_UNEQUAL:
                return e1->left.sym == e2->left.sym && e1->right.sym == e2->right.sym;
        case E_SYMBOL:
@@ -559,62 +568,6 @@ static void expr_eliminate_dups1(enum expr_type type, struct expr **ep1, struct
 #undef e2
 }
 
-static void expr_eliminate_dups2(enum expr_type type, struct expr **ep1, struct expr **ep2)
-{
-#define e1 (*ep1)
-#define e2 (*ep2)
-       struct expr *tmp, *tmp1, *tmp2;
-
-       if (e1->type == type) {
-               expr_eliminate_dups2(type, &e1->left.expr, &e2);
-               expr_eliminate_dups2(type, &e1->right.expr, &e2);
-               return;
-       }
-       if (e2->type == type) {
-               expr_eliminate_dups2(type, &e1, &e2->left.expr);
-               expr_eliminate_dups2(type, &e1, &e2->right.expr);
-       }
-       if (e1 == e2)
-               return;
-
-       switch (e1->type) {
-       case E_OR:
-               expr_eliminate_dups2(e1->type, &e1, &e1);
-               // (FOO || BAR) && (!FOO && !BAR) -> n
-               tmp1 = expr_transform(expr_alloc_one(E_NOT, expr_copy(e1)));
-               tmp2 = expr_copy(e2);
-               tmp = expr_extract_eq_and(&tmp1, &tmp2);
-               if (expr_is_yes(tmp1)) {
-                       expr_free(e1);
-                       e1 = expr_alloc_symbol(&symbol_no);
-                       trans_count++;
-               }
-               expr_free(tmp2);
-               expr_free(tmp1);
-               expr_free(tmp);
-               break;
-       case E_AND:
-               expr_eliminate_dups2(e1->type, &e1, &e1);
-               // (FOO && BAR) || (!FOO || !BAR) -> y
-               tmp1 = expr_transform(expr_alloc_one(E_NOT, expr_copy(e1)));
-               tmp2 = expr_copy(e2);
-               tmp = expr_extract_eq_or(&tmp1, &tmp2);
-               if (expr_is_no(tmp1)) {
-                       expr_free(e1);
-                       e1 = expr_alloc_symbol(&symbol_yes);
-                       trans_count++;
-               }
-               expr_free(tmp2);
-               expr_free(tmp1);
-               expr_free(tmp);
-               break;
-       default:
-               ;
-       }
-#undef e1
-#undef e2
-}
-
 struct expr *expr_eliminate_dups(struct expr *e)
 {
        int oldcount;
@@ -627,7 +580,6 @@ struct expr *expr_eliminate_dups(struct expr *e)
                switch (e->type) {
                case E_OR: case E_AND:
                        expr_eliminate_dups1(e->type, &e, &e);
-                       expr_eliminate_dups2(e->type, &e, &e);
                default:
                        ;
                }
@@ -647,6 +599,10 @@ struct expr *expr_transform(struct expr *e)
                return NULL;
        switch (e->type) {
        case E_EQUAL:
+       case E_GEQ:
+       case E_GTH:
+       case E_LEQ:
+       case E_LTH:
        case E_UNEQUAL:
        case E_SYMBOL:
        case E_LIST:
@@ -719,6 +675,22 @@ struct expr *expr_transform(struct expr *e)
                        e = tmp;
                        e->type = e->type == E_EQUAL ? E_UNEQUAL : E_EQUAL;
                        break;
+               case E_LEQ:
+               case E_GEQ:
+                       // !a<='x' -> a>'x'
+                       tmp = e->left.expr;
+                       free(e);
+                       e = tmp;
+                       e->type = e->type == E_LEQ ? E_GTH : E_LTH;
+                       break;
+               case E_LTH:
+               case E_GTH:
+                       // !a<'x' -> a>='x'
+                       tmp = e->left.expr;
+                       free(e);
+                       e = tmp;
+                       e->type = e->type == E_LTH ? E_GEQ : E_LEQ;
+                       break;
                case E_OR:
                        // !(a || b) -> !a && !b
                        tmp = e->left.expr;
@@ -789,6 +761,10 @@ int expr_contains_symbol(struct expr *dep, struct symbol *sym)
        case E_SYMBOL:
                return dep->left.sym == sym;
        case E_EQUAL:
+       case E_GEQ:
+       case E_GTH:
+       case E_LEQ:
+       case E_LTH:
        case E_UNEQUAL:
                return dep->left.sym == sym ||
                       dep->right.sym == sym;
@@ -829,57 +805,6 @@ bool expr_depends_symbol(struct expr *dep, struct symbol *sym)
        return false;
 }
 
-static struct expr *expr_extract_eq_and(struct expr **ep1, struct expr **ep2)
-{
-       struct expr *tmp = NULL;
-       expr_extract_eq(E_AND, &tmp, ep1, ep2);
-       if (tmp) {
-               *ep1 = expr_eliminate_yn(*ep1);
-               *ep2 = expr_eliminate_yn(*ep2);
-       }
-       return tmp;
-}
-
-static struct expr *expr_extract_eq_or(struct expr **ep1, struct expr **ep2)
-{
-       struct expr *tmp = NULL;
-       expr_extract_eq(E_OR, &tmp, ep1, ep2);
-       if (tmp) {
-               *ep1 = expr_eliminate_yn(*ep1);
-               *ep2 = expr_eliminate_yn(*ep2);
-       }
-       return tmp;
-}
-
-static void expr_extract_eq(enum expr_type type, struct expr **ep, struct expr **ep1, struct expr **ep2)
-{
-#define e1 (*ep1)
-#define e2 (*ep2)
-       if (e1->type == type) {
-               expr_extract_eq(type, ep, &e1->left.expr, &e2);
-               expr_extract_eq(type, ep, &e1->right.expr, &e2);
-               return;
-       }
-       if (e2->type == type) {
-               expr_extract_eq(type, ep, ep1, &e2->left.expr);
-               expr_extract_eq(type, ep, ep1, &e2->right.expr);
-               return;
-       }
-       if (expr_eq(e1, e2)) {
-               *ep = *ep ? expr_alloc_two(type, *ep, e1) : e1;
-               expr_free(e2);
-               if (type == E_AND) {
-                       e1 = expr_alloc_symbol(&symbol_yes);
-                       e2 = expr_alloc_symbol(&symbol_yes);
-               } else if (type == E_OR) {
-                       e1 = expr_alloc_symbol(&symbol_no);
-                       e2 = expr_alloc_symbol(&symbol_no);
-               }
-       }
-#undef e1
-#undef e2
-}
-
 struct expr *expr_trans_compare(struct expr *e, enum expr_type type, struct symbol *sym)
 {
        struct expr *e1, *e2;
@@ -914,6 +839,10 @@ struct expr *expr_trans_compare(struct expr *e, enum expr_type type, struct symb
        case E_NOT:
                return expr_trans_compare(e->left.expr, type == E_EQUAL ? E_UNEQUAL : E_EQUAL, sym);
        case E_UNEQUAL:
+       case E_LTH:
+       case E_LEQ:
+       case E_GTH:
+       case E_GEQ:
        case E_EQUAL:
                if (type == E_EQUAL) {
                        if (sym == &symbol_yes)
@@ -941,10 +870,57 @@ struct expr *expr_trans_compare(struct expr *e, enum expr_type type, struct symb
        return NULL;
 }
 
+enum string_value_kind {
+       k_string,
+       k_signed,
+       k_unsigned,
+       k_invalid
+};
+
+union string_value {
+       unsigned long long u;
+       signed long long s;
+};
+
+static enum string_value_kind expr_parse_string(const char *str,
+                                               enum symbol_type type,
+                                               union string_value *val)
+{
+       char *tail;
+       enum string_value_kind kind;
+
+       errno = 0;
+       switch (type) {
+       case S_BOOLEAN:
+       case S_TRISTATE:
+               return k_string;
+       case S_INT:
+               val->s = strtoll(str, &tail, 10);
+               kind = k_signed;
+               break;
+       case S_HEX:
+               val->u = strtoull(str, &tail, 16);
+               kind = k_unsigned;
+               break;
+       case S_STRING:
+       case S_UNKNOWN:
+               val->s = strtoll(str, &tail, 0);
+               kind = k_signed;
+               break;
+       default:
+               return k_invalid;
+       }
+       return !errno && !*tail && tail > str && isxdigit(tail[-1])
+              ? kind : k_string;
+}
+
 tristate expr_calc_value(struct expr *e)
 {
        tristate val1, val2;
        const char *str1, *str2;
+       enum string_value_kind k1 = k_string, k2 = k_string;
+       union string_value lval = {}, rval = {};
+       int res;
 
        if (!e)
                return yes;
@@ -965,21 +941,57 @@ tristate expr_calc_value(struct expr *e)
                val1 = expr_calc_value(e->left.expr);
                return EXPR_NOT(val1);
        case E_EQUAL:
-               sym_calc_value(e->left.sym);
-               sym_calc_value(e->right.sym);
-               str1 = sym_get_string_value(e->left.sym);
-               str2 = sym_get_string_value(e->right.sym);
-               return !strcmp(str1, str2) ? yes : no;
+       case E_GEQ:
+       case E_GTH:
+       case E_LEQ:
+       case E_LTH:
        case E_UNEQUAL:
-               sym_calc_value(e->left.sym);
-               sym_calc_value(e->right.sym);
-               str1 = sym_get_string_value(e->left.sym);
-               str2 = sym_get_string_value(e->right.sym);
-               return !strcmp(str1, str2) ? no : yes;
+               break;
        default:
                printf("expr_calc_value: %d?\n", e->type);
                return no;
        }
+
+       sym_calc_value(e->left.sym);
+       sym_calc_value(e->right.sym);
+       str1 = sym_get_string_value(e->left.sym);
+       str2 = sym_get_string_value(e->right.sym);
+
+       if (e->left.sym->type != S_STRING || e->right.sym->type != S_STRING) {
+               k1 = expr_parse_string(str1, e->left.sym->type, &lval);
+               k2 = expr_parse_string(str2, e->right.sym->type, &rval);
+       }
+
+       if (k1 == k_string || k2 == k_string)
+               res = strcmp(str1, str2);
+       else if (k1 == k_invalid || k2 == k_invalid) {
+               if (e->type != E_EQUAL && e->type != E_UNEQUAL) {
+                       printf("Cannot compare \"%s\" and \"%s\"\n", str1, str2);
+                       return no;
+               }
+               res = strcmp(str1, str2);
+       } else if (k1 == k_unsigned || k2 == k_unsigned)
+               res = (lval.u > rval.u) - (lval.u < rval.u);
+       else /* if (k1 == k_signed && k2 == k_signed) */
+               res = (lval.s > rval.s) - (lval.s < rval.s);
+
+       switch(e->type) {
+       case E_EQUAL:
+               return res ? no : yes;
+       case E_GEQ:
+               return res >= 0 ? yes : no;
+       case E_GTH:
+               return res > 0 ? yes : no;
+       case E_LEQ:
+               return res <= 0 ? yes : no;
+       case E_LTH:
+               return res < 0 ? yes : no;
+       case E_UNEQUAL:
+               return res ? yes : no;
+       default:
+               printf("expr_calc_value: relation %d?\n", e->type);
+               return no;
+       }
 }
 
 static int expr_compare_type(enum expr_type t1, enum expr_type t2)
@@ -987,6 +999,12 @@ static int expr_compare_type(enum expr_type t1, enum expr_type t2)
        if (t1 == t2)
                return 0;
        switch (t1) {
+       case E_LEQ:
+       case E_LTH:
+       case E_GEQ:
+       case E_GTH:
+               if (t2 == E_EQUAL || t2 == E_UNEQUAL)
+                       return 1;
        case E_EQUAL:
        case E_UNEQUAL:
                if (t2 == E_NOT)
@@ -1080,6 +1098,24 @@ void expr_print(struct expr *e, void (*fn)(void *, struct symbol *, const char *
                fn(data, NULL, "=");
                fn(data, e->right.sym, e->right.sym->name);
                break;
+       case E_LEQ:
+       case E_LTH:
+               if (e->left.sym->name)
+                       fn(data, e->left.sym, e->left.sym->name);
+               else
+                       fn(data, NULL, "<choice>");
+               fn(data, NULL, e->type == E_LEQ ? "<=" : "<");
+               fn(data, e->right.sym, e->right.sym->name);
+               break;
+       case E_GEQ:
+       case E_GTH:
+               if (e->left.sym->name)
+                       fn(data, e->left.sym, e->left.sym->name);
+               else
+                       fn(data, NULL, "<choice>");
+               fn(data, NULL, e->type == E_GEQ ? ">=" : ">");
+               fn(data, e->right.sym, e->right.sym->name);
+               break;
        case E_UNEQUAL:
                if (e->left.sym->name)
                        fn(data, e->left.sym, e->left.sym->name);
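
The expr.c hunks above make relational operators compare int/hex symbol values numerically, falling back to strcmp() for string, boolean and tristate symbols. A minimal standalone sketch of that behaviour follows; it is not part of the patch, and parse_value(), k_str and friends are illustrative stand-ins for expr_parse_string() and the three-way comparison in expr_calc_value() (only the S_INT/S_HEX bases are mirrored here):

/* Illustrative stand-ins for expr_parse_string() and the comparison in
 * expr_calc_value(); compiles standalone, e.g. "cc -o demo demo.c". */
#include <ctype.h>
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

enum kind { k_str, k_signed_num, k_unsigned_num };

union num { unsigned long long u; signed long long s; };

/* Classify a symbol's string value as the patch does for S_INT (base 10)
 * and S_HEX (base 16); anything unparseable degrades to a plain string. */
static enum kind parse_value(const char *str, int base, union num *val)
{
	char *tail;

	errno = 0;
	if (base == 16)
		val->u = strtoull(str, &tail, 16);
	else
		val->s = strtoll(str, &tail, base);
	if (errno || *tail || tail == str || !isxdigit(tail[-1]))
		return k_str;
	return base == 16 ? k_unsigned_num : k_signed_num;
}

int main(void)
{
	const char *l = "0x20", *r = "0x100";
	union num lv, rv;
	enum kind kl = parse_value(l, 16, &lv);
	enum kind kr = parse_value(r, 16, &rv);
	int res;

	if (kl == k_str || kr == k_str)
		res = strcmp(l, r);			/* string fallback */
	else if (kl == k_unsigned_num || kr == k_unsigned_num)
		res = (lv.u > rv.u) - (lv.u < rv.u);	/* numeric compare */
	else
		res = (lv.s > rv.s) - (lv.s < rv.s);

	/* 0x20 < 0x100 numerically, so a "FOO < BAR" test yields 'y' here
	 * even though strcmp("0x20", "0x100") is positive. */
	printf("res = %d -> FOO < BAR is %s\n", res, res < 0 ? "y" : "n");
	return 0;
}

In Kconfig sources this is what lets an expression such as "depends on FOO >= 0x1000" (FOO being a hypothetical hex symbol) act as a numeric test rather than a lexicographic one.
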
index a2fc96a2bd2cf84115b8009b097ea772ab53dcdf..973b6f73336829a6290a0d2bb15a841086d9a18c 100644 (file)
@@ -29,7 +29,9 @@ typedef enum tristate {
 } tristate;
 
 enum expr_type {
-       E_NONE, E_OR, E_AND, E_NOT, E_EQUAL, E_UNEQUAL, E_LIST, E_SYMBOL, E_RANGE
+       E_NONE, E_OR, E_AND, E_NOT,
+       E_EQUAL, E_UNEQUAL, E_LTH, E_LEQ, E_GTH, E_GEQ,
+       E_LIST, E_SYMBOL, E_RANGE
 };
 
 union expr_data {
index 6731377f9bb2546f3b303d4d89df466e8b73b045..70c5ee189dce7c7d573c044117d3f63e4450cbcb 100644 (file)
@@ -1166,6 +1166,10 @@ static struct symbol *sym_check_expr_deps(struct expr *e)
        case E_NOT:
                return sym_check_expr_deps(e->left.expr);
        case E_EQUAL:
+       case E_GEQ:
+       case E_GTH:
+       case E_LEQ:
+       case E_LTH:
        case E_UNEQUAL:
                sym = sym_check_deps(e->left.sym);
                if (sym)
index 6c62d93b4ffbd018807993788fb78848d176d67d..200a3fe3009153bba22742e862f6114f9e7f5fc8 100644 (file)
@@ -122,6 +122,10 @@ n  [A-Za-z0-9_]
        "!"     return T_NOT;
        "="     return T_EQUAL;
        "!="    return T_UNEQUAL;
+       "<="    return T_LESS_EQUAL;
+       ">="    return T_GREATER_EQUAL;
+       "<"     return T_LESS;
+       ">"     return T_GREATER;
        \"|\'   {
                str = yytext[0];
                new_string();
@@ -141,7 +145,12 @@ n  [A-Za-z0-9_]
        }
        #.*     /* comment */
        \\\n    current_file->lineno++;
-       .
+       [[:blank:]]+
+       .       {
+               fprintf(stderr,
+                       "%s:%d:warning: ignoring unsupported character '%c'\n",
+                       zconf_curname(), zconf_lineno(), *yytext);
+       }
        <<EOF>> {
                BEGIN(INITIAL);
        }
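
The zconf.l hunk above is what exposes the new syntax to the parser: "<=", ">=", "<" and ">" now come back as T_LESS_EQUAL, T_GREATER_EQUAL, T_LESS and T_GREATER, and stray characters are reported instead of being silently eaten. A rough standalone sketch of that lexeme-to-token mapping is shown below; the table and scan loop are illustrative only, since flex generates the real scanner:

/* Illustrative lexeme-to-token mapping for the operators zconf.l accepts. */
#include <stdio.h>
#include <string.h>

static const struct {
	const char *lexeme;
	const char *token;
} ops[] = {
	{ "<=", "T_LESS_EQUAL" },
	{ ">=", "T_GREATER_EQUAL" },
	{ "!=", "T_UNEQUAL" },
	{ "<",  "T_LESS" },
	{ ">",  "T_GREATER" },
	{ "=",  "T_EQUAL" },
};

int main(void)
{
	const char *p = "<= >= != < > = ?";

	while (*p) {
		size_t i, n = sizeof(ops) / sizeof(ops[0]);

		if (*p == ' ') {
			p++;
			continue;
		}
		for (i = 0; i < n; i++) {
			size_t len = strlen(ops[i].lexeme);

			if (!strncmp(p, ops[i].lexeme, len)) {
				printf("%-2s -> %s\n", ops[i].lexeme, ops[i].token);
				p += len;
				break;
			}
		}
		if (i == n) {
			/* mirrors the new catch-all rule: warn, then skip */
			fprintf(stderr, "warning: ignoring unsupported character '%c'\n", *p);
			p++;
		}
	}
	return 0;
}

The new catch-all rule also means a stray character in a Kconfig line now produces a file:line warning rather than disappearing without trace.
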
index 349a7f24315b1d1c4887962626365fd749fdae02..dd4e86c825210775cd9282eae71f489f80adec07 100644 (file)
@@ -365,323 +365,354 @@ int zconflineno = 1;
 
 extern char *zconftext;
 #define yytext_ptr zconftext
-static yyconst flex_int16_t yy_nxt[][17] =
+static yyconst flex_int16_t yy_nxt[][19] =
     {
     {
         0,    0,    0,    0,    0,    0,    0,    0,    0,    0,
-        0,    0,    0,    0,    0,    0,    0
+        0,    0,    0,    0,    0,    0,    0,    0,    0
     },
 
     {
        11,   12,   13,   14,   12,   12,   15,   12,   12,   12,
-       12,   12,   12,   12,   12,   12,   12
+       12,   12,   12,   12,   12,   12,   12,   12,   12
     },
 
     {
        11,   12,   13,   14,   12,   12,   15,   12,   12,   12,
-       12,   12,   12,   12,   12,   12,   12
+       12,   12,   12,   12,   12,   12,   12,   12,   12
     },
 
     {
        11,   16,   16,   17,   16,   16,   16,   16,   16,   16,
-       16,   16,   16,   18,   16,   16,   16
+       16,   16,   16,   18,   16,   16,   16,   16,   16
     },
 
     {
        11,   16,   16,   17,   16,   16,   16,   16,   16,   16,
-       16,   16,   16,   18,   16,   16,   16
+       16,   16,   16,   18,   16,   16,   16,   16,   16
 
     },
 
     {
        11,   19,   20,   21,   19,   19,   19,   19,   19,   19,
-       19,   19,   19,   19,   19,   19,   19
+       19,   19,   19,   19,   19,   19,   19,   19,   19
     },
 
     {
        11,   19,   20,   21,   19,   19,   19,   19,   19,   19,
-       19,   19,   19,   19,   19,   19,   19
+       19,   19,   19,   19,   19,   19,   19,   19,   19
     },
 
     {
        11,   22,   22,   23,   22,   24,   22,   22,   24,   22,
-       22,   22,   22,   22,   22,   25,   22
+       22,   22,   22,   22,   22,   22,   22,   25,   22
     },
 
     {
        11,   22,   22,   23,   22,   24,   22,   22,   24,   22,
-       22,   22,   22,   22,   22,   25,   22
+       22,   22,   22,   22,   22,   22,   22,   25,   22
     },
 
     {
-       11,   26,   26,   27,   28,   29,   30,   31,   29,   32,
-       33,   34,   35,   35,   36,   37,   38
+       11,   26,   27,   28,   29,   30,   31,   32,   30,   33,
+       34,   35,   36,   36,   37,   38,   39,   40,   41
 
     },
 
     {
-       11,   26,   26,   27,   28,   29,   30,   31,   29,   32,
-       33,   34,   35,   35,   36,   37,   38
+       11,   26,   27,   28,   29,   30,   31,   32,   30,   33,
+       34,   35,   36,   36,   37,   38,   39,   40,   41
     },
 
     {
       -11,  -11,  -11,  -11,  -11,  -11,  -11,  -11,  -11,  -11,
-      -11,  -11,  -11,  -11,  -11,  -11,  -11
+      -11,  -11,  -11,  -11,  -11,  -11,  -11,  -11,  -11
     },
 
     {
        11,  -12,  -12,  -12,  -12,  -12,  -12,  -12,  -12,  -12,
-      -12,  -12,  -12,  -12,  -12,  -12,  -12
+      -12,  -12,  -12,  -12,  -12,  -12,  -12,  -12,  -12
     },
 
     {
-       11,  -13,   39,   40,  -13,  -13,   41,  -13,  -13,  -13,
-      -13,  -13,  -13,  -13,  -13,  -13,  -13
+       11,  -13,   42,   43,  -13,  -13,   44,  -13,  -13,  -13,
+      -13,  -13,  -13,  -13,  -13,  -13,  -13,  -13,  -13
     },
 
     {
        11,  -14,  -14,  -14,  -14,  -14,  -14,  -14,  -14,  -14,
-      -14,  -14,  -14,  -14,  -14,  -14,  -14
+      -14,  -14,  -14,  -14,  -14,  -14,  -14,  -14,  -14
 
     },
 
     {
-       11,   42,   42,   43,   42,   42,   42,   42,   42,   42,
-       42,   42,   42,   42,   42,   42,   42
+       11,   45,   45,   46,   45,   45,   45,   45,   45,   45,
+       45,   45,   45,   45,   45,   45,   45,   45,   45
     },
 
     {
        11,  -16,  -16,  -16,  -16,  -16,  -16,  -16,  -16,  -16,
-      -16,  -16,  -16,  -16,  -16,  -16,  -16
+      -16,  -16,  -16,  -16,  -16,  -16,  -16,  -16,  -16
     },
 
     {
        11,  -17,  -17,  -17,  -17,  -17,  -17,  -17,  -17,  -17,
-      -17,  -17,  -17,  -17,  -17,  -17,  -17
+      -17,  -17,  -17,  -17,  -17,  -17,  -17,  -17,  -17
     },
 
     {
        11,  -18,  -18,  -18,  -18,  -18,  -18,  -18,  -18,  -18,
-      -18,  -18,  -18,   44,  -18,  -18,  -18
+      -18,  -18,  -18,   47,  -18,  -18,  -18,  -18,  -18
     },
 
     {
-       11,   45,   45,  -19,   45,   45,   45,   45,   45,   45,
-       45,   45,   45,   45,   45,   45,   45
+       11,   48,   48,  -19,   48,   48,   48,   48,   48,   48,
+       48,   48,   48,   48,   48,   48,   48,   48,   48
 
     },
 
     {
-       11,  -20,   46,   47,  -20,  -20,  -20,  -20,  -20,  -20,
-      -20,  -20,  -20,  -20,  -20,  -20,  -20
+       11,  -20,   49,   50,  -20,  -20,  -20,  -20,  -20,  -20,
+      -20,  -20,  -20,  -20,  -20,  -20,  -20,  -20,  -20
     },
 
     {
-       11,   48,  -21,  -21,   48,   48,   48,   48,   48,   48,
-       48,   48,   48,   48,   48,   48,   48
+       11,   51,  -21,  -21,   51,   51,   51,   51,   51,   51,
+       51,   51,   51,   51,   51,   51,   51,   51,   51
     },
 
     {
-       11,   49,   49,   50,   49,  -22,   49,   49,  -22,   49,
-       49,   49,   49,   49,   49,  -22,   49
+       11,   52,   52,   53,   52,  -22,   52,   52,  -22,   52,
+       52,   52,   52,   52,   52,   52,   52,  -22,   52
     },
 
     {
        11,  -23,  -23,  -23,  -23,  -23,  -23,  -23,  -23,  -23,
-      -23,  -23,  -23,  -23,  -23,  -23,  -23
+      -23,  -23,  -23,  -23,  -23,  -23,  -23,  -23,  -23
     },
 
     {
        11,  -24,  -24,  -24,  -24,  -24,  -24,  -24,  -24,  -24,
-      -24,  -24,  -24,  -24,  -24,  -24,  -24
+      -24,  -24,  -24,  -24,  -24,  -24,  -24,  -24,  -24
 
     },
 
     {
-       11,   51,   51,   52,   51,   51,   51,   51,   51,   51,
-       51,   51,   51,   51,   51,   51,   51
+       11,   54,   54,   55,   54,   54,   54,   54,   54,   54,
+       54,   54,   54,   54,   54,   54,   54,   54,   54
     },
 
     {
        11,  -26,  -26,  -26,  -26,  -26,  -26,  -26,  -26,  -26,
-      -26,  -26,  -26,  -26,  -26,  -26,  -26
+      -26,  -26,  -26,  -26,  -26,  -26,  -26,  -26,  -26
     },
 
     {
-       11,  -27,  -27,  -27,  -27,  -27,  -27,  -27,  -27,  -27,
-      -27,  -27,  -27,  -27,  -27,  -27,  -27
+       11,  -27,   56,  -27,  -27,  -27,  -27,  -27,  -27,  -27,
+      -27,  -27,  -27,  -27,  -27,  -27,  -27,  -27,  -27
     },
 
     {
        11,  -28,  -28,  -28,  -28,  -28,  -28,  -28,  -28,  -28,
-      -28,  -28,  -28,  -28,   53,  -28,  -28
+      -28,  -28,  -28,  -28,  -28,  -28,  -28,  -28,  -28
     },
 
     {
        11,  -29,  -29,  -29,  -29,  -29,  -29,  -29,  -29,  -29,
-      -29,  -29,  -29,  -29,  -29,  -29,  -29
+      -29,  -29,  -29,  -29,  -29,   57,  -29,  -29,  -29
 
     },
 
     {
-       11,   54,   54,  -30,   54,   54,   54,   54,   54,   54,
-       54,   54,   54,   54,   54,   54,   54
+       11,  -30,  -30,  -30,  -30,  -30,  -30,  -30,  -30,  -30,
+      -30,  -30,  -30,  -30,  -30,  -30,  -30,  -30,  -30
     },
 
     {
-       11,  -31,  -31,  -31,  -31,  -31,  -31,   55,  -31,  -31,
-      -31,  -31,  -31,  -31,  -31,  -31,  -31
+       11,   58,   58,  -31,   58,   58,   58,   58,   58,   58,
+       58,   58,   58,   58,   58,   58,   58,   58,   58
     },
 
     {
-       11,  -32,  -32,  -32,  -32,  -32,  -32,  -32,  -32,  -32,
-      -32,  -32,  -32,  -32,  -32,  -32,  -32
+       11,  -32,  -32,  -32,  -32,  -32,  -32,   59,  -32,  -32,
+      -32,  -32,  -32,  -32,  -32,  -32,  -32,  -32,  -32
     },
 
     {
        11,  -33,  -33,  -33,  -33,  -33,  -33,  -33,  -33,  -33,
-      -33,  -33,  -33,  -33,  -33,  -33,  -33
+      -33,  -33,  -33,  -33,  -33,  -33,  -33,  -33,  -33
     },
 
     {
        11,  -34,  -34,  -34,  -34,  -34,  -34,  -34,  -34,  -34,
-      -34,   56,   57,   57,  -34,  -34,  -34
+      -34,  -34,  -34,  -34,  -34,  -34,  -34,  -34,  -34
 
     },
 
     {
        11,  -35,  -35,  -35,  -35,  -35,  -35,  -35,  -35,  -35,
-      -35,   57,   57,   57,  -35,  -35,  -35
+      -35,   60,   61,   61,  -35,  -35,  -35,  -35,  -35
     },
 
     {
        11,  -36,  -36,  -36,  -36,  -36,  -36,  -36,  -36,  -36,
-      -36,  -36,  -36,  -36,  -36,  -36,  -36
+      -36,   61,   61,   61,  -36,  -36,  -36,  -36,  -36
     },
 
     {
-       11,  -37,  -37,   58,  -37,  -37,  -37,  -37,  -37,  -37,
-      -37,  -37,  -37,  -37,  -37,  -37,  -37
+       11,  -37,  -37,  -37,  -37,  -37,  -37,  -37,  -37,  -37,
+      -37,  -37,  -37,  -37,  -37,   62,  -37,  -37,  -37
     },
 
     {
        11,  -38,  -38,  -38,  -38,  -38,  -38,  -38,  -38,  -38,
-      -38,  -38,  -38,  -38,  -38,  -38,   59
+      -38,  -38,  -38,  -38,  -38,  -38,  -38,  -38,  -38
     },
 
     {
-       11,  -39,   39,   40,  -39,  -39,   41,  -39,  -39,  -39,
-      -39,  -39,  -39,  -39,  -39,  -39,  -39
+       11,  -39,  -39,  -39,  -39,  -39,  -39,  -39,  -39,  -39,
+      -39,  -39,  -39,  -39,  -39,   63,  -39,  -39,  -39
 
     },
 
     {
-       11,  -40,  -40,  -40,  -40,  -40,  -40,  -40,  -40,  -40,
-      -40,  -40,  -40,  -40,  -40,  -40,  -40
+       11,  -40,  -40,   64,  -40,  -40,  -40,  -40,  -40,  -40,
+      -40,  -40,  -40,  -40,  -40,  -40,  -40,  -40,  -40
     },
 
     {
-       11,   42,   42,   43,   42,   42,   42,   42,   42,   42,
-       42,   42,   42,   42,   42,   42,   42
+       11,  -41,  -41,  -41,  -41,  -41,  -41,  -41,  -41,  -41,
+      -41,  -41,  -41,  -41,  -41,  -41,  -41,  -41,   65
     },
 
     {
-       11,   42,   42,   43,   42,   42,   42,   42,   42,   42,
-       42,   42,   42,   42,   42,   42,   42
+       11,  -42,   42,   43,  -42,  -42,   44,  -42,  -42,  -42,
+      -42,  -42,  -42,  -42,  -42,  -42,  -42,  -42,  -42
     },
 
     {
        11,  -43,  -43,  -43,  -43,  -43,  -43,  -43,  -43,  -43,
-      -43,  -43,  -43,  -43,  -43,  -43,  -43
+      -43,  -43,  -43,  -43,  -43,  -43,  -43,  -43,  -43
     },
 
     {
-       11,  -44,  -44,  -44,  -44,  -44,  -44,  -44,  -44,  -44,
-      -44,  -44,  -44,   44,  -44,  -44,  -44
+       11,   45,   45,   46,   45,   45,   45,   45,   45,   45,
+       45,   45,   45,   45,   45,   45,   45,   45,   45
 
     },
 
     {
-       11,   45,   45,  -45,   45,   45,   45,   45,   45,   45,
-       45,   45,   45,   45,   45,   45,   45
+       11,   45,   45,   46,   45,   45,   45,   45,   45,   45,
+       45,   45,   45,   45,   45,   45,   45,   45,   45
     },
 
     {
-       11,  -46,   46,   47,  -46,  -46,  -46,  -46,  -46,  -46,
-      -46,  -46,  -46,  -46,  -46,  -46,  -46
+       11,  -46,  -46,  -46,  -46,  -46,  -46,  -46,  -46,  -46,
+      -46,  -46,  -46,  -46,  -46,  -46,  -46,  -46,  -46
     },
 
     {
-       11,   48,  -47,  -47,   48,   48,   48,   48,   48,   48,
-       48,   48,   48,   48,   48,   48,   48
+       11,  -47,  -47,  -47,  -47,  -47,  -47,  -47,  -47,  -47,
+      -47,  -47,  -47,   47,  -47,  -47,  -47,  -47,  -47
     },
 
     {
-       11,  -48,  -48,  -48,  -48,  -48,  -48,  -48,  -48,  -48,
-      -48,  -48,  -48,  -48,  -48,  -48,  -48
+       11,   48,   48,  -48,   48,   48,   48,   48,   48,   48,
+       48,   48,   48,   48,   48,   48,   48,   48,   48
     },
 
     {
-       11,   49,   49,   50,   49,  -49,   49,   49,  -49,   49,
-       49,   49,   49,   49,   49,  -49,   49
+       11,  -49,   49,   50,  -49,  -49,  -49,  -49,  -49,  -49,
+      -49,  -49,  -49,  -49,  -49,  -49,  -49,  -49,  -49
 
     },
 
     {
-       11,  -50,  -50,  -50,  -50,  -50,  -50,  -50,  -50,  -50,
-      -50,  -50,  -50,  -50,  -50,  -50,  -50
+       11,   51,  -50,  -50,   51,   51,   51,   51,   51,   51,
+       51,   51,   51,   51,   51,   51,   51,   51,   51
     },
 
     {
-       11,  -51,  -51,   52,  -51,  -51,  -51,  -51,  -51,  -51,
-      -51,  -51,  -51,  -51,  -51,  -51,  -51
+       11,  -51,  -51,  -51,  -51,  -51,  -51,  -51,  -51,  -51,
+      -51,  -51,  -51,  -51,  -51,  -51,  -51,  -51,  -51
     },
 
     {
-       11,  -52,  -52,  -52,  -52,  -52,  -52,  -52,  -52,  -52,
-      -52,  -52,  -52,  -52,  -52,  -52,  -52
+       11,   52,   52,   53,   52,  -52,   52,   52,  -52,   52,
+       52,   52,   52,   52,   52,   52,   52,  -52,   52
     },
 
     {
        11,  -53,  -53,  -53,  -53,  -53,  -53,  -53,  -53,  -53,
-      -53,  -53,  -53,  -53,  -53,  -53,  -53
+      -53,  -53,  -53,  -53,  -53,  -53,  -53,  -53,  -53
     },
 
     {
-       11,   54,   54,  -54,   54,   54,   54,   54,   54,   54,
-       54,   54,   54,   54,   54,   54,   54
+       11,  -54,  -54,   55,  -54,  -54,  -54,  -54,  -54,  -54,
+      -54,  -54,  -54,  -54,  -54,  -54,  -54,  -54,  -54
 
     },
 
     {
        11,  -55,  -55,  -55,  -55,  -55,  -55,  -55,  -55,  -55,
-      -55,  -55,  -55,  -55,  -55,  -55,  -55
+      -55,  -55,  -55,  -55,  -55,  -55,  -55,  -55,  -55
     },
 
     {
-       11,  -56,  -56,  -56,  -56,  -56,  -56,  -56,  -56,  -56,
-      -56,   60,   57,   57,  -56,  -56,  -56
+       11,  -56,   56,  -56,  -56,  -56,  -56,  -56,  -56,  -56,
+      -56,  -56,  -56,  -56,  -56,  -56,  -56,  -56,  -56
     },
 
     {
        11,  -57,  -57,  -57,  -57,  -57,  -57,  -57,  -57,  -57,
-      -57,   57,   57,   57,  -57,  -57,  -57
+      -57,  -57,  -57,  -57,  -57,  -57,  -57,  -57,  -57
     },
 
     {
-       11,  -58,  -58,  -58,  -58,  -58,  -58,  -58,  -58,  -58,
-      -58,  -58,  -58,  -58,  -58,  -58,  -58
+       11,   58,   58,  -58,   58,   58,   58,   58,   58,   58,
+       58,   58,   58,   58,   58,   58,   58,   58,   58
     },
 
     {
        11,  -59,  -59,  -59,  -59,  -59,  -59,  -59,  -59,  -59,
-      -59,  -59,  -59,  -59,  -59,  -59,  -59
+      -59,  -59,  -59,  -59,  -59,  -59,  -59,  -59,  -59
 
     },
 
     {
        11,  -60,  -60,  -60,  -60,  -60,  -60,  -60,  -60,  -60,
-      -60,   57,   57,   57,  -60,  -60,  -60
+      -60,   66,   61,   61,  -60,  -60,  -60,  -60,  -60
+    },
+
+    {
+       11,  -61,  -61,  -61,  -61,  -61,  -61,  -61,  -61,  -61,
+      -61,   61,   61,   61,  -61,  -61,  -61,  -61,  -61
+    },
+
+    {
+       11,  -62,  -62,  -62,  -62,  -62,  -62,  -62,  -62,  -62,
+      -62,  -62,  -62,  -62,  -62,  -62,  -62,  -62,  -62
+    },
+
+    {
+       11,  -63,  -63,  -63,  -63,  -63,  -63,  -63,  -63,  -63,
+      -63,  -63,  -63,  -63,  -63,  -63,  -63,  -63,  -63
+    },
+
+    {
+       11,  -64,  -64,  -64,  -64,  -64,  -64,  -64,  -64,  -64,
+      -64,  -64,  -64,  -64,  -64,  -64,  -64,  -64,  -64
+
+    },
+
+    {
+       11,  -65,  -65,  -65,  -65,  -65,  -65,  -65,  -65,  -65,
+      -65,  -65,  -65,  -65,  -65,  -65,  -65,  -65,  -65
+    },
+
+    {
+       11,  -66,  -66,  -66,  -66,  -66,  -66,  -66,  -66,  -66,
+      -66,   61,   61,   61,  -66,  -66,  -66,  -66,  -66
     },
 
     } ;
@@ -701,8 +732,8 @@ static void yy_fatal_error (yyconst char msg[]  );
        *yy_cp = '\0'; \
        (yy_c_buf_p) = yy_cp;
 
-#define YY_NUM_RULES 33
-#define YY_END_OF_BUFFER 34
+#define YY_NUM_RULES 38
+#define YY_END_OF_BUFFER 39
 /* This struct is not used in this scanner,
    but its presence is necessary. */
 struct yy_trans_info
@@ -710,14 +741,15 @@ struct yy_trans_info
        flex_int32_t yy_verify;
        flex_int32_t yy_nxt;
        };
-static yyconst flex_int16_t yy_accept[61] =
+static yyconst flex_int16_t yy_accept[67] =
     {   0,
         0,    0,    0,    0,    0,    0,    0,    0,    0,    0,
-       34,    5,    4,    2,    3,    7,    8,    6,   32,   29,
-       31,   24,   28,   27,   26,   22,   17,   13,   16,   20,
-       22,   11,   12,   19,   19,   14,   22,   22,    4,    2,
-        3,    3,    1,    6,   32,   29,   31,   30,   24,   23,
-       26,   25,   15,   20,    9,   19,   19,   21,   10,   18
+       39,    5,    4,    2,    3,    7,    8,    6,   37,   34,
+       36,   29,   33,   32,   31,   27,   26,   21,   13,   20,
+       24,   27,   11,   12,   23,   23,   18,   14,   19,   27,
+       27,    4,    2,    3,    3,    1,    6,   37,   34,   36,
+       35,   29,   28,   31,   30,   26,   15,   24,    9,   23,
+       23,   16,   17,   25,   10,   22
     } ;
 
 static yyconst flex_int32_t yy_ec[256] =
@@ -727,15 +759,15 @@ static yyconst flex_int32_t yy_ec[256] =
         1,    1,    1,    1,    1,    1,    1,    1,    1,    1,
         1,    2,    4,    5,    6,    1,    1,    7,    8,    9,
        10,    1,    1,    1,   11,   12,   12,   13,   13,   13,
-       13,   13,   13,   13,   13,   13,   13,    1,    1,    1,
-       14,    1,    1,    1,   13,   13,   13,   13,   13,   13,
+       13,   13,   13,   13,   13,   13,   13,    1,    1,   14,
+       15,   16,    1,    1,   13,   13,   13,   13,   13,   13,
        13,   13,   13,   13,   13,   13,   13,   13,   13,   13,
        13,   13,   13,   13,   13,   13,   13,   13,   13,   13,
-        1,   15,    1,    1,   13,    1,   13,   13,   13,   13,
+        1,   17,    1,    1,   13,    1,   13,   13,   13,   13,
 
        13,   13,   13,   13,   13,   13,   13,   13,   13,   13,
        13,   13,   13,   13,   13,   13,   13,   13,   13,   13,
-       13,   13,    1,   16,    1,    1,    1,    1,    1,    1,
+       13,   13,    1,   18,    1,    1,    1,    1,    1,    1,
         1,    1,    1,    1,    1,    1,    1,    1,    1,    1,
         1,    1,    1,    1,    1,    1,    1,    1,    1,    1,
         1,    1,    1,    1,    1,    1,    1,    1,    1,    1,
@@ -920,7 +952,7 @@ static int input (void );
 /* This used to be an fputs(), but since the string might contain NUL's,
  * we now use fwrite().
  */
-#define ECHO do { if (fwrite( zconftext, zconfleng, 1, zconfout )) {} } while (0)
+#define ECHO fwrite( zconftext, zconfleng, 1, zconfout )
 #endif
 
 /* Gets input and stuffs it into "buf".  number of characters read, or YY_NULL,
@@ -1142,22 +1174,38 @@ return T_UNEQUAL;
        YY_BREAK
 case 16:
 YY_RULE_SETUP
+return T_LESS_EQUAL;
+       YY_BREAK
+case 17:
+YY_RULE_SETUP
+return T_GREATER_EQUAL;
+       YY_BREAK
+case 18:
+YY_RULE_SETUP
+return T_LESS;
+       YY_BREAK
+case 19:
+YY_RULE_SETUP
+return T_GREATER;
+       YY_BREAK
+case 20:
+YY_RULE_SETUP
 {
                str = zconftext[0];
                new_string();
                BEGIN(STRING);
        }
        YY_BREAK
-case 17:
-/* rule 17 can match eol */
+case 21:
+/* rule 21 can match eol */
 YY_RULE_SETUP
 BEGIN(INITIAL); current_file->lineno++; return T_EOL;
        YY_BREAK
-case 18:
+case 22:
 YY_RULE_SETUP
 /* ignore */
        YY_BREAK
-case 19:
+case 23:
 YY_RULE_SETUP
 {
                const struct kconf_id *id = kconf_id_lookup(zconftext, zconfleng);
@@ -1170,18 +1218,26 @@ YY_RULE_SETUP
                return T_WORD;
        }
        YY_BREAK
-case 20:
+case 24:
 YY_RULE_SETUP
 /* comment */
        YY_BREAK
-case 21:
-/* rule 21 can match eol */
+case 25:
+/* rule 25 can match eol */
 YY_RULE_SETUP
 current_file->lineno++;
        YY_BREAK
-case 22:
+case 26:
 YY_RULE_SETUP
 
+       YY_BREAK
+case 27:
+YY_RULE_SETUP
+{
+               fprintf(stderr,
+                       "%s:%d:warning: ignoring unsupported character '%c'\n",
+                       zconf_curname(), zconf_lineno(), *zconftext);
+       }
        YY_BREAK
 case YY_STATE_EOF(PARAM):
 {
@@ -1189,8 +1245,8 @@ case YY_STATE_EOF(PARAM):
        }
        YY_BREAK
 
-case 23:
-/* rule 23 can match eol */
+case 28:
+/* rule 28 can match eol */
 *yy_cp = (yy_hold_char); /* undo effects of setting up zconftext */
 (yy_c_buf_p) = yy_cp -= 1;
 YY_DO_BEFORE_ACTION; /* set up zconftext again */
@@ -1201,14 +1257,14 @@ YY_RULE_SETUP
                return T_WORD_QUOTE;
        }
        YY_BREAK
-case 24:
+case 29:
 YY_RULE_SETUP
 {
                append_string(zconftext, zconfleng);
        }
        YY_BREAK
-case 25:
-/* rule 25 can match eol */
+case 30:
+/* rule 30 can match eol */
 *yy_cp = (yy_hold_char); /* undo effects of setting up zconftext */
 (yy_c_buf_p) = yy_cp -= 1;
 YY_DO_BEFORE_ACTION; /* set up zconftext again */
@@ -1219,13 +1275,13 @@ YY_RULE_SETUP
                return T_WORD_QUOTE;
        }
        YY_BREAK
-case 26:
+case 31:
 YY_RULE_SETUP
 {
                append_string(zconftext + 1, zconfleng - 1);
        }
        YY_BREAK
-case 27:
+case 32:
 YY_RULE_SETUP
 {
                if (str == zconftext[0]) {
@@ -1236,8 +1292,8 @@ YY_RULE_SETUP
                        append_string(zconftext, 1);
        }
        YY_BREAK
-case 28:
-/* rule 28 can match eol */
+case 33:
+/* rule 33 can match eol */
 YY_RULE_SETUP
 {
                printf("%s:%d:warning: multi-line strings not supported\n", zconf_curname(), zconf_lineno());
@@ -1252,7 +1308,7 @@ case YY_STATE_EOF(STRING):
        }
        YY_BREAK
 
-case 29:
+case 34:
 YY_RULE_SETUP
 {
                ts = 0;
@@ -1277,8 +1333,8 @@ YY_RULE_SETUP
                }
        }
        YY_BREAK
-case 30:
-/* rule 30 can match eol */
+case 35:
+/* rule 35 can match eol */
 *yy_cp = (yy_hold_char); /* undo effects of setting up zconftext */
 (yy_c_buf_p) = yy_cp -= 1;
 YY_DO_BEFORE_ACTION; /* set up zconftext again */
@@ -1289,15 +1345,15 @@ YY_RULE_SETUP
                return T_HELPTEXT;
        }
        YY_BREAK
-case 31:
-/* rule 31 can match eol */
+case 36:
+/* rule 36 can match eol */
 YY_RULE_SETUP
 {
                current_file->lineno++;
                append_string("\n", 1);
        }
        YY_BREAK
-case 32:
+case 37:
 YY_RULE_SETUP
 {
                while (zconfleng) {
@@ -1328,7 +1384,7 @@ case YY_STATE_EOF(COMMAND):
        yyterminate();
 }
        YY_BREAK
-case 33:
+case 38:
 YY_RULE_SETUP
 YY_FATAL_ERROR( "flex scanner jammed" );
        YY_BREAK
index de5e84ed3f96f824b63a1e5bd2e4da597996747f..7a4d658c20667d9f9d0df5facb5d534fa2c2f67e 100644 (file)
@@ -1,8 +1,8 @@
-/* A Bison parser, made by GNU Bison 2.5.  */
+/* A Bison parser, made by GNU Bison 2.5.1.  */
 
 /* Bison implementation for Yacc-like parsers in C
    
-      Copyright (C) 1984, 1989-1990, 2000-2011 Free Software Foundation, Inc.
+      Copyright (C) 1984, 1989-1990, 2000-2012 Free Software Foundation, Inc.
    
    This program is free software: you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
@@ -44,7 +44,7 @@
 #define YYBISON 1
 
 /* Bison version.  */
-#define YYBISON_VERSION "2.5"
+#define YYBISON_VERSION "2.5.1"
 
 /* Skeleton name.  */
 #define YYSKELETON_NAME "yacc.c"
@@ -108,6 +108,14 @@ static struct menu *current_menu, *current_entry;
 
 
 
+# ifndef YY_NULL
+#  if defined __cplusplus && 201103L <= __cplusplus
+#   define YY_NULL nullptr
+#  else
+#   define YY_NULL 0
+#  endif
+# endif
+
 /* Enabling traces.  */
 #ifndef YYDEBUG
 # define YYDEBUG 1
@@ -159,13 +167,17 @@ static struct menu *current_menu, *current_entry;
      T_WORD = 281,
      T_WORD_QUOTE = 282,
      T_UNEQUAL = 283,
-     T_CLOSE_PAREN = 284,
-     T_OPEN_PAREN = 285,
-     T_EOL = 286,
-     T_OR = 287,
-     T_AND = 288,
-     T_EQUAL = 289,
-     T_NOT = 290
+     T_LESS = 284,
+     T_LESS_EQUAL = 285,
+     T_GREATER = 286,
+     T_GREATER_EQUAL = 287,
+     T_CLOSE_PAREN = 288,
+     T_OPEN_PAREN = 289,
+     T_EOL = 290,
+     T_OR = 291,
+     T_AND = 292,
+     T_EQUAL = 293,
+     T_NOT = 294
    };
 #endif
 
@@ -304,6 +316,7 @@ YYID (yyi)
 #    if ! defined _ALLOCA_H && ! defined EXIT_SUCCESS && (defined __STDC__ || defined __C99__FUNC__ \
      || defined __cplusplus || defined _MSC_VER)
 #     include <stdlib.h> /* INFRINGES ON USER NAME SPACE */
+      /* Use EXIT_SUCCESS as a witness for stdlib.h.  */
 #     ifndef EXIT_SUCCESS
 #      define EXIT_SUCCESS 0
 #     endif
@@ -395,20 +408,20 @@ union yyalloc
 #endif
 
 #if defined YYCOPY_NEEDED && YYCOPY_NEEDED
-/* Copy COUNT objects from FROM to TO.  The source and destination do
+/* Copy COUNT objects from SRC to DST.  The source and destination do
    not overlap.  */
 # ifndef YYCOPY
 #  if defined __GNUC__ && 1 < __GNUC__
-#   define YYCOPY(To, From, Count) \
-      __builtin_memcpy (To, From, (Count) * sizeof (*(From)))
+#   define YYCOPY(Dst, Src, Count) \
+      __builtin_memcpy (Dst, Src, (Count) * sizeof (*(Src)))
 #  else
-#   define YYCOPY(To, From, Count)             \
-      do                                       \
-       {                                       \
-         YYSIZE_T yyi;                         \
-         for (yyi = 0; yyi < (Count); yyi++)   \
-           (To)[yyi] = (From)[yyi];            \
-       }                                       \
+#   define YYCOPY(Dst, Src, Count)              \
+      do                                        \
+        {                                       \
+          YYSIZE_T yyi;                         \
+          for (yyi = 0; yyi < (Count); yyi++)   \
+            (Dst)[yyi] = (Src)[yyi];            \
+        }                                       \
       while (YYID (0))
 #  endif
 # endif
@@ -417,20 +430,20 @@ union yyalloc
 /* YYFINAL -- State number of the termination state.  */
 #define YYFINAL  11
 /* YYLAST -- Last index in YYTABLE.  */
-#define YYLAST   290
+#define YYLAST   298
 
 /* YYNTOKENS -- Number of terminals.  */
-#define YYNTOKENS  36
+#define YYNTOKENS  40
 /* YYNNTS -- Number of nonterminals.  */
 #define YYNNTS  50
 /* YYNRULES -- Number of rules.  */
-#define YYNRULES  118
+#define YYNRULES  122
 /* YYNRULES -- Number of states.  */
-#define YYNSTATES  191
+#define YYNSTATES  199
 
 /* YYTRANSLATE(YYLEX) -- Bison symbol number corresponding to YYLEX.  */
 #define YYUNDEFTOK  2
-#define YYMAXUTOK   290
+#define YYMAXUTOK   294
 
 #define YYTRANSLATE(YYX)                                               \
   ((unsigned int) (YYX) <= YYMAXUTOK ? yytranslate[YYX] : YYUNDEFTOK)
@@ -467,7 +480,7 @@ static const yytype_uint8 yytranslate[] =
        5,     6,     7,     8,     9,    10,    11,    12,    13,    14,
       15,    16,    17,    18,    19,    20,    21,    22,    23,    24,
       25,    26,    27,    28,    29,    30,    31,    32,    33,    34,
-      35
+      35,    36,    37,    38,    39
 };
 
 #if YYDEBUG
@@ -486,64 +499,67 @@ static const yytype_uint16 yyprhs[] =
      235,   238,   241,   244,   248,   252,   255,   258,   261,   262,
      265,   268,   271,   276,   277,   280,   283,   286,   287,   290,
      292,   294,   297,   300,   303,   305,   308,   309,   312,   314,
-     318,   322,   326,   329,   333,   337,   339,   341,   342
+     318,   322,   326,   330,   334,   338,   342,   345,   349,   353,
+     355,   357,   358
 };
 
 /* YYRHS -- A `-1'-separated list of the rules' RHS.  */
 static const yytype_int8 yyrhs[] =
 {
-      37,     0,    -1,    81,    38,    -1,    38,    -1,    63,    39,
-      -1,    39,    -1,    -1,    39,    41,    -1,    39,    55,    -1,
-      39,    67,    -1,    39,    80,    -1,    39,    26,     1,    31,
-      -1,    39,    40,     1,    31,    -1,    39,     1,    31,    -1,
+      41,     0,    -1,    85,    42,    -1,    42,    -1,    67,    43,
+      -1,    43,    -1,    -1,    43,    45,    -1,    43,    59,    -1,
+      43,    71,    -1,    43,    84,    -1,    43,    26,     1,    35,
+      -1,    43,    44,     1,    35,    -1,    43,     1,    35,    -1,
       16,    -1,    18,    -1,    19,    -1,    21,    -1,    17,    -1,
-      22,    -1,    20,    -1,    23,    -1,    31,    -1,    61,    -1,
-      71,    -1,    44,    -1,    46,    -1,    69,    -1,    26,     1,
-      31,    -1,     1,    31,    -1,    10,    26,    31,    -1,    43,
-      47,    -1,    11,    26,    31,    -1,    45,    47,    -1,    -1,
-      47,    48,    -1,    47,    49,    -1,    47,    75,    -1,    47,
-      73,    -1,    47,    42,    -1,    47,    31,    -1,    19,    78,
-      31,    -1,    18,    79,    82,    31,    -1,    20,    83,    82,
-      31,    -1,    21,    26,    82,    31,    -1,    22,    84,    84,
-      82,    31,    -1,    24,    50,    31,    -1,    -1,    50,    26,
-      51,    -1,    -1,    34,    79,    -1,     7,    85,    31,    -1,
-      52,    56,    -1,    80,    -1,    53,    58,    54,    -1,    -1,
-      56,    57,    -1,    56,    75,    -1,    56,    73,    -1,    56,
-      31,    -1,    56,    42,    -1,    18,    79,    82,    31,    -1,
-      19,    78,    31,    -1,    17,    31,    -1,    20,    26,    82,
-      31,    -1,    -1,    58,    41,    -1,    14,    83,    81,    -1,
-      80,    -1,    59,    62,    60,    -1,    -1,    62,    41,    -1,
-      62,    67,    -1,    62,    55,    -1,     3,    79,    81,    -1,
-       4,    79,    31,    -1,    64,    76,    74,    -1,    80,    -1,
-      65,    68,    66,    -1,    -1,    68,    41,    -1,    68,    67,
-      -1,    68,    55,    -1,     6,    79,    31,    -1,     9,    79,
-      31,    -1,    70,    74,    -1,    12,    31,    -1,    72,    13,
-      -1,    -1,    74,    75,    -1,    74,    31,    -1,    74,    42,
-      -1,    16,    25,    83,    31,    -1,    -1,    76,    77,    -1,
-      76,    31,    -1,    23,    82,    -1,    -1,    79,    82,    -1,
-      26,    -1,    27,    -1,     5,    31,    -1,     8,    31,    -1,
-      15,    31,    -1,    31,    -1,    81,    31,    -1,    -1,    14,
-      83,    -1,    84,    -1,    84,    34,    84,    -1,    84,    28,
-      84,    -1,    30,    83,    29,    -1,    35,    83,    -1,    83,
-      32,    83,    -1,    83,    33,    83,    -1,    26,    -1,    27,
-      -1,    -1,    26,    -1
+      22,    -1,    20,    -1,    23,    -1,    35,    -1,    65,    -1,
+      75,    -1,    48,    -1,    50,    -1,    73,    -1,    26,     1,
+      35,    -1,     1,    35,    -1,    10,    26,    35,    -1,    47,
+      51,    -1,    11,    26,    35,    -1,    49,    51,    -1,    -1,
+      51,    52,    -1,    51,    53,    -1,    51,    79,    -1,    51,
+      77,    -1,    51,    46,    -1,    51,    35,    -1,    19,    82,
+      35,    -1,    18,    83,    86,    35,    -1,    20,    87,    86,
+      35,    -1,    21,    26,    86,    35,    -1,    22,    88,    88,
+      86,    35,    -1,    24,    54,    35,    -1,    -1,    54,    26,
+      55,    -1,    -1,    38,    83,    -1,     7,    89,    35,    -1,
+      56,    60,    -1,    84,    -1,    57,    62,    58,    -1,    -1,
+      60,    61,    -1,    60,    79,    -1,    60,    77,    -1,    60,
+      35,    -1,    60,    46,    -1,    18,    83,    86,    35,    -1,
+      19,    82,    35,    -1,    17,    35,    -1,    20,    26,    86,
+      35,    -1,    -1,    62,    45,    -1,    14,    87,    85,    -1,
+      84,    -1,    63,    66,    64,    -1,    -1,    66,    45,    -1,
+      66,    71,    -1,    66,    59,    -1,     3,    83,    85,    -1,
+       4,    83,    35,    -1,    68,    80,    78,    -1,    84,    -1,
+      69,    72,    70,    -1,    -1,    72,    45,    -1,    72,    71,
+      -1,    72,    59,    -1,     6,    83,    35,    -1,     9,    83,
+      35,    -1,    74,    78,    -1,    12,    35,    -1,    76,    13,
+      -1,    -1,    78,    79,    -1,    78,    35,    -1,    78,    46,
+      -1,    16,    25,    87,    35,    -1,    -1,    80,    81,    -1,
+      80,    35,    -1,    23,    86,    -1,    -1,    83,    86,    -1,
+      26,    -1,    27,    -1,     5,    35,    -1,     8,    35,    -1,
+      15,    35,    -1,    35,    -1,    85,    35,    -1,    -1,    14,
+      87,    -1,    88,    -1,    88,    29,    88,    -1,    88,    30,
+      88,    -1,    88,    31,    88,    -1,    88,    32,    88,    -1,
+      88,    38,    88,    -1,    88,    28,    88,    -1,    34,    87,
+      33,    -1,    39,    87,    -1,    87,    36,    87,    -1,    87,
+      37,    87,    -1,    26,    -1,    27,    -1,    -1,    26,    -1
 };
 
 /* YYRLINE[YYN] -- source line where rule number YYN was defined.  */
 static const yytype_uint16 yyrline[] =
 {
-       0,   103,   103,   103,   105,   105,   107,   109,   110,   111,
-     112,   113,   114,   118,   122,   122,   122,   122,   122,   122,
-     122,   122,   126,   127,   128,   129,   130,   131,   135,   136,
-     142,   150,   156,   164,   174,   176,   177,   178,   179,   180,
-     181,   184,   192,   198,   208,   214,   220,   223,   225,   236,
-     237,   242,   251,   256,   264,   267,   269,   270,   271,   272,
-     273,   276,   282,   293,   299,   309,   311,   316,   324,   332,
-     335,   337,   338,   339,   344,   351,   358,   363,   371,   374,
-     376,   377,   378,   381,   389,   396,   403,   409,   416,   418,
-     419,   420,   423,   431,   433,   434,   437,   444,   446,   451,
-     452,   455,   456,   457,   461,   462,   465,   466,   469,   470,
-     471,   472,   473,   474,   475,   478,   479,   482,   483
+       0,   108,   108,   108,   110,   110,   112,   114,   115,   116,
+     117,   118,   119,   123,   127,   127,   127,   127,   127,   127,
+     127,   127,   131,   132,   133,   134,   135,   136,   140,   141,
+     147,   155,   161,   169,   179,   181,   182,   183,   184,   185,
+     186,   189,   197,   203,   213,   219,   225,   228,   230,   241,
+     242,   247,   256,   261,   269,   272,   274,   275,   276,   277,
+     278,   281,   287,   298,   304,   314,   316,   321,   329,   337,
+     340,   342,   343,   344,   349,   356,   363,   368,   376,   379,
+     381,   382,   383,   386,   394,   401,   408,   414,   421,   423,
+     424,   425,   428,   436,   438,   439,   442,   449,   451,   456,
+     457,   460,   461,   462,   466,   467,   470,   471,   474,   475,
+     476,   477,   478,   479,   480,   481,   482,   483,   484,   487,
+     488,   491,   492
 };
 #endif
 
@@ -557,6 +573,7 @@ static const char *const yytname[] =
   "T_MENUCONFIG", "T_HELP", "T_HELPTEXT", "T_IF", "T_ENDIF", "T_DEPENDS",
   "T_OPTIONAL", "T_PROMPT", "T_TYPE", "T_DEFAULT", "T_SELECT", "T_RANGE",
   "T_VISIBLE", "T_OPTION", "T_ON", "T_WORD", "T_WORD_QUOTE", "T_UNEQUAL",
+  "T_LESS", "T_LESS_EQUAL", "T_GREATER", "T_GREATER_EQUAL",
   "T_CLOSE_PAREN", "T_OPEN_PAREN", "T_EOL", "T_OR", "T_AND", "T_EQUAL",
   "T_NOT", "$accept", "input", "start", "stmt_list", "option_name",
   "common_stmt", "option_error", "config_entry_start", "config_stmt",
@@ -568,7 +585,7 @@ static const char *const yytname[] =
   "menu_entry", "menu_end", "menu_stmt", "menu_block", "source_stmt",
   "comment", "comment_stmt", "help_start", "help", "depends_list",
   "depends", "visibility_list", "visible", "prompt_stmt_opt", "prompt",
-  "end", "nl", "if_expr", "expr", "symbol", "word_opt", 0
+  "end", "nl", "if_expr", "expr", "symbol", "word_opt", YY_NULL
 };
 #endif
 
@@ -580,25 +597,26 @@ static const yytype_uint16 yytoknum[] =
        0,   256,   257,   258,   259,   260,   261,   262,   263,   264,
      265,   266,   267,   268,   269,   270,   271,   272,   273,   274,
      275,   276,   277,   278,   279,   280,   281,   282,   283,   284,
-     285,   286,   287,   288,   289,   290
+     285,   286,   287,   288,   289,   290,   291,   292,   293,   294
 };
 # endif
 
 /* YYR1[YYN] -- Symbol number of symbol that rule YYN derives.  */
 static const yytype_uint8 yyr1[] =
 {
-       0,    36,    37,    37,    38,    38,    39,    39,    39,    39,
-      39,    39,    39,    39,    40,    40,    40,    40,    40,    40,
-      40,    40,    41,    41,    41,    41,    41,    41,    42,    42,
-      43,    44,    45,    46,    47,    47,    47,    47,    47,    47,
-      47,    48,    48,    48,    48,    48,    49,    50,    50,    51,
-      51,    52,    53,    54,    55,    56,    56,    56,    56,    56,
-      56,    57,    57,    57,    57,    58,    58,    59,    60,    61,
-      62,    62,    62,    62,    63,    64,    65,    66,    67,    68,
-      68,    68,    68,    69,    70,    71,    72,    73,    74,    74,
-      74,    74,    75,    76,    76,    76,    77,    78,    78,    79,
-      79,    80,    80,    80,    81,    81,    82,    82,    83,    83,
-      83,    83,    83,    83,    83,    84,    84,    85,    85
+       0,    40,    41,    41,    42,    42,    43,    43,    43,    43,
+      43,    43,    43,    43,    44,    44,    44,    44,    44,    44,
+      44,    44,    45,    45,    45,    45,    45,    45,    46,    46,
+      47,    48,    49,    50,    51,    51,    51,    51,    51,    51,
+      51,    52,    52,    52,    52,    52,    53,    54,    54,    55,
+      55,    56,    57,    58,    59,    60,    60,    60,    60,    60,
+      60,    61,    61,    61,    61,    62,    62,    63,    64,    65,
+      66,    66,    66,    66,    67,    68,    69,    70,    71,    72,
+      72,    72,    72,    73,    74,    75,    76,    77,    78,    78,
+      78,    78,    79,    80,    80,    80,    81,    82,    82,    83,
+      83,    84,    84,    84,    85,    85,    86,    86,    87,    87,
+      87,    87,    87,    87,    87,    87,    87,    87,    87,    88,
+      88,    89,    89
 };
 
 /* YYR2[YYN] -- Number of symbols composing right hand side of rule YYN.  */
@@ -615,7 +633,8 @@ static const yytype_uint8 yyr2[] =
        2,     2,     2,     3,     3,     2,     2,     2,     0,     2,
        2,     2,     4,     0,     2,     2,     2,     0,     2,     1,
        1,     2,     2,     2,     1,     2,     0,     2,     1,     3,
-       3,     3,     2,     3,     3,     1,     1,     0,     1
+       3,     3,     3,     3,     3,     3,     2,     3,     3,     1,
+       1,     0,     1
 };
 
 /* YYDEFACT[STATE-NAME] -- Default reduction number in state STATE-NUM.
@@ -624,72 +643,72 @@ static const yytype_uint8 yyr2[] =
 static const yytype_uint8 yydefact[] =
 {
        6,     0,   104,     0,     3,     0,     6,     6,    99,   100,
-       0,     1,     0,     0,     0,     0,   117,     0,     0,     0,
+       0,     1,     0,     0,     0,     0,   121,     0,     0,     0,
        0,     0,     0,    14,    18,    15,    16,    20,    17,    19,
       21,     0,    22,     0,     7,    34,    25,    34,    26,    55,
       65,     8,    70,    23,    93,    79,     9,    27,    88,    24,
-      10,     0,   105,     2,    74,    13,     0,   101,     0,   118,
-       0,   102,     0,     0,     0,   115,   116,     0,     0,     0,
+      10,     0,   105,     2,    74,    13,     0,   101,     0,   122,
+       0,   102,     0,     0,     0,   119,   120,     0,     0,     0,
      108,   103,     0,     0,     0,     0,     0,     0,     0,    88,
-       0,     0,    75,    83,    51,    84,    30,    32,     0,   112,
-       0,     0,    67,     0,     0,    11,    12,     0,     0,     0,
-       0,    97,     0,     0,     0,    47,     0,    40,    39,    35,
-      36,     0,    38,    37,     0,     0,    97,     0,    59,    60,
-      56,    58,    57,    66,    54,    53,    71,    73,    69,    72,
-      68,   106,    95,     0,    94,    80,    82,    78,    81,    77,
-      90,    91,    89,   111,   113,   114,   110,   109,    29,    86,
-       0,   106,     0,   106,   106,   106,     0,     0,     0,    87,
-      63,   106,     0,   106,     0,    96,     0,     0,    41,    98,
-       0,     0,   106,    49,    46,    28,     0,    62,     0,   107,
-      92,    42,    43,    44,     0,     0,    48,    61,    64,    45,
-      50
+       0,     0,    75,    83,    51,    84,    30,    32,     0,   116,
+       0,     0,    67,     0,     0,     0,     0,     0,     0,    11,
+      12,     0,     0,     0,     0,    97,     0,     0,     0,    47,
+       0,    40,    39,    35,    36,     0,    38,    37,     0,     0,
+      97,     0,    59,    60,    56,    58,    57,    66,    54,    53,
+      71,    73,    69,    72,    68,   106,    95,     0,    94,    80,
+      82,    78,    81,    77,    90,    91,    89,   115,   117,   118,
+     114,   109,   110,   111,   112,   113,    29,    86,     0,   106,
+       0,   106,   106,   106,     0,     0,     0,    87,    63,   106,
+       0,   106,     0,    96,     0,     0,    41,    98,     0,     0,
+     106,    49,    46,    28,     0,    62,     0,   107,    92,    42,
+      43,    44,     0,     0,    48,    61,    64,    45,    50
 };
 
 /* YYDEFGOTO[NTERM-NUM].  */
 static const yytype_int16 yydefgoto[] =
 {
-      -1,     3,     4,     5,    33,    34,   108,    35,    36,    37,
-      38,    74,   109,   110,   157,   186,    39,    40,   124,    41,
-      76,   120,    77,    42,   128,    43,    78,     6,    44,    45,
-     137,    46,    80,    47,    48,    49,   111,   112,    81,   113,
-      79,   134,   152,   153,    50,     7,   165,    69,    70,    60
+      -1,     3,     4,     5,    33,    34,   112,    35,    36,    37,
+      38,    74,   113,   114,   165,   194,    39,    40,   128,    41,
+      76,   124,    77,    42,   132,    43,    78,     6,    44,    45,
+     141,    46,    80,    47,    48,    49,   115,   116,    81,   117,
+      79,   138,   160,   161,    50,     7,   173,    69,    70,    60
 };
 
 /* YYPACT[STATE-NUM] -- Index in YYTABLE of the portion describing
    STATE-NUM.  */
-#define YYPACT_NINF -90
+#define YYPACT_NINF -91
 static const yytype_int16 yypact[] =
 {
-       4,    42,   -90,    96,   -90,   111,   -90,    15,   -90,   -90,
-      75,   -90,    82,    42,   104,    42,   110,   107,    42,   115,
-     125,    -4,   121,   -90,   -90,   -90,   -90,   -90,   -90,   -90,
-     -90,   162,   -90,   163,   -90,   -90,   -90,   -90,   -90,   -90,
-     -90,   -90,   -90,   -90,   -90,   -90,   -90,   -90,   -90,   -90,
-     -90,   139,   -90,   -90,   138,   -90,   142,   -90,   143,   -90,
-     152,   -90,   164,   167,   168,   -90,   -90,    -4,    -4,    77,
-     -18,   -90,   177,   185,    33,    71,   195,   247,   236,    -2,
-     236,   171,   -90,   -90,   -90,   -90,   -90,   -90,    41,   -90,
-      -4,    -4,   138,    97,    97,   -90,   -90,   186,   187,   194,
-      42,    42,    -4,   196,    97,   -90,   219,   -90,   -90,   -90,
-     -90,   210,   -90,   -90,   204,    42,    42,   199,   -90,   -90,
-     -90,   -90,   -90,   -90,   -90,   -90,   -90,   -90,   -90,   -90,
-     -90,   222,   -90,   223,   -90,   -90,   -90,   -90,   -90,   -90,
-     -90,   -90,   -90,   -90,   215,   -90,   -90,   -90,   -90,   -90,
-      -4,   222,   228,   222,    -5,   222,    97,    35,   229,   -90,
-     -90,   222,   232,   222,    -4,   -90,   135,   233,   -90,   -90,
-     234,   235,   222,   240,   -90,   -90,   237,   -90,   239,   -13,
-     -90,   -90,   -90,   -90,   244,    42,   -90,   -90,   -90,   -90,
-     -90
+      19,    37,   -91,    13,   -91,    79,   -91,    20,   -91,   -91,
+     -16,   -91,    21,    37,    25,    37,    41,    36,    37,    78,
+      83,    31,    56,   -91,   -91,   -91,   -91,   -91,   -91,   -91,
+     -91,   116,   -91,   127,   -91,   -91,   -91,   -91,   -91,   -91,
+     -91,   -91,   -91,   -91,   -91,   -91,   -91,   -91,   -91,   -91,
+     -91,   147,   -91,   -91,   105,   -91,   109,   -91,   111,   -91,
+     114,   -91,   136,   137,   142,   -91,   -91,    31,    31,    76,
+     254,   -91,   143,   146,    27,   115,   207,   258,   243,   -14,
+     243,   179,   -91,   -91,   -91,   -91,   -91,   -91,    -7,   -91,
+      31,    31,   105,    51,    51,    51,    51,    51,    51,   -91,
+     -91,   156,   168,   181,    37,    37,    31,   178,    51,   -91,
+     206,   -91,   -91,   -91,   -91,   196,   -91,   -91,   175,    37,
+      37,   185,   -91,   -91,   -91,   -91,   -91,   -91,   -91,   -91,
+     -91,   -91,   -91,   -91,   -91,   214,   -91,   230,   -91,   -91,
+     -91,   -91,   -91,   -91,   -91,   -91,   -91,   -91,   183,   -91,
+     -91,   -91,   -91,   -91,   -91,   -91,   -91,   -91,    31,   214,
+     194,   214,    45,   214,    51,    26,   195,   -91,   -91,   214,
+     197,   214,    31,   -91,   139,   208,   -91,   -91,   220,   224,
+     214,   222,   -91,   -91,   226,   -91,   227,   123,   -91,   -91,
+     -91,   -91,   235,    37,   -91,   -91,   -91,   -91,   -91
 };
 
 /* YYPGOTO[NTERM-NUM].  */
 static const yytype_int16 yypgoto[] =
 {
-     -90,   -90,   269,   271,   -90,    23,   -70,   -90,   -90,   -90,
-     -90,   243,   -90,   -90,   -90,   -90,   -90,   -90,   -90,   -48,
-     -90,   -90,   -90,   -90,   -90,   -90,   -90,   -90,   -90,   -90,
-     -90,   -20,   -90,   -90,   -90,   -90,   -90,   206,   205,   -68,
-     -90,   -90,   169,    -1,    27,    -7,   118,   -66,   -89,   -90
+     -91,   -91,   264,   268,   -91,    30,   -65,   -91,   -91,   -91,
+     -91,   238,   -91,   -91,   -91,   -91,   -91,   -91,   -91,   -12,
+     -91,   -91,   -91,   -91,   -91,   -91,   -91,   -91,   -91,   -91,
+     -91,    -5,   -91,   -91,   -91,   -91,   -91,   200,   209,   -61,
+     -91,   -91,   170,    -1,    65,     0,   118,   -66,   -90,   -91
 };
 
 /* YYTABLE[YYPACT[STATE-NUM]].  What to do in state STATE-NUM.  If
@@ -698,102 +717,102 @@ static const yytype_int16 yypgoto[] =
 #define YYTABLE_NINF -86
 static const yytype_int16 yytable[] =
 {
-      10,    88,    89,    54,   146,   147,   119,     1,   122,   164,
-      93,   141,    56,   142,    58,   156,    94,    62,     1,    90,
-      91,   131,    65,    66,   144,   145,    67,    90,    91,   132,
-     127,    68,   136,   -31,    97,     2,   154,   -31,   -31,   -31,
-     -31,   -31,   -31,   -31,   -31,    98,    52,   -31,   -31,    99,
-     -31,   100,   101,   102,   103,   104,   -31,   105,   129,   106,
-     138,   173,    92,   141,   107,   142,   174,   172,     8,     9,
-     143,   -33,    97,    90,    91,   -33,   -33,   -33,   -33,   -33,
-     -33,   -33,   -33,    98,   166,   -33,   -33,    99,   -33,   100,
-     101,   102,   103,   104,   -33,   105,    11,   106,   179,   151,
-     123,   126,   107,   135,   125,   130,     2,   139,     2,    90,
-      91,    -5,    12,    55,   161,    13,    14,    15,    16,    17,
-      18,    19,    20,    65,    66,    21,    22,    23,    24,    25,
-      26,    27,    28,    29,    30,    57,    59,    31,    61,    -4,
-      12,    63,    32,    13,    14,    15,    16,    17,    18,    19,
-      20,    64,    71,    21,    22,    23,    24,    25,    26,    27,
-      28,    29,    30,    72,    73,    31,   180,    90,    91,    52,
-      32,   -85,    97,    82,    83,   -85,   -85,   -85,   -85,   -85,
-     -85,   -85,   -85,    84,   190,   -85,   -85,    99,   -85,   -85,
-     -85,   -85,   -85,   -85,   -85,    85,    97,   106,    86,    87,
-     -52,   -52,   140,   -52,   -52,   -52,   -52,    98,    95,   -52,
-     -52,    99,   114,   115,   116,   117,    96,   148,   149,   150,
-     158,   106,   155,   159,    97,   163,   118,   -76,   -76,   -76,
-     -76,   -76,   -76,   -76,   -76,   160,   164,   -76,   -76,    99,
-      13,    14,    15,    16,    17,    18,    19,    20,    91,   106,
-      21,    22,    14,    15,   140,    17,    18,    19,    20,   168,
-     175,    21,    22,   177,   181,   182,   183,    32,   187,   167,
-     188,   169,   170,   171,   185,   189,    53,    51,    32,   176,
-      75,   178,   121,     0,   133,   162,     0,     0,     0,     0,
-     184
+      10,    88,    89,   150,   151,   152,   153,   154,   155,   135,
+      54,   123,    56,    11,    58,   126,   145,    62,   164,     2,
+     146,   136,     1,     1,   148,   149,   147,   -31,   101,    90,
+      91,   -31,   -31,   -31,   -31,   -31,   -31,   -31,   -31,   102,
+     162,   -31,   -31,   103,   -31,   104,   105,   106,   107,   108,
+     -31,   109,   181,   110,     2,    52,    55,    65,    66,   172,
+      57,   182,   111,     8,     9,    67,   131,    59,   140,    92,
+      68,    61,   145,   133,   180,   142,   146,    65,    66,    -5,
+      12,    90,    91,    13,    14,    15,    16,    17,    18,    19,
+      20,    71,   174,    21,    22,    23,    24,    25,    26,    27,
+      28,    29,    30,   159,    63,    31,   187,   127,   130,    64,
+     139,     2,    90,    91,    32,   -33,   101,    72,   169,   -33,
+     -33,   -33,   -33,   -33,   -33,   -33,   -33,   102,    73,   -33,
+     -33,   103,   -33,   104,   105,   106,   107,   108,   -33,   109,
+      52,   110,   129,   134,    82,   143,    83,    -4,    12,    84,
+     111,    13,    14,    15,    16,    17,    18,    19,    20,    90,
+      91,    21,    22,    23,    24,    25,    26,    27,    28,    29,
+      30,    85,    86,    31,   188,    90,    91,    87,    99,   -85,
+     101,   100,    32,   -85,   -85,   -85,   -85,   -85,   -85,   -85,
+     -85,   156,   198,   -85,   -85,   103,   -85,   -85,   -85,   -85,
+     -85,   -85,   -85,   157,   163,   110,   158,   166,   101,   167,
+     168,   171,   -52,   -52,   144,   -52,   -52,   -52,   -52,   102,
+      91,   -52,   -52,   103,   118,   119,   120,   121,   172,   176,
+     183,   101,   185,   110,   -76,   -76,   -76,   -76,   -76,   -76,
+     -76,   -76,   122,   189,   -76,   -76,   103,    13,    14,    15,
+      16,    17,    18,    19,    20,   190,   110,    21,    22,   191,
+     193,   195,   196,    14,    15,   144,    17,    18,    19,    20,
+     197,    53,    21,    22,    51,    75,   125,   175,    32,   177,
+     178,   179,    93,    94,    95,    96,    97,   184,   137,   186,
+     170,     0,    98,    32,     0,     0,     0,     0,   192
 };
 
 #define yypact_value_is_default(yystate) \
-  ((yystate) == (-90))
+  ((yystate) == (-91))
 
 #define yytable_value_is_error(yytable_value) \
   YYID (0)
 
 static const yytype_int16 yycheck[] =
 {
-       1,    67,    68,    10,    93,    94,    76,     3,    76,    14,
-      28,    81,    13,    81,    15,   104,    34,    18,     3,    32,
-      33,    23,    26,    27,    90,    91,    30,    32,    33,    31,
-      78,    35,    80,     0,     1,    31,   102,     4,     5,     6,
-       7,     8,     9,    10,    11,    12,    31,    14,    15,    16,
-      17,    18,    19,    20,    21,    22,    23,    24,    78,    26,
-      80,    26,    69,   133,    31,   133,    31,   156,    26,    27,
-      29,     0,     1,    32,    33,     4,     5,     6,     7,     8,
-       9,    10,    11,    12,   150,    14,    15,    16,    17,    18,
-      19,    20,    21,    22,    23,    24,     0,    26,   164,   100,
-      77,    78,    31,    80,    77,    78,    31,    80,    31,    32,
-      33,     0,     1,    31,   115,     4,     5,     6,     7,     8,
-       9,    10,    11,    26,    27,    14,    15,    16,    17,    18,
-      19,    20,    21,    22,    23,    31,    26,    26,    31,     0,
-       1,    26,    31,     4,     5,     6,     7,     8,     9,    10,
-      11,    26,    31,    14,    15,    16,    17,    18,    19,    20,
-      21,    22,    23,     1,     1,    26,    31,    32,    33,    31,
-      31,     0,     1,    31,    31,     4,     5,     6,     7,     8,
-       9,    10,    11,    31,   185,    14,    15,    16,    17,    18,
-      19,    20,    21,    22,    23,    31,     1,    26,    31,    31,
-       5,     6,    31,     8,     9,    10,    11,    12,    31,    14,
-      15,    16,    17,    18,    19,    20,    31,    31,    31,    25,
-       1,    26,    26,    13,     1,    26,    31,     4,     5,     6,
-       7,     8,     9,    10,    11,    31,    14,    14,    15,    16,
-       4,     5,     6,     7,     8,     9,    10,    11,    33,    26,
-      14,    15,     5,     6,    31,     8,     9,    10,    11,    31,
-      31,    14,    15,    31,    31,    31,    31,    31,    31,   151,
-      31,   153,   154,   155,    34,    31,     7,     6,    31,   161,
-      37,   163,    76,    -1,    79,   116,    -1,    -1,    -1,    -1,
-     172
+       1,    67,    68,    93,    94,    95,    96,    97,    98,    23,
+      10,    76,    13,     0,    15,    76,    81,    18,   108,    35,
+      81,    35,     3,     3,    90,    91,    33,     0,     1,    36,
+      37,     4,     5,     6,     7,     8,     9,    10,    11,    12,
+     106,    14,    15,    16,    17,    18,    19,    20,    21,    22,
+      23,    24,    26,    26,    35,    35,    35,    26,    27,    14,
+      35,    35,    35,    26,    27,    34,    78,    26,    80,    69,
+      39,    35,   137,    78,   164,    80,   137,    26,    27,     0,
+       1,    36,    37,     4,     5,     6,     7,     8,     9,    10,
+      11,    35,   158,    14,    15,    16,    17,    18,    19,    20,
+      21,    22,    23,   104,    26,    26,   172,    77,    78,    26,
+      80,    35,    36,    37,    35,     0,     1,     1,   119,     4,
+       5,     6,     7,     8,     9,    10,    11,    12,     1,    14,
+      15,    16,    17,    18,    19,    20,    21,    22,    23,    24,
+      35,    26,    77,    78,    35,    80,    35,     0,     1,    35,
+      35,     4,     5,     6,     7,     8,     9,    10,    11,    36,
+      37,    14,    15,    16,    17,    18,    19,    20,    21,    22,
+      23,    35,    35,    26,    35,    36,    37,    35,    35,     0,
+       1,    35,    35,     4,     5,     6,     7,     8,     9,    10,
+      11,    35,   193,    14,    15,    16,    17,    18,    19,    20,
+      21,    22,    23,    35,    26,    26,    25,     1,     1,    13,
+      35,    26,     5,     6,    35,     8,     9,    10,    11,    12,
+      37,    14,    15,    16,    17,    18,    19,    20,    14,    35,
+      35,     1,    35,    26,     4,     5,     6,     7,     8,     9,
+      10,    11,    35,    35,    14,    15,    16,     4,     5,     6,
+       7,     8,     9,    10,    11,    35,    26,    14,    15,    35,
+      38,    35,    35,     5,     6,    35,     8,     9,    10,    11,
+      35,     7,    14,    15,     6,    37,    76,   159,    35,   161,
+     162,   163,    28,    29,    30,    31,    32,   169,    79,   171,
+     120,    -1,    38,    35,    -1,    -1,    -1,    -1,   180
 };
 
 /* YYSTOS[STATE-NUM] -- The (internal number of the) accessing
    symbol of state STATE-NUM.  */
 static const yytype_uint8 yystos[] =
 {
-       0,     3,    31,    37,    38,    39,    63,    81,    26,    27,
-      79,     0,     1,     4,     5,     6,     7,     8,     9,    10,
+       0,     3,    35,    41,    42,    43,    67,    85,    26,    27,
+      83,     0,     1,     4,     5,     6,     7,     8,     9,    10,
       11,    14,    15,    16,    17,    18,    19,    20,    21,    22,
-      23,    26,    31,    40,    41,    43,    44,    45,    46,    52,
-      53,    55,    59,    61,    64,    65,    67,    69,    70,    71,
-      80,    39,    31,    38,    81,    31,    79,    31,    79,    26,
-      85,    31,    79,    26,    26,    26,    27,    30,    35,    83,
-      84,    31,     1,     1,    47,    47,    56,    58,    62,    76,
-      68,    74,    31,    31,    31,    31,    31,    31,    83,    83,
-      32,    33,    81,    28,    34,    31,    31,     1,    12,    16,
-      18,    19,    20,    21,    22,    24,    26,    31,    42,    48,
-      49,    72,    73,    75,    17,    18,    19,    20,    31,    42,
-      57,    73,    75,    41,    54,    80,    41,    55,    60,    67,
-      80,    23,    31,    74,    77,    41,    55,    66,    67,    80,
-      31,    42,    75,    29,    83,    83,    84,    84,    31,    31,
-      25,    79,    78,    79,    83,    26,    84,    50,     1,    13,
-      31,    79,    78,    26,    14,    82,    83,    82,    31,    82,
-      82,    82,    84,    26,    31,    31,    82,    31,    82,    83,
-      31,    31,    31,    31,    82,    34,    51,    31,    31,    31,
-      79
+      23,    26,    35,    44,    45,    47,    48,    49,    50,    56,
+      57,    59,    63,    65,    68,    69,    71,    73,    74,    75,
+      84,    43,    35,    42,    85,    35,    83,    35,    83,    26,
+      89,    35,    83,    26,    26,    26,    27,    34,    39,    87,
+      88,    35,     1,     1,    51,    51,    60,    62,    66,    80,
+      72,    78,    35,    35,    35,    35,    35,    35,    87,    87,
+      36,    37,    85,    28,    29,    30,    31,    32,    38,    35,
+      35,     1,    12,    16,    18,    19,    20,    21,    22,    24,
+      26,    35,    46,    52,    53,    76,    77,    79,    17,    18,
+      19,    20,    35,    46,    61,    77,    79,    45,    58,    84,
+      45,    59,    64,    71,    84,    23,    35,    78,    81,    45,
+      59,    70,    71,    84,    35,    46,    79,    33,    87,    87,
+      88,    88,    88,    88,    88,    88,    35,    35,    25,    83,
+      82,    83,    87,    26,    88,    54,     1,    13,    35,    83,
+      82,    26,    14,    86,    87,    86,    35,    86,    86,    86,
+      88,    26,    35,    35,    86,    35,    86,    87,    35,    35,
+      35,    35,    86,    38,    55,    35,    35,    35,    83
 };
 
 #define yyerrok                (yyerrstatus = 0)
@@ -823,17 +842,18 @@ static const yytype_uint8 yystos[] =
 
 #define YYRECOVERING()  (!!yyerrstatus)
 
-#define YYBACKUP(Token, Value)                                 \
-do                                                             \
-  if (yychar == YYEMPTY && yylen == 1)                         \
-    {                                                          \
-      yychar = (Token);                                                \
-      yylval = (Value);                                                \
-      YYPOPSTACK (1);                                          \
-      goto yybackup;                                           \
-    }                                                          \
-  else                                                         \
-    {                                                          \
+#define YYBACKUP(Token, Value)                                  \
+do                                                              \
+  if (yychar == YYEMPTY)                                        \
+    {                                                           \
+      yychar = (Token);                                         \
+      yylval = (Value);                                         \
+      YYPOPSTACK (yylen);                                       \
+      yystate = *yyssp;                                         \
+      goto yybackup;                                            \
+    }                                                           \
+  else                                                          \
+    {                                                           \
       yyerror (YY_("syntax error: cannot back up")); \
       YYERROR;                                                 \
     }                                                          \
@@ -928,6 +948,8 @@ yy_symbol_value_print (yyoutput, yytype, yyvaluep)
     YYSTYPE const * const yyvaluep;
 #endif
 {
+  FILE *yyo = yyoutput;
+  YYUSE (yyo);
   if (!yyvaluep)
     return;
 # ifdef YYPRINT
@@ -1179,12 +1201,12 @@ static int
 yysyntax_error (YYSIZE_T *yymsg_alloc, char **yymsg,
                 yytype_int16 *yyssp, int yytoken)
 {
-  YYSIZE_T yysize0 = yytnamerr (0, yytname[yytoken]);
+  YYSIZE_T yysize0 = yytnamerr (YY_NULL, yytname[yytoken]);
   YYSIZE_T yysize = yysize0;
   YYSIZE_T yysize1;
   enum { YYERROR_VERBOSE_ARGS_MAXIMUM = 5 };
   /* Internationalized format string. */
-  const char *yyformat = 0;
+  const char *yyformat = YY_NULL;
   /* Arguments of yyformat. */
   char const *yyarg[YYERROR_VERBOSE_ARGS_MAXIMUM];
   /* Number of reported tokens (one for the "unexpected", one per
@@ -1244,7 +1266,7 @@ yysyntax_error (YYSIZE_T *yymsg_alloc, char **yymsg,
                     break;
                   }
                 yyarg[yycount++] = yytname[yyx];
-                yysize1 = yysize + yytnamerr (0, yytname[yyx]);
+                yysize1 = yysize + yytnamerr (YY_NULL, yytname[yyx]);
                 if (! (yysize <= yysize1
                        && yysize1 <= YYSTACK_ALLOC_MAXIMUM))
                   return 2;
@@ -1329,7 +1351,7 @@ yydestruct (yymsg, yytype, yyvaluep)
 
   switch (yytype)
     {
-      case 53: /* "choice_entry" */
+      case 57: /* "choice_entry" */
 
        {
        fprintf(stderr, "%s:%d: missing end statement for this entry\n",
@@ -1339,7 +1361,7 @@ yydestruct (yymsg, yytype, yyvaluep)
 };
 
        break;
-      case 59: /* "if_entry" */
+      case 63: /* "if_entry" */
 
        {
        fprintf(stderr, "%s:%d: missing end statement for this entry\n",
@@ -1349,7 +1371,7 @@ yydestruct (yymsg, yytype, yyvaluep)
 };
 
        break;
-      case 65: /* "menu_entry" */
+      case 69: /* "menu_entry" */
 
        {
        fprintf(stderr, "%s:%d: missing end statement for this entry\n",
@@ -1426,7 +1448,7 @@ yyparse ()
        `yyss': related to states.
        `yyvs': related to semantic values.
 
-       Refer to the stacks thru separate pointers, to allow yyoverflow
+       Refer to the stacks through separate pointers, to allow yyoverflow
        to reallocate them elsewhere.  */
 
     /* The state stack.  */
@@ -2012,46 +2034,66 @@ yyreduce:
 
   case 109:
 
-    { (yyval.expr) = expr_alloc_comp(E_EQUAL, (yyvsp[(1) - (3)].symbol), (yyvsp[(3) - (3)].symbol)); }
+    { (yyval.expr) = expr_alloc_comp(E_LTH, (yyvsp[(1) - (3)].symbol), (yyvsp[(3) - (3)].symbol)); }
     break;
 
   case 110:
 
-    { (yyval.expr) = expr_alloc_comp(E_UNEQUAL, (yyvsp[(1) - (3)].symbol), (yyvsp[(3) - (3)].symbol)); }
+    { (yyval.expr) = expr_alloc_comp(E_LEQ, (yyvsp[(1) - (3)].symbol), (yyvsp[(3) - (3)].symbol)); }
     break;
 
   case 111:
 
-    { (yyval.expr) = (yyvsp[(2) - (3)].expr); }
+    { (yyval.expr) = expr_alloc_comp(E_GTH, (yyvsp[(1) - (3)].symbol), (yyvsp[(3) - (3)].symbol)); }
     break;
 
   case 112:
 
-    { (yyval.expr) = expr_alloc_one(E_NOT, (yyvsp[(2) - (2)].expr)); }
+    { (yyval.expr) = expr_alloc_comp(E_GEQ, (yyvsp[(1) - (3)].symbol), (yyvsp[(3) - (3)].symbol)); }
     break;
 
   case 113:
 
-    { (yyval.expr) = expr_alloc_two(E_OR, (yyvsp[(1) - (3)].expr), (yyvsp[(3) - (3)].expr)); }
+    { (yyval.expr) = expr_alloc_comp(E_EQUAL, (yyvsp[(1) - (3)].symbol), (yyvsp[(3) - (3)].symbol)); }
     break;
 
   case 114:
 
-    { (yyval.expr) = expr_alloc_two(E_AND, (yyvsp[(1) - (3)].expr), (yyvsp[(3) - (3)].expr)); }
+    { (yyval.expr) = expr_alloc_comp(E_UNEQUAL, (yyvsp[(1) - (3)].symbol), (yyvsp[(3) - (3)].symbol)); }
     break;
 
   case 115:
 
-    { (yyval.symbol) = sym_lookup((yyvsp[(1) - (1)].string), 0); free((yyvsp[(1) - (1)].string)); }
+    { (yyval.expr) = (yyvsp[(2) - (3)].expr); }
     break;
 
   case 116:
 
-    { (yyval.symbol) = sym_lookup((yyvsp[(1) - (1)].string), SYMBOL_CONST); free((yyvsp[(1) - (1)].string)); }
+    { (yyval.expr) = expr_alloc_one(E_NOT, (yyvsp[(2) - (2)].expr)); }
     break;
 
   case 117:
 
+    { (yyval.expr) = expr_alloc_two(E_OR, (yyvsp[(1) - (3)].expr), (yyvsp[(3) - (3)].expr)); }
+    break;
+
+  case 118:
+
+    { (yyval.expr) = expr_alloc_two(E_AND, (yyvsp[(1) - (3)].expr), (yyvsp[(3) - (3)].expr)); }
+    break;
+
+  case 119:
+
+    { (yyval.symbol) = sym_lookup((yyvsp[(1) - (1)].string), 0); free((yyvsp[(1) - (1)].string)); }
+    break;
+
+  case 120:
+
+    { (yyval.symbol) = sym_lookup((yyvsp[(1) - (1)].string), SYMBOL_CONST); free((yyvsp[(1) - (1)].string)); }
+    break;
+
+  case 121:
+
     { (yyval.string) = NULL; }
     break;
 
@@ -2243,7 +2285,7 @@ yyabortlab:
   yyresult = 1;
   goto yyreturn;
 
-#if !defined(yyoverflow) || YYERROR_VERBOSE
+#if !defined yyoverflow || YYERROR_VERBOSE
 /*-------------------------------------------------.
 | yyexhaustedlab -- memory exhaustion comes here.  |
 `-------------------------------------------------*/
index 0f683cfa53e9abf9aecc032e03334fca182c0d20..71bf8bff696a41ff3f3ff8cdfe46149e4e11506d 100644 (file)
@@ -69,6 +69,10 @@ static struct menu *current_menu, *current_entry;
 %token <string> T_WORD
 %token <string> T_WORD_QUOTE
 %token T_UNEQUAL
+%token T_LESS
+%token T_LESS_EQUAL
+%token T_GREATER
+%token T_GREATER_EQUAL
 %token T_CLOSE_PAREN
 %token T_OPEN_PAREN
 %token T_EOL
@@ -76,6 +80,7 @@ static struct menu *current_menu, *current_entry;
 %left T_OR
 %left T_AND
 %left T_EQUAL T_UNEQUAL
+%left T_LESS T_LESS_EQUAL T_GREATER T_GREATER_EQUAL
 %nonassoc T_NOT
 
 %type <string> prompt
@@ -467,6 +472,10 @@ if_expr:  /* empty */                      { $$ = NULL; }
 ;
 
 expr:    symbol                                { $$ = expr_alloc_symbol($1); }
+       | symbol T_LESS symbol                  { $$ = expr_alloc_comp(E_LTH, $1, $3); }
+       | symbol T_LESS_EQUAL symbol            { $$ = expr_alloc_comp(E_LEQ, $1, $3); }
+       | symbol T_GREATER symbol               { $$ = expr_alloc_comp(E_GTH, $1, $3); }
+       | symbol T_GREATER_EQUAL symbol         { $$ = expr_alloc_comp(E_GEQ, $1, $3); }
        | symbol T_EQUAL symbol                 { $$ = expr_alloc_comp(E_EQUAL, $1, $3); }
        | symbol T_UNEQUAL symbol               { $$ = expr_alloc_comp(E_UNEQUAL, $1, $3); }
        | T_OPEN_PAREN expr T_CLOSE_PAREN       { $$ = $2; }
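
The hunk above adds relational operators to the Kconfig expression grammar. As a rough, non-authoritative sketch of what the new rules produce, the C fragment below builds the expression tree for a comparison such as "FOO > BAR" by hand, using the expr_alloc_comp() and sym_lookup() helpers that the generated actions call. The symbol names are purely illustrative, and the snippet assumes the kconfig-internal header "lkc.h" is on the include path.

#include "lkc.h"        /* kconfig-internal header; assumed available */

/* Roughly what the parser action for "FOO > BAR" constructs: a
 * comparison node of type E_GTH whose operands are the two symbols. */
static struct expr *example_greater_than(void)
{
        return expr_alloc_comp(E_GTH,
                               sym_lookup("FOO", 0),
                               sym_lookup("BAR", 0));
}
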
index 86a4fe75f453735936e3b218f885dcd887216659..1a10d8ac81620faad519d4f95ca8552ac4a958c1 100755 (executable)
@@ -82,7 +82,7 @@ kallsyms()
                kallsymopt="${kallsymopt} --all-symbols"
        fi
 
-       if [ -n "${CONFIG_ARM}" ] && [ -n "${CONFIG_PAGE_OFFSET}" ]; then
+       if [ -n "${CONFIG_ARM}" ] && [ -z "${CONFIG_XIP_KERNEL}" ] && [ -n "${CONFIG_PAGE_OFFSET}" ]; then
                kallsymopt="${kallsymopt} --page-offset=$CONFIG_PAGE_OFFSET"
        fi
 
@@ -111,7 +111,6 @@ sortextable()
 }
 
 # Delete output files in case of error
-trap cleanup SIGHUP SIGINT SIGQUIT SIGTERM ERR
 cleanup()
 {
        rm -f .old_version
@@ -124,6 +123,20 @@ cleanup()
        rm -f vmlinux.o
 }
 
+on_exit()
+{
+       if [ $? -ne 0 ]; then
+               cleanup
+       fi
+}
+trap on_exit EXIT
+
+on_signals()
+{
+       exit 1
+}
+trap on_signals HUP INT QUIT TERM
+
 #
 #
 # Use "make V=1" to debug this script
@@ -231,7 +244,6 @@ if [ -n "${CONFIG_KALLSYMS}" ]; then
        if ! cmp -s System.map .tmp_System.map; then
                echo >&2 Inconsistent kallsyms data
                echo >&2 Try "make KALLSYMS_EXTRA_PASS=1" as a workaround
-               cleanup
                exit 1
        fi
 fi
index 1052d4834a44f502bda4f4f1ebe1202ace1608f5..c2423d913b46bd0e659ea4d4c057a3af6119c2d4 100644 (file)
 #define EM_MICROBLAZE  189
 #endif
 
+#ifndef EM_ARCV2
+#define EM_ARCV2       195
+#endif
+
 static int fd_map;     /* File descriptor for file being modified. */
 static int mmap_failed; /* Boolean flag. */
 static void *ehdr_curr; /* current ElfXX_Ehdr *  for resource cleanup */
@@ -281,6 +285,7 @@ do_file(char const *const fname)
                custom_sort = sort_relative_table;
                break;
        case EM_ARCOMPACT:
+       case EM_ARCV2:
        case EM_ARM:
        case EM_AARCH64:
        case EM_MICROBLAZE:
index cdb491d845035e59bff19a5cde7f1ca84c28c17f..c0a932dff3290c4675688f5ce865c1f933a88b0d 100755 (executable)
@@ -154,7 +154,7 @@ exuberant()
 {
        all_target_sources | xargs $1 -a                        \
        -I __initdata,__exitdata,__initconst,                   \
-       -I __cpuinitdata,__initdata_memblock                    \
+       -I __initdata_memblock                                  \
        -I __refdata,__attribute,__maybe_unused,__always_unused \
        -I __acquires,__releases,__deprecated                   \
        -I __read_mostly,__aligned,____cacheline_aligned        \
index 5696874e806264c50cdbddd317bbfdbdefe47260..dec607c17b6434d6b8e5416060a0202bc4cb52f4 100644 (file)
@@ -654,7 +654,7 @@ static struct security_hook_list apparmor_hooks[] = {
 static int param_set_aabool(const char *val, const struct kernel_param *kp);
 static int param_get_aabool(char *buffer, const struct kernel_param *kp);
 #define param_check_aabool param_check_bool
-static struct kernel_param_ops param_ops_aabool = {
+static const struct kernel_param_ops param_ops_aabool = {
        .flags = KERNEL_PARAM_OPS_FL_NOARG,
        .set = param_set_aabool,
        .get = param_get_aabool
@@ -663,7 +663,7 @@ static struct kernel_param_ops param_ops_aabool = {
 static int param_set_aauint(const char *val, const struct kernel_param *kp);
 static int param_get_aauint(char *buffer, const struct kernel_param *kp);
 #define param_check_aauint param_check_uint
-static struct kernel_param_ops param_ops_aauint = {
+static const struct kernel_param_ops param_ops_aauint = {
        .set = param_set_aauint,
        .get = param_get_aauint
 };
@@ -671,7 +671,7 @@ static struct kernel_param_ops param_ops_aauint = {
 static int param_set_aalockpolicy(const char *val, const struct kernel_param *kp);
 static int param_get_aalockpolicy(char *buffer, const struct kernel_param *kp);
 #define param_check_aalockpolicy param_check_bool
-static struct kernel_param_ops param_ops_aalockpolicy = {
+static const struct kernel_param_ops param_ops_aalockpolicy = {
        .flags = KERNEL_PARAM_OPS_FL_NOARG,
        .set = param_set_aalockpolicy,
        .get = param_get_aalockpolicy
index 91503b79c5f8f8d373920da4b16854c590774799..16622aef9bdea83bee67e5e0080c7b9a17d240ae 100644 (file)
 static struct vfsmount *mount;
 static int mount_count;
 
-static inline int positive(struct dentry *dentry)
-{
-       return d_really_is_positive(dentry) && !d_unhashed(dentry);
-}
-
 static int fill_super(struct super_block *sb, void *data, int silent)
 {
        static struct tree_descr files[] = {{""}};
@@ -201,33 +196,29 @@ void securityfs_remove(struct dentry *dentry)
                return;
 
        mutex_lock(&d_inode(parent)->i_mutex);
-       if (positive(dentry)) {
-               if (d_really_is_positive(dentry)) {
-                       if (d_is_dir(dentry))
-                               simple_rmdir(d_inode(parent), dentry);
-                       else
-                               simple_unlink(d_inode(parent), dentry);
-                       dput(dentry);
-               }
+       if (simple_positive(dentry)) {
+               if (d_is_dir(dentry))
+                       simple_rmdir(d_inode(parent), dentry);
+               else
+                       simple_unlink(d_inode(parent), dentry);
+               dput(dentry);
        }
        mutex_unlock(&d_inode(parent)->i_mutex);
        simple_release_fs(&mount, &mount_count);
 }
 EXPORT_SYMBOL_GPL(securityfs_remove);
 
-static struct kobject *security_kobj;
-
 static int __init securityfs_init(void)
 {
        int retval;
 
-       security_kobj = kobject_create_and_add("security", kernel_kobj);
-       if (!security_kobj)
-               return -EINVAL;
+       retval = sysfs_create_mount_point(kernel_kobj, "security");
+       if (retval)
+               return retval;
 
        retval = register_filesystem(&fs_type);
        if (retval)
-               kobject_put(security_kobj);
+               sysfs_remove_mount_point(kernel_kobj, "security");
        return retval;
 }
 
index 686355fea7fd8aab766c5f5894cf3d5a4dd2e13d..e24121afb2f2773eacced7410dbdd5953a49c206 100644 (file)
@@ -55,7 +55,7 @@ static int param_set_bufsize(const char *val, const struct kernel_param *kp)
        return 0;
 }
 
-static struct kernel_param_ops param_ops_bufsize = {
+static const struct kernel_param_ops param_ops_bufsize = {
        .set = param_set_bufsize,
        .get = param_get_uint,
 };
index d2787cca1fcb94aea94f73f1535d4a57f2907129..3d22014130289d2904dee4fb3c831223437864d2 100644 (file)
@@ -1853,7 +1853,6 @@ static struct file_system_type sel_fs_type = {
 };
 
 struct vfsmount *selinuxfs_mount;
-static struct kobject *selinuxfs_kobj;
 
 static int __init init_sel_fs(void)
 {
@@ -1862,13 +1861,13 @@ static int __init init_sel_fs(void)
        if (!selinux_enabled)
                return 0;
 
-       selinuxfs_kobj = kobject_create_and_add("selinux", fs_kobj);
-       if (!selinuxfs_kobj)
-               return -ENOMEM;
+       err = sysfs_create_mount_point(fs_kobj, "selinux");
+       if (err)
+               return err;
 
        err = register_filesystem(&sel_fs_type);
        if (err) {
-               kobject_put(selinuxfs_kobj);
+               sysfs_remove_mount_point(fs_kobj, "selinux");
                return err;
        }
 
@@ -1887,7 +1886,7 @@ __initcall(init_sel_fs);
 #ifdef CONFIG_SECURITY_SELINUX_DISABLE
 void exit_sel_fs(void)
 {
-       kobject_put(selinuxfs_kobj);
+       sysfs_remove_mount_point(fs_kobj, "selinux");
        kern_unmount(selinuxfs_mount);
        unregister_filesystem(&sel_fs_type);
 }
index 5e0a64ebdf237e2f0fd1e3cb4db93c8e3d2eda37..2716d02119f3e80634aedcc39d48feed197c0bee 100644 (file)
@@ -2314,16 +2314,16 @@ static const struct file_operations smk_revoke_subj_ops = {
        .llseek         = generic_file_llseek,
 };
 
-static struct kset *smackfs_kset;
 /**
  * smk_init_sysfs - initialize /sys/fs/smackfs
  *
  */
 static int smk_init_sysfs(void)
 {
-       smackfs_kset = kset_create_and_add("smackfs", NULL, fs_kobj);
-       if (!smackfs_kset)
-               return -ENOMEM;
+       int err;
+       err = sysfs_create_mount_point(fs_kobj, "smackfs");
+       if (err)
+               return err;
        return 0;
 }
 
index 9149a4aefa9548e5b09d8d15a4e28dd90cb5918a..84a3cd683068a3bbf4a752e05c399600ce83572b 100644 (file)
@@ -41,8 +41,11 @@ static int get_available_index(struct snd_card *card, const char *name)
        sid.iface = SNDRV_CTL_ELEM_IFACE_CARD;
        strlcpy(sid.name, name, sizeof(sid.name));
 
-       while (snd_ctl_find_id(card, &sid))
+       while (snd_ctl_find_id(card, &sid)) {
                sid.index++;
+               /* reset numid; otherwise snd_ctl_find_id() hits this again */
+               sid.numid = 0;
+       }
 
        return sid.index;
 }
index 3e0cebacefe1e74c42148f7e3e227f9de47852b7..20f37fb3800ecce8222fc67ef48afa91f65ecb4c 100644 (file)
@@ -109,13 +109,12 @@ static void snd_card_id_read(struct snd_info_entry *entry,
 
 static int init_info_for_card(struct snd_card *card)
 {
-       int err;
        struct snd_info_entry *entry;
 
        entry = snd_info_create_card_entry(card, "id", card->proc_root);
        if (!entry) {
                dev_dbg(card->dev, "unable to create card entry\n");
-               return err;
+               return -ENOMEM;
        }
        entry->c.text.read = snd_card_id_read;
        card->proc_id = entry;
index 082509eb805d5395ed356ef0e9378432f891b96e..f05cb6a8cbe02ed03a3f27499b08a8b36190bcd1 100644 (file)
@@ -124,7 +124,7 @@ static void snd_malloc_dev_iram(struct snd_dma_buffer *dmab, size_t size)
        dmab->addr = 0;
 
        if (dev->of_node)
-               pool = of_get_named_gen_pool(dev->of_node, "iram", 0);
+               pool = of_gen_pool_get(dev->of_node, "iram", 0);
 
        if (!pool)
                return;
index 7dea7987d2afcb1039a3a4ae79c9619f39788c16..745535d1840a6713e802aaa8c1d475733b88720e 100644 (file)
@@ -171,7 +171,7 @@ MODULE_PARM_DESC(beep_mode, "Select HDA Beep registration mode "
 
 #ifdef CONFIG_PM
 static int param_set_xint(const char *val, const struct kernel_param *kp);
-static struct kernel_param_ops param_ops_xint = {
+static const struct kernel_param_ops param_ops_xint = {
        .set = param_set_xint,
        .get = param_get_int,
 };
@@ -2180,6 +2180,8 @@ static const struct pci_device_id azx_ids[] = {
        { PCI_DEVICE(0x1022, 0x780d),
          .driver_data = AZX_DRIVER_GENERIC | AZX_DCAPS_PRESET_ATI_SB },
        /* ATI HDMI */
+       { PCI_DEVICE(0x1002, 0x1308),
+         .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS },
        { PCI_DEVICE(0x1002, 0x793b),
          .driver_data = AZX_DRIVER_ATIHDMI | AZX_DCAPS_PRESET_ATI_HDMI },
        { PCI_DEVICE(0x1002, 0x7919),
@@ -2188,6 +2190,8 @@ static const struct pci_device_id azx_ids[] = {
          .driver_data = AZX_DRIVER_ATIHDMI | AZX_DCAPS_PRESET_ATI_HDMI },
        { PCI_DEVICE(0x1002, 0x970f),
          .driver_data = AZX_DRIVER_ATIHDMI | AZX_DCAPS_PRESET_ATI_HDMI },
+       { PCI_DEVICE(0x1002, 0x9840),
+         .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS },
        { PCI_DEVICE(0x1002, 0xaa00),
          .driver_data = AZX_DRIVER_ATIHDMI | AZX_DCAPS_PRESET_ATI_HDMI },
        { PCI_DEVICE(0x1002, 0xaa08),
index f8527342a15062517af2be86ec2e9a5bb9e16a28..2f2433845d0487dd703301664cbc33ecfc77bc11 100644 (file)
@@ -591,7 +591,7 @@ static int eld_proc_new(struct hdmi_spec_per_pin *per_pin, int index)
 
 static void eld_proc_free(struct hdmi_spec_per_pin *per_pin)
 {
-       if (!per_pin->codec->bus->shutdown && per_pin->proc_entry) {
+       if (!per_pin->codec->bus->shutdown) {
                snd_info_free_entry(per_pin->proc_entry);
                per_pin->proc_entry = NULL;
        }
index 431a20b17df4cae617b7258e641c96a8c0b32a21..b3b44681d3cfbe90b2ff7bcbabba735dcfaac7f7 100644 (file)
@@ -4464,6 +4464,7 @@ enum {
        ALC269_FIXUP_LIFEBOOK,
        ALC269_FIXUP_LIFEBOOK_EXTMIC,
        ALC269_FIXUP_LIFEBOOK_HP_PIN,
+       ALC269_FIXUP_LIFEBOOK_NO_HP_TO_LINEOUT,
        ALC269_FIXUP_AMIC,
        ALC269_FIXUP_DMIC,
        ALC269VB_FIXUP_AMIC,
@@ -4484,6 +4485,7 @@ enum {
        ALC269_FIXUP_DELL3_MIC_NO_PRESENCE,
        ALC269_FIXUP_HEADSET_MODE,
        ALC269_FIXUP_HEADSET_MODE_NO_HP_MIC,
+       ALC269_FIXUP_ASPIRE_HEADSET_MIC,
        ALC269_FIXUP_ASUS_X101_FUNC,
        ALC269_FIXUP_ASUS_X101_VERB,
        ALC269_FIXUP_ASUS_X101,
@@ -4511,6 +4513,7 @@ enum {
        ALC255_FIXUP_HEADSET_MODE_NO_HP_MIC,
        ALC293_FIXUP_DELL1_MIC_NO_PRESENCE,
        ALC292_FIXUP_TPT440_DOCK,
+       ALC292_FIXUP_TPT440_DOCK2,
        ALC283_FIXUP_BXBT2807_MIC,
        ALC255_FIXUP_DELL_WMI_MIC_MUTE_LED,
        ALC282_FIXUP_ASPIRE_V5_PINS,
@@ -4521,6 +4524,8 @@ enum {
        ALC288_FIXUP_DELL_HEADSET_MODE,
        ALC288_FIXUP_DELL1_MIC_NO_PRESENCE,
        ALC288_FIXUP_DELL_XPS_13_GPIO6,
+       ALC288_FIXUP_DELL_XPS_13,
+       ALC288_FIXUP_DISABLE_AAMIX,
        ALC292_FIXUP_DELL_E7X,
        ALC292_FIXUP_DISABLE_AAMIX,
        ALC298_FIXUP_DELL1_MIC_NO_PRESENCE,
@@ -4630,6 +4635,10 @@ static const struct hda_fixup alc269_fixups[] = {
                        { }
                },
        },
+       [ALC269_FIXUP_LIFEBOOK_NO_HP_TO_LINEOUT] = {
+               .type = HDA_FIXUP_FUNC,
+               .v.func = alc269_fixup_pincfg_no_hp_to_lineout,
+       },
        [ALC269_FIXUP_AMIC] = {
                .type = HDA_FIXUP_PINS,
                .v.pins = (const struct hda_pintbl[]) {
@@ -4758,6 +4767,15 @@ static const struct hda_fixup alc269_fixups[] = {
                .type = HDA_FIXUP_FUNC,
                .v.func = alc_fixup_headset_mode_no_hp_mic,
        },
+       [ALC269_FIXUP_ASPIRE_HEADSET_MIC] = {
+               .type = HDA_FIXUP_PINS,
+               .v.pins = (const struct hda_pintbl[]) {
+                       { 0x19, 0x01a1913c }, /* headset mic w/o jack detect */
+                       { }
+               },
+               .chained = true,
+               .chain_id = ALC269_FIXUP_HEADSET_MODE,
+       },
        [ALC286_FIXUP_SONY_MIC_NO_PRESENCE] = {
                .type = HDA_FIXUP_PINS,
                .v.pins = (const struct hda_pintbl[]) {
@@ -4960,6 +4978,12 @@ static const struct hda_fixup alc269_fixups[] = {
                .chain_id = ALC269_FIXUP_HEADSET_MODE
        },
        [ALC292_FIXUP_TPT440_DOCK] = {
+               .type = HDA_FIXUP_FUNC,
+               .v.func = alc269_fixup_pincfg_no_hp_to_lineout,
+               .chained = true,
+               .chain_id = ALC292_FIXUP_TPT440_DOCK2
+       },
+       [ALC292_FIXUP_TPT440_DOCK2] = {
                .type = HDA_FIXUP_PINS,
                .v.pins = (const struct hda_pintbl[]) {
                        { 0x16, 0x21211010 }, /* dock headphone */
@@ -5046,9 +5070,23 @@ static const struct hda_fixup alc269_fixups[] = {
                .chained = true,
                .chain_id = ALC288_FIXUP_DELL1_MIC_NO_PRESENCE
        },
+       [ALC288_FIXUP_DISABLE_AAMIX] = {
+               .type = HDA_FIXUP_FUNC,
+               .v.func = alc_fixup_disable_aamix,
+               .chained = true,
+               .chain_id = ALC288_FIXUP_DELL_XPS_13_GPIO6
+       },
+       [ALC288_FIXUP_DELL_XPS_13] = {
+               .type = HDA_FIXUP_FUNC,
+               .v.func = alc_fixup_dell_xps13,
+               .chained = true,
+               .chain_id = ALC288_FIXUP_DISABLE_AAMIX
+       },
        [ALC292_FIXUP_DISABLE_AAMIX] = {
                .type = HDA_FIXUP_FUNC,
                .v.func = alc_fixup_disable_aamix,
+               .chained = true,
+               .chain_id = ALC269_FIXUP_DELL2_MIC_NO_PRESENCE
        },
        [ALC292_FIXUP_DELL_E7X] = {
                .type = HDA_FIXUP_FUNC,
@@ -5073,6 +5111,8 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x1025, 0x029b, "Acer 1810TZ", ALC269_FIXUP_INV_DMIC),
        SND_PCI_QUIRK(0x1025, 0x0349, "Acer AOD260", ALC269_FIXUP_INV_DMIC),
        SND_PCI_QUIRK(0x1025, 0x047c, "Acer AC700", ALC269_FIXUP_ACER_AC700),
+       SND_PCI_QUIRK(0x1025, 0x072d, "Acer Aspire V5-571G", ALC269_FIXUP_ASPIRE_HEADSET_MIC),
+       SND_PCI_QUIRK(0x1025, 0x080d, "Acer Aspire V5-122P", ALC269_FIXUP_ASPIRE_HEADSET_MIC),
        SND_PCI_QUIRK(0x1025, 0x0740, "Acer AO725", ALC271_FIXUP_HP_GATE_MIC_JACK),
        SND_PCI_QUIRK(0x1025, 0x0742, "Acer AO756", ALC271_FIXUP_HP_GATE_MIC_JACK),
        SND_PCI_QUIRK(0x1025, 0x0775, "Acer Aspire E1-572", ALC271_FIXUP_HP_GATE_MIC_JACK_E1_572),
@@ -5086,10 +5126,11 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x1028, 0x05f6, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1028, 0x0615, "Dell Vostro 5470", ALC290_FIXUP_SUBWOOFER_HSJACK),
        SND_PCI_QUIRK(0x1028, 0x0616, "Dell Vostro 5470", ALC290_FIXUP_SUBWOOFER_HSJACK),
+       SND_PCI_QUIRK(0x1028, 0x062e, "Dell Latitude E7450", ALC292_FIXUP_DELL_E7X),
        SND_PCI_QUIRK(0x1028, 0x0638, "Dell Inspiron 5439", ALC290_FIXUP_MONO_SPEAKERS_HSJACK),
        SND_PCI_QUIRK(0x1028, 0x064a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1028, 0x064b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
-       SND_PCI_QUIRK(0x1028, 0x0665, "Dell XPS 13", ALC292_FIXUP_DELL_E7X),
+       SND_PCI_QUIRK(0x1028, 0x0665, "Dell XPS 13", ALC288_FIXUP_DELL_XPS_13),
        SND_PCI_QUIRK(0x1028, 0x06c7, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1028, 0x06d9, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1028, 0x06da, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
@@ -5173,6 +5214,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x104d, 0x9084, "Sony VAIO", ALC275_FIXUP_SONY_HWEQ),
        SND_PCI_QUIRK(0x104d, 0x9099, "Sony VAIO S13", ALC275_FIXUP_SONY_DISABLE_AAMIX),
        SND_PCI_QUIRK(0x10cf, 0x1475, "Lifebook", ALC269_FIXUP_LIFEBOOK),
+       SND_PCI_QUIRK(0x10cf, 0x159f, "Lifebook E780", ALC269_FIXUP_LIFEBOOK_NO_HP_TO_LINEOUT),
        SND_PCI_QUIRK(0x10cf, 0x15dc, "Lifebook T731", ALC269_FIXUP_LIFEBOOK_HP_PIN),
        SND_PCI_QUIRK(0x10cf, 0x1757, "Lifebook E752", ALC269_FIXUP_LIFEBOOK_HP_PIN),
        SND_PCI_QUIRK(0x10cf, 0x1845, "Lifebook U904", ALC269_FIXUP_LIFEBOOK_EXTMIC),
index 0521be8d46a81c08f6580e57a5a495066f22d977..da5366405eda55a6eccbca0e14bce5631c851cdc 100644 (file)
@@ -241,7 +241,9 @@ static int via_pin_power_ctl_get(struct snd_kcontrol *kcontrol,
                                 struct snd_ctl_elem_value *ucontrol)
 {
        struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
-       ucontrol->value.enumerated.item[0] = codec->power_save_node;
+       struct via_spec *spec = codec->spec;
+
+       ucontrol->value.enumerated.item[0] = spec->gen.power_down_unused;
        return 0;
 }
 
@@ -252,9 +254,9 @@ static int via_pin_power_ctl_put(struct snd_kcontrol *kcontrol,
        struct via_spec *spec = codec->spec;
        bool val = !!ucontrol->value.enumerated.item[0];
 
-       if (val == codec->power_save_node)
+       if (val == spec->gen.power_down_unused)
                return 0;
-       codec->power_save_node = val;
+       /* codec->power_save_node = val; */ /* widget PM still seems broken */
        spec->gen.power_down_unused = val;
        analog_low_current_mode(codec);
        return 1;
index a51244a8022f91c26591b9553d16e0020c9f9d12..faca2bf6a430e1805ea125c663af392d51f89036 100644 (file)
@@ -25,7 +25,7 @@ build-dir := $(srctree)/tools/build
 include $(build-dir)/Build.include
 
 # do not force detected configuration
--include .config-detected
+-include $(OUTPUT).config-detected
 
 # Init all relevant variables used in build files so
 # 1) they have correct type
index f0e72674c52d2c9b88b46cb281db39fe8fb68d8d..9098083869c85a62bc4df5bcf8df96f5ffcc7e00 100644 (file)
 
 #define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
 
+#include <linux/types.h>
+
+static __always_inline void __read_once_size(const volatile void *p, void *res, int size)
+{
+       switch (size) {
+       case 1: *(__u8 *)res = *(volatile __u8 *)p; break;
+       case 2: *(__u16 *)res = *(volatile __u16 *)p; break;
+       case 4: *(__u32 *)res = *(volatile __u32 *)p; break;
+       case 8: *(__u64 *)res = *(volatile __u64 *)p; break;
+       default:
+               barrier();
+               __builtin_memcpy((void *)res, (const void *)p, size);
+               barrier();
+       }
+}
+
+static __always_inline void __write_once_size(volatile void *p, void *res, int size)
+{
+       switch (size) {
+       case 1: *(volatile __u8 *)p = *(__u8 *)res; break;
+       case 2: *(volatile __u16 *)p = *(__u16 *)res; break;
+       case 4: *(volatile __u32 *)p = *(__u32 *)res; break;
+       case 8: *(volatile __u64 *)p = *(__u64 *)res; break;
+       default:
+               barrier();
+               __builtin_memcpy((void *)p, (const void *)res, size);
+               barrier();
+       }
+}
+
+/*
+ * Prevent the compiler from merging or refetching reads or writes. The
+ * compiler is also forbidden from reordering successive instances of
+ * READ_ONCE, WRITE_ONCE and ACCESS_ONCE (see below), but only when the
+ * compiler is aware of some particular ordering.  One way to make the
+ * compiler aware of ordering is to put the two invocations of READ_ONCE,
+ * WRITE_ONCE or ACCESS_ONCE() in different C statements.
+ *
+ * In contrast to ACCESS_ONCE these two macros will also work on aggregate
+ * data types like structs or unions. If the size of the accessed data
+ * type exceeds the word size of the machine (e.g., 32 bits or 64 bits)
+ * READ_ONCE() and WRITE_ONCE()  will fall back to memcpy and print a
+ * compile-time warning.
+ *
+ * Their two major use cases are: (1) Mediating communication between
+ * process-level code and irq/NMI handlers, all running on the same CPU,
+ * and (2) Ensuring that the compiler does not  fold, spindle, or otherwise
+ * mutilate accesses that either do not require ordering or that interact
+ * with an explicit memory barrier or atomic instruction that provides the
+ * required ordering.
+ */
+
+#define READ_ONCE(x) \
+       ({ union { typeof(x) __val; char __c[1]; } __u; __read_once_size(&(x), __u.__c, sizeof(x)); __u.__val; })
+
+#define WRITE_ONCE(x, val) \
+       ({ union { typeof(x) __val; char __c[1]; } __u = { .__val = (val) }; __write_once_size(&(x), __u.__c, sizeof(x)); __u.__val; })
+
 #endif /* _TOOLS_LINUX_COMPILER_H */
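
A minimal usage sketch of the READ_ONCE()/WRITE_ONCE() helpers added above; the variable and function names are illustrative, not part of the patch. Each marked access compiles to exactly one untorn load or store of the given size, and the compiler may not merge it with, or cache it across, other marked accesses.

/* Illustrative only; assumes the READ_ONCE()/WRITE_ONCE() macros above. */
static int shared_flag;

static void signal_ready(void)
{
        WRITE_ONCE(shared_flag, 1);     /* one untorn store */
}

static int poll_ready(void)
{
        return READ_ONCE(shared_flag);  /* one fresh, untorn load per call */
}
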
diff --git a/tools/include/linux/export.h b/tools/include/linux/export.h
deleted file mode 100644 (file)
index d07e586..0000000
+++ /dev/null
@@ -1,10 +0,0 @@
-#ifndef _TOOLS_LINUX_EXPORT_H_
-#define _TOOLS_LINUX_EXPORT_H_
-
-#define EXPORT_SYMBOL(sym)
-#define EXPORT_SYMBOL_GPL(sym)
-#define EXPORT_SYMBOL_GPL_FUTURE(sym)
-#define EXPORT_UNUSED_SYMBOL(sym)
-#define EXPORT_UNUSED_SYMBOL_GPL(sym)
-
-#endif
diff --git a/tools/include/linux/rbtree.h b/tools/include/linux/rbtree.h
new file mode 100644 (file)
index 0000000..1125822
--- /dev/null
@@ -0,0 +1,104 @@
+/*
+  Red Black Trees
+  (C) 1999  Andrea Arcangeli <andrea@suse.de>
+
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 2 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program; if not, write to the Free Software
+  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+
+  linux/include/linux/rbtree.h
+
+  To use rbtrees you'll have to implement your own insert and search cores.
+  This avoids the need for callbacks, which would otherwise hurt performance
+  dramatically.  It is not the cleanest way, but it is how you get both
+  performance and genericity in C (as opposed to C++)...
+
+  See Documentation/rbtree.txt for documentation and samples.
+*/
+
+#ifndef __TOOLS_LINUX_PERF_RBTREE_H
+#define __TOOLS_LINUX_PERF_RBTREE_H
+
+#include <linux/kernel.h>
+#include <linux/stddef.h>
+
+struct rb_node {
+       unsigned long  __rb_parent_color;
+       struct rb_node *rb_right;
+       struct rb_node *rb_left;
+} __attribute__((aligned(sizeof(long))));
+    /* The alignment might seem pointless, but allegedly CRIS needs it */
+
+struct rb_root {
+       struct rb_node *rb_node;
+};
+
+
+#define rb_parent(r)   ((struct rb_node *)((r)->__rb_parent_color & ~3))
+
+#define RB_ROOT        (struct rb_root) { NULL, }
+#define        rb_entry(ptr, type, member) container_of(ptr, type, member)
+
+#define RB_EMPTY_ROOT(root)  ((root)->rb_node == NULL)
+
+/* 'empty' nodes are nodes that are known not to be inserted in an rbtree */
+#define RB_EMPTY_NODE(node)  \
+       ((node)->__rb_parent_color == (unsigned long)(node))
+#define RB_CLEAR_NODE(node)  \
+       ((node)->__rb_parent_color = (unsigned long)(node))
+
+
+extern void rb_insert_color(struct rb_node *, struct rb_root *);
+extern void rb_erase(struct rb_node *, struct rb_root *);
+
+
+/* Find logical next and previous nodes in a tree */
+extern struct rb_node *rb_next(const struct rb_node *);
+extern struct rb_node *rb_prev(const struct rb_node *);
+extern struct rb_node *rb_first(const struct rb_root *);
+extern struct rb_node *rb_last(const struct rb_root *);
+
+/* Postorder iteration - always visit the parent after its children */
+extern struct rb_node *rb_first_postorder(const struct rb_root *);
+extern struct rb_node *rb_next_postorder(const struct rb_node *);
+
+/* Fast replacement of a single node without remove/rebalance/add/rebalance */
+extern void rb_replace_node(struct rb_node *victim, struct rb_node *new,
+                           struct rb_root *root);
+
+static inline void rb_link_node(struct rb_node *node, struct rb_node *parent,
+                               struct rb_node **rb_link)
+{
+       node->__rb_parent_color = (unsigned long)parent;
+       node->rb_left = node->rb_right = NULL;
+
+       *rb_link = node;
+}
+
+#define rb_entry_safe(ptr, type, member) \
+       ({ typeof(ptr) ____ptr = (ptr); \
+          ____ptr ? rb_entry(____ptr, type, member) : NULL; \
+       })
+
+
+/*
+ * Handy for checking that we are not deleting an entry that is
+ * already in a list, found in block/{blk-throttle,cfq-iosched}.c,
+ * probably should be moved to lib/rbtree.c...
+ */
+static inline void rb_erase_init(struct rb_node *n, struct rb_root *root)
+{
+       rb_erase(n, root);
+       RB_CLEAR_NODE(n);
+}
+#endif /* __TOOLS_LINUX_PERF_RBTREE_H */
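
As the header comment notes, rbtree users supply their own insert and search cores. The fragment below is one conventional insert core written against the rb_node/rb_root API introduced above; "struct thing" and its "key" field are hypothetical, added only for illustration. It walks down to the correct leaf slot, links the new node with rb_link_node(), and then lets rb_insert_color() recolor and rebalance.

/* Assumes the <linux/rbtree.h> header shown above is included. */
struct thing {
        struct rb_node node;
        long key;
};

static void thing_insert(struct rb_root *root, struct thing *new)
{
        struct rb_node **link = &root->rb_node, *parent = NULL;

        while (*link) {
                struct thing *cur = rb_entry(*link, struct thing, node);

                parent = *link;
                if (new->key < cur->key)
                        link = &(*link)->rb_left;
                else
                        link = &(*link)->rb_right;
        }

        rb_link_node(&new->node, parent, link);         /* link into place */
        rb_insert_color(&new->node, root);              /* recolor/rebalance */
}
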
diff --git a/tools/include/linux/rbtree_augmented.h b/tools/include/linux/rbtree_augmented.h
new file mode 100644 (file)
index 0000000..43be941
--- /dev/null
@@ -0,0 +1,245 @@
+/*
+  Red Black Trees
+  (C) 1999  Andrea Arcangeli <andrea@suse.de>
+  (C) 2002  David Woodhouse <dwmw2@infradead.org>
+  (C) 2012  Michel Lespinasse <walken@google.com>
+
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 2 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program; if not, write to the Free Software
+  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+
+  tools/linux/include/linux/rbtree_augmented.h
+
+  Copied from:
+  linux/include/linux/rbtree_augmented.h
+*/
+
+#ifndef _TOOLS_LINUX_RBTREE_AUGMENTED_H
+#define _TOOLS_LINUX_RBTREE_AUGMENTED_H
+
+#include <linux/compiler.h>
+#include <linux/rbtree.h>
+
+/*
+ * Please note - only struct rb_augment_callbacks and the prototypes for
+ * rb_insert_augmented() and rb_erase_augmented() are intended to be public.
+ * The rest are implementation details you are not expected to depend on.
+ *
+ * See Documentation/rbtree.txt for documentation and samples.
+ */
+
+struct rb_augment_callbacks {
+       void (*propagate)(struct rb_node *node, struct rb_node *stop);
+       void (*copy)(struct rb_node *old, struct rb_node *new);
+       void (*rotate)(struct rb_node *old, struct rb_node *new);
+};
+
+extern void __rb_insert_augmented(struct rb_node *node, struct rb_root *root,
+       void (*augment_rotate)(struct rb_node *old, struct rb_node *new));
+/*
+ * Fixup the rbtree and update the augmented information when rebalancing.
+ *
+ * On insertion, the user must update the augmented information on the path
+ * leading to the inserted node, then call rb_link_node() as usual and
+ * rb_augment_inserted() instead of the usual rb_insert_color() call.
+ * If rb_augment_inserted() rebalances the rbtree, it will callback into
+ * a user provided function to update the augmented information on the
+ * affected subtrees.
+ */
+static inline void
+rb_insert_augmented(struct rb_node *node, struct rb_root *root,
+                   const struct rb_augment_callbacks *augment)
+{
+       __rb_insert_augmented(node, root, augment->rotate);
+}
+
+#define RB_DECLARE_CALLBACKS(rbstatic, rbname, rbstruct, rbfield,      \
+                            rbtype, rbaugmented, rbcompute)            \
+static inline void                                                     \
+rbname ## _propagate(struct rb_node *rb, struct rb_node *stop)         \
+{                                                                      \
+       while (rb != stop) {                                            \
+               rbstruct *node = rb_entry(rb, rbstruct, rbfield);       \
+               rbtype augmented = rbcompute(node);                     \
+               if (node->rbaugmented == augmented)                     \
+                       break;                                          \
+               node->rbaugmented = augmented;                          \
+               rb = rb_parent(&node->rbfield);                         \
+       }                                                               \
+}                                                                      \
+static inline void                                                     \
+rbname ## _copy(struct rb_node *rb_old, struct rb_node *rb_new)                \
+{                                                                      \
+       rbstruct *old = rb_entry(rb_old, rbstruct, rbfield);            \
+       rbstruct *new = rb_entry(rb_new, rbstruct, rbfield);            \
+       new->rbaugmented = old->rbaugmented;                            \
+}                                                                      \
+static void                                                            \
+rbname ## _rotate(struct rb_node *rb_old, struct rb_node *rb_new)      \
+{                                                                      \
+       rbstruct *old = rb_entry(rb_old, rbstruct, rbfield);            \
+       rbstruct *new = rb_entry(rb_new, rbstruct, rbfield);            \
+       new->rbaugmented = old->rbaugmented;                            \
+       old->rbaugmented = rbcompute(old);                              \
+}                                                                      \
+rbstatic const struct rb_augment_callbacks rbname = {                  \
+       rbname ## _propagate, rbname ## _copy, rbname ## _rotate        \
+};
+
+
+#define        RB_RED          0
+#define        RB_BLACK        1
+
+#define __rb_parent(pc)    ((struct rb_node *)(pc & ~3))
+
+#define __rb_color(pc)     ((pc) & 1)
+#define __rb_is_black(pc)  __rb_color(pc)
+#define __rb_is_red(pc)    (!__rb_color(pc))
+#define rb_color(rb)       __rb_color((rb)->__rb_parent_color)
+#define rb_is_red(rb)      __rb_is_red((rb)->__rb_parent_color)
+#define rb_is_black(rb)    __rb_is_black((rb)->__rb_parent_color)
+
+static inline void rb_set_parent(struct rb_node *rb, struct rb_node *p)
+{
+       rb->__rb_parent_color = rb_color(rb) | (unsigned long)p;
+}
+
+static inline void rb_set_parent_color(struct rb_node *rb,
+                                      struct rb_node *p, int color)
+{
+       rb->__rb_parent_color = (unsigned long)p | color;
+}
+
+static inline void
+__rb_change_child(struct rb_node *old, struct rb_node *new,
+                 struct rb_node *parent, struct rb_root *root)
+{
+       if (parent) {
+               if (parent->rb_left == old)
+                       parent->rb_left = new;
+               else
+                       parent->rb_right = new;
+       } else
+               root->rb_node = new;
+}
+
+extern void __rb_erase_color(struct rb_node *parent, struct rb_root *root,
+       void (*augment_rotate)(struct rb_node *old, struct rb_node *new));
+
+static __always_inline struct rb_node *
+__rb_erase_augmented(struct rb_node *node, struct rb_root *root,
+                    const struct rb_augment_callbacks *augment)
+{
+       struct rb_node *child = node->rb_right, *tmp = node->rb_left;
+       struct rb_node *parent, *rebalance;
+       unsigned long pc;
+
+       if (!tmp) {
+               /*
+                * Case 1: node to erase has no more than 1 child (easy!)
+                *
+                * Note that if there is one child it must be red due to 5)
+                * and node must be black due to 4). We adjust colors locally
+                * so as to bypass __rb_erase_color() later on.
+                */
+               pc = node->__rb_parent_color;
+               parent = __rb_parent(pc);
+               __rb_change_child(node, child, parent, root);
+               if (child) {
+                       child->__rb_parent_color = pc;
+                       rebalance = NULL;
+               } else
+                       rebalance = __rb_is_black(pc) ? parent : NULL;
+               tmp = parent;
+       } else if (!child) {
+               /* Still case 1, but this time the child is node->rb_left */
+               tmp->__rb_parent_color = pc = node->__rb_parent_color;
+               parent = __rb_parent(pc);
+               __rb_change_child(node, tmp, parent, root);
+               rebalance = NULL;
+               tmp = parent;
+       } else {
+               struct rb_node *successor = child, *child2;
+               tmp = child->rb_left;
+               if (!tmp) {
+                       /*
+                        * Case 2: node's successor is its right child
+                        *
+                        *    (n)          (s)
+                        *    / \          / \
+                        *  (x) (s)  ->  (x) (c)
+                        *        \
+                        *        (c)
+                        */
+                       parent = successor;
+                       child2 = successor->rb_right;
+                       augment->copy(node, successor);
+               } else {
+                       /*
+                        * Case 3: node's successor is leftmost under
+                        * node's right child subtree
+                        *
+                        *    (n)          (s)
+                        *    / \          / \
+                        *  (x) (y)  ->  (x) (y)
+                        *      /            /
+                        *    (p)          (p)
+                        *    /            /
+                        *  (s)          (c)
+                        *    \
+                        *    (c)
+                        */
+                       do {
+                               parent = successor;
+                               successor = tmp;
+                               tmp = tmp->rb_left;
+                       } while (tmp);
+                       parent->rb_left = child2 = successor->rb_right;
+                       successor->rb_right = child;
+                       rb_set_parent(child, successor);
+                       augment->copy(node, successor);
+                       augment->propagate(parent, successor);
+               }
+
+               successor->rb_left = tmp = node->rb_left;
+               rb_set_parent(tmp, successor);
+
+               pc = node->__rb_parent_color;
+               tmp = __rb_parent(pc);
+               __rb_change_child(node, successor, tmp, root);
+               if (child2) {
+                       successor->__rb_parent_color = pc;
+                       rb_set_parent_color(child2, parent, RB_BLACK);
+                       rebalance = NULL;
+               } else {
+                       unsigned long pc2 = successor->__rb_parent_color;
+                       successor->__rb_parent_color = pc;
+                       rebalance = __rb_is_black(pc2) ? parent : NULL;
+               }
+               tmp = successor;
+       }
+
+       augment->propagate(tmp, NULL);
+       return rebalance;
+}
+
+static __always_inline void
+rb_erase_augmented(struct rb_node *node, struct rb_root *root,
+                  const struct rb_augment_callbacks *augment)
+{
+       struct rb_node *rebalance = __rb_erase_augmented(node, root, augment);
+       if (rebalance)
+               __rb_erase_color(rebalance, root, augment->rotate);
+}
+
+#endif /* _TOOLS_LINUX_RBTREE_AUGMENTED_H */
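
To show how the augmented API above fits together, here is a hedged sketch in the interval-tree style: each node caches the largest "last" value in its subtree, RB_DECLARE_CALLBACKS() generates the propagate/copy/rotate callbacks for that cached field, and the insert core keeps the ancestors' cached maxima current before handing rebalancing to rb_insert_augmented(). All type and field names ("struct itnode", "subtree_last", ...) are illustrative, not taken from the patch.

/* Assumes the <linux/rbtree_augmented.h> header shown above is included. */
struct itnode {
        struct rb_node rb;
        long start, last;
        long subtree_last;      /* augmented data: max ->last in subtree */
};

static long itnode_compute_max(struct itnode *n)
{
        long max = n->last;

        if (n->rb.rb_left) {
                struct itnode *l = rb_entry(n->rb.rb_left, struct itnode, rb);

                if (l->subtree_last > max)
                        max = l->subtree_last;
        }
        if (n->rb.rb_right) {
                struct itnode *r = rb_entry(n->rb.rb_right, struct itnode, rb);

                if (r->subtree_last > max)
                        max = r->subtree_last;
        }
        return max;
}

RB_DECLARE_CALLBACKS(static, itnode_augment, struct itnode, rb,
                     long, subtree_last, itnode_compute_max)

static void itnode_insert(struct itnode *new, struct rb_root *root)
{
        struct rb_node **link = &root->rb_node, *parent = NULL;

        while (*link) {
                struct itnode *cur = rb_entry(*link, struct itnode, rb);

                parent = *link;
                /* keep cached maxima on the path valid for the new entry */
                if (cur->subtree_last < new->last)
                        cur->subtree_last = new->last;
                if (new->start < cur->start)
                        link = &cur->rb.rb_left;
                else
                        link = &cur->rb.rb_right;
        }

        new->subtree_last = new->last;
        rb_link_node(&new->rb, parent, link);
        rb_insert_augmented(&new->rb, root, &itnode_augment);
}
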
diff --git a/tools/lib/rbtree.c b/tools/lib/rbtree.c
new file mode 100644 (file)
index 0000000..17c2b59
--- /dev/null
@@ -0,0 +1,548 @@
+/*
+  Red Black Trees
+  (C) 1999  Andrea Arcangeli <andrea@suse.de>
+  (C) 2002  David Woodhouse <dwmw2@infradead.org>
+  (C) 2012  Michel Lespinasse <walken@google.com>
+
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 2 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program; if not, write to the Free Software
+  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+
+  linux/lib/rbtree.c
+*/
+
+#include <linux/rbtree_augmented.h>
+
+/*
+ * red-black trees properties:  http://en.wikipedia.org/wiki/Rbtree
+ *
+ *  1) A node is either red or black
+ *  2) The root is black
+ *  3) All leaves (NULL) are black
+ *  4) Both children of every red node are black
+ *  5) Every simple path from root to leaves contains the same number
+ *     of black nodes.
+ *
+ *  4 and 5 give the O(log n) guarantee, since 4 implies you cannot have two
+ *  consecutive red nodes in a path and every red node is therefore followed by
+ *  a black. So if B is the number of black nodes on every simple path (as per
+ *  5), then the longest possible path due to 4 is 2B.
+ *
+ *  We shall indicate color with case, where black nodes are uppercase and red
+ *  nodes will be lowercase. Unknown color nodes shall be drawn as red within
+ *  parentheses and have some accompanying text comment.
+ */
+
+static inline void rb_set_black(struct rb_node *rb)
+{
+       rb->__rb_parent_color |= RB_BLACK;
+}
+
+static inline struct rb_node *rb_red_parent(struct rb_node *red)
+{
+       return (struct rb_node *)red->__rb_parent_color;
+}
+
+/*
+ * Helper function for rotations:
+ * - old's parent and color get assigned to new
+ * - old gets assigned new as a parent and 'color' as a color.
+ */
+static inline void
+__rb_rotate_set_parents(struct rb_node *old, struct rb_node *new,
+                       struct rb_root *root, int color)
+{
+       struct rb_node *parent = rb_parent(old);
+       new->__rb_parent_color = old->__rb_parent_color;
+       rb_set_parent_color(old, new, color);
+       __rb_change_child(old, new, parent, root);
+}
+
+static __always_inline void
+__rb_insert(struct rb_node *node, struct rb_root *root,
+           void (*augment_rotate)(struct rb_node *old, struct rb_node *new))
+{
+       struct rb_node *parent = rb_red_parent(node), *gparent, *tmp;
+
+       while (true) {
+               /*
+                * Loop invariant: node is red
+                *
+                * If there is a black parent, we are done.
+                * Otherwise, take some corrective action as we don't
+                * want a red root or two consecutive red nodes.
+                */
+               if (!parent) {
+                       rb_set_parent_color(node, NULL, RB_BLACK);
+                       break;
+               } else if (rb_is_black(parent))
+                       break;
+
+               gparent = rb_red_parent(parent);
+
+               tmp = gparent->rb_right;
+               if (parent != tmp) {    /* parent == gparent->rb_left */
+                       if (tmp && rb_is_red(tmp)) {
+                               /*
+                                * Case 1 - color flips
+                                *
+                                *       G            g
+                                *      / \          / \
+                                *     p   u  -->   P   U
+                                *    /            /
+                                *   n            n
+                                *
+                                * However, since g's parent might be red, and
+                                * 4) does not allow this, we need to recurse
+                                * at g.
+                                */
+                               rb_set_parent_color(tmp, gparent, RB_BLACK);
+                               rb_set_parent_color(parent, gparent, RB_BLACK);
+                               node = gparent;
+                               parent = rb_parent(node);
+                               rb_set_parent_color(node, parent, RB_RED);
+                               continue;
+                       }
+
+                       tmp = parent->rb_right;
+                       if (node == tmp) {
+                               /*
+                                * Case 2 - left rotate at parent
+                                *
+                                *      G             G
+                                *     / \           / \
+                                *    p   U  -->    n   U
+                                *     \           /
+                                *      n         p
+                                *
+                                * This still leaves us in violation of 4), the
+                                * continuation into Case 3 will fix that.
+                                */
+                               parent->rb_right = tmp = node->rb_left;
+                               node->rb_left = parent;
+                               if (tmp)
+                                       rb_set_parent_color(tmp, parent,
+                                                           RB_BLACK);
+                               rb_set_parent_color(parent, node, RB_RED);
+                               augment_rotate(parent, node);
+                               parent = node;
+                               tmp = node->rb_right;
+                       }
+
+                       /*
+                        * Case 3 - right rotate at gparent
+                        *
+                        *        G           P
+                        *       / \         / \
+                        *      p   U  -->  n   g
+                        *     /                 \
+                        *    n                   U
+                        */
+                       gparent->rb_left = tmp;  /* == parent->rb_right */
+                       parent->rb_right = gparent;
+                       if (tmp)
+                               rb_set_parent_color(tmp, gparent, RB_BLACK);
+                       __rb_rotate_set_parents(gparent, parent, root, RB_RED);
+                       augment_rotate(gparent, parent);
+                       break;
+               } else {
+                       tmp = gparent->rb_left;
+                       if (tmp && rb_is_red(tmp)) {
+                               /* Case 1 - color flips */
+                               rb_set_parent_color(tmp, gparent, RB_BLACK);
+                               rb_set_parent_color(parent, gparent, RB_BLACK);
+                               node = gparent;
+                               parent = rb_parent(node);
+                               rb_set_parent_color(node, parent, RB_RED);
+                               continue;
+                       }
+
+                       tmp = parent->rb_left;
+                       if (node == tmp) {
+                               /* Case 2 - right rotate at parent */
+                               parent->rb_left = tmp = node->rb_right;
+                               node->rb_right = parent;
+                               if (tmp)
+                                       rb_set_parent_color(tmp, parent,
+                                                           RB_BLACK);
+                               rb_set_parent_color(parent, node, RB_RED);
+                               augment_rotate(parent, node);
+                               parent = node;
+                               tmp = node->rb_left;
+                       }
+
+                       /* Case 3 - left rotate at gparent */
+                       gparent->rb_right = tmp;  /* == parent->rb_left */
+                       parent->rb_left = gparent;
+                       if (tmp)
+                               rb_set_parent_color(tmp, gparent, RB_BLACK);
+                       __rb_rotate_set_parents(gparent, parent, root, RB_RED);
+                       augment_rotate(gparent, parent);
+                       break;
+               }
+       }
+}
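
The loop above is only the rebalancing half of an insertion. A caller is expected to do the binary-search descent itself, link the new node in red with rb_link_node(), and then hand control to rb_insert_color() (defined further down, which in turn calls __rb_insert()). A minimal usage sketch, not part of this patch: struct mytype, key and mytype_insert() are made-up names, the rest is the stock rbtree API.

#include <linux/rbtree.h>	/* tools/include/linux/rbtree.h in tools builds */

struct mytype {
        struct rb_node node;
        unsigned long key;
};

static void mytype_insert(struct rb_root *root, struct mytype *new)
{
        struct rb_node **link = &root->rb_node, *parent = NULL;

        /* Ordinary binary search to find the insertion point. */
        while (*link) {
                struct mytype *entry = rb_entry(*link, struct mytype, node);

                parent = *link;
                if (new->key < entry->key)
                        link = &(*link)->rb_left;
                else
                        link = &(*link)->rb_right;
        }

        /* Link the new node in red, then let the tree recolor/rotate itself. */
        rb_link_node(&new->node, parent, link);
        rb_insert_color(&new->node, root);
}
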
+
+/*
+ * Inline version for rb_erase() use - we want to be able to inline
+ * and eliminate the dummy_rotate callback there
+ */
+static __always_inline void
+____rb_erase_color(struct rb_node *parent, struct rb_root *root,
+       void (*augment_rotate)(struct rb_node *old, struct rb_node *new))
+{
+       struct rb_node *node = NULL, *sibling, *tmp1, *tmp2;
+
+       while (true) {
+               /*
+                * Loop invariants:
+                * - node is black (or NULL on first iteration)
+                * - node is not the root (parent is not NULL)
+                * - All leaf paths going through parent and node have a
+                *   black node count that is 1 lower than other leaf paths.
+                */
+               sibling = parent->rb_right;
+               if (node != sibling) {  /* node == parent->rb_left */
+                       if (rb_is_red(sibling)) {
+                               /*
+                                * Case 1 - left rotate at parent
+                                *
+                                *     P               S
+                                *    / \             / \
+                                *   N   s    -->    p   Sr
+                                *      / \         / \
+                                *     Sl  Sr      N   Sl
+                                */
+                               parent->rb_right = tmp1 = sibling->rb_left;
+                               sibling->rb_left = parent;
+                               rb_set_parent_color(tmp1, parent, RB_BLACK);
+                               __rb_rotate_set_parents(parent, sibling, root,
+                                                       RB_RED);
+                               augment_rotate(parent, sibling);
+                               sibling = tmp1;
+                       }
+                       tmp1 = sibling->rb_right;
+                       if (!tmp1 || rb_is_black(tmp1)) {
+                               tmp2 = sibling->rb_left;
+                               if (!tmp2 || rb_is_black(tmp2)) {
+                                       /*
+                                        * Case 2 - sibling color flip
+                                        * (p could be either color here)
+                                        *
+                                        *    (p)           (p)
+                                        *    / \           / \
+                                        *   N   S    -->  N   s
+                                        *      / \           / \
+                                        *     Sl  Sr        Sl  Sr
+                                        *
+                                        * This leaves us violating 5) which
+                                        * can be fixed by flipping p to black
+                                        * if it was red, or by recursing at p.
+                                        * p is red when coming from Case 1.
+                                        */
+                                       rb_set_parent_color(sibling, parent,
+                                                           RB_RED);
+                                       if (rb_is_red(parent))
+                                               rb_set_black(parent);
+                                       else {
+                                               node = parent;
+                                               parent = rb_parent(node);
+                                               if (parent)
+                                                       continue;
+                                       }
+                                       break;
+                               }
+                               /*
+                                * Case 3 - right rotate at sibling
+                                * (p could be either color here)
+                                *
+                                *   (p)           (p)
+                                *   / \           / \
+                                *  N   S    -->  N   Sl
+                                *     / \             \
+                                *    sl  Sr            s
+                                *                       \
+                                *                        Sr
+                                */
+                               sibling->rb_left = tmp1 = tmp2->rb_right;
+                               tmp2->rb_right = sibling;
+                               parent->rb_right = tmp2;
+                               if (tmp1)
+                                       rb_set_parent_color(tmp1, sibling,
+                                                           RB_BLACK);
+                               augment_rotate(sibling, tmp2);
+                               tmp1 = sibling;
+                               sibling = tmp2;
+                       }
+                       /*
+                        * Case 4 - left rotate at parent + color flips
+                        * (p and sl could be either color here.
+                        *  After rotation, p becomes black, s acquires
+                        *  p's color, and sl keeps its color)
+                        *
+                        *      (p)             (s)
+                        *      / \             / \
+                        *     N   S     -->   P   Sr
+                        *        / \         / \
+                        *      (sl) sr      N  (sl)
+                        */
+                       parent->rb_right = tmp2 = sibling->rb_left;
+                       sibling->rb_left = parent;
+                       rb_set_parent_color(tmp1, sibling, RB_BLACK);
+                       if (tmp2)
+                               rb_set_parent(tmp2, parent);
+                       __rb_rotate_set_parents(parent, sibling, root,
+                                               RB_BLACK);
+                       augment_rotate(parent, sibling);
+                       break;
+               } else {
+                       sibling = parent->rb_left;
+                       if (rb_is_red(sibling)) {
+                               /* Case 1 - right rotate at parent */
+                               parent->rb_left = tmp1 = sibling->rb_right;
+                               sibling->rb_right = parent;
+                               rb_set_parent_color(tmp1, parent, RB_BLACK);
+                               __rb_rotate_set_parents(parent, sibling, root,
+                                                       RB_RED);
+                               augment_rotate(parent, sibling);
+                               sibling = tmp1;
+                       }
+                       tmp1 = sibling->rb_left;
+                       if (!tmp1 || rb_is_black(tmp1)) {
+                               tmp2 = sibling->rb_right;
+                               if (!tmp2 || rb_is_black(tmp2)) {
+                                       /* Case 2 - sibling color flip */
+                                       rb_set_parent_color(sibling, parent,
+                                                           RB_RED);
+                                       if (rb_is_red(parent))
+                                               rb_set_black(parent);
+                                       else {
+                                               node = parent;
+                                               parent = rb_parent(node);
+                                               if (parent)
+                                                       continue;
+                                       }
+                                       break;
+                               }
+                               /* Case 3 - left rotate at sibling */
+                               sibling->rb_right = tmp1 = tmp2->rb_left;
+                               tmp2->rb_left = sibling;
+                               parent->rb_left = tmp2;
+                               if (tmp1)
+                                       rb_set_parent_color(tmp1, sibling,
+                                                           RB_BLACK);
+                               augment_rotate(sibling, tmp2);
+                               tmp1 = sibling;
+                               sibling = tmp2;
+                       }
+                       /* Case 4 - right rotate at parent + color flips */
+                       parent->rb_left = tmp2 = sibling->rb_right;
+                       sibling->rb_right = parent;
+                       rb_set_parent_color(tmp1, sibling, RB_BLACK);
+                       if (tmp2)
+                               rb_set_parent(tmp2, parent);
+                       __rb_rotate_set_parents(parent, sibling, root,
+                                               RB_BLACK);
+                       augment_rotate(parent, sibling);
+                       break;
+               }
+       }
+}
+
+/* Non-inline version for rb_erase_augmented() use */
+void __rb_erase_color(struct rb_node *parent, struct rb_root *root,
+       void (*augment_rotate)(struct rb_node *old, struct rb_node *new))
+{
+       ____rb_erase_color(parent, root, augment_rotate);
+}
+
+/*
+ * Non-augmented rbtree manipulation functions.
+ *
+ * We use dummy augmented callbacks here, and have the compiler optimize them
+ * out of the rb_insert_color() and rb_erase() function definitions.
+ */
+
+static inline void dummy_propagate(struct rb_node *node, struct rb_node *stop) {}
+static inline void dummy_copy(struct rb_node *old, struct rb_node *new) {}
+static inline void dummy_rotate(struct rb_node *old, struct rb_node *new) {}
+
+static const struct rb_augment_callbacks dummy_callbacks = {
+       dummy_propagate, dummy_copy, dummy_rotate
+};
+
+void rb_insert_color(struct rb_node *node, struct rb_root *root)
+{
+       __rb_insert(node, root, dummy_rotate);
+}
+
+void rb_erase(struct rb_node *node, struct rb_root *root)
+{
+       struct rb_node *rebalance;
+       rebalance = __rb_erase_augmented(node, root, &dummy_callbacks);
+       if (rebalance)
+               ____rb_erase_color(rebalance, root, dummy_rotate);
+}
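
Erasing is the mirror image: the caller looks the node up, calls rb_erase(), and ____rb_erase_color() above restores the invariants whenever a black node was unlinked. Again a sketch only, not part of this patch, reusing the hypothetical struct mytype from the insertion example.

static struct mytype *mytype_search(struct rb_root *root, unsigned long key)
{
        struct rb_node *n = root->rb_node;

        while (n) {
                struct mytype *entry = rb_entry(n, struct mytype, node);

                if (key < entry->key)
                        n = n->rb_left;
                else if (key > entry->key)
                        n = n->rb_right;
                else
                        return entry;
        }
        return NULL;
}

static void mytype_remove(struct rb_root *root, unsigned long key)
{
        struct mytype *entry = mytype_search(root, key);

        if (entry) {
                rb_erase(&entry->node, root);
                /* rb_erase() only unlinks; freeing the entry is the caller's job. */
        }
}
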
+
+/*
+ * Augmented rbtree manipulation functions.
+ *
+ * This instantiates the same __always_inline functions as in the non-augmented
+ * case, but this time with user-defined callbacks.
+ */
+
+void __rb_insert_augmented(struct rb_node *node, struct rb_root *root,
+       void (*augment_rotate)(struct rb_node *old, struct rb_node *new))
+{
+       __rb_insert(node, root, augment_rotate);
+}
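
To illustrate the callback contract, the sketch below (not part of this patch; every identifier starting with size_ is invented) maintains a per-node subtree size, following the propagate/copy/rotate order that struct rb_augment_callbacks uses above for dummy_callbacks. A user would then insert via the rb_insert_augmented() and erase via the rb_erase_augmented() inline wrappers from rbtree_augmented.h, passing &size_callbacks.

#include <linux/rbtree_augmented.h>

struct size_node {
        struct rb_node rb;
        unsigned long key;
        unsigned long size;     /* number of nodes in the subtree rooted here */
};

static unsigned long subtree_size(struct rb_node *rb)
{
        return rb ? rb_entry(rb, struct size_node, rb)->size : 0;
}

static unsigned long compute_size(struct size_node *node)
{
        return 1 + subtree_size(node->rb.rb_left) + subtree_size(node->rb.rb_right);
}

/* Walk up from node towards stop, refreshing the cached sizes. */
static void size_propagate(struct rb_node *rb, struct rb_node *stop)
{
        while (rb != stop) {
                struct size_node *node = rb_entry(rb, struct size_node, rb);
                unsigned long size = compute_size(node);

                if (node->size == size)
                        break;
                node->size = size;
                rb = rb_parent(&node->rb);
        }
}

/* "new" takes over "old"'s place in the tree, so it inherits the cached value. */
static void size_copy(struct rb_node *rb_old, struct rb_node *rb_new)
{
        rb_entry(rb_new, struct size_node, rb)->size =
                rb_entry(rb_old, struct size_node, rb)->size;
}

/* After a rotation "old" sits below "new"; recompute "old" from its children. */
static void size_rotate(struct rb_node *rb_old, struct rb_node *rb_new)
{
        size_copy(rb_old, rb_new);
        rb_entry(rb_old, struct size_node, rb)->size =
                compute_size(rb_entry(rb_old, struct size_node, rb));
}

static const struct rb_augment_callbacks size_callbacks = {
        size_propagate, size_copy, size_rotate
};
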
+
+/*
+ * This function returns the first node (in sort order) of the tree.
+ */
+struct rb_node *rb_first(const struct rb_root *root)
+{
+       struct rb_node  *n;
+
+       n = root->rb_node;
+       if (!n)
+               return NULL;
+       while (n->rb_left)
+               n = n->rb_left;
+       return n;
+}
+
+struct rb_node *rb_last(const struct rb_root *root)
+{
+       struct rb_node  *n;
+
+       n = root->rb_node;
+       if (!n)
+               return NULL;
+       while (n->rb_right)
+               n = n->rb_right;
+       return n;
+}
+
+struct rb_node *rb_next(const struct rb_node *node)
+{
+       struct rb_node *parent;
+
+       if (RB_EMPTY_NODE(node))
+               return NULL;
+
+       /*
+        * If we have a right-hand child, go down and then left as far
+        * as we can.
+        */
+       if (node->rb_right) {
+               node = node->rb_right;
+               while (node->rb_left)
+                       node = node->rb_left;
+               return (struct rb_node *)node;
+       }
+
+       /*
+        * No right-hand children. Everything down and left is smaller than us,
+        * so any 'next' node must be in the general direction of our parent.
+        * Go up the tree; any time the ancestor is a right-hand child of its
+        * parent, keep going up. First time it's a left-hand child of its
+        * parent, said parent is our 'next' node.
+        */
+       while ((parent = rb_parent(node)) && node == parent->rb_right)
+               node = parent;
+
+       return parent;
+}
+
+struct rb_node *rb_prev(const struct rb_node *node)
+{
+       struct rb_node *parent;
+
+       if (RB_EMPTY_NODE(node))
+               return NULL;
+
+       /*
+        * If we have a left-hand child, go down and then right as far
+        * as we can.
+        */
+       if (node->rb_left) {
+               node = node->rb_left;
+               while (node->rb_right)
+                       node = node->rb_right;
+               return (struct rb_node *)node;
+       }
+
+       /*
+        * No left-hand children. Go up till we find an ancestor which
+        * is a right-hand child of its parent.
+        */
+       while ((parent = rb_parent(node)) && node == parent->rb_left)
+               node = parent;
+
+       return parent;
+}
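
Together with rb_entry(), these give the usual sorted-iteration pattern. A sketch, not part of this patch, reusing the hypothetical struct mytype from the insertion example; mytype_count_below() and limit are invented names.

static unsigned long mytype_count_below(struct rb_root *root, unsigned long limit)
{
        struct rb_node *n;
        unsigned long count = 0;

        for (n = rb_first(root); n; n = rb_next(n)) {
                struct mytype *entry = rb_entry(n, struct mytype, node);

                /* rb_first()/rb_next() visit keys in ascending order. */
                if (entry->key >= limit)
                        break;
                count++;
        }
        return count;
}
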
+
+void rb_replace_node(struct rb_node *victim, struct rb_node *new,
+                    struct rb_root *root)
+{
+       struct rb_node *parent = rb_parent(victim);
+
+       /* Set the surrounding nodes to point to the replacement */
+       __rb_change_child(victim, new, parent, root);
+       if (victim->rb_left)
+               rb_set_parent(victim->rb_left, new);
+       if (victim->rb_right)
+               rb_set_parent(victim->rb_right, new);
+
+       /* Copy the pointers/colour from the victim to the replacement */
+       *new = *victim;
+}
+
+static struct rb_node *rb_left_deepest_node(const struct rb_node *node)
+{
+       for (;;) {
+               if (node->rb_left)
+                       node = node->rb_left;
+               else if (node->rb_right)
+                       node = node->rb_right;
+               else
+                       return (struct rb_node *)node;
+       }
+}
+
+struct rb_node *rb_next_postorder(const struct rb_node *node)
+{
+       const struct rb_node *parent;
+       if (!node)
+               return NULL;
+       parent = rb_parent(node);
+
+       /* If we're sitting on node, we've already seen our children */
+       if (parent && node == parent->rb_left && parent->rb_right) {
+               /* If we are the parent's left node, go to the parent's right
+                * node then all the way down to the left */
+               return rb_left_deepest_node(parent->rb_right);
+       } else
+               /* Otherwise we are the parent's right node, and the parent
+                * should be next */
+               return (struct rb_node *)parent;
+}
+
+struct rb_node *rb_first_postorder(const struct rb_root *root)
+{
+       if (!root->rb_node)
+               return NULL;
+
+       return rb_left_deepest_node(root->rb_node);
+}
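
Postorder visits both children before their parent, which is exactly what is needed to tear a tree down while freeing nodes as they are visited. A sketch, not part of this patch, again using the hypothetical struct mytype; free() stands in for whatever release function the real code would use.

#include <stdlib.h>

static void mytype_destroy(struct rb_root *root)
{
        struct rb_node *n = rb_first_postorder(root);

        while (n) {
                struct mytype *entry = rb_entry(n, struct mytype, node);

                /* Advance before freeing: the old node must not be touched again. */
                n = rb_next_postorder(n);
                free(entry);            /* kfree(entry) in kernel code */
        }
        *root = RB_ROOT;
}
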
index 04e150d83e7da6fefaa0af14151b26458f6a01a9..47469abdcc1c11e2256effc86845232f78785075 100644 (file)
@@ -144,6 +144,10 @@ is a useful mode to detect imbalance between physical cores.  To enable this mod
 use --per-core in addition to -a. (system-wide).  The output includes the
 core number and the number of online logical processors on that physical processor.
 
+--per-thread::
+Aggregate counts per monitored thread when monitoring threads (-t option)
+or processes (-p option).
+
 -D msecs::
 --delay msecs::
 After starting the program, wait msecs before measuring. This is useful to
index fe50a1b34aa0035dec38a07a752ae33df9ad0e5d..09dc0aabb5154cb0906b6299afed4a3d0fc009c8 100644 (file)
@@ -18,6 +18,7 @@ tools/arch/x86/include/asm/atomic.h
 tools/arch/x86/include/asm/rmwcc.h
 tools/lib/traceevent
 tools/lib/api
+tools/lib/rbtree.c
 tools/lib/symbol/kallsyms.c
 tools/lib/symbol/kallsyms.h
 tools/lib/util/find_next_bit.c
@@ -44,6 +45,8 @@ tools/include/linux/kernel.h
 tools/include/linux/list.h
 tools/include/linux/log2.h
 tools/include/linux/poison.h
+tools/include/linux/rbtree.h
+tools/include/linux/rbtree_augmented.h
 tools/include/linux/types.h
 include/asm-generic/bitops/arch_hweight.h
 include/asm-generic/bitops/const_hweight.h
@@ -51,12 +54,10 @@ include/asm-generic/bitops/fls64.h
 include/asm-generic/bitops/__fls.h
 include/asm-generic/bitops/fls.h
 include/linux/perf_event.h
-include/linux/rbtree.h
 include/linux/list.h
 include/linux/hash.h
 include/linux/stringify.h
 lib/hweight.c
-lib/rbtree.c
 include/linux/swab.h
 arch/*/include/asm/unistd*.h
 arch/*/include/uapi/asm/unistd*.h
@@ -65,7 +66,6 @@ arch/*/lib/memcpy*.S
 arch/*/lib/memset*.S
 include/linux/poison.h
 include/linux/hw_breakpoint.h
-include/linux/rbtree_augmented.h
 include/uapi/linux/perf_event.h
 include/uapi/linux/const.h
 include/uapi/linux/swab.h
index d31a7bbd7cee8610db236c7842cfb5ec63dc56b0..480546d5f13b205b85790c9512d51f33693e73a1 100644 (file)
@@ -83,8 +83,8 @@ build-test:
 #
 # All other targets get passed through:
 #
-%:
+%: FORCE
        $(print_msg)
        $(make)
 
-.PHONY: tags TAGS
+.PHONY: tags TAGS FORCE Makefile
index 1af0cfeb7a57824980ef64fdf4d26f643fa7ab6c..7a4b549214e34715aaf2ff7b834598ef2bae8080 100644 (file)
@@ -110,7 +110,7 @@ $(OUTPUT)PERF-VERSION-FILE: ../../.git/HEAD
        $(Q)touch $(OUTPUT)PERF-VERSION-FILE
 
 CC = $(CROSS_COMPILE)gcc
-LD = $(CROSS_COMPILE)ld
+LD ?= $(CROSS_COMPILE)ld
 AR = $(CROSS_COMPILE)ar
 PKG_CONFIG = $(CROSS_COMPILE)pkg-config
 
@@ -545,7 +545,7 @@ config-clean:
 clean: $(LIBTRACEEVENT)-clean $(LIBAPI)-clean config-clean
        $(call QUIET_CLEAN, core-objs)  $(RM) $(LIB_FILE) $(OUTPUT)perf-archive $(OUTPUT)perf-with-kcore $(LANG_BINDINGS)
        $(Q)find . -name '*.o' -delete -o -name '\.*.cmd' -delete -o -name '\.*.d' -delete
-       $(Q)$(RM) .config-detected
+       $(Q)$(RM) $(OUTPUT).config-detected
        $(call QUIET_CLEAN, core-progs) $(RM) $(ALL_PROGRAMS) perf perf-read-vdso32 perf-read-vdsox32
        $(call QUIET_CLEAN, core-gen)   $(RM)  *.spec *.pyc *.pyo */*.pyc */*.pyo $(OUTPUT)common-cmds.h TAGS tags cscope* $(OUTPUT)PERF-VERSION-FILE $(OUTPUT)FEATURE-DUMP $(OUTPUT)util/*-bison* $(OUTPUT)util/*-flex*
        $(QUIET_SUBDIR0)Documentation $(QUIET_SUBDIR1) clean
index 52ec66b236076c46c1bd0666967d1d204d2c95c3..01b06492bd6a9cd74eb3d71dce8a57e7df71c593 100644 (file)
@@ -630,12 +630,13 @@ int cmd_inject(int argc, const char **argv, const char *prefix __maybe_unused)
        if (inject.session == NULL)
                return -1;
 
-       if (symbol__init(&inject.session->header.env) < 0)
-               return -1;
+       ret = symbol__init(&inject.session->header.env);
+       if (ret < 0)
+               goto out_delete;
 
        ret = __cmd_inject(&inject);
 
+out_delete:
        perf_session__delete(inject.session);
-
        return ret;
 }
index 950f296dfcf7a402ebbad0df1edf16e4d62a52bd..23b1faaaa4cc5f83c263ffbc9a0f14733c0dcae2 100644 (file)
@@ -1916,7 +1916,7 @@ int cmd_kmem(int argc, const char **argv, const char *prefix __maybe_unused)
                if (!perf_evlist__find_tracepoint_by_name(session->evlist,
                                                          "kmem:kmalloc")) {
                        pr_err(errmsg, "slab", "slab");
-                       return -1;
+                       goto out_delete;
                }
        }
 
@@ -1927,7 +1927,7 @@ int cmd_kmem(int argc, const char **argv, const char *prefix __maybe_unused)
                                                             "kmem:mm_page_alloc");
                if (evsel == NULL) {
                        pr_err(errmsg, "page", "page");
-                       return -1;
+                       goto out_delete;
                }
 
                kmem_page_size = pevent_get_page_size(evsel->tp_format->pevent);
index 74878cd75078055e437396fc9a6b201603586076..fc1cffb1b7a28c9b0d9856770530a0e8b3bf9606 100644 (file)
@@ -1061,8 +1061,10 @@ static int read_events(struct perf_kvm_stat *kvm)
 
        symbol__init(&kvm->session->header.env);
 
-       if (!perf_session__has_traces(kvm->session, "kvm record"))
-               return -EINVAL;
+       if (!perf_session__has_traces(kvm->session, "kvm record")) {
+               ret = -EINVAL;
+               goto out_delete;
+       }
 
        /*
         * Do not use 'isa' recorded in kvm_exit tracepoint since it is not
@@ -1070,9 +1072,13 @@ static int read_events(struct perf_kvm_stat *kvm)
         */
        ret = cpu_isa_config(kvm);
        if (ret < 0)
-               return ret;
+               goto out_delete;
 
-       return perf_session__process_events(kvm->session);
+       ret = perf_session__process_events(kvm->session);
+
+out_delete:
+       perf_session__delete(kvm->session);
+       return ret;
 }
 
 static int parse_target_str(struct perf_kvm_stat *kvm)
index da2ec06f0742dc6acf98c1c9b74d7cf45ff0fcb2..80170aace5d4c893b99a842427759c8f5766066e 100644 (file)
@@ -124,7 +124,6 @@ static int report_raw_events(struct perf_mem *mem)
                .mode = PERF_DATA_MODE_READ,
                .force = mem->force,
        };
-       int err = -EINVAL;
        int ret;
        struct perf_session *session = perf_session__new(&file, false,
                                                         &mem->tool);
@@ -135,24 +134,21 @@ static int report_raw_events(struct perf_mem *mem)
        if (mem->cpu_list) {
                ret = perf_session__cpu_bitmap(session, mem->cpu_list,
                                               mem->cpu_bitmap);
-               if (ret)
+               if (ret < 0)
                        goto out_delete;
        }
 
-       if (symbol__init(&session->header.env) < 0)
-               return -1;
+       ret = symbol__init(&session->header.env);
+       if (ret < 0)
+               goto out_delete;
 
        printf("# PID, TID, IP, ADDR, LOCAL WEIGHT, DSRC, SYMBOL\n");
 
-       err = perf_session__process_events(session);
-       if (err)
-               return err;
-
-       return 0;
+       ret = perf_session__process_events(session);
 
 out_delete:
        perf_session__delete(session);
-       return err;
+       return ret;
 }
 
 static int report_events(int argc, const char **argv, struct perf_mem *mem)
index 32626ea3e2276b11279db88207e42c29eeed391a..95a47719aec302318defcb8de17fd8789012b819 100644 (file)
@@ -742,6 +742,17 @@ int cmd_report(int argc, const char **argv, const char *prefix __maybe_unused)
 
        argc = parse_options(argc, argv, options, report_usage, 0);
 
+       if (symbol_conf.vmlinux_name &&
+           access(symbol_conf.vmlinux_name, R_OK)) {
+               pr_err("Invalid file: %s\n", symbol_conf.vmlinux_name);
+               return -EINVAL;
+       }
+       if (symbol_conf.kallsyms_name &&
+           access(symbol_conf.kallsyms_name, R_OK)) {
+               pr_err("Invalid file: %s\n", symbol_conf.kallsyms_name);
+               return -EINVAL;
+       }
+
        if (report.use_stdio)
                use_browser = 0;
        else if (report.use_tui)
@@ -828,8 +839,10 @@ repeat:
        if (report.header || report.header_only) {
                perf_session__fprintf_info(session, stdout,
                                           report.show_full_info);
-               if (report.header_only)
-                       return 0;
+               if (report.header_only) {
+                       ret = 0;
+                       goto error;
+               }
        } else if (use_browser == 0) {
                fputs("# To display the perf.data header info, please use --header/--header-only options.\n#\n",
                      stdout);
index fcf99bdeb19e1cf73c54e0b6edc6d4426dfe1f48..37e301a32f437eb6004ba4f4ccc6b90c73b9aa5c 100644 (file)
 #define CNTR_NOT_SUPPORTED     "<not supported>"
 #define CNTR_NOT_COUNTED       "<not counted>"
 
-static void print_stat(int argc, const char **argv);
-static void print_counter_aggr(struct perf_evsel *counter, char *prefix);
-static void print_counter(struct perf_evsel *counter, char *prefix);
-static void print_aggr(char *prefix);
+static void print_counters(struct timespec *ts, int argc, const char **argv);
 
 /* Default events used for perf stat -T */
 static const char *transaction_attrs = {
@@ -141,96 +138,9 @@ static inline void diff_timespec(struct timespec *r, struct timespec *a,
        }
 }
 
-static inline struct cpu_map *perf_evsel__cpus(struct perf_evsel *evsel)
+static void perf_stat__reset_stats(void)
 {
-       return (evsel->cpus && !target.cpu_list) ? evsel->cpus : evsel_list->cpus;
-}
-
-static inline int perf_evsel__nr_cpus(struct perf_evsel *evsel)
-{
-       return perf_evsel__cpus(evsel)->nr;
-}
-
-static void perf_evsel__reset_stat_priv(struct perf_evsel *evsel)
-{
-       int i;
-       struct perf_stat *ps = evsel->priv;
-
-       for (i = 0; i < 3; i++)
-               init_stats(&ps->res_stats[i]);
-
-       perf_stat_evsel_id_init(evsel);
-}
-
-static int perf_evsel__alloc_stat_priv(struct perf_evsel *evsel)
-{
-       evsel->priv = zalloc(sizeof(struct perf_stat));
-       if (evsel->priv == NULL)
-               return -ENOMEM;
-       perf_evsel__reset_stat_priv(evsel);
-       return 0;
-}
-
-static void perf_evsel__free_stat_priv(struct perf_evsel *evsel)
-{
-       zfree(&evsel->priv);
-}
-
-static int perf_evsel__alloc_prev_raw_counts(struct perf_evsel *evsel)
-{
-       struct perf_counts *counts;
-
-       counts = perf_counts__new(perf_evsel__nr_cpus(evsel));
-       if (counts)
-               evsel->prev_raw_counts = counts;
-
-       return counts ? 0 : -ENOMEM;
-}
-
-static void perf_evsel__free_prev_raw_counts(struct perf_evsel *evsel)
-{
-       perf_counts__delete(evsel->prev_raw_counts);
-       evsel->prev_raw_counts = NULL;
-}
-
-static void perf_evlist__free_stats(struct perf_evlist *evlist)
-{
-       struct perf_evsel *evsel;
-
-       evlist__for_each(evlist, evsel) {
-               perf_evsel__free_stat_priv(evsel);
-               perf_evsel__free_counts(evsel);
-               perf_evsel__free_prev_raw_counts(evsel);
-       }
-}
-
-static int perf_evlist__alloc_stats(struct perf_evlist *evlist, bool alloc_raw)
-{
-       struct perf_evsel *evsel;
-
-       evlist__for_each(evlist, evsel) {
-               if (perf_evsel__alloc_stat_priv(evsel) < 0 ||
-                   perf_evsel__alloc_counts(evsel, perf_evsel__nr_cpus(evsel)) < 0 ||
-                   (alloc_raw && perf_evsel__alloc_prev_raw_counts(evsel) < 0))
-                       goto out_free;
-       }
-
-       return 0;
-
-out_free:
-       perf_evlist__free_stats(evlist);
-       return -1;
-}
-
-static void perf_stat__reset_stats(struct perf_evlist *evlist)
-{
-       struct perf_evsel *evsel;
-
-       evlist__for_each(evlist, evsel) {
-               perf_evsel__reset_stat_priv(evsel);
-               perf_evsel__reset_counts(evsel, perf_evsel__nr_cpus(evsel));
-       }
-
+       perf_evlist__reset_stats(evsel_list);
        perf_stat__reset_shadow_stats();
 }
 
@@ -304,8 +214,9 @@ static int check_per_pkg(struct perf_evsel *counter, int cpu, bool *skip)
        return 0;
 }
 
-static int read_cb(struct perf_evsel *evsel, int cpu, int thread __maybe_unused,
-                  struct perf_counts_values *count)
+static int
+process_counter_values(struct perf_evsel *evsel, int cpu, int thread,
+                      struct perf_counts_values *count)
 {
        struct perf_counts_values *aggr = &evsel->counts->aggr;
        static struct perf_counts_values zero;
@@ -320,13 +231,13 @@ static int read_cb(struct perf_evsel *evsel, int cpu, int thread __maybe_unused,
                count = &zero;
 
        switch (aggr_mode) {
+       case AGGR_THREAD:
        case AGGR_CORE:
        case AGGR_SOCKET:
        case AGGR_NONE:
                if (!evsel->snapshot)
-                       perf_evsel__compute_deltas(evsel, cpu, count);
+                       perf_evsel__compute_deltas(evsel, cpu, thread, count);
                perf_counts_values__scale(count, scale, NULL);
-               evsel->counts->cpu[cpu] = *count;
                if (aggr_mode == AGGR_NONE)
                        perf_stat__update_shadow_stats(evsel, count->values, cpu);
                break;
@@ -343,26 +254,48 @@ static int read_cb(struct perf_evsel *evsel, int cpu, int thread __maybe_unused,
        return 0;
 }
 
-static int read_counter(struct perf_evsel *counter);
+static int process_counter_maps(struct perf_evsel *counter)
+{
+       int nthreads = thread_map__nr(counter->threads);
+       int ncpus = perf_evsel__nr_cpus(counter);
+       int cpu, thread;
 
-/*
- * Read out the results of a single counter:
- * aggregate counts across CPUs in system-wide mode
- */
-static int read_counter_aggr(struct perf_evsel *counter)
+       if (counter->system_wide)
+               nthreads = 1;
+
+       for (thread = 0; thread < nthreads; thread++) {
+               for (cpu = 0; cpu < ncpus; cpu++) {
+                       if (process_counter_values(counter, cpu, thread,
+                                                  perf_counts(counter->counts, cpu, thread)))
+                               return -1;
+               }
+       }
+
+       return 0;
+}
+
+static int process_counter(struct perf_evsel *counter)
 {
        struct perf_counts_values *aggr = &counter->counts->aggr;
        struct perf_stat *ps = counter->priv;
        u64 *count = counter->counts->aggr.values;
-       int i;
+       int i, ret;
 
        aggr->val = aggr->ena = aggr->run = 0;
+       init_stats(ps->res_stats);
 
-       if (read_counter(counter))
-               return -1;
+       if (counter->per_pkg)
+               zero_per_pkg(counter);
+
+       ret = process_counter_maps(counter);
+       if (ret)
+               return ret;
+
+       if (aggr_mode != AGGR_GLOBAL)
+               return 0;
 
        if (!counter->snapshot)
-               perf_evsel__compute_deltas(counter, -1, aggr);
+               perf_evsel__compute_deltas(counter, -1, -1, aggr);
        perf_counts_values__scale(aggr, scale, &counter->counts->scaled);
 
        for (i = 0; i < 3; i++)
@@ -397,12 +330,12 @@ static int read_counter(struct perf_evsel *counter)
        if (counter->system_wide)
                nthreads = 1;
 
-       if (counter->per_pkg)
-               zero_per_pkg(counter);
-
        for (thread = 0; thread < nthreads; thread++) {
                for (cpu = 0; cpu < ncpus; cpu++) {
-                       if (perf_evsel__read_cb(counter, cpu, thread, read_cb))
+                       struct perf_counts_values *count;
+
+                       count = perf_counts(counter->counts, cpu, thread);
+                       if (perf_evsel__read(counter, cpu, thread, count))
                                return -1;
                }
        }
@@ -410,68 +343,34 @@ static int read_counter(struct perf_evsel *counter)
        return 0;
 }
 
-static void print_interval(void)
+static void read_counters(bool close)
 {
-       static int num_print_interval;
        struct perf_evsel *counter;
-       struct perf_stat *ps;
-       struct timespec ts, rs;
-       char prefix[64];
 
-       if (aggr_mode == AGGR_GLOBAL) {
-               evlist__for_each(evsel_list, counter) {
-                       ps = counter->priv;
-                       memset(ps->res_stats, 0, sizeof(ps->res_stats));
-                       read_counter_aggr(counter);
-               }
-       } else  {
-               evlist__for_each(evsel_list, counter) {
-                       ps = counter->priv;
-                       memset(ps->res_stats, 0, sizeof(ps->res_stats));
-                       read_counter(counter);
-               }
-       }
+       evlist__for_each(evsel_list, counter) {
+               if (read_counter(counter))
+                       pr_warning("failed to read counter %s\n", counter->name);
 
-       clock_gettime(CLOCK_MONOTONIC, &ts);
-       diff_timespec(&rs, &ts, &ref_time);
-       sprintf(prefix, "%6lu.%09lu%s", rs.tv_sec, rs.tv_nsec, csv_sep);
+               if (process_counter(counter))
+                       pr_warning("failed to process counter %s\n", counter->name);
 
-       if (num_print_interval == 0 && !csv_output) {
-               switch (aggr_mode) {
-               case AGGR_SOCKET:
-                       fprintf(output, "#           time socket cpus             counts %*s events\n", unit_width, "unit");
-                       break;
-               case AGGR_CORE:
-                       fprintf(output, "#           time core         cpus             counts %*s events\n", unit_width, "unit");
-                       break;
-               case AGGR_NONE:
-                       fprintf(output, "#           time CPU                counts %*s events\n", unit_width, "unit");
-                       break;
-               case AGGR_GLOBAL:
-               default:
-                       fprintf(output, "#           time             counts %*s events\n", unit_width, "unit");
+               if (close) {
+                       perf_evsel__close_fd(counter, perf_evsel__nr_cpus(counter),
+                                            thread_map__nr(evsel_list->threads));
                }
        }
+}
 
-       if (++num_print_interval == 25)
-               num_print_interval = 0;
+static void process_interval(void)
+{
+       struct timespec ts, rs;
 
-       switch (aggr_mode) {
-       case AGGR_CORE:
-       case AGGR_SOCKET:
-               print_aggr(prefix);
-               break;
-       case AGGR_NONE:
-               evlist__for_each(evsel_list, counter)
-                       print_counter(counter, prefix);
-               break;
-       case AGGR_GLOBAL:
-       default:
-               evlist__for_each(evsel_list, counter)
-                       print_counter_aggr(counter, prefix);
-       }
+       read_counters(false);
 
-       fflush(output);
+       clock_gettime(CLOCK_MONOTONIC, &ts);
+       diff_timespec(&rs, &ts, &ref_time);
+
+       print_counters(&rs, 0, NULL);
 }
 
 static void handle_initial_delay(void)
@@ -586,7 +485,7 @@ static int __run_perf_stat(int argc, const char **argv)
                if (interval) {
                        while (!waitpid(child_pid, &status, WNOHANG)) {
                                nanosleep(&ts, NULL);
-                               print_interval();
+                               process_interval();
                        }
                }
                wait(&status);
@@ -604,7 +503,7 @@ static int __run_perf_stat(int argc, const char **argv)
                while (!done) {
                        nanosleep(&ts, NULL);
                        if (interval)
-                               print_interval();
+                               process_interval();
                }
        }
 
@@ -612,18 +511,7 @@ static int __run_perf_stat(int argc, const char **argv)
 
        update_stats(&walltime_nsecs_stats, t1 - t0);
 
-       if (aggr_mode == AGGR_GLOBAL) {
-               evlist__for_each(evsel_list, counter) {
-                       read_counter_aggr(counter);
-                       perf_evsel__close_fd(counter, perf_evsel__nr_cpus(counter),
-                                            thread_map__nr(evsel_list->threads));
-               }
-       } else {
-               evlist__for_each(evsel_list, counter) {
-                       read_counter(counter);
-                       perf_evsel__close_fd(counter, perf_evsel__nr_cpus(counter), 1);
-               }
-       }
+       read_counters(true);
 
        return WEXITSTATUS(status);
 }
@@ -715,6 +603,14 @@ static void aggr_printout(struct perf_evsel *evsel, int id, int nr)
                        csv_output ? 0 : -4,
                        perf_evsel__cpus(evsel)->map[id], csv_sep);
                break;
+       case AGGR_THREAD:
+               fprintf(output, "%*s-%*d%s",
+                       csv_output ? 0 : 16,
+                       thread_map__comm(evsel->threads, id),
+                       csv_output ? 0 : -8,
+                       thread_map__pid(evsel->threads, id),
+                       csv_sep);
+               break;
        case AGGR_GLOBAL:
        default:
                break;
@@ -815,9 +711,9 @@ static void print_aggr(char *prefix)
                                s2 = aggr_get_id(evsel_list->cpus, cpu2);
                                if (s2 != id)
                                        continue;
-                               val += counter->counts->cpu[cpu].val;
-                               ena += counter->counts->cpu[cpu].ena;
-                               run += counter->counts->cpu[cpu].run;
+                               val += perf_counts(counter->counts, cpu, 0)->val;
+                               ena += perf_counts(counter->counts, cpu, 0)->ena;
+                               run += perf_counts(counter->counts, cpu, 0)->run;
                                nr++;
                        }
                        if (prefix)
@@ -863,6 +759,40 @@ static void print_aggr(char *prefix)
        }
 }
 
+static void print_aggr_thread(struct perf_evsel *counter, char *prefix)
+{
+       int nthreads = thread_map__nr(counter->threads);
+       int ncpus = cpu_map__nr(counter->cpus);
+       int cpu, thread;
+       double uval;
+
+       for (thread = 0; thread < nthreads; thread++) {
+               u64 ena = 0, run = 0, val = 0;
+
+               for (cpu = 0; cpu < ncpus; cpu++) {
+                       val += perf_counts(counter->counts, cpu, thread)->val;
+                       ena += perf_counts(counter->counts, cpu, thread)->ena;
+                       run += perf_counts(counter->counts, cpu, thread)->run;
+               }
+
+               if (prefix)
+                       fprintf(output, "%s", prefix);
+
+               uval = val * counter->scale;
+
+               if (nsec_counter(counter))
+                       nsec_printout(thread, 0, counter, uval);
+               else
+                       abs_printout(thread, 0, counter, uval);
+
+               if (!csv_output)
+                       print_noise(counter, 1.0);
+
+               print_running(run, ena);
+               fputc('\n', output);
+       }
+}
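
The new per-thread aggregation, like the other hunks in this file, reads counts through the (cpu, thread)-indexed perf_counts() accessor that replaces the old counter->counts->cpu[cpu] array. A rough sketch of the idiom, not part of this patch; counter_total() is an invented helper, the accessors are the ones used throughout this diff.

static u64 counter_total(struct perf_evsel *counter)
{
        int ncpus = perf_evsel__nr_cpus(counter);
        int nthreads = thread_map__nr(counter->threads);
        u64 total = 0;
        int cpu, thread;

        /* Sum the raw values over every (cpu, thread) cell of the counter. */
        for (cpu = 0; cpu < ncpus; cpu++)
                for (thread = 0; thread < nthreads; thread++)
                        total += perf_counts(counter->counts, cpu, thread)->val;

        return total;
}
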
+
 /*
  * Print out the results of a single counter:
  * aggregated counts in system-wide mode
@@ -925,9 +855,9 @@ static void print_counter(struct perf_evsel *counter, char *prefix)
        int cpu;
 
        for (cpu = 0; cpu < perf_evsel__nr_cpus(counter); cpu++) {
-               val = counter->counts->cpu[cpu].val;
-               ena = counter->counts->cpu[cpu].ena;
-               run = counter->counts->cpu[cpu].run;
+               val = perf_counts(counter->counts, cpu, 0)->val;
+               ena = perf_counts(counter->counts, cpu, 0)->ena;
+               run = perf_counts(counter->counts, cpu, 0)->run;
 
                if (prefix)
                        fprintf(output, "%s", prefix);
@@ -972,9 +902,38 @@ static void print_counter(struct perf_evsel *counter, char *prefix)
        }
 }
 
-static void print_stat(int argc, const char **argv)
+static void print_interval(char *prefix, struct timespec *ts)
+{
+       static int num_print_interval;
+
+       sprintf(prefix, "%6lu.%09lu%s", ts->tv_sec, ts->tv_nsec, csv_sep);
+
+       if (num_print_interval == 0 && !csv_output) {
+               switch (aggr_mode) {
+               case AGGR_SOCKET:
+                       fprintf(output, "#           time socket cpus             counts %*s events\n", unit_width, "unit");
+                       break;
+               case AGGR_CORE:
+                       fprintf(output, "#           time core         cpus             counts %*s events\n", unit_width, "unit");
+                       break;
+               case AGGR_NONE:
+                       fprintf(output, "#           time CPU                counts %*s events\n", unit_width, "unit");
+                       break;
+               case AGGR_THREAD:
+                       fprintf(output, "#           time             comm-pid                  counts %*s events\n", unit_width, "unit");
+                       break;
+               case AGGR_GLOBAL:
+               default:
+                       fprintf(output, "#           time             counts %*s events\n", unit_width, "unit");
+               }
+       }
+
+       if (++num_print_interval == 25)
+               num_print_interval = 0;
+}
+
+static void print_header(int argc, const char **argv)
 {
-       struct perf_evsel *counter;
        int i;
 
        fflush(stdout);
@@ -1000,36 +959,57 @@ static void print_stat(int argc, const char **argv)
                        fprintf(output, " (%d runs)", run_count);
                fprintf(output, ":\n\n");
        }
+}
+
+static void print_footer(void)
+{
+       if (!null_run)
+               fprintf(output, "\n");
+       fprintf(output, " %17.9f seconds time elapsed",
+                       avg_stats(&walltime_nsecs_stats)/1e9);
+       if (run_count > 1) {
+               fprintf(output, "                                        ");
+               print_noise_pct(stddev_stats(&walltime_nsecs_stats),
+                               avg_stats(&walltime_nsecs_stats));
+       }
+       fprintf(output, "\n\n");
+}
+
+static void print_counters(struct timespec *ts, int argc, const char **argv)
+{
+       struct perf_evsel *counter;
+       char buf[64], *prefix = NULL;
+
+       if (interval)
+               print_interval(prefix = buf, ts);
+       else
+               print_header(argc, argv);
 
        switch (aggr_mode) {
        case AGGR_CORE:
        case AGGR_SOCKET:
-               print_aggr(NULL);
+               print_aggr(prefix);
+               break;
+       case AGGR_THREAD:
+               evlist__for_each(evsel_list, counter)
+                       print_aggr_thread(counter, prefix);
                break;
        case AGGR_GLOBAL:
                evlist__for_each(evsel_list, counter)
-                       print_counter_aggr(counter, NULL);
+                       print_counter_aggr(counter, prefix);
                break;
        case AGGR_NONE:
                evlist__for_each(evsel_list, counter)
-                       print_counter(counter, NULL);
+                       print_counter(counter, prefix);
                break;
        default:
                break;
        }
 
-       if (!csv_output) {
-               if (!null_run)
-                       fprintf(output, "\n");
-               fprintf(output, " %17.9f seconds time elapsed",
-                               avg_stats(&walltime_nsecs_stats)/1e9);
-               if (run_count > 1) {
-                       fprintf(output, "                                        ");
-                       print_noise_pct(stddev_stats(&walltime_nsecs_stats),
-                                       avg_stats(&walltime_nsecs_stats));
-               }
-               fprintf(output, "\n\n");
-       }
+       if (!interval && !csv_output)
+               print_footer();
+
+       fflush(output);
 }
 
 static volatile int signr = -1;
@@ -1101,6 +1081,7 @@ static int perf_stat_init_aggr_mode(void)
                break;
        case AGGR_NONE:
        case AGGR_GLOBAL:
+       case AGGR_THREAD:
        default:
                break;
        }
@@ -1325,6 +1306,8 @@ int cmd_stat(int argc, const char **argv, const char *prefix __maybe_unused)
                     "aggregate counts per processor socket", AGGR_SOCKET),
        OPT_SET_UINT(0, "per-core", &aggr_mode,
                     "aggregate counts per physical processor core", AGGR_CORE),
+       OPT_SET_UINT(0, "per-thread", &aggr_mode,
+                    "aggregate counts per thread", AGGR_THREAD),
        OPT_UINTEGER('D', "delay", &initial_delay,
                     "ms to wait before starting measurement after program start"),
        OPT_END()
@@ -1416,8 +1399,19 @@ int cmd_stat(int argc, const char **argv, const char *prefix __maybe_unused)
                run_count = 1;
        }
 
-       /* no_aggr, cgroup are for system-wide only */
-       if ((aggr_mode != AGGR_GLOBAL || nr_cgroups) &&
+       if ((aggr_mode == AGGR_THREAD) && !target__has_task(&target)) {
+               fprintf(stderr, "The --per-thread option is only available "
+                       "when monitoring via the -p or -t options.\n");
+               parse_options_usage(NULL, options, "p", 1);
+               parse_options_usage(NULL, options, "t", 1);
+               goto out;
+       }
+
+       /*
+        * no_aggr, cgroup are for system-wide only
+        * --per-thread is aggregated per thread; we don't mix it with cpu mode
+        */
+       if (((aggr_mode != AGGR_GLOBAL && aggr_mode != AGGR_THREAD) || nr_cgroups) &&
            !target__has_cpu(&target)) {
                fprintf(stderr, "both cgroup and no-aggregation "
                        "modes only available in system-wide mode\n");
@@ -1445,6 +1439,14 @@ int cmd_stat(int argc, const char **argv, const char *prefix __maybe_unused)
                }
                goto out;
        }
+
+       /*
+        * Initialize thread_map with comm names,
+        * so we can print them in the output.
+        */
+       if (aggr_mode == AGGR_THREAD)
+               thread_map__read_comms(evsel_list->threads);
+
        if (interval && interval < 100) {
                pr_err("print interval must be >= 100ms\n");
                parse_options_usage(stat_usage, options, "I", 1);
@@ -1478,13 +1480,13 @@ int cmd_stat(int argc, const char **argv, const char *prefix __maybe_unused)
 
                status = run_perf_stat(argc, argv);
                if (forever && status != -1) {
-                       print_stat(argc, argv);
-                       perf_stat__reset_stats(evsel_list);
+                       print_counters(NULL, argc, argv);
+                       perf_stat__reset_stats();
                }
        }
 
        if (!forever && status != -1 && !interval)
-               print_stat(argc, argv);
+               print_counters(NULL, argc, argv);
 
        perf_evlist__free_stats(evsel_list);
 out:
index 619a8696fda7c939cd0e6497abab5845813bda12..ecf319728f25d649768e33b3e1f274d04432f3fc 100644 (file)
@@ -586,27 +586,9 @@ static void *display_thread_tui(void *arg)
                hists->uid_filter_str = top->record_opts.target.uid_str;
        }
 
-       while (true)  {
-               int key = perf_evlist__tui_browse_hists(top->evlist, help, &hbt,
-                                                       top->min_percent,
-                                                       &top->session->header.env);
-
-               if (key != 'f')
-                       break;
-
-               perf_evlist__toggle_enable(top->evlist);
-               /*
-                * No need to refresh, resort/decay histogram entries
-                * if we are not collecting samples:
-                */
-               if (top->evlist->enabled) {
-                       hbt.refresh = top->delay_secs;
-                       help = "Press 'f' to disable the events or 'h' to see other hotkeys";
-               } else {
-                       help = "Press 'f' again to re-enable the events";
-                       hbt.refresh = 0;
-               }
-       }
+       perf_evlist__tui_browse_hists(top->evlist, help, &hbt,
+                                     top->min_percent,
+                                     &top->session->header.env);
 
        done = 1;
        return NULL;
index de5d277d1ad7cb97cac2c5da67032fc8a12ffdf6..39ad4d0ca88427cb222fa3ce7c0a35c7b77ed243 100644 (file)
@@ -1617,6 +1617,34 @@ static int trace__read_syscall_info(struct trace *trace, int id)
        return syscall__set_arg_fmts(sc);
 }
 
+static int trace__validate_ev_qualifier(struct trace *trace)
+{
+       int err = 0;
+       struct str_node *pos;
+
+       strlist__for_each(pos, trace->ev_qualifier) {
+               const char *sc = pos->s;
+
+               if (audit_name_to_syscall(sc, trace->audit.machine) < 0) {
+                       if (err == 0) {
+                               fputs("Error:\tInvalid syscall ", trace->output);
+                               err = -EINVAL;
+                       } else {
+                               fputs(", ", trace->output);
+                       }
+
+                       fputs(sc, trace->output);
+               }
+       }
+
+       if (err < 0) {
+               fputs("\nHint:\ttry 'perf list syscalls:sys_enter_*'"
+                     "\nHint:\tand: 'man syscalls'\n", trace->output);
+       }
+
+       return err;
+}
+
 /*
  * args is to be interpreted as a series of longs but we need to handle
  * 8-byte unaligned accesses. args points to raw_data within the event
@@ -2325,7 +2353,7 @@ static int trace__run(struct trace *trace, int argc, const char **argv)
         */
        if (trace->filter_pids.nr > 0)
                err = perf_evlist__set_filter_pids(evlist, trace->filter_pids.nr, trace->filter_pids.entries);
-       else if (evlist->threads->map[0] == -1)
+       else if (thread_map__pid(evlist->threads, 0) == -1)
                err = perf_evlist__set_filter_pid(evlist, getpid());
 
        if (err < 0) {
@@ -2343,7 +2371,7 @@ static int trace__run(struct trace *trace, int argc, const char **argv)
        if (forks)
                perf_evlist__start_workload(evlist);
 
-       trace->multiple_threads = evlist->threads->map[0] == -1 ||
+       trace->multiple_threads = thread_map__pid(evlist->threads, 0) == -1 ||
                                  evlist->threads->nr > 1 ||
                                  perf_evlist__first(evlist)->attr.inherit;
 again:
@@ -2862,6 +2890,10 @@ int cmd_trace(int argc, const char **argv, const char *prefix __maybe_unused)
                        err = -ENOMEM;
                        goto out_close;
                }
+
+               err = trace__validate_ev_qualifier(&trace);
+               if (err)
+                       goto out_close;
        }
 
        err = target__validate(&trace.opts.target);
index 317001c946608be1430d03d485fe19586f1e0446..094ddaee104c73d7caae22d851d79629c4715cd3 100644 (file)
@@ -11,9 +11,9 @@ ifneq ($(obj-perf),)
 obj-perf := $(abspath $(obj-perf))/
 endif
 
-$(shell echo -n > .config-detected)
-detected     = $(shell echo "$(1)=y"       >> .config-detected)
-detected_var = $(shell echo "$(1)=$($(1))" >> .config-detected)
+$(shell echo -n > $(OUTPUT).config-detected)
+detected     = $(shell echo "$(1)=y"       >> $(OUTPUT).config-detected)
+detected_var = $(shell echo "$(1)=$($(1))" >> $(OUTPUT).config-detected)
 
 CFLAGS := $(EXTRA_CFLAGS) $(EXTRA_WARNINGS)
 
index ee41e705b2eba7b726e417a1fdd73a421b7a985b..d20d6e6ab65beb1d433e06a769cd09510e4ced9b 100644 (file)
@@ -31,6 +31,7 @@ perf-y += code-reading.o
 perf-y += sample-parsing.o
 perf-y += parse-no-sample-id-all.o
 perf-y += kmod-path.o
+perf-y += thread-map.o
 
 perf-$(CONFIG_X86) += perf-time-to-tsc.o
 
index 87b9961646e4a5f08728e694549195d9eeb2ad86..c1dde733c3a6b594ccab52a2a0077c3504c95fc2 100644 (file)
@@ -170,6 +170,10 @@ static struct test {
                .desc = "Test kmod_path__parse function",
                .func = test__kmod_path__parse,
        },
+       {
+               .desc = "Test thread map",
+               .func = test__thread_map,
+       },
        {
                .func = NULL,
        },
index 22f8a00446e1f1b3cb6b447dbc1bc21ccfda3108..39c784a100a955143e401fc46cbcb93a2a3d8136 100644 (file)
@@ -545,8 +545,8 @@ out_err:
        if (evlist) {
                perf_evlist__delete(evlist);
        } else {
-               cpu_map__delete(cpus);
-               thread_map__delete(threads);
+               cpu_map__put(cpus);
+               thread_map__put(threads);
        }
        machines__destroy_kernel_maps(&machines);
        machine__delete_threads(machine);
index 5b171d1e338bdd26bcf1343f58e8b0bdb314b71c..4d4b9837b630ae9fd72cb0a404a4bae0e3864e14 100644 (file)
@@ -144,8 +144,8 @@ out_err:
                perf_evlist__disable(evlist);
                perf_evlist__delete(evlist);
        } else {
-               cpu_map__delete(cpus);
-               thread_map__delete(threads);
+               cpu_map__put(cpus);
+               thread_map__put(threads);
        }
 
        return err;
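
These hunks are mechanical fallout from making cpu_map and thread_map reference counted: owners now drop their reference with the *__put() helpers instead of destroying the map outright. A rough usage sketch, not part of this patch; the constructor names are the ones already used elsewhere in tools/perf, and both put() helpers are assumed to tolerate NULL.

#include <unistd.h>
#include "cpumap.h"
#include "thread_map.h"

static int maps_example(void)
{
        struct cpu_map *cpus = cpu_map__new(NULL);              /* all online CPUs */
        struct thread_map *threads = thread_map__new_by_tid(getpid());
        int err = -1;

        if (!cpus || !threads)
                goto out;

        /* ... hand references to an evlist, open counters, read them ... */
        err = 0;
out:
        cpu_map__put(cpus);             /* drops our reference, not necessarily the map */
        thread_map__put(threads);
        return err;
}
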
index 65280d28662e4c72177a20a3cfa56f968a10c359..729112f4cfaaaa66b91bcacc57c8a1d9af49203d 100644 (file)
@@ -1,5 +1,16 @@
+ifndef MK
+ifeq ($(MAKECMDGOALS),)
+# no target specified, trigger the whole suite
+all:
+       @echo "Testing Makefile";      $(MAKE) -sf tests/make MK=Makefile
+       @echo "Testing Makefile.perf"; $(MAKE) -sf tests/make MK=Makefile.perf
+else
+# run only specific test over 'Makefile'
+%:
+       @echo "Testing Makefile";      $(MAKE) -sf tests/make MK=Makefile $@
+endif
+else
 PERF := .
-MK   := Makefile
 
 include config/Makefile.arch
 
@@ -47,6 +58,7 @@ make_install_man    := install-man
 make_install_html   := install-html
 make_install_info   := install-info
 make_install_pdf    := install-pdf
+make_install_prefix := install prefix=/tmp/krava
 make_static         := LDFLAGS=-static
 
 # all the NO_* variable combined
@@ -57,7 +69,12 @@ make_minimal        += NO_LIBDW_DWARF_UNWIND=1 NO_AUXTRACE=1
 
 # $(run) contains all available tests
 run := make_pure
+# Targets 'clean all' can be run together only through top level
+# Makefile because we detect the clean target in Makefile.perf and
+# disable feature detection
+ifeq ($(MK),Makefile)
 run += make_clean_all
+endif
 run += make_python_perf_so
 run += make_debug
 run += make_no_libperl
@@ -83,6 +100,7 @@ run += make_util_map_o
 run += make_util_pmu_bison_o
 run += make_install
 run += make_install_bin
+run += make_install_prefix
 # FIXME 'install-*' commented out till they're fixed
 # run += make_install_doc
 # run += make_install_man
@@ -157,6 +175,12 @@ test_make_install_O     := $(call test_dest_files,$(installed_files_all))
 test_make_install_bin   := $(call test_dest_files,$(installed_files_bin))
 test_make_install_bin_O := $(call test_dest_files,$(installed_files_bin))
 
+# We prefix all installed files for make_install_prefix
+# with '/tmp/krava' to match installed/prefix-ed files.
+installed_files_all_prefix := $(addprefix /tmp/krava/,$(installed_files_all))
+test_make_install_prefix   := $(call test_dest_files,$(installed_files_all_prefix))
+test_make_install_prefix_O := $(call test_dest_files,$(installed_files_all_prefix))
+
 # FIXME nothing gets installed
 test_make_install_man    := test -f $$TMP_DEST/share/man/man1/perf.1
 test_make_install_man_O  := $(test_make_install_man)
@@ -226,13 +250,13 @@ tarpkg:
        ( eval $$cmd ) >> $@ 2>&1
 
 make_kernelsrc:
-       @echo " - make -C <kernelsrc> tools/perf"
+       @echo "- make -C <kernelsrc> tools/perf"
        $(call clean); \
        (make -C ../.. tools/perf) > $@ 2>&1 && \
        test -x perf && rm -f $@ || (cat $@ ; false)
 
 make_kernelsrc_tools:
-       @echo " - make -C <kernelsrc>/tools perf"
+       @echo "- make -C <kernelsrc>/tools perf"
        $(call clean); \
        (make -C ../../tools perf) > $@ 2>&1 && \
        test -x perf && rm -f $@ || (cat $@ ; false)
@@ -244,3 +268,4 @@ out: $(run_O)
        @echo OK
 
 .PHONY: all $(run) $(run_O) tarpkg clean
+endif # ifndef MK
index 5855cf47121003479ae63e859059a5ad8809c5ec..666b67a4df9dd0d8ea8d41736af4355008c11464 100644 (file)
@@ -140,8 +140,8 @@ out_delete_evlist:
        cpus    = NULL;
        threads = NULL;
 out_free_cpus:
-       cpu_map__delete(cpus);
+       cpu_map__put(cpus);
 out_free_threads:
-       thread_map__delete(threads);
+       thread_map__put(threads);
        return err;
 }
index 7f48efa7e295f63a72f0aa083857658ce68c45cb..145050e2e5446166f900f8d9405859b15bfcfff3 100644 (file)
@@ -143,7 +143,7 @@ static int synth_process(struct machine *machine)
                                                perf_event__process,
                                                machine, 0, 500);
 
-       thread_map__delete(map);
+       thread_map__put(map);
        return err;
 }
 
index 9a7a116e09b8087ef5f19e63113f6f65c1e0b5bf..a572f87e9c8d8e25665bf75e1c45b1f86ff8a6e9 100644 (file)
@@ -78,7 +78,7 @@ int test__openat_syscall_event_on_all_cpus(void)
         * we use the auto allocation it will allocate just for 1 cpu,
         * as we start by cpu 0.
         */
-       if (perf_evsel__alloc_counts(evsel, cpus->nr) < 0) {
+       if (perf_evsel__alloc_counts(evsel, cpus->nr, 1) < 0) {
                pr_debug("perf_evsel__alloc_counts(ncpus=%d)\n", cpus->nr);
                goto out_close_fd;
        }
@@ -98,9 +98,9 @@ int test__openat_syscall_event_on_all_cpus(void)
                }
 
                expected = nr_openat_calls + cpu;
-               if (evsel->counts->cpu[cpu].val != expected) {
+               if (perf_counts(evsel->counts, cpu, 0)->val != expected) {
                        pr_debug("perf_evsel__read_on_cpu: expected to intercept %d calls on cpu %d, got %" PRIu64 "\n",
-                                expected, cpus->map[cpu], evsel->counts->cpu[cpu].val);
+                                expected, cpus->map[cpu], perf_counts(evsel->counts, cpu, 0)->val);
                        err = -1;
                }
        }
@@ -111,6 +111,6 @@ out_close_fd:
 out_evsel_delete:
        perf_evsel__delete(evsel);
 out_thread_map_delete:
-       thread_map__delete(threads);
+       thread_map__put(threads);
        return err;
 }
index 6245221479d766fb9e227064bcdc42742cae65d3..01a19626c84624136d909e1c3d3e58ef877d1b39 100644 (file)
@@ -45,7 +45,7 @@ int test__syscall_openat_tp_fields(void)
 
        perf_evsel__config(evsel, &opts);
 
-       evlist->threads->map[0] = getpid();
+       thread_map__set_pid(evlist->threads, 0, getpid());
 
        err = perf_evlist__open(evlist);
        if (err < 0) {
index 9f9491bb8e4897faddcffed824b3d094f73b7d3d..c9a37bc6b33ac336a128a4948623a1a93a5b388f 100644 (file)
@@ -44,9 +44,9 @@ int test__openat_syscall_event(void)
                goto out_close_fd;
        }
 
-       if (evsel->counts->cpu[0].val != nr_openat_calls) {
+       if (perf_counts(evsel->counts, 0, 0)->val != nr_openat_calls) {
                pr_debug("perf_evsel__read_on_cpu: expected to intercept %d calls, got %" PRIu64 "\n",
-                        nr_openat_calls, evsel->counts->cpu[0].val);
+                        nr_openat_calls, perf_counts(evsel->counts, 0, 0)->val);
                goto out_close_fd;
        }
 
@@ -56,6 +56,6 @@ out_close_fd:
 out_evsel_delete:
        perf_evsel__delete(evsel);
 out_thread_map_delete:
-       thread_map__delete(threads);
+       thread_map__put(threads);
        return err;
 }
index 0d31403ea593c7d2e689056af1670a18423a39ed..e698742d4fec3300ac6c6b00bf55696414567dd4 100644 (file)
@@ -560,8 +560,8 @@ out:
                perf_evlist__disable(evlist);
                perf_evlist__delete(evlist);
        } else {
-               cpu_map__delete(cpus);
-               thread_map__delete(threads);
+               cpu_map__put(cpus);
+               thread_map__put(threads);
        }
 
        return err;
index 8e5038b48ba8dfe3313d9c508156ae9b4ecb8c5a..ebb47d96bc0b09fdf5c7b0bbba6022e209fc72ab 100644 (file)
@@ -61,6 +61,7 @@ int test__switch_tracking(void);
 int test__fdarray__filter(void);
 int test__fdarray__add(void);
 int test__kmod_path__parse(void);
+int test__thread_map(void);
 
 #if defined(__x86_64__) || defined(__i386__) || defined(__arm__) || defined(__aarch64__)
 #ifdef HAVE_DWARF_UNWIND_SUPPORT
diff --git a/tools/perf/tests/thread-map.c b/tools/perf/tests/thread-map.c
new file mode 100644 (file)
index 0000000..5acf000
--- /dev/null
@@ -0,0 +1,38 @@
+#include <sys/types.h>
+#include <unistd.h>
+#include "tests.h"
+#include "thread_map.h"
+#include "debug.h"
+
+int test__thread_map(void)
+{
+       struct thread_map *map;
+
+       /* test map on current pid */
+       map = thread_map__new_by_pid(getpid());
+       TEST_ASSERT_VAL("failed to alloc map", map);
+
+       thread_map__read_comms(map);
+
+       TEST_ASSERT_VAL("wrong nr", map->nr == 1);
+       TEST_ASSERT_VAL("wrong pid",
+                       thread_map__pid(map, 0) == getpid());
+       TEST_ASSERT_VAL("wrong comm",
+                       thread_map__comm(map, 0) &&
+                       !strcmp(thread_map__comm(map, 0), "perf"));
+       thread_map__put(map);
+
+       /* test dummy pid */
+       map = thread_map__new_dummy();
+       TEST_ASSERT_VAL("failed to alloc map", map);
+
+       thread_map__read_comms(map);
+
+       TEST_ASSERT_VAL("wrong nr", map->nr == 1);
+       TEST_ASSERT_VAL("wrong pid", thread_map__pid(map, 0) == -1);
+       TEST_ASSERT_VAL("wrong comm",
+                       thread_map__comm(map, 0) &&
+                       !strcmp(thread_map__comm(map, 0), "dummy"));
+       thread_map__put(map);
+       return 0;
+}
index c42adb6000914554bf0d109e02d9ad5cec801313..7629bef2fd791b41f64e49da53bc895e62d3dab4 100644 (file)
@@ -1902,8 +1902,23 @@ static int perf_evsel__hists_browse(struct perf_evsel *evsel, int nr_events,
                case CTRL('c'):
                        goto out_free_stack;
                case 'f':
-                       if (!is_report_browser(hbt))
-                               goto out_free_stack;
+                       if (!is_report_browser(hbt)) {
+                               struct perf_top *top = hbt->arg;
+
+                               perf_evlist__toggle_enable(top->evlist);
+                               /*
+                                * No need to refresh, resort/decay histogram
+                                * entries if we are not collecting samples:
+                                */
+                               if (top->evlist->enabled) {
+                                       helpline = "Press 'f' to disable the events or 'h' to see other hotkeys";
+                                       hbt->refresh = delay_secs;
+                               } else {
+                                       helpline = "Press 'f' again to re-enable the events";
+                                       hbt->refresh = 0;
+                               }
+                               continue;
+                       }
                        /* Fall thru */
                default:
                        helpline = "Press '?' for help on key bindings";
index 586a59d46022a9fc8901807f5c02be4e2551db25..601d11440596dfd2b1d5244f56e972487d3fecff 100644 (file)
@@ -139,7 +139,7 @@ $(OUTPUT)util/find_next_bit.o: ../lib/util/find_next_bit.c FORCE
        $(call rule_mkdir)
        $(call if_changed_dep,cc_o_c)
 
-$(OUTPUT)util/rbtree.o: ../../lib/rbtree.c FORCE
+$(OUTPUT)util/rbtree.o: ../lib/rbtree.c FORCE
        $(call rule_mkdir)
        $(call if_changed_dep,cc_o_c)
 
index df66966cfde7ab1e78f9f5da6610ad8d707dea9c..7e7405c9b9361638f649820ae369bdec64e89c20 100644 (file)
@@ -119,12 +119,12 @@ void auxtrace_mmap_params__set_idx(struct auxtrace_mmap_params *mp,
        if (per_cpu) {
                mp->cpu = evlist->cpus->map[idx];
                if (evlist->threads)
-                       mp->tid = evlist->threads->map[0];
+                       mp->tid = thread_map__pid(evlist->threads, 0);
                else
                        mp->tid = -1;
        } else {
                mp->cpu = -1;
-               mp->tid = evlist->threads->map[idx];
+               mp->tid = thread_map__pid(evlist->threads, idx);
        }
 }
 
@@ -1182,6 +1182,13 @@ static int __auxtrace_mmap__read(struct auxtrace_mmap *mm,
                data2 = NULL;
        }
 
+       if (itr->alignment) {
+               unsigned int unwanted = len1 % itr->alignment;
+
+               len1 -= unwanted;
+               size -= unwanted;
+       }
+
        /* padding must be written by fn() e.g. record__process_auxtrace() */
        padding = size & 7;
        if (padding)
index a171abbe730146c2ec6394c417250c57e3d84482..471aecbc4d684018f21d8e7696c39fe027f2430f 100644 (file)
@@ -303,6 +303,7 @@ struct auxtrace_record {
                                      const char *str);
        u64 (*reference)(struct auxtrace_record *itr);
        int (*read_finish)(struct auxtrace_record *itr, int idx);
+       unsigned int alignment;
 };
 
 #ifdef HAVE_AUXTRACE_SUPPORT
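
The two hunks above add an optional per-recorder "alignment" field and trim the copied AUX data so its length stays a multiple of that alignment. A minimal, self-contained sketch of the same arithmetic (the function and sample values below are illustrative, not part of the commit):

#include <stdio.h>

/* Drop the tail bytes so the copied length is a multiple of the
 * recorder's required alignment; alignment == 0 means "no constraint". */
static unsigned long trim_to_alignment(unsigned long len, unsigned int alignment)
{
        return alignment ? len - (len % alignment) : len;
}

int main(void)
{
        /* e.g. 4100 bytes of AUX data with a 64-byte alignment -> 4096 */
        printf("%lu\n", trim_to_alignment(4100, 64));
        return 0;
}
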
index 85b523885f9d70d3e708cb0688e4933b43a1f388..2babddaa24813102c0c9d7525ec8ab615b621775 100644 (file)
@@ -7,11 +7,15 @@
 
 static unsigned long flag = PERF_FLAG_FD_CLOEXEC;
 
+#ifdef __GLIBC_PREREQ
+#if !__GLIBC_PREREQ(2, 6)
 int __weak sched_getcpu(void)
 {
        errno = ENOSYS;
        return -1;
 }
+#endif
+#endif
 
 static int perf_flag_probe(void)
 {
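
The guard above only emits the weak ENOSYS stub when glibc is older than 2.6 (or when __GLIBC_PREREQ is not defined at all), so a modern glibc's own sched_getcpu() is used instead. A hedged, standalone sketch of how a caller can tolerate the stub's ENOSYS result (example code, not taken from the commit):

#define _GNU_SOURCE
#include <errno.h>
#include <sched.h>
#include <stdio.h>

int main(void)
{
        int cpu = sched_getcpu();

        /* The weak stub returns -1 with errno == ENOSYS; fall back to a
         * default CPU in that case instead of failing outright. */
        if (cpu < 0 && errno == ENOSYS)
                cpu = 0;

        printf("running on cpu %d\n", cpu);
        return 0;
}
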
index c4e55b71010c6b52f2be7bc6c3af053c6f4183d0..3667e2123e5b44414a483c23b239e61b955dd7f7 100644 (file)
@@ -5,6 +5,7 @@
 #include <assert.h>
 #include <stdio.h>
 #include <stdlib.h>
+#include "asm/bug.h"
 
 static struct cpu_map *cpu_map__default_new(void)
 {
@@ -22,6 +23,7 @@ static struct cpu_map *cpu_map__default_new(void)
                        cpus->map[i] = i;
 
                cpus->nr = nr_cpus;
+               atomic_set(&cpus->refcnt, 1);
        }
 
        return cpus;
@@ -35,6 +37,7 @@ static struct cpu_map *cpu_map__trim_new(int nr_cpus, int *tmp_cpus)
        if (cpus != NULL) {
                cpus->nr = nr_cpus;
                memcpy(cpus->map, tmp_cpus, payload_size);
+               atomic_set(&cpus->refcnt, 1);
        }
 
        return cpus;
@@ -194,14 +197,32 @@ struct cpu_map *cpu_map__dummy_new(void)
        if (cpus != NULL) {
                cpus->nr = 1;
                cpus->map[0] = -1;
+               atomic_set(&cpus->refcnt, 1);
        }
 
        return cpus;
 }
 
-void cpu_map__delete(struct cpu_map *map)
+static void cpu_map__delete(struct cpu_map *map)
 {
-       free(map);
+       if (map) {
+               WARN_ONCE(atomic_read(&map->refcnt) != 0,
+                         "cpu_map refcnt unbalanced\n");
+               free(map);
+       }
+}
+
+struct cpu_map *cpu_map__get(struct cpu_map *map)
+{
+       if (map)
+               atomic_inc(&map->refcnt);
+       return map;
+}
+
+void cpu_map__put(struct cpu_map *map)
+{
+       if (map && atomic_dec_and_test(&map->refcnt))
+               cpu_map__delete(map);
 }
 
 int cpu_map__get_socket(struct cpu_map *map, int idx)
@@ -263,6 +284,7 @@ static int cpu_map__build_map(struct cpu_map *cpus, struct cpu_map **res,
        /* ensure we process id in increasing order */
        qsort(c->map, c->nr, sizeof(int), cmp_ids);
 
+       atomic_set(&cpus->refcnt, 1);
        *res = c;
        return 0;
 }
index 61a6548490025e209f6232293578e662e62d610a..0af9cecb4c519d44da13d392baa9abca37ebe6ec 100644 (file)
@@ -3,18 +3,19 @@
 
 #include <stdio.h>
 #include <stdbool.h>
+#include <linux/atomic.h>
 
 #include "perf.h"
 #include "util/debug.h"
 
 struct cpu_map {
+       atomic_t refcnt;
        int nr;
        int map[];
 };
 
 struct cpu_map *cpu_map__new(const char *cpu_list);
 struct cpu_map *cpu_map__dummy_new(void);
-void cpu_map__delete(struct cpu_map *map);
 struct cpu_map *cpu_map__read(FILE *file);
 size_t cpu_map__fprintf(struct cpu_map *map, FILE *fp);
 int cpu_map__get_socket(struct cpu_map *map, int idx);
@@ -22,6 +23,9 @@ int cpu_map__get_core(struct cpu_map *map, int idx);
 int cpu_map__build_socket_map(struct cpu_map *cpus, struct cpu_map **sockp);
 int cpu_map__build_core_map(struct cpu_map *cpus, struct cpu_map **corep);
 
+struct cpu_map *cpu_map__get(struct cpu_map *map);
+void cpu_map__put(struct cpu_map *map);
+
 static inline int cpu_map__socket(struct cpu_map *sock, int s)
 {
        if (!sock || s > sock->nr || s < 0)
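
With this change a cpu_map is reference counted: the constructors start the map at a refcount of one, cpu_map__get() takes an extra reference, and cpu_map__put() drops one, freeing the map on the last put (cpu_map__delete() is now static). A minimal usage sketch, assuming it is built inside tools/perf where cpumap.h is available; the helper names below are illustrative:

#include "cpumap.h"

/* Share an existing map by taking a reference rather than copying it. */
static struct cpu_map *share_cpus(struct cpu_map *cpus)
{
        return cpu_map__get(cpus);
}

static int cpu_map_refcount_example(void)
{
        struct cpu_map *cpus = cpu_map__new(NULL);      /* refcnt == 1 */
        struct cpu_map *mine;

        if (cpus == NULL)
                return -1;

        mine = share_cpus(cpus);                        /* refcnt == 2 */

        cpu_map__put(cpus);     /* drop the creator's reference */
        cpu_map__put(mine);     /* last put frees the map */
        return 0;
}
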
index d7d986d8f23e5f890cc295b3b830470343f34063..67a977e5d0abee03a97eb74f914f9329163a6ca7 100644 (file)
@@ -504,7 +504,7 @@ int perf_event__synthesize_thread_map(struct perf_tool *tool,
        for (thread = 0; thread < threads->nr; ++thread) {
                if (__event__synthesize_thread(comm_event, mmap_event,
                                               fork_event,
-                                              threads->map[thread], 0,
+                                              thread_map__pid(threads, thread), 0,
                                               process, tool, machine,
                                               mmap_data, proc_map_timeout)) {
                        err = -1;
@@ -515,12 +515,12 @@ int perf_event__synthesize_thread_map(struct perf_tool *tool,
                 * comm.pid is set to thread group id by
                 * perf_event__synthesize_comm
                 */
-               if ((int) comm_event->comm.pid != threads->map[thread]) {
+               if ((int) comm_event->comm.pid != thread_map__pid(threads, thread)) {
                        bool need_leader = true;
 
                        /* is thread group leader in thread_map? */
                        for (j = 0; j < threads->nr; ++j) {
-                               if ((int) comm_event->comm.pid == threads->map[j]) {
+                               if ((int) comm_event->comm.pid == thread_map__pid(threads, j)) {
                                        need_leader = false;
                                        break;
                                }
index 8366511b45f8327a65dc44e80544f6b08e0df24a..6cfdee68e76398cdb2d53c6950d8c34ea969484a 100644 (file)
@@ -114,8 +114,8 @@ void perf_evlist__delete(struct perf_evlist *evlist)
 {
        perf_evlist__munmap(evlist);
        perf_evlist__close(evlist);
-       cpu_map__delete(evlist->cpus);
-       thread_map__delete(evlist->threads);
+       cpu_map__put(evlist->cpus);
+       thread_map__put(evlist->threads);
        evlist->cpus = NULL;
        evlist->threads = NULL;
        perf_evlist__purge(evlist);
@@ -548,7 +548,7 @@ static void perf_evlist__set_sid_idx(struct perf_evlist *evlist,
        else
                sid->cpu = -1;
        if (!evsel->system_wide && evlist->threads && thread >= 0)
-               sid->tid = evlist->threads->map[thread];
+               sid->tid = thread_map__pid(evlist->threads, thread);
        else
                sid->tid = -1;
 }
@@ -1101,6 +1101,31 @@ int perf_evlist__mmap(struct perf_evlist *evlist, unsigned int pages,
        return perf_evlist__mmap_ex(evlist, pages, overwrite, 0, false);
 }
 
+static int perf_evlist__propagate_maps(struct perf_evlist *evlist,
+                                      struct target *target)
+{
+       struct perf_evsel *evsel;
+
+       evlist__for_each(evlist, evsel) {
+               /*
+                * We already have cpus for evsel (via PMU sysfs) so
+                * keep it, if there's no target cpu list defined.
+                */
+               if (evsel->cpus && target->cpu_list)
+                       cpu_map__put(evsel->cpus);
+
+               if (!evsel->cpus || target->cpu_list)
+                       evsel->cpus = cpu_map__get(evlist->cpus);
+
+               evsel->threads = thread_map__get(evlist->threads);
+
+               if (!evsel->cpus || !evsel->threads)
+                       return -ENOMEM;
+       }
+
+       return 0;
+}
+
 int perf_evlist__create_maps(struct perf_evlist *evlist, struct target *target)
 {
        evlist->threads = thread_map__new_str(target->pid, target->tid,
@@ -1117,10 +1142,10 @@ int perf_evlist__create_maps(struct perf_evlist *evlist, struct target *target)
        if (evlist->cpus == NULL)
                goto out_delete_threads;
 
-       return 0;
+       return perf_evlist__propagate_maps(evlist, target);
 
 out_delete_threads:
-       thread_map__delete(evlist->threads);
+       thread_map__put(evlist->threads);
        evlist->threads = NULL;
        return -1;
 }
@@ -1353,7 +1378,7 @@ static int perf_evlist__create_syswide_maps(struct perf_evlist *evlist)
 out:
        return err;
 out_free_cpus:
-       cpu_map__delete(evlist->cpus);
+       cpu_map__put(evlist->cpus);
        evlist->cpus = NULL;
        goto out;
 }
@@ -1475,7 +1500,7 @@ int perf_evlist__prepare_workload(struct perf_evlist *evlist, struct target *tar
                                __func__, __LINE__);
                        goto out_close_pipes;
                }
-               evlist->threads->map[0] = evlist->workload.pid;
+               thread_map__set_pid(evlist->threads, 0, evlist->workload.pid);
        }
 
        close(child_ready_pipe[1]);
index a8489b9d2812baecf1ba4709ff3b6da9787adda0..037633c1da9d0c42402b908294724f92e6caddc8 100644 (file)
@@ -289,5 +289,4 @@ void perf_evlist__to_front(struct perf_evlist *evlist,
 
 void perf_evlist__set_tracking_event(struct perf_evlist *evlist,
                                     struct perf_evsel *tracking_evsel);
-
 #endif /* __PERF_EVLIST_H */
index 33449decf7bd2c24d981fdf30c10042063a503ee..2936b308072200b79f54820a4bf63b0895704bbf 100644 (file)
@@ -885,6 +885,8 @@ void perf_evsel__exit(struct perf_evsel *evsel)
        perf_evsel__free_fd(evsel);
        perf_evsel__free_id(evsel);
        close_cgroup(evsel->cgrp);
+       cpu_map__put(evsel->cpus);
+       thread_map__put(evsel->threads);
        zfree(&evsel->group_name);
        zfree(&evsel->name);
        perf_evsel__object.fini(evsel);
@@ -896,7 +898,7 @@ void perf_evsel__delete(struct perf_evsel *evsel)
        free(evsel);
 }
 
-void perf_evsel__compute_deltas(struct perf_evsel *evsel, int cpu,
+void perf_evsel__compute_deltas(struct perf_evsel *evsel, int cpu, int thread,
                                struct perf_counts_values *count)
 {
        struct perf_counts_values tmp;
@@ -908,8 +910,8 @@ void perf_evsel__compute_deltas(struct perf_evsel *evsel, int cpu,
                tmp = evsel->prev_raw_counts->aggr;
                evsel->prev_raw_counts->aggr = *count;
        } else {
-               tmp = evsel->prev_raw_counts->cpu[cpu];
-               evsel->prev_raw_counts->cpu[cpu] = *count;
+               tmp = *perf_counts(evsel->prev_raw_counts, cpu, thread);
+               *perf_counts(evsel->prev_raw_counts, cpu, thread) = *count;
        }
 
        count->val = count->val - tmp.val;
@@ -937,20 +939,18 @@ void perf_counts_values__scale(struct perf_counts_values *count,
                *pscaled = scaled;
 }
 
-int perf_evsel__read_cb(struct perf_evsel *evsel, int cpu, int thread,
-                       perf_evsel__read_cb_t cb)
+int perf_evsel__read(struct perf_evsel *evsel, int cpu, int thread,
+                    struct perf_counts_values *count)
 {
-       struct perf_counts_values count;
-
-       memset(&count, 0, sizeof(count));
+       memset(count, 0, sizeof(*count));
 
        if (FD(evsel, cpu, thread) < 0)
                return -EINVAL;
 
-       if (readn(FD(evsel, cpu, thread), &count, sizeof(count)) < 0)
+       if (readn(FD(evsel, cpu, thread), count, sizeof(*count)) < 0)
                return -errno;
 
-       return cb(evsel, cpu, thread, &count);
+       return 0;
 }
 
 int __perf_evsel__read_on_cpu(struct perf_evsel *evsel,
@@ -962,15 +962,15 @@ int __perf_evsel__read_on_cpu(struct perf_evsel *evsel,
        if (FD(evsel, cpu, thread) < 0)
                return -EINVAL;
 
-       if (evsel->counts == NULL && perf_evsel__alloc_counts(evsel, cpu + 1) < 0)
+       if (evsel->counts == NULL && perf_evsel__alloc_counts(evsel, cpu + 1, thread + 1) < 0)
                return -ENOMEM;
 
        if (readn(FD(evsel, cpu, thread), &count, nv * sizeof(u64)) < 0)
                return -errno;
 
-       perf_evsel__compute_deltas(evsel, cpu, &count);
+       perf_evsel__compute_deltas(evsel, cpu, thread, &count);
        perf_counts_values__scale(&count, scale, NULL);
-       evsel->counts->cpu[cpu] = count;
+       *perf_counts(evsel->counts, cpu, thread) = count;
        return 0;
 }
 
@@ -1167,7 +1167,7 @@ retry_sample_id:
                        int group_fd;
 
                        if (!evsel->cgrp && !evsel->system_wide)
-                               pid = threads->map[thread];
+                               pid = thread_map__pid(threads, thread);
 
                        group_fd = get_group_fd(evsel, cpu, thread);
 retry_open:
index bb0579e8a10a4556119c5aa313cd22ddacdbc220..4a7ed5656cf0165ffcdd52a1dba063aff6051c8e 100644 (file)
@@ -8,23 +8,8 @@
 #include <linux/types.h>
 #include "xyarray.h"
 #include "symbol.h"
-
-struct perf_counts_values {
-       union {
-               struct {
-                       u64 val;
-                       u64 ena;
-                       u64 run;
-               };
-               u64 values[3];
-       };
-};
-
-struct perf_counts {
-       s8                        scaled;
-       struct perf_counts_values aggr;
-       struct perf_counts_values cpu[];
-};
+#include "cpumap.h"
+#include "stat.h"
 
 struct perf_evsel;
 
@@ -82,6 +67,7 @@ struct perf_evsel {
        struct cgroup_sel       *cgrp;
        void                    *handler;
        struct cpu_map          *cpus;
+       struct thread_map       *threads;
        unsigned int            sample_size;
        int                     id_pos;
        int                     is_pos;
@@ -113,10 +99,20 @@ struct thread_map;
 struct perf_evlist;
 struct record_opts;
 
+static inline struct cpu_map *perf_evsel__cpus(struct perf_evsel *evsel)
+{
+       return evsel->cpus;
+}
+
+static inline int perf_evsel__nr_cpus(struct perf_evsel *evsel)
+{
+       return perf_evsel__cpus(evsel)->nr;
+}
+
 void perf_counts_values__scale(struct perf_counts_values *count,
                               bool scale, s8 *pscaled);
 
-void perf_evsel__compute_deltas(struct perf_evsel *evsel, int cpu,
+void perf_evsel__compute_deltas(struct perf_evsel *evsel, int cpu, int thread,
                                struct perf_counts_values *count);
 
 int perf_evsel__object_config(size_t object_size,
@@ -233,12 +229,8 @@ static inline bool perf_evsel__match2(struct perf_evsel *e1,
         (a)->attr.type == (b)->attr.type &&    \
         (a)->attr.config == (b)->attr.config)
 
-typedef int (perf_evsel__read_cb_t)(struct perf_evsel *evsel,
-                                   int cpu, int thread,
-                                   struct perf_counts_values *count);
-
-int perf_evsel__read_cb(struct perf_evsel *evsel, int cpu, int thread,
-                       perf_evsel__read_cb_t cb);
+int perf_evsel__read(struct perf_evsel *evsel, int cpu, int thread,
+                    struct perf_counts_values *count);
 
 int __perf_evsel__read_on_cpu(struct perf_evsel *evsel,
                              int cpu, int thread, bool scale);
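
perf_evsel__read_cb() and its callback type are replaced by perf_evsel__read(), which fills a caller-supplied struct perf_counts_values directly. A hedged sketch of the new call, assuming an evsel already opened on the given cpu/thread and a build inside tools/perf; the function name below is illustrative:

#include <stdio.h>
#include "evsel.h"

static int print_counter(struct perf_evsel *evsel, int cpu, int thread)
{
        struct perf_counts_values count;

        /* Reads the raw val/ena/run triple straight from the event fd. */
        if (perf_evsel__read(evsel, cpu, thread, &count))
                return -1;

        printf("val=%llu ena=%llu run=%llu\n",
               (unsigned long long)count.val,
               (unsigned long long)count.ena,
               (unsigned long long)count.run);
        return 0;
}
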
index 21a77e7a171e8aa0664d5caf0737f141bc96e62f..03ace57a800c58fcc5fb3b2be8cd8bfbf8f50977 100644 (file)
@@ -1063,8 +1063,7 @@ out:
        free(buf);
        return events;
 error:
-       if (events)
-               free_event_desc(events);
+       free_event_desc(events);
        events = NULL;
        goto out;
 }
diff --git a/tools/perf/util/include/linux/rbtree.h b/tools/perf/util/include/linux/rbtree.h
deleted file mode 100644 (file)
index f06d89f..0000000
+++ /dev/null
@@ -1,16 +0,0 @@
-#ifndef __TOOLS_LINUX_PERF_RBTREE_H
-#define __TOOLS_LINUX_PERF_RBTREE_H
-#include <stdbool.h>
-#include "../../../../include/linux/rbtree.h"
-
-/*
- * Handy for checking that we are not deleting an entry that is
- * already in a list, found in block/{blk-throttle,cfq-iosched}.c,
- * probably should be moved to lib/rbtree.c...
- */
-static inline void rb_erase_init(struct rb_node *n, struct rb_root *root)
-{
-       rb_erase(n, root);
-       RB_CLEAR_NODE(n);
-}
-#endif /* __TOOLS_LINUX_PERF_RBTREE_H */
diff --git a/tools/perf/util/include/linux/rbtree_augmented.h b/tools/perf/util/include/linux/rbtree_augmented.h
deleted file mode 100644 (file)
index 9d6fcdf..0000000
+++ /dev/null
@@ -1,2 +0,0 @@
-#include <stdbool.h>
-#include "../../../../include/linux/rbtree_augmented.h"
index 4744673aff1b287de3a091a40edade2a709a8e52..7ff682770fdb16e71b368af16efad0edd9443d87 100644 (file)
@@ -1448,10 +1448,9 @@ int machine__process_event(struct machine *machine, union perf_event *event,
        case PERF_RECORD_AUX:
                ret = machine__process_aux_event(machine, event); break;
        case PERF_RECORD_ITRACE_START:
-               ret = machine__process_itrace_start_event(machine, event);
+               ret = machine__process_itrace_start_event(machine, event); break;
        case PERF_RECORD_LOST_SAMPLES:
                ret = machine__process_lost_samples_event(machine, event, sample); break;
-               break;
        default:
                ret = -1;
                break;
index 2a4d1ec028464757d6723bbd08c2d0ce0a010a14..09f8d23571082ef6698f37eefcd8f9d9815f1fdc 100644 (file)
@@ -17,6 +17,7 @@
 #include "parse-events-flex.h"
 #include "pmu.h"
 #include "thread_map.h"
+#include "cpumap.h"
 #include "asm/bug.h"
 
 #define MAX_NAME_LEN 100
@@ -285,7 +286,9 @@ __add_event(struct list_head *list, int *idx,
        if (!evsel)
                return NULL;
 
-       evsel->cpus = cpus;
+       if (cpus)
+               evsel->cpus = cpu_map__get(cpus);
+
        if (name)
                evsel->name = strdup(name);
        list_add_tail(&evsel->node, list);
index 09e738fe9ea2790a1c304f2015cdb20c03c20614..13cef3c655652764c95cdd33f53d048e00bdc10c 100644 (file)
@@ -119,8 +119,8 @@ event               [^,{}/]+
 num_dec                [0-9]+
 num_hex                0x[a-fA-F0-9]+
 num_raw_hex    [a-fA-F0-9]+
-name           [a-zA-Z_*?][a-zA-Z0-9_*?]*
-name_minus     [a-zA-Z_*?][a-zA-Z0-9\-_*?]*
+name           [a-zA-Z_*?][a-zA-Z0-9_*?.]*
+name_minus     [a-zA-Z_*?][a-zA-Z0-9\-_*?.]*
 /* If you add a modifier you need to update check_modifier() */
 modifier_event [ukhpGHSDI]+
 modifier_bp    [rwx]{1,3}
@@ -165,7 +165,6 @@ modifier_bp [rwx]{1,3}
                        return PE_EVENT_NAME;
                }
 
-.              |
 <<EOF>>                {
                        BEGIN(INITIAL);
                        REWIND(0);
index 0fcc624eb76767b1c3fe211f678a981a71b744ce..7bcb8c315615b1ffdf7123584b2b832f5cfd9d35 100644 (file)
@@ -1,4 +1,5 @@
 #include <linux/list.h>
+#include <linux/compiler.h>
 #include <sys/types.h>
 #include <unistd.h>
 #include <stdio.h>
@@ -205,17 +206,12 @@ static int perf_pmu__parse_snapshot(struct perf_pmu_alias *alias,
        return 0;
 }
 
-static int perf_pmu__new_alias(struct list_head *list, char *dir, char *name, FILE *file)
+static int __perf_pmu__new_alias(struct list_head *list, char *dir, char *name,
+                                char *desc __maybe_unused, char *val)
 {
        struct perf_pmu_alias *alias;
-       char buf[256];
        int ret;
 
-       ret = fread(buf, 1, sizeof(buf), file);
-       if (ret == 0)
-               return -EINVAL;
-       buf[ret] = 0;
-
        alias = malloc(sizeof(*alias));
        if (!alias)
                return -ENOMEM;
@@ -225,26 +221,43 @@ static int perf_pmu__new_alias(struct list_head *list, char *dir, char *name, FI
        alias->unit[0] = '\0';
        alias->per_pkg = false;
 
-       ret = parse_events_terms(&alias->terms, buf);
+       ret = parse_events_terms(&alias->terms, val);
        if (ret) {
+               pr_err("Cannot parse alias %s: %d\n", val, ret);
                free(alias);
                return ret;
        }
 
        alias->name = strdup(name);
-       /*
-        * load unit name and scale if available
-        */
-       perf_pmu__parse_unit(alias, dir, name);
-       perf_pmu__parse_scale(alias, dir, name);
-       perf_pmu__parse_per_pkg(alias, dir, name);
-       perf_pmu__parse_snapshot(alias, dir, name);
+       if (dir) {
+               /*
+                * load unit name and scale if available
+                */
+               perf_pmu__parse_unit(alias, dir, name);
+               perf_pmu__parse_scale(alias, dir, name);
+               perf_pmu__parse_per_pkg(alias, dir, name);
+               perf_pmu__parse_snapshot(alias, dir, name);
+       }
 
        list_add_tail(&alias->list, list);
 
        return 0;
 }
 
+static int perf_pmu__new_alias(struct list_head *list, char *dir, char *name, FILE *file)
+{
+       char buf[256];
+       int ret;
+
+       ret = fread(buf, 1, sizeof(buf), file);
+       if (ret == 0)
+               return -EINVAL;
+
+       buf[ret] = 0;
+
+       return __perf_pmu__new_alias(list, dir, name, NULL, buf);
+}
+
 static inline bool pmu_alias_info_file(char *name)
 {
        size_t len;
@@ -436,7 +449,7 @@ static struct cpu_map *pmu_cpumask(const char *name)
        return cpus;
 }
 
-struct perf_event_attr *__attribute__((weak))
+struct perf_event_attr * __weak
 perf_pmu__get_default_config(struct perf_pmu *pmu __maybe_unused)
 {
        return NULL;
index 076527b639bdbcab38b4e196f1d388352f7e22c1..381f23a443c7e71a78c008cb2e56a88326830d98 100644 (file)
@@ -249,8 +249,12 @@ static void clear_probe_trace_events(struct probe_trace_event *tevs, int ntevs)
 static bool kprobe_blacklist__listed(unsigned long address);
 static bool kprobe_warn_out_range(const char *symbol, unsigned long address)
 {
+       u64 etext_addr;
+
        /* Get the address of _etext for checking non-probable text symbol */
-       if (kernel_get_symbol_address_by_name("_etext", false) < address)
+       etext_addr = kernel_get_symbol_address_by_name("_etext", false);
+
+       if (etext_addr != 0 && etext_addr < address)
                pr_warning("%s is out of .text, skip it.\n", symbol);
        else if (kprobe_blacklist__listed(address))
                pr_warning("%s is blacklisted function, skip it.\n", symbol);
index 5925fec90562fc355514489ae3d33814800f3896..e23ded40c79e3ecd62642bf2a07a34c5fc7662f8 100644 (file)
@@ -20,3 +20,4 @@ util/stat.c
 util/strlist.c
 util/trace-event.c
 ../../lib/rbtree.c
+util/string.c
index d906d0ad5d40a34b49955ad9571130b1f93c7dc3..626422eda7274264c46ebca2aad48a51981788a2 100644 (file)
@@ -384,7 +384,7 @@ static int pyrf_cpu_map__init(struct pyrf_cpu_map *pcpus,
 
 static void pyrf_cpu_map__delete(struct pyrf_cpu_map *pcpus)
 {
-       cpu_map__delete(pcpus->cpus);
+       cpu_map__put(pcpus->cpus);
        pcpus->ob_type->tp_free((PyObject*)pcpus);
 }
 
@@ -453,7 +453,7 @@ static int pyrf_thread_map__init(struct pyrf_thread_map *pthreads,
 
 static void pyrf_thread_map__delete(struct pyrf_thread_map *pthreads)
 {
-       thread_map__delete(pthreads->threads);
+       thread_map__put(pthreads->threads);
        pthreads->ob_type->tp_free((PyObject*)pthreads);
 }
 
index d457c523a33d8bb7d00669dbd09e589eecf6de4b..1f7becbe5e182a4f560dd5f435148d302a5aa787 100644 (file)
@@ -64,7 +64,7 @@ static bool perf_probe_api(setup_probe_fn_t fn)
        if (!cpus)
                return false;
        cpu = cpus->map[0];
-       cpu_map__delete(cpus);
+       cpu_map__put(cpus);
 
        do {
                ret = perf_do_probe_api(fn, cpu, try[i++]);
@@ -226,7 +226,7 @@ bool perf_evlist__can_select_event(struct perf_evlist *evlist, const char *str)
                struct cpu_map *cpus = cpu_map__new(NULL);
 
                cpu =  cpus ? cpus->map[0] : 0;
-               cpu_map__delete(cpus);
+               cpu_map__put(cpus);
        } else {
                cpu = evlist->cpus->map[0];
        }
index aa482c10469d748fb2c6379ff854ba80f53b2b73..ed9dc2555ec7277d01560c1133718e1066dc5630 100644 (file)
@@ -686,6 +686,8 @@ static int process_finished_round(struct perf_tool *tool __maybe_unused,
                                  union perf_event *event __maybe_unused,
                                  struct ordered_events *oe)
 {
+       if (dump_trace)
+               fprintf(stdout, "\n");
        return ordered_events__flush(oe, OE_FLUSH__ROUND);
 }
 
@@ -1726,7 +1728,7 @@ size_t perf_session__fprintf_nr_events(struct perf_session *session, FILE *fp)
        if (perf_header__has_feat(&session->header, HEADER_AUXTRACE))
                msg = " (excludes AUX area (e.g. instruction trace) decoded / synthesized events)";
 
-       ret = fprintf(fp, "Aggregated stats:%s\n", msg);
+       ret = fprintf(fp, "\nAggregated stats:%s\n", msg);
 
        ret += events_stats__fprintf(&session->evlist->stats, fp);
        return ret;
@@ -1893,7 +1895,7 @@ int perf_session__cpu_bitmap(struct perf_session *session,
        err = 0;
 
 out_delete_map:
-       cpu_map__delete(map);
+       cpu_map__put(map);
        return err;
 }
 
index 4014b709f956b96b86dab3526f3b35fec6d8c166..f2a0d1521e266a32a0df10b60f22d88107fae276 100644 (file)
@@ -1,6 +1,8 @@
 #include <math.h>
 #include "stat.h"
+#include "evlist.h"
 #include "evsel.h"
+#include "thread_map.h"
 
 void update_stats(struct stats *stats, u64 val)
 {
@@ -95,33 +97,46 @@ void perf_stat_evsel_id_init(struct perf_evsel *evsel)
        }
 }
 
-struct perf_counts *perf_counts__new(int ncpus)
+struct perf_counts *perf_counts__new(int ncpus, int nthreads)
 {
-       int size = sizeof(struct perf_counts) +
-                  ncpus * sizeof(struct perf_counts_values);
+       struct perf_counts *counts = zalloc(sizeof(*counts));
 
-       return zalloc(size);
+       if (counts) {
+               struct xyarray *values;
+
+               values = xyarray__new(ncpus, nthreads, sizeof(struct perf_counts_values));
+               if (!values) {
+                       free(counts);
+                       return NULL;
+               }
+
+               counts->values = values;
+       }
+
+       return counts;
 }
 
 void perf_counts__delete(struct perf_counts *counts)
 {
-       free(counts);
+       if (counts) {
+               xyarray__delete(counts->values);
+               free(counts);
+       }
 }
 
-static void perf_counts__reset(struct perf_counts *counts, int ncpus)
+static void perf_counts__reset(struct perf_counts *counts)
 {
-       memset(counts, 0, (sizeof(*counts) +
-              (ncpus * sizeof(struct perf_counts_values))));
+       xyarray__reset(counts->values);
 }
 
-void perf_evsel__reset_counts(struct perf_evsel *evsel, int ncpus)
+void perf_evsel__reset_counts(struct perf_evsel *evsel)
 {
-       perf_counts__reset(evsel->counts, ncpus);
+       perf_counts__reset(evsel->counts);
 }
 
-int perf_evsel__alloc_counts(struct perf_evsel *evsel, int ncpus)
+int perf_evsel__alloc_counts(struct perf_evsel *evsel, int ncpus, int nthreads)
 {
-       evsel->counts = perf_counts__new(ncpus);
+       evsel->counts = perf_counts__new(ncpus, nthreads);
        return evsel->counts != NULL ? 0 : -ENOMEM;
 }
 
@@ -130,3 +145,96 @@ void perf_evsel__free_counts(struct perf_evsel *evsel)
        perf_counts__delete(evsel->counts);
        evsel->counts = NULL;
 }
+
+void perf_evsel__reset_stat_priv(struct perf_evsel *evsel)
+{
+       int i;
+       struct perf_stat *ps = evsel->priv;
+
+       for (i = 0; i < 3; i++)
+               init_stats(&ps->res_stats[i]);
+
+       perf_stat_evsel_id_init(evsel);
+}
+
+int perf_evsel__alloc_stat_priv(struct perf_evsel *evsel)
+{
+       evsel->priv = zalloc(sizeof(struct perf_stat));
+       if (evsel->priv == NULL)
+               return -ENOMEM;
+       perf_evsel__reset_stat_priv(evsel);
+       return 0;
+}
+
+void perf_evsel__free_stat_priv(struct perf_evsel *evsel)
+{
+       zfree(&evsel->priv);
+}
+
+int perf_evsel__alloc_prev_raw_counts(struct perf_evsel *evsel,
+                                     int ncpus, int nthreads)
+{
+       struct perf_counts *counts;
+
+       counts = perf_counts__new(ncpus, nthreads);
+       if (counts)
+               evsel->prev_raw_counts = counts;
+
+       return counts ? 0 : -ENOMEM;
+}
+
+void perf_evsel__free_prev_raw_counts(struct perf_evsel *evsel)
+{
+       perf_counts__delete(evsel->prev_raw_counts);
+       evsel->prev_raw_counts = NULL;
+}
+
+int perf_evsel__alloc_stats(struct perf_evsel *evsel, bool alloc_raw)
+{
+       int ncpus = perf_evsel__nr_cpus(evsel);
+       int nthreads = thread_map__nr(evsel->threads);
+
+       if (perf_evsel__alloc_stat_priv(evsel) < 0 ||
+           perf_evsel__alloc_counts(evsel, ncpus, nthreads) < 0 ||
+           (alloc_raw && perf_evsel__alloc_prev_raw_counts(evsel, ncpus, nthreads) < 0))
+               return -ENOMEM;
+
+       return 0;
+}
+
+int perf_evlist__alloc_stats(struct perf_evlist *evlist, bool alloc_raw)
+{
+       struct perf_evsel *evsel;
+
+       evlist__for_each(evlist, evsel) {
+               if (perf_evsel__alloc_stats(evsel, alloc_raw))
+                       goto out_free;
+       }
+
+       return 0;
+
+out_free:
+       perf_evlist__free_stats(evlist);
+       return -1;
+}
+
+void perf_evlist__free_stats(struct perf_evlist *evlist)
+{
+       struct perf_evsel *evsel;
+
+       evlist__for_each(evlist, evsel) {
+               perf_evsel__free_stat_priv(evsel);
+               perf_evsel__free_counts(evsel);
+               perf_evsel__free_prev_raw_counts(evsel);
+       }
+}
+
+void perf_evlist__reset_stats(struct perf_evlist *evlist)
+{
+       struct perf_evsel *evsel;
+
+       evlist__for_each(evlist, evsel) {
+               perf_evsel__reset_stat_priv(evsel);
+               perf_evsel__reset_counts(evsel);
+       }
+}
index 093dc3cb28dd3f62cb593095dfdc9c59317e0446..1cfbe0a980ac77a1a1af31db8a01141a449a6a4f 100644 (file)
@@ -3,6 +3,7 @@
 
 #include <linux/types.h>
 #include <stdio.h>
+#include "xyarray.h"
 
 struct stats
 {
@@ -29,8 +30,32 @@ enum aggr_mode {
        AGGR_GLOBAL,
        AGGR_SOCKET,
        AGGR_CORE,
+       AGGR_THREAD,
 };
 
+struct perf_counts_values {
+       union {
+               struct {
+                       u64 val;
+                       u64 ena;
+                       u64 run;
+               };
+               u64 values[3];
+       };
+};
+
+struct perf_counts {
+       s8                        scaled;
+       struct perf_counts_values aggr;
+       struct xyarray            *values;
+};
+
+static inline struct perf_counts_values*
+perf_counts(struct perf_counts *counts, int cpu, int thread)
+{
+       return xyarray__entry(counts->values, cpu, thread);
+}
+
 void update_stats(struct stats *stats, u64 val);
 double avg_stats(struct stats *stats);
 double stddev_stats(struct stats *stats);
@@ -46,6 +71,8 @@ static inline void init_stats(struct stats *stats)
 }
 
 struct perf_evsel;
+struct perf_evlist;
+
 bool __perf_evsel_stat__is(struct perf_evsel *evsel,
                           enum perf_stat_evsel_id id);
 
@@ -62,10 +89,24 @@ void perf_stat__update_shadow_stats(struct perf_evsel *counter, u64 *count,
 void perf_stat__print_shadow_stats(FILE *out, struct perf_evsel *evsel,
                                   double avg, int cpu, enum aggr_mode aggr);
 
-struct perf_counts *perf_counts__new(int ncpus);
+struct perf_counts *perf_counts__new(int ncpus, int nthreads);
 void perf_counts__delete(struct perf_counts *counts);
 
-void perf_evsel__reset_counts(struct perf_evsel *evsel, int ncpus);
-int perf_evsel__alloc_counts(struct perf_evsel *evsel, int ncpus);
+void perf_evsel__reset_counts(struct perf_evsel *evsel);
+int perf_evsel__alloc_counts(struct perf_evsel *evsel, int ncpus, int nthreads);
 void perf_evsel__free_counts(struct perf_evsel *evsel);
+
+void perf_evsel__reset_stat_priv(struct perf_evsel *evsel);
+int perf_evsel__alloc_stat_priv(struct perf_evsel *evsel);
+void perf_evsel__free_stat_priv(struct perf_evsel *evsel);
+
+int perf_evsel__alloc_prev_raw_counts(struct perf_evsel *evsel,
+                                     int ncpus, int nthreads);
+void perf_evsel__free_prev_raw_counts(struct perf_evsel *evsel);
+
+int perf_evsel__alloc_stats(struct perf_evsel *evsel, bool alloc_raw);
+
+int perf_evlist__alloc_stats(struct perf_evlist *evlist, bool alloc_raw);
+void perf_evlist__free_stats(struct perf_evlist *evlist);
+void perf_evlist__reset_stats(struct perf_evlist *evlist);
 #endif
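
struct perf_counts now stores its per-cpu/per-thread values in an xyarray, so readers go through perf_counts(counts, cpu, thread) instead of the old counts->cpu[cpu]. A small sketch of allocating and addressing the two-dimensional counter storage, assuming a build inside tools/perf; names other than the API above are illustrative:

#include <errno.h>
#include "stat.h"

static int counts_example(int ncpus, int nthreads)
{
        struct perf_counts *counts = perf_counts__new(ncpus, nthreads);

        if (counts == NULL)
                return -ENOMEM;

        /* Address (cpu 0, thread 0) through the new accessor. */
        perf_counts(counts, 0, 0)->val = 1;
        perf_counts(counts, 0, 0)->ena = 1;
        perf_counts(counts, 0, 0)->run = 1;

        perf_counts__delete(counts);
        return 0;
}
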
index 283d3e73e2f23a66b007dd04790f3c3a1dd486e8..eec6c1149f44758ebe22c37fb07cd4cd6fd0aef7 100644 (file)
@@ -748,7 +748,7 @@ static int str_to_bitmap(char *s, cpumask_t *b)
                set_bit(c, cpumask_bits(b));
        }
 
-       cpu_map__delete(m);
+       cpu_map__put(m);
 
        return ret;
 }
index 504f2d73b7eefe2699349ac75c5cc3b875961375..48b588c6951a9476246d6c71a9ee7b8535a8be37 100644 (file)
@@ -1132,8 +1132,11 @@ static int dso__load_kcore(struct dso *dso, struct map *map,
        INIT_LIST_HEAD(&md.maps);
 
        fd = open(kcore_filename, O_RDONLY);
-       if (fd < 0)
+       if (fd < 0) {
+               pr_err("%s requires CAP_SYS_RAWIO capability to access.\n",
+                       kcore_filename);
                return -EINVAL;
+       }
 
        /* Read new maps into temporary lists */
        err = file__read_maps(fd, md.type == MAP__FUNCTION, kcore_mapfn, &md,
index f4822bd03709af52aba3eed89d50f06bc53e4f86..da7646d767feba14c4f1ad3ba60f3758d5f89413 100644 (file)
@@ -8,8 +8,11 @@
 #include <unistd.h>
 #include "strlist.h"
 #include <string.h>
+#include <api/fs/fs.h>
+#include "asm/bug.h"
 #include "thread_map.h"
 #include "util.h"
+#include "debug.h"
 
 /* Skip "." and ".." directories */
 static int filter(const struct dirent *dir)
@@ -20,11 +23,26 @@ static int filter(const struct dirent *dir)
                return 1;
 }
 
+static void thread_map__reset(struct thread_map *map, int start, int nr)
+{
+       size_t size = (nr - start) * sizeof(map->map[0]);
+
+       memset(&map->map[start], 0, size);
+}
+
 static struct thread_map *thread_map__realloc(struct thread_map *map, int nr)
 {
-       size_t size = sizeof(*map) + sizeof(pid_t) * nr;
+       size_t size = sizeof(*map) + sizeof(map->map[0]) * nr;
+       int start = map ? map->nr : 0;
+
+       map = realloc(map, size);
+       /*
+        * We only realloc to add more items, let's reset new items.
+        */
+       if (map)
+               thread_map__reset(map, start, nr);
 
-       return realloc(map, size);
+       return map;
 }
 
 #define thread_map__alloc(__nr) thread_map__realloc(NULL, __nr)
@@ -45,8 +63,9 @@ struct thread_map *thread_map__new_by_pid(pid_t pid)
        threads = thread_map__alloc(items);
        if (threads != NULL) {
                for (i = 0; i < items; i++)
-                       threads->map[i] = atoi(namelist[i]->d_name);
+                       thread_map__set_pid(threads, i, atoi(namelist[i]->d_name));
                threads->nr = items;
+               atomic_set(&threads->refcnt, 1);
        }
 
        for (i=0; i<items; i++)
@@ -61,8 +80,9 @@ struct thread_map *thread_map__new_by_tid(pid_t tid)
        struct thread_map *threads = thread_map__alloc(1);
 
        if (threads != NULL) {
-               threads->map[0] = tid;
-               threads->nr     = 1;
+               thread_map__set_pid(threads, 0, tid);
+               threads->nr = 1;
+               atomic_set(&threads->refcnt, 1);
        }
 
        return threads;
@@ -84,6 +104,7 @@ struct thread_map *thread_map__new_by_uid(uid_t uid)
                goto out_free_threads;
 
        threads->nr = 0;
+       atomic_set(&threads->refcnt, 1);
 
        while (!readdir_r(proc, &dirent, &next) && next) {
                char *end;
@@ -123,8 +144,10 @@ struct thread_map *thread_map__new_by_uid(uid_t uid)
                        threads = tmp;
                }
 
-               for (i = 0; i < items; i++)
-                       threads->map[threads->nr + i] = atoi(namelist[i]->d_name);
+               for (i = 0; i < items; i++) {
+                       thread_map__set_pid(threads, threads->nr + i,
+                                           atoi(namelist[i]->d_name));
+               }
 
                for (i = 0; i < items; i++)
                        zfree(&namelist[i]);
@@ -201,7 +224,7 @@ static struct thread_map *thread_map__new_by_pid_str(const char *pid_str)
                threads = nt;
 
                for (i = 0; i < items; i++) {
-                       threads->map[j++] = atoi(namelist[i]->d_name);
+                       thread_map__set_pid(threads, j++, atoi(namelist[i]->d_name));
                        zfree(&namelist[i]);
                }
                threads->nr = total_tasks;
@@ -210,6 +233,8 @@ static struct thread_map *thread_map__new_by_pid_str(const char *pid_str)
 
 out:
        strlist__delete(slist);
+       if (threads)
+               atomic_set(&threads->refcnt, 1);
        return threads;
 
 out_free_namelist:
@@ -227,8 +252,9 @@ struct thread_map *thread_map__new_dummy(void)
        struct thread_map *threads = thread_map__alloc(1);
 
        if (threads != NULL) {
-               threads->map[0] = -1;
-               threads->nr     = 1;
+               thread_map__set_pid(threads, 0, -1);
+               threads->nr = 1;
+               atomic_set(&threads->refcnt, 1);
        }
        return threads;
 }
@@ -267,10 +293,12 @@ static struct thread_map *thread_map__new_by_tid_str(const char *tid_str)
                        goto out_free_threads;
 
                threads = nt;
-               threads->map[ntasks - 1] = tid;
-               threads->nr              = ntasks;
+               thread_map__set_pid(threads, ntasks - 1, tid);
+               threads->nr = ntasks;
        }
 out:
+       if (threads)
+               atomic_set(&threads->refcnt, 1);
        return threads;
 
 out_free_threads:
@@ -290,9 +318,30 @@ struct thread_map *thread_map__new_str(const char *pid, const char *tid,
        return thread_map__new_by_tid_str(tid);
 }
 
-void thread_map__delete(struct thread_map *threads)
+static void thread_map__delete(struct thread_map *threads)
 {
-       free(threads);
+       if (threads) {
+               int i;
+
+               WARN_ONCE(atomic_read(&threads->refcnt) != 0,
+                         "thread map refcnt unbalanced\n");
+               for (i = 0; i < threads->nr; i++)
+                       free(thread_map__comm(threads, i));
+               free(threads);
+       }
+}
+
+struct thread_map *thread_map__get(struct thread_map *map)
+{
+       if (map)
+               atomic_inc(&map->refcnt);
+       return map;
+}
+
+void thread_map__put(struct thread_map *map)
+{
+       if (map && atomic_dec_and_test(&map->refcnt))
+               thread_map__delete(map);
 }
 
 size_t thread_map__fprintf(struct thread_map *threads, FILE *fp)
@@ -301,7 +350,60 @@ size_t thread_map__fprintf(struct thread_map *threads, FILE *fp)
        size_t printed = fprintf(fp, "%d thread%s: ",
                                 threads->nr, threads->nr > 1 ? "s" : "");
        for (i = 0; i < threads->nr; ++i)
-               printed += fprintf(fp, "%s%d", i ? ", " : "", threads->map[i]);
+               printed += fprintf(fp, "%s%d", i ? ", " : "", thread_map__pid(threads, i));
 
        return printed + fprintf(fp, "\n");
 }
+
+static int get_comm(char **comm, pid_t pid)
+{
+       char *path;
+       size_t size;
+       int err;
+
+       if (asprintf(&path, "%s/%d/comm", procfs__mountpoint(), pid) == -1)
+               return -ENOMEM;
+
+       err = filename__read_str(path, comm, &size);
+       if (!err) {
+               /*
+                * We're reading 16 bytes, while filename__read_str
+                * allocates data per BUFSIZ bytes, so we can safely
+                * mark the end of the string.
+                */
+               (*comm)[size] = 0;
+               rtrim(*comm);
+       }
+
+       free(path);
+       return err;
+}
+
+static void comm_init(struct thread_map *map, int i)
+{
+       pid_t pid = thread_map__pid(map, i);
+       char *comm = NULL;
+
+       /* dummy pid comm initialization */
+       if (pid == -1) {
+               map->map[i].comm = strdup("dummy");
+               return;
+       }
+
+       /*
+        * The comm name is like extra bonus ;-),
+        * so just warn if we fail for any reason.
+        */
+       if (get_comm(&comm, pid))
+               pr_warning("Couldn't resolve comm name for pid %d\n", pid);
+
+       map->map[i].comm = comm;
+}
+
+void thread_map__read_comms(struct thread_map *threads)
+{
+       int i;
+
+       for (i = 0; i < threads->nr; ++i)
+               comm_init(threads, i);
+}
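
thread_map entries now carry the thread's comm, filled in by thread_map__read_comms(), which reads /proc/<pid>/comm through the fs helpers and trims the trailing newline. A standalone, hedged sketch of the same procfs lookup using plain stdio rather than the helpers used above:

#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
        char path[64], comm[32];
        FILE *f;

        snprintf(path, sizeof(path), "/proc/%d/comm", getpid());

        f = fopen(path, "r");
        if (!f)
                return 1;

        /* comm is at most 16 bytes; strip the newline, like rtrim() above. */
        if (fgets(comm, sizeof(comm), f)) {
                comm[strcspn(comm, "\n")] = '\0';
                printf("comm: %s\n", comm);
        }

        fclose(f);
        return 0;
}
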
index 95313f43cc0ffe5280b19a54fc9925ec4b83c498..af679d8a50f852ebb3457491f5dc4b96f6520743 100644 (file)
@@ -3,10 +3,17 @@
 
 #include <sys/types.h>
 #include <stdio.h>
+#include <linux/atomic.h>
+
+struct thread_map_data {
+       pid_t    pid;
+       char    *comm;
+};
 
 struct thread_map {
+       atomic_t refcnt;
        int nr;
-       pid_t map[];
+       struct thread_map_data map[];
 };
 
 struct thread_map *thread_map__new_dummy(void);
@@ -15,11 +22,12 @@ struct thread_map *thread_map__new_by_tid(pid_t tid);
 struct thread_map *thread_map__new_by_uid(uid_t uid);
 struct thread_map *thread_map__new(pid_t pid, pid_t tid, uid_t uid);
 
+struct thread_map *thread_map__get(struct thread_map *map);
+void thread_map__put(struct thread_map *map);
+
 struct thread_map *thread_map__new_str(const char *pid,
                const char *tid, uid_t uid);
 
-void thread_map__delete(struct thread_map *threads);
-
 size_t thread_map__fprintf(struct thread_map *threads, FILE *fp);
 
 static inline int thread_map__nr(struct thread_map *threads)
@@ -27,4 +35,21 @@ static inline int thread_map__nr(struct thread_map *threads)
        return threads ? threads->nr : 1;
 }
 
+static inline pid_t thread_map__pid(struct thread_map *map, int thread)
+{
+       return map->map[thread].pid;
+}
+
+static inline void
+thread_map__set_pid(struct thread_map *map, int thread, pid_t pid)
+{
+       map->map[thread].pid = pid;
+}
+
+static inline char *thread_map__comm(struct thread_map *map, int thread)
+{
+       return map->map[thread].comm;
+}
+
+void thread_map__read_comms(struct thread_map *threads);
 #endif /* __PERF_THREAD_MAP_H */
index 5da129e10aa2dcd049647d10d025c26e08b7cf65..326e826a5d20ac5ee3829aea1aca23dfc964c2ce 100644 (file)
@@ -127,7 +127,7 @@ int acpi_getopt(int argc, char **argv, char *opts)
                    argv[acpi_gbl_optind][0] != '-' ||
                    argv[acpi_gbl_optind][1] == '\0') {
                        return (ACPI_OPT_END);
-               } else if (ACPI_STRCMP(argv[acpi_gbl_optind], "--") == 0) {
+               } else if (strcmp(argv[acpi_gbl_optind], "--") == 0) {
                        acpi_gbl_optind++;
                        return (ACPI_OPT_END);
                }
@@ -140,7 +140,7 @@ int acpi_getopt(int argc, char **argv, char *opts)
        /* Make sure that the option is legal */
 
        if (current_char == ':' ||
-           (opts_ptr = ACPI_STRCHR(opts, current_char)) == NULL) {
+           (opts_ptr = strchr(opts, current_char)) == NULL) {
                ACPI_OPTION_ERROR("Illegal option: -", current_char);
 
                if (argv[acpi_gbl_optind][++current_char_ptr] == '\0') {
index 38f095d86b5260d54a30f8dfde8da33de91d1995..79e2d1d435d1337dbb35d138b1e3e448a60217eb 100644 (file)
@@ -22,9 +22,6 @@ acpidump options are as follow:
 .B \-b
 Dump tables to binary files
 .TP
-.B \-c
-Dump customized tables
-.TP
 .B \-h \-?
 This help message
 .TP
@@ -48,15 +45,25 @@ Verbose mode
 .B \-a <Address>
 Get table via a physical address
 .TP
+.B \-c <on|off>
+Turning on/off customized table dumping
+.TP
 .B \-f <BinaryFile>
 Get table via a binary file
 .TP
 .B \-n <Signature>
 Get table via a name/signature
 .TP
-Invocation without parameters dumps all available tables
+.B \-x
+Do not use but dump XSDT
+.TP
+.B \-x \-x
+Do not use or dump XSDT
+.TP
+.fi
+Invocation without parameters dumps all available tables.
 .TP
-Multiple mixed instances of -a, -f, and -n are supported
+Multiple mixed instances of -a, -f, and -n are supported.
 
 .SH EXAMPLES
 
index db15c9d2049e08f6090d3d391a7617246aa9c1c5..dd5008b0617a033c76b0fda581aa4fb6a74e5788 100644 (file)
@@ -222,7 +222,7 @@ acpi_os_get_table_by_address(acpi_physical_address address,
                goto exit;
        }
 
-       ACPI_MEMCPY(local_table, mapped_table, table_length);
+       memcpy(local_table, mapped_table, table_length);
 
 exit:
        osl_unmap_table(mapped_table);
@@ -531,7 +531,7 @@ static acpi_status osl_load_rsdp(void)
        gbl_rsdp_address =
            rsdp_base + (ACPI_CAST8(mapped_table) - rsdp_address);
 
-       ACPI_MEMCPY(&gbl_rsdp, mapped_table, sizeof(struct acpi_table_rsdp));
+       memcpy(&gbl_rsdp, mapped_table, sizeof(struct acpi_table_rsdp));
        acpi_os_unmap_memory(rsdp_address, rsdp_size);
 
        return (AE_OK);
@@ -582,64 +582,67 @@ static acpi_status osl_table_initialize(void)
                return (AE_OK);
        }
 
-       /* Get RSDP from memory */
+       if (!gbl_dump_customized_tables) {
 
-       status = osl_load_rsdp();
-       if (ACPI_FAILURE(status)) {
-               return (status);
-       }
+               /* Get RSDP from memory */
+
+               status = osl_load_rsdp();
+               if (ACPI_FAILURE(status)) {
+                       return (status);
+               }
 
-       /* Get XSDT from memory */
+               /* Get XSDT from memory */
 
-       if (gbl_rsdp.revision && !gbl_do_not_dump_xsdt) {
-               if (gbl_xsdt) {
-                       free(gbl_xsdt);
-                       gbl_xsdt = NULL;
+               if (gbl_rsdp.revision && !gbl_do_not_dump_xsdt) {
+                       if (gbl_xsdt) {
+                               free(gbl_xsdt);
+                               gbl_xsdt = NULL;
+                       }
+
+                       gbl_revision = 2;
+                       status = osl_get_bios_table(ACPI_SIG_XSDT, 0,
+                                                   ACPI_CAST_PTR(struct
+                                                                 acpi_table_header
+                                                                 *, &gbl_xsdt),
+                                                   &address);
+                       if (ACPI_FAILURE(status)) {
+                               return (status);
+                       }
                }
 
-               gbl_revision = 2;
-               status = osl_get_bios_table(ACPI_SIG_XSDT, 0,
-                                           ACPI_CAST_PTR(struct
-                                                         acpi_table_header *,
-                                                         &gbl_xsdt), &address);
-               if (ACPI_FAILURE(status)) {
-                       return (status);
+               /* Get RSDT from memory */
+
+               if (gbl_rsdp.rsdt_physical_address) {
+                       if (gbl_rsdt) {
+                               free(gbl_rsdt);
+                               gbl_rsdt = NULL;
+                       }
+
+                       status = osl_get_bios_table(ACPI_SIG_RSDT, 0,
+                                                   ACPI_CAST_PTR(struct
+                                                                 acpi_table_header
+                                                                 *, &gbl_rsdt),
+                                                   &address);
+                       if (ACPI_FAILURE(status)) {
+                               return (status);
+                       }
                }
-       }
 
-       /* Get RSDT from memory */
+               /* Get FADT from memory */
 
-       if (gbl_rsdp.rsdt_physical_address) {
-               if (gbl_rsdt) {
-                       free(gbl_rsdt);
-                       gbl_rsdt = NULL;
+               if (gbl_fadt) {
+                       free(gbl_fadt);
+                       gbl_fadt = NULL;
                }
 
-               status = osl_get_bios_table(ACPI_SIG_RSDT, 0,
+               status = osl_get_bios_table(ACPI_SIG_FADT, 0,
                                            ACPI_CAST_PTR(struct
                                                          acpi_table_header *,
-                                                         &gbl_rsdt), &address);
+                                                         &gbl_fadt),
+                                           &gbl_fadt_address);
                if (ACPI_FAILURE(status)) {
                        return (status);
                }
-       }
-
-       /* Get FADT from memory */
-
-       if (gbl_fadt) {
-               free(gbl_fadt);
-               gbl_fadt = NULL;
-       }
-
-       status = osl_get_bios_table(ACPI_SIG_FADT, 0,
-                                   ACPI_CAST_PTR(struct acpi_table_header *,
-                                                 &gbl_fadt),
-                                   &gbl_fadt_address);
-       if (ACPI_FAILURE(status)) {
-               return (status);
-       }
-
-       if (!gbl_dump_customized_tables) {
 
                /* Add mandatory tables to global table list first */
 
@@ -961,7 +964,7 @@ osl_get_bios_table(char *signature,
                goto exit;
        }
 
-       ACPI_MEMCPY(local_table, mapped_table, table_length);
+       memcpy(local_table, mapped_table, table_length);
        *address = table_address;
        *table = local_table;
 
index 0b1fa290245a904d3255d1fd84976baab2f09851..44ad4889d468cad4b576e39eaad4ef5ba3844b07 100644 (file)
@@ -54,7 +54,7 @@ ACPI_MODULE_NAME("osunixmap")
 #ifndef O_BINARY
 #define O_BINARY 0
 #endif
-#ifdef _free_BSD
+#if defined(_dragon_fly) || defined(_free_BSD)
 #define MMAP_FLAGS          MAP_SHARED
 #else
 #define MMAP_FLAGS          MAP_PRIVATE
index 84bdef0136cbc15a6b9aea1c7c435f57452445fa..eed534481434301586fe13e28150a58677e57ac2 100644 (file)
@@ -66,7 +66,7 @@
 EXTERN u8 INIT_GLOBAL(gbl_summary_mode, FALSE);
 EXTERN u8 INIT_GLOBAL(gbl_verbose_mode, FALSE);
 EXTERN u8 INIT_GLOBAL(gbl_binary_mode, FALSE);
-EXTERN u8 INIT_GLOBAL(gbl_dump_customized_tables, FALSE);
+EXTERN u8 INIT_GLOBAL(gbl_dump_customized_tables, TRUE);
 EXTERN u8 INIT_GLOBAL(gbl_do_not_dump_xsdt, FALSE);
 EXTERN ACPI_FILE INIT_GLOBAL(gbl_output_file, NULL);
 EXTERN char INIT_GLOBAL(*gbl_output_filename, NULL);
index c736adf5fb55241ecb564a796496b2fad32e3b67..61d0de804b709d9568c1870565876f9c7fa0d5c0 100644 (file)
@@ -329,7 +329,7 @@ int ap_dump_table_by_name(char *signature)
        acpi_status status;
        int table_status;
 
-       if (ACPI_STRLEN(signature) != ACPI_NAME_SIZE) {
+       if (strlen(signature) != ACPI_NAME_SIZE) {
                acpi_log_error
                    ("Invalid table signature [%s]: must be exactly 4 characters\n",
                     signature);
@@ -338,15 +338,15 @@ int ap_dump_table_by_name(char *signature)
 
        /* Table signatures are expected to be uppercase */
 
-       ACPI_STRCPY(local_signature, signature);
+       strcpy(local_signature, signature);
        acpi_ut_strupr(local_signature);
 
        /* To be friendly, handle tables whose signatures do not match the name */
 
        if (ACPI_COMPARE_NAME(local_signature, "FADT")) {
-               ACPI_STRCPY(local_signature, ACPI_SIG_FADT);
+               strcpy(local_signature, ACPI_SIG_FADT);
        } else if (ACPI_COMPARE_NAME(local_signature, "MADT")) {
-               ACPI_STRCPY(local_signature, ACPI_SIG_MADT);
+               strcpy(local_signature, ACPI_SIG_MADT);
        }
 
        /* Dump all instances of this signature (to handle multiple SSDTs) */
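
The ap_dump_table_by_name() changes above only swap the ACPI_STRLEN/ACPI_STRCPY wrappers for plain strlen()/strcpy(); the behaviour — require a 4-character signature, upper-case it, and map the friendly names FADT/MADT to their real signatures — is unchanged. A stand-alone sketch of that normalization, with libc calls standing in for acpi_ut_strupr()/ACPI_COMPARE_NAME() ("FACP" and "APIC" are the values of ACPI_SIG_FADT and ACPI_SIG_MADT):

	#include <ctype.h>
	#include <stdio.h>
	#include <string.h>

	#define NAME_SIZE 4				/* ACPI_NAME_SIZE */

	static int normalize_signature(const char *in, char out[NAME_SIZE + 1])
	{
		size_t i;

		if (strlen(in) != NAME_SIZE)
			return -1;			/* must be exactly 4 characters */

		for (i = 0; i < NAME_SIZE; i++)
			out[i] = (char)toupper((unsigned char)in[i]);
		out[NAME_SIZE] = '\0';

		/* Friendly aliases, mirroring the FADT/MADT special cases above */
		if (!strcmp(out, "FADT"))
			strcpy(out, "FACP");		/* ACPI_SIG_FADT */
		else if (!strcmp(out, "MADT"))
			strcpy(out, "APIC");		/* ACPI_SIG_MADT */

		return 0;
	}

	int main(void)
	{
		char sig[NAME_SIZE + 1];

		if (!normalize_signature("fadt", sig))
			printf("fadt -> %s\n", sig);	/* prints: fadt -> FACP */
		return 0;
	}
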
index 8f2fe168228eb12d9e97efa085d9d5d5d41d94f3..a37f9702b2a90433978f4a6715919c4a9f8e11e6 100644 (file)
@@ -136,10 +136,10 @@ int ap_write_to_binary_file(struct acpi_table_header *table, u32 instance)
        } else {
                ACPI_MOVE_NAME(filename, table->signature);
        }
-       filename[0] = (char)ACPI_TOLOWER(filename[0]);
-       filename[1] = (char)ACPI_TOLOWER(filename[1]);
-       filename[2] = (char)ACPI_TOLOWER(filename[2]);
-       filename[3] = (char)ACPI_TOLOWER(filename[3]);
+       filename[0] = (char)tolower((int)filename[0]);
+       filename[1] = (char)tolower((int)filename[1]);
+       filename[2] = (char)tolower((int)filename[2]);
+       filename[3] = (char)tolower((int)filename[3]);
        filename[ACPI_NAME_SIZE] = 0;
 
        /* Handle multiple SSDts - create different filenames for each */
@@ -147,10 +147,10 @@ int ap_write_to_binary_file(struct acpi_table_header *table, u32 instance)
        if (instance > 0) {
                acpi_ut_snprintf(instance_str, sizeof(instance_str), "%u",
                                 instance);
-               ACPI_STRCAT(filename, instance_str);
+               strcat(filename, instance_str);
        }
 
-       ACPI_STRCAT(filename, ACPI_TABLE_FILE_SUFFIX);
+       strcat(filename, ACPI_TABLE_FILE_SUFFIX);
 
        if (gbl_verbose_mode) {
                acpi_log_error
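
Same mechanical conversion in this hunk: ACPI_TOLOWER/ACPI_STRCAT become tolower()/strcat(). The surrounding function turns a table signature into a lower-case output filename, appending an instance number when the same signature occurs more than once (multiple SSDTs). A stand-alone sketch, assuming ACPI_TABLE_FILE_SUFFIX is ".dat":

	#include <ctype.h>
	#include <stdio.h>

	static void build_table_filename(const char *signature, unsigned int instance,
					 char *out, size_t out_size)
	{
		char name[5];
		int i;

		for (i = 0; i < 4; i++)
			name[i] = (char)tolower((unsigned char)signature[i]);
		name[4] = '\0';

		if (instance > 0)			/* e.g. the third SSDT */
			snprintf(out, out_size, "%s%u.dat", name, instance);
		else
			snprintf(out, out_size, "%s.dat", name);
	}

	int main(void)
	{
		char filename[32];

		build_table_filename("SSDT", 2, filename, sizeof(filename));
		printf("%s\n", filename);		/* prints: ssdt2.dat */
		return 0;
	}
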
index d0ba6535f5af0d0641487cda70ebb5b89ce24784..57620f66ae6c65c3db94952b83d01b8b492eb2fb 100644 (file)
@@ -80,7 +80,7 @@ struct ap_dump_action action_table[AP_MAX_ACTIONS];
 u32 current_action = 0;
 
 #define AP_UTILITY_NAME             "ACPI Binary Table Dump Utility"
-#define AP_SUPPORTED_OPTIONS        "?a:bcf:hn:o:r:svxz"
+#define AP_SUPPORTED_OPTIONS        "?a:bc:f:hn:o:r:svxz"
 
 /******************************************************************************
  *
@@ -96,7 +96,6 @@ static void ap_display_usage(void)
        ACPI_USAGE_HEADER("acpidump [options]");
 
        ACPI_OPTION("-b", "Dump tables to binary files");
-       ACPI_OPTION("-c", "Dump customized tables");
        ACPI_OPTION("-h -?", "This help message");
        ACPI_OPTION("-o <File>", "Redirect output to file");
        ACPI_OPTION("-r <Address>", "Dump tables from specified RSDP");
@@ -107,6 +106,7 @@ static void ap_display_usage(void)
        ACPI_USAGE_TEXT("\nTable Options:\n");
 
        ACPI_OPTION("-a <Address>", "Get table via a physical address");
+       ACPI_OPTION("-c <on|off>", "Turning on/off customized table dumping");
        ACPI_OPTION("-f <BinaryFile>", "Get table via a binary file");
        ACPI_OPTION("-n <Signature>", "Get table via a name/signature");
        ACPI_OPTION("-x", "Do not use but dump XSDT");
@@ -181,7 +181,16 @@ static int ap_do_options(int argc, char **argv)
 
                case 'c':       /* Dump customized tables */
 
-                       gbl_dump_customized_tables = TRUE;
+                       if (!strcmp(acpi_gbl_optarg, "on")) {
+                               gbl_dump_customized_tables = TRUE;
+                       } else if (!strcmp(acpi_gbl_optarg, "off")) {
+                               gbl_dump_customized_tables = FALSE;
+                       } else {
+                               acpi_log_error
+                                   ("%s: Cannot handle this switch, please use on|off\n",
+                                    acpi_gbl_optarg);
+                               return (-1);
+                       }
                        continue;
 
                case 'h':
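
Two related changes in this file: the option string gains a colon after c (so -c now consumes an argument), and the handler parses that argument as on or off instead of unconditionally enabling customized dumping, which is now the default per the acpidump.h hunk above. A minimal stand-alone sketch with plain getopt()/optarg standing in for acpi_getopt()/acpi_gbl_optarg:

	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>

	int main(int argc, char **argv)
	{
		int dump_customized = 1;		/* mirrors the new TRUE default */
		int c;

		/* "c:" - the trailing colon is what makes -c take an argument */
		while ((c = getopt(argc, argv, "c:")) != -1) {
			switch (c) {
			case 'c':
				if (!strcmp(optarg, "on")) {
					dump_customized = 1;
				} else if (!strcmp(optarg, "off")) {
					dump_customized = 0;
				} else {
					fprintf(stderr,
						"%s: cannot handle this switch, please use on|off\n",
						optarg);
					return 1;
				}
				break;
			default:
				return 1;
			}
		}

		printf("customized table dumping: %s\n",
		       dump_customized ? "on" : "off");
		return 0;
	}

Run as, say, ./a.out -c off it reports the feature disabled; with no -c at all it stays on, matching the flipped default in acpidump.h.
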
index 848af90b8091a9a3bd8ed7e18d6ace2692210c36..8b8a4445367011b31afdb06f5363a120ea1db019 100644 (file)
@@ -553,6 +553,8 @@ static struct kvm *kvm_create_vm(unsigned long type)
        list_add(&kvm->vm_list, &vm_list);
        spin_unlock(&kvm_lock);
 
+       preempt_notifier_inc();
+
        return kvm;
 
 out_err:
@@ -620,6 +622,7 @@ static void kvm_destroy_vm(struct kvm *kvm)
        cleanup_srcu_struct(&kvm->irq_srcu);
        cleanup_srcu_struct(&kvm->srcu);
        kvm_arch_free_vm(kvm);
+       preempt_notifier_dec();
        hardware_disable_all();
        mmdrop(mm);
 }
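
The KVM hunks pair preempt_notifier_inc() at VM creation with preempt_notifier_dec() at VM teardown, so the preempt-notifier machinery used around vCPU thread scheduling is only active while at least one VM exists. A generic, stand-alone illustration of that "enable on first user, disable on last user" pattern, using a plain atomic counter rather than the kernel's static key:

	#include <stdatomic.h>
	#include <stdio.h>

	static atomic_int users;

	static void facility_inc(void)
	{
		if (atomic_fetch_add(&users, 1) == 0)
			printf("facility enabled\n");	/* first user arrived */
	}

	static void facility_dec(void)
	{
		if (atomic_fetch_sub(&users, 1) == 1)
			printf("facility disabled\n");	/* last user gone */
	}

	int main(void)
	{
		facility_inc();		/* kvm_create_vm() */
		facility_inc();		/* a second VM */
		facility_dec();
		facility_dec();		/* kvm_destroy_vm(): last VM, facility goes off */
		return 0;
	}
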