Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
author Linus Torvalds <torvalds@linux-foundation.org>
Sat, 1 Aug 2015 00:10:56 +0000 (17:10 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Sat, 1 Aug 2015 00:10:56 +0000 (17:10 -0700)
Pull networking fixes from David Miller:

 1) Must teardown SR-IOV before unregistering netdev in igb driver, from
    Alex Williamson.

 2) Fix ipv6 route unreachable crash in IPVS, from Alex Gartrell.

 3) Default route selection in ipv4 should take the prefix length, table
    ID, and TOS into account, from Julian Anastasov.

 4) sch_plug must have a reset method in order to purge all buffered
    packets when the qdisc is reset, likewise for sch_choke, from WANG
    Cong.

 5) Fix deadlock and races in slave_changelink/br_setport in bridging.
    From Nikolay Aleksandrov.

 6) mlx4 bug fixes (wrong index in port event propagation to VFs,
    overzealous BUG_ON assertion, etc.) from Ido Shamay, Jack
    Morgenstein, and Or Gerlitz.

 7) Turn off klog message about SCTP userspace interface compat that
    makes no sense at all, from Daniel Borkmann.

 8) Fix unbounded restarts of inet frag eviction process, causing NMI
    watchdog soft lockup messages, from Florian Westphal.

 9) Suspend/resume fixes for r8152 from Hayes Wang.

10) Fix busy loop when MSG_WAITALL|MSG_PEEK is used in TCP recv, from
    Sabrina Dubroca.

11) Fix performance regression when removing a lot of routes from the
    ipv4 routing tables, from Alexander Duyck.

12) Fix device leak in AF_PACKET, from Lars Westerhoff.

13) AF_PACKET also has a header length comparison bug due to signedness,
    from Alexander Drozdov.

14) Fix bug in EBPF tail call generation on x86, from Daniel Borkmann.

15) Memory leaks, TSO stats, watchdog timeout and other fixes to
    thunderx driver from Sunil Goutham and Thanneeru Srinivasulu.

16) act_bpf can leak memory when replacing programs, from Daniel
    Borkmann.

17) WOL packet fixes in gianfar driver, from Claudiu Manoil.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (79 commits)
  stmmac: fix missing MODULE_LICENSE in stmmac_platform
  gianfar: Enable device wakeup when appropriate
  gianfar: Fix suspend/resume for wol magic packet
  gianfar: Fix warning when CONFIG_PM off
  act_pedit: check binding before calling tcf_hash_release()
  net: sk_clone_lock() should only do get_net() if the parent is not a kernel socket
  net: sched: fix refcount imbalance in actions
  r8152: reset device when tx timeout
  r8152: add pre_reset and post_reset
  qlcnic: Fix corruption while copying
  act_bpf: fix memory leaks when replacing bpf programs
  net: thunderx: Fix for crash while BGX teardown
  net: thunderx: Add PCI driver shutdown routine
  net: thunderx: Fix crash when changing rss with multiple traffic flows
  net: thunderx: Set watchdog timeout value
  net: thunderx: Wakeup TXQ only if CQE_TX are processed
  net: thunderx: Suppress alloc_pages() failure warnings
  net: thunderx: Fix TSO packet statistic
  net: thunderx: Fix memory leak when changing queue count
  net: thunderx: Fix RQ_DROP miscalculation
  ...

343 files changed:
Documentation/devicetree/bindings/iommu/arm,smmu-v3.txt
Documentation/devicetree/bindings/mmc/fsl-imx-esdhc.txt
Documentation/devicetree/bindings/sound/mt8173-max98090.txt
Documentation/devicetree/bindings/sound/mt8173-rt5650-rt5676.txt
Documentation/devicetree/bindings/spi/spi-ath79.txt
Documentation/hwmon/nct7904
Documentation/target/tcm_mod_builder.py
MAINTAINERS
Makefile
arch/arm/boot/dts/imx25-pdk.dts
arch/arm/boot/dts/imx51-apf51dev.dts
arch/arm/boot/dts/imx53-ard.dts
arch/arm/boot/dts/imx53-m53evk.dts
arch/arm/boot/dts/imx53-qsb-common.dtsi
arch/arm/boot/dts/imx53-smd.dts
arch/arm/boot/dts/imx53-tqma53.dtsi
arch/arm/boot/dts/imx53-tx53.dtsi
arch/arm/boot/dts/imx53-voipac-bsb.dts
arch/arm/boot/dts/imx6dl-riotboard.dts
arch/arm/boot/dts/imx6q-arm2.dts
arch/arm/boot/dts/imx6q-gk802.dts
arch/arm/boot/dts/imx6q-tbs2910.dts
arch/arm/boot/dts/imx6qdl-aristainetos.dtsi
arch/arm/boot/dts/imx6qdl-aristainetos2.dtsi
arch/arm/boot/dts/imx6qdl-cubox-i.dtsi
arch/arm/boot/dts/imx6qdl-dfi-fs700-m60.dtsi
arch/arm/boot/dts/imx6qdl-gw52xx.dtsi
arch/arm/boot/dts/imx6qdl-gw53xx.dtsi
arch/arm/boot/dts/imx6qdl-gw54xx.dtsi
arch/arm/boot/dts/imx6qdl-hummingboard.dtsi
arch/arm/boot/dts/imx6qdl-nitrogen6x.dtsi
arch/arm/boot/dts/imx6qdl-phytec-pfla02.dtsi
arch/arm/boot/dts/imx6qdl-rex.dtsi
arch/arm/boot/dts/imx6qdl-sabreauto.dtsi
arch/arm/boot/dts/imx6qdl-sabrelite.dtsi
arch/arm/boot/dts/imx6qdl-sabresd.dtsi
arch/arm/boot/dts/imx6qdl-tx6.dtsi
arch/arm/boot/dts/imx6qdl-wandboard.dtsi
arch/arm/boot/dts/imx6sl-evk.dts
arch/arm/boot/dts/imx6sx-sabreauto.dts
arch/arm/boot/dts/imx6sx-sdb.dtsi
arch/arm/boot/dts/imx7d-sdb.dts
arch/arm64/kernel/efi.c
arch/avr32/mach-at32ap/clock.c
arch/m32r/include/asm/io.h
arch/s390/kernel/cache.c
arch/s390/net/bpf_jit_comp.c
arch/tile/kernel/setup.c
arch/x86/entry/entry_64_compat.S
arch/x86/include/uapi/asm/kvm.h
arch/x86/kernel/cpu/perf_event_intel_cqm.c
arch/x86/kernel/fpu/init.c
arch/x86/kvm/lapic.c
arch/x86/kvm/mtrr.c
arch/x86/kvm/svm.c
arch/x86/kvm/vmx.c
arch/x86/kvm/x86.h
arch/x86/mm/ioremap.c
arch/x86/mm/mmap.c
arch/x86/mm/mpx.c
arch/x86/mm/tlb.c
block/bio.c
block/blk-cgroup.c
drivers/acpi/device_pm.c
drivers/ata/libata-core.c
drivers/ata/libata-pmp.c
drivers/ata/libata-scsi.c
drivers/ata/libata-transport.c
drivers/block/null_blk.c
drivers/cpufreq/cpufreq.c
drivers/cpufreq/intel_pstate.c
drivers/firmware/efi/cper.c
drivers/gpu/drm/amd/amdgpu/amdgpu.h
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
drivers/gpu/drm/amd/amdgpu/cz_dpm.c
drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c
drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
drivers/gpu/drm/drm_crtc.c
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/i915_gem_gtt.c
drivers/gpu/drm/i915/i915_gem_tiling.c
drivers/gpu/drm/i915/intel_uncore.c
drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c
drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c
drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.h
drivers/gpu/drm/msm/msm_atomic.c
drivers/gpu/drm/msm/msm_drv.c
drivers/gpu/drm/msm/msm_drv.h
drivers/gpu/drm/msm/msm_gem.c
drivers/gpu/drm/msm/msm_gem_prime.c
drivers/gpu/drm/nouveau/nouveau_drm.c
drivers/gpu/drm/nouveau/nouveau_platform.c
drivers/gpu/drm/nouveau/nouveau_ttm.c
drivers/gpu/drm/nouveau/nv04_fbcon.c
drivers/gpu/drm/nouveau/nv50_display.c
drivers/gpu/drm/nouveau/nv50_fbcon.c
drivers/gpu/drm/nouveau/nvc0_fbcon.c
drivers/gpu/drm/nouveau/nvkm/engine/disp/gf110.c
drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c
drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
drivers/gpu/drm/nouveau/nvkm/engine/pm/base.c
drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c
drivers/gpu/drm/nouveau/nvkm/subdev/clk/gt215.c
drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk20a.c
drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv04.c
drivers/gpu/drm/radeon/atombios_encoders.c
drivers/gpu/drm/radeon/dce6_afmt.c
drivers/gpu/drm/radeon/radeon_audio.c
drivers/gpu/drm/radeon/radeon_audio.h
drivers/gpu/drm/radeon/radeon_combios.c
drivers/gpu/drm/radeon/radeon_connectors.c
drivers/gpu/drm/radeon/radeon_mode.h
drivers/gpu/drm/ttm/ttm_bo_util.c
drivers/hid/hid-apple.c
drivers/hid/hid-core.c
drivers/hid/hid-cp2112.c
drivers/hid/hid-ids.h
drivers/hid/hid-multitouch.c
drivers/hid/usbhid/hid-quirks.c
drivers/hid/wacom_sys.c
drivers/hid/wacom_wac.c
drivers/hwmon/nct7802.c
drivers/hwmon/nct7904.c
drivers/iio/accel/mma8452.c
drivers/iio/adc/mcp320x.c
drivers/iio/adc/vf610_adc.c
drivers/iio/light/stk3310.c
drivers/iio/magnetometer/Kconfig
drivers/iio/magnetometer/bmc150_magn.c
drivers/iio/magnetometer/mmc35240.c
drivers/iio/temperature/mlx90614.c
drivers/infiniband/hw/cxgb3/iwch_provider.c
drivers/infiniband/hw/ipath/ipath_driver.c
drivers/infiniband/hw/ocrdma/ocrdma.h
drivers/infiniband/hw/ocrdma/ocrdma_abi.h
drivers/infiniband/hw/ocrdma/ocrdma_ah.c
drivers/infiniband/hw/ocrdma/ocrdma_ah.h
drivers/infiniband/hw/ocrdma/ocrdma_hw.c
drivers/infiniband/hw/ocrdma/ocrdma_hw.h
drivers/infiniband/hw/ocrdma/ocrdma_main.c
drivers/infiniband/hw/ocrdma/ocrdma_sli.h
drivers/infiniband/hw/ocrdma/ocrdma_stats.c
drivers/infiniband/hw/ocrdma/ocrdma_stats.h
drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
drivers/infiniband/hw/ocrdma/ocrdma_verbs.h
drivers/infiniband/ulp/ipoib/ipoib_verbs.c
drivers/infiniband/ulp/isert/ib_isert.c
drivers/input/input-leds.c
drivers/input/mouse/bcm5974.c
drivers/input/mouse/elantech.c
drivers/input/mouse/synaptics.c
drivers/input/touchscreen/goodix.c
drivers/input/touchscreen/usbtouchscreen.c
drivers/iommu/amd_iommu.c
drivers/iommu/amd_iommu_init.c
drivers/iommu/amd_iommu_v2.c
drivers/iommu/arm-smmu-v3.c
drivers/iommu/intel-iommu.c
drivers/md/Kconfig
drivers/md/bitmap.c
drivers/md/dm-cache-policy-smq.c
drivers/md/dm-cache-target.c
drivers/md/dm-thin.c
drivers/md/md-cluster.c
drivers/md/md-cluster.h
drivers/md/md.c
drivers/md/raid1.c
drivers/md/raid10.c
drivers/md/raid5.c
drivers/md/raid5.h
drivers/media/pci/ivtv/ivtvfb.c
drivers/misc/mei/main.c
drivers/misc/mic/scif/scif_nodeqp.c
drivers/mmc/card/block.c
drivers/mmc/host/Kconfig
drivers/mmc/host/omap_hsmmc.c
drivers/mmc/host/sdhci-esdhc-imx.c
drivers/mmc/host/sdhci-esdhc.h
drivers/mmc/host/sdhci-pxav3.c
drivers/mmc/host/sdhci.c
drivers/nvdimm/region_devs.c
drivers/of/Kconfig
drivers/of/unittest.c
drivers/parport/share.c
drivers/phy/Kconfig
drivers/phy/phy-berlin-usb.c
drivers/phy/phy-ti-pipe3.c
drivers/regulator/88pm800.c
drivers/regulator/core.c
drivers/regulator/max8973-regulator.c
drivers/regulator/s2mps11.c
drivers/s390/Makefile
drivers/s390/kvm/Makefile [deleted file]
drivers/s390/kvm/kvm_virtio.c [deleted file]
drivers/s390/kvm/virtio_ccw.c [deleted file]
drivers/s390/virtio/Makefile [new file with mode: 0644]
drivers/s390/virtio/kvm_virtio.c [new file with mode: 0644]
drivers/s390/virtio/virtio_ccw.c [new file with mode: 0644]
drivers/scsi/qla2xxx/qla_attr.c
drivers/scsi/qla2xxx/qla_dbg.c
drivers/scsi/qla2xxx/qla_def.h
drivers/scsi/qla2xxx/qla_init.c
drivers/scsi/qla2xxx/qla_iocb.c
drivers/scsi/qla2xxx/qla_mbx.c
drivers/scsi/qla2xxx/qla_os.c
drivers/scsi/qla2xxx/qla_sup.c
drivers/scsi/qla2xxx/qla_target.c
drivers/scsi/qla2xxx/qla_target.h
drivers/scsi/qla2xxx/tcm_qla2xxx.c
drivers/scsi/virtio_scsi.c
drivers/spi/Kconfig
drivers/spi/spi-img-spfi.c
drivers/spi/spi-imx.c
drivers/spi/spi-zynqmp-gqspi.c
drivers/spi/spidev.c
drivers/target/iscsi/iscsi_target.c
drivers/target/iscsi/iscsi_target_login.c
drivers/target/iscsi/iscsi_target_login.h
drivers/target/iscsi/iscsi_target_nego.c
drivers/target/target_core_configfs.c
drivers/target/target_core_pr.c
drivers/target/target_core_rd.c
drivers/target/target_core_spc.c
drivers/tty/n_tty.c
drivers/tty/serial/Kconfig
drivers/tty/serial/amba-pl011.c
drivers/tty/serial/etraxfs-uart.c
drivers/tty/serial/imx.c
drivers/tty/serial/sc16is7xx.c
drivers/tty/serial/serial_core.c
drivers/tty/vt/selection.c
drivers/tty/vt/vt.c
drivers/usb/class/cdc-acm.c
drivers/usb/common/ulpi.c
drivers/usb/core/hcd.c
drivers/usb/core/hub.c
drivers/usb/core/usb.h
drivers/usb/dwc3/ep0.c
drivers/usb/gadget/udc/mv_udc_core.c
drivers/usb/gadget/udc/udc-core.c
drivers/usb/host/ohci-q.c
drivers/usb/host/ohci-tmio.c
drivers/usb/host/xhci-hub.c
drivers/usb/host/xhci-mem.c
drivers/usb/host/xhci-pci.c
drivers/usb/host/xhci-ring.c
drivers/usb/host/xhci.c
drivers/usb/host/xhci.h
drivers/usb/storage/unusual_devs.h
drivers/vfio/vfio.c
drivers/vhost/vhost.c
fs/btrfs/dev-replace.c
fs/btrfs/disk-io.c
fs/btrfs/extent-tree.c
fs/btrfs/transaction.c
fs/dax.c
fs/f2fs/data.c
fs/f2fs/file.c
fs/f2fs/gc.c
fs/f2fs/inline.c
fs/f2fs/segment.c
fs/fs-writeback.c
fs/namespace.c
fs/nfs/client.c
fs/nfs/flexfilelayout/flexfilelayout.c
fs/nfs/inode.c
fs/nfs/internal.h
fs/nfs/nfs42proc.c
fs/nfs/nfs4proc.c
fs/nfs/nfs4state.c
fs/nfs/pagelist.c
fs/nfs/pnfs.c
fs/nfs/write.c
fs/pnode.h
fs/xfs/libxfs/xfs_attr_remote.c
fs/xfs/xfs_file.c
fs/xfs/xfs_log_recover.c
include/linux/ata.h
include/linux/cper.h
include/linux/cpufreq.h
include/linux/ftrace.h
include/linux/libata.h
include/linux/mtd/nand.h
include/linux/nfs_fs.h
include/linux/nfs_fs_sb.h
include/linux/of_device.h
include/linux/platform_data/mmc-esdhc-imx.h
include/target/iscsi/iscsi_target_core.h
include/uapi/drm/amdgpu_drm.h
include/uapi/drm/i915_drm.h
include/uapi/drm/radeon_drm.h
include/uapi/linux/virtio_net.h
include/uapi/linux/virtio_pci.h
include/uapi/linux/virtio_ring.h
include/uapi/sound/asoc.h
kernel/resource.c
kernel/trace/ftrace.c
net/9p/trans_virtio.c
net/sunrpc/backchannel_rqst.c
net/sunrpc/clnt.c
net/sunrpc/xprtsock.c
security/keys/keyring.c
sound/core/pcm_native.c
sound/firewire/fireworks/fireworks.c
sound/firewire/fireworks/fireworks.h
sound/firewire/fireworks/fireworks_stream.c
sound/hda/hdac_i915.c
sound/pci/hda/hda_intel.c
sound/pci/hda/patch_cirrus.c
sound/pci/hda/patch_hdmi.c
sound/pci/hda/patch_realtek.c
sound/pci/hda/patch_sigmatel.c
sound/soc/codecs/pcm1681.c
sound/soc/codecs/rt5645.c
sound/soc/codecs/sgtl5000.h
sound/soc/codecs/ssm4567.c
sound/soc/fsl/fsl_ssi.c
sound/soc/intel/Makefile
sound/soc/intel/atom/sst/sst_drv_interface.c
sound/soc/intel/boards/cht_bsw_max98090_ti.c
sound/soc/mediatek/mt8173-max98090.c
sound/soc/mediatek/mt8173-rt5650-rt5676.c
sound/soc/mediatek/mtk-afe-pcm.c
sound/soc/soc-core.c
sound/soc/soc-dapm.c
sound/soc/soc-topology.c
sound/soc/zte/zx296702-i2s.c
sound/soc/zte/zx296702-spdif.c
sound/sparc/amd7930.c
sound/usb/mixer_maps.c
tools/testing/selftests/futex/functional/futex_requeue_pi_signal_restart.c

index c03eec1168721bdb1c851f7c42eba34ec48f4ba7..3443e0f838dfc8a53e548527a05cf9892f2c2a92 100644 (file)
@@ -35,3 +35,6 @@ the PCIe specification.
 
                       NOTE: this only applies to the SMMU itself, not
                       masters connected upstream of the SMMU.
+
+- hisilicon,broken-prefetch-cmd
+                    : Avoid sending CMD_PREFETCH_* commands to the SMMU.
index 5d0376b8f2026ed57daabd33d684590d02ff5920..211e7785f4d240ec2ffc7258839f740e8b614b78 100644 (file)
@@ -17,7 +17,6 @@ Required properties:
               "fsl,imx6sx-usdhc"
 
 Optional properties:
-- fsl,cd-controller : Indicate to use controller internal card detection
 - fsl,wp-controller : Indicate to use controller internal write protection
 - fsl,delay-line : Specify the number of delay cells for override mode.
   This is used to set the clock delay for DLL(Delay Line) on override mode
@@ -35,7 +34,6 @@ esdhc@70004000 {
        compatible = "fsl,imx51-esdhc";
        reg = <0x70004000 0x4000>;
        interrupts = <1>;
-       fsl,cd-controller;
        fsl,wp-controller;
 };
 
index 829bd26d17f86e753ca54f2a4750fd86db61a641..519e97c8f1b8c2029c30785cc39a97ae317e9e64 100644 (file)
@@ -3,11 +3,13 @@ MT8173 with MAX98090 CODEC
 Required properties:
 - compatible : "mediatek,mt8173-max98090"
 - mediatek,audio-codec: the phandle of the MAX98090 audio codec
+- mediatek,platform: the phandle of MT8173 ASoC platform
 
 Example:
 
        sound {
                compatible = "mediatek,mt8173-max98090";
                mediatek,audio-codec = <&max98090>;
+               mediatek,platform = <&afe>;
        };
 
index 61e98c976bd4012b99fc218b729e6fc006209b84..f205ce9e31dd5d31e981bac826e1f69e819434ca 100644 (file)
@@ -3,11 +3,13 @@ MT8173 with RT5650 RT5676 CODECS
 Required properties:
 - compatible : "mediatek,mt8173-rt5650-rt5676"
 - mediatek,audio-codec: the phandles of rt5650 and rt5676 codecs
+- mediatek,platform: the phandle of MT8173 ASoC platform
 
 Example:
 
        sound {
                compatible = "mediatek,mt8173-rt5650-rt5676";
                mediatek,audio-codec = <&rt5650 &rt5676>;
+               mediatek,platform = <&afe>;
        };
 
index f1ad9c367532437b370b412cb9f5c68b8f73c494..9c696fa66f818eb98144c628aa1b7000f96a6695 100644 (file)
@@ -3,7 +3,7 @@ Binding for Qualcomm Atheros AR7xxx/AR9xxx SPI controller
 Required properties:
 - compatible: has to be "qca,<soc-type>-spi", "qca,ar7100-spi" as fallback.
 - reg: Base address and size of the controllers memory area
-- clocks: phandle to the AHB clock.
+- clocks: phandle of the AHB clock.
 - clock-names: has to be "ahb".
 - #address-cells: <1>, as required by generic SPI binding.
 - #size-cells: <0>, also as required by generic SPI binding.
@@ -12,9 +12,9 @@ Child nodes as per the generic SPI binding.
 
 Example:
 
-       spi@1F000000 {
+       spi@1f000000 {
                compatible = "qca,ar9132-spi", "qca,ar7100-spi";
-               reg = <0x1F000000 0x10>;
+               reg = <0x1f000000 0x10>;
 
                clocks = <&pll 2>;
                clock-names = "ahb";
index 014f112e2a14e19557878a250e30438fd8f91b73..57fffe33ebfcdef2c9b52831667e147007c9454e 100644 (file)
@@ -35,11 +35,11 @@ temp1_input         Local temperature (1/1000 degree,
 temp[2-9]_input                CPU temperatures (1/1000 degree,
                        0.125 degree resolution)
 
-fan[1-4]_mode          R/W, 0/1 for manual or SmartFan mode
+pwm[1-4]_enable                R/W, 1/2 for manual or SmartFan mode
                        Setting SmartFan mode is supported only if it has been
                        previously configured by BIOS (or configuration EEPROM)
 
-fan[1-4]_pwm           R/O in SmartFan mode, R/W in manual control mode
+pwm[1-4]               R/O in SmartFan mode, R/W in manual control mode
 
 The driver checks sensor control registers and does not export the sensors
 that are not enabled. Anyway, a sensor that is enabled may actually be not
index 949de191fcdc1939c9b160eb7a809afcdbc847af..cda56df9b8a7ce591f3eb254cad1d423c0a856c5 100755 (executable)
@@ -199,7 +199,8 @@ def tcm_mod_build_configfs(proto_ident, fabric_mod_dir_var, fabric_mod_name):
        buf += "#include <linux/string.h>\n"
        buf += "#include <linux/configfs.h>\n"
        buf += "#include <linux/ctype.h>\n"
-       buf += "#include <asm/unaligned.h>\n\n"
+       buf += "#include <asm/unaligned.h>\n"
+       buf += "#include <scsi/scsi_proto.h>\n\n"
        buf += "#include <target/target_core_base.h>\n"
        buf += "#include <target/target_core_fabric.h>\n"
        buf += "#include <target/target_core_fabric_configfs.h>\n"
@@ -230,8 +231,14 @@ def tcm_mod_build_configfs(proto_ident, fabric_mod_dir_var, fabric_mod_name):
        buf += "        }\n"
        buf += "        tpg->" + fabric_mod_port + " = " + fabric_mod_port + ";\n"
        buf += "        tpg->" + fabric_mod_port + "_tpgt = tpgt;\n\n"
-       buf += "        ret = core_tpg_register(&" + fabric_mod_name + "_ops, wwn,\n"
-       buf += "                                &tpg->se_tpg, SCSI_PROTOCOL_SAS);\n"
+
+       if proto_ident == "FC":
+               buf += "        ret = core_tpg_register(wwn, &tpg->se_tpg, SCSI_PROTOCOL_FCP);\n"
+       elif proto_ident == "SAS":
+               buf += "        ret = core_tpg_register(wwn, &tpg->se_tpg, SCSI_PROTOCOL_SAS);\n"
+       elif proto_ident == "iSCSI":
+               buf += "        ret = core_tpg_register(wwn, &tpg->se_tpg, SCSI_PROTOCOL_ISCSI);\n"
+
        buf += "        if (ret < 0) {\n"
        buf += "                kfree(tpg);\n"
        buf += "                return NULL;\n"
@@ -292,7 +299,7 @@ def tcm_mod_build_configfs(proto_ident, fabric_mod_dir_var, fabric_mod_name):
 
        buf += "static const struct target_core_fabric_ops " + fabric_mod_name + "_ops = {\n"
        buf += "        .module                         = THIS_MODULE,\n"
-       buf += "        .name                           = " + fabric_mod_name + ",\n"
+       buf += "        .name                           = \"" + fabric_mod_name + "\",\n"
        buf += "        .get_fabric_name                = " + fabric_mod_name + "_get_fabric_name,\n"
        buf += "        .tpg_get_wwn                    = " + fabric_mod_name + "_get_fabric_wwn,\n"
        buf += "        .tpg_get_tag                    = " + fabric_mod_name + "_get_tag,\n"
@@ -322,17 +329,17 @@ def tcm_mod_build_configfs(proto_ident, fabric_mod_dir_var, fabric_mod_name):
        buf += "        .fabric_make_tpg                = " + fabric_mod_name + "_make_tpg,\n"
        buf += "        .fabric_drop_tpg                = " + fabric_mod_name + "_drop_tpg,\n"
        buf += "\n"
-       buf += "        .tfc_wwn_attrs                  = " + fabric_mod_name + "_wwn_attrs;\n"
+       buf += "        .tfc_wwn_attrs                  = " + fabric_mod_name + "_wwn_attrs,\n"
        buf += "};\n\n"
 
        buf += "static int __init " + fabric_mod_name + "_init(void)\n"
        buf += "{\n"
-       buf += "        return target_register_template(" + fabric_mod_name + "_ops);\n"
+       buf += "        return target_register_template(&" + fabric_mod_name + "_ops);\n"
        buf += "};\n\n"
 
        buf += "static void __exit " + fabric_mod_name + "_exit(void)\n"
        buf += "{\n"
-       buf += "        target_unregister_template(" + fabric_mod_name + "_ops);\n"
+       buf += "        target_unregister_template(&" + fabric_mod_name + "_ops);\n"
        buf += "};\n\n"
 
        buf += "MODULE_DESCRIPTION(\"" + fabric_mod_name.upper() + " series fabric driver\");\n"
index a2264167791acd0d309b1097937ad1ba8aa36cb8..9289ecb57b68d17b612cdc3eb932627b6ac426d7 100644 (file)
@@ -5899,7 +5899,6 @@ S:        Supported
 F:     Documentation/s390/kvm.txt
 F:     arch/s390/include/asm/kvm*
 F:     arch/s390/kvm/
-F:     drivers/s390/kvm/
 
 KERNEL VIRTUAL MACHINE (KVM) FOR ARM
 M:     Christoffer Dall <christoffer.dall@linaro.org>
@@ -6839,6 +6838,12 @@ T:       git git://linuxtv.org/anttip/media_tree.git
 S:     Maintained
 F:     drivers/media/usb/msi2500/
 
+MSYSTEMS DISKONCHIP G3 MTD DRIVER
+M:     Robert Jarzmik <robert.jarzmik@free.fr>
+L:     linux-mtd@lists.infradead.org
+S:     Maintained
+F:     drivers/mtd/devices/docg3*
+
 MT9M032 APTINA SENSOR DRIVER
 M:     Laurent Pinchart <laurent.pinchart@ideasonboard.com>
 L:     linux-media@vger.kernel.org
@@ -10896,6 +10901,15 @@ F:     drivers/block/virtio_blk.c
 F:     include/linux/virtio_*.h
 F:     include/uapi/linux/virtio_*.h
 
+VIRTIO DRIVERS FOR S390
+M:     Christian Borntraeger <borntraeger@de.ibm.com>
+M:     Cornelia Huck <cornelia.huck@de.ibm.com>
+L:     linux-s390@vger.kernel.org
+L:     virtualization@lists.linux-foundation.org
+L:     kvm@vger.kernel.org
+S:     Supported
+F:     drivers/s390/virtio/
+
 VIRTIO GPU DRIVER
 M:     David Airlie <airlied@linux.ie>
 M:     Gerd Hoffmann <kraxel@redhat.com>
index a9ad4908e8701569effc7596833e6ca513c75033..afabc44a349b7b31a2028660e7e61b134556a675 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 4
 PATCHLEVEL = 2
 SUBLEVEL = 0
-EXTRAVERSION = -rc3
+EXTRAVERSION = -rc4
 NAME = Hurr durr I'ma sheep
 
 # *DOCUMENTATION*
index dd45e6971bc35061a3b9d5b4579c6e6697ca9eca..9351296356dcc419ccb2746efa031df01bcb6462 100644 (file)
@@ -10,6 +10,7 @@
  */
 
 /dts-v1/;
+#include <dt-bindings/gpio/gpio.h>
 #include <dt-bindings/input/input.h>
 #include "imx25.dtsi"
 
 &esdhc1 {
        pinctrl-names = "default";
        pinctrl-0 = <&pinctrl_esdhc1>;
-       cd-gpios = <&gpio2 1 0>;
-       wp-gpios = <&gpio2 0 0>;
+       cd-gpios = <&gpio2 1 GPIO_ACTIVE_LOW>;
+       wp-gpios = <&gpio2 0 GPIO_ACTIVE_HIGH>;
        status = "okay";
 };
 
index 93d3ea12328c50c07cf9d7ab4d09fc952dc5a397..0f3fe29b816ebad2c77d95271e76e089437109b2 100644 (file)
@@ -98,7 +98,7 @@
 &esdhc1 {
        pinctrl-names = "default";
        pinctrl-0 = <&pinctrl_esdhc1>;
-       cd-gpios = <&gpio2 29 GPIO_ACTIVE_HIGH>;
+       cd-gpios = <&gpio2 29 GPIO_ACTIVE_LOW>;
        bus-width = <4>;
        status = "okay";
 };
index e9337ad52f59b2159a9419b7d177f2cf6baccfe3..3bc18835fb4bbbe0e388922689a70395bb532cd6 100644 (file)
 &esdhc1 {
        pinctrl-names = "default";
        pinctrl-0 = <&pinctrl_esdhc1>;
-       cd-gpios = <&gpio1 1 0>;
-       wp-gpios = <&gpio1 9 0>;
+       cd-gpios = <&gpio1 1 GPIO_ACTIVE_LOW>;
+       wp-gpios = <&gpio1 9 GPIO_ACTIVE_HIGH>;
        status = "okay";
 };
 
index d0e0f57eb432e9822b005e986c105fc7949e028b..53f40885c530637c1776dc6172764cf6669f918d 100644 (file)
 &esdhc1 {
        pinctrl-names = "default";
        pinctrl-0 = <&pinctrl_esdhc1>;
-       cd-gpios = <&gpio1 1 0>;
-       wp-gpios = <&gpio1 9 0>;
+       cd-gpios = <&gpio1 1 GPIO_ACTIVE_LOW>;
+       wp-gpios = <&gpio1 9 GPIO_ACTIVE_HIGH>;
        status = "okay";
 };
 
index ab4ba39f2ed9d09d144c6536e57c89a20b542604..b0d5542ac829a53b1168c8d8501f5e1fcbd8d167 100644 (file)
 &esdhc3 {
        pinctrl-names = "default";
        pinctrl-0 = <&pinctrl_esdhc3>;
-       cd-gpios = <&gpio3 11 0>;
-       wp-gpios = <&gpio3 12 0>;
+       cd-gpios = <&gpio3 11 GPIO_ACTIVE_LOW>;
+       wp-gpios = <&gpio3 12 GPIO_ACTIVE_HIGH>;
        bus-width = <8>;
        status = "okay";
 };
index 1d325576bcc04d4a119d96f7c85612f2f4bd62a2..fc89ce1e5763a2b03d5da06b38e6e3896e56dd20 100644 (file)
@@ -41,8 +41,8 @@
 &esdhc1 {
        pinctrl-names = "default";
        pinctrl-0 = <&pinctrl_esdhc1>;
-       cd-gpios = <&gpio3 13 0>;
-       wp-gpios = <&gpio4 11 0>;
+       cd-gpios = <&gpio3 13 GPIO_ACTIVE_LOW>;
+       wp-gpios = <&gpio4 11 GPIO_ACTIVE_HIGH>;
        status = "okay";
 };
 
index 4f1f0e2868bf12816ec93f545a8f592e43e7f64d..e03373a58760ff79c431b40fabbb8b54e1d1039e 100644 (file)
@@ -41,8 +41,8 @@
        pinctrl-0 = <&pinctrl_esdhc2>,
                    <&pinctrl_esdhc2_cdwp>;
        vmmc-supply = <&reg_3p3v>;
-       wp-gpios = <&gpio1 2 0>;
-       cd-gpios = <&gpio1 4 0>;
+       wp-gpios = <&gpio1 2 GPIO_ACTIVE_HIGH>;
+       cd-gpios = <&gpio1 4 GPIO_ACTIVE_LOW>;
        status = "disabled";
 };
 
index 704bd72cbfec823da4145ead1fd6e7dc719d094b..d3e50b22064f28777bbd2a3b60f512d844ff9418 100644 (file)
 };
 
 &esdhc1 {
-       cd-gpios = <&gpio3 24 GPIO_ACTIVE_HIGH>;
+       cd-gpios = <&gpio3 24 GPIO_ACTIVE_LOW>;
        fsl,wp-controller;
        pinctrl-names = "default";
        pinctrl-0 = <&pinctrl_esdhc1>;
 };
 
 &esdhc2 {
-       cd-gpios = <&gpio3 25 GPIO_ACTIVE_HIGH>;
+       cd-gpios = <&gpio3 25 GPIO_ACTIVE_LOW>;
        fsl,wp-controller;
        pinctrl-names = "default";
        pinctrl-0 = <&pinctrl_esdhc2>;
index c17d3ad6dba50213c18a9076ca15c261d6740e77..fc51b87ad2087022e8b8648c71fac18e00abb762 100644 (file)
 &esdhc2 {
        pinctrl-names = "default";
        pinctrl-0 = <&pinctrl_esdhc2>;
-       cd-gpios = <&gpio3 25 0>;
-       wp-gpios = <&gpio2 19 0>;
+       cd-gpios = <&gpio3 25 GPIO_ACTIVE_LOW>;
+       wp-gpios = <&gpio2 19 GPIO_ACTIVE_HIGH>;
        vmmc-supply = <&reg_3p3v>;
        status = "okay";
 };
index 43cb3fd76be764cdceb08efd949f47866ebe03e9..5111f5170d5343398bab4ec60e548de07f26200b 100644 (file)
 &usdhc2 {
        pinctrl-names = "default";
        pinctrl-0 = <&pinctrl_usdhc2>;
-       cd-gpios = <&gpio1 4 0>;
-       wp-gpios = <&gpio1 2 0>;
+       cd-gpios = <&gpio1 4 GPIO_ACTIVE_LOW>;
+       wp-gpios = <&gpio1 2 GPIO_ACTIVE_HIGH>;
        vmmc-supply = <&reg_3p3v>;
        status = "okay";
 };
 &usdhc3 {
        pinctrl-names = "default";
        pinctrl-0 = <&pinctrl_usdhc3>;
-       cd-gpios = <&gpio7 0 0>;
-       wp-gpios = <&gpio7 1 0>;
+       cd-gpios = <&gpio7 0 GPIO_ACTIVE_LOW>;
+       wp-gpios = <&gpio7 1 GPIO_ACTIVE_HIGH>;
        vmmc-supply = <&reg_3p3v>;
        status = "okay";
 };
index 78df05e9d1ce61ca7d71c0825367cd8cd3757925..d6515f7a56c427bc7a8490f932b78320f942a624 100644 (file)
@@ -11,6 +11,7 @@
  */
 
 /dts-v1/;
+#include <dt-bindings/gpio/gpio.h>
 #include "imx6q.dtsi"
 
 / {
 };
 
 &usdhc3 {
-       cd-gpios = <&gpio6 11 0>;
-       wp-gpios = <&gpio6 14 0>;
+       cd-gpios = <&gpio6 11 GPIO_ACTIVE_LOW>;
+       wp-gpios = <&gpio6 14 GPIO_ACTIVE_HIGH>;
        vmmc-supply = <&reg_3p3v>;
        pinctrl-names = "default";
        pinctrl-0 = <&pinctrl_usdhc3
index 703539cf36d3078fa7b179fad58c26f9a72b0069..00bd63e63d0cdd47f4ffa012f572f10323a2d14f 100644 (file)
@@ -7,6 +7,7 @@
  */
 
 /dts-v1/;
+#include <dt-bindings/gpio/gpio.h>
 #include "imx6q.dtsi"
 
 / {
        pinctrl-names = "default";
        pinctrl-0 = <&pinctrl_usdhc3>;
        bus-width = <4>;
-       cd-gpios = <&gpio6 11 0>;
+       cd-gpios = <&gpio6 11 GPIO_ACTIVE_LOW>;
        vmmc-supply = <&reg_3p3v>;
        status = "okay";
 };
index a43abfa21e33b9b596dbab2e82b8e121baebdd00..5645d52850a7eca0f1905f7341935ae82ce99859 100644 (file)
        pinctrl-names = "default";
        pinctrl-0 = <&pinctrl_usdhc2>;
        bus-width = <4>;
-       cd-gpios = <&gpio2 2 GPIO_ACTIVE_HIGH>;
+       cd-gpios = <&gpio2 2 GPIO_ACTIVE_LOW>;
        vmmc-supply = <&reg_3p3v>;
        status = "okay";
 };
        pinctrl-names = "default";
        pinctrl-0 = <&pinctrl_usdhc3>;
        bus-width = <4>;
-       cd-gpios = <&gpio2 0 GPIO_ACTIVE_HIGH>;
+       cd-gpios = <&gpio2 0 GPIO_ACTIVE_LOW>;
        wp-gpios = <&gpio2 1 GPIO_ACTIVE_HIGH>;
        vmmc-supply = <&reg_3p3v>;
        status = "okay";
index e6d9195a1da7bfb98260f0d2ca9bbd0f57de9374..f4d6ae564ead290cd9fc1e02d519573364c47af5 100644 (file)
        pinctrl-names = "default";
        pinctrl-0 = <&pinctrl_usdhc1>;
        vmmc-supply = <&reg_3p3v>;
-       cd-gpios = <&gpio4 7 GPIO_ACTIVE_HIGH>;
+       cd-gpios = <&gpio4 7 GPIO_ACTIVE_LOW>;
        status = "okay";
 };
 
        pinctrl-names = "default";
        pinctrl-0 = <&pinctrl_usdhc2>;
        vmmc-supply = <&reg_3p3v>;
-       cd-gpios = <&gpio4 8 GPIO_ACTIVE_HIGH>;
+       cd-gpios = <&gpio4 8 GPIO_ACTIVE_LOW>;
        status = "okay";
 };
 
index 1d85de2befb3ef9c8e1257d33a34721e502e1755..a47a0399a1728da0c1293a8312f30bef082fd349 100644 (file)
 &usdhc1 {
        pinctrl-names = "default";
        pinctrl-0 = <&pinctrl_usdhc1>;
-       cd-gpios = <&gpio1 27 GPIO_ACTIVE_HIGH>;
+       cd-gpios = <&gpio1 27 GPIO_ACTIVE_LOW>;
        no-1-8-v;
        status = "okay";
 };
 &usdhc2 {
        pinctrl-names = "default";
        pinctrl-0 = <&pinctrl_usdhc2>;
-       cd-gpios = <&gpio4 5 GPIO_ACTIVE_HIGH>;
+       cd-gpios = <&gpio4 5 GPIO_ACTIVE_LOW>;
        wp-gpios = <&gpio2 10 GPIO_ACTIVE_HIGH>;
        no-1-8-v;
        status = "okay";
index 59e5d15e3ec4bad9cc664fe0f985502cf39a1375..ff41f83551de6ee72bb6da96b24c3e3ed87d4c58 100644 (file)
        pinctrl-names = "default";
        pinctrl-0 = <&pinctrl_cubox_i_usdhc2_aux &pinctrl_cubox_i_usdhc2>;
        vmmc-supply = <&reg_3p3v>;
-       cd-gpios = <&gpio1 4 0>;
+       cd-gpios = <&gpio1 4 GPIO_ACTIVE_LOW>;
        status = "okay";
 };
index 2c253d6d20bd1fec6b71d18347f27ef0126e3f5e..45e7c39e80d584c73ade1523a3e4316e549c76d4 100644 (file)
@@ -1,3 +1,5 @@
+#include <dt-bindings/gpio/gpio.h>
+
 / {
        regulators {
                compatible = "simple-bus";
 &usdhc2 { /* module slot */
        pinctrl-names = "default";
        pinctrl-0 = <&pinctrl_usdhc2>;
-       cd-gpios = <&gpio2 2 0>;
+       cd-gpios = <&gpio2 2 GPIO_ACTIVE_LOW>;
        status = "okay";
 };
 
index b5756c21ea1d55b791b10e6728b573a918ad78e7..4493f6e993301da96edefaac1334372cd18cf4c7 100644 (file)
 &usdhc3 {
        pinctrl-names = "default";
        pinctrl-0 = <&pinctrl_usdhc3>;
-       cd-gpios = <&gpio7 0 GPIO_ACTIVE_HIGH>;
+       cd-gpios = <&gpio7 0 GPIO_ACTIVE_LOW>;
        vmmc-supply = <&reg_3p3v>;
        status = "okay";
 };
index 86f03c1b147c630c43166aa9d4782da28f333519..a857d1294609a0a0670a7d3f92e446e92185b743 100644 (file)
 &usdhc3 {
        pinctrl-names = "default";
        pinctrl-0 = <&pinctrl_usdhc3>;
-       cd-gpios = <&gpio7 0 GPIO_ACTIVE_HIGH>;
+       cd-gpios = <&gpio7 0 GPIO_ACTIVE_LOW>;
        vmmc-supply = <&reg_3p3v>;
        status = "okay";
 };
index 4a8d97f477592316c3d9ff86cee4bc1b5c9e887f..1afe3385e2d283b7a9a2da84e37ca595ce04c13f 100644 (file)
 &usdhc3 {
        pinctrl-names = "default";
        pinctrl-0 = <&pinctrl_usdhc3>;
-       cd-gpios = <&gpio7 0 GPIO_ACTIVE_HIGH>;
+       cd-gpios = <&gpio7 0 GPIO_ACTIVE_LOW>;
        vmmc-supply = <&reg_3p3v>;
        status = "okay";
 };
index 62a82f3eba888f7d16f11e8dc8cac129ae4c2073..6dd0b764e036d1c51cdde3015c1a2cc56178a999 100644 (file)
                &pinctrl_hummingboard_usdhc2
        >;
        vmmc-supply = <&reg_3p3v>;
-       cd-gpios = <&gpio1 4 0>;
+       cd-gpios = <&gpio1 4 GPIO_ACTIVE_LOW>;
        status = "okay";
 };
index 3af16dfe417be4bb6ec89a2f870e05f1b2df3214..d7fe6672d00cf38141aca0ea88cd807cff4b3cc7 100644 (file)
 &usdhc3 {
        pinctrl-names = "default";
        pinctrl-0 = <&pinctrl_usdhc3>;
-       cd-gpios = <&gpio7 0 0>;
+       cd-gpios = <&gpio7 0 GPIO_ACTIVE_LOW>;
        vmmc-supply = <&reg_3p3v>;
        status = "okay";
 };
 &usdhc4 {
        pinctrl-names = "default";
        pinctrl-0 = <&pinctrl_usdhc4>;
-       cd-gpios = <&gpio2 6 0>;
+       cd-gpios = <&gpio2 6 GPIO_ACTIVE_LOW>;
        vmmc-supply = <&reg_3p3v>;
        status = "okay";
 };
index 1ce6133b67f5c65fefd2fe85d368ac455b199950..9e6ecd99b472dbcb5ce707048fc4fb4775a25a77 100644 (file)
 &usdhc2 {
        pinctrl-names = "default";
        pinctrl-0 = <&pinctrl_usdhc2>;
-       cd-gpios = <&gpio1 4 0>;
-       wp-gpios = <&gpio1 2 0>;
+       cd-gpios = <&gpio1 4 GPIO_ACTIVE_LOW>;
+       wp-gpios = <&gpio1 2 GPIO_ACTIVE_HIGH>;
        status = "disabled";
 };
 
         pinctrl-names = "default";
         pinctrl-0 = <&pinctrl_usdhc3
                     &pinctrl_usdhc3_cdwp>;
-        cd-gpios = <&gpio1 27 0>;
-        wp-gpios = <&gpio1 29 0>;
+       cd-gpios = <&gpio1 27 GPIO_ACTIVE_LOW>;
+       wp-gpios = <&gpio1 29 GPIO_ACTIVE_HIGH>;
         status = "disabled";
 };
index 488a640796ac05fa50c4299185fbe71c64ef1a13..3373fd958e95c72b098ed14ea2a3228ba7903ea6 100644 (file)
        pinctrl-0 = <&pinctrl_usdhc2>;
        bus-width = <4>;
        cd-gpios = <&gpio2 2 GPIO_ACTIVE_LOW>;
-       wp-gpios = <&gpio2 3 GPIO_ACTIVE_LOW>;
+       wp-gpios = <&gpio2 3 GPIO_ACTIVE_HIGH>;
        status = "okay";
 };
 
        pinctrl-0 = <&pinctrl_usdhc3>;
        bus-width = <4>;
        cd-gpios = <&gpio2 0 GPIO_ACTIVE_LOW>;
-       wp-gpios = <&gpio2 1 GPIO_ACTIVE_LOW>;
+       wp-gpios = <&gpio2 1 GPIO_ACTIVE_HIGH>;
        status = "okay";
 };
index 3b24b12651b2b86ee1a74d5baccca435778ddd3e..e329ca5c3322716e14a9117d7d8e1ca956c89f0b 100644 (file)
        pinctrl-0 = <&pinctrl_usdhc3>;
        pinctrl-1 = <&pinctrl_usdhc3_100mhz>;
        pinctrl-2 = <&pinctrl_usdhc3_200mhz>;
-       cd-gpios = <&gpio6 15 0>;
-       wp-gpios = <&gpio1 13 0>;
+       cd-gpios = <&gpio6 15 GPIO_ACTIVE_LOW>;
+       wp-gpios = <&gpio1 13 GPIO_ACTIVE_HIGH>;
        status = "okay";
 };
 
index e00c44f6a0df888f6ecb8935ddc99b85e932ee43..782379320517735f7beb62814e0ce1944ecebb18 100644 (file)
 &usdhc3 {
        pinctrl-names = "default";
        pinctrl-0 = <&pinctrl_usdhc3>;
-       cd-gpios = <&gpio7 0 0>;
-       wp-gpios = <&gpio7 1 0>;
+       cd-gpios = <&gpio7 0 GPIO_ACTIVE_LOW>;
+       wp-gpios = <&gpio7 1 GPIO_ACTIVE_HIGH>;
        vmmc-supply = <&reg_3p3v>;
        status = "okay";
 };
 &usdhc4 {
        pinctrl-names = "default";
        pinctrl-0 = <&pinctrl_usdhc4>;
-       cd-gpios = <&gpio2 6 0>;
+       cd-gpios = <&gpio2 6 GPIO_ACTIVE_LOW>;
        vmmc-supply = <&reg_3p3v>;
        status = "okay";
 };
index a626e6dd8022c04defdbc56171147c04027aa48b..944eb81cb2b8c03aa663a1ee7f2bcc6ca664af52 100644 (file)
        pinctrl-names = "default";
        pinctrl-0 = <&pinctrl_usdhc2>;
        bus-width = <8>;
-       cd-gpios = <&gpio2 2 0>;
-       wp-gpios = <&gpio2 3 0>;
+       cd-gpios = <&gpio2 2 GPIO_ACTIVE_LOW>;
+       wp-gpios = <&gpio2 3 GPIO_ACTIVE_HIGH>;
        status = "okay";
 };
 
        pinctrl-names = "default";
        pinctrl-0 = <&pinctrl_usdhc3>;
        bus-width = <8>;
-       cd-gpios = <&gpio2 0 0>;
-       wp-gpios = <&gpio2 1 0>;
+       cd-gpios = <&gpio2 0 GPIO_ACTIVE_LOW>;
+       wp-gpios = <&gpio2 1 GPIO_ACTIVE_HIGH>;
        status = "okay";
 };
 
index f02b80b41d4fb94d9a5f690d9ddc6d89371e32f6..da08de324e9eb595db45c7cae308327d77bd33a9 100644 (file)
        pinctrl-0 = <&pinctrl_usdhc1>;
        bus-width = <4>;
        no-1-8-v;
-       cd-gpios = <&gpio7 2 0>;
+       cd-gpios = <&gpio7 2 GPIO_ACTIVE_LOW>;
        fsl,wp-controller;
        status = "okay";
 };
        pinctrl-0 = <&pinctrl_usdhc2>;
        bus-width = <4>;
        no-1-8-v;
-       cd-gpios = <&gpio7 3 0>;
+       cd-gpios = <&gpio7 3 GPIO_ACTIVE_LOW>;
        fsl,wp-controller;
        status = "okay";
 };
index 5fb091675582e25b84026f5447d0be604f9266ac..9e096d811bedac74d09c6ebf584665ff1ac6fead 100644 (file)
@@ -9,6 +9,8 @@
  *
  */
 
+#include <dt-bindings/gpio/gpio.h>
+
 / {
        regulators {
                compatible = "simple-bus";
 &usdhc1 {
        pinctrl-names = "default";
        pinctrl-0 = <&pinctrl_usdhc1>;
-       cd-gpios = <&gpio1 2 0>;
+       cd-gpios = <&gpio1 2 GPIO_ACTIVE_LOW>;
        status = "okay";
 };
 
 &usdhc3 {
        pinctrl-names = "default";
        pinctrl-0 = <&pinctrl_usdhc3>;
-       cd-gpios = <&gpio3 9 0>;
+       cd-gpios = <&gpio3 9 GPIO_ACTIVE_LOW>;
        status = "okay";
 };
index 945887d3fdb35a6588155590474479901a45671f..b84dff2e94ea1e4e44c15a054d90e67f9d06bde6 100644 (file)
        pinctrl-1 = <&pinctrl_usdhc1_100mhz>;
        pinctrl-2 = <&pinctrl_usdhc1_200mhz>;
        bus-width = <8>;
-       cd-gpios = <&gpio4 7 0>;
-       wp-gpios = <&gpio4 6 0>;
+       cd-gpios = <&gpio4 7 GPIO_ACTIVE_LOW>;
+       wp-gpios = <&gpio4 6 GPIO_ACTIVE_HIGH>;
        status = "okay";
 };
 
        pinctrl-0 = <&pinctrl_usdhc2>;
        pinctrl-1 = <&pinctrl_usdhc2_100mhz>;
        pinctrl-2 = <&pinctrl_usdhc2_200mhz>;
-       cd-gpios = <&gpio5 0 0>;
-       wp-gpios = <&gpio4 29 0>;
+       cd-gpios = <&gpio5 0 GPIO_ACTIVE_LOW>;
+       wp-gpios = <&gpio4 29 GPIO_ACTIVE_HIGH>;
        status = "okay";
 };
 
        pinctrl-0 = <&pinctrl_usdhc3>;
        pinctrl-1 = <&pinctrl_usdhc3_100mhz>;
        pinctrl-2 = <&pinctrl_usdhc3_200mhz>;
-       cd-gpios = <&gpio3 22 0>;
+       cd-gpios = <&gpio3 22 GPIO_ACTIVE_LOW>;
        status = "okay";
 };
index e3c0b63c22056764f93dbecfc2b9500770a6343c..115f3fd78971868ad7025fbb0f46c17c4252e574 100644 (file)
@@ -49,7 +49,7 @@
        pinctrl-1 = <&pinctrl_usdhc3_100mhz>;
        pinctrl-2 = <&pinctrl_usdhc3_200mhz>;
        bus-width = <8>;
-       cd-gpios = <&gpio7 10 GPIO_ACTIVE_HIGH>;
+       cd-gpios = <&gpio7 10 GPIO_ACTIVE_LOW>;
        wp-gpios = <&gpio3 19 GPIO_ACTIVE_HIGH>;
        keep-power-in-suspend;
        enable-sdio-wakeup;
@@ -61,7 +61,7 @@
        pinctrl-names = "default";
        pinctrl-0 = <&pinctrl_usdhc4>;
        bus-width = <8>;
-       cd-gpios = <&gpio7 11 GPIO_ACTIVE_HIGH>;
+       cd-gpios = <&gpio7 11 GPIO_ACTIVE_LOW>;
        no-1-8-v;
        keep-power-in-suspend;
        enable-sdio-wakup;
index cef04cef3a807f44efdc6a44fb754e0eb4b40b1e..ac88c3467078ec92971324395101b7db785da005 100644 (file)
        pinctrl-1 = <&pinctrl_usdhc3_100mhz>;
        pinctrl-2 = <&pinctrl_usdhc3_200mhz>;
        bus-width = <8>;
-       cd-gpios = <&gpio2 10 GPIO_ACTIVE_HIGH>;
+       cd-gpios = <&gpio2 10 GPIO_ACTIVE_LOW>;
        wp-gpios = <&gpio2 15 GPIO_ACTIVE_HIGH>;
        keep-power-in-suspend;
        enable-sdio-wakeup;
 &usdhc4 {
        pinctrl-names = "default";
        pinctrl-0 = <&pinctrl_usdhc4>;
-       cd-gpios = <&gpio6 21 GPIO_ACTIVE_HIGH>;
+       cd-gpios = <&gpio6 21 GPIO_ACTIVE_LOW>;
        wp-gpios = <&gpio6 20 GPIO_ACTIVE_HIGH>;
        status = "okay";
 };
index 4d1a4b977d8492c62085f87ecff7ad29704eb4ad..fdd1d7c9a5cc2608ac047f088f62e394d88db124 100644 (file)
 &usdhc1 {
        pinctrl-names = "default";
        pinctrl-0 = <&pinctrl_usdhc1>;
-       cd-gpios = <&gpio5 0 0>;
-       wp-gpios = <&gpio5 1 0>;
+       cd-gpios = <&gpio5 0 GPIO_ACTIVE_LOW>;
+       wp-gpios = <&gpio5 1 GPIO_ACTIVE_HIGH>;
        enable-sdio-wakeup;
        keep-power-in-suspend;
        status = "okay";
index 9d4aa18f2a825256537a7d7e33c606d89d003ee7..e8ca6eaedd0252e2056530d71d519f423931d323 100644 (file)
@@ -122,12 +122,12 @@ static int __init uefi_init(void)
 
        /* Show what we know for posterity */
        c16 = early_memremap(efi_to_phys(efi.systab->fw_vendor),
-                            sizeof(vendor));
+                            sizeof(vendor) * sizeof(efi_char16_t));
        if (c16) {
                for (i = 0; i < (int) sizeof(vendor) - 1 && *c16; ++i)
                        vendor[i] = c16[i];
                vendor[i] = '\0';
-               early_memunmap(c16, sizeof(vendor));
+               early_memunmap(c16, sizeof(vendor) * sizeof(efi_char16_t));
        }
 
        pr_info("EFI v%u.%.02u by %s\n",
index 23b1a97fae7ad686acc581c5888e95d5e4928a1e..52c179bec0cc6c69d4430324a0a581f66e066a6d 100644 (file)
@@ -80,6 +80,9 @@ int clk_enable(struct clk *clk)
 {
        unsigned long flags;
 
+       if (!clk)
+               return 0;
+
        spin_lock_irqsave(&clk_lock, flags);
        __clk_enable(clk);
        spin_unlock_irqrestore(&clk_lock, flags);
@@ -106,6 +109,9 @@ void clk_disable(struct clk *clk)
 {
        unsigned long flags;
 
+       if (IS_ERR_OR_NULL(clk))
+               return;
+
        spin_lock_irqsave(&clk_lock, flags);
        __clk_disable(clk);
        spin_unlock_irqrestore(&clk_lock, flags);
@@ -117,6 +123,9 @@ unsigned long clk_get_rate(struct clk *clk)
        unsigned long flags;
        unsigned long rate;
 
+       if (!clk)
+               return 0;
+
        spin_lock_irqsave(&clk_lock, flags);
        rate = clk->get_rate(clk);
        spin_unlock_irqrestore(&clk_lock, flags);
@@ -129,6 +138,9 @@ long clk_round_rate(struct clk *clk, unsigned long rate)
 {
        unsigned long flags, actual_rate;
 
+       if (!clk)
+               return 0;
+
        if (!clk->set_rate)
                return -ENOSYS;
 
@@ -145,6 +157,9 @@ int clk_set_rate(struct clk *clk, unsigned long rate)
        unsigned long flags;
        long ret;
 
+       if (!clk)
+               return 0;
+
        if (!clk->set_rate)
                return -ENOSYS;
 
@@ -161,6 +176,9 @@ int clk_set_parent(struct clk *clk, struct clk *parent)
        unsigned long flags;
        int ret;
 
+       if (!clk)
+               return 0;
+
        if (!clk->set_parent)
                return -ENOSYS;
 
@@ -174,7 +192,7 @@ EXPORT_SYMBOL(clk_set_parent);
 
 struct clk *clk_get_parent(struct clk *clk)
 {
-       return clk->parent;
+       return !clk ? NULL : clk->parent;
 }
 EXPORT_SYMBOL(clk_get_parent);
 
index 0c3f25ee3381d9fad606ea36bb09682d362a87d8..f8de767ce2bcc763979ef6e08d4ef4f862a89efb 100644 (file)
@@ -174,6 +174,11 @@ static inline void _writel(unsigned long l, unsigned long addr)
 #define iowrite16 writew
 #define iowrite32 writel
 
+#define ioread16be(addr)       be16_to_cpu(readw(addr))
+#define ioread32be(addr)       be32_to_cpu(readl(addr))
+#define iowrite16be(v, addr)   writew(cpu_to_be16(v), (addr))
+#define iowrite32be(v, addr)   writel(cpu_to_be32(v), (addr))
+
 #define mmiowb()
 
 #define flush_write_buffers() do { } while (0)  /* M32R_FIXME */
index bff5e3b6d8223b8f9a9c84d8f99235d3d4e648bc..8ba32436effe4b9da0a271ae1ec0ad2a2bd689e7 100644 (file)
@@ -138,6 +138,8 @@ int init_cache_level(unsigned int cpu)
        union cache_topology ct;
        enum cache_type ctype;
 
+       if (!test_facility(34))
+               return -EOPNOTSUPP;
        if (!this_cpu_ci)
                return -EINVAL;
        ct.raw = ecag(EXTRACT_TOPOLOGY, 0, 0);
index fee782acc2ee51f6a3aae4b28152ec76981c8350..8d2e5165865f2c27b50fe8b1c6845de45666698a 100644 (file)
@@ -448,13 +448,13 @@ static void bpf_jit_prologue(struct bpf_jit *jit)
                EMIT6_DISP_LH(0xe3000000, 0x0004, REG_SKB_DATA, REG_0,
                              BPF_REG_1, offsetof(struct sk_buff, data));
        }
-       /* BPF compatibility: clear A (%b7) and X (%b8) registers */
-       if (REG_SEEN(BPF_REG_7))
-               /* lghi %b7,0 */
-               EMIT4_IMM(0xa7090000, BPF_REG_7, 0);
-       if (REG_SEEN(BPF_REG_8))
-               /* lghi %b8,0 */
-               EMIT4_IMM(0xa7090000, BPF_REG_8, 0);
+       /* BPF compatibility: clear A (%b0) and X (%b7) registers */
+       if (REG_SEEN(BPF_REG_A))
+               /* lghi %ba,0 */
+               EMIT4_IMM(0xa7090000, BPF_REG_A, 0);
+       if (REG_SEEN(BPF_REG_X))
+               /* lghi %bx,0 */
+               EMIT4_IMM(0xa7090000, BPF_REG_X, 0);
 }
 
 /*
index 99c9ff87e0187502179f8012afa5bd54309d2eb9..6b755d125783f45b05de02e3fe5f7b5e32d1af45 100644 (file)
@@ -1139,7 +1139,7 @@ static void __init load_hv_initrd(void)
 
 void __init free_initrd_mem(unsigned long begin, unsigned long end)
 {
-       free_bootmem(__pa(begin), end - begin);
+       free_bootmem_late(__pa(begin), end - begin);
 }
 
 static int __init setup_initrd(char *str)
index bb187a6a877cc62666afb43421eb7e5e8b6fbbfe..5a1844765a7aba6dab47b878daf6eb723c044c03 100644 (file)
@@ -205,7 +205,6 @@ sysexit_from_sys_call:
        movl    RDX(%rsp), %edx         /* arg3 */
        movl    RSI(%rsp), %ecx         /* arg4 */
        movl    RDI(%rsp), %r8d         /* arg5 */
-       movl    %ebp, %r9d              /* arg6 */
        .endm
 
        .macro auditsys_exit exit
@@ -236,6 +235,7 @@ sysexit_from_sys_call:
 
 sysenter_auditsys:
        auditsys_entry_common
+       movl    %ebp, %r9d              /* reload 6th syscall arg */
        jmp     sysenter_dispatch
 
 sysexit_audit:
@@ -336,7 +336,7 @@ ENTRY(entry_SYSCALL_compat)
         * 32-bit zero extended:
         */
        ASM_STAC
-1:     movl    (%r8), %ebp
+1:     movl    (%r8), %r9d
        _ASM_EXTABLE(1b, ia32_badarg)
        ASM_CLAC
        orl     $TS_COMPAT, ASM_THREAD_INFO(TI_status, %rsp, SIZEOF_PTREGS)
@@ -346,7 +346,7 @@ ENTRY(entry_SYSCALL_compat)
 cstar_do_call:
        /* 32-bit syscall -> 64-bit C ABI argument conversion */
        movl    %edi, %r8d              /* arg5 */
-       movl    %ebp, %r9d              /* arg6 */
+       /* r9 already loaded */         /* arg6 */
        xchg    %ecx, %esi              /* rsi:arg2, rcx:arg4 */
        movl    %ebx, %edi              /* arg1 */
        movl    %edx, %edx              /* arg3 (zero extension) */
@@ -358,7 +358,6 @@ cstar_dispatch:
        call    *ia32_sys_call_table(, %rax, 8)
        movq    %rax, RAX(%rsp)
 1:
-       movl    RCX(%rsp), %ebp
        DISABLE_INTERRUPTS(CLBR_NONE)
        TRACE_IRQS_OFF
        testl   $_TIF_ALLWORK_MASK, ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
@@ -392,7 +391,9 @@ sysretl_from_sys_call:
 
 #ifdef CONFIG_AUDITSYSCALL
 cstar_auditsys:
+       movl    %r9d, R9(%rsp)          /* register to be clobbered by call */
        auditsys_entry_common
+       movl    R9(%rsp), %r9d          /* reload 6th syscall arg */
        jmp     cstar_dispatch
 
 sysretl_audit:
@@ -404,14 +405,16 @@ cstar_tracesys:
        testl   $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT), ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
        jz      cstar_auditsys
 #endif
+       xchgl   %r9d, %ebp
        SAVE_EXTRA_REGS
        xorl    %eax, %eax              /* Do not leak kernel information */
        movq    %rax, R11(%rsp)
        movq    %rax, R10(%rsp)
-       movq    %rax, R9(%rsp)
+       movq    %r9, R9(%rsp)
        movq    %rax, R8(%rsp)
        movq    %rsp, %rdi              /* &pt_regs -> arg1 */
        call    syscall_trace_enter
+       movl    R9(%rsp), %r9d
 
        /* Reload arg registers from stack. (see sysenter_tracesys) */
        movl    RCX(%rsp), %ecx
@@ -421,6 +424,7 @@ cstar_tracesys:
        movl    %eax, %eax              /* zero extension */
 
        RESTORE_EXTRA_REGS
+       xchgl   %ebp, %r9d
        jmp     cstar_do_call
 END(entry_SYSCALL_compat)
 
index a4ae82eb82aa8485146aee83376ba26fb9306afe..cd54147cb365fb3622121c782eee8e7fb3ddcc77 100644 (file)
@@ -354,7 +354,7 @@ struct kvm_xcrs {
 struct kvm_sync_regs {
 };
 
-#define KVM_QUIRK_LINT0_REENABLED      (1 << 0)
-#define KVM_QUIRK_CD_NW_CLEARED                (1 << 1)
+#define KVM_X86_QUIRK_LINT0_REENABLED  (1 << 0)
+#define KVM_X86_QUIRK_CD_NW_CLEARED    (1 << 1)
 
 #endif /* _ASM_X86_KVM_H */
index 188076161c1be51afe135d6289b8ce0e96952bf3..63eb68b73589bcbbc21f9c526193adca0de2e52d 100644 (file)
@@ -951,6 +951,14 @@ static u64 intel_cqm_event_count(struct perf_event *event)
        if (!cqm_group_leader(event))
                return 0;
 
+       /*
+        * Getting up-to-date values requires an SMP IPI which is not
+        * possible if we're being called in interrupt context. Return
+        * the cached values instead.
+        */
+       if (unlikely(in_interrupt()))
+               goto out;
+
        /*
         * Notice that we don't perform the reading of an RMID
         * atomically, because we can't hold a spin lock across the
index 0b39173dd9714ebed0eaca58b27af65e86dfbc0c..1e173f6285c73b76b2e6ab41daed7681406c5d15 100644 (file)
@@ -351,9 +351,15 @@ static int __init x86_noxsave_setup(char *s)
 
        setup_clear_cpu_cap(X86_FEATURE_XSAVE);
        setup_clear_cpu_cap(X86_FEATURE_XSAVEOPT);
+       setup_clear_cpu_cap(X86_FEATURE_XSAVEC);
        setup_clear_cpu_cap(X86_FEATURE_XSAVES);
        setup_clear_cpu_cap(X86_FEATURE_AVX);
        setup_clear_cpu_cap(X86_FEATURE_AVX2);
+       setup_clear_cpu_cap(X86_FEATURE_AVX512F);
+       setup_clear_cpu_cap(X86_FEATURE_AVX512PF);
+       setup_clear_cpu_cap(X86_FEATURE_AVX512ER);
+       setup_clear_cpu_cap(X86_FEATURE_AVX512CD);
+       setup_clear_cpu_cap(X86_FEATURE_MPX);
 
        return 1;
 }
index 954e98a8c2e38bf9861d4d6349eb721264e8cb18..2a5ca97c263bb48092ea80f88c7d30120ea63b6e 100644 (file)
@@ -1595,7 +1595,7 @@ void kvm_lapic_reset(struct kvm_vcpu *vcpu, bool init_event)
        for (i = 0; i < APIC_LVT_NUM; i++)
                apic_set_reg(apic, APIC_LVTT + 0x10 * i, APIC_LVT_MASKED);
        apic_update_lvtt(apic);
-       if (!(vcpu->kvm->arch.disabled_quirks & KVM_QUIRK_LINT0_REENABLED))
+       if (kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_LINT0_REENABLED))
                apic_set_reg(apic, APIC_LVT0,
                             SET_APIC_DELIVERY_MODE(0, APIC_MODE_EXTINT));
        apic_manage_nmi_watchdog(apic, kvm_apic_get_reg(apic, APIC_LVT0));
index de1d2d8062e24048232af909d684f0c2ed9e21bc..dc0a84a6f3094ac997701de74c868763e6843229 100644 (file)
@@ -120,6 +120,16 @@ static u8 mtrr_default_type(struct kvm_mtrr *mtrr_state)
        return mtrr_state->deftype & IA32_MTRR_DEF_TYPE_TYPE_MASK;
 }
 
+static u8 mtrr_disabled_type(void)
+{
+       /*
+        * Intel SDM 11.11.2.2: all MTRRs are disabled when
+        * IA32_MTRR_DEF_TYPE.E bit is cleared, and the UC
+        * memory type is applied to all of physical memory.
+        */
+       return MTRR_TYPE_UNCACHABLE;
+}
+
 /*
 * Three terms are used in the following code:
 * - segment, it indicates the address segments covered by fixed MTRRs.
@@ -434,6 +444,8 @@ struct mtrr_iter {
 
        /* output fields. */
        int mem_type;
+       /* mtrr is completely disabled? */
+       bool mtrr_disabled;
        /* [start, end) is not fully covered in MTRRs? */
        bool partial_map;
 
@@ -549,7 +561,7 @@ static void mtrr_lookup_var_next(struct mtrr_iter *iter)
 static void mtrr_lookup_start(struct mtrr_iter *iter)
 {
        if (!mtrr_is_enabled(iter->mtrr_state)) {
-               iter->partial_map = true;
+               iter->mtrr_disabled = true;
                return;
        }
 
@@ -563,6 +575,7 @@ static void mtrr_lookup_init(struct mtrr_iter *iter,
        iter->mtrr_state = mtrr_state;
        iter->start = start;
        iter->end = end;
+       iter->mtrr_disabled = false;
        iter->partial_map = false;
        iter->fixed = false;
        iter->range = NULL;
@@ -656,15 +669,19 @@ u8 kvm_mtrr_get_guest_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn)
                return MTRR_TYPE_WRBACK;
        }
 
-       /* It is not covered by MTRRs. */
-       if (iter.partial_map) {
-               /*
-                * We just check one page, partially covered by MTRRs is
-                * impossible.
-                */
-               WARN_ON(type != -1);
-               type = mtrr_default_type(mtrr_state);
-       }
+       if (iter.mtrr_disabled)
+               return mtrr_disabled_type();
+
+       /*
+        * We just check one page, partially covered by MTRRs is
+        * impossible.
+        */
+       WARN_ON(iter.partial_map);
+
+       /* not contained in any MTRRs. */
+       if (type == -1)
+               return mtrr_default_type(mtrr_state);
+
        return type;
 }
 EXPORT_SYMBOL_GPL(kvm_mtrr_get_guest_memory_type);
@@ -689,6 +706,9 @@ bool kvm_mtrr_check_gfn_range_consistency(struct kvm_vcpu *vcpu, gfn_t gfn,
                        return false;
        }
 
+       if (iter.mtrr_disabled)
+               return true;
+
        if (!iter.partial_map)
                return true;
 
index bbc678a66b18719287b091787db96cf1f9f98981..8e0c0844c6b9681e31e64bdeba0f1822108bec3e 100644 (file)
@@ -1672,7 +1672,7 @@ static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
         * does not do it - this results in some delay at
         * reboot
         */
-       if (!(vcpu->kvm->arch.disabled_quirks & KVM_QUIRK_CD_NW_CLEARED))
+       if (kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_CD_NW_CLEARED))
                cr0 &= ~(X86_CR0_CD | X86_CR0_NW);
        svm->vmcb->save.cr0 = cr0;
        mark_dirty(svm->vmcb, VMCB_CR);
index 5b4e9384717a17257ea20ef47031dd5f158273a2..83b7b5cd75d52dd67976274da3c11807f1c35490 100644 (file)
@@ -8650,7 +8650,10 @@ static u64 vmx_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio)
 
        if (kvm_read_cr0(vcpu) & X86_CR0_CD) {
                ipat = VMX_EPT_IPAT_BIT;
-               cache = MTRR_TYPE_UNCACHABLE;
+               if (kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_CD_NW_CLEARED))
+                       cache = MTRR_TYPE_WRBACK;
+               else
+                       cache = MTRR_TYPE_UNCACHABLE;
                goto exit;
        }
 
index edc8cdcd786b00627a1029c8bb3b2aedcb291d09..0ca2f3e4803c2a5846be297a51eb5c1cf67aff4a 100644 (file)
@@ -147,6 +147,11 @@ static inline void kvm_register_writel(struct kvm_vcpu *vcpu,
        return kvm_register_write(vcpu, reg, val);
 }
 
+static inline bool kvm_check_has_quirk(struct kvm *kvm, u64 quirk)
+{
+       return !(kvm->arch.disabled_quirks & quirk);
+}
+
 void kvm_before_handle_nmi(struct kvm_vcpu *vcpu);
 void kvm_after_handle_nmi(struct kvm_vcpu *vcpu);
 void kvm_set_pending_timer(struct kvm_vcpu *vcpu);
index cc5ccc415cc01ef8ea9e58b3f81a281c9ab412bf..b9c78f3bcd6739718def393f49d1f1cbb0e8bb5c 100644 (file)
@@ -63,8 +63,6 @@ static int __ioremap_check_ram(unsigned long start_pfn, unsigned long nr_pages,
                    !PageReserved(pfn_to_page(start_pfn + i)))
                        return 1;
 
-       WARN_ONCE(1, "ioremap on RAM pfn 0x%lx\n", start_pfn);
-
        return 0;
 }
 
@@ -94,7 +92,6 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,
        pgprot_t prot;
        int retval;
        void __iomem *ret_addr;
-       int ram_region;
 
        /* Don't allow wraparound or zero size */
        last_addr = phys_addr + size - 1;
@@ -117,23 +114,15 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,
        /*
         * Don't allow anybody to remap normal RAM that we're using..
         */
-       /* First check if whole region can be identified as RAM or not */
-       ram_region = region_is_ram(phys_addr, size);
-       if (ram_region > 0) {
-               WARN_ONCE(1, "ioremap on RAM at 0x%lx - 0x%lx\n",
-                               (unsigned long int)phys_addr,
-                               (unsigned long int)last_addr);
+       pfn      = phys_addr >> PAGE_SHIFT;
+       last_pfn = last_addr >> PAGE_SHIFT;
+       if (walk_system_ram_range(pfn, last_pfn - pfn + 1, NULL,
+                                         __ioremap_check_ram) == 1) {
+               WARN_ONCE(1, "ioremap on RAM at %pa - %pa\n",
+                         &phys_addr, &last_addr);
                return NULL;
        }
 
-       /* If could not be identified(-1), check page by page */
-       if (ram_region < 0) {
-               pfn      = phys_addr >> PAGE_SHIFT;
-               last_pfn = last_addr >> PAGE_SHIFT;
-               if (walk_system_ram_range(pfn, last_pfn - pfn + 1, NULL,
-                                         __ioremap_check_ram) == 1)
-                       return NULL;
-       }
        /*
         * Mappings have to be page-aligned
         */
index 9d518d693b4b7adf07463c695f3cd14412a19192..844b06d67df4da95cec611375d55c05d52884efd 100644 (file)
@@ -126,3 +126,10 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
                mm->get_unmapped_area = arch_get_unmapped_area_topdown;
        }
 }
+
+const char *arch_vma_name(struct vm_area_struct *vma)
+{
+       if (vma->vm_flags & VM_MPX)
+               return "[mpx]";
+       return NULL;
+}
index 7a657f58bbea152057262a61e325c169f78bc516..db1b0bc5017c9f01b456a97b5b84d6d40a2c03b2 100644 (file)
 #define CREATE_TRACE_POINTS
 #include <asm/trace/mpx.h>
 
-static const char *mpx_mapping_name(struct vm_area_struct *vma)
-{
-       return "[mpx]";
-}
-
-static struct vm_operations_struct mpx_vma_ops = {
-       .name = mpx_mapping_name,
-};
-
-static int is_mpx_vma(struct vm_area_struct *vma)
-{
-       return (vma->vm_ops == &mpx_vma_ops);
-}
-
 static inline unsigned long mpx_bd_size_bytes(struct mm_struct *mm)
 {
        if (is_64bit_mm(mm))
@@ -53,9 +39,6 @@ static inline unsigned long mpx_bt_size_bytes(struct mm_struct *mm)
 /*
  * This is really a simplified "vm_mmap". it only handles MPX
  * bounds tables (the bounds directory is user-allocated).
- *
- * Later on, we use the vma->vm_ops to uniquely identify these
- * VMAs.
  */
 static unsigned long mpx_mmap(unsigned long len)
 {
@@ -101,7 +84,6 @@ static unsigned long mpx_mmap(unsigned long len)
                ret = -ENOMEM;
                goto out;
        }
-       vma->vm_ops = &mpx_vma_ops;
 
        if (vm_flags & VM_LOCKED) {
                up_write(&mm->mmap_sem);
@@ -812,7 +794,7 @@ static noinline int zap_bt_entries_mapping(struct mm_struct *mm,
                 * so stop immediately and return an error.  This
                 * probably results in a SIGSEGV.
                 */
-               if (!is_mpx_vma(vma))
+               if (!(vma->vm_flags & VM_MPX))
                        return -EINVAL;
 
                len = min(vma->vm_end, end) - addr;
@@ -945,9 +927,9 @@ static int try_unmap_single_bt(struct mm_struct *mm,
         * lots of tables even though we have no actual table
         * entries in use.
         */
-       while (next && is_mpx_vma(next))
+       while (next && (next->vm_flags & VM_MPX))
                next = next->vm_next;
-       while (prev && is_mpx_vma(prev))
+       while (prev && (prev->vm_flags & VM_MPX))
                prev = prev->vm_prev;
        /*
         * We know 'start' and 'end' lie within an area controlled
index 3250f2371aea5c9f2c8f8f19d4f6535627e0e188..90b924acd9822ffdd9409b9fd97325ea7954b97a 100644 (file)
@@ -117,7 +117,7 @@ static void flush_tlb_func(void *info)
                } else {
                        unsigned long addr;
                        unsigned long nr_pages =
-                               f->flush_end - f->flush_start / PAGE_SIZE;
+                               (f->flush_end - f->flush_start) / PAGE_SIZE;
                        addr = f->flush_start;
                        while (addr < f->flush_end) {
                                __flush_tlb_single(addr);
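The one-character fix above is an operator-precedence bug: without the parentheses, only flush_start is divided by PAGE_SIZE before the subtraction, so nr_pages comes out wildly wrong. A tiny worked example:

#include <stdio.h>

#define PAGE_SIZE 4096UL

int main(void)
{
        unsigned long flush_start = 0x10000, flush_end = 0x14000; /* 4 pages */

        unsigned long wrong = flush_end - flush_start / PAGE_SIZE;   /* 0x14000 - 16 = 81904 */
        unsigned long right = (flush_end - flush_start) / PAGE_SIZE; /* 4 */

        printf("without parentheses: %lu\n", wrong);
        printf("with parentheses:    %lu\n", right);
        return 0;
}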
index 2a00d349cd6883cba32d9fd477251889a1c58081..d6e5ba3399f0ae151ea040e2ec1fd1df1c3dba6a 100644 (file)
@@ -1831,8 +1831,9 @@ EXPORT_SYMBOL(bio_endio);
  * Allocates and returns a new bio which represents @sectors from the start of
  * @bio, and updates @bio to represent the remaining sectors.
  *
- * The newly allocated bio will point to @bio's bi_io_vec; it is the caller's
- * responsibility to ensure that @bio is not freed before the split.
+ * Unless this is a discard request the newly allocated bio will point
+ * to @bio's bi_io_vec; it is the caller's responsibility to ensure that
+ * @bio is not freed before the split.
  */
 struct bio *bio_split(struct bio *bio, int sectors,
                      gfp_t gfp, struct bio_set *bs)
@@ -1842,7 +1843,15 @@ struct bio *bio_split(struct bio *bio, int sectors,
        BUG_ON(sectors <= 0);
        BUG_ON(sectors >= bio_sectors(bio));
 
-       split = bio_clone_fast(bio, gfp, bs);
+       /*
+        * Discards need a mutable bio_vec to accommodate the payload
+        * required by the DSM TRIM and UNMAP commands.
+        */
+       if (bio->bi_rw & REQ_DISCARD)
+               split = bio_clone_bioset(bio, gfp, bs);
+       else
+               split = bio_clone_fast(bio, gfp, bs);
+
        if (!split)
                return NULL;
 
@@ -2009,6 +2018,7 @@ int bio_associate_blkcg(struct bio *bio, struct cgroup_subsys_state *blkcg_css)
        bio->bi_css = blkcg_css;
        return 0;
 }
+EXPORT_SYMBOL_GPL(bio_associate_blkcg);
 
 /**
  * bio_associate_current - associate a bio with %current
@@ -2039,6 +2049,7 @@ int bio_associate_current(struct bio *bio)
        bio->bi_css = task_get_css(current, blkio_cgrp_id);
        return 0;
 }
+EXPORT_SYMBOL_GPL(bio_associate_current);
 
 /**
  * bio_disassociate_task - undo bio_associate_current()
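Per the comment added to bio_split() above, a discard bio needs its own bio_vec because the TRIM/UNMAP payload is written into it, so it takes the full bio_clone_bioset() copy while every other bio keeps the cheap bio_clone_fast() shallow clone. A toy model of that deep-versus-shallow choice; the struct and helper names are made up for illustration.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct buf { char *data; int shared; };

static struct buf clone_buf(const char *src, int will_modify)
{
        struct buf b = { .shared = !will_modify };

        if (will_modify) {
                size_t n = strlen(src) + 1;
                b.data = malloc(n);      /* deep copy, like bio_clone_bioset() */
                if (!b.data)
                        exit(1);
                memcpy(b.data, src, n);
        } else {
                b.data = (char *)src;    /* share the original, like bio_clone_fast() */
        }
        return b;
}

int main(void)
{
        const char *orig = "payload";
        struct buf discard = clone_buf(orig, 1);
        struct buf regular = clone_buf(orig, 0);

        discard.data[0] = 'P';           /* safe: private copy */
        printf("%s %s %s\n", orig, discard.data, regular.data);
        free(discard.data);
        return 0;
}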
index 9da02c021ebe2ed296cbdf8560d5bbf2ecd1c48f..d6283b3f5db50674d18ae485970f1e24ed44569d 100644 (file)
@@ -718,8 +718,12 @@ int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
                return -EINVAL;
 
        disk = get_gendisk(MKDEV(major, minor), &part);
-       if (!disk || part)
+       if (!disk)
                return -EINVAL;
+       if (part) {
+               put_disk(disk);
+               return -EINVAL;
+       }
 
        rcu_read_lock();
        spin_lock_irq(disk->queue->queue_lock);
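The blkg_conf_prep() change above splits the error test so that when the lookup resolves to a partition, the reference taken by get_gendisk() is dropped before returning -EINVAL; the old combined check leaked it. A compact sketch of the acquire/validate/release-on-error pattern, with hypothetical names.

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical refcounted resource standing in for a gendisk. */
struct disk { int refs; };

static struct disk *get_disk(int id, int *part)
{
        struct disk *d = calloc(1, sizeof(*d));

        if (!d)
                return NULL;
        d->refs = 1;
        *part = id % 10;          /* non-zero: a partition, not a whole disk */
        return d;
}

static void put_disk(struct disk *d)
{
        if (--d->refs == 0)
                free(d);
}

static int lookup(int id)
{
        int part;
        struct disk *d = get_disk(id, &part);

        if (!d)
                return -1;
        if (part) {
                put_disk(d);      /* error path must undo the reference taken above */
                return -1;
        }
        /* ... use d ... */
        put_disk(d);
        return 0;
}

int main(void)
{
        printf("%d %d\n", lookup(80), lookup(81));  /* 0 -1 */
        return 0;
}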
index 717afcdb5f4a9657a9ca6f6af9825e109eba9688..88dbbb115285a3ce54ae5c35260692163bae16ca 100644 (file)
@@ -231,7 +231,7 @@ int acpi_device_set_power(struct acpi_device *device, int state)
                dev_warn(&device->dev, "Failed to change power state to %s\n",
                         acpi_power_state_string(state));
        } else {
-               device->power.state = state;
+               device->power.state = target_state;
                ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                                  "Device [%s] transitioned to %s\n",
                                  device->pnp.bus_id,
index e83fc3d0da9c9c60a99a6dec56cc568a97a0a851..db5d9f79a247c5ceb2cb590f206927c22f6f2b7c 100644 (file)
@@ -2478,6 +2478,10 @@ int ata_dev_configure(struct ata_device *dev)
                dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_128,
                                         dev->max_sectors);
 
+       if (dev->horkage & ATA_HORKAGE_MAX_SEC_1024)
+               dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_1024,
+                                        dev->max_sectors);
+
        if (dev->horkage & ATA_HORKAGE_MAX_SEC_LBA48)
                dev->max_sectors = ATA_MAX_SECTORS_LBA48;
 
@@ -4146,6 +4150,12 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
        { "Slimtype DVD A  DS8A8SH", NULL,      ATA_HORKAGE_MAX_SEC_LBA48 },
        { "Slimtype DVD A  DS8A9SH", NULL,      ATA_HORKAGE_MAX_SEC_LBA48 },
 
+       /*
+        * Causes silent data corruption with higher max sects.
+        * http://lkml.kernel.org/g/x49wpy40ysk.fsf@segfault.boston.devel.redhat.com
+        */
+       { "ST380013AS",         "3.20",         ATA_HORKAGE_MAX_SEC_1024 },
+
        /* Devices we expect to fail diagnostics */
 
        /* Devices where NCQ should be avoided */
@@ -4174,9 +4184,10 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
        { "ST3320[68]13AS",     "SD1[5-9]",     ATA_HORKAGE_NONCQ |
                                                ATA_HORKAGE_FIRMWARE_WARN },
 
-       /* Seagate Momentus SpinPoint M8 seem to have FPMDA_AA issues */
+       /* drives which fail FPDMA_AA activation (some may freeze afterwards) */
        { "ST1000LM024 HN-M101MBB", "2AR10001", ATA_HORKAGE_BROKEN_FPDMA_AA },
        { "ST1000LM024 HN-M101MBB", "2BA30001", ATA_HORKAGE_BROKEN_FPDMA_AA },
+       { "VB0250EAVER",        "HPG7",         ATA_HORKAGE_BROKEN_FPDMA_AA },
 
        /* Blacklist entries taken from Silicon Image 3124/3132
           Windows driver .inf file - also several Linux problem reports */
@@ -4229,7 +4240,7 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
                                                ATA_HORKAGE_ZERO_AFTER_TRIM, },
        { "Crucial_CT*M500*",           NULL,   ATA_HORKAGE_NO_NCQ_TRIM |
                                                ATA_HORKAGE_ZERO_AFTER_TRIM, },
-       { "Micron_M5[15]0*",            "MU01", ATA_HORKAGE_NO_NCQ_TRIM |
+       { "Micron_M5[15]0_*",           "MU01", ATA_HORKAGE_NO_NCQ_TRIM |
                                                ATA_HORKAGE_ZERO_AFTER_TRIM, },
        { "Crucial_CT*M550*",           "MU01", ATA_HORKAGE_NO_NCQ_TRIM |
                                                ATA_HORKAGE_ZERO_AFTER_TRIM, },
@@ -4238,6 +4249,9 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
        { "Samsung SSD 8*",             NULL,   ATA_HORKAGE_NO_NCQ_TRIM |
                                                ATA_HORKAGE_ZERO_AFTER_TRIM, },
 
+       /* devices that don't properly handle TRIM commands */
+       { "SuperSSpeed S238*",          NULL,   ATA_HORKAGE_NOTRIM, },
+
        /*
         * As defined, the DRAT (Deterministic Read After Trim) and RZAT
         * (Return Zero After Trim) flags in the ATA Command Set are
@@ -4501,7 +4515,8 @@ static unsigned int ata_dev_set_xfermode(struct ata_device *dev)
        else /* In the ancient relic department - skip all of this */
                return 0;
 
-       err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
+       /* On some disks, this command causes spin-up, so we need longer timeout */
+       err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 15000);
 
        DPRINTK("EXIT, err_mask=%x\n", err_mask);
        return err_mask;
index 7ccc084bf1dfb8f7b979f5e7ae777e030303d1b8..85aa76116a305eb50d77f2544c87f09914998bed 100644 (file)
@@ -460,6 +460,13 @@ static void sata_pmp_quirks(struct ata_port *ap)
                                       ATA_LFLAG_NO_SRST |
                                       ATA_LFLAG_ASSUME_ATA;
                }
+       } else if (vendor == 0x11ab && devid == 0x4140) {
+               /* Marvell 4140 quirks */
+               ata_for_each_link(link, ap, EDGE) {
+                       /* port 4 is for SEMB device and it doesn't like SRST */
+                       if (link->pmp == 4)
+                               link->flags |= ATA_LFLAG_DISABLED;
+               }
        }
 }
 
index 3131adcc1f87e001f7f8bfe317e92527665e4dd4..641a61a59e89c00036af65d3a31fe2cf67eb22b8 100644 (file)
@@ -2568,7 +2568,8 @@ static unsigned int ata_scsiop_read_cap(struct ata_scsi_args *args, u8 *rbuf)
                rbuf[14] = (lowest_aligned >> 8) & 0x3f;
                rbuf[15] = lowest_aligned;
 
-               if (ata_id_has_trim(args->id)) {
+               if (ata_id_has_trim(args->id) &&
+                   !(dev->horkage & ATA_HORKAGE_NOTRIM)) {
                        rbuf[14] |= 0x80; /* LBPME */
 
                        if (ata_id_has_zero_after_trim(args->id) &&
index d6c37bcd416d17145f291136b6e5f2a7192ee404..e2d94972962d69d766e99e8ade594040ae8d9429 100644 (file)
@@ -569,6 +569,8 @@ show_ata_dev_trim(struct device *dev,
 
        if (!ata_id_has_trim(ata_dev->id))
                mode = "unsupported";
+       else if (ata_dev->horkage & ATA_HORKAGE_NOTRIM)
+               mode = "forced_unsupported";
        else if (ata_dev->horkage & ATA_HORKAGE_NO_NCQ_TRIM)
                        mode = "forced_unqueued";
        else if (ata_fpdma_dsm_supported(ata_dev))
index 69de41a87b74311b2b7478fb0226b8bc253c6ebc..3177b245d2bdf63e821a12a4c0f18cbab1b16229 100644 (file)
@@ -240,19 +240,19 @@ static enum hrtimer_restart null_cmd_timer_expired(struct hrtimer *timer)
        while ((entry = llist_del_all(&cq->list)) != NULL) {
                entry = llist_reverse_order(entry);
                do {
+                       struct request_queue *q = NULL;
+
                        cmd = container_of(entry, struct nullb_cmd, ll_list);
                        entry = entry->next;
+                       if (cmd->rq)
+                               q = cmd->rq->q;
                        end_cmd(cmd);
 
-                       if (cmd->rq) {
-                               struct request_queue *q = cmd->rq->q;
-
-                               if (!q->mq_ops && blk_queue_stopped(q)) {
-                                       spin_lock(q->queue_lock);
-                                       if (blk_queue_stopped(q))
-                                               blk_start_queue(q);
-                                       spin_unlock(q->queue_lock);
-                               }
+                       if (q && !q->mq_ops && blk_queue_stopped(q)) {
+                               spin_lock(q->queue_lock);
+                               if (blk_queue_stopped(q))
+                                       blk_start_queue(q);
+                               spin_unlock(q->queue_lock);
                        }
                } while (entry);
        }
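The null_blk restructuring above copies cmd->rq->q into a local before end_cmd() completes the command, so the queue-restart logic afterwards never touches the request again. The same "capture what you need before releasing the object" shape, as a small user-space sketch:

#include <stdio.h>
#include <stdlib.h>

struct queue   { int stopped; };
struct request { struct queue *q; };
struct cmd     { struct request *rq; };

static void end_cmd(struct cmd *c)
{
        free(c->rq);            /* after this, c->rq must not be dereferenced */
        c->rq = NULL;
}

static void complete(struct cmd *c)
{
        struct queue *q = NULL;

        if (c->rq)
                q = c->rq->q;   /* capture before the request goes away */
        end_cmd(c);

        if (q && q->stopped) {
                q->stopped = 0;
                printf("restarted queue\n");
        }
}

int main(void)
{
        struct queue q = { .stopped = 1 };
        struct request *rq = malloc(sizeof(*rq));
        struct cmd c = { .rq = rq };

        if (!rq)
                return 1;
        rq->q = &q;
        complete(&c);
        return 0;
}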
index 26063afb3eba41a88f68e753689b05f741f82d36..7a3c30c4336f3cb9b7b67d5965b77175b4cad53f 100644 (file)
@@ -1002,7 +1002,7 @@ static int cpufreq_add_dev_symlink(struct cpufreq_policy *policy)
        int ret = 0;
 
        /* Some related CPUs might not be present (physically hotplugged) */
-       for_each_cpu_and(j, policy->related_cpus, cpu_present_mask) {
+       for_each_cpu(j, policy->real_cpus) {
                if (j == policy->kobj_cpu)
                        continue;
 
@@ -1019,7 +1019,7 @@ static void cpufreq_remove_dev_symlink(struct cpufreq_policy *policy)
        unsigned int j;
 
        /* Some related CPUs might not be present (physically hotplugged) */
-       for_each_cpu_and(j, policy->related_cpus, cpu_present_mask) {
+       for_each_cpu(j, policy->real_cpus) {
                if (j == policy->kobj_cpu)
                        continue;
 
@@ -1163,11 +1163,14 @@ static struct cpufreq_policy *cpufreq_policy_alloc(struct device *dev)
        if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL))
                goto err_free_cpumask;
 
+       if (!zalloc_cpumask_var(&policy->real_cpus, GFP_KERNEL))
+               goto err_free_rcpumask;
+
        ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq, &dev->kobj,
                                   "cpufreq");
        if (ret) {
                pr_err("%s: failed to init policy->kobj: %d\n", __func__, ret);
-               goto err_free_rcpumask;
+               goto err_free_real_cpus;
        }
 
        INIT_LIST_HEAD(&policy->policy_list);
@@ -1184,6 +1187,8 @@ static struct cpufreq_policy *cpufreq_policy_alloc(struct device *dev)
 
        return policy;
 
+err_free_real_cpus:
+       free_cpumask_var(policy->real_cpus);
 err_free_rcpumask:
        free_cpumask_var(policy->related_cpus);
 err_free_cpumask:
@@ -1234,6 +1239,7 @@ static void cpufreq_policy_free(struct cpufreq_policy *policy, bool notify)
        write_unlock_irqrestore(&cpufreq_driver_lock, flags);
 
        cpufreq_policy_put_kobj(policy, notify);
+       free_cpumask_var(policy->real_cpus);
        free_cpumask_var(policy->related_cpus);
        free_cpumask_var(policy->cpus);
        kfree(policy);
@@ -1258,14 +1264,17 @@ static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
 
        pr_debug("adding CPU %u\n", cpu);
 
-       /*
-        * Only possible if 'cpu' wasn't physically present earlier and we are
-        * here from subsys_interface add callback. A hotplug notifier will
-        * follow and we will handle it like logical CPU hotplug then. For now,
-        * just create the sysfs link.
-        */
-       if (cpu_is_offline(cpu))
-               return add_cpu_dev_symlink(per_cpu(cpufreq_cpu_data, cpu), cpu);
+       if (cpu_is_offline(cpu)) {
+               /*
+                * Only possible if we are here from the subsys_interface add
+                * callback.  A hotplug notifier will follow and we will handle
+                * it as CPU online then.  For now, just create the sysfs link,
+                * unless there is no policy or the link is already present.
+                */
+               policy = per_cpu(cpufreq_cpu_data, cpu);
+               return policy && !cpumask_test_and_set_cpu(cpu, policy->real_cpus)
+                       ? add_cpu_dev_symlink(policy, cpu) : 0;
+       }
 
        if (!down_read_trylock(&cpufreq_rwsem))
                return 0;
@@ -1307,6 +1316,10 @@ static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
        /* related cpus should at least have policy->cpus */
        cpumask_or(policy->related_cpus, policy->related_cpus, policy->cpus);
 
+       /* Remember which CPUs have been present at the policy creation time. */
+       if (!recover_policy)
+               cpumask_and(policy->real_cpus, policy->cpus, cpu_present_mask);
+
        /*
         * affected cpus must always be the one, which are online. We aren't
         * managing offline cpus here.
@@ -1420,8 +1433,7 @@ nomem_out:
        return ret;
 }
 
-static int __cpufreq_remove_dev_prepare(struct device *dev,
-                                       struct subsys_interface *sif)
+static int __cpufreq_remove_dev_prepare(struct device *dev)
 {
        unsigned int cpu = dev->id;
        int ret = 0;
@@ -1437,10 +1449,8 @@ static int __cpufreq_remove_dev_prepare(struct device *dev,
 
        if (has_target()) {
                ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
-               if (ret) {
+               if (ret)
                        pr_err("%s: Failed to stop governor\n", __func__);
-                       return ret;
-               }
        }
 
        down_write(&policy->rwsem);
@@ -1473,8 +1483,7 @@ static int __cpufreq_remove_dev_prepare(struct device *dev,
        return ret;
 }
 
-static int __cpufreq_remove_dev_finish(struct device *dev,
-                                      struct subsys_interface *sif)
+static int __cpufreq_remove_dev_finish(struct device *dev)
 {
        unsigned int cpu = dev->id;
        int ret;
@@ -1492,10 +1501,8 @@ static int __cpufreq_remove_dev_finish(struct device *dev,
        /* If cpu is last user of policy, free policy */
        if (has_target()) {
                ret = __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
-               if (ret) {
+               if (ret)
                        pr_err("%s: Failed to exit governor\n", __func__);
-                       return ret;
-               }
        }
 
        /*
@@ -1506,10 +1513,6 @@ static int __cpufreq_remove_dev_finish(struct device *dev,
        if (cpufreq_driver->exit)
                cpufreq_driver->exit(policy);
 
-       /* Free the policy only if the driver is getting removed. */
-       if (sif)
-               cpufreq_policy_free(policy, true);
-
        return 0;
 }
 
@@ -1521,42 +1524,41 @@ static int __cpufreq_remove_dev_finish(struct device *dev,
 static int cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
 {
        unsigned int cpu = dev->id;
-       int ret;
-
-       /*
-        * Only possible if 'cpu' is getting physically removed now. A hotplug
-        * notifier should have already been called and we just need to remove
-        * link or free policy here.
-        */
-       if (cpu_is_offline(cpu)) {
-               struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
-               struct cpumask mask;
+       struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
 
-               if (!policy)
-                       return 0;
+       if (!policy)
+               return 0;
 
-               cpumask_copy(&mask, policy->related_cpus);
-               cpumask_clear_cpu(cpu, &mask);
+       if (cpu_online(cpu)) {
+               __cpufreq_remove_dev_prepare(dev);
+               __cpufreq_remove_dev_finish(dev);
+       }
 
-               /*
-                * Free policy only if all policy->related_cpus are removed
-                * physically.
-                */
-               if (cpumask_intersects(&mask, cpu_present_mask)) {
-                       remove_cpu_dev_symlink(policy, cpu);
-                       return 0;
-               }
+       cpumask_clear_cpu(cpu, policy->real_cpus);
 
+       if (cpumask_empty(policy->real_cpus)) {
                cpufreq_policy_free(policy, true);
                return 0;
        }
 
-       ret = __cpufreq_remove_dev_prepare(dev, sif);
+       if (cpu != policy->kobj_cpu) {
+               remove_cpu_dev_symlink(policy, cpu);
+       } else {
+               /*
+                * The CPU owning the policy object is going away.  Move it to
+                * another suitable CPU.
+                */
+               unsigned int new_cpu = cpumask_first(policy->real_cpus);
+               struct device *new_dev = get_cpu_device(new_cpu);
+
+               dev_dbg(dev, "%s: Moving policy object to CPU%u\n", __func__, new_cpu);
 
-       if (!ret)
-               ret = __cpufreq_remove_dev_finish(dev, sif);
+               sysfs_remove_link(&new_dev->kobj, "cpufreq");
+               policy->kobj_cpu = new_cpu;
+               WARN_ON(kobject_move(&policy->kobj, &new_dev->kobj));
+       }
 
-       return ret;
+       return 0;
 }
 
 static void handle_update(struct work_struct *work)
@@ -2395,11 +2397,11 @@ static int cpufreq_cpu_callback(struct notifier_block *nfb,
                        break;
 
                case CPU_DOWN_PREPARE:
-                       __cpufreq_remove_dev_prepare(dev, NULL);
+                       __cpufreq_remove_dev_prepare(dev);
                        break;
 
                case CPU_POST_DEAD:
-                       __cpufreq_remove_dev_finish(dev, NULL);
+                       __cpufreq_remove_dev_finish(dev);
                        break;
 
                case CPU_DOWN_FAILED:
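The cpufreq changes above introduce policy->real_cpus: the set of CPUs that were physically present when the policy was created. The policy is freed only once that mask drains, and if the CPU owning the policy kobject is removed first, ownership migrates to another CPU still in the mask. A sketch of the bookkeeping, using a plain bitmask instead of the kernel's cpumask API:

#include <stdio.h>
#include <stdlib.h>

struct policy {
        unsigned long real_cpus;   /* one bit per CPU known to this policy */
        int kobj_cpu;              /* CPU that "owns" the policy object */
};

static void remove_cpu(struct policy **pp, int cpu)
{
        struct policy *p = *pp;

        p->real_cpus &= ~(1UL << cpu);
        if (!p->real_cpus) {                      /* last user: free the policy */
                free(p);
                *pp = NULL;
                printf("policy freed\n");
                return;
        }
        if (cpu == p->kobj_cpu) {                 /* move ownership elsewhere */
                p->kobj_cpu = __builtin_ctzl(p->real_cpus);  /* lowest set bit (GCC/Clang builtin) */
                printf("policy moved to CPU%d\n", p->kobj_cpu);
        }
}

int main(void)
{
        struct policy *p = calloc(1, sizeof(*p));

        if (!p)
                return 1;
        p->real_cpus = 0xfUL;   /* CPUs 0-3 */
        p->kobj_cpu = 0;

        remove_cpu(&p, 0);      /* moved to CPU1 */
        remove_cpu(&p, 2);
        remove_cpu(&p, 1);      /* moved to CPU3 */
        remove_cpu(&p, 3);      /* freed */
        return p == NULL ? 0 : 1;
}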
index 15ada47bb720b710454795d8d7e83c235c2fccfc..fcb929ec5304a9b233f0d2a55be394e5c1ed25f1 100644 (file)
@@ -681,6 +681,7 @@ static struct cpu_defaults knl_params = {
                .get_max = core_get_max_pstate,
                .get_min = core_get_min_pstate,
                .get_turbo = knl_get_turbo_pstate,
+               .get_scaling = core_get_scaling,
                .set = core_set_pstate,
        },
 };
index 4fd9961d552e8a0c12604f1cfef83f2e15bea66d..d4253742543841d6d261dfea22156259b0a13183 100644 (file)
@@ -305,10 +305,17 @@ const char *cper_mem_err_unpack(struct trace_seq *p,
        return ret;
 }
 
-static void cper_print_mem(const char *pfx, const struct cper_sec_mem_err *mem)
+static void cper_print_mem(const char *pfx, const struct cper_sec_mem_err *mem,
+       int len)
 {
        struct cper_mem_err_compact cmem;
 
+       /* Don't trust UEFI 2.1/2.2 structure with bad validation bits */
+       if (len == sizeof(struct cper_sec_mem_err_old) &&
+           (mem->validation_bits & ~(CPER_MEM_VALID_RANK_NUMBER - 1))) {
+               pr_err(FW_WARN "valid bits set for fields beyond structure\n");
+               return;
+       }
        if (mem->validation_bits & CPER_MEM_VALID_ERROR_STATUS)
                printk("%s""error_status: 0x%016llx\n", pfx, mem->error_status);
        if (mem->validation_bits & CPER_MEM_VALID_PA)
@@ -405,8 +412,10 @@ static void cper_estatus_print_section(
        } else if (!uuid_le_cmp(*sec_type, CPER_SEC_PLATFORM_MEM)) {
                struct cper_sec_mem_err *mem_err = (void *)(gdata + 1);
                printk("%s""section_type: memory error\n", newpfx);
-               if (gdata->error_data_length >= sizeof(*mem_err))
-                       cper_print_mem(newpfx, mem_err);
+               if (gdata->error_data_length >=
+                   sizeof(struct cper_sec_mem_err_old))
+                       cper_print_mem(newpfx, mem_err,
+                                      gdata->error_data_length);
                else
                        goto err_section_too_small;
        } else if (!uuid_le_cmp(*sec_type, CPER_SEC_PCIE)) {
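cper_print_mem() above refuses to trust a short (UEFI 2.1/2.2-sized) memory-error record whose validation_bits claim fields that only exist in the newer layout: any bit at or above CPER_MEM_VALID_RANK_NUMBER is out of range for the old structure. The same mask trick, with stand-in constants:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-ins: bits 0..6 exist in the old layout, bit 7 and above do not. */
#define VALID_RANK_NUMBER  (1ULL << 7)

static bool old_record_consistent(uint64_t validation_bits)
{
        /* VALID_RANK_NUMBER - 1 is a mask of every bit below it */
        return !(validation_bits & ~(VALID_RANK_NUMBER - 1));
}

int main(void)
{
        printf("%d\n", old_record_consistent(0x3f));                /* 1: only old fields claimed */
        printf("%d\n", old_record_consistent(0x3f | (1ULL << 9)));  /* 0: claims a newer field */
        return 0;
}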
index 01657830b470a49e8209fd39fa829d4a1fbb3610..31b00f91cfcd5a04848be288837d6d90c0110f44 100644 (file)
@@ -1614,6 +1614,9 @@ struct amdgpu_uvd {
 #define AMDGPU_MAX_VCE_HANDLES 16
 #define AMDGPU_VCE_FIRMWARE_OFFSET 256
 
+#define AMDGPU_VCE_HARVEST_VCE0 (1 << 0)
+#define AMDGPU_VCE_HARVEST_VCE1 (1 << 1)
+
 struct amdgpu_vce {
        struct amdgpu_bo        *vcpu_bo;
        uint64_t                gpu_addr;
@@ -1626,6 +1629,7 @@ struct amdgpu_vce {
        const struct firmware   *fw;    /* VCE firmware */
        struct amdgpu_ring      ring[AMDGPU_MAX_VCE_RINGS];
        struct amdgpu_irq_src   irq;
+       unsigned                harvest_config;
 };
 
 /*
@@ -1862,6 +1866,12 @@ typedef void (*amdgpu_wreg_t)(struct amdgpu_device*, uint32_t, uint32_t);
 typedef uint32_t (*amdgpu_block_rreg_t)(struct amdgpu_device*, uint32_t, uint32_t);
 typedef void (*amdgpu_block_wreg_t)(struct amdgpu_device*, uint32_t, uint32_t, uint32_t);
 
+struct amdgpu_ip_block_status {
+       bool valid;
+       bool sw;
+       bool hw;
+};
+
 struct amdgpu_device {
        struct device                   *dev;
        struct drm_device               *ddev;
@@ -2004,7 +2014,7 @@ struct amdgpu_device {
 
        const struct amdgpu_ip_block_version *ip_blocks;
        int                             num_ip_blocks;
-       bool                            *ip_block_enabled;
+       struct amdgpu_ip_block_status   *ip_block_status;
        struct mutex    mn_lock;
        DECLARE_HASHTABLE(mn_hash, 7);
 
index d79009b6586713e777d90647aaefec7b9c49b286..99f158e1baffa711073d1251d6d05c72cb68976e 100644 (file)
@@ -1191,8 +1191,9 @@ static int amdgpu_early_init(struct amdgpu_device *adev)
                return -EINVAL;
        }
 
-       adev->ip_block_enabled = kcalloc(adev->num_ip_blocks, sizeof(bool), GFP_KERNEL);
-       if (adev->ip_block_enabled == NULL)
+       adev->ip_block_status = kcalloc(adev->num_ip_blocks,
+                                       sizeof(struct amdgpu_ip_block_status), GFP_KERNEL);
+       if (adev->ip_block_status == NULL)
                return -ENOMEM;
 
        if (adev->ip_blocks == NULL) {
@@ -1203,18 +1204,18 @@ static int amdgpu_early_init(struct amdgpu_device *adev)
        for (i = 0; i < adev->num_ip_blocks; i++) {
                if ((amdgpu_ip_block_mask & (1 << i)) == 0) {
                        DRM_ERROR("disabled ip block: %d\n", i);
-                       adev->ip_block_enabled[i] = false;
+                       adev->ip_block_status[i].valid = false;
                } else {
                        if (adev->ip_blocks[i].funcs->early_init) {
                                r = adev->ip_blocks[i].funcs->early_init((void *)adev);
                                if (r == -ENOENT)
-                                       adev->ip_block_enabled[i] = false;
+                                       adev->ip_block_status[i].valid = false;
                                else if (r)
                                        return r;
                                else
-                                       adev->ip_block_enabled[i] = true;
+                                       adev->ip_block_status[i].valid = true;
                        } else {
-                               adev->ip_block_enabled[i] = true;
+                               adev->ip_block_status[i].valid = true;
                        }
                }
        }
@@ -1227,11 +1228,12 @@ static int amdgpu_init(struct amdgpu_device *adev)
        int i, r;
 
        for (i = 0; i < adev->num_ip_blocks; i++) {
-               if (!adev->ip_block_enabled[i])
+               if (!adev->ip_block_status[i].valid)
                        continue;
                r = adev->ip_blocks[i].funcs->sw_init((void *)adev);
                if (r)
                        return r;
+               adev->ip_block_status[i].sw = true;
                /* need to do gmc hw init early so we can allocate gpu mem */
                if (adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_GMC) {
                        r = amdgpu_vram_scratch_init(adev);
@@ -1243,11 +1245,12 @@ static int amdgpu_init(struct amdgpu_device *adev)
                        r = amdgpu_wb_init(adev);
                        if (r)
                                return r;
+                       adev->ip_block_status[i].hw = true;
                }
        }
 
        for (i = 0; i < adev->num_ip_blocks; i++) {
-               if (!adev->ip_block_enabled[i])
+               if (!adev->ip_block_status[i].sw)
                        continue;
                /* gmc hw init is done early */
                if (adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_GMC)
@@ -1255,6 +1258,7 @@ static int amdgpu_init(struct amdgpu_device *adev)
                r = adev->ip_blocks[i].funcs->hw_init((void *)adev);
                if (r)
                        return r;
+               adev->ip_block_status[i].hw = true;
        }
 
        return 0;
@@ -1265,7 +1269,7 @@ static int amdgpu_late_init(struct amdgpu_device *adev)
        int i = 0, r;
 
        for (i = 0; i < adev->num_ip_blocks; i++) {
-               if (!adev->ip_block_enabled[i])
+               if (!adev->ip_block_status[i].valid)
                        continue;
                /* enable clockgating to save power */
                r = adev->ip_blocks[i].funcs->set_clockgating_state((void *)adev,
@@ -1287,7 +1291,7 @@ static int amdgpu_fini(struct amdgpu_device *adev)
        int i, r;
 
        for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
-               if (!adev->ip_block_enabled[i])
+               if (!adev->ip_block_status[i].hw)
                        continue;
                if (adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_GMC) {
                        amdgpu_wb_fini(adev);
@@ -1300,14 +1304,16 @@ static int amdgpu_fini(struct amdgpu_device *adev)
                        return r;
                r = adev->ip_blocks[i].funcs->hw_fini((void *)adev);
                /* XXX handle errors */
+               adev->ip_block_status[i].hw = false;
        }
 
        for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
-               if (!adev->ip_block_enabled[i])
+               if (!adev->ip_block_status[i].sw)
                        continue;
                r = adev->ip_blocks[i].funcs->sw_fini((void *)adev);
                /* XXX handle errors */
-               adev->ip_block_enabled[i] = false;
+               adev->ip_block_status[i].sw = false;
+               adev->ip_block_status[i].valid = false;
        }
 
        return 0;
@@ -1318,7 +1324,7 @@ static int amdgpu_suspend(struct amdgpu_device *adev)
        int i, r;
 
        for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
-               if (!adev->ip_block_enabled[i])
+               if (!adev->ip_block_status[i].valid)
                        continue;
                /* ungate blocks so that suspend can properly shut them down */
                r = adev->ip_blocks[i].funcs->set_clockgating_state((void *)adev,
@@ -1336,7 +1342,7 @@ static int amdgpu_resume(struct amdgpu_device *adev)
        int i, r;
 
        for (i = 0; i < adev->num_ip_blocks; i++) {
-               if (!adev->ip_block_enabled[i])
+               if (!adev->ip_block_status[i].valid)
                        continue;
                r = adev->ip_blocks[i].funcs->resume(adev);
                if (r)
@@ -1582,8 +1588,8 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
        amdgpu_fence_driver_fini(adev);
        amdgpu_fbdev_fini(adev);
        r = amdgpu_fini(adev);
-       kfree(adev->ip_block_enabled);
-       adev->ip_block_enabled = NULL;
+       kfree(adev->ip_block_status);
+       adev->ip_block_status = NULL;
        adev->accel_working = false;
        /* free i2c buses */
        amdgpu_i2c_fini(adev);
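Replacing the ip_block_enabled bool array with struct amdgpu_ip_block_status lets the driver record separately whether each IP block passed early_init (valid), sw_init (sw) and hw_init (hw), so teardown only undoes the stages that actually completed, in reverse order. A self-contained sketch of that staged bring-up/teardown bookkeeping; the block operations are dummies.

#include <stdbool.h>
#include <stdio.h>

struct block_status { bool valid, sw, hw; };

#define NBLOCKS 3

static int  sw_init(int i)  { printf("sw_init %d\n", i);  return i == 2 ? -1 : 0; }
static void sw_fini(int i)  { printf("sw_fini %d\n", i); }
static void hw_fini(int i)  { printf("hw_fini %d\n", i); }

int main(void)
{
        struct block_status st[NBLOCKS] = { { .valid = true }, { .valid = true }, { .valid = true } };
        int i;

        for (i = 0; i < NBLOCKS; i++) {
                if (!st[i].valid)
                        continue;
                if (sw_init(i))
                        break;          /* block 2 fails: its sw/hw flags stay false */
                st[i].sw = true;
                st[i].hw = true;        /* pretend hw bring-up succeeded too */
        }

        /* teardown in reverse order, touching only what was brought up */
        for (i = NBLOCKS - 1; i >= 0; i--) {
                if (st[i].hw)
                        hw_fini(i);
                if (st[i].sw)
                        sw_fini(i);
                st[i].sw = st[i].hw = st[i].valid = false;
        }
        return 0;
}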
index ae43b58c9733a1962cbd6dae4ce42fb8c52fa81a..4afc507820c01db355600631e67f98a3e5d4a644 100644 (file)
@@ -449,7 +449,7 @@ out:
  * vital here, so they are not reported back to userspace.
  */
 static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
-                                   struct amdgpu_bo_va *bo_va)
+                                   struct amdgpu_bo_va *bo_va, uint32_t operation)
 {
        struct ttm_validate_buffer tv, *entry;
        struct amdgpu_bo_list_entry *vm_bos;
@@ -485,7 +485,9 @@ static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
        if (r)
                goto error_unlock;
 
-       r = amdgpu_vm_bo_update(adev, bo_va, &bo_va->bo->tbo.mem);
+
+       if (operation == AMDGPU_VA_OP_MAP)
+               r = amdgpu_vm_bo_update(adev, bo_va, &bo_va->bo->tbo.mem);
 
 error_unlock:
        mutex_unlock(&bo_va->vm->mutex);
@@ -580,7 +582,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
        }
 
        if (!r && !(args->flags & AMDGPU_VM_DELAY_UPDATE))
-               amdgpu_gem_va_update_vm(adev, bo_va);
+               amdgpu_gem_va_update_vm(adev, bo_va, args->operation);
 
        drm_gem_object_unreference_unlocked(gobj);
        return r;
index 52dff75aac6f3e5c3ac04252ec19c122bace0558..bc0fac618a3f01121edb740207b9866b6818f71f 100644 (file)
@@ -180,16 +180,16 @@ int amdgpu_ib_schedule(struct amdgpu_device *adev, unsigned num_ibs,
        if (vm) {
                /* do context switch */
                amdgpu_vm_flush(ring, vm, ib->sync.last_vm_update);
-       }
 
-       if (vm && ring->funcs->emit_gds_switch)
-               amdgpu_ring_emit_gds_switch(ring, ib->vm->ids[ring->idx].id,
-                                           ib->gds_base, ib->gds_size,
-                                           ib->gws_base, ib->gws_size,
-                                           ib->oa_base, ib->oa_size);
+               if (ring->funcs->emit_gds_switch)
+                       amdgpu_ring_emit_gds_switch(ring, ib->vm->ids[ring->idx].id,
+                                                   ib->gds_base, ib->gds_size,
+                                                   ib->gws_base, ib->gws_size,
+                                                   ib->oa_base, ib->oa_size);
 
-       if (ring->funcs->emit_hdp_flush)
-               amdgpu_ring_emit_hdp_flush(ring);
+               if (ring->funcs->emit_hdp_flush)
+                       amdgpu_ring_emit_hdp_flush(ring);
+       }
 
        old_ctx = ring->current_ctx;
        for (i = 0; i < num_ibs; ++i) {
index 5533434c7a8fad8dd22bab138c84236befe13167..9736892bcdf932c328a883473a6c3e23d560cd38 100644 (file)
@@ -235,7 +235,7 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
 
                for (i = 0; i < adev->num_ip_blocks; i++) {
                        if (adev->ip_blocks[i].type == type &&
-                           adev->ip_block_enabled[i]) {
+                           adev->ip_block_status[i].valid) {
                                ip.hw_ip_version_major = adev->ip_blocks[i].major;
                                ip.hw_ip_version_minor = adev->ip_blocks[i].minor;
                                ip.capabilities_flags = 0;
@@ -274,7 +274,7 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
 
                for (i = 0; i < adev->num_ip_blocks; i++)
                        if (adev->ip_blocks[i].type == type &&
-                           adev->ip_block_enabled[i] &&
+                           adev->ip_block_status[i].valid &&
                            count < AMDGPU_HW_IP_INSTANCE_MAX_COUNT)
                                count++;
 
@@ -416,7 +416,7 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
                return n ? -EFAULT : 0;
        }
        case AMDGPU_INFO_DEV_INFO: {
-               struct drm_amdgpu_info_device dev_info;
+               struct drm_amdgpu_info_device dev_info = {};
                struct amdgpu_cu_info cu_info;
 
                dev_info.device_id = dev->pdev->device;
@@ -459,6 +459,7 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
                memcpy(&dev_info.cu_bitmap[0], &cu_info.bitmap[0], sizeof(cu_info.bitmap));
                dev_info.vram_type = adev->mc.vram_type;
                dev_info.vram_bit_width = adev->mc.vram_width;
+               dev_info.vce_harvest_config = adev->vce.harvest_config;
 
                return copy_to_user(out, &dev_info,
                                    min((size_t)size, sizeof(dev_info))) ? -EFAULT : 0;
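The "= {}" initializer added to dev_info above matters because the structure is only partially filled before being copied out: with zero-initialization, any member or padding the function never writes reaches copy_to_user() as zero rather than as leftover stack contents. A minimal illustration with a hypothetical struct:

#include <stdio.h>

struct info {
        unsigned int device_id;
        unsigned int pad;          /* never written explicitly */
        unsigned long long reserved[4];
};

static void fill(struct info *inf)
{
        inf->device_id = 0x1234;   /* only one field is populated */
}

int main(void)
{
        struct info a = {};        /* everything not set in fill() reads as zero */

        fill(&a);
        printf("device_id=%#x pad=%#x reserved0=%llu\n",
               a.device_id, a.pad, a.reserved[0]);
        return 0;
}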
index 1a2d419cbf164e8939cf7e584480dc17c0b93ecb..ace870afc7d45154a6bb6013cb45b773a8e63512 100644 (file)
@@ -494,29 +494,67 @@ static void cz_dpm_fini(struct amdgpu_device *adev)
        amdgpu_free_extended_power_table(adev);
 }
 
+#define ixSMUSVI_NB_CURRENTVID 0xD8230044
+#define CURRENT_NB_VID_MASK 0xff000000
+#define CURRENT_NB_VID__SHIFT 24
+#define ixSMUSVI_GFX_CURRENTVID  0xD8230048
+#define CURRENT_GFX_VID_MASK 0xff000000
+#define CURRENT_GFX_VID__SHIFT 24
+
 static void
 cz_dpm_debugfs_print_current_performance_level(struct amdgpu_device *adev,
                                               struct seq_file *m)
 {
+       struct cz_power_info *pi = cz_get_pi(adev);
        struct amdgpu_clock_voltage_dependency_table *table =
                &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
-       u32 current_index =
-               (RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX) &
-               TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX_MASK) >>
-               TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX__SHIFT;
-       u32 sclk, tmp;
-       u16 vddc;
-
-       if (current_index >= NUM_SCLK_LEVELS) {
-               seq_printf(m, "invalid dpm profile %d\n", current_index);
+       struct amdgpu_uvd_clock_voltage_dependency_table *uvd_table =
+               &adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table;
+       struct amdgpu_vce_clock_voltage_dependency_table *vce_table =
+               &adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;
+       u32 sclk_index = REG_GET_FIELD(RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX),
+                                      TARGET_AND_CURRENT_PROFILE_INDEX, CURR_SCLK_INDEX);
+       u32 uvd_index = REG_GET_FIELD(RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX_2),
+                                     TARGET_AND_CURRENT_PROFILE_INDEX_2, CURR_UVD_INDEX);
+       u32 vce_index = REG_GET_FIELD(RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX_2),
+                                     TARGET_AND_CURRENT_PROFILE_INDEX_2, CURR_VCE_INDEX);
+       u32 sclk, vclk, dclk, ecclk, tmp;
+       u16 vddnb, vddgfx;
+
+       if (sclk_index >= NUM_SCLK_LEVELS) {
+               seq_printf(m, "invalid sclk dpm profile %d\n", sclk_index);
        } else {
-               sclk = table->entries[current_index].clk;
-               tmp = (RREG32_SMC(ixSMU_VOLTAGE_STATUS) &
-                       SMU_VOLTAGE_STATUS__SMU_VOLTAGE_CURRENT_LEVEL_MASK) >>
-                       SMU_VOLTAGE_STATUS__SMU_VOLTAGE_CURRENT_LEVEL__SHIFT;
-               vddc = cz_convert_8bit_index_to_voltage(adev, (u16)tmp);
-               seq_printf(m, "power level %d    sclk: %u vddc: %u\n",
-                          current_index, sclk, vddc);
+               sclk = table->entries[sclk_index].clk;
+               seq_printf(m, "%u sclk: %u\n", sclk_index, sclk);
+       }
+
+       tmp = (RREG32_SMC(ixSMUSVI_NB_CURRENTVID) &
+              CURRENT_NB_VID_MASK) >> CURRENT_NB_VID__SHIFT;
+       vddnb = cz_convert_8bit_index_to_voltage(adev, (u16)tmp);
+       tmp = (RREG32_SMC(ixSMUSVI_GFX_CURRENTVID) &
+              CURRENT_GFX_VID_MASK) >> CURRENT_GFX_VID__SHIFT;
+       vddgfx = cz_convert_8bit_index_to_voltage(adev, (u16)tmp);
+       seq_printf(m, "vddnb: %u vddgfx: %u\n", vddnb, vddgfx);
+
+       seq_printf(m, "uvd    %sabled\n", pi->uvd_power_gated ? "dis" : "en");
+       if (!pi->uvd_power_gated) {
+               if (uvd_index >= CZ_MAX_HARDWARE_POWERLEVELS) {
+                       seq_printf(m, "invalid uvd dpm level %d\n", uvd_index);
+               } else {
+                       vclk = uvd_table->entries[uvd_index].vclk;
+                       dclk = uvd_table->entries[uvd_index].dclk;
+                       seq_printf(m, "%u uvd vclk: %u dclk: %u\n", uvd_index, vclk, dclk);
+               }
+       }
+
+       seq_printf(m, "vce    %sabled\n", pi->vce_power_gated ? "dis" : "en");
+       if (!pi->vce_power_gated) {
+               if (vce_index >= CZ_MAX_HARDWARE_POWERLEVELS) {
+                       seq_printf(m, "invalid vce dpm level %d\n", vce_index);
+               } else {
+                       ecclk = vce_table->entries[vce_index].ecclk;
+                       seq_printf(m, "%u vce ecclk: %u\n", vce_index, ecclk);
+               }
        }
 }
 
index 6e77964f1b640d6841ca216ab2c7f5eed73077f5..e70a26f587a03f7dde6788f88add8f32a25ad764 100644 (file)
@@ -2632,6 +2632,7 @@ static void dce_v10_0_crtc_dpms(struct drm_crtc *crtc, int mode)
        struct drm_device *dev = crtc->dev;
        struct amdgpu_device *adev = dev->dev_private;
        struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+       unsigned type;
 
        switch (mode) {
        case DRM_MODE_DPMS_ON:
@@ -2640,6 +2641,9 @@ static void dce_v10_0_crtc_dpms(struct drm_crtc *crtc, int mode)
                dce_v10_0_vga_enable(crtc, true);
                amdgpu_atombios_crtc_blank(crtc, ATOM_DISABLE);
                dce_v10_0_vga_enable(crtc, false);
+               /* Make sure VBLANK interrupt is still enabled */
+               type = amdgpu_crtc_idx_to_irq_type(adev, amdgpu_crtc->crtc_id);
+               amdgpu_irq_update(adev, &adev->crtc_irq, type);
                drm_vblank_post_modeset(dev, amdgpu_crtc->crtc_id);
                dce_v10_0_crtc_load_lut(crtc);
                break;
index 7f7abb0e0be53026d98e074d4c0a756484eb3e1b..dcb402ee048a602ecf03aade115a872212781811 100644 (file)
@@ -2631,6 +2631,7 @@ static void dce_v11_0_crtc_dpms(struct drm_crtc *crtc, int mode)
        struct drm_device *dev = crtc->dev;
        struct amdgpu_device *adev = dev->dev_private;
        struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+       unsigned type;
 
        switch (mode) {
        case DRM_MODE_DPMS_ON:
@@ -2639,6 +2640,9 @@ static void dce_v11_0_crtc_dpms(struct drm_crtc *crtc, int mode)
                dce_v11_0_vga_enable(crtc, true);
                amdgpu_atombios_crtc_blank(crtc, ATOM_DISABLE);
                dce_v11_0_vga_enable(crtc, false);
+               /* Make sure VBLANK interrupt is still enabled */
+               type = amdgpu_crtc_idx_to_irq_type(adev, amdgpu_crtc->crtc_id);
+               amdgpu_irq_update(adev, &adev->crtc_irq, type);
                drm_vblank_post_modeset(dev, amdgpu_crtc->crtc_id);
                dce_v11_0_crtc_load_lut(crtc);
                break;
index 2c188fb9fd22ff1a3528673beb8866639d5ef631..2db6ab0a543dada20b64d3d5d89eb4d5f36a1873 100644 (file)
@@ -2561,7 +2561,7 @@ static bool gfx_v7_0_ring_emit_semaphore(struct amdgpu_ring *ring,
  * scheduling on the ring.  This function schedules the IB
  * on the gfx ring for execution by the GPU.
  */
-static void gfx_v7_0_ring_emit_ib(struct amdgpu_ring *ring,
+static void gfx_v7_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
                                  struct amdgpu_ib *ib)
 {
        bool need_ctx_switch = ring->current_ctx != ib->ctx;
@@ -2569,15 +2569,10 @@ static void gfx_v7_0_ring_emit_ib(struct amdgpu_ring *ring,
        u32 next_rptr = ring->wptr + 5;
 
        /* drop the CE preamble IB for the same context */
-       if ((ring->type == AMDGPU_RING_TYPE_GFX) &&
-           (ib->flags & AMDGPU_IB_FLAG_PREAMBLE) &&
-           !need_ctx_switch)
+       if ((ib->flags & AMDGPU_IB_FLAG_PREAMBLE) && !need_ctx_switch)
                return;
 
-       if (ring->type == AMDGPU_RING_TYPE_COMPUTE)
-               control |= INDIRECT_BUFFER_VALID;
-
-       if (need_ctx_switch && ring->type == AMDGPU_RING_TYPE_GFX)
+       if (need_ctx_switch)
                next_rptr += 2;
 
        next_rptr += 4;
@@ -2588,7 +2583,7 @@ static void gfx_v7_0_ring_emit_ib(struct amdgpu_ring *ring,
        amdgpu_ring_write(ring, next_rptr);
 
        /* insert SWITCH_BUFFER packet before first IB in the ring frame */
-       if (need_ctx_switch && ring->type == AMDGPU_RING_TYPE_GFX) {
+       if (need_ctx_switch) {
                amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
                amdgpu_ring_write(ring, 0);
        }
@@ -2611,6 +2606,35 @@ static void gfx_v7_0_ring_emit_ib(struct amdgpu_ring *ring,
        amdgpu_ring_write(ring, control);
 }
 
+static void gfx_v7_0_ring_emit_ib_compute(struct amdgpu_ring *ring,
+                                 struct amdgpu_ib *ib)
+{
+       u32 header, control = 0;
+       u32 next_rptr = ring->wptr + 5;
+
+       control |= INDIRECT_BUFFER_VALID;
+       next_rptr += 4;
+       amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
+       amdgpu_ring_write(ring, WRITE_DATA_DST_SEL(5) | WR_CONFIRM);
+       amdgpu_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
+       amdgpu_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xffffffff);
+       amdgpu_ring_write(ring, next_rptr);
+
+       header = PACKET3(PACKET3_INDIRECT_BUFFER, 2);
+
+       control |= ib->length_dw |
+                          (ib->vm ? (ib->vm->ids[ring->idx].id << 24) : 0);
+
+       amdgpu_ring_write(ring, header);
+       amdgpu_ring_write(ring,
+#ifdef __BIG_ENDIAN
+                                         (2 << 0) |
+#endif
+                                         (ib->gpu_addr & 0xFFFFFFFC));
+       amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFFFF);
+       amdgpu_ring_write(ring, control);
+}
+
 /**
  * gfx_v7_0_ring_test_ib - basic ring IB test
  *
@@ -5555,7 +5579,7 @@ static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_gfx = {
        .get_wptr = gfx_v7_0_ring_get_wptr_gfx,
        .set_wptr = gfx_v7_0_ring_set_wptr_gfx,
        .parse_cs = NULL,
-       .emit_ib = gfx_v7_0_ring_emit_ib,
+       .emit_ib = gfx_v7_0_ring_emit_ib_gfx,
        .emit_fence = gfx_v7_0_ring_emit_fence_gfx,
        .emit_semaphore = gfx_v7_0_ring_emit_semaphore,
        .emit_vm_flush = gfx_v7_0_ring_emit_vm_flush,
@@ -5571,7 +5595,7 @@ static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_compute = {
        .get_wptr = gfx_v7_0_ring_get_wptr_compute,
        .set_wptr = gfx_v7_0_ring_set_wptr_compute,
        .parse_cs = NULL,
-       .emit_ib = gfx_v7_0_ring_emit_ib,
+       .emit_ib = gfx_v7_0_ring_emit_ib_compute,
        .emit_fence = gfx_v7_0_ring_emit_fence_compute,
        .emit_semaphore = gfx_v7_0_ring_emit_semaphore,
        .emit_vm_flush = gfx_v7_0_ring_emit_vm_flush,
index 1c7c992dea37a446d66baa91bb25ad60967915e6..9e1d4ddbf475027e10c6e0d6d77a63efb4eec3b3 100644 (file)
@@ -3753,7 +3753,7 @@ static void gfx_v8_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
        amdgpu_ring_write(ring, 0x20); /* poll interval */
 }
 
-static void gfx_v8_0_ring_emit_ib(struct amdgpu_ring *ring,
+static void gfx_v8_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
                                  struct amdgpu_ib *ib)
 {
        bool need_ctx_switch = ring->current_ctx != ib->ctx;
@@ -3761,15 +3761,10 @@ static void gfx_v8_0_ring_emit_ib(struct amdgpu_ring *ring,
        u32 next_rptr = ring->wptr + 5;
 
        /* drop the CE preamble IB for the same context */
-       if ((ring->type == AMDGPU_RING_TYPE_GFX) &&
-           (ib->flags & AMDGPU_IB_FLAG_PREAMBLE) &&
-           !need_ctx_switch)
+       if ((ib->flags & AMDGPU_IB_FLAG_PREAMBLE) && !need_ctx_switch)
                return;
 
-       if (ring->type == AMDGPU_RING_TYPE_COMPUTE)
-               control |= INDIRECT_BUFFER_VALID;
-
-       if (need_ctx_switch && ring->type == AMDGPU_RING_TYPE_GFX)
+       if (need_ctx_switch)
                next_rptr += 2;
 
        next_rptr += 4;
@@ -3780,7 +3775,7 @@ static void gfx_v8_0_ring_emit_ib(struct amdgpu_ring *ring,
        amdgpu_ring_write(ring, next_rptr);
 
        /* insert SWITCH_BUFFER packet before first IB in the ring frame */
-       if (need_ctx_switch && ring->type == AMDGPU_RING_TYPE_GFX) {
+       if (need_ctx_switch) {
                amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
                amdgpu_ring_write(ring, 0);
        }
@@ -3803,6 +3798,36 @@ static void gfx_v8_0_ring_emit_ib(struct amdgpu_ring *ring,
        amdgpu_ring_write(ring, control);
 }
 
+static void gfx_v8_0_ring_emit_ib_compute(struct amdgpu_ring *ring,
+                                 struct amdgpu_ib *ib)
+{
+       u32 header, control = 0;
+       u32 next_rptr = ring->wptr + 5;
+
+       control |= INDIRECT_BUFFER_VALID;
+
+       next_rptr += 4;
+       amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
+       amdgpu_ring_write(ring, WRITE_DATA_DST_SEL(5) | WR_CONFIRM);
+       amdgpu_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
+       amdgpu_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xffffffff);
+       amdgpu_ring_write(ring, next_rptr);
+
+       header = PACKET3(PACKET3_INDIRECT_BUFFER, 2);
+
+       control |= ib->length_dw |
+                          (ib->vm ? (ib->vm->ids[ring->idx].id << 24) : 0);
+
+       amdgpu_ring_write(ring, header);
+       amdgpu_ring_write(ring,
+#ifdef __BIG_ENDIAN
+                                         (2 << 0) |
+#endif
+                                         (ib->gpu_addr & 0xFFFFFFFC));
+       amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFFFF);
+       amdgpu_ring_write(ring, control);
+}
+
 static void gfx_v8_0_ring_emit_fence_gfx(struct amdgpu_ring *ring, u64 addr,
                                         u64 seq, unsigned flags)
 {
@@ -4224,7 +4249,7 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_gfx = {
        .get_wptr = gfx_v8_0_ring_get_wptr_gfx,
        .set_wptr = gfx_v8_0_ring_set_wptr_gfx,
        .parse_cs = NULL,
-       .emit_ib = gfx_v8_0_ring_emit_ib,
+       .emit_ib = gfx_v8_0_ring_emit_ib_gfx,
        .emit_fence = gfx_v8_0_ring_emit_fence_gfx,
        .emit_semaphore = gfx_v8_0_ring_emit_semaphore,
        .emit_vm_flush = gfx_v8_0_ring_emit_vm_flush,
@@ -4240,7 +4265,7 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_compute = {
        .get_wptr = gfx_v8_0_ring_get_wptr_compute,
        .set_wptr = gfx_v8_0_ring_set_wptr_compute,
        .parse_cs = NULL,
-       .emit_ib = gfx_v8_0_ring_emit_ib,
+       .emit_ib = gfx_v8_0_ring_emit_ib_compute,
        .emit_fence = gfx_v8_0_ring_emit_fence_compute,
        .emit_semaphore = gfx_v8_0_ring_emit_semaphore,
        .emit_vm_flush = gfx_v8_0_ring_emit_vm_flush,
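Both gfx_v7 and gfx_v8 split emit_ib into dedicated _gfx and _compute variants and wire them into the per-ring function tables, so the ring type is decided once at setup instead of being re-checked on every IB emission. A small sketch of that per-type function-pointer dispatch; the names are illustrative, not the driver's.

#include <stdio.h>

struct ring;
struct ring_funcs { void (*emit_ib)(struct ring *r); };
struct ring { const struct ring_funcs *funcs; };

static void emit_ib_gfx(struct ring *r)     { (void)r; printf("gfx path\n"); }
static void emit_ib_compute(struct ring *r) { (void)r; printf("compute path\n"); }

static const struct ring_funcs gfx_funcs     = { .emit_ib = emit_ib_gfx };
static const struct ring_funcs compute_funcs = { .emit_ib = emit_ib_compute };

int main(void)
{
        struct ring gfx = { &gfx_funcs }, comp = { &compute_funcs };

        /* no run-time ring-type checks: the table already picks the variant */
        gfx.funcs->emit_ib(&gfx);
        comp.funcs->emit_ib(&comp);
        return 0;
}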
index d62c4002e39cc7acc5a3427d0aed92bedf12c984..d1064ca3670ec31376293950c76452414e94ff23 100644 (file)
@@ -35,6 +35,8 @@
 #include "oss/oss_2_0_d.h"
 #include "oss/oss_2_0_sh_mask.h"
 #include "gca/gfx_8_0_d.h"
+#include "smu/smu_7_1_2_d.h"
+#include "smu/smu_7_1_2_sh_mask.h"
 
 #define GRBM_GFX_INDEX__VCE_INSTANCE__SHIFT    0x04
 #define GRBM_GFX_INDEX__VCE_INSTANCE_MASK      0x10
@@ -112,6 +114,10 @@ static int vce_v3_0_start(struct amdgpu_device *adev)
 
        mutex_lock(&adev->grbm_idx_mutex);
        for (idx = 0; idx < 2; ++idx) {
+
+               if (adev->vce.harvest_config & (1 << idx))
+                       continue;
+
                if(idx == 0)
                        WREG32_P(mmGRBM_GFX_INDEX, 0,
                                ~GRBM_GFX_INDEX__VCE_INSTANCE_MASK);
@@ -190,10 +196,52 @@ static int vce_v3_0_start(struct amdgpu_device *adev)
        return 0;
 }
 
+#define ixVCE_HARVEST_FUSE_MACRO__ADDRESS     0xC0014074
+#define VCE_HARVEST_FUSE_MACRO__SHIFT       27
+#define VCE_HARVEST_FUSE_MACRO__MASK        0x18000000
+
+static unsigned vce_v3_0_get_harvest_config(struct amdgpu_device *adev)
+{
+       u32 tmp;
+       unsigned ret;
+
+       if (adev->flags & AMDGPU_IS_APU)
+               tmp = (RREG32_SMC(ixVCE_HARVEST_FUSE_MACRO__ADDRESS) &
+                      VCE_HARVEST_FUSE_MACRO__MASK) >>
+                       VCE_HARVEST_FUSE_MACRO__SHIFT;
+       else
+               tmp = (RREG32_SMC(ixCC_HARVEST_FUSES) &
+                      CC_HARVEST_FUSES__VCE_DISABLE_MASK) >>
+                       CC_HARVEST_FUSES__VCE_DISABLE__SHIFT;
+
+       switch (tmp) {
+       case 1:
+               ret = AMDGPU_VCE_HARVEST_VCE0;
+               break;
+       case 2:
+               ret = AMDGPU_VCE_HARVEST_VCE1;
+               break;
+       case 3:
+               ret = AMDGPU_VCE_HARVEST_VCE0 | AMDGPU_VCE_HARVEST_VCE1;
+               break;
+       default:
+               ret = 0;
+       }
+
+       return ret;
+}
+
 static int vce_v3_0_early_init(void *handle)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
+       adev->vce.harvest_config = vce_v3_0_get_harvest_config(adev);
+
+       if ((adev->vce.harvest_config &
+            (AMDGPU_VCE_HARVEST_VCE0 | AMDGPU_VCE_HARVEST_VCE1)) ==
+           (AMDGPU_VCE_HARVEST_VCE0 | AMDGPU_VCE_HARVEST_VCE1))
+               return -ENOENT;
+
        vce_v3_0_set_ring_funcs(adev);
        vce_v3_0_set_irq_funcs(adev);
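vce_v3_0_get_harvest_config() above reads a two-bit fuse field and turns it into harvest flags; early init then returns -ENOENT when both VCE instances are fused off. The extract-and-map step, reduced to a stand-alone sketch with stand-in register constants:

#include <stdio.h>

#define HARVEST_VCE0 (1u << 0)
#define HARVEST_VCE1 (1u << 1)

#define FUSE_SHIFT 27
#define FUSE_MASK  0x18000000u

static unsigned decode_harvest(unsigned fuse_reg)
{
        switch ((fuse_reg & FUSE_MASK) >> FUSE_SHIFT) {
        case 1:  return HARVEST_VCE0;
        case 2:  return HARVEST_VCE1;
        case 3:  return HARVEST_VCE0 | HARVEST_VCE1;
        default: return 0;
        }
}

int main(void)
{
        unsigned cfg = decode_harvest(3u << FUSE_SHIFT);

        if ((cfg & (HARVEST_VCE0 | HARVEST_VCE1)) ==
            (HARVEST_VCE0 | HARVEST_VCE1))
                printf("both VCE instances harvested, skip the block\n");
        return 0;
}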
 
index f69b92535505b5ae1c899d6f9b08f76501851fa7..5ae5c69231280a5e1c23882289388f9ceb75a701 100644 (file)
@@ -355,6 +355,7 @@ int atmel_hlcdc_crtc_create(struct drm_device *dev)
                planes->overlays[i]->base.possible_crtcs = 1 << crtc->id;
 
        drm_crtc_helper_add(&crtc->base, &lcdc_crtc_helper_funcs);
+       drm_crtc_vblank_reset(&crtc->base);
 
        dc->crtc = &crtc->base;
 
index 60b0c13d7ff5cc6f4c338c9ed3d7f423d684c84e..6fad1f9648f38870b2162cb74a6320f50c34aabc 100644 (file)
@@ -313,20 +313,20 @@ static int atmel_hlcdc_dc_load(struct drm_device *dev)
 
        pm_runtime_enable(dev->dev);
 
-       ret = atmel_hlcdc_dc_modeset_init(dev);
+       ret = drm_vblank_init(dev, 1);
        if (ret < 0) {
-               dev_err(dev->dev, "failed to initialize mode setting\n");
+               dev_err(dev->dev, "failed to initialize vblank\n");
                goto err_periph_clk_disable;
        }
 
-       drm_mode_config_reset(dev);
-
-       ret = drm_vblank_init(dev, 1);
+       ret = atmel_hlcdc_dc_modeset_init(dev);
        if (ret < 0) {
-               dev_err(dev->dev, "failed to initialize vblank\n");
+               dev_err(dev->dev, "failed to initialize mode setting\n");
                goto err_periph_clk_disable;
        }
 
+       drm_mode_config_reset(dev);
+
        pm_runtime_get_sync(dev->dev);
        ret = drm_irq_install(dev, dc->hlcdc->irq);
        pm_runtime_put_sync(dev->dev);
index 357bd04a173b6aa51b896394a2e2f9f5754f7a21..fed748311b928cc534f781505a37686ec7a46ba3 100644 (file)
@@ -5398,12 +5398,9 @@ void drm_mode_config_reset(struct drm_device *dev)
                if (encoder->funcs->reset)
                        encoder->funcs->reset(encoder);
 
-       list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
-               connector->status = connector_status_unknown;
-
+       list_for_each_entry(connector, &dev->mode_config.connector_list, head)
                if (connector->funcs->reset)
                        connector->funcs->reset(connector);
-       }
 }
 EXPORT_SYMBOL(drm_mode_config_reset);
 
index 5f27290201e074c5d9a1673d710c96846b61c08c..fd1de451c8c6bae13f42eaae5e9cfe572039ea18 100644 (file)
@@ -3303,15 +3303,14 @@ int intel_freq_opcode(struct drm_i915_private *dev_priv, int val);
 #define I915_READ64(reg)       dev_priv->uncore.funcs.mmio_readq(dev_priv, (reg), true)
 
 #define I915_READ64_2x32(lower_reg, upper_reg) ({                      \
-               u32 upper = I915_READ(upper_reg);                       \
-               u32 lower = I915_READ(lower_reg);                       \
-               u32 tmp = I915_READ(upper_reg);                         \
-               if (upper != tmp) {                                     \
-                       upper = tmp;                                    \
-                       lower = I915_READ(lower_reg);                   \
-                       WARN_ON(I915_READ(upper_reg) != upper);         \
-               }                                                       \
-               (u64)upper << 32 | lower; })
+       u32 upper, lower, tmp;                                          \
+       tmp = I915_READ(upper_reg);                                     \
+       do {                                                            \
+               upper = tmp;                                            \
+               lower = I915_READ(lower_reg);                           \
+               tmp = I915_READ(upper_reg);                             \
+       } while (upper != tmp);                                         \
+       (u64)upper << 32 | lower; })
 
 #define POSTING_READ(reg)      (void)I915_READ_NOTRACE(reg)
 #define POSTING_READ16(reg)    (void)I915_READ16_NOTRACE(reg)
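The rewritten I915_READ64_2x32() keeps re-reading the upper 32 bits until two consecutive reads agree, which tolerates any number of carries between the split accesses; the old macro retried only once. Here is the same loop applied to a simulated live 64-bit counter, as a runnable sketch:

#include <stdint.h>
#include <stdio.h>

static uint64_t counter = 0x00000001fffffffeULL;

/* Simulated 32-bit MMIO reads of a live counter: each access also
 * advances it, so a carry can land between reading the two halves. */
static uint32_t read_lower(void) { return (uint32_t)(counter++); }
static uint32_t read_upper(void) { return (uint32_t)(counter++ >> 32); }

static uint64_t read64_2x32(void)
{
        uint32_t upper, lower, tmp;

        tmp = read_upper();
        do {
                upper = tmp;
                lower = read_lower();
                tmp = read_upper();
        } while (upper != tmp);         /* retry until the upper half is stable */

        return (uint64_t)upper << 32 | lower;
}

int main(void)
{
        printf("%#llx\n", (unsigned long long)read64_2x32());
        return 0;
}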
index 56b52a4767d48c56e40f6d4444204485d7cc5d94..31e8269e6e3dab33d809f693f7cc6ce8cf318975 100644 (file)
@@ -1923,6 +1923,17 @@ static int ggtt_bind_vma(struct i915_vma *vma,
                vma->vm->insert_entries(vma->vm, pages,
                                        vma->node.start,
                                        cache_level, pte_flags);
+
+               /* Note the inconsistency here is due to absence of the
+                * aliasing ppgtt on gen4 and earlier. Though we always
+                * request PIN_USER for execbuffer (translated to LOCAL_BIND),
+                * without the appgtt, we cannot honour that request and so
+                * must substitute it with a global binding. Since we do this
+                * behind the upper layers' back, we need to explicitly set
+                * the bound flag ourselves.
+                */
+               vma->bound |= GLOBAL_BIND;
+
        }

 
        if (dev_priv->mm.aliasing_ppgtt && flags & LOCAL_BIND) {
index 633bd1fcab6925881048e7310f9aa40e4f9db868..d19c9db5e18c9d57057ad78ffdbdfa1a65b861b4 100644 (file)
@@ -464,7 +464,10 @@ i915_gem_get_tiling(struct drm_device *dev, void *data,
        }
 
        /* Hide bit 17 from the user -- see comment in i915_gem_set_tiling */
-       args->phys_swizzle_mode = args->swizzle_mode;
+       if (dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES)
+               args->phys_swizzle_mode = I915_BIT_6_SWIZZLE_UNKNOWN;
+       else
+               args->phys_swizzle_mode = args->swizzle_mode;
        if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_17)
                args->swizzle_mode = I915_BIT_6_SWIZZLE_9;
        if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_10_17)
index a6d8a3ee7750adecb552415c8ffab06d599f46ff..260389acfb7752d01ce0ebd59597f06a671b2ea1 100644 (file)
@@ -1274,10 +1274,12 @@ int i915_reg_read_ioctl(struct drm_device *dev,
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_i915_reg_read *reg = data;
        struct register_whitelist const *entry = whitelist;
+       unsigned size;
+       u64 offset;
        int i, ret = 0;
 
        for (i = 0; i < ARRAY_SIZE(whitelist); i++, entry++) {
-               if (entry->offset == reg->offset &&
+               if (entry->offset == (reg->offset & -entry->size) &&
                    (1 << INTEL_INFO(dev)->gen & entry->gen_bitmask))
                        break;
        }
@@ -1285,23 +1287,33 @@ int i915_reg_read_ioctl(struct drm_device *dev,
        if (i == ARRAY_SIZE(whitelist))
                return -EINVAL;
 
+       /* We use the low bits to encode extra flags as the register should
+        * be naturally aligned (and those that are not so aligned merely
+        * limit the available flags for that register).
+        */
+       offset = entry->offset;
+       size = entry->size;
+       size |= reg->offset ^ offset;
+
        intel_runtime_pm_get(dev_priv);
 
-       switch (entry->size) {
+       switch (size) {
+       case 8 | 1:
+               reg->val = I915_READ64_2x32(offset, offset+4);
+               break;
        case 8:
-               reg->val = I915_READ64(reg->offset);
+               reg->val = I915_READ64(offset);
                break;
        case 4:
-               reg->val = I915_READ(reg->offset);
+               reg->val = I915_READ(offset);
                break;
        case 2:
-               reg->val = I915_READ16(reg->offset);
+               reg->val = I915_READ16(offset);
                break;
        case 1:
-               reg->val = I915_READ8(reg->offset);
+               reg->val = I915_READ8(offset);
                break;
        default:
-               MISSING_CASE(entry->size);
                ret = -EINVAL;
                goto out;
        }
index 0d1dbb73793355043d514f2c4239e16386671e00..247a424445f75bb4d27573e0d9ac208262a6ac97 100644 (file)
@@ -220,13 +220,15 @@ static int mdp4_plane_mode_set(struct drm_plane *plane,
        uint32_t op_mode = 0;
        uint32_t phasex_step = MDP4_VG_PHASE_STEP_DEFAULT;
        uint32_t phasey_step = MDP4_VG_PHASE_STEP_DEFAULT;
-       enum mdp4_frame_format frame_type = mdp4_get_frame_format(fb);
+       enum mdp4_frame_format frame_type;
 
        if (!(crtc && fb)) {
                DBG("%s: disabled!", mdp4_plane->name);
                return 0;
        }
 
+       frame_type = mdp4_get_frame_format(fb);
+
        /* src values are in Q16 fixed point, convert to integer: */
        src_x = src_x >> 16;
        src_y = src_y >> 16;
index 206f758f7d64849af986e6c70589bf96d7b0cb7b..e253db5de5aa5955a2841f9ff8cdc63e5b519fac 100644 (file)
@@ -76,7 +76,20 @@ static void mdp5_prepare_commit(struct msm_kms *kms, struct drm_atomic_state *st
 
 static void mdp5_complete_commit(struct msm_kms *kms, struct drm_atomic_state *state)
 {
+       int i;
        struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
+       int nplanes = mdp5_kms->dev->mode_config.num_total_plane;
+
+       for (i = 0; i < nplanes; i++) {
+               struct drm_plane *plane = state->planes[i];
+               struct drm_plane_state *plane_state = state->plane_states[i];
+
+               if (!plane)
+                       continue;
+
+               mdp5_plane_complete_commit(plane, plane_state);
+       }
+
        mdp5_disable(mdp5_kms);
 }
 
index e0eb24587c84d7887a69c47c0428ad3f0d4f1396..e79ac09b72168c4f2af8be041d541736a81223fc 100644 (file)
@@ -227,6 +227,8 @@ void mdp5_plane_install_properties(struct drm_plane *plane,
                struct drm_mode_object *obj);
 uint32_t mdp5_plane_get_flush(struct drm_plane *plane);
 void mdp5_plane_complete_flip(struct drm_plane *plane);
+void mdp5_plane_complete_commit(struct drm_plane *plane,
+       struct drm_plane_state *state);
 enum mdp5_pipe mdp5_plane_pipe(struct drm_plane *plane);
 struct drm_plane *mdp5_plane_init(struct drm_device *dev,
                enum mdp5_pipe pipe, bool private_plane, uint32_t reg_offset);
index 57b8f56ae9d06fb458266181a8344858e381e6b5..22275568ab8be3a500c82f138b4c4bb04a088de4 100644 (file)
@@ -31,8 +31,6 @@ struct mdp5_plane {
 
        uint32_t nformats;
        uint32_t formats[32];
-
-       bool enabled;
 };
 #define to_mdp5_plane(x) container_of(x, struct mdp5_plane, base)
 
@@ -56,22 +54,6 @@ static bool plane_enabled(struct drm_plane_state *state)
        return state->fb && state->crtc;
 }
 
-static int mdp5_plane_disable(struct drm_plane *plane)
-{
-       struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
-       struct mdp5_kms *mdp5_kms = get_kms(plane);
-       enum mdp5_pipe pipe = mdp5_plane->pipe;
-
-       DBG("%s: disable", mdp5_plane->name);
-
-       if (mdp5_kms) {
-               /* Release the memory we requested earlier from the SMP: */
-               mdp5_smp_release(mdp5_kms->smp, pipe);
-       }
-
-       return 0;
-}
-
 static void mdp5_plane_destroy(struct drm_plane *plane)
 {
        struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
@@ -224,7 +206,6 @@ static void mdp5_plane_atomic_update(struct drm_plane *plane,
 
        if (!plane_enabled(state)) {
                to_mdp5_plane_state(state)->pending = true;
-               mdp5_plane_disable(plane);
        } else if (to_mdp5_plane_state(state)->mode_changed) {
                int ret;
                to_mdp5_plane_state(state)->pending = true;
@@ -602,6 +583,20 @@ uint32_t mdp5_plane_get_flush(struct drm_plane *plane)
        return mdp5_plane->flush_mask;
 }
 
+/* called after vsync in thread context */
+void mdp5_plane_complete_commit(struct drm_plane *plane,
+       struct drm_plane_state *state)
+{
+       struct mdp5_kms *mdp5_kms = get_kms(plane);
+       struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
+       enum mdp5_pipe pipe = mdp5_plane->pipe;
+
+       if (!plane_enabled(plane->state)) {
+               DBG("%s: free SMP", mdp5_plane->name);
+               mdp5_smp_release(mdp5_kms->smp, pipe);
+       }
+}
+
 /* initialize plane */
 struct drm_plane *mdp5_plane_init(struct drm_device *dev,
                enum mdp5_pipe pipe, bool private_plane, uint32_t reg_offset)
index 16702aecf0df714e211b8d7900fc06299e0e92f8..64a27d86f2f521444469b29e70ca9861f17d09db 100644 (file)
  * and CANNOT be re-allocated (eg: MMB0 and MMB1 both tied to RGB0).
  *
  * For each block that can be dynamically allocated, it can be either
- * free, or pending/in-use by a client. The updates happen in three steps:
+ *     free:
+ *     The block is free.
+ *
+ *     pending:
+ *     The block is allocated to some client but not yet assigned in hw.
+ *
+ *     configured:
+ *     The block is allocated to some client, and assigned to that
+ *     client in MDP5_MDP_SMP_ALLOC registers.
+ *
+ *     inuse:
+ *     The block is being actively used by a client.
+ *
+ * The updates happen in the following steps:
  *
  *  1) mdp5_smp_request():
  *     When plane scanout is setup, calculate required number of
- *     blocks needed per client, and request.  Blocks not inuse or
- *     pending by any other client are added to client's pending
- *     set.
+ *     blocks needed per client, and request. Blocks neither inuse nor
+ *     configured nor pending by any other client are added to the client's
+ *     pending set.
+ *     For shrinking, blocks in pending but not in configured can be freed
+ *     directly, but those already in configured will be freed later by
+ *     mdp5_smp_commit.
  *
  *  2) mdp5_smp_configure():
  *     As hw is programmed, before FLUSH, MDP5_MDP_SMP_ALLOC registers
  *     are configured for the union(pending, inuse)
+ *     Current pending is copied to configured.
+ *     It is assumed that mdp5_smp_request and mdp5_smp_configure do not run
+ *     concurrently for the same pipe.
  *
  *  3) mdp5_smp_commit():
- *     After next vblank, copy pending -> inuse.  Optionally update
+ *     After next vblank, copy configured -> inuse.  Optionally update
  *     MDP5_SMP_ALLOC registers if there are newly unused blocks
  *
+ *  4) mdp5_smp_release():
+ *     Must be called after the pipe is disabled and no longer uses any SMB
+ *
  * On the next vblank after changes have been committed to hw, the
 * client's pending blocks become its in-use blocks (and no-longer
  * in-use blocks become available to other clients).
@@ -77,6 +99,9 @@ struct mdp5_smp {
        struct mdp5_client_smp_state client_state[MAX_CLIENTS];
 };
 
+static void update_smp_state(struct mdp5_smp *smp,
+               u32 cid, mdp5_smp_state_t *assigned);
+
 static inline
 struct mdp5_kms *get_kms(struct mdp5_smp *smp)
 {
@@ -149,7 +174,12 @@ static int smp_request_block(struct mdp5_smp *smp,
                for (i = cur_nblks; i > nblks; i--) {
                        int blk = find_first_bit(ps->pending, cnt);
                        clear_bit(blk, ps->pending);
-                       /* don't clear in global smp_state until _commit() */
+
+                       /* if the block is not yet configured, clear it in the
+                        * global smp_state now; otherwise _commit() frees it
+                        */
+                       if (!test_bit(blk, ps->configured))
+                               clear_bit(blk, smp->state);
                }
        }
 
@@ -223,10 +253,33 @@ int mdp5_smp_request(struct mdp5_smp *smp, enum mdp5_pipe pipe, u32 fmt, u32 wid
 /* Release SMP blocks for all clients of the pipe */
 void mdp5_smp_release(struct mdp5_smp *smp, enum mdp5_pipe pipe)
 {
-       int i, nblks;
+       int i;
+       unsigned long flags;
+       int cnt = smp->blk_cnt;
+
+       for (i = 0; i < pipe2nclients(pipe); i++) {
+               mdp5_smp_state_t assigned;
+               u32 cid = pipe2client(pipe, i);
+               struct mdp5_client_smp_state *ps = &smp->client_state[cid];
+
+               spin_lock_irqsave(&smp->state_lock, flags);
+
+               /* clear hw assignment */
+               bitmap_or(assigned, ps->inuse, ps->configured, cnt);
+               update_smp_state(smp, CID_UNUSED, &assigned);
+
+               /* free to global pool */
+               bitmap_andnot(smp->state, smp->state, ps->pending, cnt);
+               bitmap_andnot(smp->state, smp->state, assigned, cnt);
+
+               /* clear client's info */
+               bitmap_zero(ps->pending, cnt);
+               bitmap_zero(ps->configured, cnt);
+               bitmap_zero(ps->inuse, cnt);
+
+               spin_unlock_irqrestore(&smp->state_lock, flags);
+       }
 
-       for (i = 0, nblks = 0; i < pipe2nclients(pipe); i++)
-               smp_request_block(smp, pipe2client(pipe, i), 0);
        set_fifo_thresholds(smp, pipe, 0);
 }
 
@@ -274,12 +327,20 @@ void mdp5_smp_configure(struct mdp5_smp *smp, enum mdp5_pipe pipe)
                u32 cid = pipe2client(pipe, i);
                struct mdp5_client_smp_state *ps = &smp->client_state[cid];
 
-               bitmap_or(assigned, ps->inuse, ps->pending, cnt);
+               /*
+                * if vblank has not happened since the last smp_configure,
+                * skip the configure for now
+                */
+               if (!bitmap_equal(ps->inuse, ps->configured, cnt))
+                       continue;
+
+               bitmap_copy(ps->configured, ps->pending, cnt);
+               bitmap_or(assigned, ps->inuse, ps->configured, cnt);
                update_smp_state(smp, cid, &assigned);
        }
 }
 
-/* step #3: after vblank, copy pending -> inuse: */
+/* step #3: after vblank, copy configured -> inuse: */
 void mdp5_smp_commit(struct mdp5_smp *smp, enum mdp5_pipe pipe)
 {
        int cnt = smp->blk_cnt;
@@ -295,7 +356,7 @@ void mdp5_smp_commit(struct mdp5_smp *smp, enum mdp5_pipe pipe)
                 * using, which can be released and made available to other
                 * clients:
                 */
-               if (bitmap_andnot(released, ps->inuse, ps->pending, cnt)) {
+               if (bitmap_andnot(released, ps->inuse, ps->configured, cnt)) {
                        unsigned long flags;
 
                        spin_lock_irqsave(&smp->state_lock, flags);
@@ -306,7 +367,7 @@ void mdp5_smp_commit(struct mdp5_smp *smp, enum mdp5_pipe pipe)
                        update_smp_state(smp, CID_UNUSED, &released);
                }
 
-               bitmap_copy(ps->inuse, ps->pending, cnt);
+               bitmap_copy(ps->inuse, ps->configured, cnt);
        }
 }
 
index e47179f635852a2f6ee546378200ce0a54a64499..5b6c2363f59280266a58a407a17c91b00eb8d9a3 100644 (file)
@@ -23,6 +23,7 @@
 
 struct mdp5_client_smp_state {
        mdp5_smp_state_t inuse;
+       mdp5_smp_state_t configured;
        mdp5_smp_state_t pending;
 };
 
index 1b22d8bfe142097f507435613b5bbd25311d4700..1ceb4f22dd8997a7b4e772d82e646abb1a87c7ff 100644 (file)
@@ -283,12 +283,8 @@ int msm_atomic_commit(struct drm_device *dev,
 
        timeout = ktime_add_ms(ktime_get(), 1000);
 
-       ret = msm_wait_fence_interruptable(dev, c->fence, &timeout);
-       if (ret) {
-               WARN_ON(ret);  // TODO unswap state back?  or??
-               commit_destroy(c);
-               return ret;
-       }
+       /* uninterruptible wait */
+       msm_wait_fence(dev, c->fence, &timeout, false);
 
        complete_commit(c);
 
index b7ef56ed8d1cf5280f942d83d2305672b414e0d8..d3467b115e0482a6eca2ebd6a5810e31f69220fa 100644 (file)
@@ -637,8 +637,8 @@ static void msm_debugfs_cleanup(struct drm_minor *minor)
  * Fences:
  */
 
-int msm_wait_fence_interruptable(struct drm_device *dev, uint32_t fence,
-               ktime_t *timeout)
+int msm_wait_fence(struct drm_device *dev, uint32_t fence,
+               ktime_t *timeout, bool interruptible)
 {
        struct msm_drm_private *priv = dev->dev_private;
        int ret;
@@ -667,7 +667,12 @@ int msm_wait_fence_interruptable(struct drm_device *dev, uint32_t fence,
                        remaining_jiffies = timespec_to_jiffies(&ts);
                }
 
-               ret = wait_event_interruptible_timeout(priv->fence_event,
+               if (interruptible)
+                       ret = wait_event_interruptible_timeout(priv->fence_event,
+                               fence_completed(dev, fence),
+                               remaining_jiffies);
+               else
+                       ret = wait_event_timeout(priv->fence_event,
                                fence_completed(dev, fence),
                                remaining_jiffies);
 
@@ -853,7 +858,7 @@ static int msm_ioctl_wait_fence(struct drm_device *dev, void *data,
                return -EINVAL;
        }
 
-       return msm_wait_fence_interruptable(dev, args->fence, &timeout);
+       return msm_wait_fence(dev, args->fence, &timeout, true);
 }
 
 static const struct drm_ioctl_desc msm_ioctls[] = {
index e7c5ea125d45ed42ebaa043522a88b4bcb2c2e3f..4ff0ec9c994b33f84421bd800e88abd8f7b3834a 100644 (file)
@@ -164,8 +164,8 @@ int msm_atomic_commit(struct drm_device *dev,
 
 int msm_register_mmu(struct drm_device *dev, struct msm_mmu *mmu);
 
-int msm_wait_fence_interruptable(struct drm_device *dev, uint32_t fence,
-               ktime_t *timeout);
+int msm_wait_fence(struct drm_device *dev, uint32_t fence,
+               ktime_t *timeout, bool interruptible);
 int msm_queue_fence_cb(struct drm_device *dev,
                struct msm_fence_cb *cb, uint32_t fence);
 void msm_update_fence(struct drm_device *dev, uint32_t fence);
index f211b80e3a1e0604489b1ed91a65e4e1b489b894..c76cc853b08a57effec626b8c6f537b270ca61ac 100644 (file)
@@ -460,7 +460,7 @@ int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout)
                if (op & MSM_PREP_NOSYNC)
                        timeout = NULL;
 
-               ret = msm_wait_fence_interruptable(dev, fence, timeout);
+               ret = msm_wait_fence(dev, fence, timeout, true);
        }
 
        /* TODO cache maintenance */
index dd7a7ab603e2c202ea297575fab0aba003234f5b..831461bc98a549e8a3627cbd1cf5a1b4a3250c7b 100644 (file)
 struct sg_table *msm_gem_prime_get_sg_table(struct drm_gem_object *obj)
 {
        struct msm_gem_object *msm_obj = to_msm_bo(obj);
-       BUG_ON(!msm_obj->sgt);  /* should have already pinned! */
-       return msm_obj->sgt;
+       int npages = obj->size >> PAGE_SHIFT;
+
+       if (WARN_ON(!msm_obj->pages))  /* should have already pinned! */
+               return NULL;
+
+       return drm_prime_pages_to_sg(msm_obj->pages, npages);
 }
 
 void *msm_gem_prime_vmap(struct drm_gem_object *obj)
index 649024d4daf1da6e5aa1e074e303ba02ec312de0..477cbb12809b029c2de6d62bdd0bcb3ba415001c 100644 (file)
@@ -128,6 +128,7 @@ nouveau_cli_destroy(struct nouveau_cli *cli)
        nvkm_vm_ref(NULL, &nvxx_client(&cli->base)->vm, NULL);
        nvif_client_fini(&cli->base);
        usif_client_fini(cli);
+       kfree(cli);
 }
 
 static void
@@ -865,8 +866,10 @@ nouveau_drm_preclose(struct drm_device *dev, struct drm_file *fpriv)
 
        pm_runtime_get_sync(dev->dev);
 
+       mutex_lock(&cli->mutex);
        if (cli->abi16)
                nouveau_abi16_fini(cli->abi16);
+       mutex_unlock(&cli->mutex);
 
        mutex_lock(&drm->client.mutex);
        list_del(&cli->head);
index 775277f1edb0a4ae4c1a2862418827878710ddb8..dcfbbfaf1739781724e312a4fc15cded1298358d 100644 (file)
@@ -92,6 +92,8 @@ static int nouveau_platform_power_down(struct nouveau_platform_gpu *gpu)
        return 0;
 }
 
+#if IS_ENABLED(CONFIG_IOMMU_API)
+
 static void nouveau_platform_probe_iommu(struct device *dev,
                                         struct nouveau_platform_gpu *gpu)
 {
@@ -158,6 +160,20 @@ static void nouveau_platform_remove_iommu(struct device *dev,
        }
 }
 
+#else
+
+static void nouveau_platform_probe_iommu(struct device *dev,
+                                        struct nouveau_platform_gpu *gpu)
+{
+}
+
+static void nouveau_platform_remove_iommu(struct device *dev,
+                                         struct nouveau_platform_gpu *gpu)
+{
+}
+
+#endif
+
 static int nouveau_platform_probe(struct platform_device *pdev)
 {
        struct nouveau_platform_gpu *gpu;
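
The #if IS_ENABLED(CONFIG_IOMMU_API) block above follows the usual kernel idiom of compiling empty stubs when a feature is configured out, so the probe and remove paths can call the helpers unconditionally. A tiny sketch of the idiom with an invented HAVE_IOMMU switch in place of the Kconfig symbol:

#include <stdio.h>

/* Build-time switch standing in for CONFIG_IOMMU_API. */
#define HAVE_IOMMU 0

#if HAVE_IOMMU
static void probe_iommu(void)  { printf("iommu: probed\n"); }
static void remove_iommu(void) { printf("iommu: removed\n"); }
#else
/* Empty stubs keep the callers free of #ifdefs when the feature is off. */
static void probe_iommu(void)  { }
static void remove_iommu(void) { }
#endif

int main(void)
{
        probe_iommu();          /* caller is identical either way */
        remove_iommu();
        printf("probe/remove completed with%s IOMMU support\n",
               HAVE_IOMMU ? "" : "out");
        return 0;
}
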
index 18f4497157885a897a9cbec14c390d397a02ca25..7464aef34674965bbf077570b703abf5bf2630e5 100644 (file)
@@ -175,15 +175,24 @@ nouveau_gart_manager_new(struct ttm_mem_type_manager *man,
        node->page_shift = 12;
 
        switch (drm->device.info.family) {
+       case NV_DEVICE_INFO_V0_TNT:
+       case NV_DEVICE_INFO_V0_CELSIUS:
+       case NV_DEVICE_INFO_V0_KELVIN:
+       case NV_DEVICE_INFO_V0_RANKINE:
+       case NV_DEVICE_INFO_V0_CURIE:
+               break;
        case NV_DEVICE_INFO_V0_TESLA:
                if (drm->device.info.chipset != 0x50)
                        node->memtype = (nvbo->tile_flags & 0x7f00) >> 8;
                break;
        case NV_DEVICE_INFO_V0_FERMI:
        case NV_DEVICE_INFO_V0_KEPLER:
+       case NV_DEVICE_INFO_V0_MAXWELL:
                node->memtype = (nvbo->tile_flags & 0xff00) >> 8;
                break;
        default:
+               NV_WARN(drm, "%s: unhandled family type %x\n", __func__,
+                       drm->device.info.family);
                break;
        }
 
index 4ef602c5469d2563ee89d4153f0315abdc485a20..495c57644ced91e4a45c7b8b6205fff7d32ff8ae 100644 (file)
@@ -203,7 +203,7 @@ nv04_fbcon_accel_init(struct fb_info *info)
        if (ret)
                return ret;
 
-       if (RING_SPACE(chan, 49)) {
+       if (RING_SPACE(chan, 49 + (device->info.chipset >= 0x11 ? 4 : 0))) {
                nouveau_fbcon_gpu_lockup(info);
                return 0;
        }
index 7da7958556a3abe68f94eac820c62aa8b41a923a..981342d142ff61b6c6292fc14399eb7a43ad08a6 100644 (file)
@@ -979,7 +979,7 @@ nv50_crtc_cursor_show_hide(struct nouveau_crtc *nv_crtc, bool show, bool update)
 {
        struct nv50_mast *mast = nv50_mast(nv_crtc->base.dev);
 
-       if (show && nv_crtc->cursor.nvbo)
+       if (show && nv_crtc->cursor.nvbo && nv_crtc->base.enabled)
                nv50_crtc_cursor_show(nv_crtc);
        else
                nv50_crtc_cursor_hide(nv_crtc);
index 394c89abcc97d92cf1b0352ea6e060b9ee011027..901130b0607291da4d08401f82e6ed05688c84b5 100644 (file)
@@ -188,7 +188,7 @@ nv50_fbcon_accel_init(struct fb_info *info)
        if (ret)
                return ret;
 
-       ret = RING_SPACE(chan, 59);
+       ret = RING_SPACE(chan, 58);
        if (ret) {
                nouveau_fbcon_gpu_lockup(info);
                return ret;
@@ -252,6 +252,7 @@ nv50_fbcon_accel_init(struct fb_info *info)
        OUT_RING(chan, info->var.yres_virtual);
        OUT_RING(chan, upper_32_bits(fb->vma.offset));
        OUT_RING(chan, lower_32_bits(fb->vma.offset));
+       FIRE_RING(chan);
 
        return 0;
 }
index 61246677e8dcdd901119a84d5bc4e6c09287978b..fcd2e5f27bb9539ba113e790222713b10127f825 100644 (file)
@@ -188,7 +188,7 @@ nvc0_fbcon_accel_init(struct fb_info *info)
                return -EINVAL;
        }
 
-       ret = RING_SPACE(chan, 60);
+       ret = RING_SPACE(chan, 58);
        if (ret) {
                WARN_ON(1);
                nouveau_fbcon_gpu_lockup(info);
index 9ef6728c528d999ef625a9c1472de95c0c02ba24..7f2f05f78cc8cb7a17e7c7f2445c741981c9389a 100644 (file)
@@ -809,7 +809,7 @@ exec_lookup(struct nv50_disp_priv *priv, int head, int or, u32 ctrl,
                case 0x00000900: type = DCB_OUTPUT_DP; mask = 2; break;
                default:
                        nv_error(priv, "unknown SOR mc 0x%08x\n", ctrl);
-                       return 0x0000;
+                       return NULL;
                }
        }
 
index e10f9644140f5d9fcd6e73446c74634d2b13906a..52c22b02600598cfa7d18e424d69a99cce4879e7 100644 (file)
@@ -165,15 +165,31 @@ gk104_fifo_context_attach(struct nvkm_object *parent,
        return 0;
 }
 
+static int
+gk104_fifo_chan_kick(struct gk104_fifo_chan *chan)
+{
+       struct nvkm_object *obj = (void *)chan;
+       struct gk104_fifo_priv *priv = (void *)obj->engine;
+
+       nv_wr32(priv, 0x002634, chan->base.chid);
+       if (!nv_wait(priv, 0x002634, 0x100000, 0x000000)) {
+               nv_error(priv, "channel %d [%s] kick timeout\n",
+                        chan->base.chid, nvkm_client_name(chan));
+               return -EBUSY;
+       }
+
+       return 0;
+}
+
 static int
 gk104_fifo_context_detach(struct nvkm_object *parent, bool suspend,
                          struct nvkm_object *object)
 {
        struct nvkm_bar *bar = nvkm_bar(parent);
-       struct gk104_fifo_priv *priv = (void *)parent->engine;
        struct gk104_fifo_base *base = (void *)parent->parent;
        struct gk104_fifo_chan *chan = (void *)parent;
        u32 addr;
+       int ret;
 
        switch (nv_engidx(object->engine)) {
        case NVDEV_ENGINE_SW    : return 0;
@@ -188,13 +204,9 @@ gk104_fifo_context_detach(struct nvkm_object *parent, bool suspend,
                return -EINVAL;
        }
 
-       nv_wr32(priv, 0x002634, chan->base.chid);
-       if (!nv_wait(priv, 0x002634, 0xffffffff, chan->base.chid)) {
-               nv_error(priv, "channel %d [%s] kick timeout\n",
-                        chan->base.chid, nvkm_client_name(chan));
-               if (suspend)
-                       return -EBUSY;
-       }
+       ret = gk104_fifo_chan_kick(chan);
+       if (ret && suspend)
+               return ret;
 
        if (addr) {
                nv_wo32(base, addr + 0x00, 0x00000000);
@@ -319,6 +331,7 @@ gk104_fifo_chan_fini(struct nvkm_object *object, bool suspend)
                gk104_fifo_runlist_update(priv, chan->engine);
        }
 
+       gk104_fifo_chan_kick(chan);
        nv_wr32(priv, 0x800000 + (chid * 8), 0x00000000);
        return nvkm_fifo_channel_fini(&chan->base, suspend);
 }
index 5606c25e5d02998fc415a5b9705d9a40868f2a10..ca11ddb6ed467588ebe5cb98e359b91e07da92c4 100644 (file)
@@ -663,6 +663,37 @@ gf100_gr_zbc_init(struct gf100_gr_priv *priv)
                gf100_gr_zbc_clear_depth(priv, index);
 }
 
+/**
+ * Wait until GR goes idle. GR is considered idle if it is disabled by the
+ * MC (0x200) register, or GR is not busy and a context switch is not in
+ * progress.
+ */
+int
+gf100_gr_wait_idle(struct gf100_gr_priv *priv)
+{
+       unsigned long end_jiffies = jiffies + msecs_to_jiffies(2000);
+       bool gr_enabled, ctxsw_active, gr_busy;
+
+       do {
+               /*
+                * required to make sure FIFO_ENGINE_STATUS (0x2640) is
+                * up-to-date
+                */
+               nv_rd32(priv, 0x400700);
+
+               gr_enabled = nv_rd32(priv, 0x200) & 0x1000;
+               ctxsw_active = nv_rd32(priv, 0x2640) & 0x8000;
+               gr_busy = nv_rd32(priv, 0x40060c) & 0x1;
+
+               if (!gr_enabled || (!gr_busy && !ctxsw_active))
+                       return 0;
+       } while (time_before(jiffies, end_jiffies));
+
+       nv_error(priv, "wait for idle timeout (en: %d, ctxsw: %d, busy: %d)\n",
+                gr_enabled, ctxsw_active, gr_busy);
+       return -EAGAIN;
+}
+
 void
 gf100_gr_mmio(struct gf100_gr_priv *priv, const struct gf100_gr_pack *p)
 {
@@ -699,7 +730,13 @@ gf100_gr_icmd(struct gf100_gr_priv *priv, const struct gf100_gr_pack *p)
 
                while (addr < next) {
                        nv_wr32(priv, 0x400200, addr);
-                       nv_wait(priv, 0x400700, 0x00000002, 0x00000000);
+                       /*
+                        * Wait for GR to go idle after submitting a
+                        * GO_IDLE bundle
+                        */
+                       if ((addr & 0xffff) == 0xe100)
+                               gf100_gr_wait_idle(priv);
+                       nv_wait(priv, 0x400700, 0x00000004, 0x00000000);
                        addr += init->pitch;
                }
        }
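
gf100_gr_wait_idle() above is a bounded poll: keep re-reading the status registers until the engine reports idle or a deadline passes, and report a timeout otherwise. A generic user-space sketch of that pattern, using clock_gettime() for the deadline and a made-up engine_is_idle() probe instead of the priv register reads:

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

/* Hypothetical status probe standing in for the GR register reads. */
static bool engine_is_idle(void)
{
        static int polls;
        return ++polls > 3;     /* pretend the engine idles after a few polls */
}

/* Poll until idle or until timeout_ms elapses; 0 on idle, -1 on timeout. */
static int wait_idle(long timeout_ms)
{
        struct timespec start, now;

        clock_gettime(CLOCK_MONOTONIC, &start);
        do {
                if (engine_is_idle())
                        return 0;
                clock_gettime(CLOCK_MONOTONIC, &now);
        } while ((now.tv_sec - start.tv_sec) * 1000L +
                 (now.tv_nsec - start.tv_nsec) / 1000000L < timeout_ms);

        fprintf(stderr, "wait for idle timeout\n");
        return -1;
}

int main(void)
{
        return wait_idle(2000) ? 1 : 0;
}
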
index 8af1a89eda84d13436b0345301a3e767cc710c19..c9533fdac4fc85a2b0189429d6564cbc119734a7 100644 (file)
@@ -181,6 +181,7 @@ struct gf100_gr_oclass {
        int ppc_nr;
 };
 
+int  gf100_gr_wait_idle(struct gf100_gr_priv *);
 void gf100_gr_mmio(struct gf100_gr_priv *, const struct gf100_gr_pack *);
 void gf100_gr_icmd(struct gf100_gr_priv *, const struct gf100_gr_pack *);
 void gf100_gr_mthd(struct gf100_gr_priv *, const struct gf100_gr_pack *);
index 2006c445938d9493773ef2dbf9bb07d045a67b96..4cf36a3aa81460c2c062b6cc41e70571530c1eb8 100644 (file)
@@ -332,9 +332,12 @@ static void
 nvkm_perfctx_dtor(struct nvkm_object *object)
 {
        struct nvkm_pm *ppm = (void *)object->engine;
+       struct nvkm_perfctx *ctx = (void *)object;
+
        mutex_lock(&nv_subdev(ppm)->mutex);
-       nvkm_engctx_destroy(&ppm->context->base);
-       ppm->context = NULL;
+       nvkm_engctx_destroy(&ctx->base);
+       if (ppm->context == ctx)
+               ppm->context = NULL;
        mutex_unlock(&nv_subdev(ppm)->mutex);
 }
 
@@ -355,12 +358,11 @@ nvkm_perfctx_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
        mutex_lock(&nv_subdev(ppm)->mutex);
        if (ppm->context == NULL)
                ppm->context = ctx;
-       mutex_unlock(&nv_subdev(ppm)->mutex);
-
        if (ctx != ppm->context)
-               return -EBUSY;
+               ret = -EBUSY;
+       mutex_unlock(&nv_subdev(ppm)->mutex);
 
-       return 0;
+       return ret;
 }
 
 struct nvkm_oclass
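
The nvkm_perfctx fix above claims the single ppm->context slot and checks ownership while still holding the mutex, and the destructor only clears the slot if the dying context actually owns it. The same claim-and-release discipline in miniature, using a pthread mutex and an invented ctx_claim()/ctx_release() pair:

#include <errno.h>
#include <pthread.h>
#include <stdio.h>

/* One engine-wide "current context" slot, claimed under a mutex. */
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static void *current_ctx;

static int ctx_claim(void *ctx)
{
        int ret = 0;

        pthread_mutex_lock(&lock);
        if (!current_ctx)
                current_ctx = ctx;
        if (current_ctx != ctx)         /* someone else got there first */
                ret = -EBUSY;
        pthread_mutex_unlock(&lock);
        return ret;
}

static void ctx_release(void *ctx)
{
        pthread_mutex_lock(&lock);
        /* only clear the slot if we are the one holding it */
        if (current_ctx == ctx)
                current_ctx = NULL;
        pthread_mutex_unlock(&lock);
}

int main(void)
{
        int a, b;

        printf("claim a: %d\n", ctx_claim(&a));  /* 0 */
        printf("claim b: %d\n", ctx_claim(&b));  /* -EBUSY */
        ctx_release(&b);        /* no-op: b never owned the slot */
        ctx_release(&a);
        printf("claim b: %d\n", ctx_claim(&b));  /* 0 now */
        return 0;
}
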
index f67cdae1e90a599a04c46dd7a654102a7731bdf0..f4611e3f097187002e68dc54af44c45bfd0006d9 100644 (file)
@@ -1284,6 +1284,44 @@ init_zm_reg_sequence(struct nvbios_init *init)
        }
 }
 
+/**
+ * INIT_PLL_INDIRECT - opcode 0x59
+ *
+ */
+static void
+init_pll_indirect(struct nvbios_init *init)
+{
+       struct nvkm_bios *bios = init->bios;
+       u32  reg = nv_ro32(bios, init->offset + 1);
+       u16 addr = nv_ro16(bios, init->offset + 5);
+       u32 freq = (u32)nv_ro16(bios, addr) * 1000;
+
+       trace("PLL_INDIRECT\tR[0x%06x] =PLL= VBIOS[%04x] = %dkHz\n",
+             reg, addr, freq);
+       init->offset += 7;
+
+       init_prog_pll(init, reg, freq);
+}
+
+/**
+ * INIT_ZM_REG_INDIRECT - opcode 0x5a
+ *
+ */
+static void
+init_zm_reg_indirect(struct nvbios_init *init)
+{
+       struct nvkm_bios *bios = init->bios;
+       u32  reg = nv_ro32(bios, init->offset + 1);
+       u16 addr = nv_ro16(bios, init->offset + 5);
+       u32 data = nv_ro32(bios, addr);
+
+       trace("ZM_REG_INDIRECT\tR[0x%06x] = VBIOS[0x%04x] = 0x%08x\n",
+             reg, addr, data);
+       init->offset += 7;
+
+       init_wr32(init, reg, data);
+}
+
 /**
  * INIT_SUB_DIRECT - opcode 0x5b
  *
@@ -2145,6 +2183,8 @@ static struct nvbios_init_opcode {
        [0x56] = { init_condition_time },
        [0x57] = { init_ltime },
        [0x58] = { init_zm_reg_sequence },
+       [0x59] = { init_pll_indirect },
+       [0x5a] = { init_zm_reg_indirect },
        [0x5b] = { init_sub_direct },
        [0x5c] = { init_jump },
        [0x5e] = { init_i2c_if },
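
Both new handlers above decode a 7-byte instruction (one opcode byte, a 32-bit register at offset +1, a 16-bit table offset at offset +5) and then fetch the real operand through that second level of indirection before advancing the script pointer. A stand-alone sketch of the ZM_REG_INDIRECT decode over a flat byte buffer; the buffer contents and the little-endian ro16()/ro32() helpers are invented for illustration:

#include <stdint.h>
#include <stdio.h>

/* Pretend VBIOS image: opcode 0x5a at offset 0, data table at offset 0x10. */
static const uint8_t vbios[] = {
        /* 0x00: ZM_REG_INDIRECT  reg=0x00612c00  addr=0x0010 */
        0x5a, 0x00, 0x2c, 0x61, 0x00, 0x10, 0x00,
        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
        /* 0x10: the indirect 32-bit data value */
        0x78, 0x56, 0x34, 0x12,
};

static uint16_t ro16(const uint8_t *p, uint16_t off)
{
        return (uint16_t)p[off] | (uint16_t)p[off + 1] << 8;
}

static uint32_t ro32(const uint8_t *p, uint16_t off)
{
        return (uint32_t)ro16(p, off) | (uint32_t)ro16(p, off + 2) << 16;
}

int main(void)
{
        uint16_t offset = 0;            /* script pointer */
        uint32_t reg  = ro32(vbios, offset + 1);
        uint16_t addr = ro16(vbios, offset + 5);
        uint32_t data = ro32(vbios, addr);

        offset += 7;                    /* 7-byte instruction */
        printf("R[0x%06x] = VBIOS[0x%04x] = 0x%08x (next op at %u)\n",
               reg, addr, data, (unsigned int)offset);
        return 0;
}
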
index 822d32a28d6e15a38f8c9db722288c27c9349d41..065e9f5c8db98a0a2b70b5b2d4a203d350899465 100644 (file)
@@ -180,7 +180,8 @@ gt215_clk_info(struct nvkm_clk *clock, int clk, u32 khz,
               struct gt215_clk_info *info)
 {
        struct gt215_clk_priv *priv = (void *)clock;
-       u32 oclk, sclk, sdiv, diff;
+       u32 oclk, sclk, sdiv;
+       s32 diff;
 
        info->clk = 0;
 
index c0fdb89e74ac4a41944e2d7c2f41ca3daa6c4ed6..24dcdfb58a8d852039317a5017ee34dcc9cf3039 100644 (file)
@@ -38,6 +38,14 @@ gk20a_ibus_init_priv_ring(struct gk20a_ibus_priv *priv)
        nv_wr32(priv, 0x12004c, 0x4);
        nv_wr32(priv, 0x122204, 0x2);
        nv_rd32(priv, 0x122204);
+
+       /*
+        * Bug: increase clock timeout to avoid operation failure at high
+        * gpcclk rate.
+        */
+       nv_wr32(priv, 0x122354, 0x800);
+       nv_wr32(priv, 0x128328, 0x800);
+       nv_wr32(priv, 0x124320, 0x800);
 }
 
 static void
index 80614f1b207474ae407251d6811add96925e8241..282143f49d72e60ba5c66f79fc818ca3b37ea942 100644 (file)
@@ -50,7 +50,12 @@ nv04_instobj_dtor(struct nvkm_object *object)
 {
        struct nv04_instmem_priv *priv = (void *)nvkm_instmem(object);
        struct nv04_instobj_priv *node = (void *)object;
+       struct nvkm_subdev *subdev = (void *)priv;
+
+       mutex_lock(&subdev->mutex);
        nvkm_mm_free(&priv->heap, &node->mem);
+       mutex_unlock(&subdev->mutex);
+
        nvkm_instobj_destroy(&node->base);
 }
 
@@ -62,6 +67,7 @@ nv04_instobj_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
        struct nv04_instmem_priv *priv = (void *)nvkm_instmem(parent);
        struct nv04_instobj_priv *node;
        struct nvkm_instobj_args *args = data;
+       struct nvkm_subdev *subdev = (void *)priv;
        int ret;
 
        if (!args->align)
@@ -72,8 +78,10 @@ nv04_instobj_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
        if (ret)
                return ret;
 
+       mutex_lock(&subdev->mutex);
        ret = nvkm_mm_head(&priv->heap, 0, 1, args->size, args->size,
                           args->align, &node->mem);
+       mutex_unlock(&subdev->mutex);
        if (ret)
                return ret;
 
index dd39f434b4a7eccf1446ff5cd1857a2cf5de972d..c3872598b85a3856787b1bf0b7113633a468e020 100644 (file)
@@ -2299,8 +2299,7 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder,
        encoder_mode = atombios_get_encoder_mode(encoder);
        if (connector && (radeon_audio != 0) &&
            ((encoder_mode == ATOM_ENCODER_MODE_HDMI) ||
-            (ENCODER_MODE_IS_DP(encoder_mode) &&
-             drm_detect_monitor_audio(radeon_connector_edid(connector)))))
+            ENCODER_MODE_IS_DP(encoder_mode)))
                radeon_audio_mode_set(encoder, adjusted_mode);
 }
 
index 68fd9fc677e35f1ca161295694301ec21c089e4d..44480c1b9738cc2625cee481ccf02aa20ed6bb1f 100644 (file)
@@ -93,30 +93,26 @@ void dce6_afmt_select_pin(struct drm_encoder *encoder)
        struct radeon_device *rdev = encoder->dev->dev_private;
        struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
        struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
-       u32 offset;
 
-       if (!dig || !dig->afmt || !dig->afmt->pin)
+       if (!dig || !dig->afmt || !dig->pin)
                return;
 
-       offset = dig->afmt->offset;
-
-       WREG32(AFMT_AUDIO_SRC_CONTROL + offset,
-              AFMT_AUDIO_SRC_SELECT(dig->afmt->pin->id));
+       WREG32(AFMT_AUDIO_SRC_CONTROL +  dig->afmt->offset,
+              AFMT_AUDIO_SRC_SELECT(dig->pin->id));
 }
 
 void dce6_afmt_write_latency_fields(struct drm_encoder *encoder,
-               struct drm_connector *connector, struct drm_display_mode *mode)
+                                   struct drm_connector *connector,
+                                   struct drm_display_mode *mode)
 {
        struct radeon_device *rdev = encoder->dev->dev_private;
        struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
        struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
-       u32 tmp = 0, offset;
+       u32 tmp = 0;
 
-       if (!dig || !dig->afmt || !dig->afmt->pin)
+       if (!dig || !dig->afmt || !dig->pin)
                return;
 
-       offset = dig->afmt->pin->offset;
-
        if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
                if (connector->latency_present[1])
                        tmp = VIDEO_LIPSYNC(connector->video_latency[1]) |
@@ -130,24 +126,24 @@ void dce6_afmt_write_latency_fields(struct drm_encoder *encoder,
                else
                        tmp = VIDEO_LIPSYNC(0) | AUDIO_LIPSYNC(0);
        }
-       WREG32_ENDPOINT(offset, AZ_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC, tmp);
+       WREG32_ENDPOINT(dig->pin->offset,
+                       AZ_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC, tmp);
 }
 
 void dce6_afmt_hdmi_write_speaker_allocation(struct drm_encoder *encoder,
-       u8 *sadb, int sad_count)
+                                            u8 *sadb, int sad_count)
 {
        struct radeon_device *rdev = encoder->dev->dev_private;
        struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
        struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
-       u32 offset, tmp;
+       u32 tmp;
 
-       if (!dig || !dig->afmt || !dig->afmt->pin)
+       if (!dig || !dig->afmt || !dig->pin)
                return;
 
-       offset = dig->afmt->pin->offset;
-
        /* program the speaker allocation */
-       tmp = RREG32_ENDPOINT(offset, AZ_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER);
+       tmp = RREG32_ENDPOINT(dig->pin->offset,
+                             AZ_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER);
        tmp &= ~(DP_CONNECTION | SPEAKER_ALLOCATION_MASK);
        /* set HDMI mode */
        tmp |= HDMI_CONNECTION;
@@ -155,24 +151,24 @@ void dce6_afmt_hdmi_write_speaker_allocation(struct drm_encoder *encoder,
                tmp |= SPEAKER_ALLOCATION(sadb[0]);
        else
                tmp |= SPEAKER_ALLOCATION(5); /* stereo */
-       WREG32_ENDPOINT(offset, AZ_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER, tmp);
+       WREG32_ENDPOINT(dig->pin->offset,
+                       AZ_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER, tmp);
 }
 
 void dce6_afmt_dp_write_speaker_allocation(struct drm_encoder *encoder,
-       u8 *sadb, int sad_count)
+                                          u8 *sadb, int sad_count)
 {
        struct radeon_device *rdev = encoder->dev->dev_private;
        struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
        struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
-       u32 offset, tmp;
+       u32 tmp;
 
-       if (!dig || !dig->afmt || !dig->afmt->pin)
+       if (!dig || !dig->afmt || !dig->pin)
                return;
 
-       offset = dig->afmt->pin->offset;
-
        /* program the speaker allocation */
-       tmp = RREG32_ENDPOINT(offset, AZ_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER);
+       tmp = RREG32_ENDPOINT(dig->pin->offset,
+                             AZ_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER);
        tmp &= ~(HDMI_CONNECTION | SPEAKER_ALLOCATION_MASK);
        /* set DP mode */
        tmp |= DP_CONNECTION;
@@ -180,13 +176,13 @@ void dce6_afmt_dp_write_speaker_allocation(struct drm_encoder *encoder,
                tmp |= SPEAKER_ALLOCATION(sadb[0]);
        else
                tmp |= SPEAKER_ALLOCATION(5); /* stereo */
-       WREG32_ENDPOINT(offset, AZ_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER, tmp);
+       WREG32_ENDPOINT(dig->pin->offset,
+                       AZ_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER, tmp);
 }
 
 void dce6_afmt_write_sad_regs(struct drm_encoder *encoder,
-       struct cea_sad *sads, int sad_count)
+                             struct cea_sad *sads, int sad_count)
 {
-       u32 offset;
        int i;
        struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
        struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
@@ -206,11 +202,9 @@ void dce6_afmt_write_sad_regs(struct drm_encoder *encoder,
                { AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13, HDMI_AUDIO_CODING_TYPE_WMA_PRO },
        };
 
-       if (!dig || !dig->afmt || !dig->afmt->pin)
+       if (!dig || !dig->afmt || !dig->pin)
                return;
 
-       offset = dig->afmt->pin->offset;
-
        for (i = 0; i < ARRAY_SIZE(eld_reg_to_type); i++) {
                u32 value = 0;
                u8 stereo_freqs = 0;
@@ -237,7 +231,7 @@ void dce6_afmt_write_sad_regs(struct drm_encoder *encoder,
 
                value |= SUPPORTED_FREQUENCIES_STEREO(stereo_freqs);
 
-               WREG32_ENDPOINT(offset, eld_reg_to_type[i][0], value);
+               WREG32_ENDPOINT(dig->pin->offset, eld_reg_to_type[i][0], value);
        }
 }
 
@@ -253,7 +247,7 @@ void dce6_audio_enable(struct radeon_device *rdev,
 }
 
 void dce6_hdmi_audio_set_dto(struct radeon_device *rdev,
-       struct radeon_crtc *crtc, unsigned int clock)
+                            struct radeon_crtc *crtc, unsigned int clock)
 {
        /* Two dtos; generally use dto0 for HDMI */
        u32 value = 0;
@@ -272,7 +266,7 @@ void dce6_hdmi_audio_set_dto(struct radeon_device *rdev,
 }
 
 void dce6_dp_audio_set_dto(struct radeon_device *rdev,
-       struct radeon_crtc *crtc, unsigned int clock)
+                          struct radeon_crtc *crtc, unsigned int clock)
 {
        /* Two dtos; generally use dto1 for DP */
        u32 value = 0;
index fa719c53449bcd90e009e1b59d1b3e1ed5bff6f3..fbc8d88d6e5de1afe43c11884340e90bfa8e57a2 100644 (file)
@@ -245,6 +245,28 @@ static struct radeon_audio_funcs dce6_dp_funcs = {
 static void radeon_audio_enable(struct radeon_device *rdev,
                                struct r600_audio_pin *pin, u8 enable_mask)
 {
+       struct drm_encoder *encoder;
+       struct radeon_encoder *radeon_encoder;
+       struct radeon_encoder_atom_dig *dig;
+       int pin_count = 0;
+
+       if (!pin)
+               return;
+
+       if (rdev->mode_info.mode_config_initialized) {
+               list_for_each_entry(encoder, &rdev->ddev->mode_config.encoder_list, head) {
+                       if (radeon_encoder_is_digital(encoder)) {
+                               radeon_encoder = to_radeon_encoder(encoder);
+                               dig = radeon_encoder->enc_priv;
+                               if (dig->pin == pin)
+                                       pin_count++;
+                       }
+               }
+
+               if ((pin_count > 1) && (enable_mask == 0))
+                       return;
+       }
+
        if (rdev->audio.funcs->enable)
                rdev->audio.funcs->enable(rdev, pin, enable_mask);
 }
@@ -336,24 +358,13 @@ void radeon_audio_endpoint_wreg(struct radeon_device *rdev, u32 offset,
 
 static void radeon_audio_write_sad_regs(struct drm_encoder *encoder)
 {
-       struct radeon_encoder *radeon_encoder;
-       struct drm_connector *connector;
-       struct radeon_connector *radeon_connector = NULL;
+       struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
+       struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
        struct cea_sad *sads;
        int sad_count;
 
-       list_for_each_entry(connector,
-               &encoder->dev->mode_config.connector_list, head) {
-               if (connector->encoder == encoder) {
-                       radeon_connector = to_radeon_connector(connector);
-                       break;
-               }
-       }
-
-       if (!radeon_connector) {
-               DRM_ERROR("Couldn't find encoder's connector\n");
+       if (!connector)
                return;
-       }
 
        sad_count = drm_edid_to_sad(radeon_connector_edid(connector), &sads);
        if (sad_count <= 0) {
@@ -362,8 +373,6 @@ static void radeon_audio_write_sad_regs(struct drm_encoder *encoder)
        }
        BUG_ON(!sads);
 
-       radeon_encoder = to_radeon_encoder(encoder);
-
        if (radeon_encoder->audio && radeon_encoder->audio->write_sad_regs)
                radeon_encoder->audio->write_sad_regs(encoder, sads, sad_count);
 
@@ -372,27 +381,16 @@ static void radeon_audio_write_sad_regs(struct drm_encoder *encoder)
 
 static void radeon_audio_write_speaker_allocation(struct drm_encoder *encoder)
 {
+       struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
        struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
-       struct drm_connector *connector;
-       struct radeon_connector *radeon_connector = NULL;
        u8 *sadb = NULL;
        int sad_count;
 
-       list_for_each_entry(connector,
-                           &encoder->dev->mode_config.connector_list, head) {
-               if (connector->encoder == encoder) {
-                       radeon_connector = to_radeon_connector(connector);
-                       break;
-               }
-       }
-
-       if (!radeon_connector) {
-               DRM_ERROR("Couldn't find encoder's connector\n");
+       if (!connector)
                return;
-       }
 
-       sad_count = drm_edid_to_speaker_allocation(
-               radeon_connector_edid(connector), &sadb);
+       sad_count = drm_edid_to_speaker_allocation(radeon_connector_edid(connector),
+                                                  &sadb);
        if (sad_count < 0) {
                DRM_DEBUG("Couldn't read Speaker Allocation Data Block: %d\n",
                          sad_count);
@@ -406,26 +404,13 @@ static void radeon_audio_write_speaker_allocation(struct drm_encoder *encoder)
 }
 
 static void radeon_audio_write_latency_fields(struct drm_encoder *encoder,
-       struct drm_display_mode *mode)
+                                             struct drm_display_mode *mode)
 {
-       struct radeon_encoder *radeon_encoder;
-       struct drm_connector *connector;
-       struct radeon_connector *radeon_connector = 0;
-
-       list_for_each_entry(connector,
-               &encoder->dev->mode_config.connector_list, head) {
-               if (connector->encoder == encoder) {
-                       radeon_connector = to_radeon_connector(connector);
-                       break;
-               }
-       }
+       struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
+       struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
 
-       if (!radeon_connector) {
-               DRM_ERROR("Couldn't find encoder's connector\n");
+       if (!connector)
                return;
-       }
-
-       radeon_encoder = to_radeon_encoder(encoder);
 
        if (radeon_encoder->audio && radeon_encoder->audio->write_latency_fields)
                radeon_encoder->audio->write_latency_fields(encoder, connector, mode);
@@ -451,29 +436,23 @@ static void radeon_audio_select_pin(struct drm_encoder *encoder)
 }
 
 void radeon_audio_detect(struct drm_connector *connector,
+                        struct drm_encoder *encoder,
                         enum drm_connector_status status)
 {
-       struct radeon_device *rdev;
-       struct radeon_encoder *radeon_encoder;
+       struct drm_device *dev = connector->dev;
+       struct radeon_device *rdev = dev->dev_private;
+       struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
        struct radeon_encoder_atom_dig *dig;
 
-       if (!connector || !connector->encoder)
+       if (!radeon_audio_chipset_supported(rdev))
                return;
 
-       rdev = connector->encoder->dev->dev_private;
-
-       if (!radeon_audio_chipset_supported(rdev))
+       if (!radeon_encoder_is_digital(encoder))
                return;
 
-       radeon_encoder = to_radeon_encoder(connector->encoder);
        dig = radeon_encoder->enc_priv;
 
        if (status == connector_status_connected) {
-               if (!drm_detect_monitor_audio(radeon_connector_edid(connector))) {
-                       radeon_encoder->audio = NULL;
-                       return;
-               }
-
                if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) {
                        struct radeon_connector *radeon_connector = to_radeon_connector(connector);
 
@@ -486,11 +465,17 @@ void radeon_audio_detect(struct drm_connector *connector,
                        radeon_encoder->audio = rdev->audio.hdmi_funcs;
                }
 
-               dig->afmt->pin = radeon_audio_get_pin(connector->encoder);
-               radeon_audio_enable(rdev, dig->afmt->pin, 0xf);
+               if (drm_detect_monitor_audio(radeon_connector_edid(connector))) {
+                       if (!dig->pin)
+                               dig->pin = radeon_audio_get_pin(encoder);
+                       radeon_audio_enable(rdev, dig->pin, 0xf);
+               } else {
+                       radeon_audio_enable(rdev, dig->pin, 0);
+                       dig->pin = NULL;
+               }
        } else {
-               radeon_audio_enable(rdev, dig->afmt->pin, 0);
-               dig->afmt->pin = NULL;
+               radeon_audio_enable(rdev, dig->pin, 0);
+               dig->pin = NULL;
        }
 }
 
@@ -518,29 +503,18 @@ static void radeon_audio_set_dto(struct drm_encoder *encoder, unsigned int clock
 }
 
 static int radeon_audio_set_avi_packet(struct drm_encoder *encoder,
-       struct drm_display_mode *mode)
+                                      struct drm_display_mode *mode)
 {
        struct radeon_device *rdev = encoder->dev->dev_private;
        struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
        struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
-       struct drm_connector *connector;
-       struct radeon_connector *radeon_connector = NULL;
+       struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
        u8 buffer[HDMI_INFOFRAME_HEADER_SIZE + HDMI_AVI_INFOFRAME_SIZE];
        struct hdmi_avi_infoframe frame;
        int err;
 
-       list_for_each_entry(connector,
-               &encoder->dev->mode_config.connector_list, head) {
-               if (connector->encoder == encoder) {
-                       radeon_connector = to_radeon_connector(connector);
-                       break;
-               }
-       }
-
-       if (!radeon_connector) {
-               DRM_ERROR("Couldn't find encoder's connector\n");
-               return -ENOENT;
-       }
+       if (!connector)
+               return -EINVAL;
 
        err = drm_hdmi_avi_infoframe_from_display_mode(&frame, mode);
        if (err < 0) {
@@ -563,8 +537,8 @@ static int radeon_audio_set_avi_packet(struct drm_encoder *encoder,
                return err;
        }
 
-       if (dig && dig->afmt &&
-               radeon_encoder->audio && radeon_encoder->audio->set_avi_packet)
+       if (dig && dig->afmt && radeon_encoder->audio &&
+           radeon_encoder->audio->set_avi_packet)
                radeon_encoder->audio->set_avi_packet(rdev, dig->afmt->offset,
                        buffer, sizeof(buffer));
 
@@ -722,30 +696,41 @@ static void radeon_audio_hdmi_mode_set(struct drm_encoder *encoder,
 {
        struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
        struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+       struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
 
        if (!dig || !dig->afmt)
                return;
 
-       radeon_audio_set_mute(encoder, true);
+       if (!connector)
+               return;
 
-       radeon_audio_write_speaker_allocation(encoder);
-       radeon_audio_write_sad_regs(encoder);
-       radeon_audio_write_latency_fields(encoder, mode);
-       radeon_audio_set_dto(encoder, mode->clock);
-       radeon_audio_set_vbi_packet(encoder);
-       radeon_hdmi_set_color_depth(encoder);
-       radeon_audio_update_acr(encoder, mode->clock);
-       radeon_audio_set_audio_packet(encoder);
-       radeon_audio_select_pin(encoder);
+       if (drm_detect_monitor_audio(radeon_connector_edid(connector))) {
+               radeon_audio_set_mute(encoder, true);
 
-       if (radeon_audio_set_avi_packet(encoder, mode) < 0)
-               return;
+               radeon_audio_write_speaker_allocation(encoder);
+               radeon_audio_write_sad_regs(encoder);
+               radeon_audio_write_latency_fields(encoder, mode);
+               radeon_audio_set_dto(encoder, mode->clock);
+               radeon_audio_set_vbi_packet(encoder);
+               radeon_hdmi_set_color_depth(encoder);
+               radeon_audio_update_acr(encoder, mode->clock);
+               radeon_audio_set_audio_packet(encoder);
+               radeon_audio_select_pin(encoder);
+
+               if (radeon_audio_set_avi_packet(encoder, mode) < 0)
+                       return;
 
-       radeon_audio_set_mute(encoder, false);
+               radeon_audio_set_mute(encoder, false);
+       } else {
+               radeon_hdmi_set_color_depth(encoder);
+
+               if (radeon_audio_set_avi_packet(encoder, mode) < 0)
+                       return;
+       }
 }
 
 static void radeon_audio_dp_mode_set(struct drm_encoder *encoder,
-       struct drm_display_mode *mode)
+                                    struct drm_display_mode *mode)
 {
        struct drm_device *dev = encoder->dev;
        struct radeon_device *rdev = dev->dev_private;
@@ -759,22 +744,27 @@ static void radeon_audio_dp_mode_set(struct drm_encoder *encoder,
        if (!dig || !dig->afmt)
                return;
 
-       radeon_audio_write_speaker_allocation(encoder);
-       radeon_audio_write_sad_regs(encoder);
-       radeon_audio_write_latency_fields(encoder, mode);
-       if (rdev->clock.dp_extclk || ASIC_IS_DCE5(rdev))
-               radeon_audio_set_dto(encoder, rdev->clock.default_dispclk * 10);
-       else
-               radeon_audio_set_dto(encoder, dig_connector->dp_clock);
-       radeon_audio_set_audio_packet(encoder);
-       radeon_audio_select_pin(encoder);
-
-       if (radeon_audio_set_avi_packet(encoder, mode) < 0)
+       if (!connector)
                return;
+
+       if (drm_detect_monitor_audio(radeon_connector_edid(connector))) {
+               radeon_audio_write_speaker_allocation(encoder);
+               radeon_audio_write_sad_regs(encoder);
+               radeon_audio_write_latency_fields(encoder, mode);
+               if (rdev->clock.dp_extclk || ASIC_IS_DCE5(rdev))
+                       radeon_audio_set_dto(encoder, rdev->clock.default_dispclk * 10);
+               else
+                       radeon_audio_set_dto(encoder, dig_connector->dp_clock);
+               radeon_audio_set_audio_packet(encoder);
+               radeon_audio_select_pin(encoder);
+
+               if (radeon_audio_set_avi_packet(encoder, mode) < 0)
+                       return;
+       }
 }
 
 void radeon_audio_mode_set(struct drm_encoder *encoder,
-       struct drm_display_mode *mode)
+                          struct drm_display_mode *mode)
 {
        struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
 
index 8438304f7139549c9ab785ec3f1decd50ac3254a..059cc3012062a7b50708620c557771dcfea821c0 100644 (file)
@@ -68,7 +68,8 @@ struct radeon_audio_funcs
 
 int radeon_audio_init(struct radeon_device *rdev);
 void radeon_audio_detect(struct drm_connector *connector,
-       enum drm_connector_status status);
+                        struct drm_encoder *encoder,
+                        enum drm_connector_status status);
 u32 radeon_audio_endpoint_rreg(struct radeon_device *rdev,
        u32 offset, u32 reg);
 void radeon_audio_endpoint_wreg(struct radeon_device *rdev,
index 3e5f6b71f3adad72b0aae490ede2ae1f426c9ff5..c097d3a82bda734888ca46d05c14794738163642 100644 (file)
@@ -1255,10 +1255,15 @@ struct radeon_encoder_lvds *radeon_combios_get_lvds_info(struct radeon_encoder
 
                        if ((RBIOS16(tmp) == lvds->native_mode.hdisplay) &&
                            (RBIOS16(tmp + 2) == lvds->native_mode.vdisplay)) {
+                               u32 hss = (RBIOS16(tmp + 21) - RBIOS16(tmp + 19) - 1) * 8;
+
+                               if (hss > lvds->native_mode.hdisplay)
+                                       hss = (10 - 1) * 8;
+
                                lvds->native_mode.htotal = lvds->native_mode.hdisplay +
                                        (RBIOS16(tmp + 17) - RBIOS16(tmp + 19)) * 8;
                                lvds->native_mode.hsync_start = lvds->native_mode.hdisplay +
-                                       (RBIOS16(tmp + 21) - RBIOS16(tmp + 19) - 1) * 8;
+                                       hss;
                                lvds->native_mode.hsync_end = lvds->native_mode.hsync_start +
                                        (RBIOS8(tmp + 23) * 8);
 
index cebb65e07e1d13f0bee01ee753f4c8a76e0e22d5..94b21ae70ef725781c8dc7d0ec8a30fbd48f0e0e 100644 (file)
@@ -1379,8 +1379,16 @@ out:
        /* updated in get modes as well since we need to know if it's analog or digital */
        radeon_connector_update_scratch_regs(connector, ret);
 
-       if (radeon_audio != 0)
-               radeon_audio_detect(connector, ret);
+       if ((radeon_audio != 0) && radeon_connector->use_digital) {
+               const struct drm_connector_helper_funcs *connector_funcs =
+                       connector->helper_private;
+
+               encoder = connector_funcs->best_encoder(connector);
+               if (encoder && (encoder->encoder_type == DRM_MODE_ENCODER_TMDS)) {
+                       radeon_connector_get_edid(connector);
+                       radeon_audio_detect(connector, encoder, ret);
+               }
+       }
 
 exit:
        pm_runtime_mark_last_busy(connector->dev->dev);
@@ -1717,8 +1725,10 @@ radeon_dp_detect(struct drm_connector *connector, bool force)
 
        radeon_connector_update_scratch_regs(connector, ret);
 
-       if (radeon_audio != 0)
-               radeon_audio_detect(connector, ret);
+       if ((radeon_audio != 0) && encoder) {
+               radeon_connector_get_edid(connector);
+               radeon_audio_detect(connector, encoder, ret);
+       }
 
 out:
        pm_runtime_mark_last_busy(connector->dev->dev);
index 07909d817381a21a18fdeb5dd7cd8b4be92fbc8e..aecc3e3dec0ca093441e3871df414627b51e92ec 100644 (file)
@@ -237,7 +237,6 @@ struct radeon_afmt {
        int offset;
        bool last_buffer_filled_status;
        int id;
-       struct r600_audio_pin *pin;
 };
 
 struct radeon_mode_info {
@@ -439,6 +438,7 @@ struct radeon_encoder_atom_dig {
        uint8_t backlight_level;
        int panel_mode;
        struct radeon_afmt *afmt;
+       struct r600_audio_pin *pin;
        int active_mst_links;
 };
 
index 882cccdad27249c8e3afa992419fb0fb16bec97b..ac6fe40b99f753b267a5e78647c0bdb2e078c5bd 100644
@@ -490,7 +490,8 @@ pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp)
        else if (boot_cpu_data.x86 > 3)
                tmp = pgprot_noncached(tmp);
 #endif
-#if defined(__ia64__) || defined(__arm__) || defined(__powerpc__)
+#if defined(__ia64__) || defined(__arm__) || defined(__aarch64__) || \
+    defined(__powerpc__)
        if (caching_flags & TTM_PL_FLAG_WC)
                tmp = pgprot_writecombine(tmp);
        else
index f822fd2a1adabc4b3e53d86155c2d9e50d3d241e..884d82f9190e214636a539cb2db884a1d175c945 100644
@@ -546,6 +546,12 @@ static const struct hid_device_id apple_devices[] = {
                .driver_data = APPLE_HAS_FN | APPLE_ISO_KEYBOARD },
        { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING8_JIS),
                .driver_data = APPLE_HAS_FN | APPLE_RDESC_JIS },
+       { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING9_ANSI),
+               .driver_data = APPLE_HAS_FN },
+       { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING9_ISO),
+               .driver_data = APPLE_HAS_FN | APPLE_ISO_KEYBOARD },
+       { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING9_JIS),
+               .driver_data = APPLE_HAS_FN | APPLE_RDESC_JIS },
        { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ANSI),
                .driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN },
        { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ISO),
index 157c627750535e8943769e3078de068e4f1c7a47..e6fce23b121adb662656bea26d8a4ace812afc9c 100644
@@ -1782,6 +1782,9 @@ static const struct hid_device_id hid_have_special_driver[] = {
        { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING8_ANSI) },
        { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING8_ISO) },
        { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING8_JIS) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING9_ANSI) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING9_ISO) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING9_JIS) },
        { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ANSI) },
        { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ISO) },
        { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_JIS) },
@@ -2463,6 +2466,9 @@ static const struct hid_device_id hid_mouse_ignore_list[] = {
        { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING8_ANSI) },
        { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING8_ISO) },
        { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING8_JIS) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING9_ANSI) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING9_ISO) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING9_JIS) },
        { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY) },
        { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY) },
        { }
index 3318de690e00666bf7da60c84189a1fc54a0a544..a2dbbbe0d8d7e81b06ac6d646737413fe4d1d357 100644
@@ -356,6 +356,8 @@ static int cp2112_read(struct cp2112_device *dev, u8 *data, size_t size)
        struct cp2112_force_read_report report;
        int ret;
 
+       if (size > sizeof(dev->read_data))
+               size = sizeof(dev->read_data);
        report.report = CP2112_DATA_READ_FORCE_SEND;
        report.length = cpu_to_be16(size);
 
index b04b0820d816323a01d147c702503b0797734ea4..b3b225b75d0ab7e497c389d0c9caa777da1f6948 100644
 #define USB_DEVICE_ID_APPLE_WELLSPRING8_ANSI   0x0290
 #define USB_DEVICE_ID_APPLE_WELLSPRING8_ISO    0x0291
 #define USB_DEVICE_ID_APPLE_WELLSPRING8_JIS    0x0292
+#define USB_DEVICE_ID_APPLE_WELLSPRING9_ANSI   0x0272
+#define USB_DEVICE_ID_APPLE_WELLSPRING9_ISO            0x0273
+#define USB_DEVICE_ID_APPLE_WELLSPRING9_JIS            0x0274
 #define USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY   0x030a
 #define USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY    0x030b
 #define USB_DEVICE_ID_APPLE_IRCONTROL  0x8240
index 6a9b05b328a9d40e06b09ed8f8c6e6010669374b..7c811252c1cefebb2418944a7f391117a8a92b6d 100644
@@ -778,9 +778,16 @@ static int mt_input_mapping(struct hid_device *hdev, struct hid_input *hi,
        /*
         * some egalax touchscreens have "application == HID_DG_TOUCHSCREEN"
         * for the stylus.
+        * The check for mt_report_id ensures we don't process
+        * HID_DG_CONTACTCOUNT from the pen report as it is outside the physical
+        * collection, but within the report ID.
         */
        if (field->physical == HID_DG_STYLUS)
                return 0;
+       else if ((field->physical == 0) &&
+                (field->report->id != td->mt_report_id) &&
+                (td->mt_report_id != -1))
+               return 0;
 
        if (field->application == HID_DG_TOUCHSCREEN ||
            field->application == HID_DG_TOUCHPAD)
index 53e7de7cb9e25e6861f1acb5e3438e590b2e70ad..20f9a653444c21d0ef89f4561d4d68544dff4bf0 100644
@@ -87,6 +87,9 @@ static const struct hid_blacklist {
        { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_MOUSE_C05A, HID_QUIRK_ALWAYS_POLL },
        { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_MOUSE_C06A, HID_QUIRK_ALWAYS_POLL },
        { USB_VENDOR_ID_MGE, USB_DEVICE_ID_MGE_UPS, HID_QUIRK_NOGET },
+       { USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_SURFACE_PRO_2, HID_QUIRK_NO_INIT_REPORTS },
+       { USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_2, HID_QUIRK_NO_INIT_REPORTS },
+       { USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TOUCH_COVER_2, HID_QUIRK_NO_INIT_REPORTS },
        { USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_3, HID_QUIRK_NO_INIT_REPORTS },
        { USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_3_JP, HID_QUIRK_NO_INIT_REPORTS },
        { USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_POWER_COVER, HID_QUIRK_NO_INIT_REPORTS },
index 4c0ffca97befd61cf3cdd947138d14070c4e7895..44958d79d598dfc3a7e6938a2babbf3e1fdc2188 100644
@@ -1271,11 +1271,13 @@ fail_leds:
        pad_input_dev = NULL;
        wacom_wac->pad_registered = false;
 fail_register_pad_input:
-       input_unregister_device(touch_input_dev);
+       if (touch_input_dev)
+               input_unregister_device(touch_input_dev);
        wacom_wac->touch_input = NULL;
        wacom_wac->touch_registered = false;
 fail_register_touch_input:
-       input_unregister_device(pen_input_dev);
+       if (pen_input_dev)
+               input_unregister_device(pen_input_dev);
        wacom_wac->pen_input = NULL;
        wacom_wac->pen_registered = false;
 fail_register_pen_input:
index 232da89f4e886fe02b82d452c1a0868f0b65b967..0d244239e55def103f786254f3f617a84b2f0ba2 100644
@@ -2213,6 +2213,9 @@ void wacom_setup_device_quirks(struct wacom *wacom)
                        features->x_max = 4096;
                        features->y_max = 4096;
                }
+               else if (features->pktlen == WACOM_PKGLEN_BBTOUCH) {
+                       features->device_type |= WACOM_DEVICETYPE_PAD;
+               }
        }
 
        /*
index 28fcb2e246d55a7acc52703e434b98de3e22c45b..fbfc02bb2cfa13c5bc22ece3a47a3cac1af07005 100644
@@ -195,7 +195,7 @@ abort:
 }
 
 static int nct7802_write_voltage(struct nct7802_data *data, int nr, int index,
-                                unsigned int voltage)
+                                unsigned long voltage)
 {
        int shift = 8 - REG_VOLTAGE_LIMIT_MSB_SHIFT[index - 1][nr];
        int err;
index b77b82f244800628843c103ae7dfcf157bd774ce..6153df735e82ca4fd3d605e159510675410546fc 100644
@@ -412,8 +412,9 @@ static ssize_t show_pwm(struct device *dev,
        return sprintf(buf, "%d\n", val);
 }
 
-static ssize_t store_mode(struct device *dev, struct device_attribute *devattr,
-                         const char *buf, size_t count)
+static ssize_t store_enable(struct device *dev,
+                           struct device_attribute *devattr,
+                           const char *buf, size_t count)
 {
        int index = to_sensor_dev_attr(devattr)->index;
        struct nct7904_data *data = dev_get_drvdata(dev);
@@ -422,18 +423,18 @@ static ssize_t store_mode(struct device *dev, struct device_attribute *devattr,
 
        if (kstrtoul(buf, 10, &val) < 0)
                return -EINVAL;
-       if (val > 1 || (val && !data->fan_mode[index]))
+       if (val < 1 || val > 2 || (val == 2 && !data->fan_mode[index]))
                return -EINVAL;
 
        ret = nct7904_write_reg(data, BANK_3, FANCTL1_FMR_REG + index,
-                               val ? data->fan_mode[index] : 0);
+                               val == 2 ? data->fan_mode[index] : 0);
 
        return ret ? ret : count;
 }
 
-/* Return 0 for manual mode or 1 for SmartFan mode */
-static ssize_t show_mode(struct device *dev,
-                        struct device_attribute *devattr, char *buf)
+/* Return 1 for manual mode or 2 for SmartFan mode */
+static ssize_t show_enable(struct device *dev,
+                          struct device_attribute *devattr, char *buf)
 {
        int index = to_sensor_dev_attr(devattr)->index;
        struct nct7904_data *data = dev_get_drvdata(dev);
@@ -443,36 +444,36 @@ static ssize_t show_mode(struct device *dev,
        if (val < 0)
                return val;
 
-       return sprintf(buf, "%d\n", val ? 1 : 0);
+       return sprintf(buf, "%d\n", val ? 2 : 1);
 }
 
 /* 2 attributes per channel: pwm and mode */
-static SENSOR_DEVICE_ATTR(fan1_pwm, S_IRUGO | S_IWUSR,
+static SENSOR_DEVICE_ATTR(pwm1, S_IRUGO | S_IWUSR,
                        show_pwm, store_pwm, 0);
-static SENSOR_DEVICE_ATTR(fan1_mode, S_IRUGO | S_IWUSR,
-                       show_mode, store_mode, 0);
-static SENSOR_DEVICE_ATTR(fan2_pwm, S_IRUGO | S_IWUSR,
+static SENSOR_DEVICE_ATTR(pwm1_enable, S_IRUGO | S_IWUSR,
+                       show_enable, store_enable, 0);
+static SENSOR_DEVICE_ATTR(pwm2, S_IRUGO | S_IWUSR,
                        show_pwm, store_pwm, 1);
-static SENSOR_DEVICE_ATTR(fan2_mode, S_IRUGO | S_IWUSR,
-                       show_mode, store_mode, 1);
-static SENSOR_DEVICE_ATTR(fan3_pwm, S_IRUGO | S_IWUSR,
+static SENSOR_DEVICE_ATTR(pwm2_enable, S_IRUGO | S_IWUSR,
+                       show_enable, store_enable, 1);
+static SENSOR_DEVICE_ATTR(pwm3, S_IRUGO | S_IWUSR,
                        show_pwm, store_pwm, 2);
-static SENSOR_DEVICE_ATTR(fan3_mode, S_IRUGO | S_IWUSR,
-                       show_mode, store_mode, 2);
-static SENSOR_DEVICE_ATTR(fan4_pwm, S_IRUGO | S_IWUSR,
+static SENSOR_DEVICE_ATTR(pwm3_enable, S_IRUGO | S_IWUSR,
+                       show_enable, store_enable, 2);
+static SENSOR_DEVICE_ATTR(pwm4, S_IRUGO | S_IWUSR,
                        show_pwm, store_pwm, 3);
-static SENSOR_DEVICE_ATTR(fan4_mode, S_IRUGO | S_IWUSR,
-                       show_mode, store_mode, 3);
+static SENSOR_DEVICE_ATTR(pwm4_enable, S_IRUGO | S_IWUSR,
+                       show_enable, store_enable, 3);
 
 static struct attribute *nct7904_fanctl_attrs[] = {
-       &sensor_dev_attr_fan1_pwm.dev_attr.attr,
-       &sensor_dev_attr_fan1_mode.dev_attr.attr,
-       &sensor_dev_attr_fan2_pwm.dev_attr.attr,
-       &sensor_dev_attr_fan2_mode.dev_attr.attr,
-       &sensor_dev_attr_fan3_pwm.dev_attr.attr,
-       &sensor_dev_attr_fan3_mode.dev_attr.attr,
-       &sensor_dev_attr_fan4_pwm.dev_attr.attr,
-       &sensor_dev_attr_fan4_mode.dev_attr.attr,
+       &sensor_dev_attr_pwm1.dev_attr.attr,
+       &sensor_dev_attr_pwm1_enable.dev_attr.attr,
+       &sensor_dev_attr_pwm2.dev_attr.attr,
+       &sensor_dev_attr_pwm2_enable.dev_attr.attr,
+       &sensor_dev_attr_pwm3.dev_attr.attr,
+       &sensor_dev_attr_pwm3_enable.dev_attr.attr,
+       &sensor_dev_attr_pwm4.dev_attr.attr,
+       &sensor_dev_attr_pwm4_enable.dev_attr.attr,
        NULL
 };
 
index e8e2077c7244b5623efc8cd07521a9105dfbe192..13ea1ea23328501f4969c5453e603cf67e5b81b2 100644
@@ -557,21 +557,21 @@ static void mma8452_transient_interrupt(struct iio_dev *indio_dev)
        if (src & MMA8452_TRANSIENT_SRC_XTRANSE)
                iio_push_event(indio_dev,
                               IIO_MOD_EVENT_CODE(IIO_ACCEL, 0, IIO_MOD_X,
-                                                 IIO_EV_TYPE_THRESH,
+                                                 IIO_EV_TYPE_MAG,
                                                  IIO_EV_DIR_RISING),
                               ts);
 
        if (src & MMA8452_TRANSIENT_SRC_YTRANSE)
                iio_push_event(indio_dev,
                               IIO_MOD_EVENT_CODE(IIO_ACCEL, 0, IIO_MOD_Y,
-                                                 IIO_EV_TYPE_THRESH,
+                                                 IIO_EV_TYPE_MAG,
                                                  IIO_EV_DIR_RISING),
                               ts);
 
        if (src & MMA8452_TRANSIENT_SRC_ZTRANSE)
                iio_push_event(indio_dev,
                               IIO_MOD_EVENT_CODE(IIO_ACCEL, 0, IIO_MOD_Z,
-                                                 IIO_EV_TYPE_THRESH,
+                                                 IIO_EV_TYPE_MAG,
                                                  IIO_EV_DIR_RISING),
                               ts);
 }
@@ -644,7 +644,7 @@ static int mma8452_reg_access_dbg(struct iio_dev *indio_dev,
 
 static const struct iio_event_spec mma8452_transient_event[] = {
        {
-               .type = IIO_EV_TYPE_THRESH,
+               .type = IIO_EV_TYPE_MAG,
                .dir = IIO_EV_DIR_RISING,
                .mask_separate = BIT(IIO_EV_INFO_ENABLE),
                .mask_shared_by_type = BIT(IIO_EV_INFO_VALUE) |
index 8d9c9b9215ddc1ef5530df512d475a618cb71d8f..d819823f725747e5cd4ac8bcb50d3d833093ed2c 100644
@@ -299,6 +299,8 @@ static int mcp320x_probe(struct spi_device *spi)
        indio_dev->channels = chip_info->channels;
        indio_dev->num_channels = chip_info->num_channels;
 
+       adc->chip_info = chip_info;
+
        adc->transfer[0].tx_buf = &adc->tx_buf;
        adc->transfer[0].len = sizeof(adc->tx_buf);
        adc->transfer[1].rx_buf = adc->rx_buf;
index 480f335a0f9faee31bc6b2ac14f7899f18d6c9be..819632bf1fda7fee8fddd2acb3c02d9ff112b1ff 100644
@@ -635,7 +635,7 @@ static int vf610_adc_reg_access(struct iio_dev *indio_dev,
        struct vf610_adc *info = iio_priv(indio_dev);
 
        if ((readval == NULL) ||
-               (!(reg % 4) || (reg > VF610_REG_ADC_PCTL)))
+               ((reg % 4) || (reg > VF610_REG_ADC_PCTL)))
                return -EINVAL;
 
        *readval = readl(info->regs + reg);
index c1a218236be5c54f9829436cbb606c0d758cef74..11a027adc204aeb630c66707eaf9d5d0ce169ebb 100644
@@ -200,7 +200,7 @@ static int stk3310_read_event(struct iio_dev *indio_dev,
                              int *val, int *val2)
 {
        u8 reg;
-       u16 buf;
+       __be16 buf;
        int ret;
        struct stk3310_data *data = iio_priv(indio_dev);
 
@@ -222,7 +222,7 @@ static int stk3310_read_event(struct iio_dev *indio_dev,
                dev_err(&data->client->dev, "register read failed\n");
                return ret;
        }
-       *val = swab16(buf);
+       *val = be16_to_cpu(buf);
 
        return IIO_VAL_INT;
 }
@@ -235,7 +235,7 @@ static int stk3310_write_event(struct iio_dev *indio_dev,
                               int val, int val2)
 {
        u8 reg;
-       u16 buf;
+       __be16 buf;
        int ret;
        unsigned int index;
        struct stk3310_data *data = iio_priv(indio_dev);
@@ -252,7 +252,7 @@ static int stk3310_write_event(struct iio_dev *indio_dev,
        else
                return -EINVAL;
 
-       buf = swab16(val);
+       buf = cpu_to_be16(val);
        ret = regmap_bulk_write(data->regmap, reg, &buf, 2);
        if (ret < 0)
                dev_err(&client->dev, "failed to set PS threshold!\n");
@@ -301,7 +301,7 @@ static int stk3310_read_raw(struct iio_dev *indio_dev,
                            int *val, int *val2, long mask)
 {
        u8 reg;
-       u16 buf;
+       __be16 buf;
        int ret;
        unsigned int index;
        struct stk3310_data *data = iio_priv(indio_dev);
@@ -322,7 +322,7 @@ static int stk3310_read_raw(struct iio_dev *indio_dev,
                        mutex_unlock(&data->lock);
                        return ret;
                }
-               *val = swab16(buf);
+               *val = be16_to_cpu(buf);
                mutex_unlock(&data->lock);
                return IIO_VAL_INT;
        case IIO_CHAN_INFO_INT_TIME:
@@ -608,13 +608,7 @@ static int stk3310_probe(struct i2c_client *client,
        if (ret < 0)
                return ret;
 
-       ret = iio_device_register(indio_dev);
-       if (ret < 0) {
-               dev_err(&client->dev, "device_register failed\n");
-               stk3310_set_state(data, STK3310_STATE_STANDBY);
-       }
-
-       if (client->irq <= 0)
+       if (client->irq < 0)
                client->irq = stk3310_gpio_probe(client);
 
        if (client->irq >= 0) {
@@ -629,6 +623,12 @@ static int stk3310_probe(struct i2c_client *client,
                                        client->irq);
        }
 
+       ret = iio_device_register(indio_dev);
+       if (ret < 0) {
+               dev_err(&client->dev, "device_register failed\n");
+               stk3310_set_state(data, STK3310_STATE_STANDBY);
+       }
+
        return ret;
 }
 
index dcadfc4f06619a1a147d560581bb7bfabdd3f663..efb9350b0d766a0915a2c091c04b9bdea15c5d4e 100644
@@ -90,6 +90,7 @@ config IIO_ST_MAGN_SPI_3AXIS
 config BMC150_MAGN
        tristate "Bosch BMC150 Magnetometer Driver"
        depends on I2C
+       select REGMAP_I2C
        select IIO_BUFFER
        select IIO_TRIGGERED_BUFFER
        help
index d4c1788699911e934fc3c1baffa3de8928ac26a6..1347a1f2e46f8194bad0bfe49bd91ea6a948ac8e 100644
@@ -706,11 +706,11 @@ static int bmc150_magn_init(struct bmc150_magn_data *data)
                goto err_poweroff;
        }
        if (chip_id != BMC150_MAGN_CHIP_ID_VAL) {
-               dev_err(&data->client->dev, "Invalid chip id 0x%x\n", ret);
+               dev_err(&data->client->dev, "Invalid chip id 0x%x\n", chip_id);
                ret = -ENODEV;
                goto err_poweroff;
        }
-       dev_dbg(&data->client->dev, "Chip id %x\n", ret);
+       dev_dbg(&data->client->dev, "Chip id %x\n", chip_id);
 
        preset = bmc150_magn_presets_table[BMC150_MAGN_DEFAULT_PRESET];
        ret = bmc150_magn_set_odr(data, preset.odr);
index d927397a6ef77e1f2e72b576108f475c5d40b2ef..706ebfd6297fa7c0a311a80bfcb431b533d2cc70 100644
@@ -202,8 +202,8 @@ static int mmc35240_hw_set(struct mmc35240_data *data, bool set)
                coil_bit = MMC35240_CTRL0_RESET_BIT;
 
        return regmap_update_bits(data->regmap, MMC35240_REG_CTRL0,
-                                 MMC35240_CTRL0_REFILL_BIT,
-                                 coil_bit);
+                                 coil_bit, coil_bit);
+
 }
 
 static int mmc35240_init(struct mmc35240_data *data)
@@ -222,14 +222,15 @@ static int mmc35240_init(struct mmc35240_data *data)
 
        /*
         * make sure we restore sensor characteristics, by doing
-        * a RESET/SET sequence
+        * a SET/RESET sequence, the axis polarity being naturally
+        * aligned after RESET
         */
-       ret = mmc35240_hw_set(data, false);
+       ret = mmc35240_hw_set(data, true);
        if (ret < 0)
                return ret;
        usleep_range(MMC53240_WAIT_SET_RESET, MMC53240_WAIT_SET_RESET + 1);
 
-       ret = mmc35240_hw_set(data, true);
+       ret = mmc35240_hw_set(data, false);
        if (ret < 0)
                return ret;
 
@@ -503,6 +504,7 @@ static int mmc35240_probe(struct i2c_client *client,
        }
 
        data = iio_priv(indio_dev);
+       i2c_set_clientdata(client, indio_dev);
        data->client = client;
        data->regmap = regmap;
        data->res = MMC35240_16_BITS_SLOW;
index cb2e8ad8bfdcd02e75b9ea4f82e31c598e95109e..7a2b639eaa96e2ea440f3102c5da101d41edcd2b 100644
@@ -204,7 +204,7 @@ static int mlx90614_read_raw(struct iio_dev *indio_dev,
                *val = ret;
                return IIO_VAL_INT;
        case IIO_CHAN_INFO_OFFSET:
-               *val = 13657;
+               *val = -13657;
                *val2 = 500000;
                return IIO_VAL_INT_PLUS_MICRO;
        case IIO_CHAN_INFO_SCALE:
index b1b73232f21702161d7dc068f08e23e6759e04e8..bbbe0184e5922f6dab1fce7c56a92d1422da986e 100644
@@ -736,6 +736,10 @@ static struct ib_mr *iwch_get_dma_mr(struct ib_pd *pd, int acc)
        /*
         * T3 only supports 32 bits of size.
         */
+       if (sizeof(phys_addr_t) > 4) {
+               pr_warn_once(MOD "Cannot support dma_mrs on this platform.\n");
+               return ERR_PTR(-ENOTSUPP);
+       }
        bl.size = 0xffffffff;
        bl.addr = 0;
        kva = 0;
index 2d7e503d13cb5b9c2855936ce162f41d49ca0ced..871dbe56216a27e75ee64a1e4646ca027dd22a27 100644
@@ -31,6 +31,8 @@
  * SOFTWARE.
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/sched.h>
 #include <linux/spinlock.h>
 #include <linux/idr.h>
@@ -399,8 +401,8 @@ static int ipath_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
        u32 bar0 = 0, bar1 = 0;
 
 #ifdef CONFIG_X86_64
-       if (WARN(pat_enabled(),
-                "ipath needs PAT disabled, boot with nopat kernel parameter\n")) {
+       if (pat_enabled()) {
+               pr_warn("ipath needs PAT disabled, boot with nopat kernel parameter\n");
                ret = -ENODEV;
                goto bail;
        }
index b396344fae16af33153f0625104d95c17f488a8d..6a36338593cd0a1c09b1ad67c1d7d57f856b0e93 100644
@@ -1,21 +1,36 @@
-/*******************************************************************
- * This file is part of the Emulex RoCE Device Driver for          *
- * RoCE (RDMA over Converged Ethernet) adapters.                   *
- * Copyright (C) 2008-2012 Emulex. All rights reserved.            *
- * EMULEX and SLI are trademarks of Emulex.                        *
- * www.emulex.com                                                  *
- *                                                                 *
- * This program is free software; you can redistribute it and/or   *
- * modify it under the terms of version 2 of the GNU General       *
- * Public License as published by the Free Software Foundation.    *
- * This program is distributed in the hope that it will be useful. *
- * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
- * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
- * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
- * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
- * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
- * more details, a copy of which can be found in the file COPYING  *
- * included with this package.                                     *
+/* This file is part of the Emulex RoCE Device Driver for
+ * RoCE (RDMA over Converged Ethernet) adapters.
+ * Copyright (C) 2012-2015 Emulex. All rights reserved.
+ * EMULEX and SLI are trademarks of Emulex.
+ * www.emulex.com
+ *
+ * This software is available to you under a choice of one of two licenses.
+ * You may choose to be licensed under the terms of the GNU General Public
+ * License (GPL) Version 2, available from the file COPYING in the main
+ * directory of this source tree, or the BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright notice,
+ *   this list of conditions and the following disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above copyright
+ *   notice, this list of conditions and the following disclaimer in
+ *   the documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  *
  * Contact Information:
  * linux-drivers@emulex.com
@@ -23,7 +38,7 @@
  * Emulex
  * 3333 Susan Street
  * Costa Mesa, CA 92626
- *******************************************************************/
+ */
 
 #ifndef __OCRDMA_H__
 #define __OCRDMA_H__
index 1554cca5712aafd5659d1cfb80bafff7095f67ca..430b1350fe96ecd1eb16a67f18c2bc3a8e6afa80 100644
@@ -1,21 +1,36 @@
-/*******************************************************************
- * This file is part of the Emulex RoCE Device Driver for          *
- * RoCE (RDMA over Converged Ethernet) adapters.                   *
- * Copyright (C) 2008-2012 Emulex. All rights reserved.            *
- * EMULEX and SLI are trademarks of Emulex.                        *
- * www.emulex.com                                                  *
- *                                                                 *
- * This program is free software; you can redistribute it and/or   *
- * modify it under the terms of version 2 of the GNU General       *
- * Public License as published by the Free Software Foundation.    *
- * This program is distributed in the hope that it will be useful. *
- * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
- * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
- * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
- * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
- * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
- * more details, a copy of which can be found in the file COPYING  *
- * included with this package.                                     *
+/* This file is part of the Emulex RoCE Device Driver for
+ * RoCE (RDMA over Converged Ethernet) adapters.
+ * Copyright (C) 2012-2015 Emulex. All rights reserved.
+ * EMULEX and SLI are trademarks of Emulex.
+ * www.emulex.com
+ *
+ * This software is available to you under a choice of one of two licenses.
+ * You may choose to be licensed under the terms of the GNU General Public
+ * License (GPL) Version 2, available from the file COPYING in the main
+ * directory of this source tree, or the BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright notice,
+ *   this list of conditions and the following disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above copyright
+ *   notice, this list of conditions and the following disclaimer in
+ *   the documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  *
  * Contact Information:
  * linux-drivers@emulex.com
@@ -23,7 +38,7 @@
  * Emulex
  * 3333 Susan Street
  * Costa Mesa, CA 92626
- *******************************************************************/
+ */
 
 #ifndef __OCRDMA_ABI_H__
 #define __OCRDMA_ABI_H__
index 29b27675dd709e8271708c6ca91cc9deb0a1076e..44766fee1f4e2cacb2d971f7858ad6e3eb4c4875 100644
@@ -1,21 +1,36 @@
-/*******************************************************************
- * This file is part of the Emulex RoCE Device Driver for          *
- * RoCE (RDMA over Converged Ethernet) adapters.                   *
- * Copyright (C) 2008-2012 Emulex. All rights reserved.            *
- * EMULEX and SLI are trademarks of Emulex.                        *
- * www.emulex.com                                                  *
- *                                                                 *
- * This program is free software; you can redistribute it and/or   *
- * modify it under the terms of version 2 of the GNU General       *
- * Public License as published by the Free Software Foundation.    *
- * This program is distributed in the hope that it will be useful. *
- * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
- * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
- * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
- * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
- * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
- * more details, a copy of which can be found in the file COPYING  *
- * included with this package.                                     *
+/* This file is part of the Emulex RoCE Device Driver for
+ * RoCE (RDMA over Converged Ethernet) adapters.
+ * Copyright (C) 2012-2015 Emulex. All rights reserved.
+ * EMULEX and SLI are trademarks of Emulex.
+ * www.emulex.com
+ *
+ * This software is available to you under a choice of one of two licenses.
+ * You may choose to be licensed under the terms of the GNU General Public
+ * License (GPL) Version 2, available from the file COPYING in the main
+ * directory of this source tree, or the BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright notice,
+ *   this list of conditions and the following disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above copyright
+ *   notice, this list of conditions and the following disclaimer in
+ *   the documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  *
  * Contact Information:
  * linux-drivers@emulex.com
@@ -23,7 +38,7 @@
  * Emulex
  * 3333 Susan Street
  * Costa Mesa, CA 92626
- *******************************************************************/
+ */
 
 #include <net/neighbour.h>
 #include <net/netevent.h>
index cf366fe03cb822580fe96b69e5873ad255a34632..04a30ae674739b87dc266194f68aa27271df653e 100644
@@ -1,21 +1,36 @@
-/*******************************************************************
- * This file is part of the Emulex RoCE Device Driver for          *
- * RoCE (RDMA over Converged Ethernet) adapters.                   *
- * Copyright (C) 2008-2012 Emulex. All rights reserved.            *
- * EMULEX and SLI are trademarks of Emulex.                        *
- * www.emulex.com                                                  *
- *                                                                 *
- * This program is free software; you can redistribute it and/or   *
- * modify it under the terms of version 2 of the GNU General       *
- * Public License as published by the Free Software Foundation.    *
- * This program is distributed in the hope that it will be useful. *
- * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
- * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
- * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
- * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
- * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
- * more details, a copy of which can be found in the file COPYING  *
- * included with this package.                                     *
+/* This file is part of the Emulex RoCE Device Driver for
+ * RoCE (RDMA over Converged Ethernet) adapters.
+ * Copyright (C) 2012-2015 Emulex. All rights reserved.
+ * EMULEX and SLI are trademarks of Emulex.
+ * www.emulex.com
+ *
+ * This software is available to you under a choice of one of two licenses.
+ * You may choose to be licensed under the terms of the GNU General Public
+ * License (GPL) Version 2, available from the file COPYING in the main
+ * directory of this source tree, or the BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright notice,
+ *   this list of conditions and the following disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above copyright
+ *   notice, this list of conditions and the following disclaimer in
+ *   the documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  *
  * Contact Information:
  * linux-drivers@emulex.com
@@ -23,7 +38,7 @@
  * Emulex
  * 3333 Susan Street
  * Costa Mesa, CA 92626
- *******************************************************************/
+ */
 
 #ifndef __OCRDMA_AH_H__
 #define __OCRDMA_AH_H__
index 47615ff33bc6a1fb8c0c703b9f975acc6afe79c2..aab391a15db429104f52765346455ba07efa424b 100644
@@ -1,21 +1,36 @@
-/*******************************************************************
- * This file is part of the Emulex RoCE Device Driver for          *
- * RoCE (RDMA over Converged Ethernet) CNA Adapters.              *
- * Copyright (C) 2008-2012 Emulex. All rights reserved.            *
- * EMULEX and SLI are trademarks of Emulex.                        *
- * www.emulex.com                                                  *
- *                                                                 *
- * This program is free software; you can redistribute it and/or   *
- * modify it under the terms of version 2 of the GNU General       *
- * Public License as published by the Free Software Foundation.    *
- * This program is distributed in the hope that it will be useful. *
- * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
- * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
- * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
- * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
- * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
- * more details, a copy of which can be found in the file COPYING  *
- * included with this package.                                     *
+/* This file is part of the Emulex RoCE Device Driver for
+ * RoCE (RDMA over Converged Ethernet) adapters.
+ * Copyright (C) 2012-2015 Emulex. All rights reserved.
+ * EMULEX and SLI are trademarks of Emulex.
+ * www.emulex.com
+ *
+ * This software is available to you under a choice of one of two licenses.
+ * You may choose to be licensed under the terms of the GNU General Public
+ * License (GPL) Version 2, available from the file COPYING in the main
+ * directory of this source tree, or the BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright notice,
+ *   this list of conditions and the following disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above copyright
+ *   notice, this list of conditions and the following disclaimer in
+ *   the documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  *
  * Contact Information:
  * linux-drivers@emulex.com
@@ -23,7 +38,7 @@
  * Emulex
  * 3333 Susan Street
  * Costa Mesa, CA 92626
- *******************************************************************/
+ */
 
 #include <linux/sched.h>
 #include <linux/interrupt.h>
index e905972fceb7d48ff882800390c1330367815caf..7ed885c1851e28740b81a0588493c0d0ca92bc42 100644
@@ -1,21 +1,36 @@
-/*******************************************************************
- * This file is part of the Emulex RoCE Device Driver for          *
- * RoCE (RDMA over Converged Ethernet) CNA Adapters.              *
- * Copyright (C) 2008-2012 Emulex. All rights reserved.            *
- * EMULEX and SLI are trademarks of Emulex.                        *
- * www.emulex.com                                                  *
- *                                                                 *
- * This program is free software; you can redistribute it and/or   *
- * modify it under the terms of version 2 of the GNU General       *
- * Public License as published by the Free Software Foundation.    *
- * This program is distributed in the hope that it will be useful. *
- * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
- * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
- * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
- * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
- * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
- * more details, a copy of which can be found in the file COPYING  *
- * included with this package.                                     *
+/* This file is part of the Emulex RoCE Device Driver for
+ * RoCE (RDMA over Converged Ethernet) adapters.
+ * Copyright (C) 2012-2015 Emulex. All rights reserved.
+ * EMULEX and SLI are trademarks of Emulex.
+ * www.emulex.com
+ *
+ * This software is available to you under a choice of one of two licenses.
+ * You may choose to be licensed under the terms of the GNU General Public
+ * License (GPL) Version 2, available from the file COPYING in the main
+ * directory of this source tree, or the BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright notice,
+ *   this list of conditions and the following disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above copyright
+ *   notice, this list of conditions and the following disclaimer in
+ *   the documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  *
  * Contact Information:
  * linux-drivers@emulex.com
@@ -23,7 +38,7 @@
  * Emulex
  * 3333 Susan Street
  * Costa Mesa, CA 92626
- *******************************************************************/
+ */
 
 #ifndef __OCRDMA_HW_H__
 #define __OCRDMA_HW_H__
index d98a707a5eb9b3e27a51548a0fbe2ae9b893ebab..b119a3413a155574ae1eb1bdd020bb7d77e42822 100644
@@ -1,21 +1,36 @@
-/*******************************************************************
- * This file is part of the Emulex RoCE Device Driver for          *
- * RoCE (RDMA over Converged Ethernet) adapters.                   *
- * Copyright (C) 2008-2012 Emulex. All rights reserved.            *
- * EMULEX and SLI are trademarks of Emulex.                        *
- * www.emulex.com                                                  *
- *                                                                 *
- * This program is free software; you can redistribute it and/or   *
- * modify it under the terms of version 2 of the GNU General       *
- * Public License as published by the Free Software Foundation.    *
- * This program is distributed in the hope that it will be useful. *
- * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
- * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
- * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
- * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
- * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
- * more details, a copy of which can be found in the file COPYING  *
- * included with this package.                                     *
+/* This file is part of the Emulex RoCE Device Driver for
+ * RoCE (RDMA over Converged Ethernet) adapters.
+ * Copyright (C) 2012-2015 Emulex. All rights reserved.
+ * EMULEX and SLI are trademarks of Emulex.
+ * www.emulex.com
+ *
+ * This software is available to you under a choice of one of two licenses.
+ * You may choose to be licensed under the terms of the GNU General Public
+ * License (GPL) Version 2, available from the file COPYING in the main
+ * directory of this source tree, or the BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright notice,
+ *   this list of conditions and the following disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above copyright
+ *   notice, this list of conditions and the following disclaimer in
+ *   the documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  *
  * Contact Information:
  * linux-drivers@emulex.com
@@ -23,7 +38,7 @@
  * Emulex
  * 3333 Susan Street
  * Costa Mesa, CA 92626
- *******************************************************************/
+ */
 
 #include <linux/module.h>
 #include <linux/idr.h>
@@ -46,7 +61,7 @@
 MODULE_VERSION(OCRDMA_ROCE_DRV_VERSION);
 MODULE_DESCRIPTION(OCRDMA_ROCE_DRV_DESC " " OCRDMA_ROCE_DRV_VERSION);
 MODULE_AUTHOR("Emulex Corporation");
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("Dual BSD/GPL");
 
 static LIST_HEAD(ocrdma_dev_list);
 static DEFINE_SPINLOCK(ocrdma_devlist_lock);
index 02ad0aee99afc0c5e9449c4f58353e57d38903f1..80006b24aa118e752f444383fc9f4f3c3bafb191 100644
@@ -1,21 +1,36 @@
-/*******************************************************************
- * This file is part of the Emulex RoCE Device Driver for          *
- * RoCE (RDMA over Converged Ethernet) adapters.                   *
- * Copyright (C) 2008-2012 Emulex. All rights reserved.            *
- * EMULEX and SLI are trademarks of Emulex.                        *
- * www.emulex.com                                                  *
- *                                                                 *
- * This program is free software; you can redistribute it and/or   *
- * modify it under the terms of version 2 of the GNU General       *
- * Public License as published by the Free Software Foundation.    *
- * This program is distributed in the hope that it will be useful. *
- * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
- * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
- * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
- * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
- * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
- * more details, a copy of which can be found in the file COPYING  *
- * included with this package.                                     *
+/* This file is part of the Emulex RoCE Device Driver for
+ * RoCE (RDMA over Converged Ethernet) adapters.
+ * Copyright (C) 2012-2015 Emulex. All rights reserved.
+ * EMULEX and SLI are trademarks of Emulex.
+ * www.emulex.com
+ *
+ * This software is available to you under a choice of one of two licenses.
+ * You may choose to be licensed under the terms of the GNU General Public
+ * License (GPL) Version 2, available from the file COPYING in the main
+ * directory of this source tree, or the BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright notice,
+ *   this list of conditions and the following disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above copyright
+ *   notice, this list of conditions and the following disclaimer in
+ *   the documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  *
  * Contact Information:
  * linux-drivers@emulex.com
@@ -23,7 +38,7 @@
  * Emulex
  * 3333 Susan Street
  * Costa Mesa, CA 92626
- *******************************************************************/
+ */
 
 #ifndef __OCRDMA_SLI_H__
 #define __OCRDMA_SLI_H__
index 48d7ef51aa0c209678e0ed4bbe97bc5ff9a881d5..69334e214571b94ae0305bc8c9821aa0e261b7fe 100644
@@ -1,21 +1,36 @@
-/*******************************************************************
- * This file is part of the Emulex RoCE Device Driver for          *
- * RoCE (RDMA over Converged Ethernet) adapters.                   *
- * Copyright (C) 2008-2014 Emulex. All rights reserved.            *
- * EMULEX and SLI are trademarks of Emulex.                        *
- * www.emulex.com                                                  *
- *                                                                 *
- * This program is free software; you can redistribute it and/or   *
- * modify it under the terms of version 2 of the GNU General       *
- * Public License as published by the Free Software Foundation.    *
- * This program is distributed in the hope that it will be useful. *
- * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
- * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
- * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
- * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
- * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
- * more details, a copy of which can be found in the file COPYING  *
- * included with this package.                                     *
+/* This file is part of the Emulex RoCE Device Driver for
+ * RoCE (RDMA over Converged Ethernet) adapters.
+ * Copyright (C) 2012-2015 Emulex. All rights reserved.
+ * EMULEX and SLI are trademarks of Emulex.
+ * www.emulex.com
+ *
+ * This software is available to you under a choice of one of two licenses.
+ * You may choose to be licensed under the terms of the GNU General Public
+ * License (GPL) Version 2, available from the file COPYING in the main
+ * directory of this source tree, or the BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright notice,
+ *   this list of conditions and the following disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above copyright
+ *   notice, this list of conditions and the following disclaimer in
+ *   the documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  *
  * Contact Information:
  * linux-drivers@emulex.com
@@ -23,7 +38,7 @@
  * Emulex
  * 3333 Susan Street
  * Costa Mesa, CA 92626
- *******************************************************************/
+ */
 
 #include <rdma/ib_addr.h>
 #include <rdma/ib_pma.h>
index 091edd68a8a34678e5283b2374c58274c84580b3..c9e58d04c7b8d15c15c5d8ae4205c87cc3be521c 100644
@@ -1,21 +1,36 @@
-/*******************************************************************
- * This file is part of the Emulex RoCE Device Driver for          *
- * RoCE (RDMA over Converged Ethernet) adapters.                   *
- * Copyright (C) 2008-2014 Emulex. All rights reserved.            *
- * EMULEX and SLI are trademarks of Emulex.                        *
- * www.emulex.com                                                  *
- *                                                                 *
- * This program is free software; you can redistribute it and/or   *
- * modify it under the terms of version 2 of the GNU General       *
- * Public License as published by the Free Software Foundation.    *
- * This program is distributed in the hope that it will be useful. *
- * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
- * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
- * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
- * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
- * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
- * more details, a copy of which can be found in the file COPYING  *
- * included with this package.                                     *
+/* This file is part of the Emulex RoCE Device Driver for
+ * RoCE (RDMA over Converged Ethernet) adapters.
+ * Copyright (C) 2012-2015 Emulex. All rights reserved.
+ * EMULEX and SLI are trademarks of Emulex.
+ * www.emulex.com
+ *
+ * This software is available to you under a choice of one of two licenses.
+ * You may choose to be licensed under the terms of the GNU General Public
+ * License (GPL) Version 2, available from the file COPYING in the main
+ * directory of this source tree, or the BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright notice,
+ *   this list of conditions and the following disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above copyright
+ *   notice, this list of conditions and the following disclaimer in
+ *   the documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  *
  * Contact Information:
  * linux-drivers@emulex.com
@@ -23,7 +38,7 @@
  * Emulex
  * 3333 Susan Street
  * Costa Mesa, CA 92626
- *******************************************************************/
+ */
 
 #ifndef __OCRDMA_STATS_H__
 #define __OCRDMA_STATS_H__
index 5bb61eb58f2c71859969d73ac6e326d4dafc51fd..bc84cd462ecf3208e8576ad1ba1084578ddb7c29 100644 (file)
@@ -1,21 +1,36 @@
-/*******************************************************************
- * This file is part of the Emulex RoCE Device Driver for          *
- * RoCE (RDMA over Converged Ethernet) adapters.                   *
- * Copyright (C) 2008-2012 Emulex. All rights reserved.            *
- * EMULEX and SLI are trademarks of Emulex.                        *
- * www.emulex.com                                                  *
- *                                                                 *
- * This program is free software; you can redistribute it and/or   *
- * modify it under the terms of version 2 of the GNU General       *
- * Public License as published by the Free Software Foundation.    *
- * This program is distributed in the hope that it will be useful. *
- * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
- * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
- * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
- * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
- * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
- * more details, a copy of which can be found in the file COPYING  *
- * included with this package.                                     *
+/* This file is part of the Emulex RoCE Device Driver for
+ * RoCE (RDMA over Converged Ethernet) adapters.
+ * Copyright (C) 2012-2015 Emulex. All rights reserved.
+ * EMULEX and SLI are trademarks of Emulex.
+ * www.emulex.com
+ *
+ * This software is available to you under a choice of one of two licenses.
+ * You may choose to be licensed under the terms of the GNU General Public
+ * License (GPL) Version 2, available from the file COPYING in the main
+ * directory of this source tree, or the BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright notice,
+ *   this list of conditions and the following disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above copyright
+ *   notice, this list of conditions and the following disclaimer in
+ *   the documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  *
  * Contact Information:
  * linux-drivers@emulex.com
@@ -23,7 +38,7 @@
  * Emulex
  * 3333 Susan Street
  * Costa Mesa, CA 92626
- *******************************************************************/
+ */
 
 #include <linux/dma-mapping.h>
 #include <rdma/ib_verbs.h>
index b15c608efa7b03c72a2eb44e1f7b8af919db13b0..eaccb2d3cb9ff52f51fe0bbba3334053d519d047 100644 (file)
@@ -1,21 +1,36 @@
-/*******************************************************************
- * This file is part of the Emulex RoCE Device Driver for          *
- * RoCE (RDMA over Converged Ethernet) adapters.                   *
- * Copyright (C) 2008-2012 Emulex. All rights reserved.            *
- * EMULEX and SLI are trademarks of Emulex.                        *
- * www.emulex.com                                                  *
- *                                                                 *
- * This program is free software; you can redistribute it and/or   *
- * modify it under the terms of version 2 of the GNU General       *
- * Public License as published by the Free Software Foundation.    *
- * This program is distributed in the hope that it will be useful. *
- * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
- * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
- * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
- * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
- * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
- * more details, a copy of which can be found in the file COPYING  *
- * included with this package.                                     *
+/* This file is part of the Emulex RoCE Device Driver for
+ * RoCE (RDMA over Converged Ethernet) adapters.
+ * Copyright (C) 2012-2015 Emulex. All rights reserved.
+ * EMULEX and SLI are trademarks of Emulex.
+ * www.emulex.com
+ *
+ * This software is available to you under a choice of one of two licenses.
+ * You may choose to be licensed under the terms of the GNU General Public
+ * License (GPL) Version 2, available from the file COPYING in the main
+ * directory of this source tree, or the BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright notice,
+ *   this list of conditions and the following disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above copyright
+ *   notice, this list of conditions and the following disclaimer in
+ *   the documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  *
  * Contact Information:
  * linux-drivers@emulex.com
@@ -23,7 +38,7 @@
  * Emulex
  * 3333 Susan Street
  * Costa Mesa, CA 92626
- *******************************************************************/
+ */
 
 #ifndef __OCRDMA_VERBS_H__
 #define __OCRDMA_VERBS_H__
index 9e6ee82a8fd76f490de93d6117754e2a6657ec72..851c8219d50104105ec8d97a3ba743cb6f59626b 100644 (file)
@@ -177,7 +177,8 @@ int ipoib_transport_dev_init(struct net_device *dev, struct ib_device *ca)
                else
                        size += ipoib_recvq_size * ipoib_max_conn_qp;
        } else
-               goto out_free_wq;
+               if (ret != -ENOSYS)
+                       goto out_free_wq;
 
        cq_attr.cqe = size;
        priv->recv_cq = ib_create_cq(priv->ca, ipoib_ib_completion, NULL,
index 7717009631271ea6965a7d60b9dcf3f74acdd1d6..d851e1828d6f5152e9c8ca49de9b64a3b953f180 100644 (file)
@@ -775,6 +775,17 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
        ret = isert_rdma_post_recvl(isert_conn);
        if (ret)
                goto out_conn_dev;
+       /*
+        * Obtain the second reference now before isert_rdma_accept() to
+        * ensure that any initiator generated REJECT CM event that occurs
+        * asynchronously won't drop the last reference until the error path
+        * in iscsi_target_login_sess_out() does its ->iscsit_free_conn() ->
+        * isert_free_conn() -> isert_put_conn() -> kref_put().
+        */
+       if (!kref_get_unless_zero(&isert_conn->kref)) {
+               isert_warn("conn %p connect_release is running\n", isert_conn);
+               goto out_conn_dev;
+       }
 
        ret = isert_rdma_accept(isert_conn);
        if (ret)
@@ -836,11 +847,6 @@ isert_connected_handler(struct rdma_cm_id *cma_id)
 
        isert_info("conn %p\n", isert_conn);
 
-       if (!kref_get_unless_zero(&isert_conn->kref)) {
-               isert_warn("conn %p connect_release is running\n", isert_conn);
-               return;
-       }
-
        mutex_lock(&isert_conn->mutex);
        if (isert_conn->state != ISER_CONN_FULL_FEATURE)
                isert_conn->state = ISER_CONN_UP;
index 074a65ed17bb4d595d2b48f13e0d2e03c2267bc6..766bf26601163c37aebf265160b4d3d0ce09617f 100644 (file)
@@ -71,6 +71,18 @@ static void input_leds_event(struct input_handle *handle, unsigned int type,
 {
 }
 
+static int input_leds_get_count(struct input_dev *dev)
+{
+       unsigned int led_code;
+       int count = 0;
+
+       for_each_set_bit(led_code, dev->ledbit, LED_CNT)
+               if (input_led_info[led_code].name)
+                       count++;
+
+       return count;
+}
+
 static int input_leds_connect(struct input_handler *handler,
                              struct input_dev *dev,
                              const struct input_device_id *id)
@@ -81,7 +93,7 @@ static int input_leds_connect(struct input_handler *handler,
        int led_no;
        int error;
 
-       num_leds = bitmap_weight(dev->ledbit, LED_CNT);
+       num_leds = input_leds_get_count(dev);
        if (!num_leds)
                return -ENXIO;
 
@@ -112,7 +124,7 @@ static int input_leds_connect(struct input_handler *handler,
                led->handle = &leds->handle;
                led->code = led_code;
 
-               if (WARN_ON(!input_led_info[led_code].name))
+               if (!input_led_info[led_code].name)
                        continue;
 
                led->cdev.name = kasprintf(GFP_KERNEL, "%s::%s",
index b10709f0461559c4d5ca03f47ae1d82c75663e91..30e3442518f85cfe06732e0f331962458396008c 100644 (file)
@@ -2,6 +2,7 @@
  * Apple USB BCM5974 (Macbook Air and Penryn Macbook Pro) multitouch driver
  *
  * Copyright (C) 2008     Henrik Rydberg (rydberg@euromail.se)
+ * Copyright (C) 2015      John Horan (knasher@gmail.com)
  *
  * The USB initialization and package decoding was made by
  * Scott Shawcroft as part of the touchd user-space driver project:
 #define USB_DEVICE_ID_APPLE_WELLSPRING8_ANSI   0x0290
 #define USB_DEVICE_ID_APPLE_WELLSPRING8_ISO    0x0291
 #define USB_DEVICE_ID_APPLE_WELLSPRING8_JIS    0x0292
+/* MacbookPro12,1 (2015) */
+#define USB_DEVICE_ID_APPLE_WELLSPRING9_ANSI   0x0272
+#define USB_DEVICE_ID_APPLE_WELLSPRING9_ISO    0x0273
+#define USB_DEVICE_ID_APPLE_WELLSPRING9_JIS    0x0274
 
 #define BCM5974_DEVICE(prod) {                                 \
        .match_flags = (USB_DEVICE_ID_MATCH_DEVICE |            \
@@ -152,6 +157,10 @@ static const struct usb_device_id bcm5974_table[] = {
        BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING8_ANSI),
        BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING8_ISO),
        BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING8_JIS),
+       /* MacbookPro12,1 */
+       BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING9_ANSI),
+       BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING9_ISO),
+       BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING9_JIS),
        /* Terminating entry */
        {}
 };
@@ -180,21 +189,47 @@ struct bt_data {
 enum tp_type {
        TYPE1,                  /* plain trackpad */
        TYPE2,                  /* button integrated in trackpad */
-       TYPE3                   /* additional header fields since June 2013 */
+       TYPE3,                  /* additional header fields since June 2013 */
+       TYPE4                   /* additional header field for pressure data */
 };
 
 /* trackpad finger data offsets, le16-aligned */
-#define FINGER_TYPE1           (13 * sizeof(__le16))
-#define FINGER_TYPE2           (15 * sizeof(__le16))
-#define FINGER_TYPE3           (19 * sizeof(__le16))
+#define HEADER_TYPE1           (13 * sizeof(__le16))
+#define HEADER_TYPE2           (15 * sizeof(__le16))
+#define HEADER_TYPE3           (19 * sizeof(__le16))
+#define HEADER_TYPE4           (23 * sizeof(__le16))
 
 /* trackpad button data offsets */
+#define BUTTON_TYPE1           0
 #define BUTTON_TYPE2           15
 #define BUTTON_TYPE3           23
+#define BUTTON_TYPE4           31
 
 /* list of device capability bits */
 #define HAS_INTEGRATED_BUTTON  1
 
+/* trackpad finger data block size */
+#define FSIZE_TYPE1            (14 * sizeof(__le16))
+#define FSIZE_TYPE2            (14 * sizeof(__le16))
+#define FSIZE_TYPE3            (14 * sizeof(__le16))
+#define FSIZE_TYPE4            (15 * sizeof(__le16))
+
+/* offset from header to finger struct */
+#define DELTA_TYPE1            (0 * sizeof(__le16))
+#define DELTA_TYPE2            (0 * sizeof(__le16))
+#define DELTA_TYPE3            (0 * sizeof(__le16))
+#define DELTA_TYPE4            (1 * sizeof(__le16))
+
+/* usb control message mode switch data */
+#define USBMSG_TYPE1           8, 0x300, 0, 0, 0x1, 0x8
+#define USBMSG_TYPE2           8, 0x300, 0, 0, 0x1, 0x8
+#define USBMSG_TYPE3           8, 0x300, 0, 0, 0x1, 0x8
+#define USBMSG_TYPE4           2, 0x302, 2, 1, 0x1, 0x0
+
+/* Wellspring initialization constants */
+#define BCM5974_WELLSPRING_MODE_READ_REQUEST_ID                1
+#define BCM5974_WELLSPRING_MODE_WRITE_REQUEST_ID       9
+
 /* trackpad finger structure, le16-aligned */
 struct tp_finger {
        __le16 origin;          /* zero when switching track finger */
@@ -207,14 +242,13 @@ struct tp_finger {
        __le16 orientation;     /* 16384 when point, else 15 bit angle */
        __le16 touch_major;     /* touch area, major axis */
        __le16 touch_minor;     /* touch area, minor axis */
-       __le16 unused[3];       /* zeros */
+       __le16 unused[2];       /* zeros */
+       __le16 pressure;        /* pressure on forcetouch touchpad */
        __le16 multi;           /* one finger: varies, more fingers: constant */
 } __attribute__((packed,aligned(2)));
 
 /* trackpad finger data size, empirically at least ten fingers */
 #define MAX_FINGERS            16
-#define SIZEOF_FINGER          sizeof(struct tp_finger)
-#define SIZEOF_ALL_FINGERS     (MAX_FINGERS * SIZEOF_FINGER)
 #define MAX_FINGER_ORIENTATION 16384
 
 /* device-specific parameters */
@@ -232,8 +266,17 @@ struct bcm5974_config {
        int bt_datalen;         /* data length of the button interface */
        int tp_ep;              /* the endpoint of the trackpad interface */
        enum tp_type tp_type;   /* type of trackpad interface */
-       int tp_offset;          /* offset to trackpad finger data */
+       int tp_header;          /* bytes in header block */
        int tp_datalen;         /* data length of the trackpad interface */
+       int tp_button;          /* offset to button data */
+       int tp_fsize;           /* bytes in single finger block */
+       int tp_delta;           /* offset from header to finger struct */
+       int um_size;            /* usb control message length */
+       int um_req_val;         /* usb control message value */
+       int um_req_idx;         /* usb control message index */
+       int um_switch_idx;      /* usb control message mode switch index */
+       int um_switch_on;       /* usb control message mode switch on */
+       int um_switch_off;      /* usb control message mode switch off */
        struct bcm5974_param p; /* finger pressure limits */
        struct bcm5974_param w; /* finger width limits */
        struct bcm5974_param x; /* horizontal limits */
@@ -259,6 +302,24 @@ struct bcm5974 {
        int slots[MAX_FINGERS];                         /* slot assignments */
 };
 
+/* trackpad finger block data, le16-aligned */
+static const struct tp_finger *get_tp_finger(const struct bcm5974 *dev, int i)
+{
+       const struct bcm5974_config *c = &dev->cfg;
+       u8 *f_base = dev->tp_data + c->tp_header + c->tp_delta;
+
+       return (const struct tp_finger *)(f_base + i * c->tp_fsize);
+}
+
+#define DATAFORMAT(type)                               \
+       type,                                           \
+       HEADER_##type,                                  \
+       HEADER_##type + (MAX_FINGERS) * (FSIZE_##type), \
+       BUTTON_##type,                                  \
+       FSIZE_##type,                                   \
+       DELTA_##type,                                   \
+       USBMSG_##type
+
 /* logical signal quality */
 #define SN_PRESSURE    45              /* pressure signal-to-noise ratio */
 #define SN_WIDTH       25              /* width signal-to-noise ratio */
@@ -273,7 +334,7 @@ static const struct bcm5974_config bcm5974_config_table[] = {
                USB_DEVICE_ID_APPLE_WELLSPRING_JIS,
                0,
                0x84, sizeof(struct bt_data),
-               0x81, TYPE1, FINGER_TYPE1, FINGER_TYPE1 + SIZEOF_ALL_FINGERS,
+               0x81, DATAFORMAT(TYPE1),
                { SN_PRESSURE, 0, 256 },
                { SN_WIDTH, 0, 2048 },
                { SN_COORD, -4824, 5342 },
@@ -286,7 +347,7 @@ static const struct bcm5974_config bcm5974_config_table[] = {
                USB_DEVICE_ID_APPLE_WELLSPRING2_JIS,
                0,
                0x84, sizeof(struct bt_data),
-               0x81, TYPE1, FINGER_TYPE1, FINGER_TYPE1 + SIZEOF_ALL_FINGERS,
+               0x81, DATAFORMAT(TYPE1),
                { SN_PRESSURE, 0, 256 },
                { SN_WIDTH, 0, 2048 },
                { SN_COORD, -4824, 4824 },
@@ -299,7 +360,7 @@ static const struct bcm5974_config bcm5974_config_table[] = {
                USB_DEVICE_ID_APPLE_WELLSPRING3_JIS,
                HAS_INTEGRATED_BUTTON,
                0x84, sizeof(struct bt_data),
-               0x81, TYPE2, FINGER_TYPE2, FINGER_TYPE2 + SIZEOF_ALL_FINGERS,
+               0x81, DATAFORMAT(TYPE2),
                { SN_PRESSURE, 0, 300 },
                { SN_WIDTH, 0, 2048 },
                { SN_COORD, -4460, 5166 },
@@ -312,7 +373,7 @@ static const struct bcm5974_config bcm5974_config_table[] = {
                USB_DEVICE_ID_APPLE_WELLSPRING4_JIS,
                HAS_INTEGRATED_BUTTON,
                0x84, sizeof(struct bt_data),
-               0x81, TYPE2, FINGER_TYPE2, FINGER_TYPE2 + SIZEOF_ALL_FINGERS,
+               0x81, DATAFORMAT(TYPE2),
                { SN_PRESSURE, 0, 300 },
                { SN_WIDTH, 0, 2048 },
                { SN_COORD, -4620, 5140 },
@@ -325,7 +386,7 @@ static const struct bcm5974_config bcm5974_config_table[] = {
                USB_DEVICE_ID_APPLE_WELLSPRING4A_JIS,
                HAS_INTEGRATED_BUTTON,
                0x84, sizeof(struct bt_data),
-               0x81, TYPE2, FINGER_TYPE2, FINGER_TYPE2 + SIZEOF_ALL_FINGERS,
+               0x81, DATAFORMAT(TYPE2),
                { SN_PRESSURE, 0, 300 },
                { SN_WIDTH, 0, 2048 },
                { SN_COORD, -4616, 5112 },
@@ -338,7 +399,7 @@ static const struct bcm5974_config bcm5974_config_table[] = {
                USB_DEVICE_ID_APPLE_WELLSPRING5_JIS,
                HAS_INTEGRATED_BUTTON,
                0x84, sizeof(struct bt_data),
-               0x81, TYPE2, FINGER_TYPE2, FINGER_TYPE2 + SIZEOF_ALL_FINGERS,
+               0x81, DATAFORMAT(TYPE2),
                { SN_PRESSURE, 0, 300 },
                { SN_WIDTH, 0, 2048 },
                { SN_COORD, -4415, 5050 },
@@ -351,7 +412,7 @@ static const struct bcm5974_config bcm5974_config_table[] = {
                USB_DEVICE_ID_APPLE_WELLSPRING6_JIS,
                HAS_INTEGRATED_BUTTON,
                0x84, sizeof(struct bt_data),
-               0x81, TYPE2, FINGER_TYPE2, FINGER_TYPE2 + SIZEOF_ALL_FINGERS,
+               0x81, DATAFORMAT(TYPE2),
                { SN_PRESSURE, 0, 300 },
                { SN_WIDTH, 0, 2048 },
                { SN_COORD, -4620, 5140 },
@@ -364,7 +425,7 @@ static const struct bcm5974_config bcm5974_config_table[] = {
                USB_DEVICE_ID_APPLE_WELLSPRING5A_JIS,
                HAS_INTEGRATED_BUTTON,
                0x84, sizeof(struct bt_data),
-               0x81, TYPE2, FINGER_TYPE2, FINGER_TYPE2 + SIZEOF_ALL_FINGERS,
+               0x81, DATAFORMAT(TYPE2),
                { SN_PRESSURE, 0, 300 },
                { SN_WIDTH, 0, 2048 },
                { SN_COORD, -4750, 5280 },
@@ -377,7 +438,7 @@ static const struct bcm5974_config bcm5974_config_table[] = {
                USB_DEVICE_ID_APPLE_WELLSPRING6A_JIS,
                HAS_INTEGRATED_BUTTON,
                0x84, sizeof(struct bt_data),
-               0x81, TYPE2, FINGER_TYPE2, FINGER_TYPE2 + SIZEOF_ALL_FINGERS,
+               0x81, DATAFORMAT(TYPE2),
                { SN_PRESSURE, 0, 300 },
                { SN_WIDTH, 0, 2048 },
                { SN_COORD, -4620, 5140 },
@@ -390,7 +451,7 @@ static const struct bcm5974_config bcm5974_config_table[] = {
                USB_DEVICE_ID_APPLE_WELLSPRING7_JIS,
                HAS_INTEGRATED_BUTTON,
                0x84, sizeof(struct bt_data),
-               0x81, TYPE2, FINGER_TYPE2, FINGER_TYPE2 + SIZEOF_ALL_FINGERS,
+               0x81, DATAFORMAT(TYPE2),
                { SN_PRESSURE, 0, 300 },
                { SN_WIDTH, 0, 2048 },
                { SN_COORD, -4750, 5280 },
@@ -403,7 +464,7 @@ static const struct bcm5974_config bcm5974_config_table[] = {
                USB_DEVICE_ID_APPLE_WELLSPRING7A_JIS,
                HAS_INTEGRATED_BUTTON,
                0x84, sizeof(struct bt_data),
-               0x81, TYPE2, FINGER_TYPE2, FINGER_TYPE2 + SIZEOF_ALL_FINGERS,
+               0x81, DATAFORMAT(TYPE2),
                { SN_PRESSURE, 0, 300 },
                { SN_WIDTH, 0, 2048 },
                { SN_COORD, -4750, 5280 },
@@ -416,13 +477,26 @@ static const struct bcm5974_config bcm5974_config_table[] = {
                USB_DEVICE_ID_APPLE_WELLSPRING8_JIS,
                HAS_INTEGRATED_BUTTON,
                0, sizeof(struct bt_data),
-               0x83, TYPE3, FINGER_TYPE3, FINGER_TYPE3 + SIZEOF_ALL_FINGERS,
+               0x83, DATAFORMAT(TYPE3),
                { SN_PRESSURE, 0, 300 },
                { SN_WIDTH, 0, 2048 },
                { SN_COORD, -4620, 5140 },
                { SN_COORD, -150, 6600 },
                { SN_ORIENT, -MAX_FINGER_ORIENTATION, MAX_FINGER_ORIENTATION }
        },
+       {
+               USB_DEVICE_ID_APPLE_WELLSPRING9_ANSI,
+               USB_DEVICE_ID_APPLE_WELLSPRING9_ISO,
+               USB_DEVICE_ID_APPLE_WELLSPRING9_JIS,
+               HAS_INTEGRATED_BUTTON,
+               0, sizeof(struct bt_data),
+               0x83, DATAFORMAT(TYPE4),
+               { SN_PRESSURE, 0, 300 },
+               { SN_WIDTH, 0, 2048 },
+               { SN_COORD, -4828, 5345 },
+               { SN_COORD, -203, 6803 },
+               { SN_ORIENT, -MAX_FINGER_ORIENTATION, MAX_FINGER_ORIENTATION }
+       },
        {}
 };
 
@@ -549,19 +623,18 @@ static int report_tp_state(struct bcm5974 *dev, int size)
        struct input_dev *input = dev->input;
        int raw_n, i, n = 0;
 
-       if (size < c->tp_offset || (size - c->tp_offset) % SIZEOF_FINGER != 0)
+       if (size < c->tp_header || (size - c->tp_header) % c->tp_fsize != 0)
                return -EIO;
 
-       /* finger data, le16-aligned */
-       f = (const struct tp_finger *)(dev->tp_data + c->tp_offset);
-       raw_n = (size - c->tp_offset) / SIZEOF_FINGER;
+       raw_n = (size - c->tp_header) / c->tp_fsize;
 
        for (i = 0; i < raw_n; i++) {
-               if (raw2int(f[i].touch_major) == 0)
+               f = get_tp_finger(dev, i);
+               if (raw2int(f->touch_major) == 0)
                        continue;
-               dev->pos[n].x = raw2int(f[i].abs_x);
-               dev->pos[n].y = c->y.min + c->y.max - raw2int(f[i].abs_y);
-               dev->index[n++] = &f[i];
+               dev->pos[n].x = raw2int(f->abs_x);
+               dev->pos[n].y = c->y.min + c->y.max - raw2int(f->abs_y);
+               dev->index[n++] = f;
        }
 
        input_mt_assign_slots(input, dev->slots, dev->pos, n, 0);
@@ -572,32 +645,22 @@ static int report_tp_state(struct bcm5974 *dev, int size)
 
        input_mt_sync_frame(input);
 
-       report_synaptics_data(input, c, f, raw_n);
+       report_synaptics_data(input, c, get_tp_finger(dev, 0), raw_n);
 
-       /* type 2 reports button events via ibt only */
-       if (c->tp_type == TYPE2) {
-               int ibt = raw2int(dev->tp_data[BUTTON_TYPE2]);
+       /* later types report button events via integrated button only */
+       if (c->caps & HAS_INTEGRATED_BUTTON) {
+               int ibt = raw2int(dev->tp_data[c->tp_button]);
                input_report_key(input, BTN_LEFT, ibt);
        }
 
-       if (c->tp_type == TYPE3)
-               input_report_key(input, BTN_LEFT, dev->tp_data[BUTTON_TYPE3]);
-
        input_sync(input);
 
        return 0;
 }
 
-/* Wellspring initialization constants */
-#define BCM5974_WELLSPRING_MODE_READ_REQUEST_ID                1
-#define BCM5974_WELLSPRING_MODE_WRITE_REQUEST_ID       9
-#define BCM5974_WELLSPRING_MODE_REQUEST_VALUE          0x300
-#define BCM5974_WELLSPRING_MODE_REQUEST_INDEX          0
-#define BCM5974_WELLSPRING_MODE_VENDOR_VALUE           0x01
-#define BCM5974_WELLSPRING_MODE_NORMAL_VALUE           0x08
-
 static int bcm5974_wellspring_mode(struct bcm5974 *dev, bool on)
 {
+       const struct bcm5974_config *c = &dev->cfg;
        int retval = 0, size;
        char *data;
 
@@ -605,7 +668,7 @@ static int bcm5974_wellspring_mode(struct bcm5974 *dev, bool on)
        if (dev->cfg.tp_type == TYPE3)
                return 0;
 
-       data = kmalloc(8, GFP_KERNEL);
+       data = kmalloc(c->um_size, GFP_KERNEL);
        if (!data) {
                dev_err(&dev->intf->dev, "out of memory\n");
                retval = -ENOMEM;
@@ -616,28 +679,24 @@ static int bcm5974_wellspring_mode(struct bcm5974 *dev, bool on)
        size = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0),
                        BCM5974_WELLSPRING_MODE_READ_REQUEST_ID,
                        USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
-                       BCM5974_WELLSPRING_MODE_REQUEST_VALUE,
-                       BCM5974_WELLSPRING_MODE_REQUEST_INDEX, data, 8, 5000);
+                       c->um_req_val, c->um_req_idx, data, c->um_size, 5000);
 
-       if (size != 8) {
+       if (size != c->um_size) {
                dev_err(&dev->intf->dev, "could not read from device\n");
                retval = -EIO;
                goto out;
        }
 
        /* apply the mode switch */
-       data[0] = on ?
-               BCM5974_WELLSPRING_MODE_VENDOR_VALUE :
-               BCM5974_WELLSPRING_MODE_NORMAL_VALUE;
+       data[c->um_switch_idx] = on ? c->um_switch_on : c->um_switch_off;
 
        /* write configuration */
        size = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
                        BCM5974_WELLSPRING_MODE_WRITE_REQUEST_ID,
                        USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
-                       BCM5974_WELLSPRING_MODE_REQUEST_VALUE,
-                       BCM5974_WELLSPRING_MODE_REQUEST_INDEX, data, 8, 5000);
+                       c->um_req_val, c->um_req_idx, data, c->um_size, 5000);
 
-       if (size != 8) {
+       if (size != c->um_size) {
                dev_err(&dev->intf->dev, "could not write to device\n");
                retval = -EIO;
                goto out;
index ce3d40004458c87392339472f654462fae7cf0bc..22b9ca901f4e96c22499ce9723c0d2bd897c6fbb 100644 (file)
@@ -1167,7 +1167,7 @@ static int elantech_set_input_params(struct psmouse *psmouse)
        struct input_dev *dev = psmouse->dev;
        struct elantech_data *etd = psmouse->private;
        unsigned int x_min = 0, y_min = 0, x_max = 0, y_max = 0, width = 0;
-       unsigned int x_res = 0, y_res = 0;
+       unsigned int x_res = 31, y_res = 31;
 
        if (elantech_set_range(psmouse, &x_min, &y_min, &x_max, &y_max, &width))
                return -1;
@@ -1232,8 +1232,6 @@ static int elantech_set_input_params(struct psmouse *psmouse)
                /* For X to recognize me as touchpad. */
                input_set_abs_params(dev, ABS_X, x_min, x_max, 0, 0);
                input_set_abs_params(dev, ABS_Y, y_min, y_max, 0, 0);
-               input_abs_set_res(dev, ABS_X, x_res);
-               input_abs_set_res(dev, ABS_Y, y_res);
                /*
                 * range of pressure and width is the same as v2,
                 * report ABS_PRESSURE, ABS_TOOL_WIDTH for compatibility.
@@ -1246,8 +1244,6 @@ static int elantech_set_input_params(struct psmouse *psmouse)
                input_mt_init_slots(dev, ETP_MAX_FINGERS, 0);
                input_set_abs_params(dev, ABS_MT_POSITION_X, x_min, x_max, 0, 0);
                input_set_abs_params(dev, ABS_MT_POSITION_Y, y_min, y_max, 0, 0);
-               input_abs_set_res(dev, ABS_MT_POSITION_X, x_res);
-               input_abs_set_res(dev, ABS_MT_POSITION_Y, y_res);
                input_set_abs_params(dev, ABS_MT_PRESSURE, ETP_PMIN_V2,
                                     ETP_PMAX_V2, 0, 0);
                /*
@@ -1259,6 +1255,13 @@ static int elantech_set_input_params(struct psmouse *psmouse)
                break;
        }
 
+       input_abs_set_res(dev, ABS_X, x_res);
+       input_abs_set_res(dev, ABS_Y, y_res);
+       if (etd->hw_version > 1) {
+               input_abs_set_res(dev, ABS_MT_POSITION_X, x_res);
+               input_abs_set_res(dev, ABS_MT_POSITION_Y, y_res);
+       }
+
        etd->y_max = y_max;
        etd->width = width;
 
index 3a32caf06bf1dae9b9fb678947bb9c7243fc25da..6025eb430c0a5010c908961ccf8897943fd3c945 100644 (file)
@@ -1484,12 +1484,12 @@ static int __synaptics_init(struct psmouse *psmouse, bool absolute_mode)
        priv->pkt_type = SYN_MODEL_NEWABS(priv->model_id) ? SYN_NEWABS : SYN_OLDABS;
 
        psmouse_info(psmouse,
-                    "Touchpad model: %ld, fw: %ld.%ld, id: %#lx, caps: %#lx/%#lx/%#lx, board id: %lu, fw id: %lu\n",
+                    "Touchpad model: %ld, fw: %ld.%ld, id: %#lx, caps: %#lx/%#lx/%#lx/%#lx, board id: %lu, fw id: %lu\n",
                     SYN_ID_MODEL(priv->identity),
                     SYN_ID_MAJOR(priv->identity), SYN_ID_MINOR(priv->identity),
                     priv->model_id,
                     priv->capabilities, priv->ext_cap, priv->ext_cap_0c,
-                    priv->board_id, priv->firmware_id);
+                    priv->ext_cap_10, priv->board_id, priv->firmware_id);
 
        set_input_params(psmouse, priv);
 
index b4d12e29abff72008125c2406b3808cf174e0f71..e36162b28c2aae268166c2e735e318b793b9dfa8 100644 (file)
@@ -15,6 +15,7 @@
  */
 
 #include <linux/kernel.h>
+#include <linux/dmi.h>
 #include <linux/i2c.h>
 #include <linux/input.h>
 #include <linux/input/mt.h>
@@ -34,6 +35,7 @@ struct goodix_ts_data {
        int abs_y_max;
        unsigned int max_touch_num;
        unsigned int int_trigger_type;
+       bool rotated_screen;
 };
 
 #define GOODIX_MAX_HEIGHT              4096
@@ -60,6 +62,30 @@ static const unsigned long goodix_irq_flags[] = {
        IRQ_TYPE_LEVEL_HIGH,
 };
 
+/*
+ * These tablets have their coordinate origin at the bottom right
+ * of the tablet, as if rotated 180 degrees
+ */
+static const struct dmi_system_id rotated_screen[] = {
+#if defined(CONFIG_DMI) && defined(CONFIG_X86)
+       {
+               .ident = "WinBook TW100",
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "WinBook"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "TW100")
+               }
+       },
+       {
+               .ident = "WinBook TW700",
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "WinBook"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "TW700")
+               },
+       },
+#endif
+       {}
+};
+
 /**
  * goodix_i2c_read - read data from a register of the i2c slave device.
  *
@@ -129,6 +155,11 @@ static void goodix_ts_report_touch(struct goodix_ts_data *ts, u8 *coor_data)
        int input_y = get_unaligned_le16(&coor_data[3]);
        int input_w = get_unaligned_le16(&coor_data[5]);
 
+       if (ts->rotated_screen) {
+               input_x = ts->abs_x_max - input_x;
+               input_y = ts->abs_y_max - input_y;
+       }
+
        input_mt_slot(ts->input_dev, id);
        input_mt_report_slot_state(ts->input_dev, MT_TOOL_FINGER, true);
        input_report_abs(ts->input_dev, ABS_MT_POSITION_X, input_x);
@@ -223,6 +254,11 @@ static void goodix_read_config(struct goodix_ts_data *ts)
                ts->abs_y_max = GOODIX_MAX_HEIGHT;
                ts->max_touch_num = GOODIX_MAX_CONTACTS;
        }
+
+       ts->rotated_screen = dmi_check_system(rotated_screen);
+       if (ts->rotated_screen)
+               dev_dbg(&ts->client->dev,
+                        "Applying '180 degrees rotated screen' quirk\n");
 }
 
 /**
index f2c6c352c55af2d3bb4dbf243516b19b9c62a65a..2c41107240dec274e5ec34724a882d87189f0103 100644 (file)
@@ -627,6 +627,9 @@ static int dmc_tsc10_init(struct usbtouch_usb *usbtouch)
                goto err_out;
        }
 
+       /* TSC-25 data sheet specifies a delay after the RESET command */
+       msleep(150);
+
        /* set coordinate output rate */
        buf[0] = buf[1] = 0xFF;
        ret = usb_control_msg(dev, usb_rcvctrlpipe (dev, 0),
index a57e9b7498953bb9ebf947695caea46d73e2bfeb..658ee39e65696898422bcd9c825d8a49fbc37359 100644 (file)
@@ -76,8 +76,6 @@ LIST_HEAD(hpet_map);
  * Domain for untranslated devices - only allocated
  * if iommu=pt passed on kernel cmd line.
  */
-static struct protection_domain *pt_domain;
-
 static const struct iommu_ops amd_iommu_ops;
 
 static ATOMIC_NOTIFIER_HEAD(ppr_notifier);
@@ -96,7 +94,7 @@ struct iommu_dev_data {
        struct protection_domain *domain; /* Domain the device is bound to */
        u16 devid;                        /* PCI Device ID */
        bool iommu_v2;                    /* Device can make use of IOMMUv2 */
-       bool passthrough;                 /* Default for device is pt_domain */
+       bool passthrough;                 /* Device is identity mapped */
        struct {
                bool enabled;
                int qdep;
@@ -116,7 +114,6 @@ struct iommu_cmd {
 struct kmem_cache *amd_iommu_irq_cache;
 
 static void update_domain(struct protection_domain *domain);
-static int alloc_passthrough_domain(void);
 static int protection_domain_init(struct protection_domain *domain);
 
 /****************************************************************************
@@ -2167,15 +2164,17 @@ static int attach_device(struct device *dev,
        dev_data = get_dev_data(dev);
 
        if (domain->flags & PD_IOMMUV2_MASK) {
-               if (!dev_data->iommu_v2 || !dev_data->passthrough)
+               if (!dev_data->passthrough)
                        return -EINVAL;
 
-               if (pdev_iommuv2_enable(pdev) != 0)
-                       return -EINVAL;
+               if (dev_data->iommu_v2) {
+                       if (pdev_iommuv2_enable(pdev) != 0)
+                               return -EINVAL;
 
-               dev_data->ats.enabled = true;
-               dev_data->ats.qdep    = pci_ats_queue_depth(pdev);
-               dev_data->pri_tlp     = pci_pri_tlp_required(pdev);
+                       dev_data->ats.enabled = true;
+                       dev_data->ats.qdep    = pci_ats_queue_depth(pdev);
+                       dev_data->pri_tlp     = pci_pri_tlp_required(pdev);
+               }
        } else if (amd_iommu_iotlb_sup &&
                   pci_enable_ats(pdev, PAGE_SHIFT) == 0) {
                dev_data->ats.enabled = true;
@@ -2221,15 +2220,6 @@ static void __detach_device(struct iommu_dev_data *dev_data)
        do_detach(head);
 
        spin_unlock_irqrestore(&domain->lock, flags);
-
-       /*
-        * If we run in passthrough mode the device must be assigned to the
-        * passthrough domain if it is detached from any other domain.
-        * Make sure we can deassign from the pt_domain itself.
-        */
-       if (dev_data->passthrough &&
-           (dev_data->domain == NULL && domain != pt_domain))
-               __attach_device(dev_data, pt_domain);
 }
 
 /*
@@ -2249,7 +2239,7 @@ static void detach_device(struct device *dev)
        __detach_device(dev_data);
        write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
 
-       if (domain->flags & PD_IOMMUV2_MASK)
+       if (domain->flags & PD_IOMMUV2_MASK && dev_data->iommu_v2)
                pdev_iommuv2_disable(to_pci_dev(dev));
        else if (dev_data->ats.enabled)
                pci_disable_ats(to_pci_dev(dev));
@@ -2287,17 +2277,15 @@ static int amd_iommu_add_device(struct device *dev)
 
        BUG_ON(!dev_data);
 
-       if (dev_data->iommu_v2)
+       if (iommu_pass_through || dev_data->iommu_v2)
                iommu_request_dm_for_dev(dev);
 
        /* Domains are initialized for this device - have a look what we ended up with */
        domain = iommu_get_domain_for_dev(dev);
-       if (domain->type == IOMMU_DOMAIN_IDENTITY) {
+       if (domain->type == IOMMU_DOMAIN_IDENTITY)
                dev_data->passthrough = true;
-               dev->archdata.dma_ops = &nommu_dma_ops;
-       } else {
+       else
                dev->archdata.dma_ops = &amd_iommu_dma_ops;
-       }
 
 out:
        iommu_completion_wait(iommu);
@@ -2862,8 +2850,17 @@ int __init amd_iommu_init_api(void)
 
 int __init amd_iommu_init_dma_ops(void)
 {
+       swiotlb        = iommu_pass_through ? 1 : 0;
        iommu_detected = 1;
-       swiotlb = 0;
+
+       /*
+        * In case we don't initialize SWIOTLB (actually the common case
+        * when AMD IOMMU is enabled), make sure there are global
+        * dma_ops set as a fall-back for devices not handled by this
+        * driver (for example non-PCI devices).
+        */
+       if (!swiotlb)
+               dma_ops = &nommu_dma_ops;
 
        amd_iommu_stats_init();
 
@@ -2947,21 +2944,6 @@ out_err:
        return NULL;
 }
 
-static int alloc_passthrough_domain(void)
-{
-       if (pt_domain != NULL)
-               return 0;
-
-       /* allocate passthrough domain */
-       pt_domain = protection_domain_alloc();
-       if (!pt_domain)
-               return -ENOMEM;
-
-       pt_domain->mode = PAGE_MODE_NONE;
-
-       return 0;
-}
-
 static struct iommu_domain *amd_iommu_domain_alloc(unsigned type)
 {
        struct protection_domain *pdomain;
@@ -3222,33 +3204,6 @@ static const struct iommu_ops amd_iommu_ops = {
  *
  *****************************************************************************/
 
-int __init amd_iommu_init_passthrough(void)
-{
-       struct iommu_dev_data *dev_data;
-       struct pci_dev *dev = NULL;
-       int ret;
-
-       ret = alloc_passthrough_domain();
-       if (ret)
-               return ret;
-
-       for_each_pci_dev(dev) {
-               if (!check_device(&dev->dev))
-                       continue;
-
-               dev_data = get_dev_data(&dev->dev);
-               dev_data->passthrough = true;
-
-               attach_device(&dev->dev, pt_domain);
-       }
-
-       amd_iommu_stats_init();
-
-       pr_info("AMD-Vi: Initialized for Passthrough Mode\n");
-
-       return 0;
-}
-
 /* IOMMUv2 specific functions */
 int amd_iommu_register_ppr_notifier(struct notifier_block *nb)
 {
@@ -3363,7 +3318,12 @@ static int __flush_pasid(struct protection_domain *domain, int pasid,
                struct amd_iommu *iommu;
                int qdep;
 
-               BUG_ON(!dev_data->ats.enabled);
+               /*
+                * There might be non-IOMMUv2 capable devices in an IOMMUv2
+                * domain.
+                */
+               if (!dev_data->ats.enabled)
+                       continue;
 
                qdep  = dev_data->ats.qdep;
                iommu = amd_iommu_rlookup_table[dev_data->devid];
index dbda9ae68c5d70fff7926e39a2ef506b7165bf65..a24495eb4e26c5c596efa79c084b14c19fe5932c 100644 (file)
@@ -2026,14 +2026,6 @@ static bool detect_ivrs(void)
        return true;
 }
 
-static int amd_iommu_init_dma(void)
-{
-       if (iommu_pass_through)
-               return amd_iommu_init_passthrough();
-       else
-               return amd_iommu_init_dma_ops();
-}
-
 /****************************************************************************
  *
  * AMD IOMMU Initialization State Machine
@@ -2073,7 +2065,7 @@ static int __init state_next(void)
                init_state = ret ? IOMMU_INIT_ERROR : IOMMU_INTERRUPTS_EN;
                break;
        case IOMMU_INTERRUPTS_EN:
-               ret = amd_iommu_init_dma();
+               ret = amd_iommu_init_dma_ops();
                init_state = ret ? IOMMU_INIT_ERROR : IOMMU_DMA_OPS;
                break;
        case IOMMU_DMA_OPS:
index 3465faf1809e4cb1d6630e5cdc8f87cd4e405bd2..f7b875bb70d42138027f49ebde8150d27ce14cd2 100644 (file)
@@ -132,11 +132,19 @@ static struct device_state *get_device_state(u16 devid)
 
 static void free_device_state(struct device_state *dev_state)
 {
+       struct iommu_group *group;
+
        /*
         * First detach device from domain - No more PRI requests will arrive
         * from that device after it is unbound from the IOMMUv2 domain.
         */
-       iommu_detach_device(dev_state->domain, &dev_state->pdev->dev);
+       group = iommu_group_get(&dev_state->pdev->dev);
+       if (WARN_ON(!group))
+               return;
+
+       iommu_detach_group(dev_state->domain, group);
+
+       iommu_group_put(group);
 
        /* Everything is down now, free the IOMMUv2 domain */
        iommu_domain_free(dev_state->domain);
@@ -731,6 +739,7 @@ EXPORT_SYMBOL(amd_iommu_unbind_pasid);
 int amd_iommu_init_device(struct pci_dev *pdev, int pasids)
 {
        struct device_state *dev_state;
+       struct iommu_group *group;
        unsigned long flags;
        int ret, tmp;
        u16 devid;
@@ -776,10 +785,16 @@ int amd_iommu_init_device(struct pci_dev *pdev, int pasids)
        if (ret)
                goto out_free_domain;
 
-       ret = iommu_attach_device(dev_state->domain, &pdev->dev);
-       if (ret != 0)
+       group = iommu_group_get(&pdev->dev);
+       if (!group)
                goto out_free_domain;
 
+       ret = iommu_attach_group(dev_state->domain, group);
+       if (ret != 0)
+               goto out_drop_group;
+
+       iommu_group_put(group);
+
        spin_lock_irqsave(&state_lock, flags);
 
        if (__get_device_state(devid) != NULL) {
@@ -794,6 +809,9 @@ int amd_iommu_init_device(struct pci_dev *pdev, int pasids)
 
        return 0;
 
+out_drop_group:
+       iommu_group_put(group);
+
 out_free_domain:
        iommu_domain_free(dev_state->domain);
 
index 8e9ec81ce4bbd85473d6d6a35e7c3567569187ee..da902baaa7946aac569b7ebe8a316c647dfd8187 100644 (file)
  * Stream table.
  *
  * Linear: Enough to cover 1 << IDR1.SIDSIZE entries
- * 2lvl: 8k L1 entries, 256 lazy entries per table (each table covers a PCI bus)
+ * 2lvl: 128k L1 entries,
+ *       256 lazy entries per table (each table covers a PCI bus)
  */
-#define STRTAB_L1_SZ_SHIFT             16
+#define STRTAB_L1_SZ_SHIFT             20
 #define STRTAB_SPLIT                   8
 
 #define STRTAB_L1_DESC_DWORDS          1
 #define ARM64_TCR_TG0_SHIFT            14
 #define ARM64_TCR_TG0_MASK             0x3UL
 #define CTXDESC_CD_0_TCR_IRGN0_SHIFT   8
-#define ARM64_TCR_IRGN0_SHIFT          24
+#define ARM64_TCR_IRGN0_SHIFT          8
 #define ARM64_TCR_IRGN0_MASK           0x3UL
 #define CTXDESC_CD_0_TCR_ORGN0_SHIFT   10
-#define ARM64_TCR_ORGN0_SHIFT          26
+#define ARM64_TCR_ORGN0_SHIFT          10
 #define ARM64_TCR_ORGN0_MASK           0x3UL
 #define CTXDESC_CD_0_TCR_SH0_SHIFT     12
 #define ARM64_TCR_SH0_SHIFT            12
@@ -542,6 +543,9 @@ struct arm_smmu_device {
 #define ARM_SMMU_FEAT_HYP              (1 << 12)
        u32                             features;
 
+#define ARM_SMMU_OPT_SKIP_PREFETCH     (1 << 0)
+       u32                             options;
+
        struct arm_smmu_cmdq            cmdq;
        struct arm_smmu_evtq            evtq;
        struct arm_smmu_priq            priq;
@@ -602,11 +606,35 @@ struct arm_smmu_domain {
 static DEFINE_SPINLOCK(arm_smmu_devices_lock);
 static LIST_HEAD(arm_smmu_devices);
 
+struct arm_smmu_option_prop {
+       u32 opt;
+       const char *prop;
+};
+
+static struct arm_smmu_option_prop arm_smmu_options[] = {
+       { ARM_SMMU_OPT_SKIP_PREFETCH, "hisilicon,broken-prefetch-cmd" },
+       { 0, NULL},
+};
+
 static struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom)
 {
        return container_of(dom, struct arm_smmu_domain, domain);
 }
 
+static void parse_driver_options(struct arm_smmu_device *smmu)
+{
+       int i = 0;
+
+       do {
+               if (of_property_read_bool(smmu->dev->of_node,
+                                               arm_smmu_options[i].prop)) {
+                       smmu->options |= arm_smmu_options[i].opt;
+                       dev_notice(smmu->dev, "option %s\n",
+                               arm_smmu_options[i].prop);
+               }
+       } while (arm_smmu_options[++i].opt);
+}
+
 /* Low-level queue manipulation functions */
 static bool queue_full(struct arm_smmu_queue *q)
 {
@@ -1036,7 +1064,8 @@ static void arm_smmu_write_strtab_ent(struct arm_smmu_device *smmu, u32 sid,
        arm_smmu_sync_ste_for_sid(smmu, sid);
 
        /* It's likely that we'll want to use the new STE soon */
-       arm_smmu_cmdq_issue_cmd(smmu, &prefetch_cmd);
+       if (!(smmu->options & ARM_SMMU_OPT_SKIP_PREFETCH))
+               arm_smmu_cmdq_issue_cmd(smmu, &prefetch_cmd);
 }
 
 static void arm_smmu_init_bypass_stes(u64 *strtab, unsigned int nent)
@@ -1064,7 +1093,7 @@ static int arm_smmu_init_l2_strtab(struct arm_smmu_device *smmu, u32 sid)
                return 0;
 
        size = 1 << (STRTAB_SPLIT + ilog2(STRTAB_STE_DWORDS) + 3);
-       strtab = &cfg->strtab[sid >> STRTAB_SPLIT << STRTAB_L1_DESC_DWORDS];
+       strtab = &cfg->strtab[(sid >> STRTAB_SPLIT) * STRTAB_L1_DESC_DWORDS];
 
        desc->span = STRTAB_SPLIT + 1;
        desc->l2ptr = dma_zalloc_coherent(smmu->dev, size, &desc->l2ptr_dma,
@@ -2020,21 +2049,23 @@ static int arm_smmu_init_strtab_2lvl(struct arm_smmu_device *smmu)
 {
        void *strtab;
        u64 reg;
-       u32 size;
+       u32 size, l1size;
        int ret;
        struct arm_smmu_strtab_cfg *cfg = &smmu->strtab_cfg;
 
        /* Calculate the L1 size, capped to the SIDSIZE */
        size = STRTAB_L1_SZ_SHIFT - (ilog2(STRTAB_L1_DESC_DWORDS) + 3);
        size = min(size, smmu->sid_bits - STRTAB_SPLIT);
-       if (size + STRTAB_SPLIT < smmu->sid_bits)
+       cfg->num_l1_ents = 1 << size;
+
+       size += STRTAB_SPLIT;
+       if (size < smmu->sid_bits)
                dev_warn(smmu->dev,
                         "2-level strtab only covers %u/%u bits of SID\n",
-                        size + STRTAB_SPLIT, smmu->sid_bits);
+                        size, smmu->sid_bits);
 
-       cfg->num_l1_ents = 1 << size;
-       size = cfg->num_l1_ents * (STRTAB_L1_DESC_DWORDS << 3);
-       strtab = dma_zalloc_coherent(smmu->dev, size, &cfg->strtab_dma,
+       l1size = cfg->num_l1_ents * (STRTAB_L1_DESC_DWORDS << 3);
+       strtab = dma_zalloc_coherent(smmu->dev, l1size, &cfg->strtab_dma,
                                     GFP_KERNEL);
        if (!strtab) {
                dev_err(smmu->dev,
@@ -2055,8 +2086,7 @@ static int arm_smmu_init_strtab_2lvl(struct arm_smmu_device *smmu)
        ret = arm_smmu_init_l1_strtab(smmu);
        if (ret)
                dma_free_coherent(smmu->dev,
-                                 cfg->num_l1_ents *
-                                 (STRTAB_L1_DESC_DWORDS << 3),
+                                 l1size,
                                  strtab,
                                  cfg->strtab_dma);
        return ret;
@@ -2573,6 +2603,8 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev)
        if (irq > 0)
                smmu->gerr_irq = irq;
 
+       parse_driver_options(smmu);
+
        /* Probe the h/w */
        ret = arm_smmu_device_probe(smmu);
        if (ret)
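Editorial note (not part of the patch): the strtab sizing hunk above raises STRTAB_L1_SZ_SHIFT to 20 and caps the level-1 stream table to what the reported SID width actually needs, freeing the full size only when it was allocated. A minimal user-space C sketch of that arithmetic follows; the sid_bits value is a made-up example and ilog2_u() is a local stand-in for the kernel's ilog2().

#include <stdio.h>

#define STRTAB_L1_SZ_SHIFT     20
#define STRTAB_SPLIT           8   /* SIDs covered per L1 entry: 1 << 8 */
#define STRTAB_L1_DESC_DWORDS  1

static unsigned int ilog2_u(unsigned int v)
{
	unsigned int r = 0;

	while (v >>= 1)
		r++;
	return r;
}

int main(void)
{
	unsigned int sid_bits = 32;	/* hypothetical IDR1.SIDSIZE */
	unsigned int size, num_l1_ents, l1size;

	/* L1 size in bits, capped to the SID space above the split */
	size = STRTAB_L1_SZ_SHIFT - (ilog2_u(STRTAB_L1_DESC_DWORDS) + 3);
	if (size > sid_bits - STRTAB_SPLIT)
		size = sid_bits - STRTAB_SPLIT;

	num_l1_ents = 1u << size;                               /* 1 << 17 = 131072 */
	l1size = num_l1_ents * (STRTAB_L1_DESC_DWORDS << 3);    /* bytes */

	printf("L1 entries: %u, L1 table: %u bytes, SID bits covered: %u of %u\n",
	       num_l1_ents, l1size, size + STRTAB_SPLIT, sid_bits);
	return 0;
}

With these constants the 2-level table covers 25 SID bits using a 1 MiB L1 table of 131072 entries, matching the updated "128k L1 entries" comment.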
index a98a7b27aca1dec2cb2f53319df8a49abcf8e645..0649b94f59584ca5b885cd0ecad595a84af0d89d 100644 (file)
@@ -1830,8 +1830,9 @@ static int domain_init(struct dmar_domain *domain, int guest_width)
 
 static void domain_exit(struct dmar_domain *domain)
 {
+       struct dmar_drhd_unit *drhd;
+       struct intel_iommu *iommu;
        struct page *freelist = NULL;
-       int i;
 
        /* Domain 0 is reserved, so dont process it */
        if (!domain)
@@ -1851,8 +1852,10 @@ static void domain_exit(struct dmar_domain *domain)
 
        /* clear attached or cached domains */
        rcu_read_lock();
-       for_each_set_bit(i, domain->iommu_bmp, g_num_of_iommus)
-               iommu_detach_domain(domain, g_iommus[i]);
+       for_each_active_iommu(iommu, drhd)
+               if (domain_type_is_vm(domain) ||
+                   test_bit(iommu->seq_id, domain->iommu_bmp))
+                       iommu_detach_domain(domain, iommu);
        rcu_read_unlock();
 
        dma_free_pagelist(freelist);
index b59727309072462fb3d0dc42935000d9570a7589..bfec3bdfe59847c8f3553e178a8b33a1c5a708fd 100644 (file)
@@ -259,7 +259,7 @@ config DM_CRYPT
          the ciphers you're going to use in the cryptoapi configuration.
 
          For further information on dm-crypt and userspace tools see:
-         <http://code.google.com/p/cryptsetup/wiki/DMCrypt>
+         <https://gitlab.com/cryptsetup/cryptsetup/wikis/DMCrypt>
 
          To compile this code as a module, choose M here: the module will
          be called dm-crypt.
index ed2346ddf4c9fb54dafeb92ae9c795a0584444e8..e51de52eeb94f71c9d6712a61d31e49f8e6f2f60 100644 (file)
@@ -494,7 +494,7 @@ static int bitmap_new_disk_sb(struct bitmap *bitmap)
        bitmap_super_t *sb;
        unsigned long chunksize, daemon_sleep, write_behind;
 
-       bitmap->storage.sb_page = alloc_page(GFP_KERNEL);
+       bitmap->storage.sb_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
        if (bitmap->storage.sb_page == NULL)
                return -ENOMEM;
        bitmap->storage.sb_page->index = 0;
@@ -541,6 +541,7 @@ static int bitmap_new_disk_sb(struct bitmap *bitmap)
        sb->state = cpu_to_le32(bitmap->flags);
        bitmap->events_cleared = bitmap->mddev->events;
        sb->events_cleared = cpu_to_le64(bitmap->mddev->events);
+       bitmap->mddev->bitmap_info.nodes = 0;
 
        kunmap_atomic(sb);
 
@@ -558,6 +559,7 @@ static int bitmap_read_sb(struct bitmap *bitmap)
        unsigned long sectors_reserved = 0;
        int err = -EINVAL;
        struct page *sb_page;
+       loff_t offset = bitmap->mddev->bitmap_info.offset;
 
        if (!bitmap->storage.file && !bitmap->mddev->bitmap_info.offset) {
                chunksize = 128 * 1024 * 1024;
@@ -584,9 +586,9 @@ re_read:
                bm_blocks = ((bm_blocks+7) >> 3) + sizeof(bitmap_super_t);
                /* to 4k blocks */
                bm_blocks = DIV_ROUND_UP_SECTOR_T(bm_blocks, 4096);
-               bitmap->mddev->bitmap_info.offset += bitmap->cluster_slot * (bm_blocks << 3);
+               offset = bitmap->mddev->bitmap_info.offset + (bitmap->cluster_slot * (bm_blocks << 3));
                pr_info("%s:%d bm slot: %d offset: %llu\n", __func__, __LINE__,
-                       bitmap->cluster_slot, (unsigned long long)bitmap->mddev->bitmap_info.offset);
+                       bitmap->cluster_slot, offset);
        }
 
        if (bitmap->storage.file) {
@@ -597,7 +599,7 @@ re_read:
                                bitmap, bytes, sb_page);
        } else {
                err = read_sb_page(bitmap->mddev,
-                                  bitmap->mddev->bitmap_info.offset,
+                                  offset,
                                   sb_page,
                                   0, sizeof(bitmap_super_t));
        }
@@ -611,8 +613,16 @@ re_read:
        daemon_sleep = le32_to_cpu(sb->daemon_sleep) * HZ;
        write_behind = le32_to_cpu(sb->write_behind);
        sectors_reserved = le32_to_cpu(sb->sectors_reserved);
-       nodes = le32_to_cpu(sb->nodes);
-       strlcpy(bitmap->mddev->bitmap_info.cluster_name, sb->cluster_name, 64);
+       /* XXX: This is a hack to ensure that we don't use clustering
+        *  in case:
+        *      - dm-raid is in use and
+        *      - the nodes written in bitmap_sb are erroneous.
+        */
+       if (!bitmap->mddev->sync_super) {
+               nodes = le32_to_cpu(sb->nodes);
+               strlcpy(bitmap->mddev->bitmap_info.cluster_name,
+                               sb->cluster_name, 64);
+       }
 
        /* verify that the bitmap-specific fields are valid */
        if (sb->magic != cpu_to_le32(BITMAP_MAGIC))
@@ -671,7 +681,7 @@ out:
        kunmap_atomic(sb);
        /* Assiging chunksize is required for "re_read" */
        bitmap->mddev->bitmap_info.chunksize = chunksize;
-       if (nodes && (bitmap->cluster_slot < 0)) {
+       if (err == 0 && nodes && (bitmap->cluster_slot < 0)) {
                err = md_setup_cluster(bitmap->mddev, nodes);
                if (err) {
                        pr_err("%s: Could not setup cluster service (%d)\n",
@@ -1866,10 +1876,6 @@ int bitmap_copy_from_slot(struct mddev *mddev, int slot,
        if (IS_ERR(bitmap))
                return PTR_ERR(bitmap);
 
-       rv = bitmap_read_sb(bitmap);
-       if (rv)
-               goto err;
-
        rv = bitmap_init_from_disk(bitmap, 0);
        if (rv)
                goto err;
index b6f22651dd356e7e5b4d0bed4268f4ac9dd62772..48a4a826ae07649419d033b99c564b2adb9da6ea 100644 (file)
@@ -1686,7 +1686,7 @@ static struct dm_cache_policy *smq_create(dm_cblock_t cache_size,
 
        if (from_cblock(cache_size)) {
                mq->cache_hit_bits = alloc_bitset(from_cblock(cache_size));
-               if (!mq->cache_hit_bits && mq->cache_hit_bits) {
+               if (!mq->cache_hit_bits) {
                        DMERR("couldn't allocate cache hit bitset");
                        goto bad_cache_hit_bits;
                }
index b680da5d7b93b5f68d65c0449df1b87b8e1b528b..1fe93cfea7d309a659d79fe2b953b5f2dbe7b466 100644 (file)
@@ -424,6 +424,7 @@ static void free_migration(struct dm_cache_migration *mg)
                wake_up(&cache->migration_wait);
 
        mempool_free(mg, cache->migration_pool);
+       wake_worker(cache);
 }
 
 static int prealloc_data_structs(struct cache *cache, struct prealloc *p)
@@ -1966,6 +1967,7 @@ static void process_deferred_bios(struct cache *cache)
                 * this bio might require one, we pause until there are some
                 * prepared mappings to process.
                 */
+               prealloc_used = true;
                if (prealloc_data_structs(cache, &structs)) {
                        spin_lock_irqsave(&cache->lock, flags);
                        bio_list_merge(&cache->deferred_bios, &bios);
@@ -1981,7 +1983,6 @@ static void process_deferred_bios(struct cache *cache)
                        process_discard_bio(cache, &structs, bio);
                else
                        process_bio(cache, &structs, bio);
-               prealloc_used = true;
        }
 
        if (prealloc_used)
@@ -2010,6 +2011,7 @@ static void process_deferred_cells(struct cache *cache)
                 * this bio might require one, we pause until there are some
                 * prepared mappings to process.
                 */
+               prealloc_used = true;
                if (prealloc_data_structs(cache, &structs)) {
                        spin_lock_irqsave(&cache->lock, flags);
                        list_splice(&cells, &cache->deferred_cells);
@@ -2018,7 +2020,6 @@ static void process_deferred_cells(struct cache *cache)
                }
 
                process_cell(cache, &structs, cell);
-               prealloc_used = true;
        }
 
        if (prealloc_used)
@@ -2080,6 +2081,7 @@ static void writeback_some_dirty_blocks(struct cache *cache)
                if (policy_writeback_work(cache->policy, &oblock, &cblock, busy))
                        break; /* no work to do */
 
+               prealloc_used = true;
                if (prealloc_data_structs(cache, &structs) ||
                    get_cell(cache, oblock, &structs, &old_ocell)) {
                        policy_set_dirty(cache->policy, oblock);
@@ -2087,7 +2089,6 @@ static void writeback_some_dirty_blocks(struct cache *cache)
                }
 
                writeback(cache, &structs, oblock, cblock, old_ocell);
-               prealloc_used = true;
        }
 
        if (prealloc_used)
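
Three of the dm-cache hunks above make the same change: prealloc_used is now set before prealloc_data_structs() is attempted rather than after a completed iteration, so the prealloc_free_structs() cleanup still runs when the attempt fails after allocating only part of the set. A minimal user-space sketch of that ordering (hypothetical names, not the dm-cache code itself):

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct prealloc { void *a; void *b; };

/* Allocates only what is still missing; may fail with ->a already set. */
static int prealloc_structs(struct prealloc *p)
{
    if (!p->a) p->a = malloc(32);
    if (!p->b) p->b = malloc(32);
    return (p->a && p->b) ? 0 : -1;
}

static void prealloc_free_structs(struct prealloc *p)
{
    free(p->a);
    free(p->b);
    p->a = p->b = NULL;
}

int main(void)
{
    struct prealloc structs = { NULL, NULL };
    bool prealloc_used = false;

    for (int i = 0; i < 3; i++) {
        prealloc_used = true;            /* set before the attempt, as in the patch */
        if (prealloc_structs(&structs))
            break;                       /* partial allocations are still freed below */
        printf("processed item %d\n", i);
    }

    if (prealloc_used)                   /* single cleanup point sees partial results too */
        prealloc_free_structs(&structs);
    return 0;
}
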
index 1c50c580215c84d97cdb7b1de07c3b9fe6a66f63..d2bbe8cc1e9786b66af798df9d8666d3fb96223c 100644 (file)
@@ -666,16 +666,21 @@ static void requeue_io(struct thin_c *tc)
        requeue_deferred_cells(tc);
 }
 
-static void error_retry_list(struct pool *pool)
+static void error_retry_list_with_code(struct pool *pool, int error)
 {
        struct thin_c *tc;
 
        rcu_read_lock();
        list_for_each_entry_rcu(tc, &pool->active_thins, list)
-               error_thin_bio_list(tc, &tc->retry_on_resume_list, -EIO);
+               error_thin_bio_list(tc, &tc->retry_on_resume_list, error);
        rcu_read_unlock();
 }
 
+static void error_retry_list(struct pool *pool)
+{
+       return error_retry_list_with_code(pool, -EIO);
+}
+
 /*
  * This section of code contains the logic for processing a thin device's IO.
  * Much of the code depends on pool object resources (lists, workqueues, etc)
@@ -2297,7 +2302,7 @@ static void do_no_space_timeout(struct work_struct *ws)
        if (get_pool_mode(pool) == PM_OUT_OF_DATA_SPACE && !pool->pf.error_if_no_space) {
                pool->pf.error_if_no_space = true;
                notify_of_pool_mode_change_to_oods(pool);
-               error_retry_list(pool);
+               error_retry_list_with_code(pool, -ENOSPC);
        }
 }
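
The dm-thin refactor above keeps one implementation that takes the error code explicitly and turns the old name into a thin wrapper supplying the historical default, which lets do_no_space_timeout() fail retried bios with -ENOSPC instead of -EIO. A tiny stand-alone sketch of that shape (the function names are reused here purely for illustration):

#include <errno.h>
#include <stdio.h>

static void error_retry_list_with_code(int error)
{
    printf("failing retried bios with %d\n", error);
}

static void error_retry_list(void)
{
    error_retry_list_with_code(-EIO);     /* historical default */
}

int main(void)
{
    error_retry_list();                       /* generic failure path        */
    error_retry_list_with_code(-ENOSPC);      /* out-of-data-space timeout   */
    return 0;
}
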
 
index fcfc4b9b26728029e59023ae3762d271b8f7a676..0072190515e0f6edca1e09718dae5102b3e7d274 100644 (file)
@@ -44,6 +44,7 @@ struct resync_info {
 
 /* md_cluster_info flags */
 #define                MD_CLUSTER_WAITING_FOR_NEWDISK          1
+#define                MD_CLUSTER_SUSPEND_READ_BALANCING       2
 
 
 struct md_cluster_info {
@@ -275,6 +276,9 @@ clear_bit:
 
 static void recover_prep(void *arg)
 {
+       struct mddev *mddev = arg;
+       struct md_cluster_info *cinfo = mddev->cluster_info;
+       set_bit(MD_CLUSTER_SUSPEND_READ_BALANCING, &cinfo->state);
 }
 
 static void recover_slot(void *arg, struct dlm_slot *slot)
@@ -307,6 +311,7 @@ static void recover_done(void *arg, struct dlm_slot *slots,
 
        cinfo->slot_number = our_slot;
        complete(&cinfo->completion);
+       clear_bit(MD_CLUSTER_SUSPEND_READ_BALANCING, &cinfo->state);
 }
 
 static const struct dlm_lockspace_ops md_ls_ops = {
@@ -816,12 +821,17 @@ static void resync_finish(struct mddev *mddev)
        resync_send(mddev, RESYNCING, 0, 0);
 }
 
-static int area_resyncing(struct mddev *mddev, sector_t lo, sector_t hi)
+static int area_resyncing(struct mddev *mddev, int direction,
+               sector_t lo, sector_t hi)
 {
        struct md_cluster_info *cinfo = mddev->cluster_info;
        int ret = 0;
        struct suspend_info *s;
 
+       if ((direction == READ) &&
+               test_bit(MD_CLUSTER_SUSPEND_READ_BALANCING, &cinfo->state))
+               return 1;
+
        spin_lock_irq(&cinfo->suspend_lock);
        if (list_empty(&cinfo->suspend_list))
                goto out;
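
The md-cluster hunks above introduce MD_CLUSTER_SUSPEND_READ_BALANCING: the bit is set in recover_prep(), cleared in recover_done(), and while it is set area_resyncing() reports any READ range as busy so raid1 read balancing falls back to the first disk. A rough user-space sketch of that short-circuit, with simplified stand-ins for the kernel bit helpers:

#include <stdio.h>

enum { SUSPEND_READ_BALANCING = 2 };   /* stand-in for the new flag bit */

static unsigned long state;

static void recover_prep(void) { state |=  (1UL << SUSPEND_READ_BALANCING); }
static void recover_done(void) { state &= ~(1UL << SUSPEND_READ_BALANCING); }

/* Mirrors the new READ short-circuit in area_resyncing(). */
static int area_resyncing(int is_read, long lo, long hi)
{
    (void)lo; (void)hi;
    if (is_read && (state & (1UL << SUSPEND_READ_BALANCING)))
        return 1;        /* treat everything as resyncing: read the first disk */
    return 0;            /* range checks against the suspend list would go here */
}

int main(void)
{
    recover_prep();
    printf("read balancing suspended: %d\n", area_resyncing(1, 0, 8));
    recover_done();
    printf("after recovery done:      %d\n", area_resyncing(1, 0, 8));
    return 0;
}
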
index 6817ee00e053d7d7e74b734185414c0cb3b18a38..00defe2badbc7952ec7d4cf358ad0408a2460b7f 100644 (file)
@@ -18,7 +18,7 @@ struct md_cluster_operations {
        int (*metadata_update_start)(struct mddev *mddev);
        int (*metadata_update_finish)(struct mddev *mddev);
        int (*metadata_update_cancel)(struct mddev *mddev);
-       int (*area_resyncing)(struct mddev *mddev, sector_t lo, sector_t hi);
+       int (*area_resyncing)(struct mddev *mddev, int direction, sector_t lo, sector_t hi);
        int (*add_new_disk_start)(struct mddev *mddev, struct md_rdev *rdev);
        int (*add_new_disk_finish)(struct mddev *mddev);
        int (*new_disk_ack)(struct mddev *mddev, bool ack);
index d429c30cd51471c26cb1c07cb3e6a413106133d4..0c2a4e8b873c659dbc260b2aa5484c7a5e87b176 100644 (file)
@@ -5382,6 +5382,8 @@ static void __md_stop(struct mddev *mddev)
 {
        struct md_personality *pers = mddev->pers;
        mddev_detach(mddev);
+       /* Ensure ->event_work is done */
+       flush_workqueue(md_misc_wq);
        spin_lock(&mddev->lock);
        mddev->ready = 0;
        mddev->pers = NULL;
@@ -7437,7 +7439,7 @@ int md_setup_cluster(struct mddev *mddev, int nodes)
        err = request_module("md-cluster");
        if (err) {
                pr_err("md-cluster module not found.\n");
-               return err;
+               return -ENOENT;
        }
 
        spin_lock(&pers_lock);
index f80f1af61ce70bce15a72d242d87c1c34da49a79..94f5b55069e09610f21ea640f5dff3efd7e580ca 100644 (file)
@@ -336,7 +336,7 @@ static void raid1_end_read_request(struct bio *bio, int error)
                spin_lock_irqsave(&conf->device_lock, flags);
                if (r1_bio->mddev->degraded == conf->raid_disks ||
                    (r1_bio->mddev->degraded == conf->raid_disks-1 &&
-                    !test_bit(Faulty, &conf->mirrors[mirror].rdev->flags)))
+                    test_bit(In_sync, &conf->mirrors[mirror].rdev->flags)))
                        uptodate = 1;
                spin_unlock_irqrestore(&conf->device_lock, flags);
        }
@@ -541,7 +541,7 @@ static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sect
 
        if ((conf->mddev->recovery_cp < this_sector + sectors) ||
            (mddev_is_clustered(conf->mddev) &&
-           md_cluster_ops->area_resyncing(conf->mddev, this_sector,
+           md_cluster_ops->area_resyncing(conf->mddev, READ, this_sector,
                    this_sector + sectors)))
                choose_first = 1;
        else
@@ -1111,7 +1111,8 @@ static void make_request(struct mddev *mddev, struct bio * bio)
            ((bio_end_sector(bio) > mddev->suspend_lo &&
            bio->bi_iter.bi_sector < mddev->suspend_hi) ||
            (mddev_is_clustered(mddev) &&
-            md_cluster_ops->area_resyncing(mddev, bio->bi_iter.bi_sector, bio_end_sector(bio))))) {
+            md_cluster_ops->area_resyncing(mddev, WRITE,
+                    bio->bi_iter.bi_sector, bio_end_sector(bio))))) {
                /* As the suspend_* range is controlled by
                 * userspace, we want an interruptible
                 * wait.
@@ -1124,7 +1125,7 @@ static void make_request(struct mddev *mddev, struct bio * bio)
                        if (bio_end_sector(bio) <= mddev->suspend_lo ||
                            bio->bi_iter.bi_sector >= mddev->suspend_hi ||
                            (mddev_is_clustered(mddev) &&
-                            !md_cluster_ops->area_resyncing(mddev,
+                            !md_cluster_ops->area_resyncing(mddev, WRITE,
                                     bio->bi_iter.bi_sector, bio_end_sector(bio))))
                                break;
                        schedule();
index 940f2f3654617918d8eef951262c3ca120ab83ce..38c58e19cfce3d7bdea554b26474080a88e02cca 100644 (file)
@@ -3556,6 +3556,7 @@ static struct r10conf *setup_conf(struct mddev *mddev)
                        /* far_copies must be 1 */
                        conf->prev.stride = conf->dev_sectors;
        }
+       conf->reshape_safe = conf->reshape_progress;
        spin_lock_init(&conf->device_lock);
        INIT_LIST_HEAD(&conf->retry_list);
 
@@ -3760,7 +3761,6 @@ static int run(struct mddev *mddev)
                }
                conf->offset_diff = min_offset_diff;
 
-               conf->reshape_safe = conf->reshape_progress;
                clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
                clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
                set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
@@ -4103,6 +4103,7 @@ static int raid10_start_reshape(struct mddev *mddev)
                conf->reshape_progress = size;
        } else
                conf->reshape_progress = 0;
+       conf->reshape_safe = conf->reshape_progress;
        spin_unlock_irq(&conf->device_lock);
 
        if (mddev->delta_disks && mddev->bitmap) {
@@ -4170,6 +4171,7 @@ abort:
                rdev->new_data_offset = rdev->data_offset;
        smp_wmb();
        conf->reshape_progress = MaxSector;
+       conf->reshape_safe = MaxSector;
        mddev->reshape_position = MaxSector;
        spin_unlock_irq(&conf->device_lock);
        return ret;
@@ -4524,6 +4526,7 @@ static void end_reshape(struct r10conf *conf)
        md_finish_reshape(conf->mddev);
        smp_wmb();
        conf->reshape_progress = MaxSector;
+       conf->reshape_safe = MaxSector;
        spin_unlock_irq(&conf->device_lock);
 
        /* read-ahead size must cover two whole stripes, which is
index 59e44e99eef3bacd4703fd6883513688f7c58b09..643d217bfa13ac8caa3dee9f9dd65d57f165bbe8 100644 (file)
@@ -2162,6 +2162,9 @@ static int resize_stripes(struct r5conf *conf, int newsize)
        if (!sc)
                return -ENOMEM;
 
+       /* Need to ensure auto-resizing doesn't interfere */
+       mutex_lock(&conf->cache_size_mutex);
+
        for (i = conf->max_nr_stripes; i; i--) {
                nsh = alloc_stripe(sc, GFP_KERNEL);
                if (!nsh)
@@ -2178,6 +2181,7 @@ static int resize_stripes(struct r5conf *conf, int newsize)
                        kmem_cache_free(sc, nsh);
                }
                kmem_cache_destroy(sc);
+               mutex_unlock(&conf->cache_size_mutex);
                return -ENOMEM;
        }
        /* Step 2 - Must use GFP_NOIO now.
@@ -2224,6 +2228,7 @@ static int resize_stripes(struct r5conf *conf, int newsize)
        } else
                err = -ENOMEM;
 
+       mutex_unlock(&conf->cache_size_mutex);
        /* Step 4, return new stripes to service */
        while(!list_empty(&newstripes)) {
                nsh = list_entry(newstripes.next, struct stripe_head, lru);
@@ -4061,8 +4066,10 @@ static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s)
                                 &first_bad, &bad_sectors))
                        set_bit(R5_ReadRepl, &dev->flags);
                else {
-                       if (rdev)
+                       if (rdev && !test_bit(Faulty, &rdev->flags))
                                set_bit(R5_NeedReplace, &dev->flags);
+                       else
+                               clear_bit(R5_NeedReplace, &dev->flags);
                        rdev = rcu_dereference(conf->disks[i].rdev);
                        clear_bit(R5_ReadRepl, &dev->flags);
                }
@@ -5857,12 +5864,14 @@ static void raid5d(struct md_thread *thread)
        pr_debug("%d stripes handled\n", handled);
 
        spin_unlock_irq(&conf->device_lock);
-       if (test_and_clear_bit(R5_ALLOC_MORE, &conf->cache_state)) {
+       if (test_and_clear_bit(R5_ALLOC_MORE, &conf->cache_state) &&
+           mutex_trylock(&conf->cache_size_mutex)) {
                grow_one_stripe(conf, __GFP_NOWARN);
                /* Set flag even if allocation failed.  This helps
                 * slow down allocation requests when mem is short
                 */
                set_bit(R5_DID_ALLOC, &conf->cache_state);
+               mutex_unlock(&conf->cache_size_mutex);
        }
 
        async_tx_issue_pending_all();
@@ -5894,18 +5903,22 @@ raid5_set_cache_size(struct mddev *mddev, int size)
                return -EINVAL;
 
        conf->min_nr_stripes = size;
+       mutex_lock(&conf->cache_size_mutex);
        while (size < conf->max_nr_stripes &&
               drop_one_stripe(conf))
                ;
+       mutex_unlock(&conf->cache_size_mutex);
 
 
        err = md_allow_write(mddev);
        if (err)
                return err;
 
+       mutex_lock(&conf->cache_size_mutex);
        while (size > conf->max_nr_stripes)
                if (!grow_one_stripe(conf, GFP_KERNEL))
                        break;
+       mutex_unlock(&conf->cache_size_mutex);
 
        return 0;
 }
@@ -6371,11 +6384,18 @@ static unsigned long raid5_cache_scan(struct shrinker *shrink,
                                      struct shrink_control *sc)
 {
        struct r5conf *conf = container_of(shrink, struct r5conf, shrinker);
-       int ret = 0;
-       while (ret < sc->nr_to_scan) {
-               if (drop_one_stripe(conf) == 0)
-                       return SHRINK_STOP;
-               ret++;
+       unsigned long ret = SHRINK_STOP;
+
+       if (mutex_trylock(&conf->cache_size_mutex)) {
+               ret = 0;
+               while (ret < sc->nr_to_scan) {
+                       if (drop_one_stripe(conf) == 0) {
+                               ret = SHRINK_STOP;
+                               break;
+                       }
+                       ret++;
+               }
+               mutex_unlock(&conf->cache_size_mutex);
        }
        return ret;
 }
@@ -6444,6 +6464,7 @@ static struct r5conf *setup_conf(struct mddev *mddev)
                goto abort;
        spin_lock_init(&conf->device_lock);
        seqcount_init(&conf->gen_lock);
+       mutex_init(&conf->cache_size_mutex);
        init_waitqueue_head(&conf->wait_for_quiescent);
        for (i = 0; i < NR_STRIPE_HASH_LOCKS; i++) {
                init_waitqueue_head(&conf->wait_for_stripe[i]);
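
The raid5 hunks above serialize every change of the stripe cache size behind the new conf->cache_size_mutex and, in the shrinker path, only take it with mutex_trylock() so memory reclaim never blocks on (or deadlocks with) a concurrent resize; if the lock is contended the scan simply reports SHRINK_STOP. A user-space analogue of that shrinker logic (a pthread mutex stands in for the kernel mutex, and drop_one() is hypothetical; build with: cc sketch.c -pthread):

#include <pthread.h>
#include <stdio.h>

#define SHRINK_STOP (~0UL)     /* same sentinel value the shrinker API uses */

static pthread_mutex_t cache_size_mutex = PTHREAD_MUTEX_INITIALIZER;
static int nr_cached = 100;

static int drop_one(void) { return nr_cached > 0 ? (nr_cached--, 1) : 0; }

static unsigned long cache_scan(unsigned long nr_to_scan)
{
    unsigned long ret = SHRINK_STOP;

    if (pthread_mutex_trylock(&cache_size_mutex) == 0) {
        ret = 0;
        while (ret < nr_to_scan) {
            if (!drop_one()) {          /* nothing left to free */
                ret = SHRINK_STOP;
                break;
            }
            ret++;
        }
        pthread_mutex_unlock(&cache_size_mutex);
    }
    return ret;                         /* SHRINK_STOP if the lock was contended */
}

int main(void)
{
    printf("freed: %lu\n", cache_scan(10));
    return 0;
}
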
index 02c3bf8fbfe7aa1b0a1c568605393dc3769c6b18..d05144278690ca9b8cb4ad933d6d23dc36b9b0f8 100644 (file)
@@ -482,7 +482,8 @@ struct r5conf {
         */
        int                     active_name;
        char                    cache_name[2][32];
-       struct kmem_cache               *slab_cache; /* for allocating stripes */
+       struct kmem_cache       *slab_cache; /* for allocating stripes */
+       struct mutex            cache_size_mutex; /* Protect changes to cache size */
 
        int                     seq_flush, seq_write;
        int                     quiesce;
index 4cb365d4ffdcc9c4e12cde82f46d411cb86c7a13..8b95eefb610b4097787a7363c6e43b94ce260efb 100644 (file)
@@ -38,6 +38,8 @@
     Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/module.h>
 #include <linux/kernel.h>
 #include <linux/fb.h>
@@ -1171,6 +1173,13 @@ static int ivtvfb_init_card(struct ivtv *itv)
 {
        int rc;
 
+#ifdef CONFIG_X86_64
+       if (pat_enabled()) {
+               pr_warn("ivtvfb needs PAT disabled, boot with nopat kernel parameter\n");
+               return -ENODEV;
+       }
+#endif
+
        if (itv->osd_info) {
                IVTVFB_ERR("Card %d already initialised\n", ivtvfb_card_id);
                return -EBUSY;
@@ -1265,12 +1274,6 @@ static int __init ivtvfb_init(void)
        int registered = 0;
        int err;
 
-#ifdef CONFIG_X86_64
-       if (WARN(pat_enabled(),
-                "ivtvfb needs PAT disabled, boot with nopat kernel parameter\n")) {
-               return -ENODEV;
-       }
-#endif
 
        if (ivtvfb_card_id < -1 || ivtvfb_card_id >= IVTV_MAX_CARDS) {
                printk(KERN_ERR "ivtvfb:  ivtvfb_card_id parameter is out of range (valid range: -1 - %d)\n",
index 8eb0a9500a90626b5605025309b1abf53e2ee7cb..e9513d651cd36b611920a2f6410c905b344fbf13 100644 (file)
@@ -682,7 +682,7 @@ int mei_register(struct mei_device *dev, struct device *parent)
        /* Fill in the data structures */
        devno = MKDEV(MAJOR(mei_devt), dev->minor);
        cdev_init(&dev->cdev, &mei_fops);
-       dev->cdev.owner = mei_fops.owner;
+       dev->cdev.owner = parent->driver->owner;
 
        /* Add the device */
        ret = cdev_add(&dev->cdev, devno, 1);
index 41e3bdb100611ab2f20836d0f8d7e3a16d9b068a..6dfdae3452d609fd07c82cce24f933771a093a70 100644 (file)
@@ -357,7 +357,7 @@ static void scif_p2p_freesg(struct scatterlist *sg)
 }
 
 static struct scatterlist *
-scif_p2p_setsg(void __iomem *va, int page_size, int page_cnt)
+scif_p2p_setsg(phys_addr_t pa, int page_size, int page_cnt)
 {
        struct scatterlist *sg;
        struct page *page;
@@ -368,16 +368,11 @@ scif_p2p_setsg(void __iomem *va, int page_size, int page_cnt)
                return NULL;
        sg_init_table(sg, page_cnt);
        for (i = 0; i < page_cnt; i++) {
-               page = vmalloc_to_page((void __force *)va);
-               if (!page)
-                       goto p2p_sg_err;
+               page = pfn_to_page(pa >> PAGE_SHIFT);
                sg_set_page(&sg[i], page, page_size, 0);
-               va += page_size;
+               pa += page_size;
        }
        return sg;
-p2p_sg_err:
-       kfree(sg);
-       return NULL;
 }
 
 /* Init p2p mappings required to access peerdev from scifdev */
@@ -395,14 +390,14 @@ scif_init_p2p_info(struct scif_dev *scifdev, struct scif_dev *peerdev)
        p2p = kzalloc(sizeof(*p2p), GFP_KERNEL);
        if (!p2p)
                return NULL;
-       p2p->ppi_sg[SCIF_PPI_MMIO] = scif_p2p_setsg(psdev->mmio->va,
+       p2p->ppi_sg[SCIF_PPI_MMIO] = scif_p2p_setsg(psdev->mmio->pa,
                                                    PAGE_SIZE, num_mmio_pages);
        if (!p2p->ppi_sg[SCIF_PPI_MMIO])
                goto free_p2p;
        p2p->sg_nentries[SCIF_PPI_MMIO] = num_mmio_pages;
        sg_page_shift = get_order(min(psdev->aper->len, (u64)(1 << 30)));
        num_aper_chunks = num_aper_pages >> (sg_page_shift - PAGE_SHIFT);
-       p2p->ppi_sg[SCIF_PPI_APER] = scif_p2p_setsg(psdev->aper->va,
+       p2p->ppi_sg[SCIF_PPI_APER] = scif_p2p_setsg(psdev->aper->pa,
                                                    1 << sg_page_shift,
                                                    num_aper_chunks);
        p2p->sg_nentries[SCIF_PPI_APER] = num_aper_chunks;
index c9c3d20b784b669bf130cffb716b8525470738fa..a1b820fcb2a6ff60093011d696761ba4ca15e584 100644 (file)
@@ -208,6 +208,8 @@ static ssize_t power_ro_lock_show(struct device *dev,
 
        ret = snprintf(buf, PAGE_SIZE, "%d\n", locked);
 
+       mmc_blk_put(md);
+
        return ret;
 }
 
index fd9a58e216a50ffde01e8e9fb7610ef09c417af3..6a0f9c79be2652bdf843c40692e7dd188d76567a 100644 (file)
@@ -779,6 +779,7 @@ config MMC_TOSHIBA_PCI
 
 config MMC_MTK
        tristate "MediaTek SD/MMC Card Interface support"
+       depends on HAS_DMA
        help
          This selects the MediaTek(R) Secure digital and Multimedia card Interface.
          If you have a machine with an integrated SD/MMC card reader, say Y or M here.
index b2b411da297b06e73441f8dd51c8bae0b004bcc0..4d120323689043f21c522b44389757f8391d45fc 100644 (file)
@@ -1062,9 +1062,14 @@ static void omap_hsmmc_do_irq(struct omap_hsmmc_host *host, int status)
 
                if (status & (CTO_EN | CCRC_EN))
                        end_cmd = 1;
+               if (host->data || host->response_busy) {
+                       end_trans = !end_cmd;
+                       host->response_busy = 0;
+               }
                if (status & (CTO_EN | DTO_EN))
                        hsmmc_command_incomplete(host, -ETIMEDOUT, end_cmd);
-               else if (status & (CCRC_EN | DCRC_EN))
+               else if (status & (CCRC_EN | DCRC_EN | DEB_EN | CEB_EN |
+                                  BADA_EN))
                        hsmmc_command_incomplete(host, -EILSEQ, end_cmd);
 
                if (status & ACE_EN) {
@@ -1081,10 +1086,6 @@ static void omap_hsmmc_do_irq(struct omap_hsmmc_host *host, int status)
                        }
                        dev_dbg(mmc_dev(host->mmc), "AC12 err: 0x%x\n", ac12);
                }
-               if (host->data || host->response_busy) {
-                       end_trans = !end_cmd;
-                       host->response_busy = 0;
-               }
        }
 
        OMAP_HSMMC_WRITE(host->base, STAT, status);
index faf0cb910c968abcce26c431422adff3e438f81d..c6b9f6492e1a2529b7f683686bc4d939229dba63 100644 (file)
@@ -581,13 +581,8 @@ static void esdhc_writeb_le(struct sdhci_host *host, u8 val, int reg)
 static unsigned int esdhc_pltfm_get_max_clock(struct sdhci_host *host)
 {
        struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
-       struct pltfm_imx_data *imx_data = pltfm_host->priv;
-       struct esdhc_platform_data *boarddata = &imx_data->boarddata;
 
-       if (boarddata->f_max && (boarddata->f_max < pltfm_host->clock))
-               return boarddata->f_max;
-       else
-               return pltfm_host->clock;
+       return pltfm_host->clock;
 }
 
 static unsigned int esdhc_pltfm_get_min_clock(struct sdhci_host *host)
@@ -878,34 +873,19 @@ static const struct sdhci_pltfm_data sdhci_esdhc_imx_pdata = {
 static int
 sdhci_esdhc_imx_probe_dt(struct platform_device *pdev,
                         struct sdhci_host *host,
-                        struct esdhc_platform_data *boarddata)
+                        struct pltfm_imx_data *imx_data)
 {
        struct device_node *np = pdev->dev.of_node;
-
-       if (!np)
-               return -ENODEV;
-
-       if (of_get_property(np, "non-removable", NULL))
-               boarddata->cd_type = ESDHC_CD_PERMANENT;
-
-       if (of_get_property(np, "fsl,cd-controller", NULL))
-               boarddata->cd_type = ESDHC_CD_CONTROLLER;
+       struct esdhc_platform_data *boarddata = &imx_data->boarddata;
+       int ret;
 
        if (of_get_property(np, "fsl,wp-controller", NULL))
                boarddata->wp_type = ESDHC_WP_CONTROLLER;
 
-       boarddata->cd_gpio = of_get_named_gpio(np, "cd-gpios", 0);
-       if (gpio_is_valid(boarddata->cd_gpio))
-               boarddata->cd_type = ESDHC_CD_GPIO;
-
        boarddata->wp_gpio = of_get_named_gpio(np, "wp-gpios", 0);
        if (gpio_is_valid(boarddata->wp_gpio))
                boarddata->wp_type = ESDHC_WP_GPIO;
 
-       of_property_read_u32(np, "bus-width", &boarddata->max_bus_width);
-
-       of_property_read_u32(np, "max-frequency", &boarddata->f_max);
-
        if (of_find_property(np, "no-1-8-v", NULL))
                boarddata->support_vsel = false;
        else
@@ -916,29 +896,119 @@ sdhci_esdhc_imx_probe_dt(struct platform_device *pdev,
 
        mmc_of_parse_voltage(np, &host->ocr_mask);
 
+       /* sdr50 and sdr104 needs work on 1.8v signal voltage */
+       if ((boarddata->support_vsel) && esdhc_is_usdhc(imx_data) &&
+           !IS_ERR(imx_data->pins_default)) {
+               imx_data->pins_100mhz = pinctrl_lookup_state(imx_data->pinctrl,
+                                               ESDHC_PINCTRL_STATE_100MHZ);
+               imx_data->pins_200mhz = pinctrl_lookup_state(imx_data->pinctrl,
+                                               ESDHC_PINCTRL_STATE_200MHZ);
+               if (IS_ERR(imx_data->pins_100mhz) ||
+                               IS_ERR(imx_data->pins_200mhz)) {
+                       dev_warn(mmc_dev(host->mmc),
+                               "could not get ultra high speed state, work on normal mode\n");
+                       /*
+                        * fall back to not supporting UHS by specifying the no-1.8v quirk
+                        */
+                       host->quirks2 |= SDHCI_QUIRK2_NO_1_8_V;
+               }
+       } else {
+               host->quirks2 |= SDHCI_QUIRK2_NO_1_8_V;
+       }
+
        /* call to generic mmc_of_parse to support additional capabilities */
-       return mmc_of_parse(host->mmc);
+       ret = mmc_of_parse(host->mmc);
+       if (ret)
+               return ret;
+
+       if (!IS_ERR_VALUE(mmc_gpio_get_cd(host->mmc)))
+               host->quirks &= ~SDHCI_QUIRK_BROKEN_CARD_DETECTION;
+
+       return 0;
 }
 #else
 static inline int
 sdhci_esdhc_imx_probe_dt(struct platform_device *pdev,
                         struct sdhci_host *host,
-                        struct esdhc_platform_data *boarddata)
+                        struct pltfm_imx_data *imx_data)
 {
        return -ENODEV;
 }
 #endif
 
+static int sdhci_esdhc_imx_probe_nondt(struct platform_device *pdev,
+                        struct sdhci_host *host,
+                        struct pltfm_imx_data *imx_data)
+{
+       struct esdhc_platform_data *boarddata = &imx_data->boarddata;
+       int err;
+
+       if (!host->mmc->parent->platform_data) {
+               dev_err(mmc_dev(host->mmc), "no board data!\n");
+               return -EINVAL;
+       }
+
+       imx_data->boarddata = *((struct esdhc_platform_data *)
+                               host->mmc->parent->platform_data);
+       /* write_protect */
+       if (boarddata->wp_type == ESDHC_WP_GPIO) {
+               err = mmc_gpio_request_ro(host->mmc, boarddata->wp_gpio);
+               if (err) {
+                       dev_err(mmc_dev(host->mmc),
+                               "failed to request write-protect gpio!\n");
+                       return err;
+               }
+               host->mmc->caps2 |= MMC_CAP2_RO_ACTIVE_HIGH;
+       }
+
+       /* card_detect */
+       switch (boarddata->cd_type) {
+       case ESDHC_CD_GPIO:
+               err = mmc_gpio_request_cd(host->mmc, boarddata->cd_gpio, 0);
+               if (err) {
+                       dev_err(mmc_dev(host->mmc),
+                               "failed to request card-detect gpio!\n");
+                       return err;
+               }
+               /* fall through */
+
+       case ESDHC_CD_CONTROLLER:
+               /* we have a working card_detect back */
+               host->quirks &= ~SDHCI_QUIRK_BROKEN_CARD_DETECTION;
+               break;
+
+       case ESDHC_CD_PERMANENT:
+               host->mmc->caps |= MMC_CAP_NONREMOVABLE;
+               break;
+
+       case ESDHC_CD_NONE:
+               break;
+       }
+
+       switch (boarddata->max_bus_width) {
+       case 8:
+               host->mmc->caps |= MMC_CAP_8_BIT_DATA | MMC_CAP_4_BIT_DATA;
+               break;
+       case 4:
+               host->mmc->caps |= MMC_CAP_4_BIT_DATA;
+               break;
+       case 1:
+       default:
+               host->quirks |= SDHCI_QUIRK_FORCE_1_BIT_DATA;
+               break;
+       }
+
+       return 0;
+}
+
 static int sdhci_esdhc_imx_probe(struct platform_device *pdev)
 {
        const struct of_device_id *of_id =
                        of_match_device(imx_esdhc_dt_ids, &pdev->dev);
        struct sdhci_pltfm_host *pltfm_host;
        struct sdhci_host *host;
-       struct esdhc_platform_data *boarddata;
        int err;
        struct pltfm_imx_data *imx_data;
-       bool dt = true;
 
        host = sdhci_pltfm_init(pdev, &sdhci_esdhc_imx_pdata, 0);
        if (IS_ERR(host))
@@ -1030,84 +1100,12 @@ static int sdhci_esdhc_imx_probe(struct platform_device *pdev)
        if (imx_data->socdata->flags & ESDHC_FLAG_ERR004536)
                host->quirks |= SDHCI_QUIRK_BROKEN_ADMA;
 
-       boarddata = &imx_data->boarddata;
-       if (sdhci_esdhc_imx_probe_dt(pdev, host, boarddata) < 0) {
-               if (!host->mmc->parent->platform_data) {
-                       dev_err(mmc_dev(host->mmc), "no board data!\n");
-                       err = -EINVAL;
-                       goto disable_clk;
-               }
-               imx_data->boarddata = *((struct esdhc_platform_data *)
-                                       host->mmc->parent->platform_data);
-               dt = false;
-       }
-       /* write_protect */
-       if (boarddata->wp_type == ESDHC_WP_GPIO && !dt) {
-               err = mmc_gpio_request_ro(host->mmc, boarddata->wp_gpio);
-               if (err) {
-                       dev_err(mmc_dev(host->mmc),
-                               "failed to request write-protect gpio!\n");
-                       goto disable_clk;
-               }
-               host->mmc->caps2 |= MMC_CAP2_RO_ACTIVE_HIGH;
-       }
-
-       /* card_detect */
-       switch (boarddata->cd_type) {
-       case ESDHC_CD_GPIO:
-               if (dt)
-                       break;
-               err = mmc_gpio_request_cd(host->mmc, boarddata->cd_gpio, 0);
-               if (err) {
-                       dev_err(mmc_dev(host->mmc),
-                               "failed to request card-detect gpio!\n");
-                       goto disable_clk;
-               }
-               /* fall through */
-
-       case ESDHC_CD_CONTROLLER:
-               /* we have a working card_detect back */
-               host->quirks &= ~SDHCI_QUIRK_BROKEN_CARD_DETECTION;
-               break;
-
-       case ESDHC_CD_PERMANENT:
-               host->mmc->caps |= MMC_CAP_NONREMOVABLE;
-               break;
-
-       case ESDHC_CD_NONE:
-               break;
-       }
-
-       switch (boarddata->max_bus_width) {
-       case 8:
-               host->mmc->caps |= MMC_CAP_8_BIT_DATA | MMC_CAP_4_BIT_DATA;
-               break;
-       case 4:
-               host->mmc->caps |= MMC_CAP_4_BIT_DATA;
-               break;
-       case 1:
-       default:
-               host->quirks |= SDHCI_QUIRK_FORCE_1_BIT_DATA;
-               break;
-       }
-
-       /* sdr50 and sdr104 needs work on 1.8v signal voltage */
-       if ((boarddata->support_vsel) && esdhc_is_usdhc(imx_data) &&
-           !IS_ERR(imx_data->pins_default)) {
-               imx_data->pins_100mhz = pinctrl_lookup_state(imx_data->pinctrl,
-                                               ESDHC_PINCTRL_STATE_100MHZ);
-               imx_data->pins_200mhz = pinctrl_lookup_state(imx_data->pinctrl,
-                                               ESDHC_PINCTRL_STATE_200MHZ);
-               if (IS_ERR(imx_data->pins_100mhz) ||
-                               IS_ERR(imx_data->pins_200mhz)) {
-                       dev_warn(mmc_dev(host->mmc),
-                               "could not get ultra high speed state, work on normal mode\n");
-                       /* fall back to not support uhs by specify no 1.8v quirk */
-                       host->quirks2 |= SDHCI_QUIRK2_NO_1_8_V;
-               }
-       } else {
-               host->quirks2 |= SDHCI_QUIRK2_NO_1_8_V;
-       }
+       if (of_id)
+               err = sdhci_esdhc_imx_probe_dt(pdev, host, imx_data);
+       else
+               err = sdhci_esdhc_imx_probe_nondt(pdev, host, imx_data);
+       if (err)
+               goto disable_clk;
 
        err = sdhci_add_host(host);
        if (err)
index 3497cfaf683c539edbd062199e03f5692e43bb26..a870c42731d7a4eea86b39e9e965d94a0603631d 100644 (file)
@@ -45,6 +45,6 @@
 #define ESDHC_DMA_SYSCTL       0x40c
 #define ESDHC_DMA_SNOOP                0x00000040
 
-#define ESDHC_HOST_CONTROL_RES 0x05
+#define ESDHC_HOST_CONTROL_RES 0x01
 
 #endif /* _DRIVERS_MMC_SDHCI_ESDHC_H */
index 9cd5fc62f130871aaa6390ac8e25b0f0f2fedf03..946d37f94a31b29e8739304ec71cf7b1468eead6 100644 (file)
@@ -411,6 +411,7 @@ static int sdhci_pxav3_probe(struct platform_device *pdev)
                        goto err_of_parse;
                sdhci_get_of_property(pdev);
                pdata = pxav3_get_mmc_pdata(dev);
+               pdev->dev.platform_data = pdata;
        } else if (pdata) {
                /* on-chip device */
                if (pdata->flags & PXA_FLAG_CARD_PERMANENT)
index bc1445238fb3053b8d2a03a7bb35a25ddfff4b61..1dbe932320309fc87f75a40cc2552ba5a784d5bd 100644 (file)
@@ -2866,6 +2866,7 @@ int sdhci_add_host(struct sdhci_host *host)
        u32 max_current_caps;
        unsigned int ocr_avail;
        unsigned int override_timeout_clk;
+       u32 max_clk;
        int ret;
 
        WARN_ON(host == NULL);
@@ -2978,8 +2979,11 @@ int sdhci_add_host(struct sdhci_host *host)
                                                      GFP_KERNEL);
                host->align_buffer = kmalloc(host->align_buffer_sz, GFP_KERNEL);
                if (!host->adma_table || !host->align_buffer) {
-                       dma_free_coherent(mmc_dev(mmc), host->adma_table_sz,
-                                         host->adma_table, host->adma_addr);
+                       if (host->adma_table)
+                               dma_free_coherent(mmc_dev(mmc),
+                                                 host->adma_table_sz,
+                                                 host->adma_table,
+                                                 host->adma_addr);
                        kfree(host->align_buffer);
                        pr_warn("%s: Unable to allocate ADMA buffers - falling back to standard DMA\n",
                                mmc_hostname(mmc));
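
The hunk above fixes the ADMA fallback path: when either the DMA-coherent descriptor table or the kmalloc'd alignment buffer fails to allocate, dma_free_coherent() must only be called for a table that was actually obtained (kfree(NULL) is fine, a NULL table plus a never-initialised DMA handle is not). A hedged user-space analogue, with fake_dma_free() standing in for the allocator that cannot take NULL:

#include <assert.h>
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-in for dma_free_coherent(): unlike free()/kfree(),
 * it must never be handed a buffer that was not successfully allocated. */
static void fake_dma_free(void *buf)
{
    assert(buf != NULL);
    free(buf);
}

int main(void)
{
    void *adma_table   = malloc(4096);   /* either allocation may fail */
    void *align_buffer = malloc(64);

    if (!adma_table || !align_buffer) {
        if (adma_table)                  /* guard added by the hunk above */
            fake_dma_free(adma_table);
        free(align_buffer);              /* free(NULL) is harmless */
        fprintf(stderr, "falling back to standard DMA (sketch)\n");
        return 1;
    }

    /* ... use both buffers ... */
    fake_dma_free(adma_table);
    free(align_buffer);
    return 0;
}
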
@@ -3047,18 +3051,22 @@ int sdhci_add_host(struct sdhci_host *host)
         * Set host parameters.
         */
        mmc->ops = &sdhci_ops;
-       mmc->f_max = host->max_clk;
+       max_clk = host->max_clk;
+
        if (host->ops->get_min_clock)
                mmc->f_min = host->ops->get_min_clock(host);
        else if (host->version >= SDHCI_SPEC_300) {
                if (host->clk_mul) {
                        mmc->f_min = (host->max_clk * host->clk_mul) / 1024;
-                       mmc->f_max = host->max_clk * host->clk_mul;
+                       max_clk = host->max_clk * host->clk_mul;
                } else
                        mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_300;
        } else
                mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_200;
 
+       if (!mmc->f_max || (mmc->f_max && (mmc->f_max > max_clk)))
+               mmc->f_max = max_clk;
+
        if (!(host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK)) {
                host->timeout_clk = (caps[0] & SDHCI_TIMEOUT_CLK_MASK) >>
                                        SDHCI_TIMEOUT_CLK_SHIFT;
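
The last sdhci.c hunk above stops overwriting mmc->f_max unconditionally: the field may already carry a "max-frequency" limit parsed from DT, so it is now only filled in when unset and otherwise capped to what the controller clock (including the SDHCI 3.0 clock multiplier) can deliver. A small sketch of that selection, using assumed example frequencies and a hypothetical helper name:

#include <stdio.h>

/* Keep a DT-provided limit if there is one, but never advertise more
 * than the host clock can actually deliver. */
static unsigned int pick_f_max(unsigned int dt_f_max, unsigned int max_clk)
{
    if (!dt_f_max || dt_f_max > max_clk)
        return max_clk;
    return dt_f_max;
}

int main(void)
{
    printf("%u\n", pick_f_max(0, 200000000));          /* no DT limit -> host max */
    printf("%u\n", pick_f_max(50000000, 200000000));   /* DT limit honoured       */
    printf("%u\n", pick_f_max(400000000, 200000000));  /* DT limit capped         */
    return 0;
}
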
index a5233422f9dc5f770b98d0555560fb87a5f83ff0..7384455792bfb629ed6a2b9a5dbe40d1f58f2627 100644 (file)
@@ -458,10 +458,15 @@ static void nd_region_notify_driver_action(struct nvdimm_bus *nvdimm_bus,
                nvdimm_bus_unlock(dev);
        }
        if (is_nd_btt(dev) && probe) {
+               struct nd_btt *nd_btt = to_nd_btt(dev);
+
                nd_region = to_nd_region(dev->parent);
                nvdimm_bus_lock(dev);
                if (nd_region->btt_seed == dev)
                        nd_region_create_btt_seed(nd_region);
+               if (nd_region->ns_seed == &nd_btt->ndns->dev &&
+                               is_nd_blk(dev->parent))
+                       nd_region_create_blk_seed(nd_region);
                nvdimm_bus_unlock(dev);
        }
 }
index 8df1b1777745e8e0dfb46611197a8238c929af4e..59bb8556e43ac8f975485465ca8d1b0fdfeba3c5 100644 (file)
@@ -47,7 +47,7 @@ config OF_DYNAMIC
 
 config OF_ADDRESS
        def_bool y
-       depends on !SPARC
+       depends on !SPARC && HAS_IOMEM
        select OF_ADDRESS_PCI if PCI
 
 config OF_ADDRESS_PCI
index 18016341d5a91656b29220b414faddae48188648..9f71770b6226f9ed3d4ceab7bb95ea3f4ba55b6c 100644 (file)
@@ -979,7 +979,6 @@ static struct platform_driver unittest_driver = {
        .remove                 = unittest_remove,
        .driver = {
                .name           = "unittest",
-               .owner          = THIS_MODULE,
                .of_match_table = of_match_ptr(unittest_match),
        },
 };
@@ -1666,7 +1665,6 @@ static const struct i2c_device_id unittest_i2c_dev_id[] = {
 static struct i2c_driver unittest_i2c_dev_driver = {
        .driver = {
                .name = "unittest-i2c-dev",
-               .owner = THIS_MODULE,
        },
        .probe = unittest_i2c_dev_probe,
        .remove = unittest_i2c_dev_remove,
@@ -1761,7 +1759,6 @@ static const struct i2c_device_id unittest_i2c_mux_id[] = {
 static struct i2c_driver unittest_i2c_mux_driver = {
        .driver = {
                .name = "unittest-i2c-mux",
-               .owner = THIS_MODULE,
        },
        .probe = unittest_i2c_mux_probe,
        .remove = unittest_i2c_mux_remove,
index 8067f54ce050a6c8cf3a4c18d9b4ea560f017d94..5ce5ef211bdbdf575752e150edc6e26cc683e82d 100644 (file)
@@ -891,8 +891,10 @@ parport_register_dev_model(struct parport *port, const char *name,
        par_dev->dev.release = free_pardevice;
        par_dev->devmodel = true;
        ret = device_register(&par_dev->dev);
-       if (ret)
-               goto err_put_dev;
+       if (ret) {
+               put_device(&par_dev->dev);
+               goto err_put_port;
+       }
 
        /* Chain this onto the list */
        par_dev->prev = NULL;
@@ -907,7 +909,8 @@ parport_register_dev_model(struct parport *port, const char *name,
                        spin_unlock(&port->physport->pardevice_lock);
                        pr_debug("%s: cannot grant exclusive access for device %s\n",
                                 port->name, name);
-                       goto err_put_dev;
+                       device_unregister(&par_dev->dev);
+                       goto err_put_port;
                }
                port->flags |= PARPORT_FLAG_EXCL;
        }
@@ -938,8 +941,6 @@ parport_register_dev_model(struct parport *port, const char *name,
 
        return par_dev;
 
-err_put_dev:
-       put_device(&par_dev->dev);
 err_free_devname:
        kfree(devname);
 err_free_par_dev:
index c0e6ede3e27d7cd07d92c247af58367e1b6d85a1..6b8dd162f644214ba24fd666b5aa6c94467d964d 100644 (file)
@@ -56,6 +56,7 @@ config PHY_EXYNOS_MIPI_VIDEO
 
 config PHY_PXA_28NM_HSIC
        tristate "Marvell USB HSIC 28nm PHY Driver"
+       depends on HAS_IOMEM
        select GENERIC_PHY
        help
          Enable this to support Marvell USB HSIC PHY driver for Marvell
@@ -66,6 +67,7 @@ config PHY_PXA_28NM_HSIC
 
 config PHY_PXA_28NM_USB2
        tristate "Marvell USB 2.0 28nm PHY Driver"
+       depends on HAS_IOMEM
        select GENERIC_PHY
        help
          Enable this to support Marvell USB 2.0 PHY driver for Marvell
index c6fc95b530835569b040f193f6cf051f7198f7b9..335e06d66ed9a5100cbdda511e6c206238651814 100644 (file)
 
 static const u32 phy_berlin_pll_dividers[] = {
        /* Berlin 2 */
-       CLK_REF_DIV(0xc) | FEEDBACK_CLK_DIV(0x54),
-       /* Berlin 2CD */
        CLK_REF_DIV(0x6) | FEEDBACK_CLK_DIV(0x55),
+       /* Berlin 2CD/Q */
+       CLK_REF_DIV(0xc) | FEEDBACK_CLK_DIV(0x54),
 };
 
 struct phy_berlin_usb_priv {
index 53f295c1bab1a72108d84b955714b8aef4d7e951..3510b81db3faabcda59a7148e31a32ce403805a5 100644 (file)
@@ -28,7 +28,6 @@
 #include <linux/delay.h>
 #include <linux/phy/omap_control_phy.h>
 #include <linux/of_platform.h>
-#include <linux/spinlock.h>
 
 #define        PLL_STATUS              0x00000004
 #define        PLL_GO                  0x00000008
@@ -83,10 +82,6 @@ struct ti_pipe3 {
        struct clk              *refclk;
        struct clk              *div_clk;
        struct pipe3_dpll_map   *dpll_map;
-       bool                    enabled;
-       spinlock_t              lock;   /* serialize clock enable/disable */
-       /* the below flag is needed specifically for SATA */
-       bool                    refclk_enabled;
 };
 
 static struct pipe3_dpll_map dpll_map_usb[] = {
@@ -137,6 +132,9 @@ static struct pipe3_dpll_params *ti_pipe3_get_dpll_params(struct ti_pipe3 *phy)
        return NULL;
 }
 
+static int ti_pipe3_enable_clocks(struct ti_pipe3 *phy);
+static void ti_pipe3_disable_clocks(struct ti_pipe3 *phy);
+
 static int ti_pipe3_power_off(struct phy *x)
 {
        struct ti_pipe3 *phy = phy_get_drvdata(x);
@@ -217,6 +215,7 @@ static int ti_pipe3_init(struct phy *x)
        u32 val;
        int ret = 0;
 
+       ti_pipe3_enable_clocks(phy);
        /*
         * Set pcie_pcs register to 0x96 for proper functioning of phy
         * as recommended in AM572x TRM SPRUHZ6, section 18.5.2.2, table
@@ -250,33 +249,35 @@ static int ti_pipe3_exit(struct phy *x)
        u32 val;
        unsigned long timeout;
 
-       /* SATA DPLL can't be powered down due to Errata i783 and PCIe
-        * does not have internal DPLL
-        */
-       if (of_device_is_compatible(phy->dev->of_node, "ti,phy-pipe3-sata") ||
-           of_device_is_compatible(phy->dev->of_node, "ti,phy-pipe3-pcie"))
+       /* SATA DPLL can't be powered down due to Errata i783 */
+       if (of_device_is_compatible(phy->dev->of_node, "ti,phy-pipe3-sata"))
                return 0;
 
-       /* Put DPLL in IDLE mode */
-       val = ti_pipe3_readl(phy->pll_ctrl_base, PLL_CONFIGURATION2);
-       val |= PLL_IDLE;
-       ti_pipe3_writel(phy->pll_ctrl_base, PLL_CONFIGURATION2, val);
-
-       /* wait for LDO and Oscillator to power down */
-       timeout = jiffies + msecs_to_jiffies(PLL_IDLE_TIME);
-       do {
-               cpu_relax();
-               val = ti_pipe3_readl(phy->pll_ctrl_base, PLL_STATUS);
-               if ((val & PLL_TICOPWDN) && (val & PLL_LDOPWDN))
-                       break;
-       } while (!time_after(jiffies, timeout));
+       /* PCIe doesn't have internal DPLL */
+       if (!of_device_is_compatible(phy->dev->of_node, "ti,phy-pipe3-pcie")) {
+               /* Put DPLL in IDLE mode */
+               val = ti_pipe3_readl(phy->pll_ctrl_base, PLL_CONFIGURATION2);
+               val |= PLL_IDLE;
+               ti_pipe3_writel(phy->pll_ctrl_base, PLL_CONFIGURATION2, val);
 
-       if (!(val & PLL_TICOPWDN) || !(val & PLL_LDOPWDN)) {
-               dev_err(phy->dev, "Failed to power down: PLL_STATUS 0x%x\n",
-                       val);
-               return -EBUSY;
+               /* wait for LDO and Oscillator to power down */
+               timeout = jiffies + msecs_to_jiffies(PLL_IDLE_TIME);
+               do {
+                       cpu_relax();
+                       val = ti_pipe3_readl(phy->pll_ctrl_base, PLL_STATUS);
+                       if ((val & PLL_TICOPWDN) && (val & PLL_LDOPWDN))
+                               break;
+               } while (!time_after(jiffies, timeout));
+
+               if (!(val & PLL_TICOPWDN) || !(val & PLL_LDOPWDN)) {
+                       dev_err(phy->dev, "Failed to power down: PLL_STATUS 0x%x\n",
+                               val);
+                       return -EBUSY;
+               }
        }
 
+       ti_pipe3_disable_clocks(phy);
+
        return 0;
 }
 static struct phy_ops ops = {
@@ -306,7 +307,6 @@ static int ti_pipe3_probe(struct platform_device *pdev)
                return -ENOMEM;
 
        phy->dev                = &pdev->dev;
-       spin_lock_init(&phy->lock);
 
        if (!of_device_is_compatible(node, "ti,phy-pipe3-pcie")) {
                match = of_match_device(ti_pipe3_id_table, &pdev->dev);
@@ -402,6 +402,10 @@ static int ti_pipe3_probe(struct platform_device *pdev)
 
        platform_set_drvdata(pdev, phy);
        pm_runtime_enable(phy->dev);
+       /* Prevent auto-disable of refclk for SATA PHY due to Errata i783 */
+       if (of_device_is_compatible(node, "ti,phy-pipe3-sata"))
+               if (!IS_ERR(phy->refclk))
+                       clk_prepare_enable(phy->refclk);
 
        generic_phy = devm_phy_create(phy->dev, NULL, &ops);
        if (IS_ERR(generic_phy))
@@ -413,63 +417,33 @@ static int ti_pipe3_probe(struct platform_device *pdev)
        if (IS_ERR(phy_provider))
                return PTR_ERR(phy_provider);
 
-       pm_runtime_get(&pdev->dev);
-
        return 0;
 }
 
 static int ti_pipe3_remove(struct platform_device *pdev)
 {
-       if (!pm_runtime_suspended(&pdev->dev))
-               pm_runtime_put(&pdev->dev);
        pm_runtime_disable(&pdev->dev);
 
        return 0;
 }
 
-#ifdef CONFIG_PM
-static int ti_pipe3_enable_refclk(struct ti_pipe3 *phy)
+static int ti_pipe3_enable_clocks(struct ti_pipe3 *phy)
 {
-       if (!IS_ERR(phy->refclk) && !phy->refclk_enabled) {
-               int ret;
+       int ret = 0;
 
+       if (!IS_ERR(phy->refclk)) {
                ret = clk_prepare_enable(phy->refclk);
                if (ret) {
                        dev_err(phy->dev, "Failed to enable refclk %d\n", ret);
                        return ret;
                }
-               phy->refclk_enabled = true;
        }
 
-       return 0;
-}
-
-static void ti_pipe3_disable_refclk(struct ti_pipe3 *phy)
-{
-       if (!IS_ERR(phy->refclk))
-               clk_disable_unprepare(phy->refclk);
-
-       phy->refclk_enabled = false;
-}
-
-static int ti_pipe3_enable_clocks(struct ti_pipe3 *phy)
-{
-       int ret = 0;
-       unsigned long flags;
-
-       spin_lock_irqsave(&phy->lock, flags);
-       if (phy->enabled)
-               goto err1;
-
-       ret = ti_pipe3_enable_refclk(phy);
-       if (ret)
-               goto err1;
-
        if (!IS_ERR(phy->wkupclk)) {
                ret = clk_prepare_enable(phy->wkupclk);
                if (ret) {
                        dev_err(phy->dev, "Failed to enable wkupclk %d\n", ret);
-                       goto err2;
+                       goto disable_refclk;
                }
        }
 
@@ -477,96 +451,33 @@ static int ti_pipe3_enable_clocks(struct ti_pipe3 *phy)
                ret = clk_prepare_enable(phy->div_clk);
                if (ret) {
                        dev_err(phy->dev, "Failed to enable div_clk %d\n", ret);
-                       goto err3;
+                       goto disable_wkupclk;
                }
        }
 
-       phy->enabled = true;
-       spin_unlock_irqrestore(&phy->lock, flags);
        return 0;
 
-err3:
+disable_wkupclk:
        if (!IS_ERR(phy->wkupclk))
                clk_disable_unprepare(phy->wkupclk);
 
-err2:
+disable_refclk:
        if (!IS_ERR(phy->refclk))
                clk_disable_unprepare(phy->refclk);
 
-       ti_pipe3_disable_refclk(phy);
-err1:
-       spin_unlock_irqrestore(&phy->lock, flags);
        return ret;
 }
 
 static void ti_pipe3_disable_clocks(struct ti_pipe3 *phy)
 {
-       unsigned long flags;
-
-       spin_lock_irqsave(&phy->lock, flags);
-       if (!phy->enabled) {
-               spin_unlock_irqrestore(&phy->lock, flags);
-               return;
-       }
-
        if (!IS_ERR(phy->wkupclk))
                clk_disable_unprepare(phy->wkupclk);
-       /* Don't disable refclk for SATA PHY due to Errata i783 */
-       if (!of_device_is_compatible(phy->dev->of_node, "ti,phy-pipe3-sata"))
-               ti_pipe3_disable_refclk(phy);
+       if (!IS_ERR(phy->refclk))
+               clk_disable_unprepare(phy->refclk);
        if (!IS_ERR(phy->div_clk))
                clk_disable_unprepare(phy->div_clk);
-       phy->enabled = false;
-       spin_unlock_irqrestore(&phy->lock, flags);
-}
-
-static int ti_pipe3_runtime_suspend(struct device *dev)
-{
-       struct ti_pipe3 *phy = dev_get_drvdata(dev);
-
-       ti_pipe3_disable_clocks(phy);
-       return 0;
 }
 
-static int ti_pipe3_runtime_resume(struct device *dev)
-{
-       struct ti_pipe3 *phy = dev_get_drvdata(dev);
-       int ret = 0;
-
-       ret = ti_pipe3_enable_clocks(phy);
-       return ret;
-}
-
-static int ti_pipe3_suspend(struct device *dev)
-{
-       struct ti_pipe3 *phy = dev_get_drvdata(dev);
-
-       ti_pipe3_disable_clocks(phy);
-       return 0;
-}
-
-static int ti_pipe3_resume(struct device *dev)
-{
-       struct ti_pipe3 *phy = dev_get_drvdata(dev);
-       int ret;
-
-       ret = ti_pipe3_enable_clocks(phy);
-       if (ret)
-               return ret;
-
-       pm_runtime_disable(dev);
-       pm_runtime_set_active(dev);
-       pm_runtime_enable(dev);
-       return 0;
-}
-#endif
-
-static const struct dev_pm_ops ti_pipe3_pm_ops = {
-       SET_RUNTIME_PM_OPS(ti_pipe3_runtime_suspend,
-                          ti_pipe3_runtime_resume, NULL)
-       SET_SYSTEM_SLEEP_PM_OPS(ti_pipe3_suspend, ti_pipe3_resume)
-};
-
 static const struct of_device_id ti_pipe3_id_table[] = {
        {
                .compatible = "ti,phy-usb3",
@@ -592,7 +503,6 @@ static struct platform_driver ti_pipe3_driver = {
        .remove         = ti_pipe3_remove,
        .driver         = {
                .name   = "ti-pipe3",
-               .pm     = &ti_pipe3_pm_ops,
                .of_match_table = ti_pipe3_id_table,
        },
 };
index 832932bdc977d21e84f2f8f563ce8aa937c64bc1..7fd4f511d78fd6bdc685c857e280f1581d20b035 100644 (file)
@@ -130,7 +130,7 @@ struct pm800_regulators {
                .owner  = THIS_MODULE,                                  \
                .n_voltages = ARRAY_SIZE(ldo_volt_table),               \
                .vsel_reg       = PM800_##vreg##_VOUT,                  \
-               .vsel_mask      = 0x1f,                                 \
+               .vsel_mask      = 0xf,                                  \
                .enable_reg     = PM800_##ereg,                         \
                .enable_mask    = 1 << (ebit),                          \
                .volt_table     = ldo_volt_table,                       \
index c9f72019bd689afbb4e51528932689dc097b191b..78387a6cbae59e40a6fb05fc255647cacfe3209b 100644 (file)
@@ -109,6 +109,7 @@ static int _regulator_do_set_voltage(struct regulator_dev *rdev,
 static struct regulator *create_regulator(struct regulator_dev *rdev,
                                          struct device *dev,
                                          const char *supply_name);
+static void _regulator_put(struct regulator *regulator);
 
 static const char *rdev_get_name(struct regulator_dev *rdev)
 {
@@ -1105,6 +1106,9 @@ static int set_supply(struct regulator_dev *rdev,
 
        rdev_info(rdev, "supplied by %s\n", rdev_get_name(supply_rdev));
 
+       if (!try_module_get(supply_rdev->owner))
+               return -ENODEV;
+
        rdev->supply = create_regulator(supply_rdev, &rdev->dev, "SUPPLY");
        if (rdev->supply == NULL) {
                err = -ENOMEM;
@@ -1381,9 +1385,13 @@ static int regulator_resolve_supply(struct regulator_dev *rdev)
        }
 
        if (!r) {
-               dev_err(dev, "Failed to resolve %s-supply for %s\n",
-                       rdev->supply_name, rdev->desc->name);
-               return -EPROBE_DEFER;
+               if (have_full_constraints()) {
+                       r = dummy_regulator_rdev;
+               } else {
+                       dev_err(dev, "Failed to resolve %s-supply for %s\n",
+                               rdev->supply_name, rdev->desc->name);
+                       return -EPROBE_DEFER;
+               }
        }
 
        /* Recursively resolve the supply of the supply */
@@ -1398,8 +1406,11 @@ static int regulator_resolve_supply(struct regulator_dev *rdev)
        /* Cascade always-on state to supply */
        if (_regulator_is_enabled(rdev)) {
                ret = regulator_enable(rdev->supply);
-               if (ret < 0)
+               if (ret < 0) {
+                       if (rdev->supply)
+                               _regulator_put(rdev->supply);
                        return ret;
+               }
        }
 
        return 0;
index 6f2bdad8b4d8fd6ce2e552d4c44c8cf4a3ee60ba..e94ddcf97722331e3cdde0645697169a35ba8c12 100644 (file)
@@ -450,7 +450,7 @@ static struct max8973_regulator_platform_data *max8973_parse_dt(
                pdata->control_flags  |= MAX8973_CONTROL_FREQ_SHIFT_9PER_ENABLE;
 
        if (of_property_read_bool(np, "maxim,enable-bias-control"))
-               pdata->control_flags  |= MAX8973_BIAS_ENABLE;
+               pdata->control_flags  |= MAX8973_CONTROL_BIAS_ENABLE;
 
        return pdata;
 }
index 326ffb55337117d6dc2a96d7fce9cea773e18a14..72fc3c32db49828ce6a2a9256ecabd01bdb034e8 100644 (file)
@@ -34,6 +34,8 @@
 #include <linux/mfd/samsung/s2mps14.h>
 #include <linux/mfd/samsung/s2mpu02.h>
 
+/* The highest number of possible regulators for supported devices. */
+#define S2MPS_REGULATOR_MAX            S2MPS13_REGULATOR_MAX
 struct s2mps11_info {
        unsigned int rdev_num;
        int ramp_delay2;
@@ -49,7 +51,7 @@ struct s2mps11_info {
         * One bit for each S2MPS13/S2MPS14/S2MPU02 regulator whether
         * the suspend mode was enabled.
         */
-       unsigned long long s2mps14_suspend_state:50;
+       DECLARE_BITMAP(suspend_state, S2MPS_REGULATOR_MAX);
 
        /* Array of size rdev_num with GPIO-s for external sleep control */
        int *ext_control_gpio;
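
The s2mps11 hunk above replaces the 50-bit wide bitfield with DECLARE_BITMAP(suspend_state, S2MPS_REGULATOR_MAX) accessed through set_bit()/test_bit(), which keeps working once a supported chip exposes more than 50 regulators. A user-space approximation of those helpers, just to show the mechanics (the macro and bit helpers below are simplified stand-ins, and the S2MPS_REGULATOR_MAX value is assumed):

#include <stdio.h>

#define BITS_PER_LONG       (8 * sizeof(unsigned long))
#define DECLARE_BITMAP(name, bits) \
        unsigned long name[((bits) + BITS_PER_LONG - 1) / BITS_PER_LONG]

static void set_bit(unsigned int nr, unsigned long *map)
{
    map[nr / BITS_PER_LONG] |= 1UL << (nr % BITS_PER_LONG);
}

static int test_bit(unsigned int nr, const unsigned long *map)
{
    return (map[nr / BITS_PER_LONG] >> (nr % BITS_PER_LONG)) & 1;
}

#define S2MPS_REGULATOR_MAX 64   /* assumed upper bound, for the sketch only */

int main(void)
{
    DECLARE_BITMAP(suspend_state, S2MPS_REGULATOR_MAX) = { 0 };

    set_bit(57, suspend_state);   /* would not have fit in the old 50-bit field */
    printf("%d %d\n", test_bit(57, suspend_state), test_bit(3, suspend_state));
    return 0;
}
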
@@ -500,7 +502,7 @@ static int s2mps14_regulator_enable(struct regulator_dev *rdev)
        switch (s2mps11->dev_type) {
        case S2MPS13X:
        case S2MPS14X:
-               if (s2mps11->s2mps14_suspend_state & (1 << rdev_get_id(rdev)))
+               if (test_bit(rdev_get_id(rdev), s2mps11->suspend_state))
                        val = S2MPS14_ENABLE_SUSPEND;
                else if (gpio_is_valid(s2mps11->ext_control_gpio[rdev_get_id(rdev)]))
                        val = S2MPS14_ENABLE_EXT_CONTROL;
@@ -508,7 +510,7 @@ static int s2mps14_regulator_enable(struct regulator_dev *rdev)
                        val = rdev->desc->enable_mask;
                break;
        case S2MPU02:
-               if (s2mps11->s2mps14_suspend_state & (1 << rdev_get_id(rdev)))
+               if (test_bit(rdev_get_id(rdev), s2mps11->suspend_state))
                        val = S2MPU02_ENABLE_SUSPEND;
                else
                        val = rdev->desc->enable_mask;
@@ -562,7 +564,7 @@ static int s2mps14_regulator_set_suspend_disable(struct regulator_dev *rdev)
        if (ret < 0)
                return ret;
 
-       s2mps11->s2mps14_suspend_state |= (1 << rdev_get_id(rdev));
+       set_bit(rdev_get_id(rdev), s2mps11->suspend_state);
        /*
         * Don't enable suspend mode if regulator is already disabled because
         * this would effectively for a short time turn on the regulator after
@@ -960,18 +962,22 @@ static int s2mps11_pmic_probe(struct platform_device *pdev)
        case S2MPS11X:
                s2mps11->rdev_num = ARRAY_SIZE(s2mps11_regulators);
                regulators = s2mps11_regulators;
+               BUILD_BUG_ON(S2MPS_REGULATOR_MAX < s2mps11->rdev_num);
                break;
        case S2MPS13X:
                s2mps11->rdev_num = ARRAY_SIZE(s2mps13_regulators);
                regulators = s2mps13_regulators;
+               BUILD_BUG_ON(S2MPS_REGULATOR_MAX < s2mps11->rdev_num);
                break;
        case S2MPS14X:
                s2mps11->rdev_num = ARRAY_SIZE(s2mps14_regulators);
                regulators = s2mps14_regulators;
+               BUILD_BUG_ON(S2MPS_REGULATOR_MAX < s2mps11->rdev_num);
                break;
        case S2MPU02:
                s2mps11->rdev_num = ARRAY_SIZE(s2mpu02_regulators);
                regulators = s2mpu02_regulators;
+               BUILD_BUG_ON(S2MPS_REGULATOR_MAX < s2mps11->rdev_num);
                break;
        default:
                dev_err(&pdev->dev, "Invalid device type: %u\n",
index 95bccfd3f169a8a3a167368f972cbbd08de7c869..e5225ad9c5b12fdd9d723704d66eb9ec2ff5b734 100644 (file)
@@ -2,7 +2,7 @@
 # Makefile for the S/390 specific device drivers
 #
 
-obj-y += cio/ block/ char/ crypto/ net/ scsi/ kvm/
+obj-y += cio/ block/ char/ crypto/ net/ scsi/ virtio/
 
 drivers-y += drivers/s390/built-in.o
 
diff --git a/drivers/s390/kvm/Makefile b/drivers/s390/kvm/Makefile
deleted file mode 100644 (file)
index 241891a..0000000
+++ /dev/null
@@ -1,9 +0,0 @@
-# Makefile for kvm guest drivers on s390
-#
-# Copyright IBM Corp. 2008
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License (version 2 only)
-# as published by the Free Software Foundation.
-
-obj-$(CONFIG_S390_GUEST) += kvm_virtio.o virtio_ccw.o
diff --git a/drivers/s390/kvm/kvm_virtio.c b/drivers/s390/kvm/kvm_virtio.c
deleted file mode 100644 (file)
index 53fb975..0000000
+++ /dev/null
@@ -1,510 +0,0 @@
-/*
- * virtio for kvm on s390
- *
- * Copyright IBM Corp. 2008
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License (version 2 only)
- * as published by the Free Software Foundation.
- *
- *    Author(s): Christian Borntraeger <borntraeger@de.ibm.com>
- */
-
-#include <linux/kernel_stat.h>
-#include <linux/init.h>
-#include <linux/bootmem.h>
-#include <linux/err.h>
-#include <linux/virtio.h>
-#include <linux/virtio_config.h>
-#include <linux/slab.h>
-#include <linux/virtio_console.h>
-#include <linux/interrupt.h>
-#include <linux/virtio_ring.h>
-#include <linux/export.h>
-#include <linux/pfn.h>
-#include <asm/io.h>
-#include <asm/kvm_para.h>
-#include <asm/kvm_virtio.h>
-#include <asm/sclp.h>
-#include <asm/setup.h>
-#include <asm/irq.h>
-
-#define VIRTIO_SUBCODE_64 0x0D00
-
-/*
- * The pointer to our (page) of device descriptions.
- */
-static void *kvm_devices;
-static struct work_struct hotplug_work;
-
-struct kvm_device {
-       struct virtio_device vdev;
-       struct kvm_device_desc *desc;
-};
-
-#define to_kvmdev(vd) container_of(vd, struct kvm_device, vdev)
-
-/*
- * memory layout:
- * - kvm_device_descriptor
- *        struct kvm_device_desc
- * - configuration
- *        struct kvm_vqconfig
- * - feature bits
- * - config space
- */
-static struct kvm_vqconfig *kvm_vq_config(const struct kvm_device_desc *desc)
-{
-       return (struct kvm_vqconfig *)(desc + 1);
-}
-
-static u8 *kvm_vq_features(const struct kvm_device_desc *desc)
-{
-       return (u8 *)(kvm_vq_config(desc) + desc->num_vq);
-}
-
-static u8 *kvm_vq_configspace(const struct kvm_device_desc *desc)
-{
-       return kvm_vq_features(desc) + desc->feature_len * 2;
-}
-
-/*
- * The total size of the config page used by this device (incl. desc)
- */
-static unsigned desc_size(const struct kvm_device_desc *desc)
-{
-       return sizeof(*desc)
-               + desc->num_vq * sizeof(struct kvm_vqconfig)
-               + desc->feature_len * 2
-               + desc->config_len;
-}
-
-/* This gets the device's feature bits. */
-static u64 kvm_get_features(struct virtio_device *vdev)
-{
-       unsigned int i;
-       u32 features = 0;
-       struct kvm_device_desc *desc = to_kvmdev(vdev)->desc;
-       u8 *in_features = kvm_vq_features(desc);
-
-       for (i = 0; i < min(desc->feature_len * 8, 32); i++)
-               if (in_features[i / 8] & (1 << (i % 8)))
-                       features |= (1 << i);
-       return features;
-}
-
-static int kvm_finalize_features(struct virtio_device *vdev)
-{
-       unsigned int i, bits;
-       struct kvm_device_desc *desc = to_kvmdev(vdev)->desc;
-       /* Second half of bitmap is features we accept. */
-       u8 *out_features = kvm_vq_features(desc) + desc->feature_len;
-
-       /* Give virtio_ring a chance to accept features. */
-       vring_transport_features(vdev);
-
-       /* Make sure we don't have any features > 32 bits! */
-       BUG_ON((u32)vdev->features != vdev->features);
-
-       memset(out_features, 0, desc->feature_len);
-       bits = min_t(unsigned, desc->feature_len, sizeof(vdev->features)) * 8;
-       for (i = 0; i < bits; i++) {
-               if (__virtio_test_bit(vdev, i))
-                       out_features[i / 8] |= (1 << (i % 8));
-       }
-
-       return 0;
-}
-
-/*
- * Reading and writing elements in config space
- */
-static void kvm_get(struct virtio_device *vdev, unsigned int offset,
-                  void *buf, unsigned len)
-{
-       struct kvm_device_desc *desc = to_kvmdev(vdev)->desc;
-
-       BUG_ON(offset + len > desc->config_len);
-       memcpy(buf, kvm_vq_configspace(desc) + offset, len);
-}
-
-static void kvm_set(struct virtio_device *vdev, unsigned int offset,
-                  const void *buf, unsigned len)
-{
-       struct kvm_device_desc *desc = to_kvmdev(vdev)->desc;
-
-       BUG_ON(offset + len > desc->config_len);
-       memcpy(kvm_vq_configspace(desc) + offset, buf, len);
-}
-
-/*
- * The operations to get and set the status word just access
- * the status field of the device descriptor. set_status will also
- * make a hypercall to the host, to tell about status changes
- */
-static u8 kvm_get_status(struct virtio_device *vdev)
-{
-       return to_kvmdev(vdev)->desc->status;
-}
-
-static void kvm_set_status(struct virtio_device *vdev, u8 status)
-{
-       BUG_ON(!status);
-       to_kvmdev(vdev)->desc->status = status;
-       kvm_hypercall1(KVM_S390_VIRTIO_SET_STATUS,
-                      (unsigned long) to_kvmdev(vdev)->desc);
-}
-
-/*
- * To reset the device, we use the KVM_VIRTIO_RESET hypercall, using the
- * descriptor address. The Host will zero the status and all the
- * features.
- */
-static void kvm_reset(struct virtio_device *vdev)
-{
-       kvm_hypercall1(KVM_S390_VIRTIO_RESET,
-                      (unsigned long) to_kvmdev(vdev)->desc);
-}
-
-/*
- * When the virtio_ring code wants to notify the Host, it calls us here and we
- * make a hypercall.  We hand the address  of the virtqueue so the Host
- * knows which virtqueue we're talking about.
- */
-static bool kvm_notify(struct virtqueue *vq)
-{
-       long rc;
-       struct kvm_vqconfig *config = vq->priv;
-
-       rc = kvm_hypercall1(KVM_S390_VIRTIO_NOTIFY, config->address);
-       if (rc < 0)
-               return false;
-       return true;
-}
-
-/*
- * This routine finds the first virtqueue described in the configuration of
- * this device and sets it up.
- */
-static struct virtqueue *kvm_find_vq(struct virtio_device *vdev,
-                                    unsigned index,
-                                    void (*callback)(struct virtqueue *vq),
-                                    const char *name)
-{
-       struct kvm_device *kdev = to_kvmdev(vdev);
-       struct kvm_vqconfig *config;
-       struct virtqueue *vq;
-       int err;
-
-       if (index >= kdev->desc->num_vq)
-               return ERR_PTR(-ENOENT);
-
-       if (!name)
-               return NULL;
-
-       config = kvm_vq_config(kdev->desc)+index;
-
-       err = vmem_add_mapping(config->address,
-                              vring_size(config->num,
-                                         KVM_S390_VIRTIO_RING_ALIGN));
-       if (err)
-               goto out;
-
-       vq = vring_new_virtqueue(index, config->num, KVM_S390_VIRTIO_RING_ALIGN,
-                                vdev, true, (void *) config->address,
-                                kvm_notify, callback, name);
-       if (!vq) {
-               err = -ENOMEM;
-               goto unmap;
-       }
-
-       /*
-        * register a callback token
-        * The host will send this via the external interrupt parameter
-        */
-       config->token = (u64) vq;
-
-       vq->priv = config;
-       return vq;
-unmap:
-       vmem_remove_mapping(config->address,
-                           vring_size(config->num,
-                                      KVM_S390_VIRTIO_RING_ALIGN));
-out:
-       return ERR_PTR(err);
-}
-
-static void kvm_del_vq(struct virtqueue *vq)
-{
-       struct kvm_vqconfig *config = vq->priv;
-
-       vring_del_virtqueue(vq);
-       vmem_remove_mapping(config->address,
-                           vring_size(config->num,
-                                      KVM_S390_VIRTIO_RING_ALIGN));
-}
-
-static void kvm_del_vqs(struct virtio_device *vdev)
-{
-       struct virtqueue *vq, *n;
-
-       list_for_each_entry_safe(vq, n, &vdev->vqs, list)
-               kvm_del_vq(vq);
-}
-
-static int kvm_find_vqs(struct virtio_device *vdev, unsigned nvqs,
-                       struct virtqueue *vqs[],
-                       vq_callback_t *callbacks[],
-                       const char *names[])
-{
-       struct kvm_device *kdev = to_kvmdev(vdev);
-       int i;
-
-       /* We must have this many virtqueues. */
-       if (nvqs > kdev->desc->num_vq)
-               return -ENOENT;
-
-       for (i = 0; i < nvqs; ++i) {
-               vqs[i] = kvm_find_vq(vdev, i, callbacks[i], names[i]);
-               if (IS_ERR(vqs[i]))
-                       goto error;
-       }
-       return 0;
-
-error:
-       kvm_del_vqs(vdev);
-       return PTR_ERR(vqs[i]);
-}
-
-static const char *kvm_bus_name(struct virtio_device *vdev)
-{
-       return "";
-}
-
-/*
- * The config ops structure as defined by virtio config
- */
-static const struct virtio_config_ops kvm_vq_configspace_ops = {
-       .get_features = kvm_get_features,
-       .finalize_features = kvm_finalize_features,
-       .get = kvm_get,
-       .set = kvm_set,
-       .get_status = kvm_get_status,
-       .set_status = kvm_set_status,
-       .reset = kvm_reset,
-       .find_vqs = kvm_find_vqs,
-       .del_vqs = kvm_del_vqs,
-       .bus_name = kvm_bus_name,
-};
-
-/*
- * The root device for the kvm virtio devices.
- * This makes them appear as /sys/devices/kvm_s390/0,1,2 not /sys/devices/0,1,2.
- */
-static struct device *kvm_root;
-
-/*
- * adds a new device and registers it with virtio
- * appropriate drivers are loaded by the device model
- */
-static void add_kvm_device(struct kvm_device_desc *d, unsigned int offset)
-{
-       struct kvm_device *kdev;
-
-       kdev = kzalloc(sizeof(*kdev), GFP_KERNEL);
-       if (!kdev) {
-               printk(KERN_EMERG "Cannot allocate kvm dev %u type %u\n",
-                      offset, d->type);
-               return;
-       }
-
-       kdev->vdev.dev.parent = kvm_root;
-       kdev->vdev.id.device = d->type;
-       kdev->vdev.config = &kvm_vq_configspace_ops;
-       kdev->desc = d;
-
-       if (register_virtio_device(&kdev->vdev) != 0) {
-               printk(KERN_ERR "Failed to register kvm device %u type %u\n",
-                      offset, d->type);
-               kfree(kdev);
-       }
-}
-
-/*
- * scan_devices() simply iterates through the device page.
- * The type 0 is reserved to mean "end of devices".
- */
-static void scan_devices(void)
-{
-       unsigned int i;
-       struct kvm_device_desc *d;
-
-       for (i = 0; i < PAGE_SIZE; i += desc_size(d)) {
-               d = kvm_devices + i;
-
-               if (d->type == 0)
-                       break;
-
-               add_kvm_device(d, i);
-       }
-}
-
-/*
- * match for a kvm device with a specific desc pointer
- */
-static int match_desc(struct device *dev, void *data)
-{
-       struct virtio_device *vdev = dev_to_virtio(dev);
-       struct kvm_device *kdev = to_kvmdev(vdev);
-
-       return kdev->desc == data;
-}
-
-/*
- * hotplug_device tries to find changes in the device page.
- */
-static void hotplug_devices(struct work_struct *dummy)
-{
-       unsigned int i;
-       struct kvm_device_desc *d;
-       struct device *dev;
-
-       for (i = 0; i < PAGE_SIZE; i += desc_size(d)) {
-               d = kvm_devices + i;
-
-               /* end of list */
-               if (d->type == 0)
-                       break;
-
-               /* device already exists */
-               dev = device_find_child(kvm_root, d, match_desc);
-               if (dev) {
-                       /* XXX check for hotplug remove */
-                       put_device(dev);
-                       continue;
-               }
-
-               /* new device */
-               printk(KERN_INFO "Adding new virtio device %p\n", d);
-               add_kvm_device(d, i);
-       }
-}
-
-/*
- * we emulate the request_irq behaviour on top of s390 extints
- */
-static void kvm_extint_handler(struct ext_code ext_code,
-                              unsigned int param32, unsigned long param64)
-{
-       struct virtqueue *vq;
-       u32 param;
-
-       if ((ext_code.subcode & 0xff00) != VIRTIO_SUBCODE_64)
-               return;
-       inc_irq_stat(IRQEXT_VRT);
-
-       /* The LSB might be overloaded, we have to mask it */
-       vq = (struct virtqueue *)(param64 & ~1UL);
-
-       /* We use ext_params to decide what this interrupt means */
-       param = param32 & VIRTIO_PARAM_MASK;
-
-       switch (param) {
-       case VIRTIO_PARAM_CONFIG_CHANGED:
-               virtio_config_changed(vq->vdev);
-               break;
-       case VIRTIO_PARAM_DEV_ADD:
-               schedule_work(&hotplug_work);
-               break;
-       case VIRTIO_PARAM_VRING_INTERRUPT:
-       default:
-               vring_interrupt(0, vq);
-               break;
-       }
-}
-
-/*
- * For s390-virtio, we expect a page above main storage containing
- * the virtio configuration. Try to actually load from this area
- * in order to figure out if the host provides this page.
- */
-static int __init test_devices_support(unsigned long addr)
-{
-       int ret = -EIO;
-
-       asm volatile(
-               "0:     lura    0,%1\n"
-               "1:     xgr     %0,%0\n"
-               "2:\n"
-               EX_TABLE(0b,2b)
-               EX_TABLE(1b,2b)
-               : "+d" (ret)
-               : "a" (addr)
-               : "0", "cc");
-       return ret;
-}
-/*
- * Init function for virtio
- * devices are in a single page above top of "normal" + standby mem
- */
-static int __init kvm_devices_init(void)
-{
-       int rc;
-       unsigned long total_memory_size = sclp.rzm * sclp.rnmax;
-
-       if (!MACHINE_IS_KVM)
-               return -ENODEV;
-
-       if (test_devices_support(total_memory_size) < 0)
-               return -ENODEV;
-
-       rc = vmem_add_mapping(total_memory_size, PAGE_SIZE);
-       if (rc)
-               return rc;
-
-       kvm_devices = (void *) total_memory_size;
-
-       kvm_root = root_device_register("kvm_s390");
-       if (IS_ERR(kvm_root)) {
-               rc = PTR_ERR(kvm_root);
-               printk(KERN_ERR "Could not register kvm_s390 root device");
-               vmem_remove_mapping(total_memory_size, PAGE_SIZE);
-               return rc;
-       }
-
-       INIT_WORK(&hotplug_work, hotplug_devices);
-
-       irq_subclass_register(IRQ_SUBCLASS_SERVICE_SIGNAL);
-       register_external_irq(EXT_IRQ_CP_SERVICE, kvm_extint_handler);
-
-       scan_devices();
-       return 0;
-}
-
-/* code for early console output with virtio_console */
-static __init int early_put_chars(u32 vtermno, const char *buf, int count)
-{
-       char scratch[17];
-       unsigned int len = count;
-
-       if (len > sizeof(scratch) - 1)
-               len = sizeof(scratch) - 1;
-       scratch[len] = '\0';
-       memcpy(scratch, buf, len);
-       kvm_hypercall1(KVM_S390_VIRTIO_NOTIFY, __pa(scratch));
-       return len;
-}
-
-static int __init s390_virtio_console_init(void)
-{
-       if (sclp.has_vt220 || sclp.has_linemode)
-               return -ENODEV;
-       return virtio_cons_early_init(early_put_chars);
-}
-console_initcall(s390_virtio_console_init);
-
-
-/*
- * We do this after core stuff, but before the drivers.
- */
-postcore_initcall(kvm_devices_init);
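
For orientation, the deleted s390 kvm_virtio transport above keeps every device descriptor in one page: each struct kvm_device_desc is followed by its virtqueue configs, two feature-bit areas (host-offered and guest-accepted) and the config space, and scan_devices() walks the page by advancing desc_size() bytes until it meets a descriptor of type 0. A rough user-space sketch of that walk, with a simplified descriptor standing in for the real asm/kvm_virtio.h layout:

    /* Simplified, hypothetical stand-ins for struct kvm_vqconfig / kvm_device_desc. */
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define PAGE_SIZE 4096

    struct vqconfig { uint64_t address; uint16_t num; uint64_t token; };

    struct device_desc {
            uint8_t type;                 /* 0 terminates the list */
            uint8_t num_vq;
            uint8_t feature_len;          /* bytes per feature-bit half */
            uint8_t config_len;
            uint8_t status;
            uint8_t data[];               /* vq configs, 2 * feature_len, config space */
    };

    /* Total size of one descriptor, mirroring desc_size() in the deleted code. */
    static size_t desc_size(const struct device_desc *d)
    {
            return sizeof(*d)
                    + d->num_vq * sizeof(struct vqconfig)
                    + d->feature_len * 2
                    + d->config_len;
    }

    static void scan_devices(const uint8_t *page)
    {
            const struct device_desc *d;
            size_t i;

            for (i = 0; i < PAGE_SIZE; i += desc_size(d)) {
                    d = (const struct device_desc *)(page + i);
                    if (d->type == 0)     /* "end of devices" */
                            break;
                    printf("device at offset %zu: type %u, %u virtqueue(s)\n",
                           i, (unsigned)d->type, (unsigned)d->num_vq);
            }
    }

    int main(void)
    {
            uint8_t page[PAGE_SIZE];
            struct device_desc d = { .type = 3, .num_vq = 1, .feature_len = 4 };

            memset(page, 0, sizeof(page));        /* a zeroed page is an empty list */
            memcpy(page, &d, sizeof(d));
            scan_devices(page);
            return 0;
    }

The real driver additionally maps that page above guest memory with vmem_add_mapping() and registers each descriptor through add_kvm_device(); the sketch only reproduces the layout walk.
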
diff --git a/drivers/s390/kvm/virtio_ccw.c b/drivers/s390/kvm/virtio_ccw.c
deleted file mode 100644
index f8d8fdb..0000000
+++ /dev/null
@@ -1,1387 +0,0 @@
-/*
- * ccw based virtio transport
- *
- * Copyright IBM Corp. 2012, 2014
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License (version 2 only)
- * as published by the Free Software Foundation.
- *
- *    Author(s): Cornelia Huck <cornelia.huck@de.ibm.com>
- */
-
-#include <linux/kernel_stat.h>
-#include <linux/init.h>
-#include <linux/bootmem.h>
-#include <linux/err.h>
-#include <linux/virtio.h>
-#include <linux/virtio_config.h>
-#include <linux/slab.h>
-#include <linux/interrupt.h>
-#include <linux/virtio_ring.h>
-#include <linux/pfn.h>
-#include <linux/async.h>
-#include <linux/wait.h>
-#include <linux/list.h>
-#include <linux/bitops.h>
-#include <linux/module.h>
-#include <linux/io.h>
-#include <linux/kvm_para.h>
-#include <linux/notifier.h>
-#include <asm/setup.h>
-#include <asm/irq.h>
-#include <asm/cio.h>
-#include <asm/ccwdev.h>
-#include <asm/virtio-ccw.h>
-#include <asm/isc.h>
-#include <asm/airq.h>
-
-/*
- * virtio related functions
- */
-
-struct vq_config_block {
-       __u16 index;
-       __u16 num;
-} __packed;
-
-#define VIRTIO_CCW_CONFIG_SIZE 0x100
-/* same as PCI config space size, should be enough for all drivers */
-
-struct virtio_ccw_device {
-       struct virtio_device vdev;
-       __u8 *status;
-       __u8 config[VIRTIO_CCW_CONFIG_SIZE];
-       struct ccw_device *cdev;
-       __u32 curr_io;
-       int err;
-       unsigned int revision; /* Transport revision */
-       wait_queue_head_t wait_q;
-       spinlock_t lock;
-       struct list_head virtqueues;
-       unsigned long indicators;
-       unsigned long indicators2;
-       struct vq_config_block *config_block;
-       bool is_thinint;
-       bool going_away;
-       bool device_lost;
-       unsigned int config_ready;
-       void *airq_info;
-};
-
-struct vq_info_block_legacy {
-       __u64 queue;
-       __u32 align;
-       __u16 index;
-       __u16 num;
-} __packed;
-
-struct vq_info_block {
-       __u64 desc;
-       __u32 res0;
-       __u16 index;
-       __u16 num;
-       __u64 avail;
-       __u64 used;
-} __packed;
-
-struct virtio_feature_desc {
-       __u32 features;
-       __u8 index;
-} __packed;
-
-struct virtio_thinint_area {
-       unsigned long summary_indicator;
-       unsigned long indicator;
-       u64 bit_nr;
-       u8 isc;
-} __packed;
-
-struct virtio_rev_info {
-       __u16 revision;
-       __u16 length;
-       __u8 data[];
-};
-
-/* the highest virtio-ccw revision we support */
-#define VIRTIO_CCW_REV_MAX 1
-
-struct virtio_ccw_vq_info {
-       struct virtqueue *vq;
-       int num;
-       void *queue;
-       union {
-               struct vq_info_block s;
-               struct vq_info_block_legacy l;
-       } *info_block;
-       int bit_nr;
-       struct list_head node;
-       long cookie;
-};
-
-#define VIRTIO_AIRQ_ISC IO_SCH_ISC /* inherit from subchannel */
-
-#define VIRTIO_IV_BITS (L1_CACHE_BYTES * 8)
-#define MAX_AIRQ_AREAS 20
-
-static int virtio_ccw_use_airq = 1;
-
-struct airq_info {
-       rwlock_t lock;
-       u8 summary_indicator;
-       struct airq_struct airq;
-       struct airq_iv *aiv;
-};
-static struct airq_info *airq_areas[MAX_AIRQ_AREAS];
-
-#define CCW_CMD_SET_VQ 0x13
-#define CCW_CMD_VDEV_RESET 0x33
-#define CCW_CMD_SET_IND 0x43
-#define CCW_CMD_SET_CONF_IND 0x53
-#define CCW_CMD_READ_FEAT 0x12
-#define CCW_CMD_WRITE_FEAT 0x11
-#define CCW_CMD_READ_CONF 0x22
-#define CCW_CMD_WRITE_CONF 0x21
-#define CCW_CMD_WRITE_STATUS 0x31
-#define CCW_CMD_READ_VQ_CONF 0x32
-#define CCW_CMD_SET_IND_ADAPTER 0x73
-#define CCW_CMD_SET_VIRTIO_REV 0x83
-
-#define VIRTIO_CCW_DOING_SET_VQ 0x00010000
-#define VIRTIO_CCW_DOING_RESET 0x00040000
-#define VIRTIO_CCW_DOING_READ_FEAT 0x00080000
-#define VIRTIO_CCW_DOING_WRITE_FEAT 0x00100000
-#define VIRTIO_CCW_DOING_READ_CONFIG 0x00200000
-#define VIRTIO_CCW_DOING_WRITE_CONFIG 0x00400000
-#define VIRTIO_CCW_DOING_WRITE_STATUS 0x00800000
-#define VIRTIO_CCW_DOING_SET_IND 0x01000000
-#define VIRTIO_CCW_DOING_READ_VQ_CONF 0x02000000
-#define VIRTIO_CCW_DOING_SET_CONF_IND 0x04000000
-#define VIRTIO_CCW_DOING_SET_IND_ADAPTER 0x08000000
-#define VIRTIO_CCW_DOING_SET_VIRTIO_REV 0x10000000
-#define VIRTIO_CCW_INTPARM_MASK 0xffff0000
-
-static struct virtio_ccw_device *to_vc_device(struct virtio_device *vdev)
-{
-       return container_of(vdev, struct virtio_ccw_device, vdev);
-}
-
-static void drop_airq_indicator(struct virtqueue *vq, struct airq_info *info)
-{
-       unsigned long i, flags;
-
-       write_lock_irqsave(&info->lock, flags);
-       for (i = 0; i < airq_iv_end(info->aiv); i++) {
-               if (vq == (void *)airq_iv_get_ptr(info->aiv, i)) {
-                       airq_iv_free_bit(info->aiv, i);
-                       airq_iv_set_ptr(info->aiv, i, 0);
-                       break;
-               }
-       }
-       write_unlock_irqrestore(&info->lock, flags);
-}
-
-static void virtio_airq_handler(struct airq_struct *airq)
-{
-       struct airq_info *info = container_of(airq, struct airq_info, airq);
-       unsigned long ai;
-
-       inc_irq_stat(IRQIO_VAI);
-       read_lock(&info->lock);
-       /* Walk through indicators field, summary indicator active. */
-       for (ai = 0;;) {
-               ai = airq_iv_scan(info->aiv, ai, airq_iv_end(info->aiv));
-               if (ai == -1UL)
-                       break;
-               vring_interrupt(0, (void *)airq_iv_get_ptr(info->aiv, ai));
-       }
-       info->summary_indicator = 0;
-       smp_wmb();
-       /* Walk through indicators field, summary indicator not active. */
-       for (ai = 0;;) {
-               ai = airq_iv_scan(info->aiv, ai, airq_iv_end(info->aiv));
-               if (ai == -1UL)
-                       break;
-               vring_interrupt(0, (void *)airq_iv_get_ptr(info->aiv, ai));
-       }
-       read_unlock(&info->lock);
-}
-
-static struct airq_info *new_airq_info(void)
-{
-       struct airq_info *info;
-       int rc;
-
-       info = kzalloc(sizeof(*info), GFP_KERNEL);
-       if (!info)
-               return NULL;
-       rwlock_init(&info->lock);
-       info->aiv = airq_iv_create(VIRTIO_IV_BITS, AIRQ_IV_ALLOC | AIRQ_IV_PTR);
-       if (!info->aiv) {
-               kfree(info);
-               return NULL;
-       }
-       info->airq.handler = virtio_airq_handler;
-       info->airq.lsi_ptr = &info->summary_indicator;
-       info->airq.lsi_mask = 0xff;
-       info->airq.isc = VIRTIO_AIRQ_ISC;
-       rc = register_adapter_interrupt(&info->airq);
-       if (rc) {
-               airq_iv_release(info->aiv);
-               kfree(info);
-               return NULL;
-       }
-       return info;
-}
-
-static void destroy_airq_info(struct airq_info *info)
-{
-       if (!info)
-               return;
-
-       unregister_adapter_interrupt(&info->airq);
-       airq_iv_release(info->aiv);
-       kfree(info);
-}
-
-static unsigned long get_airq_indicator(struct virtqueue *vqs[], int nvqs,
-                                       u64 *first, void **airq_info)
-{
-       int i, j;
-       struct airq_info *info;
-       unsigned long indicator_addr = 0;
-       unsigned long bit, flags;
-
-       for (i = 0; i < MAX_AIRQ_AREAS && !indicator_addr; i++) {
-               if (!airq_areas[i])
-                       airq_areas[i] = new_airq_info();
-               info = airq_areas[i];
-               if (!info)
-                       return 0;
-               write_lock_irqsave(&info->lock, flags);
-               bit = airq_iv_alloc(info->aiv, nvqs);
-               if (bit == -1UL) {
-                       /* Not enough vacancies. */
-                       write_unlock_irqrestore(&info->lock, flags);
-                       continue;
-               }
-               *first = bit;
-               *airq_info = info;
-               indicator_addr = (unsigned long)info->aiv->vector;
-               for (j = 0; j < nvqs; j++) {
-                       airq_iv_set_ptr(info->aiv, bit + j,
-                                       (unsigned long)vqs[j]);
-               }
-               write_unlock_irqrestore(&info->lock, flags);
-       }
-       return indicator_addr;
-}
-
-static void virtio_ccw_drop_indicators(struct virtio_ccw_device *vcdev)
-{
-       struct virtio_ccw_vq_info *info;
-
-       list_for_each_entry(info, &vcdev->virtqueues, node)
-               drop_airq_indicator(info->vq, vcdev->airq_info);
-}
-
-static int doing_io(struct virtio_ccw_device *vcdev, __u32 flag)
-{
-       unsigned long flags;
-       __u32 ret;
-
-       spin_lock_irqsave(get_ccwdev_lock(vcdev->cdev), flags);
-       if (vcdev->err)
-               ret = 0;
-       else
-               ret = vcdev->curr_io & flag;
-       spin_unlock_irqrestore(get_ccwdev_lock(vcdev->cdev), flags);
-       return ret;
-}
-
-static int ccw_io_helper(struct virtio_ccw_device *vcdev,
-                        struct ccw1 *ccw, __u32 intparm)
-{
-       int ret;
-       unsigned long flags;
-       int flag = intparm & VIRTIO_CCW_INTPARM_MASK;
-
-       do {
-               spin_lock_irqsave(get_ccwdev_lock(vcdev->cdev), flags);
-               ret = ccw_device_start(vcdev->cdev, ccw, intparm, 0, 0);
-               if (!ret) {
-                       if (!vcdev->curr_io)
-                               vcdev->err = 0;
-                       vcdev->curr_io |= flag;
-               }
-               spin_unlock_irqrestore(get_ccwdev_lock(vcdev->cdev), flags);
-               cpu_relax();
-       } while (ret == -EBUSY);
-       wait_event(vcdev->wait_q, doing_io(vcdev, flag) == 0);
-       return ret ? ret : vcdev->err;
-}
-
-static void virtio_ccw_drop_indicator(struct virtio_ccw_device *vcdev,
-                                     struct ccw1 *ccw)
-{
-       int ret;
-       unsigned long *indicatorp = NULL;
-       struct virtio_thinint_area *thinint_area = NULL;
-       struct airq_info *airq_info = vcdev->airq_info;
-
-       if (vcdev->is_thinint) {
-               thinint_area = kzalloc(sizeof(*thinint_area),
-                                      GFP_DMA | GFP_KERNEL);
-               if (!thinint_area)
-                       return;
-               thinint_area->summary_indicator =
-                       (unsigned long) &airq_info->summary_indicator;
-               thinint_area->isc = VIRTIO_AIRQ_ISC;
-               ccw->cmd_code = CCW_CMD_SET_IND_ADAPTER;
-               ccw->count = sizeof(*thinint_area);
-               ccw->cda = (__u32)(unsigned long) thinint_area;
-       } else {
-               indicatorp = kmalloc(sizeof(&vcdev->indicators),
-                                    GFP_DMA | GFP_KERNEL);
-               if (!indicatorp)
-                       return;
-               *indicatorp = 0;
-               ccw->cmd_code = CCW_CMD_SET_IND;
-               ccw->count = sizeof(vcdev->indicators);
-               ccw->cda = (__u32)(unsigned long) indicatorp;
-       }
-       /* Deregister indicators from host. */
-       vcdev->indicators = 0;
-       ccw->flags = 0;
-       ret = ccw_io_helper(vcdev, ccw,
-                           vcdev->is_thinint ?
-                           VIRTIO_CCW_DOING_SET_IND_ADAPTER :
-                           VIRTIO_CCW_DOING_SET_IND);
-       if (ret && (ret != -ENODEV))
-               dev_info(&vcdev->cdev->dev,
-                        "Failed to deregister indicators (%d)\n", ret);
-       else if (vcdev->is_thinint)
-               virtio_ccw_drop_indicators(vcdev);
-       kfree(indicatorp);
-       kfree(thinint_area);
-}
-
-static inline long do_kvm_notify(struct subchannel_id schid,
-                                unsigned long queue_index,
-                                long cookie)
-{
-       register unsigned long __nr asm("1") = KVM_S390_VIRTIO_CCW_NOTIFY;
-       register struct subchannel_id __schid asm("2") = schid;
-       register unsigned long __index asm("3") = queue_index;
-       register long __rc asm("2");
-       register long __cookie asm("4") = cookie;
-
-       asm volatile ("diag 2,4,0x500\n"
-                     : "=d" (__rc) : "d" (__nr), "d" (__schid), "d" (__index),
-                     "d"(__cookie)
-                     : "memory", "cc");
-       return __rc;
-}
-
-static bool virtio_ccw_kvm_notify(struct virtqueue *vq)
-{
-       struct virtio_ccw_vq_info *info = vq->priv;
-       struct virtio_ccw_device *vcdev;
-       struct subchannel_id schid;
-
-       vcdev = to_vc_device(info->vq->vdev);
-       ccw_device_get_schid(vcdev->cdev, &schid);
-       info->cookie = do_kvm_notify(schid, vq->index, info->cookie);
-       if (info->cookie < 0)
-               return false;
-       return true;
-}
-
-static int virtio_ccw_read_vq_conf(struct virtio_ccw_device *vcdev,
-                                  struct ccw1 *ccw, int index)
-{
-       vcdev->config_block->index = index;
-       ccw->cmd_code = CCW_CMD_READ_VQ_CONF;
-       ccw->flags = 0;
-       ccw->count = sizeof(struct vq_config_block);
-       ccw->cda = (__u32)(unsigned long)(vcdev->config_block);
-       ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_READ_VQ_CONF);
-       return vcdev->config_block->num;
-}
-
-static void virtio_ccw_del_vq(struct virtqueue *vq, struct ccw1 *ccw)
-{
-       struct virtio_ccw_device *vcdev = to_vc_device(vq->vdev);
-       struct virtio_ccw_vq_info *info = vq->priv;
-       unsigned long flags;
-       unsigned long size;
-       int ret;
-       unsigned int index = vq->index;
-
-       /* Remove from our list. */
-       spin_lock_irqsave(&vcdev->lock, flags);
-       list_del(&info->node);
-       spin_unlock_irqrestore(&vcdev->lock, flags);
-
-       /* Release from host. */
-       if (vcdev->revision == 0) {
-               info->info_block->l.queue = 0;
-               info->info_block->l.align = 0;
-               info->info_block->l.index = index;
-               info->info_block->l.num = 0;
-               ccw->count = sizeof(info->info_block->l);
-       } else {
-               info->info_block->s.desc = 0;
-               info->info_block->s.index = index;
-               info->info_block->s.num = 0;
-               info->info_block->s.avail = 0;
-               info->info_block->s.used = 0;
-               ccw->count = sizeof(info->info_block->s);
-       }
-       ccw->cmd_code = CCW_CMD_SET_VQ;
-       ccw->flags = 0;
-       ccw->cda = (__u32)(unsigned long)(info->info_block);
-       ret = ccw_io_helper(vcdev, ccw,
-                           VIRTIO_CCW_DOING_SET_VQ | index);
-       /*
-        * -ENODEV isn't considered an error: The device is gone anyway.
-        * This may happen on device detach.
-        */
-       if (ret && (ret != -ENODEV))
-               dev_warn(&vq->vdev->dev, "Error %d while deleting queue %d",
-                        ret, index);
-
-       vring_del_virtqueue(vq);
-       size = PAGE_ALIGN(vring_size(info->num, KVM_VIRTIO_CCW_RING_ALIGN));
-       free_pages_exact(info->queue, size);
-       kfree(info->info_block);
-       kfree(info);
-}
-
-static void virtio_ccw_del_vqs(struct virtio_device *vdev)
-{
-       struct virtqueue *vq, *n;
-       struct ccw1 *ccw;
-       struct virtio_ccw_device *vcdev = to_vc_device(vdev);
-
-       ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL);
-       if (!ccw)
-               return;
-
-       virtio_ccw_drop_indicator(vcdev, ccw);
-
-       list_for_each_entry_safe(vq, n, &vdev->vqs, list)
-               virtio_ccw_del_vq(vq, ccw);
-
-       kfree(ccw);
-}
-
-static struct virtqueue *virtio_ccw_setup_vq(struct virtio_device *vdev,
-                                            int i, vq_callback_t *callback,
-                                            const char *name,
-                                            struct ccw1 *ccw)
-{
-       struct virtio_ccw_device *vcdev = to_vc_device(vdev);
-       int err;
-       struct virtqueue *vq = NULL;
-       struct virtio_ccw_vq_info *info;
-       unsigned long size = 0; /* silence the compiler */
-       unsigned long flags;
-
-       /* Allocate queue. */
-       info = kzalloc(sizeof(struct virtio_ccw_vq_info), GFP_KERNEL);
-       if (!info) {
-               dev_warn(&vcdev->cdev->dev, "no info\n");
-               err = -ENOMEM;
-               goto out_err;
-       }
-       info->info_block = kzalloc(sizeof(*info->info_block),
-                                  GFP_DMA | GFP_KERNEL);
-       if (!info->info_block) {
-               dev_warn(&vcdev->cdev->dev, "no info block\n");
-               err = -ENOMEM;
-               goto out_err;
-       }
-       info->num = virtio_ccw_read_vq_conf(vcdev, ccw, i);
-       size = PAGE_ALIGN(vring_size(info->num, KVM_VIRTIO_CCW_RING_ALIGN));
-       info->queue = alloc_pages_exact(size, GFP_KERNEL | __GFP_ZERO);
-       if (info->queue == NULL) {
-               dev_warn(&vcdev->cdev->dev, "no queue\n");
-               err = -ENOMEM;
-               goto out_err;
-       }
-
-       vq = vring_new_virtqueue(i, info->num, KVM_VIRTIO_CCW_RING_ALIGN, vdev,
-                                true, info->queue, virtio_ccw_kvm_notify,
-                                callback, name);
-       if (!vq) {
-               /* For now, we fail if we can't get the requested size. */
-               dev_warn(&vcdev->cdev->dev, "no vq\n");
-               err = -ENOMEM;
-               goto out_err;
-       }
-
-       /* Register it with the host. */
-       if (vcdev->revision == 0) {
-               info->info_block->l.queue = (__u64)info->queue;
-               info->info_block->l.align = KVM_VIRTIO_CCW_RING_ALIGN;
-               info->info_block->l.index = i;
-               info->info_block->l.num = info->num;
-               ccw->count = sizeof(info->info_block->l);
-       } else {
-               info->info_block->s.desc = (__u64)info->queue;
-               info->info_block->s.index = i;
-               info->info_block->s.num = info->num;
-               info->info_block->s.avail = (__u64)virtqueue_get_avail(vq);
-               info->info_block->s.used = (__u64)virtqueue_get_used(vq);
-               ccw->count = sizeof(info->info_block->s);
-       }
-       ccw->cmd_code = CCW_CMD_SET_VQ;
-       ccw->flags = 0;
-       ccw->cda = (__u32)(unsigned long)(info->info_block);
-       err = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_SET_VQ | i);
-       if (err) {
-               dev_warn(&vcdev->cdev->dev, "SET_VQ failed\n");
-               goto out_err;
-       }
-
-       info->vq = vq;
-       vq->priv = info;
-
-       /* Save it to our list. */
-       spin_lock_irqsave(&vcdev->lock, flags);
-       list_add(&info->node, &vcdev->virtqueues);
-       spin_unlock_irqrestore(&vcdev->lock, flags);
-
-       return vq;
-
-out_err:
-       if (vq)
-               vring_del_virtqueue(vq);
-       if (info) {
-               if (info->queue)
-                       free_pages_exact(info->queue, size);
-               kfree(info->info_block);
-       }
-       kfree(info);
-       return ERR_PTR(err);
-}
-
-static int virtio_ccw_register_adapter_ind(struct virtio_ccw_device *vcdev,
-                                          struct virtqueue *vqs[], int nvqs,
-                                          struct ccw1 *ccw)
-{
-       int ret;
-       struct virtio_thinint_area *thinint_area = NULL;
-       struct airq_info *info;
-
-       thinint_area = kzalloc(sizeof(*thinint_area), GFP_DMA | GFP_KERNEL);
-       if (!thinint_area) {
-               ret = -ENOMEM;
-               goto out;
-       }
-       /* Try to get an indicator. */
-       thinint_area->indicator = get_airq_indicator(vqs, nvqs,
-                                                    &thinint_area->bit_nr,
-                                                    &vcdev->airq_info);
-       if (!thinint_area->indicator) {
-               ret = -ENOSPC;
-               goto out;
-       }
-       info = vcdev->airq_info;
-       thinint_area->summary_indicator =
-               (unsigned long) &info->summary_indicator;
-       thinint_area->isc = VIRTIO_AIRQ_ISC;
-       ccw->cmd_code = CCW_CMD_SET_IND_ADAPTER;
-       ccw->flags = CCW_FLAG_SLI;
-       ccw->count = sizeof(*thinint_area);
-       ccw->cda = (__u32)(unsigned long)thinint_area;
-       ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_SET_IND_ADAPTER);
-       if (ret) {
-               if (ret == -EOPNOTSUPP) {
-                       /*
-                        * The host does not support adapter interrupts
-                        * for virtio-ccw, stop trying.
-                        */
-                       virtio_ccw_use_airq = 0;
-                       pr_info("Adapter interrupts unsupported on host\n");
-               } else
-                       dev_warn(&vcdev->cdev->dev,
-                                "enabling adapter interrupts = %d\n", ret);
-               virtio_ccw_drop_indicators(vcdev);
-       }
-out:
-       kfree(thinint_area);
-       return ret;
-}
-
-static int virtio_ccw_find_vqs(struct virtio_device *vdev, unsigned nvqs,
-                              struct virtqueue *vqs[],
-                              vq_callback_t *callbacks[],
-                              const char *names[])
-{
-       struct virtio_ccw_device *vcdev = to_vc_device(vdev);
-       unsigned long *indicatorp = NULL;
-       int ret, i;
-       struct ccw1 *ccw;
-
-       ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL);
-       if (!ccw)
-               return -ENOMEM;
-
-       for (i = 0; i < nvqs; ++i) {
-               vqs[i] = virtio_ccw_setup_vq(vdev, i, callbacks[i], names[i],
-                                            ccw);
-               if (IS_ERR(vqs[i])) {
-                       ret = PTR_ERR(vqs[i]);
-                       vqs[i] = NULL;
-                       goto out;
-               }
-       }
-       ret = -ENOMEM;
-       /* We need a data area under 2G to communicate. */
-       indicatorp = kmalloc(sizeof(&vcdev->indicators), GFP_DMA | GFP_KERNEL);
-       if (!indicatorp)
-               goto out;
-       *indicatorp = (unsigned long) &vcdev->indicators;
-       if (vcdev->is_thinint) {
-               ret = virtio_ccw_register_adapter_ind(vcdev, vqs, nvqs, ccw);
-               if (ret)
-                       /* no error, just fall back to legacy interrupts */
-                       vcdev->is_thinint = 0;
-       }
-       if (!vcdev->is_thinint) {
-               /* Register queue indicators with host. */
-               vcdev->indicators = 0;
-               ccw->cmd_code = CCW_CMD_SET_IND;
-               ccw->flags = 0;
-               ccw->count = sizeof(vcdev->indicators);
-               ccw->cda = (__u32)(unsigned long) indicatorp;
-               ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_SET_IND);
-               if (ret)
-                       goto out;
-       }
-       /* Register indicators2 with host for config changes */
-       *indicatorp = (unsigned long) &vcdev->indicators2;
-       vcdev->indicators2 = 0;
-       ccw->cmd_code = CCW_CMD_SET_CONF_IND;
-       ccw->flags = 0;
-       ccw->count = sizeof(vcdev->indicators2);
-       ccw->cda = (__u32)(unsigned long) indicatorp;
-       ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_SET_CONF_IND);
-       if (ret)
-               goto out;
-
-       kfree(indicatorp);
-       kfree(ccw);
-       return 0;
-out:
-       kfree(indicatorp);
-       kfree(ccw);
-       virtio_ccw_del_vqs(vdev);
-       return ret;
-}
-
-static void virtio_ccw_reset(struct virtio_device *vdev)
-{
-       struct virtio_ccw_device *vcdev = to_vc_device(vdev);
-       struct ccw1 *ccw;
-
-       ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL);
-       if (!ccw)
-               return;
-
-       /* Zero status bits. */
-       *vcdev->status = 0;
-
-       /* Send a reset ccw on device. */
-       ccw->cmd_code = CCW_CMD_VDEV_RESET;
-       ccw->flags = 0;
-       ccw->count = 0;
-       ccw->cda = 0;
-       ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_RESET);
-       kfree(ccw);
-}
-
-static u64 virtio_ccw_get_features(struct virtio_device *vdev)
-{
-       struct virtio_ccw_device *vcdev = to_vc_device(vdev);
-       struct virtio_feature_desc *features;
-       int ret;
-       u64 rc;
-       struct ccw1 *ccw;
-
-       ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL);
-       if (!ccw)
-               return 0;
-
-       features = kzalloc(sizeof(*features), GFP_DMA | GFP_KERNEL);
-       if (!features) {
-               rc = 0;
-               goto out_free;
-       }
-       /* Read the feature bits from the host. */
-       features->index = 0;
-       ccw->cmd_code = CCW_CMD_READ_FEAT;
-       ccw->flags = 0;
-       ccw->count = sizeof(*features);
-       ccw->cda = (__u32)(unsigned long)features;
-       ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_READ_FEAT);
-       if (ret) {
-               rc = 0;
-               goto out_free;
-       }
-
-       rc = le32_to_cpu(features->features);
-
-       if (vcdev->revision == 0)
-               goto out_free;
-
-       /* Read second half of the feature bits from the host. */
-       features->index = 1;
-       ccw->cmd_code = CCW_CMD_READ_FEAT;
-       ccw->flags = 0;
-       ccw->count = sizeof(*features);
-       ccw->cda = (__u32)(unsigned long)features;
-       ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_READ_FEAT);
-       if (ret == 0)
-               rc |= (u64)le32_to_cpu(features->features) << 32;
-
-out_free:
-       kfree(features);
-       kfree(ccw);
-       return rc;
-}
-
-static int virtio_ccw_finalize_features(struct virtio_device *vdev)
-{
-       struct virtio_ccw_device *vcdev = to_vc_device(vdev);
-       struct virtio_feature_desc *features;
-       struct ccw1 *ccw;
-       int ret;
-
-       if (vcdev->revision >= 1 &&
-           !__virtio_test_bit(vdev, VIRTIO_F_VERSION_1)) {
-               dev_err(&vdev->dev, "virtio: device uses revision 1 "
-                       "but does not have VIRTIO_F_VERSION_1\n");
-               return -EINVAL;
-       }
-
-       ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL);
-       if (!ccw)
-               return -ENOMEM;
-
-       features = kzalloc(sizeof(*features), GFP_DMA | GFP_KERNEL);
-       if (!features) {
-               ret = -ENOMEM;
-               goto out_free;
-       }
-       /* Give virtio_ring a chance to accept features. */
-       vring_transport_features(vdev);
-
-       features->index = 0;
-       features->features = cpu_to_le32((u32)vdev->features);
-       /* Write the first half of the feature bits to the host. */
-       ccw->cmd_code = CCW_CMD_WRITE_FEAT;
-       ccw->flags = 0;
-       ccw->count = sizeof(*features);
-       ccw->cda = (__u32)(unsigned long)features;
-       ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_WRITE_FEAT);
-       if (ret)
-               goto out_free;
-
-       if (vcdev->revision == 0)
-               goto out_free;
-
-       features->index = 1;
-       features->features = cpu_to_le32(vdev->features >> 32);
-       /* Write the second half of the feature bits to the host. */
-       ccw->cmd_code = CCW_CMD_WRITE_FEAT;
-       ccw->flags = 0;
-       ccw->count = sizeof(*features);
-       ccw->cda = (__u32)(unsigned long)features;
-       ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_WRITE_FEAT);
-
-out_free:
-       kfree(features);
-       kfree(ccw);
-
-       return ret;
-}
-
-static void virtio_ccw_get_config(struct virtio_device *vdev,
-                                 unsigned int offset, void *buf, unsigned len)
-{
-       struct virtio_ccw_device *vcdev = to_vc_device(vdev);
-       int ret;
-       struct ccw1 *ccw;
-       void *config_area;
-
-       ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL);
-       if (!ccw)
-               return;
-
-       config_area = kzalloc(VIRTIO_CCW_CONFIG_SIZE, GFP_DMA | GFP_KERNEL);
-       if (!config_area)
-               goto out_free;
-
-       /* Read the config area from the host. */
-       ccw->cmd_code = CCW_CMD_READ_CONF;
-       ccw->flags = 0;
-       ccw->count = offset + len;
-       ccw->cda = (__u32)(unsigned long)config_area;
-       ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_READ_CONFIG);
-       if (ret)
-               goto out_free;
-
-       memcpy(vcdev->config, config_area, offset + len);
-       if (buf)
-               memcpy(buf, &vcdev->config[offset], len);
-       if (vcdev->config_ready < offset + len)
-               vcdev->config_ready = offset + len;
-
-out_free:
-       kfree(config_area);
-       kfree(ccw);
-}
-
-static void virtio_ccw_set_config(struct virtio_device *vdev,
-                                 unsigned int offset, const void *buf,
-                                 unsigned len)
-{
-       struct virtio_ccw_device *vcdev = to_vc_device(vdev);
-       struct ccw1 *ccw;
-       void *config_area;
-
-       ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL);
-       if (!ccw)
-               return;
-
-       config_area = kzalloc(VIRTIO_CCW_CONFIG_SIZE, GFP_DMA | GFP_KERNEL);
-       if (!config_area)
-               goto out_free;
-
-       /* Make sure we don't overwrite fields. */
-       if (vcdev->config_ready < offset)
-               virtio_ccw_get_config(vdev, 0, NULL, offset);
-       memcpy(&vcdev->config[offset], buf, len);
-       /* Write the config area to the host. */
-       memcpy(config_area, vcdev->config, sizeof(vcdev->config));
-       ccw->cmd_code = CCW_CMD_WRITE_CONF;
-       ccw->flags = 0;
-       ccw->count = offset + len;
-       ccw->cda = (__u32)(unsigned long)config_area;
-       ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_WRITE_CONFIG);
-
-out_free:
-       kfree(config_area);
-       kfree(ccw);
-}
-
-static u8 virtio_ccw_get_status(struct virtio_device *vdev)
-{
-       struct virtio_ccw_device *vcdev = to_vc_device(vdev);
-
-       return *vcdev->status;
-}
-
-static void virtio_ccw_set_status(struct virtio_device *vdev, u8 status)
-{
-       struct virtio_ccw_device *vcdev = to_vc_device(vdev);
-       u8 old_status = *vcdev->status;
-       struct ccw1 *ccw;
-       int ret;
-
-       ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL);
-       if (!ccw)
-               return;
-
-       /* Write the status to the host. */
-       *vcdev->status = status;
-       ccw->cmd_code = CCW_CMD_WRITE_STATUS;
-       ccw->flags = 0;
-       ccw->count = sizeof(status);
-       ccw->cda = (__u32)(unsigned long)vcdev->status;
-       ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_WRITE_STATUS);
-       /* Write failed? We assume status is unchanged. */
-       if (ret)
-               *vcdev->status = old_status;
-       kfree(ccw);
-}
-
-static struct virtio_config_ops virtio_ccw_config_ops = {
-       .get_features = virtio_ccw_get_features,
-       .finalize_features = virtio_ccw_finalize_features,
-       .get = virtio_ccw_get_config,
-       .set = virtio_ccw_set_config,
-       .get_status = virtio_ccw_get_status,
-       .set_status = virtio_ccw_set_status,
-       .reset = virtio_ccw_reset,
-       .find_vqs = virtio_ccw_find_vqs,
-       .del_vqs = virtio_ccw_del_vqs,
-};
-
-
-/*
- * ccw bus driver related functions
- */
-
-static void virtio_ccw_release_dev(struct device *_d)
-{
-       struct virtio_device *dev = container_of(_d, struct virtio_device,
-                                                dev);
-       struct virtio_ccw_device *vcdev = to_vc_device(dev);
-
-       kfree(vcdev->status);
-       kfree(vcdev->config_block);
-       kfree(vcdev);
-}
-
-static int irb_is_error(struct irb *irb)
-{
-       if (scsw_cstat(&irb->scsw) != 0)
-               return 1;
-       if (scsw_dstat(&irb->scsw) & ~(DEV_STAT_CHN_END | DEV_STAT_DEV_END))
-               return 1;
-       if (scsw_cc(&irb->scsw) != 0)
-               return 1;
-       return 0;
-}
-
-static struct virtqueue *virtio_ccw_vq_by_ind(struct virtio_ccw_device *vcdev,
-                                             int index)
-{
-       struct virtio_ccw_vq_info *info;
-       unsigned long flags;
-       struct virtqueue *vq;
-
-       vq = NULL;
-       spin_lock_irqsave(&vcdev->lock, flags);
-       list_for_each_entry(info, &vcdev->virtqueues, node) {
-               if (info->vq->index == index) {
-                       vq = info->vq;
-                       break;
-               }
-       }
-       spin_unlock_irqrestore(&vcdev->lock, flags);
-       return vq;
-}
-
-static void virtio_ccw_int_handler(struct ccw_device *cdev,
-                                  unsigned long intparm,
-                                  struct irb *irb)
-{
-       __u32 activity = intparm & VIRTIO_CCW_INTPARM_MASK;
-       struct virtio_ccw_device *vcdev = dev_get_drvdata(&cdev->dev);
-       int i;
-       struct virtqueue *vq;
-
-       if (!vcdev)
-               return;
-       /* Check if it's a notification from the host. */
-       if ((intparm == 0) &&
-           (scsw_stctl(&irb->scsw) ==
-            (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND))) {
-               /* OK */
-       }
-       if (irb_is_error(irb)) {
-               /* Command reject? */
-               if ((scsw_dstat(&irb->scsw) & DEV_STAT_UNIT_CHECK) &&
-                   (irb->ecw[0] & SNS0_CMD_REJECT))
-                       vcdev->err = -EOPNOTSUPP;
-               else
-                       /* Map everything else to -EIO. */
-                       vcdev->err = -EIO;
-       }
-       if (vcdev->curr_io & activity) {
-               switch (activity) {
-               case VIRTIO_CCW_DOING_READ_FEAT:
-               case VIRTIO_CCW_DOING_WRITE_FEAT:
-               case VIRTIO_CCW_DOING_READ_CONFIG:
-               case VIRTIO_CCW_DOING_WRITE_CONFIG:
-               case VIRTIO_CCW_DOING_WRITE_STATUS:
-               case VIRTIO_CCW_DOING_SET_VQ:
-               case VIRTIO_CCW_DOING_SET_IND:
-               case VIRTIO_CCW_DOING_SET_CONF_IND:
-               case VIRTIO_CCW_DOING_RESET:
-               case VIRTIO_CCW_DOING_READ_VQ_CONF:
-               case VIRTIO_CCW_DOING_SET_IND_ADAPTER:
-               case VIRTIO_CCW_DOING_SET_VIRTIO_REV:
-                       vcdev->curr_io &= ~activity;
-                       wake_up(&vcdev->wait_q);
-                       break;
-               default:
-                       /* don't know what to do... */
-                       dev_warn(&cdev->dev, "Suspicious activity '%08x'\n",
-                                activity);
-                       WARN_ON(1);
-                       break;
-               }
-       }
-       for_each_set_bit(i, &vcdev->indicators,
-                        sizeof(vcdev->indicators) * BITS_PER_BYTE) {
-               /* The bit clear must happen before the vring kick. */
-               clear_bit(i, &vcdev->indicators);
-               barrier();
-               vq = virtio_ccw_vq_by_ind(vcdev, i);
-               vring_interrupt(0, vq);
-       }
-       if (test_bit(0, &vcdev->indicators2)) {
-               virtio_config_changed(&vcdev->vdev);
-               clear_bit(0, &vcdev->indicators2);
-       }
-}
-
-/*
- * We usually want to autoonline all devices, but give the admin
- * a way to exempt devices from this.
- */
-#define __DEV_WORDS ((__MAX_SUBCHANNEL + (8*sizeof(long) - 1)) / \
-                    (8*sizeof(long)))
-static unsigned long devs_no_auto[__MAX_SSID + 1][__DEV_WORDS];
-
-static char *no_auto = "";
-
-module_param(no_auto, charp, 0444);
-MODULE_PARM_DESC(no_auto, "list of ccw bus id ranges not to be auto-onlined");
-
-static int virtio_ccw_check_autoonline(struct ccw_device *cdev)
-{
-       struct ccw_dev_id id;
-
-       ccw_device_get_id(cdev, &id);
-       if (test_bit(id.devno, devs_no_auto[id.ssid]))
-               return 0;
-       return 1;
-}
-
-static void virtio_ccw_auto_online(void *data, async_cookie_t cookie)
-{
-       struct ccw_device *cdev = data;
-       int ret;
-
-       ret = ccw_device_set_online(cdev);
-       if (ret)
-               dev_warn(&cdev->dev, "Failed to set online: %d\n", ret);
-}
-
-static int virtio_ccw_probe(struct ccw_device *cdev)
-{
-       cdev->handler = virtio_ccw_int_handler;
-
-       if (virtio_ccw_check_autoonline(cdev))
-               async_schedule(virtio_ccw_auto_online, cdev);
-       return 0;
-}
-
-static struct virtio_ccw_device *virtio_grab_drvdata(struct ccw_device *cdev)
-{
-       unsigned long flags;
-       struct virtio_ccw_device *vcdev;
-
-       spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
-       vcdev = dev_get_drvdata(&cdev->dev);
-       if (!vcdev || vcdev->going_away) {
-               spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
-               return NULL;
-       }
-       vcdev->going_away = true;
-       spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
-       return vcdev;
-}
-
-static void virtio_ccw_remove(struct ccw_device *cdev)
-{
-       unsigned long flags;
-       struct virtio_ccw_device *vcdev = virtio_grab_drvdata(cdev);
-
-       if (vcdev && cdev->online) {
-               if (vcdev->device_lost)
-                       virtio_break_device(&vcdev->vdev);
-               unregister_virtio_device(&vcdev->vdev);
-               spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
-               dev_set_drvdata(&cdev->dev, NULL);
-               spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
-       }
-       cdev->handler = NULL;
-}
-
-static int virtio_ccw_offline(struct ccw_device *cdev)
-{
-       unsigned long flags;
-       struct virtio_ccw_device *vcdev = virtio_grab_drvdata(cdev);
-
-       if (!vcdev)
-               return 0;
-       if (vcdev->device_lost)
-               virtio_break_device(&vcdev->vdev);
-       unregister_virtio_device(&vcdev->vdev);
-       spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
-       dev_set_drvdata(&cdev->dev, NULL);
-       spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
-       return 0;
-}
-
-static int virtio_ccw_set_transport_rev(struct virtio_ccw_device *vcdev)
-{
-       struct virtio_rev_info *rev;
-       struct ccw1 *ccw;
-       int ret;
-
-       ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL);
-       if (!ccw)
-               return -ENOMEM;
-       rev = kzalloc(sizeof(*rev), GFP_DMA | GFP_KERNEL);
-       if (!rev) {
-               kfree(ccw);
-               return -ENOMEM;
-       }
-
-       /* Set transport revision */
-       ccw->cmd_code = CCW_CMD_SET_VIRTIO_REV;
-       ccw->flags = 0;
-       ccw->count = sizeof(*rev);
-       ccw->cda = (__u32)(unsigned long)rev;
-
-       vcdev->revision = VIRTIO_CCW_REV_MAX;
-       do {
-               rev->revision = vcdev->revision;
-               /* none of our supported revisions carry payload */
-               rev->length = 0;
-               ret = ccw_io_helper(vcdev, ccw,
-                                   VIRTIO_CCW_DOING_SET_VIRTIO_REV);
-               if (ret == -EOPNOTSUPP) {
-                       if (vcdev->revision == 0)
-                               /*
-                                * The host device does not support setting
-                                * the revision: let's operate it in legacy
-                                * mode.
-                                */
-                               ret = 0;
-                       else
-                               vcdev->revision--;
-               }
-       } while (ret == -EOPNOTSUPP);
-
-       kfree(ccw);
-       kfree(rev);
-       return ret;
-}
-
-static int virtio_ccw_online(struct ccw_device *cdev)
-{
-       int ret;
-       struct virtio_ccw_device *vcdev;
-       unsigned long flags;
-
-       vcdev = kzalloc(sizeof(*vcdev), GFP_KERNEL);
-       if (!vcdev) {
-               dev_warn(&cdev->dev, "Could not get memory for virtio\n");
-               ret = -ENOMEM;
-               goto out_free;
-       }
-       vcdev->config_block = kzalloc(sizeof(*vcdev->config_block),
-                                  GFP_DMA | GFP_KERNEL);
-       if (!vcdev->config_block) {
-               ret = -ENOMEM;
-               goto out_free;
-       }
-       vcdev->status = kzalloc(sizeof(*vcdev->status), GFP_DMA | GFP_KERNEL);
-       if (!vcdev->status) {
-               ret = -ENOMEM;
-               goto out_free;
-       }
-
-       vcdev->is_thinint = virtio_ccw_use_airq; /* at least try */
-
-       vcdev->vdev.dev.parent = &cdev->dev;
-       vcdev->vdev.dev.release = virtio_ccw_release_dev;
-       vcdev->vdev.config = &virtio_ccw_config_ops;
-       vcdev->cdev = cdev;
-       init_waitqueue_head(&vcdev->wait_q);
-       INIT_LIST_HEAD(&vcdev->virtqueues);
-       spin_lock_init(&vcdev->lock);
-
-       spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
-       dev_set_drvdata(&cdev->dev, vcdev);
-       spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
-       vcdev->vdev.id.vendor = cdev->id.cu_type;
-       vcdev->vdev.id.device = cdev->id.cu_model;
-
-       ret = virtio_ccw_set_transport_rev(vcdev);
-       if (ret)
-               goto out_free;
-
-       ret = register_virtio_device(&vcdev->vdev);
-       if (ret) {
-               dev_warn(&cdev->dev, "Failed to register virtio device: %d\n",
-                        ret);
-               goto out_put;
-       }
-       return 0;
-out_put:
-       spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
-       dev_set_drvdata(&cdev->dev, NULL);
-       spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
-       put_device(&vcdev->vdev.dev);
-       return ret;
-out_free:
-       if (vcdev) {
-               kfree(vcdev->status);
-               kfree(vcdev->config_block);
-       }
-       kfree(vcdev);
-       return ret;
-}
-
-static int virtio_ccw_cio_notify(struct ccw_device *cdev, int event)
-{
-       int rc;
-       struct virtio_ccw_device *vcdev = dev_get_drvdata(&cdev->dev);
-
-       /*
-        * Make sure vcdev is set
-        * i.e. set_offline/remove callback not already running
-        */
-       if (!vcdev)
-               return NOTIFY_DONE;
-
-       switch (event) {
-       case CIO_GONE:
-               vcdev->device_lost = true;
-               rc = NOTIFY_DONE;
-               break;
-       default:
-               rc = NOTIFY_DONE;
-               break;
-       }
-       return rc;
-}
-
-static struct ccw_device_id virtio_ids[] = {
-       { CCW_DEVICE(0x3832, 0) },
-       {},
-};
-MODULE_DEVICE_TABLE(ccw, virtio_ids);
-
-static struct ccw_driver virtio_ccw_driver = {
-       .driver = {
-               .owner = THIS_MODULE,
-               .name = "virtio_ccw",
-       },
-       .ids = virtio_ids,
-       .probe = virtio_ccw_probe,
-       .remove = virtio_ccw_remove,
-       .set_offline = virtio_ccw_offline,
-       .set_online = virtio_ccw_online,
-       .notify = virtio_ccw_cio_notify,
-       .int_class = IRQIO_VIR,
-};
-
-static int __init pure_hex(char **cp, unsigned int *val, int min_digit,
-                          int max_digit, int max_val)
-{
-       int diff;
-
-       diff = 0;
-       *val = 0;
-
-       while (diff <= max_digit) {
-               int value = hex_to_bin(**cp);
-
-               if (value < 0)
-                       break;
-               *val = *val * 16 + value;
-               (*cp)++;
-               diff++;
-       }
-
-       if ((diff < min_digit) || (diff > max_digit) || (*val > max_val))
-               return 1;
-
-       return 0;
-}
-
-static int __init parse_busid(char *str, unsigned int *cssid,
-                             unsigned int *ssid, unsigned int *devno)
-{
-       char *str_work;
-       int rc, ret;
-
-       rc = 1;
-
-       if (*str == '\0')
-               goto out;
-
-       str_work = str;
-       ret = pure_hex(&str_work, cssid, 1, 2, __MAX_CSSID);
-       if (ret || (str_work[0] != '.'))
-               goto out;
-       str_work++;
-       ret = pure_hex(&str_work, ssid, 1, 1, __MAX_SSID);
-       if (ret || (str_work[0] != '.'))
-               goto out;
-       str_work++;
-       ret = pure_hex(&str_work, devno, 4, 4, __MAX_SUBCHANNEL);
-       if (ret || (str_work[0] != '\0'))
-               goto out;
-
-       rc = 0;
-out:
-       return rc;
-}
-
-static void __init no_auto_parse(void)
-{
-       unsigned int from_cssid, to_cssid, from_ssid, to_ssid, from, to;
-       char *parm, *str;
-       int rc;
-
-       str = no_auto;
-       while ((parm = strsep(&str, ","))) {
-               rc = parse_busid(strsep(&parm, "-"), &from_cssid,
-                                &from_ssid, &from);
-               if (rc)
-                       continue;
-               if (parm != NULL) {
-                       rc = parse_busid(parm, &to_cssid,
-                                        &to_ssid, &to);
-                       if ((from_ssid > to_ssid) ||
-                           ((from_ssid == to_ssid) && (from > to)))
-                               rc = -EINVAL;
-               } else {
-                       to_cssid = from_cssid;
-                       to_ssid = from_ssid;
-                       to = from;
-               }
-               if (rc)
-                       continue;
-               while ((from_ssid < to_ssid) ||
-                      ((from_ssid == to_ssid) && (from <= to))) {
-                       set_bit(from, devs_no_auto[from_ssid]);
-                       from++;
-                       if (from > __MAX_SUBCHANNEL) {
-                               from_ssid++;
-                               from = 0;
-                       }
-               }
-       }
-}
-
-static int __init virtio_ccw_init(void)
-{
-       /* parse no_auto string before we do anything further */
-       no_auto_parse();
-       return ccw_driver_register(&virtio_ccw_driver);
-}
-module_init(virtio_ccw_init);
-
-static void __exit virtio_ccw_exit(void)
-{
-       int i;
-
-       ccw_driver_unregister(&virtio_ccw_driver);
-       for (i = 0; i < MAX_AIRQ_AREAS; i++)
-               destroy_airq_info(airq_areas[i]);
-}
-module_exit(virtio_ccw_exit);
diff --git a/drivers/s390/virtio/Makefile b/drivers/s390/virtio/Makefile
new file mode 100644 (file)
index 0000000..241891a
--- /dev/null
@@ -0,0 +1,9 @@
+# Makefile for kvm guest drivers on s390
+#
+# Copyright IBM Corp. 2008
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License (version 2 only)
+# as published by the Free Software Foundation.
+
+obj-$(CONFIG_S390_GUEST) += kvm_virtio.o virtio_ccw.o
diff --git a/drivers/s390/virtio/kvm_virtio.c b/drivers/s390/virtio/kvm_virtio.c
new file mode 100644 (file)
index 0000000..53fb975
--- /dev/null
@@ -0,0 +1,510 @@
+/*
+ * virtio for kvm on s390
+ *
+ * Copyright IBM Corp. 2008
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License (version 2 only)
+ * as published by the Free Software Foundation.
+ *
+ *    Author(s): Christian Borntraeger <borntraeger@de.ibm.com>
+ */
+
+#include <linux/kernel_stat.h>
+#include <linux/init.h>
+#include <linux/bootmem.h>
+#include <linux/err.h>
+#include <linux/virtio.h>
+#include <linux/virtio_config.h>
+#include <linux/slab.h>
+#include <linux/virtio_console.h>
+#include <linux/interrupt.h>
+#include <linux/virtio_ring.h>
+#include <linux/export.h>
+#include <linux/pfn.h>
+#include <asm/io.h>
+#include <asm/kvm_para.h>
+#include <asm/kvm_virtio.h>
+#include <asm/sclp.h>
+#include <asm/setup.h>
+#include <asm/irq.h>
+
+#define VIRTIO_SUBCODE_64 0x0D00
+
+/*
+ * The pointer to our page of device descriptions.
+ */
+static void *kvm_devices;
+static struct work_struct hotplug_work;
+
+struct kvm_device {
+       struct virtio_device vdev;
+       struct kvm_device_desc *desc;
+};
+
+#define to_kvmdev(vd) container_of(vd, struct kvm_device, vdev)
+
+/*
+ * memory layout:
+ * - kvm_device_descriptor
+ *        struct kvm_device_desc
+ * - configuration
+ *        struct kvm_vqconfig
+ * - feature bits
+ * - config space
+ */
+static struct kvm_vqconfig *kvm_vq_config(const struct kvm_device_desc *desc)
+{
+       return (struct kvm_vqconfig *)(desc + 1);
+}
+
+static u8 *kvm_vq_features(const struct kvm_device_desc *desc)
+{
+       return (u8 *)(kvm_vq_config(desc) + desc->num_vq);
+}
+
+static u8 *kvm_vq_configspace(const struct kvm_device_desc *desc)
+{
+       return kvm_vq_features(desc) + desc->feature_len * 2;
+}
+
+/*
+ * The total size of the config page used by this device (incl. desc)
+ */
+static unsigned desc_size(const struct kvm_device_desc *desc)
+{
+       return sizeof(*desc)
+               + desc->num_vq * sizeof(struct kvm_vqconfig)
+               + desc->feature_len * 2
+               + desc->config_len;
+}
+
+/* This gets the device's feature bits. */
+static u64 kvm_get_features(struct virtio_device *vdev)
+{
+       unsigned int i;
+       u32 features = 0;
+       struct kvm_device_desc *desc = to_kvmdev(vdev)->desc;
+       u8 *in_features = kvm_vq_features(desc);
+
+       for (i = 0; i < min(desc->feature_len * 8, 32); i++)
+               if (in_features[i / 8] & (1 << (i % 8)))
+                       features |= (1 << i);
+       return features;
+}
+
+static int kvm_finalize_features(struct virtio_device *vdev)
+{
+       unsigned int i, bits;
+       struct kvm_device_desc *desc = to_kvmdev(vdev)->desc;
+       /* Second half of bitmap is features we accept. */
+       u8 *out_features = kvm_vq_features(desc) + desc->feature_len;
+
+       /* Give virtio_ring a chance to accept features. */
+       vring_transport_features(vdev);
+
+       /* Make sure we don't have any features > 32 bits! */
+       BUG_ON((u32)vdev->features != vdev->features);
+
+       memset(out_features, 0, desc->feature_len);
+       bits = min_t(unsigned, desc->feature_len, sizeof(vdev->features)) * 8;
+       for (i = 0; i < bits; i++) {
+               if (__virtio_test_bit(vdev, i))
+                       out_features[i / 8] |= (1 << (i % 8));
+       }
+
+       return 0;
+}
+
+/*
+ * Reading and writing elements in config space
+ */
+static void kvm_get(struct virtio_device *vdev, unsigned int offset,
+                  void *buf, unsigned len)
+{
+       struct kvm_device_desc *desc = to_kvmdev(vdev)->desc;
+
+       BUG_ON(offset + len > desc->config_len);
+       memcpy(buf, kvm_vq_configspace(desc) + offset, len);
+}
+
+static void kvm_set(struct virtio_device *vdev, unsigned int offset,
+                  const void *buf, unsigned len)
+{
+       struct kvm_device_desc *desc = to_kvmdev(vdev)->desc;
+
+       BUG_ON(offset + len > desc->config_len);
+       memcpy(kvm_vq_configspace(desc) + offset, buf, len);
+}
+
+/*
+ * The operations to get and set the status word just access
+ * the status field of the device descriptor. set_status will also
+ * make a hypercall to the host to notify it of status changes.
+ */
+static u8 kvm_get_status(struct virtio_device *vdev)
+{
+       return to_kvmdev(vdev)->desc->status;
+}
+
+static void kvm_set_status(struct virtio_device *vdev, u8 status)
+{
+       BUG_ON(!status);
+       to_kvmdev(vdev)->desc->status = status;
+       kvm_hypercall1(KVM_S390_VIRTIO_SET_STATUS,
+                      (unsigned long) to_kvmdev(vdev)->desc);
+}
+
+/*
+ * To reset the device, we use the KVM_VIRTIO_RESET hypercall, using the
+ * descriptor address. The Host will zero the status and all the
+ * features.
+ */
+static void kvm_reset(struct virtio_device *vdev)
+{
+       kvm_hypercall1(KVM_S390_VIRTIO_RESET,
+                      (unsigned long) to_kvmdev(vdev)->desc);
+}
+
+/*
+ * When the virtio_ring code wants to notify the Host, it calls us here and we
+ * make a hypercall. We hand the address of the virtqueue so the Host
+ * knows which virtqueue we're talking about.
+ */
+static bool kvm_notify(struct virtqueue *vq)
+{
+       long rc;
+       struct kvm_vqconfig *config = vq->priv;
+
+       rc = kvm_hypercall1(KVM_S390_VIRTIO_NOTIFY, config->address);
+       if (rc < 0)
+               return false;
+       return true;
+}
+
+/*
+ * This routine finds the first virtqueue described in the configuration of
+ * this device and sets it up.
+ */
+static struct virtqueue *kvm_find_vq(struct virtio_device *vdev,
+                                    unsigned index,
+                                    void (*callback)(struct virtqueue *vq),
+                                    const char *name)
+{
+       struct kvm_device *kdev = to_kvmdev(vdev);
+       struct kvm_vqconfig *config;
+       struct virtqueue *vq;
+       int err;
+
+       if (index >= kdev->desc->num_vq)
+               return ERR_PTR(-ENOENT);
+
+       if (!name)
+               return NULL;
+
+       config = kvm_vq_config(kdev->desc)+index;
+
+       err = vmem_add_mapping(config->address,
+                              vring_size(config->num,
+                                         KVM_S390_VIRTIO_RING_ALIGN));
+       if (err)
+               goto out;
+
+       vq = vring_new_virtqueue(index, config->num, KVM_S390_VIRTIO_RING_ALIGN,
+                                vdev, true, (void *) config->address,
+                                kvm_notify, callback, name);
+       if (!vq) {
+               err = -ENOMEM;
+               goto unmap;
+       }
+
+       /*
+        * Register a callback token.
+        * The host will send it via the external interrupt parameter.
+        */
+       config->token = (u64) vq;
+
+       vq->priv = config;
+       return vq;
+unmap:
+       vmem_remove_mapping(config->address,
+                           vring_size(config->num,
+                                      KVM_S390_VIRTIO_RING_ALIGN));
+out:
+       return ERR_PTR(err);
+}
+
+static void kvm_del_vq(struct virtqueue *vq)
+{
+       struct kvm_vqconfig *config = vq->priv;
+
+       vring_del_virtqueue(vq);
+       vmem_remove_mapping(config->address,
+                           vring_size(config->num,
+                                      KVM_S390_VIRTIO_RING_ALIGN));
+}
+
+static void kvm_del_vqs(struct virtio_device *vdev)
+{
+       struct virtqueue *vq, *n;
+
+       list_for_each_entry_safe(vq, n, &vdev->vqs, list)
+               kvm_del_vq(vq);
+}
+
+static int kvm_find_vqs(struct virtio_device *vdev, unsigned nvqs,
+                       struct virtqueue *vqs[],
+                       vq_callback_t *callbacks[],
+                       const char *names[])
+{
+       struct kvm_device *kdev = to_kvmdev(vdev);
+       int i;
+
+       /* The host must provide at least this many virtqueues. */
+       if (nvqs > kdev->desc->num_vq)
+               return -ENOENT;
+
+       for (i = 0; i < nvqs; ++i) {
+               vqs[i] = kvm_find_vq(vdev, i, callbacks[i], names[i]);
+               if (IS_ERR(vqs[i]))
+                       goto error;
+       }
+       return 0;
+
+error:
+       kvm_del_vqs(vdev);
+       return PTR_ERR(vqs[i]);
+}
+
+static const char *kvm_bus_name(struct virtio_device *vdev)
+{
+       return "";
+}
+
+/*
+ * The config ops structure as defined by virtio config
+ */
+static const struct virtio_config_ops kvm_vq_configspace_ops = {
+       .get_features = kvm_get_features,
+       .finalize_features = kvm_finalize_features,
+       .get = kvm_get,
+       .set = kvm_set,
+       .get_status = kvm_get_status,
+       .set_status = kvm_set_status,
+       .reset = kvm_reset,
+       .find_vqs = kvm_find_vqs,
+       .del_vqs = kvm_del_vqs,
+       .bus_name = kvm_bus_name,
+};
+
+/*
+ * The root device for the kvm virtio devices.
+ * This makes them appear as /sys/devices/kvm_s390/0,1,2 not /sys/devices/0,1,2.
+ */
+static struct device *kvm_root;
+
+/*
+ * Adds a new device and registers it with virtio.
+ * Appropriate drivers are loaded by the device model.
+ */
+static void add_kvm_device(struct kvm_device_desc *d, unsigned int offset)
+{
+       struct kvm_device *kdev;
+
+       kdev = kzalloc(sizeof(*kdev), GFP_KERNEL);
+       if (!kdev) {
+               printk(KERN_EMERG "Cannot allocate kvm dev %u type %u\n",
+                      offset, d->type);
+               return;
+       }
+
+       kdev->vdev.dev.parent = kvm_root;
+       kdev->vdev.id.device = d->type;
+       kdev->vdev.config = &kvm_vq_configspace_ops;
+       kdev->desc = d;
+
+       if (register_virtio_device(&kdev->vdev) != 0) {
+               printk(KERN_ERR "Failed to register kvm device %u type %u\n",
+                      offset, d->type);
+               kfree(kdev);
+       }
+}
+
+/*
+ * scan_devices() simply iterates through the device page.
+ * The type 0 is reserved to mean "end of devices".
+ */
+static void scan_devices(void)
+{
+       unsigned int i;
+       struct kvm_device_desc *d;
+
+       for (i = 0; i < PAGE_SIZE; i += desc_size(d)) {
+               d = kvm_devices + i;
+
+               if (d->type == 0)
+                       break;
+
+               add_kvm_device(d, i);
+       }
+}
+
+/*
+ * match for a kvm device with a specific desc pointer
+ */
+static int match_desc(struct device *dev, void *data)
+{
+       struct virtio_device *vdev = dev_to_virtio(dev);
+       struct kvm_device *kdev = to_kvmdev(vdev);
+
+       return kdev->desc == data;
+}
+
+/*
+ * hotplug_devices() tries to find changes in the device page.
+ */
+static void hotplug_devices(struct work_struct *dummy)
+{
+       unsigned int i;
+       struct kvm_device_desc *d;
+       struct device *dev;
+
+       for (i = 0; i < PAGE_SIZE; i += desc_size(d)) {
+               d = kvm_devices + i;
+
+               /* end of list */
+               if (d->type == 0)
+                       break;
+
+               /* device already exists */
+               dev = device_find_child(kvm_root, d, match_desc);
+               if (dev) {
+                       /* XXX check for hotplug remove */
+                       put_device(dev);
+                       continue;
+               }
+
+               /* new device */
+               printk(KERN_INFO "Adding new virtio device %p\n", d);
+               add_kvm_device(d, i);
+       }
+}
+
+/*
+ * We emulate the request_irq behaviour on top of s390 external interrupts.
+ */
+static void kvm_extint_handler(struct ext_code ext_code,
+                              unsigned int param32, unsigned long param64)
+{
+       struct virtqueue *vq;
+       u32 param;
+
+       if ((ext_code.subcode & 0xff00) != VIRTIO_SUBCODE_64)
+               return;
+       inc_irq_stat(IRQEXT_VRT);
+
+       /* The LSB might be overloaded, we have to mask it */
+       vq = (struct virtqueue *)(param64 & ~1UL);
+
+       /* We use ext_params to decide what this interrupt means */
+       param = param32 & VIRTIO_PARAM_MASK;
+
+       switch (param) {
+       case VIRTIO_PARAM_CONFIG_CHANGED:
+               virtio_config_changed(vq->vdev);
+               break;
+       case VIRTIO_PARAM_DEV_ADD:
+               schedule_work(&hotplug_work);
+               break;
+       case VIRTIO_PARAM_VRING_INTERRUPT:
+       default:
+               vring_interrupt(0, vq);
+               break;
+       }
+}
+
+/*
+ * For s390-virtio, we expect a page above main storage containing
+ * the virtio configuration. Try to actually load from this area
+ * in order to figure out if the host provides this page.
+ */
+static int __init test_devices_support(unsigned long addr)
+{
+       int ret = -EIO;
+
+       asm volatile(
+               "0:     lura    0,%1\n"
+               "1:     xgr     %0,%0\n"
+               "2:\n"
+               EX_TABLE(0b,2b)
+               EX_TABLE(1b,2b)
+               : "+d" (ret)
+               : "a" (addr)
+               : "0", "cc");
+       return ret;
+}
+/*
+ * Init function for virtio.
+ * Devices are in a single page above the top of "normal" + standby memory.
+ */
+static int __init kvm_devices_init(void)
+{
+       int rc;
+       unsigned long total_memory_size = sclp.rzm * sclp.rnmax;
+
+       if (!MACHINE_IS_KVM)
+               return -ENODEV;
+
+       if (test_devices_support(total_memory_size) < 0)
+               return -ENODEV;
+
+       rc = vmem_add_mapping(total_memory_size, PAGE_SIZE);
+       if (rc)
+               return rc;
+
+       kvm_devices = (void *) total_memory_size;
+
+       kvm_root = root_device_register("kvm_s390");
+       if (IS_ERR(kvm_root)) {
+               rc = PTR_ERR(kvm_root);
+               printk(KERN_ERR "Could not register kvm_s390 root device");
+               vmem_remove_mapping(total_memory_size, PAGE_SIZE);
+               return rc;
+       }
+
+       INIT_WORK(&hotplug_work, hotplug_devices);
+
+       irq_subclass_register(IRQ_SUBCLASS_SERVICE_SIGNAL);
+       register_external_irq(EXT_IRQ_CP_SERVICE, kvm_extint_handler);
+
+       scan_devices();
+       return 0;
+}
+
+/* code for early console output with virtio_console */
+static __init int early_put_chars(u32 vtermno, const char *buf, int count)
+{
+       char scratch[17];
+       unsigned int len = count;
+
+       if (len > sizeof(scratch) - 1)
+               len = sizeof(scratch) - 1;
+       scratch[len] = '\0';
+       memcpy(scratch, buf, len);
+       kvm_hypercall1(KVM_S390_VIRTIO_NOTIFY, __pa(scratch));
+       return len;
+}
+
+static int __init s390_virtio_console_init(void)
+{
+       if (sclp.has_vt220 || sclp.has_linemode)
+               return -ENODEV;
+       return virtio_cons_early_init(early_put_chars);
+}
+console_initcall(s390_virtio_console_init);
+
+
+/*
+ * We do this after core stuff, but before the drivers.
+ */
+postcore_initcall(kvm_devices_init);
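For orientation, here is a minimal user-space sketch of the device-page walk that desc_size() and scan_devices() above implement. The demo_* struct layouts are invented stand-ins chosen only to make the example self-contained; they are not the real kvm_device_desc/kvm_vqconfig definitions from <asm/kvm_virtio.h>, and only the walk logic (advance by the per-device descriptor size, stop at type 0) mirrors the driver.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct demo_vqconfig {			/* stand-in for struct kvm_vqconfig */
	uint64_t token;
	uint64_t address;
	uint16_t num;
} __attribute__((packed));

struct demo_desc {			/* stand-in for struct kvm_device_desc */
	uint8_t type;			/* 0 means "end of devices" */
	uint8_t num_vq;
	uint8_t feature_len;
	uint8_t config_len;
	uint8_t status;
} __attribute__((packed));

/* Same shape as desc_size(): header + vqconfigs + two feature maps + config. */
static size_t demo_desc_size(const struct demo_desc *d)
{
	return sizeof(*d)
		+ d->num_vq * sizeof(struct demo_vqconfig)
		+ d->feature_len * 2
		+ d->config_len;
}

int main(void)
{
	uint8_t page[4096];
	struct demo_desc *d = (struct demo_desc *)page;
	size_t i;

	memset(page, 0, sizeof(page));
	/* One fake device: type 1, one queue, 4 feature bytes, 8 config bytes. */
	d->type = 1;
	d->num_vq = 1;
	d->feature_len = 4;
	d->config_len = 8;

	/* The walk itself, same stop condition as scan_devices()/hotplug_devices(). */
	for (i = 0; i < sizeof(page); i += demo_desc_size(d)) {
		d = (struct demo_desc *)(page + i);
		if (d->type == 0)
			break;
		printf("device at offset %zu, type %u\n", i, d->type);
	}
	return 0;
}

Compiled with any host C compiler, this prints the single fake device and then stops at the type-0 terminator, which is exactly the condition both scan_devices() and hotplug_devices() rely on to detect the end of the page.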
diff --git a/drivers/s390/virtio/virtio_ccw.c b/drivers/s390/virtio/virtio_ccw.c
new file mode 100644 (file)
index 0000000..f8d8fdb
--- /dev/null
@@ -0,0 +1,1387 @@
+/*
+ * ccw based virtio transport
+ *
+ * Copyright IBM Corp. 2012, 2014
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License (version 2 only)
+ * as published by the Free Software Foundation.
+ *
+ *    Author(s): Cornelia Huck <cornelia.huck@de.ibm.com>
+ */
+
+#include <linux/kernel_stat.h>
+#include <linux/init.h>
+#include <linux/bootmem.h>
+#include <linux/err.h>
+#include <linux/virtio.h>
+#include <linux/virtio_config.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/virtio_ring.h>
+#include <linux/pfn.h>
+#include <linux/async.h>
+#include <linux/wait.h>
+#include <linux/list.h>
+#include <linux/bitops.h>
+#include <linux/module.h>
+#include <linux/io.h>
+#include <linux/kvm_para.h>
+#include <linux/notifier.h>
+#include <asm/setup.h>
+#include <asm/irq.h>
+#include <asm/cio.h>
+#include <asm/ccwdev.h>
+#include <asm/virtio-ccw.h>
+#include <asm/isc.h>
+#include <asm/airq.h>
+
+/*
+ * virtio related functions
+ */
+
+struct vq_config_block {
+       __u16 index;
+       __u16 num;
+} __packed;
+
+#define VIRTIO_CCW_CONFIG_SIZE 0x100
+/* same as PCI config space size, should be enough for all drivers */
+
+struct virtio_ccw_device {
+       struct virtio_device vdev;
+       __u8 *status;
+       __u8 config[VIRTIO_CCW_CONFIG_SIZE];
+       struct ccw_device *cdev;
+       __u32 curr_io;
+       int err;
+       unsigned int revision; /* Transport revision */
+       wait_queue_head_t wait_q;
+       spinlock_t lock;
+       struct list_head virtqueues;
+       unsigned long indicators;
+       unsigned long indicators2;
+       struct vq_config_block *config_block;
+       bool is_thinint;
+       bool going_away;
+       bool device_lost;
+       unsigned int config_ready;
+       void *airq_info;
+};
+
+struct vq_info_block_legacy {
+       __u64 queue;
+       __u32 align;
+       __u16 index;
+       __u16 num;
+} __packed;
+
+struct vq_info_block {
+       __u64 desc;
+       __u32 res0;
+       __u16 index;
+       __u16 num;
+       __u64 avail;
+       __u64 used;
+} __packed;
+
+struct virtio_feature_desc {
+       __u32 features;
+       __u8 index;
+} __packed;
+
+struct virtio_thinint_area {
+       unsigned long summary_indicator;
+       unsigned long indicator;
+       u64 bit_nr;
+       u8 isc;
+} __packed;
+
+struct virtio_rev_info {
+       __u16 revision;
+       __u16 length;
+       __u8 data[];
+};
+
+/* the highest virtio-ccw revision we support */
+#define VIRTIO_CCW_REV_MAX 1
+
+struct virtio_ccw_vq_info {
+       struct virtqueue *vq;
+       int num;
+       void *queue;
+       union {
+               struct vq_info_block s;
+               struct vq_info_block_legacy l;
+       } *info_block;
+       int bit_nr;
+       struct list_head node;
+       long cookie;
+};
+
+#define VIRTIO_AIRQ_ISC IO_SCH_ISC /* inherit from subchannel */
+
+#define VIRTIO_IV_BITS (L1_CACHE_BYTES * 8)
+#define MAX_AIRQ_AREAS 20
+
+static int virtio_ccw_use_airq = 1;
+
+struct airq_info {
+       rwlock_t lock;
+       u8 summary_indicator;
+       struct airq_struct airq;
+       struct airq_iv *aiv;
+};
+static struct airq_info *airq_areas[MAX_AIRQ_AREAS];
+
+#define CCW_CMD_SET_VQ 0x13
+#define CCW_CMD_VDEV_RESET 0x33
+#define CCW_CMD_SET_IND 0x43
+#define CCW_CMD_SET_CONF_IND 0x53
+#define CCW_CMD_READ_FEAT 0x12
+#define CCW_CMD_WRITE_FEAT 0x11
+#define CCW_CMD_READ_CONF 0x22
+#define CCW_CMD_WRITE_CONF 0x21
+#define CCW_CMD_WRITE_STATUS 0x31
+#define CCW_CMD_READ_VQ_CONF 0x32
+#define CCW_CMD_SET_IND_ADAPTER 0x73
+#define CCW_CMD_SET_VIRTIO_REV 0x83
+
+#define VIRTIO_CCW_DOING_SET_VQ 0x00010000
+#define VIRTIO_CCW_DOING_RESET 0x00040000
+#define VIRTIO_CCW_DOING_READ_FEAT 0x00080000
+#define VIRTIO_CCW_DOING_WRITE_FEAT 0x00100000
+#define VIRTIO_CCW_DOING_READ_CONFIG 0x00200000
+#define VIRTIO_CCW_DOING_WRITE_CONFIG 0x00400000
+#define VIRTIO_CCW_DOING_WRITE_STATUS 0x00800000
+#define VIRTIO_CCW_DOING_SET_IND 0x01000000
+#define VIRTIO_CCW_DOING_READ_VQ_CONF 0x02000000
+#define VIRTIO_CCW_DOING_SET_CONF_IND 0x04000000
+#define VIRTIO_CCW_DOING_SET_IND_ADAPTER 0x08000000
+#define VIRTIO_CCW_DOING_SET_VIRTIO_REV 0x10000000
+#define VIRTIO_CCW_INTPARM_MASK 0xffff0000
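The VIRTIO_CCW_DOING_* values double as interrupt parameters: for a per-queue operation the queue index is ORed into the low 16 bits (see VIRTIO_CCW_DOING_SET_VQ | index in virtio_ccw_setup_vq() and virtio_ccw_del_vq() below), and virtio_ccw_int_handler() recovers the activity again with VIRTIO_CCW_INTPARM_MASK. A tiny standalone sketch of that encoding, using mirrored example constants rather than the kernel headers:

#include <stdio.h>

#define DEMO_DOING_SET_VQ	0x00010000u	/* mirrors VIRTIO_CCW_DOING_SET_VQ */
#define DEMO_INTPARM_MASK	0xffff0000u	/* mirrors VIRTIO_CCW_INTPARM_MASK */

int main(void)
{
	unsigned int index = 3;					/* example queue index */
	unsigned int intparm = DEMO_DOING_SET_VQ | index;	/* as built for ccw_io_helper() */
	unsigned int activity = intparm & DEMO_INTPARM_MASK;	/* as decoded in the handler */

	printf("intparm=%#x activity=%#x queue=%u\n",
	       intparm, activity, intparm & ~DEMO_INTPARM_MASK);
	return 0;
}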
+
+static struct virtio_ccw_device *to_vc_device(struct virtio_device *vdev)
+{
+       return container_of(vdev, struct virtio_ccw_device, vdev);
+}
+
+static void drop_airq_indicator(struct virtqueue *vq, struct airq_info *info)
+{
+       unsigned long i, flags;
+
+       write_lock_irqsave(&info->lock, flags);
+       for (i = 0; i < airq_iv_end(info->aiv); i++) {
+               if (vq == (void *)airq_iv_get_ptr(info->aiv, i)) {
+                       airq_iv_free_bit(info->aiv, i);
+                       airq_iv_set_ptr(info->aiv, i, 0);
+                       break;
+               }
+       }
+       write_unlock_irqrestore(&info->lock, flags);
+}
+
+static void virtio_airq_handler(struct airq_struct *airq)
+{
+       struct airq_info *info = container_of(airq, struct airq_info, airq);
+       unsigned long ai;
+
+       inc_irq_stat(IRQIO_VAI);
+       read_lock(&info->lock);
+       /* Walk through indicators field, summary indicator active. */
+       for (ai = 0;;) {
+               ai = airq_iv_scan(info->aiv, ai, airq_iv_end(info->aiv));
+               if (ai == -1UL)
+                       break;
+               vring_interrupt(0, (void *)airq_iv_get_ptr(info->aiv, ai));
+       }
+       info->summary_indicator = 0;
+       smp_wmb();
+       /* Walk through indicators field, summary indicator not active. */
+       for (ai = 0;;) {
+               ai = airq_iv_scan(info->aiv, ai, airq_iv_end(info->aiv));
+               if (ai == -1UL)
+                       break;
+               vring_interrupt(0, (void *)airq_iv_get_ptr(info->aiv, ai));
+       }
+       read_unlock(&info->lock);
+}
+
+static struct airq_info *new_airq_info(void)
+{
+       struct airq_info *info;
+       int rc;
+
+       info = kzalloc(sizeof(*info), GFP_KERNEL);
+       if (!info)
+               return NULL;
+       rwlock_init(&info->lock);
+       info->aiv = airq_iv_create(VIRTIO_IV_BITS, AIRQ_IV_ALLOC | AIRQ_IV_PTR);
+       if (!info->aiv) {
+               kfree(info);
+               return NULL;
+       }
+       info->airq.handler = virtio_airq_handler;
+       info->airq.lsi_ptr = &info->summary_indicator;
+       info->airq.lsi_mask = 0xff;
+       info->airq.isc = VIRTIO_AIRQ_ISC;
+       rc = register_adapter_interrupt(&info->airq);
+       if (rc) {
+               airq_iv_release(info->aiv);
+               kfree(info);
+               return NULL;
+       }
+       return info;
+}
+
+static void destroy_airq_info(struct airq_info *info)
+{
+       if (!info)
+               return;
+
+       unregister_adapter_interrupt(&info->airq);
+       airq_iv_release(info->aiv);
+       kfree(info);
+}
+
+static unsigned long get_airq_indicator(struct virtqueue *vqs[], int nvqs,
+                                       u64 *first, void **airq_info)
+{
+       int i, j;
+       struct airq_info *info;
+       unsigned long indicator_addr = 0;
+       unsigned long bit, flags;
+
+       for (i = 0; i < MAX_AIRQ_AREAS && !indicator_addr; i++) {
+               if (!airq_areas[i])
+                       airq_areas[i] = new_airq_info();
+               info = airq_areas[i];
+               if (!info)
+                       return 0;
+               write_lock_irqsave(&info->lock, flags);
+               bit = airq_iv_alloc(info->aiv, nvqs);
+               if (bit == -1UL) {
+                       /* Not enough vacancies. */
+                       write_unlock_irqrestore(&info->lock, flags);
+                       continue;
+               }
+               *first = bit;
+               *airq_info = info;
+               indicator_addr = (unsigned long)info->aiv->vector;
+               for (j = 0; j < nvqs; j++) {
+                       airq_iv_set_ptr(info->aiv, bit + j,
+                                       (unsigned long)vqs[j]);
+               }
+               write_unlock_irqrestore(&info->lock, flags);
+       }
+       return indicator_addr;
+}
+
+static void virtio_ccw_drop_indicators(struct virtio_ccw_device *vcdev)
+{
+       struct virtio_ccw_vq_info *info;
+
+       list_for_each_entry(info, &vcdev->virtqueues, node)
+               drop_airq_indicator(info->vq, vcdev->airq_info);
+}
+
+static int doing_io(struct virtio_ccw_device *vcdev, __u32 flag)
+{
+       unsigned long flags;
+       __u32 ret;
+
+       spin_lock_irqsave(get_ccwdev_lock(vcdev->cdev), flags);
+       if (vcdev->err)
+               ret = 0;
+       else
+               ret = vcdev->curr_io & flag;
+       spin_unlock_irqrestore(get_ccwdev_lock(vcdev->cdev), flags);
+       return ret;
+}
+
+static int ccw_io_helper(struct virtio_ccw_device *vcdev,
+                        struct ccw1 *ccw, __u32 intparm)
+{
+       int ret;
+       unsigned long flags;
+       int flag = intparm & VIRTIO_CCW_INTPARM_MASK;
+
+       do {
+               spin_lock_irqsave(get_ccwdev_lock(vcdev->cdev), flags);
+               ret = ccw_device_start(vcdev->cdev, ccw, intparm, 0, 0);
+               if (!ret) {
+                       if (!vcdev->curr_io)
+                               vcdev->err = 0;
+                       vcdev->curr_io |= flag;
+               }
+               spin_unlock_irqrestore(get_ccwdev_lock(vcdev->cdev), flags);
+               cpu_relax();
+       } while (ret == -EBUSY);
+       wait_event(vcdev->wait_q, doing_io(vcdev, flag) == 0);
+       return ret ? ret : vcdev->err;
+}
+
+static void virtio_ccw_drop_indicator(struct virtio_ccw_device *vcdev,
+                                     struct ccw1 *ccw)
+{
+       int ret;
+       unsigned long *indicatorp = NULL;
+       struct virtio_thinint_area *thinint_area = NULL;
+       struct airq_info *airq_info = vcdev->airq_info;
+
+       if (vcdev->is_thinint) {
+               thinint_area = kzalloc(sizeof(*thinint_area),
+                                      GFP_DMA | GFP_KERNEL);
+               if (!thinint_area)
+                       return;
+               thinint_area->summary_indicator =
+                       (unsigned long) &airq_info->summary_indicator;
+               thinint_area->isc = VIRTIO_AIRQ_ISC;
+               ccw->cmd_code = CCW_CMD_SET_IND_ADAPTER;
+               ccw->count = sizeof(*thinint_area);
+               ccw->cda = (__u32)(unsigned long) thinint_area;
+       } else {
+               indicatorp = kmalloc(sizeof(&vcdev->indicators),
+                                    GFP_DMA | GFP_KERNEL);
+               if (!indicatorp)
+                       return;
+               *indicatorp = 0;
+               ccw->cmd_code = CCW_CMD_SET_IND;
+               ccw->count = sizeof(vcdev->indicators);
+               ccw->cda = (__u32)(unsigned long) indicatorp;
+       }
+       /* Deregister indicators from host. */
+       vcdev->indicators = 0;
+       ccw->flags = 0;
+       ret = ccw_io_helper(vcdev, ccw,
+                           vcdev->is_thinint ?
+                           VIRTIO_CCW_DOING_SET_IND_ADAPTER :
+                           VIRTIO_CCW_DOING_SET_IND);
+       if (ret && (ret != -ENODEV))
+               dev_info(&vcdev->cdev->dev,
+                        "Failed to deregister indicators (%d)\n", ret);
+       else if (vcdev->is_thinint)
+               virtio_ccw_drop_indicators(vcdev);
+       kfree(indicatorp);
+       kfree(thinint_area);
+}
+
+static inline long do_kvm_notify(struct subchannel_id schid,
+                                unsigned long queue_index,
+                                long cookie)
+{
+       register unsigned long __nr asm("1") = KVM_S390_VIRTIO_CCW_NOTIFY;
+       register struct subchannel_id __schid asm("2") = schid;
+       register unsigned long __index asm("3") = queue_index;
+       register long __rc asm("2");
+       register long __cookie asm("4") = cookie;
+
+       asm volatile ("diag 2,4,0x500\n"
+                     : "=d" (__rc) : "d" (__nr), "d" (__schid), "d" (__index),
+                     "d"(__cookie)
+                     : "memory", "cc");
+       return __rc;
+}
+
+static bool virtio_ccw_kvm_notify(struct virtqueue *vq)
+{
+       struct virtio_ccw_vq_info *info = vq->priv;
+       struct virtio_ccw_device *vcdev;
+       struct subchannel_id schid;
+
+       vcdev = to_vc_device(info->vq->vdev);
+       ccw_device_get_schid(vcdev->cdev, &schid);
+       info->cookie = do_kvm_notify(schid, vq->index, info->cookie);
+       if (info->cookie < 0)
+               return false;
+       return true;
+}
+
+static int virtio_ccw_read_vq_conf(struct virtio_ccw_device *vcdev,
+                                  struct ccw1 *ccw, int index)
+{
+       vcdev->config_block->index = index;
+       ccw->cmd_code = CCW_CMD_READ_VQ_CONF;
+       ccw->flags = 0;
+       ccw->count = sizeof(struct vq_config_block);
+       ccw->cda = (__u32)(unsigned long)(vcdev->config_block);
+       ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_READ_VQ_CONF);
+       return vcdev->config_block->num;
+}
+
+static void virtio_ccw_del_vq(struct virtqueue *vq, struct ccw1 *ccw)
+{
+       struct virtio_ccw_device *vcdev = to_vc_device(vq->vdev);
+       struct virtio_ccw_vq_info *info = vq->priv;
+       unsigned long flags;
+       unsigned long size;
+       int ret;
+       unsigned int index = vq->index;
+
+       /* Remove from our list. */
+       spin_lock_irqsave(&vcdev->lock, flags);
+       list_del(&info->node);
+       spin_unlock_irqrestore(&vcdev->lock, flags);
+
+       /* Release from host. */
+       if (vcdev->revision == 0) {
+               info->info_block->l.queue = 0;
+               info->info_block->l.align = 0;
+               info->info_block->l.index = index;
+               info->info_block->l.num = 0;
+               ccw->count = sizeof(info->info_block->l);
+       } else {
+               info->info_block->s.desc = 0;
+               info->info_block->s.index = index;
+               info->info_block->s.num = 0;
+               info->info_block->s.avail = 0;
+               info->info_block->s.used = 0;
+               ccw->count = sizeof(info->info_block->s);
+       }
+       ccw->cmd_code = CCW_CMD_SET_VQ;
+       ccw->flags = 0;
+       ccw->cda = (__u32)(unsigned long)(info->info_block);
+       ret = ccw_io_helper(vcdev, ccw,
+                           VIRTIO_CCW_DOING_SET_VQ | index);
+       /*
+        * -ENODEV isn't considered an error: The device is gone anyway.
+        * This may happen on device detach.
+        */
+       if (ret && (ret != -ENODEV))
+               dev_warn(&vq->vdev->dev, "Error %d while deleting queue %d",
+                        ret, index);
+
+       vring_del_virtqueue(vq);
+       size = PAGE_ALIGN(vring_size(info->num, KVM_VIRTIO_CCW_RING_ALIGN));
+       free_pages_exact(info->queue, size);
+       kfree(info->info_block);
+       kfree(info);
+}
+
+static void virtio_ccw_del_vqs(struct virtio_device *vdev)
+{
+       struct virtqueue *vq, *n;
+       struct ccw1 *ccw;
+       struct virtio_ccw_device *vcdev = to_vc_device(vdev);
+
+       ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL);
+       if (!ccw)
+               return;
+
+       virtio_ccw_drop_indicator(vcdev, ccw);
+
+       list_for_each_entry_safe(vq, n, &vdev->vqs, list)
+               virtio_ccw_del_vq(vq, ccw);
+
+       kfree(ccw);
+}
+
+static struct virtqueue *virtio_ccw_setup_vq(struct virtio_device *vdev,
+                                            int i, vq_callback_t *callback,
+                                            const char *name,
+                                            struct ccw1 *ccw)
+{
+       struct virtio_ccw_device *vcdev = to_vc_device(vdev);
+       int err;
+       struct virtqueue *vq = NULL;
+       struct virtio_ccw_vq_info *info;
+       unsigned long size = 0; /* silence the compiler */
+       unsigned long flags;
+
+       /* Allocate queue. */
+       info = kzalloc(sizeof(struct virtio_ccw_vq_info), GFP_KERNEL);
+       if (!info) {
+               dev_warn(&vcdev->cdev->dev, "no info\n");
+               err = -ENOMEM;
+               goto out_err;
+       }
+       info->info_block = kzalloc(sizeof(*info->info_block),
+                                  GFP_DMA | GFP_KERNEL);
+       if (!info->info_block) {
+               dev_warn(&vcdev->cdev->dev, "no info block\n");
+               err = -ENOMEM;
+               goto out_err;
+       }
+       info->num = virtio_ccw_read_vq_conf(vcdev, ccw, i);
+       size = PAGE_ALIGN(vring_size(info->num, KVM_VIRTIO_CCW_RING_ALIGN));
+       info->queue = alloc_pages_exact(size, GFP_KERNEL | __GFP_ZERO);
+       if (info->queue == NULL) {
+               dev_warn(&vcdev->cdev->dev, "no queue\n");
+               err = -ENOMEM;
+               goto out_err;
+       }
+
+       vq = vring_new_virtqueue(i, info->num, KVM_VIRTIO_CCW_RING_ALIGN, vdev,
+                                true, info->queue, virtio_ccw_kvm_notify,
+                                callback, name);
+       if (!vq) {
+               /* For now, we fail if we can't get the requested size. */
+               dev_warn(&vcdev->cdev->dev, "no vq\n");
+               err = -ENOMEM;
+               goto out_err;
+       }
+
+       /* Register it with the host. */
+       if (vcdev->revision == 0) {
+               info->info_block->l.queue = (__u64)info->queue;
+               info->info_block->l.align = KVM_VIRTIO_CCW_RING_ALIGN;
+               info->info_block->l.index = i;
+               info->info_block->l.num = info->num;
+               ccw->count = sizeof(info->info_block->l);
+       } else {
+               info->info_block->s.desc = (__u64)info->queue;
+               info->info_block->s.index = i;
+               info->info_block->s.num = info->num;
+               info->info_block->s.avail = (__u64)virtqueue_get_avail(vq);
+               info->info_block->s.used = (__u64)virtqueue_get_used(vq);
+               ccw->count = sizeof(info->info_block->s);
+       }
+       ccw->cmd_code = CCW_CMD_SET_VQ;
+       ccw->flags = 0;
+       ccw->cda = (__u32)(unsigned long)(info->info_block);
+       err = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_SET_VQ | i);
+       if (err) {
+               dev_warn(&vcdev->cdev->dev, "SET_VQ failed\n");
+               goto out_err;
+       }
+
+       info->vq = vq;
+       vq->priv = info;
+
+       /* Save it to our list. */
+       spin_lock_irqsave(&vcdev->lock, flags);
+       list_add(&info->node, &vcdev->virtqueues);
+       spin_unlock_irqrestore(&vcdev->lock, flags);
+
+       return vq;
+
+out_err:
+       if (vq)
+               vring_del_virtqueue(vq);
+       if (info) {
+               if (info->queue)
+                       free_pages_exact(info->queue, size);
+               kfree(info->info_block);
+       }
+       kfree(info);
+       return ERR_PTR(err);
+}
+
+static int virtio_ccw_register_adapter_ind(struct virtio_ccw_device *vcdev,
+                                          struct virtqueue *vqs[], int nvqs,
+                                          struct ccw1 *ccw)
+{
+       int ret;
+       struct virtio_thinint_area *thinint_area = NULL;
+       struct airq_info *info;
+
+       thinint_area = kzalloc(sizeof(*thinint_area), GFP_DMA | GFP_KERNEL);
+       if (!thinint_area) {
+               ret = -ENOMEM;
+               goto out;
+       }
+       /* Try to get an indicator. */
+       thinint_area->indicator = get_airq_indicator(vqs, nvqs,
+                                                    &thinint_area->bit_nr,
+                                                    &vcdev->airq_info);
+       if (!thinint_area->indicator) {
+               ret = -ENOSPC;
+               goto out;
+       }
+       info = vcdev->airq_info;
+       thinint_area->summary_indicator =
+               (unsigned long) &info->summary_indicator;
+       thinint_area->isc = VIRTIO_AIRQ_ISC;
+       ccw->cmd_code = CCW_CMD_SET_IND_ADAPTER;
+       ccw->flags = CCW_FLAG_SLI;
+       ccw->count = sizeof(*thinint_area);
+       ccw->cda = (__u32)(unsigned long)thinint_area;
+       ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_SET_IND_ADAPTER);
+       if (ret) {
+               if (ret == -EOPNOTSUPP) {
+                       /*
+                        * The host does not support adapter interrupts
+                        * for virtio-ccw, stop trying.
+                        */
+                       virtio_ccw_use_airq = 0;
+                       pr_info("Adapter interrupts unsupported on host\n");
+               } else
+                       dev_warn(&vcdev->cdev->dev,
+                                "enabling adapter interrupts = %d\n", ret);
+               virtio_ccw_drop_indicators(vcdev);
+       }
+out:
+       kfree(thinint_area);
+       return ret;
+}
+
+static int virtio_ccw_find_vqs(struct virtio_device *vdev, unsigned nvqs,
+                              struct virtqueue *vqs[],
+                              vq_callback_t *callbacks[],
+                              const char *names[])
+{
+       struct virtio_ccw_device *vcdev = to_vc_device(vdev);
+       unsigned long *indicatorp = NULL;
+       int ret, i;
+       struct ccw1 *ccw;
+
+       ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL);
+       if (!ccw)
+               return -ENOMEM;
+
+       for (i = 0; i < nvqs; ++i) {
+               vqs[i] = virtio_ccw_setup_vq(vdev, i, callbacks[i], names[i],
+                                            ccw);
+               if (IS_ERR(vqs[i])) {
+                       ret = PTR_ERR(vqs[i]);
+                       vqs[i] = NULL;
+                       goto out;
+               }
+       }
+       ret = -ENOMEM;
+       /* We need a data area under 2G to communicate. */
+       indicatorp = kmalloc(sizeof(&vcdev->indicators), GFP_DMA | GFP_KERNEL);
+       if (!indicatorp)
+               goto out;
+       *indicatorp = (unsigned long) &vcdev->indicators;
+       if (vcdev->is_thinint) {
+               ret = virtio_ccw_register_adapter_ind(vcdev, vqs, nvqs, ccw);
+               if (ret)
+                       /* no error, just fall back to legacy interrupts */
+                       vcdev->is_thinint = 0;
+       }
+       if (!vcdev->is_thinint) {
+               /* Register queue indicators with host. */
+               vcdev->indicators = 0;
+               ccw->cmd_code = CCW_CMD_SET_IND;
+               ccw->flags = 0;
+               ccw->count = sizeof(vcdev->indicators);
+               ccw->cda = (__u32)(unsigned long) indicatorp;
+               ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_SET_IND);
+               if (ret)
+                       goto out;
+       }
+       /* Register indicators2 with host for config changes */
+       *indicatorp = (unsigned long) &vcdev->indicators2;
+       vcdev->indicators2 = 0;
+       ccw->cmd_code = CCW_CMD_SET_CONF_IND;
+       ccw->flags = 0;
+       ccw->count = sizeof(vcdev->indicators2);
+       ccw->cda = (__u32)(unsigned long) indicatorp;
+       ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_SET_CONF_IND);
+       if (ret)
+               goto out;
+
+       kfree(indicatorp);
+       kfree(ccw);
+       return 0;
+out:
+       kfree(indicatorp);
+       kfree(ccw);
+       virtio_ccw_del_vqs(vdev);
+       return ret;
+}
+
+static void virtio_ccw_reset(struct virtio_device *vdev)
+{
+       struct virtio_ccw_device *vcdev = to_vc_device(vdev);
+       struct ccw1 *ccw;
+
+       ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL);
+       if (!ccw)
+               return;
+
+       /* Zero status bits. */
+       *vcdev->status = 0;
+
+       /* Send a reset ccw on device. */
+       ccw->cmd_code = CCW_CMD_VDEV_RESET;
+       ccw->flags = 0;
+       ccw->count = 0;
+       ccw->cda = 0;
+       ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_RESET);
+       kfree(ccw);
+}
+
+static u64 virtio_ccw_get_features(struct virtio_device *vdev)
+{
+       struct virtio_ccw_device *vcdev = to_vc_device(vdev);
+       struct virtio_feature_desc *features;
+       int ret;
+       u64 rc;
+       struct ccw1 *ccw;
+
+       ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL);
+       if (!ccw)
+               return 0;
+
+       features = kzalloc(sizeof(*features), GFP_DMA | GFP_KERNEL);
+       if (!features) {
+               rc = 0;
+               goto out_free;
+       }
+       /* Read the feature bits from the host. */
+       features->index = 0;
+       ccw->cmd_code = CCW_CMD_READ_FEAT;
+       ccw->flags = 0;
+       ccw->count = sizeof(*features);
+       ccw->cda = (__u32)(unsigned long)features;
+       ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_READ_FEAT);
+       if (ret) {
+               rc = 0;
+               goto out_free;
+       }
+
+       rc = le32_to_cpu(features->features);
+
+       if (vcdev->revision == 0)
+               goto out_free;
+
+       /* Read second half of the feature bits from the host. */
+       features->index = 1;
+       ccw->cmd_code = CCW_CMD_READ_FEAT;
+       ccw->flags = 0;
+       ccw->count = sizeof(*features);
+       ccw->cda = (__u32)(unsigned long)features;
+       ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_READ_FEAT);
+       if (ret == 0)
+               rc |= (u64)le32_to_cpu(features->features) << 32;
+
+out_free:
+       kfree(features);
+       kfree(ccw);
+       return rc;
+}
+
+static int virtio_ccw_finalize_features(struct virtio_device *vdev)
+{
+       struct virtio_ccw_device *vcdev = to_vc_device(vdev);
+       struct virtio_feature_desc *features;
+       struct ccw1 *ccw;
+       int ret;
+
+       if (vcdev->revision >= 1 &&
+           !__virtio_test_bit(vdev, VIRTIO_F_VERSION_1)) {
+               dev_err(&vdev->dev, "virtio: device uses revision 1 "
+                       "but does not have VIRTIO_F_VERSION_1\n");
+               return -EINVAL;
+       }
+
+       ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL);
+       if (!ccw)
+               return -ENOMEM;
+
+       features = kzalloc(sizeof(*features), GFP_DMA | GFP_KERNEL);
+       if (!features) {
+               ret = -ENOMEM;
+               goto out_free;
+       }
+       /* Give virtio_ring a chance to accept features. */
+       vring_transport_features(vdev);
+
+       features->index = 0;
+       features->features = cpu_to_le32((u32)vdev->features);
+       /* Write the first half of the feature bits to the host. */
+       ccw->cmd_code = CCW_CMD_WRITE_FEAT;
+       ccw->flags = 0;
+       ccw->count = sizeof(*features);
+       ccw->cda = (__u32)(unsigned long)features;
+       ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_WRITE_FEAT);
+       if (ret)
+               goto out_free;
+
+       if (vcdev->revision == 0)
+               goto out_free;
+
+       features->index = 1;
+       features->features = cpu_to_le32(vdev->features >> 32);
+       /* Write the second half of the feature bits to the host. */
+       ccw->cmd_code = CCW_CMD_WRITE_FEAT;
+       ccw->flags = 0;
+       ccw->count = sizeof(*features);
+       ccw->cda = (__u32)(unsigned long)features;
+       ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_WRITE_FEAT);
+
+out_free:
+       kfree(features);
+       kfree(ccw);
+
+       return ret;
+}
+
+static void virtio_ccw_get_config(struct virtio_device *vdev,
+                                 unsigned int offset, void *buf, unsigned len)
+{
+       struct virtio_ccw_device *vcdev = to_vc_device(vdev);
+       int ret;
+       struct ccw1 *ccw;
+       void *config_area;
+
+       ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL);
+       if (!ccw)
+               return;
+
+       config_area = kzalloc(VIRTIO_CCW_CONFIG_SIZE, GFP_DMA | GFP_KERNEL);
+       if (!config_area)
+               goto out_free;
+
+       /* Read the config area from the host. */
+       ccw->cmd_code = CCW_CMD_READ_CONF;
+       ccw->flags = 0;
+       ccw->count = offset + len;
+       ccw->cda = (__u32)(unsigned long)config_area;
+       ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_READ_CONFIG);
+       if (ret)
+               goto out_free;
+
+       memcpy(vcdev->config, config_area, offset + len);
+       if (buf)
+               memcpy(buf, &vcdev->config[offset], len);
+       if (vcdev->config_ready < offset + len)
+               vcdev->config_ready = offset + len;
+
+out_free:
+       kfree(config_area);
+       kfree(ccw);
+}
+
+static void virtio_ccw_set_config(struct virtio_device *vdev,
+                                 unsigned int offset, const void *buf,
+                                 unsigned len)
+{
+       struct virtio_ccw_device *vcdev = to_vc_device(vdev);
+       struct ccw1 *ccw;
+       void *config_area;
+
+       ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL);
+       if (!ccw)
+               return;
+
+       config_area = kzalloc(VIRTIO_CCW_CONFIG_SIZE, GFP_DMA | GFP_KERNEL);
+       if (!config_area)
+               goto out_free;
+
+       /* Make sure we don't overwrite fields. */
+       if (vcdev->config_ready < offset)
+               virtio_ccw_get_config(vdev, 0, NULL, offset);
+       memcpy(&vcdev->config[offset], buf, len);
+       /* Write the config area to the host. */
+       memcpy(config_area, vcdev->config, sizeof(vcdev->config));
+       ccw->cmd_code = CCW_CMD_WRITE_CONF;
+       ccw->flags = 0;
+       ccw->count = offset + len;
+       ccw->cda = (__u32)(unsigned long)config_area;
+       ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_WRITE_CONFIG);
+
+out_free:
+       kfree(config_area);
+       kfree(ccw);
+}
+
+static u8 virtio_ccw_get_status(struct virtio_device *vdev)
+{
+       struct virtio_ccw_device *vcdev = to_vc_device(vdev);
+
+       return *vcdev->status;
+}
+
+static void virtio_ccw_set_status(struct virtio_device *vdev, u8 status)
+{
+       struct virtio_ccw_device *vcdev = to_vc_device(vdev);
+       u8 old_status = *vcdev->status;
+       struct ccw1 *ccw;
+       int ret;
+
+       ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL);
+       if (!ccw)
+               return;
+
+       /* Write the status to the host. */
+       *vcdev->status = status;
+       ccw->cmd_code = CCW_CMD_WRITE_STATUS;
+       ccw->flags = 0;
+       ccw->count = sizeof(status);
+       ccw->cda = (__u32)(unsigned long)vcdev->status;
+       ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_WRITE_STATUS);
+       /* Write failed? We assume status is unchanged. */
+       if (ret)
+               *vcdev->status = old_status;
+       kfree(ccw);
+}
+
+static struct virtio_config_ops virtio_ccw_config_ops = {
+       .get_features = virtio_ccw_get_features,
+       .finalize_features = virtio_ccw_finalize_features,
+       .get = virtio_ccw_get_config,
+       .set = virtio_ccw_set_config,
+       .get_status = virtio_ccw_get_status,
+       .set_status = virtio_ccw_set_status,
+       .reset = virtio_ccw_reset,
+       .find_vqs = virtio_ccw_find_vqs,
+       .del_vqs = virtio_ccw_del_vqs,
+};
+
+
+/*
+ * ccw bus driver related functions
+ */
+
+static void virtio_ccw_release_dev(struct device *_d)
+{
+       struct virtio_device *dev = container_of(_d, struct virtio_device,
+                                                dev);
+       struct virtio_ccw_device *vcdev = to_vc_device(dev);
+
+       kfree(vcdev->status);
+       kfree(vcdev->config_block);
+       kfree(vcdev);
+}
+
+static int irb_is_error(struct irb *irb)
+{
+       if (scsw_cstat(&irb->scsw) != 0)
+               return 1;
+       if (scsw_dstat(&irb->scsw) & ~(DEV_STAT_CHN_END | DEV_STAT_DEV_END))
+               return 1;
+       if (scsw_cc(&irb->scsw) != 0)
+               return 1;
+       return 0;
+}
+
+static struct virtqueue *virtio_ccw_vq_by_ind(struct virtio_ccw_device *vcdev,
+                                             int index)
+{
+       struct virtio_ccw_vq_info *info;
+       unsigned long flags;
+       struct virtqueue *vq;
+
+       vq = NULL;
+       spin_lock_irqsave(&vcdev->lock, flags);
+       list_for_each_entry(info, &vcdev->virtqueues, node) {
+               if (info->vq->index == index) {
+                       vq = info->vq;
+                       break;
+               }
+       }
+       spin_unlock_irqrestore(&vcdev->lock, flags);
+       return vq;
+}
+
+static void virtio_ccw_int_handler(struct ccw_device *cdev,
+                                  unsigned long intparm,
+                                  struct irb *irb)
+{
+       __u32 activity = intparm & VIRTIO_CCW_INTPARM_MASK;
+       struct virtio_ccw_device *vcdev = dev_get_drvdata(&cdev->dev);
+       int i;
+       struct virtqueue *vq;
+
+       if (!vcdev)
+               return;
+       /* Check if it's a notification from the host. */
+       if ((intparm == 0) &&
+           (scsw_stctl(&irb->scsw) ==
+            (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND))) {
+               /* OK */
+       }
+       if (irb_is_error(irb)) {
+               /* Command reject? */
+               if ((scsw_dstat(&irb->scsw) & DEV_STAT_UNIT_CHECK) &&
+                   (irb->ecw[0] & SNS0_CMD_REJECT))
+                       vcdev->err = -EOPNOTSUPP;
+               else
+                       /* Map everything else to -EIO. */
+                       vcdev->err = -EIO;
+       }
+       if (vcdev->curr_io & activity) {
+               switch (activity) {
+               case VIRTIO_CCW_DOING_READ_FEAT:
+               case VIRTIO_CCW_DOING_WRITE_FEAT:
+               case VIRTIO_CCW_DOING_READ_CONFIG:
+               case VIRTIO_CCW_DOING_WRITE_CONFIG:
+               case VIRTIO_CCW_DOING_WRITE_STATUS:
+               case VIRTIO_CCW_DOING_SET_VQ:
+               case VIRTIO_CCW_DOING_SET_IND:
+               case VIRTIO_CCW_DOING_SET_CONF_IND:
+               case VIRTIO_CCW_DOING_RESET:
+               case VIRTIO_CCW_DOING_READ_VQ_CONF:
+               case VIRTIO_CCW_DOING_SET_IND_ADAPTER:
+               case VIRTIO_CCW_DOING_SET_VIRTIO_REV:
+                       vcdev->curr_io &= ~activity;
+                       wake_up(&vcdev->wait_q);
+                       break;
+               default:
+                       /* don't know what to do... */
+                       dev_warn(&cdev->dev, "Suspicious activity '%08x'\n",
+                                activity);
+                       WARN_ON(1);
+                       break;
+               }
+       }
+       for_each_set_bit(i, &vcdev->indicators,
+                        sizeof(vcdev->indicators) * BITS_PER_BYTE) {
+               /* The bit clear must happen before the vring kick. */
+               clear_bit(i, &vcdev->indicators);
+               barrier();
+               vq = virtio_ccw_vq_by_ind(vcdev, i);
+               vring_interrupt(0, vq);
+       }
+       if (test_bit(0, &vcdev->indicators2)) {
+               virtio_config_changed(&vcdev->vdev);
+               clear_bit(0, &vcdev->indicators2);
+       }
+}
+
+/*
+ * We usually want to autoonline all devices, but give the admin
+ * a way to exempt devices from this.
+ */
+#define __DEV_WORDS ((__MAX_SUBCHANNEL + (8*sizeof(long) - 1)) / \
+                    (8*sizeof(long)))
+static unsigned long devs_no_auto[__MAX_SSID + 1][__DEV_WORDS];
+
+static char *no_auto = "";
+
+module_param(no_auto, charp, 0444);
+MODULE_PARM_DESC(no_auto, "list of ccw bus id ranges not to be auto-onlined");
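Judging from the parse_busid()/no_auto_parse() helpers that accompany this parameter, each no_auto entry is a ccw bus id of the form cssid.ssid.devno (one or two hex digits, one hex digit, and four hex digits, respectively); entries are separated by commas and a range may be written with '-'. As a purely hypothetical example, booting with

	virtio_ccw.no_auto=0.0.1234,0.0.2000-0.0.20ff

would keep those devices from being brought online automatically, leaving it to the admin to set them online by hand.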
+
+static int virtio_ccw_check_autoonline(struct ccw_device *cdev)
+{
+       struct ccw_dev_id id;
+
+       ccw_device_get_id(cdev, &id);
+       if (test_bit(id.devno, devs_no_auto[id.ssid]))
+               return 0;
+       return 1;
+}
+
+static void virtio_ccw_auto_online(void *data, async_cookie_t cookie)
+{
+       struct ccw_device *cdev = data;
+       int ret;
+
+       ret = ccw_device_set_online(cdev);
+       if (ret)
+               dev_warn(&cdev->dev, "Failed to set online: %d\n", ret);
+}
+
+static int virtio_ccw_probe(struct ccw_device *cdev)
+{
+       cdev->handler = virtio_ccw_int_handler;
+
+       if (virtio_ccw_check_autoonline(cdev))
+               async_schedule(virtio_ccw_auto_online, cdev);
+       return 0;
+}
+
+static struct virtio_ccw_device *virtio_grab_drvdata(struct ccw_device *cdev)
+{
+       unsigned long flags;
+       struct virtio_ccw_device *vcdev;
+
+       spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
+       vcdev = dev_get_drvdata(&cdev->dev);
+       if (!vcdev || vcdev->going_away) {
+               spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
+               return NULL;
+       }
+       vcdev->going_away = true;
+       spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
+       return vcdev;
+}
+
+static void virtio_ccw_remove(struct ccw_device *cdev)
+{
+       unsigned long flags;
+       struct virtio_ccw_device *vcdev = virtio_grab_drvdata(cdev);
+
+       if (vcdev && cdev->online) {
+               if (vcdev->device_lost)
+                       virtio_break_device(&vcdev->vdev);
+               unregister_virtio_device(&vcdev->vdev);
+               spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
+               dev_set_drvdata(&cdev->dev, NULL);
+               spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
+       }
+       cdev->handler = NULL;
+}
+
+static int virtio_ccw_offline(struct ccw_device *cdev)
+{
+       unsigned long flags;
+       struct virtio_ccw_device *vcdev = virtio_grab_drvdata(cdev);
+
+       if (!vcdev)
+               return 0;
+       if (vcdev->device_lost)
+               virtio_break_device(&vcdev->vdev);
+       unregister_virtio_device(&vcdev->vdev);
+       spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
+       dev_set_drvdata(&cdev->dev, NULL);
+       spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
+       return 0;
+}
+
+static int virtio_ccw_set_transport_rev(struct virtio_ccw_device *vcdev)
+{
+       struct virtio_rev_info *rev;
+       struct ccw1 *ccw;
+       int ret;
+
+       ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL);
+       if (!ccw)
+               return -ENOMEM;
+       rev = kzalloc(sizeof(*rev), GFP_DMA | GFP_KERNEL);
+       if (!rev) {
+               kfree(ccw);
+               return -ENOMEM;
+       }
+
+       /* Set transport revision */
+       ccw->cmd_code = CCW_CMD_SET_VIRTIO_REV;
+       ccw->flags = 0;
+       ccw->count = sizeof(*rev);
+       ccw->cda = (__u32)(unsigned long)rev;
+
+       vcdev->revision = VIRTIO_CCW_REV_MAX;
+       do {
+               rev->revision = vcdev->revision;
+               /* none of our supported revisions carry payload */
+               rev->length = 0;
+               ret = ccw_io_helper(vcdev, ccw,
+                                   VIRTIO_CCW_DOING_SET_VIRTIO_REV);
+               if (ret == -EOPNOTSUPP) {
+                       if (vcdev->revision == 0)
+                               /*
+                                * The host device does not support setting
+                                * the revision: let's operate it in legacy
+                                * mode.
+                                */
+                               ret = 0;
+                       else
+                               vcdev->revision--;
+               }
+       } while (ret == -EOPNOTSUPP);
+
+       kfree(ccw);
+       kfree(rev);
+       return ret;
+}
+
+static int virtio_ccw_online(struct ccw_device *cdev)
+{
+       int ret;
+       struct virtio_ccw_device *vcdev;
+       unsigned long flags;
+
+       vcdev = kzalloc(sizeof(*vcdev), GFP_KERNEL);
+       if (!vcdev) {
+               dev_warn(&cdev->dev, "Could not get memory for virtio\n");
+               ret = -ENOMEM;
+               goto out_free;
+       }
+       vcdev->config_block = kzalloc(sizeof(*vcdev->config_block),
+                                  GFP_DMA | GFP_KERNEL);
+       if (!vcdev->config_block) {
+               ret = -ENOMEM;
+               goto out_free;
+       }
+       vcdev->status = kzalloc(sizeof(*vcdev->status), GFP_DMA | GFP_KERNEL);
+       if (!vcdev->status) {
+               ret = -ENOMEM;
+               goto out_free;
+       }
+
+       vcdev->is_thinint = virtio_ccw_use_airq; /* at least try */
+
+       vcdev->vdev.dev.parent = &cdev->dev;
+       vcdev->vdev.dev.release = virtio_ccw_release_dev;
+       vcdev->vdev.config = &virtio_ccw_config_ops;
+       vcdev->cdev = cdev;
+       init_waitqueue_head(&vcdev->wait_q);
+       INIT_LIST_HEAD(&vcdev->virtqueues);
+       spin_lock_init(&vcdev->lock);
+
+       spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
+       dev_set_drvdata(&cdev->dev, vcdev);
+       spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
+       vcdev->vdev.id.vendor = cdev->id.cu_type;
+       vcdev->vdev.id.device = cdev->id.cu_model;
+
+       ret = virtio_ccw_set_transport_rev(vcdev);
+       if (ret)
+               goto out_free;
+
+       ret = register_virtio_device(&vcdev->vdev);
+       if (ret) {
+               dev_warn(&cdev->dev, "Failed to register virtio device: %d\n",
+                        ret);
+               goto out_put;
+       }
+       return 0;
+out_put:
+       spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
+       dev_set_drvdata(&cdev->dev, NULL);
+       spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
+       put_device(&vcdev->vdev.dev);
+       return ret;
+out_free:
+       if (vcdev) {
+               kfree(vcdev->status);
+               kfree(vcdev->config_block);
+       }
+       kfree(vcdev);
+       return ret;
+}
+
+static int virtio_ccw_cio_notify(struct ccw_device *cdev, int event)
+{
+       int rc;
+       struct virtio_ccw_device *vcdev = dev_get_drvdata(&cdev->dev);
+
+       /*
+        * Make sure vcdev is set
+        * i.e. set_offline/remove callback not already running
+        */
+       if (!vcdev)
+               return NOTIFY_DONE;
+
+       switch (event) {
+       case CIO_GONE:
+               vcdev->device_lost = true;
+               rc = NOTIFY_DONE;
+               break;
+       default:
+               rc = NOTIFY_DONE;
+               break;
+       }
+       return rc;
+}
+
+static struct ccw_device_id virtio_ids[] = {
+       { CCW_DEVICE(0x3832, 0) },
+       {},
+};
+MODULE_DEVICE_TABLE(ccw, virtio_ids);
+
+static struct ccw_driver virtio_ccw_driver = {
+       .driver = {
+               .owner = THIS_MODULE,
+               .name = "virtio_ccw",
+       },
+       .ids = virtio_ids,
+       .probe = virtio_ccw_probe,
+       .remove = virtio_ccw_remove,
+       .set_offline = virtio_ccw_offline,
+       .set_online = virtio_ccw_online,
+       .notify = virtio_ccw_cio_notify,
+       .int_class = IRQIO_VIR,
+};
+
+static int __init pure_hex(char **cp, unsigned int *val, int min_digit,
+                          int max_digit, int max_val)
+{
+       int diff;
+
+       diff = 0;
+       *val = 0;
+
+       while (diff <= max_digit) {
+               int value = hex_to_bin(**cp);
+
+               if (value < 0)
+                       break;
+               *val = *val * 16 + value;
+               (*cp)++;
+               diff++;
+       }
+
+       if ((diff < min_digit) || (diff > max_digit) || (*val > max_val))
+               return 1;
+
+       return 0;
+}
+
+static int __init parse_busid(char *str, unsigned int *cssid,
+                             unsigned int *ssid, unsigned int *devno)
+{
+       char *str_work;
+       int rc, ret;
+
+       rc = 1;
+
+       if (*str == '\0')
+               goto out;
+
+       str_work = str;
+       ret = pure_hex(&str_work, cssid, 1, 2, __MAX_CSSID);
+       if (ret || (str_work[0] != '.'))
+               goto out;
+       str_work++;
+       ret = pure_hex(&str_work, ssid, 1, 1, __MAX_SSID);
+       if (ret || (str_work[0] != '.'))
+               goto out;
+       str_work++;
+       ret = pure_hex(&str_work, devno, 4, 4, __MAX_SUBCHANNEL);
+       if (ret || (str_work[0] != '\0'))
+               goto out;
+
+       rc = 0;
+out:
+       return rc;
+}
+
+static void __init no_auto_parse(void)
+{
+       unsigned int from_cssid, to_cssid, from_ssid, to_ssid, from, to;
+       char *parm, *str;
+       int rc;
+
+       str = no_auto;
+       while ((parm = strsep(&str, ","))) {
+               rc = parse_busid(strsep(&parm, "-"), &from_cssid,
+                                &from_ssid, &from);
+               if (rc)
+                       continue;
+               if (parm != NULL) {
+                       rc = parse_busid(parm, &to_cssid,
+                                        &to_ssid, &to);
+                       if ((from_ssid > to_ssid) ||
+                           ((from_ssid == to_ssid) && (from > to)))
+                               rc = -EINVAL;
+               } else {
+                       to_cssid = from_cssid;
+                       to_ssid = from_ssid;
+                       to = from;
+               }
+               if (rc)
+                       continue;
+               while ((from_ssid < to_ssid) ||
+                      ((from_ssid == to_ssid) && (from <= to))) {
+                       set_bit(from, devs_no_auto[from_ssid]);
+                       from++;
+                       if (from > __MAX_SUBCHANNEL) {
+                               from_ssid++;
+                               from = 0;
+                       }
+               }
+       }
+}
+
+static int __init virtio_ccw_init(void)
+{
+       /* parse no_auto string before we do anything further */
+       no_auto_parse();
+       return ccw_driver_register(&virtio_ccw_driver);
+}
+module_init(virtio_ccw_init);
+
+static void __exit virtio_ccw_exit(void)
+{
+       int i;
+
+       ccw_driver_unregister(&virtio_ccw_driver);
+       for (i = 0; i < MAX_AIRQ_AREAS; i++)
+               destroy_airq_info(airq_areas[i]);
+}
+module_exit(virtio_ccw_exit);
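For reference, no_auto above accepts comma-separated ccw bus IDs or ranges in cssid.ssid.devno form (for example 0.0.0042 or 0.1.0000-0.1.00ff; both values are made-up). A minimal user-space sketch, not part of the patch, of how a single bus ID decomposes:

#include <stdio.h>

int main(void)
{
	unsigned int cssid, ssid, devno;
	const char *busid = "0.0.0042";	/* hypothetical example */

	/* parse_busid() above enforces stricter hex-digit and range checks */
	if (sscanf(busid, "%x.%x.%x", &cssid, &ssid, &devno) == 3)
		printf("cssid=%x ssid=%x devno=%04x\n", cssid, ssid, devno);
	return 0;
}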
index 82b92c414a9cfe3152933e9cfc191c79c480568c..437254e1c4dee0c9972ff723ad31575911002993 100644 (file)
@@ -738,7 +738,7 @@ qla2x00_sysfs_write_reset(struct file *filp, struct kobject *kobj,
                ql_log(ql_log_info, vha, 0x706f,
                    "Issuing MPI reset.\n");
 
-               if (IS_QLA83XX(ha)) {
+               if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
                        uint32_t idc_control;
 
                        qla83xx_idc_lock(vha, 0);
index 0e6ee3ca30e667591db375ba73d9009b286ec93b..8b011aef12bd5ec6e72f5d7a57bfa2285ae64404 100644 (file)
  * |                              |                    | 0xd031-0xd0ff |
  * |                              |                    | 0xd101-0xd1fe |
  * |                              |                    | 0xd214-0xd2fe |
- * | Target Mode                 |       0xe079       |                |
- * | Target Mode Management      |       0xf072       | 0xf002         |
+ * | Target Mode                 |       0xe080       |                |
+ * | Target Mode Management      |       0xf096       | 0xf002         |
  * |                              |                    | 0xf046-0xf049  |
- * | Target Mode Task Management  |      0x1000b      |                |
+ * | Target Mode Task Management  |      0x1000d      |                |
  * ----------------------------------------------------------------------
  */
 
index e86201d3b8c6d9eccbd1132e96cffb08310b7988..9ad819edcd67af2c73ca5a0a85a8863398b3c6f7 100644 (file)
 #define RESPONSE_ENTRY_CNT_FX00                256     /* Number of response entries.*/
 
 struct req_que;
+struct qla_tgt_sess;
 
 /*
  * (sd.h is not exported, hence local inclusion)
@@ -2026,6 +2027,7 @@ typedef struct fc_port {
        uint16_t port_id;
 
        unsigned long retry_delay_timestamp;
+       struct qla_tgt_sess *tgt_session;
 } fc_port_t;
 
 #include "qla_mr.h"
@@ -3154,13 +3156,13 @@ struct qla_hw_data {
 /* Bit 21 of fw_attributes decides the MCTP capabilities */
 #define IS_MCTP_CAPABLE(ha)    (IS_QLA2031(ha) && \
                                ((ha)->fw_attributes_ext[0] & BIT_0))
-#define IS_PI_UNINIT_CAPABLE(ha)       (IS_QLA83XX(ha))
-#define IS_PI_IPGUARD_CAPABLE(ha)      (IS_QLA83XX(ha))
+#define IS_PI_UNINIT_CAPABLE(ha)       (IS_QLA83XX(ha) || IS_QLA27XX(ha))
+#define IS_PI_IPGUARD_CAPABLE(ha)      (IS_QLA83XX(ha) || IS_QLA27XX(ha))
 #define IS_PI_DIFB_DIX0_CAPABLE(ha)    (0)
-#define IS_PI_SPLIT_DET_CAPABLE_HBA(ha)        (IS_QLA83XX(ha))
+#define IS_PI_SPLIT_DET_CAPABLE_HBA(ha)        (IS_QLA83XX(ha) || IS_QLA27XX(ha))
 #define IS_PI_SPLIT_DET_CAPABLE(ha)    (IS_PI_SPLIT_DET_CAPABLE_HBA(ha) && \
     (((ha)->fw_attributes_h << 16 | (ha)->fw_attributes) & BIT_22))
-#define IS_ATIO_MSIX_CAPABLE(ha) (IS_QLA83XX(ha))
+#define IS_ATIO_MSIX_CAPABLE(ha) (IS_QLA83XX(ha) || IS_QLA27XX(ha))
 #define IS_TGT_MODE_CAPABLE(ha)        (ha->tgt.atio_q_length)
 #define IS_SHADOW_REG_CAPABLE(ha)  (IS_QLA27XX(ha))
 #define IS_DPORT_CAPABLE(ha)  (IS_QLA83XX(ha) || IS_QLA27XX(ha))
@@ -3579,6 +3581,16 @@ typedef struct scsi_qla_host {
        uint16_t        fcoe_fcf_idx;
        uint8_t         fcoe_vn_port_mac[6];
 
+       /* list of commands waiting on workqueue */
+       struct list_head        qla_cmd_list;
+       struct list_head        qla_sess_op_cmd_list;
+       spinlock_t              cmd_list_lock;
+
+       /* Counter to detect races between ELS and RSCN events */
+       atomic_t                generation_tick;
+       /* Time when global fcport update has been scheduled */
+       int                     total_fcport_update_gen;
+
        uint32_t        vp_abort_cnt;
 
        struct fc_vport *fc_vport;      /* holds fc_vport * for each vport */
index 664013115c9da7d7912d0b22fa34ddf91e7b977b..11f2f3279eab2984fe131510cc669b329f149bae 100644 (file)
@@ -115,6 +115,8 @@ qla2x00_async_iocb_timeout(void *data)
                        QLA_LOGIO_LOGIN_RETRIED : 0;
                qla2x00_post_async_login_done_work(fcport->vha, fcport,
                        lio->u.logio.data);
+       } else if (sp->type == SRB_LOGOUT_CMD) {
+               qlt_logo_completion_handler(fcport, QLA_FUNCTION_TIMEOUT);
        }
 }
 
@@ -497,7 +499,10 @@ void
 qla2x00_async_logout_done(struct scsi_qla_host *vha, fc_port_t *fcport,
     uint16_t *data)
 {
-       qla2x00_mark_device_lost(vha, fcport, 1, 0);
+       /* Don't re-login in target mode */
+       if (!fcport->tgt_session)
+               qla2x00_mark_device_lost(vha, fcport, 1, 0);
+       qlt_logo_completion_handler(fcport, data[0]);
        return;
 }
 
@@ -1538,7 +1543,7 @@ qla2x00_alloc_fw_dump(scsi_qla_host_t *vha)
                mem_size = (ha->fw_memory_size - 0x11000 + 1) *
                    sizeof(uint16_t);
        } else if (IS_FWI2_CAPABLE(ha)) {
-               if (IS_QLA83XX(ha))
+               if (IS_QLA83XX(ha) || IS_QLA27XX(ha))
                        fixed_size = offsetof(struct qla83xx_fw_dump, ext_mem);
                else if (IS_QLA81XX(ha))
                        fixed_size = offsetof(struct qla81xx_fw_dump, ext_mem);
@@ -1550,7 +1555,7 @@ qla2x00_alloc_fw_dump(scsi_qla_host_t *vha)
                mem_size = (ha->fw_memory_size - 0x100000 + 1) *
                    sizeof(uint32_t);
                if (ha->mqenable) {
-                       if (!IS_QLA83XX(ha))
+                       if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha))
                                mq_size = sizeof(struct qla2xxx_mq_chain);
                        /*
                         * Allocate maximum buffer size for all queues.
@@ -2922,21 +2927,14 @@ qla2x00_rport_del(void *data)
 {
        fc_port_t *fcport = data;
        struct fc_rport *rport;
-       scsi_qla_host_t *vha = fcport->vha;
        unsigned long flags;
 
        spin_lock_irqsave(fcport->vha->host->host_lock, flags);
        rport = fcport->drport ? fcport->drport: fcport->rport;
        fcport->drport = NULL;
        spin_unlock_irqrestore(fcport->vha->host->host_lock, flags);
-       if (rport) {
+       if (rport)
                fc_remote_port_delete(rport);
-               /*
-                * Release the target mode FC NEXUS in qla_target.c code
-                * if target mod is enabled.
-                */
-               qlt_fc_port_deleted(vha, fcport);
-       }
 }
 
 /**
@@ -3303,6 +3301,7 @@ qla2x00_reg_remote_port(scsi_qla_host_t *vha, fc_port_t *fcport)
         * Create target mode FC NEXUS in qla_target.c if target mode is
         * enabled..
         */
+
        qlt_fc_port_added(vha, fcport);
 
        spin_lock_irqsave(fcport->vha->host->host_lock, flags);
@@ -3341,8 +3340,7 @@ qla2x00_update_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
 
        if (IS_QLAFX00(vha->hw)) {
                qla2x00_set_fcport_state(fcport, FCS_ONLINE);
-               qla2x00_reg_remote_port(vha, fcport);
-               return;
+               goto reg_port;
        }
        fcport->login_retry = 0;
        fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT);
@@ -3350,7 +3348,16 @@ qla2x00_update_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
        qla2x00_set_fcport_state(fcport, FCS_ONLINE);
        qla2x00_iidma_fcport(vha, fcport);
        qla24xx_update_fcport_fcp_prio(vha, fcport);
-       qla2x00_reg_remote_port(vha, fcport);
+
+reg_port:
+       if (qla_ini_mode_enabled(vha))
+               qla2x00_reg_remote_port(vha, fcport);
+       else {
+               /*
+                * Create target mode FC NEXUS in qla_target.c
+                */
+               qlt_fc_port_added(vha, fcport);
+       }
 }
 
 /*
@@ -3375,6 +3382,7 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha)
        LIST_HEAD(new_fcports);
        struct qla_hw_data *ha = vha->hw;
        struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
+       int             discovery_gen;
 
        /* If FL port exists, then SNS is present */
        if (IS_FWI2_CAPABLE(ha))
@@ -3445,6 +3453,14 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha)
                        fcport->scan_state = QLA_FCPORT_SCAN;
                }
 
+               /* Mark the time right before querying FW for connected ports.
+                * This process is long, asynchronous and by the time it's done,
+                * the collected information might not be accurate anymore. E.g.
+                * a disconnected port might have re-connected and a brand new
+                * session has been created. In this case the session's
+                * generation will be newer than discovery_gen. */
+               qlt_do_generation_tick(vha, &discovery_gen);
+
                rval = qla2x00_find_all_fabric_devs(vha, &new_fcports);
                if (rval != QLA_SUCCESS)
                        break;
@@ -3460,20 +3476,44 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha)
                        if ((fcport->flags & FCF_FABRIC_DEVICE) == 0)
                                continue;
 
-                       if (fcport->scan_state == QLA_FCPORT_SCAN &&
-                           atomic_read(&fcport->state) == FCS_ONLINE) {
-                               qla2x00_mark_device_lost(vha, fcport,
-                                   ql2xplogiabsentdevice, 0);
-                               if (fcport->loop_id != FC_NO_LOOP_ID &&
-                                   (fcport->flags & FCF_FCP2_DEVICE) == 0 &&
-                                   fcport->port_type != FCT_INITIATOR &&
-                                   fcport->port_type != FCT_BROADCAST) {
-                                       ha->isp_ops->fabric_logout(vha,
-                                           fcport->loop_id,
-                                           fcport->d_id.b.domain,
-                                           fcport->d_id.b.area,
-                                           fcport->d_id.b.al_pa);
-                                       qla2x00_clear_loop_id(fcport);
+                       if (fcport->scan_state == QLA_FCPORT_SCAN) {
+                               if (qla_ini_mode_enabled(base_vha) &&
+                                   atomic_read(&fcport->state) == FCS_ONLINE) {
+                                       qla2x00_mark_device_lost(vha, fcport,
+                                           ql2xplogiabsentdevice, 0);
+                                       if (fcport->loop_id != FC_NO_LOOP_ID &&
+                                           (fcport->flags & FCF_FCP2_DEVICE) == 0 &&
+                                           fcport->port_type != FCT_INITIATOR &&
+                                           fcport->port_type != FCT_BROADCAST) {
+                                               ha->isp_ops->fabric_logout(vha,
+                                                   fcport->loop_id,
+                                                   fcport->d_id.b.domain,
+                                                   fcport->d_id.b.area,
+                                                   fcport->d_id.b.al_pa);
+                                               qla2x00_clear_loop_id(fcport);
+                                       }
+                               } else if (!qla_ini_mode_enabled(base_vha)) {
+                                       /*
+                                        * In target mode, explicitly kill
+                                        * sessions and log out of devices
+                                        * that are gone, so that we don't
+                                        * end up with an initiator using the
+                                        * wrong ACL (if the fabric recycles
+                                        * an FC address and we have a stale
+                                        * session around) and so that we don't
+                                        * report initiators that are no longer
+                                        * on the fabric.
+                                        */
+                                       ql_dbg(ql_dbg_tgt_mgt, vha, 0xf077,
+                                           "port gone, logging out/killing session: "
+                                           "%8phC state 0x%x flags 0x%x fc4_type 0x%x "
+                                           "scan_state %d\n",
+                                           fcport->port_name,
+                                           atomic_read(&fcport->state),
+                                           fcport->flags, fcport->fc4_type,
+                                           fcport->scan_state);
+                                       qlt_fc_port_deleted(vha, fcport,
+                                           discovery_gen);
                                }
                        }
                }
@@ -3494,6 +3534,28 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha)
                            (fcport->flags & FCF_LOGIN_NEEDED) == 0)
                                continue;
 
+                       /*
+                        * If we're not an initiator, skip looking for devices
+                        * and logging in.  There's no reason for us to do it,
+                        * and it seems to actively cause problems in target
+                        * mode if we race with the initiator logging into us
+                        * (we might get the "port ID used" status back from
+                        * our login command and log out the initiator, which
+                        * seems to cause havoc).
+                        */
+                       if (!qla_ini_mode_enabled(base_vha)) {
+                               if (fcport->scan_state == QLA_FCPORT_FOUND) {
+                                       ql_dbg(ql_dbg_tgt_mgt, vha, 0xf078,
+                                           "port %8phC state 0x%x flags 0x%x fc4_type 0x%x "
+                                           "scan_state %d (initiator mode disabled; skipping "
+                                           "login)\n", fcport->port_name,
+                                           atomic_read(&fcport->state),
+                                           fcport->flags, fcport->fc4_type,
+                                           fcport->scan_state);
+                               }
+                               continue;
+                       }
+
                        if (fcport->loop_id == FC_NO_LOOP_ID) {
                                fcport->loop_id = next_loopid;
                                rval = qla2x00_find_new_loop_id(
@@ -3520,16 +3582,38 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha)
                            test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
                                break;
 
-                       /* Find a new loop ID to use. */
-                       fcport->loop_id = next_loopid;
-                       rval = qla2x00_find_new_loop_id(base_vha, fcport);
-                       if (rval != QLA_SUCCESS) {
-                               /* Ran out of IDs to use */
-                               break;
-                       }
+                       /*
+                        * If we're not an initiator, skip looking for devices
+                        * and logging in.  There's no reason for us to do it,
+                        * and it seems to actively cause problems in target
+                        * mode if we race with the initiator logging into us
+                        * (we might get the "port ID used" status back from
+                        * our login command and log out the initiator, which
+                        * seems to cause havoc).
+                        */
+                       if (qla_ini_mode_enabled(base_vha)) {
+                               /* Find a new loop ID to use. */
+                               fcport->loop_id = next_loopid;
+                               rval = qla2x00_find_new_loop_id(base_vha,
+                                   fcport);
+                               if (rval != QLA_SUCCESS) {
+                                       /* Ran out of IDs to use */
+                                       break;
+                               }
 
-                       /* Login and update database */
-                       qla2x00_fabric_dev_login(vha, fcport, &next_loopid);
+                               /* Login and update database */
+                               qla2x00_fabric_dev_login(vha, fcport,
+                                   &next_loopid);
+                       } else {
+                               ql_dbg(ql_dbg_tgt_mgt, vha, 0xf079,
+                                       "new port %8phC state 0x%x flags 0x%x fc4_type "
+                                       "0x%x scan_state %d (initiator mode disabled; "
+                                       "skipping login)\n",
+                                       fcport->port_name,
+                                       atomic_read(&fcport->state),
+                                       fcport->flags, fcport->fc4_type,
+                                       fcport->scan_state);
+                       }
 
                        list_move_tail(&fcport->list, &vha->vp_fcports);
                }
@@ -3725,11 +3809,12 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha,
                        fcport->fp_speed = new_fcport->fp_speed;
 
                        /*
-                        * If address the same and state FCS_ONLINE, nothing
-                        * changed.
+                        * If address the same and state FCS_ONLINE
+                        * (or in target mode), nothing changed.
                         */
                        if (fcport->d_id.b24 == new_fcport->d_id.b24 &&
-                           atomic_read(&fcport->state) == FCS_ONLINE) {
+                           (atomic_read(&fcport->state) == FCS_ONLINE ||
+                            !qla_ini_mode_enabled(base_vha))) {
                                break;
                        }
 
@@ -3749,6 +3834,22 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha,
                         * Log it out if still logged in and mark it for
                         * relogin later.
                         */
+                       if (!qla_ini_mode_enabled(base_vha)) {
+                               ql_dbg(ql_dbg_tgt_mgt, vha, 0xf080,
+                                        "port changed FC ID, %8phC"
+                                        " old %x:%x:%x (loop_id 0x%04x)-> new %x:%x:%x\n",
+                                        fcport->port_name,
+                                        fcport->d_id.b.domain,
+                                        fcport->d_id.b.area,
+                                        fcport->d_id.b.al_pa,
+                                        fcport->loop_id,
+                                        new_fcport->d_id.b.domain,
+                                        new_fcport->d_id.b.area,
+                                        new_fcport->d_id.b.al_pa);
+                               fcport->d_id.b24 = new_fcport->d_id.b24;
+                               break;
+                       }
+
                        fcport->d_id.b24 = new_fcport->d_id.b24;
                        fcport->flags |= FCF_LOGIN_NEEDED;
                        if (fcport->loop_id != FC_NO_LOOP_ID &&
@@ -3768,6 +3869,7 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha,
                if (found)
                        continue;
                /* If device was not in our fcports list, then add it. */
+               new_fcport->scan_state = QLA_FCPORT_FOUND;
                list_add_tail(&new_fcport->list, new_fcports);
 
                /* Allocate a new replacement fcport. */
@@ -4188,6 +4290,14 @@ qla2x00_update_fcports(scsi_qla_host_t *base_vha)
                            atomic_read(&fcport->state) != FCS_UNCONFIGURED) {
                                spin_unlock_irqrestore(&ha->vport_slock, flags);
                                qla2x00_rport_del(fcport);
+
+                               /*
+                                * Release the target mode FC NEXUS in
+                                * qla_target.c, if target mode is enabled.
+                                */
+                               qlt_fc_port_deleted(vha, fcport,
+                                   base_vha->total_fcport_update_gen);
+
                                spin_lock_irqsave(&ha->vport_slock, flags);
                        }
                }
index 36fbd4c7af8f50e52cd0289bc31d9ccfca8bbe24..6f02b26a35cff5b06916273a0bf1c8643f523689 100644 (file)
@@ -1943,6 +1943,9 @@ qla24xx_logout_iocb(srb_t *sp, struct logio_entry_24xx *logio)
        logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
        logio->control_flags =
            cpu_to_le16(LCF_COMMAND_LOGO|LCF_IMPL_LOGO);
+       if (!sp->fcport->tgt_session ||
+           !sp->fcport->tgt_session->keep_nport_handle)
+               logio->control_flags |= cpu_to_le16(LCF_FREE_NPORT);
        logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
        logio->port_id[0] = sp->fcport->d_id.b.al_pa;
        logio->port_id[1] = sp->fcport->d_id.b.area;
index 02b1c1c5355b948db22e1e69d2ae513387d198fa..b2f713ad90346093b40b515b70be4062666c6b80 100644 (file)
@@ -2415,7 +2415,8 @@ qla2x00_get_resource_cnts(scsi_qla_host_t *vha, uint16_t *cur_xchg_cnt,
                        *orig_iocb_cnt = mcp->mb[10];
                if (vha->hw->flags.npiv_supported && max_npiv_vports)
                        *max_npiv_vports = mcp->mb[11];
-               if ((IS_QLA81XX(vha->hw) || IS_QLA83XX(vha->hw)) && max_fcfs)
+               if ((IS_QLA81XX(vha->hw) || IS_QLA83XX(vha->hw) ||
+                   IS_QLA27XX(vha->hw)) && max_fcfs)
                        *max_fcfs = mcp->mb[12];
        }
 
@@ -3898,7 +3899,7 @@ qla25xx_init_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
        spin_lock_irqsave(&ha->hardware_lock, flags);
        if (!(rsp->options & BIT_0)) {
                WRT_REG_DWORD(rsp->rsp_q_out, 0);
-               if (!IS_QLA83XX(ha))
+               if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha))
                        WRT_REG_DWORD(rsp->rsp_q_in, 0);
        }
 
@@ -5345,7 +5346,7 @@ qla83xx_restart_nic_firmware(scsi_qla_host_t *vha)
        mbx_cmd_t *mcp = &mc;
        struct qla_hw_data *ha = vha->hw;
 
-       if (!IS_QLA83XX(ha))
+       if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha))
                return QLA_FUNCTION_FAILED;
 
        ql_dbg(ql_dbg_mbx, vha, 0x1143, "Entered %s.\n", __func__);
index a28815b8276f090901498dafaf39d12c97db42eb..8a5cac8448c76518107d95744b39ea76717958f5 100644 (file)
@@ -2504,6 +2504,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
                ha->mbx_count = MAILBOX_REGISTER_COUNT;
                req_length = REQUEST_ENTRY_CNT_24XX;
                rsp_length = RESPONSE_ENTRY_CNT_2300;
+               ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX;
                ha->max_loop_id = SNS_LAST_LOOP_ID_2300;
                ha->init_cb_size = sizeof(struct mid_init_cb_81xx);
                ha->gid_list_info_size = 8;
@@ -3229,11 +3230,15 @@ qla2x00_schedule_rport_del(struct scsi_qla_host *vha, fc_port_t *fcport,
                spin_lock_irqsave(vha->host->host_lock, flags);
                fcport->drport = rport;
                spin_unlock_irqrestore(vha->host->host_lock, flags);
+               qlt_do_generation_tick(vha, &base_vha->total_fcport_update_gen);
                set_bit(FCPORT_UPDATE_NEEDED, &base_vha->dpc_flags);
                qla2xxx_wake_dpc(base_vha);
        } else {
-               fc_remote_port_delete(rport);
-               qlt_fc_port_deleted(vha, fcport);
+               int now;
+               if (rport)
+                       fc_remote_port_delete(rport);
+               qlt_do_generation_tick(vha, &now);
+               qlt_fc_port_deleted(vha, fcport, now);
        }
 }
 
@@ -3763,8 +3768,11 @@ struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *sht,
        INIT_LIST_HEAD(&vha->vp_fcports);
        INIT_LIST_HEAD(&vha->work_list);
        INIT_LIST_HEAD(&vha->list);
+       INIT_LIST_HEAD(&vha->qla_cmd_list);
+       INIT_LIST_HEAD(&vha->qla_sess_op_cmd_list);
 
        spin_lock_init(&vha->work_lock);
+       spin_lock_init(&vha->cmd_list_lock);
 
        sprintf(vha->host_str, "%s_%ld", QLA2XXX_DRIVER_NAME, vha->host_no);
        ql_dbg(ql_dbg_init, vha, 0x0041,
index 028e8c8a7de9a897ac68ad8c09a2d47be0276f5c..2feb5f38edcd98ec3607f18a020d1cedee5bfc40 100644 (file)
@@ -1697,7 +1697,7 @@ qla83xx_select_led_port(struct qla_hw_data *ha)
 {
        uint32_t led_select_value = 0;
 
-       if (!IS_QLA83XX(ha))
+       if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha))
                goto out;
 
        if (ha->port_no == 0)
index b749026aa592445d70dd51056314ff29965578ca..58651ecbd88c206e0be3b996c007baeaddc8cb19 100644 (file)
@@ -113,6 +113,11 @@ static void qlt_abort_cmd_on_host_reset(struct scsi_qla_host *vha,
 static void qlt_alloc_qfull_cmd(struct scsi_qla_host *vha,
        struct atio_from_isp *atio, uint16_t status, int qfull);
 static void qlt_disable_vha(struct scsi_qla_host *vha);
+static void qlt_clear_tgt_db(struct qla_tgt *tgt);
+static void qlt_send_notify_ack(struct scsi_qla_host *vha,
+       struct imm_ntfy_from_isp *ntfy,
+       uint32_t add_flags, uint16_t resp_code, int resp_code_valid,
+       uint16_t srr_flags, uint16_t srr_reject_code, uint8_t srr_explan);
 /*
  * Global Variables
  */
@@ -122,6 +127,16 @@ static struct workqueue_struct *qla_tgt_wq;
 static DEFINE_MUTEX(qla_tgt_mutex);
 static LIST_HEAD(qla_tgt_glist);
 
+/* This API intentionally takes dest as a parameter, rather than returning
+ * an int, to avoid the caller forgetting to issue wmb() after the store */
+void qlt_do_generation_tick(struct scsi_qla_host *vha, int *dest)
+{
+       scsi_qla_host_t *base_vha = pci_get_drvdata(vha->hw->pdev);
+       *dest = atomic_inc_return(&base_vha->generation_tick);
+       /* memory barrier */
+       wmb();
+}
+
 /* ha->hardware_lock supposed to be held on entry (to protect tgt->sess_list) */
 static struct qla_tgt_sess *qlt_find_sess_by_port_name(
        struct qla_tgt *tgt,
@@ -381,14 +396,73 @@ static void qlt_free_session_done(struct work_struct *work)
        struct qla_tgt *tgt = sess->tgt;
        struct scsi_qla_host *vha = sess->vha;
        struct qla_hw_data *ha = vha->hw;
+       unsigned long flags;
+       bool logout_started = false;
+       fc_port_t fcport;
+
+       ql_dbg(ql_dbg_tgt_mgt, vha, 0xf084,
+               "%s: se_sess %p / sess %p from port %8phC loop_id %#04x"
+               " s_id %02x:%02x:%02x logout %d keep %d plogi %d\n",
+               __func__, sess->se_sess, sess, sess->port_name, sess->loop_id,
+               sess->s_id.b.domain, sess->s_id.b.area, sess->s_id.b.al_pa,
+               sess->logout_on_delete, sess->keep_nport_handle,
+               sess->plogi_ack_needed);
 
        BUG_ON(!tgt);
+
+       if (sess->logout_on_delete) {
+               int rc;
+
+               memset(&fcport, 0, sizeof(fcport));
+               fcport.loop_id = sess->loop_id;
+               fcport.d_id = sess->s_id;
+               memcpy(fcport.port_name, sess->port_name, WWN_SIZE);
+               fcport.vha = vha;
+               fcport.tgt_session = sess;
+
+               rc = qla2x00_post_async_logout_work(vha, &fcport, NULL);
+               if (rc != QLA_SUCCESS)
+                       ql_log(ql_log_warn, vha, 0xf085,
+                              "Schedule logo failed sess %p rc %d\n",
+                              sess, rc);
+               else
+                       logout_started = true;
+       }
+
        /*
         * Release the target session for FC Nexus from fabric module code.
         */
        if (sess->se_sess != NULL)
                ha->tgt.tgt_ops->free_session(sess);
 
+       if (logout_started) {
+               bool traced = false;
+
+               while (!ACCESS_ONCE(sess->logout_completed)) {
+                       if (!traced) {
+                               ql_dbg(ql_dbg_tgt_mgt, vha, 0xf086,
+                                       "%s: waiting for sess %p logout\n",
+                                       __func__, sess);
+                               traced = true;
+                       }
+                       msleep(100);
+               }
+
+               ql_dbg(ql_dbg_tgt_mgt, vha, 0xf087,
+                       "%s: sess %p logout completed\n",
+                       __func__, sess);
+       }
+
+       spin_lock_irqsave(&ha->hardware_lock, flags);
+
+       if (sess->plogi_ack_needed)
+               qlt_send_notify_ack(vha, &sess->tm_iocb,
+                                   0, 0, 0, 0, 0, 0);
+
+       list_del(&sess->sess_list_entry);
+
+       spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
        ql_dbg(ql_dbg_tgt_mgt, vha, 0xf001,
            "Unregistration of sess %p finished\n", sess);
 
@@ -409,9 +483,9 @@ void qlt_unreg_sess(struct qla_tgt_sess *sess)
 
        vha->hw->tgt.tgt_ops->clear_nacl_from_fcport_map(sess);
 
-       list_del(&sess->sess_list_entry);
-       if (sess->deleted)
-               list_del(&sess->del_list_entry);
+       if (!list_empty(&sess->del_list_entry))
+               list_del_init(&sess->del_list_entry);
+       sess->deleted = QLA_SESS_DELETION_IN_PROGRESS;
 
        INIT_WORK(&sess->free_work, qlt_free_session_done);
        schedule_work(&sess->free_work);
@@ -431,10 +505,10 @@ static int qlt_reset(struct scsi_qla_host *vha, void *iocb, int mcmd)
 
        loop_id = le16_to_cpu(n->u.isp24.nport_handle);
        if (loop_id == 0xFFFF) {
-#if 0 /* FIXME: Re-enable Global event handling.. */
                /* Global event */
-               atomic_inc(&ha->tgt.qla_tgt->tgt_global_resets_count);
-               qlt_clear_tgt_db(ha->tgt.qla_tgt);
+               atomic_inc(&vha->vha_tgt.qla_tgt->tgt_global_resets_count);
+               qlt_clear_tgt_db(vha->vha_tgt.qla_tgt);
+#if 0 /* FIXME: do we need to choose a session here? */
                if (!list_empty(&ha->tgt.qla_tgt->sess_list)) {
                        sess = list_entry(ha->tgt.qla_tgt->sess_list.next,
                            typeof(*sess), sess_list_entry);
@@ -489,27 +563,38 @@ static void qlt_schedule_sess_for_deletion(struct qla_tgt_sess *sess,
        struct qla_tgt *tgt = sess->tgt;
        uint32_t dev_loss_tmo = tgt->ha->port_down_retry_count + 5;
 
-       if (sess->deleted)
-               return;
+       if (sess->deleted) {
+               /* Upgrade to unconditional deletion in case it was temporary */
+               if (immediate && sess->deleted == QLA_SESS_DELETION_PENDING)
+                       list_del(&sess->del_list_entry);
+               else
+                       return;
+       }
 
        ql_dbg(ql_dbg_tgt, sess->vha, 0xe001,
            "Scheduling sess %p for deletion\n", sess);
-       list_add_tail(&sess->del_list_entry, &tgt->del_sess_list);
-       sess->deleted = 1;
 
-       if (immediate)
+       if (immediate) {
                dev_loss_tmo = 0;
+               sess->deleted = QLA_SESS_DELETION_IN_PROGRESS;
+               list_add(&sess->del_list_entry, &tgt->del_sess_list);
+       } else {
+               sess->deleted = QLA_SESS_DELETION_PENDING;
+               list_add_tail(&sess->del_list_entry, &tgt->del_sess_list);
+       }
 
        sess->expires = jiffies + dev_loss_tmo * HZ;
 
        ql_dbg(ql_dbg_tgt, sess->vha, 0xe048,
-           "qla_target(%d): session for port %8phC (loop ID %d) scheduled for "
-           "deletion in %u secs (expires: %lu) immed: %d\n",
-           sess->vha->vp_idx, sess->port_name, sess->loop_id, dev_loss_tmo,
-           sess->expires, immediate);
+           "qla_target(%d): session for port %8phC (loop ID %d s_id %02x:%02x:%02x)"
+           " scheduled for deletion in %u secs (expires: %lu) immed: %d, logout: %d, gen: %#x\n",
+           sess->vha->vp_idx, sess->port_name, sess->loop_id,
+           sess->s_id.b.domain, sess->s_id.b.area, sess->s_id.b.al_pa,
+           dev_loss_tmo, sess->expires, immediate, sess->logout_on_delete,
+           sess->generation);
 
        if (immediate)
-               schedule_delayed_work(&tgt->sess_del_work, 0);
+               mod_delayed_work(system_wq, &tgt->sess_del_work, 0);
        else
                schedule_delayed_work(&tgt->sess_del_work,
                    sess->expires - jiffies);
@@ -578,9 +663,9 @@ out_free_id_list:
 /* ha->hardware_lock supposed to be held on entry */
 static void qlt_undelete_sess(struct qla_tgt_sess *sess)
 {
-       BUG_ON(!sess->deleted);
+       BUG_ON(sess->deleted != QLA_SESS_DELETION_PENDING);
 
-       list_del(&sess->del_list_entry);
+       list_del_init(&sess->del_list_entry);
        sess->deleted = 0;
 }
 
@@ -599,7 +684,9 @@ static void qlt_del_sess_work_fn(struct delayed_work *work)
                    del_list_entry);
                elapsed = jiffies;
                if (time_after_eq(elapsed, sess->expires)) {
-                       qlt_undelete_sess(sess);
+                       /* No turning back */
+                       list_del_init(&sess->del_list_entry);
+                       sess->deleted = QLA_SESS_DELETION_IN_PROGRESS;
 
                        ql_dbg(ql_dbg_tgt_mgt, vha, 0xf004,
                            "Timeout: sess %p about to be deleted\n",
@@ -643,6 +730,13 @@ static struct qla_tgt_sess *qlt_create_sess(
                            fcport->d_id.b.al_pa, fcport->d_id.b.area,
                            fcport->loop_id);
 
+                       /* Cannot undelete at this point */
+                       if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) {
+                               spin_unlock_irqrestore(&ha->hardware_lock,
+                                   flags);
+                               return NULL;
+                       }
+
                        if (sess->deleted)
                                qlt_undelete_sess(sess);
 
@@ -652,6 +746,9 @@ static struct qla_tgt_sess *qlt_create_sess(
 
                        if (sess->local && !local)
                                sess->local = 0;
+
+                       qlt_do_generation_tick(vha, &sess->generation);
+
                        spin_unlock_irqrestore(&ha->hardware_lock, flags);
 
                        return sess;
@@ -673,6 +770,14 @@ static struct qla_tgt_sess *qlt_create_sess(
        sess->s_id = fcport->d_id;
        sess->loop_id = fcport->loop_id;
        sess->local = local;
+       INIT_LIST_HEAD(&sess->del_list_entry);
+
+       /* Under normal circumstances we want to log out from firmware when
+        * the session eventually ends and release the corresponding nport
+        * handle. In exceptional cases (e.g. when a new PLOGI is waiting)
+        * the corresponding code will adjust these flags as necessary. */
+       sess->logout_on_delete = 1;
+       sess->keep_nport_handle = 0;
 
        ql_dbg(ql_dbg_tgt_mgt, vha, 0xf006,
            "Adding sess %p to tgt %p via ->check_initiator_node_acl()\n",
@@ -705,6 +810,7 @@ static struct qla_tgt_sess *qlt_create_sess(
        spin_lock_irqsave(&ha->hardware_lock, flags);
        list_add_tail(&sess->sess_list_entry, &vha->vha_tgt.qla_tgt->sess_list);
        vha->vha_tgt.qla_tgt->sess_count++;
+       qlt_do_generation_tick(vha, &sess->generation);
        spin_unlock_irqrestore(&ha->hardware_lock, flags);
 
        ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04b,
@@ -718,7 +824,7 @@ static struct qla_tgt_sess *qlt_create_sess(
 }
 
 /*
- * Called from drivers/scsi/qla2xxx/qla_init.c:qla2x00_reg_remote_port()
+ * Called from qla2x00_reg_remote_port()
  */
 void qlt_fc_port_added(struct scsi_qla_host *vha, fc_port_t *fcport)
 {
@@ -750,6 +856,10 @@ void qlt_fc_port_added(struct scsi_qla_host *vha, fc_port_t *fcport)
                mutex_unlock(&vha->vha_tgt.tgt_mutex);
 
                spin_lock_irqsave(&ha->hardware_lock, flags);
+       } else if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) {
+               /* Point of no return */
+               spin_unlock_irqrestore(&ha->hardware_lock, flags);
+               return;
        } else {
                kref_get(&sess->se_sess->sess_kref);
 
@@ -780,27 +890,36 @@ void qlt_fc_port_added(struct scsi_qla_host *vha, fc_port_t *fcport)
        spin_unlock_irqrestore(&ha->hardware_lock, flags);
 }
 
-void qlt_fc_port_deleted(struct scsi_qla_host *vha, fc_port_t *fcport)
+/*
+ * max_gen - specifies maximum session generation
+ * at which this deletion request is still valid
+ */
+void
+qlt_fc_port_deleted(struct scsi_qla_host *vha, fc_port_t *fcport, int max_gen)
 {
-       struct qla_hw_data *ha = vha->hw;
        struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
        struct qla_tgt_sess *sess;
-       unsigned long flags;
 
        if (!vha->hw->tgt.tgt_ops)
                return;
 
-       if (!tgt || (fcport->port_type != FCT_INITIATOR))
+       if (!tgt)
                return;
 
-       spin_lock_irqsave(&ha->hardware_lock, flags);
        if (tgt->tgt_stop) {
-               spin_unlock_irqrestore(&ha->hardware_lock, flags);
                return;
        }
        sess = qlt_find_sess_by_port_name(tgt, fcport->port_name);
        if (!sess) {
-               spin_unlock_irqrestore(&ha->hardware_lock, flags);
+               return;
+       }
+
+       if (max_gen - sess->generation < 0) {
+               ql_dbg(ql_dbg_tgt_mgt, vha, 0xf092,
+                   "Ignoring stale deletion request for se_sess %p / sess %p"
+                   " for port %8phC, req_gen %d, sess_gen %d\n",
+                   sess->se_sess, sess, sess->port_name, max_gen,
+                   sess->generation);
                return;
        }
 
@@ -808,7 +927,6 @@ void qlt_fc_port_deleted(struct scsi_qla_host *vha, fc_port_t *fcport)
 
        sess->local = 1;
        qlt_schedule_sess_for_deletion(sess, false);
-       spin_unlock_irqrestore(&ha->hardware_lock, flags);
 }
 
 static inline int test_tgt_sess_count(struct qla_tgt *tgt)
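As an aside, the max_gen check in qlt_fc_port_deleted() above is written as a signed difference rather than a plain comparison, the usual sequence-counter idiom so the test stays meaningful if the per-host generation counter bumped by qlt_do_generation_tick() ever wraps. A minimal sketch of the idea (illustrative only; the helper name is hypothetical):

static inline int deletion_request_is_stale(int req_gen, int sess_gen)
{
	/* true when the session was (re)created after the request was stamped */
	return req_gen - sess_gen < 0;
}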
@@ -1175,6 +1293,70 @@ static void qlt_24xx_retry_term_exchange(struct scsi_qla_host *vha,
            FCP_TMF_CMPL, true);
 }
 
+static int abort_cmd_for_tag(struct scsi_qla_host *vha, uint32_t tag)
+{
+       struct qla_tgt_sess_op *op;
+       struct qla_tgt_cmd *cmd;
+
+       spin_lock(&vha->cmd_list_lock);
+
+       list_for_each_entry(op, &vha->qla_sess_op_cmd_list, cmd_list) {
+               if (tag == op->atio.u.isp24.exchange_addr) {
+                       op->aborted = true;
+                       spin_unlock(&vha->cmd_list_lock);
+                       return 1;
+               }
+       }
+
+       list_for_each_entry(cmd, &vha->qla_cmd_list, cmd_list) {
+               if (tag == cmd->atio.u.isp24.exchange_addr) {
+                       cmd->state = QLA_TGT_STATE_ABORTED;
+                       spin_unlock(&vha->cmd_list_lock);
+                       return 1;
+               }
+       }
+
+       spin_unlock(&vha->cmd_list_lock);
+       return 0;
+}
+
+/* drop cmds for the given lun
+ * XXX only looks for cmds on the port through which lun reset was received
+ * XXX does not go through the list of other ports (which may have cmds
+ *     for the same lun)
+ */
+static void abort_cmds_for_lun(struct scsi_qla_host *vha,
+                               uint32_t lun, uint8_t *s_id)
+{
+       struct qla_tgt_sess_op *op;
+       struct qla_tgt_cmd *cmd;
+       uint32_t key;
+
+       key = sid_to_key(s_id);
+       spin_lock(&vha->cmd_list_lock);
+       list_for_each_entry(op, &vha->qla_sess_op_cmd_list, cmd_list) {
+               uint32_t op_key;
+               uint32_t op_lun;
+
+               op_key = sid_to_key(op->atio.u.isp24.fcp_hdr.s_id);
+               op_lun = scsilun_to_int(
+                       (struct scsi_lun *)&op->atio.u.isp24.fcp_cmnd.lun);
+               if (op_key == key && op_lun == lun)
+                       op->aborted = true;
+       }
+       list_for_each_entry(cmd, &vha->qla_cmd_list, cmd_list) {
+               uint32_t cmd_key;
+               uint32_t cmd_lun;
+
+               cmd_key = sid_to_key(cmd->atio.u.isp24.fcp_hdr.s_id);
+               cmd_lun = scsilun_to_int(
+                       (struct scsi_lun *)&cmd->atio.u.isp24.fcp_cmnd.lun);
+               if (cmd_key == key && cmd_lun == lun)
+                       cmd->state = QLA_TGT_STATE_ABORTED;
+       }
+       spin_unlock(&vha->cmd_list_lock);
+}
+
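The two lookups above key commands by FC source ID via sid_to_key(); the helper is assumed here to pack the three s_id bytes into one 24-bit integer so addresses compare with a single ==. A sketch of that assumption (not the driver's actual definition):

#include <stdint.h>

static inline uint32_t example_sid_to_key(const uint8_t *s_id)
{
	return ((uint32_t)s_id[0] << 16) |	/* domain */
	       ((uint32_t)s_id[1] << 8)  |	/* area   */
	        (uint32_t)s_id[2];		/* al_pa  */
}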
 /* ha->hardware_lock supposed to be held on entry */
 static int __qlt_24xx_handle_abts(struct scsi_qla_host *vha,
        struct abts_recv_from_24xx *abts, struct qla_tgt_sess *sess)
@@ -1199,8 +1381,19 @@ static int __qlt_24xx_handle_abts(struct scsi_qla_host *vha,
        }
        spin_unlock(&se_sess->sess_cmd_lock);
 
-       if (!found_lun)
-               return -ENOENT;
+       /* cmd not in LIO lists, look in qla list */
+       if (!found_lun) {
+               if (abort_cmd_for_tag(vha, abts->exchange_addr_to_abort)) {
+                       /* send TASK_ABORT response immediately */
+                       qlt_24xx_send_abts_resp(vha, abts, FCP_TMF_CMPL, false);
+                       return 0;
+               } else {
+                       ql_dbg(ql_dbg_tgt_mgt, vha, 0xf081,
+                           "unable to find cmd in driver or LIO for tag 0x%x\n",
+                           abts->exchange_addr_to_abort);
+                       return -ENOENT;
+               }
+       }
 
        ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00f,
            "qla_target(%d): task abort (tag=%d)\n",
@@ -1284,6 +1477,11 @@ static void qlt_24xx_handle_abts(struct scsi_qla_host *vha,
                return;
        }
 
+       if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) {
+               qlt_24xx_send_abts_resp(vha, abts, FCP_TMF_REJECTED, false);
+               return;
+       }
+
        rc = __qlt_24xx_handle_abts(vha, abts, sess);
        if (rc != 0) {
                ql_dbg(ql_dbg_tgt_mgt, vha, 0xf054,
@@ -1726,20 +1924,6 @@ static int qlt_pre_xmit_response(struct qla_tgt_cmd *cmd,
        struct qla_hw_data *ha = vha->hw;
        struct se_cmd *se_cmd = &cmd->se_cmd;
 
-       if (unlikely(cmd->aborted)) {
-               ql_dbg(ql_dbg_tgt_mgt, vha, 0xf014,
-                      "qla_target(%d): terminating exchange for aborted cmd=%p (se_cmd=%p, tag=%lld)",
-                      vha->vp_idx, cmd, se_cmd, se_cmd->tag);
-
-               cmd->state = QLA_TGT_STATE_ABORTED;
-               cmd->cmd_flags |= BIT_6;
-
-               qlt_send_term_exchange(vha, cmd, &cmd->atio, 0);
-
-               /* !! At this point cmd could be already freed !! */
-               return QLA_TGT_PRE_XMIT_RESP_CMD_ABORTED;
-       }
-
        prm->cmd = cmd;
        prm->tgt = tgt;
        prm->rq_result = scsi_status;
@@ -2301,6 +2485,19 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type,
        unsigned long flags = 0;
        int res;
 
+       spin_lock_irqsave(&ha->hardware_lock, flags);
+       if (cmd->sess && cmd->sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) {
+               cmd->state = QLA_TGT_STATE_PROCESSED;
+               if (cmd->sess->logout_completed)
+                       /* no need to terminate. FW already freed exchange. */
+                       qlt_abort_cmd_on_host_reset(cmd->vha, cmd);
+               else
+                       qlt_send_term_exchange(vha, cmd, &cmd->atio, 1);
+               spin_unlock_irqrestore(&ha->hardware_lock, flags);
+               return 0;
+       }
+       spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
        memset(&prm, 0, sizeof(prm));
        qlt_check_srr_debug(cmd, &xmit_type);
 
@@ -2313,9 +2510,6 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type,
        res = qlt_pre_xmit_response(cmd, &prm, xmit_type, scsi_status,
            &full_req_cnt);
        if (unlikely(res != 0)) {
-               if (res == QLA_TGT_PRE_XMIT_RESP_CMD_ABORTED)
-                       return 0;
-
                return res;
        }
 
@@ -2345,9 +2539,10 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type,
                res = qlt_build_ctio_crc2_pkt(&prm, vha);
        else
                res = qlt_24xx_build_ctio_pkt(&prm, vha);
-       if (unlikely(res != 0))
+       if (unlikely(res != 0)) {
+               vha->req->cnt += full_req_cnt;
                goto out_unmap_unlock;
-
+       }
 
        pkt = (struct ctio7_to_24xx *)prm.pkt;
 
@@ -2461,7 +2656,8 @@ int qlt_rdy_to_xfer(struct qla_tgt_cmd *cmd)
 
        spin_lock_irqsave(&ha->hardware_lock, flags);
 
-       if (qla2x00_reset_active(vha) || cmd->reset_count != ha->chip_reset) {
+       if (qla2x00_reset_active(vha) || (cmd->reset_count != ha->chip_reset) ||
+           (cmd->sess && cmd->sess->deleted == QLA_SESS_DELETION_IN_PROGRESS)) {
                /*
                 * Either a chip reset is active or this request was from
                 * previous life, just abort the processing.
@@ -2485,8 +2681,11 @@ int qlt_rdy_to_xfer(struct qla_tgt_cmd *cmd)
        else
                res = qlt_24xx_build_ctio_pkt(&prm, vha);
 
-       if (unlikely(res != 0))
+       if (unlikely(res != 0)) {
+               vha->req->cnt += prm.req_cnt;
                goto out_unlock_free_unmap;
+       }
+
        pkt = (struct ctio7_to_24xx *)prm.pkt;
        pkt->u.status0.flags |= __constant_cpu_to_le16(CTIO7_FLAGS_DATA_OUT |
            CTIO7_FLAGS_STATUS_MODE_0);
@@ -2649,6 +2848,89 @@ out:
 }
 
 
+/* If hardware_lock is held on entry, might drop it, then reacquire */
+/* This function sends the appropriate CTIO to ISP 2xxx or 24xx */
+static int __qlt_send_term_imm_notif(struct scsi_qla_host *vha,
+       struct imm_ntfy_from_isp *ntfy)
+{
+       struct nack_to_isp *nack;
+       struct qla_hw_data *ha = vha->hw;
+       request_t *pkt;
+       int ret = 0;
+
+       ql_dbg(ql_dbg_tgt_tmr, vha, 0xe01c,
+           "Sending TERM ELS CTIO (ha=%p)\n", ha);
+
+       pkt = (request_t *)qla2x00_alloc_iocbs_ready(vha, NULL);
+       if (pkt == NULL) {
+               ql_dbg(ql_dbg_tgt, vha, 0xe080,
+                   "qla_target(%d): %s failed: unable to allocate "
+                   "request packet\n", vha->vp_idx, __func__);
+               return -ENOMEM;
+       }
+
+       pkt->entry_type = NOTIFY_ACK_TYPE;
+       pkt->entry_count = 1;
+       pkt->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;
+
+       nack = (struct nack_to_isp *)pkt;
+       nack->ox_id = ntfy->ox_id;
+
+       nack->u.isp24.nport_handle = ntfy->u.isp24.nport_handle;
+       if (le16_to_cpu(ntfy->u.isp24.status) == IMM_NTFY_ELS) {
+               nack->u.isp24.flags = ntfy->u.isp24.flags &
+                       __constant_cpu_to_le32(NOTIFY24XX_FLAGS_PUREX_IOCB);
+       }
+
+       /* terminate */
+       nack->u.isp24.flags |=
+               __constant_cpu_to_le16(NOTIFY_ACK_FLAGS_TERMINATE);
+
+       nack->u.isp24.srr_rx_id = ntfy->u.isp24.srr_rx_id;
+       nack->u.isp24.status = ntfy->u.isp24.status;
+       nack->u.isp24.status_subcode = ntfy->u.isp24.status_subcode;
+       nack->u.isp24.fw_handle = ntfy->u.isp24.fw_handle;
+       nack->u.isp24.exchange_address = ntfy->u.isp24.exchange_address;
+       nack->u.isp24.srr_rel_offs = ntfy->u.isp24.srr_rel_offs;
+       nack->u.isp24.srr_ui = ntfy->u.isp24.srr_ui;
+       nack->u.isp24.vp_index = ntfy->u.isp24.vp_index;
+
+       qla2x00_start_iocbs(vha, vha->req);
+       return ret;
+}
+
+static void qlt_send_term_imm_notif(struct scsi_qla_host *vha,
+       struct imm_ntfy_from_isp *imm, int ha_locked)
+{
+       unsigned long flags = 0;
+       int rc;
+
+       if (qlt_issue_marker(vha, ha_locked) < 0)
+               return;
+
+       if (ha_locked) {
+               rc = __qlt_send_term_imm_notif(vha, imm);
+
+#if 0  /* Todo  */
+               if (rc == -ENOMEM)
+                       qlt_alloc_qfull_cmd(vha, imm, 0, 0);
+#endif
+               goto done;
+       }
+
+       spin_lock_irqsave(&vha->hw->hardware_lock, flags);
+       rc = __qlt_send_term_imm_notif(vha, imm);
+
+#if 0  /* Todo */
+       if (rc == -ENOMEM)
+               qlt_alloc_qfull_cmd(vha, imm, 0, 0);
+#endif
+
+done:
+       if (!ha_locked)
+               spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
+}
+
 /* If hardware_lock is held on entry, might drop it, then reacquire */
 /* This function sends the appropriate CTIO to ISP 2xxx or 24xx */
 static int __qlt_send_term_exchange(struct scsi_qla_host *vha,
@@ -2715,7 +2997,7 @@ static int __qlt_send_term_exchange(struct scsi_qla_host *vha,
 static void qlt_send_term_exchange(struct scsi_qla_host *vha,
        struct qla_tgt_cmd *cmd, struct atio_from_isp *atio, int ha_locked)
 {
-       unsigned long flags;
+       unsigned long flags = 0;
        int rc;
 
        if (qlt_issue_marker(vha, ha_locked) < 0)
@@ -2731,17 +3013,18 @@ static void qlt_send_term_exchange(struct scsi_qla_host *vha,
        rc = __qlt_send_term_exchange(vha, cmd, atio);
        if (rc == -ENOMEM)
                qlt_alloc_qfull_cmd(vha, atio, 0, 0);
-       spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
 
 done:
        if (cmd && ((cmd->state != QLA_TGT_STATE_ABORTED) ||
            !cmd->cmd_sent_to_fw)) {
-               if (!ha_locked && !in_interrupt())
-                       msleep(250); /* just in case */
-
-               qlt_unmap_sg(vha, cmd);
+               if (cmd->sg_mapped)
+                       qlt_unmap_sg(vha, cmd);
                vha->hw->tgt.tgt_ops->free_cmd(cmd);
        }
+
+       if (!ha_locked)
+               spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
+
        return;
 }
 
@@ -2792,6 +3075,24 @@ static void qlt_chk_exch_leak_thresh_hold(struct scsi_qla_host *vha)
 
 }
 
+void qlt_abort_cmd(struct qla_tgt_cmd *cmd)
+{
+       struct qla_tgt *tgt = cmd->tgt;
+       struct scsi_qla_host *vha = tgt->vha;
+       struct se_cmd *se_cmd = &cmd->se_cmd;
+
+       ql_dbg(ql_dbg_tgt_mgt, vha, 0xf014,
+           "qla_target(%d): terminating exchange for aborted cmd=%p "
+           "(se_cmd=%p, tag=%llu)", vha->vp_idx, cmd, &cmd->se_cmd,
+           se_cmd->tag);
+
+       cmd->state = QLA_TGT_STATE_ABORTED;
+       cmd->cmd_flags |= BIT_6;
+
+       qlt_send_term_exchange(vha, cmd, &cmd->atio, 0);
+}
+EXPORT_SYMBOL(qlt_abort_cmd);
+
 void qlt_free_cmd(struct qla_tgt_cmd *cmd)
 {
        struct qla_tgt_sess *sess = cmd->sess;
@@ -3015,7 +3316,7 @@ qlt_abort_cmd_on_host_reset(struct scsi_qla_host *vha, struct qla_tgt_cmd *cmd)
                dump_stack();
        }
 
-       cmd->cmd_flags |= BIT_12;
+       cmd->cmd_flags |= BIT_17;
        ha->tgt.tgt_ops->free_cmd(cmd);
 }
 
@@ -3177,7 +3478,7 @@ static void qlt_do_ctio_completion(struct scsi_qla_host *vha, uint32_t handle,
 skip_term:
 
        if (cmd->state == QLA_TGT_STATE_PROCESSED) {
-               ;
+               cmd->cmd_flags |= BIT_12;
        } else if (cmd->state == QLA_TGT_STATE_NEED_DATA) {
                int rx_status = 0;
 
@@ -3191,9 +3492,11 @@ skip_term:
                ha->tgt.tgt_ops->handle_data(cmd);
                return;
        } else if (cmd->state == QLA_TGT_STATE_ABORTED) {
+               cmd->cmd_flags |= BIT_18;
                ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01e,
                  "Aborted command %p (tag %lld) finished\n", cmd, se_cmd->tag);
        } else {
+               cmd->cmd_flags |= BIT_19;
                ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05c,
                    "qla_target(%d): A command in state (%d) should "
                    "not return a CTIO complete\n", vha->vp_idx, cmd->state);
@@ -3205,7 +3508,6 @@ skip_term:
                dump_stack();
        }
 
-
        ha->tgt.tgt_ops->free_cmd(cmd);
 }
 
@@ -3263,6 +3565,13 @@ static void __qlt_do_work(struct qla_tgt_cmd *cmd)
        if (tgt->tgt_stop)
                goto out_term;
 
+       if (cmd->state == QLA_TGT_STATE_ABORTED) {
+               ql_dbg(ql_dbg_tgt_mgt, vha, 0xf082,
+                   "cmd with tag %u is aborted\n",
+                   cmd->atio.u.isp24.exchange_addr);
+               goto out_term;
+       }
+
        cdb = &atio->u.isp24.fcp_cmnd.cdb[0];
        cmd->se_cmd.tag = atio->u.isp24.exchange_addr;
        cmd->unpacked_lun = scsilun_to_int(
@@ -3316,6 +3625,12 @@ out_term:
 static void qlt_do_work(struct work_struct *work)
 {
        struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work);
+       scsi_qla_host_t *vha = cmd->vha;
+       unsigned long flags;
+
+       spin_lock_irqsave(&vha->cmd_list_lock, flags);
+       list_del(&cmd->cmd_list);
+       spin_unlock_irqrestore(&vha->cmd_list_lock, flags);
 
        __qlt_do_work(cmd);
 }
@@ -3345,6 +3660,11 @@ static struct qla_tgt_cmd *qlt_get_tag(scsi_qla_host_t *vha,
        cmd->loop_id = sess->loop_id;
        cmd->conf_compl_supported = sess->conf_compl_supported;
 
+       cmd->cmd_flags = 0;
+       cmd->jiffies_at_alloc = get_jiffies_64();
+
+       cmd->reset_count = vha->hw->chip_reset;
+
        return cmd;
 }
 
@@ -3362,14 +3682,25 @@ static void qlt_create_sess_from_atio(struct work_struct *work)
        unsigned long flags;
        uint8_t *s_id = op->atio.u.isp24.fcp_hdr.s_id;
 
+       spin_lock_irqsave(&vha->cmd_list_lock, flags);
+       list_del(&op->cmd_list);
+       spin_unlock_irqrestore(&vha->cmd_list_lock, flags);
+
+       if (op->aborted) {
+               ql_dbg(ql_dbg_tgt_mgt, vha, 0xf083,
+                   "sess_op with tag %u is aborted\n",
+                   op->atio.u.isp24.exchange_addr);
+               goto out_term;
+       }
+
        ql_dbg(ql_dbg_tgt_mgt, vha, 0xf022,
-               "qla_target(%d): Unable to find wwn login"
-               " (s_id %x:%x:%x), trying to create it manually\n",
-               vha->vp_idx, s_id[0], s_id[1], s_id[2]);
+           "qla_target(%d): Unable to find wwn login"
+           " (s_id %x:%x:%x), trying to create it manually\n",
+           vha->vp_idx, s_id[0], s_id[1], s_id[2]);
 
        if (op->atio.u.raw.entry_count > 1) {
                ql_dbg(ql_dbg_tgt_mgt, vha, 0xf023,
-                       "Dropping multy entry atio %p\n", &op->atio);
+                   "Dropping multi entry atio %p\n", &op->atio);
                goto out_term;
        }
 
@@ -3434,10 +3765,25 @@ static int qlt_handle_cmd_for_atio(struct scsi_qla_host *vha,
 
                memcpy(&op->atio, atio, sizeof(*atio));
                op->vha = vha;
+
+               spin_lock(&vha->cmd_list_lock);
+               list_add_tail(&op->cmd_list, &vha->qla_sess_op_cmd_list);
+               spin_unlock(&vha->cmd_list_lock);
+
                INIT_WORK(&op->work, qlt_create_sess_from_atio);
                queue_work(qla_tgt_wq, &op->work);
                return 0;
        }
+
+       /* Another WWN used to have our s_id. Our PLOGI scheduled its
+        * session deletion, but it's still in sess_del_work wq */
+       if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) {
+               ql_dbg(ql_dbg_io, vha, 0x3061,
+                   "New command while old session %p is being deleted\n",
+                   sess);
+               return -EFAULT;
+       }
+
        /*
         * Do kref_get() before returning + dropping qla_hw_data->hardware_lock.
         */
@@ -3451,13 +3797,13 @@ static int qlt_handle_cmd_for_atio(struct scsi_qla_host *vha,
                return -ENOMEM;
        }
 
-       cmd->cmd_flags = 0;
-       cmd->jiffies_at_alloc = get_jiffies_64();
-
-       cmd->reset_count = vha->hw->chip_reset;
-
        cmd->cmd_in_wq = 1;
        cmd->cmd_flags |= BIT_0;
+
+       spin_lock(&vha->cmd_list_lock);
+       list_add_tail(&cmd->cmd_list, &vha->qla_cmd_list);
+       spin_unlock(&vha->cmd_list_lock);
+
        INIT_WORK(&cmd->work, qlt_do_work);
        queue_work(qla_tgt_wq, &cmd->work);
        return 0;
@@ -3471,6 +3817,7 @@ static int qlt_issue_task_mgmt(struct qla_tgt_sess *sess, uint32_t lun,
        struct scsi_qla_host *vha = sess->vha;
        struct qla_hw_data *ha = vha->hw;
        struct qla_tgt_mgmt_cmd *mcmd;
+       struct atio_from_isp *a = (struct atio_from_isp *)iocb;
        int res;
        uint8_t tmr_func;
 
@@ -3511,6 +3858,7 @@ static int qlt_issue_task_mgmt(struct qla_tgt_sess *sess, uint32_t lun,
                ql_dbg(ql_dbg_tgt_tmr, vha, 0x10002,
                    "qla_target(%d): LUN_RESET received\n", sess->vha->vp_idx);
                tmr_func = TMR_LUN_RESET;
+               abort_cmds_for_lun(vha, lun, a->u.isp24.fcp_hdr.s_id);
                break;
 
        case QLA_TGT_CLEAR_TS:
@@ -3599,6 +3947,9 @@ static int qlt_handle_task_mgmt(struct scsi_qla_host *vha, void *iocb)
                    sizeof(struct atio_from_isp));
        }
 
+       if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS)
+               return -EFAULT;
+
        return qlt_issue_task_mgmt(sess, unpacked_lun, fn, iocb, 0);
 }
 
@@ -3664,22 +4015,280 @@ static int qlt_abort_task(struct scsi_qla_host *vha,
        return __qlt_abort_task(vha, iocb, sess);
 }
 
+void qlt_logo_completion_handler(fc_port_t *fcport, int rc)
+{
+       if (fcport->tgt_session) {
+               if (rc != MBS_COMMAND_COMPLETE) {
+                       ql_dbg(ql_dbg_tgt_mgt, fcport->vha, 0xf093,
+                               "%s: se_sess %p / sess %p from"
+                               " port %8phC loop_id %#04x s_id %02x:%02x:%02x"
+                               " LOGO failed: %#x\n",
+                               __func__,
+                               fcport->tgt_session->se_sess,
+                               fcport->tgt_session,
+                               fcport->port_name, fcport->loop_id,
+                               fcport->d_id.b.domain, fcport->d_id.b.area,
+                               fcport->d_id.b.al_pa, rc);
+               }
+
+               fcport->tgt_session->logout_completed = 1;
+       }
+}
+
+static void qlt_swap_imm_ntfy_iocb(struct imm_ntfy_from_isp *a,
+    struct imm_ntfy_from_isp *b)
+{
+       struct imm_ntfy_from_isp tmp;
+       memcpy(&tmp, a, sizeof(struct imm_ntfy_from_isp));
+       memcpy(a, b, sizeof(struct imm_ntfy_from_isp));
+       memcpy(b, &tmp, sizeof(struct imm_ntfy_from_isp));
+}
+
+/*
+ * ha->hardware_lock supposed to be held on entry (to protect tgt->sess_list)
+ *
+ * Schedules sessions with matching port_id/loop_id but different wwn for
+ * deletion. Returns existing session with matching wwn if present.
+ * Null otherwise.
+ */
+static struct qla_tgt_sess *
+qlt_find_sess_invalidate_other(struct qla_tgt *tgt, uint64_t wwn,
+    port_id_t port_id, uint16_t loop_id)
+{
+       struct qla_tgt_sess *sess = NULL, *other_sess;
+       uint64_t other_wwn;
+
+       list_for_each_entry(other_sess, &tgt->sess_list, sess_list_entry) {
+
+               other_wwn = wwn_to_u64(other_sess->port_name);
+
+               if (wwn == other_wwn) {
+                       WARN_ON(sess);
+                       sess = other_sess;
+                       continue;
+               }
+
+               /* find other sess with nport_id collision */
+               if (port_id.b24 == other_sess->s_id.b24) {
+                       if (loop_id != other_sess->loop_id) {
+                               ql_dbg(ql_dbg_tgt_tmr, tgt->vha, 0x1000c,
+                                   "Invalidating sess %p loop_id %d wwn %llx.\n",
+                                   other_sess, other_sess->loop_id, other_wwn);
+
+                               /*
+                                * logout_on_delete is set by default, but another
+                                * session that has the same s_id/loop_id combo
+                                * might have cleared it when requesting this session's
+                                * deletion, so don't touch it
+                                */
+                               qlt_schedule_sess_for_deletion(other_sess, true);
+                       } else {
+                               /*
+                                * Another wwn used to have our s_id/loop_id
+                                * combo - kill the session, but don't log out
+                                */
+                               sess->logout_on_delete = 0;
+                               qlt_schedule_sess_for_deletion(other_sess,
+                                   true);
+                       }
+                       continue;
+               }
+
+               /* find other sess with nport handle collision */
+               if (loop_id == other_sess->loop_id) {
+                       ql_dbg(ql_dbg_tgt_tmr, tgt->vha, 0x1000d,
+                              "Invalidating sess %p loop_id %d wwn %llx.\n",
+                              other_sess, other_sess->loop_id, other_wwn);
+
+                       /* Same loop_id but different s_id
+                        * Ok to kill and logout */
+                       qlt_schedule_sess_for_deletion(other_sess, true);
+               }
+       }
+
+       return sess;
+}
+
+/* Abort any commands for this s_id waiting on qla_tgt_wq workqueue */
+static int abort_cmds_for_s_id(struct scsi_qla_host *vha, port_id_t *s_id)
+{
+       struct qla_tgt_sess_op *op;
+       struct qla_tgt_cmd *cmd;
+       uint32_t key;
+       int count = 0;
+
+       key = (((u32)s_id->b.domain << 16) |
+              ((u32)s_id->b.area   <<  8) |
+              ((u32)s_id->b.al_pa));
+
+       spin_lock(&vha->cmd_list_lock);
+       list_for_each_entry(op, &vha->qla_sess_op_cmd_list, cmd_list) {
+               uint32_t op_key = sid_to_key(op->atio.u.isp24.fcp_hdr.s_id);
+               if (op_key == key) {
+                       op->aborted = true;
+                       count++;
+               }
+       }
+       list_for_each_entry(cmd, &vha->qla_cmd_list, cmd_list) {
+               uint32_t cmd_key = sid_to_key(cmd->atio.u.isp24.fcp_hdr.s_id);
+               if (cmd_key == key) {
+                       cmd->state = QLA_TGT_STATE_ABORTED;
+                       count++;
+               }
+       }
+       spin_unlock(&vha->cmd_list_lock);
+
+       return count;
+}
+
 /*
  * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
  */
 static int qlt_24xx_handle_els(struct scsi_qla_host *vha,
        struct imm_ntfy_from_isp *iocb)
 {
+       struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
+       struct qla_hw_data *ha = vha->hw;
+       struct qla_tgt_sess *sess = NULL;
+       uint64_t wwn;
+       port_id_t port_id;
+       uint16_t loop_id;
+       uint16_t wd3_lo;
        int res = 0;
 
+       wwn = wwn_to_u64(iocb->u.isp24.port_name);
+
+       port_id.b.domain = iocb->u.isp24.port_id[2];
+       port_id.b.area   = iocb->u.isp24.port_id[1];
+       port_id.b.al_pa  = iocb->u.isp24.port_id[0];
+       port_id.b.rsvd_1 = 0;
+
+       loop_id = le16_to_cpu(iocb->u.isp24.nport_handle);
+
        ql_dbg(ql_dbg_tgt_mgt, vha, 0xf026,
            "qla_target(%d): Port ID: 0x%3phC ELS opcode: 0x%02x\n",
            vha->vp_idx, iocb->u.isp24.port_id, iocb->u.isp24.status_subcode);
 
+       /* res = 1 means ack at the end of thread
+        * res = 0 means ack async/later.
+        */
        switch (iocb->u.isp24.status_subcode) {
        case ELS_PLOGI:
-       case ELS_FLOGI:
+
+               /* Mark all stale commands in qla_tgt_wq for deletion */
+               abort_cmds_for_s_id(vha, &port_id);
+
+               if (wwn)
+                       sess = qlt_find_sess_invalidate_other(tgt, wwn,
+                           port_id, loop_id);
+
+               if (!sess || IS_SW_RESV_ADDR(sess->s_id)) {
+                       res = 1;
+                       break;
+               }
+
+               if (sess->plogi_ack_needed) {
+                       /*
+                        * Initiator sent another PLOGI before last PLOGI could
+                        * finish. Swap plogi iocbs and terminate the old one
+                        * without acking; the new one will be acked when
+                        * session deletion completes.
+                        */
+                       ql_log(ql_log_warn, sess->vha, 0xf094,
+                           "sess %p received double plogi.\n", sess);
+
+                       qlt_swap_imm_ntfy_iocb(iocb, &sess->tm_iocb);
+
+                       qlt_send_term_imm_notif(vha, iocb, 1);
+
+                       res = 0;
+                       break;
+               }
+
+               res = 0;
+
+               /*
+                * Save immediate Notif IOCB for Ack when sess is done
+                * and being deleted.
+                */
+               memcpy(&sess->tm_iocb, iocb, sizeof(sess->tm_iocb));
+               sess->plogi_ack_needed  = 1;
+
+                /*
+                 * Under normal circumstances we want to release nport handle
+                 * during LOGO process to avoid nport handle leaks inside FW.
+                 * The exception is when LOGO is done while another PLOGI with
+                 * the same nport handle is waiting as might be the case here.
+                 * Note: there is always a possibility of a race where session
+                 * deletion has already started for other reasons (e.g. ACL
+                 * removal) and now PLOGI arrives:
+                 * 1. if PLOGI arrived in FW after nport handle has been freed,
+                 *    FW must have assigned this PLOGI a new/same handle and we
+                 *    can proceed ACK'ing it as usual when session deletion
+                 *    completes.
+                 * 2. if PLOGI arrived in FW before LOGO with LCF_FREE_NPORT
+                 *    bit reached it, the handle has now been released. We'll
+                 *    get an error when we ACK this PLOGI. Nothing will be sent
+                 *    back to initiator. Initiator should eventually retry
+                 *    PLOGI and situation will correct itself.
+                 */
+               sess->keep_nport_handle = ((sess->loop_id == loop_id) &&
+                                          (sess->s_id.b24 == port_id.b24));
+               qlt_schedule_sess_for_deletion(sess, true);
+               break;
+
        case ELS_PRLI:
+               wd3_lo = le16_to_cpu(iocb->u.isp24.u.prli.wd3_lo);
+
+               if (wwn)
+                       sess = qlt_find_sess_invalidate_other(tgt, wwn, port_id,
+                           loop_id);
+
+               if (sess != NULL) {
+                       if (sess->deleted) {
+                               /*
+                                * Impatient initiator sent PRLI before the last
+                                * PLOGI could finish. Force it to retry while
+                                * the last one finishes.
+                                */
+                               ql_log(ql_log_warn, sess->vha, 0xf095,
+                                   "sess %p PRLI received, before plogi ack.\n",
+                                   sess);
+                               qlt_send_term_imm_notif(vha, iocb, 1);
+                               res = 0;
+                               break;
+                       }
+
+                       /*
+                        * This shouldn't happen under normal circumstances,
+                        * since we have deleted the old session during PLOGI
+                        */
+                       ql_dbg(ql_dbg_tgt_mgt, vha, 0xf096,
+                           "PRLI (loop_id %#04x) for existing sess %p (loop_id %#04x)\n",
+                           sess->loop_id, sess, iocb->u.isp24.nport_handle);
+
+                       sess->local = 0;
+                       sess->loop_id = loop_id;
+                       sess->s_id = port_id;
+
+                       if (wd3_lo & BIT_7)
+                               sess->conf_compl_supported = 1;
+
+               }
+               res = 1; /* send notify ack */
+
+               /* Make session global (not used in fabric mode) */
+               if (ha->current_topology != ISP_CFG_F) {
+                       set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
+                       set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
+                       qla2xxx_wake_dpc(vha);
+               } else {
+                       /* todo: else - create sess here. */
+                       res = 1; /* send notify ack */
+               }
+
+               break;
+
        case ELS_LOGO:
        case ELS_PRLO:
                res = qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS_SESS);
@@ -3697,6 +4306,7 @@ static int qlt_24xx_handle_els(struct scsi_qla_host *vha,
                break;
        }
 
+       case ELS_FLOGI: /* should never happen */
        default:
                ql_dbg(ql_dbg_tgt_mgt, vha, 0xf061,
                    "qla_target(%d): Unsupported ELS command %x "
@@ -5012,6 +5622,11 @@ static void qlt_abort_work(struct qla_tgt *tgt,
                if (!sess)
                        goto out_term;
        } else {
+               if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) {
+                       sess = NULL;
+                       goto out_term;
+               }
+
                kref_get(&sess->se_sess->sess_kref);
        }
 
@@ -5066,6 +5681,11 @@ static void qlt_tmr_work(struct qla_tgt *tgt,
                if (!sess)
                        goto out_term;
        } else {
+               if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) {
+                       sess = NULL;
+                       goto out_term;
+               }
+
                kref_get(&sess->se_sess->sess_kref);
        }
 
@@ -5552,6 +6172,7 @@ qlt_24xx_process_atio_queue(struct scsi_qla_host *vha)
 
        /* Adjust ring index */
        WRT_REG_DWORD(ISP_ATIO_Q_OUT(vha), ha->tgt.atio_ring_index);
+       RD_REG_DWORD_RELAXED(ISP_ATIO_Q_OUT(vha));
 }
 
 void
@@ -5793,7 +6414,7 @@ qlt_probe_one_stage1(struct scsi_qla_host *base_vha, struct qla_hw_data *ha)
        if (!QLA_TGT_MODE_ENABLED())
                return;
 
-       if  (ha->mqenable || IS_QLA83XX(ha)) {
+       if  (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
                ISP_ATIO_Q_IN(base_vha) = &ha->mqiobase->isp25mq.atio_q_in;
                ISP_ATIO_Q_OUT(base_vha) = &ha->mqiobase->isp25mq.atio_q_out;
        } else {
index 985d76dd706b71b3da1b3685169a21c24af90fa0..bca584ae45b7eccc3da35a11c9583c324524a802 100644 (file)
@@ -167,7 +167,24 @@ struct imm_ntfy_from_isp {
                        uint32_t srr_rel_offs;
                        uint16_t srr_ui;
                        uint16_t srr_ox_id;
-                       uint8_t  reserved_4[19];
+                       union {
+                               struct {
+                                       uint8_t node_name[8];
+                               } plogi; /* PLOGI/ADISC/PDISC */
+                               struct {
+                                       /* PRLI word 3 bit 0-15 */
+                                       uint16_t wd3_lo;
+                                       uint8_t resv0[6];
+                               } prli;
+                               struct {
+                                       uint8_t port_id[3];
+                                       uint8_t resv1;
+                                       uint16_t nport_handle;
+                                       uint16_t resv2;
+                               } req_els;
+                       } u;
+                       uint8_t port_name[8];
+                       uint8_t resv3[3];
                        uint8_t  vp_index;
                        uint32_t reserved_5;
                        uint8_t  port_id[3];
@@ -234,6 +251,7 @@ struct nack_to_isp {
        uint8_t  reserved[2];
        uint16_t ox_id;
 } __packed;
+#define NOTIFY_ACK_FLAGS_TERMINATE     BIT_3
 #define NOTIFY_ACK_SRR_FLAGS_ACCEPT    0
 #define NOTIFY_ACK_SRR_FLAGS_REJECT    1
 
@@ -790,13 +808,6 @@ int qla2x00_wait_for_hba_online(struct scsi_qla_host *);
 #define        FC_TM_REJECT                4
 #define FC_TM_FAILED                5
 
-/*
- * Error code of qlt_pre_xmit_response() meaning that cmd's exchange was
- * terminated, so no more actions is needed and success should be returned
- * to target.
- */
-#define QLA_TGT_PRE_XMIT_RESP_CMD_ABORTED      0x1717
-
 #if (BITS_PER_LONG > 32) || defined(CONFIG_HIGHMEM64G)
 #define pci_dma_lo32(a) (a & 0xffffffff)
 #define pci_dma_hi32(a) ((((a) >> 16)>>16) & 0xffffffff)
@@ -874,6 +885,15 @@ struct qla_tgt_sess_op {
        struct scsi_qla_host *vha;
        struct atio_from_isp atio;
        struct work_struct work;
+       struct list_head cmd_list;
+       bool aborted;
+};
+
+enum qla_sess_deletion {
+       QLA_SESS_DELETION_NONE          = 0,
+       QLA_SESS_DELETION_PENDING       = 1, /* hopefully we can get rid of
+                                             * this one */
+       QLA_SESS_DELETION_IN_PROGRESS   = 2,
 };
 
 /*
@@ -884,8 +904,15 @@ struct qla_tgt_sess {
        port_id_t s_id;
 
        unsigned int conf_compl_supported:1;
-       unsigned int deleted:1;
+       unsigned int deleted:2;
        unsigned int local:1;
+       unsigned int logout_on_delete:1;
+       unsigned int plogi_ack_needed:1;
+       unsigned int keep_nport_handle:1;
+
+       unsigned char logout_completed;
+
+       int generation;
 
        struct se_session *se_sess;
        struct scsi_qla_host *vha;
@@ -897,6 +924,10 @@ struct qla_tgt_sess {
 
        uint8_t port_name[WWN_SIZE];
        struct work_struct free_work;
+
+       union {
+               struct imm_ntfy_from_isp tm_iocb;
+       };
 };
 
 struct qla_tgt_cmd {
@@ -912,7 +943,6 @@ struct qla_tgt_cmd {
        unsigned int conf_compl_supported:1;
        unsigned int sg_mapped:1;
        unsigned int free_sg:1;
-       unsigned int aborted:1; /* Needed in case of SRR */
        unsigned int write_data_transferred:1;
        unsigned int ctx_dsd_alloced:1;
        unsigned int q_full:1;
@@ -961,6 +991,9 @@ struct qla_tgt_cmd {
         * BIT_14 - Back end data received/sent.
         * BIT_15 - SRR prepare ctio
         * BIT_16 - complete free
+        * BIT_17 - flush - qlt_abort_cmd_on_host_reset
+        * BIT_18 - completion w/abort status
+        * BIT_19 - completion w/unknown status
         */
        uint32_t cmd_flags;
 };
@@ -1026,6 +1059,10 @@ struct qla_tgt_srr_ctio {
        struct qla_tgt_cmd *cmd;
 };
 
+/* Check for Switch reserved address */
+#define IS_SW_RESV_ADDR(_s_id) \
+       ((_s_id.b.domain == 0xff) && (_s_id.b.area == 0xfc))
+
 #define QLA_TGT_XMIT_DATA              1
 #define QLA_TGT_XMIT_STATUS            2
 #define QLA_TGT_XMIT_ALL               (QLA_TGT_XMIT_STATUS|QLA_TGT_XMIT_DATA)
@@ -1043,7 +1080,7 @@ extern int qlt_lport_register(void *, u64, u64, u64,
 extern void qlt_lport_deregister(struct scsi_qla_host *);
 extern void qlt_unreg_sess(struct qla_tgt_sess *);
 extern void qlt_fc_port_added(struct scsi_qla_host *, fc_port_t *);
-extern void qlt_fc_port_deleted(struct scsi_qla_host *, fc_port_t *);
+extern void qlt_fc_port_deleted(struct scsi_qla_host *, fc_port_t *, int);
 extern int __init qlt_init(void);
 extern void qlt_exit(void);
 extern void qlt_update_vp_map(struct scsi_qla_host *, int);
@@ -1073,12 +1110,23 @@ static inline void qla_reverse_ini_mode(struct scsi_qla_host *ha)
                ha->host->active_mode |= MODE_INITIATOR;
 }
 
+static inline uint32_t sid_to_key(const uint8_t *s_id)
+{
+       uint32_t key;
+
+       key = (((unsigned long)s_id[0] << 16) |
+              ((unsigned long)s_id[1] << 8) |
+              (unsigned long)s_id[2]);
+       return key;
+}
+
 /*
  * Exported symbols from qla_target.c LLD logic used by qla2xxx code..
  */
 extern void qlt_response_pkt_all_vps(struct scsi_qla_host *, response_t *);
 extern int qlt_rdy_to_xfer(struct qla_tgt_cmd *);
 extern int qlt_xmit_response(struct qla_tgt_cmd *, int, uint8_t);
+extern void qlt_abort_cmd(struct qla_tgt_cmd *);
 extern void qlt_xmit_tm_rsp(struct qla_tgt_mgmt_cmd *);
 extern void qlt_free_mcmd(struct qla_tgt_mgmt_cmd *);
 extern void qlt_free_cmd(struct qla_tgt_cmd *cmd);
@@ -1109,5 +1157,7 @@ extern void qlt_stop_phase2(struct qla_tgt *);
 extern irqreturn_t qla83xx_msix_atio_q(int, void *);
 extern void qlt_83xx_iospace_config(struct qla_hw_data *);
 extern int qlt_free_qfull_cmds(struct scsi_qla_host *);
+extern void qlt_logo_completion_handler(fc_port_t *, int);
+extern void qlt_do_generation_tick(struct scsi_qla_host *, int *);
 
 #endif /* __QLA_TARGET_H */
index d9a8c6084346759778c5cd8506c42d4ead19e2cc..9224a06646e6af420139ae29cba4802cf2663637 100644 (file)
@@ -374,7 +374,7 @@ static int tcm_qla2xxx_write_pending(struct se_cmd *se_cmd)
 {
        struct qla_tgt_cmd *cmd = container_of(se_cmd,
                                struct qla_tgt_cmd, se_cmd);
-
+       cmd->cmd_flags |= BIT_3;
        cmd->bufflen = se_cmd->data_length;
        cmd->dma_data_direction = target_reverse_dma_direction(se_cmd);
 
@@ -405,7 +405,7 @@ static int tcm_qla2xxx_write_pending_status(struct se_cmd *se_cmd)
            se_cmd->t_state == TRANSPORT_COMPLETE_QF_WP) {
                spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);
                wait_for_completion_timeout(&se_cmd->t_transport_stop_comp,
-                                               3000);
+                                           3 * HZ);
                return 0;
        }
        spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);
@@ -541,12 +541,10 @@ static int tcm_qla2xxx_queue_data_in(struct se_cmd *se_cmd)
        cmd->cmd_flags |= BIT_4;
        cmd->bufflen = se_cmd->data_length;
        cmd->dma_data_direction = target_reverse_dma_direction(se_cmd);
-       cmd->aborted = (se_cmd->transport_state & CMD_T_ABORTED);
 
        cmd->sg_cnt = se_cmd->t_data_nents;
        cmd->sg = se_cmd->t_data_sg;
        cmd->offset = 0;
-       cmd->cmd_flags |= BIT_3;
 
        cmd->prot_sg_cnt = se_cmd->t_prot_nents;
        cmd->prot_sg = se_cmd->t_prot_sg;
@@ -571,7 +569,6 @@ static int tcm_qla2xxx_queue_status(struct se_cmd *se_cmd)
        cmd->sg_cnt = 0;
        cmd->offset = 0;
        cmd->dma_data_direction = target_reverse_dma_direction(se_cmd);
-       cmd->aborted = (se_cmd->transport_state & CMD_T_ABORTED);
        if (cmd->cmd_flags &  BIT_5) {
                pr_crit("Bit_5 already set for cmd = %p.\n", cmd);
                dump_stack();
@@ -636,14 +633,7 @@ static void tcm_qla2xxx_aborted_task(struct se_cmd *se_cmd)
 {
        struct qla_tgt_cmd *cmd = container_of(se_cmd,
                                struct qla_tgt_cmd, se_cmd);
-       struct scsi_qla_host *vha = cmd->vha;
-       struct qla_hw_data *ha = vha->hw;
-
-       if (!cmd->sg_mapped)
-               return;
-
-       pci_unmap_sg(ha->pdev, cmd->sg, cmd->sg_cnt, cmd->dma_data_direction);
-       cmd->sg_mapped = 0;
+       qlt_abort_cmd(cmd);
 }
 
 static void tcm_qla2xxx_clear_sess_lookup(struct tcm_qla2xxx_lport *,
@@ -1149,9 +1139,7 @@ static struct qla_tgt_sess *tcm_qla2xxx_find_sess_by_s_id(
                return NULL;
        }
 
-       key = (((unsigned long)s_id[0] << 16) |
-              ((unsigned long)s_id[1] << 8) |
-              (unsigned long)s_id[2]);
+       key = sid_to_key(s_id);
        pr_debug("find_sess_by_s_id: 0x%06x\n", key);
 
        se_nacl = btree_lookup32(&lport->lport_fcport_map, key);
@@ -1186,9 +1174,7 @@ static void tcm_qla2xxx_set_sess_by_s_id(
        void *slot;
        int rc;
 
-       key = (((unsigned long)s_id[0] << 16) |
-              ((unsigned long)s_id[1] << 8) |
-              (unsigned long)s_id[2]);
+       key = sid_to_key(s_id);
        pr_debug("set_sess_by_s_id: %06x\n", key);
 
        slot = btree_lookup32(&lport->lport_fcport_map, key);
@@ -1544,6 +1530,10 @@ static void tcm_qla2xxx_update_sess(struct qla_tgt_sess *sess, port_id_t s_id,
        }
 
        sess->conf_compl_supported = conf_compl_supported;
+
+       /* Reset logout parameters to default */
+       sess->logout_on_delete = 1;
+       sess->keep_nport_handle = 0;
 }
 
 /*
index 285f77544c36391a89b243001c3fc93d0703f46f..7dbbb29d24c6cf5290cd06a7a97df570a8860f18 100644 (file)
@@ -949,7 +949,7 @@ static int virtscsi_probe(struct virtio_device *vdev)
 {
        struct Scsi_Host *shost;
        struct virtio_scsi *vscsi;
-       int err, host_prot;
+       int err;
        u32 sg_elems, num_targets;
        u32 cmd_per_lun;
        u32 num_queues;
@@ -1009,6 +1009,8 @@ static int virtscsi_probe(struct virtio_device *vdev)
 
 #ifdef CONFIG_BLK_DEV_INTEGRITY
        if (virtio_has_feature(vdev, VIRTIO_SCSI_F_T10_PI)) {
+               int host_prot;
+
                host_prot = SHOST_DIF_TYPE1_PROTECTION | SHOST_DIF_TYPE2_PROTECTION |
                            SHOST_DIF_TYPE3_PROTECTION | SHOST_DIX_TYPE1_PROTECTION |
                            SHOST_DIX_TYPE2_PROTECTION | SHOST_DIX_TYPE3_PROTECTION;
index 0cae1694014dfbbaba171187fdbd4fc835dc1546..b0f30fb68914220ea75b401dd52b7d537c00687e 100644 (file)
@@ -612,7 +612,7 @@ config SPI_XTENSA_XTFPGA
 
 config SPI_ZYNQMP_GQSPI
        tristate "Xilinx ZynqMP GQSPI controller"
-       depends on SPI_MASTER
+       depends on SPI_MASTER && HAS_DMA
        help
          Enables Xilinx GQSPI controller driver for Zynq UltraScale+ MPSoC.
 
index 788e2b176a4f7707051bcc325538e7d2a6d599f4..acce90ac7371d58d94b47dc3cb1da029969fb95f 100644 (file)
@@ -40,6 +40,7 @@
 #define SPFI_CONTROL_SOFT_RESET                        BIT(11)
 #define SPFI_CONTROL_SEND_DMA                  BIT(10)
 #define SPFI_CONTROL_GET_DMA                   BIT(9)
+#define SPFI_CONTROL_SE                        BIT(8)
 #define SPFI_CONTROL_TMODE_SHIFT               5
 #define SPFI_CONTROL_TMODE_MASK                        0x7
 #define SPFI_CONTROL_TMODE_SINGLE              0
@@ -491,6 +492,7 @@ static void img_spfi_config(struct spi_master *master, struct spi_device *spi,
        else if (xfer->tx_nbits == SPI_NBITS_QUAD &&
                 xfer->rx_nbits == SPI_NBITS_QUAD)
                val |= SPFI_CONTROL_TMODE_QUAD << SPFI_CONTROL_TMODE_SHIFT;
+       val |= SPFI_CONTROL_SE;
        spfi_writel(spfi, val, SPFI_CONTROL);
 }
 
index eb7d3a6fb14c0694b55e5286933256c5962043ce..f9deb84e4e551bdd9c42c8ac204c80c18f0f8579 100644 (file)
@@ -201,8 +201,9 @@ static bool spi_imx_can_dma(struct spi_master *master, struct spi_device *spi,
 {
        struct spi_imx_data *spi_imx = spi_master_get_devdata(master);
 
-       if (spi_imx->dma_is_inited && (transfer->len > spi_imx->rx_wml)
-           && (transfer->len > spi_imx->tx_wml))
+       if (spi_imx->dma_is_inited
+           && transfer->len > spi_imx->rx_wml * sizeof(u32)
+           && transfer->len > spi_imx->tx_wml * sizeof(u32))
                return true;
        return false;
 }
index 87b20a511a6ba66cc4ba084df028a3c073691aa2..f23f36ebaf3dc700447da2e4da6e9b1b28e8ed82 100644 (file)
@@ -214,6 +214,7 @@ static void zynqmp_gqspi_selectslave(struct zynqmp_qspi *instanceptr,
        case GQSPI_SELECT_FLASH_CS_BOTH:
                instanceptr->genfifocs = GQSPI_GENFIFO_CS_LOWER |
                        GQSPI_GENFIFO_CS_UPPER;
+               break;
        case GQSPI_SELECT_FLASH_CS_UPPER:
                instanceptr->genfifocs = GQSPI_GENFIFO_CS_UPPER;
                break;
index dd616ff0ffc52542c3c8f8aaaf7c7d30bb9fd219..c7de64171c452325ab43884b501977a33f724b93 100644 (file)
@@ -693,6 +693,7 @@ static struct class *spidev_class;
 #ifdef CONFIG_OF
 static const struct of_device_id spidev_dt_ids[] = {
        { .compatible = "rohm,dh2228fv" },
+       { .compatible = "lineartechnology,ltc2488" },
        {},
 };
 MODULE_DEVICE_TABLE(of, spidev_dt_ids);
index 4e68b62193ed7781d3c32f4160878809acfc546d..cd77a064c772f1bbe897482d2372fe5bc6434328 100644 (file)
@@ -3998,7 +3998,13 @@ get_immediate:
        }
 
 transport_err:
-       iscsit_take_action_for_connection_exit(conn);
+       /*
+        * Avoid the normal connection failure code-path if this connection
+        * is still within LOGIN mode, and iscsi_np process context is
+        * responsible for cleaning up the early connection failure.
+        */
+       if (conn->conn_state != TARG_CONN_STATE_IN_LOGIN)
+               iscsit_take_action_for_connection_exit(conn);
 out:
        return 0;
 }
@@ -4082,7 +4088,7 @@ reject:
 
 int iscsi_target_rx_thread(void *arg)
 {
-       int ret;
+       int ret, rc;
        u8 buffer[ISCSI_HDR_LEN], opcode;
        u32 checksum = 0, digest = 0;
        struct iscsi_conn *conn = arg;
@@ -4092,10 +4098,16 @@ int iscsi_target_rx_thread(void *arg)
         * connection recovery / failure event can be triggered externally.
         */
        allow_signal(SIGINT);
+       /*
+        * Wait for iscsi_post_login_handler() to complete before allowing
+        * incoming iscsi/tcp socket I/O, and/or failing the connection.
+        */
+       rc = wait_for_completion_interruptible(&conn->rx_login_comp);
+       if (rc < 0)
+               return 0;
 
        if (conn->conn_transport->transport_type == ISCSI_INFINIBAND) {
                struct completion comp;
-               int rc;
 
                init_completion(&comp);
                rc = wait_for_completion_interruptible(&comp);
@@ -4532,7 +4544,18 @@ static void iscsit_logout_post_handler_closesession(
        struct iscsi_conn *conn)
 {
        struct iscsi_session *sess = conn->sess;
-       int sleep = cmpxchg(&conn->tx_thread_active, true, false);
+       int sleep = 1;
+       /*
+        * Traditional iscsi/tcp will invoke this logic from TX thread
+        * context during session logout, so clear tx_thread_active and
+        * sleep if iscsit_close_connection() has not already occurred.
+        *
+        * Since iser-target invokes this logic from its own workqueue,
+        * always sleep waiting for RX/TX thread shutdown to complete
+        * within iscsit_close_connection().
+        */
+       if (conn->conn_transport->transport_type == ISCSI_TCP)
+               sleep = cmpxchg(&conn->tx_thread_active, true, false);
 
        atomic_set(&conn->conn_logout_remove, 0);
        complete(&conn->conn_logout_comp);
@@ -4546,7 +4569,10 @@ static void iscsit_logout_post_handler_closesession(
 static void iscsit_logout_post_handler_samecid(
        struct iscsi_conn *conn)
 {
-       int sleep = cmpxchg(&conn->tx_thread_active, true, false);
+       int sleep = 1;
+
+       if (conn->conn_transport->transport_type == ISCSI_TCP)
+               sleep = cmpxchg(&conn->tx_thread_active, true, false);
 
        atomic_set(&conn->conn_logout_remove, 0);
        complete(&conn->conn_logout_comp);
@@ -4765,6 +4791,7 @@ int iscsit_release_sessions_for_tpg(struct iscsi_portal_group *tpg, int force)
        struct iscsi_session *sess;
        struct se_portal_group *se_tpg = &tpg->tpg_se_tpg;
        struct se_session *se_sess, *se_sess_tmp;
+       LIST_HEAD(free_list);
        int session_count = 0;
 
        spin_lock_bh(&se_tpg->session_lock);
@@ -4786,14 +4813,17 @@ int iscsit_release_sessions_for_tpg(struct iscsi_portal_group *tpg, int force)
                }
                atomic_set(&sess->session_reinstatement, 1);
                spin_unlock(&sess->conn_lock);
-               spin_unlock_bh(&se_tpg->session_lock);
 
-               iscsit_free_session(sess);
-               spin_lock_bh(&se_tpg->session_lock);
+               list_move_tail(&se_sess->sess_list, &free_list);
+       }
+       spin_unlock_bh(&se_tpg->session_lock);
+
+       list_for_each_entry_safe(se_sess, se_sess_tmp, &free_list, sess_list) {
+               sess = (struct iscsi_session *)se_sess->fabric_sess_ptr;
 
+               iscsit_free_session(sess);
                session_count++;
        }
-       spin_unlock_bh(&se_tpg->session_lock);
 
        pr_debug("Released %d iSCSI Session(s) from Target Portal"
                        " Group: %hu\n", session_count, tpg->tpgt);
index 3d0fe4ff55904d00a702958a82413a33873de888..7e8f65e5448fdbda5645e3d5a836ad9f81408efa 100644 (file)
@@ -82,6 +82,7 @@ static struct iscsi_login *iscsi_login_init_conn(struct iscsi_conn *conn)
        init_completion(&conn->conn_logout_comp);
        init_completion(&conn->rx_half_close_comp);
        init_completion(&conn->tx_half_close_comp);
+       init_completion(&conn->rx_login_comp);
        spin_lock_init(&conn->cmd_lock);
        spin_lock_init(&conn->conn_usage_lock);
        spin_lock_init(&conn->immed_queue_lock);
@@ -644,7 +645,7 @@ static void iscsi_post_login_start_timers(struct iscsi_conn *conn)
                iscsit_start_nopin_timer(conn);
 }
 
-static int iscsit_start_kthreads(struct iscsi_conn *conn)
+int iscsit_start_kthreads(struct iscsi_conn *conn)
 {
        int ret = 0;
 
@@ -679,6 +680,7 @@ static int iscsit_start_kthreads(struct iscsi_conn *conn)
 
        return 0;
 out_tx:
+       send_sig(SIGINT, conn->tx_thread, 1);
        kthread_stop(conn->tx_thread);
        conn->tx_thread_active = false;
 out_bitmap:
@@ -689,7 +691,7 @@ out_bitmap:
        return ret;
 }
 
-int iscsi_post_login_handler(
+void iscsi_post_login_handler(
        struct iscsi_np *np,
        struct iscsi_conn *conn,
        u8 zero_tsih)
@@ -699,7 +701,6 @@ int iscsi_post_login_handler(
        struct se_session *se_sess = sess->se_sess;
        struct iscsi_portal_group *tpg = sess->tpg;
        struct se_portal_group *se_tpg = &tpg->tpg_se_tpg;
-       int rc;
 
        iscsit_inc_conn_usage_count(conn);
 
@@ -739,10 +740,6 @@ int iscsi_post_login_handler(
                        sess->sess_ops->InitiatorName);
                spin_unlock_bh(&sess->conn_lock);
 
-               rc = iscsit_start_kthreads(conn);
-               if (rc)
-                       return rc;
-
                iscsi_post_login_start_timers(conn);
                /*
                 * Determine CPU mask to ensure connection's RX and TX kthreads
@@ -751,15 +748,20 @@ int iscsi_post_login_handler(
                iscsit_thread_get_cpumask(conn);
                conn->conn_rx_reset_cpumask = 1;
                conn->conn_tx_reset_cpumask = 1;
-
+               /*
+                * Wake up the sleeping iscsi_target_rx_thread() now that
+                * iscsi_conn is in TARG_CONN_STATE_LOGGED_IN state.
+                */
+               complete(&conn->rx_login_comp);
                iscsit_dec_conn_usage_count(conn);
+
                if (stop_timer) {
                        spin_lock_bh(&se_tpg->session_lock);
                        iscsit_stop_time2retain_timer(sess);
                        spin_unlock_bh(&se_tpg->session_lock);
                }
                iscsit_dec_session_usage_count(sess);
-               return 0;
+               return;
        }
 
        iscsi_set_session_parameters(sess->sess_ops, conn->param_list, 1);
@@ -800,10 +802,6 @@ int iscsi_post_login_handler(
                " iSCSI Target Portal Group: %hu\n", tpg->nsessions, tpg->tpgt);
        spin_unlock_bh(&se_tpg->session_lock);
 
-       rc = iscsit_start_kthreads(conn);
-       if (rc)
-               return rc;
-
        iscsi_post_login_start_timers(conn);
        /*
         * Determine CPU mask to ensure connection's RX and TX kthreads
@@ -812,10 +810,12 @@ int iscsi_post_login_handler(
        iscsit_thread_get_cpumask(conn);
        conn->conn_rx_reset_cpumask = 1;
        conn->conn_tx_reset_cpumask = 1;
-
+       /*
+        * Wake up the sleeping iscsi_target_rx_thread() now that
+        * iscsi_conn is in TARG_CONN_STATE_LOGGED_IN state.
+        */
+       complete(&conn->rx_login_comp);
        iscsit_dec_conn_usage_count(conn);
-
-       return 0;
 }
 
 static void iscsi_handle_login_thread_timeout(unsigned long data)
@@ -1380,23 +1380,12 @@ static int __iscsi_target_login_thread(struct iscsi_np *np)
        if (ret < 0)
                goto new_sess_out;
 
-       if (!conn->sess) {
-               pr_err("struct iscsi_conn session pointer is NULL!\n");
-               goto new_sess_out;
-       }
-
        iscsi_stop_login_thread_timer(np);
 
-       if (signal_pending(current))
-               goto new_sess_out;
-
        if (ret == 1) {
                tpg_np = conn->tpg_np;
 
-               ret = iscsi_post_login_handler(np, conn, zero_tsih);
-               if (ret < 0)
-                       goto new_sess_out;
-
+               iscsi_post_login_handler(np, conn, zero_tsih);
                iscsit_deaccess_np(np, tpg, tpg_np);
        }
 
index 1c7358081533ad1e3fb0533f424fa7749feda7d5..57aa0d0fd820f330c271836ecdc02c5a067179b2 100644 (file)
@@ -12,7 +12,8 @@ extern int iscsit_accept_np(struct iscsi_np *, struct iscsi_conn *);
 extern int iscsit_get_login_rx(struct iscsi_conn *, struct iscsi_login *);
 extern int iscsit_put_login_tx(struct iscsi_conn *, struct iscsi_login *, u32);
 extern void iscsit_free_conn(struct iscsi_np *, struct iscsi_conn *);
-extern int iscsi_post_login_handler(struct iscsi_np *, struct iscsi_conn *, u8);
+extern int iscsit_start_kthreads(struct iscsi_conn *);
+extern void iscsi_post_login_handler(struct iscsi_np *, struct iscsi_conn *, u8);
 extern void iscsi_target_login_sess_out(struct iscsi_conn *, struct iscsi_np *,
                                bool, bool);
 extern int iscsi_target_login_thread(void *);
index 8c02fa34716fae5a40dbf8cb09357bba5df2e7bd..f9cde91418367071d08c3a3ebe08dc44a1a1abe3 100644 (file)
@@ -17,6 +17,7 @@
  ******************************************************************************/
 
 #include <linux/ctype.h>
+#include <linux/kthread.h>
 #include <scsi/iscsi_proto.h>
 #include <target/target_core_base.h>
 #include <target/target_core_fabric.h>
@@ -361,10 +362,24 @@ static int iscsi_target_do_tx_login_io(struct iscsi_conn *conn, struct iscsi_log
                ntohl(login_rsp->statsn), login->rsp_length);
 
        padding = ((-login->rsp_length) & 3);
+       /*
+        * Before sending the last login response containing the transition
+        * bit for full-feature-phase, go ahead and start up TX/RX threads
+        * now to avoid potential resource allocation failures after the
+        * final login response has been sent.
+        */
+       if (login->login_complete) {
+               int rc = iscsit_start_kthreads(conn);
+               if (rc) {
+                       iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
+                                           ISCSI_LOGIN_STATUS_NO_RESOURCES);
+                       return -1;
+               }
+       }
 
        if (conn->conn_transport->iscsit_put_login_tx(conn, login,
                                        login->rsp_length + padding) < 0)
-               return -1;
+               goto err;
 
        login->rsp_length               = 0;
        mutex_lock(&sess->cmdsn_mutex);
@@ -373,6 +388,23 @@ static int iscsi_target_do_tx_login_io(struct iscsi_conn *conn, struct iscsi_log
        mutex_unlock(&sess->cmdsn_mutex);
 
        return 0;
+
+err:
+       if (login->login_complete) {
+               if (conn->rx_thread && conn->rx_thread_active) {
+                       send_sig(SIGINT, conn->rx_thread, 1);
+                       kthread_stop(conn->rx_thread);
+               }
+               if (conn->tx_thread && conn->tx_thread_active) {
+                       send_sig(SIGINT, conn->tx_thread, 1);
+                       kthread_stop(conn->tx_thread);
+               }
+               spin_lock(&iscsit_global->ts_bitmap_lock);
+               bitmap_release_region(iscsit_global->ts_bitmap, conn->bitmap_id,
+                                     get_order(1));
+               spin_unlock(&iscsit_global->ts_bitmap_lock);
+       }
+       return -1;
 }
 
 static void iscsi_target_sk_data_ready(struct sock *sk)
index 0b0de36474784987c781906243eabbed27ecab00..c2e9fea90b4a4bc16a0384d79fa9684c9f4176e0 100644 (file)
@@ -747,7 +747,7 @@ static ssize_t store_pi_prot_type(struct se_dev_attrib *da,
        if (!dev->transport->init_prot || !dev->transport->free_prot) {
                /* 0 is only allowed value for non-supporting backends */
                if (flag == 0)
-                       return 0;
+                       return count;
 
                pr_err("DIF protection not supported by backend: %s\n",
                       dev->transport->name);
@@ -1590,9 +1590,9 @@ static ssize_t target_core_dev_pr_store_attr_res_aptpl_metadata(
        u8 type = 0;
 
        if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)
-               return 0;
+               return count;
        if (dev->dev_reservation_flags & DRF_SPC2_RESERVATIONS)
-               return 0;
+               return count;
 
        if (dev->export_count) {
                pr_debug("Unable to process APTPL metadata while"
@@ -1658,22 +1658,32 @@ static ssize_t target_core_dev_pr_store_attr_res_aptpl_metadata(
                 * PR APTPL Metadata for Reservation
                 */
                case Opt_res_holder:
-                       match_int(args, &arg);
+                       ret = match_int(args, &arg);
+                       if (ret)
+                               goto out;
                        res_holder = arg;
                        break;
                case Opt_res_type:
-                       match_int(args, &arg);
+                       ret = match_int(args, &arg);
+                       if (ret)
+                               goto out;
                        type = (u8)arg;
                        break;
                case Opt_res_scope:
-                       match_int(args, &arg);
+                       ret = match_int(args, &arg);
+                       if (ret)
+                               goto out;
                        break;
                case Opt_res_all_tg_pt:
-                       match_int(args, &arg);
+                       ret = match_int(args, &arg);
+                       if (ret)
+                               goto out;
                        all_tg_pt = (int)arg;
                        break;
                case Opt_mapped_lun:
-                       match_int(args, &arg);
+                       ret = match_int(args, &arg);
+                       if (ret)
+                               goto out;
                        mapped_lun = (u64)arg;
                        break;
                /*
@@ -1701,14 +1711,20 @@ static ssize_t target_core_dev_pr_store_attr_res_aptpl_metadata(
                        }
                        break;
                case Opt_tpgt:
-                       match_int(args, &arg);
+                       ret = match_int(args, &arg);
+                       if (ret)
+                               goto out;
                        tpgt = (u16)arg;
                        break;
                case Opt_port_rtpi:
-                       match_int(args, &arg);
+                       ret = match_int(args, &arg);
+                       if (ret)
+                               goto out;
                        break;
                case Opt_target_lun:
-                       match_int(args, &arg);
+                       ret = match_int(args, &arg);
+                       if (ret)
+                               goto out;
                        target_lun = (u64)arg;
                        break;
                default:
@@ -1985,7 +2001,7 @@ static ssize_t target_core_store_alua_lu_gp(
 
        lu_gp_mem = dev->dev_alua_lu_gp_mem;
        if (!lu_gp_mem)
-               return 0;
+               return count;
 
        if (count > LU_GROUP_NAME_BUF) {
                pr_err("ALUA LU Group Alias too large!\n");
index 0fdbe43b7dad99479f7288584a0d95815c4dab72..5ab7100de17eb5694b162403ef43585c4bbceaf1 100644 (file)
@@ -1474,7 +1474,7 @@ core_scsi3_decode_spec_i_port(
        LIST_HEAD(tid_dest_list);
        struct pr_transport_id_holder *tidh_new, *tidh, *tidh_tmp;
        unsigned char *buf, *ptr, proto_ident;
-       const unsigned char *i_str;
+       const unsigned char *i_str = NULL;
        char *iport_ptr = NULL, i_buf[PR_REG_ISID_ID_LEN];
        sense_reason_t ret;
        u32 tpdl, tid_len = 0;
index 4703f403f31c0dd6cc9b4d31422bbc16387b20e7..384cf88944113892135b3ff253ed97bdc6b9c8b4 100644 (file)
@@ -333,6 +333,7 @@ static int rd_configure_device(struct se_device *dev)
        dev->dev_attrib.hw_block_size = RD_BLOCKSIZE;
        dev->dev_attrib.hw_max_sectors = UINT_MAX;
        dev->dev_attrib.hw_queue_depth = RD_MAX_DEVICE_QUEUE_DEPTH;
+       dev->dev_attrib.is_nonrot = 1;
 
        rd_dev->rd_dev_id = rd_host->rd_host_dev_id_count++;
 
index b0744433315a80496a84d8d6f49e01300471f463..b5ba1ec3c35476361103d7dca47a1934cdd3289f 100644 (file)
@@ -454,10 +454,17 @@ spc_emulate_evpd_86(struct se_cmd *cmd, unsigned char *buf)
                    cmd->se_sess->sess_prot_type == TARGET_DIF_TYPE1_PROT)
                        buf[4] = 0x5;
                else if (dev->dev_attrib.pi_prot_type == TARGET_DIF_TYPE3_PROT ||
-                       cmd->se_sess->sess_prot_type == TARGET_DIF_TYPE3_PROT)
+                        cmd->se_sess->sess_prot_type == TARGET_DIF_TYPE3_PROT)
                        buf[4] = 0x4;
        }
 
+       /* logical unit supports type 1 and type 3 protection */
+       if ((dev->transport->get_device_type(dev) == TYPE_DISK) &&
+           (sess->sup_prot_ops & (TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS)) &&
+           (dev->dev_attrib.pi_prot_type || cmd->se_sess->sess_prot_type)) {
+               buf[4] |= (0x3 << 3);
+       }
+
        /* Set HEADSUP, ORDSUP, SIMPSUP */
        buf[5] = 0x07;
 
index c9c27f69e101cfc9246a81ed1f187148b08193e6..ee8bfacf20716481a23a60325b5c99ba9da35508 100644 (file)
@@ -1108,19 +1108,29 @@ static void eraser(unsigned char c, struct tty_struct *tty)
  *     Locking: ctrl_lock
  */
 
-static void isig(int sig, struct tty_struct *tty)
+static void __isig(int sig, struct tty_struct *tty)
 {
-       struct n_tty_data *ldata = tty->disc_data;
        struct pid *tty_pgrp = tty_get_pgrp(tty);
        if (tty_pgrp) {
                kill_pgrp(tty_pgrp, sig, 1);
                put_pid(tty_pgrp);
        }
+}
 
-       if (!L_NOFLSH(tty)) {
+static void isig(int sig, struct tty_struct *tty)
+{
+       struct n_tty_data *ldata = tty->disc_data;
+
+       if (L_NOFLSH(tty)) {
+               /* signal only */
+               __isig(sig, tty);
+
+       } else { /* signal and flush */
                up_read(&tty->termios_rwsem);
                down_write(&tty->termios_rwsem);
 
+               __isig(sig, tty);
+
                /* clear echo buffer */
                mutex_lock(&ldata->output_lock);
                ldata->echo_head = ldata->echo_tail = 0;
index 76e65b714471d9eeb11ea12593adea987205daaa..15b4079a335e886ca62c422970c80ba8c93f9dfb 100644 (file)
@@ -1185,7 +1185,7 @@ config SERIAL_SC16IS7XX_CORE
 config SERIAL_SC16IS7XX
         tristate "SC16IS7xx serial support"
         select SERIAL_CORE
-        depends on I2C || SPI_MASTER
+        depends on (SPI_MASTER && !I2C) || I2C
         help
           This selects support for SC16IS7xx serial ports.
           Supported ICs are SC16IS740, SC16IS741, SC16IS750, SC16IS752,
index 50cf5b10ceed98022cbc44609acd280ef2beba81..fd27e986b1dd3437dfd2560ec11efd8def7bf254 100644 (file)
@@ -2310,8 +2310,8 @@ static int pl011_setup_port(struct device *dev, struct uart_amba_port *uap,
        void __iomem *base;
 
        base = devm_ioremap_resource(dev, mmiobase);
-       if (!base)
-               return -ENOMEM;
+       if (IS_ERR(base))
+               return PTR_ERR(base);
 
        index = pl011_probe_dt_alias(index, dev);
 
index a57301a6fe427027788aa2153547928597c71ed1..679709f51fd4cfe4739fc2edb433cc4badaf0227 100644 (file)
@@ -950,7 +950,7 @@ static int etraxfs_uart_remove(struct platform_device *pdev)
 
        port = platform_get_drvdata(pdev);
        uart_remove_one_port(&etraxfs_uart_driver, port);
-       etraxfs_uart_ports[pdev->id] = NULL;
+       etraxfs_uart_ports[port->line] = NULL;
 
        return 0;
 }
index 2c90dc31bfaabc0242e168dd688faadadd9f164a..54fdc7866ea17423836827ee374c1ecfd0b5257d 100644 (file)
@@ -1121,11 +1121,6 @@ static int imx_startup(struct uart_port *port)
 
        writel(temp & ~UCR4_DREN, sport->port.membase + UCR4);
 
-       /* Can we enable the DMA support? */
-       if (is_imx6q_uart(sport) && !uart_console(port) &&
-           !sport->dma_is_inited)
-               imx_uart_dma_init(sport);
-
        spin_lock_irqsave(&sport->port.lock, flags);
        /* Reset fifo's and state machines */
        i = 100;
@@ -1143,9 +1138,6 @@ static int imx_startup(struct uart_port *port)
        writel(USR1_RTSD, sport->port.membase + USR1);
        writel(USR2_ORE, sport->port.membase + USR2);
 
-       if (sport->dma_is_inited && !sport->dma_is_enabled)
-               imx_enable_dma(sport);
-
        temp = readl(sport->port.membase + UCR1);
        temp |= UCR1_RRDYEN | UCR1_RTSDEN | UCR1_UARTEN;
 
@@ -1316,6 +1308,11 @@ imx_set_termios(struct uart_port *port, struct ktermios *termios,
                        } else {
                                ucr2 |= UCR2_CTSC;
                        }
+
+                       /* Can we enable the DMA support? */
+                       if (is_imx6q_uart(sport) && !uart_console(port)
+                               && !sport->dma_is_inited)
+                               imx_uart_dma_init(sport);
                } else {
                        termios->c_cflag &= ~CRTSCTS;
                }
@@ -1432,6 +1429,8 @@ imx_set_termios(struct uart_port *port, struct ktermios *termios,
        if (UART_ENABLE_MS(&sport->port, termios->c_cflag))
                imx_enable_ms(&sport->port);
 
+       if (sport->dma_is_inited && !sport->dma_is_enabled)
+               imx_enable_dma(sport);
        spin_unlock_irqrestore(&sport->port.lock, flags);
 }
 
index 9e6576004a427e51cac6f1b59237248a706332f8..5ccc698cbbfa1ad9bde91200ea5907753fbb4b78 100644 (file)
@@ -354,6 +354,26 @@ static void sc16is7xx_port_write(struct uart_port *port, u8 reg, u8 val)
                     (reg << SC16IS7XX_REG_SHIFT) | port->line, val);
 }
 
+static void sc16is7xx_fifo_read(struct uart_port *port, unsigned int rxlen)
+{
+       struct sc16is7xx_port *s = dev_get_drvdata(port->dev);
+       u8 addr = (SC16IS7XX_RHR_REG << SC16IS7XX_REG_SHIFT) | port->line;
+
+       regcache_cache_bypass(s->regmap, true);
+       regmap_raw_read(s->regmap, addr, s->buf, rxlen);
+       regcache_cache_bypass(s->regmap, false);
+}
+
+static void sc16is7xx_fifo_write(struct uart_port *port, u8 to_send)
+{
+       struct sc16is7xx_port *s = dev_get_drvdata(port->dev);
+       u8 addr = (SC16IS7XX_THR_REG << SC16IS7XX_REG_SHIFT) | port->line;
+
+       regcache_cache_bypass(s->regmap, true);
+       regmap_raw_write(s->regmap, addr, s->buf, to_send);
+       regcache_cache_bypass(s->regmap, false);
+}
+
 static void sc16is7xx_port_update(struct uart_port *port, u8 reg,
                                  u8 mask, u8 val)
 {
@@ -508,10 +528,7 @@ static void sc16is7xx_handle_rx(struct uart_port *port, unsigned int rxlen,
                        s->buf[0] = sc16is7xx_port_read(port, SC16IS7XX_RHR_REG);
                        bytes_read = 1;
                } else {
-                       regcache_cache_bypass(s->regmap, true);
-                       regmap_raw_read(s->regmap, SC16IS7XX_RHR_REG,
-                                       s->buf, rxlen);
-                       regcache_cache_bypass(s->regmap, false);
+                       sc16is7xx_fifo_read(port, rxlen);
                        bytes_read = rxlen;
                }
 
@@ -591,9 +608,8 @@ static void sc16is7xx_handle_tx(struct uart_port *port)
                        s->buf[i] = xmit->buf[xmit->tail];
                        xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
                }
-               regcache_cache_bypass(s->regmap, true);
-               regmap_raw_write(s->regmap, SC16IS7XX_THR_REG, s->buf, to_send);
-               regcache_cache_bypass(s->regmap, false);
+
+               sc16is7xx_fifo_write(port, to_send);
        }
 
        if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
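The two helpers introduced above factor out a common regmap idiom: bulk FIFO transfers repeatedly hit the same register address, so the register cache has to be bypassed for the duration of the raw access. A standalone sketch of that pattern, using a hypothetical FIFO register:

#include <linux/regmap.h>

#define DEMO_FIFO_REG	0x00	/* hypothetical FIFO register address */

/* Drain rxlen bytes from a hardware FIFO exposed as a single register. */
static int demo_fifo_read(struct regmap *map, u8 *buf, size_t rxlen)
{
	int ret;

	/* Bypass the cache so every byte really comes from the bus. */
	regcache_cache_bypass(map, true);
	ret = regmap_raw_read(map, DEMO_FIFO_REG, buf, rxlen);
	regcache_cache_bypass(map, false);

	return ret;
}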
index 7ae1592f7ec9731c4dab1d3a12b980420aade634..f36852067f20e61ebaf67269ec08c37ea342c06a 100644 (file)
@@ -1418,7 +1418,7 @@ static void uart_close(struct tty_struct *tty, struct file *filp)
        mutex_lock(&port->mutex);
        uart_shutdown(tty, state);
        tty_port_tty_set(port, NULL);
-       tty->closing = 0;
+
        spin_lock_irqsave(&port->lock, flags);
 
        if (port->blocked_open) {
@@ -1444,6 +1444,7 @@ static void uart_close(struct tty_struct *tty, struct file *filp)
        mutex_unlock(&port->mutex);
 
        tty_ldisc_flush(tty);
+       tty->closing = 0;
 }
 
 static void uart_wait_until_sent(struct tty_struct *tty, int timeout)
index ea27804d87af9ec2485b1043f8ad97b30c05713f..381a2b13682c1a587a81e9bab781bcccdc669332 100644 (file)
@@ -356,6 +356,7 @@ int paste_selection(struct tty_struct *tty)
                        schedule();
                        continue;
                }
+               __set_current_state(TASK_RUNNING);
                count = sel_buffer_lth - pasted;
                count = tty_ldisc_receive_buf(ld, sel_buffer + pasted, NULL,
                                              count);
index 8fe52989b380155926c6865721e9ba16e2a60685..4462d167900c515abbf205b2b6134016c0933390 100644 (file)
@@ -742,6 +742,8 @@ static void visual_init(struct vc_data *vc, int num, int init)
        __module_get(vc->vc_sw->owner);
        vc->vc_num = num;
        vc->vc_display_fg = &master_display_fg;
+       if (vc->vc_uni_pagedir_loc)
+               con_free_unimap(vc);
        vc->vc_uni_pagedir_loc = &vc->vc_uni_pagedir;
        vc->vc_uni_pagedir = NULL;
        vc->vc_hi_font_mask = 0;
index 519a77ba214cce4c4580b54856c74ba9e3af5342..b30e7423549b04b0e7d442a128048b4221c68062 100644 (file)
@@ -1944,6 +1944,7 @@ static void __exit acm_exit(void)
        usb_deregister(&acm_driver);
        tty_unregister_driver(acm_tty_driver);
        put_tty_driver(acm_tty_driver);
+       idr_destroy(&acm_minors);
 }
 
 module_init(acm_init);
index 0e6f968e93fe8a9f31f7de7199fa778b60b2f900..01c0c0477a9e93d1c8e1150b80763f5ee6c90a02 100644 (file)
@@ -242,7 +242,7 @@ static int __init ulpi_init(void)
 {
        return bus_register(&ulpi_bus);
 }
-module_init(ulpi_init);
+subsys_initcall(ulpi_init);
 
 static void __exit ulpi_exit(void)
 {
index be5b2074f9066a8c9ca3e79289117f67d52b1063..cbcd0920fb5121ba44bd7c87de9a3b3fdd9f51d6 100644 (file)
@@ -1022,9 +1022,12 @@ static int register_root_hub(struct usb_hcd *hcd)
                                dev_name(&usb_dev->dev), retval);
                return (retval < 0) ? retval : -EMSGSIZE;
        }
-       if (usb_dev->speed == USB_SPEED_SUPER) {
+
+       if (le16_to_cpu(usb_dev->descriptor.bcdUSB) >= 0x0201) {
                retval = usb_get_bos_descriptor(usb_dev);
-               if (retval < 0) {
+               if (!retval) {
+                       usb_dev->lpm_capable = usb_device_supports_lpm(usb_dev);
+               } else if (usb_dev->speed == USB_SPEED_SUPER) {
                        mutex_unlock(&usb_bus_list_lock);
                        dev_dbg(parent_dev, "can't read %s bos descriptor %d\n",
                                        dev_name(&usb_dev->dev), retval);
index 43cb2f2e3b4375aee6c362e8b08d5f2b71865d8f..73dfa194160b78fba6bec233b667b00bceac6cb1 100644 (file)
@@ -122,7 +122,7 @@ struct usb_hub *usb_hub_to_struct_hub(struct usb_device *hdev)
        return usb_get_intfdata(hdev->actconfig->interface[0]);
 }
 
-static int usb_device_supports_lpm(struct usb_device *udev)
+int usb_device_supports_lpm(struct usb_device *udev)
 {
        /* USB 2.1 (and greater) devices indicate LPM support through
         * their USB 2.0 Extended Capabilities BOS descriptor.
index 7eb1e26798e5f293a3f2bc508dd268bbecdb63f1..457255a3306a3c2837674d22aa9560bf30842220 100644 (file)
@@ -65,6 +65,7 @@ extern int  usb_hub_init(void);
 extern void usb_hub_cleanup(void);
 extern int usb_major_init(void);
 extern void usb_major_cleanup(void);
+extern int usb_device_supports_lpm(struct usb_device *udev);
 
 #ifdef CONFIG_PM
 
index 2ef3c8d6a9dbd3b5b8270cb5af230d793e0d7230..69e769c35cf5dc798fb34a6924c19726690592a2 100644 (file)
@@ -727,6 +727,10 @@ static int dwc3_ep0_std_request(struct dwc3 *dwc, struct usb_ctrlrequest *ctrl)
                dwc3_trace(trace_dwc3_ep0, "USB_REQ_SET_ISOCH_DELAY");
                ret = dwc3_ep0_set_isoch_delay(dwc, ctrl);
                break;
+       case USB_REQ_SET_INTERFACE:
+               dwc3_trace(trace_dwc3_ep0, "USB_REQ_SET_INTERFACE");
+               dwc->start_config_issued = false;
+               /* Fall through */
        default:
                dwc3_trace(trace_dwc3_ep0, "Forwarding to gadget driver");
                ret = dwc3_ep0_delegate_req(dwc, ctrl);
index d32160d6463f5fd3ca16442cdb37f0f0289fb5dc..5da37c957b53ce34bd9820ebfd57b9f20a5815c4 100644 (file)
@@ -2167,7 +2167,7 @@ static int mv_udc_probe(struct platform_device *pdev)
                return -ENODEV;
        }
 
-       udc->phy_regs = ioremap(r->start, resource_size(r));
+       udc->phy_regs = devm_ioremap(&pdev->dev, r->start, resource_size(r));
        if (udc->phy_regs == NULL) {
                dev_err(&pdev->dev, "failed to map phy I/O memory\n");
                return -EBUSY;
index d69c35558f6852beecd1bfc85acee35c26781ec8..362ee8af5fce87df4a2e7779f743c2ec722d9a5c 100644 (file)
@@ -60,13 +60,15 @@ static DEFINE_MUTEX(udc_lock);
 int usb_gadget_map_request(struct usb_gadget *gadget,
                struct usb_request *req, int is_in)
 {
+       struct device *dev = gadget->dev.parent;
+
        if (req->length == 0)
                return 0;
 
        if (req->num_sgs) {
                int     mapped;
 
-               mapped = dma_map_sg(&gadget->dev, req->sg, req->num_sgs,
+               mapped = dma_map_sg(dev, req->sg, req->num_sgs,
                                is_in ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
                if (mapped == 0) {
                        dev_err(&gadget->dev, "failed to map SGs\n");
@@ -75,11 +77,11 @@ int usb_gadget_map_request(struct usb_gadget *gadget,
 
                req->num_mapped_sgs = mapped;
        } else {
-               req->dma = dma_map_single(&gadget->dev, req->buf, req->length,
+               req->dma = dma_map_single(dev, req->buf, req->length,
                                is_in ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
 
-               if (dma_mapping_error(&gadget->dev, req->dma)) {
-                       dev_err(&gadget->dev, "failed to map buffer\n");
+               if (dma_mapping_error(dev, req->dma)) {
+                       dev_err(dev, "failed to map buffer\n");
                        return -EFAULT;
                }
        }
@@ -95,12 +97,12 @@ void usb_gadget_unmap_request(struct usb_gadget *gadget,
                return;
 
        if (req->num_mapped_sgs) {
-               dma_unmap_sg(&gadget->dev, req->sg, req->num_mapped_sgs,
+               dma_unmap_sg(gadget->dev.parent, req->sg, req->num_mapped_sgs,
                                is_in ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
 
                req->num_mapped_sgs = 0;
        } else {
-               dma_unmap_single(&gadget->dev, req->dma, req->length,
+               dma_unmap_single(gadget->dev.parent, req->dma, req->length,
                                is_in ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
        }
 }
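The hunks above switch the DMA mapping to gadget->dev.parent, the physical UDC device that actually owns the DMA configuration, instead of the logical gadget device. A short sketch of pairing dma_map_single()/dma_mapping_error()/dma_unmap_single() against that parent device (function names here are illustrative only):

#include <linux/dma-mapping.h>
#include <linux/usb/gadget.h>

/* Map a request buffer for an IN transfer on the controller's parent device. */
static int demo_map_buf(struct usb_gadget *gadget, struct usb_request *req)
{
	struct device *dev = gadget->dev.parent;	/* the DMA-capable device */

	req->dma = dma_map_single(dev, req->buf, req->length, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, req->dma))
		return -EFAULT;
	return 0;
}

static void demo_unmap_buf(struct usb_gadget *gadget, struct usb_request *req)
{
	dma_unmap_single(gadget->dev.parent, req->dma, req->length,
			 DMA_TO_DEVICE);
}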
index f7d561ed3c236290aa90334b90bd52cbfa9f7632..d029bbe9eb36a884fed45fd6f648bfca51199d65 100644 (file)
@@ -981,10 +981,6 @@ rescan_all:
                int                     completed, modified;
                __hc32                  *prev;
 
-               /* Is this ED already invisible to the hardware? */
-               if (ed->state == ED_IDLE)
-                       goto ed_idle;
-
                /* only take off EDs that the HC isn't using, accounting for
                 * frame counter wraps and EDs with partially retired TDs
                 */
@@ -1012,12 +1008,10 @@ skip_ed:
                }
 
                /* ED's now officially unlinked, hc doesn't see */
-               ed->state = ED_IDLE;
                ed->hwHeadP &= ~cpu_to_hc32(ohci, ED_H);
                ed->hwNextED = 0;
                wmb();
                ed->hwINFO &= ~cpu_to_hc32(ohci, ED_SKIP | ED_DEQUEUE);
-ed_idle:
 
                /* reentrancy:  if we drop the schedule lock, someone might
                 * have modified this list.  normally it's just prepending
@@ -1088,6 +1082,7 @@ rescan_this:
                if (list_empty(&ed->td_list)) {
                        *last = ed->ed_next;
                        ed->ed_next = NULL;
+                       ed->state = ED_IDLE;
                        list_del(&ed->in_use_list);
                } else if (ohci->rh_state == OHCI_RH_RUNNING) {
                        *last = ed->ed_next;
index e9a6eec39142584032f777aa6101c00604b5ccf5..cfcfadfc94fc25b8e10d788e59514c146d32ccc1 100644 (file)
@@ -58,7 +58,7 @@
 #define CCR_PM_CKRNEN    0x0002
 #define CCR_PM_USBPW1    0x0004
 #define CCR_PM_USBPW2    0x0008
-#define CCR_PM_USBPW3    0x0008
+#define CCR_PM_USBPW3    0x0010
 #define CCR_PM_PMEE      0x0100
 #define CCR_PM_PMES      0x8000
 
index e75c565feb53ef3022312c047268367de71ae059..78241b5550df877fb09189936867f7821e09d11a 100644 (file)
@@ -484,10 +484,13 @@ static void xhci_hub_report_usb3_link_state(struct xhci_hcd *xhci,
        u32 pls = status_reg & PORT_PLS_MASK;
 
        /* resume state is a xHCI internal state.
-        * Do not report it to usb core.
+        * Do not report it to usb core; instead, report U3 so that
+        * usb core knows the port is not ready for transfer.
         */
-       if (pls == XDEV_RESUME)
+       if (pls == XDEV_RESUME) {
+               *status |= USB_SS_PORT_LS_U3;
                return;
+       }
 
        /* When the CAS bit is set then warm reset
         * should be performed on port
@@ -588,7 +591,14 @@ static u32 xhci_get_port_status(struct usb_hcd *hcd,
                status |= USB_PORT_STAT_C_RESET << 16;
        /* USB3.0 only */
        if (hcd->speed == HCD_USB3) {
-               if ((raw_port_status & PORT_PLC))
+               /* Port link change with port in resume state should not be
+                * reported to usbcore, as this is an internal state to be
+                * handled by the xhci driver. Reporting PLC to usbcore may
+                * cause usbcore to clear PLC first, so the port change event
+                * irq won't be generated.
+                */
+               if ((raw_port_status & PORT_PLC) &&
+                       (raw_port_status & PORT_PLS_MASK) != XDEV_RESUME)
                        status |= USB_PORT_STAT_C_LINK_STATE << 16;
                if ((raw_port_status & PORT_WRC))
                        status |= USB_PORT_STAT_C_BH_RESET << 16;
@@ -1120,10 +1130,10 @@ int xhci_bus_suspend(struct usb_hcd *hcd)
        spin_lock_irqsave(&xhci->lock, flags);
 
        if (hcd->self.root_hub->do_remote_wakeup) {
-               if (bus_state->resuming_ports) {
+               if (bus_state->resuming_ports ||        /* USB2 */
+                   bus_state->port_remote_wakeup) {    /* USB3 */
                        spin_unlock_irqrestore(&xhci->lock, flags);
-                       xhci_dbg(xhci, "suspend failed because "
-                                               "a port is resuming\n");
+                       xhci_dbg(xhci, "suspend failed because a port is resuming\n");
                        return -EBUSY;
                }
        }
index f8336408ef07c4354ad54c43e988b6b43272eea0..3e442f77a2b9367c5bd90d2ab69beb2db841fd26 100644 (file)
@@ -1427,10 +1427,10 @@ int xhci_endpoint_init(struct xhci_hcd *xhci,
                /* Attempt to use the ring cache */
                if (virt_dev->num_rings_cached == 0)
                        return -ENOMEM;
+               virt_dev->num_rings_cached--;
                virt_dev->eps[ep_index].new_ring =
                        virt_dev->ring_cache[virt_dev->num_rings_cached];
                virt_dev->ring_cache[virt_dev->num_rings_cached] = NULL;
-               virt_dev->num_rings_cached--;
                xhci_reinit_cached_ring(xhci, virt_dev->eps[ep_index].new_ring,
                                        1, type);
        }
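The reordering above treats the ring cache as a stack: the counter must be decremented before it is used as an index, otherwise the code reads one slot past the last cached entry. A tiny generic illustration of the same pop ordering:

/* Pop the most recently cached entry from a fixed-size cache array. */
void *cache_pop(void **cache, unsigned int *count)
{
	void *entry;

	if (*count == 0)
		return NULL;

	(*count)--;		/* decrement first ... */
	entry = cache[*count];	/* ... so we index the last valid slot */
	cache[*count] = NULL;
	return entry;
}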
index 4a4cb1d91ac8465d5ff274f5ded1a62e299cf92f..5590eac2b22df26ea4150d7bb8a8e1eeb2406ae3 100644 (file)
 #include <linux/pci.h>
 #include <linux/slab.h>
 #include <linux/module.h>
+#include <linux/acpi.h>
 
 #include "xhci.h"
 #include "xhci-trace.h"
 
+#define PORT2_SSIC_CONFIG_REG2 0x883c
+#define PROG_DONE              (1 << 30)
+#define SSIC_PORT_UNUSED       (1 << 31)
+
 /* Device for a quirk */
 #define PCI_VENDOR_ID_FRESCO_LOGIC     0x1b73
 #define PCI_DEVICE_ID_FRESCO_LOGIC_PDK 0x1000
@@ -176,20 +181,63 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
 }
 
 /*
+ * In some Intel xHCI controllers, in order to get D3 working, the SSIC
+ * port needs to be marked as "unused" via a vendor-specific SSIC CONFIG
+ * register at offset 0x883c before putting xHCI into D3. After D3 exit,
+ * the SSIC port needs to be marked as "used" again.
+ * Without this change, xHCI might not enter D3 state.
  * Make sure PME works on some Intel xHCI controllers by writing 1 to clear
  * the Internal PME flag bit in vendor specific PMCTRL register at offset 0x80a4
  */
-static void xhci_pme_quirk(struct xhci_hcd *xhci)
+static void xhci_pme_quirk(struct usb_hcd *hcd, bool suspend)
 {
+       struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+       struct pci_dev          *pdev = to_pci_dev(hcd->self.controller);
        u32 val;
        void __iomem *reg;
 
+       if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
+                pdev->device == PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI) {
+
+               reg = (void __iomem *) xhci->cap_regs + PORT2_SSIC_CONFIG_REG2;
+
+               /* Notify SSIC that SSIC profile programming is not done */
+               val = readl(reg) & ~PROG_DONE;
+               writel(val, reg);
+
+               /* Mark SSIC port as unused(suspend) or used(resume) */
+               val = readl(reg);
+               if (suspend)
+                       val |= SSIC_PORT_UNUSED;
+               else
+                       val &= ~SSIC_PORT_UNUSED;
+               writel(val, reg);
+
+               /* Notify SSIC that SSIC profile programming is done */
+               val = readl(reg) | PROG_DONE;
+               writel(val, reg);
+               readl(reg);
+       }
+
        reg = (void __iomem *) xhci->cap_regs + 0x80a4;
        val = readl(reg);
        writel(val | BIT(28), reg);
        readl(reg);
 }
 
+#ifdef CONFIG_ACPI
+static void xhci_pme_acpi_rtd3_enable(struct pci_dev *dev)
+{
+       static const u8 intel_dsm_uuid[] = {
+               0xb7, 0x0c, 0x34, 0xac, 0x01, 0xe9, 0xbf, 0x45,
+               0xb7, 0xe6, 0x2b, 0x34, 0xec, 0x93, 0x1e, 0x23,
+       };
+       acpi_evaluate_dsm(ACPI_HANDLE(&dev->dev), intel_dsm_uuid, 3, 1, NULL);
+}
+#else
+static void xhci_pme_acpi_rtd3_enable(struct pci_dev *dev) { }
+#endif /* CONFIG_ACPI */
+
 /* called during probe() after chip reset completes */
 static int xhci_pci_setup(struct usb_hcd *hcd)
 {
@@ -263,6 +311,9 @@ static int xhci_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
                        HCC_MAX_PSA(xhci->hcc_params) >= 4)
                xhci->shared_hcd->can_do_streams = 1;
 
+       if (xhci->quirks & XHCI_PME_STUCK_QUIRK)
+               xhci_pme_acpi_rtd3_enable(dev);
+
        /* USB-2 and USB-3 roothubs initialized, allow runtime pm suspend */
        pm_runtime_put_noidle(&dev->dev);
 
@@ -307,7 +358,7 @@ static int xhci_pci_suspend(struct usb_hcd *hcd, bool do_wakeup)
                pdev->no_d3cold = true;
 
        if (xhci->quirks & XHCI_PME_STUCK_QUIRK)
-               xhci_pme_quirk(xhci);
+               xhci_pme_quirk(hcd, true);
 
        return xhci_suspend(xhci, do_wakeup);
 }
@@ -340,7 +391,7 @@ static int xhci_pci_resume(struct usb_hcd *hcd, bool hibernated)
                usb_enable_intel_xhci_ports(pdev);
 
        if (xhci->quirks & XHCI_PME_STUCK_QUIRK)
-               xhci_pme_quirk(xhci);
+               xhci_pme_quirk(hcd, false);
 
        retval = xhci_resume(xhci, hibernated);
        return retval;
index 94416ff7081071e3f32a682493e6a34bb8d359a8..6a8fc52aed5863391885ac11c2661a407f707a30 100644 (file)
@@ -1546,6 +1546,9 @@ static void handle_port_status(struct xhci_hcd *xhci,
                usb_hcd_resume_root_hub(hcd);
        }
 
+       if (hcd->speed == HCD_USB3 && (temp & PORT_PLS_MASK) == XDEV_INACTIVE)
+               bus_state->port_remote_wakeup &= ~(1 << faked_port_index);
+
        if ((temp & PORT_PLC) && (temp & PORT_PLS_MASK) == XDEV_RESUME) {
                xhci_dbg(xhci, "port resume event for port %d\n", port_id);
 
index 7da0d6043d33e13afa3ce4db45d5dfa54b4d3d22..526ebc0c7e720b9d766bcf6abf1bc65672e584bb 100644 (file)
@@ -3453,6 +3453,9 @@ int xhci_discover_or_reset_device(struct usb_hcd *hcd, struct usb_device *udev)
                        return -EINVAL;
        }
 
+       if (virt_dev->tt_info)
+               old_active_eps = virt_dev->tt_info->active_eps;
+
        if (virt_dev->udev != udev) {
                /* If the virt_dev and the udev does not match, this virt_dev
                 * may belong to another udev.
index 31e46cc55807a83a69f9e6e8ce9e89f176786215..ed2ebf647c380ebbdfe647544fb137283219cf87 100644 (file)
@@ -285,6 +285,7 @@ struct xhci_op_regs {
 #define XDEV_U0                (0x0 << 5)
 #define XDEV_U2                (0x2 << 5)
 #define XDEV_U3                (0x3 << 5)
+#define XDEV_INACTIVE  (0x6 << 5)
 #define XDEV_RESUME    (0xf << 5)
 /* true: port has power (see HCC_PPC) */
 #define PORT_POWER     (1 << 9)
index caf188800c679e7f24fc329903848a8c1f64d41a..6b2479123de7762f7145c93fdcd58efb11f51093 100644 (file)
@@ -2065,6 +2065,18 @@ UNUSUAL_DEV( 0x1908, 0x3335, 0x0200, 0x0200,
                USB_SC_DEVICE, USB_PR_DEVICE, NULL,
                US_FL_NO_READ_DISC_INFO ),
 
+/* Reported by Oliver Neukum <oneukum@suse.com>
+ * This device morphs spontaneously into another device if the access
+ * pattern of Windows isn't followed. Thus writable media would be dirty
+ * if the initial instance is used. So the device is limited to its
+ * virtual CD.
+ * And yes, the concept that BCD goes up to 9 is not heeded */
+UNUSUAL_DEV( 0x19d2, 0x1225, 0x0000, 0xffff,
+               "ZTE,Incorporated",
+               "ZTE WCDMA Technologies MSM",
+               USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+               US_FL_SINGLE_LUN ),
+
 /* Reported by Sven Geggus <sven-usbst@geggus.net>
  * This encrypted pen drive returns bogus data for the initial READ(10).
  */
@@ -2074,6 +2086,17 @@ UNUSUAL_DEV(  0x1b1c, 0x1ab5, 0x0200, 0x0200,
                USB_SC_DEVICE, USB_PR_DEVICE, NULL,
                US_FL_INITIAL_READ10 ),
 
+/* Reported by Hans de Goede <hdegoede@redhat.com>
+ * These are mini projectors using USB for both power and video data transport.
+ * The usb-storage interface is a virtual Windows driver CD, which the gm12u320
+ * driver automatically converts into framebuffer & KMS DRI device nodes.
+ */
+UNUSUAL_DEV( 0x1de1, 0xc102, 0x0000, 0xffff,
+               "Grain-media Technology Corp.",
+               "USB3.0 Device GM12U320",
+               USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+               US_FL_IGNORE_DEVICE ),
+
 /* Patch by Richard Schütz <r.schtz@t-online.de>
  * This external hard drive enclosure uses a JMicron chip which
  * needs the US_FL_IGNORE_RESIDUE flag to work properly. */
index 2fb29dfeffbd7fa70a8a881f025d73ffd50b0b25..563c510f285c47d2a7362a4da8729fdc38e1dee0 100644 (file)
@@ -689,6 +689,23 @@ struct vfio_device *vfio_device_get_from_dev(struct device *dev)
 }
 EXPORT_SYMBOL_GPL(vfio_device_get_from_dev);
 
+static struct vfio_device *vfio_device_get_from_name(struct vfio_group *group,
+                                                    char *buf)
+{
+       struct vfio_device *device;
+
+       mutex_lock(&group->device_lock);
+       list_for_each_entry(device, &group->device_list, group_next) {
+               if (!strcmp(dev_name(device->dev), buf)) {
+                       vfio_device_get(device);
+                       break;
+               }
+       }
+       mutex_unlock(&group->device_lock);
+
+       return device;
+}
+
 /*
  * Caller must hold a reference to the vfio_device
  */
@@ -1198,53 +1215,53 @@ static int vfio_group_get_device_fd(struct vfio_group *group, char *buf)
 {
        struct vfio_device *device;
        struct file *filep;
-       int ret = -ENODEV;
+       int ret;
 
        if (0 == atomic_read(&group->container_users) ||
            !group->container->iommu_driver || !vfio_group_viable(group))
                return -EINVAL;
 
-       mutex_lock(&group->device_lock);
-       list_for_each_entry(device, &group->device_list, group_next) {
-               if (strcmp(dev_name(device->dev), buf))
-                       continue;
+       device = vfio_device_get_from_name(group, buf);
+       if (!device)
+               return -ENODEV;
 
-               ret = device->ops->open(device->device_data);
-               if (ret)
-                       break;
-               /*
-                * We can't use anon_inode_getfd() because we need to modify
-                * the f_mode flags directly to allow more than just ioctls
-                */
-               ret = get_unused_fd_flags(O_CLOEXEC);
-               if (ret < 0) {
-                       device->ops->release(device->device_data);
-                       break;
-               }
+       ret = device->ops->open(device->device_data);
+       if (ret) {
+               vfio_device_put(device);
+               return ret;
+       }
 
-               filep = anon_inode_getfile("[vfio-device]", &vfio_device_fops,
-                                          device, O_RDWR);
-               if (IS_ERR(filep)) {
-                       put_unused_fd(ret);
-                       ret = PTR_ERR(filep);
-                       device->ops->release(device->device_data);
-                       break;
-               }
+       /*
+        * We can't use anon_inode_getfd() because we need to modify
+        * the f_mode flags directly to allow more than just ioctls
+        */
+       ret = get_unused_fd_flags(O_CLOEXEC);
+       if (ret < 0) {
+               device->ops->release(device->device_data);
+               vfio_device_put(device);
+               return ret;
+       }
 
-               /*
-                * TODO: add an anon_inode interface to do this.
-                * Appears to be missing by lack of need rather than
-                * explicitly prevented.  Now there's need.
-                */
-               filep->f_mode |= (FMODE_LSEEK | FMODE_PREAD | FMODE_PWRITE);
+       filep = anon_inode_getfile("[vfio-device]", &vfio_device_fops,
+                                  device, O_RDWR);
+       if (IS_ERR(filep)) {
+               put_unused_fd(ret);
+               ret = PTR_ERR(filep);
+               device->ops->release(device->device_data);
+               vfio_device_put(device);
+               return ret;
+       }
+
+       /*
+        * TODO: add an anon_inode interface to do this.
+        * Appears to be missing by lack of need rather than
+        * explicitly prevented.  Now there's need.
+        */
+       filep->f_mode |= (FMODE_LSEEK | FMODE_PREAD | FMODE_PWRITE);
 
-               vfio_device_get(device);
-               atomic_inc(&group->container_users);
+       atomic_inc(&group->container_users);
 
-               fd_install(ret, filep);
-               break;
-       }
-       mutex_unlock(&group->device_lock);
+       fd_install(ret, filep);
 
        return ret;
 }
index 9e8e004bb1c38d809c2af43b9d42c053db3a41a2..eec2f11809ff2463d2a714224925af9c679fead1 100644 (file)
 #include <linux/file.h>
 #include <linux/highmem.h>
 #include <linux/slab.h>
+#include <linux/vmalloc.h>
 #include <linux/kthread.h>
 #include <linux/cgroup.h>
 #include <linux/module.h>
+#include <linux/sort.h>
 
 #include "vhost.h"
 
+static ushort max_mem_regions = 64;
+module_param(max_mem_regions, ushort, 0444);
+MODULE_PARM_DESC(max_mem_regions,
+       "Maximum number of memory regions in memory map. (default: 64)");
+
 enum {
-       VHOST_MEMORY_MAX_NREGIONS = 64,
        VHOST_MEMORY_F_LOG = 0x1,
 };
 
@@ -543,7 +549,7 @@ void vhost_dev_cleanup(struct vhost_dev *dev, bool locked)
                fput(dev->log_file);
        dev->log_file = NULL;
        /* No one will access memory at this point */
-       kfree(dev->memory);
+       kvfree(dev->memory);
        dev->memory = NULL;
        WARN_ON(!list_empty(&dev->work_list));
        if (dev->worker) {
@@ -663,6 +669,25 @@ int vhost_vq_access_ok(struct vhost_virtqueue *vq)
 }
 EXPORT_SYMBOL_GPL(vhost_vq_access_ok);
 
+static int vhost_memory_reg_sort_cmp(const void *p1, const void *p2)
+{
+       const struct vhost_memory_region *r1 = p1, *r2 = p2;
+       if (r1->guest_phys_addr < r2->guest_phys_addr)
+               return 1;
+       if (r1->guest_phys_addr > r2->guest_phys_addr)
+               return -1;
+       return 0;
+}
+
+static void *vhost_kvzalloc(unsigned long size)
+{
+       void *n = kzalloc(size, GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
+
+       if (!n)
+               n = vzalloc(size);
+       return n;
+}
+
 static long vhost_set_memory(struct vhost_dev *d, struct vhost_memory __user *m)
 {
        struct vhost_memory mem, *newmem, *oldmem;
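vhost_kvzalloc() above is an instance of a common pattern for allocations whose size is driven by userspace: attempt a physically contiguous kzalloc() with the failure warning suppressed, fall back to vzalloc(), and release with kvfree() in either case. A minimal sketch of the same idiom (the __GFP_REPEAT hint used by the patch is omitted here):

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

/* Zeroed allocation that tolerates large, user-controlled sizes. */
static void *demo_kvzalloc(unsigned long size)
{
	void *p = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);

	if (!p)
		p = vzalloc(size);
	return p;	/* must be released with kvfree() */
}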
@@ -673,21 +698,23 @@ static long vhost_set_memory(struct vhost_dev *d, struct vhost_memory __user *m)
                return -EFAULT;
        if (mem.padding)
                return -EOPNOTSUPP;
-       if (mem.nregions > VHOST_MEMORY_MAX_NREGIONS)
+       if (mem.nregions > max_mem_regions)
                return -E2BIG;
-       newmem = kmalloc(size + mem.nregions * sizeof *m->regions, GFP_KERNEL);
+       newmem = vhost_kvzalloc(size + mem.nregions * sizeof(*m->regions));
        if (!newmem)
                return -ENOMEM;
 
        memcpy(newmem, &mem, size);
        if (copy_from_user(newmem->regions, m->regions,
                           mem.nregions * sizeof *m->regions)) {
-               kfree(newmem);
+               kvfree(newmem);
                return -EFAULT;
        }
+       sort(newmem->regions, newmem->nregions, sizeof(*newmem->regions),
+               vhost_memory_reg_sort_cmp, NULL);
 
        if (!memory_access_ok(d, newmem, 0)) {
-               kfree(newmem);
+               kvfree(newmem);
                return -EFAULT;
        }
        oldmem = d->memory;
@@ -699,7 +726,7 @@ static long vhost_set_memory(struct vhost_dev *d, struct vhost_memory __user *m)
                d->vqs[i]->memory = newmem;
                mutex_unlock(&d->vqs[i]->mutex);
        }
-       kfree(oldmem);
+       kvfree(oldmem);
        return 0;
 }
 
@@ -965,6 +992,7 @@ long vhost_dev_ioctl(struct vhost_dev *d, unsigned int ioctl, void __user *argp)
                }
                if (eventfp != d->log_file) {
                        filep = d->log_file;
+                       d->log_file = eventfp;
                        ctx = d->log_ctx;
                        d->log_ctx = eventfp ?
                                eventfd_ctx_fileget(eventfp) : NULL;
@@ -992,17 +1020,22 @@ EXPORT_SYMBOL_GPL(vhost_dev_ioctl);
 static const struct vhost_memory_region *find_region(struct vhost_memory *mem,
                                                     __u64 addr, __u32 len)
 {
-       struct vhost_memory_region *reg;
-       int i;
+       const struct vhost_memory_region *reg;
+       int start = 0, end = mem->nregions;
 
-       /* linear search is not brilliant, but we really have on the order of 6
-        * regions in practice */
-       for (i = 0; i < mem->nregions; ++i) {
-               reg = mem->regions + i;
-               if (reg->guest_phys_addr <= addr &&
-                   reg->guest_phys_addr + reg->memory_size - 1 >= addr)
-                       return reg;
+       while (start < end) {
+               int slot = start + (end - start) / 2;
+               reg = mem->regions + slot;
+               if (addr >= reg->guest_phys_addr)
+                       end = slot;
+               else
+                       start = slot + 1;
        }
+
+       reg = mem->regions + start;
+       if (addr >= reg->guest_phys_addr &&
+               reg->guest_phys_addr + reg->memory_size > addr)
+               return reg;
        return NULL;
 }
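The lookup above works because vhost_set_memory() now sorts the regions with vhost_memory_reg_sort_cmp(), i.e. by descending guest_phys_addr, so find_region() can binary-search for the leftmost region starting at or below addr and then confirm addr falls inside it. A self-contained userspace sketch of the same search (hypothetical struct, not the vhost types):

#include <stddef.h>
#include <stdint.h>

/* Hypothetical region descriptor; mirrors the fields the lookup needs. */
struct region {
	uint64_t start;	/* guest-physical start address */
	uint64_t size;
};

/*
 * Find the region containing addr in an array sorted by descending start
 * address: binary-search for the first (leftmost) region whose start is
 * <= addr, then check that addr lies before the end of that region.
 */
const struct region *find_region_sorted(const struct region *regions,
					size_t nregions, uint64_t addr)
{
	size_t lo = 0, hi = nregions;

	while (lo < hi) {
		size_t mid = lo + (hi - lo) / 2;

		if (addr >= regions[mid].start)
			hi = mid;	/* candidate: look for an earlier one */
		else
			lo = mid + 1;	/* region starts above addr: go right */
	}

	if (lo < nregions && addr >= regions[lo].start &&
	    addr - regions[lo].start < regions[lo].size)
		return &regions[lo];
	return NULL;
}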
 
index 862fbc206755511a6bd3a02eeaa525e992c545bc..564a7de17d99831083c46bc19fd859d40a5d51a2 100644 (file)
@@ -378,7 +378,7 @@ int btrfs_dev_replace_start(struct btrfs_root *root,
 
        ret = btrfs_kobj_add_device(tgt_device->fs_devices, tgt_device);
        if (ret)
-               btrfs_error(root->fs_info, ret, "kobj add dev failed");
+               btrfs_err(root->fs_info, "kobj add dev failed %d\n", ret);
 
        printk_in_rcu(KERN_INFO
                      "BTRFS: dev_replace from %s (devid %llu) to %s started\n",
index a9aadb2ad5254cfe98d36a6eed1f12d4ae5e7925..f556c3732c2c16e22e0bcbd35f9ee1277179be5b 100644 (file)
@@ -2842,6 +2842,7 @@ int open_ctree(struct super_block *sb,
            !extent_buffer_uptodate(chunk_root->node)) {
                printk(KERN_ERR "BTRFS: failed to read chunk root on %s\n",
                       sb->s_id);
+               chunk_root->node = NULL;
                goto fail_tree_roots;
        }
        btrfs_set_root_node(&chunk_root->root_item, chunk_root->node);
@@ -2879,7 +2880,7 @@ retry_root_backup:
            !extent_buffer_uptodate(tree_root->node)) {
                printk(KERN_WARNING "BTRFS: failed to read tree root on %s\n",
                       sb->s_id);
-
+               tree_root->node = NULL;
                goto recovery_tree_root;
        }
 
index 171312d51799e358843216c945a18dee6d3a8790..07204bf601edac25ad2757bbd7c85961eeabc56c 100644 (file)
@@ -4227,6 +4227,24 @@ out:
        space_info->chunk_alloc = 0;
        spin_unlock(&space_info->lock);
        mutex_unlock(&fs_info->chunk_mutex);
+       /*
+        * When we allocate a new chunk we reserve space in the chunk block
+        * reserve to make sure we can COW nodes/leaves in the chunk tree or
+        * add new nodes/leaves to it if we end up needing to do it when
+        * inserting the chunk item and updating device items as part of the
+        * second phase of chunk allocation, performed by
+        * btrfs_finish_chunk_alloc(). So make sure we don't accumulate a
+        * large number of new block groups to create in our transaction
+        * handle's new_bgs list to avoid exhausting the chunk block reserve
+        * in extreme cases - like having a single transaction create many new
+        * block groups when starting to write out the free space caches of all
+        * the block groups that were made dirty during the lifetime of the
+        * transaction.
+        */
+       if (trans->chunk_bytes_reserved >= (2 * 1024 * 1024ull)) {
+               btrfs_create_pending_block_groups(trans, trans->root);
+               btrfs_trans_release_chunk_metadata(trans);
+       }
        return ret;
 }
 
index 51e0f0d0053e52c965c4ebed464fc4a761523a67..f5021fcb154e3bfa3773a79d94ee4750be48ca0d 100644 (file)
@@ -2152,7 +2152,8 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
 
        kmem_cache_free(btrfs_trans_handle_cachep, trans);
 
-       if (current != root->fs_info->transaction_kthread)
+       if (current != root->fs_info->transaction_kthread &&
+           current != root->fs_info->cleaner_kthread)
                btrfs_run_delayed_iputs(root);
 
        return ret;
index c3e21ccfc358b2da1170c15f09a5946afad0ff16..a7f77e1fa18c25e62e8de5f809389d041e59266e 100644 (file)
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -319,6 +319,12 @@ static int dax_insert_mapping(struct inode *inode, struct buffer_head *bh,
  * @vma: The virtual memory area where the fault occurred
  * @vmf: The description of the fault
  * @get_block: The filesystem method used to translate file offsets to blocks
+ * @complete_unwritten: The filesystem method used to convert unwritten blocks
+ *     to written so the data written to them is exposed. This is required
+ *     by write faults for filesystems that will return unwritten extent
+ *     mappings from @get_block, but it is optional for reads as
+ *     dax_insert_mapping() will always zero unwritten blocks. If the fs
+ *     does not support unwritten extents, it should pass NULL.
  *
  * When a page fault occurs, filesystems may call this helper in their
  * fault handler for DAX files. __dax_fault() assumes the caller has done all
@@ -437,8 +443,12 @@ int __dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
         * as for normal BH based IO completions.
         */
        error = dax_insert_mapping(inode, &bh, vma, vmf);
-       if (buffer_unwritten(&bh))
-               complete_unwritten(&bh, !error);
+       if (buffer_unwritten(&bh)) {
+               if (complete_unwritten)
+                       complete_unwritten(&bh, !error);
+               else
+                       WARN_ON_ONCE(!(vmf->flags & FAULT_FLAG_WRITE));
+       }
 
  out:
        if (error == -ENOMEM)
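Per the clarified kernel-doc above, a filesystem whose get_block callback never returns unwritten extents may pass NULL for complete_unwritten. A rough sketch of such a fault handler, assuming this tree's dax_fault(vma, vmf, get_block, complete_unwritten) signature and a hypothetical my_get_block():

#include <linux/fs.h>
#include <linux/mm.h>

/* Assumed to exist: the filesystem's usual get_block callback. */
extern int my_get_block(struct inode *inode, sector_t iblock,
			struct buffer_head *bh_result, int create);

/*
 * Fault handler for a filesystem whose get_block never returns unwritten
 * extents: per the comment above, complete_unwritten may simply be NULL.
 */
static int my_dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	return dax_fault(vma, vmf, my_get_block, NULL);
}

static const struct vm_operations_struct my_dax_vm_ops = {
	.fault = my_dax_fault,
};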
index 9bedfa8dd3a5305d1407ce04c5fa3319a6882cd3..f71e19a9dd3c18fc6ee7b3bcb4f3da9c8b8c3d4c 100644 (file)
@@ -2072,8 +2072,6 @@ static int f2fs_set_data_page_dirty(struct page *page)
                return 1;
        }
 
-       mark_inode_dirty(inode);
-
        if (!PageDirty(page)) {
                __set_page_dirty_nobuffers(page);
                update_dirty_page(inode, page);
index ada2a3dd701ad5eb22add128a0d5e89ebb98383f..b0f38c3b37f4d5577551e6d517e801a6f644ac09 100644 (file)
@@ -1331,12 +1331,13 @@ static int f2fs_ioc_commit_atomic_write(struct file *filp)
        if (ret)
                return ret;
 
-       if (f2fs_is_atomic_file(inode))
+       if (f2fs_is_atomic_file(inode)) {
+               clear_inode_flag(F2FS_I(inode), FI_ATOMIC_FILE);
                commit_inmem_pages(inode, false);
+       }
 
        ret = f2fs_sync_file(filp, 0, LONG_MAX, 0);
        mnt_drop_write_file(filp);
-       clear_inode_flag(F2FS_I(inode), FI_ATOMIC_FILE);
        return ret;
 }
 
@@ -1387,8 +1388,8 @@ static int f2fs_ioc_abort_volatile_write(struct file *filp)
        f2fs_balance_fs(F2FS_I_SB(inode));
 
        if (f2fs_is_atomic_file(inode)) {
-               commit_inmem_pages(inode, false);
                clear_inode_flag(F2FS_I(inode), FI_ATOMIC_FILE);
+               commit_inmem_pages(inode, false);
        }
 
        if (f2fs_is_volatile_file(inode))
index e1e73617d13b6cb5e1fca434e4ab823dfa534eca..22fb5ef37966210cb50f5b8679b7c661803128f9 100644 (file)
@@ -556,27 +556,39 @@ static void move_encrypted_block(struct inode *inode, block_t bidx)
        if (!fio.encrypted_page)
                goto put_out;
 
-       f2fs_submit_page_bio(&fio);
+       err = f2fs_submit_page_bio(&fio);
+       if (err)
+               goto put_page_out;
+
+       /* write page */
+       lock_page(fio.encrypted_page);
+
+       if (unlikely(!PageUptodate(fio.encrypted_page)))
+               goto put_page_out;
+       if (unlikely(fio.encrypted_page->mapping != META_MAPPING(fio.sbi)))
+               goto put_page_out;
+
+       set_page_dirty(fio.encrypted_page);
+       f2fs_wait_on_page_writeback(fio.encrypted_page, META);
+       if (clear_page_dirty_for_io(fio.encrypted_page))
+               dec_page_count(fio.sbi, F2FS_DIRTY_META);
+
+       set_page_writeback(fio.encrypted_page);
 
        /* allocate block address */
        f2fs_wait_on_page_writeback(dn.node_page, NODE);
-
        allocate_data_block(fio.sbi, NULL, fio.blk_addr,
                                        &fio.blk_addr, &sum, CURSEG_COLD_DATA);
-       dn.data_blkaddr = fio.blk_addr;
-
-       /* write page */
-       lock_page(fio.encrypted_page);
-       set_page_writeback(fio.encrypted_page);
        fio.rw = WRITE_SYNC;
        f2fs_submit_page_mbio(&fio);
 
+       dn.data_blkaddr = fio.blk_addr;
        set_data_blkaddr(&dn);
        f2fs_update_extent_cache(&dn);
        set_inode_flag(F2FS_I(inode), FI_APPEND_WRITE);
        if (page->index == 0)
                set_inode_flag(F2FS_I(inode), FI_FIRST_BLOCK_WRITTEN);
-
+put_page_out:
        f2fs_put_page(fio.encrypted_page, 1);
 put_out:
        f2fs_put_dnode(&dn);
@@ -605,8 +617,8 @@ static void move_data_page(struct inode *inode, block_t bidx, int gc_type)
                        .page = page,
                        .encrypted_page = NULL,
                };
+               set_page_dirty(page);
                f2fs_wait_on_page_writeback(page, DATA);
-
                if (clear_page_dirty_for_io(page))
                        inode_dec_dirty_pages(inode);
                set_cold_data(page);
index 38e75fb1e48812b38d477a13a53f0469fb95bbd3..a13ffcc329923b85f6dee0064e56681bf8a1269c 100644 (file)
@@ -141,6 +141,8 @@ int f2fs_convert_inline_page(struct dnode_of_data *dn, struct page *page)
        kunmap_atomic(dst_addr);
        SetPageUptodate(page);
 no_update:
+       set_page_dirty(page);
+
        /* clear dirty state */
        dirty = clear_page_dirty_for_io(page);
 
index 1eb343768781f3c4c89e0f6ecc2694d9a4c61c75..61b97f9cb9f657961d7ce12a490d55ece03e18a4 100644 (file)
@@ -257,6 +257,7 @@ void commit_inmem_pages(struct inode *inode, bool abort)
                if (!abort) {
                        lock_page(cur->page);
                        if (cur->page->mapping == inode->i_mapping) {
+                               set_page_dirty(cur->page);
                                f2fs_wait_on_page_writeback(cur->page, DATA);
                                if (clear_page_dirty_for_io(cur->page))
                                        inode_dec_dirty_pages(inode);
index f0520bcf209442914eff0ed60d380fb3d2c66402..518c6294bf6c0ef965e9f56baa4e11d2eccc0165 100644 (file)
@@ -702,6 +702,7 @@ void wbc_account_io(struct writeback_control *wbc, struct page *page,
        else
                wbc->wb_tcand_bytes -= min(bytes, wbc->wb_tcand_bytes);
 }
+EXPORT_SYMBOL_GPL(wbc_account_io);
 
 /**
  * inode_congested - test whether an inode is congested
index c7cb8a526c05fbaa5ba18934cd04eda1eb2a6e60..2b8aa15fd6dfa755368ddf21136eaab5192f1e28 100644 (file)
@@ -1361,6 +1361,36 @@ enum umount_tree_flags {
        UMOUNT_PROPAGATE = 2,
        UMOUNT_CONNECTED = 4,
 };
+
+static bool disconnect_mount(struct mount *mnt, enum umount_tree_flags how)
+{
+       /* Leaving mounts connected is only valid for lazy umounts */
+       if (how & UMOUNT_SYNC)
+               return true;
+
+       /* A mount without a parent has nothing to be connected to */
+       if (!mnt_has_parent(mnt))
+               return true;
+
+       /* Because the reference counting rules change when mounts are
+        * unmounted and connected, unmounted mounts may not be
+        * connected to mounted mounts.
+        */
+       if (!(mnt->mnt_parent->mnt.mnt_flags & MNT_UMOUNT))
+               return true;
+
+       /* Has it been requested that the mount remain connected? */
+       if (how & UMOUNT_CONNECTED)
+               return false;
+
+       /* Is the mount locked such that it needs to remain connected? */
+       if (IS_MNT_LOCKED(mnt))
+               return false;
+
+       /* By default disconnect the mount */
+       return true;
+}
+
 /*
  * mount_lock must be held
  * namespace_sem must be held for write
@@ -1398,10 +1428,7 @@ static void umount_tree(struct mount *mnt, enum umount_tree_flags how)
                if (how & UMOUNT_SYNC)
                        p->mnt.mnt_flags |= MNT_SYNC_UMOUNT;
 
-               disconnect = !(((how & UMOUNT_CONNECTED) &&
-                               mnt_has_parent(p) &&
-                               (p->mnt_parent->mnt.mnt_flags & MNT_UMOUNT)) ||
-                              IS_MNT_LOCKED_AND_LAZY(p));
+               disconnect = disconnect_mount(p, how);
 
                pin_insert_group(&p->mnt_umount, &p->mnt_parent->mnt,
                                 disconnect ? &unmounted : NULL);
@@ -1538,11 +1565,8 @@ void __detach_mounts(struct dentry *dentry)
        while (!hlist_empty(&mp->m_list)) {
                mnt = hlist_entry(mp->m_list.first, struct mount, mnt_mp_list);
                if (mnt->mnt.mnt_flags & MNT_UMOUNT) {
-                       struct mount *p, *tmp;
-                       list_for_each_entry_safe(p, tmp, &mnt->mnt_mounts,  mnt_child) {
-                               hlist_add_head(&p->mnt_umount.s_list, &unmounted);
-                               umount_mnt(p);
-                       }
+                       hlist_add_head(&mnt->mnt_umount.s_list, &unmounted);
+                       umount_mnt(mnt);
                }
                else umount_tree(mnt, UMOUNT_CONNECTED);
        }
index ecebb406cc1aec554ce780f2c3680eddc351e8da..4a90c9bb31357305ed6f38166bb0e9afabaae953 100644 (file)
@@ -775,7 +775,7 @@ static int nfs_init_server(struct nfs_server *server,
        server->options = data->options;
        server->caps |= NFS_CAP_HARDLINKS|NFS_CAP_SYMLINKS|NFS_CAP_FILEID|
                NFS_CAP_MODE|NFS_CAP_NLINK|NFS_CAP_OWNER|NFS_CAP_OWNER_GROUP|
-               NFS_CAP_ATIME|NFS_CAP_CTIME|NFS_CAP_MTIME|NFS_CAP_CHANGE_ATTR;
+               NFS_CAP_ATIME|NFS_CAP_CTIME|NFS_CAP_MTIME;
 
        if (data->rsize)
                server->rsize = nfs_block_size(data->rsize, NULL);
index c12951b9551eab8b0394ed2aa218cf15ce6f2c39..b3289d701eea21623f4081fee1b2807e3e2f4b3a 100644 (file)
@@ -1852,7 +1852,7 @@ ff_layout_mirror_prepare_stats(struct nfs42_layoutstat_args *args,
        struct nfs42_layoutstat_devinfo *devinfo;
        int i;
 
-       for (i = 0; i <= FF_LAYOUT_MIRROR_COUNT(pls); i++) {
+       for (i = 0; i < FF_LAYOUT_MIRROR_COUNT(pls); i++) {
                if (*dev_count >= dev_limit)
                        break;
                mirror = FF_LAYOUT_COMP(pls, i);
index b77b328a06d74f0124d2a65b51fac0fc21fbd692..0adc7d245b3dd838e32371920e23d9dda5071ee0 100644 (file)
@@ -442,8 +442,9 @@ nfs_fhget(struct super_block *sb, struct nfs_fh *fh, struct nfs_fattr *fattr, st
                        nfs_set_cache_invalid(inode, NFS_INO_INVALID_ATTR);
                if (fattr->valid & NFS_ATTR_FATTR_CHANGE)
                        inode->i_version = fattr->change_attr;
-               else if (nfs_server_capable(inode, NFS_CAP_CHANGE_ATTR))
-                       nfs_set_cache_invalid(inode, NFS_INO_INVALID_ATTR);
+               else
+                       nfs_set_cache_invalid(inode, NFS_INO_INVALID_ATTR
+                               | NFS_INO_REVAL_PAGECACHE);
                if (fattr->valid & NFS_ATTR_FATTR_SIZE)
                        inode->i_size = nfs_size_to_loff_t(fattr->size);
                else
@@ -1244,9 +1245,11 @@ static int nfs_check_inode_attributes(struct inode *inode, struct nfs_fattr *fat
        if (fattr->valid & NFS_ATTR_FATTR_SIZE) {
                cur_size = i_size_read(inode);
                new_isize = nfs_size_to_loff_t(fattr->size);
-               if (cur_size != new_isize && nfsi->nrequests == 0)
+               if (cur_size != new_isize)
                        invalid |= NFS_INO_INVALID_ATTR|NFS_INO_REVAL_PAGECACHE;
        }
+       if (nfsi->nrequests != 0)
+               invalid &= ~NFS_INO_REVAL_PAGECACHE;
 
        /* Have any file permissions changed? */
        if ((fattr->valid & NFS_ATTR_FATTR_MODE) && (inode->i_mode & S_IALLUGO) != (fattr->mode & S_IALLUGO))
@@ -1684,13 +1687,12 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
                        invalid |= NFS_INO_INVALID_ATTR
                                | NFS_INO_INVALID_DATA
                                | NFS_INO_INVALID_ACCESS
-                               | NFS_INO_INVALID_ACL
-                               | NFS_INO_REVAL_PAGECACHE;
+                               | NFS_INO_INVALID_ACL;
                        if (S_ISDIR(inode->i_mode))
                                nfs_force_lookup_revalidate(inode);
                        inode->i_version = fattr->change_attr;
                }
-       } else if (server->caps & NFS_CAP_CHANGE_ATTR)
+       } else
                nfsi->cache_validity |= save_cache_validity;
 
        if (fattr->valid & NFS_ATTR_FATTR_MTIME) {
@@ -1717,7 +1719,6 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
                        if ((nfsi->nrequests == 0) || new_isize > cur_isize) {
                                i_size_write(inode, new_isize);
                                invalid |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA;
-                               invalid &= ~NFS_INO_REVAL_PAGECACHE;
                        }
                        dprintk("NFS: isize change on server for file %s/%ld "
                                        "(%Ld to %Ld)\n",
index 7e3c4604bea8a6e6e92906b6c2096adb6df5ca3f..9b372b845f6a6ff06a4a035e2f6799d7d29cd8f7 100644 (file)
@@ -296,6 +296,22 @@ extern struct rpc_procinfo nfs4_procedures[];
 
 #ifdef CONFIG_NFS_V4_SECURITY_LABEL
 extern struct nfs4_label *nfs4_label_alloc(struct nfs_server *server, gfp_t flags);
+static inline struct nfs4_label *
+nfs4_label_copy(struct nfs4_label *dst, struct nfs4_label *src)
+{
+       if (!dst || !src)
+               return NULL;
+
+       if (src->len > NFS4_MAXLABELLEN)
+               return NULL;
+
+       dst->lfs = src->lfs;
+       dst->pi = src->pi;
+       dst->len = src->len;
+       memcpy(dst->label, src->label, src->len);
+
+       return dst;
+}
 static inline void nfs4_label_free(struct nfs4_label *label)
 {
        if (label) {
@@ -316,6 +332,11 @@ static inline void nfs4_label_free(void *label) {}
 static inline void nfs_zap_label_cache_locked(struct nfs_inode *nfsi)
 {
 }
+static inline struct nfs4_label *
+nfs4_label_copy(struct nfs4_label *dst, struct nfs4_label *src)
+{
+       return NULL;
+}
 #endif /* CONFIG_NFS_V4_SECURITY_LABEL */
 
 /* proc.c */
index f486b80f927ab7204159852a9740900a6c73aec6..d731bbf974aaf1d4bf695c2cb57a99cc586e0258 100644 (file)
@@ -135,7 +135,7 @@ int nfs42_proc_deallocate(struct file *filep, loff_t offset, loff_t len)
        return err;
 }
 
-loff_t nfs42_proc_llseek(struct file *filep, loff_t offset, int whence)
+static loff_t _nfs42_proc_llseek(struct file *filep, loff_t offset, int whence)
 {
        struct inode *inode = file_inode(filep);
        struct nfs42_seek_args args = {
@@ -171,6 +171,23 @@ loff_t nfs42_proc_llseek(struct file *filep, loff_t offset, int whence)
        return vfs_setpos(filep, res.sr_offset, inode->i_sb->s_maxbytes);
 }
 
+loff_t nfs42_proc_llseek(struct file *filep, loff_t offset, int whence)
+{
+       struct nfs_server *server = NFS_SERVER(file_inode(filep));
+       struct nfs4_exception exception = { };
+       int err;
+
+       do {
+               err = _nfs42_proc_llseek(filep, offset, whence);
+               if (err == -ENOTSUPP)
+                       return -EOPNOTSUPP;
+               err = nfs4_handle_exception(server, err, &exception);
+       } while (exception.retry);
+
+       return err;
+}
+
+
 static void
 nfs42_layoutstat_prepare(struct rpc_task *task, void *calldata)
 {
index 8bee93469617eeddffa0e56038deed9051221420..3acb1eb72930c40828bab90aeb27a3918f71138d 100644 (file)
@@ -467,7 +467,10 @@ static void do_renew_lease(struct nfs_client *clp, unsigned long timestamp)
 
 static void renew_lease(const struct nfs_server *server, unsigned long timestamp)
 {
-       do_renew_lease(server->nfs_client, timestamp);
+       struct nfs_client *clp = server->nfs_client;
+
+       if (!nfs4_has_session(clp))
+               do_renew_lease(clp, timestamp);
 }
 
 struct nfs4_call_sync_data {
@@ -616,8 +619,7 @@ int nfs41_sequence_done(struct rpc_task *task, struct nfs4_sequence_res *res)
                clp = session->clp;
                do_renew_lease(clp, res->sr_timestamp);
                /* Check sequence flags */
-               if (res->sr_status_flags != 0)
-                       nfs4_schedule_lease_recovery(clp);
+               nfs41_handle_sequence_flag_errors(clp, res->sr_status_flags);
                nfs41_update_target_slotid(slot->table, slot, res);
                break;
        case 1:
@@ -910,6 +912,7 @@ struct nfs4_opendata {
        struct nfs_open_confirmres c_res;
        struct nfs4_string owner_name;
        struct nfs4_string group_name;
+       struct nfs4_label *a_label;
        struct nfs_fattr f_attr;
        struct nfs4_label *f_label;
        struct dentry *dir;
@@ -1013,6 +1016,10 @@ static struct nfs4_opendata *nfs4_opendata_alloc(struct dentry *dentry,
        if (IS_ERR(p->f_label))
                goto err_free_p;
 
+       p->a_label = nfs4_label_alloc(server, gfp_mask);
+       if (IS_ERR(p->a_label))
+               goto err_free_f;
+
        alloc_seqid = server->nfs_client->cl_mvops->alloc_seqid;
        p->o_arg.seqid = alloc_seqid(&sp->so_seqid, gfp_mask);
        if (IS_ERR(p->o_arg.seqid))
@@ -1041,7 +1048,7 @@ static struct nfs4_opendata *nfs4_opendata_alloc(struct dentry *dentry,
        p->o_arg.server = server;
        p->o_arg.bitmask = nfs4_bitmask(server, label);
        p->o_arg.open_bitmap = &nfs4_fattr_bitmap[0];
-       p->o_arg.label = label;
+       p->o_arg.label = nfs4_label_copy(p->a_label, label);
        p->o_arg.claim = nfs4_map_atomic_open_claim(server, claim);
        switch (p->o_arg.claim) {
        case NFS4_OPEN_CLAIM_NULL:
@@ -1074,6 +1081,8 @@ static struct nfs4_opendata *nfs4_opendata_alloc(struct dentry *dentry,
        return p;
 
 err_free_label:
+       nfs4_label_free(p->a_label);
+err_free_f:
        nfs4_label_free(p->f_label);
 err_free_p:
        kfree(p);
@@ -1093,6 +1102,7 @@ static void nfs4_opendata_free(struct kref *kref)
                nfs4_put_open_state(p->state);
        nfs4_put_state_owner(p->owner);
 
+       nfs4_label_free(p->a_label);
        nfs4_label_free(p->f_label);
 
        dput(p->dir);
@@ -1198,12 +1208,15 @@ static bool nfs_need_update_open_stateid(struct nfs4_state *state,
 
 static void nfs_resync_open_stateid_locked(struct nfs4_state *state)
 {
+       if (!(state->n_wronly || state->n_rdonly || state->n_rdwr))
+               return;
        if (state->n_wronly)
                set_bit(NFS_O_WRONLY_STATE, &state->flags);
        if (state->n_rdonly)
                set_bit(NFS_O_RDONLY_STATE, &state->flags);
        if (state->n_rdwr)
                set_bit(NFS_O_RDWR_STATE, &state->flags);
+       set_bit(NFS_OPEN_STATE, &state->flags);
 }
 
 static void nfs_clear_open_stateid_locked(struct nfs4_state *state,
@@ -7571,13 +7584,8 @@ static int nfs4_proc_sequence(struct nfs_client *clp, struct rpc_cred *cred)
                goto out;
        }
        ret = rpc_wait_for_completion_task(task);
-       if (!ret) {
-               struct nfs4_sequence_res *res = task->tk_msg.rpc_resp;
-
-               if (task->tk_status == 0)
-                       nfs41_handle_sequence_flag_errors(clp, res->sr_status_flags);
+       if (!ret)
                ret = task->tk_status;
-       }
        rpc_put_task(task);
 out:
        dprintk("<-- %s status=%d\n", __func__, ret);
@@ -7965,16 +7973,17 @@ static void nfs4_layoutreturn_release(void *calldata)
 {
        struct nfs4_layoutreturn *lrp = calldata;
        struct pnfs_layout_hdr *lo = lrp->args.layout;
+       LIST_HEAD(freeme);
 
        dprintk("--> %s\n", __func__);
        spin_lock(&lo->plh_inode->i_lock);
        if (lrp->res.lrs_present)
                pnfs_set_layout_stateid(lo, &lrp->res.stateid, true);
+       pnfs_mark_matching_lsegs_invalid(lo, &freeme, &lrp->args.range);
        pnfs_clear_layoutreturn_waitbit(lo);
-       clear_bit(NFS_LAYOUT_RETURN_BEFORE_CLOSE, &lo->plh_flags);
-       rpc_wake_up(&NFS_SERVER(lo->plh_inode)->roc_rpcwaitq);
        lo->plh_block_lgets--;
        spin_unlock(&lo->plh_inode->i_lock);
+       pnfs_free_lseg_list(&freeme);
        pnfs_put_layout_hdr(lrp->args.layout);
        nfs_iput_and_deactive(lrp->inode);
        kfree(calldata);
@@ -8588,7 +8597,6 @@ static const struct nfs4_minor_version_ops nfs_v4_0_minor_ops = {
        .minor_version = 0,
        .init_caps = NFS_CAP_READDIRPLUS
                | NFS_CAP_ATOMIC_OPEN
-               | NFS_CAP_CHANGE_ATTR
                | NFS_CAP_POSIX_LOCK,
        .init_client = nfs40_init_client,
        .shutdown_client = nfs40_shutdown_client,
@@ -8614,7 +8622,6 @@ static const struct nfs4_minor_version_ops nfs_v4_1_minor_ops = {
        .minor_version = 1,
        .init_caps = NFS_CAP_READDIRPLUS
                | NFS_CAP_ATOMIC_OPEN
-               | NFS_CAP_CHANGE_ATTR
                | NFS_CAP_POSIX_LOCK
                | NFS_CAP_STATEID_NFSV41
                | NFS_CAP_ATOMIC_OPEN_V1,
@@ -8637,7 +8644,6 @@ static const struct nfs4_minor_version_ops nfs_v4_2_minor_ops = {
        .minor_version = 2,
        .init_caps = NFS_CAP_READDIRPLUS
                | NFS_CAP_ATOMIC_OPEN
-               | NFS_CAP_CHANGE_ATTR
                | NFS_CAP_POSIX_LOCK
                | NFS_CAP_STATEID_NFSV41
                | NFS_CAP_ATOMIC_OPEN_V1
index 605840dc89cf9e28c173659af201aab109f9328d..f2e2ad8944617f679a4a85934f6a276d3665229d 100644 (file)
@@ -2191,25 +2191,35 @@ static void nfs41_handle_server_reboot(struct nfs_client *clp)
        }
 }
 
-static void nfs41_handle_state_revoked(struct nfs_client *clp)
+static void nfs41_handle_all_state_revoked(struct nfs_client *clp)
 {
        nfs4_reset_all_state(clp);
        dprintk("%s: state revoked on server %s\n", __func__, clp->cl_hostname);
 }
 
+static void nfs41_handle_some_state_revoked(struct nfs_client *clp)
+{
+       nfs4_state_mark_reclaim_helper(clp, nfs4_state_mark_reclaim_nograce);
+       nfs4_schedule_state_manager(clp);
+
+       dprintk("%s: state revoked on server %s\n", __func__, clp->cl_hostname);
+}
+
 static void nfs41_handle_recallable_state_revoked(struct nfs_client *clp)
 {
-       /* This will need to handle layouts too */
-       nfs_expire_all_delegations(clp);
+       /* FIXME: For now, we destroy all layouts. */
+       pnfs_destroy_all_layouts(clp);
+       /* FIXME: For now, we test all delegations+open state+locks. */
+       nfs41_handle_some_state_revoked(clp);
        dprintk("%s: Recallable state revoked on server %s!\n", __func__,
                        clp->cl_hostname);
 }
 
 static void nfs41_handle_backchannel_fault(struct nfs_client *clp)
 {
-       nfs_expire_all_delegations(clp);
-       if (test_and_set_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state) == 0)
-               nfs4_schedule_state_manager(clp);
+       set_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state);
+       nfs4_schedule_state_manager(clp);
+
        dprintk("%s: server %s declared a backchannel fault\n", __func__,
                        clp->cl_hostname);
 }
@@ -2231,10 +2241,11 @@ void nfs41_handle_sequence_flag_errors(struct nfs_client *clp, u32 flags)
 
        if (flags & SEQ4_STATUS_RESTART_RECLAIM_NEEDED)
                nfs41_handle_server_reboot(clp);
-       if (flags & (SEQ4_STATUS_EXPIRED_ALL_STATE_REVOKED |
-                           SEQ4_STATUS_EXPIRED_SOME_STATE_REVOKED |
+       if (flags & (SEQ4_STATUS_EXPIRED_ALL_STATE_REVOKED))
+               nfs41_handle_all_state_revoked(clp);
+       if (flags & (SEQ4_STATUS_EXPIRED_SOME_STATE_REVOKED |
                            SEQ4_STATUS_ADMIN_STATE_REVOKED))
-               nfs41_handle_state_revoked(clp);
+               nfs41_handle_some_state_revoked(clp);
        if (flags & SEQ4_STATUS_LEASE_MOVED)
                nfs4_schedule_lease_moved_recovery(clp);
        if (flags & SEQ4_STATUS_RECALLABLE_STATE_REVOKED)
index 1da68d3b1edabdb78c60527f502af5f40d6cf69b..4984bbe55ff1eed1623df2196bc0de0de41a4304 100644 (file)
@@ -1100,8 +1100,6 @@ static int nfs_do_recoalesce(struct nfs_pageio_descriptor *desc)
                mirror->pg_base = 0;
                mirror->pg_recoalesce = 0;
 
-               desc->pg_moreio = 0;
-
                while (!list_empty(&head)) {
                        struct nfs_page *req;
 
@@ -1109,8 +1107,11 @@ static int nfs_do_recoalesce(struct nfs_pageio_descriptor *desc)
                        nfs_list_remove_request(req);
                        if (__nfs_pageio_add_request(desc, req))
                                continue;
-                       if (desc->pg_error < 0)
+                       if (desc->pg_error < 0) {
+                               list_splice_tail(&head, &mirror->pg_list);
+                               mirror->pg_recoalesce = 1;
                                return 0;
+                       }
                        break;
                }
        } while (mirror->pg_recoalesce);
index 0ba9a02c95664960f8c0f46ea97249bd8653fe16..70bf706b10904e156affe9dd4bea2ec9a17776c5 100644 (file)
@@ -352,7 +352,7 @@ pnfs_layout_need_return(struct pnfs_layout_hdr *lo,
 {
        struct pnfs_layout_segment *s;
 
-       if (!test_bit(NFS_LSEG_LAYOUTRETURN, &lseg->pls_flags))
+       if (!test_and_clear_bit(NFS_LSEG_LAYOUTRETURN, &lseg->pls_flags))
                return false;
 
        list_for_each_entry(s, &lo->plh_segs, pls_list)
@@ -362,6 +362,18 @@ pnfs_layout_need_return(struct pnfs_layout_hdr *lo,
        return true;
 }
 
+static bool
+pnfs_prepare_layoutreturn(struct pnfs_layout_hdr *lo)
+{
+       if (test_and_set_bit(NFS_LAYOUT_RETURN, &lo->plh_flags))
+               return false;
+       lo->plh_return_iomode = 0;
+       lo->plh_block_lgets++;
+       pnfs_get_layout_hdr(lo);
+       clear_bit(NFS_LAYOUT_RETURN_BEFORE_CLOSE, &lo->plh_flags);
+       return true;
+}
+
 static void pnfs_layoutreturn_before_put_lseg(struct pnfs_layout_segment *lseg,
                struct pnfs_layout_hdr *lo, struct inode *inode)
 {
@@ -372,17 +384,16 @@ static void pnfs_layoutreturn_before_put_lseg(struct pnfs_layout_segment *lseg,
        if (pnfs_layout_need_return(lo, lseg)) {
                nfs4_stateid stateid;
                enum pnfs_iomode iomode;
+               bool send;
 
                stateid = lo->plh_stateid;
                iomode = lo->plh_return_iomode;
-               /* decreased in pnfs_send_layoutreturn() */
-               lo->plh_block_lgets++;
-               lo->plh_return_iomode = 0;
+               send = pnfs_prepare_layoutreturn(lo);
                spin_unlock(&inode->i_lock);
-               pnfs_get_layout_hdr(lo);
-
-                       /* Send an async layoutreturn so we don't deadlock */
-               pnfs_send_layoutreturn(lo, stateid, iomode, false);
+               if (send) {
+                       /* Send an async layoutreturn so we don't deadlock */
+                       pnfs_send_layoutreturn(lo, stateid, iomode, false);
+               }
        } else
                spin_unlock(&inode->i_lock);
 }
@@ -411,6 +422,10 @@ pnfs_put_lseg(struct pnfs_layout_segment *lseg)
                pnfs_layoutreturn_before_put_lseg(lseg, lo, inode);
 
        if (atomic_dec_and_lock(&lseg->pls_refcount, &inode->i_lock)) {
+               if (test_bit(NFS_LSEG_VALID, &lseg->pls_flags)) {
+                       spin_unlock(&inode->i_lock);
+                       return;
+               }
                pnfs_get_layout_hdr(lo);
                pnfs_layout_remove_lseg(lo, lseg);
                spin_unlock(&inode->i_lock);
@@ -451,6 +466,8 @@ pnfs_put_lseg_locked(struct pnfs_layout_segment *lseg)
                test_bit(NFS_LSEG_VALID, &lseg->pls_flags));
        if (atomic_dec_and_test(&lseg->pls_refcount)) {
                struct pnfs_layout_hdr *lo = lseg->pls_layout;
+               if (test_bit(NFS_LSEG_VALID, &lseg->pls_flags))
+                       return;
                pnfs_get_layout_hdr(lo);
                pnfs_layout_remove_lseg(lo, lseg);
                pnfs_free_lseg_async(lseg);
@@ -924,6 +941,7 @@ void pnfs_clear_layoutreturn_waitbit(struct pnfs_layout_hdr *lo)
        clear_bit_unlock(NFS_LAYOUT_RETURN, &lo->plh_flags);
        smp_mb__after_atomic();
        wake_up_bit(&lo->plh_flags, NFS_LAYOUT_RETURN);
+       rpc_wake_up(&NFS_SERVER(lo->plh_inode)->roc_rpcwaitq);
 }
 
 static int
@@ -978,6 +996,7 @@ _pnfs_return_layout(struct inode *ino)
        LIST_HEAD(tmp_list);
        nfs4_stateid stateid;
        int status = 0, empty;
+       bool send;
 
        dprintk("NFS: %s for inode %lu\n", __func__, ino->i_ino);
 
@@ -1007,17 +1026,18 @@ _pnfs_return_layout(struct inode *ino)
        /* Don't send a LAYOUTRETURN if list was initially empty */
        if (empty) {
                spin_unlock(&ino->i_lock);
-               pnfs_put_layout_hdr(lo);
                dprintk("NFS: %s no layout segments to return\n", __func__);
-               goto out;
+               goto out_put_layout_hdr;
        }
 
        set_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags);
-       lo->plh_block_lgets++;
+       send = pnfs_prepare_layoutreturn(lo);
        spin_unlock(&ino->i_lock);
        pnfs_free_lseg_list(&tmp_list);
-
-       status = pnfs_send_layoutreturn(lo, stateid, IOMODE_ANY, true);
+       if (send)
+               status = pnfs_send_layoutreturn(lo, stateid, IOMODE_ANY, true);
+out_put_layout_hdr:
+       pnfs_put_layout_hdr(lo);
 out:
        dprintk("<-- %s status: %d\n", __func__, status);
        return status;
@@ -1097,13 +1117,9 @@ bool pnfs_roc(struct inode *ino)
 out_noroc:
        if (lo) {
                stateid = lo->plh_stateid;
-               layoutreturn =
-                       test_and_clear_bit(NFS_LAYOUT_RETURN_BEFORE_CLOSE,
-                                          &lo->plh_flags);
-               if (layoutreturn) {
-                       lo->plh_block_lgets++;
-                       pnfs_get_layout_hdr(lo);
-               }
+               if (test_and_clear_bit(NFS_LAYOUT_RETURN_BEFORE_CLOSE,
+                                          &lo->plh_flags))
+                       layoutreturn = pnfs_prepare_layoutreturn(lo);
        }
        spin_unlock(&ino->i_lock);
        if (layoutreturn) {
@@ -1146,15 +1162,18 @@ bool pnfs_roc_drain(struct inode *ino, u32 *barrier, struct rpc_task *task)
        struct pnfs_layout_segment *lseg;
        nfs4_stateid stateid;
        u32 current_seqid;
-       bool found = false, layoutreturn = false;
+       bool layoutreturn = false;
 
        spin_lock(&ino->i_lock);
-       list_for_each_entry(lseg, &nfsi->layout->plh_segs, pls_list)
-               if (test_bit(NFS_LSEG_ROC, &lseg->pls_flags)) {
-                       rpc_sleep_on(&NFS_SERVER(ino)->roc_rpcwaitq, task, NULL);
-                       found = true;
-                       goto out;
-               }
+       list_for_each_entry(lseg, &nfsi->layout->plh_segs, pls_list) {
+               if (!test_bit(NFS_LSEG_ROC, &lseg->pls_flags))
+                       continue;
+               if (test_bit(NFS_LSEG_VALID, &lseg->pls_flags))
+                       continue;
+               rpc_sleep_on(&NFS_SERVER(ino)->roc_rpcwaitq, task, NULL);
+               spin_unlock(&ino->i_lock);
+               return true;
+       }
        lo = nfsi->layout;
        current_seqid = be32_to_cpu(lo->plh_stateid.seqid);
 
@@ -1162,23 +1181,19 @@ bool pnfs_roc_drain(struct inode *ino, u32 *barrier, struct rpc_task *task)
         * a barrier, we choose the worst-case barrier.
         */
        *barrier = current_seqid + atomic_read(&lo->plh_outstanding);
-out:
-       if (!found) {
-               stateid = lo->plh_stateid;
-               layoutreturn =
-                       test_and_clear_bit(NFS_LAYOUT_RETURN_BEFORE_CLOSE,
-                                          &lo->plh_flags);
-               if (layoutreturn) {
-                       lo->plh_block_lgets++;
-                       pnfs_get_layout_hdr(lo);
-               }
-       }
+       stateid = lo->plh_stateid;
+       if (test_and_clear_bit(NFS_LAYOUT_RETURN_BEFORE_CLOSE,
+                                          &lo->plh_flags))
+               layoutreturn = pnfs_prepare_layoutreturn(lo);
+       if (test_bit(NFS_LAYOUT_RETURN, &lo->plh_flags))
+               rpc_sleep_on(&NFS_SERVER(ino)->roc_rpcwaitq, task, NULL);
+
        spin_unlock(&ino->i_lock);
        if (layoutreturn) {
-               rpc_sleep_on(&NFS_SERVER(ino)->roc_rpcwaitq, task, NULL);
                pnfs_send_layoutreturn(lo, stateid, IOMODE_ANY, false);
+               return true;
        }
-       return found;
+       return false;
 }
 
 /*
@@ -1695,7 +1710,6 @@ void pnfs_error_mark_layout_for_return(struct inode *inode,
        spin_lock(&inode->i_lock);
        /* set failure bit so that pnfs path will be retried later */
        pnfs_layout_set_fail_bit(lo, iomode);
-       set_bit(NFS_LAYOUT_RETURN, &lo->plh_flags);
        if (lo->plh_return_iomode == 0)
                lo->plh_return_iomode = range.iomode;
        else if (lo->plh_return_iomode != range.iomode)
@@ -2207,13 +2221,12 @@ pnfs_layoutcommit_inode(struct inode *inode, bool sync)
        if (ld->prepare_layoutcommit) {
                status = ld->prepare_layoutcommit(&data->args);
                if (status) {
+                       put_rpccred(data->cred);
                        spin_lock(&inode->i_lock);
                        set_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags);
                        if (end_pos > nfsi->layout->plh_lwb)
                                nfsi->layout->plh_lwb = end_pos;
-                       spin_unlock(&inode->i_lock);
-                       put_rpccred(data->cred);
-                       goto clear_layoutcommitting;
+                       goto out_unlock;
                }
        }
 
index 65869ca9c851dbf4f0b289ca84865a018c2b6e57..75a35a1afa7944d4ac54bd94994cddf1fd05ab54 100644 (file)
@@ -1379,24 +1379,27 @@ static void nfs_writeback_check_extend(struct nfs_pgio_header *hdr,
 {
        struct nfs_pgio_args *argp = &hdr->args;
        struct nfs_pgio_res *resp = &hdr->res;
+       u64 size = argp->offset + resp->count;
 
        if (!(fattr->valid & NFS_ATTR_FATTR_SIZE))
+               fattr->size = size;
+       if (nfs_size_to_loff_t(fattr->size) < i_size_read(hdr->inode)) {
+               fattr->valid &= ~NFS_ATTR_FATTR_SIZE;
                return;
-       if (argp->offset + resp->count != fattr->size)
-               return;
-       if (nfs_size_to_loff_t(fattr->size) < i_size_read(hdr->inode))
+       }
+       if (size != fattr->size)
                return;
        /* Set attribute barrier */
        nfs_fattr_set_barrier(fattr);
+       /* ...and update size */
+       fattr->valid |= NFS_ATTR_FATTR_SIZE;
 }
 
 void nfs_writeback_update_inode(struct nfs_pgio_header *hdr)
 {
-       struct nfs_fattr *fattr = hdr->res.fattr;
+       struct nfs_fattr *fattr = &hdr->fattr;
        struct inode *inode = hdr->inode;
 
-       if (fattr == NULL)
-               return;
        spin_lock(&inode->i_lock);
        nfs_writeback_check_extend(hdr, fattr);
        nfs_post_op_update_inode_force_wcc_locked(inode, fattr);
index 7114ce6e6b9ef038f415550b925ac50a95be978c..0fcdbe7ca6480e7950092cfd0dc7d0ce431ef548 100644 (file)
@@ -20,8 +20,6 @@
 #define SET_MNT_MARK(m) ((m)->mnt.mnt_flags |= MNT_MARKED)
 #define CLEAR_MNT_MARK(m) ((m)->mnt.mnt_flags &= ~MNT_MARKED)
 #define IS_MNT_LOCKED(m) ((m)->mnt.mnt_flags & MNT_LOCKED)
-#define IS_MNT_LOCKED_AND_LAZY(m) \
-       (((m)->mnt.mnt_flags & (MNT_LOCKED|MNT_SYNC_UMOUNT)) == MNT_LOCKED)
 
 #define CL_EXPIRE              0x01
 #define CL_SLAVE               0x02
index 20de88d1bf86205d126d5e27b43298af9b836a79..dd714037c322d0009d8df40e73bf5adf3aa892d4 100644 (file)
@@ -159,11 +159,10 @@ xfs_attr3_rmt_write_verify(
        struct xfs_buf  *bp)
 {
        struct xfs_mount *mp = bp->b_target->bt_mount;
-       struct xfs_buf_log_item *bip = bp->b_fspriv;
+       int             blksize = mp->m_attr_geo->blksize;
        char            *ptr;
        int             len;
        xfs_daddr_t     bno;
-       int             blksize = mp->m_attr_geo->blksize;
 
        /* no verification of non-crc buffers */
        if (!xfs_sb_version_hascrc(&mp->m_sb))
@@ -175,16 +174,22 @@ xfs_attr3_rmt_write_verify(
        ASSERT(len >= blksize);
 
        while (len > 0) {
+               struct xfs_attr3_rmt_hdr *rmt = (struct xfs_attr3_rmt_hdr *)ptr;
+
                if (!xfs_attr3_rmt_verify(mp, ptr, blksize, bno)) {
                        xfs_buf_ioerror(bp, -EFSCORRUPTED);
                        xfs_verifier_error(bp);
                        return;
                }
-               if (bip) {
-                       struct xfs_attr3_rmt_hdr *rmt;
 
-                       rmt = (struct xfs_attr3_rmt_hdr *)ptr;
-                       rmt->rm_lsn = cpu_to_be64(bip->bli_item.li_lsn);
+               /*
+                * Ensure we aren't writing bogus LSNs to disk. See
+                * xfs_attr3_rmt_hdr_set() for the explanation.
+                */
+               if (rmt->rm_lsn != cpu_to_be64(NULLCOMMITLSN)) {
+                       xfs_buf_ioerror(bp, -EFSCORRUPTED);
+                       xfs_verifier_error(bp);
+                       return;
                }
                xfs_update_cksum(ptr, blksize, XFS_ATTR3_RMT_CRC_OFF);
 
@@ -221,6 +226,18 @@ xfs_attr3_rmt_hdr_set(
        rmt->rm_owner = cpu_to_be64(ino);
        rmt->rm_blkno = cpu_to_be64(bno);
 
+       /*
+        * Remote attribute blocks are written synchronously, so we don't
+        * have an LSN that we can stamp in them that makes any sense to log
+        * recovery. To ensure that log recovery handles overwrites of these
+        * blocks sanely (i.e. once they've been freed and reallocated as some
+        * other type of metadata) we need to ensure that the LSN has a value
+        * that tells log recovery to ignore the LSN and overwrite the buffer
+        * with whatever is in its log. To do this, we use the magic
+        * NULLCOMMITLSN to indicate that the LSN is invalid.
+        */
+       rmt->rm_lsn = cpu_to_be64(NULLCOMMITLSN);
+
        return sizeof(struct xfs_attr3_rmt_hdr);
 }
 
@@ -434,14 +451,21 @@ xfs_attr_rmtval_set(
 
                /*
                 * Allocate a single extent, up to the size of the value.
+                *
+                * Note that we have to consider this a data allocation as we
+                * write the remote attribute without logging the contents.
+                * Hence we must ensure that we aren't using blocks that are on
+                * the busy list so that we don't overwrite blocks which have
+                * recently been freed but their transactions are not yet
+                * committed to disk. If we overwrite the contents of a busy
+                * extent and then crash then the block may not contain the
+                * correct metadata after log recovery occurs.
                 */
                xfs_bmap_init(args->flist, args->firstblock);
                nmap = 1;
                error = xfs_bmapi_write(args->trans, dp, (xfs_fileoff_t)lblkno,
-                                 blkcnt,
-                                 XFS_BMAPI_ATTRFORK | XFS_BMAPI_METADATA,
-                                 args->firstblock, args->total, &map, &nmap,
-                                 args->flist);
+                                 blkcnt, XFS_BMAPI_ATTRFORK, args->firstblock,
+                                 args->total, &map, &nmap, args->flist);
                if (!error) {
                        error = xfs_bmap_finish(&args->trans, args->flist,
                                                &committed);
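/*
 * Illustrative standalone sketch (not from the patch above): remote attribute
 * blocks are written outside the log, so the patch stamps a sentinel LSN into
 * the header and teaches the write verifier to reject anything else.  A
 * userspace approximation; the header layout and sentinel value are simplified
 * stand-ins for the XFS ones.
 */
#include <endian.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define SENTINEL_LSN ((uint64_t)-1)	/* plays the role of NULLCOMMITLSN */

struct rmt_hdr {
	uint64_t rm_lsn;	/* stored big-endian on disk */
};

static void rmt_hdr_set(struct rmt_hdr *hdr)
{
	/* Never write a real LSN: log recovery must always replay over us. */
	hdr->rm_lsn = htobe64(SENTINEL_LSN);
}

static bool rmt_hdr_write_verify(const struct rmt_hdr *hdr)
{
	/* Any other value would confuse recovery ordering; treat it as corruption. */
	return hdr->rm_lsn == htobe64(SENTINEL_LSN);
}

int main(void)
{
	struct rmt_hdr hdr;

	rmt_hdr_set(&hdr);
	printf("verifier accepts: %d\n", rmt_hdr_write_verify(&hdr));
	return 0;
}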
index f0e8249722d40a0dcaf9f31bc25effaba142b0a9..db4acc1c3e73479cdf32322944a7fa1bcf34b96f 100644 (file)
@@ -1514,18 +1514,27 @@ xfs_filemap_fault(
        struct vm_area_struct   *vma,
        struct vm_fault         *vmf)
 {
-       struct xfs_inode        *ip = XFS_I(file_inode(vma->vm_file));
+       struct inode            *inode = file_inode(vma->vm_file);
        int                     ret;
 
-       trace_xfs_filemap_fault(ip);
+       trace_xfs_filemap_fault(XFS_I(inode));
 
        /* DAX can shortcut the normal fault path on write faults! */
-       if ((vmf->flags & FAULT_FLAG_WRITE) && IS_DAX(VFS_I(ip)))
+       if ((vmf->flags & FAULT_FLAG_WRITE) && IS_DAX(inode))
                return xfs_filemap_page_mkwrite(vma, vmf);
 
-       xfs_ilock(ip, XFS_MMAPLOCK_SHARED);
-       ret = filemap_fault(vma, vmf);
-       xfs_iunlock(ip, XFS_MMAPLOCK_SHARED);
+       xfs_ilock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
+       if (IS_DAX(inode)) {
+               /*
+                * we do not want to trigger unwritten extent conversion on read
+                * faults - that is unnecessary overhead and would also require
+                * changes to xfs_get_blocks_direct() to map unwritten extent
+                * ioend for conversion on read-only mappings.
+                */
+               ret = __dax_fault(vma, vmf, xfs_get_blocks_direct, NULL);
+       } else
+               ret = filemap_fault(vma, vmf);
+       xfs_iunlock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
 
        return ret;
 }
index 01dd228ca05e315b88feb7afbfaf3e81d728878b..480ebba8464f38dbb0608b579185ad93a0600913 100644 (file)
@@ -1886,9 +1886,14 @@ xlog_recover_get_buf_lsn(
                uuid = &((struct xfs_dir3_blk_hdr *)blk)->uuid;
                break;
        case XFS_ATTR3_RMT_MAGIC:
-               lsn = be64_to_cpu(((struct xfs_attr3_rmt_hdr *)blk)->rm_lsn);
-               uuid = &((struct xfs_attr3_rmt_hdr *)blk)->rm_uuid;
-               break;
+               /*
+                * Remote attr blocks are written synchronously, rather than
+                * being logged. That means they do not contain a valid LSN
+                * (i.e. transactionally ordered) in them, and hence any time we
+                * see a buffer to replay over the top of a remote attribute
+                * block we should simply do so.
+                */
+               goto recover_immediately;
        case XFS_SB_MAGIC:
                lsn = be64_to_cpu(((struct xfs_dsb *)blk)->sb_lsn);
                uuid = &((struct xfs_dsb *)blk)->sb_uuid;
index fed36418dd1c2fe8c1c084278080f2ff23169725..6c78956aa47092440edb3a73e7b9389ac3a57558 100644 (file)
@@ -45,6 +45,7 @@ enum {
        ATA_SECT_SIZE           = 512,
        ATA_MAX_SECTORS_128     = 128,
        ATA_MAX_SECTORS         = 256,
+       ATA_MAX_SECTORS_1024    = 1024,
        ATA_MAX_SECTORS_LBA48   = 65535,/* TODO: 65536? */
        ATA_MAX_SECTORS_TAPE    = 65535,
 
index 76abba4b238ece14f8a2bd9bcce5ab53306c61fd..dcacb1a72e26d0755703135016105ae511a2b042 100644 (file)
@@ -340,7 +340,27 @@ struct cper_ia_proc_ctx {
        __u64   mm_reg_addr;
 };
 
-/* Memory Error Section */
+/* Old Memory Error Section UEFI 2.1, 2.2 */
+struct cper_sec_mem_err_old {
+       __u64   validation_bits;
+       __u64   error_status;
+       __u64   physical_addr;
+       __u64   physical_addr_mask;
+       __u16   node;
+       __u16   card;
+       __u16   module;
+       __u16   bank;
+       __u16   device;
+       __u16   row;
+       __u16   column;
+       __u16   bit_pos;
+       __u64   requestor_id;
+       __u64   responder_id;
+       __u64   target_id;
+       __u8    error_type;
+};
+
+/* Memory Error Section UEFI >= 2.3 */
 struct cper_sec_mem_err {
        __u64   validation_bits;
        __u64   error_status;
index 29ad97c34fd5cf4bcdeec373d2ad5691bb2ec6c3..bde1e567b3a93ad5feb7a0d2aa980a07e28270d5 100644 (file)
@@ -62,6 +62,7 @@ struct cpufreq_policy {
        /* CPUs sharing clock, require sw coordination */
        cpumask_var_t           cpus;   /* Online CPUs only */
        cpumask_var_t           related_cpus; /* Online + Offline CPUs */
+       cpumask_var_t           real_cpus; /* Related and present */
 
        unsigned int            shared_type; /* ACPI: ANY or ALL affected CPUs
                                                should set cpufreq */
index 1da602982cf93a6a262bef85967945ffea9c586a..6cd8c0ee4b6f89a9ab93b67515cb6e214e108071 100644 (file)
@@ -116,6 +116,7 @@ ftrace_func_t ftrace_ops_get_func(struct ftrace_ops *ops);
  *            SAVE_REGS. If another ops with this flag set is already registered
  *            for any of the functions that this ops will be registered for, then
  *            this ops will fail to register or set_filter_ip.
+ * PID     - Is affected by set_ftrace_pid (allows filtering on those pids)
  */
 enum {
        FTRACE_OPS_FL_ENABLED                   = 1 << 0,
@@ -132,6 +133,7 @@ enum {
        FTRACE_OPS_FL_MODIFYING                 = 1 << 11,
        FTRACE_OPS_FL_ALLOC_TRAMP               = 1 << 12,
        FTRACE_OPS_FL_IPMODIFY                  = 1 << 13,
+       FTRACE_OPS_FL_PID                       = 1 << 14,
 };
 
 #ifdef CONFIG_DYNAMIC_FTRACE
@@ -159,6 +161,7 @@ struct ftrace_ops {
        struct ftrace_ops               *next;
        unsigned long                   flags;
        void                            *private;
+       ftrace_func_t                   saved_func;
        int __percpu                    *disabled;
 #ifdef CONFIG_DYNAMIC_FTRACE
        int                             nr_trampolines;
index 36ce37bcc963c270548989d658caa4f3ce832045..c9cfbcdb8d140e2f136b724501a064a25a766fb3 100644 (file)
@@ -431,6 +431,8 @@ enum {
        ATA_HORKAGE_WD_BROKEN_LPM = (1 << 21),  /* some WDs have broken LPM */
        ATA_HORKAGE_ZERO_AFTER_TRIM = (1 << 22),/* guarantees zero after trim */
        ATA_HORKAGE_NO_NCQ_LOG  = (1 << 23),    /* don't use NCQ for log read */
+       ATA_HORKAGE_NOTRIM      = (1 << 24),    /* don't use TRIM */
+       ATA_HORKAGE_MAX_SEC_1024 = (1 << 25),   /* Limit max sects to 1024 */
 
         /* DMA mask for user DMA control: User visible values; DO NOT
            renumber */
index f25e2bdd188c94643efc0f3a94c7c67de81897aa..272f42952f3424aa43953b7e3a50b925a93bd465 100644 (file)
@@ -177,11 +177,6 @@ typedef enum {
 #define NAND_OWN_BUFFERS       0x00020000
 /* Chip may not exist, so silence any errors in scan */
 #define NAND_SCAN_SILENT_NODEV 0x00040000
-/*
- * This option could be defined by controller drivers to protect against
- * kmap'ed, vmalloc'ed highmem buffers being passed from upper layers
- */
-#define NAND_USE_BOUNCE_BUFFER 0x00080000
 /*
  * Autodetect nand buswidth with readid/onfi.
  * This suppose the driver will configure the hardware in 8 bits mode
@@ -189,6 +184,11 @@ typedef enum {
  * before calling nand_scan_tail.
  */
 #define NAND_BUSWIDTH_AUTO      0x00080000
+/*
+ * This option could be defined by controller drivers to protect against
+ * kmap'ed, vmalloc'ed highmem buffers being passed from upper layers
+ */
+#define NAND_USE_BOUNCE_BUFFER 0x00100000
 
 /* Options set by nand scan */
 /* Nand scan has allocated controller struct */
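/*
 * Illustrative standalone sketch (not from the patch above): the hunk
 * renumbers NAND_USE_BOUNCE_BUFFER because it silently shared bit 0x00080000
 * with NAND_BUSWIDTH_AUTO.  A compile-time check like the one below (flag
 * values copied from the header, the assertions themselves are this sketch's
 * addition) catches that class of mistake before it ships.
 */
#include <assert.h>
#include <stdio.h>

#define NAND_SCAN_SILENT_NODEV	0x00040000
#define NAND_BUSWIDTH_AUTO	0x00080000
#define NAND_USE_BOUNCE_BUFFER	0x00100000	/* was 0x00080000: a collision */

/* Each flag must occupy its own bit: pairwise AND of distinct flags is zero. */
static_assert((NAND_BUSWIDTH_AUTO & NAND_USE_BOUNCE_BUFFER) == 0,
	      "NAND option flags overlap");
static_assert((NAND_SCAN_SILENT_NODEV & NAND_BUSWIDTH_AUTO) == 0,
	      "NAND option flags overlap");

int main(void)
{
	printf("flag bits are distinct\n");
	return 0;
}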
index f91b5ade30c98fe8b03d06bc40deb0c434edb633..874b77228fb96285fb2024f07fa99d1ed7dd6dda 100644 (file)
@@ -292,9 +292,12 @@ static inline void nfs_mark_for_revalidate(struct inode *inode)
        struct nfs_inode *nfsi = NFS_I(inode);
 
        spin_lock(&inode->i_lock);
-       nfsi->cache_validity |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_ACCESS;
+       nfsi->cache_validity |= NFS_INO_INVALID_ATTR |
+                               NFS_INO_REVAL_PAGECACHE |
+                               NFS_INO_INVALID_ACCESS |
+                               NFS_INO_INVALID_ACL;
        if (S_ISDIR(inode->i_mode))
-               nfsi->cache_validity |= NFS_INO_REVAL_PAGECACHE|NFS_INO_INVALID_DATA;
+               nfsi->cache_validity |= NFS_INO_INVALID_DATA;
        spin_unlock(&inode->i_lock);
 }
 
index a2ea1491d3dfc487611445490fb10adf9972d777..20bc8e51b16124496326274e84fc12d61996a4fd 100644 (file)
@@ -220,7 +220,7 @@ struct nfs_server {
 #define NFS_CAP_SYMLINKS       (1U << 2)
 #define NFS_CAP_ACLS           (1U << 3)
 #define NFS_CAP_ATOMIC_OPEN    (1U << 4)
-#define NFS_CAP_CHANGE_ATTR    (1U << 5)
+/* #define NFS_CAP_CHANGE_ATTR (1U << 5) */
 #define NFS_CAP_FILEID         (1U << 6)
 #define NFS_CAP_MODE           (1U << 7)
 #define NFS_CAP_NLINK          (1U << 8)
index 4c508549833a53fc76b59ba6a0bda354fc4d051f..cc7dd687a89dd60699ebe28556138e293bcde9bc 100644 (file)
@@ -59,7 +59,7 @@ void of_dma_configure(struct device *dev, struct device_node *np);
 #else /* CONFIG_OF */
 
 static inline int of_driver_match_device(struct device *dev,
-                                        struct device_driver *drv)
+                                        const struct device_driver *drv)
 {
        return 0;
 }
index 75f70f6ac13778f448a03de3136d32a0ef638b65..e1571efa3f2b28e01642e08f6709d7c7c8dc141a 100644 (file)
@@ -43,7 +43,6 @@ struct esdhc_platform_data {
        enum wp_types wp_type;
        enum cd_types cd_type;
        int max_bus_width;
-       unsigned int f_max;
        bool support_vsel;
        unsigned int delay_line;
 };
index 34117b8b72e49d84fb9477326ad10a490de1060a..0aedbb2c10e0451c162118988d6c070efcd9b629 100644 (file)
@@ -595,6 +595,7 @@ struct iscsi_conn {
        int                     bitmap_id;
        int                     rx_thread_active;
        struct task_struct      *rx_thread;
+       struct completion       rx_login_comp;
        int                     tx_thread_active;
        struct task_struct      *tx_thread;
        /* list_head for session connection list */
index b6fce900a8334613f45f169a0c28802327222f56..fbdd11851725ffa77435b0d3f66cfdfd4f7bb5fb 100644 (file)
@@ -32,7 +32,7 @@
 #ifndef __AMDGPU_DRM_H__
 #define __AMDGPU_DRM_H__
 
-#include <drm/drm.h>
+#include "drm.h"
 
 #define DRM_AMDGPU_GEM_CREATE          0x00
 #define DRM_AMDGPU_GEM_MMAP            0x01
@@ -614,6 +614,8 @@ struct drm_amdgpu_info_device {
        uint32_t vram_type;
        /** video memory bit width*/
        uint32_t vram_bit_width;
+       /* vce harvesting instance */
+       uint32_t vce_harvest_config;
 };
 
 struct drm_amdgpu_info_hw_ip {
index 6e1a2ed116cb1410958f61631a7dc30839652738..db809b722985d3a1a03480cc1e3d36a656af2d2d 100644 (file)
@@ -1070,6 +1070,14 @@ struct drm_i915_reg_read {
        __u64 offset;
        __u64 val; /* Return value */
 };
+/* Known registers:
+ *
+ * Render engine timestamp - 0x2358 + 64bit - gen7+
+ * - Note this register returns an invalid value if using the default
+ *   single instruction 8byte read, in order to workaround that use
+ *   offset (0x2538 | 1) instead.
+ *
+ */
 
 struct drm_i915_reset_stats {
        __u32 ctx_id;
index 1ef76661e1a1b5cc7a1d1021aefd2501f8e5375e..01aa2a8e3f8de1bc368ab53f3f5db472f0329fa7 100644 (file)
@@ -33,7 +33,7 @@
 #ifndef __RADEON_DRM_H__
 #define __RADEON_DRM_H__
 
-#include <drm/drm.h>
+#include "drm.h"
 
 /* WARNING: If you change any of these defines, make sure to change the
  * defines in the X server file (radeon_sarea.h)
index 7bbee79ca2933f518a16f0250e7bbfd0a4e85fe0..ec32293a00db057eb7559c2e7cadce01deb3ef22 100644 (file)
@@ -34,6 +34,7 @@
 /* The feature bitmap for virtio net */
 #define VIRTIO_NET_F_CSUM      0       /* Host handles pkts w/ partial csum */
 #define VIRTIO_NET_F_GUEST_CSUM        1       /* Guest handles pkts w/ partial csum */
+#define VIRTIO_NET_F_CTRL_GUEST_OFFLOADS 2 /* Dynamic offload configuration. */
 #define VIRTIO_NET_F_MAC       5       /* Host has given MAC address. */
 #define VIRTIO_NET_F_GUEST_TSO4        7       /* Guest can handle TSOv4 in. */
 #define VIRTIO_NET_F_GUEST_TSO6        8       /* Guest can handle TSOv6 in. */
@@ -226,4 +227,19 @@ struct virtio_net_ctrl_mq {
  #define VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN        1
  #define VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX        0x8000
 
+/*
+ * Control network offloads
+ *
+ * Reconfigures the network offloads that Guest can handle.
+ *
+ * Available with the VIRTIO_NET_F_CTRL_GUEST_OFFLOADS feature bit.
+ *
+ * Command data format matches the feature bit mask exactly.
+ *
+ * See VIRTIO_NET_F_GUEST_* for the list of offloads
+ * that can be enabled/disabled.
+ */
+#define VIRTIO_NET_CTRL_GUEST_OFFLOADS   5
+#define VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET        0
+
 #endif /* _LINUX_VIRTIO_NET_H */
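/*
 * Illustrative standalone sketch (not from the patch above): the new
 * VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET command takes a 64-bit mask laid out
 * exactly like the feature bits, so a driver builds the payload by OR-ing
 * 1 << VIRTIO_NET_F_GUEST_*.  Feature bit values are copied from the header
 * above; the bare uint64_t is a simplified stand-in for the real control
 * buffer layout.
 */
#include <stdint.h>
#include <stdio.h>

#define VIRTIO_NET_F_GUEST_CSUM	1
#define VIRTIO_NET_F_GUEST_TSO4	7
#define VIRTIO_NET_F_GUEST_TSO6	8

int main(void)
{
	/* Enable checksum and TSOv4/v6 offloads, leave everything else off. */
	uint64_t offloads = (1ULL << VIRTIO_NET_F_GUEST_CSUM) |
			    (1ULL << VIRTIO_NET_F_GUEST_TSO4) |
			    (1ULL << VIRTIO_NET_F_GUEST_TSO6);

	/* A real driver would send this mask, little-endian, with class
	 * VIRTIO_NET_CTRL_GUEST_OFFLOADS and command ..._OFFLOADS_SET. */
	printf("ctrl payload mask: 0x%llx\n", (unsigned long long)offloads);
	return 0;
}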
index 75301468359f0c558ff8c3c12450301c80cff19a..90007a1abcab144ac3d6ac7d6e6f4001d58abb14 100644 (file)
@@ -157,6 +157,12 @@ struct virtio_pci_common_cfg {
        __le32 queue_used_hi;           /* read-write */
 };
 
+/* Fields in VIRTIO_PCI_CAP_PCI_CFG: */
+struct virtio_pci_cfg_cap {
+       struct virtio_pci_cap cap;
+       __u8 pci_cfg_data[4]; /* Data for BAR access. */
+};
+
 /* Macro versions of offsets for the Old Timers! */
 #define VIRTIO_PCI_CAP_VNDR            0
 #define VIRTIO_PCI_CAP_NEXT            1
index 915980ac68dfa8cc1dc973b8a7e659fc56383c22..c07295969b7e134ec85bf58b72b100949a6462fa 100644 (file)
@@ -31,6 +31,9 @@
  * SUCH DAMAGE.
  *
  * Copyright Rusty Russell IBM Corporation 2007. */
+#ifndef __KERNEL__
+#include <stdint.h>
+#endif
 #include <linux/types.h>
 #include <linux/virtio_types.h>
 
@@ -143,7 +146,7 @@ static inline void vring_init(struct vring *vr, unsigned int num, void *p,
        vr->num = num;
        vr->desc = p;
        vr->avail = p + num*sizeof(struct vring_desc);
-       vr->used = (void *)(((unsigned long)&vr->avail->ring[num] + sizeof(__virtio16)
+       vr->used = (void *)(((uintptr_t)&vr->avail->ring[num] + sizeof(__virtio16)
                + align-1) & ~(align - 1));
 }
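/*
 * Illustrative standalone sketch (not from the patch above): the vring_init()
 * change swaps an "unsigned long" cast for uintptr_t so the pointer-to-integer
 * round-up stays well-defined in userspace builds where unsigned long may not
 * match the pointer width.  The helper below shows the same align-up idiom on
 * its own.
 */
#include <stdint.h>
#include <stdio.h>

/* Round p up to the next multiple of align (align must be a power of two). */
static void *align_up(void *p, uintptr_t align)
{
	return (void *)(((uintptr_t)p + align - 1) & ~(align - 1));
}

int main(void)
{
	char buf[64];

	printf("%p -> %p (16-byte aligned)\n",
	       (void *)(buf + 1), align_up(buf + 1, 16));
	return 0;
}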
 
index 12215205ab8d0b11d854dcaa29f00bed48a980c7..785c5ca0994b5ab41e43fcfe6553197f6633dd06 100644 (file)
 
 /*
  * Block Header.
- * This header preceeds all object and object arrays below.
+ * This header precedes all object and object arrays below.
  */
 struct snd_soc_tplg_hdr {
        __le32 magic;           /* magic number */
@@ -222,7 +222,7 @@ struct snd_soc_tplg_stream_config {
 /*
  * Manifest. List totals for each payload type. Not used in parsing, but will
  * be passed to the component driver before any other objects in order for any
- * global componnent resource allocations.
+ * global component resource allocations.
  *
  * File block representation for manifest :-
  * +-----------------------------------+----+
index 90552aab5f2dd076c147c73cc6f9ff59aabb7af6..fed052a1bc9f5792c7cb856336f2093e75aa2432 100644 (file)
@@ -504,13 +504,13 @@ int region_is_ram(resource_size_t start, unsigned long size)
 {
        struct resource *p;
        resource_size_t end = start + size - 1;
-       int flags = IORESOURCE_MEM | IORESOURCE_BUSY;
+       unsigned long flags = IORESOURCE_MEM | IORESOURCE_BUSY;
        const char *name = "System RAM";
        int ret = -1;
 
        read_lock(&resource_lock);
        for (p = iomem_resource.child; p ; p = p->sibling) {
-               if (end < p->start)
+               if (p->end < start)
                        continue;
 
                if (p->start <= start && end <= p->end) {
@@ -521,7 +521,7 @@ int region_is_ram(resource_size_t start, unsigned long size)
                                ret = 1;
                        break;
                }
-               if (p->end < start)
+               if (end < p->start)
                        break;  /* not found */
        }
        read_unlock(&resource_lock);
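/*
 * Illustrative standalone sketch (not from the patch above): the
 * region_is_ram() fix swaps two range tests that had been inverted.  With
 * entries sorted by start address, an entry ending below the query is skipped,
 * while an entry starting above the query ends the walk.  A standalone model
 * of that scan, with return values chosen for the example:
 */
#include <stdio.h>

struct range { unsigned long start, end; };	/* inclusive, sorted by start */

/* 1 if [start, end] sits inside one entry, 0 if it touches none, -1 if it
 * partially overlaps an entry. */
static int classify(const struct range *tbl, int n,
		    unsigned long start, unsigned long end)
{
	for (int i = 0; i < n; i++) {
		if (tbl[i].end < start)
			continue;	/* entry entirely below the query */
		if (tbl[i].start <= start && end <= tbl[i].end)
			return 1;	/* fully inside this entry */
		if (end < tbl[i].start)
			break;		/* rest of the sorted table is above */
		return -1;		/* partial overlap */
	}
	return 0;
}

int main(void)
{
	struct range ram[] = { { 0x1000, 0x1fff }, { 0x4000, 0x7fff } };

	printf("%d %d %d\n",
	       classify(ram, 2, 0x4100, 0x41ff),	/* 1: inside */
	       classify(ram, 2, 0x2000, 0x2fff),	/* 0: in a hole */
	       classify(ram, 2, 0x1f00, 0x20ff));	/* -1: straddles */
	return 0;
}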
index 02bece4a99ea36bb835fc45a9aa55c1aedd69f9f..eb11011b5292add880af7038800560aa29c5a674 100644 (file)
@@ -98,6 +98,13 @@ struct ftrace_pid {
        struct pid *pid;
 };
 
+static bool ftrace_pids_enabled(void)
+{
+       return !list_empty(&ftrace_pids);
+}
+
+static void ftrace_update_trampoline(struct ftrace_ops *ops);
+
 /*
  * ftrace_disabled is set when an anomaly is discovered.
  * ftrace_disabled is much stronger than ftrace_enabled.
@@ -109,7 +116,6 @@ static DEFINE_MUTEX(ftrace_lock);
 static struct ftrace_ops *ftrace_control_list __read_mostly = &ftrace_list_end;
 static struct ftrace_ops *ftrace_ops_list __read_mostly = &ftrace_list_end;
 ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
-ftrace_func_t ftrace_pid_function __read_mostly = ftrace_stub;
 static struct ftrace_ops global_ops;
 static struct ftrace_ops control_ops;
 
@@ -183,14 +189,7 @@ static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip,
        if (!test_tsk_trace_trace(current))
                return;
 
-       ftrace_pid_function(ip, parent_ip, op, regs);
-}
-
-static void set_ftrace_pid_function(ftrace_func_t func)
-{
-       /* do not set ftrace_pid_function to itself! */
-       if (func != ftrace_pid_func)
-               ftrace_pid_function = func;
+       op->saved_func(ip, parent_ip, op, regs);
 }
 
 /**
@@ -202,7 +201,6 @@ static void set_ftrace_pid_function(ftrace_func_t func)
 void clear_ftrace_function(void)
 {
        ftrace_trace_function = ftrace_stub;
-       ftrace_pid_function = ftrace_stub;
 }
 
 static void control_ops_disable_all(struct ftrace_ops *ops)
@@ -436,6 +434,12 @@ static int __register_ftrace_function(struct ftrace_ops *ops)
        } else
                add_ftrace_ops(&ftrace_ops_list, ops);
 
+       /* Always save the function, and reset at unregistering */
+       ops->saved_func = ops->func;
+
+       if (ops->flags & FTRACE_OPS_FL_PID && ftrace_pids_enabled())
+               ops->func = ftrace_pid_func;
+
        ftrace_update_trampoline(ops);
 
        if (ftrace_enabled)
@@ -463,15 +467,28 @@ static int __unregister_ftrace_function(struct ftrace_ops *ops)
        if (ftrace_enabled)
                update_ftrace_function();
 
+       ops->func = ops->saved_func;
+
        return 0;
 }
 
 static void ftrace_update_pid_func(void)
 {
+       bool enabled = ftrace_pids_enabled();
+       struct ftrace_ops *op;
+
        /* Only do something if we are tracing something */
        if (ftrace_trace_function == ftrace_stub)
                return;
 
+       do_for_each_ftrace_op(op, ftrace_ops_list) {
+               if (op->flags & FTRACE_OPS_FL_PID) {
+                       op->func = enabled ? ftrace_pid_func :
+                               op->saved_func;
+                       ftrace_update_trampoline(op);
+               }
+       } while_for_each_ftrace_op(op);
+
        update_ftrace_function();
 }
 
@@ -1133,7 +1150,8 @@ static struct ftrace_ops global_ops = {
        .local_hash.filter_hash         = EMPTY_HASH,
        INIT_OPS_HASH(global_ops)
        .flags                          = FTRACE_OPS_FL_RECURSION_SAFE |
-                                         FTRACE_OPS_FL_INITIALIZED,
+                                         FTRACE_OPS_FL_INITIALIZED |
+                                         FTRACE_OPS_FL_PID,
 };
 
 /*
@@ -5023,7 +5041,9 @@ static void ftrace_update_trampoline(struct ftrace_ops *ops)
 
 static struct ftrace_ops global_ops = {
        .func                   = ftrace_stub,
-       .flags                  = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_INITIALIZED,
+       .flags                  = FTRACE_OPS_FL_RECURSION_SAFE |
+                                 FTRACE_OPS_FL_INITIALIZED |
+                                 FTRACE_OPS_FL_PID,
 };
 
 static int __init ftrace_nodyn_init(void)
@@ -5080,11 +5100,6 @@ void ftrace_init_array_ops(struct trace_array *tr, ftrace_func_t func)
                if (WARN_ON(tr->ops->func != ftrace_stub))
                        printk("ftrace ops had %pS for function\n",
                               tr->ops->func);
-               /* Only the top level instance does pid tracing */
-               if (!list_empty(&ftrace_pids)) {
-                       set_ftrace_pid_function(func);
-                       func = ftrace_pid_func;
-               }
        }
        tr->ops->func = func;
        tr->ops->private = tr;
@@ -5371,7 +5386,7 @@ static void *fpid_start(struct seq_file *m, loff_t *pos)
 {
        mutex_lock(&ftrace_lock);
 
-       if (list_empty(&ftrace_pids) && (!*pos))
+       if (!ftrace_pids_enabled() && (!*pos))
                return (void *) 1;
 
        return seq_list_start(&ftrace_pids, *pos);
@@ -5610,6 +5625,7 @@ static struct ftrace_ops graph_ops = {
        .func                   = ftrace_stub,
        .flags                  = FTRACE_OPS_FL_RECURSION_SAFE |
                                   FTRACE_OPS_FL_INITIALIZED |
+                                  FTRACE_OPS_FL_PID |
                                   FTRACE_OPS_FL_STUB,
 #ifdef FTRACE_GRAPH_TRAMP_ADDR
        .trampoline             = FTRACE_GRAPH_TRAMP_ADDR,
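/*
 * Illustrative standalone sketch (not from the patch above): the ftrace change
 * keeps the original callback in ops->saved_func and swaps ops->func to a
 * pid-filtering wrapper only while pid filtering is enabled, restoring it at
 * unregister.  A userspace model of that pointer swap; all names are invented.
 */
#include <stdbool.h>
#include <stdio.h>

struct ops;
typedef void (*callback_t)(struct ops *ops, const char *event);

struct ops {
	callback_t func;	/* what callers actually invoke */
	callback_t saved_func;	/* the callback as registered */
};

static void trace_cb(struct ops *ops, const char *event)
{
	(void)ops;
	printf("traced: %s\n", event);
}

/* Wrapper consulted first when pid filtering is on. */
static void pid_filter_cb(struct ops *ops, const char *event)
{
	if (event[0] == 'x')	/* stand-in for "current pid not traced" */
		return;
	ops->saved_func(ops, event);
}

static void set_pid_filtering(struct ops *ops, bool enable)
{
	ops->func = enable ? pid_filter_cb : ops->saved_func;
}

int main(void)
{
	struct ops ops = { .func = trace_cb, .saved_func = trace_cb };

	set_pid_filtering(&ops, true);
	ops.func(&ops, "xskip-me");	/* filtered out */
	ops.func(&ops, "keep-me");	/* reaches trace_cb */
	return 0;
}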
index 9dd49ca67dbc22a905999de21b8355475ba40052..6e70ddb158b4bc121a0f32e7a53fecf8125e8354 100644 (file)
@@ -704,6 +704,7 @@ static void p9_virtio_remove(struct virtio_device *vdev)
 
        mutex_unlock(&virtio_9p_lock);
 
+       vdev->config->reset(vdev);
        vdev->config->del_vqs(vdev);
 
        sysfs_remove_file(&(vdev->dev.kobj), &dev_attr_mount_tag.attr);
index 9825ff0f91d6c0bde819105f639cae21883bbfad..6255d141133bb0ccabc195c8b4d9bce8cbe39c51 100644 (file)
@@ -240,8 +240,8 @@ static struct rpc_rqst *xprt_alloc_bc_request(struct rpc_xprt *xprt, __be32 xid)
                req = xprt_alloc_bc_req(xprt, GFP_ATOMIC);
                if (!req)
                        goto not_found;
-               /* Note: this 'free' request adds it to xprt->bc_pa_list */
-               xprt_free_bc_request(req);
+               list_add_tail(&req->rq_bc_pa_list, &xprt->bc_pa_list);
+               xprt->bc_alloc_count++;
        }
        req = list_first_entry(&xprt->bc_pa_list, struct rpc_rqst,
                                rq_bc_pa_list);
@@ -336,7 +336,7 @@ void xprt_complete_bc_request(struct rpc_rqst *req, uint32_t copied)
 
        spin_lock(&xprt->bc_pa_lock);
        list_del(&req->rq_bc_pa_list);
-       xprt->bc_alloc_count--;
+       xprt_dec_alloc_count(xprt, 1);
        spin_unlock(&xprt->bc_pa_lock);
 
        req->rq_private_buf.len = copied;
index cbc6af923dd1cb0baabc95161989133150269d4f..23608eb0ded2c94e363ed06e35384b08417e04ed 100644 (file)
@@ -1902,6 +1902,7 @@ call_transmit_status(struct rpc_task *task)
 
        switch (task->tk_status) {
        case -EAGAIN:
+       case -ENOBUFS:
                break;
        default:
                dprint_status(task);
@@ -1928,7 +1929,6 @@ call_transmit_status(struct rpc_task *task)
        case -ECONNABORTED:
        case -EADDRINUSE:
        case -ENOTCONN:
-       case -ENOBUFS:
        case -EPIPE:
                rpc_task_force_reencode(task);
        }
@@ -2057,12 +2057,13 @@ call_status(struct rpc_task *task)
        case -ECONNABORTED:
                rpc_force_rebind(clnt);
        case -EADDRINUSE:
-       case -ENOBUFS:
                rpc_delay(task, 3*HZ);
        case -EPIPE:
        case -ENOTCONN:
                task->tk_action = call_bind;
                break;
+       case -ENOBUFS:
+               rpc_delay(task, HZ>>2);
        case -EAGAIN:
                task->tk_action = call_transmit;
                break;
index e193c2b5476b3a83973e9799e2e826fdcd2b842c..0030376327b77f0a08d4af887286a0616a60069e 100644 (file)
@@ -527,6 +527,10 @@ static int xs_local_send_request(struct rpc_task *task)
                              true, &sent);
        dprintk("RPC:       %s(%u) = %d\n",
                        __func__, xdr->len - req->rq_bytes_sent, status);
+
+       if (status == -EAGAIN && sock_writeable(transport->inet))
+               status = -ENOBUFS;
+
        if (likely(sent > 0) || status == 0) {
                req->rq_bytes_sent += sent;
                req->rq_xmit_bytes_sent += sent;
@@ -539,6 +543,7 @@ static int xs_local_send_request(struct rpc_task *task)
 
        switch (status) {
        case -ENOBUFS:
+               break;
        case -EAGAIN:
                status = xs_nospace(task);
                break;
@@ -589,6 +594,9 @@ static int xs_udp_send_request(struct rpc_task *task)
        if (status == -EPERM)
                goto process_status;
 
+       if (status == -EAGAIN && sock_writeable(transport->inet))
+               status = -ENOBUFS;
+
        if (sent > 0 || status == 0) {
                req->rq_xmit_bytes_sent += sent;
                if (sent >= req->rq_slen)
@@ -669,9 +677,6 @@ static int xs_tcp_send_request(struct rpc_task *task)
                dprintk("RPC:       xs_tcp_send_request(%u) = %d\n",
                                xdr->len - req->rq_bytes_sent, status);
 
-               if (unlikely(sent == 0 && status < 0))
-                       break;
-
                /* If we've sent the entire packet, immediately
                 * reset the count of bytes sent. */
                req->rq_bytes_sent += sent;
@@ -681,18 +686,21 @@ static int xs_tcp_send_request(struct rpc_task *task)
                        return 0;
                }
 
-               if (sent != 0)
-                       continue;
-               status = -EAGAIN;
-               break;
+               if (status < 0)
+                       break;
+               if (sent == 0) {
+                       status = -EAGAIN;
+                       break;
+               }
        }
+       if (status == -EAGAIN && sk_stream_is_writeable(transport->inet))
+               status = -ENOBUFS;
 
        switch (status) {
        case -ENOTSOCK:
                status = -ENOTCONN;
                /* Should we call xs_close() here? */
                break;
-       case -ENOBUFS:
        case -EAGAIN:
                status = xs_nospace(task);
                break;
@@ -703,6 +711,7 @@ static int xs_tcp_send_request(struct rpc_task *task)
        case -ECONNREFUSED:
        case -ENOTCONN:
        case -EADDRINUSE:
+       case -ENOBUFS:
        case -EPIPE:
                clear_bit(SOCK_ASYNC_NOSPACE, &transport->sock->flags);
        }
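/*
 * Illustrative standalone sketch (not from the patch above): the xprtsock
 * change tells apart two causes of -EAGAIN.  If the socket still reports
 * itself writeable, the failure was a transient memory shortage and is
 * remapped to -ENOBUFS so the caller backs off briefly and retransmits instead
 * of sleeping until the socket has buffer space.  A small model of that
 * mapping:
 */
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

static int map_send_status(int status, bool socket_writeable)
{
	if (status == -EAGAIN && socket_writeable)
		return -ENOBUFS;	/* allocation pressure, not a full socket */
	return status;
}

int main(void)
{
	printf("%d %d\n",
	       map_send_status(-EAGAIN, true),	 /* -ENOBUFS: delay, then resend */
	       map_send_status(-EAGAIN, false)); /* -EAGAIN: wait for socket space */
	return 0;
}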
index e72548b5897ec237dd7463374871538c81a84fd7..d33437007ad229313a5ef483826e427a74350876 100644 (file)
@@ -1181,9 +1181,11 @@ void __key_link_end(struct key *keyring,
        if (index_key->type == &key_type_keyring)
                up_write(&keyring_serialise_link_sem);
 
-       if (edit && !edit->dead_leaf) {
-               key_payload_reserve(keyring,
-                                   keyring->datalen - KEYQUOTA_LINK_BYTES);
+       if (edit) {
+               if (!edit->dead_leaf) {
+                       key_payload_reserve(keyring,
+                               keyring->datalen - KEYQUOTA_LINK_BYTES);
+               }
                assoc_array_cancel_edit(edit);
        }
        up_write(&keyring->sem);
index d126c03361aef87fb1239df1e42f547c5ce36aa9..75888dd38a7fc87acf5fcd7789570ad353dce482 100644 (file)
@@ -85,7 +85,7 @@ static DECLARE_RWSEM(snd_pcm_link_rwsem);
 void snd_pcm_stream_lock(struct snd_pcm_substream *substream)
 {
        if (substream->pcm->nonatomic) {
-               down_read(&snd_pcm_link_rwsem);
+               down_read_nested(&snd_pcm_link_rwsem, SINGLE_DEPTH_NESTING);
                mutex_lock(&substream->self_group.mutex);
        } else {
                read_lock(&snd_pcm_link_rwlock);
index 2682e7e3e5c98511e8bd26fddf452427f1e83a6a..c670db4eee70d42c91db2904354bf391267c22ef 100644 (file)
@@ -248,6 +248,8 @@ efw_probe(struct fw_unit *unit,
        err = get_hardware_info(efw);
        if (err < 0)
                goto error;
+       if (entry->model_id == MODEL_ECHO_AUDIOFIRE_2)
+               efw->is_af2 = true;
        if (entry->model_id == MODEL_ECHO_AUDIOFIRE_9)
                efw->is_af9 = true;
 
index 4f0201a95222a2502ec438199fe9e78cad33b647..c33252b7bc847501c4fc2c7b2c18d07a9ec18898 100644 (file)
@@ -70,6 +70,7 @@ struct snd_efw {
        bool resp_addr_changable;
 
        /* for quirks */
+       bool is_af2;
        bool is_af9;
        u32 firmware_version;
 
index c55db1bddc80a0ceab4997279643f73840943cef..a0762dd6231e6080eb9ab86343c413db0bd64492 100644 (file)
@@ -172,6 +172,9 @@ int snd_efw_stream_init_duplex(struct snd_efw *efw)
        efw->tx_stream.flags |= CIP_DBC_IS_END_EVENT;
        /* Fireworks reset dbc at bus reset. */
        efw->tx_stream.flags |= CIP_SKIP_DBC_ZERO_CHECK;
+       /* AudioFire2 starts packets with non-zero dbc. */
+       if (efw->is_af2)
+               efw->tx_stream.flags |= CIP_SKIP_INIT_DBC_CHECK;
        /* AudioFire9 always reports wrong dbs. */
        if (efw->is_af9)
                efw->tx_stream.flags |= CIP_WRONG_DBS;
index 442500e06b7c7b66be05ec254a68e58d009089fe..5676b849379d43468267e8d12fc7367096a2ae4c 100644 (file)
@@ -56,8 +56,11 @@ int snd_hdac_display_power(struct hdac_bus *bus, bool enable)
                enable ? "enable" : "disable");
 
        if (enable) {
-               if (!bus->i915_power_refcount++)
+               if (!bus->i915_power_refcount++) {
                        acomp->ops->get_power(acomp->dev);
+                       snd_hdac_set_codec_wakeup(bus, true);
+                       snd_hdac_set_codec_wakeup(bus, false);
+               }
        } else {
                WARN_ON(!bus->i915_power_refcount);
                if (!--bus->i915_power_refcount)
index 745535d1840a6713e802aaa8c1d475733b88720e..c38c68f579381d657786945baa3ab99b3ccae787 100644 (file)
@@ -867,7 +867,7 @@ static int azx_suspend(struct device *dev)
 
        chip = card->private_data;
        hda = container_of(chip, struct hda_intel, chip);
-       if (chip->disabled || hda->init_failed)
+       if (chip->disabled || hda->init_failed || !chip->running)
                return 0;
 
        bus = azx_bus(chip);
@@ -902,7 +902,7 @@ static int azx_resume(struct device *dev)
 
        chip = card->private_data;
        hda = container_of(chip, struct hda_intel, chip);
-       if (chip->disabled || hda->init_failed)
+       if (chip->disabled || hda->init_failed || !chip->running)
                return 0;
 
        if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL
@@ -979,14 +979,16 @@ static int azx_runtime_resume(struct device *dev)
        if (!azx_has_pm_runtime(chip))
                return 0;
 
-       if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL
-               && hda->need_i915_power) {
-               bus =  azx_bus(chip);
-               snd_hdac_display_power(bus, true);
-               haswell_set_bclk(hda);
-               /* toggle codec wakeup bit for STATESTS read */
-               snd_hdac_set_codec_wakeup(bus, true);
-               snd_hdac_set_codec_wakeup(bus, false);
+       if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL) {
+               bus = azx_bus(chip);
+               if (hda->need_i915_power) {
+                       snd_hdac_display_power(bus, true);
+                       haswell_set_bclk(hda);
+               } else {
+                       /* toggle codec wakeup bit for STATESTS read */
+                       snd_hdac_set_codec_wakeup(bus, true);
+                       snd_hdac_set_codec_wakeup(bus, false);
+               }
        }
 
        /* Read STATESTS before controller reset */
@@ -1025,7 +1027,7 @@ static int azx_runtime_idle(struct device *dev)
                return 0;
 
        if (!power_save_controller || !azx_has_pm_runtime(chip) ||
-           azx_bus(chip)->codec_powered)
+           azx_bus(chip)->codec_powered || !chip->running)
                return -EBUSY;
 
        return 0;
@@ -2182,6 +2184,8 @@ static const struct pci_device_id azx_ids[] = {
        /* ATI HDMI */
        { PCI_DEVICE(0x1002, 0x1308),
          .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS },
+       { PCI_DEVICE(0x1002, 0x157a),
+         .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS },
        { PCI_DEVICE(0x1002, 0x793b),
          .driver_data = AZX_DRIVER_ATIHDMI | AZX_DCAPS_PRESET_ATI_HDMI },
        { PCI_DEVICE(0x1002, 0x7919),
@@ -2236,8 +2240,14 @@ static const struct pci_device_id azx_ids[] = {
          .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS },
        { PCI_DEVICE(0x1002, 0xaab0),
          .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS },
+       { PCI_DEVICE(0x1002, 0xaac0),
+         .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS },
        { PCI_DEVICE(0x1002, 0xaac8),
          .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS },
+       { PCI_DEVICE(0x1002, 0xaad8),
+         .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS },
+       { PCI_DEVICE(0x1002, 0xaae8),
+         .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS },
        /* VIA VT8251/VT8237A */
        { PCI_DEVICE(0x1106, 0x3288),
          .driver_data = AZX_DRIVER_VIA | AZX_DCAPS_POSFIX_VIA },
index 25ccf781fbe705cb5b2980814f7d4f90aff2d4a6..584a0343ab0cc132b7c2038679923b6617926cf7 100644 (file)
@@ -999,9 +999,7 @@ static void cs4210_spdif_automute(struct hda_codec *codec,
 
        spec->spdif_present = spdif_present;
        /* SPDIF TX on/off */
-       if (spdif_present)
-               snd_hda_set_pin_ctl(codec, spdif_pin,
-                                   spdif_present ? PIN_OUT : 0);
+       snd_hda_set_pin_ctl(codec, spdif_pin, spdif_present ? PIN_OUT : 0);
 
        cs_automute(codec);
 }
index 95158914cc6ce0c8761c04fde20f81a4e21e1931..a97db5fc8a151aa43c0c99edc6ccf7efc0eaa52a 100644 (file)
@@ -3512,6 +3512,7 @@ static const struct hda_codec_preset snd_hda_preset_hdmi[] = {
 { .id = 0x10de0070, .name = "GPU 70 HDMI/DP",  .patch = patch_nvhdmi },
 { .id = 0x10de0071, .name = "GPU 71 HDMI/DP",  .patch = patch_nvhdmi },
 { .id = 0x10de0072, .name = "GPU 72 HDMI/DP",  .patch = patch_nvhdmi },
+{ .id = 0x10de007d, .name = "GPU 7d HDMI/DP",  .patch = patch_nvhdmi },
 { .id = 0x10de8001, .name = "MCP73 HDMI",      .patch = patch_nvhdmi_2ch },
 { .id = 0x11069f80, .name = "VX900 HDMI/DP",   .patch = patch_via_hdmi },
 { .id = 0x11069f81, .name = "VX900 HDMI/DP",   .patch = patch_via_hdmi },
@@ -3576,6 +3577,7 @@ MODULE_ALIAS("snd-hda-codec-id:10de0067");
 MODULE_ALIAS("snd-hda-codec-id:10de0070");
 MODULE_ALIAS("snd-hda-codec-id:10de0071");
 MODULE_ALIAS("snd-hda-codec-id:10de0072");
+MODULE_ALIAS("snd-hda-codec-id:10de007d");
 MODULE_ALIAS("snd-hda-codec-id:10de8001");
 MODULE_ALIAS("snd-hda-codec-id:11069f80");
 MODULE_ALIAS("snd-hda-codec-id:11069f81");
index d35cf506a7dbc386a5aa003251d274ab9a305797..c456c04e0928d2c4ef9e65d114bdec3bf6b9332f 100644 (file)
@@ -2222,7 +2222,7 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
        SND_PCI_QUIRK(0x106b, 0x4300, "iMac 9,1", ALC889_FIXUP_IMAC91_VREF),
        SND_PCI_QUIRK(0x106b, 0x4600, "MacbookPro 5,2", ALC889_FIXUP_IMAC91_VREF),
        SND_PCI_QUIRK(0x106b, 0x4900, "iMac 9,1 Aluminum", ALC889_FIXUP_IMAC91_VREF),
-       SND_PCI_QUIRK(0x106b, 0x4a00, "Macbook 5,2", ALC889_FIXUP_IMAC91_VREF),
+       SND_PCI_QUIRK(0x106b, 0x4a00, "Macbook 5,2", ALC889_FIXUP_MBA11_VREF),
 
        SND_PCI_QUIRK(0x1071, 0x8258, "Evesham Voyaeger", ALC882_FIXUP_EAPD),
        SND_PCI_QUIRK(0x1462, 0x7350, "MSI-7350", ALC889_FIXUP_CD),
@@ -5061,7 +5061,7 @@ static const struct hda_fixup alc269_fixups[] = {
                        { 0x14, 0x90170110 },
                        { 0x17, 0x40000008 },
                        { 0x18, 0x411111f0 },
-                       { 0x19, 0x411111f0 },
+                       { 0x19, 0x01a1913c },
                        { 0x1a, 0x411111f0 },
                        { 0x1b, 0x411111f0 },
                        { 0x1d, 0x40f89b2d },
@@ -5185,6 +5185,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x1028, 0x064a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1028, 0x064b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1028, 0x0665, "Dell XPS 13", ALC288_FIXUP_DELL_XPS_13),
+       SND_PCI_QUIRK(0x1028, 0x069a, "Dell Vostro 5480", ALC290_FIXUP_SUBWOOFER_HSJACK),
        SND_PCI_QUIRK(0x1028, 0x06c7, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1028, 0x06d9, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1028, 0x06da, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
@@ -5398,8 +5399,6 @@ static const struct hda_model_fixup alc269_fixup_models[] = {
        {0x19, 0x411111f0}, \
        {0x1a, 0x411111f0}, \
        {0x1b, 0x411111f0}, \
-       {0x1d, 0x40700001}, \
-       {0x1e, 0x411111f0}, \
        {0x21, 0x02211020}
 
 #define ALC282_STANDARD_PINS \
@@ -5430,8 +5429,7 @@ static const struct hda_model_fixup alc269_fixup_models[] = {
        {0x15, 0x0221401f}, \
        {0x1a, 0x411111f0}, \
        {0x1b, 0x411111f0}, \
-       {0x1d, 0x40700001}, \
-       {0x1e, 0x411111f0}
+       {0x1d, 0x40700001}
 
 #define ALC298_STANDARD_PINS \
        {0x18, 0x411111f0}, \
@@ -5462,6 +5460,39 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
                {0x17, 0x40000000},
                {0x1d, 0x40700001},
                {0x21, 0x02211030}),
+       SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
+               {0x12, 0x40000000},
+               {0x14, 0x90170130},
+               {0x17, 0x411111f0},
+               {0x18, 0x411111f0},
+               {0x19, 0x411111f0},
+               {0x1a, 0x411111f0},
+               {0x1b, 0x01014020},
+               {0x1d, 0x4054c029},
+               {0x1e, 0x411111f0},
+               {0x21, 0x0221103f}),
+       SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
+               {0x12, 0x40000000},
+               {0x14, 0x90170150},
+               {0x17, 0x411111f0},
+               {0x18, 0x411111f0},
+               {0x19, 0x411111f0},
+               {0x1a, 0x411111f0},
+               {0x1b, 0x02011020},
+               {0x1d, 0x4054c029},
+               {0x1e, 0x411111f0},
+               {0x21, 0x0221105f}),
+       SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
+               {0x12, 0x40000000},
+               {0x14, 0x90170110},
+               {0x17, 0x411111f0},
+               {0x18, 0x411111f0},
+               {0x19, 0x411111f0},
+               {0x1a, 0x411111f0},
+               {0x1b, 0x01014020},
+               {0x1d, 0x4054c029},
+               {0x1e, 0x411111f0},
+               {0x21, 0x0221101f}),
        SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
                {0x12, 0x90a60160},
                {0x14, 0x90170120},
@@ -5524,10 +5555,19 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
                {0x21, 0x02211030}),
        SND_HDA_PIN_QUIRK(0x10ec0256, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
                ALC256_STANDARD_PINS,
-               {0x13, 0x40000000}),
+               {0x13, 0x40000000},
+               {0x1d, 0x40700001},
+               {0x1e, 0x411111f0}),
+       SND_HDA_PIN_QUIRK(0x10ec0256, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
+               ALC256_STANDARD_PINS,
+               {0x13, 0x411111f0},
+               {0x1d, 0x40700001},
+               {0x1e, 0x411111f0}),
        SND_HDA_PIN_QUIRK(0x10ec0256, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
                ALC256_STANDARD_PINS,
-               {0x13, 0x411111f0}),
+               {0x13, 0x411111f0},
+               {0x1d, 0x4077992d},
+               {0x1e, 0x411111ff}),
        SND_HDA_PIN_QUIRK(0x10ec0280, 0x103c, "HP", ALC280_FIXUP_HP_GPIO4,
                {0x12, 0x90a60130},
                {0x13, 0x40000000},
@@ -5690,35 +5730,48 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
                {0x13, 0x411111f0},
                {0x16, 0x01014020},
                {0x18, 0x411111f0},
-               {0x19, 0x01a19030}),
+               {0x19, 0x01a19030},
+               {0x1e, 0x411111f0}),
        SND_HDA_PIN_QUIRK(0x10ec0292, 0x1028, "Dell", ALC269_FIXUP_DELL2_MIC_NO_PRESENCE,
                ALC292_STANDARD_PINS,
                {0x12, 0x90a60140},
                {0x13, 0x411111f0},
                {0x16, 0x01014020},
                {0x18, 0x02a19031},
-               {0x19, 0x01a1903e}),
+               {0x19, 0x01a1903e},
+               {0x1e, 0x411111f0}),
        SND_HDA_PIN_QUIRK(0x10ec0292, 0x1028, "Dell", ALC269_FIXUP_DELL3_MIC_NO_PRESENCE,
                ALC292_STANDARD_PINS,
                {0x12, 0x90a60140},
                {0x13, 0x411111f0},
                {0x16, 0x411111f0},
                {0x18, 0x411111f0},
-               {0x19, 0x411111f0}),
+               {0x19, 0x411111f0},
+               {0x1e, 0x411111f0}),
        SND_HDA_PIN_QUIRK(0x10ec0293, 0x1028, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE,
                ALC292_STANDARD_PINS,
                {0x12, 0x40000000},
                {0x13, 0x90a60140},
                {0x16, 0x21014020},
                {0x18, 0x411111f0},
-               {0x19, 0x21a19030}),
+               {0x19, 0x21a19030},
+               {0x1e, 0x411111f0}),
        SND_HDA_PIN_QUIRK(0x10ec0293, 0x1028, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE,
                ALC292_STANDARD_PINS,
                {0x12, 0x40000000},
                {0x13, 0x90a60140},
                {0x16, 0x411111f0},
                {0x18, 0x411111f0},
-               {0x19, 0x411111f0}),
+               {0x19, 0x411111f0},
+               {0x1e, 0x411111f0}),
+       SND_HDA_PIN_QUIRK(0x10ec0293, 0x1028, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE,
+               ALC292_STANDARD_PINS,
+               {0x12, 0x40000000},
+               {0x13, 0x90a60140},
+               {0x16, 0x21014020},
+               {0x18, 0x411111f0},
+               {0x19, 0x21a19030},
+               {0x1e, 0x411111ff}),
        SND_HDA_PIN_QUIRK(0x10ec0298, 0x1028, "Dell", ALC298_FIXUP_DELL1_MIC_NO_PRESENCE,
                ALC298_STANDARD_PINS,
                {0x12, 0x90a60130},
index dcc7fe91244c2dfada5c97fbb42df06dbd857bbf..9d947aef2c8b60b99f63fd9464ad289bef0c7061 100644
@@ -2920,7 +2920,8 @@ static const struct snd_pci_quirk stac92hd83xxx_fixup_tbl[] = {
        SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x148a,
                      "HP Mini", STAC_92HD83XXX_HP_LED),
        SND_PCI_QUIRK_VENDOR(PCI_VENDOR_ID_HP, "HP", STAC_92HD83XXX_HP),
-       SND_PCI_QUIRK(PCI_VENDOR_ID_TOSHIBA, 0xfa91,
+       /* match both for 0xfa91 and 0xfa93 */
+       SND_PCI_QUIRK_MASK(PCI_VENDOR_ID_TOSHIBA, 0xfffd, 0xfa91,
                      "Toshiba Satellite S50D", STAC_92HD83XXX_GPIO10_EAPD),
        {} /* terminator */
 };
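
The masked quirk entry above matches any Toshiba subsystem ID that equals 0xfa91 after ANDing with 0xfffd; because 0xfa91 and 0xfa93 differ only in bit 1, both IDs hit the same fixup. A minimal standalone sketch of that arithmetic, where quirk_matches() is a stand-in for the ALSA matcher and not part of the patch:

/* Illustrative only: an ID matches when (id & mask) == value.
 * With mask 0xfffd bit 1 is ignored, so 0xfa91 and 0xfa93 both
 * match, while 0xfa95 does not. */
#include <stdio.h>

static int quirk_matches(unsigned int id, unsigned int mask, unsigned int value)
{
        return (id & mask) == value;
}

int main(void)
{
        unsigned int ids[] = { 0xfa91, 0xfa93, 0xfa95 };

        for (unsigned int i = 0; i < 3; i++)
                printf("0x%04x -> %s\n", ids[i],
                       quirk_matches(ids[i], 0xfffd, 0xfa91) ? "match" : "no match");
        return 0;
}
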
index 477e13d309713e56a5a4416d054350185fea0007..e7ba557979cb2589439234d6bd0ddbbdc38985e5 100644
@@ -102,7 +102,7 @@ static int pcm1681_set_deemph(struct snd_soc_codec *codec)
 
        if (val != -1) {
                regmap_update_bits(priv->regmap, PCM1681_DEEMPH_CONTROL,
-                                       PCM1681_DEEMPH_RATE_MASK, val);
+                                  PCM1681_DEEMPH_RATE_MASK, val << 3);
                enable = 1;
        } else
                enable = 0;
index 9ce311e088fc514bf8cde70dd120a3074f78bfe5..e9cc3aae5366d30d0522b105ed11fdb88943948f 100644
@@ -2943,6 +2943,9 @@ static int rt5645_irq_detection(struct rt5645_priv *rt5645)
 {
        int val, btn_type, gpio_state = 0, report = 0;
 
+       if (!rt5645->codec)
+               return -EINVAL;
+
        switch (rt5645->pdata.jd_mode) {
        case 0: /* Not using rt5645 JD */
                if (rt5645->gpiod_hp_det) {
index bd7a344bf8c517edf69af2fda9e06e08488e47e0..1c317de2617623438bd169d9ba7cb89623139794 100644
 #define SGTL5000_BIAS_CTRL_MASK                        0x000e
 #define SGTL5000_BIAS_CTRL_SHIFT               1
 #define SGTL5000_BIAS_CTRL_WIDTH               3
-#define SGTL5000_SMALL_POP                     0
+#define SGTL5000_SMALL_POP                     1
 
 /*
  * SGTL5000_CHIP_MIC_CTRL
index 938d2cb6d78bee03e809f02462b17f951f0c953a..84a4f5ad80645f2f45079ea35fe8ce19da901f09 100644
@@ -315,7 +315,13 @@ static int ssm4567_set_dai_fmt(struct snd_soc_dai *dai, unsigned int fmt)
        if (invert_fclk)
                ctrl1 |= SSM4567_SAI_CTRL_1_FSYNC;
 
-       return regmap_write(ssm4567->regmap, SSM4567_REG_SAI_CTRL_1, ctrl1);
+       return regmap_update_bits(ssm4567->regmap, SSM4567_REG_SAI_CTRL_1,
+                       SSM4567_SAI_CTRL_1_BCLK |
+                       SSM4567_SAI_CTRL_1_FSYNC |
+                       SSM4567_SAI_CTRL_1_LJ |
+                       SSM4567_SAI_CTRL_1_TDM |
+                       SSM4567_SAI_CTRL_1_PDM,
+                       ctrl1);
 }
 
 static int ssm4567_set_power(struct ssm4567 *ssm4567, bool enable)
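
Both the pcm1681 and ssm4567 hunks above rely on regmap_update_bits(), which does a read-modify-write so that only the bits covered by the mask change; the pcm1681 fix additionally shifts the value left by 3 so it lands inside the field mask. A minimal sketch of that semantic, assuming a made-up 3-bit field at bit offset 3 rather than the real register layouts:

/* Illustrative emulation of the regmap_update_bits() read-modify-write
 * semantics: only bits set in 'mask' change, everything else in the
 * register value is preserved. Not the kernel implementation. */
#include <stdio.h>

static unsigned int update_bits(unsigned int reg, unsigned int mask, unsigned int val)
{
        return (reg & ~mask) | (val & mask);
}

int main(void)
{
        unsigned int reg = 0xA5;                 /* pretend current register value */

        /* write 0x2 into a 3-bit field at offset 3: mask 0x38, value 0x2 << 3 */
        reg = update_bits(reg, 0x38, 0x2 << 3);
        printf("0x%02x\n", reg);                 /* prints 0x95; bits outside 0x38 untouched */
        return 0;
}
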
index c7647e066cfd76b07f6b1acd4d2a68ac04bae3bc..c0b940e2019f55f34bb53673a99a4594d7409ef3 100644
@@ -633,7 +633,7 @@ static int fsl_ssi_set_bclk(struct snd_pcm_substream *substream,
                sub *= 100000;
                do_div(sub, freq);
 
-               if (sub < savesub) {
+               if (sub < savesub && !(i == 0 && psr == 0 && div2 == 0)) {
                        baudrate = tmprate;
                        savesub = sub;
                        pm = i;
index 3853ec2ddbc758d2c558dfcbe093d2cee59af8a1..6de5d5cd3280a92bf9d1b0ab71a5171e82a5bb0b 100644
@@ -7,4 +7,4 @@ obj-$(CONFIG_SND_SOC_INTEL_BAYTRAIL) += baytrail/
 obj-$(CONFIG_SND_SST_MFLD_PLATFORM) += atom/
 
 # Machine support
-obj-$(CONFIG_SND_SOC_INTEL_SST) += boards/
+obj-$(CONFIG_SND_SOC) += boards/
index 620da1d1b9e3ea4fbde293c3e98fae4c30708ac1..0e0e4d9c021ff29bdf1f19eda2827ac459e4046c 100644
 #define MIN_FRAGMENT_SIZE (50 * 1024)
 #define MAX_FRAGMENT_SIZE (1024 * 1024)
 #define SST_GET_BYTES_PER_SAMPLE(pcm_wd_sz)  (((pcm_wd_sz + 15) >> 4) << 1)
+#ifdef CONFIG_PM
+#define GET_USAGE_COUNT(dev) (atomic_read(&dev->power.usage_count))
+#else
+#define GET_USAGE_COUNT(dev) 1
+#endif
 
 int free_stream_context(struct intel_sst_drv *ctx, unsigned int str_id)
 {
@@ -141,15 +146,9 @@ static int sst_power_control(struct device *dev, bool state)
        int ret = 0;
        int usage_count = 0;
 
-#ifdef CONFIG_PM
-       usage_count = atomic_read(&dev->power.usage_count);
-#else
-       usage_count = 1;
-#endif
-
        if (state == true) {
                ret = pm_runtime_get_sync(dev);
-
+               usage_count = GET_USAGE_COUNT(dev);
                dev_dbg(ctx->dev, "Enable: pm usage count: %d\n", usage_count);
                if (ret < 0) {
                        dev_err(ctx->dev, "Runtime get failed with err: %d\n", ret);
@@ -164,6 +163,7 @@ static int sst_power_control(struct device *dev, bool state)
                        }
                }
        } else {
+               usage_count = GET_USAGE_COUNT(dev);
                dev_dbg(ctx->dev, "Disable: pm usage count: %d\n", usage_count);
                return sst_pm_runtime_put(ctx);
        }
index d604ee80eda4be715601db603181e0291cc98747..70f832114a5aeffeeb7b0cc555f8f1b91bc40665 100644
@@ -69,12 +69,12 @@ static const struct snd_soc_dapm_route cht_audio_map[] = {
        {"Headphone", NULL, "HPR"},
        {"Ext Spk", NULL, "SPKL"},
        {"Ext Spk", NULL, "SPKR"},
-       {"AIF1 Playback", NULL, "ssp2 Tx"},
+       {"HiFi Playback", NULL, "ssp2 Tx"},
        {"ssp2 Tx", NULL, "codec_out0"},
        {"ssp2 Tx", NULL, "codec_out1"},
        {"codec_in0", NULL, "ssp2 Rx" },
        {"codec_in1", NULL, "ssp2 Rx" },
-       {"ssp2 Rx", NULL, "AIF1 Capture"},
+       {"ssp2 Rx", NULL, "HiFi Capture"},
 };
 
 static const struct snd_kcontrol_new cht_mc_controls[] = {
index 4d44b5803e55206c02006b9359449df377477b6c..2d2536af141fc9157b2ecb402a969af6c808faf6 100644
@@ -103,7 +103,6 @@ static struct snd_soc_dai_link mt8173_max98090_dais[] = {
                .name = "MAX98090 Playback",
                .stream_name = "MAX98090 Playback",
                .cpu_dai_name = "DL1",
-               .platform_name = "11220000.mt8173-afe-pcm",
                .codec_name = "snd-soc-dummy",
                .codec_dai_name = "snd-soc-dummy-dai",
                .trigger = {SND_SOC_DPCM_TRIGGER_POST, SND_SOC_DPCM_TRIGGER_POST},
@@ -114,7 +113,6 @@ static struct snd_soc_dai_link mt8173_max98090_dais[] = {
                .name = "MAX98090 Capture",
                .stream_name = "MAX98090 Capture",
                .cpu_dai_name = "VUL",
-               .platform_name = "11220000.mt8173-afe-pcm",
                .codec_name = "snd-soc-dummy",
                .codec_dai_name = "snd-soc-dummy-dai",
                .trigger = {SND_SOC_DPCM_TRIGGER_POST, SND_SOC_DPCM_TRIGGER_POST},
@@ -125,7 +123,6 @@ static struct snd_soc_dai_link mt8173_max98090_dais[] = {
        {
                .name = "Codec",
                .cpu_dai_name = "I2S",
-               .platform_name = "11220000.mt8173-afe-pcm",
                .no_pcm = 1,
                .codec_dai_name = "HiFi",
                .init = mt8173_max98090_init,
@@ -152,9 +149,21 @@ static struct snd_soc_card mt8173_max98090_card = {
 static int mt8173_max98090_dev_probe(struct platform_device *pdev)
 {
        struct snd_soc_card *card = &mt8173_max98090_card;
-       struct device_node *codec_node;
+       struct device_node *codec_node, *platform_node;
        int ret, i;
 
+       platform_node = of_parse_phandle(pdev->dev.of_node,
+                                        "mediatek,platform", 0);
+       if (!platform_node) {
+               dev_err(&pdev->dev, "Property 'platform' missing or invalid\n");
+               return -EINVAL;
+       }
+       for (i = 0; i < card->num_links; i++) {
+               if (mt8173_max98090_dais[i].platform_name)
+                       continue;
+               mt8173_max98090_dais[i].platform_of_node = platform_node;
+       }
+
        codec_node = of_parse_phandle(pdev->dev.of_node,
                                      "mediatek,audio-codec", 0);
        if (!codec_node) {
index 0940553230596aed2ba41787b8605e26c2cc11c7..6f52eca05e2600f34c5b76deb77db81ab6fb9290 100644
@@ -138,7 +138,6 @@ static struct snd_soc_dai_link mt8173_rt5650_rt5676_dais[] = {
                .name = "rt5650_rt5676 Playback",
                .stream_name = "rt5650_rt5676 Playback",
                .cpu_dai_name = "DL1",
-               .platform_name = "11220000.mt8173-afe-pcm",
                .codec_name = "snd-soc-dummy",
                .codec_dai_name = "snd-soc-dummy-dai",
                .trigger = {SND_SOC_DPCM_TRIGGER_POST, SND_SOC_DPCM_TRIGGER_POST},
@@ -149,7 +148,6 @@ static struct snd_soc_dai_link mt8173_rt5650_rt5676_dais[] = {
                .name = "rt5650_rt5676 Capture",
                .stream_name = "rt5650_rt5676 Capture",
                .cpu_dai_name = "VUL",
-               .platform_name = "11220000.mt8173-afe-pcm",
                .codec_name = "snd-soc-dummy",
                .codec_dai_name = "snd-soc-dummy-dai",
                .trigger = {SND_SOC_DPCM_TRIGGER_POST, SND_SOC_DPCM_TRIGGER_POST},
@@ -161,7 +159,6 @@ static struct snd_soc_dai_link mt8173_rt5650_rt5676_dais[] = {
        {
                .name = "Codec",
                .cpu_dai_name = "I2S",
-               .platform_name = "11220000.mt8173-afe-pcm",
                .no_pcm = 1,
                .codecs = mt8173_rt5650_rt5676_codecs,
                .num_codecs = 2,
@@ -209,7 +206,21 @@ static struct snd_soc_card mt8173_rt5650_rt5676_card = {
 static int mt8173_rt5650_rt5676_dev_probe(struct platform_device *pdev)
 {
        struct snd_soc_card *card = &mt8173_rt5650_rt5676_card;
-       int ret;
+       struct device_node *platform_node;
+       int i, ret;
+
+       platform_node = of_parse_phandle(pdev->dev.of_node,
+                                        "mediatek,platform", 0);
+       if (!platform_node) {
+               dev_err(&pdev->dev, "Property 'platform' missing or invalid\n");
+               return -EINVAL;
+       }
+
+       for (i = 0; i < card->num_links; i++) {
+               if (mt8173_rt5650_rt5676_dais[i].platform_name)
+                       continue;
+               mt8173_rt5650_rt5676_dais[i].platform_of_node = platform_node;
+       }
 
        mt8173_rt5650_rt5676_codecs[0].of_node =
                of_parse_phandle(pdev->dev.of_node, "mediatek,audio-codec", 0);
index cc228db5fb760eaa24f98d91b0b14f0eded80ea9..9863da73dfe03a0f057d9897fd89e7d4d1614194 100644
@@ -1199,6 +1199,8 @@ err_pm_disable:
 static int mtk_afe_pcm_dev_remove(struct platform_device *pdev)
 {
        pm_runtime_disable(&pdev->dev);
+       if (!pm_runtime_status_suspended(&pdev->dev))
+               mtk_afe_runtime_suspend(&pdev->dev);
        snd_soc_unregister_component(&pdev->dev);
        snd_soc_unregister_platform(&pdev->dev);
        return 0;
index 3a4a5c0e3f9737c795f74367b04c9825462651f8..0e1e69c7abd56b25fab6e4ffb0eab5fd574d03d2 100644
@@ -1716,6 +1716,7 @@ card_probe_error:
        if (card->remove)
                card->remove(card);
 
+       snd_soc_dapm_free(&card->dapm);
        soc_cleanup_card_debugfs(card);
        snd_card_free(card->snd_card);
 
index aa327c92480c56c2609699380594a75ad765fbda..e0de8072c5144e92fce3ec9518787a4db526d726 100644
@@ -358,9 +358,10 @@ static int dapm_kcontrol_data_alloc(struct snd_soc_dapm_widget *widget,
                        data->widget =
                                snd_soc_dapm_new_control_unlocked(widget->dapm,
                                &template);
+                       kfree(name);
                        if (!data->widget) {
                                ret = -ENOMEM;
-                               goto err_name;
+                               goto err_data;
                        }
                }
                break;
@@ -389,11 +390,12 @@ static int dapm_kcontrol_data_alloc(struct snd_soc_dapm_widget *widget,
 
                        data->value = template.on_val;
 
-                       data->widget = snd_soc_dapm_new_control(widget->dapm,
-                                       &template);
+                       data->widget = snd_soc_dapm_new_control_unlocked(
+                                               widget->dapm, &template);
+                       kfree(name);
                        if (!data->widget) {
                                ret = -ENOMEM;
-                               goto err_name;
+                               goto err_data;
                        }
 
                        snd_soc_dapm_add_path(widget->dapm, data->widget,
@@ -408,8 +410,6 @@ static int dapm_kcontrol_data_alloc(struct snd_soc_dapm_widget *widget,
 
        return 0;
 
-err_name:
-       kfree(name);
 err_data:
        kfree(data);
        return ret;
@@ -418,8 +418,6 @@ err_data:
 static void dapm_kcontrol_free(struct snd_kcontrol *kctl)
 {
        struct dapm_kcontrol_data *data = snd_kcontrol_chip(kctl);
-       if (data->widget)
-               kfree(data->widget->name);
        kfree(data->wlist);
        kfree(data);
 }
@@ -1952,6 +1950,7 @@ static ssize_t dapm_widget_power_read_file(struct file *file,
                                           size_t count, loff_t *ppos)
 {
        struct snd_soc_dapm_widget *w = file->private_data;
+       struct snd_soc_card *card = w->dapm->card;
        char *buf;
        int in, out;
        ssize_t ret;
@@ -1961,6 +1960,8 @@ static ssize_t dapm_widget_power_read_file(struct file *file,
        if (!buf)
                return -ENOMEM;
 
+       mutex_lock(&card->dapm_mutex);
+
        /* Supply widgets are not handled by is_connected_{input,output}_ep() */
        if (w->is_supply) {
                in = 0;
@@ -2007,6 +2008,8 @@ static ssize_t dapm_widget_power_read_file(struct file *file,
                                        p->sink->name);
        }
 
+       mutex_unlock(&card->dapm_mutex);
+
        ret = simple_read_from_buffer(user_buf, count, ppos, buf, ret);
 
        kfree(buf);
@@ -2281,11 +2284,15 @@ static ssize_t dapm_widget_show(struct device *dev,
        struct snd_soc_pcm_runtime *rtd = dev_get_drvdata(dev);
        int i, count = 0;
 
+       mutex_lock(&rtd->card->dapm_mutex);
+
        for (i = 0; i < rtd->num_codecs; i++) {
                struct snd_soc_codec *codec = rtd->codec_dais[i]->codec;
                count += dapm_widget_show_codec(codec, buf + count);
        }
 
+       mutex_unlock(&rtd->card->dapm_mutex);
+
        return count;
 }
 
@@ -3334,16 +3341,10 @@ snd_soc_dapm_new_control_unlocked(struct snd_soc_dapm_context *dapm,
        }
 
        prefix = soc_dapm_prefix(dapm);
-       if (prefix) {
+       if (prefix)
                w->name = kasprintf(GFP_KERNEL, "%s %s", prefix, widget->name);
-               if (widget->sname)
-                       w->sname = kasprintf(GFP_KERNEL, "%s %s", prefix,
-                                            widget->sname);
-       } else {
+       else
                w->name = kasprintf(GFP_KERNEL, "%s", widget->name);
-               if (widget->sname)
-                       w->sname = kasprintf(GFP_KERNEL, "%s", widget->sname);
-       }
        if (w->name == NULL) {
                kfree(w);
                return NULL;
@@ -3792,7 +3793,7 @@ int snd_soc_dapm_link_dai_widgets(struct snd_soc_card *card)
                                break;
                        }
 
-                       if (!w->sname || !strstr(w->sname, dai_w->name))
+                       if (!w->sname || !strstr(w->sname, dai_w->sname))
                                continue;
 
                        if (dai_w->id == snd_soc_dapm_dai_in) {
index d0960683c4093c4303743b1e7e4f93190e11a3a5..59ac211f8fe7c273c84dcd67ac94bb27cb760564 100644
@@ -144,7 +144,7 @@ static const struct snd_soc_tplg_kcontrol_ops io_ops[] = {
        {SND_SOC_TPLG_CTL_STROBE, snd_soc_get_strobe,
                snd_soc_put_strobe, NULL},
        {SND_SOC_TPLG_DAPM_CTL_VOLSW, snd_soc_dapm_get_volsw,
-               snd_soc_dapm_put_volsw, NULL},
+               snd_soc_dapm_put_volsw, snd_soc_info_volsw},
        {SND_SOC_TPLG_DAPM_CTL_ENUM_DOUBLE, snd_soc_dapm_get_enum_double,
                snd_soc_dapm_put_enum_double, snd_soc_info_enum_double},
        {SND_SOC_TPLG_DAPM_CTL_ENUM_VIRT, snd_soc_dapm_get_enum_double,
@@ -580,27 +580,26 @@ static int soc_tplg_init_kcontrol(struct soc_tplg *tplg,
 }
 
 static int soc_tplg_create_tlv(struct soc_tplg *tplg,
-       struct snd_kcontrol_new *kc, u32 tlv_size)
+       struct snd_kcontrol_new *kc, struct snd_soc_tplg_ctl_tlv *tplg_tlv)
 {
-       struct snd_soc_tplg_ctl_tlv *tplg_tlv;
        struct snd_ctl_tlv *tlv;
+       int size;
 
-       if (tlv_size == 0)
+       if (tplg_tlv->count == 0)
                return 0;
 
-       tplg_tlv = (struct snd_soc_tplg_ctl_tlv *) tplg->pos;
-       tplg->pos += tlv_size;
-
-       tlv = kzalloc(sizeof(*tlv) + tlv_size, GFP_KERNEL);
+       size = ((tplg_tlv->count + (sizeof(unsigned int) - 1)) &
+               ~(sizeof(unsigned int) - 1));
+       tlv = kzalloc(sizeof(*tlv) + size, GFP_KERNEL);
        if (tlv == NULL)
                return -ENOMEM;
 
        dev_dbg(tplg->dev, " created TLV type %d size %d bytes\n",
-               tplg_tlv->numid, tplg_tlv->size);
+               tplg_tlv->numid, size);
 
        tlv->numid = tplg_tlv->numid;
-       tlv->length = tplg_tlv->size;
-       memcpy(tlv->tlv, tplg_tlv + 1, tplg_tlv->size);
+       tlv->length = size;
+       memcpy(&tlv->tlv[0], tplg_tlv->data, size);
        kc->tlv.p = (void *)tlv;
 
        return 0;
@@ -773,7 +772,7 @@ static int soc_tplg_dmixer_create(struct soc_tplg *tplg, unsigned int count,
                }
 
                /* create any TLV data */
-               soc_tplg_create_tlv(tplg, &kc, mc->hdr.tlv_size);
+               soc_tplg_create_tlv(tplg, &kc, &mc->tlv);
 
                /* register control here */
                err = soc_tplg_add_kcontrol(tplg, &kc,
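
The soc_tplg_create_tlv() change above sizes the TLV copy by rounding the reported byte count up to a multiple of sizeof(unsigned int). A worked example of that round-up idiom (illustrative only; round_up_pow2() is a stand-in name, and sizeof(unsigned int) is assumed to be 4 as on common platforms):

/* (n + (align - 1)) & ~(align - 1) rounds n up to the next multiple
 * of 'align' when align is a power of two. */
#include <stdio.h>

static unsigned int round_up_pow2(unsigned int n, unsigned int align)
{
        return (n + (align - 1)) & ~(align - 1);
}

int main(void)
{
        printf("%u %u %u\n",
               round_up_pow2(5, 4),     /* 8  */
               round_up_pow2(8, 4),     /* 8  */
               round_up_pow2(9, 4));    /* 12 */
        return 0;
}
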
index 98d96e1b17e05847aaa2b48740f3f9f0cc2c6e2a..1930c42e1f557ae62c30003b379088f242b0dbfe 100644
@@ -393,9 +393,9 @@ static int zx_i2s_probe(struct platform_device *pdev)
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        zx_i2s->mapbase = res->start;
        zx_i2s->reg_base = devm_ioremap_resource(&pdev->dev, res);
-       if (!zx_i2s->reg_base) {
+       if (IS_ERR(zx_i2s->reg_base)) {
                dev_err(&pdev->dev, "ioremap failed!\n");
-               return -EIO;
+               return PTR_ERR(zx_i2s->reg_base);
        }
 
        writel_relaxed(0, zx_i2s->reg_base + ZX_I2S_FIFO_CTRL);
index 11a0e46a1156913a23d9f31852fa508cbd1f8c5f..26265ce4caca17da4b6315a4627130ac63851063 100644
@@ -322,9 +322,9 @@ static int zx_spdif_probe(struct platform_device *pdev)
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        zx_spdif->mapbase = res->start;
        zx_spdif->reg_base = devm_ioremap_resource(&pdev->dev, res);
-       if (!zx_spdif->reg_base) {
+       if (IS_ERR(zx_spdif->reg_base)) {
                dev_err(&pdev->dev, "ioremap failed!\n");
-               return -EIO;
+               return PTR_ERR(zx_spdif->reg_base);
        }
 
        zx_spdif_dev_init(zx_spdif->reg_base);
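
The two zx296702 hunks above fix the same mistake: devm_ioremap_resource() never returns NULL on failure, it returns an errno encoded with ERR_PTR(), so the result must be tested with IS_ERR() and decoded with PTR_ERR(). A userspace sketch of that convention, with simplified stand-ins for the kernel helpers:

/* Errors are encoded as pointer values in the last page of the address
 * space, so a failed mapping is never NULL. Simplified emulation for
 * illustration only, not the kernel's err.h. */
#include <stdio.h>

#define MAX_ERRNO 4095

static void *ERR_PTR(long error)      { return (void *)error; }
static long  PTR_ERR(const void *ptr) { return (long)ptr; }
static int   IS_ERR(const void *ptr)
{
        return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

int main(void)
{
        void *base = ERR_PTR(-12);          /* pretend the mapping failed with -ENOMEM */

        if (IS_ERR(base))                   /* the correct check */
                printf("mapping failed: %ld\n", PTR_ERR(base));
        if (base == NULL)                   /* the old NULL check never fires */
                printf("never printed\n");
        return 0;
}
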
index 1b1a89e80d1394bb253aecc3cadc683dbcff3a36..784ceb85b2d9fe7cbe989d0c8443863f1d03df34 100644
@@ -956,6 +956,7 @@ static int snd_amd7930_create(struct snd_card *card,
        if (!amd->regs) {
                snd_printk(KERN_ERR
                           "amd7930-%d: Unable to map chip registers.\n", dev);
+               kfree(amd);
                return -EIO;
        }
 
index e5000da9e9d7093f6e287194665de2d63f046e93..6a803eff87f71110049d9c39cb07025d2c64d828 100644
@@ -341,6 +341,20 @@ static const struct usbmix_name_map scms_usb3318_map[] = {
        { 0 }
 };
 
+/* Bose companion 5, the dB conversion factor is 16 instead of 256 */
+static struct usbmix_dB_map bose_companion5_dB = {-5006, -6};
+static struct usbmix_name_map bose_companion5_map[] = {
+       { 3, NULL, .dB = &bose_companion5_dB },
+       { 0 }   /* terminator */
+};
+
+/* Dragonfly DAC 1.2, the dB conversion factor is 1 instead of 256 */
+static struct usbmix_dB_map dragonfly_1_2_dB = {0, 5000};
+static struct usbmix_name_map dragonfly_1_2_map[] = {
+       { 7, NULL, .dB = &dragonfly_1_2_dB },
+       { 0 }   /* terminator */
+};
+
 /*
  * Control map entries
  */
@@ -451,6 +465,16 @@ static struct usbmix_ctl_map usbmix_ctl_maps[] = {
                .id = USB_ID(0x25c4, 0x0003),
                .map = scms_usb3318_map,
        },
+       {
+               /* Bose Companion 5 */
+               .id = USB_ID(0x05a7, 0x1020),
+               .map = bose_companion5_map,
+       },
+       {
+               /* Dragonfly DAC 1.2 */
+               .id = USB_ID(0x21b4, 0x0081),
+               .map = dragonfly_1_2_map,
+       },
        { 0 } /* terminator */
 };
 
index 7f0c756993af15a0b5cb99ba50dd6f7d0dfca226..3d7dc6afc3f8f9459ca407ee5a16b1b2c9a8119b 100644
@@ -191,7 +191,7 @@ int main(int argc, char *argv[])
                if (res > 0) {
                        atomic_set(&requeued, 1);
                        break;
-               } else if (res > 0) {
+               } else if (res < 0) {
                        error("FUTEX_CMP_REQUEUE_PI failed\n", errno);
                        ret = RET_ERROR;
                        break;
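
The selftest fix above turns a duplicated success test into the intended error branch: a positive return from the requeue call means waiters were requeued, a negative return is a failure, and the old "else if (res > 0)" could never fire. A minimal sketch of the corrected three-way check, with a hypothetical do_requeue() standing in for the futex wrapper:

/* Illustrative only: distinguish "requeued some waiters", "error",
 * and "nothing to do" instead of testing the success case twice. */
#include <stdio.h>

static int do_requeue(void)
{
        return 2;       /* pretend two waiters were requeued */
}

int main(void)
{
        int res = do_requeue();

        if (res > 0)
                printf("requeued %d waiter(s)\n", res);
        else if (res < 0)
                printf("requeue failed\n");
        else
                printf("no waiters to requeue\n");
        return 0;
}
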