Merge branch 'perf/urgent' into perf/core, to pick up fixes
author     Ingo Molnar <mingo@kernel.org>
           Thu, 8 Jun 2017 08:12:12 +0000 (10:12 +0200)
committer  Ingo Molnar <mingo@kernel.org>
           Thu, 8 Jun 2017 08:12:12 +0000 (10:12 +0200)
Signed-off-by: Ingo Molnar <mingo@kernel.org>
466 files changed:
Documentation/devicetree/bindings/input/touchscreen/edt-ft5x06.txt
Documentation/devicetree/bindings/mfd/hisilicon,hi655x.txt
Documentation/devicetree/bindings/mmc/mmc-pwrseq-simple.txt
Documentation/devicetree/bindings/net/fsl-fec.txt
Documentation/devicetree/bindings/pinctrl/pinctrl-bindings.txt
Documentation/input/devices/edt-ft5x06.rst
Documentation/sound/hd-audio/models.rst
MAINTAINERS
Makefile
arch/arm/boot/compressed/efi-header.S
arch/arm/boot/compressed/head.S
arch/arm/common/mcpm_entry.c
arch/arm/include/asm/pgtable-nommu.h
arch/arm64/boot/dts/hisilicon/hi6220-hikey.dts
arch/arm64/boot/dts/hisilicon/hi6220.dtsi
arch/arm64/include/asm/acpi.h
arch/arm64/kernel/pci.c
arch/frv/include/asm/timex.h
arch/mips/kernel/process.c
arch/openrisc/kernel/process.c
arch/powerpc/include/uapi/asm/cputable.h
arch/powerpc/kernel/cputable.c
arch/powerpc/kernel/prom.c
arch/powerpc/platforms/cell/spu_base.c
arch/powerpc/platforms/powernv/npu-dma.c
arch/x86/Kconfig
arch/x86/Makefile
arch/x86/boot/compressed/Makefile
arch/x86/entry/entry_32.S
arch/x86/entry/entry_64.S
arch/x86/include/asm/mce.h
arch/x86/kernel/alternative.c
arch/x86/kernel/cpu/mcheck/mce.c
arch/x86/kernel/cpu/microcode/amd.c
arch/x86/kernel/ftrace.c
arch/x86/kernel/kprobes/core.c
arch/x86/kernel/process_32.c
arch/x86/kernel/setup.c
arch/x86/kernel/unwind_frame.c
arch/x86/kvm/lapic.c
arch/x86/kvm/svm.c
arch/x86/kvm/vmx.c
arch/x86/kvm/x86.c
arch/x86/mm/pageattr.c
arch/x86/platform/efi/efi.c
arch/x86/platform/efi/efi_64.c
arch/x86/platform/efi/quirks.c
block/blk-cgroup.c
block/blk-core.c
block/blk-mq.c
block/blk-sysfs.c
block/blk-throttle.c
block/blk.h
block/cfq-iosched.c
block/partition-generic.c
block/partitions/msdos.c
drivers/acpi/acpica/tbutils.c
drivers/acpi/button.c
drivers/acpi/nfit/mce.c
drivers/acpi/sysfs.c
drivers/ata/ahci.c
drivers/ata/libahci_platform.c
drivers/ata/libata-core.c
drivers/ata/sata_mv.c
drivers/ata/sata_rcar.c
drivers/block/nbd.c
drivers/block/rbd.c
drivers/char/pcmcia/cm4040_cs.c
drivers/char/random.c
drivers/cpufreq/cpufreq.c
drivers/cpufreq/kirkwood-cpufreq.c
drivers/dma/ep93xx_dma.c
drivers/dma/mv_xor_v2.c
drivers/dma/pl330.c
drivers/dma/sh/rcar-dmac.c
drivers/dma/sh/usb-dmac.c
drivers/firmware/dmi-id.c
drivers/firmware/dmi_scan.c
drivers/firmware/efi/efi-bgrt.c
drivers/firmware/efi/libstub/secureboot.c
drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
drivers/gpu/drm/amd/amdgpu/ci_dpm.c
drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c
drivers/gpu/drm/drm_dp_helper.c
drivers/gpu/drm/drm_plane.c
drivers/gpu/drm/exynos/exynos_drm_drv.c
drivers/gpu/drm/exynos/exynos_drm_drv.h
drivers/gpu/drm/exynos/exynos_drm_dsi.c
drivers/gpu/drm/gma500/psb_intel_lvds.c
drivers/gpu/drm/i915/gvt/execlist.c
drivers/gpu/drm/i915/gvt/handlers.c
drivers/gpu/drm/i915/i915_drv.c
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/i915_gem_gtt.c
drivers/gpu/drm/i915/i915_gem_shrinker.c
drivers/gpu/drm/i915/i915_irq.c
drivers/gpu/drm/i915/i915_reg.h
drivers/gpu/drm/i915/intel_display.c
drivers/gpu/drm/i915/intel_dp.c
drivers/gpu/drm/i915/intel_dp_mst.c
drivers/gpu/drm/i915/intel_drv.h
drivers/gpu/drm/i915/intel_lpe_audio.c
drivers/gpu/drm/i915/intel_lrc.c
drivers/gpu/drm/i915/intel_lspcon.c
drivers/gpu/drm/i915/selftests/i915_gem_context.c
drivers/gpu/drm/msm/Kconfig
drivers/gpu/drm/msm/mdp/mdp5/mdp5_mdss.c
drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
drivers/gpu/drm/msm/msm_drv.c
drivers/gpu/drm/msm/msm_drv.h
drivers/gpu/drm/msm/msm_fence.c
drivers/gpu/drm/msm/msm_gem.c
drivers/gpu/drm/msm/msm_gem_prime.c
drivers/gpu/drm/msm/msm_gem_submit.c
drivers/gpu/drm/msm/msm_gpu.c
drivers/gpu/drm/qxl/qxl_display.c
drivers/gpu/drm/radeon/ci_dpm.c
drivers/gpu/drm/radeon/cik.c
drivers/gpu/drm/radeon/evergreen.c
drivers/gpu/drm/radeon/r600.c
drivers/gpu/drm/radeon/radeon_kms.c
drivers/gpu/drm/radeon/si.c
drivers/hid/Kconfig
drivers/hid/hid-asus.c
drivers/hid/hid-core.c
drivers/hid/hid-elecom.c
drivers/hid/hid-ids.h
drivers/hid/hid-magicmouse.c
drivers/hid/i2c-hid/i2c-hid.c
drivers/hid/wacom_wac.c
drivers/hwmon/Kconfig
drivers/hwmon/aspeed-pwm-tacho.c
drivers/i2c/busses/i2c-tiny-usb.c
drivers/infiniband/core/cm.c
drivers/infiniband/core/cma.c
drivers/infiniband/core/core_priv.h
drivers/infiniband/core/netlink.c
drivers/infiniband/core/sa_query.c
drivers/infiniband/core/umem.c
drivers/infiniband/core/umem_odp.c
drivers/infiniband/core/uverbs_marshall.c
drivers/infiniband/hw/cxgb4/cm.c
drivers/infiniband/hw/cxgb4/device.c
drivers/infiniband/hw/hfi1/chip.c
drivers/infiniband/hw/hfi1/chip_registers.h
drivers/infiniband/hw/hfi1/hfi.h
drivers/infiniband/hw/hfi1/intr.c
drivers/infiniband/hw/hfi1/pcie.c
drivers/infiniband/hw/hfi1/rc.c
drivers/infiniband/hw/hfi1/sysfs.c
drivers/infiniband/hw/i40iw/i40iw_cm.c
drivers/infiniband/hw/i40iw/i40iw_ctrl.c
drivers/infiniband/hw/i40iw/i40iw_main.c
drivers/infiniband/hw/i40iw/i40iw_osdep.h
drivers/infiniband/hw/i40iw/i40iw_type.h
drivers/infiniband/hw/i40iw/i40iw_utils.c
drivers/infiniband/hw/i40iw/i40iw_virtchnl.c
drivers/infiniband/hw/mlx4/mad.c
drivers/infiniband/hw/mlx5/main.c
drivers/infiniband/hw/mlx5/mlx5_ib.h
drivers/infiniband/hw/mlx5/qp.c
drivers/infiniband/hw/nes/nes_cm.c
drivers/infiniband/hw/qedr/qedr_cm.c
drivers/infiniband/hw/qib/qib_rc.c
drivers/infiniband/ulp/ipoib/ipoib_ethtool.c
drivers/infiniband/ulp/ipoib/ipoib_main.c
drivers/infiniband/ulp/srp/ib_srp.c
drivers/input/keyboard/tm2-touchkey.c
drivers/input/misc/axp20x-pek.c
drivers/input/mouse/elan_i2c_i2c.c
drivers/input/mouse/synaptics.c
drivers/input/touchscreen/atmel_mxt_ts.c
drivers/input/touchscreen/edt-ft5x06.c
drivers/input/touchscreen/silead.c
drivers/leds/leds-pca955x.c
drivers/md/bitmap.c
drivers/md/dm-bufio.c
drivers/md/dm-integrity.c
drivers/md/dm-ioctl.c
drivers/md/dm-raid1.c
drivers/md/dm-snap-persistent.c
drivers/md/dm-verity-target.c
drivers/md/dm.c
drivers/md/md-cluster.c
drivers/md/md.c
drivers/md/raid5-cache.c
drivers/md/raid5-ppl.c
drivers/md/raid5.c
drivers/media/platform/mtk-vcodec/vdec/vdec_h264_if.c
drivers/media/platform/mtk-vcodec/vdec/vdec_vp8_if.c
drivers/media/platform/mtk-vcodec/vdec/vdec_vp9_if.c
drivers/misc/sgi-xp/xp.h
drivers/misc/sgi-xp/xp_main.c
drivers/mmc/core/pwrseq_simple.c
drivers/mmc/host/cavium-octeon.c
drivers/mmc/host/cavium-thunderx.c
drivers/mmc/host/cavium.c
drivers/mmc/host/sdhci-iproc.c
drivers/mmc/host/sdhci-xenon-phy.c
drivers/mmc/host/sdhci-xenon.c
drivers/mmc/host/sdhci-xenon.h
drivers/mtd/nand/nand_base.c
drivers/mtd/nand/nand_ids.c
drivers/mtd/nand/nand_samsung.c
drivers/mtd/nand/tango_nand.c
drivers/net/bonding/bond_main.c
drivers/net/ethernet/8390/ax88796.c
drivers/net/ethernet/emulex/benet/be_main.c
drivers/net/ethernet/freescale/fec_main.c
drivers/net/ethernet/mellanox/mlx5/core/cmd.c
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
drivers/net/ethernet/mellanox/mlx5/core/eq.c
drivers/net/ethernet/mellanox/mlx5/core/health.c
drivers/net/ethernet/mellanox/mlx5/core/main.c
drivers/net/geneve.c
drivers/net/gtp.c
drivers/net/phy/Kconfig
drivers/net/phy/marvell.c
drivers/net/usb/cdc_ether.c
drivers/net/virtio_net.c
drivers/nvme/host/core.c
drivers/nvme/host/fc.c
drivers/nvme/host/nvme.h
drivers/nvme/host/pci.c
drivers/nvme/host/rdma.c
drivers/nvme/target/loop.c
drivers/of/platform.c
drivers/pci/dwc/pci-imx6.c
drivers/pci/endpoint/Kconfig
drivers/pci/pci.c
drivers/pci/switch/switchtec.c
drivers/perf/arm_pmu_acpi.c
drivers/pinctrl/core.c
drivers/pinctrl/freescale/pinctrl-mxs.c
drivers/pinctrl/intel/pinctrl-cherryview.c
drivers/pinctrl/pinconf-generic.c
drivers/pinctrl/pinmux.c
drivers/pinctrl/sunxi/pinctrl-sun8i-a83t.c
drivers/scsi/csiostor/csio_hw.c
drivers/scsi/cxgbi/libcxgbi.c
drivers/scsi/cxgbi/libcxgbi.h
drivers/scsi/device_handler/scsi_dh_rdac.c
drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
drivers/scsi/libfc/fc_rport.c
drivers/scsi/lpfc/lpfc.h
drivers/scsi/lpfc/lpfc_attr.c
drivers/scsi/lpfc/lpfc_crtn.h
drivers/scsi/lpfc/lpfc_ct.c
drivers/scsi/lpfc/lpfc_debugfs.c
drivers/scsi/lpfc/lpfc_disc.h
drivers/scsi/lpfc/lpfc_els.c
drivers/scsi/lpfc/lpfc_hbadisc.c
drivers/scsi/lpfc/lpfc_hw4.h
drivers/scsi/lpfc/lpfc_init.c
drivers/scsi/lpfc/lpfc_mem.c
drivers/scsi/lpfc/lpfc_nportdisc.c
drivers/scsi/lpfc/lpfc_nvmet.c
drivers/scsi/lpfc/lpfc_nvmet.h
drivers/scsi/lpfc/lpfc_sli.c
drivers/scsi/lpfc/lpfc_sli4.h
drivers/scsi/lpfc/lpfc_version.h
drivers/scsi/qedi/qedi.h
drivers/scsi/qedi/qedi_fw.c
drivers/scsi/qedi/qedi_iscsi.c
drivers/scsi/qedi/qedi_main.c
drivers/scsi/scsi_lib.c
drivers/scsi/sd.c
drivers/scsi/sg.c
drivers/scsi/ufs/ufshcd.c
drivers/target/iscsi/iscsi_target.c
drivers/target/iscsi/iscsi_target_erl0.c
drivers/target/iscsi/iscsi_target_erl0.h
drivers/target/iscsi/iscsi_target_login.c
drivers/target/iscsi/iscsi_target_nego.c
drivers/target/target_core_transport.c
drivers/target/target_core_user.c
drivers/thermal/broadcom/Kconfig
drivers/thermal/qoriq_thermal.c
drivers/thermal/thermal_core.c
drivers/thermal/ti-soc-thermal/ti-bandgap.c
drivers/tty/ehv_bytechan.c
drivers/tty/serdev/core.c
drivers/tty/serdev/serdev-ttyport.c
drivers/tty/serial/8250/8250_port.c
drivers/tty/serial/altera_jtaguart.c
drivers/tty/serial/altera_uart.c
drivers/tty/serial/efm32-uart.c
drivers/tty/serial/ifx6x60.c
drivers/tty/serial/imx.c
drivers/tty/serial/serial_core.c
drivers/tty/tty_port.c
fs/ceph/file.c
fs/dax.c
fs/gfs2/log.c
fs/nfs/flexfilelayout/flexfilelayout.c
fs/nfs/internal.h
fs/nfs/namespace.c
fs/nfs/nfs42proc.c
fs/nfs/nfs4client.c
fs/nfs/pnfs.c
fs/nfs/pnfs.h
fs/nfs/super.c
fs/nfsd/nfs3xdr.c
fs/nfsd/nfs4proc.c
fs/nfsd/nfsxdr.c
fs/ntfs/namei.c
fs/ocfs2/export.c
fs/overlayfs/Kconfig
fs/overlayfs/copy_up.c
fs/overlayfs/dir.c
fs/overlayfs/inode.c
fs/overlayfs/namei.c
fs/overlayfs/overlayfs.h
fs/overlayfs/ovl_entry.h
fs/overlayfs/super.c
fs/overlayfs/util.c
fs/proc/base.c
fs/reiserfs/journal.c
fs/ufs/super.c
fs/xfs/libxfs/xfs_bmap.c
fs/xfs/libxfs/xfs_btree.c
fs/xfs/libxfs/xfs_refcount.c
fs/xfs/xfs_bmap_util.c
fs/xfs/xfs_buf.c
fs/xfs/xfs_buf.h
fs/xfs/xfs_file.c
fs/xfs/xfs_fsmap.c
include/drm/drm_dp_helper.h
include/linux/blk-mq.h
include/linux/ceph/ceph_debug.h
include/linux/cgroup-defs.h
include/linux/cgroup.h
include/linux/filter.h
include/linux/gfp.h
include/linux/gpio/machine.h
include/linux/if_vlan.h
include/linux/jiffies.h
include/linux/memblock.h
include/linux/mlx5/device.h
include/linux/mlx5/driver.h
include/linux/mlx5/mlx5_ifc.h
include/linux/mm.h
include/linux/mmzone.h
include/linux/mod_devicetable.h
include/linux/of_platform.h
include/linux/pci.h
include/linux/pinctrl/pinconf-generic.h
include/linux/ptrace.h
include/linux/serdev.h
include/linux/sunrpc/svc.h
include/linux/tty.h
include/linux/usb/usbnet.h
include/net/dst.h
include/net/ip_fib.h
include/net/tc_act/tc_csum.h
include/net/xfrm.h
include/rdma/ib_sa.h
include/rdma/rdma_netlink.h
include/target/iscsi/iscsi_target_core.h
kernel/bpf/arraymap.c
kernel/bpf/lpm_trie.c
kernel/bpf/stackmap.c
kernel/bpf/verifier.c
kernel/cgroup/cgroup.c
kernel/cgroup/cpuset.c
kernel/events/core.c
kernel/fork.c
kernel/kprobes.c
kernel/livepatch/Kconfig
kernel/locking/rtmutex.c
kernel/ptrace.c
kernel/time/posix-cpu-timers.c
kernel/trace/ftrace.c
lib/test_bpf.c
mm/gup.c
mm/hugetlb.c
mm/ksm.c
mm/memblock.c
mm/memory-failure.c
mm/memory.c
mm/mlock.c
mm/page_alloc.c
mm/slub.c
mm/util.c
net/ceph/auth_x.c
net/ceph/ceph_common.c
net/ceph/messenger.c
net/ceph/mon_client.c
net/ceph/osdmap.c
net/core/dst.c
net/core/filter.c
net/core/net_namespace.c
net/core/rtnetlink.c
net/core/sysctl_net_core.c
net/ipv4/arp.c
net/ipv4/esp4.c
net/ipv4/fib_semantics.c
net/ipv4/route.c
net/ipv4/tcp.c
net/ipv6/ip6_gre.c
net/ipv6/ip6_tunnel.c
net/key/af_key.c
net/llc/af_llc.c
net/mac80211/rx.c
net/sctp/associola.c
net/sctp/input.c
net/sctp/sm_make_chunk.c
net/sctp/sm_statefuns.c
net/sunrpc/xprtrdma/backchannel.c
net/sunrpc/xprtsock.c
net/wireless/scan.c
net/wireless/util.c
net/xfrm/xfrm_device.c
net/xfrm/xfrm_policy.c
net/xfrm/xfrm_state.c
scripts/gdb/linux/dmesg.py
sound/pci/hda/patch_realtek.c
sound/pci/hda/patch_sigmatel.c
sound/usb/mixer_us16x08.c
sound/usb/quirks.c
tools/arch/arm/include/uapi/asm/kvm.h
tools/arch/arm64/include/uapi/asm/kvm.h
tools/arch/powerpc/include/uapi/asm/kvm.h
tools/arch/s390/include/uapi/asm/kvm.h
tools/arch/x86/include/asm/cpufeatures.h
tools/arch/x86/include/asm/disabled-features.h
tools/arch/x86/include/asm/required-features.h
tools/arch/x86/include/uapi/asm/kvm.h
tools/arch/x86/include/uapi/asm/vmx.h
tools/include/linux/filter.h
tools/include/uapi/linux/stat.h
tools/perf/Documentation/perf-script.txt
tools/perf/arch/common.c
tools/perf/builtin-script.c
tools/perf/builtin-stat.c
tools/perf/builtin-trace.c
tools/perf/tests/bp_signal.c
tools/perf/tests/builtin-test.c
tools/perf/tests/tests.h
tools/perf/ui/hist.c
tools/perf/util/annotate.c
tools/perf/util/callchain.c
tools/perf/util/dso.c
tools/perf/util/dso.h
tools/perf/util/evsel_fprintf.c
tools/perf/util/header.c
tools/perf/util/machine.c
tools/perf/util/srcline.c
tools/perf/util/symbol-elf.c
tools/perf/util/unwind-libdw.c
tools/perf/util/unwind-libunwind-local.c
tools/testing/selftests/bpf/test_verifier.c
tools/testing/selftests/ftrace/test.d/kprobe/multiple_kprobes.tc [new file with mode: 0644]
tools/testing/selftests/powerpc/tm/tm-resched-dscr.c
usr/Kconfig

index 6db22103e2dd535952143a2bae9ba4c6790c6e56..025cf8c9324ac362eeee56b5d4f9df09db466180 100644 (file)
@@ -36,7 +36,7 @@ Optional properties:
                 control gpios
 
  - threshold:   allows setting the "click"-threshold in the range
-                from 20 to 80.
+                from 0 to 80.
 
  - gain:        allows setting the sensitivity in the range from 0 to
                 31. Note that lower values indicate higher
index 05485699d70e7c85cb25eb83f39b86f5be0daaca..9630ac0e4b56ef372f08a0850175be06d13cd9a7 100644 (file)
@@ -16,6 +16,11 @@ Required properties:
 - reg:                  Base address of PMIC on Hi6220 SoC.
 - interrupt-controller: Hi655x has internal IRQs (has own IRQ domain).
 - pmic-gpios:           The GPIO used by PMIC IRQ.
+- #clock-cells:                From common clock binding; shall be set to 0
+
+Optional properties:
+- clock-output-names: From common clock binding to override the
+  default output clock name
 
 Example:
        pmic: pmic@f8000000 {
@@ -24,4 +29,5 @@ Example:
                interrupt-controller;
                #interrupt-cells = <2>;
                pmic-gpios = <&gpio1 2 GPIO_ACTIVE_HIGH>;
+               #clock-cells = <0>;
        }
index e25436861867f28f75a2450ecbf28d1fd2df3921..9029b45b8a22a8d5b6a1afa3184bf389a85202f4 100644 (file)
@@ -18,6 +18,8 @@ Optional properties:
   "ext_clock" (External clock provided to the card).
 - post-power-on-delay-ms : Delay in ms after powering the card and
        de-asserting the reset-gpios (if any)
+- power-off-delay-us : Delay in us after asserting the reset-gpios (if any)
+       during power off of the card.
 
 Example:
 
index a1e3693cca1601e47096dc2edc2a9580b8b0569a..6f55bdd52f8a99be3c0741cf6411a00554c85778 100644 (file)
@@ -15,6 +15,10 @@ Optional properties:
 - phy-reset-active-high : If present then the reset sequence using the GPIO
   specified in the "phy-reset-gpios" property is reversed (H=reset state,
   L=operation state).
+- phy-reset-post-delay : Post reset delay in milliseconds. If present then
+  a delay of phy-reset-post-delay milliseconds will be observed after the
+  phy-reset-gpios has been toggled. Can be omitted, in which case no delay is
+  observed. The delay must be in the range of 1ms to 1000ms; other delays are invalid.
 - phy-supply : regulator that powers the Ethernet PHY.
 - phy-handle : phandle to the PHY device connected to this device.
 - fixed-link : Assume a fixed link. See fixed-link.txt in the same directory.
index 71a3c134af1b25d58613699aa465d17cff2c23d0..f01d154090dab1b3a8a58fee6a599459392aa1f8 100644 (file)
@@ -247,7 +247,6 @@ bias-bus-hold               - latch weakly
 bias-pull-up           - pull up the pin
 bias-pull-down         - pull down the pin
 bias-pull-pin-default  - use pin-default pull state
-bi-directional         - pin supports simultaneous input/output operations
 drive-push-pull                - drive actively high and low
 drive-open-drain       - drive with open drain
 drive-open-source      - drive with open source
@@ -260,7 +259,6 @@ input-debounce              - debounce mode with debound time X
 power-source           - select between different power supplies
 low-power-enable       - enable low power mode
 low-power-disable      - disable low power mode
-output-enable          - enable output on pin regardless of output value
 output-low             - set the pin to output mode with low level
 output-high            - set the pin to output mode with high level
 slew-rate              - set the slew rate
index 2032f0b7a8fa125dc3c88842dbeccb25a41a3453..1ccc94b192b7edacd8a42ff81de923b4a35a27e7 100644 (file)
@@ -15,7 +15,7 @@ It has been tested with the following devices:
 The driver allows configuration of the touch screen via a set of sysfs files:
 
 /sys/class/input/eventX/device/device/threshold:
-    allows setting the "click"-threshold in the range from 20 to 80.
+    allows setting the "click"-threshold in the range from 0 to 80.
 
 /sys/class/input/eventX/device/device/gain:
     allows setting the sensitivity in the range from 0 to 31. Note that
index 5338673c88d950e21dbed11f7f030544eeb7fa2e..773d2bfacc6cc6b8e714fd89c249296d647d3f1f 100644 (file)
@@ -16,6 +16,8 @@ ALC880
     6-jack in back, 2-jack in front
 6stack-digout
     6-jack with a SPDIF out
+6stack-automute
+    6-jack with headphone jack detection
 
 ALC260
 ======
@@ -62,6 +64,8 @@ lenovo-dock
     Enables docking station I/O for some Lenovos
 hp-gpio-led
     GPIO LED support on HP laptops
+hp-dock-gpio-mic1-led
+    HP dock with mic LED support
 dell-headset-multi
     Headset jack, which can also be used as mic-in
 dell-headset-dock
@@ -72,6 +76,12 @@ alc283-sense-combo
     Combo jack sensing on ALC283
 tpt440-dock
     Pin configs for Lenovo Thinkpad Dock support
+tpt440
+    Lenovo Thinkpad T440s setup
+tpt460
+    Lenovo Thinkpad T460/560 setup
+dual-codecs
+    Lenovo laptops with dual codecs
 
 ALC66x/67x/892
 ==============
@@ -97,6 +107,8 @@ inv-dmic
     Inverted internal mic workaround
 dell-headset-multi
     Headset jack, which can also be used as mic-in
+dual-codecs
+    Lenovo laptops with dual codecs
 
 ALC680
 ======
@@ -114,6 +126,8 @@ inv-dmic
     Inverted internal mic workaround
 no-primary-hp
     VAIO Z/VGC-LN51JGB workaround (for fixed speaker DAC)
+dual-codecs
+    ALC1220 dual codecs for Gaming mobos
 
 ALC861/660
 ==========
@@ -206,65 +220,47 @@ auto
 
 Conexant 5045
 =============
-laptop-hpsense
-    Laptop with HP sense (old model laptop)
-laptop-micsense
-    Laptop with Mic sense (old model fujitsu)
-laptop-hpmicsense
-    Laptop with HP and Mic senses
-benq
-    Benq R55E
-laptop-hp530
-    HP 530 laptop
-test
-    for testing/debugging purpose, almost all controls can be
-    adjusted.  Appearing only when compiled with $CONFIG_SND_DEBUG=y
+cap-mix-amp
+    Fix max input level on mixer widget
+toshiba-p105
+    Toshiba P105 quirk
+hp-530
+    HP 530 quirk
 
 Conexant 5047
 =============
-laptop
-    Basic Laptop config 
-laptop-hp
-    Laptop config for some HP models (subdevice 30A5)
-laptop-eapd
-    Laptop config with EAPD support
-test
-    for testing/debugging purpose, almost all controls can be
-    adjusted.  Appearing only when compiled with $CONFIG_SND_DEBUG=y
+cap-mix-amp
+    Fix max input level on mixer widget
 
 Conexant 5051
 =============
-laptop
-    Basic Laptop config (default)
-hp
-    HP Spartan laptop
-hp-dv6736
-    HP dv6736
-hp-f700
-    HP Compaq Presario F700
-ideapad
-    Lenovo IdeaPad laptop
-toshiba
-    Toshiba Satellite M300
+lenovo-x200
+    Lenovo X200 quirk
 
 Conexant 5066
 =============
-laptop
-    Basic Laptop config (default)
-hp-laptop
-    HP laptops, e g G60
-asus
-    Asus K52JU, Lenovo G560
-dell-laptop
-    Dell laptops
-dell-vostro
-    Dell Vostro
-olpc-xo-1_5
-    OLPC XO 1.5
-ideapad
-    Lenovo IdeaPad U150
+stereo-dmic
+    Workaround for inverted stereo digital mic
+gpio1
+    Enable GPIO1 pin
+headphone-mic-pin
+    Enable headphone mic NID 0x18 without detection
+tp410
+    Thinkpad T400 & co quirks
 thinkpad
-    Lenovo Thinkpad
+    Thinkpad mute/mic LED quirk
+lemote-a1004
+    Lemote A1004 quirk
+lemote-a1205
+    Lemote A1205 quirk
+olpc-xo
+    OLPC XO quirk
+mute-led-eapd
+    Mute LED control via EAPD
+hp-dock
+    HP dock support
+mute-led-gpio
+    Mute LED control via GPIO
 
 STAC9200
 ========
@@ -444,6 +440,8 @@ dell-eq
     Dell desktops/laptops
 alienware
     Alienware M17x
+asus-mobo
+    Pin configs for ASUS mobo with 5.1/SPDIF out
 auto
     BIOS setup (default)
 
@@ -477,6 +475,8 @@ hp-envy-ts-bass
     Pin fixup for HP Envy TS bass speaker (NID 0x10)
 hp-bnb13-eq
     Hardware equalizer setup for HP laptops
+hp-envy-ts-bass
+    HP Envy TS bass support
 auto
     BIOS setup (default)
 
@@ -496,10 +496,22 @@ auto
 
 Cirrus Logic CS4206/4207
 ========================
+mbp53
+    MacBook Pro 5,3
 mbp55
     MacBook Pro 5,5
 imac27
     IMac 27 Inch
+imac27_122
+    iMac 12,2
+apple
+    Generic Apple quirk
+mbp101
+    MacBookPro 10,1
+mbp81
+    MacBookPro 8,1
+mba42
+    MacBookAir 4,2
 auto
     BIOS setup (default)
 
@@ -509,6 +521,10 @@ mba6
     MacBook Air 6,1 and 6,2
 gpio0
     Enable GPIO 0 amp
+mbp11
+    MacBookPro 11,2
+macmini
+    MacMini 7,1
 auto
     BIOS setup (default)
 
index 9e984645c4b08b9e20ebc839704a5d25b0c32032..7a28acd7f525ce7c918015be587e1a7bcfab17b9 100644 (file)
@@ -7143,7 +7143,7 @@ S:        Maintained
 F:     drivers/media/platform/rcar_jpu.c
 
 JSM Neo PCI based serial card
-M:     Gabriel Krisman Bertazi <krisman@linux.vnet.ibm.com>
+M:     Guilherme G. Piccoli <gpiccoli@linux.vnet.ibm.com>
 L:     linux-serial@vger.kernel.org
 S:     Maintained
 F:     drivers/tty/serial/jsm/
@@ -10450,7 +10450,7 @@ S:      Orphan
 
 PXA RTC DRIVER
 M:     Robert Jarzmik <robert.jarzmik@free.fr>
-L:     rtc-linux@googlegroups.com
+L:     linux-rtc@vger.kernel.org
 S:     Maintained
 
 QAT DRIVER
@@ -10757,7 +10757,7 @@ X:      kernel/torture.c
 REAL TIME CLOCK (RTC) SUBSYSTEM
 M:     Alessandro Zummo <a.zummo@towertech.it>
 M:     Alexandre Belloni <alexandre.belloni@free-electrons.com>
-L:     rtc-linux@googlegroups.com
+L:     linux-rtc@vger.kernel.org
 Q:     http://patchwork.ozlabs.org/project/rtc-linux/list/
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/abelloni/linux.git
 S:     Maintained
index 63e10bd4f14ac25297b3841eec5b2be6ee03035b..853ae9179af93a0ca9751a7faf40372e7fdb15dd 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 4
 PATCHLEVEL = 12
 SUBLEVEL = 0
-EXTRAVERSION = -rc2
+EXTRAVERSION = -rc4
 NAME = Fearless Coyote
 
 # *DOCUMENTATION*
index 9d5dc4fda3c16710c0443b6a5043f212230f057d..3f7d1b74c5e02bd46730c58b0a66756c89b904ab 100644 (file)
                @ there.
                .inst   'M' | ('Z' << 8) | (0x1310 << 16)   @ tstne r0, #0x4d000
 #else
-               mov     r0, r0
+               W(mov)  r0, r0
 #endif
                .endm
 
                .macro  __EFI_HEADER
 #ifdef CONFIG_EFI_STUB
-               b       __efi_start
-
                .set    start_offset, __efi_start - start
                .org    start + 0x3c
                @
index 7c711ba614173d91d8c2fd6ff4ccb13980bb3109..8a756870c238435af684215c653f54a739f4f1a5 100644 (file)
@@ -130,19 +130,22 @@ start:
                .rept   7
                __nop
                .endr
-   ARM(                mov     r0, r0          )
-   ARM(                b       1f              )
- THUMB(                badr    r12, 1f         )
- THUMB(                bx      r12             )
+#ifndef CONFIG_THUMB2_KERNEL
+               mov     r0, r0
+#else
+ AR_CLASS(     sub     pc, pc, #3      )       @ A/R: switch to Thumb2 mode
+  M_CLASS(     nop.w                   )       @ M: already in Thumb2 mode
+               .thumb
+#endif
+               W(b)    1f
 
                .word   _magic_sig      @ Magic numbers to help the loader
                .word   _magic_start    @ absolute load/run zImage address
                .word   _magic_end      @ zImage end address
                .word   0x04030201      @ endianness flag
 
- THUMB(                .thumb                  )
-1:             __EFI_HEADER
-
+               __EFI_HEADER
+1:
  ARM_BE8(      setend  be              )       @ go BE8 if compiled for BE8
  AR_CLASS(     mrs     r9, cpsr        )
 #ifdef CONFIG_ARM_VIRT_EXT
index cf062472e07bcb4be470bf35ab029df3438dbc7e..2b913f17d50f5d91f50d3aa30e3a8a26c97847b9 100644 (file)
@@ -235,7 +235,7 @@ int mcpm_cpu_power_up(unsigned int cpu, unsigned int cluster)
        return ret;
 }
 
-typedef void (*phys_reset_t)(unsigned long);
+typedef typeof(cpu_reset) phys_reset_t;
 
 void mcpm_cpu_power_down(void)
 {
@@ -300,7 +300,7 @@ void mcpm_cpu_power_down(void)
         * on the CPU.
         */
        phys_reset = (phys_reset_t)(unsigned long)__pa_symbol(cpu_reset);
-       phys_reset(__pa_symbol(mcpm_entry_point));
+       phys_reset(__pa_symbol(mcpm_entry_point), false);
 
        /* should never get here */
        BUG();
@@ -389,7 +389,7 @@ static int __init nocache_trampoline(unsigned long _arg)
        __mcpm_cpu_down(cpu, cluster);
 
        phys_reset = (phys_reset_t)(unsigned long)__pa_symbol(cpu_reset);
-       phys_reset(__pa_symbol(mcpm_entry_point));
+       phys_reset(__pa_symbol(mcpm_entry_point), false);
        BUG();
 }
 
index 302240c19a5aa688e7bdab1ece506dfbeaccea4e..a0d726a47c8a272b722b0d5623021058da50e113 100644 (file)
@@ -66,6 +66,7 @@ typedef pte_t *pte_addr_t;
 #define pgprot_noncached(prot) (prot)
 #define pgprot_writecombine(prot) (prot)
 #define pgprot_dmacoherent(prot) (prot)
+#define pgprot_device(prot)    (prot)
 
 
 /*
index 75bce2d0b1a83fa9f953b55a75ebdcb2ff4dc526..49f6a6242cf9fa3d9cd64d4ef22f8abf74e8e885 100644 (file)
                };
        };
 
+       reg_sys_5v: regulator@0 {
+               compatible = "regulator-fixed";
+               regulator-name = "SYS_5V";
+               regulator-min-microvolt = <5000000>;
+               regulator-max-microvolt = <5000000>;
+               regulator-boot-on;
+               regulator-always-on;
+       };
+
+       reg_vdd_3v3: regulator@1 {
+               compatible = "regulator-fixed";
+               regulator-name = "VDD_3V3";
+               regulator-min-microvolt = <3300000>;
+               regulator-max-microvolt = <3300000>;
+               regulator-boot-on;
+               regulator-always-on;
+               vin-supply = <&reg_sys_5v>;
+       };
+
+       reg_5v_hub: regulator@2 {
+               compatible = "regulator-fixed";
+               regulator-name = "5V_HUB";
+               regulator-min-microvolt = <5000000>;
+               regulator-max-microvolt = <5000000>;
+               regulator-boot-on;
+               gpio = <&gpio0 7 0>;
+               regulator-always-on;
+               vin-supply = <&reg_sys_5v>;
+       };
+
+       wl1835_pwrseq: wl1835-pwrseq {
+               compatible = "mmc-pwrseq-simple";
+               /* WLAN_EN GPIO */
+               reset-gpios = <&gpio0 5 GPIO_ACTIVE_LOW>;
+               clocks = <&pmic>;
+               clock-names = "ext_clock";
+               power-off-delay-us = <10>;
+       };
+
        soc {
                spi0: spi@f7106000 {
                        status = "ok";
 
                /* GPIO blocks 16 thru 19 do not appear to be routed to pins */
 
+               dwmmc_0: dwmmc0@f723d000 {
+                       cap-mmc-highspeed;
+                       non-removable;
+                       bus-width = <0x8>;
+                       vmmc-supply = <&ldo19>;
+               };
+
+               dwmmc_1: dwmmc1@f723e000 {
+                       card-detect-delay = <200>;
+                       cap-sd-highspeed;
+                       sd-uhs-sdr12;
+                       sd-uhs-sdr25;
+                       sd-uhs-sdr50;
+                       vqmmc-supply = <&ldo7>;
+                       vmmc-supply = <&ldo10>;
+                       bus-width = <0x4>;
+                       disable-wp;
+                       cd-gpios = <&gpio1 0 1>;
+               };
+
                dwmmc_2: dwmmc2@f723f000 {
-                       ti,non-removable;
+                       bus-width = <0x4>;
                        non-removable;
-                       /* WL_EN */
-                       vmmc-supply = <&wlan_en_reg>;
+                       vmmc-supply = <&reg_vdd_3v3>;
+                       mmc-pwrseq = <&wl1835_pwrseq>;
 
                        #address-cells = <0x1>;
                        #size-cells = <0x0>;
                                interrupts = <3 IRQ_TYPE_EDGE_RISING>;
                        };
                };
-
-               wlan_en_reg: regulator@1 {
-                       compatible = "regulator-fixed";
-                       regulator-name = "wlan-en-regulator";
-                       regulator-min-microvolt = <1800000>;
-                       regulator-max-microvolt = <1800000>;
-                       /* WLAN_EN GPIO */
-                       gpio = <&gpio0 5 0>;
-                       /* WLAN card specific delay */
-                       startup-delay-us = <70000>;
-                       enable-active-high;
-               };
        };
 
        leds {
        pmic: pmic@f8000000 {
                compatible = "hisilicon,hi655x-pmic";
                reg = <0x0 0xf8000000 0x0 0x1000>;
+               #clock-cells = <0>;
                interrupt-controller;
                #interrupt-cells = <2>;
                pmic-gpios = <&gpio1 2 GPIO_ACTIVE_HIGH>;
index 1e5129b19280bf8195a9ac24e506ec096493b084..5013e4b2ea71930960021fff96b8528611964955 100644 (file)
                        status = "disabled";
                };
 
-               fixed_5v_hub: regulator@0 {
-                       compatible = "regulator-fixed";
-                       regulator-name = "fixed_5v_hub";
-                       regulator-min-microvolt = <5000000>;
-                       regulator-max-microvolt = <5000000>;
-                       regulator-boot-on;
-                       gpio = <&gpio0 7 0>;
-                       regulator-always-on;
-               };
-
                usb_phy: usbphy {
                        compatible = "hisilicon,hi6220-usb-phy";
                        #phy-cells = <0>;
-                       phy-supply = <&fixed_5v_hub>;
+                       phy-supply = <&reg_5v_hub>;
                        hisilicon,peripheral-syscon = <&sys_ctrl>;
                };
 
 
                dwmmc_0: dwmmc0@f723d000 {
                        compatible = "hisilicon,hi6220-dw-mshc";
-                       num-slots = <0x1>;
-                       cap-mmc-highspeed;
-                       non-removable;
                        reg = <0x0 0xf723d000 0x0 0x1000>;
                        interrupts = <0x0 0x48 0x4>;
                        clocks = <&sys_ctrl 2>, <&sys_ctrl 1>;
                        clock-names = "ciu", "biu";
                        resets = <&sys_ctrl PERIPH_RSTDIS0_MMC0>;
                        reset-names = "reset";
-                       bus-width = <0x8>;
-                       vmmc-supply = <&ldo19>;
                        pinctrl-names = "default";
                        pinctrl-0 = <&emmc_pmx_func &emmc_clk_cfg_func
                                     &emmc_cfg_func &emmc_rst_cfg_func>;
 
                dwmmc_1: dwmmc1@f723e000 {
                        compatible = "hisilicon,hi6220-dw-mshc";
-                       num-slots = <0x1>;
-                       card-detect-delay = <200>;
                        hisilicon,peripheral-syscon = <&ao_ctrl>;
-                       cap-sd-highspeed;
-                       sd-uhs-sdr12;
-                       sd-uhs-sdr25;
-                       sd-uhs-sdr50;
                        reg = <0x0 0xf723e000 0x0 0x1000>;
                        interrupts = <0x0 0x49 0x4>;
                        #address-cells = <0x1>;
                        clock-names = "ciu", "biu";
                        resets = <&sys_ctrl PERIPH_RSTDIS0_MMC1>;
                        reset-names = "reset";
-                       vqmmc-supply = <&ldo7>;
-                       vmmc-supply = <&ldo10>;
-                       bus-width = <0x4>;
-                       disable-wp;
-                       cd-gpios = <&gpio1 0 1>;
                        pinctrl-names = "default", "idle";
                        pinctrl-0 = <&sd_pmx_func &sd_clk_cfg_func &sd_cfg_func>;
                        pinctrl-1 = <&sd_pmx_idle &sd_clk_cfg_idle &sd_cfg_idle>;
 
                dwmmc_2: dwmmc2@f723f000 {
                        compatible = "hisilicon,hi6220-dw-mshc";
-                       num-slots = <0x1>;
                        reg = <0x0 0xf723f000 0x0 0x1000>;
                        interrupts = <0x0 0x4a 0x4>;
                        clocks = <&sys_ctrl HI6220_MMC2_CIUCLK>, <&sys_ctrl HI6220_MMC2_CLK>;
                        clock-names = "ciu", "biu";
                        resets = <&sys_ctrl PERIPH_RSTDIS0_MMC2>;
                        reset-names = "reset";
-                       bus-width = <0x4>;
-                       broken-cd;
                        pinctrl-names = "default", "idle";
                        pinctrl-0 = <&sdio_pmx_func &sdio_clk_cfg_func &sdio_cfg_func>;
                        pinctrl-1 = <&sdio_pmx_idle &sdio_clk_cfg_idle &sdio_cfg_idle>;
index 0e99978da3f05013d145132950ad204a92e3e4b0..59cca1d6ec547270adbd56a4e2265b9f9fc34375 100644 (file)
@@ -23,9 +23,9 @@
 #define ACPI_MADT_GICC_LENGTH  \
        (acpi_gbl_FADT.header.revision < 6 ? 76 : 80)
 
-#define BAD_MADT_GICC_ENTRY(entry, end)                                                \
-       (!(entry) || (unsigned long)(entry) + sizeof(*(entry)) > (end) ||       \
-        (entry)->header.length != ACPI_MADT_GICC_LENGTH)
+#define BAD_MADT_GICC_ENTRY(entry, end)                                        \
+       (!(entry) || (entry)->header.length != ACPI_MADT_GICC_LENGTH || \
+       (unsigned long)(entry) + ACPI_MADT_GICC_LENGTH > (end))
 
 /* Basic configuration for ACPI */
 #ifdef CONFIG_ACPI
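
For reference, the point of the reordering above: GICC entries have a revision-dependent length, so the span check must use ACPI_MADT_GICC_LENGTH rather than sizeof(*entry), and it must run only after the entry's own length field has been validated. A minimal standalone C sketch of the same bounds-check pattern follows; the struct and function names here are illustrative, not the kernel's:

	#include <stdbool.h>
	#include <stdint.h>

	struct madt_entry {
		uint8_t type;
		uint8_t length;		/* revision-dependent entry length */
	};

	/*
	 * Reject an entry unless:
	 *  1. the pointer is non-NULL,
	 *  2. its declared length matches what this revision expects,
	 *  3. the whole entry fits in the table: [entry, entry + len) <= end.
	 */
	static bool entry_is_valid(const struct madt_entry *e,
				   uintptr_t end, uint8_t expected_len)
	{
		return e &&
		       e->length == expected_len &&
		       (uintptr_t)e + expected_len <= end;
	}
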
index 4f0e3ebfea4b4f6496a783bd172abc05e7ec1d4a..c7e3e6387a4910a6377d78e10caf98cc1c20243b 100644 (file)
@@ -191,8 +191,10 @@ struct pci_bus *pci_acpi_scan_root(struct acpi_pci_root *root)
                return NULL;
 
        root_ops = kzalloc_node(sizeof(*root_ops), GFP_KERNEL, node);
-       if (!root_ops)
+       if (!root_ops) {
+               kfree(ri);
                return NULL;
+       }
 
        ri->cfg = pci_acpi_setup_ecam_mapping(root);
        if (!ri->cfg) {
index a89bddefdacf9194373a201c6c6c4cf8c4ac87c0..139093fab3260debefb4da2fbd33e37778784298 100644 (file)
@@ -16,5 +16,11 @@ static inline cycles_t get_cycles(void)
 #define vxtime_lock()          do {} while (0)
 #define vxtime_unlock()                do {} while (0)
 
+/* This attribute is used in include/linux/jiffies.h alongside
+ * __cacheline_aligned_in_smp. It is assumed that __cacheline_aligned_in_smp
+ * for frv does not contain another section specification.
+ */
+#define __jiffy_arch_data      __attribute__((__section__(".data")))
+
 #endif
 
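
For context, the consumer side of __jiffy_arch_data is the jiffies declaration in include/linux/jiffies.h. Roughly, as a sketch of the companion change (with a stub for the kernel-provided alignment macro so the snippet stands alone):

	/* Architectures may override __jiffy_arch_data to pin the
	 * declaration to a specific section; the default is empty. */
	#ifndef __jiffy_arch_data
	#define __jiffy_arch_data
	#endif

	#ifndef __cacheline_aligned_in_smp	/* provided by <linux/cache.h> */
	#define __cacheline_aligned_in_smp
	#endif

	extern unsigned long volatile __cacheline_aligned_in_smp __jiffy_arch_data jiffies;
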
index 918d4c73e951d7815fc4063322c9ed8e493afb9f..5351e1f3950d158aaa5ff2590c32734862a79834 100644 (file)
@@ -120,7 +120,6 @@ int copy_thread_tls(unsigned long clone_flags, unsigned long usp,
        struct thread_info *ti = task_thread_info(p);
        struct pt_regs *childregs, *regs = current_pt_regs();
        unsigned long childksp;
-       p->set_child_tid = p->clear_child_tid = NULL;
 
        childksp = (unsigned long)task_stack_page(p) + THREAD_SIZE - 32;
 
index f8da545854f979c33a7b3116d26d822caa46c494..106859ae27ffba114f9f4b0011151db0f65f98d4 100644 (file)
@@ -167,8 +167,6 @@ copy_thread(unsigned long clone_flags, unsigned long usp,
 
        top_of_kernel_stack = sp;
 
-       p->set_child_tid = p->clear_child_tid = NULL;
-
        /* Locate userspace context on stack... */
        sp -= STACK_FRAME_OVERHEAD;     /* redzone */
        sp -= sizeof(struct pt_regs);
index 3e7ce86d5c1330986289a72b0c38ea2686f6d6e6..4d877144f3773b3aa4dcefd9deeb12b13bcfe136 100644 (file)
@@ -46,6 +46,8 @@
 #define PPC_FEATURE2_HTM_NOSC          0x01000000
 #define PPC_FEATURE2_ARCH_3_00         0x00800000 /* ISA 3.00 */
 #define PPC_FEATURE2_HAS_IEEE128       0x00400000 /* VSX IEEE Binary Float 128-bit */
+#define PPC_FEATURE2_DARN              0x00200000 /* darn random number insn */
+#define PPC_FEATURE2_SCV               0x00100000 /* scv syscall */
 
 /*
  * IMPORTANT!
index 9b3e88b1a9c83b041d9bd84ce9d17fdc87aaeaf7..6f849832a66914f520d59c38a3c6c28f405de904 100644 (file)
@@ -124,7 +124,8 @@ extern void __restore_cpu_e6500(void);
 #define COMMON_USER_POWER9     COMMON_USER_POWER8
 #define COMMON_USER2_POWER9    (COMMON_USER2_POWER8 | \
                                 PPC_FEATURE2_ARCH_3_00 | \
-                                PPC_FEATURE2_HAS_IEEE128)
+                                PPC_FEATURE2_HAS_IEEE128 | \
+                                PPC_FEATURE2_DARN )
 
 #ifdef CONFIG_PPC_BOOK3E_64
 #define COMMON_USER_BOOKE      (COMMON_USER_PPC64 | PPC_FEATURE_BOOKE)
index 40c4887c27b613d73bc420b4aca34cee406325a8..f830562974417bfd0f338766b59a6ffd7a09a176 100644 (file)
@@ -161,7 +161,9 @@ static struct ibm_pa_feature {
        { .pabyte = 0,  .pabit = 3, .cpu_features  = CPU_FTR_CTRL },
        { .pabyte = 0,  .pabit = 6, .cpu_features  = CPU_FTR_NOEXECUTE },
        { .pabyte = 1,  .pabit = 2, .mmu_features  = MMU_FTR_CI_LARGE_PAGE },
+#ifdef CONFIG_PPC_RADIX_MMU
        { .pabyte = 40, .pabit = 0, .mmu_features  = MMU_FTR_TYPE_RADIX },
+#endif
        { .pabyte = 1,  .pabit = 1, .invert = 1, .cpu_features = CPU_FTR_NODSISRALIGN },
        { .pabyte = 5,  .pabit = 0, .cpu_features  = CPU_FTR_REAL_LE,
                                    .cpu_user_ftrs = PPC_FEATURE_TRUE_LE },
index 96c2b8a406303194737962905330c0a7a3900944..0c45cdbac4cfc80bcb79e287486d5a8a85fefdef 100644 (file)
@@ -197,7 +197,9 @@ static int __spu_trap_data_map(struct spu *spu, unsigned long ea, u64 dsisr)
            (REGION_ID(ea) != USER_REGION_ID)) {
 
                spin_unlock(&spu->register_lock);
-               ret = hash_page(ea, _PAGE_PRESENT | _PAGE_READ, 0x300, dsisr);
+               ret = hash_page(ea,
+                               _PAGE_PRESENT | _PAGE_READ | _PAGE_PRIVILEGED,
+                               0x300, dsisr);
                spin_lock(&spu->register_lock);
 
                if (!ret) {
index 067defeea6911303e7fd59206e239fa7d6d770eb..78fa9395b8c55c3dab2c10e1a245f5a7a04be8a2 100644 (file)
@@ -714,7 +714,7 @@ static void pnv_npu2_release_context(struct kref *kref)
 void pnv_npu2_destroy_context(struct npu_context *npu_context,
                        struct pci_dev *gpdev)
 {
-       struct pnv_phb *nphb, *phb;
+       struct pnv_phb *nphb;
        struct npu *npu;
        struct pci_dev *npdev = pnv_pci_get_npu_dev(gpdev, 0);
        struct device_node *nvlink_dn;
@@ -728,13 +728,12 @@ void pnv_npu2_destroy_context(struct npu_context *npu_context,
 
        nphb = pci_bus_to_host(npdev->bus)->private_data;
        npu = &nphb->npu;
-       phb = pci_bus_to_host(gpdev->bus)->private_data;
        nvlink_dn = of_parse_phandle(npdev->dev.of_node, "ibm,nvlink", 0);
        if (WARN_ON(of_property_read_u32(nvlink_dn, "ibm,npu-link-index",
                                                        &nvlink_index)))
                return;
        npu_context->npdev[npu->index][nvlink_index] = NULL;
-       opal_npu_destroy_context(phb->opal_id, npu_context->mm->context.id,
+       opal_npu_destroy_context(nphb->opal_id, npu_context->mm->context.id,
                                PCI_DEVID(gpdev->bus->number, gpdev->devfn));
        kref_put(&npu_context->kref, pnv_npu2_release_context);
 }
index cd18994a9555ab7ec628da2bdc909dd445fef1a2..4ccfacc7232ab1ace21b8466ae73e4c7d18d3fba 100644 (file)
@@ -360,7 +360,7 @@ config SMP
          Management" code will be disabled if you say Y here.
 
          See also <file:Documentation/x86/i386/IO-APIC.txt>,
-         <file:Documentation/nmi_watchdog.txt> and the SMP-HOWTO available at
+         <file:Documentation/lockup-watchdogs.txt> and the SMP-HOWTO available at
          <http://www.tldp.org/docs.html#howto>.
 
          If you don't know what to do here, say N.
index 5851411e60fb9fd008f0035ac92932bd03eb519b..bf240b9204738598e78ece3f99e85d8f21385ea4 100644 (file)
@@ -159,7 +159,7 @@ ifdef CONFIG_FUNCTION_GRAPH_TRACER
        # If '-Os' is enabled, disable it and print a warning.
         ifdef CONFIG_CC_OPTIMIZE_FOR_SIZE
           undefine CONFIG_CC_OPTIMIZE_FOR_SIZE
-         $(warning Disabling CONFIG_CC_OPTIMIZE_FOR_SIZE.  Your compiler does not have -mfentry so you cannot optimize for size with CONFIG_FUNCTION_GRAPH_TRACER.)
+          $(warning Disabling CONFIG_CC_OPTIMIZE_FOR_SIZE.  Your compiler does not have -mfentry so you cannot optimize for size with CONFIG_FUNCTION_GRAPH_TRACER.)
         endif
 
     endif
index 44163e8c3868ec4dda58fed54f38411134224089..2c860ad4fe0686a9cf796cd9a5b4453b8199223f 100644 (file)
@@ -94,7 +94,7 @@ vmlinux-objs-$(CONFIG_EFI_MIXED) += $(obj)/efi_thunk_$(BITS).o
 quiet_cmd_check_data_rel = DATAREL $@
 define cmd_check_data_rel
        for obj in $(filter %.o,$^); do \
-               readelf -S $$obj | grep -qF .rel.local && { \
+               ${CROSS_COMPILE}readelf -S $$obj | grep -qF .rel.local && { \
                        echo "error: $$obj has data relocations!" >&2; \
                        exit 1; \
                } || true; \
index 50bc26949e9edf75cc5978f8a08a4273bf489aec..48ef7bb32c4269331ceb2332b8e6bf5faffcb3de 100644 (file)
@@ -251,6 +251,23 @@ ENTRY(__switch_to_asm)
        jmp     __switch_to
 END(__switch_to_asm)
 
+/*
+ * The unwinder expects the last frame on the stack to always be at the same
+ * offset from the end of the page, which allows it to validate the stack.
+ * Calling schedule_tail() directly would break that convention because its an
+ * asmlinkage function so its argument has to be pushed on the stack.  This
+ * wrapper creates a proper "end of stack" frame header before the call.
+ */
+ENTRY(schedule_tail_wrapper)
+       FRAME_BEGIN
+
+       pushl   %eax
+       call    schedule_tail
+       popl    %eax
+
+       FRAME_END
+       ret
+ENDPROC(schedule_tail_wrapper)
 /*
  * A newly forked process directly context switches into this address.
  *
@@ -259,24 +276,15 @@ END(__switch_to_asm)
  * edi: kernel thread arg
  */
 ENTRY(ret_from_fork)
-       FRAME_BEGIN             /* help unwinder find end of stack */
-
-       /*
-        * schedule_tail() is asmlinkage so we have to put its 'prev' argument
-        * on the stack.
-        */
-       pushl   %eax
-       call    schedule_tail
-       popl    %eax
+       call    schedule_tail_wrapper
 
        testl   %ebx, %ebx
        jnz     1f              /* kernel threads are uncommon */
 
 2:
        /* When we fork, we trace the syscall return in the child, too. */
-       leal    FRAME_OFFSET(%esp), %eax
+       movl    %esp, %eax
        call    syscall_return_slowpath
-       FRAME_END
        jmp     restore_all
 
        /* kernel thread */
index 607d72c4a485cf25a3e582804cea0b7c031e1d32..4a4c0834f9659bd9b4855e23801d47103e644f09 100644 (file)
@@ -36,7 +36,6 @@
 #include <asm/smap.h>
 #include <asm/pgtable_types.h>
 #include <asm/export.h>
-#include <asm/frame.h>
 #include <linux/err.h>
 
 .code64
@@ -406,19 +405,17 @@ END(__switch_to_asm)
  * r12: kernel thread arg
  */
 ENTRY(ret_from_fork)
-       FRAME_BEGIN                     /* help unwinder find end of stack */
        movq    %rax, %rdi
-       call    schedule_tail           /* rdi: 'prev' task parameter */
+       call    schedule_tail                   /* rdi: 'prev' task parameter */
 
-       testq   %rbx, %rbx              /* from kernel_thread? */
-       jnz     1f                      /* kernel threads are uncommon */
+       testq   %rbx, %rbx                      /* from kernel_thread? */
+       jnz     1f                              /* kernel threads are uncommon */
 
 2:
-       leaq    FRAME_OFFSET(%rsp),%rdi /* pt_regs pointer */
+       movq    %rsp, %rdi
        call    syscall_return_slowpath /* returns with IRQs disabled */
        TRACE_IRQS_ON                   /* user mode is traced as IRQS on */
        SWAPGS
-       FRAME_END
        jmp     restore_regs_and_iret
 
 1:
index 4fd5195deed01836a092dccf680baf6b3e150713..3f9a3d2a52095af1f100e72bc3c24b4fc8d1b397 100644 (file)
@@ -266,6 +266,7 @@ static inline int umc_normaddr_to_sysaddr(u64 norm_addr, u16 nid, u8 umc, u64 *s
 #endif
 
 int mce_available(struct cpuinfo_x86 *c);
+bool mce_is_memory_error(struct mce *m);
 
 DECLARE_PER_CPU(unsigned, mce_exception_count);
 DECLARE_PER_CPU(unsigned, mce_poll_count);
index c5b8f760473c32f090c430693c5b2e8a61cc196f..32e14d13741670efa680c3c9e9facd1ffc0415a5 100644 (file)
@@ -409,8 +409,13 @@ void __init_or_module noinline apply_alternatives(struct alt_instr *start,
                memcpy(insnbuf, replacement, a->replacementlen);
                insnbuf_sz = a->replacementlen;
 
-               /* 0xe8 is a relative jump; fix the offset. */
-               if (*insnbuf == 0xe8 && a->replacementlen == 5) {
+               /*
+                * 0xe8 is a relative jump; fix the offset.
+                *
+                * Instruction length is checked before the opcode to avoid
+                * accessing uninitialized bytes for zero-length replacements.
+                */
+               if (a->replacementlen == 5 && *insnbuf == 0xe8) {
                        *(s32 *)(insnbuf + 1) += replacement - instr;
                        DPRINTK("Fix CALL offset: 0x%x, CALL 0x%lx",
                                *(s32 *)(insnbuf + 1),
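
For reference, the displacement math behind that "+=": a rel32 operand is relative to the end of its 5-byte instruction, so bytes authored at 'replacement' but executed at 'instr' keep the same absolute target only if the delta between the two addresses is added back. A standalone sketch of that fixup (illustrative only; the function name is hypothetical):

	#include <stdint.h>
	#include <string.h>

	/*
	 * A CALL rel32 at src targeting T encodes disp = T - (src + 5).
	 * Executed at dst instead, it would land at dst + 5 + disp; adding
	 * (src - dst) to disp restores T.  This is the same adjustment as
	 * the "+= replacement - instr" above.
	 */
	static void fix_call_rel32(uint8_t insn[5], uintptr_t src, uintptr_t dst)
	{
		int32_t disp;

		if (insn[0] != 0xe8)	/* only CALL rel32 needs fixing */
			return;
		memcpy(&disp, insn + 1, sizeof(disp));
		disp += (int32_t)(src - dst);
		memcpy(insn + 1, &disp, sizeof(disp));
	}
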
index 5abd4bf73d6e6805d033c361e4671087bb7be076..5cfbaeb6529a04bcba6cb6ba7b09610d0f2fa88c 100644 (file)
@@ -499,16 +499,14 @@ static int mce_usable_address(struct mce *m)
        return 1;
 }
 
-static bool memory_error(struct mce *m)
+bool mce_is_memory_error(struct mce *m)
 {
-       struct cpuinfo_x86 *c = &boot_cpu_data;
-
-       if (c->x86_vendor == X86_VENDOR_AMD) {
+       if (m->cpuvendor == X86_VENDOR_AMD) {
                /* ErrCodeExt[20:16] */
                u8 xec = (m->status >> 16) & 0x1f;
 
                return (xec == 0x0 || xec == 0x8);
-       } else if (c->x86_vendor == X86_VENDOR_INTEL) {
+       } else if (m->cpuvendor == X86_VENDOR_INTEL) {
                /*
                 * Intel SDM Volume 3B - 15.9.2 Compound Error Codes
                 *
@@ -529,6 +527,7 @@ static bool memory_error(struct mce *m)
 
        return false;
 }
+EXPORT_SYMBOL_GPL(mce_is_memory_error);
 
 static bool cec_add_mce(struct mce *m)
 {
@@ -536,7 +535,7 @@ static bool cec_add_mce(struct mce *m)
                return false;
 
        /* We eat only correctable DRAM errors with usable addresses. */
-       if (memory_error(m) &&
+       if (mce_is_memory_error(m) &&
            !(m->status & MCI_STATUS_UC) &&
            mce_usable_address(m))
                if (!cec_add_elem(m->addr >> PAGE_SHIFT))
@@ -713,7 +712,7 @@ bool machine_check_poll(enum mcp_flags flags, mce_banks_t *b)
 
                severity = mce_severity(&m, mca_cfg.tolerant, NULL, false);
 
-               if (severity == MCE_DEFERRED_SEVERITY && memory_error(&m))
+               if (severity == MCE_DEFERRED_SEVERITY && mce_is_memory_error(&m))
                        if (m.status & MCI_STATUS_ADDRV)
                                m.severity = severity;
 
index 45db4d2ebd0118e666c205185b9162d13e33316e..e9f4d762aa5b5cabde501f95045fad6c43fef54b 100644 (file)
@@ -320,7 +320,7 @@ void load_ucode_amd_ap(unsigned int cpuid_1_eax)
 }
 
 static enum ucode_state
-load_microcode_amd(int cpu, u8 family, const u8 *data, size_t size);
+load_microcode_amd(bool save, u8 family, const u8 *data, size_t size);
 
 int __init save_microcode_in_initrd_amd(unsigned int cpuid_1_eax)
 {
@@ -338,8 +338,7 @@ int __init save_microcode_in_initrd_amd(unsigned int cpuid_1_eax)
        if (!desc.mc)
                return -EINVAL;
 
-       ret = load_microcode_amd(smp_processor_id(), x86_family(cpuid_1_eax),
-                                desc.data, desc.size);
+       ret = load_microcode_amd(true, x86_family(cpuid_1_eax), desc.data, desc.size);
        if (ret != UCODE_OK)
                return -EINVAL;
 
@@ -675,7 +674,7 @@ static enum ucode_state __load_microcode_amd(u8 family, const u8 *data,
 }
 
 static enum ucode_state
-load_microcode_amd(int cpu, u8 family, const u8 *data, size_t size)
+load_microcode_amd(bool save, u8 family, const u8 *data, size_t size)
 {
        enum ucode_state ret;
 
@@ -689,8 +688,8 @@ load_microcode_amd(int cpu, u8 family, const u8 *data, size_t size)
 
 #ifdef CONFIG_X86_32
        /* save BSP's matching patch for early load */
-       if (cpu_data(cpu).cpu_index == boot_cpu_data.cpu_index) {
-               struct ucode_patch *p = find_patch(cpu);
+       if (save) {
+               struct ucode_patch *p = find_patch(0);
                if (p) {
                        memset(amd_ucode_patch, 0, PATCH_MAX_SIZE);
                        memcpy(amd_ucode_patch, p->data, min_t(u32, ksize(p->data),
@@ -722,11 +721,12 @@ static enum ucode_state request_microcode_amd(int cpu, struct device *device,
 {
        char fw_name[36] = "amd-ucode/microcode_amd.bin";
        struct cpuinfo_x86 *c = &cpu_data(cpu);
+       bool bsp = c->cpu_index == boot_cpu_data.cpu_index;
        enum ucode_state ret = UCODE_NFOUND;
        const struct firmware *fw;
 
        /* reload ucode container only on the boot cpu */
-       if (!refresh_fw || c->cpu_index != boot_cpu_data.cpu_index)
+       if (!refresh_fw || !bsp)
                return UCODE_OK;
 
        if (c->x86 >= 0x15)
@@ -743,7 +743,7 @@ static enum ucode_state request_microcode_amd(int cpu, struct device *device,
                goto fw_release;
        }
 
-       ret = load_microcode_amd(cpu, c->x86, fw->data, fw->size);
+       ret = load_microcode_amd(bsp, c->x86, fw->data, fw->size);
 
  fw_release:
        release_firmware(fw);
index 0651e974dcb3a88211db1711a5969434d3163e5a..9bef1bbeba63885e3ba584960b97e8c190b4c5a8 100644 (file)
@@ -689,8 +689,12 @@ static inline void *alloc_tramp(unsigned long size)
 {
        return module_alloc(size);
 }
-static inline void tramp_free(void *tramp)
+static inline void tramp_free(void *tramp, int size)
 {
+       int npages = PAGE_ALIGN(size) >> PAGE_SHIFT;
+
+       set_memory_nx((unsigned long)tramp, npages);
+       set_memory_rw((unsigned long)tramp, npages);
        module_memfree(tramp);
 }
 #else
@@ -699,7 +703,7 @@ static inline void *alloc_tramp(unsigned long size)
 {
        return NULL;
 }
-static inline void tramp_free(void *tramp) { }
+static inline void tramp_free(void *tramp, int size) { }
 #endif
 
 /* Defined as markers to the end of the ftrace default trampolines */
@@ -771,7 +775,7 @@ create_trampoline(struct ftrace_ops *ops, unsigned int *tramp_size)
        /* Copy ftrace_caller onto the trampoline memory */
        ret = probe_kernel_read(trampoline, (void *)start_offset, size);
        if (WARN_ON(ret < 0)) {
-               tramp_free(trampoline);
+               tramp_free(trampoline, *tramp_size);
                return 0;
        }
 
@@ -797,7 +801,7 @@ create_trampoline(struct ftrace_ops *ops, unsigned int *tramp_size)
 
        /* Are we pointing to the reference? */
        if (WARN_ON(memcmp(op_ptr.op, op_ref, 3) != 0)) {
-               tramp_free(trampoline);
+               tramp_free(trampoline, *tramp_size);
                return 0;
        }
 
@@ -839,7 +843,7 @@ void arch_ftrace_update_trampoline(struct ftrace_ops *ops)
        unsigned long offset;
        unsigned long ip;
        unsigned int size;
-       int ret;
+       int ret, npages;
 
        if (ops->trampoline) {
                /*
@@ -848,11 +852,14 @@ void arch_ftrace_update_trampoline(struct ftrace_ops *ops)
                 */
                if (!(ops->flags & FTRACE_OPS_FL_ALLOC_TRAMP))
                        return;
+               npages = PAGE_ALIGN(ops->trampoline_size) >> PAGE_SHIFT;
+               set_memory_rw(ops->trampoline, npages);
        } else {
                ops->trampoline = create_trampoline(ops, &size);
                if (!ops->trampoline)
                        return;
                ops->trampoline_size = size;
+               npages = PAGE_ALIGN(size) >> PAGE_SHIFT;
        }
 
        offset = calc_trampoline_call_offset(ops->flags & FTRACE_OPS_FL_SAVE_REGS);
@@ -863,6 +870,7 @@ void arch_ftrace_update_trampoline(struct ftrace_ops *ops)
        /* Do a safe modify in case the trampoline is executing */
        new = ftrace_call_replace(ip, (unsigned long)func);
        ret = update_ftrace_func(ip, new);
+       set_memory_ro(ops->trampoline, npages);
 
        /* The update should never fail */
        WARN_ON(ret);
@@ -939,7 +947,7 @@ void arch_ftrace_trampoline_free(struct ftrace_ops *ops)
        if (!ops || !(ops->flags & FTRACE_OPS_FL_ALLOC_TRAMP))
                return;
 
-       tramp_free((void *)ops->trampoline);
+       tramp_free((void *)ops->trampoline, ops->trampoline_size);
        ops->trampoline = 0;
 }
 
index 5b2bbfbb371284942b2ce95a3b2495eb9ecc8981..6b877807598b9028527a2357fef630431021c383 100644 (file)
@@ -52,6 +52,7 @@
 #include <linux/ftrace.h>
 #include <linux/frame.h>
 #include <linux/kasan.h>
+#include <linux/moduleloader.h>
 
 #include <asm/text-patching.h>
 #include <asm/cacheflush.h>
@@ -417,6 +418,14 @@ static void prepare_boost(struct kprobe *p, struct insn *insn)
        }
 }
 
+/* Recover page to RW mode before releasing it */
+void free_insn_page(void *page)
+{
+       set_memory_nx((unsigned long)page & PAGE_MASK, 1);
+       set_memory_rw((unsigned long)page & PAGE_MASK, 1);
+       module_memfree(page);
+}
+
 static int arch_copy_kprobe(struct kprobe *p)
 {
        struct insn insn;
index ff40e74c9181f0e009b51909a0e76ce25c1c2cf3..ffeae818aa7a95ffd0395ae9af5fcedd9b599981 100644 (file)
@@ -78,7 +78,7 @@ void __show_regs(struct pt_regs *regs, int all)
 
        printk(KERN_DEFAULT "EIP: %pS\n", (void *)regs->ip);
        printk(KERN_DEFAULT "EFLAGS: %08lx CPU: %d\n", regs->flags,
-               smp_processor_id());
+               raw_smp_processor_id());
 
        printk(KERN_DEFAULT "EAX: %08lx EBX: %08lx ECX: %08lx EDX: %08lx\n",
                regs->ax, regs->bx, regs->cx, regs->dx);
index 0b4d3c686b1ef94463c8caabce01e38404d1b8c1..f818236950140fa0e3ccfc013e31857676db4f77 100644 (file)
@@ -980,8 +980,6 @@ void __init setup_arch(char **cmdline_p)
         */
        x86_configure_nx();
 
-       simple_udelay_calibration();
-
        parse_early_param();
 
 #ifdef CONFIG_MEMORY_HOTPLUG
@@ -1041,6 +1039,8 @@ void __init setup_arch(char **cmdline_p)
         */
        init_hypervisor_platform();
 
+       simple_udelay_calibration();
+
        x86_init.resources.probe_roms();
 
        /* after parse_early_param, so could debug it */
index 82c6d7f1fd73e64debd566d0846f3b9a62ab8959..b9389d72b2f784887e14acc89a6346a78c13c1b4 100644 (file)
@@ -104,6 +104,11 @@ static inline unsigned long *last_frame(struct unwind_state *state)
        return (unsigned long *)task_pt_regs(state->task) - 2;
 }
 
+static bool is_last_frame(struct unwind_state *state)
+{
+       return state->bp == last_frame(state);
+}
+
 #ifdef CONFIG_X86_32
 #define GCC_REALIGN_WORDS 3
 #else
@@ -115,16 +120,15 @@ static inline unsigned long *last_aligned_frame(struct unwind_state *state)
        return last_frame(state) - GCC_REALIGN_WORDS;
 }
 
-static bool is_last_task_frame(struct unwind_state *state)
+static bool is_last_aligned_frame(struct unwind_state *state)
 {
        unsigned long *last_bp = last_frame(state);
        unsigned long *aligned_bp = last_aligned_frame(state);
 
        /*
-        * We have to check for the last task frame at two different locations
-        * because gcc can occasionally decide to realign the stack pointer and
-        * change the offset of the stack frame in the prologue of a function
-        * called by head/entry code.  Examples:
+        * GCC can occasionally decide to realign the stack pointer and change
+        * the offset of the stack frame in the prologue of a function called
+        * by head/entry code.  Examples:
         *
         * <start_secondary>:
         *      push   %edi
@@ -141,11 +145,38 @@ static bool is_last_task_frame(struct unwind_state *state)
         *      push   %rbp
         *      mov    %rsp,%rbp
         *
-        * Note that after aligning the stack, it pushes a duplicate copy of
-        * the return address before pushing the frame pointer.
+        * After aligning the stack, it pushes a duplicate copy of the return
+        * address before pushing the frame pointer.
+        */
+       return (state->bp == aligned_bp && *(aligned_bp + 1) == *(last_bp + 1));
+}
+
+static bool is_last_ftrace_frame(struct unwind_state *state)
+{
+       unsigned long *last_bp = last_frame(state);
+       unsigned long *last_ftrace_bp = last_bp - 3;
+
+       /*
+        * When unwinding from an ftrace handler of a function called by entry
+        * code, the stack layout of the last frame is:
+        *
+        *   bp
+        *   parent ret addr
+        *   bp
+        *   function ret addr
+        *   parent ret addr
+        *   pt_regs
+        *   -----------------
         */
-       return (state->bp == last_bp ||
-               (state->bp == aligned_bp && *(aligned_bp+1) == *(last_bp+1)));
+       return (state->bp == last_ftrace_bp &&
+               *state->bp == *(state->bp + 2) &&
+               *(state->bp + 1) == *(state->bp + 4));
+}
+
+static bool is_last_task_frame(struct unwind_state *state)
+{
+       return is_last_frame(state) || is_last_aligned_frame(state) ||
+              is_last_ftrace_frame(state);
 }
 
 /*
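
For context, the refactor above splits the old combined test into three predicates (plain last frame, GCC-realigned frame, ftrace frame) that is_last_task_frame() simply ORs together. The general shape of a frame-pointer walk with a pluggable stop predicate looks roughly like this in plain C; illustrative only, it needs -fno-omit-frame-pointer, and the NULL-terminator check is a stand-in for the kernel's real tests:

#include <stdio.h>

struct frame {
	struct frame *next;	/* saved frame pointer of the caller */
	void *ret;		/* return address pushed by the call */
};

static int stop_at_null(struct frame *f)
{
	return f->next == NULL;	/* stand-in for is_last_task_frame() */
}

static void backtrace_fp(int (*is_last)(struct frame *))
{
	struct frame *f = __builtin_frame_address(0);

	while (f && !is_last(f)) {
		printf("bp=%p ret=%p\n", (void *)f, f->ret);
		f = f->next;	/* follow the saved frame pointer chain */
	}
}

int main(void)
{
	backtrace_fp(stop_at_null);
	return 0;
}
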
index c329d28949056e2d6ad4688e411c5644b81e4d68..d24c8742d9b0aa6df35d5e479e627ff008ea221f 100644 (file)
@@ -1495,8 +1495,10 @@ EXPORT_SYMBOL_GPL(kvm_lapic_hv_timer_in_use);
 
 static void cancel_hv_timer(struct kvm_lapic *apic)
 {
+       preempt_disable();
        kvm_x86_ops->cancel_hv_timer(apic->vcpu);
        apic->lapic_timer.hv_timer_in_use = false;
+       preempt_enable();
 }
 
 static bool start_hv_timer(struct kvm_lapic *apic)
@@ -1934,7 +1936,8 @@ void kvm_lapic_reset(struct kvm_vcpu *vcpu, bool init_event)
        for (i = 0; i < KVM_APIC_LVT_NUM; i++)
                kvm_lapic_set_reg(apic, APIC_LVTT + 0x10 * i, APIC_LVT_MASKED);
        apic_update_lvtt(apic);
-       if (kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_LINT0_REENABLED))
+       if (kvm_vcpu_is_reset_bsp(vcpu) &&
+           kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_LINT0_REENABLED))
                kvm_lapic_set_reg(apic, APIC_LVT0,
                             SET_APIC_DELIVERY_MODE(0, APIC_MODE_EXTINT));
        apic_manage_nmi_watchdog(apic, kvm_lapic_get_reg(apic, APIC_LVT0));
index 183ddb235fb48658028433d451db75d554152c54..ba9891ac5c568f1798555bfa9dcbc421fff5ae2a 100644 (file)
@@ -1807,7 +1807,7 @@ static void svm_get_segment(struct kvm_vcpu *vcpu,
         * AMD's VMCB does not have an explicit unusable field, so emulate it
         * for cross vendor migration purposes by "not present"
         */
-       var->unusable = !var->present || (var->type == 0);
+       var->unusable = !var->present;
 
        switch (seg) {
        case VCPU_SREG_TR:
@@ -1840,6 +1840,7 @@ static void svm_get_segment(struct kvm_vcpu *vcpu,
                 */
                if (var->unusable)
                        var->db = 0;
+               /* This is symmetric with svm_set_segment() */
                var->dpl = to_svm(vcpu)->vmcb->save.cpl;
                break;
        }
@@ -1980,18 +1981,14 @@ static void svm_set_segment(struct kvm_vcpu *vcpu,
        s->base = var->base;
        s->limit = var->limit;
        s->selector = var->selector;
-       if (var->unusable)
-               s->attrib = 0;
-       else {
-               s->attrib = (var->type & SVM_SELECTOR_TYPE_MASK);
-               s->attrib |= (var->s & 1) << SVM_SELECTOR_S_SHIFT;
-               s->attrib |= (var->dpl & 3) << SVM_SELECTOR_DPL_SHIFT;
-               s->attrib |= (var->present & 1) << SVM_SELECTOR_P_SHIFT;
-               s->attrib |= (var->avl & 1) << SVM_SELECTOR_AVL_SHIFT;
-               s->attrib |= (var->l & 1) << SVM_SELECTOR_L_SHIFT;
-               s->attrib |= (var->db & 1) << SVM_SELECTOR_DB_SHIFT;
-               s->attrib |= (var->g & 1) << SVM_SELECTOR_G_SHIFT;
-       }
+       s->attrib = (var->type & SVM_SELECTOR_TYPE_MASK);
+       s->attrib |= (var->s & 1) << SVM_SELECTOR_S_SHIFT;
+       s->attrib |= (var->dpl & 3) << SVM_SELECTOR_DPL_SHIFT;
+       s->attrib |= ((var->present & 1) && !var->unusable) << SVM_SELECTOR_P_SHIFT;
+       s->attrib |= (var->avl & 1) << SVM_SELECTOR_AVL_SHIFT;
+       s->attrib |= (var->l & 1) << SVM_SELECTOR_L_SHIFT;
+       s->attrib |= (var->db & 1) << SVM_SELECTOR_DB_SHIFT;
+       s->attrib |= (var->g & 1) << SVM_SELECTOR_G_SHIFT;
 
        /*
         * This is always accurate, except if SYSRET returned to a segment
@@ -2000,7 +1997,8 @@ static void svm_set_segment(struct kvm_vcpu *vcpu,
         * would entail passing the CPL to userspace and back.
         */
        if (seg == VCPU_SREG_SS)
-               svm->vmcb->save.cpl = (s->attrib >> SVM_SELECTOR_DPL_SHIFT) & 3;
+               /* This is symmetric with svm_get_segment() */
+               svm->vmcb->save.cpl = (var->dpl & 3);
 
        mark_dirty(svm->vmcb, VMCB_SEG);
 }
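
The rewrite above always packs the attribute bits and folds the "unusable" state into just the present bit, rather than zeroing the whole attribute word. A stand-alone sketch of that packing; the shift values and struct are illustrative, in the spirit of the SVM_SELECTOR_* definitions rather than copied from asm/svm.h:

#include <stdint.h>
#include <stdio.h>

#define SEL_TYPE_MASK 0x0f
#define SEL_S_SHIFT   4
#define SEL_DPL_SHIFT 5
#define SEL_P_SHIFT   7

struct seg {
	uint8_t type, s, dpl, present, unusable;
};

static uint16_t pack_attrib(const struct seg *v)
{
	uint16_t a = v->type & SEL_TYPE_MASK;

	a |= (v->s & 1) << SEL_S_SHIFT;
	a |= (v->dpl & 3) << SEL_DPL_SHIFT;
	/* the key change from the patch: only P is cleared for unusable
	 * segments, the other attribute bits survive */
	a |= ((v->present & 1) && !v->unusable) << SEL_P_SHIFT;
	return a;
}

int main(void)
{
	struct seg s = { .type = 0xb, .s = 0, .dpl = 0,
			 .present = 1, .unusable = 1 };

	printf("attrib=%#x\n", pack_attrib(&s));	/* P bit stays clear */
	return 0;
}
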
index 72f78396bc0960968161b66ccee00c42fa203fb7..9b4b5d6dcd34755acc0c09525ca93b3408ee4128 100644 (file)
@@ -6914,97 +6914,21 @@ static int get_vmx_mem_address(struct kvm_vcpu *vcpu,
        return 0;
 }
 
-/*
- * This function performs the various checks including
- * - if it's 4KB aligned
- * - No bits beyond the physical address width are set
- * - Returns 0 on success or else 1
- * (Intel SDM Section 30.3)
- */
-static int nested_vmx_check_vmptr(struct kvm_vcpu *vcpu, int exit_reason,
-                                 gpa_t *vmpointer)
+static int nested_vmx_get_vmptr(struct kvm_vcpu *vcpu, gpa_t *vmpointer)
 {
        gva_t gva;
-       gpa_t vmptr;
        struct x86_exception e;
-       struct page *page;
-       struct vcpu_vmx *vmx = to_vmx(vcpu);
-       int maxphyaddr = cpuid_maxphyaddr(vcpu);
 
        if (get_vmx_mem_address(vcpu, vmcs_readl(EXIT_QUALIFICATION),
                        vmcs_read32(VMX_INSTRUCTION_INFO), false, &gva))
                return 1;
 
-       if (kvm_read_guest_virt(&vcpu->arch.emulate_ctxt, gva, &vmptr,
-                               sizeof(vmptr), &e)) {
+       if (kvm_read_guest_virt(&vcpu->arch.emulate_ctxt, gva, vmpointer,
+                               sizeof(*vmpointer), &e)) {
                kvm_inject_page_fault(vcpu, &e);
                return 1;
        }
 
-       switch (exit_reason) {
-       case EXIT_REASON_VMON:
-               /*
-                * SDM 3: 24.11.5
-                * The first 4 bytes of VMXON region contain the supported
-                * VMCS revision identifier
-                *
-                * Note - IA32_VMX_BASIC[48] will never be 1
-                * for the nested case;
-                * which replaces physical address width with 32
-                *
-                */
-               if (!PAGE_ALIGNED(vmptr) || (vmptr >> maxphyaddr)) {
-                       nested_vmx_failInvalid(vcpu);
-                       return kvm_skip_emulated_instruction(vcpu);
-               }
-
-               page = nested_get_page(vcpu, vmptr);
-               if (page == NULL) {
-                       nested_vmx_failInvalid(vcpu);
-                       return kvm_skip_emulated_instruction(vcpu);
-               }
-               if (*(u32 *)kmap(page) != VMCS12_REVISION) {
-                       kunmap(page);
-                       nested_release_page_clean(page);
-                       nested_vmx_failInvalid(vcpu);
-                       return kvm_skip_emulated_instruction(vcpu);
-               }
-               kunmap(page);
-               nested_release_page_clean(page);
-               vmx->nested.vmxon_ptr = vmptr;
-               break;
-       case EXIT_REASON_VMCLEAR:
-               if (!PAGE_ALIGNED(vmptr) || (vmptr >> maxphyaddr)) {
-                       nested_vmx_failValid(vcpu,
-                                            VMXERR_VMCLEAR_INVALID_ADDRESS);
-                       return kvm_skip_emulated_instruction(vcpu);
-               }
-
-               if (vmptr == vmx->nested.vmxon_ptr) {
-                       nested_vmx_failValid(vcpu,
-                                            VMXERR_VMCLEAR_VMXON_POINTER);
-                       return kvm_skip_emulated_instruction(vcpu);
-               }
-               break;
-       case EXIT_REASON_VMPTRLD:
-               if (!PAGE_ALIGNED(vmptr) || (vmptr >> maxphyaddr)) {
-                       nested_vmx_failValid(vcpu,
-                                            VMXERR_VMPTRLD_INVALID_ADDRESS);
-                       return kvm_skip_emulated_instruction(vcpu);
-               }
-
-               if (vmptr == vmx->nested.vmxon_ptr) {
-                       nested_vmx_failValid(vcpu,
-                                            VMXERR_VMPTRLD_VMXON_POINTER);
-                       return kvm_skip_emulated_instruction(vcpu);
-               }
-               break;
-       default:
-               return 1; /* shouldn't happen */
-       }
-
-       if (vmpointer)
-               *vmpointer = vmptr;
        return 0;
 }
 
@@ -7066,6 +6990,8 @@ out_msr_bitmap:
 static int handle_vmon(struct kvm_vcpu *vcpu)
 {
        int ret;
+       gpa_t vmptr;
+       struct page *page;
        struct vcpu_vmx *vmx = to_vmx(vcpu);
        const u64 VMXON_NEEDED_FEATURES = FEATURE_CONTROL_LOCKED
                | FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX;
@@ -7095,9 +7021,37 @@ static int handle_vmon(struct kvm_vcpu *vcpu)
                return 1;
        }
 
-       if (nested_vmx_check_vmptr(vcpu, EXIT_REASON_VMON, NULL))
+       if (nested_vmx_get_vmptr(vcpu, &vmptr))
                return 1;
+
+       /*
+        * SDM 3: 24.11.5
+        * The first 4 bytes of the VMXON region contain the supported
+        * VMCS revision identifier.
+        *
+        * Note: IA32_VMX_BASIC[48], which would replace the physical
+        * address width with 32, will never be 1 for the nested case.
+        */
+       if (!PAGE_ALIGNED(vmptr) || (vmptr >> cpuid_maxphyaddr(vcpu))) {
+               nested_vmx_failInvalid(vcpu);
+               return kvm_skip_emulated_instruction(vcpu);
+       }
+
+       page = nested_get_page(vcpu, vmptr);
+       if (page == NULL) {
+               nested_vmx_failInvalid(vcpu);
+               return kvm_skip_emulated_instruction(vcpu);
+       }
+       if (*(u32 *)kmap(page) != VMCS12_REVISION) {
+               kunmap(page);
+               nested_release_page_clean(page);
+               nested_vmx_failInvalid(vcpu);
+               return kvm_skip_emulated_instruction(vcpu);
+       }
+       kunmap(page);
+       nested_release_page_clean(page);
+
+       vmx->nested.vmxon_ptr = vmptr;
        ret = enter_vmx_operation(vcpu);
        if (ret)
                return ret;
@@ -7213,9 +7167,19 @@ static int handle_vmclear(struct kvm_vcpu *vcpu)
        if (!nested_vmx_check_permission(vcpu))
                return 1;
 
-       if (nested_vmx_check_vmptr(vcpu, EXIT_REASON_VMCLEAR, &vmptr))
+       if (nested_vmx_get_vmptr(vcpu, &vmptr))
                return 1;
 
+       if (!PAGE_ALIGNED(vmptr) || (vmptr >> cpuid_maxphyaddr(vcpu))) {
+               nested_vmx_failValid(vcpu, VMXERR_VMCLEAR_INVALID_ADDRESS);
+               return kvm_skip_emulated_instruction(vcpu);
+       }
+
+       if (vmptr == vmx->nested.vmxon_ptr) {
+               nested_vmx_failValid(vcpu, VMXERR_VMCLEAR_VMXON_POINTER);
+               return kvm_skip_emulated_instruction(vcpu);
+       }
+
        if (vmptr == vmx->nested.current_vmptr)
                nested_release_vmcs12(vmx);
 
@@ -7545,9 +7509,19 @@ static int handle_vmptrld(struct kvm_vcpu *vcpu)
        if (!nested_vmx_check_permission(vcpu))
                return 1;
 
-       if (nested_vmx_check_vmptr(vcpu, EXIT_REASON_VMPTRLD, &vmptr))
+       if (nested_vmx_get_vmptr(vcpu, &vmptr))
                return 1;
 
+       if (!PAGE_ALIGNED(vmptr) || (vmptr >> cpuid_maxphyaddr(vcpu))) {
+               nested_vmx_failValid(vcpu, VMXERR_VMPTRLD_INVALID_ADDRESS);
+               return kvm_skip_emulated_instruction(vcpu);
+       }
+
+       if (vmptr == vmx->nested.vmxon_ptr) {
+               nested_vmx_failValid(vcpu, VMXERR_VMPTRLD_VMXON_POINTER);
+               return kvm_skip_emulated_instruction(vcpu);
+       }
+
        if (vmx->nested.current_vmptr != vmptr) {
                struct vmcs12 *new_vmcs12;
                struct page *page;
@@ -7913,11 +7887,13 @@ static bool nested_vmx_exit_handled_cr(struct kvm_vcpu *vcpu,
 {
        unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
        int cr = exit_qualification & 15;
-       int reg = (exit_qualification >> 8) & 15;
-       unsigned long val = kvm_register_readl(vcpu, reg);
+       int reg;
+       unsigned long val;
 
        switch ((exit_qualification >> 4) & 3) {
        case 0: /* mov to cr */
+               reg = (exit_qualification >> 8) & 15;
+               val = kvm_register_readl(vcpu, reg);
                switch (cr) {
                case 0:
                        if (vmcs12->cr0_guest_host_mask &
@@ -7972,6 +7948,7 @@ static bool nested_vmx_exit_handled_cr(struct kvm_vcpu *vcpu,
                 * lmsw can change bits 1..3 of cr0, and only set bit 0 of
                 * cr0. Other attempted changes are ignored, with no exit.
                 */
+               val = (exit_qualification >> LMSW_SOURCE_DATA_SHIFT) & 0x0f;
                if (vmcs12->cr0_guest_host_mask & 0xe &
                    (val ^ vmcs12->cr0_read_shadow))
                        return true;
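
After this refactor, nested_vmx_get_vmptr() only reads the guest pointer, and each caller re-applies the SDM checks (4 KiB alignment, no bits beyond the physical address width) with its own failure code. The check itself reduces to a few lines of C; the helper name and page size are assumptions of this sketch:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096ULL

static bool vmptr_is_valid(uint64_t vmptr, int maxphyaddr)
{
	if (vmptr & (PAGE_SIZE - 1))	/* must be 4 KiB aligned */
		return false;
	if (vmptr >> maxphyaddr)	/* no bits beyond the phys width */
		return false;
	return true;
}

int main(void)
{
	printf("%d\n", vmptr_is_valid(0x1000, 36));	/* 1: ok */
	printf("%d\n", vmptr_is_valid(0x1008, 36));	/* 0: unaligned */
	printf("%d\n", vmptr_is_valid(1ULL << 40, 36));	/* 0: too wide */
	return 0;
}
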
index 02363e37d4a61e8271d7fed0a8c534e9dd90f264..a2cd0997343c485051e849551b9fc9d904177fe0 100644 (file)
@@ -8394,10 +8394,13 @@ static inline bool kvm_vcpu_has_events(struct kvm_vcpu *vcpu)
        if (vcpu->arch.pv.pv_unhalted)
                return true;
 
-       if (atomic_read(&vcpu->arch.nmi_queued))
+       if (kvm_test_request(KVM_REQ_NMI, vcpu) ||
+           (vcpu->arch.nmi_pending &&
+            kvm_x86_ops->nmi_allowed(vcpu)))
                return true;
 
-       if (kvm_test_request(KVM_REQ_SMI, vcpu))
+       if (kvm_test_request(KVM_REQ_SMI, vcpu) ||
+           (vcpu->arch.smi_pending && !is_smm(vcpu)))
                return true;
 
        if (kvm_arch_interrupt_allowed(vcpu) &&
index 1dcd2be4cce44aabd5659b531962f918da1b8dd3..c8520b2c62d252f774f7c6b9df83eefa6a5925c2 100644 (file)
@@ -186,7 +186,7 @@ static void cpa_flush_range(unsigned long start, int numpages, int cache)
        unsigned int i, level;
        unsigned long addr;
 
-       BUG_ON(irqs_disabled());
+       BUG_ON(irqs_disabled() && !early_boot_irqs_disabled);
        WARN_ON(PAGE_ALIGN(start) != start);
 
        on_each_cpu(__cpa_flush_range, NULL, 1);
index 7e76a4d8304bc5add30e5f86d16e4f5b423a24f6..43b96f5f78ba8c9c323c5ae6090c19f3ae290ad0 100644 (file)
@@ -828,9 +828,11 @@ static void __init kexec_enter_virtual_mode(void)
 
        /*
         * We don't do virtual mode, since we don't do runtime services, on
-        * non-native EFI
+        * non-native EFI. With efi=old_map, we don't do runtime services in
+        * kexec kernel because in the initial boot something else might
+        * have been mapped at these virtual addresses.
         */
-       if (!efi_is_native()) {
+       if (!efi_is_native() || efi_enabled(EFI_OLD_MEMMAP)) {
                efi_memmap_unmap();
                clear_bit(EFI_RUNTIME_SERVICES, &efi.flags);
                return;
index c488625c9712de4fe150d01df5c260e650967265..eb8dff15a7f63721d0f07845263da1af75670771 100644 (file)
@@ -71,11 +71,13 @@ static void __init early_code_mapping_set_exec(int executable)
 
 pgd_t * __init efi_call_phys_prolog(void)
 {
-       unsigned long vaddress;
-       pgd_t *save_pgd;
+       unsigned long vaddr, addr_pgd, addr_p4d, addr_pud;
+       pgd_t *save_pgd, *pgd_k, *pgd_efi;
+       p4d_t *p4d, *p4d_k, *p4d_efi;
+       pud_t *pud;
 
        int pgd;
-       int n_pgds;
+       int n_pgds, i, j;
 
        if (!efi_enabled(EFI_OLD_MEMMAP)) {
                save_pgd = (pgd_t *)read_cr3();
@@ -88,10 +90,49 @@ pgd_t * __init efi_call_phys_prolog(void)
        n_pgds = DIV_ROUND_UP((max_pfn << PAGE_SHIFT), PGDIR_SIZE);
        save_pgd = kmalloc_array(n_pgds, sizeof(*save_pgd), GFP_KERNEL);
 
+       /*
+        * Build 1:1 identity mapping for efi=old_map usage. Note that
+        * PAGE_OFFSET is PGDIR_SIZE aligned when KASLR is disabled, while
+        * it is PUD_SIZE aligned with KASLR enabled. So for a given physical
+        * address X, pud_index(X) != pud_index(__va(X)); we can only copy the
+        * PUD entry of __va(X) to fill in the PUD entry of X to build the 1:1
+        * mapping. This means we can only reuse the PMD tables of the direct
+        * mapping here.
+        */
        for (pgd = 0; pgd < n_pgds; pgd++) {
-               save_pgd[pgd] = *pgd_offset_k(pgd * PGDIR_SIZE);
-               vaddress = (unsigned long)__va(pgd * PGDIR_SIZE);
-               set_pgd(pgd_offset_k(pgd * PGDIR_SIZE), *pgd_offset_k(vaddress));
+               addr_pgd = (unsigned long)(pgd * PGDIR_SIZE);
+               vaddr = (unsigned long)__va(pgd * PGDIR_SIZE);
+               pgd_efi = pgd_offset_k(addr_pgd);
+               save_pgd[pgd] = *pgd_efi;
+
+               p4d = p4d_alloc(&init_mm, pgd_efi, addr_pgd);
+               if (!p4d) {
+                       pr_err("Failed to allocate p4d table!\n");
+                       goto out;
+               }
+
+               for (i = 0; i < PTRS_PER_P4D; i++) {
+                       addr_p4d = addr_pgd + i * P4D_SIZE;
+                       p4d_efi = p4d + p4d_index(addr_p4d);
+
+                       pud = pud_alloc(&init_mm, p4d_efi, addr_p4d);
+                       if (!pud) {
+                               pr_err("Failed to allocate pud table!\n");
+                               goto out;
+                       }
+
+                       for (j = 0; j < PTRS_PER_PUD; j++) {
+                               addr_pud = addr_p4d + j * PUD_SIZE;
+
+                               if (addr_pud > (max_pfn << PAGE_SHIFT))
+                                       break;
+
+                               vaddr = (unsigned long)__va(addr_pud);
+
+                               pgd_k = pgd_offset_k(vaddr);
+                               p4d_k = p4d_offset(pgd_k, vaddr);
+                               pud[j] = *pud_offset(p4d_k, vaddr);
+                       }
+               }
        }
 out:
        __flush_tlb_all();
@@ -104,8 +145,11 @@ void __init efi_call_phys_epilog(pgd_t *save_pgd)
        /*
         * After the lock is released, the original page table is restored.
         */
-       int pgd_idx;
+       int pgd_idx, i;
        int nr_pgds;
+       pgd_t *pgd;
+       p4d_t *p4d;
+       pud_t *pud;
 
        if (!efi_enabled(EFI_OLD_MEMMAP)) {
                write_cr3((unsigned long)save_pgd);
@@ -115,9 +159,28 @@ void __init efi_call_phys_epilog(pgd_t *save_pgd)
 
        nr_pgds = DIV_ROUND_UP((max_pfn << PAGE_SHIFT) , PGDIR_SIZE);
 
-       for (pgd_idx = 0; pgd_idx < nr_pgds; pgd_idx++)
+       for (pgd_idx = 0; pgd_idx < nr_pgds; pgd_idx++) {
+               pgd = pgd_offset_k(pgd_idx * PGDIR_SIZE);
                set_pgd(pgd_offset_k(pgd_idx * PGDIR_SIZE), save_pgd[pgd_idx]);
 
+               if (!(pgd_val(*pgd) & _PAGE_PRESENT))
+                       continue;
+
+               for (i = 0; i < PTRS_PER_P4D; i++) {
+                       p4d = p4d_offset(pgd,
+                                        pgd_idx * PGDIR_SIZE + i * P4D_SIZE);
+
+                       if (!(p4d_val(*p4d) & _PAGE_PRESENT))
+                               continue;
+
+                       pud = (pud_t *)p4d_page_vaddr(*p4d);
+                       pud_free(&init_mm, pud);
+               }
+
+               p4d = (p4d_t *)pgd_page_vaddr(*pgd);
+               p4d_free(&init_mm, p4d);
+       }
+
        kfree(save_pgd);
 
        __flush_tlb_all();
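
To see why the prolog walks three levels, it helps to run the loop bounds on paper: n_pgds is a DIV_ROUND_UP over PGDIR_SIZE, and the inner PUD loop bails out once the address passes max_pfn. A rough stand-alone model with the usual 4-level x86-64 constants, assumed here rather than taken from the kernel headers:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT   12
#define PUD_SHIFT    30		/* 1 GiB per PUD entry */
#define PGDIR_SHIFT  39		/* 512 GiB per PGD entry */
#define PTRS_PER_PUD 512

int main(void)
{
	uint64_t max_pfn = (16ULL << 30) >> PAGE_SHIFT;	/* say, 16 GiB of RAM */
	uint64_t max_addr = max_pfn << PAGE_SHIFT;
	uint64_t pgdir_size = 1ULL << PGDIR_SHIFT;
	uint64_t n_pgds = (max_addr + pgdir_size - 1) / pgdir_size;
	uint64_t filled = 0;

	for (uint64_t j = 0; j < PTRS_PER_PUD; j++) {
		if ((j << PUD_SHIFT) > max_addr)
			break;	/* same early exit as the PUD loop above */
		filled++;
	}
	printf("n_pgds=%llu, PUD entries copied in pgd 0: %llu\n",
	       (unsigned long long)n_pgds, (unsigned long long)filled);
	return 0;
}
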
index 26615991d69cc8b024470c921aca227c2756e439..e0cf95a83f3fab918eb73715d188e631e02a1425 100644 (file)
@@ -360,6 +360,9 @@ void __init efi_free_boot_services(void)
                free_bootmem_late(start, size);
        }
 
+       if (!num_entries)
+               return;
+
        new_size = efi.memmap.desc_size * num_entries;
        new_phys = efi_memmap_alloc(num_entries);
        if (!new_phys) {
index 7c2947128f5813a677a0361eddcd277b5946d03e..0480892e97e501807a7f14f843eb549719f33c81 100644 (file)
@@ -74,7 +74,7 @@ static void blkg_free(struct blkcg_gq *blkg)
                        blkcg_policy[i]->pd_free_fn(blkg->pd[i]);
 
        if (blkg->blkcg != &blkcg_root)
-               blk_exit_rl(&blkg->rl);
+               blk_exit_rl(blkg->q, &blkg->rl);
 
        blkg_rwstat_exit(&blkg->stat_ios);
        blkg_rwstat_exit(&blkg->stat_bytes);
index c7068520794bd0ba060b905f850efaae6a8cbd36..a7421b772d0e0e3f4b8372fbc11aefd83763d30a 100644 (file)
@@ -648,13 +648,19 @@ int blk_init_rl(struct request_list *rl, struct request_queue *q,
        if (!rl->rq_pool)
                return -ENOMEM;
 
+       if (rl != &q->root_rl)
+               WARN_ON_ONCE(!blk_get_queue(q));
+
        return 0;
 }
 
-void blk_exit_rl(struct request_list *rl)
+void blk_exit_rl(struct request_queue *q, struct request_list *rl)
 {
-       if (rl->rq_pool)
+       if (rl->rq_pool) {
                mempool_destroy(rl->rq_pool);
+               if (rl != &q->root_rl)
+                       blk_put_queue(q);
+       }
 }
 
 struct request_queue *blk_alloc_queue(gfp_t gfp_mask)
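
The pairing introduced here is the classic refcount fix: every non-root request_list takes a reference on its queue in blk_init_rl() and drops it in blk_exit_rl(), so the queue cannot be freed while a list still points at it. In miniature, with types and names that are illustrative rather than the block-layer ones:

#include <stdio.h>

struct queue {
	int refcount;
};

struct rqlist {
	struct queue *q;
	int is_root;
};

static void queue_get(struct queue *q) { q->refcount++; }
static void queue_put(struct queue *q) { q->refcount--; }

static void rl_init(struct rqlist *rl, struct queue *q, int is_root)
{
	rl->q = q;
	rl->is_root = is_root;
	if (!is_root)
		queue_get(q);	/* pairs with the put in rl_exit() */
}

static void rl_exit(struct rqlist *rl)
{
	if (!rl->is_root)
		queue_put(rl->q);
}

int main(void)
{
	struct queue q = { .refcount = 1 };
	struct rqlist rl;

	rl_init(&rl, &q, 0);
	rl_exit(&rl);
	printf("refcount=%d\n", q.refcount);	/* back to 1 */
	return 0;
}
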
index a69ad122ed66c6b93385f1b3959a893b407b9d05..1bcccedcc74f0b48f58363640acb1eae04704800 100644 (file)
@@ -628,25 +628,6 @@ void blk_mq_delay_kick_requeue_list(struct request_queue *q,
 }
 EXPORT_SYMBOL(blk_mq_delay_kick_requeue_list);
 
-void blk_mq_abort_requeue_list(struct request_queue *q)
-{
-       unsigned long flags;
-       LIST_HEAD(rq_list);
-
-       spin_lock_irqsave(&q->requeue_lock, flags);
-       list_splice_init(&q->requeue_list, &rq_list);
-       spin_unlock_irqrestore(&q->requeue_lock, flags);
-
-       while (!list_empty(&rq_list)) {
-               struct request *rq;
-
-               rq = list_first_entry(&rq_list, struct request, queuelist);
-               list_del_init(&rq->queuelist);
-               blk_mq_end_request(rq, -EIO);
-       }
-}
-EXPORT_SYMBOL(blk_mq_abort_requeue_list);
-
 struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, unsigned int tag)
 {
        if (tag < tags->nr_tags) {
@@ -2660,7 +2641,8 @@ int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr)
        return ret;
 }
 
-void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues)
+static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
+                                                       int nr_hw_queues)
 {
        struct request_queue *q;
 
@@ -2684,6 +2666,13 @@ void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues)
        list_for_each_entry(q, &set->tag_list, tag_set_list)
                blk_mq_unfreeze_queue(q);
 }
+
+void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues)
+{
+       mutex_lock(&set->tag_list_lock);
+       __blk_mq_update_nr_hw_queues(set, nr_hw_queues);
+       mutex_unlock(&set->tag_list_lock);
+}
 EXPORT_SYMBOL_GPL(blk_mq_update_nr_hw_queues);
 
 /* Enable polling stats and return whether they were already enabled. */
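
The blk-mq change is the standard locked-wrapper split: the exported blk_mq_update_nr_hw_queues() takes tag_list_lock, and the new double-underscore helper assumes the lock is already held. The same shape in a few lines of portable C, with pthread as a stand-in for the kernel mutex:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t tag_list_lock = PTHREAD_MUTEX_INITIALIZER;
static int nr_hw_queues;

/* caller must hold tag_list_lock */
static void __update_nr_hw_queues(int nr)
{
	nr_hw_queues = nr;
}

static void update_nr_hw_queues(int nr)
{
	pthread_mutex_lock(&tag_list_lock);
	__update_nr_hw_queues(nr);
	pthread_mutex_unlock(&tag_list_lock);
}

int main(void)
{
	update_nr_hw_queues(4);
	printf("nr_hw_queues=%d\n", nr_hw_queues);
	return 0;
}
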
index 504fee9400523e206f987542d59abf8167e31c7e..283da7fbe03408d9eef71ba3e1a4f863671d761b 100644 (file)
@@ -809,7 +809,7 @@ static void blk_release_queue(struct kobject *kobj)
 
        blk_free_queue_stats(q->stats);
 
-       blk_exit_rl(&q->root_rl);
+       blk_exit_rl(q, &q->root_rl);
 
        if (q->queue_tags)
                __blk_queue_free_tags(q);
@@ -887,10 +887,10 @@ int blk_register_queue(struct gendisk *disk)
                goto unlock;
        }
 
-       if (q->mq_ops)
+       if (q->mq_ops) {
                __blk_mq_register_dev(dev, q);
-
-       blk_mq_debugfs_register(q);
+               blk_mq_debugfs_register(q);
+       }
 
        kobject_uevent(&q->kobj, KOBJ_ADD);
 
index b78db2e5fdff1e158ea52c179313ff3eba282015..fc13dd0c6e3956a84913d9e71132c0f321a67280 100644 (file)
@@ -22,11 +22,11 @@ static int throtl_quantum = 32;
 #define DFL_THROTL_SLICE_HD (HZ / 10)
 #define DFL_THROTL_SLICE_SSD (HZ / 50)
 #define MAX_THROTL_SLICE (HZ)
-#define DFL_IDLE_THRESHOLD_SSD (1000L) /* 1 ms */
-#define DFL_IDLE_THRESHOLD_HD (100L * 1000) /* 100 ms */
 #define MAX_IDLE_TIME (5L * 1000 * 1000) /* 5 s */
-/* default latency target is 0, eg, guarantee IO latency by default */
-#define DFL_LATENCY_TARGET (0)
+#define MIN_THROTL_BPS (320 * 1024)
+#define MIN_THROTL_IOPS (10)
+#define DFL_LATENCY_TARGET (-1L)
+#define DFL_IDLE_THRESHOLD (0)
 
 #define SKIP_LATENCY (((u64)1) << BLK_STAT_RES_SHIFT)
 
@@ -157,6 +157,7 @@ struct throtl_grp {
        unsigned long last_check_time;
 
        unsigned long latency_target; /* us */
+       unsigned long latency_target_conf; /* us */
        /* When did we start a new slice */
        unsigned long slice_start[2];
        unsigned long slice_end[2];
@@ -165,6 +166,7 @@ struct throtl_grp {
        unsigned long checked_last_finish_time; /* ns / 1024 */
        unsigned long avg_idletime; /* ns / 1024 */
        unsigned long idletime_threshold; /* us */
+       unsigned long idletime_threshold_conf; /* us */
 
        unsigned int bio_cnt; /* total bios */
        unsigned int bad_bio_cnt; /* bios exceeding latency threshold */
@@ -201,8 +203,6 @@ struct throtl_data
        unsigned int limit_index;
        bool limit_valid[LIMIT_CNT];
 
-       unsigned long dft_idletime_threshold; /* us */
-
        unsigned long low_upgrade_time;
        unsigned long low_downgrade_time;
 
@@ -294,8 +294,14 @@ static uint64_t tg_bps_limit(struct throtl_grp *tg, int rw)
 
        td = tg->td;
        ret = tg->bps[rw][td->limit_index];
-       if (ret == 0 && td->limit_index == LIMIT_LOW)
-               return tg->bps[rw][LIMIT_MAX];
+       if (ret == 0 && td->limit_index == LIMIT_LOW) {
+               /* intermediate node or iops isn't 0 */
+               if (!list_empty(&blkg->blkcg->css.children) ||
+                   tg->iops[rw][td->limit_index])
+                       return U64_MAX;
+               else
+                       return MIN_THROTL_BPS;
+       }
 
        if (td->limit_index == LIMIT_MAX && tg->bps[rw][LIMIT_LOW] &&
            tg->bps[rw][LIMIT_LOW] != tg->bps[rw][LIMIT_MAX]) {
@@ -315,10 +321,17 @@ static unsigned int tg_iops_limit(struct throtl_grp *tg, int rw)
 
        if (cgroup_subsys_on_dfl(io_cgrp_subsys) && !blkg->parent)
                return UINT_MAX;
+
        td = tg->td;
        ret = tg->iops[rw][td->limit_index];
-       if (ret == 0 && tg->td->limit_index == LIMIT_LOW)
-               return tg->iops[rw][LIMIT_MAX];
+       if (ret == 0 && tg->td->limit_index == LIMIT_LOW) {
+               /* intermediate node or bps isn't 0 */
+               if (!list_empty(&blkg->blkcg->css.children) ||
+                   tg->bps[rw][td->limit_index])
+                       return UINT_MAX;
+               else
+                       return MIN_THROTL_IOPS;
+       }
 
        if (td->limit_index == LIMIT_MAX && tg->iops[rw][LIMIT_LOW] &&
            tg->iops[rw][LIMIT_LOW] != tg->iops[rw][LIMIT_MAX]) {
@@ -482,6 +495,9 @@ static struct blkg_policy_data *throtl_pd_alloc(gfp_t gfp, int node)
        /* LIMIT_LOW will have default value 0 */
 
        tg->latency_target = DFL_LATENCY_TARGET;
+       tg->latency_target_conf = DFL_LATENCY_TARGET;
+       tg->idletime_threshold = DFL_IDLE_THRESHOLD;
+       tg->idletime_threshold_conf = DFL_IDLE_THRESHOLD;
 
        return &tg->pd;
 }
@@ -510,8 +526,6 @@ static void throtl_pd_init(struct blkg_policy_data *pd)
        if (cgroup_subsys_on_dfl(io_cgrp_subsys) && blkg->parent)
                sq->parent_sq = &blkg_to_tg(blkg->parent)->service_queue;
        tg->td = td;
-
-       tg->idletime_threshold = td->dft_idletime_threshold;
 }
 
 /*
@@ -1349,7 +1363,7 @@ static int tg_print_conf_uint(struct seq_file *sf, void *v)
        return 0;
 }
 
-static void tg_conf_updated(struct throtl_grp *tg)
+static void tg_conf_updated(struct throtl_grp *tg, bool global)
 {
        struct throtl_service_queue *sq = &tg->service_queue;
        struct cgroup_subsys_state *pos_css;
@@ -1367,8 +1381,26 @@ static void tg_conf_updated(struct throtl_grp *tg)
         * restrictions in the whole hierarchy and allows them to bypass
         * blk-throttle.
         */
-       blkg_for_each_descendant_pre(blkg, pos_css, tg_to_blkg(tg))
-               tg_update_has_rules(blkg_to_tg(blkg));
+       blkg_for_each_descendant_pre(blkg, pos_css,
+                       global ? tg->td->queue->root_blkg : tg_to_blkg(tg)) {
+               struct throtl_grp *this_tg = blkg_to_tg(blkg);
+               struct throtl_grp *parent_tg;
+
+               tg_update_has_rules(this_tg);
+               /* ignore root/second level */
+               if (!cgroup_subsys_on_dfl(io_cgrp_subsys) || !blkg->parent ||
+                   !blkg->parent->parent)
+                       continue;
+               parent_tg = blkg_to_tg(blkg->parent);
+               /*
+                * make sure all children have a lower idle time threshold and
+                * a higher latency target
+                */
+               this_tg->idletime_threshold = min(this_tg->idletime_threshold,
+                               parent_tg->idletime_threshold);
+               this_tg->latency_target = max(this_tg->latency_target,
+                               parent_tg->latency_target);
+       }
 
        /*
         * We're already holding queue_lock and know @tg is valid.  Let's
@@ -1413,7 +1445,7 @@ static ssize_t tg_set_conf(struct kernfs_open_file *of,
        else
                *(unsigned int *)((void *)tg + of_cft(of)->private) = v;
 
-       tg_conf_updated(tg);
+       tg_conf_updated(tg, false);
        ret = 0;
 out_finish:
        blkg_conf_finish(&ctx);
@@ -1497,34 +1529,34 @@ static u64 tg_prfill_limit(struct seq_file *sf, struct blkg_policy_data *pd,
            tg->iops_conf[READ][off] == iops_dft &&
            tg->iops_conf[WRITE][off] == iops_dft &&
            (off != LIMIT_LOW ||
-            (tg->idletime_threshold == tg->td->dft_idletime_threshold &&
-             tg->latency_target == DFL_LATENCY_TARGET)))
+            (tg->idletime_threshold_conf == DFL_IDLE_THRESHOLD &&
+             tg->latency_target_conf == DFL_LATENCY_TARGET)))
                return 0;
 
-       if (tg->bps_conf[READ][off] != bps_dft)
+       if (tg->bps_conf[READ][off] != U64_MAX)
                snprintf(bufs[0], sizeof(bufs[0]), "%llu",
                        tg->bps_conf[READ][off]);
-       if (tg->bps_conf[WRITE][off] != bps_dft)
+       if (tg->bps_conf[WRITE][off] != U64_MAX)
                snprintf(bufs[1], sizeof(bufs[1]), "%llu",
                        tg->bps_conf[WRITE][off]);
-       if (tg->iops_conf[READ][off] != iops_dft)
+       if (tg->iops_conf[READ][off] != UINT_MAX)
                snprintf(bufs[2], sizeof(bufs[2]), "%u",
                        tg->iops_conf[READ][off]);
-       if (tg->iops_conf[WRITE][off] != iops_dft)
+       if (tg->iops_conf[WRITE][off] != UINT_MAX)
                snprintf(bufs[3], sizeof(bufs[3]), "%u",
                        tg->iops_conf[WRITE][off]);
        if (off == LIMIT_LOW) {
-               if (tg->idletime_threshold == ULONG_MAX)
+               if (tg->idletime_threshold_conf == ULONG_MAX)
                        strcpy(idle_time, " idle=max");
                else
                        snprintf(idle_time, sizeof(idle_time), " idle=%lu",
-                               tg->idletime_threshold);
+                               tg->idletime_threshold_conf);
 
-               if (tg->latency_target == ULONG_MAX)
+               if (tg->latency_target_conf == ULONG_MAX)
                        strcpy(latency_time, " latency=max");
                else
                        snprintf(latency_time, sizeof(latency_time),
-                               " latency=%lu", tg->latency_target);
+                               " latency=%lu", tg->latency_target_conf);
        }
 
        seq_printf(sf, "%s rbps=%s wbps=%s riops=%s wiops=%s%s%s\n",
@@ -1563,8 +1595,8 @@ static ssize_t tg_set_limit(struct kernfs_open_file *of,
        v[2] = tg->iops_conf[READ][index];
        v[3] = tg->iops_conf[WRITE][index];
 
-       idle_time = tg->idletime_threshold;
-       latency_time = tg->latency_target;
+       idle_time = tg->idletime_threshold_conf;
+       latency_time = tg->latency_target_conf;
        while (true) {
                char tok[27];   /* wiops=18446744073709551616 */
                char *p;
@@ -1623,17 +1655,33 @@ static ssize_t tg_set_limit(struct kernfs_open_file *of,
                tg->iops_conf[READ][LIMIT_MAX]);
        tg->iops[WRITE][LIMIT_LOW] = min(tg->iops_conf[WRITE][LIMIT_LOW],
                tg->iops_conf[WRITE][LIMIT_MAX]);
+       tg->idletime_threshold_conf = idle_time;
+       tg->latency_target_conf = latency_time;
+
+       /* force the user to configure all settings for the low limit */
+       if (!(tg->bps[READ][LIMIT_LOW] || tg->iops[READ][LIMIT_LOW] ||
+             tg->bps[WRITE][LIMIT_LOW] || tg->iops[WRITE][LIMIT_LOW]) ||
+           tg->idletime_threshold_conf == DFL_IDLE_THRESHOLD ||
+           tg->latency_target_conf == DFL_LATENCY_TARGET) {
+               tg->bps[READ][LIMIT_LOW] = 0;
+               tg->bps[WRITE][LIMIT_LOW] = 0;
+               tg->iops[READ][LIMIT_LOW] = 0;
+               tg->iops[WRITE][LIMIT_LOW] = 0;
+               tg->idletime_threshold = DFL_IDLE_THRESHOLD;
+               tg->latency_target = DFL_LATENCY_TARGET;
+       } else if (index == LIMIT_LOW) {
+               tg->idletime_threshold = tg->idletime_threshold_conf;
+               tg->latency_target = tg->latency_target_conf;
+       }
 
-       if (index == LIMIT_LOW) {
-               blk_throtl_update_limit_valid(tg->td);
-               if (tg->td->limit_valid[LIMIT_LOW])
+       blk_throtl_update_limit_valid(tg->td);
+       if (tg->td->limit_valid[LIMIT_LOW]) {
+               if (index == LIMIT_LOW)
                        tg->td->limit_index = LIMIT_LOW;
-               tg->idletime_threshold = (idle_time == ULONG_MAX) ?
-                       ULONG_MAX : idle_time;
-               tg->latency_target = (latency_time == ULONG_MAX) ?
-                       ULONG_MAX : latency_time;
-       }
-       tg_conf_updated(tg);
+       } else
+               tg->td->limit_index = LIMIT_MAX;
+       tg_conf_updated(tg, index == LIMIT_LOW &&
+               tg->td->limit_valid[LIMIT_LOW]);
        ret = 0;
 out_finish:
        blkg_conf_finish(&ctx);
@@ -1722,17 +1770,25 @@ static bool throtl_tg_is_idle(struct throtl_grp *tg)
        /*
         * cgroup is idle if:
         * - single idle is too long, longer than a fixed value (in case user
-        *   configure a too big threshold) or 4 times of slice
+        *   configure a too big threshold) or 4 times the idle time threshold
         * - average think time is more than threshold
         * - IO latency is largely below threshold
         */
-       unsigned long time = jiffies_to_usecs(4 * tg->td->throtl_slice);
-
-       time = min_t(unsigned long, MAX_IDLE_TIME, time);
-       return (ktime_get_ns() >> 10) - tg->last_finish_time > time ||
-              tg->avg_idletime > tg->idletime_threshold ||
-              (tg->latency_target && tg->bio_cnt &&
+       unsigned long time;
+       bool ret;
+
+       time = min_t(unsigned long, MAX_IDLE_TIME, 4 * tg->idletime_threshold);
+       ret = tg->latency_target == DFL_LATENCY_TARGET ||
+             tg->idletime_threshold == DFL_IDLE_THRESHOLD ||
+             (ktime_get_ns() >> 10) - tg->last_finish_time > time ||
+             tg->avg_idletime > tg->idletime_threshold ||
+             (tg->latency_target && tg->bio_cnt &&
                tg->bad_bio_cnt * 5 < tg->bio_cnt);
+       throtl_log(&tg->service_queue,
+               "avg_idle=%ld, idle_threshold=%ld, bad_bio=%d, total_bio=%d, is_idle=%d, scale=%d",
+               tg->avg_idletime, tg->idletime_threshold, tg->bad_bio_cnt,
+               tg->bio_cnt, ret, tg->td->scale);
+       return ret;
 }
 
 static bool throtl_tg_can_upgrade(struct throtl_grp *tg)
@@ -1828,6 +1884,7 @@ static void throtl_upgrade_state(struct throtl_data *td)
        struct cgroup_subsys_state *pos_css;
        struct blkcg_gq *blkg;
 
+       throtl_log(&td->service_queue, "upgrade to max");
        td->limit_index = LIMIT_MAX;
        td->low_upgrade_time = jiffies;
        td->scale = 0;
@@ -1850,6 +1907,7 @@ static void throtl_downgrade_state(struct throtl_data *td, int new)
 {
        td->scale /= 2;
 
+       throtl_log(&td->service_queue, "downgrade, scale %d", td->scale);
        if (td->scale) {
                td->low_upgrade_time = jiffies - td->scale * td->throtl_slice;
                return;
@@ -2023,6 +2081,11 @@ static void throtl_update_latency_buckets(struct throtl_data *td)
                td->avg_buckets[i].valid = true;
                last_latency = td->avg_buckets[i].latency;
        }
+
+       for (i = 0; i < LATENCY_BUCKET_SIZE; i++)
+               throtl_log(&td->service_queue,
+                       "Latency bucket %d: latency=%ld, valid=%d", i,
+                       td->avg_buckets[i].latency, td->avg_buckets[i].valid);
 }
 #else
 static inline void throtl_update_latency_buckets(struct throtl_data *td)
@@ -2354,19 +2417,14 @@ void blk_throtl_exit(struct request_queue *q)
 void blk_throtl_register_queue(struct request_queue *q)
 {
        struct throtl_data *td;
-       struct cgroup_subsys_state *pos_css;
-       struct blkcg_gq *blkg;
 
        td = q->td;
        BUG_ON(!td);
 
-       if (blk_queue_nonrot(q)) {
+       if (blk_queue_nonrot(q))
                td->throtl_slice = DFL_THROTL_SLICE_SSD;
-               td->dft_idletime_threshold = DFL_IDLE_THRESHOLD_SSD;
-       } else {
+       else
                td->throtl_slice = DFL_THROTL_SLICE_HD;
-               td->dft_idletime_threshold = DFL_IDLE_THRESHOLD_HD;
-       }
 #ifndef CONFIG_BLK_DEV_THROTTLING_LOW
        /* if no low limit, use previous default */
        td->throtl_slice = DFL_THROTL_SLICE_HD;
@@ -2375,18 +2433,6 @@ void blk_throtl_register_queue(struct request_queue *q)
        td->track_bio_latency = !q->mq_ops && !q->request_fn;
        if (!td->track_bio_latency)
                blk_stat_enable_accounting(q);
-
-       /*
-        * some tg are created before queue is fully initialized, eg, nonrot
-        * isn't initialized yet
-        */
-       rcu_read_lock();
-       blkg_for_each_descendant_post(blkg, pos_css, q->root_blkg) {
-               struct throtl_grp *tg = blkg_to_tg(blkg);
-
-               tg->idletime_threshold = td->dft_idletime_threshold;
-       }
-       rcu_read_unlock();
 }
 
 #ifdef CONFIG_BLK_DEV_THROTTLING_LOW
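
The net effect of the limit changes above: a zero low limit no longer silently inherits the max limit; it means "unlimited" (U64_MAX/UINT_MAX) for intermediate cgroups or when the sibling knob is set, and a small floor (MIN_THROTL_BPS or MIN_THROTL_IOPS) otherwise. Condensed into one function; the constant mirrors the patch, while the struct and field names are illustrative:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MIN_THROTL_BPS (320 * 1024)

struct tg {
	uint64_t bps_low;	/* configured low bps limit, 0 = unset */
	unsigned int iops_low;	/* configured low iops limit, 0 = unset */
	bool has_children;	/* intermediate node in the cgroup tree */
};

static uint64_t tg_bps_low_limit(const struct tg *tg)
{
	if (tg->bps_low)
		return tg->bps_low;
	/* intermediate node, or the iops knob carries the limit */
	if (tg->has_children || tg->iops_low)
		return UINT64_MAX;
	return MIN_THROTL_BPS;	/* leaf with neither knob set */
}

int main(void)
{
	struct tg leaf = { .bps_low = 0, .iops_low = 100 };

	printf("%llu\n", (unsigned long long)tg_bps_low_limit(&leaf));
	return 0;
}
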
index 2ed70228e44fc706e6efee71ca000e5e47433217..83c8e1100525f7dd80b9a75e83cd2f8efb0f5969 100644 (file)
@@ -59,7 +59,7 @@ void blk_free_flush_queue(struct blk_flush_queue *q);
 
 int blk_init_rl(struct request_list *rl, struct request_queue *q,
                gfp_t gfp_mask);
-void blk_exit_rl(struct request_list *rl);
+void blk_exit_rl(struct request_queue *q, struct request_list *rl);
 void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
                        struct bio *bio);
 void blk_queue_bypass_start(struct request_queue *q);
index da69b079725fbf62a407db76f7c5c430c52be3f9..b7e9c7feeab2acbd1a846d0c31285460aba076ec 100644 (file)
@@ -38,9 +38,13 @@ static const u64 cfq_target_latency = (u64)NSEC_PER_SEC * 3/10; /* 300 ms */
 static const int cfq_hist_divisor = 4;
 
 /*
- * offset from end of service tree
+ * offset from end of queue service tree for idle class
  */
 #define CFQ_IDLE_DELAY         (NSEC_PER_SEC / 5)
+/* offset from end of group service tree under time slice mode */
+#define CFQ_SLICE_MODE_GROUP_DELAY (NSEC_PER_SEC / 5)
+/* offset from end of group service under IOPS mode */
+#define CFQ_IOPS_MODE_GROUP_DELAY (HZ / 5)
 
 /*
  * below this threshold, we consider thinktime immediate
@@ -1362,6 +1366,14 @@ cfq_group_service_tree_add(struct cfq_rb_root *st, struct cfq_group *cfqg)
        cfqg->vfraction = max_t(unsigned, vfr, 1);
 }
 
+static inline u64 cfq_get_cfqg_vdisktime_delay(struct cfq_data *cfqd)
+{
+       if (!iops_mode(cfqd))
+               return CFQ_SLICE_MODE_GROUP_DELAY;
+       else
+               return CFQ_IOPS_MODE_GROUP_DELAY;
+}
+
 static void
 cfq_group_notify_queue_add(struct cfq_data *cfqd, struct cfq_group *cfqg)
 {
@@ -1381,7 +1393,8 @@ cfq_group_notify_queue_add(struct cfq_data *cfqd, struct cfq_group *cfqg)
        n = rb_last(&st->rb);
        if (n) {
                __cfqg = rb_entry_cfqg(n);
-               cfqg->vdisktime = __cfqg->vdisktime + CFQ_IDLE_DELAY;
+               cfqg->vdisktime = __cfqg->vdisktime +
+                       cfq_get_cfqg_vdisktime_delay(cfqd);
        } else
                cfqg->vdisktime = st->min_vdisktime;
        cfq_group_service_tree_add(st, cfqg);
index ff07b9143ca456f8b2e8aaa3f41f9d6d8b5fe7a3..c5ec8246e25e1ed3eb6aef683c99e4aacd32ce2c 100644 (file)
@@ -320,8 +320,10 @@ struct hd_struct *add_partition(struct gendisk *disk, int partno,
 
        if (info) {
                struct partition_meta_info *pinfo = alloc_part_info(disk);
-               if (!pinfo)
+               if (!pinfo) {
+                       err = -ENOMEM;
                        goto out_free_stats;
+               }
                memcpy(pinfo, info, sizeof(*info));
                p->info = pinfo;
        }
index 93e7c1b32eddd5aa27fc8c96f5f581f712541a53..5610cd537da78812e2633d76ca90e5c3fb66e7cc 100644 (file)
@@ -300,6 +300,8 @@ static void parse_bsd(struct parsed_partitions *state,
                        continue;
                bsd_start = le32_to_cpu(p->p_offset);
                bsd_size = le32_to_cpu(p->p_size);
+               if (memcmp(flavour, "bsd\0", 4) == 0)
+                       bsd_start += offset;
                if (offset == bsd_start && size == bsd_size)
                        /* full parent partition, we have it already */
                        continue;
index 5a968a78652bd23d269fbc871957a4488fb03af6..7abe6650573950674ce41a7511fd9abe2736c451 100644 (file)
@@ -418,11 +418,7 @@ acpi_tb_get_table(struct acpi_table_desc *table_desc,
 
        table_desc->validation_count++;
        if (table_desc->validation_count == 0) {
-               ACPI_ERROR((AE_INFO,
-                           "Table %p, Validation count is zero after increment\n",
-                           table_desc));
                table_desc->validation_count--;
-               return_ACPI_STATUS(AE_LIMIT);
        }
 
        *out_table = table_desc->pointer;
index 25aba9b107dd51db522225b5171e7017a5a3ec9d..9ad8cdb58743b765a6daa615729d78745d70af30 100644 (file)
@@ -113,7 +113,7 @@ struct acpi_button {
 
 static BLOCKING_NOTIFIER_HEAD(acpi_lid_notifier);
 static struct acpi_device *lid_device;
-static u8 lid_init_state = ACPI_BUTTON_LID_INIT_OPEN;
+static u8 lid_init_state = ACPI_BUTTON_LID_INIT_METHOD;
 
 static unsigned long lid_report_interval __read_mostly = 500;
 module_param(lid_report_interval, ulong, 0644);
index 3ba1c3472cf9e293ae70fd7cdc649eeb96abf1a0..fd86bec98dea37f0fdc7afbfdcf94d79227e8a44 100644 (file)
@@ -26,7 +26,7 @@ static int nfit_handle_mce(struct notifier_block *nb, unsigned long val,
        struct nfit_spa *nfit_spa;
 
        /* We only care about memory errors */
-       if (!(mce->status & MCACOD))
+       if (!mce_is_memory_error(mce))
                return NOTIFY_DONE;
 
        /*
index 1b5ee1e0e5a3073457b0b15da34aadf292a544ba..e414fabf73158d77fba356be2c10354647e81a44 100644 (file)
@@ -333,14 +333,17 @@ static ssize_t acpi_table_show(struct file *filp, struct kobject *kobj,
            container_of(bin_attr, struct acpi_table_attr, attr);
        struct acpi_table_header *table_header = NULL;
        acpi_status status;
+       ssize_t rc;
 
        status = acpi_get_table(table_attr->name, table_attr->instance,
                                &table_header);
        if (ACPI_FAILURE(status))
                return -ENODEV;
 
-       return memory_read_from_buffer(buf, count, &offset,
-                                      table_header, table_header->length);
+       rc = memory_read_from_buffer(buf, count, &offset, table_header,
+                       table_header->length);
+       acpi_put_table(table_header);
+       return rc;
 }
 
 static int acpi_table_attr_init(struct kobject *tables_obj,
index 2fc52407306c15c27b9fb0b11c2db4ef4641aeba..c69954023c2e7d8c235aace4b66a1d32298f36eb 100644 (file)
@@ -1364,6 +1364,40 @@ static inline void ahci_gtf_filter_workaround(struct ata_host *host)
 {}
 #endif
 
+/*
+ * On the Acer Aspire Switch Alpha 12, sometimes all SATA ports are detected
+ * as DUMMY, or detected but eventually get a "link down" and never get up
+ * again. When this happens, CAP.NP may hold a value of 0x00 or 0x01, and the
+ * port_map may hold a value of 0x00.
+ *
+ * Overriding CAP.NP to 0x02 and the port_map to 0x7 will reveal all 3 ports
+ * and can significantly reduce the occurrence of the problem.
+ *
+ * https://bugzilla.kernel.org/show_bug.cgi?id=189471
+ */
+static void acer_sa5_271_workaround(struct ahci_host_priv *hpriv,
+                                   struct pci_dev *pdev)
+{
+       static const struct dmi_system_id sysids[] = {
+               {
+                       .ident = "Acer Switch Alpha 12",
+                       .matches = {
+                               DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+                               DMI_MATCH(DMI_PRODUCT_NAME, "Switch SA5-271")
+                       },
+               },
+               { }
+       };
+
+       if (dmi_check_system(sysids)) {
+               dev_info(&pdev->dev, "enabling Acer Switch Alpha 12 workaround\n");
+               if ((hpriv->saved_cap & 0xC734FF00) == 0xC734FF00) {
+                       hpriv->port_map = 0x7;
+                       hpriv->cap = 0xC734FF02;
+               }
+       }
+}
+
 #ifdef CONFIG_ARM64
 /*
  * Due to ERRATA#22536, ThunderX needs to handle HOST_IRQ_STAT differently.
@@ -1636,6 +1670,10 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
                         "online status unreliable, applying workaround\n");
        }
 
+
+       /* Acer SA5-271 workaround modifies private_data */
+       acer_sa5_271_workaround(hpriv, pdev);
+
        /* CAP.NP sometimes indicate the index of the last enabled
         * port, at other times, that of the last possible port, so
         * determining the maximum port number requires looking at
index aaa761b9081cc02a75792302c741f7b54ebd9823..cd2eab6aa92ea245e1a3dab839be7fe8aa938cdb 100644 (file)
@@ -514,8 +514,9 @@ int ahci_platform_init_host(struct platform_device *pdev,
 
        irq = platform_get_irq(pdev, 0);
        if (irq <= 0) {
-               dev_err(dev, "no irq\n");
-               return -EINVAL;
+               if (irq != -EPROBE_DEFER)
+                       dev_err(dev, "no irq\n");
+               return irq;
        }
 
        hpriv->irq = irq;
index 2d83b8c7596567a020300d8dd1aeefeb33a6a055..e157a0e4441916b77b53c402741b74e124012cc9 100644 (file)
@@ -6800,7 +6800,7 @@ static int __init ata_parse_force_one(char **cur,
        }
 
        force_ent->port = simple_strtoul(id, &endp, 10);
-       if (p == endp || *endp != '\0') {
+       if (id == endp || *endp != '\0') {
                *reason = "invalid port/link";
                return -EINVAL;
        }
index b66bcda88320fefa399ac9653eca64d3045a6a96..3b2246dded74fbeed89d53f913c725ab6e5c0082 100644 (file)
@@ -4067,7 +4067,6 @@ static int mv_platform_probe(struct platform_device *pdev)
        struct ata_host *host;
        struct mv_host_priv *hpriv;
        struct resource *res;
-       void __iomem *mmio;
        int n_ports = 0, irq = 0;
        int rc;
        int port;
@@ -4086,9 +4085,8 @@ static int mv_platform_probe(struct platform_device *pdev)
         * Get the register base first
         */
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       mmio = devm_ioremap_resource(&pdev->dev, res);
-       if (IS_ERR(mmio))
-               return PTR_ERR(mmio);
+       if (res == NULL)
+               return -EINVAL;
 
        /* allocate host */
        if (pdev->dev.of_node) {
@@ -4132,7 +4130,12 @@ static int mv_platform_probe(struct platform_device *pdev)
        hpriv->board_idx = chip_soc;
 
        host->iomap = NULL;
-       hpriv->base = mmio - SATAHC0_REG_BASE;
+       hpriv->base = devm_ioremap(&pdev->dev, res->start,
+                                  resource_size(res));
+       if (!hpriv->base)
+               return -ENOMEM;
+
+       hpriv->base -= SATAHC0_REG_BASE;
 
        hpriv->clk = clk_get(&pdev->dev, NULL);
        if (IS_ERR(hpriv->clk))
index 5d38245a7a73a7cc4fa0d49255a52c7daead7886..b7939a2c1fab53ff2a94799a261d401c1c440f2e 100644 (file)
@@ -890,7 +890,10 @@ static int sata_rcar_probe(struct platform_device *pdev)
                dev_err(&pdev->dev, "failed to get access to sata clock\n");
                return PTR_ERR(priv->clk);
        }
-       clk_prepare_enable(priv->clk);
+
+       ret = clk_prepare_enable(priv->clk);
+       if (ret)
+               return ret;
 
        host = ata_host_alloc(&pdev->dev, 1);
        if (!host) {
@@ -970,8 +973,11 @@ static int sata_rcar_resume(struct device *dev)
        struct ata_host *host = dev_get_drvdata(dev);
        struct sata_rcar_priv *priv = host->private_data;
        void __iomem *base = priv->base;
+       int ret;
 
-       clk_prepare_enable(priv->clk);
+       ret = clk_prepare_enable(priv->clk);
+       if (ret)
+               return ret;
 
        /* ack and mask */
        iowrite32(0, base + SATAINTSTAT_REG);
@@ -988,8 +994,11 @@ static int sata_rcar_restore(struct device *dev)
 {
        struct ata_host *host = dev_get_drvdata(dev);
        struct sata_rcar_priv *priv = host->private_data;
+       int ret;
 
-       clk_prepare_enable(priv->clk);
+       ret = clk_prepare_enable(priv->clk);
+       if (ret)
+               return ret;
 
        sata_rcar_setup_port(host);
 
index 9a7bb2c2944772cad8124a965bacc17d0aa8f935..f3f191ba8ca4bbe6b7d87a7accc84bd648e4d718 100644 (file)
@@ -937,14 +937,6 @@ static int nbd_reconnect_socket(struct nbd_device *nbd, unsigned long arg)
        return -ENOSPC;
 }
 
-/* Reset all properties of an NBD device */
-static void nbd_reset(struct nbd_device *nbd)
-{
-       nbd->config = NULL;
-       nbd->tag_set.timeout = 0;
-       queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, nbd->disk->queue);
-}
-
 static void nbd_bdev_reset(struct block_device *bdev)
 {
        if (bdev->bd_openers > 1)
@@ -1029,7 +1021,11 @@ static void nbd_config_put(struct nbd_device *nbd)
                        }
                        kfree(config->socks);
                }
-               nbd_reset(nbd);
+               kfree(nbd->config);
+               nbd->config = NULL;
+
+               nbd->tag_set.timeout = 0;
+               queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, nbd->disk->queue);
 
                mutex_unlock(&nbd->config_lock);
                nbd_put(nbd);
@@ -1483,7 +1479,6 @@ static int nbd_dev_add(int index)
        disk->fops = &nbd_fops;
        disk->private_data = nbd;
        sprintf(disk->disk_name, "nbd%d", index);
-       nbd_reset(nbd);
        add_disk(disk);
        nbd_total_devices++;
        return index;
index 454bf9c34882f33d673ccbaf0c8afa4f3ee18ad4..c16f74547804ccb957275f6d59b705b0ba35eb6b 100644 (file)
@@ -4023,6 +4023,7 @@ static void rbd_queue_workfn(struct work_struct *work)
 
        switch (req_op(rq)) {
        case REQ_OP_DISCARD:
+       case REQ_OP_WRITE_ZEROES:
                op_type = OBJ_OP_DISCARD;
                break;
        case REQ_OP_WRITE:
@@ -4420,6 +4421,7 @@ static int rbd_init_disk(struct rbd_device *rbd_dev)
        q->limits.discard_granularity = segment_size;
        q->limits.discard_alignment = segment_size;
        blk_queue_max_discard_sectors(q, segment_size / SECTOR_SIZE);
+       blk_queue_max_write_zeroes_sectors(q, segment_size / SECTOR_SIZE);
 
        if (!ceph_test_opt(rbd_dev->rbd_client->client, NOCRC))
                q->backing_dev_info->capabilities |= BDI_CAP_STABLE_WRITES;
index d4dbd8d8e524d7b712f9668cbee57c7d722440b2..382c864814d944c79e610eaa434bc356d12bd335 100644 (file)
@@ -374,7 +374,7 @@ static ssize_t cm4040_write(struct file *filp, const char __user *buf,
 
        rc = write_sync_reg(SCR_HOST_TO_READER_START, dev);
        if (rc <= 0) {
-               DEBUGP(5, dev, "write_sync_reg c=%.2Zx\n", rc);
+               DEBUGP(5, dev, "write_sync_reg c=%.2zx\n", rc);
                DEBUGP(2, dev, "<- cm4040_write (failed)\n");
                if (rc == -ERESTARTSYS)
                        return rc;
@@ -387,7 +387,7 @@ static ssize_t cm4040_write(struct file *filp, const char __user *buf,
        for (i = 0; i < bytes_to_write; i++) {
                rc = wait_for_bulk_out_ready(dev);
                if (rc <= 0) {
-                       DEBUGP(5, dev, "wait_for_bulk_out_ready rc=%.2Zx\n",
+                       DEBUGP(5, dev, "wait_for_bulk_out_ready rc=%.2zx\n",
                               rc);
                        DEBUGP(2, dev, "<- cm4040_write (failed)\n");
                        if (rc == -ERESTARTSYS)
@@ -403,7 +403,7 @@ static ssize_t cm4040_write(struct file *filp, const char __user *buf,
        rc = write_sync_reg(SCR_HOST_TO_READER_DONE, dev);
 
        if (rc <= 0) {
-               DEBUGP(5, dev, "write_sync_reg c=%.2Zx\n", rc);
+               DEBUGP(5, dev, "write_sync_reg c=%.2zx\n", rc);
                DEBUGP(2, dev, "<- cm4040_write (failed)\n");
                if (rc == -ERESTARTSYS)
                        return rc;
index 0ab0249189072befe3cee1b8696052727f360540..a561f0c2f428df6cbd80e0fe5bfd3c479a578e18 100644 (file)
@@ -1097,12 +1097,16 @@ static void add_interrupt_bench(cycles_t start)
 static __u32 get_reg(struct fast_pool *f, struct pt_regs *regs)
 {
        __u32 *ptr = (__u32 *) regs;
+       unsigned long flags;
 
        if (regs == NULL)
                return 0;
+       local_irq_save(flags);
        if (f->reg_idx >= sizeof(struct pt_regs) / sizeof(__u32))
                f->reg_idx = 0;
-       return *(ptr + f->reg_idx++);
+       ptr += f->reg_idx++;
+       local_irq_restore(flags);
+       return *ptr;
 }
 
 void add_interrupt_randomness(int irq, int irq_flags)
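
The get_reg() fix closes a window where the interrupt path could move f->reg_idx between the bounds check and the dereference; local_irq_save() makes the read-modify-read sequence atomic with respect to interrupts on this CPU. A user-space analogue, with a signal handler standing in for the interrupt and all names illustrative:

#include <signal.h>
#include <stdio.h>

#define NREGS 16

static volatile unsigned int reg_idx;
static unsigned int regs[NREGS];

static void irq_handler(int sig)
{
	(void)sig;
	reg_idx = 0;	/* the "interrupt" may rewind the index */
}

static unsigned int get_reg(void)
{
	sigset_t set, old;
	unsigned int val;

	sigemptyset(&set);
	sigaddset(&set, SIGALRM);
	sigprocmask(SIG_BLOCK, &set, &old);	/* ~ local_irq_save() */
	if (reg_idx >= NREGS)
		reg_idx = 0;
	val = regs[reg_idx++];
	sigprocmask(SIG_SETMASK, &old, NULL);	/* ~ local_irq_restore() */
	return val;
}

int main(void)
{
	signal(SIGALRM, irq_handler);
	printf("%u\n", get_reg());
	return 0;
}
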
index 0e3f6496524d92c7c1717d8d2259684952d7acb8..26b643d57847de0fca4afc099f4bec49d6326be9 100644 (file)
@@ -2468,6 +2468,7 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
        if (!(cpufreq_driver->flags & CPUFREQ_STICKY) &&
            list_empty(&cpufreq_policy_list)) {
                /* if all ->init() calls failed, unregister */
+               ret = -ENODEV;
                pr_debug("%s: No CPU initialized for driver %s\n", __func__,
                         driver_data->name);
                goto err_if_unreg;
index 1b9bcd76c60e334e72a31a3b6ff1a21410ac23ff..c2dd43f3f5d8a3092e6847f18d124f0631bdf065 100644 (file)
@@ -127,7 +127,12 @@ static int kirkwood_cpufreq_probe(struct platform_device *pdev)
                return PTR_ERR(priv.cpu_clk);
        }
 
-       clk_prepare_enable(priv.cpu_clk);
+       err = clk_prepare_enable(priv.cpu_clk);
+       if (err) {
+               dev_err(priv.dev, "Unable to prepare cpuclk\n");
+               return err;
+       }
+
        kirkwood_freq_table[0].frequency = clk_get_rate(priv.cpu_clk) / 1000;
 
        priv.ddr_clk = of_clk_get_by_name(np, "ddrclk");
@@ -137,7 +142,11 @@ static int kirkwood_cpufreq_probe(struct platform_device *pdev)
                goto out_cpu;
        }
 
-       clk_prepare_enable(priv.ddr_clk);
+       err = clk_prepare_enable(priv.ddr_clk);
+       if (err) {
+               dev_err(priv.dev, "Unable to prepare ddrclk\n");
+               goto out_cpu;
+       }
        kirkwood_freq_table[1].frequency = clk_get_rate(priv.ddr_clk) / 1000;
 
        priv.powersave_clk = of_clk_get_by_name(np, "powersave");
@@ -146,7 +155,11 @@ static int kirkwood_cpufreq_probe(struct platform_device *pdev)
                err = PTR_ERR(priv.powersave_clk);
                goto out_ddr;
        }
-       clk_prepare_enable(priv.powersave_clk);
+       err = clk_prepare_enable(priv.powersave_clk);
+       if (err) {
+               dev_err(priv.dev, "Unable to prepare powersave clk\n");
+               goto out_ddr;
+       }
 
        of_node_put(np);
        np = NULL;
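
Reviewer note: clk_prepare_enable() returns an int that the old code ignored; the fix propagates it and unwinds already-acquired clocks through the existing out_cpu/out_ddr labels. A minimal sketch of the same pattern, assuming just two clocks:

#include <linux/clk.h>

static int bringup_clocks(struct clk *cpu_clk, struct clk *ddr_clk)
{
	int err;

	err = clk_prepare_enable(cpu_clk);
	if (err)
		return err;

	err = clk_prepare_enable(ddr_clk);
	if (err)
		clk_disable_unprepare(cpu_clk);	/* unwind the first clock */

	return err;
}
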
index d37e8dda807900fe9725aa153c20c1c2bc927a52..ec240592f5c8e7a450e2c26c20feece9d12dafad 100644 (file)
@@ -201,6 +201,7 @@ struct ep93xx_dma_engine {
        struct dma_device       dma_dev;
        bool                    m2m;
        int                     (*hw_setup)(struct ep93xx_dma_chan *);
+       void                    (*hw_synchronize)(struct ep93xx_dma_chan *);
        void                    (*hw_shutdown)(struct ep93xx_dma_chan *);
        void                    (*hw_submit)(struct ep93xx_dma_chan *);
        int                     (*hw_interrupt)(struct ep93xx_dma_chan *);
@@ -323,6 +324,8 @@ static int m2p_hw_setup(struct ep93xx_dma_chan *edmac)
                | M2P_CONTROL_ENABLE;
        m2p_set_control(edmac, control);
 
+       edmac->buffer = 0;
+
        return 0;
 }
 
@@ -331,21 +334,27 @@ static inline u32 m2p_channel_state(struct ep93xx_dma_chan *edmac)
        return (readl(edmac->regs + M2P_STATUS) >> 4) & 0x3;
 }
 
-static void m2p_hw_shutdown(struct ep93xx_dma_chan *edmac)
+static void m2p_hw_synchronize(struct ep93xx_dma_chan *edmac)
 {
+       unsigned long flags;
        u32 control;
 
+       spin_lock_irqsave(&edmac->lock, flags);
        control = readl(edmac->regs + M2P_CONTROL);
        control &= ~(M2P_CONTROL_STALLINT | M2P_CONTROL_NFBINT);
        m2p_set_control(edmac, control);
+       spin_unlock_irqrestore(&edmac->lock, flags);
 
        while (m2p_channel_state(edmac) >= M2P_STATE_ON)
-               cpu_relax();
+               schedule();
+}
 
+static void m2p_hw_shutdown(struct ep93xx_dma_chan *edmac)
+{
        m2p_set_control(edmac, 0);
 
-       while (m2p_channel_state(edmac) == M2P_STATE_STALL)
-               cpu_relax();
+       while (m2p_channel_state(edmac) != M2P_STATE_IDLE)
+               dev_warn(chan2dev(edmac), "M2P: Not yet IDLE\n");
 }
 
 static void m2p_fill_desc(struct ep93xx_dma_chan *edmac)
@@ -1160,6 +1169,26 @@ fail:
        return NULL;
 }
 
+/**
+ * ep93xx_dma_synchronize - Synchronizes the termination of transfers to the
+ * current context.
+ * @chan: channel
+ *
+ * Synchronizes the DMA channel termination to the current context. When this
+ * function returns it is guaranteed that all transfers for previously issued
+ * descriptors have stopped and it is safe to free the memory associated
+ * with them. Furthermore it is guaranteed that all complete callback functions
+ * for a previously submitted descriptor have finished running and it is safe to
+ * free resources accessed from within the complete callbacks.
+ */
+static void ep93xx_dma_synchronize(struct dma_chan *chan)
+{
+       struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
+
+       if (edmac->edma->hw_synchronize)
+               edmac->edma->hw_synchronize(edmac);
+}
+
 /**
  * ep93xx_dma_terminate_all - terminate all transactions
  * @chan: channel
@@ -1323,6 +1352,7 @@ static int __init ep93xx_dma_probe(struct platform_device *pdev)
        dma_dev->device_prep_slave_sg = ep93xx_dma_prep_slave_sg;
        dma_dev->device_prep_dma_cyclic = ep93xx_dma_prep_dma_cyclic;
        dma_dev->device_config = ep93xx_dma_slave_config;
+       dma_dev->device_synchronize = ep93xx_dma_synchronize;
        dma_dev->device_terminate_all = ep93xx_dma_terminate_all;
        dma_dev->device_issue_pending = ep93xx_dma_issue_pending;
        dma_dev->device_tx_status = ep93xx_dma_tx_status;
@@ -1340,6 +1370,7 @@ static int __init ep93xx_dma_probe(struct platform_device *pdev)
        } else {
                dma_cap_set(DMA_PRIVATE, dma_dev->cap_mask);
 
+               edma->hw_synchronize = m2p_hw_synchronize;
                edma->hw_setup = m2p_hw_setup;
                edma->hw_shutdown = m2p_hw_shutdown;
                edma->hw_submit = m2p_hw_submit;
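
Reviewer note: wiring the new hw_synchronize hook into device_synchronize lets consumers follow the standard dmaengine teardown contract. A sketch of the consumer side:

#include <linux/dmaengine.h>

/* After an async terminate, dmaengine_synchronize() returns only
 * once all descriptors have stopped and all completion callbacks
 * have finished running, so freeing their buffers afterwards is
 * safe. */
static void stop_dma_safely(struct dma_chan *chan)
{
	dmaengine_terminate_async(chan);
	dmaengine_synchronize(chan);
	/* now safe to free memory referenced by the callbacks */
}
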
index a28a01fcba674dc569e4d49ca6fd50def5a58645..f3e211f8f6c58c00080703f11b25937bb36dab39 100644 (file)
@@ -161,6 +161,7 @@ struct mv_xor_v2_device {
        struct mv_xor_v2_sw_desc *sw_desq;
        int desc_size;
        unsigned int npendings;
+       unsigned int hw_queue_idx;
 };
 
 /**
@@ -213,18 +214,6 @@ static void mv_xor_v2_set_data_buffers(struct mv_xor_v2_device *xor_dev,
        }
 }
 
-/*
- * Return the next available index in the DESQ.
- */
-static int mv_xor_v2_get_desq_write_ptr(struct mv_xor_v2_device *xor_dev)
-{
-       /* read the index for the next available descriptor in the DESQ */
-       u32 reg = readl(xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_ALLOC_OFF);
-
-       return ((reg >> MV_XOR_V2_DMA_DESQ_ALLOC_WRPTR_SHIFT)
-               & MV_XOR_V2_DMA_DESQ_ALLOC_WRPTR_MASK);
-}
-
 /*
  * notify the engine of new descriptors, and update the available index.
  */
@@ -257,22 +246,6 @@ static int mv_xor_v2_set_desc_size(struct mv_xor_v2_device *xor_dev)
        return MV_XOR_V2_EXT_DESC_SIZE;
 }
 
-/*
- * Set the IMSG threshold
- */
-static inline
-void mv_xor_v2_set_imsg_thrd(struct mv_xor_v2_device *xor_dev, int thrd_val)
-{
-       u32 reg;
-
-       reg = readl(xor_dev->dma_base + MV_XOR_V2_DMA_IMSG_THRD_OFF);
-
-       reg &= (~MV_XOR_V2_DMA_IMSG_THRD_MASK << MV_XOR_V2_DMA_IMSG_THRD_SHIFT);
-       reg |= (thrd_val << MV_XOR_V2_DMA_IMSG_THRD_SHIFT);
-
-       writel(reg, xor_dev->dma_base + MV_XOR_V2_DMA_IMSG_THRD_OFF);
-}
-
 static irqreturn_t mv_xor_v2_interrupt_handler(int irq, void *data)
 {
        struct mv_xor_v2_device *xor_dev = data;
@@ -288,12 +261,6 @@ static irqreturn_t mv_xor_v2_interrupt_handler(int irq, void *data)
        if (!ndescs)
                return IRQ_NONE;
 
-       /*
-        * Update IMSG threshold, to disable new IMSG interrupts until
-        * end of the tasklet
-        */
-       mv_xor_v2_set_imsg_thrd(xor_dev, MV_XOR_V2_DESC_NUM);
-
        /* schedule a tasklet to handle descriptors callbacks */
        tasklet_schedule(&xor_dev->irq_tasklet);
 
@@ -306,7 +273,6 @@ static irqreturn_t mv_xor_v2_interrupt_handler(int irq, void *data)
 static dma_cookie_t
 mv_xor_v2_tx_submit(struct dma_async_tx_descriptor *tx)
 {
-       int desq_ptr;
        void *dest_hw_desc;
        dma_cookie_t cookie;
        struct mv_xor_v2_sw_desc *sw_desc =
@@ -322,15 +288,15 @@ mv_xor_v2_tx_submit(struct dma_async_tx_descriptor *tx)
        spin_lock_bh(&xor_dev->lock);
        cookie = dma_cookie_assign(tx);
 
-       /* get the next available slot in the DESQ */
-       desq_ptr = mv_xor_v2_get_desq_write_ptr(xor_dev);
-
        /* copy the HW descriptor from the SW descriptor to the DESQ */
-       dest_hw_desc = xor_dev->hw_desq_virt + desq_ptr;
+       dest_hw_desc = xor_dev->hw_desq_virt + xor_dev->hw_queue_idx;
 
        memcpy(dest_hw_desc, &sw_desc->hw_desc, xor_dev->desc_size);
 
        xor_dev->npendings++;
+       xor_dev->hw_queue_idx++;
+       if (xor_dev->hw_queue_idx >= MV_XOR_V2_DESC_NUM)
+               xor_dev->hw_queue_idx = 0;
 
        spin_unlock_bh(&xor_dev->lock);
 
@@ -344,6 +310,7 @@ static struct mv_xor_v2_sw_desc     *
 mv_xor_v2_prep_sw_desc(struct mv_xor_v2_device *xor_dev)
 {
        struct mv_xor_v2_sw_desc *sw_desc;
+       bool found = false;
 
        /* Lock the channel */
        spin_lock_bh(&xor_dev->lock);
@@ -355,19 +322,23 @@ mv_xor_v2_prep_sw_desc(struct mv_xor_v2_device *xor_dev)
                return NULL;
        }
 
-       /* get a free SW descriptor from the SW DESQ */
-       sw_desc = list_first_entry(&xor_dev->free_sw_desc,
-                                  struct mv_xor_v2_sw_desc, free_list);
+       list_for_each_entry(sw_desc, &xor_dev->free_sw_desc, free_list) {
+               if (async_tx_test_ack(&sw_desc->async_tx)) {
+                       found = true;
+                       break;
+               }
+       }
+
+       if (!found) {
+               spin_unlock_bh(&xor_dev->lock);
+               return NULL;
+       }
+
        list_del(&sw_desc->free_list);
 
        /* Release the channel */
        spin_unlock_bh(&xor_dev->lock);
 
-       /* set the async tx descriptor */
-       dma_async_tx_descriptor_init(&sw_desc->async_tx, &xor_dev->dmachan);
-       sw_desc->async_tx.tx_submit = mv_xor_v2_tx_submit;
-       async_tx_ack(&sw_desc->async_tx);
-
        return sw_desc;
 }
 
@@ -389,6 +360,8 @@ mv_xor_v2_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest,
                __func__, len, &src, &dest, flags);
 
        sw_desc = mv_xor_v2_prep_sw_desc(xor_dev);
+       if (!sw_desc)
+               return NULL;
 
        sw_desc->async_tx.flags = flags;
 
@@ -443,6 +416,8 @@ mv_xor_v2_prep_dma_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
                __func__, src_cnt, len, &dest, flags);
 
        sw_desc = mv_xor_v2_prep_sw_desc(xor_dev);
+       if (!sw_desc)
+               return NULL;
 
        sw_desc->async_tx.flags = flags;
 
@@ -491,6 +466,8 @@ mv_xor_v2_prep_dma_interrupt(struct dma_chan *chan, unsigned long flags)
                container_of(chan, struct mv_xor_v2_device, dmachan);
 
        sw_desc = mv_xor_v2_prep_sw_desc(xor_dev);
+       if (!sw_desc)
+               return NULL;
 
        /* set the HW descriptor */
        hw_descriptor = &sw_desc->hw_desc;
@@ -554,7 +531,6 @@ static void mv_xor_v2_tasklet(unsigned long data)
 {
        struct mv_xor_v2_device *xor_dev = (struct mv_xor_v2_device *) data;
        int pending_ptr, num_of_pending, i;
-       struct mv_xor_v2_descriptor *next_pending_hw_desc = NULL;
        struct mv_xor_v2_sw_desc *next_pending_sw_desc = NULL;
 
        dev_dbg(xor_dev->dmadev.dev, "%s %d\n", __func__, __LINE__);
@@ -562,17 +538,10 @@ static void mv_xor_v2_tasklet(unsigned long data)
        /* get the pending descriptors parameters */
        num_of_pending = mv_xor_v2_get_pending_params(xor_dev, &pending_ptr);
 
-       /* next HW descriptor */
-       next_pending_hw_desc = xor_dev->hw_desq_virt + pending_ptr;
-
        /* loop over free descriptors */
        for (i = 0; i < num_of_pending; i++) {
-
-               if (pending_ptr > MV_XOR_V2_DESC_NUM)
-                       pending_ptr = 0;
-
-               if (next_pending_sw_desc != NULL)
-                       next_pending_hw_desc++;
+               struct mv_xor_v2_descriptor *next_pending_hw_desc =
+                       xor_dev->hw_desq_virt + pending_ptr;
 
                /* get the SW descriptor related to the HW descriptor */
                next_pending_sw_desc =
@@ -608,15 +577,14 @@ static void mv_xor_v2_tasklet(unsigned long data)
 
                /* increment the next descriptor */
                pending_ptr++;
+               if (pending_ptr >= MV_XOR_V2_DESC_NUM)
+                       pending_ptr = 0;
        }
 
        if (num_of_pending != 0) {
                /* free the descriptors */
                mv_xor_v2_free_desc_from_desq(xor_dev, num_of_pending);
        }
-
-       /* Update IMSG threshold, to enable new IMSG interrupts */
-       mv_xor_v2_set_imsg_thrd(xor_dev, 0);
 }
 
 /*
@@ -648,9 +616,6 @@ static int mv_xor_v2_descq_init(struct mv_xor_v2_device *xor_dev)
        writel((xor_dev->hw_desq & 0xFFFF00000000) >> 32,
               xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_BAHR_OFF);
 
-       /* enable the DMA engine */
-       writel(0, xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_STOP_OFF);
-
        /*
         * This is a temporary solution, until we activate the
         * SMMU. Set the attributes for reading & writing data buffers
@@ -694,6 +659,9 @@ static int mv_xor_v2_descq_init(struct mv_xor_v2_device *xor_dev)
        reg |= MV_XOR_V2_GLOB_PAUSE_AXI_TIME_DIS_VAL;
        writel(reg, xor_dev->glob_base + MV_XOR_V2_GLOB_PAUSE);
 
+       /* enable the DMA engine */
+       writel(0, xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_STOP_OFF);
+
        return 0;
 }
 
@@ -725,6 +693,10 @@ static int mv_xor_v2_probe(struct platform_device *pdev)
 
        platform_set_drvdata(pdev, xor_dev);
 
+       ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(40));
+       if (ret)
+               return ret;
+
        xor_dev->clk = devm_clk_get(&pdev->dev, NULL);
        if (IS_ERR(xor_dev->clk) && PTR_ERR(xor_dev->clk) == -EPROBE_DEFER)
                return -EPROBE_DEFER;
@@ -785,8 +757,15 @@ static int mv_xor_v2_probe(struct platform_device *pdev)
 
        /* add all SW descriptors to the free list */
        for (i = 0; i < MV_XOR_V2_DESC_NUM; i++) {
-               xor_dev->sw_desq[i].idx = i;
-               list_add(&xor_dev->sw_desq[i].free_list,
+               struct mv_xor_v2_sw_desc *sw_desc =
+                       xor_dev->sw_desq + i;
+               sw_desc->idx = i;
+               dma_async_tx_descriptor_init(&sw_desc->async_tx,
+                                            &xor_dev->dmachan);
+               sw_desc->async_tx.tx_submit = mv_xor_v2_tx_submit;
+               async_tx_ack(&sw_desc->async_tx);
+
+               list_add(&sw_desc->free_list,
                         &xor_dev->free_sw_desc);
        }
 
index 8b0da7fa520d27ac514228130c354f121a12b848..e90a7a0d760af6d031fa465208e88f2dd6f056b4 100644 (file)
@@ -3008,7 +3008,8 @@ static int pl330_remove(struct amba_device *adev)
 
        for (i = 0; i < AMBA_NR_IRQS; i++) {
                irq = adev->irq[i];
-               devm_free_irq(&adev->dev, irq, pl330);
+               if (irq)
+                       devm_free_irq(&adev->dev, irq, pl330);
        }
 
        dma_async_device_unregister(&pl330->ddma);
index db41795fe42ae6ed355de41f12b5c90ea661bde4..bd261c9e9664b6ac951939641091bc0bb7466380 100644 (file)
@@ -1287,6 +1287,9 @@ static unsigned int rcar_dmac_chan_get_residue(struct rcar_dmac_chan *chan,
        if (desc->hwdescs.use) {
                dptr = (rcar_dmac_chan_read(chan, RCAR_DMACHCRB) &
                        RCAR_DMACHCRB_DPTR_MASK) >> RCAR_DMACHCRB_DPTR_SHIFT;
+               if (dptr == 0)
+                       dptr = desc->nchunks;
+               dptr--;
                WARN_ON(dptr >= desc->nchunks);
        } else {
                running = desc->running;
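
Reviewer note: the residue fix appears to account for DMACHCRB.DPTR pointing at the descriptor the hardware will fetch next rather than the one in flight, so the current chunk is DPTR - 1, with zero wrapping to nchunks - 1. For example, with nchunks = 4 and a raw DPTR of 0 the running chunk is index 3. As a function:

/* Map the hardware's "next descriptor" pointer to the chunk
 * currently in flight, with wrap-around; matches the three added
 * lines above. */
static unsigned int current_chunk(unsigned int dptr, unsigned int nchunks)
{
	return (dptr == 0 ? nchunks : dptr) - 1;
}
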
index 72c649713aceecd75a20957522792702fca1696a..31a145154e9f26a8562e51223cffa50e65352075 100644 (file)
@@ -117,7 +117,7 @@ struct usb_dmac {
 #define USB_DMASWR                     0x0008
 #define USB_DMASWR_SWR                 (1 << 0)
 #define USB_DMAOR                      0x0060
-#define USB_DMAOR_AE                   (1 << 2)
+#define USB_DMAOR_AE                   (1 << 1)
 #define USB_DMAOR_DME                  (1 << 0)
 
 #define USB_DMASAR                     0x0000
index 44c01390d0353fd3170fc797eb4ce6393229bd14..dc269cb288c209d60e780eff287af2930fb4c477 100644 (file)
@@ -47,6 +47,7 @@ DEFINE_DMI_ATTR_WITH_SHOW(product_name,               0444, DMI_PRODUCT_NAME);
 DEFINE_DMI_ATTR_WITH_SHOW(product_version,     0444, DMI_PRODUCT_VERSION);
 DEFINE_DMI_ATTR_WITH_SHOW(product_serial,      0400, DMI_PRODUCT_SERIAL);
 DEFINE_DMI_ATTR_WITH_SHOW(product_uuid,                0400, DMI_PRODUCT_UUID);
+DEFINE_DMI_ATTR_WITH_SHOW(product_family,      0400, DMI_PRODUCT_FAMILY);
 DEFINE_DMI_ATTR_WITH_SHOW(board_vendor,                0444, DMI_BOARD_VENDOR);
 DEFINE_DMI_ATTR_WITH_SHOW(board_name,          0444, DMI_BOARD_NAME);
 DEFINE_DMI_ATTR_WITH_SHOW(board_version,       0444, DMI_BOARD_VERSION);
@@ -191,6 +192,7 @@ static void __init dmi_id_init_attr_table(void)
        ADD_DMI_ATTR(product_version,   DMI_PRODUCT_VERSION);
        ADD_DMI_ATTR(product_serial,    DMI_PRODUCT_SERIAL);
        ADD_DMI_ATTR(product_uuid,      DMI_PRODUCT_UUID);
+       ADD_DMI_ATTR(product_family,      DMI_PRODUCT_FAMILY);
        ADD_DMI_ATTR(board_vendor,      DMI_BOARD_VENDOR);
        ADD_DMI_ATTR(board_name,        DMI_BOARD_NAME);
        ADD_DMI_ATTR(board_version,     DMI_BOARD_VERSION);
index 54be60ead08f8068c18dc9bbfd5a40e3cb26685c..93f7acdaac7ac19c057fc6b98a76c270a4646f24 100644 (file)
@@ -430,6 +430,7 @@ static void __init dmi_decode(const struct dmi_header *dm, void *dummy)
                dmi_save_ident(dm, DMI_PRODUCT_VERSION, 6);
                dmi_save_ident(dm, DMI_PRODUCT_SERIAL, 7);
                dmi_save_uuid(dm, DMI_PRODUCT_UUID, 8);
+               dmi_save_ident(dm, DMI_PRODUCT_FAMILY, 26);
                break;
        case 2:         /* Base Board Information */
                dmi_save_ident(dm, DMI_BOARD_VENDOR, 4);
index 04ca8764f0c096f4e3f006ab74e4dc55996735a1..8bf27323f7a37c34591c45f8b39d2091ae096260 100644 (file)
@@ -36,6 +36,9 @@ void __init efi_bgrt_init(struct acpi_table_header *table)
        if (acpi_disabled)
                return;
 
+       if (!efi_enabled(EFI_BOOT))
+               return;
+
        if (table->length < sizeof(bgrt_tab)) {
                pr_notice("Ignoring BGRT: invalid length %u (expected %zu)\n",
                       table->length, sizeof(bgrt_tab));
index 8c34d50a4d8032bbaba3322b3dee4ff22826a923..959777ec8a77bab62e49097cd93d711ccd311610 100644 (file)
 
 /* BIOS variables */
 static const efi_guid_t efi_variable_guid = EFI_GLOBAL_VARIABLE_GUID;
-static const efi_char16_t const efi_SecureBoot_name[] = {
+static const efi_char16_t efi_SecureBoot_name[] = {
        'S', 'e', 'c', 'u', 'r', 'e', 'B', 'o', 'o', 't', 0
 };
-static const efi_char16_t const efi_SetupMode_name[] = {
+static const efi_char16_t efi_SetupMode_name[] = {
        'S', 'e', 't', 'u', 'p', 'M', 'o', 'd', 'e', 0
 };
 
index 236d9950221b62665e8728941faa5793fc757980..c0d8c6ff6380e8a69de8faf58d28963dde29c8de 100644 (file)
@@ -425,10 +425,15 @@ bool amdgpu_fbdev_robj_is_fb(struct amdgpu_device *adev, struct amdgpu_bo *robj)
 
 void amdgpu_fbdev_restore_mode(struct amdgpu_device *adev)
 {
-       struct amdgpu_fbdev *afbdev = adev->mode_info.rfbdev;
+       struct amdgpu_fbdev *afbdev;
        struct drm_fb_helper *fb_helper;
        int ret;
 
+       if (!adev)
+               return;
+
+       afbdev = adev->mode_info.rfbdev;
+
        if (!afbdev)
                return;
 
index 07ff3b1514f129edc23875c1f42053f7ef1aaa72..8ecf82c5fe74dc4d34e55d4dbaec5931bca1a2e8 100644 (file)
@@ -634,7 +634,7 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job)
                mutex_unlock(&id_mgr->lock);
        }
 
-       if (gds_switch_needed) {
+       if (ring->funcs->emit_gds_switch && gds_switch_needed) {
                id->gds_base = job->gds_base;
                id->gds_size = job->gds_size;
                id->gws_base = job->gws_base;
@@ -672,6 +672,7 @@ void amdgpu_vm_reset_id(struct amdgpu_device *adev, unsigned vmhub,
        struct amdgpu_vm_id_manager *id_mgr = &adev->vm_manager.id_mgr[vmhub];
        struct amdgpu_vm_id *id = &id_mgr->ids[vmid];
 
+       atomic64_set(&id->owner, 0);
        id->gds_base = 0;
        id->gds_size = 0;
        id->gws_base = 0;
@@ -680,6 +681,26 @@ void amdgpu_vm_reset_id(struct amdgpu_device *adev, unsigned vmhub,
        id->oa_size = 0;
 }
 
+/**
+ * amdgpu_vm_reset_all_ids - reset all VMIDs to zero
+ *
+ * @adev: amdgpu device structure
+ *
+ * Resets all VMIDs to force a flush on next use
+ */
+void amdgpu_vm_reset_all_ids(struct amdgpu_device *adev)
+{
+       unsigned i, j;
+
+       for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
+               struct amdgpu_vm_id_manager *id_mgr =
+                       &adev->vm_manager.id_mgr[i];
+
+               for (j = 1; j < id_mgr->num_ids; ++j)
+                       amdgpu_vm_reset_id(adev, i, j);
+       }
+}
+
 /**
  * amdgpu_vm_bo_find - find the bo_va for a specific vm & bo
  *
@@ -2270,7 +2291,6 @@ void amdgpu_vm_manager_init(struct amdgpu_device *adev)
        for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
                adev->vm_manager.seqno[i] = 0;
 
-
        atomic_set(&adev->vm_manager.vm_pte_next_ring, 0);
        atomic64_set(&adev->vm_manager.client_counter, 0);
        spin_lock_init(&adev->vm_manager.prt_lock);
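
Reviewer note: amdgpu_vm_reset_all_ids starts its inner loop at j = 1, consistent with VMID 0 on each hub being reserved for the system context and never handed out by the ID manager, so only IDs 1..num_ids-1 need the forced flush. (This reading is inferred from the loop bounds, not stated in the patch.)
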
index d97e28b4bdc41cbb52e70647685b58db4886514a..e1d951ece4333672512f96ae914ee740f0a5922b 100644 (file)
@@ -204,6 +204,7 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
 int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job);
 void amdgpu_vm_reset_id(struct amdgpu_device *adev, unsigned vmhub,
                        unsigned vmid);
+void amdgpu_vm_reset_all_ids(struct amdgpu_device *adev);
 int amdgpu_vm_update_directories(struct amdgpu_device *adev,
                                 struct amdgpu_vm *vm);
 int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
index a4831fe0223bffebdda4727589453aa6cb8fb7b1..a2c59a08b2bd69919b23989feddff71a88855617 100644 (file)
@@ -220,9 +220,9 @@ static void amdgpu_vram_mgr_debug(struct ttm_mem_type_manager *man,
 }
 
 const struct ttm_mem_type_manager_func amdgpu_vram_mgr_func = {
-       amdgpu_vram_mgr_init,
-       amdgpu_vram_mgr_fini,
-       amdgpu_vram_mgr_new,
-       amdgpu_vram_mgr_del,
-       amdgpu_vram_mgr_debug
+       .init           = amdgpu_vram_mgr_init,
+       .takedown       = amdgpu_vram_mgr_fini,
+       .get_node       = amdgpu_vram_mgr_new,
+       .put_node       = amdgpu_vram_mgr_del,
+       .debug          = amdgpu_vram_mgr_debug
 };
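
Reviewer note: positional initialization of an ops table silently breaks if the struct's members are ever reordered or a new one is inserted; designated initializers bind each callback by name. A self-contained sketch of the difference (names illustrative):

struct demo_ops {
	int  (*init)(void);
	void (*debug)(void);
};

static int  demo_init(void)  { return 0; }
static void demo_debug(void) { }

/* If a new member were inserted before .debug, the positional form
 * { demo_init, demo_debug } would silently shift every pointer; the
 * designated form still binds each callback to the right slot. */
static const struct demo_ops ops = {
	.init  = demo_init,
	.debug = demo_debug,
};
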
index 6dc1410b380f376982551dbebe06f4dd84edf3b2..ec93714e4524eeaf80dbd5181da396e48024827b 100644 (file)
@@ -906,6 +906,12 @@ static bool ci_dpm_vblank_too_short(struct amdgpu_device *adev)
        u32 vblank_time = amdgpu_dpm_get_vblank_time(adev);
        u32 switch_limit = adev->mc.vram_type == AMDGPU_VRAM_TYPE_GDDR5 ? 450 : 300;
 
+       /* disable mclk switching if the refresh is >120Hz, even if the
+        * blanking period would allow it
+        */
+       if (amdgpu_dpm_get_vrefresh(adev) > 120)
+               return true;
+
        if (vblank_time < switch_limit)
                return true;
        else
index a572979f186cdaeba52701fb8183850e16581143..d860939152df234517e7806135015a23aa316df9 100644 (file)
@@ -950,10 +950,6 @@ static int gmc_v6_0_suspend(void *handle)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-       if (adev->vm_manager.enabled) {
-               gmc_v6_0_vm_fini(adev);
-               adev->vm_manager.enabled = false;
-       }
        gmc_v6_0_hw_fini(adev);
 
        return 0;
@@ -968,16 +964,9 @@ static int gmc_v6_0_resume(void *handle)
        if (r)
                return r;
 
-       if (!adev->vm_manager.enabled) {
-               r = gmc_v6_0_vm_init(adev);
-               if (r) {
-                       dev_err(adev->dev, "vm manager initialization failed (%d).\n", r);
-                       return r;
-               }
-               adev->vm_manager.enabled = true;
-       }
+       amdgpu_vm_reset_all_ids(adev);
 
-       return r;
+       return 0;
 }
 
 static bool gmc_v6_0_is_idle(void *handle)
index a9083a16a250920c64605bc447ca19c8b3014f3a..2750e5c2381301ceebddb8f614f7e5868de23d2c 100644 (file)
@@ -1117,10 +1117,6 @@ static int gmc_v7_0_suspend(void *handle)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-       if (adev->vm_manager.enabled) {
-               gmc_v7_0_vm_fini(adev);
-               adev->vm_manager.enabled = false;
-       }
        gmc_v7_0_hw_fini(adev);
 
        return 0;
@@ -1135,16 +1131,9 @@ static int gmc_v7_0_resume(void *handle)
        if (r)
                return r;
 
-       if (!adev->vm_manager.enabled) {
-               r = gmc_v7_0_vm_init(adev);
-               if (r) {
-                       dev_err(adev->dev, "vm manager initialization failed (%d).\n", r);
-                       return r;
-               }
-               adev->vm_manager.enabled = true;
-       }
+       amdgpu_vm_reset_all_ids(adev);
 
-       return r;
+       return 0;
 }
 
 static bool gmc_v7_0_is_idle(void *handle)
index 4ac99784160a3ed9fbb59c5bf53a8d1b0a6ec1b4..f56b4089ee9f3fe7581cd6c541d434867c613cbb 100644 (file)
@@ -1209,10 +1209,6 @@ static int gmc_v8_0_suspend(void *handle)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-       if (adev->vm_manager.enabled) {
-               gmc_v8_0_vm_fini(adev);
-               adev->vm_manager.enabled = false;
-       }
        gmc_v8_0_hw_fini(adev);
 
        return 0;
@@ -1227,16 +1223,9 @@ static int gmc_v8_0_resume(void *handle)
        if (r)
                return r;
 
-       if (!adev->vm_manager.enabled) {
-               r = gmc_v8_0_vm_init(adev);
-               if (r) {
-                       dev_err(adev->dev, "vm manager initialization failed (%d).\n", r);
-                       return r;
-               }
-               adev->vm_manager.enabled = true;
-       }
+       amdgpu_vm_reset_all_ids(adev);
 
-       return r;
+       return 0;
 }
 
 static bool gmc_v8_0_is_idle(void *handle)
index dc1e1c1d6b2430cb9957047a454cde87bf439561..f936332a069d2d1c9329a3d52a17f9e44776f659 100644 (file)
@@ -791,10 +791,6 @@ static int gmc_v9_0_suspend(void *handle)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-       if (adev->vm_manager.enabled) {
-               gmc_v9_0_vm_fini(adev);
-               adev->vm_manager.enabled = false;
-       }
        gmc_v9_0_hw_fini(adev);
 
        return 0;
@@ -809,17 +805,9 @@ static int gmc_v9_0_resume(void *handle)
        if (r)
                return r;
 
-       if (!adev->vm_manager.enabled) {
-               r = gmc_v9_0_vm_init(adev);
-               if (r) {
-                       dev_err(adev->dev,
-                               "vm manager initialization failed (%d).\n", r);
-                       return r;
-               }
-               adev->vm_manager.enabled = true;
-       }
+       amdgpu_vm_reset_all_ids(adev);
 
-       return r;
+       return 0;
 }
 
 static bool gmc_v9_0_is_idle(void *handle)
index fb08193599092d0d6562e0c5b4c019b6be45bed2..90332f55cfba91b7a543da4ec3820809bb876336 100644 (file)
@@ -77,13 +77,26 @@ static int vce_v3_0_set_clockgating_state(void *handle,
 static uint64_t vce_v3_0_ring_get_rptr(struct amdgpu_ring *ring)
 {
        struct amdgpu_device *adev = ring->adev;
+       u32 v;
+
+       mutex_lock(&adev->grbm_idx_mutex);
+       if (adev->vce.harvest_config == 0 ||
+               adev->vce.harvest_config == AMDGPU_VCE_HARVEST_VCE1)
+               WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(0));
+       else if (adev->vce.harvest_config == AMDGPU_VCE_HARVEST_VCE0)
+               WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(1));
 
        if (ring == &adev->vce.ring[0])
-               return RREG32(mmVCE_RB_RPTR);
+               v = RREG32(mmVCE_RB_RPTR);
        else if (ring == &adev->vce.ring[1])
-               return RREG32(mmVCE_RB_RPTR2);
+               v = RREG32(mmVCE_RB_RPTR2);
        else
-               return RREG32(mmVCE_RB_RPTR3);
+               v = RREG32(mmVCE_RB_RPTR3);
+
+       WREG32(mmGRBM_GFX_INDEX, mmGRBM_GFX_INDEX_DEFAULT);
+       mutex_unlock(&adev->grbm_idx_mutex);
+
+       return v;
 }
 
 /**
@@ -96,13 +109,26 @@ static uint64_t vce_v3_0_ring_get_rptr(struct amdgpu_ring *ring)
 static uint64_t vce_v3_0_ring_get_wptr(struct amdgpu_ring *ring)
 {
        struct amdgpu_device *adev = ring->adev;
+       u32 v;
+
+       mutex_lock(&adev->grbm_idx_mutex);
+       if (adev->vce.harvest_config == 0 ||
+               adev->vce.harvest_config == AMDGPU_VCE_HARVEST_VCE1)
+               WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(0));
+       else if (adev->vce.harvest_config == AMDGPU_VCE_HARVEST_VCE0)
+               WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(1));
 
        if (ring == &adev->vce.ring[0])
-               return RREG32(mmVCE_RB_WPTR);
+               v = RREG32(mmVCE_RB_WPTR);
        else if (ring == &adev->vce.ring[1])
-               return RREG32(mmVCE_RB_WPTR2);
+               v = RREG32(mmVCE_RB_WPTR2);
        else
-               return RREG32(mmVCE_RB_WPTR3);
+               v = RREG32(mmVCE_RB_WPTR3);
+
+       WREG32(mmGRBM_GFX_INDEX, mmGRBM_GFX_INDEX_DEFAULT);
+       mutex_unlock(&adev->grbm_idx_mutex);
+
+       return v;
 }
 
 /**
@@ -116,12 +142,22 @@ static void vce_v3_0_ring_set_wptr(struct amdgpu_ring *ring)
 {
        struct amdgpu_device *adev = ring->adev;
 
+       mutex_lock(&adev->grbm_idx_mutex);
+       if (adev->vce.harvest_config == 0 ||
+               adev->vce.harvest_config == AMDGPU_VCE_HARVEST_VCE1)
+               WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(0));
+       else if (adev->vce.harvest_config == AMDGPU_VCE_HARVEST_VCE0)
+               WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(1));
+
        if (ring == &adev->vce.ring[0])
                WREG32(mmVCE_RB_WPTR, lower_32_bits(ring->wptr));
        else if (ring == &adev->vce.ring[1])
                WREG32(mmVCE_RB_WPTR2, lower_32_bits(ring->wptr));
        else
                WREG32(mmVCE_RB_WPTR3, lower_32_bits(ring->wptr));
+
+       WREG32(mmGRBM_GFX_INDEX, mmGRBM_GFX_INDEX_DEFAULT);
+       mutex_unlock(&adev->grbm_idx_mutex);
 }
 
 static void vce_v3_0_override_vce_clock_gating(struct amdgpu_device *adev, bool override)
@@ -231,33 +267,38 @@ static int vce_v3_0_start(struct amdgpu_device *adev)
        struct amdgpu_ring *ring;
        int idx, r;
 
-       ring = &adev->vce.ring[0];
-       WREG32(mmVCE_RB_RPTR, lower_32_bits(ring->wptr));
-       WREG32(mmVCE_RB_WPTR, lower_32_bits(ring->wptr));
-       WREG32(mmVCE_RB_BASE_LO, ring->gpu_addr);
-       WREG32(mmVCE_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
-       WREG32(mmVCE_RB_SIZE, ring->ring_size / 4);
-
-       ring = &adev->vce.ring[1];
-       WREG32(mmVCE_RB_RPTR2, lower_32_bits(ring->wptr));
-       WREG32(mmVCE_RB_WPTR2, lower_32_bits(ring->wptr));
-       WREG32(mmVCE_RB_BASE_LO2, ring->gpu_addr);
-       WREG32(mmVCE_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
-       WREG32(mmVCE_RB_SIZE2, ring->ring_size / 4);
-
-       ring = &adev->vce.ring[2];
-       WREG32(mmVCE_RB_RPTR3, lower_32_bits(ring->wptr));
-       WREG32(mmVCE_RB_WPTR3, lower_32_bits(ring->wptr));
-       WREG32(mmVCE_RB_BASE_LO3, ring->gpu_addr);
-       WREG32(mmVCE_RB_BASE_HI3, upper_32_bits(ring->gpu_addr));
-       WREG32(mmVCE_RB_SIZE3, ring->ring_size / 4);
-
        mutex_lock(&adev->grbm_idx_mutex);
        for (idx = 0; idx < 2; ++idx) {
                if (adev->vce.harvest_config & (1 << idx))
                        continue;
 
                WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(idx));
+
+               /* Program the instance 0 register space when both instances
+                * (or only instance 0) are available; program the instance 1
+                * register space when only instance 1 is available. */
+               if (idx != 1 || adev->vce.harvest_config == AMDGPU_VCE_HARVEST_VCE0) {
+                       ring = &adev->vce.ring[0];
+                       WREG32(mmVCE_RB_RPTR, lower_32_bits(ring->wptr));
+                       WREG32(mmVCE_RB_WPTR, lower_32_bits(ring->wptr));
+                       WREG32(mmVCE_RB_BASE_LO, ring->gpu_addr);
+                       WREG32(mmVCE_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
+                       WREG32(mmVCE_RB_SIZE, ring->ring_size / 4);
+
+                       ring = &adev->vce.ring[1];
+                       WREG32(mmVCE_RB_RPTR2, lower_32_bits(ring->wptr));
+                       WREG32(mmVCE_RB_WPTR2, lower_32_bits(ring->wptr));
+                       WREG32(mmVCE_RB_BASE_LO2, ring->gpu_addr);
+                       WREG32(mmVCE_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
+                       WREG32(mmVCE_RB_SIZE2, ring->ring_size / 4);
+
+                       ring = &adev->vce.ring[2];
+                       WREG32(mmVCE_RB_RPTR3, lower_32_bits(ring->wptr));
+                       WREG32(mmVCE_RB_WPTR3, lower_32_bits(ring->wptr));
+                       WREG32(mmVCE_RB_BASE_LO3, ring->gpu_addr);
+                       WREG32(mmVCE_RB_BASE_HI3, upper_32_bits(ring->gpu_addr));
+                       WREG32(mmVCE_RB_SIZE3, ring->ring_size / 4);
+               }
+
                vce_v3_0_mc_resume(adev, idx);
                WREG32_FIELD(VCE_STATUS, JOB_BUSY, 1);
 
index a74a3db3056c9c4a991e2b4525eb09de8a234a0a..102eb6d029faeb27887215ada8aeccf93d4039b0 100644 (file)
@@ -2655,6 +2655,28 @@ static int smu7_get_power_state_size(struct pp_hwmgr *hwmgr)
        return sizeof(struct smu7_power_state);
 }
 
+static int smu7_vblank_too_short(struct pp_hwmgr *hwmgr,
+                                uint32_t vblank_time_us)
+{
+       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+       uint32_t switch_limit_us;
+
+       switch (hwmgr->chip_id) {
+       case CHIP_POLARIS10:
+       case CHIP_POLARIS11:
+       case CHIP_POLARIS12:
+               switch_limit_us = data->is_memory_gddr5 ? 190 : 150;
+               break;
+       default:
+               switch_limit_us = data->is_memory_gddr5 ? 450 : 150;
+               break;
+       }
+
+       if (vblank_time_us < switch_limit_us)
+               return true;
+       else
+               return false;
+}
 
 static int smu7_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
                                struct pp_power_state *request_ps,
@@ -2669,6 +2691,7 @@ static int smu7_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
        bool disable_mclk_switching;
        bool disable_mclk_switching_for_frame_lock;
        struct cgs_display_info info = {0};
+       struct cgs_mode_info mode_info = {0};
        const struct phm_clock_and_voltage_limits *max_limits;
        uint32_t i;
        struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
@@ -2677,6 +2700,7 @@ static int smu7_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
        int32_t count;
        int32_t stable_pstate_sclk = 0, stable_pstate_mclk = 0;
 
+       info.mode_info = &mode_info;
        data->battery_state = (PP_StateUILabel_Battery ==
                        request_ps->classification.ui_label);
 
@@ -2703,8 +2727,6 @@ static int smu7_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
 
        cgs_get_active_displays_info(hwmgr->device, &info);
 
-       /*TO DO result = PHM_CheckVBlankTime(hwmgr, &vblankTooShort);*/
-
        minimum_clocks.engineClock = hwmgr->display_config.min_core_set_clock;
        minimum_clocks.memoryClock = hwmgr->display_config.min_mem_set_clock;
 
@@ -2769,8 +2791,10 @@ static int smu7_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
                                    PHM_PlatformCaps_DisableMclkSwitchingForFrameLock);
 
 
-       disable_mclk_switching = (1 < info.display_count) ||
-                                   disable_mclk_switching_for_frame_lock;
+       disable_mclk_switching = ((1 < info.display_count) ||
+                                 disable_mclk_switching_for_frame_lock ||
+                                 smu7_vblank_too_short(hwmgr, mode_info.vblank_time_us) ||
+                                 (mode_info.refresh_rate > 120));
 
        sclk = smu7_ps->performance_levels[0].engine_clock;
        mclk = smu7_ps->performance_levels[0].memory_clock;
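
Reviewer note, for scale (ballpark figures, not from the patch): at 60 Hz a frame lasts about 16.7 ms, and with vertical blanking around 4 percent of the frame the switch window is roughly 600-700 us, comfortably above the 150-450 us limits chosen in smu7_vblank_too_short. At 144 Hz the frame shrinks to about 6.9 ms and the same proportion leaves well under 300 us, which is why the vblank-time comparison and the blanket refresh_rate > 120 cutoff both end up disabling mclk switching on high-refresh panels.
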
index ad30f5d3a10d5ea2a0118203ff66110a11b0583a..2614af2f553f3007ae25cb70e1f8f322c23a62d6 100644 (file)
@@ -4186,7 +4186,7 @@ static int vega10_force_clock_level(struct pp_hwmgr *hwmgr,
                enum pp_clock_type type, uint32_t mask)
 {
        struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
-       uint32_t i;
+       int i;
 
        if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL)
                return -EINVAL;
index d5f53d04fa08c30a4053aa9fd07b04c795177e4e..83e40fe51b6212f6bbcf44b119c47a86b3fb1638 100644 (file)
@@ -709,17 +709,17 @@ static int tf_vega10_thermal_disable_alert(struct pp_hwmgr *hwmgr,
 
 static struct phm_master_table_item
 vega10_thermal_start_thermal_controller_master_list[] = {
-       {NULL, tf_vega10_thermal_initialize},
-       {NULL, tf_vega10_thermal_set_temperature_range},
-       {NULL, tf_vega10_thermal_enable_alert},
+       { .tableFunction = tf_vega10_thermal_initialize },
+       { .tableFunction = tf_vega10_thermal_set_temperature_range },
+       { .tableFunction = tf_vega10_thermal_enable_alert },
 /* We should restrict performance levels to low before we halt the SMC.
  * On the other hand we are still in boot state when we do this
  * so it would be pointless.
  * If this assumption changes we have to revisit this table.
  */
-       {NULL, tf_vega10_thermal_setup_fan_table},
-       {NULL, tf_vega10_thermal_start_smc_fan_control},
-       {NULL, NULL}
+       { .tableFunction = tf_vega10_thermal_setup_fan_table },
+       { .tableFunction = tf_vega10_thermal_start_smc_fan_control },
+       { }
 };
 
 static struct phm_master_table_header
@@ -731,10 +731,10 @@ vega10_thermal_start_thermal_controller_master = {
 
 static struct phm_master_table_item
 vega10_thermal_set_temperature_range_master_list[] = {
-       {NULL, tf_vega10_thermal_disable_alert},
-       {NULL, tf_vega10_thermal_set_temperature_range},
-       {NULL, tf_vega10_thermal_enable_alert},
-       {NULL, NULL}
+       { .tableFunction = tf_vega10_thermal_disable_alert },
+       { .tableFunction = tf_vega10_thermal_set_temperature_range },
+       { .tableFunction = tf_vega10_thermal_enable_alert },
+       { }
 };
 
 struct phm_master_table_header
index 3e5f52110ea17384c84f568ba9b1a4955922523c..213fb837e1c40fe79bf536d54b083d99dee1c192 100644 (file)
@@ -1208,3 +1208,86 @@ int drm_dp_stop_crc(struct drm_dp_aux *aux)
        return 0;
 }
 EXPORT_SYMBOL(drm_dp_stop_crc);
+
+struct dpcd_quirk {
+       u8 oui[3];
+       bool is_branch;
+       u32 quirks;
+};
+
+#define OUI(first, second, third) { (first), (second), (third) }
+
+static const struct dpcd_quirk dpcd_quirk_list[] = {
+       /* Analogix 7737 needs reduced M and N at HBR2 link rates */
+       { OUI(0x00, 0x22, 0xb9), true, BIT(DP_DPCD_QUIRK_LIMITED_M_N) },
+};
+
+#undef OUI
+
+/*
+ * Get a bit mask of DPCD quirks for the sink/branch device identified by
+ * ident. The quirk data is shared but it's up to the drivers to act on the
+ * data.
+ *
+ * For now, only the OUI (first three bytes) is used, but this may be extended
+ * to device identification string and hardware/firmware revisions later.
+ */
+static u32
+drm_dp_get_quirks(const struct drm_dp_dpcd_ident *ident, bool is_branch)
+{
+       const struct dpcd_quirk *quirk;
+       u32 quirks = 0;
+       int i;
+
+       for (i = 0; i < ARRAY_SIZE(dpcd_quirk_list); i++) {
+               quirk = &dpcd_quirk_list[i];
+
+               if (quirk->is_branch != is_branch)
+                       continue;
+
+               if (memcmp(quirk->oui, ident->oui, sizeof(ident->oui)) != 0)
+                       continue;
+
+               quirks |= quirk->quirks;
+       }
+
+       return quirks;
+}
+
+/**
+ * drm_dp_read_desc - read sink/branch descriptor from DPCD
+ * @aux: DisplayPort AUX channel
+ * @desc: Device descriptor to fill from DPCD
+ * @is_branch: true for branch devices, false for sink devices
+ *
+ * Read DPCD 0x400 (sink) or 0x500 (branch) into @desc. Also debug log the
+ * identification.
+ *
+ * Returns 0 on success or a negative error code on failure.
+ */
+int drm_dp_read_desc(struct drm_dp_aux *aux, struct drm_dp_desc *desc,
+                    bool is_branch)
+{
+       struct drm_dp_dpcd_ident *ident = &desc->ident;
+       unsigned int offset = is_branch ? DP_BRANCH_OUI : DP_SINK_OUI;
+       int ret, dev_id_len;
+
+       ret = drm_dp_dpcd_read(aux, offset, ident, sizeof(*ident));
+       if (ret < 0)
+               return ret;
+
+       desc->quirks = drm_dp_get_quirks(ident, is_branch);
+
+       dev_id_len = strnlen(ident->device_id, sizeof(ident->device_id));
+
+       DRM_DEBUG_KMS("DP %s: OUI %*phD dev-ID %*pE HW-rev %d.%d SW-rev %d.%d quirks 0x%04x\n",
+                     is_branch ? "branch" : "sink",
+                     (int)sizeof(ident->oui), ident->oui,
+                     dev_id_len, ident->device_id,
+                     ident->hw_rev >> 4, ident->hw_rev & 0xf,
+                     ident->sw_major_rev, ident->sw_minor_rev,
+                     desc->quirks);
+
+       return 0;
+}
+EXPORT_SYMBOL(drm_dp_read_desc);
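
Reviewer note: a driver consumes this by reading the descriptor once after detecting the sink and then testing individual quirk bits where they matter, which is exactly what the i915 hunks later in this diff do via drm_dp_has_quirk. A sketch of that consumer side (aux and desc live in the caller's connector state):

#include <drm/drm_dp_helper.h>

static bool sink_needs_limited_m_n(struct drm_dp_aux *aux,
				   struct drm_dp_desc *desc)
{
	if (drm_dp_read_desc(aux, desc, false))	/* false = sink device */
		return false;			/* read failed: assume no quirks */

	return drm_dp_has_quirk(desc, DP_DPCD_QUIRK_LIMITED_M_N);
}
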
index fedd4d60d9cd5b2bab64ed8aae1391ea2f402fc5..5dc8c4350602a561fe4cfd77fce77e26770696fb 100644 (file)
@@ -948,8 +948,6 @@ retry:
        }
 
 out:
-       if (ret && crtc->funcs->page_flip_target)
-               drm_crtc_vblank_put(crtc);
        if (fb)
                drm_framebuffer_put(fb);
        if (crtc->primary->old_fb)
@@ -964,5 +962,8 @@ out:
        drm_modeset_drop_locks(&ctx);
        drm_modeset_acquire_fini(&ctx);
 
+       if (ret && crtc->funcs->page_flip_target)
+               drm_crtc_vblank_put(crtc);
+
        return ret;
 }
index 09d3c4c3c858e8a05dcf4cbc9a14feb5910ff132..50294a7bd29da10f99e9dca59701b66c385bb66b 100644 (file)
@@ -82,14 +82,9 @@ err_file_priv_free:
        return ret;
 }
 
-static void exynos_drm_preclose(struct drm_device *dev,
-                                       struct drm_file *file)
-{
-       exynos_drm_subdrv_close(dev, file);
-}
-
 static void exynos_drm_postclose(struct drm_device *dev, struct drm_file *file)
 {
+       exynos_drm_subdrv_close(dev, file);
        kfree(file->driver_priv);
        file->driver_priv = NULL;
 }
@@ -145,7 +140,6 @@ static struct drm_driver exynos_drm_driver = {
        .driver_features        = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME
                                  | DRIVER_ATOMIC | DRIVER_RENDER,
        .open                   = exynos_drm_open,
-       .preclose               = exynos_drm_preclose,
        .lastclose              = exynos_drm_lastclose,
        .postclose              = exynos_drm_postclose,
        .gem_free_object_unlocked = exynos_drm_gem_free_object,
index cb317693059696b3c86bda9c93c2e5ebcefad921..39c740572034a6d4f3d69f2ab861b1d0a8f80803 100644 (file)
@@ -160,12 +160,9 @@ struct exynos_drm_clk {
  *     drm framework doesn't support multiple irq yet.
  *     we can refer to the crtc to current hardware interrupt occurred through
  *     this pipe value.
- * @enabled: if the crtc is enabled or not
- * @event: vblank event that is currently queued for flip
- * @wait_update: wait all pending planes updates to finish
- * @pending_update: number of pending plane updates in this crtc
  * @ops: pointer to callbacks for exynos drm specific functionality
  * @ctx: A pointer to the crtc's implementation specific context
+ * @pipe_clk: A pointer to the crtc's pipeline clock.
  */
 struct exynos_drm_crtc {
        struct drm_crtc                 base;
index fc4fda738906251de7a6047c3fc8f3699c469b79..d404de86d5f9de1d5fe07f856fe4a10fafdefd91 100644 (file)
@@ -1633,7 +1633,6 @@ static int exynos_dsi_parse_dt(struct exynos_dsi *dsi)
 {
        struct device *dev = dsi->dev;
        struct device_node *node = dev->of_node;
-       struct device_node *ep;
        int ret;
 
        ret = exynos_dsi_of_read_u32(node, "samsung,pll-clock-frequency",
@@ -1641,32 +1640,21 @@ static int exynos_dsi_parse_dt(struct exynos_dsi *dsi)
        if (ret < 0)
                return ret;
 
-       ep = of_graph_get_endpoint_by_regs(node, DSI_PORT_OUT, 0);
-       if (!ep) {
-               dev_err(dev, "no output port with endpoint specified\n");
-               return -EINVAL;
-       }
-
-       ret = exynos_dsi_of_read_u32(ep, "samsung,burst-clock-frequency",
+       ret = exynos_dsi_of_read_u32(node, "samsung,burst-clock-frequency",
                                     &dsi->burst_clk_rate);
        if (ret < 0)
-               goto end;
+               return ret;
 
-       ret = exynos_dsi_of_read_u32(ep, "samsung,esc-clock-frequency",
+       ret = exynos_dsi_of_read_u32(node, "samsung,esc-clock-frequency",
                                     &dsi->esc_clk_rate);
        if (ret < 0)
-               goto end;
-
-       of_node_put(ep);
+               return ret;
 
        dsi->bridge_node = of_graph_get_remote_node(node, DSI_PORT_OUT, 0);
        if (!dsi->bridge_node)
                return -EINVAL;
 
-end:
-       of_node_put(ep);
-
-       return ret;
+       return 0;
 }
 
 static int exynos_dsi_bind(struct device *dev, struct device *master,
@@ -1817,6 +1805,10 @@ static int exynos_dsi_probe(struct platform_device *pdev)
 
 static int exynos_dsi_remove(struct platform_device *pdev)
 {
+       struct exynos_dsi *dsi = platform_get_drvdata(pdev);
+
+       of_node_put(dsi->bridge_node);
+
        pm_runtime_disable(&pdev->dev);
 
        component_del(&pdev->dev, &exynos_dsi_component_ops);
index 0066fe7e622ef75de181cc54770daeb2006a2d24..be3eefec5152aa0cdf9dd891bf4d4fe423ff87d4 100644 (file)
@@ -759,20 +759,23 @@ void psb_intel_lvds_init(struct drm_device *dev,
                if (scan->type & DRM_MODE_TYPE_PREFERRED) {
                        mode_dev->panel_fixed_mode =
                            drm_mode_duplicate(dev, scan);
+                       DRM_DEBUG_KMS("Using mode from DDC\n");
                        goto out;       /* FIXME: check for quirks */
                }
        }
 
        /* Failed to get EDID, what about VBT? do we need this? */
-       if (mode_dev->vbt_mode)
+       if (dev_priv->lfp_lvds_vbt_mode) {
                mode_dev->panel_fixed_mode =
-                   drm_mode_duplicate(dev, mode_dev->vbt_mode);
+                       drm_mode_duplicate(dev, dev_priv->lfp_lvds_vbt_mode);
 
-       if (!mode_dev->panel_fixed_mode)
-               if (dev_priv->lfp_lvds_vbt_mode)
-                       mode_dev->panel_fixed_mode =
-                               drm_mode_duplicate(dev,
-                                       dev_priv->lfp_lvds_vbt_mode);
+               if (mode_dev->panel_fixed_mode) {
+                       mode_dev->panel_fixed_mode->type |=
+                               DRM_MODE_TYPE_PREFERRED;
+                       DRM_DEBUG_KMS("Using mode from VBT\n");
+                       goto out;
+               }
+       }
 
        /*
         * If we didn't get EDID, try checking if the panel is already turned
@@ -789,6 +792,7 @@ void psb_intel_lvds_init(struct drm_device *dev,
                if (mode_dev->panel_fixed_mode) {
                        mode_dev->panel_fixed_mode->type |=
                            DRM_MODE_TYPE_PREFERRED;
+                       DRM_DEBUG_KMS("Using pre-programmed mode\n");
                        goto out;       /* FIXME: check for quirks */
                }
        }
index dca989eb2d42ed48f6c13c15fe9d3f8a9cbfaab2..24fe04d6307b0383da918308827a12027ded9fbe 100644 (file)
@@ -779,8 +779,26 @@ static void init_vgpu_execlist(struct intel_vgpu *vgpu, int ring_id)
        vgpu_vreg(vgpu, ctx_status_ptr_reg) = ctx_status_ptr.dw;
 }
 
+static void clean_workloads(struct intel_vgpu *vgpu, unsigned long engine_mask)
+{
+       struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+       struct intel_engine_cs *engine;
+       struct intel_vgpu_workload *pos, *n;
+       unsigned int tmp;
+
+       /* free the unsubmitted workloads in the queues. */
+       for_each_engine_masked(engine, dev_priv, engine_mask, tmp) {
+               list_for_each_entry_safe(pos, n,
+                       &vgpu->workload_q_head[engine->id], list) {
+                       list_del_init(&pos->list);
+                       free_workload(pos);
+               }
+       }
+}
+
 void intel_vgpu_clean_execlist(struct intel_vgpu *vgpu)
 {
+       clean_workloads(vgpu, ALL_ENGINES);
        kmem_cache_destroy(vgpu->workloads);
 }
 
@@ -811,17 +829,9 @@ void intel_vgpu_reset_execlist(struct intel_vgpu *vgpu,
 {
        struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
        struct intel_engine_cs *engine;
-       struct intel_vgpu_workload *pos, *n;
        unsigned int tmp;
 
-       for_each_engine_masked(engine, dev_priv, engine_mask, tmp) {
-               /* free the unsubmited workload in the queue */
-               list_for_each_entry_safe(pos, n,
-                       &vgpu->workload_q_head[engine->id], list) {
-                       list_del_init(&pos->list);
-                       free_workload(pos);
-               }
-
+       clean_workloads(vgpu, engine_mask);
+       for_each_engine_masked(engine, dev_priv, engine_mask, tmp)
                init_vgpu_execlist(vgpu, engine->id);
-       }
 }
index c995e540ff96e1f8a18a9232de2b26794fa03aa2..0ffd696545927277200d8b2332a024168672c5c5 100644 (file)
@@ -1366,18 +1366,28 @@ static int skl_misc_ctl_write(struct intel_vgpu *vgpu, unsigned int offset,
                void *p_data, unsigned int bytes)
 {
        struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
-       i915_reg_t reg = {.reg = offset};
+       u32 v = *(u32 *)p_data;
+
+       if (!IS_SKYLAKE(dev_priv) && !IS_KABYLAKE(dev_priv))
+               return intel_vgpu_default_mmio_write(vgpu,
+                               offset, p_data, bytes);
 
        switch (offset) {
        case 0x4ddc:
-               vgpu_vreg(vgpu, offset) = 0x8000003c;
-               /* WaCompressedResourceSamplerPbeMediaNewHashMode:skl */
-               I915_WRITE(reg, vgpu_vreg(vgpu, offset));
+               /* bypass WaCompressedResourceSamplerPbeMediaNewHashMode */
+               vgpu_vreg(vgpu, offset) = v & ~(1 << 31);
                break;
        case 0x42080:
-               vgpu_vreg(vgpu, offset) = 0x8000;
-               /* WaCompressedResourceDisplayNewHashMode:skl */
-               I915_WRITE(reg, vgpu_vreg(vgpu, offset));
+               /* bypass WaCompressedResourceDisplayNewHashMode */
+               vgpu_vreg(vgpu, offset) = v & ~(1 << 15);
+               break;
+       case 0xe194:
+               /* bypass WaCompressedResourceSamplerPbeMediaNewHashMode */
+               vgpu_vreg(vgpu, offset) = v & ~(1 << 8);
+               break;
+       case 0x7014:
+               /* bypass WaCompressedResourceSamplerPbeMediaNewHashMode */
+               vgpu_vreg(vgpu, offset) = v & ~(1 << 13);
                break;
        default:
                return -EINVAL;
@@ -1634,7 +1644,8 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
        MMIO_DFH(GAM_ECOCHK, D_ALL, F_CMD_ACCESS, NULL, NULL);
        MMIO_DFH(GEN7_COMMON_SLICE_CHICKEN1, D_ALL, F_MODE_MASK | F_CMD_ACCESS,
                NULL, NULL);
-       MMIO_DFH(COMMON_SLICE_CHICKEN2, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
+       MMIO_DFH(COMMON_SLICE_CHICKEN2, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL,
+                skl_misc_ctl_write);
        MMIO_DFH(0x9030, D_ALL, F_CMD_ACCESS, NULL, NULL);
        MMIO_DFH(0x20a0, D_ALL, F_CMD_ACCESS, NULL, NULL);
        MMIO_DFH(0x2420, D_ALL, F_CMD_ACCESS, NULL, NULL);
@@ -2568,7 +2579,8 @@ static int init_broadwell_mmio_info(struct intel_gvt *gvt)
        MMIO_D(0x6e570, D_BDW_PLUS);
        MMIO_D(0x65f10, D_BDW_PLUS);
 
-       MMIO_DFH(0xe194, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
+       MMIO_DFH(0xe194, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL,
+                skl_misc_ctl_write);
        MMIO_DFH(0xe188, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
        MMIO_DFH(HALF_SLICE_CHICKEN2, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
        MMIO_DFH(0x2580, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
index 3036d4835b0fa7a3b366a31d0b6ed18fc7889ae1..c994fe6e65b2eafe6a133fccb70f7c5db5019b00 100644 (file)
@@ -1272,10 +1272,6 @@ int i915_driver_load(struct pci_dev *pdev, const struct pci_device_id *ent)
 
        dev_priv->ipc_enabled = false;
 
-       /* Everything is in place, we can now relax! */
-       DRM_INFO("Initialized %s %d.%d.%d %s for %s on minor %d\n",
-                driver.name, driver.major, driver.minor, driver.patchlevel,
-                driver.date, pci_name(pdev), dev_priv->drm.primary->index);
        if (IS_ENABLED(CONFIG_DRM_I915_DEBUG))
                DRM_INFO("DRM_I915_DEBUG enabled\n");
        if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
index c9b0949f6c1a2aba281c9a4bbf8d8b2c9ede3785..963f6d4481f76ec54b5aeab138b0cca3f4ff90e5 100644 (file)
@@ -562,7 +562,8 @@ struct intel_link_m_n {
 
 void intel_link_compute_m_n(int bpp, int nlanes,
                            int pixel_clock, int link_clock,
-                           struct intel_link_m_n *m_n);
+                           struct intel_link_m_n *m_n,
+                           bool reduce_m_n);
 
 /* Interface history:
  *
index a0563e18d753fd84731f8372efc7a938d2898a6b..50b8f1139ff99d6dc8d3ec225abf251d6af4465d 100644 (file)
@@ -2313,7 +2313,7 @@ static int aliasing_gtt_bind_vma(struct i915_vma *vma,
                    appgtt->base.allocate_va_range) {
                        ret = appgtt->base.allocate_va_range(&appgtt->base,
                                                             vma->node.start,
-                                                            vma->node.size);
+                                                            vma->size);
                        if (ret)
                                goto err_pages;
                }
index 129ed303a6c46e2f856eb1abc84990079abefb65..57d9f7f4ef159cd6eb30f9bc0bd10683eec5123f 100644 (file)
@@ -59,9 +59,6 @@ static void i915_gem_shrinker_unlock(struct drm_device *dev, bool unlock)
                return;
 
        mutex_unlock(&dev->struct_mutex);
-
-       /* expedite the RCU grace period to free some request slabs */
-       synchronize_rcu_expedited();
 }
 
 static bool any_vma_pinned(struct drm_i915_gem_object *obj)
@@ -274,8 +271,6 @@ unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv)
                                I915_SHRINK_ACTIVE);
        intel_runtime_pm_put(dev_priv);
 
-       synchronize_rcu(); /* wait for our earlier RCU delayed slab frees */
-
        return freed;
 }
 
index fd97fe00cd0d2ad00e1c7258eeb51ecf0f60d4c1..190f6aa5d15eb82bf51cbaed00b16ea8c5d4f5bc 100644 (file)
@@ -2953,7 +2953,6 @@ static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv)
        u32 pipestat_mask;
        u32 enable_mask;
        enum pipe pipe;
-       u32 val;
 
        pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV |
                        PIPE_CRC_DONE_INTERRUPT_STATUS;
@@ -2964,18 +2963,16 @@ static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv)
 
        enable_mask = I915_DISPLAY_PORT_INTERRUPT |
                I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
-               I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
+               I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
+               I915_LPE_PIPE_A_INTERRUPT |
+               I915_LPE_PIPE_B_INTERRUPT;
+
        if (IS_CHERRYVIEW(dev_priv))
-               enable_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
+               enable_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT |
+                       I915_LPE_PIPE_C_INTERRUPT;
 
        WARN_ON(dev_priv->irq_mask != ~0);
 
-       val = (I915_LPE_PIPE_A_INTERRUPT |
-               I915_LPE_PIPE_B_INTERRUPT |
-               I915_LPE_PIPE_C_INTERRUPT);
-
-       enable_mask |= val;
-
        dev_priv->irq_mask = ~enable_mask;
 
        GEN5_IRQ_INIT(VLV_, dev_priv->irq_mask, enable_mask);
index 5a7c63e64381e48a193610305973c468502565d2..65b837e96fe629d58f539b253dc1ab14595a459b 100644 (file)
@@ -8280,7 +8280,7 @@ enum {
 
 /* MIPI DSI registers */
 
-#define _MIPI_PORT(port, a, c) ((port) ? c : a)        /* ports A and C only */
+#define _MIPI_PORT(port, a, c) (((port) == PORT_A) ? a : c)    /* ports A and C only */
 #define _MMIO_MIPI(port, a, c) _MMIO(_MIPI_PORT(port, a, c))
 
 #define MIPIO_TXESC_CLK_DIV1                   _MMIO(0x160004)
index 3617927af269afb9872b0d5d419873f0945f880c..3cabe52a4e3b168e176d1f55abdae65f67219ef7 100644 (file)
@@ -6101,7 +6101,7 @@ retry:
        pipe_config->fdi_lanes = lane;
 
        intel_link_compute_m_n(pipe_config->pipe_bpp, lane, fdi_dotclock,
-                              link_bw, &pipe_config->fdi_m_n);
+                              link_bw, &pipe_config->fdi_m_n, false);
 
        ret = ironlake_check_fdi_lanes(dev, intel_crtc->pipe, pipe_config);
        if (ret == -EINVAL && pipe_config->pipe_bpp > 6*3) {
@@ -6277,7 +6277,8 @@ intel_reduce_m_n_ratio(uint32_t *num, uint32_t *den)
 }
 
 static void compute_m_n(unsigned int m, unsigned int n,
-                       uint32_t *ret_m, uint32_t *ret_n)
+                       uint32_t *ret_m, uint32_t *ret_n,
+                       bool reduce_m_n)
 {
        /*
         * Reduce M/N as much as possible without loss in precision. Several DP
@@ -6285,9 +6286,11 @@ static void compute_m_n(unsigned int m, unsigned int n,
         * values. The passed in values are more likely to have the least
         * significant bits zero than M after rounding below, so do this first.
         */
-       while ((m & 1) == 0 && (n & 1) == 0) {
-               m >>= 1;
-               n >>= 1;
+       if (reduce_m_n) {
+               while ((m & 1) == 0 && (n & 1) == 0) {
+                       m >>= 1;
+                       n >>= 1;
+               }
        }
 
        *ret_n = min_t(unsigned int, roundup_pow_of_two(n), DATA_LINK_N_MAX);
@@ -6298,16 +6301,19 @@ static void compute_m_n(unsigned int m, unsigned int n,
 void
 intel_link_compute_m_n(int bits_per_pixel, int nlanes,
                       int pixel_clock, int link_clock,
-                      struct intel_link_m_n *m_n)
+                      struct intel_link_m_n *m_n,
+                      bool reduce_m_n)
 {
        m_n->tu = 64;
 
        compute_m_n(bits_per_pixel * pixel_clock,
                    link_clock * nlanes * 8,
-                   &m_n->gmch_m, &m_n->gmch_n);
+                   &m_n->gmch_m, &m_n->gmch_n,
+                   reduce_m_n);
 
        compute_m_n(pixel_clock, link_clock,
-                   &m_n->link_m, &m_n->link_n);
+                   &m_n->link_m, &m_n->link_n,
+                   reduce_m_n);
 }
 
 static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
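
A self-contained sketch of the M/N computation as changed above, for readers without the i915 tree at hand. roundup_pow_of_two() and DATA_LINK_N_MAX are replaced with plain-C stand-ins, and the final common-factor reduction the driver also performs is omitted, so treat this as an illustration of the reduce_m_n switch rather than the driver code itself:

#include <stdint.h>
#include <stdio.h>

/* Plain-C stand-in for the kernel's roundup_pow_of_two() (v > 0). */
static uint32_t next_pow2(uint32_t v)
{
        v--;
        v |= v >> 1;  v |= v >> 2;  v |= v >> 4;
        v |= v >> 8;  v |= v >> 16;
        return v + 1;
}

/*
 * Strip common factors of two only when asked (above, reduce_m_n is
 * set for sinks with DP_DPCD_QUIRK_LIMITED_M_N), then clamp N to a
 * power of two and rescale M to keep the ratio.
 */
static void compute_m_n(uint32_t m, uint32_t n,
                        uint32_t *ret_m, uint32_t *ret_n, int reduce_m_n)
{
        const uint32_t N_MAX = 0x800000;        /* stand-in clamp */

        if (reduce_m_n) {
                while ((m & 1) == 0 && (n & 1) == 0) {
                        m >>= 1;
                        n >>= 1;
                }
        }

        *ret_n = next_pow2(n);
        if (*ret_n > N_MAX)
                *ret_n = N_MAX;
        *ret_m = (uint32_t)((uint64_t)m * *ret_n / n);
}

int main(void)
{
        uint32_t m, n;

        /* 24 bpp at a 148.5 MHz pixel clock, 4 lanes at HBR (270 MHz). */
        compute_m_n(24 * 148500, 270000 * 4 * 8, &m, &n, 1);
        printf("m=%u n=%u\n", m, n);
        return 0;
}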
index ee77b519835c5fd9d8c582a9c3169b43d06ebab6..fc691b8b317cf3924a98adfb51ca1183a6f2a6b3 100644 (file)
@@ -1507,37 +1507,6 @@ static void intel_dp_print_rates(struct intel_dp *intel_dp)
        DRM_DEBUG_KMS("common rates: %s\n", str);
 }
 
-bool
-__intel_dp_read_desc(struct intel_dp *intel_dp, struct intel_dp_desc *desc)
-{
-       u32 base = drm_dp_is_branch(intel_dp->dpcd) ? DP_BRANCH_OUI :
-                                                     DP_SINK_OUI;
-
-       return drm_dp_dpcd_read(&intel_dp->aux, base, desc, sizeof(*desc)) ==
-              sizeof(*desc);
-}
-
-bool intel_dp_read_desc(struct intel_dp *intel_dp)
-{
-       struct intel_dp_desc *desc = &intel_dp->desc;
-       bool oui_sup = intel_dp->dpcd[DP_DOWN_STREAM_PORT_COUNT] &
-                      DP_OUI_SUPPORT;
-       int dev_id_len;
-
-       if (!__intel_dp_read_desc(intel_dp, desc))
-               return false;
-
-       dev_id_len = strnlen(desc->device_id, sizeof(desc->device_id));
-       DRM_DEBUG_KMS("DP %s: OUI %*phD%s dev-ID %*pE HW-rev %d.%d SW-rev %d.%d\n",
-                     drm_dp_is_branch(intel_dp->dpcd) ? "branch" : "sink",
-                     (int)sizeof(desc->oui), desc->oui, oui_sup ? "" : "(NS)",
-                     dev_id_len, desc->device_id,
-                     desc->hw_rev >> 4, desc->hw_rev & 0xf,
-                     desc->sw_major_rev, desc->sw_minor_rev);
-
-       return true;
-}
-
 static int rate_to_index(int find, const int *rates)
 {
        int i = 0;
@@ -1624,6 +1593,8 @@ intel_dp_compute_config(struct intel_encoder *encoder,
        int common_rates[DP_MAX_SUPPORTED_RATES] = {};
        int common_len;
        uint8_t link_bw, rate_select;
+       bool reduce_m_n = drm_dp_has_quirk(&intel_dp->desc,
+                                          DP_DPCD_QUIRK_LIMITED_M_N);
 
        common_len = intel_dp_common_rates(intel_dp, common_rates);
 
@@ -1753,7 +1724,8 @@ found:
        intel_link_compute_m_n(bpp, lane_count,
                               adjusted_mode->crtc_clock,
                               pipe_config->port_clock,
-                              &pipe_config->dp_m_n);
+                              &pipe_config->dp_m_n,
+                              reduce_m_n);
 
        if (intel_connector->panel.downclock_mode != NULL &&
                dev_priv->drrs.type == SEAMLESS_DRRS_SUPPORT) {
@@ -1761,7 +1733,8 @@ found:
                        intel_link_compute_m_n(bpp, lane_count,
                                intel_connector->panel.downclock_mode->clock,
                                pipe_config->port_clock,
-                               &pipe_config->dp_m2_n2);
+                               &pipe_config->dp_m2_n2,
+                               reduce_m_n);
        }
 
        /*
@@ -3622,7 +3595,8 @@ intel_edp_init_dpcd(struct intel_dp *intel_dp)
        if (!intel_dp_read_dpcd(intel_dp))
                return false;
 
-       intel_dp_read_desc(intel_dp);
+       drm_dp_read_desc(&intel_dp->aux, &intel_dp->desc,
+                        drm_dp_is_branch(intel_dp->dpcd));
 
        if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11)
                dev_priv->no_aux_handshake = intel_dp->dpcd[DP_MAX_DOWNSPREAD] &
@@ -4624,7 +4598,8 @@ intel_dp_long_pulse(struct intel_connector *intel_connector)
 
        intel_dp_print_rates(intel_dp);
 
-       intel_dp_read_desc(intel_dp);
+       drm_dp_read_desc(&intel_dp->aux, &intel_dp->desc,
+                        drm_dp_is_branch(intel_dp->dpcd));
 
        intel_dp_configure_mst(intel_dp);
 
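
Those reduce_m_n flags come from drm_dp_read_desc() caching the sink/branch descriptor and drm_dp_has_quirk() testing it. A rough standalone model of a descriptor-keyed quirk lookup; the table layout, OUI value and flag bit below are invented for illustration and are not the drm helper's actual internals:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define QUIRK_LIMITED_M_N      (1u << 0)       /* illustrative bit */

/* Hypothetical quirk table keyed on the device's IEEE OUI. */
struct quirk_entry {
        uint8_t  oui[3];
        uint32_t quirks;
};

static const struct quirk_entry quirk_table[] = {
        { { 0x00, 0x11, 0x22 }, QUIRK_LIMITED_M_N },    /* made-up OUI */
};

/* Resolve quirks once, at descriptor-read time, and cache the mask. */
static uint32_t lookup_quirks(const uint8_t oui[3])
{
        size_t i;

        for (i = 0; i < sizeof(quirk_table) / sizeof(quirk_table[0]); i++)
                if (!memcmp(quirk_table[i].oui, oui, sizeof(quirk_table[i].oui)))
                        return quirk_table[i].quirks;
        return 0;
}

int main(void)
{
        const uint8_t oui[3] = { 0x00, 0x11, 0x22 };
        uint32_t quirks = lookup_quirks(oui);

        printf("limited M/N: %s\n",
               (quirks & QUIRK_LIMITED_M_N) ? "yes" : "no");
        return 0;
}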
index c1f62eb07c07a7ce49b3ed39e1e6ee2b23eb65e8..989e25577ac0445f9e7632a575a6de38d7f9ec49 100644 (file)
@@ -44,6 +44,8 @@ static bool intel_dp_mst_compute_config(struct intel_encoder *encoder,
        int lane_count, slots;
        const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
        int mst_pbn;
+       bool reduce_m_n = drm_dp_has_quirk(&intel_dp->desc,
+                                          DP_DPCD_QUIRK_LIMITED_M_N);
 
        pipe_config->has_pch_encoder = false;
        bpp = 24;
@@ -75,7 +77,8 @@ static bool intel_dp_mst_compute_config(struct intel_encoder *encoder,
        intel_link_compute_m_n(bpp, lane_count,
                               adjusted_mode->crtc_clock,
                               pipe_config->port_clock,
-                              &pipe_config->dp_m_n);
+                              &pipe_config->dp_m_n,
+                              reduce_m_n);
 
        pipe_config->dp_m_n.tu = slots;
 
index aaee3949a42267603a5dfa9deb9be87dd0f7b2b4..f630c7af50205540b64481d9c3ee559fbdfc8f7f 100644 (file)
@@ -906,14 +906,6 @@ enum link_m_n_set {
        M2_N2
 };
 
-struct intel_dp_desc {
-       u8 oui[3];
-       u8 device_id[6];
-       u8 hw_rev;
-       u8 sw_major_rev;
-       u8 sw_minor_rev;
-} __packed;
-
 struct intel_dp_compliance_data {
        unsigned long edid;
        uint8_t video_pattern;
@@ -957,7 +949,7 @@ struct intel_dp {
        /* Max link BW for the sink as per DPCD registers */
        int max_sink_link_bw;
        /* sink or branch descriptor */
-       struct intel_dp_desc desc;
+       struct drm_dp_desc desc;
        struct drm_dp_aux aux;
        enum intel_display_power_domain aux_power_domain;
        uint8_t train_set[4];
@@ -1532,9 +1524,6 @@ static inline unsigned int intel_dp_unused_lane_mask(int lane_count)
 }
 
 bool intel_dp_read_dpcd(struct intel_dp *intel_dp);
-bool __intel_dp_read_desc(struct intel_dp *intel_dp,
-                         struct intel_dp_desc *desc);
-bool intel_dp_read_desc(struct intel_dp *intel_dp);
 int intel_dp_link_required(int pixel_clock, int bpp);
 int intel_dp_max_data_rate(int max_link_clock, int max_lanes);
 bool intel_digital_port_connected(struct drm_i915_private *dev_priv,
index 668f00480d97c0ff0418c19dfaaffec31fc65341..292fedf30b0010c33e1eefd8f643b1b87bd38edd 100644 (file)
@@ -149,44 +149,10 @@ static void lpe_audio_platdev_destroy(struct drm_i915_private *dev_priv)
 
 static void lpe_audio_irq_unmask(struct irq_data *d)
 {
-       struct drm_i915_private *dev_priv = d->chip_data;
-       unsigned long irqflags;
-       u32 val = (I915_LPE_PIPE_A_INTERRUPT |
-               I915_LPE_PIPE_B_INTERRUPT);
-
-       if (IS_CHERRYVIEW(dev_priv))
-               val |= I915_LPE_PIPE_C_INTERRUPT;
-
-       spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
-
-       dev_priv->irq_mask &= ~val;
-       I915_WRITE(VLV_IIR, val);
-       I915_WRITE(VLV_IIR, val);
-       I915_WRITE(VLV_IMR, dev_priv->irq_mask);
-       POSTING_READ(VLV_IMR);
-
-       spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
 }
 
 static void lpe_audio_irq_mask(struct irq_data *d)
 {
-       struct drm_i915_private *dev_priv = d->chip_data;
-       unsigned long irqflags;
-       u32 val = (I915_LPE_PIPE_A_INTERRUPT |
-               I915_LPE_PIPE_B_INTERRUPT);
-
-       if (IS_CHERRYVIEW(dev_priv))
-               val |= I915_LPE_PIPE_C_INTERRUPT;
-
-       spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
-
-       dev_priv->irq_mask |= val;
-       I915_WRITE(VLV_IMR, dev_priv->irq_mask);
-       I915_WRITE(VLV_IIR, val);
-       I915_WRITE(VLV_IIR, val);
-       POSTING_READ(VLV_IIR);
-
-       spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
 }
 
 static struct irq_chip lpe_audio_irqchip = {
@@ -330,8 +296,6 @@ void intel_lpe_audio_teardown(struct drm_i915_private *dev_priv)
 
        desc = irq_to_desc(dev_priv->lpe_audio.irq);
 
-       lpe_audio_irq_mask(&desc->irq_data);
-
        lpe_audio_platdev_destroy(dev_priv);
 
        irq_free_desc(dev_priv->lpe_audio.irq);
index c8f7c631fc1f8e354cac0038c80aa35d0a1dd0d2..dac4e003c1f317ec402110132bad0c3a734bf52a 100644 (file)
@@ -1989,7 +1989,7 @@ static int execlists_context_deferred_alloc(struct i915_gem_context *ctx,
 
        ce->ring = ring;
        ce->state = vma;
-       ce->initialised = engine->init_context == NULL;
+       ce->initialised |= engine->init_context == NULL;
 
        return 0;
 
index 71cbe9c089320cbc305c827bacd41fcbf1e542ce..5abef482eacf1b24780edea4c40ab7e593a42dc6 100644 (file)
@@ -240,7 +240,7 @@ bool lspcon_init(struct intel_digital_port *intel_dig_port)
                return false;
        }
 
-       intel_dp_read_desc(dp);
+       drm_dp_read_desc(&dp->aux, &dp->desc, drm_dp_is_branch(dp->dpcd));
 
        DRM_DEBUG_KMS("Success: LSPCON init\n");
        return true;
index 1afb8b06e3e19bf23ed287277415afb364504b23..12b85b3278cd1cfc53b159253e9152e3d8f1784b 100644 (file)
@@ -320,7 +320,7 @@ static unsigned long max_dwords(struct drm_i915_gem_object *obj)
 static int igt_ctx_exec(void *arg)
 {
        struct drm_i915_private *i915 = arg;
-       struct drm_i915_gem_object *obj;
+       struct drm_i915_gem_object *obj = NULL;
        struct drm_file *file;
        IGT_TIMEOUT(end_time);
        LIST_HEAD(objects);
@@ -359,7 +359,7 @@ static int igt_ctx_exec(void *arg)
                }
 
                for_each_engine(engine, i915, id) {
-                       if (dw == 0) {
+                       if (!obj) {
                                obj = create_test_object(ctx, file, &objects);
                                if (IS_ERR(obj)) {
                                        err = PTR_ERR(obj);
@@ -376,8 +376,10 @@ static int igt_ctx_exec(void *arg)
                                goto out_unlock;
                        }
 
-                       if (++dw == max_dwords(obj))
+                       if (++dw == max_dwords(obj)) {
+                               obj = NULL;
                                dw = 0;
+                       }
                        ndwords++;
                }
                ncontexts++;
index 5b8e23d051f2f3752a180df4abedccefcfebc3ed..0a31cd6d01ce145f3f112b8c19d17dcdd46ea524 100644 (file)
@@ -13,6 +13,7 @@ config DRM_MSM
        select QCOM_SCM
        select SND_SOC_HDMI_CODEC if SND_SOC
        select SYNC_FILE
+       select PM_OPP
        default y
        help
          DRM/KMS driver for MSM/snapdragon.
index f8f48d014978c0ccd5cc9ffcc3d699b86f779399..9c34d7824988654ab2f8366741724da8ac18b82a 100644 (file)
@@ -116,7 +116,7 @@ static int mdss_hw_irqdomain_map(struct irq_domain *d, unsigned int irq,
        return 0;
 }
 
-static struct irq_domain_ops mdss_hw_irqdomain_ops = {
+static const struct irq_domain_ops mdss_hw_irqdomain_ops = {
        .map = mdss_hw_irqdomain_map,
        .xlate = irq_domain_xlate_onecell,
 };
index a38c5fe6cc19752a9832618af1a4f146bab4a8df..7d3741215387110bb7f7ad622cb54d0d411fb9f1 100644 (file)
@@ -225,9 +225,10 @@ mdp5_plane_duplicate_state(struct drm_plane *plane)
 
        mdp5_state = kmemdup(to_mdp5_plane_state(plane->state),
                        sizeof(*mdp5_state), GFP_KERNEL);
+       if (!mdp5_state)
+               return NULL;
 
-       if (mdp5_state && mdp5_state->base.fb)
-               drm_framebuffer_reference(mdp5_state->base.fb);
+       __drm_atomic_helper_plane_duplicate_state(plane, &mdp5_state->base);
 
        return &mdp5_state->base;
 }
@@ -444,6 +445,10 @@ static int mdp5_plane_atomic_check_with_state(struct drm_crtc_state *crtc_state,
                        mdp5_pipe_release(state->state, old_hwpipe);
                        mdp5_pipe_release(state->state, old_right_hwpipe);
                }
+       } else {
+               mdp5_pipe_release(state->state, mdp5_state->hwpipe);
+               mdp5_pipe_release(state->state, mdp5_state->r_hwpipe);
+               mdp5_state->hwpipe = mdp5_state->r_hwpipe = NULL;
        }
 
        return 0;
index 87b5695d4034df0e118475617167ee990ccfc490..9d498eb81906220705d85c57260cd4b1f82fa1fc 100644 (file)
@@ -830,6 +830,7 @@ static struct drm_driver msm_driver = {
        .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
        .gem_prime_export   = drm_gem_prime_export,
        .gem_prime_import   = drm_gem_prime_import,
+       .gem_prime_res_obj  = msm_gem_prime_res_obj,
        .gem_prime_pin      = msm_gem_prime_pin,
        .gem_prime_unpin    = msm_gem_prime_unpin,
        .gem_prime_get_sg_table = msm_gem_prime_get_sg_table,
index 28b6f9ba50664509bb44fa2b5704d3bcc86a67af..1b26ca626528ab5f4435f689d0a213f0e672aaa5 100644 (file)
@@ -224,6 +224,7 @@ struct sg_table *msm_gem_prime_get_sg_table(struct drm_gem_object *obj);
 void *msm_gem_prime_vmap(struct drm_gem_object *obj);
 void msm_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
 int msm_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma);
+struct reservation_object *msm_gem_prime_res_obj(struct drm_gem_object *obj);
 struct drm_gem_object *msm_gem_prime_import_sg_table(struct drm_device *dev,
                struct dma_buf_attachment *attach, struct sg_table *sg);
 int msm_gem_prime_pin(struct drm_gem_object *obj);
index 3f299c537b77ae347bce4c92368ed774c7695459..a2f89bac9c160674f5f103f75a04a7a92e7c5b99 100644 (file)
@@ -99,8 +99,8 @@ void msm_update_fence(struct msm_fence_context *fctx, uint32_t fence)
 }
 
 struct msm_fence {
-       struct msm_fence_context *fctx;
        struct dma_fence base;
+       struct msm_fence_context *fctx;
 };
 
 static inline struct msm_fence *to_msm_fence(struct dma_fence *fence)
@@ -130,19 +130,13 @@ static bool msm_fence_signaled(struct dma_fence *fence)
        return fence_completed(f->fctx, f->base.seqno);
 }
 
-static void msm_fence_release(struct dma_fence *fence)
-{
-       struct msm_fence *f = to_msm_fence(fence);
-       kfree_rcu(f, base.rcu);
-}
-
 static const struct dma_fence_ops msm_fence_ops = {
        .get_driver_name = msm_fence_get_driver_name,
        .get_timeline_name = msm_fence_get_timeline_name,
        .enable_signaling = msm_fence_enable_signaling,
        .signaled = msm_fence_signaled,
        .wait = dma_fence_default_wait,
-       .release = msm_fence_release,
+       .release = dma_fence_free,
 };
 
 struct dma_fence *
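
The member swap above is what makes the switch from the bespoke release hook to dma_fence_free() safe: dma_fence_free() frees the allocation through the dma_fence pointer, so the embedded base must sit at offset zero for that pointer to coincide with the container's. A minimal standalone illustration of the layout rule:

#include <assert.h>
#include <stddef.h>
#include <stdlib.h>

struct base { int refcount; };

struct wrapper {
        struct base base;       /* must be first: freeing &w->base frees w */
        int extra;
};

int main(void)
{
        struct wrapper *w = malloc(sizeof(*w));
        struct base *b;

        if (!w)
                return 1;
        b = &w->base;

        /* With base at offset zero the container and member addresses
         * coincide, so a generic free(b) releases the whole wrapper. */
        assert(offsetof(struct wrapper, base) == 0);
        assert((void *)b == (void *)w);
        free(b);
        return 0;
}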
index 68e509b3b9e4d08730e3901f46a397519c33e77c..50289a23baf8df27c4bc1aebf067da2b011b8f28 100644 (file)
@@ -758,6 +758,8 @@ static int msm_gem_new_impl(struct drm_device *dev,
        struct msm_gem_object *msm_obj;
        bool use_vram = false;
 
+       WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+
        switch (flags & MSM_BO_CACHE_MASK) {
        case MSM_BO_UNCACHED:
        case MSM_BO_CACHED:
@@ -853,7 +855,11 @@ struct drm_gem_object *msm_gem_import(struct drm_device *dev,
 
        size = PAGE_ALIGN(dmabuf->size);
 
+       /* Take mutex so we can modify the inactive list in msm_gem_new_impl */
+       mutex_lock(&dev->struct_mutex);
        ret = msm_gem_new_impl(dev, size, MSM_BO_WC, dmabuf->resv, &obj);
+       mutex_unlock(&dev->struct_mutex);
+
        if (ret)
                goto fail;
 
index 60bb290700cef9c32fc2ca0dd2db229a6a7ffedf..13403c6da6c75012fa5f17f4b0b63075ddf20874 100644 (file)
@@ -70,3 +70,10 @@ void msm_gem_prime_unpin(struct drm_gem_object *obj)
        if (!obj->import_attach)
                msm_gem_put_pages(obj);
 }
+
+struct reservation_object *msm_gem_prime_res_obj(struct drm_gem_object *obj)
+{
+       struct msm_gem_object *msm_obj = to_msm_bo(obj);
+
+       return msm_obj->resv;
+}
index 1c545ebe6a5a0f875a995b1db3a1204b0b21d57e..7832e6421d250d0bd78400057e46dce07dc2d18c 100644 (file)
@@ -410,12 +410,11 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
                if (!in_fence)
                        return -EINVAL;
 
-               /* TODO if we get an array-fence due to userspace merging multiple
-                * fences, we need a way to determine if all the backing fences
-                * are from our own context..
+               /*
+                * Wait if the fence is from a foreign context, or if the fence
+                * array contains any fence from a foreign context.
                 */
-
-               if (in_fence->context != gpu->fctx->context) {
+               if (!dma_fence_match_context(in_fence, gpu->fctx->context)) {
                        ret = dma_fence_wait(in_fence, true);
                        if (ret)
                                return ret;
@@ -496,8 +495,9 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
                        goto out;
                }
 
-               if ((submit_cmd.size + submit_cmd.submit_offset) >=
-                               msm_obj->base.size) {
+               if (!submit_cmd.size ||
+                       ((submit_cmd.size + submit_cmd.submit_offset) >
+                               msm_obj->base.size)) {
                        DRM_ERROR("invalid cmdstream size: %u\n", submit_cmd.size);
                        ret = -EINVAL;
                        goto out;
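
The dma_fence_match_context() call resolves the old TODO: when userspace hands in a merged array-fence, every backing fence's context has to be checked before the explicit wait may be skipped. A simplified standalone model of that check (the types are invented for the sketch):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct fence {
        uint64_t context;
        struct fence **children;        /* non-NULL for a merged array */
        unsigned int num_children;
};

/* True only if the fence -- and, for arrays, every child -- is ours. */
static bool fence_match_context(const struct fence *f, uint64_t ctx)
{
        unsigned int i;

        if (f->children) {
                for (i = 0; i < f->num_children; i++)
                        if (f->children[i]->context != ctx)
                                return false;
                return true;
        }
        return f->context == ctx;
}

int main(void)
{
        struct fence ours = { .context = 1 };
        struct fence theirs = { .context = 2 };
        struct fence *kids[] = { &ours, &theirs };
        struct fence array = { .children = kids, .num_children = 2 };

        /* One foreign child makes the whole array foreign: must wait. */
        printf("skip wait: %d\n", fence_match_context(&array, 1));
        return 0;
}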
index 97b9c38c6b3ff7e05adf9ea84e8328d19e32b90f..0fdc88d79ca87b3a54709aa4d527db80ca0997dc 100644 (file)
@@ -549,9 +549,9 @@ static int get_clocks(struct platform_device *pdev, struct msm_gpu *gpu)
                gpu->grp_clks[i] = get_clock(dev, name);
 
                /* Remember the key clocks that we need to control later */
-               if (!strcmp(name, "core"))
+               if (!strcmp(name, "core") || !strcmp(name, "core_clk"))
                        gpu->core_clk = gpu->grp_clks[i];
-               else if (!strcmp(name, "rbbmtimer"))
+               else if (!strcmp(name, "rbbmtimer") || !strcmp(name, "rbbmtimer_clk"))
                        gpu->rbbmtimer_clk = gpu->grp_clks[i];
 
                ++i;
index 058340a002c29daef072f21bf90e1e8a572b3fa9..4a340efd8ba67ac23aadc6611ea3ce7e7d460e1a 100644 (file)
@@ -575,8 +575,6 @@ static void qxl_cursor_atomic_update(struct drm_plane *plane,
        if (ret)
                return;
 
-       cmd = (struct qxl_cursor_cmd *) qxl_release_map(qdev, release);
-
        if (fb != old_state->fb) {
                obj = to_qxl_framebuffer(fb)->obj;
                user_bo = gem_to_qxl_bo(obj);
@@ -614,6 +612,7 @@ static void qxl_cursor_atomic_update(struct drm_plane *plane,
                qxl_bo_kunmap(cursor_bo);
                qxl_bo_kunmap(user_bo);
 
+               cmd = (struct qxl_cursor_cmd *) qxl_release_map(qdev, release);
                cmd->u.set.visible = 1;
                cmd->u.set.shape = qxl_bo_physical_address(qdev,
                                                           cursor_bo, 0);
@@ -624,6 +623,7 @@ static void qxl_cursor_atomic_update(struct drm_plane *plane,
                if (ret)
                        goto out_free_release;
 
+               cmd = (struct qxl_cursor_cmd *) qxl_release_map(qdev, release);
                cmd->type = QXL_CURSOR_MOVE;
        }
 
index 7ba450832e6b7a59db9499a0919a8bb5f4a3fd1a..ea36dc4dd5d22ec7b30678ea811dcf5009f454f7 100644 (file)
@@ -776,6 +776,12 @@ bool ci_dpm_vblank_too_short(struct radeon_device *rdev)
        u32 vblank_time = r600_dpm_get_vblank_time(rdev);
        u32 switch_limit = pi->mem_gddr5 ? 450 : 300;
 
+       /* disable mclk switching if the refresh is >120Hz, even if the
+        * blanking period would allow it
+        */
+       if (r600_dpm_get_vrefresh(rdev) > 120)
+               return true;
+
        if (vblank_time < switch_limit)
                return true;
        else
index ccebe0f8d2e1e3b4ec15d6170b788e2e8fc8b980..008c145b7f29f60a298419931f1922555de5e35a 100644 (file)
@@ -7401,7 +7401,7 @@ static inline void cik_irq_ack(struct radeon_device *rdev)
                WREG32(DC_HPD5_INT_CONTROL, tmp);
        }
        if (rdev->irq.stat_regs.cik.disp_int_cont5 & DC_HPD6_INTERRUPT) {
-               tmp = RREG32(DC_HPD5_INT_CONTROL);
+               tmp = RREG32(DC_HPD6_INT_CONTROL);
                tmp |= DC_HPDx_INT_ACK;
                WREG32(DC_HPD6_INT_CONTROL, tmp);
        }
@@ -7431,7 +7431,7 @@ static inline void cik_irq_ack(struct radeon_device *rdev)
                WREG32(DC_HPD5_INT_CONTROL, tmp);
        }
        if (rdev->irq.stat_regs.cik.disp_int_cont5 & DC_HPD6_RX_INTERRUPT) {
-               tmp = RREG32(DC_HPD5_INT_CONTROL);
+               tmp = RREG32(DC_HPD6_INT_CONTROL);
                tmp |= DC_HPDx_RX_INT_ACK;
                WREG32(DC_HPD6_INT_CONTROL, tmp);
        }
index f130ec41ee4bbcad63516335dfaf17e24e782b60..0bf103536404e5dde2d480bf692a496a6865e817 100644 (file)
@@ -4927,7 +4927,7 @@ static void evergreen_irq_ack(struct radeon_device *rdev)
                WREG32(DC_HPD5_INT_CONTROL, tmp);
        }
        if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_INTERRUPT) {
-               tmp = RREG32(DC_HPD5_INT_CONTROL);
+               tmp = RREG32(DC_HPD6_INT_CONTROL);
                tmp |= DC_HPDx_INT_ACK;
                WREG32(DC_HPD6_INT_CONTROL, tmp);
        }
@@ -4958,7 +4958,7 @@ static void evergreen_irq_ack(struct radeon_device *rdev)
                WREG32(DC_HPD5_INT_CONTROL, tmp);
        }
        if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_RX_INTERRUPT) {
-               tmp = RREG32(DC_HPD5_INT_CONTROL);
+               tmp = RREG32(DC_HPD6_INT_CONTROL);
                tmp |= DC_HPDx_RX_INT_ACK;
                WREG32(DC_HPD6_INT_CONTROL, tmp);
        }
index 0a085176e79b35b2887c19f7b33091804b58e645..e06e2d8feab397822361ba18a3e4b420cc2f0c4d 100644 (file)
@@ -3988,7 +3988,7 @@ static void r600_irq_ack(struct radeon_device *rdev)
                        WREG32(DC_HPD5_INT_CONTROL, tmp);
                }
                if (rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD6_INTERRUPT) {
-                       tmp = RREG32(DC_HPD5_INT_CONTROL);
+                       tmp = RREG32(DC_HPD6_INT_CONTROL);
                        tmp |= DC_HPDx_INT_ACK;
                        WREG32(DC_HPD6_INT_CONTROL, tmp);
                }
index e3e7cb1d10a2941d1790d36a9f5250f70aeb78cd..4761f27f2ca2a073a8ffb30806a9cac905003f23 100644 (file)
@@ -116,7 +116,7 @@ int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags)
        if ((radeon_runtime_pm != 0) &&
            radeon_has_atpx() &&
            ((flags & RADEON_IS_IGP) == 0) &&
-           !pci_is_thunderbolt_attached(rdev->pdev))
+           !pci_is_thunderbolt_attached(dev->pdev))
                flags |= RADEON_IS_PX;
 
        /* radeon_device_init should report only fatal error
index ceee87f029d9a3479d374c4fc305338377dee961..76d1888528e675c700b543fa6e10c77a466054d7 100644 (file)
@@ -6317,7 +6317,7 @@ static inline void si_irq_ack(struct radeon_device *rdev)
                WREG32(DC_HPD5_INT_CONTROL, tmp);
        }
        if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_INTERRUPT) {
-               tmp = RREG32(DC_HPD5_INT_CONTROL);
+               tmp = RREG32(DC_HPD6_INT_CONTROL);
                tmp |= DC_HPDx_INT_ACK;
                WREG32(DC_HPD6_INT_CONTROL, tmp);
        }
@@ -6348,7 +6348,7 @@ static inline void si_irq_ack(struct radeon_device *rdev)
                WREG32(DC_HPD5_INT_CONTROL, tmp);
        }
        if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_RX_INTERRUPT) {
-               tmp = RREG32(DC_HPD5_INT_CONTROL);
+               tmp = RREG32(DC_HPD6_INT_CONTROL);
                tmp |= DC_HPDx_RX_INT_ACK;
                WREG32(DC_HPD6_INT_CONTROL, tmp);
        }
index fe40e5e499dd4122ce0f623d9df776c0a0cf2bdf..687705c5079422a82b9977e5694d0cd45b2a47bc 100644 (file)
@@ -275,10 +275,12 @@ config HID_EMS_FF
         - Trio Linker Plus II
 
 config HID_ELECOM
-       tristate "ELECOM BM084 bluetooth mouse"
+       tristate "ELECOM HID devices"
        depends on HID
        ---help---
-       Support for the ELECOM BM084 (bluetooth mouse).
+       Support for ELECOM devices:
+         - BM084 Bluetooth Mouse
+         - DEFT Trackball (Wired and wireless)
 
 config HID_ELO
        tristate "ELO USB 4000/4500 touchscreen"
index 16df6cc902359ea620de3f079be796d32be4783a..a6268f2f7408a520660c6add3c734a8006474393 100644 (file)
@@ -69,6 +69,7 @@ MODULE_DESCRIPTION("Asus HID Keyboard and TouchPad");
 #define QUIRK_IS_MULTITOUCH            BIT(3)
 #define QUIRK_NO_CONSUMER_USAGES       BIT(4)
 #define QUIRK_USE_KBD_BACKLIGHT                BIT(5)
+#define QUIRK_T100_KEYBOARD            BIT(6)
 
 #define I2C_KEYBOARD_QUIRKS                    (QUIRK_FIX_NOTEBOOK_REPORT | \
                                                 QUIRK_NO_INIT_REPORTS | \
@@ -536,6 +537,8 @@ static void asus_remove(struct hid_device *hdev)
                drvdata->kbd_backlight->removed = true;
                cancel_work_sync(&drvdata->kbd_backlight->work);
        }
+
+       hid_hw_stop(hdev);
 }
 
 static __u8 *asus_report_fixup(struct hid_device *hdev, __u8 *rdesc,
@@ -548,6 +551,12 @@ static __u8 *asus_report_fixup(struct hid_device *hdev, __u8 *rdesc,
                hid_info(hdev, "Fixing up Asus notebook report descriptor\n");
                rdesc[55] = 0xdd;
        }
+       if (drvdata->quirks & QUIRK_T100_KEYBOARD &&
+                *rsize == 76 && rdesc[73] == 0x81 && rdesc[74] == 0x01) {
+               hid_info(hdev, "Fixing up Asus T100 keyb report descriptor\n");
+               rdesc[74] &= ~HID_MAIN_ITEM_CONSTANT;
+       }
+
        return rdesc;
 }
 
@@ -560,6 +569,9 @@ static const struct hid_device_id asus_devices[] = {
                USB_DEVICE_ID_ASUSTEK_ROG_KEYBOARD1) },
        { HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK,
                USB_DEVICE_ID_ASUSTEK_ROG_KEYBOARD2), QUIRK_USE_KBD_BACKLIGHT },
+       { HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK,
+               USB_DEVICE_ID_ASUSTEK_T100_KEYBOARD),
+         QUIRK_T100_KEYBOARD | QUIRK_NO_CONSUMER_USAGES },
        { }
 };
 MODULE_DEVICE_TABLE(hid, asus_devices);
index 37084b6457851ebe0d52361e327db9df07b86b2c..04cee65531d761c18e53775ffc784c3c3d993daa 100644 (file)
@@ -1855,6 +1855,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
        { HID_I2C_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_I2C_TOUCHPAD) },
        { HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_ROG_KEYBOARD1) },
        { HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_ROG_KEYBOARD2) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_T100_KEYBOARD) },
        { HID_USB_DEVICE(USB_VENDOR_ID_AUREAL, USB_DEVICE_ID_AUREAL_W01RN) },
        { HID_USB_DEVICE(USB_VENDOR_ID_BELKIN, USB_DEVICE_ID_FLIP_KVM) },
        { HID_USB_DEVICE(USB_VENDOR_ID_BETOP_2185BFM, 0x2208) },
@@ -1891,6 +1892,8 @@ static const struct hid_device_id hid_have_special_driver[] = {
        { HID_USB_DEVICE(USB_VENDOR_ID_DREAM_CHEEKY, USB_DEVICE_ID_DREAM_CHEEKY_WN) },
        { HID_USB_DEVICE(USB_VENDOR_ID_DREAM_CHEEKY, USB_DEVICE_ID_DREAM_CHEEKY_FA) },
        { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_BM084) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_DEFT_WIRED) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_DEFT_WIRELESS) },
        { HID_USB_DEVICE(USB_VENDOR_ID_ELO, 0x0009) },
        { HID_USB_DEVICE(USB_VENDOR_ID_ELO, 0x0030) },
        { HID_USB_DEVICE(USB_VENDOR_ID_ELO, USB_DEVICE_ID_ELO_ACCUTOUCH_2216) },
index 6e3848a8d8dd1416a0091ce0e7d263325dd8910b..e2c7465df69f3ae74c2cb1979c531b02e2934089 100644 (file)
@@ -1,10 +1,8 @@
 /*
- *  HID driver for Elecom BM084 (bluetooth mouse).
- *  Removes a non-existing horizontal wheel from
- *  the HID descriptor.
- *  (This module is based on "hid-ortek".)
- *
+ *  HID driver for ELECOM devices.
  *  Copyright (c) 2010 Richard Nauber <Richard.Nauber@gmail.com>
+ *  Copyright (c) 2016 Yuxuan Shui <yshuiv7@gmail.com>
+ *  Copyright (c) 2017 Diego Elio Pettenò <flameeyes@flameeyes.eu>
  */
 
 /*
 static __u8 *elecom_report_fixup(struct hid_device *hdev, __u8 *rdesc,
                unsigned int *rsize)
 {
-       if (*rsize >= 48 && rdesc[46] == 0x05 && rdesc[47] == 0x0c) {
-               hid_info(hdev, "Fixing up Elecom BM084 report descriptor\n");
-               rdesc[47] = 0x00;
+       switch (hdev->product) {
+       case USB_DEVICE_ID_ELECOM_BM084:
+               /* The BM084 Bluetooth mouse includes a nonexistent horizontal
+                * wheel in the HID descriptor. */
+               if (*rsize >= 48 && rdesc[46] == 0x05 && rdesc[47] == 0x0c) {
+                       hid_info(hdev, "Fixing up Elecom BM084 report descriptor\n");
+                       rdesc[47] = 0x00;
+               }
+               break;
+       case USB_DEVICE_ID_ELECOM_DEFT_WIRED:
+       case USB_DEVICE_ID_ELECOM_DEFT_WIRELESS:
+               /* The DEFT trackball has eight buttons, but its descriptor only
+                * reports five, disabling the three Fn buttons on the top of
+                * the mouse.
+                *
+                * Apply the following diff to the descriptor:
+                *
+                * Collection (Physical),              Collection (Physical),
+                *     Report ID (1),                      Report ID (1),
+                *     Report Count (5),           ->      Report Count (8),
+                *     Report Size (1),                    Report Size (1),
+                *     Usage Page (Button),                Usage Page (Button),
+                *     Usage Minimum (01h),                Usage Minimum (01h),
+                *     Usage Maximum (05h),        ->      Usage Maximum (08h),
+                *     Logical Minimum (0),                Logical Minimum (0),
+                *     Logical Maximum (1),                Logical Maximum (1),
+                *     Input (Variable),                   Input (Variable),
+                *     Report Count (1),           ->      Report Count (0),
+                *     Report Size (3),                    Report Size (3),
+                *     Input (Constant),                   Input (Constant),
+                *     Report Size (16),                   Report Size (16),
+                *     Report Count (2),                   Report Count (2),
+                *     Usage Page (Desktop),               Usage Page (Desktop),
+                *     Usage (X),                          Usage (X),
+                *     Usage (Y),                          Usage (Y),
+                *     Logical Minimum (-32768),           Logical Minimum (-32768),
+                *     Logical Maximum (32767),            Logical Maximum (32767),
+                *     Input (Variable, Relative),         Input (Variable, Relative),
+                * End Collection,                     End Collection,
+                */
+               if (*rsize == 213 && rdesc[13] == 5 && rdesc[21] == 5) {
+                       hid_info(hdev, "Fixing up Elecom DEFT Fn buttons\n");
+                       rdesc[13] = 8; /* Button/Variable Report Count */
+                       rdesc[21] = 8; /* Button/Variable Usage Maximum */
+                       rdesc[29] = 0; /* Button/Constant Report Count */
+               }
+               break;
        }
        return rdesc;
 }
 
 static const struct hid_device_id elecom_devices[] = {
-       { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_BM084)},
+       { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_BM084) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_DEFT_WIRED) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_DEFT_WIRELESS) },
        { }
 };
 MODULE_DEVICE_TABLE(hid, elecom_devices);
index 643390ba749d96c5ddb5fbde68fda45c38569c3d..8ca1e8ce0af24e325957526c125ccf55d9081eb8 100644 (file)
 #define USB_VENDOR_ID_ASUSTEK          0x0b05
 #define USB_DEVICE_ID_ASUSTEK_LCM      0x1726
 #define USB_DEVICE_ID_ASUSTEK_LCM2     0x175b
+#define USB_DEVICE_ID_ASUSTEK_T100_KEYBOARD    0x17e0
 #define USB_DEVICE_ID_ASUSTEK_I2C_KEYBOARD     0x8585
 #define USB_DEVICE_ID_ASUSTEK_I2C_TOUCHPAD     0x0101
 #define USB_DEVICE_ID_ASUSTEK_ROG_KEYBOARD1 0x1854
 
 #define USB_VENDOR_ID_ELECOM           0x056e
 #define USB_DEVICE_ID_ELECOM_BM084     0x0061
+#define USB_DEVICE_ID_ELECOM_DEFT_WIRED        0x00fe
+#define USB_DEVICE_ID_ELECOM_DEFT_WIRELESS     0x00ff
 
 #define USB_VENDOR_ID_DREAM_CHEEKY     0x1d34
 #define USB_DEVICE_ID_DREAM_CHEEKY_WN  0x0004
index 20b40ad2632503754685b84cc07d8787a4a44515..1d6c997b300149269367d00fb5db66b7c2ea25b7 100644 (file)
@@ -349,6 +349,7 @@ static int magicmouse_raw_event(struct hid_device *hdev,
 
        if (input->id.product == USB_DEVICE_ID_APPLE_MAGICMOUSE) {
                magicmouse_emit_buttons(msc, clicks & 3);
+               input_mt_report_pointer_emulation(input, true);
                input_report_rel(input, REL_X, x);
                input_report_rel(input, REL_Y, y);
        } else { /* USB_DEVICE_ID_APPLE_MAGICTRACKPAD */
@@ -388,16 +389,16 @@ static int magicmouse_setup_input(struct input_dev *input, struct hid_device *hd
                __clear_bit(BTN_RIGHT, input->keybit);
                __clear_bit(BTN_MIDDLE, input->keybit);
                __set_bit(BTN_MOUSE, input->keybit);
-               __set_bit(BTN_TOOL_FINGER, input->keybit);
-               __set_bit(BTN_TOOL_DOUBLETAP, input->keybit);
-               __set_bit(BTN_TOOL_TRIPLETAP, input->keybit);
-               __set_bit(BTN_TOOL_QUADTAP, input->keybit);
-               __set_bit(BTN_TOOL_QUINTTAP, input->keybit);
-               __set_bit(BTN_TOUCH, input->keybit);
-               __set_bit(INPUT_PROP_POINTER, input->propbit);
                __set_bit(INPUT_PROP_BUTTONPAD, input->propbit);
        }
 
+       __set_bit(BTN_TOOL_FINGER, input->keybit);
+       __set_bit(BTN_TOOL_DOUBLETAP, input->keybit);
+       __set_bit(BTN_TOOL_TRIPLETAP, input->keybit);
+       __set_bit(BTN_TOOL_QUADTAP, input->keybit);
+       __set_bit(BTN_TOOL_QUINTTAP, input->keybit);
+       __set_bit(BTN_TOUCH, input->keybit);
+       __set_bit(INPUT_PROP_POINTER, input->propbit);
 
        __set_bit(EV_ABS, input->evbit);
 
index 8daa8ce64ebba51e91e57ec801fa7e702fb2a072..fb55fb4c39fcfecaca55c0b8720d28d2f9717678 100644 (file)
@@ -897,6 +897,15 @@ static int i2c_hid_acpi_pdata(struct i2c_client *client,
        return 0;
 }
 
+static void i2c_hid_acpi_fix_up_power(struct device *dev)
+{
+       acpi_handle handle = ACPI_HANDLE(dev);
+       struct acpi_device *adev;
+
+       if (handle && acpi_bus_get_device(handle, &adev) == 0)
+               acpi_device_fix_up_power(adev);
+}
+
 static const struct acpi_device_id i2c_hid_acpi_match[] = {
        {"ACPI0C50", 0 },
        {"PNP0C50", 0 },
@@ -909,6 +918,8 @@ static inline int i2c_hid_acpi_pdata(struct i2c_client *client,
 {
        return -ENODEV;
 }
+
+static inline void i2c_hid_acpi_fix_up_power(struct device *dev) {}
 #endif
 
 #ifdef CONFIG_OF
@@ -1030,6 +1041,8 @@ static int i2c_hid_probe(struct i2c_client *client,
        if (ret < 0)
                goto err_regulator;
 
+       i2c_hid_acpi_fix_up_power(&client->dev);
+
        pm_runtime_get_noresume(&client->dev);
        pm_runtime_set_active(&client->dev);
        pm_runtime_enable(&client->dev);
index 4b225fb19a16842f635026d1b1023d5d1cf5068e..e274c9dc32f3a211d97d02f2b6477f20c9121fac 100644 (file)
@@ -1571,37 +1571,38 @@ static int wacom_tpc_irq(struct wacom_wac *wacom, size_t len)
 {
        unsigned char *data = wacom->data;
 
-       if (wacom->pen_input)
+       if (wacom->pen_input) {
                dev_dbg(wacom->pen_input->dev.parent,
                        "%s: received report #%d\n", __func__, data[0]);
-       else if (wacom->touch_input)
+
+               if (len == WACOM_PKGLEN_PENABLED ||
+                   data[0] == WACOM_REPORT_PENABLED)
+                       return wacom_tpc_pen(wacom);
+       }
+       else if (wacom->touch_input) {
                dev_dbg(wacom->touch_input->dev.parent,
                        "%s: received report #%d\n", __func__, data[0]);
 
-       switch (len) {
-       case WACOM_PKGLEN_TPC1FG:
-               return wacom_tpc_single_touch(wacom, len);
+               switch (len) {
+               case WACOM_PKGLEN_TPC1FG:
+                       return wacom_tpc_single_touch(wacom, len);
 
-       case WACOM_PKGLEN_TPC2FG:
-               return wacom_tpc_mt_touch(wacom);
+               case WACOM_PKGLEN_TPC2FG:
+                       return wacom_tpc_mt_touch(wacom);
 
-       case WACOM_PKGLEN_PENABLED:
-               return wacom_tpc_pen(wacom);
+               default:
+                       switch (data[0]) {
+                       case WACOM_REPORT_TPC1FG:
+                       case WACOM_REPORT_TPCHID:
+                       case WACOM_REPORT_TPCST:
+                       case WACOM_REPORT_TPC1FGE:
+                               return wacom_tpc_single_touch(wacom, len);
 
-       default:
-               switch (data[0]) {
-               case WACOM_REPORT_TPC1FG:
-               case WACOM_REPORT_TPCHID:
-               case WACOM_REPORT_TPCST:
-               case WACOM_REPORT_TPC1FGE:
-                       return wacom_tpc_single_touch(wacom, len);
-
-               case WACOM_REPORT_TPCMT:
-               case WACOM_REPORT_TPCMT2:
-                       return wacom_mt_touch(wacom);
+                       case WACOM_REPORT_TPCMT:
+                       case WACOM_REPORT_TPCMT2:
+                               return wacom_mt_touch(wacom);
 
-               case WACOM_REPORT_PENABLED:
-                       return wacom_tpc_pen(wacom);
+                       }
                }
        }
 
index 22d5eafd681541374817314a1a2db164bb235ce7..5ef2814345ef7f37aa47fd47b8765ce6cc7fad1b 100644 (file)
@@ -343,6 +343,7 @@ config SENSORS_ASB100
 
 config SENSORS_ASPEED
        tristate "ASPEED AST2400/AST2500 PWM and Fan tach driver"
+       select REGMAP
        help
          This driver provides support for ASPEED AST2400/AST2500 PWM
          and Fan Tacho controllers.
index 48403a2115beb68df404766350cfa844974a55e3..9de13d626c6896d379da385ab48da29258e5f3ee 100644 (file)
@@ -7,6 +7,7 @@
  */
 
 #include <linux/clk.h>
+#include <linux/errno.h>
 #include <linux/gpio/consumer.h>
 #include <linux/delay.h>
 #include <linux/hwmon.h>
@@ -494,7 +495,7 @@ static u32 aspeed_get_fan_tach_ch_measure_period(struct aspeed_pwm_tacho_data
        return clk / (clk_unit * div_h * div_l * tacho_div * tacho_unit);
 }
 
-static u32 aspeed_get_fan_tach_ch_rpm(struct aspeed_pwm_tacho_data *priv,
+static int aspeed_get_fan_tach_ch_rpm(struct aspeed_pwm_tacho_data *priv,
                                      u8 fan_tach_ch)
 {
        u32 raw_data, tach_div, clk_source, sec, val;
@@ -510,6 +511,9 @@ static u32 aspeed_get_fan_tach_ch_rpm(struct aspeed_pwm_tacho_data *priv,
        msleep(sec);
 
        regmap_read(priv->regmap, ASPEED_PTCR_RESULT, &val);
+       if (!(val & RESULT_STATUS_MASK))
+               return -ETIMEDOUT;
+
        raw_data = val & RESULT_VALUE_MASK;
        tach_div = priv->type_fan_tach_clock_division[type];
        tach_div = 0x4 << (tach_div * 2);
@@ -561,12 +565,14 @@ static ssize_t show_rpm(struct device *dev, struct device_attribute *attr,
 {
        struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
        int index = sensor_attr->index;
-       u32 rpm;
+       int rpm;
        struct aspeed_pwm_tacho_data *priv = dev_get_drvdata(dev);
 
        rpm = aspeed_get_fan_tach_ch_rpm(priv, index);
+       if (rpm < 0)
+               return rpm;
 
-       return sprintf(buf, "%u\n", rpm);
+       return sprintf(buf, "%d\n", rpm);
 }
 
 static umode_t pwm_is_visible(struct kobject *kobj,
@@ -591,24 +597,23 @@ static umode_t fan_dev_is_visible(struct kobject *kobj,
        return a->mode;
 }
 
-static SENSOR_DEVICE_ATTR(pwm0, 0644,
-                       show_pwm, set_pwm, 0);
 static SENSOR_DEVICE_ATTR(pwm1, 0644,
-                       show_pwm, set_pwm, 1);
+                       show_pwm, set_pwm, 0);
 static SENSOR_DEVICE_ATTR(pwm2, 0644,
-                       show_pwm, set_pwm, 2);
+                       show_pwm, set_pwm, 1);
 static SENSOR_DEVICE_ATTR(pwm3, 0644,
-                       show_pwm, set_pwm, 3);
+                       show_pwm, set_pwm, 2);
 static SENSOR_DEVICE_ATTR(pwm4, 0644,
-                       show_pwm, set_pwm, 4);
+                       show_pwm, set_pwm, 3);
 static SENSOR_DEVICE_ATTR(pwm5, 0644,
-                       show_pwm, set_pwm, 5);
+                       show_pwm, set_pwm, 4);
 static SENSOR_DEVICE_ATTR(pwm6, 0644,
-                       show_pwm, set_pwm, 6);
+                       show_pwm, set_pwm, 5);
 static SENSOR_DEVICE_ATTR(pwm7, 0644,
+                       show_pwm, set_pwm, 6);
+static SENSOR_DEVICE_ATTR(pwm8, 0644,
                        show_pwm, set_pwm, 7);
 static struct attribute *pwm_dev_attrs[] = {
-       &sensor_dev_attr_pwm0.dev_attr.attr,
        &sensor_dev_attr_pwm1.dev_attr.attr,
        &sensor_dev_attr_pwm2.dev_attr.attr,
        &sensor_dev_attr_pwm3.dev_attr.attr,
@@ -616,6 +621,7 @@ static struct attribute *pwm_dev_attrs[] = {
        &sensor_dev_attr_pwm5.dev_attr.attr,
        &sensor_dev_attr_pwm6.dev_attr.attr,
        &sensor_dev_attr_pwm7.dev_attr.attr,
+       &sensor_dev_attr_pwm8.dev_attr.attr,
        NULL,
 };
 
@@ -624,40 +630,39 @@ static const struct attribute_group pwm_dev_group = {
        .is_visible = pwm_is_visible,
 };
 
-static SENSOR_DEVICE_ATTR(fan0_input, 0444,
-               show_rpm, NULL, 0);
 static SENSOR_DEVICE_ATTR(fan1_input, 0444,
-               show_rpm, NULL, 1);
+               show_rpm, NULL, 0);
 static SENSOR_DEVICE_ATTR(fan2_input, 0444,
-               show_rpm, NULL, 2);
+               show_rpm, NULL, 1);
 static SENSOR_DEVICE_ATTR(fan3_input, 0444,
-               show_rpm, NULL, 3);
+               show_rpm, NULL, 2);
 static SENSOR_DEVICE_ATTR(fan4_input, 0444,
-               show_rpm, NULL, 4);
+               show_rpm, NULL, 3);
 static SENSOR_DEVICE_ATTR(fan5_input, 0444,
-               show_rpm, NULL, 5);
+               show_rpm, NULL, 4);
 static SENSOR_DEVICE_ATTR(fan6_input, 0444,
-               show_rpm, NULL, 6);
+               show_rpm, NULL, 5);
 static SENSOR_DEVICE_ATTR(fan7_input, 0444,
-               show_rpm, NULL, 7);
+               show_rpm, NULL, 6);
 static SENSOR_DEVICE_ATTR(fan8_input, 0444,
-               show_rpm, NULL, 8);
+               show_rpm, NULL, 7);
 static SENSOR_DEVICE_ATTR(fan9_input, 0444,
-               show_rpm, NULL, 9);
+               show_rpm, NULL, 8);
 static SENSOR_DEVICE_ATTR(fan10_input, 0444,
-               show_rpm, NULL, 10);
+               show_rpm, NULL, 9);
 static SENSOR_DEVICE_ATTR(fan11_input, 0444,
-               show_rpm, NULL, 11);
+               show_rpm, NULL, 10);
 static SENSOR_DEVICE_ATTR(fan12_input, 0444,
-               show_rpm, NULL, 12);
+               show_rpm, NULL, 11);
 static SENSOR_DEVICE_ATTR(fan13_input, 0444,
-               show_rpm, NULL, 13);
+               show_rpm, NULL, 12);
 static SENSOR_DEVICE_ATTR(fan14_input, 0444,
-               show_rpm, NULL, 14);
+               show_rpm, NULL, 13);
 static SENSOR_DEVICE_ATTR(fan15_input, 0444,
+               show_rpm, NULL, 14);
+static SENSOR_DEVICE_ATTR(fan16_input, 0444,
                show_rpm, NULL, 15);
 static struct attribute *fan_dev_attrs[] = {
-       &sensor_dev_attr_fan0_input.dev_attr.attr,
        &sensor_dev_attr_fan1_input.dev_attr.attr,
        &sensor_dev_attr_fan2_input.dev_attr.attr,
        &sensor_dev_attr_fan3_input.dev_attr.attr,
@@ -673,6 +678,7 @@ static struct attribute *fan_dev_attrs[] = {
        &sensor_dev_attr_fan13_input.dev_attr.attr,
        &sensor_dev_attr_fan14_input.dev_attr.attr,
        &sensor_dev_attr_fan15_input.dev_attr.attr,
+       &sensor_dev_attr_fan16_input.dev_attr.attr,
        NULL
 };
 
@@ -802,7 +808,6 @@ static int aspeed_pwm_tacho_probe(struct platform_device *pdev)
                if (ret)
                        return ret;
        }
-       of_node_put(np);
 
        priv->groups[0] = &pwm_dev_group;
        priv->groups[1] = &fan_dev_group;
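
Two related fixes land above: the sysfs attributes move to the 1-based pwm1/fan1_input names the hwmon convention expects, and the tach read returns int so a failed measurement surfaces as a negative errno instead of a bogus RPM. A compact sketch of that error-or-value return idiom; the status test and conversion are invented stand-ins:

#include <errno.h>
#include <stdio.h>

/* Returns a measured RPM (>= 0) or a negative errno on failure. */
static int read_rpm(unsigned int raw, int status_ok)
{
        if (!status_ok)
                return -ETIMEDOUT;      /* measurement never completed */
        return (int)(raw * 60);         /* made-up conversion */
}

static int show_rpm(char *buf, size_t len, unsigned int raw, int status_ok)
{
        int rpm = read_rpm(raw, status_ok);

        if (rpm < 0)
                return rpm;             /* propagate the errno unchanged */
        return snprintf(buf, len, "%d\n", rpm);
}

int main(void)
{
        char buf[16];

        if (show_rpm(buf, sizeof(buf), 75, 1) > 0)
                fputs(buf, stdout);
        return 0;
}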
index 0ed77eeff31e661fe5a4c6b8426c7d2cd6a10f89..a2e3dd715380c74397a5cf2c1a778786eb12826b 100644 (file)
@@ -178,22 +178,39 @@ static int usb_read(struct i2c_adapter *adapter, int cmd,
                    int value, int index, void *data, int len)
 {
        struct i2c_tiny_usb *dev = (struct i2c_tiny_usb *)adapter->algo_data;
+       void *dmadata = kmalloc(len, GFP_KERNEL);
+       int ret;
+
+       if (!dmadata)
+               return -ENOMEM;
 
        /* do control transfer */
-       return usb_control_msg(dev->usb_dev, usb_rcvctrlpipe(dev->usb_dev, 0),
+       ret = usb_control_msg(dev->usb_dev, usb_rcvctrlpipe(dev->usb_dev, 0),
                               cmd, USB_TYPE_VENDOR | USB_RECIP_INTERFACE |
-                              USB_DIR_IN, value, index, data, len, 2000);
+                              USB_DIR_IN, value, index, dmadata, len, 2000);
+
+       memcpy(data, dmadata, len);
+       kfree(dmadata);
+       return ret;
 }
 
 static int usb_write(struct i2c_adapter *adapter, int cmd,
                     int value, int index, void *data, int len)
 {
        struct i2c_tiny_usb *dev = (struct i2c_tiny_usb *)adapter->algo_data;
+       void *dmadata = kmemdup(data, len, GFP_KERNEL);
+       int ret;
+
+       if (!dmadata)
+               return -ENOMEM;
 
        /* do control transfer */
-       return usb_control_msg(dev->usb_dev, usb_sndctrlpipe(dev->usb_dev, 0),
+       ret = usb_control_msg(dev->usb_dev, usb_sndctrlpipe(dev->usb_dev, 0),
                               cmd, USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
-                              value, index, data, len, 2000);
+                              value, index, dmadata, len, 2000);
+
+       kfree(dmadata);
+       return ret;
 }
 
 static void i2c_tiny_usb_free(struct i2c_tiny_usb *dev)
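
Background for the change above: USB transfer buffers must be separately allocated, DMA-capable memory, so handing usb_control_msg() a caller-supplied (possibly stack) pointer is unsafe; hence the kmalloc()/kmemdup() bounce buffers. A userspace-flavoured sketch of the read side with a stand-in transfer function; unlike the patch, which copies unconditionally, the sketch copies out only on success:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Stand-in for usb_control_msg(); pretend it fills the buffer. */
static int do_transfer(void *buf, int len)
{
        memset(buf, 0xab, len);
        return len;
}

/* Read into caller memory via a freshly allocated bounce buffer. */
static int bounce_read(void *data, int len)
{
        void *dmadata = malloc(len);
        int ret;

        if (!dmadata)
                return -1;              /* -ENOMEM in the kernel */

        ret = do_transfer(dmadata, len);
        if (ret >= 0)
                memcpy(data, dmadata, len);
        free(dmadata);
        return ret;
}

int main(void)
{
        unsigned char out[4] = { 0 };

        if (bounce_read(out, sizeof(out)) >= 0)
                printf("first byte: 0x%02x\n", out[0]);
        return 0;
}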
index 1844770f3ae838bd63f3837bc0f475b0c8b5f1b1..2b4d613a347491295a51932699264c6edcd4bb51 100644 (file)
@@ -1429,7 +1429,7 @@ static void cm_format_paths_from_req(struct cm_req_msg *req_msg,
        primary_path->packet_life_time =
                cm_req_get_primary_local_ack_timeout(req_msg);
        primary_path->packet_life_time -= (primary_path->packet_life_time > 0);
-       sa_path_set_service_id(primary_path, req_msg->service_id);
+       primary_path->service_id = req_msg->service_id;
 
        if (req_msg->alt_local_lid) {
                alt_path->dgid = req_msg->alt_local_gid;
@@ -1452,7 +1452,7 @@ static void cm_format_paths_from_req(struct cm_req_msg *req_msg,
                alt_path->packet_life_time =
                        cm_req_get_alt_local_ack_timeout(req_msg);
                alt_path->packet_life_time -= (alt_path->packet_life_time > 0);
-               sa_path_set_service_id(alt_path, req_msg->service_id);
+               alt_path->service_id = req_msg->service_id;
        }
 }
 
index 91b7a2fe5a55488ce1e5bb886ba19023bd624328..31bb82d8ecd7f19bbee90bd95a83cec7fe5abca7 100644 (file)
@@ -1140,7 +1140,7 @@ static void cma_save_ib_info(struct sockaddr *src_addr,
                        ib->sib_pkey = path->pkey;
                        ib->sib_flowinfo = path->flow_label;
                        memcpy(&ib->sib_addr, &path->sgid, 16);
-                       ib->sib_sid = sa_path_get_service_id(path);
+                       ib->sib_sid = path->service_id;
                        ib->sib_scope_id = 0;
                } else {
                        ib->sib_pkey = listen_ib->sib_pkey;
@@ -1274,8 +1274,7 @@ static int cma_save_req_info(const struct ib_cm_event *ib_event,
                memcpy(&req->local_gid, &req_param->primary_path->sgid,
                       sizeof(req->local_gid));
                req->has_gid    = true;
-               req->service_id =
-                       sa_path_get_service_id(req_param->primary_path);
+               req->service_id = req_param->primary_path->service_id;
                req->pkey       = be16_to_cpu(req_param->primary_path->pkey);
                if (req->pkey != req_param->bth_pkey)
                        pr_warn_ratelimited("RDMA CMA: got different BTH P_Key (0x%x) and primary path P_Key (0x%x)\n"
@@ -1827,7 +1826,8 @@ static struct rdma_id_private *cma_new_conn_id(struct rdma_cm_id *listen_id,
        struct rdma_route *rt;
        const sa_family_t ss_family = listen_id->route.addr.src_addr.ss_family;
        struct sa_path_rec *path = ib_event->param.req_rcvd.primary_path;
-       const __be64 service_id = sa_path_get_service_id(path);
+       const __be64 service_id =
+               ib_event->param.req_rcvd.primary_path->service_id;
        int ret;
 
        id = rdma_create_id(listen_id->route.addr.dev_addr.net,
@@ -2345,9 +2345,8 @@ static int cma_query_ib_route(struct rdma_id_private *id_priv, int timeout_ms,
        path_rec.pkey = cpu_to_be16(ib_addr_get_pkey(dev_addr));
        path_rec.numb_path = 1;
        path_rec.reversible = 1;
-       sa_path_set_service_id(&path_rec,
-                              rdma_get_service_id(&id_priv->id,
-                                                  cma_dst_addr(id_priv)));
+       path_rec.service_id = rdma_get_service_id(&id_priv->id,
+                                                 cma_dst_addr(id_priv));
 
        comp_mask = IB_SA_PATH_REC_DGID | IB_SA_PATH_REC_SGID |
                    IB_SA_PATH_REC_PKEY | IB_SA_PATH_REC_NUMB_PATH |
index cb7d372e4bdf877206d8da9141d46a590395bcd9..d92ab4eaa8f311a44bc3854107d9b7f352c0bca7 100644 (file)
@@ -169,6 +169,16 @@ void ib_mad_cleanup(void);
 int ib_sa_init(void);
 void ib_sa_cleanup(void);
 
+int ibnl_init(void);
+void ibnl_cleanup(void);
+
+/**
+ * Check if there are any listeners to the netlink group
+ * @group: the netlink group ID
+ * Returns 0 if the group has listeners, or a negative value if not.
+ */
+int ibnl_chk_listeners(unsigned int group);
+
 int ib_nl_handle_resolve_resp(struct sk_buff *skb,
                              struct netlink_callback *cb);
 int ib_nl_handle_set_timeout(struct sk_buff *skb,
index b784055423c8346b80e00f98d58f6499e7adc8ef..94931c474d41db72b606f654cb0d334087600ee5 100644 (file)
@@ -37,6 +37,7 @@
 #include <net/net_namespace.h>
 #include <net/sock.h>
 #include <rdma/rdma_netlink.h>
+#include "core_priv.h"
 
 struct ibnl_client {
        struct list_head                list;
@@ -55,7 +56,6 @@ int ibnl_chk_listeners(unsigned int group)
                return -1;
        return 0;
 }
-EXPORT_SYMBOL(ibnl_chk_listeners);
 
 int ibnl_add_client(int index, int nops,
                    const struct ibnl_client_cbs cb_table[])
index e335b09c022ef69c58db15e8c37a43bfce9e8a42..fb7aec4047c8be90d0b70c39f66fdf6606c6fd7e 100644 (file)
@@ -194,7 +194,7 @@ static u32 tid;
        .field_name          = "sa_path_rec:" #field
 
 static const struct ib_field path_rec_table[] = {
-       { PATH_REC_FIELD(ib.service_id),
+       { PATH_REC_FIELD(service_id),
          .offset_words = 0,
          .offset_bits  = 0,
          .size_bits    = 64 },
@@ -296,7 +296,7 @@ static const struct ib_field path_rec_table[] = {
        .field_name          = "sa_path_rec:" #field
 
 static const struct ib_field opa_path_rec_table[] = {
-       { OPA_PATH_REC_FIELD(opa.service_id),
+       { OPA_PATH_REC_FIELD(service_id),
          .offset_words = 0,
          .offset_bits  = 0,
          .size_bits    = 64 },
@@ -774,7 +774,7 @@ static void ib_nl_set_path_rec_attrs(struct sk_buff *skb,
 
        /* Now build the attributes */
        if (comp_mask & IB_SA_PATH_REC_SERVICE_ID) {
-               val64 = be64_to_cpu(sa_path_get_service_id(sa_rec));
+               val64 = be64_to_cpu(sa_rec->service_id);
                nla_put(skb, RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_SERVICE_ID,
                        sizeof(val64), &val64);
        }
index 3dbf811d3c517232e4ebbff5f6970d34d8f518ca..21e60b1e2ff41b1c27e98ebad68e5f4b0ccb7f42 100644 (file)
@@ -58,7 +58,7 @@ static void __ib_umem_release(struct ib_device *dev, struct ib_umem *umem, int d
        for_each_sg(umem->sg_head.sgl, sg, umem->npages, i) {
 
                page = sg_page(sg);
-               if (umem->writable && dirty)
+               if (!PageDirty(page) && umem->writable && dirty)
                        set_page_dirty_lock(page);
                put_page(page);
        }
index 0780b1afefa9d996c870e6982bfc9c5f9b13fb2a..8c4ec564e49583f6d05eab8df5e57e4378582f08 100644 (file)
@@ -321,11 +321,15 @@ int ib_umem_odp_get(struct ib_ucontext *context, struct ib_umem *umem,
                struct vm_area_struct *vma;
                struct hstate *h;
 
+               down_read(&mm->mmap_sem);
                vma = find_vma(mm, ib_umem_start(umem));
-               if (!vma || !is_vm_hugetlb_page(vma))
+               if (!vma || !is_vm_hugetlb_page(vma)) {
+                       up_read(&mm->mmap_sem);
                        return -EINVAL;
+               }
                h = hstate_vma(vma);
                umem->page_shift = huge_page_shift(h);
+               up_read(&mm->mmap_sem);
                umem->hugetlb = 1;
        } else {
                umem->hugetlb = 0;
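
The locking fix above follows the rule that find_vma() may only run with mmap_sem held for read, and that the lock must also drop on the early -EINVAL return. The same shape in a pthread rwlock sketch; the lookup, types and error code are stand-ins:

#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t mmap_lock = PTHREAD_RWLOCK_INITIALIZER;

struct vma { int is_hugetlb; int page_shift; };

static struct vma the_vma = { 1, 21 };

/* Toy lookup; the real find_vma() requires mmap_sem held for read. */
static struct vma *find_vma_locked(unsigned long addr)
{
        return addr ? &the_vma : NULL;
}

static int probe_hugetlb(unsigned long addr, int *page_shift)
{
        struct vma *vma;
        int ret = 0;

        pthread_rwlock_rdlock(&mmap_lock);
        vma = find_vma_locked(addr);
        if (!vma || !vma->is_hugetlb) {
                ret = -1;               /* -EINVAL in the kernel */
                goto out;               /* unlock on the error path too */
        }
        *page_shift = vma->page_shift;
out:
        pthread_rwlock_unlock(&mmap_lock);
        return ret;
}

int main(void)
{
        int shift = 0;

        if (!probe_hugetlb(0x1000, &shift))
                printf("page shift: %d\n", shift);
        return 0;
}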
index 8b9587fe23033fd69708dd60c7e76b18eb2e23ff..94fd989c90600b2640d2fb36f2397c9814160583 100644 (file)
@@ -96,11 +96,11 @@ void ib_copy_qp_attr_to_user(struct ib_uverbs_qp_attr *dst,
 }
 EXPORT_SYMBOL(ib_copy_qp_attr_to_user);
 
-void __ib_copy_path_rec_to_user(struct ib_user_path_rec *dst,
-                               struct sa_path_rec *src)
+static void __ib_copy_path_rec_to_user(struct ib_user_path_rec *dst,
+                                      struct sa_path_rec *src)
 {
-       memcpy(dst->dgid, src->dgid.raw, sizeof src->dgid);
-       memcpy(dst->sgid, src->sgid.raw, sizeof src->sgid);
+       memcpy(dst->dgid, src->dgid.raw, sizeof(src->dgid));
+       memcpy(dst->sgid, src->sgid.raw, sizeof(src->sgid));
 
        dst->dlid               = htons(ntohl(sa_path_get_dlid(src)));
        dst->slid               = htons(ntohl(sa_path_get_slid(src)));
index b6fe45924c6ed5a3a835c89f00b9265a940e57ef..0910faf3587b547e873bc4e5572e7defd93623b3 100644 (file)
@@ -488,6 +488,7 @@ static int _put_ep_safe(struct c4iw_dev *dev, struct sk_buff *skb)
 
        ep = *((struct c4iw_ep **)(skb->cb + 2 * sizeof(void *)));
        release_ep_resources(ep);
+       kfree_skb(skb);
        return 0;
 }
 
@@ -498,6 +499,7 @@ static int _put_pass_ep_safe(struct c4iw_dev *dev, struct sk_buff *skb)
        ep = *((struct c4iw_ep **)(skb->cb + 2 * sizeof(void *)));
        c4iw_put_ep(&ep->parent_ep->com);
        release_ep_resources(ep);
+       kfree_skb(skb);
        return 0;
 }
 
@@ -569,11 +571,13 @@ static void abort_arp_failure(void *handle, struct sk_buff *skb)
 
        pr_debug("%s rdev %p\n", __func__, rdev);
        req->cmd = CPL_ABORT_NO_RST;
+       skb_get(skb);
        ret = c4iw_ofld_send(rdev, skb);
        if (ret) {
                __state_set(&ep->com, DEAD);
                queue_arp_failure_cpl(ep, skb, FAKE_CPL_PUT_EP_SAFE);
-       }
+       } else
+               kfree_skb(skb);
 }
 
 static int send_flowc(struct c4iw_ep *ep)
@@ -2517,7 +2521,8 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
                goto reject;
        }
 
-       hdrs = sizeof(struct iphdr) + sizeof(struct tcphdr) +
+       hdrs = ((iptype == 4) ? sizeof(struct iphdr) : sizeof(struct ipv6hdr)) +
+              sizeof(struct tcphdr) +
               ((enable_tcp_timestamps && req->tcpopt.tstamp) ? 12 : 0);
        if (peer_mss && child_ep->mtu > (peer_mss + hdrs))
                child_ep->mtu = peer_mss + hdrs;
index 329fb65e8fb0edfafd19d6fbea17196f810f29d5..f96a96dbcf1ff4e40b75de36122a3efd0405faae 100644 (file)
@@ -971,7 +971,7 @@ static struct c4iw_dev *c4iw_alloc(const struct cxgb4_lld_info *infop)
                 devp->rdev.lldi.sge_egrstatuspagesize);
 
        devp->rdev.hw_queue.t4_eq_status_entries =
-               devp->rdev.lldi.sge_ingpadboundary > 64 ? 2 : 1;
+               devp->rdev.lldi.sge_egrstatuspagesize / 64;
        devp->rdev.hw_queue.t4_max_eq_size = 65520;
        devp->rdev.hw_queue.t4_max_iq_size = 65520;
        devp->rdev.hw_queue.t4_max_rq_size = 8192 -
index 5d6b1eeaa9a0a14c1088655cd5f049b5d3defa91..2ba00b89df6a046bba536cfe889c373d9063ced0 100644 (file)
@@ -6312,25 +6312,38 @@ static void handle_8051_request(struct hfi1_pportdata *ppd)
        }
 }
 
-static void write_global_credit(struct hfi1_devdata *dd,
-                               u8 vau, u16 total, u16 shared)
+/*
+ * Set up the allocation unit value.
+ */
+void set_up_vau(struct hfi1_devdata *dd, u8 vau)
 {
-       write_csr(dd, SEND_CM_GLOBAL_CREDIT,
-                 ((u64)total <<
-                  SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SHIFT) |
-                 ((u64)shared <<
-                  SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SHIFT) |
-                 ((u64)vau << SEND_CM_GLOBAL_CREDIT_AU_SHIFT));
+       u64 reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
+
+       /* do not modify other values in the register */
+       reg &= ~SEND_CM_GLOBAL_CREDIT_AU_SMASK;
+       reg |= (u64)vau << SEND_CM_GLOBAL_CREDIT_AU_SHIFT;
+       write_csr(dd, SEND_CM_GLOBAL_CREDIT, reg);
 }
 
 /*
  * Set up initial VL15 credits of the remote.  Assumes the rest of
- * the CM credit registers are zero from a previous global or credit reset .
+ * the CM credit registers are zero from a previous global or credit reset.
+ * Shared limit for VL15 will always be 0.
  */
-void set_up_vl15(struct hfi1_devdata *dd, u8 vau, u16 vl15buf)
+void set_up_vl15(struct hfi1_devdata *dd, u16 vl15buf)
 {
-       /* leave shared count at zero for both global and VL15 */
-       write_global_credit(dd, vau, vl15buf, 0);
+       u64 reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
+
+       /* set initial values for total and shared credit limit */
+       reg &= ~(SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SMASK |
+                SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SMASK);
+
+       /*
+        * Set total limit to be equal to VL15 credits.
+        * Leave shared limit at 0.
+        */
+       reg |= (u64)vl15buf << SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SHIFT;
+       write_csr(dd, SEND_CM_GLOBAL_CREDIT, reg);
 
        write_csr(dd, SEND_CM_CREDIT_VL15, (u64)vl15buf
                  << SEND_CM_CREDIT_VL15_DEDICATED_LIMIT_VL_SHIFT);
@@ -6348,9 +6361,11 @@ void reset_link_credits(struct hfi1_devdata *dd)
        for (i = 0; i < TXE_NUM_DATA_VL; i++)
                write_csr(dd, SEND_CM_CREDIT_VL + (8 * i), 0);
        write_csr(dd, SEND_CM_CREDIT_VL15, 0);
-       write_global_credit(dd, 0, 0, 0);
+       write_csr(dd, SEND_CM_GLOBAL_CREDIT, 0);
        /* reset the CM block */
        pio_send_control(dd, PSC_CM_RESET);
+       /* reset cached value */
+       dd->vl15buf_cached = 0;
 }
 
 /* convert a vCU to a CU */
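
Both new helpers above use the same read-modify-write shape so that unrelated
fields of SEND_CM_GLOBAL_CREDIT survive the update (the removed
write_global_credit() rewrote the whole register). As a generic sketch, the
driver open-codes this per field and write_csr_field() is hypothetical:

        static inline void write_csr_field(struct hfi1_devdata *dd, u32 csr,
                                           u64 smask, int shift, u64 val)
        {
                u64 reg = read_csr(dd, csr);

                reg &= ~smask;                  /* clear only the target field */
                reg |= (val << shift) & smask;  /* insert the new value */
                write_csr(dd, csr, reg);
        }
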
@@ -6839,24 +6854,35 @@ void handle_link_up(struct work_struct *work)
 {
        struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
                                                  link_up_work);
+       struct hfi1_devdata *dd = ppd->dd;
+
        set_link_state(ppd, HLS_UP_INIT);
 
        /* cache the read of DC_LCB_STS_ROUND_TRIP_LTP_CNT */
-       read_ltp_rtt(ppd->dd);
+       read_ltp_rtt(dd);
        /*
         * OPA specifies that certain counters are cleared on a transition
         * to link up, so do that.
         */
-       clear_linkup_counters(ppd->dd);
+       clear_linkup_counters(dd);
        /*
         * And (re)set link up default values.
         */
        set_linkup_defaults(ppd);
 
+       /*
+        * Set VL15 credits. Use cached value from verify cap interrupt.
+        * In case of quick linkup or simulator, vl15 value will be set by
+        * handle_linkup_change. VerifyCap interrupt handler will not be
+        * called in those scenarios.
+        */
+       if (!(quick_linkup || dd->icode == ICODE_FUNCTIONAL_SIMULATOR))
+               set_up_vl15(dd, dd->vl15buf_cached);
+
        /* enforce link speed enabled */
        if ((ppd->link_speed_active & ppd->link_speed_enabled) == 0) {
                /* oops - current speed is not enabled, bounce */
-               dd_dev_err(ppd->dd,
+               dd_dev_err(dd,
                           "Link speed active 0x%x is outside enabled 0x%x, downing link\n",
                           ppd->link_speed_active, ppd->link_speed_enabled);
                set_link_down_reason(ppd, OPA_LINKDOWN_REASON_SPEED_POLICY, 0,
@@ -7357,7 +7383,14 @@ void handle_verify_cap(struct work_struct *work)
         */
        if (vau == 0)
                vau = 1;
-       set_up_vl15(dd, vau, vl15buf);
+       set_up_vau(dd, vau);
+
+       /*
+        * Set VL15 credits to 0 in global credit register. Cache remote VL15
+        * credits value and wait for the link-up interrupt to set it.
+        */
+       set_up_vl15(dd, 0);
+       dd->vl15buf_cached = vl15buf;
 
        /* set up the LCB CRC mode */
        crc_mask = ppd->port_crc_mode_enabled & partner_supported_crc;
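
Taken together with the handle_link_up() hunk above, this splits VL15 credit
programming into two phases: the VerifyCap interrupt caches the remote's value
while keeping the credit count at 0, and the link-up handler commits it, so no
VL15 MAD can arrive before this HFI is ready. Condensed from the hunks:

        /* VerifyCap interrupt: program the AU, cache credits, keep them 0 */
        set_up_vau(dd, vau);
        set_up_vl15(dd, 0);
        dd->vl15buf_cached = vl15buf;

        /* link-up work (quick linkup / simulator set defaults instead): */
        if (!(quick_linkup || dd->icode == ICODE_FUNCTIONAL_SIMULATOR))
                set_up_vl15(dd, dd->vl15buf_cached);
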
index 5bfa839d1c48bc14939f680e74b696ac43105119..793514f1d15fb4a82357ba755dd97c7855f076ad 100644 (file)
 #define SEND_CM_CTRL_FORCE_CREDIT_MODE_SMASK 0x8ull
 #define SEND_CM_CTRL_RESETCSR 0x0000000000000020ull
 #define SEND_CM_GLOBAL_CREDIT (TXE + 0x000000000508)
+#define SEND_CM_GLOBAL_CREDIT_AU_MASK 0x7ull
 #define SEND_CM_GLOBAL_CREDIT_AU_SHIFT 16
+#define SEND_CM_GLOBAL_CREDIT_AU_SMASK 0x70000ull
 #define SEND_CM_GLOBAL_CREDIT_RESETCSR 0x0000094000030000ull
 #define SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_MASK 0xFFFFull
 #define SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SHIFT 0
index da322e6668cc5893262c3316486147cac6df1ca9..414a04a481c2abab3b3fe44d3949d591e684c065 100644 (file)
@@ -1045,6 +1045,14 @@ struct hfi1_devdata {
        /* initial vl15 credits to use */
        u16 vl15_init;
 
+       /*
+        * Cached value for vl15buf, read during verify cap interrupt. VL15
+        * credits are to be kept at 0 and set when handling the link-up
+        * interrupt. This removes the possibility of receiving VL15 MAD
+        * packets before this HFI is ready.
+        */
+       u16 vl15buf_cached;
+
        /* Misc small ints */
        u8 n_krcv_queues;
        u8 qos_shift;
@@ -1598,7 +1606,8 @@ int hfi1_rcvbuf_validate(u32 size, u8 type, u16 *encode);
 int fm_get_table(struct hfi1_pportdata *ppd, int which, void *t);
 int fm_set_table(struct hfi1_pportdata *ppd, int which, void *t);
 
-void set_up_vl15(struct hfi1_devdata *dd, u8 vau, u16 vl15buf);
+void set_up_vau(struct hfi1_devdata *dd, u8 vau);
+void set_up_vl15(struct hfi1_devdata *dd, u16 vl15buf);
 void reset_link_credits(struct hfi1_devdata *dd);
 void assign_remote_cm_au_table(struct hfi1_devdata *dd, u8 vcu);
 
index ba265d0ae93b4a96d6c29364f30b34c6046c335b..04a5082d5ac55259d992db8843cd361797b2992c 100644 (file)
@@ -130,7 +130,8 @@ void handle_linkup_change(struct hfi1_devdata *dd, u32 linkup)
                 * the remote values.  Both sides must be using the same values.
                 */
                if (quick_linkup || dd->icode == ICODE_FUNCTIONAL_SIMULATOR) {
-                       set_up_vl15(dd, dd->vau, dd->vl15_init);
+                       set_up_vau(dd, dd->vau);
+                       set_up_vl15(dd, dd->vl15_init);
                        assign_remote_cm_au_table(dd, dd->vcu);
                }
 
index 93faf86d54b620fb2cc621844932d811c17868dd..6a9f6f9819e1a326b0ed5f15f831d10cec2620a6 100644 (file)
@@ -207,8 +207,8 @@ int hfi1_pcie_ddinit(struct hfi1_devdata *dd, struct pci_dev *pdev)
        /*
         * Save BARs and command to rewrite after device reset.
         */
-       dd->pcibar0 = addr;
-       dd->pcibar1 = addr >> 32;
+       pci_read_config_dword(dd->pcidev, PCI_BASE_ADDRESS_0, &dd->pcibar0);
+       pci_read_config_dword(dd->pcidev, PCI_BASE_ADDRESS_1, &dd->pcibar1);
        pci_read_config_dword(dd->pcidev, PCI_ROM_ADDRESS, &dd->pci_rom);
        pci_read_config_word(dd->pcidev, PCI_COMMAND, &dd->pci_command);
        pcie_capability_read_word(dd->pcidev, PCI_EXP_DEVCTL, &dd->pcie_devctl);
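
The fix above matters because `addr` was the mapped BAR cookie, not the raw
config-space contents; reading the registers directly captures exactly what
must be rewritten after a device reset. The save/restore pairing, sketched
(the restore side is assumed from the surrounding driver):

        u32 bar0, bar1;
        u16 command;

        /* save before reset */
        pci_read_config_dword(pdev, PCI_BASE_ADDRESS_0, &bar0);
        pci_read_config_dword(pdev, PCI_BASE_ADDRESS_1, &bar1);
        pci_read_config_word(pdev, PCI_COMMAND, &command);

        /* ... device reset ... */

        /* restore afterwards */
        pci_write_config_dword(pdev, PCI_BASE_ADDRESS_0, bar0);
        pci_write_config_dword(pdev, PCI_BASE_ADDRESS_1, bar1);
        pci_write_config_word(pdev, PCI_COMMAND, command);
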
index 069bdaf061ab923cbc8b123ab182806fdb3c4dac..1080778a1f7c4a38816ce02058f63baae862d89e 100644 (file)
@@ -2159,8 +2159,11 @@ send_last:
                ret = hfi1_rvt_get_rwqe(qp, 1);
                if (ret < 0)
                        goto nack_op_err;
-               if (!ret)
+               if (!ret) {
+                       /* peer will send again */
+                       rvt_put_ss(&qp->r_sge);
                        goto rnr_nak;
+               }
                wc.ex.imm_data = ohdr->u.rc.imm_data;
                wc.wc_flags = IB_WC_WITH_IMM;
                goto send_last;
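
The rvt_put_ss() added above releases the references already taken while
building the receive SGE state: with no receive WQE available the responder
sends an RNR NAK and the peer retransmits, so the retry must start clean or
those references leak. The same fix is applied to qib further below. In
sketch form (get_rwqe stands in for hfi1_rvt_get_rwqe()):

        ret = get_rwqe(qp, 1);
        if (ret < 0)
                goto nack_op_err;
        if (!ret) {                      /* no WQE: peer will send again */
                rvt_put_ss(&qp->r_sge);  /* drop refs held by SGE state */
                goto rnr_nak;
        }
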
index 50d140d25e38fa3cce8b4512b86099c633f1bc6c..2f3bbcac1e3492ff6cfeb5d92ebef73bda8b59fd 100644 (file)
@@ -196,7 +196,8 @@ static const struct sysfs_ops port_cc_sysfs_ops = {
 };
 
 static struct attribute *port_cc_default_attributes[] = {
-       &cc_prescan_attr.attr
+       &cc_prescan_attr.attr,
+       NULL
 };
 
 static struct kobj_type port_cc_ktype = {
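
The added NULL entry above is not cosmetic: sysfs walks a kobj_type's
default attribute array until it hits a NULL sentinel, so an unterminated
array is read past its end. Roughly (create_file stands in for the real
sysfs internals):

        static struct attribute *attrs[] = {
                &cc_prescan_attr.attr,
                NULL,                    /* sentinel sysfs stops at */
        };

        /* consumer side, schematically: */
        for (i = 0; ktype->default_attrs[i]; i++)
                create_file(kobj, ktype->default_attrs[i]);
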
index f3bc01bce483fe5d81ba08e2a60212e4f0f32b98..6ae98aa7f74ebb14f4ce2e9e3cee7c629270253f 100644 (file)
@@ -784,7 +784,6 @@ static void i40iw_build_mpa_v2(struct i40iw_cm_node *cm_node,
        }
 
        ctrl_ird |= IETF_PEER_TO_PEER;
-       ctrl_ird |= IETF_FLPDU_ZERO_LEN;
 
        switch (mpa_key) {
        case MPA_KEY_REQUEST:
@@ -2446,8 +2445,8 @@ static void i40iw_handle_rcv_mpa(struct i40iw_cm_node *cm_node,
                } else {
                        type = I40IW_CM_EVENT_CONNECTED;
                        cm_node->state = I40IW_CM_STATE_OFFLOADED;
-                       i40iw_send_ack(cm_node);
                }
+               i40iw_send_ack(cm_node);
                break;
        default:
                pr_err("%s wrong cm_node state =%d\n", __func__, cm_node->state);
index f82483b3d1e7dc205fa2f6a5e8037fd4d6866eab..a027e2072477aef12a230fdc79a1a1d6668bf15c 100644 (file)
@@ -285,28 +285,20 @@ void i40iw_change_l2params(struct i40iw_sc_vsi *vsi, struct i40iw_l2params *l2pa
        struct i40iw_sc_dev *dev = vsi->dev;
        struct i40iw_sc_qp *qp = NULL;
        bool qs_handle_change = false;
-       bool mss_change = false;
        unsigned long flags;
        u16 qs_handle;
        int i;
 
-       if (vsi->mss != l2params->mss) {
-               mss_change = true;
-               vsi->mss = l2params->mss;
-       }
+       vsi->mss = l2params->mss;
 
        i40iw_fill_qos_list(l2params->qs_handle_list);
        for (i = 0; i < I40IW_MAX_USER_PRIORITY; i++) {
                qs_handle = l2params->qs_handle_list[i];
                if (vsi->qos[i].qs_handle != qs_handle)
                        qs_handle_change = true;
-               else if (!mss_change)
-                       continue;       /* no MSS nor qs handle change */
                spin_lock_irqsave(&vsi->qos[i].lock, flags);
                qp = i40iw_get_qp(&vsi->qos[i].qplist, qp);
                while (qp) {
-                       if (mss_change)
-                               i40iw_qp_mss_modify(dev, qp);
                        if (qs_handle_change) {
                                qp->qs_handle = qs_handle;
                                /* issue cqp suspend command */
@@ -2395,7 +2387,6 @@ static enum i40iw_status_code i40iw_sc_qp_modify(
 
        set_64bit_val(wqe,
                      8,
-                     LS_64(info->new_mss, I40IW_CQPSQ_QP_NEWMSS) |
                      LS_64(term_len, I40IW_CQPSQ_QP_TERMLEN));
 
        set_64bit_val(wqe, 16, qp->hw_host_ctx_pa);
@@ -2410,7 +2401,6 @@ static enum i40iw_status_code i40iw_sc_qp_modify(
                 LS_64(info->cq_num_valid, I40IW_CQPSQ_QP_CQNUMVALID) |
                 LS_64(info->force_loopback, I40IW_CQPSQ_QP_FORCELOOPBACK) |
                 LS_64(qp->qp_type, I40IW_CQPSQ_QP_QPTYPE) |
-                LS_64(info->mss_change, I40IW_CQPSQ_QP_MSSCHANGE) |
                 LS_64(info->static_rsrc, I40IW_CQPSQ_QP_STATRSRC) |
                 LS_64(info->remove_hash_idx, I40IW_CQPSQ_QP_REMOVEHASHENTRY) |
                 LS_64(term_actions, I40IW_CQPSQ_QP_TERMACT) |
index 2728af3103ce9ae285ab8c0b88980e716a1cb5e4..a3f18a22f5ed1787794031eb8a1765ab2804cdc4 100644 (file)
@@ -1319,13 +1319,13 @@ static enum i40iw_status_code i40iw_initialize_dev(struct i40iw_device *iwdev,
        status = i40iw_obj_aligned_mem(iwdev, &mem, I40IW_QUERY_FPM_BUF_SIZE,
                                       I40IW_FPM_QUERY_BUF_ALIGNMENT_MASK);
        if (status)
-               goto exit;
+               goto error;
        info.fpm_query_buf_pa = mem.pa;
        info.fpm_query_buf = mem.va;
        status = i40iw_obj_aligned_mem(iwdev, &mem, I40IW_COMMIT_FPM_BUF_SIZE,
                                       I40IW_FPM_COMMIT_BUF_ALIGNMENT_MASK);
        if (status)
-               goto exit;
+               goto error;
        info.fpm_commit_buf_pa = mem.pa;
        info.fpm_commit_buf = mem.va;
        info.hmc_fn_id = ldev->fid;
@@ -1347,11 +1347,9 @@ static enum i40iw_status_code i40iw_initialize_dev(struct i40iw_device *iwdev,
        info.exception_lan_queue = 1;
        info.vchnl_send = i40iw_virtchnl_send;
        status = i40iw_device_init(&iwdev->sc_dev, &info);
-exit:
-       if (status) {
-               kfree(iwdev->hmc_info_mem);
-               iwdev->hmc_info_mem = NULL;
-       }
+
+       if (status)
+               goto error;
        memset(&vsi_info, 0, sizeof(vsi_info));
        vsi_info.dev = &iwdev->sc_dev;
        vsi_info.back_vsi = (void *)iwdev;
@@ -1362,11 +1360,19 @@ exit:
                memset(&stats_info, 0, sizeof(stats_info));
                stats_info.fcn_id = ldev->fid;
                stats_info.pestat = kzalloc(sizeof(*stats_info.pestat), GFP_KERNEL);
+               if (!stats_info.pestat) {
+                       status = I40IW_ERR_NO_MEMORY;
+                       goto error;
+               }
                stats_info.stats_initialize = true;
                if (stats_info.pestat)
                        i40iw_vsi_stats_init(&iwdev->vsi, &stats_info);
        }
        return status;
+error:
+       kfree(iwdev->hmc_info_mem);
+       iwdev->hmc_info_mem = NULL;
+       return status;
 }
 
 /**
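
The i40iw_initialize_dev() rework above funnels every failure path through a
single `error:` label that frees hmc_info_mem, which also lets the new pestat
allocation failure reuse the same cleanup. The idiom, in sketch form with
hypothetical names:

        static int init_dev(struct ctx *c)
        {
                int status;

                status = alloc_a(c);
                if (status)
                        goto error;
                status = alloc_b(c);
                if (status)
                        goto error;
                return 0;

        error:          /* one place to unwind, whatever failed */
                kfree(c->mem);
                c->mem = NULL;
                return status;
        }
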
index aa66c1c63dfa4b0879eecfdb4a46f58f3279de98..f27be3e7830bb438f5543d88ef35eae1c4d93586 100644 (file)
@@ -199,7 +199,6 @@ void i40iw_cqp_spawn_worker(struct i40iw_sc_dev *dev,
                            struct i40iw_virtchnl_work_info *work_info, u32 iw_vf_idx);
 void *i40iw_remove_head(struct list_head *list);
 void i40iw_qp_suspend_resume(struct i40iw_sc_dev *dev, struct i40iw_sc_qp *qp, bool suspend);
-void i40iw_qp_mss_modify(struct i40iw_sc_dev *dev, struct i40iw_sc_qp *qp);
 
 void i40iw_term_modify_qp(struct i40iw_sc_qp *qp, u8 next_state, u8 term, u8 term_len);
 void i40iw_terminate_done(struct i40iw_sc_qp *qp, int timeout_occurred);
index 7b76259752b0062e5cf16f7bc097f5cd4b66098e..959ec81fba99ca6499f2e8d7d68f0cb073c86646 100644 (file)
@@ -541,7 +541,6 @@ struct i40iw_create_qp_info {
 struct i40iw_modify_qp_info {
        u64 rx_win0;
        u64 rx_win1;
-       u16 new_mss;
        u8 next_iwarp_state;
        u8 termlen;
        bool ord_valid;
@@ -554,7 +553,6 @@ struct i40iw_modify_qp_info {
        bool dont_send_term;
        bool dont_send_fin;
        bool cached_var_valid;
-       bool mss_change;
        bool force_loopback;
 };
 
index 409a3781e735db6f2072bde350815beed380425e..56d986924a4c1708216684f776f4705451a65f79 100644 (file)
@@ -756,23 +756,6 @@ void i40iw_qp_suspend_resume(struct i40iw_sc_dev *dev, struct i40iw_sc_qp *qp, b
                i40iw_pr_err("CQP-OP QP Suspend/Resume fail");
 }
 
-/**
- * i40iw_qp_mss_modify - modify mss for qp
- * @dev: hardware control device structure
- * @qp: hardware control qp
- */
-void i40iw_qp_mss_modify(struct i40iw_sc_dev *dev, struct i40iw_sc_qp *qp)
-{
-       struct i40iw_device *iwdev = (struct i40iw_device *)dev->back_dev;
-       struct i40iw_qp *iwqp = (struct i40iw_qp *)qp->back_qp;
-       struct i40iw_modify_qp_info info;
-
-       memset(&info, 0, sizeof(info));
-       info.mss_change = true;
-       info.new_mss = qp->vsi->mss;
-       i40iw_hw_modify_qp(iwdev, iwqp, &info, false);
-}
-
 /**
  * i40iw_term_modify_qp - modify qp for term message
  * @qp: hardware control qp
index f4d13683a403a6369c61d40d0a19cc4e5124334d..48fd327f876b08b5b246b41425ff997e64cf861d 100644 (file)
@@ -443,10 +443,7 @@ enum i40iw_status_code i40iw_vchnl_recv_pf(struct i40iw_sc_dev *dev,
        if (!dev->vchnl_up)
                return I40IW_ERR_NOT_READY;
        if (vchnl_msg->iw_op_code == I40IW_VCHNL_OP_GET_VER) {
-               if (vchnl_msg->iw_op_ver != I40IW_VCHNL_OP_GET_VER_V0)
-                       vchnl_pf_send_get_ver_resp(dev, vf_id, vchnl_msg);
-               else
-                       vchnl_pf_send_get_ver_resp(dev, vf_id, vchnl_msg);
+               vchnl_pf_send_get_ver_resp(dev, vf_id, vchnl_msg);
                return I40IW_SUCCESS;
        }
        for (iw_vf_idx = 0; iw_vf_idx < I40IW_MAX_PE_ENABLED_VF_COUNT; iw_vf_idx++) {
index b4694717f6f301f4a2d6b407f18b2c42614ee24e..21d31cb1325f5fc0271f093e5ec069641b8b9ffe 100644 (file)
@@ -1578,6 +1578,7 @@ static void mlx4_ib_multiplex_mad(struct mlx4_ib_demux_pv_ctx *ctx, struct ib_wc
        if (port < 0)
                return;
        ah.av.ib.port_pd = cpu_to_be32(port << 24 | (be32_to_cpu(ah.av.ib.port_pd) & 0xffffff));
+       ah.ibah.type = rdma_ah_find_type(&dev->ib_dev, port);
 
        mlx4_ib_query_ah(&ah.ibah, &ah_attr);
        if (rdma_ah_get_ah_flags(&ah_attr) & IB_AH_GRH)
index d45772da09635c2164f4cef8bcf5255c17fe8cff..0c79983c8b1a0a4e6189fb5e02439ff3522a9098 100644 (file)
@@ -2979,6 +2979,18 @@ error_0:
        return ret;
 }
 
+static u8 mlx5_get_umr_fence(u8 umr_fence_cap)
+{
+       switch (umr_fence_cap) {
+       case MLX5_CAP_UMR_FENCE_NONE:
+               return MLX5_FENCE_MODE_NONE;
+       case MLX5_CAP_UMR_FENCE_SMALL:
+               return MLX5_FENCE_MODE_INITIATOR_SMALL;
+       default:
+               return MLX5_FENCE_MODE_STRONG_ORDERING;
+       }
+}
+
 static int create_dev_resources(struct mlx5_ib_resources *devr)
 {
        struct ib_srq_init_attr attr;
@@ -3693,6 +3705,8 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
 
        mlx5_ib_internal_fill_odp_caps(dev);
 
+       dev->umr_fence = mlx5_get_umr_fence(MLX5_CAP_GEN(mdev, umr_fence));
+
        if (MLX5_CAP_GEN(mdev, imaicl)) {
                dev->ib_dev.alloc_mw            = mlx5_ib_alloc_mw;
                dev->ib_dev.dealloc_mw          = mlx5_ib_dealloc_mw;
index 38c877bc45e592dbbe9670b3c9b624b90b6df30d..bdcf25410c99df7f57e280f2672eb86b8e3205fd 100644 (file)
@@ -349,7 +349,7 @@ struct mlx5_ib_qp {
        struct mlx5_ib_wq       rq;
 
        u8                      sq_signal_bits;
-       u8                      fm_cache;
+       u8                      next_fence;
        struct mlx5_ib_wq       sq;
 
        /* serialize qp state modifications
@@ -654,6 +654,7 @@ struct mlx5_ib_dev {
        struct mlx5_ib_port     *port;
        struct mlx5_sq_bfreg     bfreg;
        struct mlx5_sq_bfreg     fp_bfreg;
+       u8                              umr_fence;
 };
 
 static inline struct mlx5_ib_cq *to_mibcq(struct mlx5_core_cq *mcq)
index 93959e1e43a3da5e2f71a89c8c12bdc61368e3e0..ebb6768684de372d755cff4c6f92c768edb9045b 100644 (file)
@@ -3738,24 +3738,6 @@ static void dump_wqe(struct mlx5_ib_qp *qp, int idx, int size_16)
        }
 }
 
-static u8 get_fence(u8 fence, struct ib_send_wr *wr)
-{
-       if (unlikely(wr->opcode == IB_WR_LOCAL_INV &&
-                    wr->send_flags & IB_SEND_FENCE))
-               return MLX5_FENCE_MODE_STRONG_ORDERING;
-
-       if (unlikely(fence)) {
-               if (wr->send_flags & IB_SEND_FENCE)
-                       return MLX5_FENCE_MODE_SMALL_AND_FENCE;
-               else
-                       return fence;
-       } else if (unlikely(wr->send_flags & IB_SEND_FENCE)) {
-               return MLX5_FENCE_MODE_FENCE;
-       }
-
-       return 0;
-}
-
 static int begin_wqe(struct mlx5_ib_qp *qp, void **seg,
                     struct mlx5_wqe_ctrl_seg **ctrl,
                     struct ib_send_wr *wr, unsigned *idx,
@@ -3784,8 +3766,7 @@ static int begin_wqe(struct mlx5_ib_qp *qp, void **seg,
 static void finish_wqe(struct mlx5_ib_qp *qp,
                       struct mlx5_wqe_ctrl_seg *ctrl,
                       u8 size, unsigned idx, u64 wr_id,
-                      int nreq, u8 fence, u8 next_fence,
-                      u32 mlx5_opcode)
+                      int nreq, u8 fence, u32 mlx5_opcode)
 {
        u8 opmod = 0;
 
@@ -3793,7 +3774,6 @@ static void finish_wqe(struct mlx5_ib_qp *qp,
                                             mlx5_opcode | ((u32)opmod << 24));
        ctrl->qpn_ds = cpu_to_be32(size | (qp->trans_qp.base.mqp.qpn << 8));
        ctrl->fm_ce_se |= fence;
-       qp->fm_cache = next_fence;
        if (unlikely(qp->wq_sig))
                ctrl->signature = wq_sig(ctrl);
 
@@ -3853,7 +3833,6 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
                        goto out;
                }
 
-               fence = qp->fm_cache;
                num_sge = wr->num_sge;
                if (unlikely(num_sge > qp->sq.max_gs)) {
                        mlx5_ib_warn(dev, "\n");
@@ -3870,6 +3849,19 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
                        goto out;
                }
 
+               if (wr->opcode == IB_WR_LOCAL_INV ||
+                   wr->opcode == IB_WR_REG_MR) {
+                       fence = dev->umr_fence;
+                       next_fence = MLX5_FENCE_MODE_INITIATOR_SMALL;
+               } else if (wr->send_flags & IB_SEND_FENCE) {
+                       if (qp->next_fence)
+                               fence = MLX5_FENCE_MODE_SMALL_AND_FENCE;
+                       else
+                               fence = MLX5_FENCE_MODE_FENCE;
+               } else {
+                       fence = qp->next_fence;
+               }
+
                switch (ibqp->qp_type) {
                case IB_QPT_XRC_INI:
                        xrc = seg;
@@ -3896,7 +3888,6 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
                                goto out;
 
                        case IB_WR_LOCAL_INV:
-                               next_fence = MLX5_FENCE_MODE_INITIATOR_SMALL;
                                qp->sq.wr_data[idx] = IB_WR_LOCAL_INV;
                                ctrl->imm = cpu_to_be32(wr->ex.invalidate_rkey);
                                set_linv_wr(qp, &seg, &size);
@@ -3904,7 +3895,6 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
                                break;
 
                        case IB_WR_REG_MR:
-                               next_fence = MLX5_FENCE_MODE_INITIATOR_SMALL;
                                qp->sq.wr_data[idx] = IB_WR_REG_MR;
                                ctrl->imm = cpu_to_be32(reg_wr(wr)->key);
                                err = set_reg_wr(qp, reg_wr(wr), &seg, &size);
@@ -3927,9 +3917,8 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
                                        goto out;
                                }
 
-                               finish_wqe(qp, ctrl, size, idx, wr->wr_id,
-                                          nreq, get_fence(fence, wr),
-                                          next_fence, MLX5_OPCODE_UMR);
+                               finish_wqe(qp, ctrl, size, idx, wr->wr_id, nreq,
+                                          fence, MLX5_OPCODE_UMR);
                                /*
                                 * SET_PSV WQEs are not signaled and solicited
                                 * on error
@@ -3954,9 +3943,8 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
                                        goto out;
                                }
 
-                               finish_wqe(qp, ctrl, size, idx, wr->wr_id,
-                                          nreq, get_fence(fence, wr),
-                                          next_fence, MLX5_OPCODE_SET_PSV);
+                               finish_wqe(qp, ctrl, size, idx, wr->wr_id, nreq,
+                                          fence, MLX5_OPCODE_SET_PSV);
                                err = begin_wqe(qp, &seg, &ctrl, wr,
                                                &idx, &size, nreq);
                                if (err) {
@@ -3966,7 +3954,6 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
                                        goto out;
                                }
 
-                               next_fence = MLX5_FENCE_MODE_INITIATOR_SMALL;
                                err = set_psv_wr(&sig_handover_wr(wr)->sig_attrs->wire,
                                                 mr->sig->psv_wire.psv_idx, &seg,
                                                 &size);
@@ -3976,9 +3963,9 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
                                        goto out;
                                }
 
-                               finish_wqe(qp, ctrl, size, idx, wr->wr_id,
-                                          nreq, get_fence(fence, wr),
-                                          next_fence, MLX5_OPCODE_SET_PSV);
+                               finish_wqe(qp, ctrl, size, idx, wr->wr_id, nreq,
+                                          fence, MLX5_OPCODE_SET_PSV);
+                               qp->next_fence = MLX5_FENCE_MODE_INITIATOR_SMALL;
                                num_sge = 0;
                                goto skip_psv;
 
@@ -4089,8 +4076,8 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
                        }
                }
 
-               finish_wqe(qp, ctrl, size, idx, wr->wr_id, nreq,
-                          get_fence(fence, wr), next_fence,
+               qp->next_fence = next_fence;
+               finish_wqe(qp, ctrl, size, idx, wr->wr_id, nreq, fence,
                           mlx5_ib_opcode[wr->opcode]);
 skip_psv:
                if (0)
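
The mlx5 hunks above replace the per-WR get_fence() helper and the fm_cache
field with a simpler model: a device-wide UMR fence mode chosen once from the
umr_fence capability (see mlx5_get_umr_fence() earlier), plus a next_fence
value carried on the QP across WQEs. The per-WQE decision, condensed from
the diff:

        if (wr->opcode == IB_WR_LOCAL_INV || wr->opcode == IB_WR_REG_MR) {
                fence = dev->umr_fence;    /* from the umr_fence capability */
                next_fence = MLX5_FENCE_MODE_INITIATOR_SMALL;
        } else if (wr->send_flags & IB_SEND_FENCE) {
                fence = qp->next_fence ? MLX5_FENCE_MODE_SMALL_AND_FENCE
                                       : MLX5_FENCE_MODE_FENCE;
        } else {
                fence = qp->next_fence;    /* inherit any pending fence */
        }
        /* ... after building the WQE: qp->next_fence = next_fence; */
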
index fb983df7c157b660239983e00c85da5627863dec..30b256a2c54ec42dd97b29ff0f0cb15be6d44510 100644 (file)
@@ -610,7 +610,6 @@ static void build_mpa_v2(struct nes_cm_node *cm_node,
                ctrl_ord = cm_node->ord_size & IETF_NO_IRD_ORD;
        }
        ctrl_ird |= IETF_PEER_TO_PEER;
-       ctrl_ird |= IETF_FLPDU_ZERO_LEN;
 
        switch (mpa_key) {
        case MPA_KEY_REQUEST:
@@ -1826,7 +1825,7 @@ static void handle_rcv_mpa(struct nes_cm_node *cm_node, struct sk_buff *skb)
                        type = NES_CM_EVENT_CONNECTED;
                        cm_node->state = NES_CM_STATE_TSA;
                }
-
+               send_ack(cm_node, NULL);
                break;
        default:
                WARN_ON(1);
index 3d7705cec7705fcf334a96353e6830b554176500..d86dbe814d98fbe00adf22acb6d1ee658efc5ef5 100644 (file)
@@ -270,11 +270,13 @@ static inline int qedr_gsi_build_header(struct qedr_dev *dev,
                return rc;
        }
 
-       vlan_id = rdma_vlan_dev_vlan_id(sgid_attr.ndev);
-       if (vlan_id < VLAN_CFI_MASK)
-               has_vlan = true;
-       if (sgid_attr.ndev)
+       if (sgid_attr.ndev) {
+               vlan_id = rdma_vlan_dev_vlan_id(sgid_attr.ndev);
+               if (vlan_id < VLAN_CFI_MASK)
+                       has_vlan = true;
+
                dev_put(sgid_attr.ndev);
+       }
 
        if (!memcmp(&sgid, &zgid, sizeof(sgid))) {
                DP_ERR(dev, "gsi post send: GID not found GID index %d\n",
index fc8b88514da52bc380ce51066e0a2665ff18a2be..4ddbcac5eabe6834f90ecac5885b75495493264a 100644 (file)
@@ -1956,8 +1956,10 @@ send_last:
                ret = qib_get_rwqe(qp, 1);
                if (ret < 0)
                        goto nack_op_err;
-               if (!ret)
+               if (!ret) {
+                       rvt_put_ss(&qp->r_sge);
                        goto rnr_nak;
+               }
                wc.ex.imm_data = ohdr->u.rc.imm_data;
                hdrsize += 4;
                wc.wc_flags = IB_WC_WITH_IMM;
index 874b24366e4dd744cc9ddf1c4b561a5a050641d1..7871379342f48fa77b2e6e8279ca774b4c49ad2f 100644 (file)
@@ -178,7 +178,7 @@ static inline int ib_speed_enum_to_int(int speed)
 static int ipoib_get_link_ksettings(struct net_device *netdev,
                                    struct ethtool_link_ksettings *cmd)
 {
-       struct ipoib_dev_priv *priv = netdev_priv(netdev);
+       struct ipoib_dev_priv *priv = ipoib_priv(netdev);
        struct ib_port_attr attr;
        int ret, speed, width;
 
index 2869d1adb1decdab20a19145536fc978344aba78..a115c0b7a310ed630c1c32ffd9e2c17574358f7c 100644 (file)
@@ -1590,7 +1590,7 @@ static void ipoib_neigh_hash_uninit(struct net_device *dev)
        wait_for_completion(&priv->ntbl.deleted);
 }
 
-void ipoib_dev_uninit_default(struct net_device *dev)
+static void ipoib_dev_uninit_default(struct net_device *dev)
 {
        struct ipoib_dev_priv *priv = ipoib_priv(dev);
 
index def723a5df29fa72342ed5e52ec7fa35fa54375c..2354c742caa12d69ef761ecbc7ca8af311bb8aa5 100644 (file)
@@ -320,7 +320,7 @@ static int srp_new_cm_id(struct srp_rdma_ch *ch)
        ch->path.sgid = target->sgid;
        ch->path.dgid = target->orig_dgid;
        ch->path.pkey = target->pkey;
-       sa_path_set_service_id(&ch->path, target->service_id);
+       ch->path.service_id = target->service_id;
 
        return 0;
 }
@@ -575,7 +575,7 @@ static int srp_create_ch_ib(struct srp_rdma_ch *ch)
        return 0;
 
 err_qp:
-       srp_destroy_qp(ch, qp);
+       ib_destroy_qp(qp);
 
 err_send_cq:
        ib_free_cq(send_cq);
index 485900f953e088ca91d0b1b538be6f6cc3ad6794..abc266e40e1710e034d05077ebbe7dc33ce42903 100644 (file)
@@ -213,7 +213,7 @@ static int tm2_touchkey_probe(struct i2c_client *client,
        /* led device */
        touchkey->led_dev.name = TM2_TOUCHKEY_DEV_NAME;
        touchkey->led_dev.brightness = LED_FULL;
-       touchkey->led_dev.max_brightness = LED_FULL;
+       touchkey->led_dev.max_brightness = LED_ON;
        touchkey->led_dev.brightness_set = tm2_touchkey_led_brightness_set;
 
        error = devm_led_classdev_register(&client->dev, &touchkey->led_dev);
index f11807db69792a1c19661b6921caceee247fbaf5..400869e61a0663be592e723f8f9de4f3596aaf3d 100644 (file)
@@ -256,6 +256,42 @@ static int axp20x_pek_probe_input_device(struct axp20x_pek *axp20x_pek,
        return 0;
 }
 
+#ifdef CONFIG_ACPI
+static bool axp20x_pek_should_register_input(struct axp20x_pek *axp20x_pek,
+                                            struct platform_device *pdev)
+{
+       unsigned long long hrv = 0;
+       acpi_status status;
+
+       if (IS_ENABLED(CONFIG_INPUT_SOC_BUTTON_ARRAY) &&
+           axp20x_pek->axp20x->variant == AXP288_ID) {
+               status = acpi_evaluate_integer(ACPI_HANDLE(pdev->dev.parent),
+                                              "_HRV", NULL, &hrv);
+               if (ACPI_FAILURE(status))
+                       dev_err(&pdev->dev, "Failed to get PMIC hardware revision\n");
+
+               /*
+                * On Cherry Trail platforms (hrv == 3), do not register the
+                * input device if there is an "INTCFD9" or "ACPI0011" gpio
+                * button ACPI device, as that handles the power button too,
+                * and otherwise we end up reporting all presses twice.
+                */
+               if (hrv == 3 && (acpi_dev_present("INTCFD9", NULL, -1) ||
+                                acpi_dev_present("ACPI0011", NULL, -1)))
+                       return false;
+
+       }
+
+       return true;
+}
+#else
+static bool axp20x_pek_should_register_input(struct axp20x_pek *axp20x_pek,
+                                            struct platform_device *pdev)
+{
+       return true;
+}
+#endif
+
 static int axp20x_pek_probe(struct platform_device *pdev)
 {
        struct axp20x_pek *axp20x_pek;
@@ -268,13 +304,7 @@ static int axp20x_pek_probe(struct platform_device *pdev)
 
        axp20x_pek->axp20x = dev_get_drvdata(pdev->dev.parent);
 
-       /*
-        * Do not register the input device if there is an "INTCFD9"
-        * gpio button ACPI device, that handles the power button too,
-        * and otherwise we end up reporting all presses twice.
-        */
-       if (!acpi_dev_found("INTCFD9") ||
-           !IS_ENABLED(CONFIG_INPUT_SOC_BUTTON_ARRAY)) {
+       if (axp20x_pek_should_register_input(axp20x_pek, pdev)) {
                error = axp20x_pek_probe_input_device(axp20x_pek, pdev);
                if (error)
                        return error;
index a679e56c44cd49ddea4361aebdb97d4fe7f1e12e..f431da07f861e50fa86574954f7e4b0c2c1e57c3 100644 (file)
@@ -554,32 +554,34 @@ static int elan_i2c_finish_fw_update(struct i2c_client *client,
                                     struct completion *completion)
 {
        struct device *dev = &client->dev;
-       long ret;
        int error;
        int len;
-       u8 buffer[ETP_I2C_INF_LENGTH];
+       u8 buffer[ETP_I2C_REPORT_LEN];
+
+       len = i2c_master_recv(client, buffer, ETP_I2C_REPORT_LEN);
+       if (len != ETP_I2C_REPORT_LEN) {
+               error = len < 0 ? len : -EIO;
+               dev_warn(dev, "failed to read I2C data after FW WDT reset: %d (%d)\n",
+                       error, len);
+       }
 
        reinit_completion(completion);
        enable_irq(client->irq);
 
        error = elan_i2c_write_cmd(client, ETP_I2C_STAND_CMD, ETP_I2C_RESET);
-       if (!error)
-               ret = wait_for_completion_interruptible_timeout(completion,
-                                                       msecs_to_jiffies(300));
-       disable_irq(client->irq);
-
        if (error) {
                dev_err(dev, "device reset failed: %d\n", error);
-               return error;
-       } else if (ret == 0) {
+       } else if (!wait_for_completion_timeout(completion,
+                                               msecs_to_jiffies(300))) {
                dev_err(dev, "timeout waiting for device reset\n");
-               return -ETIMEDOUT;
-       } else if (ret < 0) {
-               error = ret;
-               dev_err(dev, "error waiting for device reset: %d\n", error);
-               return error;
+               error = -ETIMEDOUT;
        }
 
+       disable_irq(client->irq);
+
+       if (error)
+               return error;
+
        len = i2c_master_recv(client, buffer, ETP_I2C_INF_LENGTH);
        if (len != ETP_I2C_INF_LENGTH) {
                error = len < 0 ? len : -EIO;
index 131df9d3660f0e4866b9ffc849ca0d6ce1d4e906..16c30460ef041b13686db7f6efe36860b725a4bd 100644 (file)
@@ -176,6 +176,12 @@ static const char * const smbus_pnp_ids[] = {
        NULL
 };
 
+static const char * const forcepad_pnp_ids[] = {
+       "SYN300D",
+       "SYN3014",
+       NULL
+};
+
 /*
  * Send a command to the synaptics touchpad by special commands
  */
@@ -397,6 +403,8 @@ static int synaptics_query_hardware(struct psmouse *psmouse,
 {
        int error;
 
+       memset(info, 0, sizeof(*info));
+
        error = synaptics_identify(psmouse, info);
        if (error)
                return error;
@@ -480,13 +488,6 @@ static const struct min_max_quirk min_max_pnpid_table[] = {
        { }
 };
 
-/* This list has been kindly provided by Synaptics. */
-static const char * const forcepad_pnp_ids[] = {
-       "SYN300D",
-       "SYN3014",
-       NULL
-};
-
 /*****************************************************************************
  *     Synaptics communications functions
  ****************************************************************************/
@@ -1687,7 +1688,8 @@ enum {
        SYNAPTICS_INTERTOUCH_ON,
 };
 
-static int synaptics_intertouch = SYNAPTICS_INTERTOUCH_NOT_SET;
+static int synaptics_intertouch = IS_ENABLED(CONFIG_RMI4_SMB) ?
+               SYNAPTICS_INTERTOUCH_NOT_SET : SYNAPTICS_INTERTOUCH_OFF;
 module_param_named(synaptics_intertouch, synaptics_intertouch, int, 0644);
 MODULE_PARM_DESC(synaptics_intertouch, "Use a secondary bus for the Synaptics device.");
 
@@ -1737,8 +1739,16 @@ static int synaptics_setup_intertouch(struct psmouse *psmouse,
 
        if (synaptics_intertouch == SYNAPTICS_INTERTOUCH_NOT_SET) {
                if (!psmouse_matches_pnp_id(psmouse, topbuttonpad_pnp_ids) &&
-                   !psmouse_matches_pnp_id(psmouse, smbus_pnp_ids))
+                   !psmouse_matches_pnp_id(psmouse, smbus_pnp_ids)) {
+
+                       if (!psmouse_matches_pnp_id(psmouse, forcepad_pnp_ids))
+                               psmouse_info(psmouse,
+                                            "Your touchpad (%s) says it can support a different bus. "
+                                            "If i2c-hid and hid-rmi are not used, you might want to try setting psmouse.synaptics_intertouch to 1 and report this to linux-input@vger.kernel.org.\n",
+                                            psmouse->ps2dev.serio->firmware_id);
+
                        return -ENXIO;
+               }
        }
 
        psmouse_info(psmouse, "Trying to set up SMBus access\n");
@@ -1810,6 +1820,15 @@ int synaptics_init(struct psmouse *psmouse)
        }
 
        if (SYN_CAP_INTERTOUCH(info.ext_cap_0c)) {
+               if ((!IS_ENABLED(CONFIG_RMI4_SMB) ||
+                    !IS_ENABLED(CONFIG_MOUSE_PS2_SYNAPTICS_SMBUS)) &&
+                   /* Forcepads need F21, which is not ready */
+                   !psmouse_matches_pnp_id(psmouse, forcepad_pnp_ids)) {
+                       psmouse_warn(psmouse,
+                                    "The touchpad can support a better bus than the too old PS/2 protocol. "
+                                    "Make sure MOUSE_PS2_SYNAPTICS_SMBUS and RMI4_SMB are enabled to get a better touchpad experience.\n");
+               }
+
                error = synaptics_setup_intertouch(psmouse, &info, true);
                if (!error)
                        return PSMOUSE_SYNAPTICS_SMBUS;
index 2302aef2b2d403d47f8384c0127ad91104788ef5..dd042a9b0aaacc208056376b27a5fd7bf5755f6c 100644 (file)
@@ -350,6 +350,7 @@ static bool mxt_object_readable(unsigned int type)
        case MXT_TOUCH_KEYARRAY_T15:
        case MXT_TOUCH_PROXIMITY_T23:
        case MXT_TOUCH_PROXKEY_T52:
+       case MXT_TOUCH_MULTITOUCHSCREEN_T100:
        case MXT_PROCI_GRIPFACE_T20:
        case MXT_PROCG_NOISE_T22:
        case MXT_PROCI_ONETOUCH_T24:
index 8cf8d8d5d4ef4f82b45cc5ecbb959c92be95b699..f872817e81e46ba6d962a5edc207302e9bd25f3f 100644 (file)
@@ -471,7 +471,7 @@ static EDT_ATTR(gain, S_IWUSR | S_IRUGO, WORK_REGISTER_GAIN,
 static EDT_ATTR(offset, S_IWUSR | S_IRUGO, WORK_REGISTER_OFFSET,
                M09_REGISTER_OFFSET, 0, 31);
 static EDT_ATTR(threshold, S_IWUSR | S_IRUGO, WORK_REGISTER_THRESHOLD,
-               M09_REGISTER_THRESHOLD, 20, 80);
+               M09_REGISTER_THRESHOLD, 0, 80);
 static EDT_ATTR(report_rate, S_IWUSR | S_IRUGO, WORK_REGISTER_REPORT_RATE,
                NO_REGISTER, 3, 14);
 
index 813dd68a5c82478f4636652fe6e5dec05ec1fd37..0dbcf105f7db348773592f54d4b85beb9b7ba4a3 100644 (file)
@@ -526,6 +526,7 @@ static int __maybe_unused silead_ts_suspend(struct device *dev)
 {
        struct i2c_client *client = to_i2c_client(dev);
 
+       disable_irq(client->irq);
        silead_ts_set_power(client, SILEAD_POWER_OFF);
        return 0;
 }
@@ -551,6 +552,8 @@ static int __maybe_unused silead_ts_resume(struct device *dev)
                return -ENODEV;
        }
 
+       enable_irq(client->irq);
+
        return 0;
 }
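
The silead hunks pair disable_irq() with power-off and enable_irq() with
power-on so the interrupt line is quiesced while the controller is unpowered;
a floating line can otherwise fire spurious IRQs. The pairing, sketched
(power_off/power_on are hypothetical stand-ins for silead_ts_set_power()):

        static int ts_suspend(struct device *dev)
        {
                struct i2c_client *client = to_i2c_client(dev);

                disable_irq(client->irq);   /* quiesce before power-off */
                power_off(client);
                return 0;
        }

        static int ts_resume(struct device *dev)
        {
                struct i2c_client *client = to_i2c_client(dev);

                power_on(client);
                enable_irq(client->irq);    /* re-arm after power-on */
                return 0;
        }
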
 
index 78a7ce816a4792c7b1ddc101cece8c265084e630..9a873118ea5fcf4ff7adae18e03b3a351936df16 100644 (file)
@@ -285,7 +285,7 @@ static int pca955x_probe(struct i2c_client *client,
                        "slave address 0x%02x\n",
                        client->name, chip->bits, client->addr);
 
-       if (!i2c_check_functionality(adapter, I2C_FUNC_I2C))
+       if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA))
                return -EIO;
 
        if (pdata) {
index bf7419a56454e3834ea2c2034d3170591f1ad97f..f4eace5ea184095eb0c170c4f3f1647f72b8c537 100644 (file)
@@ -485,10 +485,10 @@ void bitmap_print_sb(struct bitmap *bitmap)
        pr_debug("         magic: %08x\n", le32_to_cpu(sb->magic));
        pr_debug("       version: %d\n", le32_to_cpu(sb->version));
        pr_debug("          uuid: %08x.%08x.%08x.%08x\n",
-                *(__u32 *)(sb->uuid+0),
-                *(__u32 *)(sb->uuid+4),
-                *(__u32 *)(sb->uuid+8),
-                *(__u32 *)(sb->uuid+12));
+                le32_to_cpu(*(__u32 *)(sb->uuid+0)),
+                le32_to_cpu(*(__u32 *)(sb->uuid+4)),
+                le32_to_cpu(*(__u32 *)(sb->uuid+8)),
+                le32_to_cpu(*(__u32 *)(sb->uuid+12)));
        pr_debug("        events: %llu\n",
                 (unsigned long long) le64_to_cpu(sb->events));
        pr_debug("events cleared: %llu\n",
index cd8139593ccd50655a2329460cc8de9d175eac86..840c1496b2b138ef504bde4c441b1082df183473 100644 (file)
@@ -1334,7 +1334,7 @@ int dm_bufio_issue_flush(struct dm_bufio_client *c)
 {
        struct dm_io_request io_req = {
                .bi_op = REQ_OP_WRITE,
-               .bi_op_flags = REQ_PREFLUSH,
+               .bi_op_flags = REQ_PREFLUSH | REQ_SYNC,
                .mem.type = DM_IO_KMEM,
                .mem.ptr.addr = NULL,
                .client = c->dm_io,
index c7f7c8d7657670850adedceb505538e1b9cdb2ce..7910bfe50da4469c44b571363cc6696f74f5fa42 100644 (file)
@@ -783,7 +783,8 @@ static void write_journal(struct dm_integrity_c *ic, unsigned commit_start, unsi
                        for (i = 0; i < commit_sections; i++)
                                rw_section_mac(ic, commit_start + i, true);
                }
-               rw_journal(ic, REQ_OP_WRITE, REQ_FUA, commit_start, commit_sections, &io_comp);
+               rw_journal(ic, REQ_OP_WRITE, REQ_FUA | REQ_SYNC, commit_start,
+                          commit_sections, &io_comp);
        } else {
                unsigned to_end;
                io_comp.in_flight = (atomic_t)ATOMIC_INIT(2);
@@ -2374,21 +2375,6 @@ static void dm_integrity_set(struct dm_target *ti, struct dm_integrity_c *ic)
        blk_queue_max_integrity_segments(disk->queue, UINT_MAX);
 }
 
-/* FIXME: use new kvmalloc */
-static void *dm_integrity_kvmalloc(size_t size, gfp_t gfp)
-{
-       void *ptr = NULL;
-
-       if (size <= PAGE_SIZE)
-               ptr = kmalloc(size, GFP_KERNEL | gfp);
-       if (!ptr && size <= KMALLOC_MAX_SIZE)
-               ptr = kmalloc(size, GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY | gfp);
-       if (!ptr)
-               ptr = __vmalloc(size, GFP_KERNEL | gfp, PAGE_KERNEL);
-
-       return ptr;
-}
-
 static void dm_integrity_free_page_list(struct dm_integrity_c *ic, struct page_list *pl)
 {
        unsigned i;
@@ -2407,7 +2393,7 @@ static struct page_list *dm_integrity_alloc_page_list(struct dm_integrity_c *ic)
        struct page_list *pl;
        unsigned i;
 
-       pl = dm_integrity_kvmalloc(page_list_desc_size, __GFP_ZERO);
+       pl = kvmalloc(page_list_desc_size, GFP_KERNEL | __GFP_ZERO);
        if (!pl)
                return NULL;
 
@@ -2437,7 +2423,7 @@ static struct scatterlist **dm_integrity_alloc_journal_scatterlist(struct dm_int
        struct scatterlist **sl;
        unsigned i;
 
-       sl = dm_integrity_kvmalloc(ic->journal_sections * sizeof(struct scatterlist *), __GFP_ZERO);
+       sl = kvmalloc(ic->journal_sections * sizeof(struct scatterlist *), GFP_KERNEL | __GFP_ZERO);
        if (!sl)
                return NULL;
 
@@ -2453,7 +2439,7 @@ static struct scatterlist **dm_integrity_alloc_journal_scatterlist(struct dm_int
 
                n_pages = (end_index - start_index + 1);
 
-               s = dm_integrity_kvmalloc(n_pages * sizeof(struct scatterlist), 0);
+               s = kvmalloc(n_pages * sizeof(struct scatterlist), GFP_KERNEL);
                if (!s) {
                        dm_integrity_free_journal_scatterlist(ic, sl);
                        return NULL;
@@ -2617,7 +2603,7 @@ static int create_journal(struct dm_integrity_c *ic, char **error)
                                goto bad;
                        }
 
-                       sg = dm_integrity_kvmalloc((ic->journal_pages + 1) * sizeof(struct scatterlist), 0);
+                       sg = kvmalloc((ic->journal_pages + 1) * sizeof(struct scatterlist), GFP_KERNEL);
                        if (!sg) {
                                *error = "Unable to allocate sg list";
                                r = -ENOMEM;
@@ -2673,7 +2659,7 @@ static int create_journal(struct dm_integrity_c *ic, char **error)
                                r = -ENOMEM;
                                goto bad;
                        }
-                       ic->sk_requests = dm_integrity_kvmalloc(ic->journal_sections * sizeof(struct skcipher_request *), __GFP_ZERO);
+                       ic->sk_requests = kvmalloc(ic->journal_sections * sizeof(struct skcipher_request *), GFP_KERNEL | __GFP_ZERO);
                        if (!ic->sk_requests) {
                                *error = "Unable to allocate sk requests";
                                r = -ENOMEM;
@@ -2740,7 +2726,7 @@ retest_commit_id:
                r = -ENOMEM;
                goto bad;
        }
-       ic->journal_tree = dm_integrity_kvmalloc(journal_tree_size, 0);
+       ic->journal_tree = kvmalloc(journal_tree_size, GFP_KERNEL);
        if (!ic->journal_tree) {
                *error = "Could not allocate memory for journal tree";
                r = -ENOMEM;
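
The dm-integrity hunks above delete the driver-local dm_integrity_kvmalloc()
(its FIXME asked for exactly this) in favor of the then-new core kvmalloc(),
which implements the same policy centrally. Its semantics, roughly:

        /* kvmalloc: try kmalloc first, fall back to vmalloc for large or
         * fragmented requests; kvfree() releases either kind of pointer. */
        ptr = kvmalloc(size, GFP_KERNEL | __GFP_ZERO);
        if (!ptr)
                return -ENOMEM;
        /* ... */
        kvfree(ptr);
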
index 0555b4410e0598a6096642f10978ad6798bc5f98..41852ae287a58c29e675dd4b794f1670f9dc53e8 100644 (file)
@@ -1710,12 +1710,13 @@ static int copy_params(struct dm_ioctl __user *user, struct dm_ioctl *param_kern
        }
 
        /*
-        * Try to avoid low memory issues when a device is suspended.
+        * Use __GFP_HIGH to avoid low memory issues when a device is
+        * suspended and the ioctl is needed to resume it.
         * Use kmalloc() rather than vmalloc() when we can.
         */
        dmi = NULL;
        noio_flag = memalloc_noio_save();
-       dmi = kvmalloc(param_kernel->data_size, GFP_KERNEL);
+       dmi = kvmalloc(param_kernel->data_size, GFP_KERNEL | __GFP_HIGH);
        memalloc_noio_restore(noio_flag);
 
        if (!dmi) {
index a95cbb80fb34444144bad346b3e769c625e8c788..e61c45047c25a9ba2683c313fbc2151c9051b178 100644 (file)
@@ -260,7 +260,7 @@ static int mirror_flush(struct dm_target *ti)
        struct mirror *m;
        struct dm_io_request io_req = {
                .bi_op = REQ_OP_WRITE,
-               .bi_op_flags = REQ_PREFLUSH,
+               .bi_op_flags = REQ_PREFLUSH | REQ_SYNC,
                .mem.type = DM_IO_KMEM,
                .mem.ptr.addr = NULL,
                .client = ms->io_client,
index b93476c3ba3f9767fb133fed977e7a888cc0698e..c5534d294773fc0267a1b4c7438a3316e74d417a 100644 (file)
@@ -741,7 +741,8 @@ static void persistent_commit_exception(struct dm_exception_store *store,
        /*
         * Commit exceptions to disk.
         */
-       if (ps->valid && area_io(ps, REQ_OP_WRITE, REQ_PREFLUSH | REQ_FUA))
+       if (ps->valid && area_io(ps, REQ_OP_WRITE,
+                                REQ_PREFLUSH | REQ_FUA | REQ_SYNC))
                ps->valid = 0;
 
        /*
index 97de961a3bfc80d11497c5ac2558ce7ad7a57a7e..1ec9b2c51c076d99ba6003f90eae608d9c9e35af 100644 (file)
@@ -166,7 +166,7 @@ static int verity_hash_init(struct dm_verity *v, struct ahash_request *req,
                return r;
        }
 
-       if (likely(v->version >= 1))
+       if (likely(v->salt_size && (v->version >= 1)))
                r = verity_hash_update(v, req, v->salt, v->salt_size, res);
 
        return r;
@@ -177,7 +177,7 @@ static int verity_hash_final(struct dm_verity *v, struct ahash_request *req,
 {
        int r;
 
-       if (unlikely(!v->version)) {
+       if (unlikely(v->salt_size && (!v->version))) {
                r = verity_hash_update(v, req, v->salt, v->salt_size, res);
 
                if (r < 0) {
index 6ef9500226c0c7d789ed78e6876195f73ef9d6b7..37ccd73c79ecf2eeb4f33b5bc597f88ca5750d4b 100644 (file)
@@ -1657,7 +1657,7 @@ static struct mapped_device *alloc_dev(int minor)
 
        bio_init(&md->flush_bio, NULL, 0);
        md->flush_bio.bi_bdev = md->bdev;
-       md->flush_bio.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
+       md->flush_bio.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC;
 
        dm_stats_init(&md->stats);
 
index 7299ce2f08a810555a0407a423a512a0f59f190c..03082e17c65cc87af2a44a8020bda6cbdb8b0262 100644 (file)
@@ -1311,8 +1311,10 @@ static int add_new_disk(struct mddev *mddev, struct md_rdev *rdev)
        cmsg.raid_slot = cpu_to_le32(rdev->desc_nr);
        lock_comm(cinfo, 1);
        ret = __sendmsg(cinfo, &cmsg);
-       if (ret)
+       if (ret) {
+               unlock_comm(cinfo);
                return ret;
+       }
        cinfo->no_new_dev_lockres->flags |= DLM_LKF_NOQUEUE;
        ret = dlm_lock_sync(cinfo->no_new_dev_lockres, DLM_LOCK_EX);
        cinfo->no_new_dev_lockres->flags &= ~DLM_LKF_NOQUEUE;
index 10367ffe92e3e37704f5e32793ea97175c8b15e6..212a6777ff3172dd9e20401dd7bf87ad5d2f7468 100644 (file)
@@ -765,7 +765,7 @@ void md_super_write(struct mddev *mddev, struct md_rdev *rdev,
            test_bit(FailFast, &rdev->flags) &&
            !test_bit(LastDev, &rdev->flags))
                ff = MD_FAILFAST;
-       bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH | REQ_FUA | ff;
+       bio->bi_opf = REQ_OP_WRITE | REQ_SYNC | REQ_PREFLUSH | REQ_FUA | ff;
 
        atomic_inc(&mddev->pending_writes);
        submit_bio(bio);
index 4c00bc248287e4ab89b492225e0d054973725549..0a7af8b0a80a031a99a7af1742e2d64e6df0d106 100644 (file)
@@ -1782,7 +1782,7 @@ static int r5l_log_write_empty_meta_block(struct r5l_log *log, sector_t pos,
        mb->checksum = cpu_to_le32(crc32c_le(log->uuid_checksum,
                                             mb, PAGE_SIZE));
        if (!sync_page_io(log->rdev, pos, PAGE_SIZE, page, REQ_OP_WRITE,
-                         REQ_FUA, false)) {
+                         REQ_SYNC | REQ_FUA, false)) {
                __free_page(page);
                return -EIO;
        }
@@ -2388,7 +2388,7 @@ r5c_recovery_rewrite_data_only_stripes(struct r5l_log *log,
                mb->checksum = cpu_to_le32(crc32c_le(log->uuid_checksum,
                                                     mb, PAGE_SIZE));
                sync_page_io(log->rdev, ctx->pos, PAGE_SIZE, page,
-                            REQ_OP_WRITE, REQ_FUA, false);
+                            REQ_OP_WRITE, REQ_SYNC | REQ_FUA, false);
                sh->log_start = ctx->pos;
                list_add_tail(&sh->r5c, &log->stripe_in_journal_list);
                atomic_inc(&log->stripe_in_journal_count);
index 5d25bebf3328e4967334465916aca3e3c750e447..ccce92e68d7fa5d8258bb7f2ca2bfa1bcd545709 100644 (file)
@@ -907,8 +907,8 @@ static int ppl_write_empty_header(struct ppl_log *log)
        pplhdr->checksum = cpu_to_le32(~crc32c_le(~0, pplhdr, PAGE_SIZE));
 
        if (!sync_page_io(rdev, rdev->ppl.sector - rdev->data_offset,
-                         PPL_HEADER_SIZE, page, REQ_OP_WRITE | REQ_FUA, 0,
-                         false)) {
+                         PPL_HEADER_SIZE, page, REQ_OP_WRITE | REQ_SYNC |
+                         REQ_FUA, 0, false)) {
                md_error(rdev->mddev, rdev);
                ret = -EIO;
        }
index 9c4f7659f8b1337c99cfd0ab5070012e3f658849..722064689e822f3b876411f076921e244abbec2f 100644 (file)
@@ -4085,10 +4085,15 @@ static void handle_parity_checks5(struct r5conf *conf, struct stripe_head *sh,
                        set_bit(STRIPE_INSYNC, &sh->state);
                else {
                        atomic64_add(STRIPE_SECTORS, &conf->mddev->resync_mismatches);
-                       if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery))
+                       if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery)) {
                                /* don't try to repair!! */
                                set_bit(STRIPE_INSYNC, &sh->state);
-                       else {
+                               pr_warn_ratelimited("%s: mismatch sector in range "
+                                                   "%llu-%llu\n", mdname(conf->mddev),
+                                                   (unsigned long long) sh->sector,
+                                                   (unsigned long long) sh->sector +
+                                                   STRIPE_SECTORS);
+                       } else {
                                sh->check_state = check_state_compute_run;
                                set_bit(STRIPE_COMPUTE_RUN, &sh->state);
                                set_bit(STRIPE_OP_COMPUTE_BLK, &s->ops_request);
@@ -4237,10 +4242,15 @@ static void handle_parity_checks6(struct r5conf *conf, struct stripe_head *sh,
                        }
                } else {
                        atomic64_add(STRIPE_SECTORS, &conf->mddev->resync_mismatches);
-                       if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery))
+                       if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery)) {
                                /* don't try to repair!! */
                                set_bit(STRIPE_INSYNC, &sh->state);
-                       else {
+                               pr_warn_ratelimited("%s: mismatch sector in range "
+                                                   "%llu-%llu\n", mdname(conf->mddev),
+                                                   (unsigned long long) sh->sector,
+                                                   (unsigned long long) sh->sector +
+                                                   STRIPE_SECTORS);
+                       } else {
                                int *target = &sh->ops.target;
 
                                sh->ops.target = -1;
index 57a842ff309747382a928998d0209072476da421..b7731b18ecae1741ebee3be1bf9188659bfeb398 100644 (file)
@@ -493,10 +493,10 @@ static int vdec_h264_get_param(unsigned long h_vdec,
 }
 
 static struct vdec_common_if vdec_h264_if = {
-       vdec_h264_init,
-       vdec_h264_decode,
-       vdec_h264_get_param,
-       vdec_h264_deinit,
+       .init           = vdec_h264_init,
+       .decode         = vdec_h264_decode,
+       .get_param      = vdec_h264_get_param,
+       .deinit         = vdec_h264_deinit,
 };
 
 struct vdec_common_if *get_h264_dec_comm_if(void);
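
This and the two mediatek vdec hunks below convert positional struct
initializers to designated ones, which binds each handler by field name and
stays correct if vdec_common_if is ever reordered. The general shape, with
hypothetical my_* handlers:

        static struct vdec_common_if vdec_if = {
                .init      = my_init,       /* binds by name, not position */
                .decode    = my_decode,
                .get_param = my_get_param,
                .deinit    = my_deinit,
        };
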
index 6e7a62ae0842c2e69bb65e31bb1fa80e7c9ce44c..b9fad6a488799ebc7fad8b12b6990b9c33d7c60b 100644 (file)
@@ -620,10 +620,10 @@ static void vdec_vp8_deinit(unsigned long h_vdec)
 }
 
 static struct vdec_common_if vdec_vp8_if = {
-       vdec_vp8_init,
-       vdec_vp8_decode,
-       vdec_vp8_get_param,
-       vdec_vp8_deinit,
+       .init           = vdec_vp8_init,
+       .decode         = vdec_vp8_decode,
+       .get_param      = vdec_vp8_get_param,
+       .deinit         = vdec_vp8_deinit,
 };
 
 struct vdec_common_if *get_vp8_dec_comm_if(void);
index 5539b1853f166a611ed678bc1274f55e48f1347c..1daee1207469b3ea9e740676aa80765a4280c118 100644 (file)
@@ -979,10 +979,10 @@ static int vdec_vp9_get_param(unsigned long h_vdec,
 }
 
 static struct vdec_common_if vdec_vp9_if = {
-       vdec_vp9_init,
-       vdec_vp9_decode,
-       vdec_vp9_get_param,
-       vdec_vp9_deinit,
+       .init           = vdec_vp9_init,
+       .decode         = vdec_vp9_decode,
+       .get_param      = vdec_vp9_get_param,
+       .deinit         = vdec_vp9_deinit,
 };
 
 struct vdec_common_if *get_vp9_dec_comm_if(void);
index c862cd4583cc93694747e191f5e3537e5767bfa5..b8069eec18cb44ef335535789f0e2a61ffaf4bd1 100644 (file)
@@ -309,6 +309,9 @@ static inline enum xp_retval
 xpc_send(short partid, int ch_number, u32 flags, void *payload,
         u16 payload_size)
 {
+       if (!xpc_interface.send)
+               return xpNotLoaded;
+
        return xpc_interface.send(partid, ch_number, flags, payload,
                                  payload_size);
 }
@@ -317,6 +320,9 @@ static inline enum xp_retval
 xpc_send_notify(short partid, int ch_number, u32 flags, void *payload,
                u16 payload_size, xpc_notify_func func, void *key)
 {
+       if (!xpc_interface.send_notify)
+               return xpNotLoaded;
+
        return xpc_interface.send_notify(partid, ch_number, flags, payload,
                                         payload_size, func, key);
 }
@@ -324,12 +330,16 @@ xpc_send_notify(short partid, int ch_number, u32 flags, void *payload,
 static inline void
 xpc_received(short partid, int ch_number, void *payload)
 {
-       return xpc_interface.received(partid, ch_number, payload);
+       if (xpc_interface.received)
+               xpc_interface.received(partid, ch_number, payload);
 }
 
 static inline enum xp_retval
 xpc_partid_to_nasids(short partid, void *nasids)
 {
+       if (!xpc_interface.partid_to_nasids)
+               return xpNotLoaded;
+
        return xpc_interface.partid_to_nasids(partid, nasids);
 }
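
With the xpc_interface cast stubs gone (see the xp_main.c hunks below), every
inline wrapper must treat a NULL function pointer as "XPC not loaded": a NULL
test replaces an indirect call to a stub. A hypothetical macro expressing the
guarded-call pattern once:

        #define XPC_CALL(op, default_ret, ...)                          \
                (xpc_interface.op ? xpc_interface.op(__VA_ARGS__)       \
                                  : (default_ret))

        /* e.g. xpc_partid_to_nasids() could then be written as:
         *      return XPC_CALL(partid_to_nasids, xpNotLoaded,
         *                      partid, nasids);
         */
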
 
index 01be66d02ca8ce52c84b809fa55a7aeb6b219bc2..6d7f557fd1c1a1e885eb3dad64886b3e0afe32fe 100644 (file)
@@ -69,23 +69,9 @@ struct xpc_registration xpc_registrations[XPC_MAX_NCHANNELS];
 EXPORT_SYMBOL_GPL(xpc_registrations);
 
 /*
- * Initialize the XPC interface to indicate that XPC isn't loaded.
+ * Initialize the XPC interface with null pointers to indicate that XPC isn't loaded.
  */
-static enum xp_retval
-xpc_notloaded(void)
-{
-       return xpNotLoaded;
-}
-
-struct xpc_interface xpc_interface = {
-       (void (*)(int))xpc_notloaded,
-       (void (*)(int))xpc_notloaded,
-       (enum xp_retval(*)(short, int, u32, void *, u16))xpc_notloaded,
-       (enum xp_retval(*)(short, int, u32, void *, u16, xpc_notify_func,
-                          void *))xpc_notloaded,
-       (void (*)(short, int, void *))xpc_notloaded,
-       (enum xp_retval(*)(short, void *))xpc_notloaded
-};
+struct xpc_interface xpc_interface = { };
 EXPORT_SYMBOL_GPL(xpc_interface);
 
 /*
@@ -115,17 +101,7 @@ EXPORT_SYMBOL_GPL(xpc_set_interface);
 void
 xpc_clear_interface(void)
 {
-       xpc_interface.connect = (void (*)(int))xpc_notloaded;
-       xpc_interface.disconnect = (void (*)(int))xpc_notloaded;
-       xpc_interface.send = (enum xp_retval(*)(short, int, u32, void *, u16))
-           xpc_notloaded;
-       xpc_interface.send_notify = (enum xp_retval(*)(short, int, u32, void *,
-                                                      u16, xpc_notify_func,
-                                                      void *))xpc_notloaded;
-       xpc_interface.received = (void (*)(short, int, void *))
-           xpc_notloaded;
-       xpc_interface.partid_to_nasids = (enum xp_retval(*)(short, void *))
-           xpc_notloaded;
+       memset(&xpc_interface, 0, sizeof(xpc_interface));
 }
 EXPORT_SYMBOL_GPL(xpc_clear_interface);
 
@@ -188,7 +164,8 @@ xpc_connect(int ch_number, xpc_channel_func func, void *key, u16 payload_size,
 
        mutex_unlock(&registration->mutex);
 
-       xpc_interface.connect(ch_number);
+       if (xpc_interface.connect)
+               xpc_interface.connect(ch_number);
 
        return xpSuccess;
 }
@@ -237,7 +214,8 @@ xpc_disconnect(int ch_number)
        registration->assigned_limit = 0;
        registration->idle_limit = 0;
 
-       xpc_interface.disconnect(ch_number);
+       if (xpc_interface.disconnect)
+               xpc_interface.disconnect(ch_number);
 
        mutex_unlock(&registration->mutex);
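The xp/xpc rework replaces a table of stub functions, each cast to an incompatible function-pointer type, with a zero-initialized interface plus a NULL check at every call site; the casts were undefined behavior, while the checks keep each hook well typed. A condensed user-space sketch of the pattern, with hypothetical names:

#include <stdio.h>
#include <string.h>

enum retval { OK, NOT_LOADED };

struct iface {
        enum retval (*send)(int ch);
        void        (*received)(int ch);
};

static struct iface iface;      /* zeroed: backing module not loaded */

static enum retval do_send(int ch)
{
        /* a NULL hook means the module never registered itself */
        if (!iface.send)
                return NOT_LOADED;
        return iface.send(ch);
}

static void clear_iface(void)
{
        memset(&iface, 0, sizeof(iface));
}

int main(void)
{
        printf("%d\n", do_send(0));     /* prints 1 (NOT_LOADED) */
        clear_iface();
        return 0;
}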
 
index 1304160de16828f402dad6ff1f970af70764c3b2..13ef162cf066a63363106e40a513f3317205d10d 100644 (file)
@@ -27,6 +27,7 @@ struct mmc_pwrseq_simple {
        struct mmc_pwrseq pwrseq;
        bool clk_enabled;
        u32 post_power_on_delay_ms;
+       u32 power_off_delay_us;
        struct clk *ext_clk;
        struct gpio_descs *reset_gpios;
 };
@@ -78,6 +79,10 @@ static void mmc_pwrseq_simple_power_off(struct mmc_host *host)
 
        mmc_pwrseq_simple_set_gpios_value(pwrseq, 1);
 
+       if (pwrseq->power_off_delay_us)
+               usleep_range(pwrseq->power_off_delay_us,
+                       2 * pwrseq->power_off_delay_us);
+
        if (!IS_ERR(pwrseq->ext_clk) && pwrseq->clk_enabled) {
                clk_disable_unprepare(pwrseq->ext_clk);
                pwrseq->clk_enabled = false;
@@ -119,6 +124,8 @@ static int mmc_pwrseq_simple_probe(struct platform_device *pdev)
 
        device_property_read_u32(dev, "post-power-on-delay-ms",
                                 &pwrseq->post_power_on_delay_ms);
+       device_property_read_u32(dev, "power-off-delay-us",
+                                &pwrseq->power_off_delay_us);
 
        pwrseq->pwrseq.dev = dev;
        pwrseq->pwrseq.ops = &mmc_pwrseq_simple_ops;
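The new power-off-delay-us handling leans on two conventions worth noting: device_property_read_u32() leaves the destination untouched when the property is absent, so a zero default means "no delay", and usleep_range() is given a 1x-2x window so the timer subsystem has room to coalesce wakeups. A sketch under those assumptions (names hypothetical):

#include <linux/delay.h>
#include <linux/property.h>

static u32 off_delay_us;        /* 0 (property absent) means "no delay" */

static void example_parse(struct device *dev)
{
        /* leaves off_delay_us untouched when the property is missing */
        device_property_read_u32(dev, "power-off-delay-us", &off_delay_us);
}

static void example_power_off(void)
{
        if (off_delay_us)
                /* a 1x-2x window gives the timer subsystem room to coalesce */
                usleep_range(off_delay_us, 2 * off_delay_us);
}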
index 772d0900026d0efbd6e59911f93ef9cc8930b38a..951d2cdd7888b0d68f9994298572759162277682 100644 (file)
@@ -108,7 +108,7 @@ static void octeon_mmc_release_bus(struct cvm_mmc_host *host)
 static void octeon_mmc_int_enable(struct cvm_mmc_host *host, u64 val)
 {
        writeq(val, host->base + MIO_EMM_INT(host));
-       if (!host->dma_active || (host->dma_active && !host->has_ciu3))
+       if (!host->has_ciu3)
                writeq(val, host->base + MIO_EMM_INT_EN(host));
 }
 
@@ -267,7 +267,7 @@ static int octeon_mmc_probe(struct platform_device *pdev)
        }
 
        host->global_pwr_gpiod = devm_gpiod_get_optional(&pdev->dev,
-                                                        "power-gpios",
+                                                        "power",
                                                         GPIOD_OUT_HIGH);
        if (IS_ERR(host->global_pwr_gpiod)) {
                dev_err(&pdev->dev, "Invalid power GPIO\n");
@@ -288,11 +288,20 @@ static int octeon_mmc_probe(struct platform_device *pdev)
                if (ret) {
                        dev_err(&pdev->dev, "Error populating slots\n");
                        octeon_mmc_set_shared_power(host, 0);
-                       return ret;
+                       goto error;
                }
                i++;
        }
        return 0;
+
+error:
+       for (i = 0; i < CAVIUM_MAX_MMC; i++) {
+               if (host->slot[i])
+                       cvm_mmc_of_slot_remove(host->slot[i]);
+               if (host->slot_pdev[i])
+                       of_platform_device_destroy(&host->slot_pdev[i]->dev, NULL);
+       }
+       return ret;
 }
 
 static int octeon_mmc_remove(struct platform_device *pdev)
index fe3d77267cd6b7803ae287d8b6fc5fc308f95a87..b9cc9599879978972b4c8c96f9dbddc26caebb2f 100644 (file)
@@ -146,6 +146,12 @@ static int thunder_mmc_probe(struct pci_dev *pdev,
        return 0;
 
 error:
+       for (i = 0; i < CAVIUM_MAX_MMC; i++) {
+               if (host->slot[i])
+                       cvm_mmc_of_slot_remove(host->slot[i]);
+               if (host->slot_pdev[i])
+                       of_platform_device_destroy(&host->slot_pdev[i]->dev, NULL);
+       }
        clk_disable_unprepare(host->clk);
        return ret;
 }
index 58b51ba6aabd2de7773209d42aa758cb493c2500..b8aaf0fdb77cf52bf89cd3000a98b77536a6f5a4 100644 (file)
@@ -839,14 +839,14 @@ static void cvm_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
                cvm_mmc_reset_bus(slot);
                if (host->global_pwr_gpiod)
                        host->set_shared_power(host, 0);
-               else
+               else if (!IS_ERR(mmc->supply.vmmc))
                        mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);
                break;
 
        case MMC_POWER_UP:
                if (host->global_pwr_gpiod)
                        host->set_shared_power(host, 1);
-               else
+               else if (!IS_ERR(mmc->supply.vmmc))
                        mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, ios->vdd);
                break;
        }
@@ -968,20 +968,15 @@ static int cvm_mmc_of_parse(struct device *dev, struct cvm_mmc_slot *slot)
                return -EINVAL;
        }
 
-       mmc->supply.vmmc = devm_regulator_get_optional(dev, "vmmc");
-       if (IS_ERR(mmc->supply.vmmc)) {
-               if (PTR_ERR(mmc->supply.vmmc) == -EPROBE_DEFER)
-                       return -EPROBE_DEFER;
-               /*
-                * Legacy Octeon firmware has no regulator entry, fall-back to
-                * a hard-coded voltage to get a sane OCR.
-                */
+       ret = mmc_regulator_get_supply(mmc);
+       if (ret == -EPROBE_DEFER)
+               return ret;
+       /*
+        * Legacy Octeon firmware has no regulator entry; fall back to
+        * a hard-coded voltage to get a sane OCR.
+        */
+       if (IS_ERR(mmc->supply.vmmc))
                mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
-       } else {
-               ret = mmc_regulator_get_ocrmask(mmc->supply.vmmc);
-               if (ret > 0)
-                       mmc->ocr_avail = ret;
-       }
 
        /* Common MMC bindings */
        ret = mmc_of_parse(mmc);
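The conversion above folds the open-coded devm_regulator_get_optional() and OCR-mask derivation into mmc_regulator_get_supply(), which populates mmc->supply and ocr_avail itself; only -EPROBE_DEFER is propagated, and an IS_ERR() vmmc afterwards signals that firmware described no regulator. The resulting call shape, as a hedged sketch:

#include <linux/err.h>
#include <linux/mmc/host.h>

static int example_get_vmmc(struct mmc_host *mmc)
{
        int ret = mmc_regulator_get_supply(mmc);

        if (ret == -EPROBE_DEFER)
                return ret;     /* a regulator exists but is not ready yet */

        /* firmware described no regulator: fall back to a fixed OCR */
        if (IS_ERR(mmc->supply.vmmc))
                mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;

        return 0;
}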
index 3275d49958120857d899384237bc905ad5fd17a5..61666d2697713a7665191b848084846142644457 100644 (file)
@@ -187,7 +187,8 @@ static const struct sdhci_iproc_data iproc_cygnus_data = {
 };
 
 static const struct sdhci_pltfm_data sdhci_iproc_pltfm_data = {
-       .quirks = SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK,
+       .quirks = SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK |
+                 SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12,
        .quirks2 = SDHCI_QUIRK2_ACMD23_BROKEN,
        .ops = &sdhci_iproc_ops,
 };
index 6356781f1cca78190bff46e5225606e0f2113cc7..f7e26b031e768d871ed465cde982005295066e82 100644 (file)
@@ -787,14 +787,6 @@ int xenon_phy_adj(struct sdhci_host *host, struct mmc_ios *ios)
        return ret;
 }
 
-void xenon_clean_phy(struct sdhci_host *host)
-{
-       struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
-       struct xenon_priv *priv = sdhci_pltfm_priv(pltfm_host);
-
-       kfree(priv->phy_params);
-}
-
 static int xenon_add_phy(struct device_node *np, struct sdhci_host *host,
                         const char *phy_name)
 {
@@ -819,11 +811,7 @@ static int xenon_add_phy(struct device_node *np, struct sdhci_host *host,
        if (ret)
                return ret;
 
-       ret = xenon_emmc_phy_parse_param_dt(host, np, priv->phy_params);
-       if (ret)
-               xenon_clean_phy(host);
-
-       return ret;
+       return xenon_emmc_phy_parse_param_dt(host, np, priv->phy_params);
 }
 
 int xenon_phy_parse_dt(struct device_node *np, struct sdhci_host *host)
index 67246655315b02b005b00153abfaa60c67fca7a5..bc1781bb070b7b8b83c0132abb114f6905ffddfb 100644 (file)
@@ -486,7 +486,7 @@ static int xenon_probe(struct platform_device *pdev)
 
        err = xenon_sdhc_prepare(host);
        if (err)
-               goto clean_phy_param;
+               goto err_clk;
 
        err = sdhci_add_host(host);
        if (err)
@@ -496,8 +496,6 @@ static int xenon_probe(struct platform_device *pdev)
 
 remove_sdhc:
        xenon_sdhc_unprepare(host);
-clean_phy_param:
-       xenon_clean_phy(host);
 err_clk:
        clk_disable_unprepare(pltfm_host->clk);
 free_pltfm:
@@ -510,8 +508,6 @@ static int xenon_remove(struct platform_device *pdev)
        struct sdhci_host *host = platform_get_drvdata(pdev);
        struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
 
-       xenon_clean_phy(host);
-
        sdhci_remove_host(host, 0);
 
        xenon_sdhc_unprepare(host);
index 6e6523ea01ce50389f3b77f52b467efb11305831..73debb42dc2f9991356f59668d18ebd09296f927 100644 (file)
@@ -93,7 +93,6 @@ struct xenon_priv {
 };
 
 int xenon_phy_adj(struct sdhci_host *host, struct mmc_ios *ios);
-void xenon_clean_phy(struct sdhci_host *host);
 int xenon_phy_parse_dt(struct device_node *np,
                       struct sdhci_host *host);
 void xenon_soc_pad_ctrl(struct sdhci_host *host,
index d474378ed810b3c3ab19d8a4e85e77e0eb15511c..b1dd12729f19b29ea8f35886aba02cc986990661 100644 (file)
@@ -202,7 +202,7 @@ static int nand_ooblayout_free_lp_hamming(struct mtd_info *mtd, int section,
        return 0;
 }
 
-const struct mtd_ooblayout_ops nand_ooblayout_lp_hamming_ops = {
+static const struct mtd_ooblayout_ops nand_ooblayout_lp_hamming_ops = {
        .ecc = nand_ooblayout_ecc_lp_hamming,
        .free = nand_ooblayout_free_lp_hamming,
 };
@@ -4361,7 +4361,7 @@ int nand_scan_ident(struct mtd_info *mtd, int maxchips,
        /* Initialize the ->data_interface field. */
        ret = nand_init_data_interface(chip);
        if (ret)
-               return ret;
+               goto err_nand_init;
 
        /*
         * Setup the data interface correctly on the chip and controller side.
@@ -4373,7 +4373,7 @@ int nand_scan_ident(struct mtd_info *mtd, int maxchips,
         */
        ret = nand_setup_data_interface(chip);
        if (ret)
-               return ret;
+               goto err_nand_init;
 
        nand_maf_id = chip->id.data[0];
        nand_dev_id = chip->id.data[1];
@@ -4404,6 +4404,12 @@ int nand_scan_ident(struct mtd_info *mtd, int maxchips,
        mtd->size = i * chip->chipsize;
 
        return 0;
+
+err_nand_init:
+       /* Free manufacturer priv data. */
+       nand_manufacturer_cleanup(chip);
+
+       return ret;
 }
 EXPORT_SYMBOL(nand_scan_ident);
 
@@ -4574,18 +4580,23 @@ int nand_scan_tail(struct mtd_info *mtd)
 
        /* New bad blocks should be marked in OOB, flash-based BBT, or both */
        if (WARN_ON((chip->bbt_options & NAND_BBT_NO_OOB_BBM) &&
-                  !(chip->bbt_options & NAND_BBT_USE_FLASH)))
-               return -EINVAL;
+                  !(chip->bbt_options & NAND_BBT_USE_FLASH))) {
+               ret = -EINVAL;
+               goto err_ident;
+       }
 
        if (invalid_ecc_page_accessors(chip)) {
                pr_err("Invalid ECC page accessors setup\n");
-               return -EINVAL;
+               ret = -EINVAL;
+               goto err_ident;
        }
 
        if (!(chip->options & NAND_OWN_BUFFERS)) {
                nbuf = kzalloc(sizeof(*nbuf), GFP_KERNEL);
-               if (!nbuf)
-                       return -ENOMEM;
+               if (!nbuf) {
+                       ret = -ENOMEM;
+                       goto err_ident;
+               }
 
                nbuf->ecccalc = kmalloc(mtd->oobsize, GFP_KERNEL);
                if (!nbuf->ecccalc) {
@@ -4608,8 +4619,10 @@ int nand_scan_tail(struct mtd_info *mtd)
 
                chip->buffers = nbuf;
        } else {
-               if (!chip->buffers)
-                       return -ENOMEM;
+               if (!chip->buffers) {
+                       ret = -ENOMEM;
+                       goto err_ident;
+               }
        }
 
        /* Set the internal oob buffer location, just after the page data */
@@ -4842,7 +4855,11 @@ int nand_scan_tail(struct mtd_info *mtd)
                return 0;
 
        /* Build bad block table */
-       return chip->scan_bbt(mtd);
+       ret = chip->scan_bbt(mtd);
+       if (ret)
+               goto err_free;
+       return 0;
+
 err_free:
        if (nbuf) {
                kfree(nbuf->databuf);
@@ -4850,6 +4867,13 @@ err_free:
                kfree(nbuf->ecccalc);
                kfree(nbuf);
        }
+
+err_ident:
+       /* Clean up nand_scan_ident(). */
+
+       /* Free manufacturer priv data. */
+       nand_manufacturer_cleanup(chip);
+
        return ret;
 }
 EXPORT_SYMBOL(nand_scan_tail);
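Both nand_base.c hunks convert early returns into jumps to ordered cleanup labels, so state set up by nand_scan_ident() (the manufacturer private data) is released on every failure path. The generic shape of this goto-ladder idiom, with illustrative resource names:

#include <stdio.h>

static int  acquire_a(void) { puts("a+"); return 0; }
static int  acquire_b(void) { puts("b+"); return 0; }
static int  acquire_c(void) { puts("c!"); return -1; }  /* simulated failure */
static void release_a(void) { puts("a-"); }
static void release_b(void) { puts("b-"); }

static int setup(void)
{
        int ret;

        ret = acquire_a();
        if (ret)
                return ret;             /* nothing to unwind yet */

        ret = acquire_b();
        if (ret)
                goto err_a;

        ret = acquire_c();
        if (ret)
                goto err_b;

        return 0;

err_b:
        release_b();                    /* unwind in reverse order */
err_a:
        release_a();
        return ret;
}

int main(void)
{
        return setup() ? 1 : 0;
}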
index 9d5ca0e540b5bc5c9e3be9a6a3b476624e63be14..92e2cf8e9ff9066860973caaf60ecb95e2d4a599 100644 (file)
@@ -6,7 +6,6 @@
  * published by the Free Software Foundation.
  *
  */
-#include <linux/module.h>
 #include <linux/mtd/nand.h>
 #include <linux/sizes.h>
 
index 9cfc4035a420a3eae2d2a451c97f5300f8bc8b80..1e0755997762aa23ee953ad20f2a88d24dc7139c 100644 (file)
@@ -84,6 +84,9 @@ static void samsung_nand_decode_id(struct nand_chip *chip)
                        case 7:
                                chip->ecc_strength_ds = 60;
                                break;
+                       default:
+                               WARN(1, "Could not decode ECC info");
+                               chip->ecc_step_ds = 0;
                        }
                }
        } else {
index 05b6e106520331ddd48f619f798d21c4cfeb2059..49b286c6c10fc85e5ee7e75f4dd10d231c86c73f 100644 (file)
  * byte 1 for other packets in the page (PKT_N, for N > 0)
  * ERR_COUNT_PKT_N is the max error count over all but the first packet.
  */
-#define DECODE_OK_PKT_0(v)     ((v) & BIT(7))
-#define DECODE_OK_PKT_N(v)     ((v) & BIT(15))
 #define ERR_COUNT_PKT_0(v)     (((v) >> 0) & 0x3f)
 #define ERR_COUNT_PKT_N(v)     (((v) >> 8) & 0x3f)
+#define DECODE_FAIL_PKT_0(v)   (((v) & BIT(7)) == 0)
+#define DECODE_FAIL_PKT_N(v)   (((v) & BIT(15)) == 0)
 
 /* Offsets relative to pbus_base */
 #define PBUS_CS_CTRL   0x83c
@@ -193,6 +193,8 @@ static int check_erased_page(struct nand_chip *chip, u8 *buf)
                                                  chip->ecc.strength);
                if (res < 0)
                        mtd->ecc_stats.failed++;
+               else
+                       mtd->ecc_stats.corrected += res;
 
                bitflips = max(res, bitflips);
                buf += pkt_size;
@@ -202,9 +204,11 @@ static int check_erased_page(struct nand_chip *chip, u8 *buf)
        return bitflips;
 }
 
-static int decode_error_report(struct tango_nfc *nfc)
+static int decode_error_report(struct nand_chip *chip)
 {
        u32 status, res;
+       struct mtd_info *mtd = nand_to_mtd(chip);
+       struct tango_nfc *nfc = to_tango_nfc(chip->controller);
 
        status = readl_relaxed(nfc->reg_base + NFC_XFER_STATUS);
        if (status & PAGE_IS_EMPTY)
@@ -212,10 +216,14 @@ static int decode_error_report(struct tango_nfc *nfc)
 
        res = readl_relaxed(nfc->mem_base + ERROR_REPORT);
 
-       if (DECODE_OK_PKT_0(res) && DECODE_OK_PKT_N(res))
-               return max(ERR_COUNT_PKT_0(res), ERR_COUNT_PKT_N(res));
+       if (DECODE_FAIL_PKT_0(res) || DECODE_FAIL_PKT_N(res))
+               return -EBADMSG;
+
+       /* ERR_COUNT_PKT_N is the max over packets, not the sum, but it's all we have */
+       mtd->ecc_stats.corrected +=
+               ERR_COUNT_PKT_0(res) + ERR_COUNT_PKT_N(res);
 
-       return -EBADMSG;
+       return max(ERR_COUNT_PKT_0(res), ERR_COUNT_PKT_N(res));
 }
 
 static void tango_dma_callback(void *arg)
@@ -282,7 +290,7 @@ static int tango_read_page(struct mtd_info *mtd, struct nand_chip *chip,
        if (err)
                return err;
 
-       res = decode_error_report(nfc);
+       res = decode_error_report(chip);
        if (res < 0) {
                chip->ecc.read_oob_raw(mtd, chip, page);
                res = check_erased_page(chip, buf);
@@ -663,6 +671,7 @@ static const struct of_device_id tango_nand_ids[] = {
        { .compatible = "sigma,smp8758-nand" },
        { /* sentinel */ }
 };
+MODULE_DEVICE_TABLE(of, tango_nand_ids);
 
 static struct platform_driver tango_nand_driver = {
        .probe  = tango_nand_probe,
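Adding MODULE_DEVICE_TABLE(of, ...) exports the OF match table into the module's alias list, which is what lets udev/modprobe autoload the driver when a matching device-tree node appears. A minimal sketch with a hypothetical binding:

#include <linux/module.h>
#include <linux/of.h>

static const struct of_device_id demo_ids[] = {
        { .compatible = "vendor,demo-ip" },     /* hypothetical binding */
        { /* sentinel */ }
};
/* exports the table into the module's aliases so modprobe can match
 * "of:N*T*Cvendor,demo-ip*" and autoload the driver */
MODULE_DEVICE_TABLE(of, demo_ids);

MODULE_LICENSE("GPL");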
index 73313318399c7b06b116b12e37b0b1b0277c4345..2359478b977f0e008335e51dc8f63adc2dc35087 100644 (file)
@@ -2612,11 +2612,13 @@ static void bond_loadbalance_arp_mon(struct bonding *bond)
        bond_for_each_slave_rcu(bond, slave, iter) {
                unsigned long trans_start = dev_trans_start(slave->dev);
 
+               slave->new_link = BOND_LINK_NOCHANGE;
+
                if (slave->link != BOND_LINK_UP) {
                        if (bond_time_in_interval(bond, trans_start, 1) &&
                            bond_time_in_interval(bond, slave->last_rx, 1)) {
 
-                               slave->link  = BOND_LINK_UP;
+                               slave->new_link = BOND_LINK_UP;
                                slave_state_changed = 1;
 
                                /* primary_slave has no meaning in round-robin
@@ -2643,7 +2645,7 @@ static void bond_loadbalance_arp_mon(struct bonding *bond)
                        if (!bond_time_in_interval(bond, trans_start, 2) ||
                            !bond_time_in_interval(bond, slave->last_rx, 2)) {
 
-                               slave->link  = BOND_LINK_DOWN;
+                               slave->new_link = BOND_LINK_DOWN;
                                slave_state_changed = 1;
 
                                if (slave->link_failure_count < UINT_MAX)
@@ -2674,6 +2676,11 @@ static void bond_loadbalance_arp_mon(struct bonding *bond)
                if (!rtnl_trylock())
                        goto re_arm;
 
+               bond_for_each_slave(bond, slave, iter) {
+                       if (slave->new_link != BOND_LINK_NOCHANGE)
+                               slave->link = slave->new_link;
+               }
+
                if (slave_state_changed) {
                        bond_slave_state_change(bond);
                        if (BOND_MODE(bond) == BOND_MODE_XOR)
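The bonding fix stops flipping slave->link inside the lockless RCU scan; transitions are staged in slave->new_link and committed only once rtnl_trylock() succeeds, so the state the notifier chain observes always changes under RTNL. A stand-alone sketch of the two-phase pattern:

#include <stdio.h>

enum link_state { NOCHANGE, UP, DOWN };

struct slave {
        enum link_state link, new_link;
};

/* phase 1: the lockless scan only records the intended transition */
static void scan(struct slave *s, int n)
{
        for (int i = 0; i < n; i++) {
                s[i].new_link = NOCHANGE;
                if (i == 0)             /* pretend slave 0 lost its link */
                        s[i].new_link = DOWN;
        }
}

/* phase 2: commit under the lock the notifiers require */
static void commit(struct slave *s, int n)
{
        for (int i = 0; i < n; i++)
                if (s[i].new_link != NOCHANGE)
                        s[i].link = s[i].new_link;
}

int main(void)
{
        struct slave slaves[2] = { { UP, NOCHANGE }, { UP, NOCHANGE } };

        scan(slaves, 2);
        commit(slaves, 2);
        printf("slave0=%d slave1=%d\n", slaves[0].link, slaves[1].link);
        return 0;
}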
index b0a3b85fc6f8d346f3a7d421b094b491d022dd12..db02bc2fb4b2d634ed454cedfcf57692e9b941c0 100644 (file)
@@ -748,13 +748,13 @@ static int ax_init_dev(struct net_device *dev)
 
        ret = ax_mii_init(dev);
        if (ret)
-               goto out_irq;
+               goto err_out;
 
        ax_NS8390_init(dev, 0);
 
        ret = register_netdev(dev);
        if (ret)
-               goto out_irq;
+               goto err_out;
 
        netdev_info(dev, "%dbit, irq %d, %lx, MAC: %pM\n",
                    ei_local->word16 ? 16 : 8, dev->irq, dev->base_addr,
@@ -762,9 +762,6 @@ static int ax_init_dev(struct net_device *dev)
 
        return 0;
 
- out_irq:
-       /* cleanup irq */
-       free_irq(dev->irq, dev);
  err_out:
        return ret;
 }
index f3a09ab559004b80106e957674cb0d6acbf3ea65..4eee18ce9be46b5e1dc35167b51af634a2070e60 100644 (file)
@@ -5078,9 +5078,11 @@ static netdev_features_t be_features_check(struct sk_buff *skb,
        struct be_adapter *adapter = netdev_priv(dev);
        u8 l4_hdr = 0;
 
-       /* The code below restricts offload features for some tunneled packets.
+       /* The code below restricts offload features for some tunneled and
+        * Q-in-Q packets.
         * Offload features for normal (non tunnel) packets are unchanged.
         */
+       features = vlan_features_check(skb, features);
        if (!skb->encapsulation ||
            !(adapter->flags & BE_FLAGS_VXLAN_OFFLOADS))
                return features;
index 56a563f90b0bf51d189dac8eb7da14791cc2b078..f7c8649fd28f695a1ff0519a85491bf85ec11f0b 100644 (file)
@@ -3192,7 +3192,7 @@ static int fec_reset_phy(struct platform_device *pdev)
 {
        int err, phy_reset;
        bool active_high = false;
-       int msec = 1;
+       int msec = 1, phy_post_delay = 0;
        struct device_node *np = pdev->dev.of_node;
 
        if (!np)
@@ -3209,6 +3209,11 @@ static int fec_reset_phy(struct platform_device *pdev)
        else if (!gpio_is_valid(phy_reset))
                return 0;
 
+       err = of_property_read_u32(np, "phy-reset-post-delay", &phy_post_delay);
+       /* a valid post-reset delay should be less than 1s */
+       if (!err && phy_post_delay > 1000)
+               return -EINVAL;
+
        active_high = of_property_read_bool(np, "phy-reset-active-high");
 
        err = devm_gpio_request_one(&pdev->dev, phy_reset,
@@ -3226,6 +3231,15 @@ static int fec_reset_phy(struct platform_device *pdev)
 
        gpio_set_value_cansleep(phy_reset, !active_high);
 
+       if (!phy_post_delay)
+               return 0;
+
+       if (phy_post_delay > 20)
+               msleep(phy_post_delay);
+       else
+               usleep_range(phy_post_delay * 1000,
+                            phy_post_delay * 1000 + 1000);
+
        return 0;
 }
 #else /* CONFIG_OF */
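The added phy-reset-post-delay handling follows the kernel's usual sleeping heuristic: delays above roughly 20 ms tolerate msleep() jitter, shorter ones use usleep_range() with about 1 ms of slack, and anything over 1 s is rejected as implausible for a PHY reset. The same heuristic as a small helper (a sketch, not the driver's code):

#include <linux/delay.h>

/* mirrors the driver's heuristic; the ~20 ms threshold follows
 * Documentation/timers/timers-howto.txt */
static void flexible_delay_ms(unsigned int ms)
{
        if (!ms)
                return;
        if (ms > 20)
                msleep(ms);
        else
                usleep_range(ms * 1000, ms * 1000 + 1000);
}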
index 5bdaf3d545b2fc656a318d5b562f940e14ecd9d9..10d282841f5be16c0957c72111c68baf6a452ca9 100644 (file)
@@ -774,7 +774,7 @@ static void cb_timeout_handler(struct work_struct *work)
        mlx5_core_warn(dev, "%s(0x%x) timeout. Will cause a leak of a command resource\n",
                       mlx5_command_str(msg_to_opcode(ent->in)),
                       msg_to_opcode(ent->in));
-       mlx5_cmd_comp_handler(dev, 1UL << ent->idx);
+       mlx5_cmd_comp_handler(dev, 1UL << ent->idx, true);
 }
 
 static void cmd_work_handler(struct work_struct *work)
@@ -804,6 +804,7 @@ static void cmd_work_handler(struct work_struct *work)
        }
 
        cmd->ent_arr[ent->idx] = ent;
+       set_bit(MLX5_CMD_ENT_STATE_PENDING_COMP, &ent->state);
        lay = get_inst(cmd, ent->idx);
        ent->lay = lay;
        memset(lay, 0, sizeof(*lay));
@@ -825,6 +826,20 @@ static void cmd_work_handler(struct work_struct *work)
        if (ent->callback)
                schedule_delayed_work(&ent->cb_timeout_work, cb_timeout);
 
+       /* Skip sending command to fw if internal error */
+       if (pci_channel_offline(dev->pdev) ||
+           dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
+               u8 status = 0;
+               u32 drv_synd;
+
+               ent->ret = mlx5_internal_err_ret_value(dev, msg_to_opcode(ent->in), &drv_synd, &status);
+               MLX5_SET(mbox_out, ent->out, status, status);
+               MLX5_SET(mbox_out, ent->out, syndrome, drv_synd);
+
+               mlx5_cmd_comp_handler(dev, 1UL << ent->idx, true);
+               return;
+       }
+
        /* ring doorbell after the descriptor is valid */
        mlx5_core_dbg(dev, "writing 0x%x to command doorbell\n", 1 << ent->idx);
        wmb();
@@ -835,7 +850,7 @@ static void cmd_work_handler(struct work_struct *work)
                poll_timeout(ent);
                /* make sure we read the descriptor after ownership is SW */
                rmb();
-               mlx5_cmd_comp_handler(dev, 1UL << ent->idx);
+               mlx5_cmd_comp_handler(dev, 1UL << ent->idx, (ent->ret == -ETIMEDOUT));
        }
 }
 
@@ -879,7 +894,7 @@ static int wait_func(struct mlx5_core_dev *dev, struct mlx5_cmd_work_ent *ent)
                wait_for_completion(&ent->done);
        } else if (!wait_for_completion_timeout(&ent->done, timeout)) {
                ent->ret = -ETIMEDOUT;
-               mlx5_cmd_comp_handler(dev, 1UL << ent->idx);
+               mlx5_cmd_comp_handler(dev, 1UL << ent->idx, true);
        }
 
        err = ent->ret;
@@ -1375,7 +1390,7 @@ static void free_msg(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *msg)
        }
 }
 
-void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec)
+void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec, bool forced)
 {
        struct mlx5_cmd *cmd = &dev->cmd;
        struct mlx5_cmd_work_ent *ent;
@@ -1395,6 +1410,19 @@ void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec)
                        struct semaphore *sem;
 
                        ent = cmd->ent_arr[i];
+
+                       /* if we already completed the command, ignore it */
+                       if (!test_and_clear_bit(MLX5_CMD_ENT_STATE_PENDING_COMP,
+                                               &ent->state)) {
+                               /* only real completion can free the cmd slot */
+                               if (!forced) {
+                                       mlx5_core_err(dev, "Command completion arrived after timeout (entry idx = %d).\n",
+                                                     ent->idx);
+                                       free_ent(cmd, ent->idx);
+                               }
+                               continue;
+                       }
+
                        if (ent->callback)
                                cancel_delayed_work(&ent->cb_timeout_work);
                        if (ent->page_queue)
@@ -1417,7 +1445,10 @@ void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec)
                                mlx5_core_dbg(dev, "command completed. ret 0x%x, delivery status %s(0x%x)\n",
                                              ent->ret, deliv_status_to_str(ent->status), ent->status);
                        }
-                       free_ent(cmd, ent->idx);
+
+                       /* only real completion will free the entry slot */
+                       if (!forced)
+                               free_ent(cmd, ent->idx);
 
                        if (ent->callback) {
                                ds = ent->ts2 - ent->ts1;
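The mlx5 command-interface fix introduces a pending-completion bit so a forced (driver-generated) completion and a late real completion cannot both run the handler or double-free the command slot: test_and_clear_bit() decides the winner, and only the real path (!forced) releases the slot. A reduced sketch of the arbitration (names shortened):

#include <linux/bitops.h>
#include <linux/types.h>

#define ENT_PENDING_COMP 0      /* stands in for MLX5_CMD_ENT_STATE_PENDING_COMP */

struct cmd_ent {
        unsigned long state;
};

static void free_slot(struct cmd_ent *ent)
{
        /* release the command index back to the allocator (stub) */
}

/* Both the forced (timeout/teardown) and real (EQ interrupt) completion
 * paths call this; test_and_clear_bit() lets exactly one of them win. */
static void complete_ent(struct cmd_ent *ent, bool forced)
{
        if (!test_and_clear_bit(ENT_PENDING_COMP, &ent->state)) {
                /* already completed; only a late real completion
                 * may still free the slot */
                if (!forced)
                        free_slot(ent);
                return;
        }
        /* ...normal completion work goes here... */
        if (!forced)
                free_slot(ent);
}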
index 7b1566f0ae58c7c64e3313ed4f6f3fb83bafee5c..66b5fec1531349d14b5324f68a28746cecdf1918 100644 (file)
@@ -1041,6 +1041,8 @@ void mlx5e_free_xdpsq_descs(struct mlx5e_xdpsq *sq)
 #define MLX5_IB_GRH_BYTES       40
 #define MLX5_IPOIB_ENCAP_LEN    4
 #define MLX5_GID_SIZE           16
+#define MLX5_IPOIB_PSEUDO_LEN   20
+#define MLX5_IPOIB_HARD_LEN     (MLX5_IPOIB_PSEUDO_LEN + MLX5_IPOIB_ENCAP_LEN)
 
 static inline void mlx5i_complete_rx_cqe(struct mlx5e_rq *rq,
                                         struct mlx5_cqe64 *cqe,
@@ -1048,6 +1050,7 @@ static inline void mlx5i_complete_rx_cqe(struct mlx5e_rq *rq,
                                         struct sk_buff *skb)
 {
        struct net_device *netdev = rq->netdev;
+       char *pseudo_header;
        u8 *dgid;
        u8 g;
 
@@ -1076,8 +1079,11 @@ static inline void mlx5i_complete_rx_cqe(struct mlx5e_rq *rq,
        if (likely(netdev->features & NETIF_F_RXHASH))
                mlx5e_skb_set_hash(cqe, skb);
 
+       /* 20 bytes of ipoib pseudo header plus the 4 existing encap bytes */
+       pseudo_header = skb_push(skb, MLX5_IPOIB_PSEUDO_LEN);
+       memset(pseudo_header, 0, MLX5_IPOIB_PSEUDO_LEN);
        skb_reset_mac_header(skb);
-       skb_pull(skb, MLX5_IPOIB_ENCAP_LEN);
+       skb_pull(skb, MLX5_IPOIB_HARD_LEN);
 
        skb->dev = netdev;
 
index 11c27e4fadf6e09400a432c5498749df73265477..ec63158ab64330c939ffa4ca8c001f8134f8e4e2 100644 (file)
@@ -43,6 +43,7 @@
 #include <net/tc_act/tc_vlan.h>
 #include <net/tc_act/tc_tunnel_key.h>
 #include <net/tc_act/tc_pedit.h>
+#include <net/tc_act/tc_csum.h>
 #include <net/vxlan.h>
 #include <net/arp.h>
 #include "en.h"
@@ -384,7 +385,7 @@ static void mlx5e_detach_encap(struct mlx5e_priv *priv,
                if (e->flags & MLX5_ENCAP_ENTRY_VALID)
                        mlx5_encap_dealloc(priv->mdev, e->encap_id);
 
-               hlist_del_rcu(&e->encap_hlist);
+               hash_del_rcu(&e->encap_hlist);
                kfree(e->encap_header);
                kfree(e);
        }
@@ -925,11 +926,11 @@ static int offload_pedit_fields(struct pedit_headers *masks,
                                struct mlx5e_tc_flow_parse_attr *parse_attr)
 {
        struct pedit_headers *set_masks, *add_masks, *set_vals, *add_vals;
-       int i, action_size, nactions, max_actions, first, last;
+       int i, action_size, nactions, max_actions, first, last, first_z;
        void *s_masks_p, *a_masks_p, *vals_p;
-       u32 s_mask, a_mask, val;
        struct mlx5_fields *f;
        u8 cmd, field_bsize;
+       u32 s_mask, a_mask;
        unsigned long mask;
        void *action;
 
@@ -946,7 +947,8 @@ static int offload_pedit_fields(struct pedit_headers *masks,
        for (i = 0; i < ARRAY_SIZE(fields); i++) {
                f = &fields[i];
                /* avoid seeing bits set from previous iterations */
-               s_mask = a_mask = mask = val = 0;
+               s_mask = 0;
+               a_mask = 0;
 
                s_masks_p = (void *)set_masks + f->offset;
                a_masks_p = (void *)add_masks + f->offset;
@@ -981,12 +983,12 @@ static int offload_pedit_fields(struct pedit_headers *masks,
                        memset(a_masks_p, 0, f->size);
                }
 
-               memcpy(&val, vals_p, f->size);
-
                field_bsize = f->size * BITS_PER_BYTE;
+
+               first_z = find_first_zero_bit(&mask, field_bsize);
                first = find_first_bit(&mask, field_bsize);
                last  = find_last_bit(&mask, field_bsize);
-               if (first > 0 || last != (field_bsize - 1)) {
+               if (first > 0 || last != (field_bsize - 1) || first_z < last) {
                        printk(KERN_WARNING "mlx5: partial rewrite (mask %lx) is currently not offloaded\n",
                               mask);
                        return -EOPNOTSUPP;
@@ -1002,11 +1004,11 @@ static int offload_pedit_fields(struct pedit_headers *masks,
                }
 
                if (field_bsize == 32)
-                       MLX5_SET(set_action_in, action, data, ntohl(val));
+                       MLX5_SET(set_action_in, action, data, ntohl(*(__be32 *)vals_p));
                else if (field_bsize == 16)
-                       MLX5_SET(set_action_in, action, data, ntohs(val));
+                       MLX5_SET(set_action_in, action, data, ntohs(*(__be16 *)vals_p));
                else if (field_bsize == 8)
-                       MLX5_SET(set_action_in, action, data, val);
+                       MLX5_SET(set_action_in, action, data, *(u8 *)vals_p);
 
                action += action_size;
                nactions++;
@@ -1109,6 +1111,28 @@ out_err:
        return err;
 }
 
+static bool csum_offload_supported(struct mlx5e_priv *priv, u32 action, u32 update_flags)
+{
+       u32 prot_flags = TCA_CSUM_UPDATE_FLAG_IPV4HDR | TCA_CSUM_UPDATE_FLAG_TCP |
+                        TCA_CSUM_UPDATE_FLAG_UDP;
+
+       /* The HW recalculates checksums only when rewriting headers */
+       if (!(action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)) {
+               netdev_warn(priv->netdev,
+                           "TC csum action is only offloaded with pedit\n");
+               return false;
+       }
+
+       if (update_flags & ~prot_flags) {
+               netdev_warn(priv->netdev,
+                           "can't offload TC csum action for some header/s - flags %#x\n",
+                           update_flags);
+               return false;
+       }
+
+       return true;
+}
+
 static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
                                struct mlx5e_tc_flow_parse_attr *parse_attr,
                                struct mlx5e_tc_flow *flow)
@@ -1149,6 +1173,14 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
                        continue;
                }
 
+               if (is_tcf_csum(a)) {
+                       if (csum_offload_supported(priv, attr->action,
+                                                  tcf_csum_update_flags(a)))
+                               continue;
+
+                       return -EOPNOTSUPP;
+               }
+
                if (is_tcf_skbedit_mark(a)) {
                        u32 mark = tcf_skbedit_mark(a);
 
@@ -1651,6 +1683,14 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
                        continue;
                }
 
+               if (is_tcf_csum(a)) {
+                       if (csum_offload_supported(priv, attr->action,
+                                                  tcf_csum_update_flags(a)))
+                               continue;
+
+                       return -EOPNOTSUPP;
+               }
+
                if (is_tcf_mirred_egress_redirect(a)) {
                        int ifindex = tcf_mirred_ifindex(a);
                        struct net_device *out_dev, *encap_dev = NULL;
index ea5d8d37a75c465cf022a4c29d89918802f97bbb..33eae5ad2fb09efe302e2b892c3c9e30d4b117f5 100644 (file)
@@ -422,7 +422,7 @@ static irqreturn_t mlx5_eq_int(int irq, void *eq_ptr)
                        break;
 
                case MLX5_EVENT_TYPE_CMD:
-                       mlx5_cmd_comp_handler(dev, be32_to_cpu(eqe->data.cmd.vector));
+                       mlx5_cmd_comp_handler(dev, be32_to_cpu(eqe->data.cmd.vector), false);
                        break;
 
                case MLX5_EVENT_TYPE_PORT_CHANGE:
index d0515391d33bbc57961311f648ed5ef25f457c28..44f59b1d6f0f27f7bb4f818b11f341af28ba09dc 100644 (file)
@@ -90,7 +90,7 @@ static void trigger_cmd_completions(struct mlx5_core_dev *dev)
        spin_unlock_irqrestore(&dev->cmd.alloc_lock, flags);
 
        mlx5_core_dbg(dev, "vector 0x%llx\n", vector);
-       mlx5_cmd_comp_handler(dev, vector);
+       mlx5_cmd_comp_handler(dev, vector, true);
        return;
 
 no_trig:
index 0c123d571b4cf52a0eb3b833208156766b5de6be..fe5546bb41537f0af0c4bcfe9054ccceaa42bbb2 100644 (file)
@@ -612,7 +612,6 @@ static int mlx5_irq_set_affinity_hint(struct mlx5_core_dev *mdev, int i)
        struct mlx5_priv *priv  = &mdev->priv;
        struct msix_entry *msix = priv->msix_arr;
        int irq                 = msix[i + MLX5_EQ_VEC_COMP_BASE].vector;
-       int err;
 
        if (!zalloc_cpumask_var(&priv->irq_info[i].mask, GFP_KERNEL)) {
                mlx5_core_warn(mdev, "zalloc_cpumask_var failed");
@@ -622,18 +621,12 @@ static int mlx5_irq_set_affinity_hint(struct mlx5_core_dev *mdev, int i)
        cpumask_set_cpu(cpumask_local_spread(i, priv->numa_node),
                        priv->irq_info[i].mask);
 
-       err = irq_set_affinity_hint(irq, priv->irq_info[i].mask);
-       if (err) {
-               mlx5_core_warn(mdev, "irq_set_affinity_hint failed,irq 0x%.4x",
-                              irq);
-               goto err_clear_mask;
-       }
+#ifdef CONFIG_SMP
+       if (irq_set_affinity_hint(irq, priv->irq_info[i].mask))
+               mlx5_core_warn(mdev, "irq_set_affinity_hint failed, irq 0x%.4x", irq);
+#endif
 
        return 0;
-
-err_clear_mask:
-       free_cpumask_var(priv->irq_info[i].mask);
-       return err;
 }
 
 static void mlx5_irq_clear_affinity_hint(struct mlx5_core_dev *mdev, int i)
index dec5d563ab19091cdadd422eed9af8268ce5805e..959fd12d2e670dfa52d7d9d11f835e990c82aa7c 100644 (file)
@@ -1293,7 +1293,7 @@ static int geneve_fill_info(struct sk_buff *skb, const struct net_device *dev)
        if (nla_put_u32(skb, IFLA_GENEVE_ID, vni))
                goto nla_put_failure;
 
-       if (ip_tunnel_info_af(info) == AF_INET) {
+       if (rtnl_dereference(geneve->sock4)) {
                if (nla_put_in_addr(skb, IFLA_GENEVE_REMOTE,
                                    info->key.u.ipv4.dst))
                        goto nla_put_failure;
@@ -1302,8 +1302,10 @@ static int geneve_fill_info(struct sk_buff *skb, const struct net_device *dev)
                               !!(info->key.tun_flags & TUNNEL_CSUM)))
                        goto nla_put_failure;
 
+       }
+
 #if IS_ENABLED(CONFIG_IPV6)
-       } else {
+       if (rtnl_dereference(geneve->sock6)) {
                if (nla_put_in6_addr(skb, IFLA_GENEVE_REMOTE6,
                                     &info->key.u.ipv6.dst))
                        goto nla_put_failure;
@@ -1315,8 +1317,8 @@ static int geneve_fill_info(struct sk_buff *skb, const struct net_device *dev)
                if (nla_put_u8(skb, IFLA_GENEVE_UDP_ZERO_CSUM6_RX,
                               !geneve->use_udp6_rx_checksums))
                        goto nla_put_failure;
-#endif
        }
+#endif
 
        if (nla_put_u8(skb, IFLA_GENEVE_TTL, info->key.ttl) ||
            nla_put_u8(skb, IFLA_GENEVE_TOS, info->key.tos) ||
index 4fea1b3dfbb4457247c4bc98dc9378591c36ecd3..7b652bb7ebe407b35c054009b521b173fd9fa361 100644 (file)
@@ -873,7 +873,7 @@ static struct gtp_dev *gtp_find_dev(struct net *src_net, struct nlattr *nla[])
 
        /* Check if there's an existing gtpX device to configure */
        dev = dev_get_by_index_rcu(net, nla_get_u32(nla[GTPA_LINK]));
-       if (dev->netdev_ops == &gtp_netdev_ops)
+       if (dev && dev->netdev_ops == &gtp_netdev_ops)
                gtp = netdev_priv(dev);
 
        put_net(net);
index 60ffc9da6a286272d84e8dc3f4326eb908e7961a..c360dd6ead2213b112282ff508b963354cf15d72 100644 (file)
@@ -108,7 +108,7 @@ config MDIO_MOXART
 config MDIO_OCTEON
        tristate "Octeon and some ThunderX SOCs MDIO buses"
        depends on 64BIT
-       depends on HAS_IOMEM
+       depends on HAS_IOMEM && OF_MDIO
        select MDIO_CAVIUM
        help
          This module provides a driver for the Octeon and ThunderX MDIO
index 272b051a019975110aa1d117da993cf18cb98816..9097e42bec2e42d8ee864edea4a6be5d8c0a92cc 100644 (file)
@@ -255,34 +255,6 @@ static int marvell_config_aneg(struct phy_device *phydev)
 {
        int err;
 
-       /* The Marvell PHY has an errata which requires
-        * that certain registers get written in order
-        * to restart autonegotiation */
-       err = phy_write(phydev, MII_BMCR, BMCR_RESET);
-
-       if (err < 0)
-               return err;
-
-       err = phy_write(phydev, 0x1d, 0x1f);
-       if (err < 0)
-               return err;
-
-       err = phy_write(phydev, 0x1e, 0x200c);
-       if (err < 0)
-               return err;
-
-       err = phy_write(phydev, 0x1d, 0x5);
-       if (err < 0)
-               return err;
-
-       err = phy_write(phydev, 0x1e, 0);
-       if (err < 0)
-               return err;
-
-       err = phy_write(phydev, 0x1e, 0x100);
-       if (err < 0)
-               return err;
-
        err = marvell_set_polarity(phydev, phydev->mdix_ctrl);
        if (err < 0)
                return err;
@@ -316,6 +288,42 @@ static int marvell_config_aneg(struct phy_device *phydev)
        return 0;
 }
 
+static int m88e1101_config_aneg(struct phy_device *phydev)
+{
+       int err;
+
+       /* This Marvell PHY has an erratum which requires
+        * that certain registers get written in order
+        * to restart autonegotiation
+        */
+       err = phy_write(phydev, MII_BMCR, BMCR_RESET);
+
+       if (err < 0)
+               return err;
+
+       err = phy_write(phydev, 0x1d, 0x1f);
+       if (err < 0)
+               return err;
+
+       err = phy_write(phydev, 0x1e, 0x200c);
+       if (err < 0)
+               return err;
+
+       err = phy_write(phydev, 0x1d, 0x5);
+       if (err < 0)
+               return err;
+
+       err = phy_write(phydev, 0x1e, 0);
+       if (err < 0)
+               return err;
+
+       err = phy_write(phydev, 0x1e, 0x100);
+       if (err < 0)
+               return err;
+
+       return marvell_config_aneg(phydev);
+}
+
 static int m88e1111_config_aneg(struct phy_device *phydev)
 {
        int err;
@@ -1892,7 +1900,7 @@ static struct phy_driver marvell_drivers[] = {
                .flags = PHY_HAS_INTERRUPT,
                .probe = marvell_probe,
                .config_init = &marvell_config_init,
-               .config_aneg = &marvell_config_aneg,
+               .config_aneg = &m88e1101_config_aneg,
                .read_status = &genphy_read_status,
                .ack_interrupt = &marvell_ack_interrupt,
                .config_intr = &marvell_config_intr,
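Moving the register-write sequence into m88e1101_config_aneg() means only the PHY affected by the erratum pays for the workaround, while every other Marvell PHY keeps the plain marvell_config_aneg() path. The general shape of such a quirk wrapper, sketched with stand-in functions:

static int apply_erratum_writes(void)
{
        return 0;       /* stand-in for the model-specific register dance */
}

static int generic_config_aneg(void)
{
        return 0;       /* stand-in for the family-wide path */
}

static int quirky_config_aneg(void)
{
        int err = apply_erratum_writes();

        if (err < 0)
                return err;
        /* once the workaround is applied, defer to the common code */
        return generic_config_aneg();
}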
index f3ae88fdf332e890ac8273e3df1ca7dd53092c07..8ab281b478f23bd98d71b896a0c00c4fdba7dacc 100644 (file)
@@ -310,6 +310,26 @@ skip:
                return -ENODEV;
        }
 
+       return 0;
+
+bad_desc:
+       dev_info(&dev->udev->dev, "bad CDC descriptors\n");
+       return -ENODEV;
+}
+EXPORT_SYMBOL_GPL(usbnet_generic_cdc_bind);
+
+
+/* like usbnet_generic_cdc_bind() but handles filter initialization
+ * correctly
+ */
+int usbnet_ether_cdc_bind(struct usbnet *dev, struct usb_interface *intf)
+{
+       int rv;
+
+       rv = usbnet_generic_cdc_bind(dev, intf);
+       if (rv < 0)
+               goto bail_out;
+
        /* Some devices don't initialise properly. In particular
         * the packet filter is not reset. There are devices that
         * don't reset all the way. So the packet filter should
@@ -317,13 +337,10 @@ skip:
         */
        usbnet_cdc_update_filter(dev);
 
-       return 0;
-
-bad_desc:
-       dev_info(&dev->udev->dev, "bad CDC descriptors\n");
-       return -ENODEV;
+bail_out:
+       return rv;
 }
-EXPORT_SYMBOL_GPL(usbnet_generic_cdc_bind);
+EXPORT_SYMBOL_GPL(usbnet_ether_cdc_bind);
 
 void usbnet_cdc_unbind(struct usbnet *dev, struct usb_interface *intf)
 {
@@ -417,7 +434,7 @@ int usbnet_cdc_bind(struct usbnet *dev, struct usb_interface *intf)
        BUILD_BUG_ON((sizeof(((struct usbnet *)0)->data)
                        < sizeof(struct cdc_state)));
 
-       status = usbnet_generic_cdc_bind(dev, intf);
+       status = usbnet_ether_cdc_bind(dev, intf);
        if (status < 0)
                return status;
 
index 9320d96a1632bbebe8bd1d4a04059e0df631ac19..3e9246cc49c3784ebc045868a53318d35bf01075 100644 (file)
@@ -1989,6 +1989,7 @@ static const struct net_device_ops virtnet_netdev = {
        .ndo_poll_controller = virtnet_netpoll,
 #endif
        .ndo_xdp                = virtnet_xdp,
+       .ndo_features_check     = passthru_features_check,
 };
 
 static void virtnet_config_changed_work(struct work_struct *work)
index d5e0906262ead5dd9f202385d7ace81444c25a0f..a60926410438b98c2e414de081f7c8093bac5862 100644 (file)
@@ -925,6 +925,29 @@ static int nvme_getgeo(struct block_device *bdev, struct hd_geometry *geo)
 }
 
 #ifdef CONFIG_BLK_DEV_INTEGRITY
+static void nvme_prep_integrity(struct gendisk *disk, struct nvme_id_ns *id,
+               u16 bs)
+{
+       struct nvme_ns *ns = disk->private_data;
+       u16 old_ms = ns->ms;
+       u8 pi_type = 0;
+
+       ns->ms = le16_to_cpu(id->lbaf[id->flbas & NVME_NS_FLBAS_LBA_MASK].ms);
+       ns->ext = ns->ms && (id->flbas & NVME_NS_FLBAS_META_EXT);
+
+       /* PI implementation requires metadata equal to the T10 PI tuple size */
+       if (ns->ms == sizeof(struct t10_pi_tuple))
+               pi_type = id->dps & NVME_NS_DPS_PI_MASK;
+
+       if (blk_get_integrity(disk) &&
+           (ns->pi_type != pi_type || ns->ms != old_ms ||
+            bs != queue_logical_block_size(disk->queue) ||
+            (ns->ms && ns->ext)))
+               blk_integrity_unregister(disk);
+
+       ns->pi_type = pi_type;
+}
+
 static void nvme_init_integrity(struct nvme_ns *ns)
 {
        struct blk_integrity integrity;
@@ -951,6 +974,10 @@ static void nvme_init_integrity(struct nvme_ns *ns)
        blk_queue_max_integrity_segments(ns->queue, 1);
 }
 #else
+static void nvme_prep_integrity(struct gendisk *disk, struct nvme_id_ns *id,
+               u16 bs)
+{
+}
 static void nvme_init_integrity(struct nvme_ns *ns)
 {
 }
@@ -997,37 +1024,22 @@ static int nvme_revalidate_ns(struct nvme_ns *ns, struct nvme_id_ns **id)
 static void __nvme_revalidate_disk(struct gendisk *disk, struct nvme_id_ns *id)
 {
        struct nvme_ns *ns = disk->private_data;
-       u8 lbaf, pi_type;
-       u16 old_ms;
-       unsigned short bs;
-
-       old_ms = ns->ms;
-       lbaf = id->flbas & NVME_NS_FLBAS_LBA_MASK;
-       ns->lba_shift = id->lbaf[lbaf].ds;
-       ns->ms = le16_to_cpu(id->lbaf[lbaf].ms);
-       ns->ext = ns->ms && (id->flbas & NVME_NS_FLBAS_META_EXT);
+       u16 bs;
 
        /*
         * If identify namespace failed, use a default 512 byte block size so
         * the block layer can be used before failing reads/writes for 0 capacity.
         */
+       ns->lba_shift = id->lbaf[id->flbas & NVME_NS_FLBAS_LBA_MASK].ds;
        if (ns->lba_shift == 0)
                ns->lba_shift = 9;
        bs = 1 << ns->lba_shift;
-       /* XXX: PI implementation requires metadata equal t10 pi tuple size */
-       pi_type = ns->ms == sizeof(struct t10_pi_tuple) ?
-                                       id->dps & NVME_NS_DPS_PI_MASK : 0;
 
        blk_mq_freeze_queue(disk->queue);
-       if (blk_get_integrity(disk) && (ns->pi_type != pi_type ||
-                               ns->ms != old_ms ||
-                               bs != queue_logical_block_size(disk->queue) ||
-                               (ns->ms && ns->ext)))
-               blk_integrity_unregister(disk);
 
-       ns->pi_type = pi_type;
+       if (ns->ctrl->ops->flags & NVME_F_METADATA_SUPPORTED)
+               nvme_prep_integrity(disk, id, bs);
        blk_queue_logical_block_size(ns->queue, bs);
-
        if (ns->ms && !blk_get_integrity(disk) && !ns->ext)
                nvme_init_integrity(ns);
        if (ns->ms && !(ns->ms == 8 && ns->pi_type) && !blk_get_integrity(disk))
@@ -1605,7 +1617,7 @@ int nvme_init_identify(struct nvme_ctrl *ctrl)
        }
        memcpy(ctrl->psd, id->psd, sizeof(ctrl->psd));
 
-       if (ctrl->ops->is_fabrics) {
+       if (ctrl->ops->flags & NVME_F_FABRICS) {
                ctrl->icdoff = le16_to_cpu(id->icdoff);
                ctrl->ioccsz = le32_to_cpu(id->ioccsz);
                ctrl->iorcsz = le32_to_cpu(id->iorcsz);
@@ -2098,7 +2110,6 @@ static void nvme_ns_remove(struct nvme_ns *ns)
                if (ns->ndev)
                        nvme_nvm_unregister_sysfs(ns);
                del_gendisk(ns->disk);
-               blk_mq_abort_requeue_list(ns->queue);
                blk_cleanup_queue(ns->queue);
        }
 
@@ -2436,8 +2447,16 @@ void nvme_kill_queues(struct nvme_ctrl *ctrl)
                        continue;
                revalidate_disk(ns->disk);
                blk_set_queue_dying(ns->queue);
-               blk_mq_abort_requeue_list(ns->queue);
-               blk_mq_start_stopped_hw_queues(ns->queue, true);
+
+               /*
+                * Forcibly start all queues to avoid having stuck requests.
+                * Note that we must ensure the queues are not stopped
+                * when the final removal happens.
+                */
+               blk_mq_start_hw_queues(ns->queue);
+
+               /* draining requests in requeue list */
+               blk_mq_kick_requeue_list(ns->queue);
        }
        mutex_unlock(&ctrl->namespaces_mutex);
 }
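nvme_kill_queues() now restarts the hardware queues and kicks the requeue list instead of aborting it: a dying queue still needs its hw contexts running so outstanding requests fail fast rather than hang. The blk-mq calls involved, as a sketch of the pattern:

#include <linux/blkdev.h>
#include <linux/blk-mq.h>

static void example_kill_queue(struct request_queue *q)
{
        blk_set_queue_dying(q);
        /* restart stopped hardware contexts so in-flight requests
         * complete (with errors) instead of hanging forever */
        blk_mq_start_hw_queues(q);
        /* and drain whatever is parked on the requeue list */
        blk_mq_kick_requeue_list(q);
}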
index dca7165fabcf9ce5df19fee1007e8da8bd794e21..5b14cbefb7240d5e7d50bb1ade8fd958417282e8 100644 (file)
@@ -45,8 +45,6 @@ enum nvme_fc_queue_flags {
 
 #define NVMEFC_QUEUE_DELAY     3               /* ms units */
 
-#define NVME_FC_MAX_CONNECT_ATTEMPTS   1
-
 struct nvme_fc_queue {
        struct nvme_fc_ctrl     *ctrl;
        struct device           *dev;
@@ -165,8 +163,6 @@ struct nvme_fc_ctrl {
        struct work_struct      delete_work;
        struct work_struct      reset_work;
        struct delayed_work     connect_work;
-       int                     reconnect_delay;
-       int                     connect_attempts;
 
        struct kref             ref;
        u32                     flags;
@@ -1376,9 +1372,9 @@ done:
        complete_rq = __nvme_fc_fcpop_chk_teardowns(ctrl, op);
        if (!complete_rq) {
                if (unlikely(op->flags & FCOP_FLAGS_TERMIO)) {
-                       status = cpu_to_le16(NVME_SC_ABORT_REQ);
+                       status = cpu_to_le16(NVME_SC_ABORT_REQ << 1);
                        if (blk_queue_dying(rq->q))
-                               status |= cpu_to_le16(NVME_SC_DNR);
+                               status |= cpu_to_le16(NVME_SC_DNR << 1);
                }
                nvme_end_request(rq, status, result);
        } else
@@ -1751,7 +1747,7 @@ nvme_fc_error_recovery(struct nvme_fc_ctrl *ctrl, char *errmsg)
        dev_warn(ctrl->ctrl.device,
                "NVME-FC{%d}: transport association error detected: %s\n",
                ctrl->cnum, errmsg);
-       dev_info(ctrl->ctrl.device,
+       dev_warn(ctrl->ctrl.device,
                "NVME-FC{%d}: resetting controller\n", ctrl->cnum);
 
        /* stop the queues on error, cleanup is in reset thread */
@@ -2195,9 +2191,6 @@ nvme_fc_create_io_queues(struct nvme_fc_ctrl *ctrl)
        if (!opts->nr_io_queues)
                return 0;
 
-       dev_info(ctrl->ctrl.device, "creating %d I/O queues.\n",
-                       opts->nr_io_queues);
-
        nvme_fc_init_io_queues(ctrl);
 
        memset(&ctrl->tag_set, 0, sizeof(ctrl->tag_set));
@@ -2268,9 +2261,6 @@ nvme_fc_reinit_io_queues(struct nvme_fc_ctrl *ctrl)
        if (ctrl->queue_count == 1)
                return 0;
 
-       dev_info(ctrl->ctrl.device, "Recreating %d I/O queues.\n",
-                       opts->nr_io_queues);
-
        nvme_fc_init_io_queues(ctrl);
 
        ret = blk_mq_reinit_tagset(&ctrl->tag_set);
@@ -2306,7 +2296,7 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl)
        int ret;
        bool changed;
 
-       ctrl->connect_attempts++;
+       ++ctrl->ctrl.opts->nr_reconnects;
 
        /*
         * Create the admin queue
@@ -2403,9 +2393,7 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl)
        changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);
        WARN_ON_ONCE(!changed);
 
-       ctrl->connect_attempts = 0;
-
-       kref_get(&ctrl->ctrl.kref);
+       ctrl->ctrl.opts->nr_reconnects = 0;
 
        if (ctrl->queue_count > 1) {
                nvme_start_queues(&ctrl->ctrl);
@@ -2536,26 +2524,32 @@ nvme_fc_delete_ctrl_work(struct work_struct *work)
 
        /*
         * tear down the controller
-        * This will result in the last reference on the nvme ctrl to
-        * expire, calling the transport nvme_fc_nvme_ctrl_freed() callback.
-        * From there, the transport will tear down it's logical queues and
-        * association.
+        * After the last reference on the nvme ctrl is removed,
+        * the transport nvme_fc_nvme_ctrl_freed() callback will be
+        * invoked. From there, the transport will tear down its
+        * logical queues and association.
         */
        nvme_uninit_ctrl(&ctrl->ctrl);
 
        nvme_put_ctrl(&ctrl->ctrl);
 }
 
-static int
-__nvme_fc_del_ctrl(struct nvme_fc_ctrl *ctrl)
+static bool
+__nvme_fc_schedule_delete_work(struct nvme_fc_ctrl *ctrl)
 {
        if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_DELETING))
-               return -EBUSY;
+               return true;
 
        if (!queue_work(nvme_fc_wq, &ctrl->delete_work))
-               return -EBUSY;
+               return true;
 
-       return 0;
+       return false;
+}
+
+static int
+__nvme_fc_del_ctrl(struct nvme_fc_ctrl *ctrl)
+{
+       return __nvme_fc_schedule_delete_work(ctrl) ? -EBUSY : 0;
 }
 
 /*
@@ -2580,6 +2574,35 @@ nvme_fc_del_nvme_ctrl(struct nvme_ctrl *nctrl)
        return ret;
 }
 
+static void
+nvme_fc_reconnect_or_delete(struct nvme_fc_ctrl *ctrl, int status)
+{
+       /* If we are resetting/deleting then do nothing */
+       if (ctrl->ctrl.state != NVME_CTRL_RECONNECTING) {
+               WARN_ON_ONCE(ctrl->ctrl.state == NVME_CTRL_NEW ||
+                       ctrl->ctrl.state == NVME_CTRL_LIVE);
+               return;
+       }
+
+       dev_info(ctrl->ctrl.device,
+               "NVME-FC{%d}: reset: Reconnect attempt failed (%d)\n",
+               ctrl->cnum, status);
+
+       if (nvmf_should_reconnect(&ctrl->ctrl)) {
+               dev_info(ctrl->ctrl.device,
+                       "NVME-FC{%d}: Reconnect attempt in %d seconds.\n",
+                       ctrl->cnum, ctrl->ctrl.opts->reconnect_delay);
+               queue_delayed_work(nvme_fc_wq, &ctrl->connect_work,
+                               ctrl->ctrl.opts->reconnect_delay * HZ);
+       } else {
+               dev_warn(ctrl->ctrl.device,
+                               "NVME-FC{%d}: Max reconnect attempts (%d) "
+                               "reached. Removing controller\n",
+                               ctrl->cnum, ctrl->ctrl.opts->nr_reconnects);
+               WARN_ON(__nvme_fc_schedule_delete_work(ctrl));
+       }
+}
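nvme_fc_reconnect_or_delete() centralizes retry logic the two work handlers below used to duplicate: on failure it either schedules another connect attempt after reconnect_delay or, once nvmf_should_reconnect() says the budget is spent, schedules controller removal. The policy's generic shape, with hypothetical scheduling stubs:

struct work_ctx {
        int attempts;
        int max_attempts;
        int delay_secs;
};

static void schedule_retry(struct work_ctx *c, int secs)
{
        /* stand-in: queue delayed connect work after "secs" seconds */
}

static void schedule_removal(struct work_ctx *c)
{
        /* stand-in: queue the controller delete work */
}

static void retry_or_remove(struct work_ctx *c)
{
        if (++c->attempts <= c->max_attempts)
                schedule_retry(c, c->delay_secs);
        else
                schedule_removal(c);
}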
+
 static void
 nvme_fc_reset_ctrl_work(struct work_struct *work)
 {
@@ -2591,34 +2614,9 @@ nvme_fc_reset_ctrl_work(struct work_struct *work)
        nvme_fc_delete_association(ctrl);
 
        ret = nvme_fc_create_association(ctrl);
-       if (ret) {
-               dev_warn(ctrl->ctrl.device,
-                       "NVME-FC{%d}: reset: Reconnect attempt failed (%d)\n",
-                       ctrl->cnum, ret);
-               if (ctrl->connect_attempts >= NVME_FC_MAX_CONNECT_ATTEMPTS) {
-                       dev_warn(ctrl->ctrl.device,
-                               "NVME-FC{%d}: Max reconnect attempts (%d) "
-                               "reached. Removing controller\n",
-                               ctrl->cnum, ctrl->connect_attempts);
-
-                       if (!nvme_change_ctrl_state(&ctrl->ctrl,
-                               NVME_CTRL_DELETING)) {
-                               dev_err(ctrl->ctrl.device,
-                                       "NVME-FC{%d}: failed to change state "
-                                       "to DELETING\n", ctrl->cnum);
-                               return;
-                       }
-
-                       WARN_ON(!queue_work(nvme_fc_wq, &ctrl->delete_work));
-                       return;
-               }
-
-               dev_warn(ctrl->ctrl.device,
-                       "NVME-FC{%d}: Reconnect attempt in %d seconds.\n",
-                       ctrl->cnum, ctrl->reconnect_delay);
-               queue_delayed_work(nvme_fc_wq, &ctrl->connect_work,
-                               ctrl->reconnect_delay * HZ);
-       } else
+       if (ret)
+               nvme_fc_reconnect_or_delete(ctrl, ret);
+       else
                dev_info(ctrl->ctrl.device,
                        "NVME-FC{%d}: controller reset complete\n", ctrl->cnum);
 }
@@ -2632,7 +2630,7 @@ nvme_fc_reset_nvme_ctrl(struct nvme_ctrl *nctrl)
 {
        struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl);
 
-       dev_warn(ctrl->ctrl.device,
+       dev_info(ctrl->ctrl.device,
                "NVME-FC{%d}: admin requested controller reset\n", ctrl->cnum);
 
        if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RESETTING))
@@ -2649,7 +2647,7 @@ nvme_fc_reset_nvme_ctrl(struct nvme_ctrl *nctrl)
 static const struct nvme_ctrl_ops nvme_fc_ctrl_ops = {
        .name                   = "fc",
        .module                 = THIS_MODULE,
-       .is_fabrics             = true,
+       .flags                  = NVME_F_FABRICS,
        .reg_read32             = nvmf_reg_read32,
        .reg_read64             = nvmf_reg_read64,
        .reg_write32            = nvmf_reg_write32,
@@ -2671,34 +2669,9 @@ nvme_fc_connect_ctrl_work(struct work_struct *work)
                                struct nvme_fc_ctrl, connect_work);
 
        ret = nvme_fc_create_association(ctrl);
-       if (ret) {
-               dev_warn(ctrl->ctrl.device,
-                       "NVME-FC{%d}: Reconnect attempt failed (%d)\n",
-                       ctrl->cnum, ret);
-               if (ctrl->connect_attempts >= NVME_FC_MAX_CONNECT_ATTEMPTS) {
-                       dev_warn(ctrl->ctrl.device,
-                               "NVME-FC{%d}: Max reconnect attempts (%d) "
-                               "reached. Removing controller\n",
-                               ctrl->cnum, ctrl->connect_attempts);
-
-                       if (!nvme_change_ctrl_state(&ctrl->ctrl,
-                               NVME_CTRL_DELETING)) {
-                               dev_err(ctrl->ctrl.device,
-                                       "NVME-FC{%d}: failed to change state "
-                                       "to DELETING\n", ctrl->cnum);
-                               return;
-                       }
-
-                       WARN_ON(!queue_work(nvme_fc_wq, &ctrl->delete_work));
-                       return;
-               }
-
-               dev_warn(ctrl->ctrl.device,
-                       "NVME-FC{%d}: Reconnect attempt in %d seconds.\n",
-                       ctrl->cnum, ctrl->reconnect_delay);
-               queue_delayed_work(nvme_fc_wq, &ctrl->connect_work,
-                               ctrl->reconnect_delay * HZ);
-       } else
+       if (ret)
+               nvme_fc_reconnect_or_delete(ctrl, ret);
+       else
                dev_info(ctrl->ctrl.device,
                        "NVME-FC{%d}: controller reconnect complete\n",
                        ctrl->cnum);
@@ -2755,7 +2728,6 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
        INIT_WORK(&ctrl->delete_work, nvme_fc_delete_ctrl_work);
        INIT_WORK(&ctrl->reset_work, nvme_fc_reset_ctrl_work);
        INIT_DELAYED_WORK(&ctrl->connect_work, nvme_fc_connect_ctrl_work);
-       ctrl->reconnect_delay = opts->reconnect_delay;
        spin_lock_init(&ctrl->lock);
 
        /* io queue count */
@@ -2819,7 +2791,6 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
                ctrl->ctrl.opts = NULL;
                /* initiate nvme ctrl ref counting teardown */
                nvme_uninit_ctrl(&ctrl->ctrl);
-               nvme_put_ctrl(&ctrl->ctrl);
 
                /* as we're past the point where we transition to the ref
                 * counting teardown path, if we return a bad pointer here,
@@ -2835,6 +2806,8 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
                return ERR_PTR(ret);
        }
 
+       kref_get(&ctrl->ctrl.kref);
+
        dev_info(ctrl->ctrl.device,
                "NVME-FC{%d}: new ctrl: NQN \"%s\"\n",
                ctrl->cnum, ctrl->ctrl.opts->subsysnqn);
@@ -2971,7 +2944,7 @@ nvme_fc_create_ctrl(struct device *dev, struct nvmf_ctrl_options *opts)
 static struct nvmf_transport_ops nvme_fc_transport = {
        .name           = "fc",
        .required_opts  = NVMF_OPT_TRADDR | NVMF_OPT_HOST_TRADDR,
-       .allowed_opts   = NVMF_OPT_RECONNECT_DELAY,
+       .allowed_opts   = NVMF_OPT_RECONNECT_DELAY | NVMF_OPT_CTRL_LOSS_TMO,
        .create_ctrl    = nvme_fc_create_ctrl,
 };
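
The two hunks above collapse identical reset and reconnect failure paths into the new nvme_fc_reconnect_or_delete() helper, whose tail is visible at the top of this section; the newly allowed NVMF_OPT_CTRL_LOSS_TMO option replaces the driver-private NVME_FC_MAX_CONNECT_ATTEMPTS limit. A kernel-context sketch of the helper's likely shape, inferred from the removed code and the visible tail (the nvmf_should_reconnect() test is an assumption based on the new ctrl_loss_tmo option):

static void
nvme_fc_reconnect_or_delete(struct nvme_fc_ctrl *ctrl, int status)
{
	/* Sketch only: failure path shared by reset and connect work */
	dev_warn(ctrl->ctrl.device,
		"NVME-FC{%d}: reconnect attempt failed (%d)\n",
		ctrl->cnum, status);

	if (nvmf_should_reconnect(&ctrl->ctrl)) {
		/* still within ctrl_loss_tmo: retry after the delay */
		dev_info(ctrl->ctrl.device,
			"NVME-FC{%d}: reconnect attempt in %d seconds\n",
			ctrl->cnum, ctrl->ctrl.opts->reconnect_delay);
		queue_delayed_work(nvme_fc_wq, &ctrl->connect_work,
				ctrl->ctrl.opts->reconnect_delay * HZ);
	} else {
		/* budget exhausted: tear the controller down, matching
		 * the tail shown at the top of this section */
		dev_warn(ctrl->ctrl.device,
			"NVME-FC{%d}: Max reconnect attempts (%d) "
			"reached. Removing controller\n",
			ctrl->cnum, ctrl->ctrl.opts->nr_reconnects);
		WARN_ON(__nvme_fc_schedule_delete_work(ctrl));
	}
}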
 
index 29c708ca9621c4622ab3f32d153c02546e9d0ca6..9d6a070d43914dcc5388b2a7a03f477a00b8bd1f 100644 (file)
@@ -208,7 +208,9 @@ struct nvme_ns {
 struct nvme_ctrl_ops {
        const char *name;
        struct module *module;
-       bool is_fabrics;
+       unsigned int flags;
+#define NVME_F_FABRICS                 (1 << 0)
+#define NVME_F_METADATA_SUPPORTED      (1 << 1)
        int (*reg_read32)(struct nvme_ctrl *ctrl, u32 off, u32 *val);
        int (*reg_write32)(struct nvme_ctrl *ctrl, u32 off, u32 val);
        int (*reg_read64)(struct nvme_ctrl *ctrl, u32 off, u64 *val);
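
Replacing the single-purpose bool is_fabrics with an unsigned int flags bitmask lets each transport advertise several capabilities in one word; the PCIe transport immediately uses the second bit for NVME_F_METADATA_SUPPORTED. A standalone sketch of the pattern, with the two defines copied from the hunk and everything else illustrative:

#include <stdio.h>

#define NVME_F_FABRICS			(1 << 0)
#define NVME_F_METADATA_SUPPORTED	(1 << 1)

struct ctrl_ops {
	const char *name;
	unsigned int flags;
};

int main(void)
{
	/* PCIe: not a fabrics transport, but supports metadata */
	struct ctrl_ops pci = { "pcie", NVME_F_METADATA_SUPPORTED };
	/* FC/RDMA/loop: fabrics transports */
	struct ctrl_ops fc = { "fc", NVME_F_FABRICS };

	/* callers test individual bits instead of a dedicated bool */
	printf("%s fabrics=%d metadata=%d\n", pci.name,
	       !!(pci.flags & NVME_F_FABRICS),
	       !!(pci.flags & NVME_F_METADATA_SUPPORTED));
	printf("%s fabrics=%d metadata=%d\n", fc.name,
	       !!(fc.flags & NVME_F_FABRICS),
	       !!(fc.flags & NVME_F_METADATA_SUPPORTED));
	return 0;
}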
index 4c2ff2bb26bcd7c615e40ae777327a3ba401cf3d..d52701df72457d0fa2b85a168c500fd022b8b717 100644 (file)
@@ -263,7 +263,7 @@ static void nvme_dbbuf_set(struct nvme_dev *dev)
        c.dbbuf.prp2 = cpu_to_le64(dev->dbbuf_eis_dma_addr);
 
        if (nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0)) {
-               dev_warn(dev->dev, "unable to set dbbuf\n");
+               dev_warn(dev->ctrl.device, "unable to set dbbuf\n");
                /* Free memory and continue on */
                nvme_dbbuf_dma_free(dev);
        }
@@ -1394,11 +1394,11 @@ static void nvme_warn_reset(struct nvme_dev *dev, u32 csts)
        result = pci_read_config_word(to_pci_dev(dev->dev), PCI_STATUS,
                                      &pci_status);
        if (result == PCIBIOS_SUCCESSFUL)
-               dev_warn(dev->dev,
+               dev_warn(dev->ctrl.device,
                         "controller is down; will reset: CSTS=0x%x, PCI_STATUS=0x%hx\n",
                         csts, pci_status);
        else
-               dev_warn(dev->dev,
+               dev_warn(dev->ctrl.device,
                         "controller is down; will reset: CSTS=0x%x, PCI_STATUS read failed (%d)\n",
                         csts, result);
 }
@@ -1740,8 +1740,8 @@ static int nvme_pci_enable(struct nvme_dev *dev)
         */
        if (pdev->vendor == PCI_VENDOR_ID_APPLE && pdev->device == 0x2001) {
                dev->q_depth = 2;
-               dev_warn(dev->dev, "detected Apple NVMe controller, set "
-                       "queue depth=%u to work around controller resets\n",
+               dev_warn(dev->ctrl.device, "detected Apple NVMe controller, "
+                       "set queue depth=%u to work around controller resets\n",
                        dev->q_depth);
        }
 
@@ -1759,7 +1759,7 @@ static int nvme_pci_enable(struct nvme_dev *dev)
                if (dev->cmbsz) {
                        if (sysfs_add_file_to_group(&dev->ctrl.device->kobj,
                                                    &dev_attr_cmb.attr, NULL))
-                               dev_warn(dev->dev,
+                               dev_warn(dev->ctrl.device,
                                         "failed to add sysfs attribute for CMB\n");
                }
        }
@@ -2047,6 +2047,7 @@ static int nvme_pci_reset_ctrl(struct nvme_ctrl *ctrl)
 static const struct nvme_ctrl_ops nvme_pci_ctrl_ops = {
        .name                   = "pcie",
        .module                 = THIS_MODULE,
+       .flags                  = NVME_F_METADATA_SUPPORTED,
        .reg_read32             = nvme_pci_reg_read32,
        .reg_write32            = nvme_pci_reg_write32,
        .reg_read64             = nvme_pci_reg_read64,
@@ -2293,6 +2294,8 @@ static const struct pci_device_id nvme_id_table[] = {
        { PCI_VDEVICE(INTEL, 0x0a54),
                .driver_data = NVME_QUIRK_STRIPE_SIZE |
                                NVME_QUIRK_DEALLOCATE_ZEROES, },
+       { PCI_VDEVICE(INTEL, 0xf1a5),   /* Intel 600P/P3100 */
+               .driver_data = NVME_QUIRK_NO_DEEPEST_PS },
        { PCI_VDEVICE(INTEL, 0x5845),   /* Qemu emulated controller */
                .driver_data = NVME_QUIRK_IDENTIFY_CNS, },
        { PCI_DEVICE(0x1c58, 0x0003),   /* HGST adapter */
index dd1c6deef82fcf509b5c04af20d9df1c17746d9b..28bd255c144dcca10aa60cede2c9a51cd101426a 100644 (file)
@@ -1038,6 +1038,19 @@ static void nvme_rdma_send_done(struct ib_cq *cq, struct ib_wc *wc)
                nvme_rdma_wr_error(cq, wc, "SEND");
 }
 
+static inline int nvme_rdma_queue_sig_limit(struct nvme_rdma_queue *queue)
+{
+       int sig_limit;
+
+       /*
+        * We signal completion once every queue_size / 2 sends and also
+        * handle the degenerate case of a device with queue_size = 1,
+        * where we would need to signal every message.
+        */
+       sig_limit = max(queue->queue_size / 2, 1);
+       return (++queue->sig_count % sig_limit) == 0;
+}
+
 static int nvme_rdma_post_send(struct nvme_rdma_queue *queue,
                struct nvme_rdma_qe *qe, struct ib_sge *sge, u32 num_sge,
                struct ib_send_wr *first, bool flush)
@@ -1065,9 +1078,6 @@ static int nvme_rdma_post_send(struct nvme_rdma_queue *queue,
         * Would have been way too obvious to handle this in hardware or
         * at least the RDMA stack..
         *
-        * This messy and racy code sniplet is copy and pasted from the iSER
-        * initiator, and the magic '32' comes from there as well.
-        *
         * Always signal the flushes. The magic request used for the flush
         * sequencer is not allocated in our driver's tagset and it's
         * triggered to be freed by blk_cleanup_queue(). So we need to
@@ -1075,7 +1085,7 @@ static int nvme_rdma_post_send(struct nvme_rdma_queue *queue,
         * embedded in request's payload, is not freed when __ib_process_cq()
         * calls wr_cqe->done().
         */
-       if ((++queue->sig_count % 32) == 0 || flush)
+       if (nvme_rdma_queue_sig_limit(queue) || flush)
                wr.send_flags |= IB_SEND_SIGNALED;
 
        if (first)
@@ -1782,7 +1792,7 @@ static int nvme_rdma_reset_ctrl(struct nvme_ctrl *nctrl)
 static const struct nvme_ctrl_ops nvme_rdma_ctrl_ops = {
        .name                   = "rdma",
        .module                 = THIS_MODULE,
-       .is_fabrics             = true,
+       .flags                  = NVME_F_FABRICS,
        .reg_read32             = nvmf_reg_read32,
        .reg_read64             = nvmf_reg_read64,
        .reg_write32            = nvmf_reg_write32,
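
nvme_rdma_queue_sig_limit() above derives the completion-signaling cadence from the queue depth instead of the magic 32 inherited from the iSER initiator, and the max(..., 1) clamp keeps a depth-1 queue signaling every send. A standalone sketch of the arithmetic (sig_count and queue_size mirror the fields in the hunk; main() is illustrative):

#include <stdio.h>

/* Mirrors nvme_rdma_queue_sig_limit(): signal every queue_size / 2
 * sends, clamped to 1 so a depth-1 queue signals every message. */
static int sig_limit_hit(int *sig_count, int queue_size)
{
	int sig_limit = queue_size / 2 > 1 ? queue_size / 2 : 1;

	return (++*sig_count % sig_limit) == 0;
}

int main(void)
{
	int sig_count = 0, i, signalled = 0;

	for (i = 0; i < 128; i++)
		signalled += sig_limit_hit(&sig_count, 32);

	/* queue_size 32 signals every 16th send: prints 8 of 128 */
	printf("signalled %d of 128 sends\n", signalled);
	return 0;
}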
index feb497134aeea662092424804073be9ed104c464..e503cfff03372fb9cc7605c800743dd4c5891318 100644 (file)
@@ -558,7 +558,7 @@ static int nvme_loop_reset_ctrl(struct nvme_ctrl *nctrl)
 static const struct nvme_ctrl_ops nvme_loop_ctrl_ops = {
        .name                   = "loop",
        .module                 = THIS_MODULE,
-       .is_fabrics             = true,
+       .flags                  = NVME_F_FABRICS,
        .reg_read32             = nvmf_reg_read32,
        .reg_read64             = nvmf_reg_read64,
        .reg_write32            = nvmf_reg_write32,
index 71fecc2debfc940affba38afc6c912f3a4aa8b4b..703a42118ffc907571f2da5300639b725c2d529f 100644 (file)
@@ -523,7 +523,7 @@ static int __init of_platform_default_populate_init(void)
 arch_initcall_sync(of_platform_default_populate_init);
 #endif
 
-static int of_platform_device_destroy(struct device *dev, void *data)
+int of_platform_device_destroy(struct device *dev, void *data)
 {
        /* Do not touch devices not populated from the device tree */
        if (!dev->of_node || !of_node_check_flag(dev->of_node, OF_POPULATED))
@@ -544,6 +544,7 @@ static int of_platform_device_destroy(struct device *dev, void *data)
        of_node_clear_flag(dev->of_node, OF_POPULATED_BUS);
        return 0;
 }
+EXPORT_SYMBOL_GPL(of_platform_device_destroy);
 
 /**
  * of_platform_depopulate() - Remove devices populated from device tree
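
With of_platform_device_destroy() exported, a driver can tear down the OF-populated children of a device directly rather than only through of_platform_depopulate(). A kernel-context sketch of the likely call pattern, assuming the device_for_each_child_reverse() walk that of_platform_depopulate() itself performs (the wrapper name is illustrative):

#include <linux/device.h>
#include <linux/of_platform.h>

/* Sketch: destroy every device-tree-populated child of @parent.
 * of_platform_device_destroy() skips any child without the
 * OF_POPULATED flag, so non-OF children are left alone. */
static void example_depopulate(struct device *parent)
{
	device_for_each_child_reverse(parent, NULL,
				      of_platform_device_destroy);
}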
index a98cba55c7f02e670436787c011a85a8772f5109..19a289b8cc944fefb6e2bb3b627b61a0a2eff3ed 100644 (file)
@@ -252,7 +252,34 @@ static void imx6_pcie_reset_phy(struct imx6_pcie *imx6_pcie)
 static int imx6q_pcie_abort_handler(unsigned long addr,
                unsigned int fsr, struct pt_regs *regs)
 {
-       return 0;
+       unsigned long pc = instruction_pointer(regs);
+       unsigned long instr = *(unsigned long *)pc;
+       int reg = (instr >> 12) & 15;
+
+       /*
+        * If the instruction being executed was a read,
+        * make it look like it read all-ones.
+        */
+       if ((instr & 0x0c100000) == 0x04100000) {
+               unsigned long val;
+
+               if (instr & 0x00400000)
+                       val = 255;
+               else
+                       val = -1;
+
+               regs->uregs[reg] = val;
+               regs->ARM_pc += 4;
+               return 0;
+       }
+
+       if ((instr & 0x0e100090) == 0x00100090) {
+               regs->uregs[reg] = -1;
+               regs->ARM_pc += 4;
+               return 0;
+       }
+
+       return 1;
 }
 
 static void imx6_pcie_assert_core_reset(struct imx6_pcie *imx6_pcie)
@@ -819,8 +846,8 @@ static int __init imx6_pcie_init(void)
         * we can install the handler here without risking it
         * accessing some uninitialized driver state.
         */
-       hook_fault_code(16 + 6, imx6q_pcie_abort_handler, SIGBUS, 0,
-                       "imprecise external abort");
+       hook_fault_code(8, imx6q_pcie_abort_handler, SIGBUS, 0,
+                       "external abort on non-linefetch");
 
        return platform_driver_register(&imx6_pcie_driver);
 }
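
The rewritten imx6q_pcie_abort_handler() fixes up failed config-space reads instead of silently swallowing every abort, and the hook moves from the imprecise external abort slot (16 + 6) to fault code 8, "external abort on non-linefetch", which is synchronous, so regs->ARM_pc identifies the faulting instruction and can be advanced past it. In the decode, (instr & 0x0c100000) == 0x04100000 matches the A32 single data transfer loads (LDR/LDRB, with bit 22 selecting the byte form) and (instr & 0x0e100090) == 0x00100090 matches the halfword load encodings. A standalone sketch of the same decode, with illustrative instruction words:

#include <stdio.h>
#include <stdint.h>

/* Classify a faulting A32 instruction the way the abort handler does. */
static const char *classify(uint32_t instr)
{
	if ((instr & 0x0c100000) == 0x04100000)
		return (instr & 0x00400000) ? "byte load -> reg = 0xff"
					    : "word load -> reg = 0xffffffff";
	if ((instr & 0x0e100090) == 0x00100090)
		return "halfword load -> reg = 0xffffffff";
	return "not fixed up -> abort propagates";
}

int main(void)
{
	uint32_t ldr  = 0xe5901000;	/* ldr  r1, [r0] */
	uint32_t ldrb = 0xe5d01000;	/* ldrb r1, [r0] */
	uint32_t ldrh = 0xe1d010b0;	/* ldrh r1, [r0] */
	uint32_t str  = 0xe5801000;	/* str  r1, [r0] */

	printf("ldr:  %s\n", classify(ldr));
	printf("ldrb: %s\n", classify(ldrb));
	printf("ldrh: %s\n", classify(ldrh));
	printf("str:  %s\n", classify(str));
	return 0;
}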
index c23f146fb5a668b3a5f5ae27b0bb4f00a8057d7f..c09623ca8c3b14b9b522c471fa9ac118c9522106 100644 (file)
@@ -6,6 +6,7 @@ menu "PCI Endpoint"
 
 config PCI_ENDPOINT
        bool "PCI Endpoint Support"
+       depends on HAS_DMA
        help
           Enable this configuration option to support configurable PCI
           endpoint. This should be enabled if the platform has a PCI
index b01bd5bba8e604709c3b51f14f9fbdb796a2c4fb..563901cd9c06b839f4eea9a74ed78f76309f7f62 100644 (file)
@@ -2144,7 +2144,8 @@ bool pci_dev_keep_suspended(struct pci_dev *pci_dev)
 
        if (!pm_runtime_suspended(dev)
            || pci_target_state(pci_dev) != pci_dev->current_state
-           || platform_pci_need_resume(pci_dev))
+           || platform_pci_need_resume(pci_dev)
+           || (pci_dev->dev_flags & PCI_DEV_FLAGS_NEEDS_RESUME))
                return false;
 
        /*
index cc6e085008fb9c6e72244d9e6a588864c3f2962b..f6a63406c76e0b170e348c882abcf2723f2d401d 100644 (file)
@@ -1291,7 +1291,6 @@ static struct switchtec_dev *stdev_create(struct pci_dev *pdev)
        cdev = &stdev->cdev;
        cdev_init(cdev, &switchtec_fops);
        cdev->owner = THIS_MODULE;
-       cdev->kobj.parent = &dev->kobj;
 
        return stdev;
 
@@ -1442,12 +1441,15 @@ static int switchtec_init_pci(struct switchtec_dev *stdev,
        stdev->mmio_sys_info = stdev->mmio + SWITCHTEC_GAS_SYS_INFO_OFFSET;
        stdev->mmio_flash_info = stdev->mmio + SWITCHTEC_GAS_FLASH_INFO_OFFSET;
        stdev->mmio_ntb = stdev->mmio + SWITCHTEC_GAS_NTB_OFFSET;
-       stdev->partition = ioread8(&stdev->mmio_ntb->partition_id);
+       stdev->partition = ioread8(&stdev->mmio_sys_info->partition_id);
        stdev->partition_count = ioread8(&stdev->mmio_ntb->partition_count);
        stdev->mmio_part_cfg_all = stdev->mmio + SWITCHTEC_GAS_PART_CFG_OFFSET;
        stdev->mmio_part_cfg = &stdev->mmio_part_cfg_all[stdev->partition];
        stdev->mmio_pff_csr = stdev->mmio + SWITCHTEC_GAS_PFF_CSR_OFFSET;
 
+       if (stdev->partition_count < 1)
+               stdev->partition_count = 1;
+
        init_pff(stdev);
 
        pci_set_drvdata(pdev, stdev);
@@ -1479,11 +1481,7 @@ static int switchtec_pci_probe(struct pci_dev *pdev,
                  SWITCHTEC_EVENT_EN_IRQ,
                  &stdev->mmio_part_cfg->mrpc_comp_hdr);
 
-       rc = cdev_add(&stdev->cdev, stdev->dev.devt, 1);
-       if (rc)
-               goto err_put;
-
-       rc = device_add(&stdev->dev);
+       rc = cdev_device_add(&stdev->cdev, &stdev->dev);
        if (rc)
                goto err_devadd;
 
@@ -1492,7 +1490,6 @@ static int switchtec_pci_probe(struct pci_dev *pdev,
        return 0;
 
 err_devadd:
-       cdev_del(&stdev->cdev);
        stdev_kill(stdev);
 err_put:
        ida_simple_remove(&switchtec_minor_ida, MINOR(stdev->dev.devt));
@@ -1506,8 +1503,7 @@ static void switchtec_pci_remove(struct pci_dev *pdev)
 
        pci_set_drvdata(pdev, NULL);
 
-       device_del(&stdev->dev);
-       cdev_del(&stdev->cdev);
+       cdev_device_del(&stdev->cdev, &stdev->dev);
        ida_simple_remove(&switchtec_minor_ida, MINOR(stdev->dev.devt));
        dev_info(&stdev->dev, "unregistered.\n");
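
The switchtec probe and remove paths switch to the combined cdev_device_add()/cdev_device_del() helpers, which register the char device and the struct device as a unit and manage the kobject parent internally, so the manual cdev->kobj.parent assignment and the two-step error unwinding can go. A kernel-context sketch of the pairing (the structure and function names here are illustrative):

#include <linux/cdev.h>
#include <linux/device.h>

struct my_dev {
	struct device dev;	/* dev.devt must be set before adding */
	struct cdev cdev;
};

static int example_add(struct my_dev *d, const struct file_operations *fops)
{
	cdev_init(&d->cdev, fops);
	d->cdev.owner = THIS_MODULE;
	/* one call replaces cdev_add() + device_add() and ties the
	 * cdev's lifetime to the device's kobject */
	return cdev_device_add(&d->cdev, &d->dev);
}

static void example_remove(struct my_dev *d)
{
	cdev_device_del(&d->cdev, &d->dev);
}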
 
index 34c862f213c7e85554830b741cdf051f1fbbd86d..0a9b78705ee810c9e18c6fa1f46551fc27374287 100644 (file)
@@ -29,6 +29,17 @@ static int arm_pmu_acpi_register_irq(int cpu)
                return -EINVAL;
 
        gsi = gicc->performance_interrupt;
+
+       /*
+        * Per the ACPI spec, the MADT cannot describe a PMU that doesn't
+        * have an interrupt. QEMU advertises this by using a GSI of zero,
+        * which is not known to be valid on any hardware despite being
+        * valid per the spec. Take the pragmatic approach and reject a
+        * GSI of zero for now.
+        */
+       if (!gsi)
+               return 0;
+
        if (gicc->flags & ACPI_MADT_PERFORMANCE_IRQ_MODE)
                trigger = ACPI_EDGE_SENSITIVE;
        else
index 1653cbda6a8299b33b5cebae92bd4710e41412a4..bd459a93b0e7e9b11c999dd4bf9b95c3500be3e2 100644 (file)
@@ -680,30 +680,16 @@ EXPORT_SYMBOL_GPL(pinctrl_generic_remove_group);
  * pinctrl_generic_free_groups() - removes all pin groups
  * @pctldev: pin controller device
  *
- * Note that the caller must take care of locking.
+ * Note that the caller must take care of locking. The pinctrl groups
+ * are allocated with devm_kzalloc(), so there is no need to free them
+ * here.
  */
 static void pinctrl_generic_free_groups(struct pinctrl_dev *pctldev)
 {
        struct radix_tree_iter iter;
-       struct group_desc *group;
-       unsigned long *indices;
        void **slot;
-       int i = 0;
-
-       indices = devm_kzalloc(pctldev->dev, sizeof(*indices) *
-                              pctldev->num_groups, GFP_KERNEL);
-       if (!indices)
-               return;
 
        radix_tree_for_each_slot(slot, &pctldev->pin_group_tree, &iter, 0)
-               indices[i++] = iter.index;
-
-       for (i = 0; i < pctldev->num_groups; i++) {
-               group = radix_tree_lookup(&pctldev->pin_group_tree,
-                                         indices[i]);
-               radix_tree_delete(&pctldev->pin_group_tree, indices[i]);
-               devm_kfree(pctldev->dev, group);
-       }
+               radix_tree_delete(&pctldev->pin_group_tree, iter.index);
 
        pctldev->num_groups = 0;
 }
index 41b5b07d5a2bf51f6b0623597c294862910de78c..6852010a6d708b5010555cbb141bf57ac31de077 100644 (file)
@@ -194,6 +194,16 @@ static int mxs_pinctrl_get_func_groups(struct pinctrl_dev *pctldev,
        return 0;
 }
 
+static void mxs_pinctrl_rmwl(u32 value, u32 mask, u8 shift, void __iomem *reg)
+{
+       u32 tmp;
+
+       tmp = readl(reg);
+       tmp &= ~(mask << shift);
+       tmp |= value << shift;
+       writel(tmp, reg);
+}
+
 static int mxs_pinctrl_set_mux(struct pinctrl_dev *pctldev, unsigned selector,
                               unsigned group)
 {
@@ -211,8 +221,7 @@ static int mxs_pinctrl_set_mux(struct pinctrl_dev *pctldev, unsigned selector,
                reg += bank * 0x20 + pin / 16 * 0x10;
                shift = pin % 16 * 2;
 
-               writel(0x3 << shift, reg + CLR);
-               writel(g->muxsel[i] << shift, reg + SET);
+               mxs_pinctrl_rmwl(g->muxsel[i], 0x3, shift, reg);
        }
 
        return 0;
@@ -279,8 +288,7 @@ static int mxs_pinconf_group_set(struct pinctrl_dev *pctldev,
                        /* mA */
                        if (config & MA_PRESENT) {
                                shift = pin % 8 * 4;
-                               writel(0x3 << shift, reg + CLR);
-                               writel(ma << shift, reg + SET);
+                               mxs_pinctrl_rmwl(ma, 0x3, shift, reg);
                        }
 
                        /* vol */
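
mxs_pinctrl_rmwl() folds the previous write-to-CLR-then-write-to-SET sequence into a single read-modify-write of the base register, so the field is never transiently observed as all-zero between the clear and the set. A standalone sketch of the helper's mask/shift arithmetic, with a plain variable standing in for the MMIO register:

#include <stdio.h>
#include <stdint.h>

/* Same mask/shift logic as mxs_pinctrl_rmwl(), on a u32 in memory. */
static void rmwl(uint32_t value, uint32_t mask, uint8_t shift, uint32_t *reg)
{
	uint32_t tmp = *reg;

	tmp &= ~(mask << shift);	/* clear the field */
	tmp |= value << shift;		/* install the new value */
	*reg = tmp;
}

int main(void)
{
	uint32_t reg = 0xffffffff;

	rmwl(0x2, 0x3, 4, &reg);	/* set bits 5:4 to 0b10 */
	printf("reg = 0x%08x\n", reg);	/* prints reg = 0xffffffef */
	return 0;
}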
index 2debba62fac90d956ce37cd09805c518ee4a8da5..20f1b44939944614ff270c757fc7152f901e9f09 100644 (file)
@@ -1539,15 +1539,29 @@ static void chv_gpio_irq_handler(struct irq_desc *desc)
  * is not listed below.
  */
 static const struct dmi_system_id chv_no_valid_mask[] = {
+       /* See https://bugzilla.kernel.org/show_bug.cgi?id=194945 */
        {
-               /* See https://bugzilla.kernel.org/show_bug.cgi?id=194945 */
-               .ident = "Acer Chromebook (CYAN)",
+               .ident = "Intel_Strago based Chromebooks (All models)",
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "GOOGLE"),
-                       DMI_MATCH(DMI_PRODUCT_NAME, "Edgar"),
-                       DMI_MATCH(DMI_BIOS_DATE, "05/21/2016"),
+                       DMI_MATCH(DMI_PRODUCT_FAMILY, "Intel_Strago"),
                },
-       }
+       },
+       {
+               .ident = "Acer Chromebook R11 (Cyan)",
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "GOOGLE"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "Cyan"),
+               },
+       },
+       {
+               .ident = "Samsung Chromebook 3 (Celes)",
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "GOOGLE"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "Celes"),
+               },
+       },
+       {}
 };
 
 static int chv_gpio_probe(struct chv_pinctrl *pctrl, int irq)
index 0d6b7f4b82af34a2a85e51c924ca8420bd7a6268..720a19fd38d2c6c24d30e5e1c7dcdf70cae0e590 100644 (file)
@@ -35,7 +35,6 @@ static const struct pin_config_item conf_items[] = {
        PCONFDUMP(PIN_CONFIG_BIAS_PULL_PIN_DEFAULT,
                                "input bias pull to pin specific state", NULL, false),
        PCONFDUMP(PIN_CONFIG_BIAS_PULL_UP, "input bias pull up", NULL, false),
-       PCONFDUMP(PIN_CONFIG_BIDIRECTIONAL, "bi-directional pin operations", NULL, false),
        PCONFDUMP(PIN_CONFIG_DRIVE_OPEN_DRAIN, "output drive open drain", NULL, false),
        PCONFDUMP(PIN_CONFIG_DRIVE_OPEN_SOURCE, "output drive open source", NULL, false),
        PCONFDUMP(PIN_CONFIG_DRIVE_PUSH_PULL, "output drive push pull", NULL, false),
@@ -161,7 +160,6 @@ static const struct pinconf_generic_params dt_params[] = {
        { "bias-pull-up", PIN_CONFIG_BIAS_PULL_UP, 1 },
        { "bias-pull-pin-default", PIN_CONFIG_BIAS_PULL_PIN_DEFAULT, 1 },
        { "bias-pull-down", PIN_CONFIG_BIAS_PULL_DOWN, 1 },
-       { "bi-directional", PIN_CONFIG_BIDIRECTIONAL, 1 },
        { "drive-open-drain", PIN_CONFIG_DRIVE_OPEN_DRAIN, 0 },
        { "drive-open-source", PIN_CONFIG_DRIVE_OPEN_SOURCE, 0 },
        { "drive-push-pull", PIN_CONFIG_DRIVE_PUSH_PULL, 0 },
@@ -174,7 +172,6 @@ static const struct pinconf_generic_params dt_params[] = {
        { "input-schmitt-enable", PIN_CONFIG_INPUT_SCHMITT_ENABLE, 1 },
        { "low-power-disable", PIN_CONFIG_LOW_POWER_MODE, 0 },
        { "low-power-enable", PIN_CONFIG_LOW_POWER_MODE, 1 },
-       { "output-enable", PIN_CONFIG_OUTPUT, 1, },
        { "output-high", PIN_CONFIG_OUTPUT, 1, },
        { "output-low", PIN_CONFIG_OUTPUT, 0, },
        { "power-source", PIN_CONFIG_POWER_SOURCE, 0 },
index 9fd6d9087dc508ca7731d7f1e868988e0e320cc2..16b3ae5e4f440c4769db55ebf7c61ebae7e1e5c1 100644 (file)
@@ -826,30 +826,17 @@ EXPORT_SYMBOL_GPL(pinmux_generic_remove_function);
  * pinmux_generic_free_functions() - removes all functions
  * @pctldev: pin controller device
  *
- * Note that the caller must take care of locking.
+ * Note that the caller must take care of locking. The pinctrl
+ * functions are allocated with devm_kzalloc(), so there is no need
+ * to free them here.
  */
 void pinmux_generic_free_functions(struct pinctrl_dev *pctldev)
 {
        struct radix_tree_iter iter;
-       struct function_desc *function;
-       unsigned long *indices;
        void **slot;
-       int i = 0;
-
-       indices = devm_kzalloc(pctldev->dev, sizeof(*indices) *
-                              pctldev->num_functions, GFP_KERNEL);
-       if (!indices)
-               return;
 
        radix_tree_for_each_slot(slot, &pctldev->pin_function_tree, &iter, 0)
-               indices[i++] = iter.index;
-
-       for (i = 0; i < pctldev->num_functions; i++) {
-               function = radix_tree_lookup(&pctldev->pin_function_tree,
-                                            indices[i]);
-               radix_tree_delete(&pctldev->pin_function_tree, indices[i]);
-               devm_kfree(pctldev->dev, function);
-       }
+               radix_tree_delete(&pctldev->pin_function_tree, iter.index);
 
        pctldev->num_functions = 0;
 }
index 9aec1d2232dd830e2c19a8e1e394e6033e621c21..6624499eae72f5c2ba986c8c54c6f7e583f05f2a 100644 (file)
@@ -394,7 +394,7 @@ static const struct sunxi_desc_pin sun8i_a83t_pins[] = {
        SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 18),
                  SUNXI_FUNCTION(0x0, "gpio_in"),
                  SUNXI_FUNCTION(0x1, "gpio_out"),
-                 SUNXI_FUNCTION(0x3, "owa")),          /* DOUT */
+                 SUNXI_FUNCTION(0x3, "spdif")),        /* DOUT */
        SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 19),
                  SUNXI_FUNCTION(0x0, "gpio_in"),
                  SUNXI_FUNCTION(0x1, "gpio_out")),
index 622bdabc88941430f18ed65b0d70fd9fb0478b9b..dab195f04da78f46921f4eaf044c3f7e953c9a65 100644 (file)
@@ -1769,7 +1769,6 @@ csio_hw_use_fwconfig(struct csio_hw *hw, int reset, u32 *fw_cfg_param)
                goto bye;
        }
 
-       mempool_free(mbp, hw->mb_mempool);
        if (finicsum != cfcsum) {
                csio_warn(hw,
                      "Config File checksum mismatch: csum=%#x, computed=%#x\n",
@@ -1780,6 +1779,10 @@ csio_hw_use_fwconfig(struct csio_hw *hw, int reset, u32 *fw_cfg_param)
        rv = csio_hw_validate_caps(hw, mbp);
        if (rv != 0)
                goto bye;
+
+       mempool_free(mbp, hw->mb_mempool);
+       mbp = NULL;
+
        /*
         * Note that we're operating with parameters
         * not supplied by the driver, rather than from hard-wired
index bd7d39ecbd2470246a58a8ed3a3fd11367df814f..fb06974c88c15c2b23864e44779e7d61826546bf 100644 (file)
@@ -1873,6 +1873,11 @@ int cxgbi_conn_alloc_pdu(struct iscsi_task *task, u8 opcode)
        tcp_task->dd_data = tdata;
        task->hdr = NULL;
 
+       if (tdata->skb) {
+               kfree_skb(tdata->skb);
+               tdata->skb = NULL;
+       }
+
        if (SKB_MAX_HEAD(cdev->skb_tx_rsvd) > (512 * MAX_SKB_FRAGS) &&
            (opcode == ISCSI_OP_SCSI_DATA_OUT ||
             (opcode == ISCSI_OP_SCSI_CMD &&
@@ -1890,6 +1895,7 @@ int cxgbi_conn_alloc_pdu(struct iscsi_task *task, u8 opcode)
                return -ENOMEM;
        }
 
+       skb_get(tdata->skb);
        skb_reserve(tdata->skb, cdev->skb_tx_rsvd);
        task->hdr = (struct iscsi_hdr *)tdata->skb->data;
        task->hdr_max = SKB_TX_ISCSI_PDU_HEADER_MAX; /* BHS + AHS */
@@ -2035,9 +2041,9 @@ int cxgbi_conn_xmit_pdu(struct iscsi_task *task)
        unsigned int datalen;
        int err;
 
-       if (!skb) {
+       if (!skb || cxgbi_skcb_test_flag(skb, SKCBF_TX_DONE)) {
                log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_PDU_TX,
-                       "task 0x%p, skb NULL.\n", task);
+                       "task 0x%p, skb 0x%p\n", task, skb);
                return 0;
        }
 
@@ -2050,7 +2056,6 @@ int cxgbi_conn_xmit_pdu(struct iscsi_task *task)
        }
 
        datalen = skb->data_len;
-       tdata->skb = NULL;
 
        /* write ppod first if using ofldq to write ppod */
        if (ttinfo->flags & CXGBI_PPOD_INFO_FLAG_VALID) {
@@ -2078,6 +2083,7 @@ int cxgbi_conn_xmit_pdu(struct iscsi_task *task)
                        pdulen += ISCSI_DIGEST_SIZE;
 
                task->conn->txdata_octets += pdulen;
+               cxgbi_skcb_set_flag(skb, SKCBF_TX_DONE);
                return 0;
        }
 
@@ -2086,7 +2092,6 @@ int cxgbi_conn_xmit_pdu(struct iscsi_task *task)
                        "task 0x%p, skb 0x%p, len %u/%u, %d EAGAIN.\n",
                        task, skb, skb->len, skb->data_len, err);
                /* reset skb to send when we are called again */
-               tdata->skb = skb;
                return err;
        }
 
@@ -2094,7 +2099,8 @@ int cxgbi_conn_xmit_pdu(struct iscsi_task *task)
                "itt 0x%x, skb 0x%p, len %u/%u, xmit err %d.\n",
                task->itt, skb, skb->len, skb->data_len, err);
 
-       kfree_skb(skb);
+       __kfree_skb(tdata->skb);
+       tdata->skb = NULL;
 
        iscsi_conn_printk(KERN_ERR, task->conn, "xmit err %d.\n", err);
        iscsi_conn_failure(task->conn, ISCSI_ERR_XMIT_FAILED);
@@ -2113,8 +2119,10 @@ void cxgbi_cleanup_task(struct iscsi_task *task)
 
        tcp_task->dd_data = NULL;
        /*  never reached the xmit task callout */
-       if (tdata->skb)
-               __kfree_skb(tdata->skb);
+       if (tdata->skb) {
+               kfree_skb(tdata->skb);
+               tdata->skb = NULL;
+       }
 
        task_release_itt(task, task->hdr_itt);
        memset(tdata, 0, sizeof(*tdata));
@@ -2714,6 +2722,9 @@ EXPORT_SYMBOL_GPL(cxgbi_attr_is_visible);
 static int __init libcxgbi_init_module(void)
 {
        pr_info("%s", version);
+
+       BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, cb) <
+                    sizeof(struct cxgbi_skb_cb));
        return 0;
 }
 
index 18e0ea83d36172cf2fcc55ecfe8f3b133913f1d5..239462a7576051dca167ad246ba34f07777209cf 100644 (file)
@@ -195,7 +195,8 @@ struct cxgbi_skb_rx_cb {
 };
 
 struct cxgbi_skb_tx_cb {
-       void *l2t;
+       void *handle;
+       void *arp_err_handler;
        struct sk_buff *wr_next;
 };
 
@@ -203,6 +204,7 @@ enum cxgbi_skcb_flags {
        SKCBF_TX_NEED_HDR,      /* packet needs a header */
        SKCBF_TX_MEM_WRITE,     /* memory write */
        SKCBF_TX_FLAG_COMPL,    /* wr completion flag */
+       SKCBF_TX_DONE,          /* skb tx done */
        SKCBF_RX_COALESCED,     /* received whole pdu */
        SKCBF_RX_HDR,           /* received pdu header */
        SKCBF_RX_DATA,          /* received pdu payload */
@@ -215,13 +217,13 @@ enum cxgbi_skcb_flags {
 };
 
 struct cxgbi_skb_cb {
-       unsigned char ulp_mode;
-       unsigned long flags;
-       unsigned int seq;
        union {
                struct cxgbi_skb_rx_cb rx;
                struct cxgbi_skb_tx_cb tx;
        };
+       unsigned char ulp_mode;
+       unsigned long flags;
+       unsigned int seq;
 };
 
 #define CXGBI_SKB_CB(skb)      ((struct cxgbi_skb_cb *)&((skb)->cb[0]))
@@ -374,11 +376,9 @@ static inline void cxgbi_sock_enqueue_wr(struct cxgbi_sock *csk,
        cxgbi_skcb_tx_wr_next(skb) = NULL;
        /*
         * We want to take an extra reference since both us and the driver
-        * need to free the packet before it's really freed. We know there's
-        * just one user currently so we use atomic_set rather than skb_get
-        * to avoid the atomic op.
+        * need to free the packet before it's really freed.
         */
-       atomic_set(&skb->users, 2);
+       skb_get(skb);
 
        if (!csk->wr_pending_head)
                csk->wr_pending_head = skb;
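
The enqueue path stops forcing the skb refcount with atomic_set(&skb->users, 2) and takes a normal reference via skb_get(): an absolute store is only correct if no one else holds a reference at that instant, while an increment composes with any number of holders. A standalone sketch of the difference, using a C11 atomic in place of skb->users:

#include <stdio.h>
#include <stdatomic.h>

int main(void)
{
	atomic_int users = 1;		/* allocator's reference */

	atomic_fetch_add(&users, 1);	/* another holder appears: 2 */

	/* atomic_set-style: assumes we were the sole holder, so the
	 * count stays at 2 even though three parties must each put a
	 * reference; one free would happen too early */
	atomic_store(&users, 2);

	/* skb_get-style: correct regardless of concurrent holders */
	atomic_fetch_add(&users, 1);

	printf("users = %d\n", atomic_load(&users));	/* prints 3 */
	return 0;
}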
index 3cbab8710e58133ead0cb173cb60ea7a4cafc8e2..2ceff585f1896d9762fd288a93f4bcfde0bf2623 100644 (file)
@@ -265,18 +265,16 @@ static unsigned int rdac_failover_get(struct rdac_controller *ctlr,
                                      struct list_head *list,
                                      unsigned char *cdb)
 {
-       struct scsi_device *sdev = ctlr->ms_sdev;
-       struct rdac_dh_data *h = sdev->handler_data;
        struct rdac_mode_common *common;
        unsigned data_size;
        struct rdac_queue_data *qdata;
        u8 *lun_table;
 
-       if (h->ctlr->use_ms10) {
+       if (ctlr->use_ms10) {
                struct rdac_pg_expanded *rdac_pg;
 
                data_size = sizeof(struct rdac_pg_expanded);
-               rdac_pg = &h->ctlr->mode_select.expanded;
+               rdac_pg = &ctlr->mode_select.expanded;
                memset(rdac_pg, 0, data_size);
                common = &rdac_pg->common;
                rdac_pg->page_code = RDAC_PAGE_CODE_REDUNDANT_CONTROLLER + 0x40;
@@ -288,7 +286,7 @@ static unsigned int rdac_failover_get(struct rdac_controller *ctlr,
                struct rdac_pg_legacy *rdac_pg;
 
                data_size = sizeof(struct rdac_pg_legacy);
-               rdac_pg = &h->ctlr->mode_select.legacy;
+               rdac_pg = &ctlr->mode_select.legacy;
                memset(rdac_pg, 0, data_size);
                common = &rdac_pg->common;
                rdac_pg->page_code = RDAC_PAGE_CODE_REDUNDANT_CONTROLLER;
@@ -304,7 +302,7 @@ static unsigned int rdac_failover_get(struct rdac_controller *ctlr,
        }
 
        /* Prepare the command. */
-       if (h->ctlr->use_ms10) {
+       if (ctlr->use_ms10) {
                cdb[0] = MODE_SELECT_10;
                cdb[7] = data_size >> 8;
                cdb[8] = data_size & 0xff;
index d390325c99ecf9487c9b4441fd0e25aec05378c7..abf6026645dd2308fba55179ca750b0e786da5fa 100644 (file)
@@ -1170,6 +1170,8 @@ static struct ibmvscsis_cmd *ibmvscsis_get_free_cmd(struct scsi_info *vscsi)
                cmd = list_first_entry_or_null(&vscsi->free_cmd,
                                               struct ibmvscsis_cmd, list);
                if (cmd) {
+                       if (cmd->abort_cmd)
+                               cmd->abort_cmd = NULL;
                        cmd->flags &= ~(DELAY_SEND);
                        list_del(&cmd->list);
                        cmd->iue = iue;
@@ -1774,6 +1776,7 @@ static void ibmvscsis_send_messages(struct scsi_info *vscsi)
                                if (cmd->abort_cmd) {
                                        retry = true;
                                        cmd->abort_cmd->flags &= ~(DELAY_SEND);
+                                       cmd->abort_cmd = NULL;
                                }
 
                                /*
@@ -1788,6 +1791,25 @@ static void ibmvscsis_send_messages(struct scsi_info *vscsi)
                                        list_del(&cmd->list);
                                        ibmvscsis_free_cmd_resources(vscsi,
                                                                     cmd);
+                                       /*
+                                        * With a successfully aborted op
+                                        * through LIO, we want to increment
+                                        * the vscsi credit so that when we
+                                        * don't send a rsp to the original
+                                        * scsi abort op (h_send_crq), but the
+                                        * tm rsp to the abort is sent, the
+                                        * credit is correctly sent with the
+                                        * abort tm rsp: 1 credit for the
+                                        * abort tm rsp and 1 credit for the
+                                        * aborted scsi op. Thus we need to
+                                        * increment here. We also increment
+                                        * the credit here to make sure the
+                                        * cmd is actually released first;
+                                        * otherwise the client will think
+                                        * it can send a new cmd, and we
+                                        * could find ourselves short of
+                                        * cmd elements.
+                                        */
+                                       vscsi->credit += 1;
                                } else {
                                        iue = cmd->iue;
 
@@ -2962,10 +2984,7 @@ static long srp_build_response(struct scsi_info *vscsi,
 
        rsp->opcode = SRP_RSP;
 
-       if (vscsi->credit > 0 && vscsi->state == SRP_PROCESSING)
-               rsp->req_lim_delta = cpu_to_be32(vscsi->credit);
-       else
-               rsp->req_lim_delta = cpu_to_be32(1 + vscsi->credit);
+       rsp->req_lim_delta = cpu_to_be32(1 + vscsi->credit);
        rsp->tag = cmd->rsp.tag;
        rsp->flags = 0;
 
index b44c3136eb5181311f12f982fa1ab77b5e95a5f5..520325867e2b4c05528bd89a7eeaccea2f5c6f94 100644 (file)
@@ -1422,7 +1422,7 @@ static void fc_rport_recv_rtv_req(struct fc_rport_priv *rdata,
        fp = fc_frame_alloc(lport, sizeof(*rtv));
        if (!fp) {
                rjt_data.reason = ELS_RJT_UNAB;
-               rjt_data.reason = ELS_EXPL_INSUF_RES;
+               rjt_data.explan = ELS_EXPL_INSUF_RES;
                fc_seq_els_rsp_send(in_fp, ELS_LS_RJT, &rjt_data);
                goto drop;
        }
index 6d7840b096e6f0899823e99d98e12153683e4f07..f2c0ba6ced78bad65694cde021611d9e0e9f9f25 100644 (file)
@@ -141,6 +141,13 @@ struct lpfc_dmabuf {
        uint32_t   buffer_tag;  /* used for tagged queue ring */
 };
 
+struct lpfc_nvmet_ctxbuf {
+       struct list_head list;
+       struct lpfc_nvmet_rcv_ctx *context;
+       struct lpfc_iocbq *iocbq;
+       struct lpfc_sglq *sglq;
+};
+
 struct lpfc_dma_pool {
        struct lpfc_dmabuf   *elements;
        uint32_t    max_count;
@@ -163,9 +170,7 @@ struct rqb_dmabuf {
        struct lpfc_dmabuf dbuf;
        uint16_t total_size;
        uint16_t bytes_recv;
-       void *context;
-       struct lpfc_iocbq *iocbq;
-       struct lpfc_sglq *sglq;
+       uint16_t idx;
        struct lpfc_queue *hrq;   /* ptr to associated Header RQ */
        struct lpfc_queue *drq;   /* ptr to associated Data RQ */
 };
@@ -670,6 +675,8 @@ struct lpfc_hba {
                                        /* INIT_LINK mailbox command */
 #define LS_NPIV_FAB_SUPPORTED 0x2      /* Fabric supports NPIV */
 #define LS_IGNORE_ERATT       0x4      /* intr handler should ignore ERATT */
+#define LS_MDS_LINK_DOWN      0x8      /* MDS Diagnostics Link Down */
+#define LS_MDS_LOOPBACK      0x16      /* MDS Diagnostics Link Up (Loopback) */
 
        uint32_t hba_flag;      /* hba generic flags */
 #define HBA_ERATT_HANDLED      0x1 /* This flag is set when eratt handled */
@@ -777,7 +784,6 @@ struct lpfc_hba {
        uint32_t cfg_nvme_oas;
        uint32_t cfg_nvme_io_channel;
        uint32_t cfg_nvmet_mrq;
-       uint32_t cfg_nvmet_mrq_post;
        uint32_t cfg_enable_nvmet;
        uint32_t cfg_nvme_enable_fb;
        uint32_t cfg_nvmet_fb_size;
@@ -943,6 +949,7 @@ struct lpfc_hba {
        struct pci_pool *lpfc_mbuf_pool;
        struct pci_pool *lpfc_hrb_pool; /* header receive buffer pool */
        struct pci_pool *lpfc_drb_pool; /* data receive buffer pool */
+       struct pci_pool *lpfc_nvmet_drb_pool; /* data receive buffer pool */
        struct pci_pool *lpfc_hbq_pool; /* SLI3 hbq buffer pool */
        struct pci_pool *txrdy_payload_pool;
        struct lpfc_dma_pool lpfc_mbuf_safety_pool;
@@ -1228,7 +1235,11 @@ lpfc_sli_read_hs(struct lpfc_hba *phba)
 static inline struct lpfc_sli_ring *
 lpfc_phba_elsring(struct lpfc_hba *phba)
 {
-       if (phba->sli_rev == LPFC_SLI_REV4)
-               return phba->sli4_hba.els_wq->pring;
+       if (phba->sli_rev == LPFC_SLI_REV4) {
+               if (phba->sli4_hba.els_wq)
+                       return phba->sli4_hba.els_wq->pring;
+               else
+                       return NULL;
+       }
        return &phba->sli.sli3_ring[LPFC_ELS_RING];
 }
index 4830370bfab14247f567de976787aecc9d6b6d28..bb2d9e238225a43315fa846ddcab4052b4de1caa 100644 (file)
@@ -60,9 +60,9 @@
 #define LPFC_MIN_DEVLOSS_TMO   1
 #define LPFC_MAX_DEVLOSS_TMO   255
 
-#define LPFC_DEF_MRQ_POST      256
-#define LPFC_MIN_MRQ_POST      32
-#define LPFC_MAX_MRQ_POST      512
+#define LPFC_DEF_MRQ_POST      512
+#define LPFC_MIN_MRQ_POST      512
+#define LPFC_MAX_MRQ_POST      2048
 
 /*
  * Write key size should be multiple of 4. If write key is changed
@@ -205,8 +205,9 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
                                atomic_read(&tgtp->xmt_ls_rsp_error));
 
                len += snprintf(buf+len, PAGE_SIZE-len,
-                               "FCP: Rcv %08x Drop %08x\n",
+                               "FCP: Rcv %08x Release %08x Drop %08x\n",
                                atomic_read(&tgtp->rcv_fcp_cmd_in),
+                               atomic_read(&tgtp->xmt_fcp_release),
                                atomic_read(&tgtp->rcv_fcp_cmd_drop));
 
                if (atomic_read(&tgtp->rcv_fcp_cmd_in) !=
@@ -218,15 +219,12 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
                }
 
                len += snprintf(buf+len, PAGE_SIZE-len,
-                               "FCP Rsp: RD %08x rsp %08x WR %08x rsp %08x\n",
+                               "FCP Rsp: RD %08x rsp %08x WR %08x rsp %08x "
+                               "drop %08x\n",
                                atomic_read(&tgtp->xmt_fcp_read),
                                atomic_read(&tgtp->xmt_fcp_read_rsp),
                                atomic_read(&tgtp->xmt_fcp_write),
-                               atomic_read(&tgtp->xmt_fcp_rsp));
-
-               len += snprintf(buf+len, PAGE_SIZE-len,
-                               "FCP Rsp: abort %08x drop %08x\n",
-                               atomic_read(&tgtp->xmt_fcp_abort),
+                               atomic_read(&tgtp->xmt_fcp_rsp),
                                atomic_read(&tgtp->xmt_fcp_drop));
 
                len += snprintf(buf+len, PAGE_SIZE-len,
@@ -236,10 +234,22 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
                                atomic_read(&tgtp->xmt_fcp_rsp_drop));
 
                len += snprintf(buf+len, PAGE_SIZE-len,
-                               "ABORT: Xmt %08x Err %08x Cmpl %08x",
+                               "ABORT: Xmt %08x Cmpl %08x\n",
+                               atomic_read(&tgtp->xmt_fcp_abort),
+                               atomic_read(&tgtp->xmt_fcp_abort_cmpl));
+
+               len += snprintf(buf + len, PAGE_SIZE - len,
+                               "ABORT: Sol %08x  Usol %08x Err %08x Cmpl %08x",
+                               atomic_read(&tgtp->xmt_abort_sol),
+                               atomic_read(&tgtp->xmt_abort_unsol),
                                atomic_read(&tgtp->xmt_abort_rsp),
-                               atomic_read(&tgtp->xmt_abort_rsp_error),
-                               atomic_read(&tgtp->xmt_abort_cmpl));
+                               atomic_read(&tgtp->xmt_abort_rsp_error));
+
+               len += snprintf(buf + len, PAGE_SIZE - len,
+                               "IO_CTX: %08x outstanding %08x total %x",
+                               phba->sli4_hba.nvmet_ctx_cnt,
+                               phba->sli4_hba.nvmet_io_wait_cnt,
+                               phba->sli4_hba.nvmet_io_wait_total);
 
                len +=  snprintf(buf+len, PAGE_SIZE-len, "\n");
                return len;
@@ -3311,14 +3321,6 @@ LPFC_ATTR_R(nvmet_mrq,
            1, 1, 16,
            "Specify number of RQ pairs for processing NVMET cmds");
 
-/*
- * lpfc_nvmet_mrq_post: Specify number buffers to post on every MRQ
- *
- */
-LPFC_ATTR_R(nvmet_mrq_post, LPFC_DEF_MRQ_POST,
-           LPFC_MIN_MRQ_POST, LPFC_MAX_MRQ_POST,
-           "Specify number of buffers to post on every MRQ");
-
 /*
  * lpfc_enable_fc4_type: Defines what FC4 types are supported.
  * Supported Values:  1 - register just FCP
@@ -5154,7 +5156,6 @@ struct device_attribute *lpfc_hba_attrs[] = {
        &dev_attr_lpfc_suppress_rsp,
        &dev_attr_lpfc_nvme_io_channel,
        &dev_attr_lpfc_nvmet_mrq,
-       &dev_attr_lpfc_nvmet_mrq_post,
        &dev_attr_lpfc_nvme_enable_fb,
        &dev_attr_lpfc_nvmet_fb_size,
        &dev_attr_lpfc_enable_bg,
@@ -6194,7 +6195,6 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
 
        lpfc_enable_fc4_type_init(phba, lpfc_enable_fc4_type);
        lpfc_nvmet_mrq_init(phba, lpfc_nvmet_mrq);
-       lpfc_nvmet_mrq_post_init(phba, lpfc_nvmet_mrq_post);
 
        /* Initialize first burst. Target vs Initiator are different. */
        lpfc_nvme_enable_fb_init(phba, lpfc_nvme_enable_fb);
@@ -6291,7 +6291,6 @@ lpfc_nvme_mod_param_dep(struct lpfc_hba *phba)
                /* Not NVME Target mode.  Turn off Target parameters. */
                phba->nvmet_support = 0;
                phba->cfg_nvmet_mrq = 0;
-               phba->cfg_nvmet_mrq_post = 0;
                phba->cfg_nvmet_fb_size = 0;
        }
 
index 1c55408ac718a94f9aa622210a0ebf96a9896137..8912767e7bc88cc407ea3fb372f242e2cbccd0de 100644 (file)
@@ -75,6 +75,10 @@ void lpfc_init_vpi_cmpl(struct lpfc_hba *, LPFC_MBOXQ_t *);
 void lpfc_cancel_all_vport_retry_delay_timer(struct lpfc_hba *);
 void lpfc_retry_pport_discovery(struct lpfc_hba *);
 void lpfc_release_rpi(struct lpfc_hba *, struct lpfc_vport *, uint16_t);
+int lpfc_init_iocb_list(struct lpfc_hba *phba, int cnt);
+void lpfc_free_iocb_list(struct lpfc_hba *phba);
+int lpfc_post_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *hrq,
+                       struct lpfc_queue *drq, int count, int idx);
 
 void lpfc_mbx_cmpl_local_config_link(struct lpfc_hba *, LPFC_MBOXQ_t *);
 void lpfc_mbx_cmpl_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *);
@@ -246,16 +250,14 @@ struct hbq_dmabuf *lpfc_sli4_rb_alloc(struct lpfc_hba *);
 void lpfc_sli4_rb_free(struct lpfc_hba *, struct hbq_dmabuf *);
 struct rqb_dmabuf *lpfc_sli4_nvmet_alloc(struct lpfc_hba *phba);
 void lpfc_sli4_nvmet_free(struct lpfc_hba *phba, struct rqb_dmabuf *dmab);
-void lpfc_nvmet_rq_post(struct lpfc_hba *phba, struct lpfc_nvmet_rcv_ctx *ctxp,
-                       struct lpfc_dmabuf *mp);
+void lpfc_nvmet_ctxbuf_post(struct lpfc_hba *phba,
+                           struct lpfc_nvmet_ctxbuf *ctxp);
 int lpfc_nvmet_rcv_unsol_abort(struct lpfc_vport *vport,
                               struct fc_frame_header *fc_hdr);
 void lpfc_sli4_build_dflt_fcf_record(struct lpfc_hba *, struct fcf_record *,
                        uint16_t);
 int lpfc_sli4_rq_put(struct lpfc_queue *hq, struct lpfc_queue *dq,
                     struct lpfc_rqe *hrqe, struct lpfc_rqe *drqe);
-int lpfc_post_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *hq,
-                       struct lpfc_queue *dq, int count);
 int lpfc_free_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *hq);
 void lpfc_unregister_fcf(struct lpfc_hba *);
 void lpfc_unregister_fcf_rescan(struct lpfc_hba *);
@@ -271,6 +273,7 @@ int lpfc_sli4_fcf_rr_next_proc(struct lpfc_vport *, uint16_t);
 void lpfc_sli4_clear_fcf_rr_bmask(struct lpfc_hba *);
 
 int lpfc_mem_alloc(struct lpfc_hba *, int align);
+int lpfc_nvmet_mem_alloc(struct lpfc_hba *phba);
 int lpfc_mem_alloc_active_rrq_pool_s4(struct lpfc_hba *);
 void lpfc_mem_free(struct lpfc_hba *);
 void lpfc_mem_free_all(struct lpfc_hba *);
index c7962dae4dab8c7130dcb46fa1b7d45dac0509eb..f2cd19c6c2df9fd77516d18fddf2de04cf531437 100644 (file)
@@ -2092,6 +2092,7 @@ lpfc_fdmi_port_attr_fc4type(struct lpfc_vport *vport,
 
        ae->un.AttrTypes[3] = 0x02; /* Type 1 - ELS */
        ae->un.AttrTypes[2] = 0x01; /* Type 8 - FCP */
+       ae->un.AttrTypes[6] = 0x01; /* Type 40 - NVME */
        ae->un.AttrTypes[7] = 0x01; /* Type 32 - CT */
        size = FOURBYTES + 32;
        ad->AttrLen = cpu_to_be16(size);
index fce549a91911c197d8e616bce9e0c13cb17de535..4bcb92c844ca5f5061c8a4a5cdfb3d7835594162 100644 (file)
@@ -797,11 +797,6 @@ lpfc_debugfs_nvmestat_data(struct lpfc_vport *vport, char *buf, int size)
                                atomic_read(&tgtp->xmt_fcp_write),
                                atomic_read(&tgtp->xmt_fcp_rsp));
 
-               len += snprintf(buf + len, size - len,
-                               "FCP Rsp: abort %08x drop %08x\n",
-                               atomic_read(&tgtp->xmt_fcp_abort),
-                               atomic_read(&tgtp->xmt_fcp_drop));
-
                len += snprintf(buf + len, size - len,
                                "FCP Rsp Cmpl: %08x err %08x drop %08x\n",
                                atomic_read(&tgtp->xmt_fcp_rsp_cmpl),
@@ -809,10 +804,16 @@ lpfc_debugfs_nvmestat_data(struct lpfc_vport *vport, char *buf, int size)
                                atomic_read(&tgtp->xmt_fcp_rsp_drop));
 
                len += snprintf(buf + len, size - len,
-                               "ABORT: Xmt %08x Err %08x Cmpl %08x",
+                               "ABORT: Xmt %08x Cmpl %08x\n",
+                               atomic_read(&tgtp->xmt_fcp_abort),
+                               atomic_read(&tgtp->xmt_fcp_abort_cmpl));
+
+               len += snprintf(buf + len, size - len,
+                               "ABORT: Sol %08x  Usol %08x Err %08x Cmpl %08x",
+                               atomic_read(&tgtp->xmt_abort_sol),
+                               atomic_read(&tgtp->xmt_abort_unsol),
                                atomic_read(&tgtp->xmt_abort_rsp),
-                               atomic_read(&tgtp->xmt_abort_rsp_error),
-                               atomic_read(&tgtp->xmt_abort_cmpl));
+                               atomic_read(&tgtp->xmt_abort_rsp_error));
 
                len +=  snprintf(buf + len, size - len, "\n");
 
@@ -841,6 +842,12 @@ lpfc_debugfs_nvmestat_data(struct lpfc_vport *vport, char *buf, int size)
                        }
                        spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock);
                }
+
+               len += snprintf(buf + len, size - len,
+                               "IO_CTX: %08x  outstanding %08x total %08x\n",
+                               phba->sli4_hba.nvmet_ctx_cnt,
+                               phba->sli4_hba.nvmet_io_wait_cnt,
+                               phba->sli4_hba.nvmet_io_wait_total);
        } else {
                if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME))
                        return len;
@@ -1959,6 +1966,7 @@ lpfc_debugfs_nvmestat_write(struct file *file, const char __user *buf,
                atomic_set(&tgtp->rcv_ls_req_out, 0);
                atomic_set(&tgtp->rcv_ls_req_drop, 0);
                atomic_set(&tgtp->xmt_ls_abort, 0);
+               atomic_set(&tgtp->xmt_ls_abort_cmpl, 0);
                atomic_set(&tgtp->xmt_ls_rsp, 0);
                atomic_set(&tgtp->xmt_ls_drop, 0);
                atomic_set(&tgtp->xmt_ls_rsp_error, 0);
@@ -1967,19 +1975,22 @@ lpfc_debugfs_nvmestat_write(struct file *file, const char __user *buf,
                atomic_set(&tgtp->rcv_fcp_cmd_in, 0);
                atomic_set(&tgtp->rcv_fcp_cmd_out, 0);
                atomic_set(&tgtp->rcv_fcp_cmd_drop, 0);
-               atomic_set(&tgtp->xmt_fcp_abort, 0);
                atomic_set(&tgtp->xmt_fcp_drop, 0);
                atomic_set(&tgtp->xmt_fcp_read_rsp, 0);
                atomic_set(&tgtp->xmt_fcp_read, 0);
                atomic_set(&tgtp->xmt_fcp_write, 0);
                atomic_set(&tgtp->xmt_fcp_rsp, 0);
+               atomic_set(&tgtp->xmt_fcp_release, 0);
                atomic_set(&tgtp->xmt_fcp_rsp_cmpl, 0);
                atomic_set(&tgtp->xmt_fcp_rsp_error, 0);
                atomic_set(&tgtp->xmt_fcp_rsp_drop, 0);
 
+               atomic_set(&tgtp->xmt_fcp_abort, 0);
+               atomic_set(&tgtp->xmt_fcp_abort_cmpl, 0);
+               atomic_set(&tgtp->xmt_abort_sol, 0);
+               atomic_set(&tgtp->xmt_abort_unsol, 0);
                atomic_set(&tgtp->xmt_abort_rsp, 0);
                atomic_set(&tgtp->xmt_abort_rsp_error, 0);
-               atomic_set(&tgtp->xmt_abort_cmpl, 0);
        }
        return nbytes;
 }
@@ -3070,11 +3081,11 @@ __lpfc_idiag_print_wq(struct lpfc_queue *qp, char *wqtype,
                        qp->assoc_qid, qp->q_cnt_1,
                        (unsigned long long)qp->q_cnt_4);
        len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len,
-                       "\t\tWQID[%02d], QE-CNT[%04d], QE-SIZE[%04d], "
-                       "HOST-IDX[%04d], PORT-IDX[%04d]",
+                       "\t\tWQID[%02d], QE-CNT[%04d], QE-SZ[%04d], "
+                       "HST-IDX[%04d], PRT-IDX[%04d], PST[%03d]",
                        qp->queue_id, qp->entry_count,
                        qp->entry_size, qp->host_index,
-                       qp->hba_index);
+                       qp->hba_index, qp->entry_repost);
        len +=  snprintf(pbuffer + len,
                        LPFC_QUE_INFO_GET_BUF_SIZE - len, "\n");
        return len;
@@ -3121,11 +3132,11 @@ __lpfc_idiag_print_cq(struct lpfc_queue *qp, char *cqtype,
                        qp->assoc_qid, qp->q_cnt_1, qp->q_cnt_2,
                        qp->q_cnt_3, (unsigned long long)qp->q_cnt_4);
        len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len,
-                       "\tCQID[%02d], QE-CNT[%04d], QE-SIZE[%04d], "
-                       "HOST-IDX[%04d], PORT-IDX[%04d]",
+                       "\tCQID[%02d], QE-CNT[%04d], QE-SZ[%04d], "
+                       "HST-IDX[%04d], PRT-IDX[%04d], PST[%03d]",
                        qp->queue_id, qp->entry_count,
                        qp->entry_size, qp->host_index,
-                       qp->hba_index);
+                       qp->hba_index, qp->entry_repost);
 
        len +=  snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len, "\n");
 
@@ -3143,20 +3154,20 @@ __lpfc_idiag_print_rqpair(struct lpfc_queue *qp, struct lpfc_queue *datqp,
                        "\t\t%s RQ info: ", rqtype);
        len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len,
                        "AssocCQID[%02d]: RQ-STAT[nopost:x%x nobuf:x%x "
-                       "trunc:x%x rcv:x%llx]\n",
+                       "posted:x%x rcv:x%llx]\n",
                        qp->assoc_qid, qp->q_cnt_1, qp->q_cnt_2,
                        qp->q_cnt_3, (unsigned long long)qp->q_cnt_4);
        len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len,
-                       "\t\tHQID[%02d], QE-CNT[%04d], QE-SIZE[%04d], "
-                       "HOST-IDX[%04d], PORT-IDX[%04d]\n",
+                       "\t\tHQID[%02d], QE-CNT[%04d], QE-SZ[%04d], "
+                       "HST-IDX[%04d], PRT-IDX[%04d], PST[%03d]\n",
                        qp->queue_id, qp->entry_count, qp->entry_size,
-                       qp->host_index, qp->hba_index);
+                       qp->host_index, qp->hba_index, qp->entry_repost);
        len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len,
-                       "\t\tDQID[%02d], QE-CNT[%04d], QE-SIZE[%04d], "
-                       "HOST-IDX[%04d], PORT-IDX[%04d]\n",
+                       "\t\tDQID[%02d], QE-CNT[%04d], QE-SZ[%04d], "
+                       "HST-IDX[%04d], PRT-IDX[%04d], PST[%03d]\n",
                        datqp->queue_id, datqp->entry_count,
                        datqp->entry_size, datqp->host_index,
-                       datqp->hba_index);
+                       datqp->hba_index, datqp->entry_repost);
        return len;
 }
 
@@ -3242,10 +3253,10 @@ __lpfc_idiag_print_eq(struct lpfc_queue *qp, char *eqtype,
                        eqtype, qp->q_cnt_1, qp->q_cnt_2, qp->q_cnt_3,
                        (unsigned long long)qp->q_cnt_4);
        len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len,
-                       "EQID[%02d], QE-CNT[%04d], QE-SIZE[%04d], "
-                       "HOST-IDX[%04d], PORT-IDX[%04d]",
+                       "EQID[%02d], QE-CNT[%04d], QE-SZ[%04d], "
+                       "HST-IDX[%04d], PRT-IDX[%04d], PST[%03d]",
                        qp->queue_id, qp->entry_count, qp->entry_size,
-                       qp->host_index, qp->hba_index);
+                       qp->host_index, qp->hba_index, qp->entry_repost);
        len +=  snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len, "\n");
 
        return len;
@@ -5855,8 +5866,10 @@ lpfc_debugfs_terminate(struct lpfc_vport *vport)
                        atomic_dec(&lpfc_debugfs_hba_count);
                }
 
-               debugfs_remove(lpfc_debugfs_root); /* lpfc */
-               lpfc_debugfs_root = NULL;
+               if (atomic_read(&lpfc_debugfs_hba_count) == 0) {
+                       debugfs_remove(lpfc_debugfs_root); /* lpfc */
+                       lpfc_debugfs_root = NULL;
+               }
        }
 #endif
        return;
index 9d5a379f4b15734a484c643fc9cc81b0ba8b33e9..094c97b9e5f741faba5e0ef904f97faa3cc7a734 100644 (file)
@@ -90,6 +90,7 @@ struct lpfc_nodelist {
 #define NLP_FCP_INITIATOR  0x10                        /* entry is an FCP Initiator */
 #define NLP_NVME_TARGET    0x20                        /* entry is a NVME Target */
 #define NLP_NVME_INITIATOR 0x40                        /* entry is a NVME Initiator */
+#define NLP_NVME_DISCOVERY 0x80                 /* entry has NVME disc srvc */
 
        uint16_t        nlp_fc4_type;           /* FC types node supports. */
                                                /* Assigned from GID_FF, only
index 67827e397431abe8b55955d9cc6497cbf680c054..8e532b39ae93af5c35a1199084f606b627e37ff0 100644 (file)
@@ -1047,6 +1047,13 @@ stop_rr_fcf_flogi:
                                 irsp->ulpStatus, irsp->un.ulpWord[4],
                                 irsp->ulpTimeout);
 
+
+               /* If this is not a loop open failure, bail out */
+               if (!(irsp->ulpStatus == IOSTAT_LOCAL_REJECT &&
+                     ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) ==
+                                       IOERR_LOOP_OPEN_FAILURE)))
+                       goto flogifail;
+
                /* FLOGI failed, so there is no fabric */
                spin_lock_irq(shost->host_lock);
                vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
@@ -2077,16 +2084,19 @@ lpfc_cmpl_els_prli(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
 
        if (irsp->ulpStatus) {
                /* Check for retry */
+               ndlp->fc4_prli_sent--;
                if (lpfc_els_retry(phba, cmdiocb, rspiocb)) {
                        /* ELS command is being retried */
-                       ndlp->fc4_prli_sent--;
                        goto out;
                }
+
                /* PRLI failed */
                lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
-                                "2754 PRLI failure DID:%06X Status:x%x/x%x\n",
+                                "2754 PRLI failure DID:%06X Status:x%x/x%x, "
+                                "data: x%x\n",
                                 ndlp->nlp_DID, irsp->ulpStatus,
-                                irsp->un.ulpWord[4]);
+                                irsp->un.ulpWord[4], ndlp->fc4_prli_sent);
+
                /* Do not call DSM for lpfc_els_abort'ed ELS cmds */
                if (lpfc_error_lost_link(irsp))
                        goto out;
@@ -7441,6 +7451,13 @@ lpfc_els_flush_cmd(struct lpfc_vport *vport)
         */
        spin_lock_irq(&phba->hbalock);
        pring = lpfc_phba_elsring(phba);
+
+       /* Bail out if we have no ELS wq, e.g. in the PCI error recovery case. */
+       if (unlikely(!pring)) {
+               spin_unlock_irq(&phba->hbalock);
+               return;
+       }
+
        if (phba->sli_rev == LPFC_SLI_REV4)
                spin_lock(&pring->ring_lock);
 
@@ -8667,7 +8684,8 @@ lpfc_cmpl_els_fdisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
                lpfc_do_scr_ns_plogi(phba, vport);
        goto out;
 fdisc_failed:
-       if (vport->fc_vport->vport_state != FC_VPORT_NO_FABRIC_RSCS)
+       if (vport->fc_vport &&
+           (vport->fc_vport->vport_state != FC_VPORT_NO_FABRIC_RSCS))
                lpfc_vport_set_state(vport, FC_VPORT_FAILED);
        /* Cancel discovery timer */
        lpfc_can_disctmo(vport);
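
In the first lpfc_els.c hunk, only a LOCAL_REJECT completion whose parameter decodes to LOOP_OPEN_FAILURE is still treated as "no fabric present"; any other FLOGI failure now jumps straight to flogifail. A sketch of that predicate in isolation; the constant values below are stand-ins, the real definitions live in the lpfc headers:

#include <assert.h>
#include <stdint.h>

/* Stand-in values for the lpfc header definitions */
#define IOSTAT_LOCAL_REJECT     0x03
#define IOERR_PARAM_MASK        0x00ffffff
#define IOERR_LOOP_OPEN_FAILURE 0x0a

/* Nonzero when the FLOGI completion means "loop open failed, no fabric" */
static int flogi_is_loop_open_failure(uint32_t ulp_status, uint32_t ulp_word4)
{
        return ulp_status == IOSTAT_LOCAL_REJECT &&
               (ulp_word4 & IOERR_PARAM_MASK) == IOERR_LOOP_OPEN_FAILURE;
}

int main(void)
{
        assert(flogi_is_loop_open_failure(IOSTAT_LOCAL_REJECT, 0x0a));
        assert(!flogi_is_loop_open_failure(IOSTAT_LOCAL_REJECT, 0x05));
        assert(!flogi_is_loop_open_failure(0x00, 0x0a));
        return 0;
}
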
index 0482c558033104d3a44f75290750e1acdc3ee0d9..3ffcd9215ca892eb7ef3e5972df50a427ef17369 100644 (file)
@@ -693,15 +693,16 @@ lpfc_work_done(struct lpfc_hba *phba)
        pring = lpfc_phba_elsring(phba);
        status = (ha_copy & (HA_RXMASK  << (4*LPFC_ELS_RING)));
        status >>= (4*LPFC_ELS_RING);
-       if ((status & HA_RXMASK) ||
-           (pring->flag & LPFC_DEFERRED_RING_EVENT) ||
-           (phba->hba_flag & HBA_SP_QUEUE_EVT)) {
+       if (pring && (status & HA_RXMASK ||
+                     pring->flag & LPFC_DEFERRED_RING_EVENT ||
+                     phba->hba_flag & HBA_SP_QUEUE_EVT)) {
                if (pring->flag & LPFC_STOP_IOCB_EVENT) {
                        pring->flag |= LPFC_DEFERRED_RING_EVENT;
                        /* Set the lpfc data pending flag */
                        set_bit(LPFC_DATA_READY, &phba->data_flags);
                } else {
-                       if (phba->link_state >= LPFC_LINK_UP) {
+                       if (phba->link_state >= LPFC_LINK_UP ||
+                           phba->link_flag & LS_MDS_LOOPBACK) {
                                pring->flag &= ~LPFC_DEFERRED_RING_EVENT;
                                lpfc_sli_handle_slow_ring_event(phba, pring,
                                                                (status &
index 1d12f2be36bcccd336f8892aa0a20d3109d7150c..e0a5fce416aeea7604ab9a46464d8514e82fb0cb 100644 (file)
@@ -1356,6 +1356,7 @@ struct lpfc_mbx_wq_destroy {
 
 #define LPFC_HDR_BUF_SIZE 128
 #define LPFC_DATA_BUF_SIZE 2048
+#define LPFC_NVMET_DATA_BUF_SIZE 128
 struct rq_context {
        uint32_t word0;
 #define lpfc_rq_context_rqe_count_SHIFT        16      /* Version 0 Only */
@@ -4420,6 +4421,19 @@ struct fcp_treceive64_wqe {
 };
 #define TXRDY_PAYLOAD_LEN      12
 
+#define CMD_SEND_FRAME 0xE1
+
+struct send_frame_wqe {
+       struct ulp_bde64 bde;          /* words 0-2 */
+       uint32_t frame_len;            /* word 3 */
+       uint32_t fc_hdr_wd0;           /* word 4 */
+       uint32_t fc_hdr_wd1;           /* word 5 */
+       struct wqe_common wqe_com;     /* words 6-11 */
+       uint32_t fc_hdr_wd2;           /* word 12 */
+       uint32_t fc_hdr_wd3;           /* word 13 */
+       uint32_t fc_hdr_wd4;           /* word 14 */
+       uint32_t fc_hdr_wd5;           /* word 15 */
+};
 
 union lpfc_wqe {
        uint32_t words[16];
@@ -4438,7 +4452,7 @@ union lpfc_wqe {
        struct fcp_trsp64_wqe fcp_trsp;
        struct fcp_tsend64_wqe fcp_tsend;
        struct fcp_treceive64_wqe fcp_treceive;
-
+       struct send_frame_wqe send_frame;
 };
 
 union lpfc_wqe128 {
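
The new send_frame_wqe (paired with the CMD_SEND_FRAME opcode) has to occupy exactly the 16 words of a basic WQE so it can sit in union lpfc_wqe. A compile-time sketch of that layout invariant, with simplified stand-ins for ulp_bde64 and wqe_common sized per the word comments above:

#include <stdint.h>

/* Simplified stand-ins for the lpfc types referenced by the struct */
struct ulp_bde64  { uint32_t w[3]; };           /* words 0-2  */
struct wqe_common { uint32_t w[6]; };           /* words 6-11 */

struct send_frame_wqe {
        struct ulp_bde64 bde;                   /* words 0-2   */
        uint32_t frame_len;                     /* word 3      */
        uint32_t fc_hdr_wd0;                    /* word 4      */
        uint32_t fc_hdr_wd1;                    /* word 5      */
        struct wqe_common wqe_com;              /* words 6-11  */
        uint32_t fc_hdr_wd2;                    /* word 12     */
        uint32_t fc_hdr_wd3;                    /* word 13     */
        uint32_t fc_hdr_wd4;                    /* word 14     */
        uint32_t fc_hdr_wd5;                    /* word 15     */
};

/* A basic WQE is 16 x 32-bit words; the new union member must fit exactly */
_Static_assert(sizeof(struct send_frame_wqe) == 16 * sizeof(uint32_t),
               "send_frame_wqe must be exactly one 64-byte WQE");

int main(void) { return 0; }
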
index 4b1eb98c228df823a986f5568f8b948a7b2ef9bf..9add9473cae52a1f2bf5d1b78a8854a57c9f6192 100644 (file)
@@ -1099,7 +1099,7 @@ lpfc_hba_down_post_s4(struct lpfc_hba *phba)
 
                list_for_each_entry_safe(ctxp, ctxp_next, &nvmet_aborts, list) {
                        ctxp->flag &= ~(LPFC_NVMET_XBUSY | LPFC_NVMET_ABORT_OP);
-                       lpfc_nvmet_rq_post(phba, ctxp, &ctxp->rqb_buffer->hbuf);
+                       lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
                }
        }
 
@@ -3381,7 +3381,7 @@ lpfc_sli4_nvmet_sgl_update(struct lpfc_hba *phba)
 {
        struct lpfc_sglq *sglq_entry = NULL, *sglq_entry_next = NULL;
        uint16_t i, lxri, xri_cnt, els_xri_cnt;
-       uint16_t nvmet_xri_cnt, tot_cnt;
+       uint16_t nvmet_xri_cnt;
        LIST_HEAD(nvmet_sgl_list);
        int rc;
 
@@ -3389,15 +3389,9 @@ lpfc_sli4_nvmet_sgl_update(struct lpfc_hba *phba)
         * update on pci function's nvmet xri-sgl list
         */
        els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
-       nvmet_xri_cnt = phba->cfg_nvmet_mrq * phba->cfg_nvmet_mrq_post;
-       tot_cnt = phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt;
-       if (nvmet_xri_cnt > tot_cnt) {
-               phba->cfg_nvmet_mrq_post = tot_cnt / phba->cfg_nvmet_mrq;
-               nvmet_xri_cnt = phba->cfg_nvmet_mrq * phba->cfg_nvmet_mrq_post;
-               lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
-                               "6301 NVMET post-sgl count changed to %d\n",
-                               phba->cfg_nvmet_mrq_post);
-       }
+
+       /* For NVMET, ALL remaining XRIs are dedicated for IO processing */
+       nvmet_xri_cnt = phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt;
 
        if (nvmet_xri_cnt > phba->sli4_hba.nvmet_xri_cnt) {
                /* els xri-sgl expanded */
@@ -4546,6 +4540,19 @@ lpfc_sli4_async_fc_evt(struct lpfc_hba *phba, struct lpfc_acqe_fc_la *acqe_fc)
        pmb->vport = phba->pport;
 
        if (phba->sli4_hba.link_state.status != LPFC_FC_LA_TYPE_LINK_UP) {
+               phba->link_flag &= ~(LS_MDS_LINK_DOWN | LS_MDS_LOOPBACK);
+
+               switch (phba->sli4_hba.link_state.status) {
+               case LPFC_FC_LA_TYPE_MDS_LINK_DOWN:
+                       phba->link_flag |= LS_MDS_LINK_DOWN;
+                       break;
+               case LPFC_FC_LA_TYPE_MDS_LOOPBACK:
+                       phba->link_flag |= LS_MDS_LOOPBACK;
+                       break;
+               default:
+                       break;
+               }
+
                /* Parse and translate status field */
                mb = &pmb->u.mb;
                mb->mbxStatus = lpfc_sli4_parse_latt_fault(phba,
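
This lpfc_init.c hunk latches the two MDS diagnostic link states into phba->link_flag, clearing both bits first so stale state never survives a new link-attention event. A reduced model of that translation; the flag values and the enum below are stand-ins:

#include <assert.h>
#include <stdint.h>

#define LS_MDS_LINK_DOWN 0x8
#define LS_MDS_LOOPBACK  0x10

enum la_type { LA_TYPE_LINK_UP, LA_TYPE_LINK_DOWN,
               LA_TYPE_MDS_LINK_DOWN, LA_TYPE_MDS_LOOPBACK };

static uint32_t update_mds_flags(uint32_t link_flag, enum la_type status)
{
        /* Clear both MDS bits first so stale state never survives */
        link_flag &= ~(LS_MDS_LINK_DOWN | LS_MDS_LOOPBACK);

        switch (status) {
        case LA_TYPE_MDS_LINK_DOWN:
                link_flag |= LS_MDS_LINK_DOWN;
                break;
        case LA_TYPE_MDS_LOOPBACK:
                link_flag |= LS_MDS_LOOPBACK;
                break;
        default:
                break;
        }
        return link_flag;
}

int main(void)
{
        uint32_t f = update_mds_flags(0, LA_TYPE_MDS_LOOPBACK);
        assert(f & LS_MDS_LOOPBACK);
        f = update_mds_flags(f, LA_TYPE_LINK_DOWN);
        assert(!(f & (LS_MDS_LOOPBACK | LS_MDS_LINK_DOWN)));
        return 0;
}
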
@@ -5830,6 +5837,9 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
                spin_lock_init(&phba->sli4_hba.abts_nvme_buf_list_lock);
                INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvme_buf_list);
                INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
+               INIT_LIST_HEAD(&phba->sli4_hba.lpfc_nvmet_ctx_list);
+               INIT_LIST_HEAD(&phba->sli4_hba.lpfc_nvmet_io_wait_list);
+
                /* Fast-path XRI aborted CQ Event work queue list */
                INIT_LIST_HEAD(&phba->sli4_hba.sp_nvme_xri_aborted_work_queue);
        }
@@ -5837,6 +5847,7 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
        /* This abort list used by worker thread */
        spin_lock_init(&phba->sli4_hba.sgl_list_lock);
        spin_lock_init(&phba->sli4_hba.nvmet_io_lock);
+       spin_lock_init(&phba->sli4_hba.nvmet_io_wait_lock);
 
        /*
         * Initialize driver internal slow-path work queues
@@ -5951,16 +5962,21 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
                for (i = 0; i < lpfc_enable_nvmet_cnt; i++) {
                        if (wwn == lpfc_enable_nvmet[i]) {
 #if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
+                               if (lpfc_nvmet_mem_alloc(phba))
+                                       break;
+
+                               phba->nvmet_support = 1; /* a match */
+
                                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                                                "6017 NVME Target %016llx\n",
                                                wwn);
-                               phba->nvmet_support = 1; /* a match */
 #else
                                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                                                "6021 Can't enable NVME Target."
                                                " NVME_TARGET_FC infrastructure"
                                                " is not in kernel\n");
 #endif
+                               break;
                        }
                }
        }
@@ -6269,7 +6285,7 @@ lpfc_unset_driver_resource_phase2(struct lpfc_hba *phba)
  *
  * This routine is invoked to free the driver's IOCB list and memory.
  **/
-static void
+void
 lpfc_free_iocb_list(struct lpfc_hba *phba)
 {
        struct lpfc_iocbq *iocbq_entry = NULL, *iocbq_next = NULL;
@@ -6297,7 +6313,7 @@ lpfc_free_iocb_list(struct lpfc_hba *phba)
  *     0 - successful
  *     other values - error
  **/
-static int
+int
 lpfc_init_iocb_list(struct lpfc_hba *phba, int iocb_count)
 {
        struct lpfc_iocbq *iocbq_entry = NULL;
@@ -6525,7 +6541,6 @@ lpfc_sli4_create_rpi_hdr(struct lpfc_hba *phba)
        uint16_t rpi_limit, curr_rpi_range;
        struct lpfc_dmabuf *dmabuf;
        struct lpfc_rpi_hdr *rpi_hdr;
-       uint32_t rpi_count;
 
        /*
         * If the SLI4 port supports extents, posting the rpi header isn't
@@ -6538,8 +6553,7 @@ lpfc_sli4_create_rpi_hdr(struct lpfc_hba *phba)
                return NULL;
 
        /* The limit on the logical index is just the max_rpi count. */
-       rpi_limit = phba->sli4_hba.max_cfg_param.rpi_base +
-       phba->sli4_hba.max_cfg_param.max_rpi - 1;
+       rpi_limit = phba->sli4_hba.max_cfg_param.max_rpi;
 
        spin_lock_irq(&phba->hbalock);
        /*
@@ -6550,18 +6564,10 @@ lpfc_sli4_create_rpi_hdr(struct lpfc_hba *phba)
        curr_rpi_range = phba->sli4_hba.next_rpi;
        spin_unlock_irq(&phba->hbalock);
 
-       /*
-        * The port has a limited number of rpis. The increment here
-        * is LPFC_RPI_HDR_COUNT - 1 to account for the starting value
-        * and to allow the full max_rpi range per port.
-        */
-       if ((curr_rpi_range + (LPFC_RPI_HDR_COUNT - 1)) > rpi_limit)
-               rpi_count = rpi_limit - curr_rpi_range;
-       else
-               rpi_count = LPFC_RPI_HDR_COUNT;
-
-       if (!rpi_count)
+       /* Reached full RPI range */
+       if (curr_rpi_range == rpi_limit)
                return NULL;
+
        /*
         * First allocate the protocol header region for the port.  The
         * port expects a 4KB DMA-mapped memory region that is 4K aligned.
@@ -6595,13 +6601,9 @@ lpfc_sli4_create_rpi_hdr(struct lpfc_hba *phba)
 
        /* The rpi_hdr stores the logical index only. */
        rpi_hdr->start_rpi = curr_rpi_range;
+       rpi_hdr->next_rpi = phba->sli4_hba.next_rpi + LPFC_RPI_HDR_COUNT;
        list_add_tail(&rpi_hdr->list, &phba->sli4_hba.lpfc_rpi_hdr_list);
 
-       /*
-        * The next_rpi stores the next logical module-64 rpi value used
-        * to post physical rpis in subsequent rpi postings.
-        */
-       phba->sli4_hba.next_rpi += rpi_count;
        spin_unlock_irq(&phba->hbalock);
        return rpi_hdr;
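
With the simplified bounds check, lpfc_sli4_create_rpi_hdr either hands out one full LPFC_RPI_HDR_COUNT page of logical RPIs or refuses once next_rpi has reached max_rpi. A user-space model of the allocator; note it assumes, as the `==` comparison above does, that max_rpi is a multiple of the header count:

#include <assert.h>
#include <stdio.h>

#define LPFC_RPI_HDR_COUNT 64

static unsigned int next_rpi;           /* next logical RPI to hand out */
static unsigned int rpi_limit = 256;    /* stands in for max_rpi        */

/* Returns the starting RPI of a new 64-entry header page, or -1 when full */
static int create_rpi_hdr(void)
{
        unsigned int start = next_rpi;

        if (start == rpi_limit)         /* reached full RPI range */
                return -1;
        next_rpi += LPFC_RPI_HDR_COUNT;
        return (int)start;
}

int main(void)
{
        int start;

        while ((start = create_rpi_hdr()) >= 0)
                printf("rpi hdr starts at %d\n", start);
        assert(next_rpi == rpi_limit);
        return 0;
}
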
 
@@ -8172,7 +8174,7 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
                        /* Create NVMET Receive Queue for header */
                        qdesc = lpfc_sli4_queue_alloc(phba,
                                                      phba->sli4_hba.rq_esize,
-                                                     phba->sli4_hba.rq_ecount);
+                                                     LPFC_NVMET_RQE_DEF_COUNT);
                        if (!qdesc) {
                                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                                                "3146 Failed allocate "
@@ -8194,7 +8196,7 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
                        /* Create NVMET Receive Queue for data */
                        qdesc = lpfc_sli4_queue_alloc(phba,
                                                      phba->sli4_hba.rq_esize,
-                                                     phba->sli4_hba.rq_ecount);
+                                                     LPFC_NVMET_RQE_DEF_COUNT);
                        if (!qdesc) {
                                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                                                "3156 Failed allocate "
@@ -8325,46 +8327,6 @@ lpfc_sli4_queue_destroy(struct lpfc_hba *phba)
        INIT_LIST_HEAD(&phba->sli4_hba.lpfc_wq_list);
 }
 
-int
-lpfc_post_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *hrq,
-                   struct lpfc_queue *drq, int count)
-{
-       int rc, i;
-       struct lpfc_rqe hrqe;
-       struct lpfc_rqe drqe;
-       struct lpfc_rqb *rqbp;
-       struct rqb_dmabuf *rqb_buffer;
-       LIST_HEAD(rqb_buf_list);
-
-       rqbp = hrq->rqbp;
-       for (i = 0; i < count; i++) {
-               rqb_buffer = (rqbp->rqb_alloc_buffer)(phba);
-               if (!rqb_buffer)
-                       break;
-               rqb_buffer->hrq = hrq;
-               rqb_buffer->drq = drq;
-               list_add_tail(&rqb_buffer->hbuf.list, &rqb_buf_list);
-       }
-       while (!list_empty(&rqb_buf_list)) {
-               list_remove_head(&rqb_buf_list, rqb_buffer, struct rqb_dmabuf,
-                                hbuf.list);
-
-               hrqe.address_lo = putPaddrLow(rqb_buffer->hbuf.phys);
-               hrqe.address_hi = putPaddrHigh(rqb_buffer->hbuf.phys);
-               drqe.address_lo = putPaddrLow(rqb_buffer->dbuf.phys);
-               drqe.address_hi = putPaddrHigh(rqb_buffer->dbuf.phys);
-               rc = lpfc_sli4_rq_put(hrq, drq, &hrqe, &drqe);
-               if (rc < 0) {
-                       (rqbp->rqb_free_buffer)(phba, rqb_buffer);
-               } else {
-                       list_add_tail(&rqb_buffer->hbuf.list,
-                                     &rqbp->rqb_buffer_list);
-                       rqbp->buffer_count++;
-               }
-       }
-       return 1;
-}
-
 int
 lpfc_free_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *rq)
 {
@@ -8784,9 +8746,6 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
                goto out_destroy;
        }
 
-       lpfc_rq_adjust_repost(phba, phba->sli4_hba.hdr_rq, LPFC_ELS_HBQ);
-       lpfc_rq_adjust_repost(phba, phba->sli4_hba.dat_rq, LPFC_ELS_HBQ);
-
        rc = lpfc_rq_create(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq,
                            phba->sli4_hba.els_cq, LPFC_USOL);
        if (rc) {
@@ -11110,7 +11069,7 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
        struct lpfc_hba   *phba;
        struct lpfc_vport *vport = NULL;
        struct Scsi_Host  *shost = NULL;
-       int error, cnt;
+       int error;
        uint32_t cfg_mode, intr_mode;
 
        /* Allocate memory for HBA structure */
@@ -11144,22 +11103,6 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
                goto out_unset_pci_mem_s4;
        }
 
-       cnt = phba->cfg_iocb_cnt * 1024;
-       if (phba->nvmet_support)
-               cnt += phba->cfg_nvmet_mrq_post * phba->cfg_nvmet_mrq;
-
-       /* Initialize and populate the iocb list per host */
-       lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
-                       "2821 initialize iocb list %d total %d\n",
-                       phba->cfg_iocb_cnt, cnt);
-       error = lpfc_init_iocb_list(phba, cnt);
-
-       if (error) {
-               lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-                               "1413 Failed to initialize iocb list.\n");
-               goto out_unset_driver_resource_s4;
-       }
-
        INIT_LIST_HEAD(&phba->active_rrq_list);
        INIT_LIST_HEAD(&phba->fcf.fcf_pri_list);
 
@@ -11168,7 +11111,7 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
        if (error) {
                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                                "1414 Failed to set up driver resource.\n");
-               goto out_free_iocb_list;
+               goto out_unset_driver_resource_s4;
        }
 
        /* Get the default values for Model Name and Description */
@@ -11268,8 +11211,6 @@ out_destroy_shost:
        lpfc_destroy_shost(phba);
 out_unset_driver_resource:
        lpfc_unset_driver_resource_phase2(phba);
-out_free_iocb_list:
-       lpfc_free_iocb_list(phba);
 out_unset_driver_resource_s4:
        lpfc_sli4_driver_resource_unset(phba);
 out_unset_pci_mem_s4:
index 5986c7957199df6ef97343a3c0402931cbdeb7ad..fcc05a1517c21d5134282e6cc9337ade5ee1a5c9 100644 (file)
@@ -214,6 +214,21 @@ fail_free_drb_pool:
        return -ENOMEM;
 }
 
+int
+lpfc_nvmet_mem_alloc(struct lpfc_hba *phba)
+{
+       phba->lpfc_nvmet_drb_pool =
+               pci_pool_create("lpfc_nvmet_drb_pool",
+                               phba->pcidev, LPFC_NVMET_DATA_BUF_SIZE,
+                               SGL_ALIGN_SZ, 0);
+       if (!phba->lpfc_nvmet_drb_pool) {
+               lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                               "6024 Can't enable NVME Target - no memory\n");
+               return -ENOMEM;
+       }
+       return 0;
+}
+
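
lpfc_nvmet_mem_alloc gives NVMET receive data its own DMA pool, so each buffer is LPFC_NVMET_DATA_BUF_SIZE (128) bytes and SGL-aligned instead of the 2048-byte FCP size. A user-space stand-in for that size and alignment contract; SGL_ALIGN below is an assumed placeholder for SGL_ALIGN_SZ:

#include <assert.h>
#include <stdint.h>
#include <stdlib.h>

#define NVMET_DATA_BUF_SIZE 128
#define SGL_ALIGN           4096        /* assumed stand-in for SGL_ALIGN_SZ */

/* One pool-buffer round trip: allocate aligned, verify, release */
int main(void)
{
        void *buf = NULL;

        if (posix_memalign(&buf, SGL_ALIGN, NVMET_DATA_BUF_SIZE))
                return 1;
        assert(((uintptr_t)buf % SGL_ALIGN) == 0);
        free(buf);
        return 0;
}
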
 /**
  * lpfc_mem_free - Frees memory allocated by lpfc_mem_alloc
  * @phba: HBA to free memory for
@@ -232,6 +247,9 @@ lpfc_mem_free(struct lpfc_hba *phba)
 
        /* Free HBQ pools */
        lpfc_sli_hbqbuf_free_all(phba);
+       if (phba->lpfc_nvmet_drb_pool)
+               pci_pool_destroy(phba->lpfc_nvmet_drb_pool);
+       phba->lpfc_nvmet_drb_pool = NULL;
        if (phba->lpfc_drb_pool)
                pci_pool_destroy(phba->lpfc_drb_pool);
        phba->lpfc_drb_pool = NULL;
@@ -611,8 +629,6 @@ struct rqb_dmabuf *
 lpfc_sli4_nvmet_alloc(struct lpfc_hba *phba)
 {
        struct rqb_dmabuf *dma_buf;
-       struct lpfc_iocbq *nvmewqe;
-       union lpfc_wqe128 *wqe;
 
        dma_buf = kzalloc(sizeof(struct rqb_dmabuf), GFP_KERNEL);
        if (!dma_buf)
@@ -624,69 +640,15 @@ lpfc_sli4_nvmet_alloc(struct lpfc_hba *phba)
                kfree(dma_buf);
                return NULL;
        }
-       dma_buf->dbuf.virt = pci_pool_alloc(phba->lpfc_drb_pool, GFP_KERNEL,
-                                           &dma_buf->dbuf.phys);
+       dma_buf->dbuf.virt = pci_pool_alloc(phba->lpfc_nvmet_drb_pool,
+                                           GFP_KERNEL, &dma_buf->dbuf.phys);
        if (!dma_buf->dbuf.virt) {
                pci_pool_free(phba->lpfc_hrb_pool, dma_buf->hbuf.virt,
                              dma_buf->hbuf.phys);
                kfree(dma_buf);
                return NULL;
        }
-       dma_buf->total_size = LPFC_DATA_BUF_SIZE;
-
-       dma_buf->context = kzalloc(sizeof(struct lpfc_nvmet_rcv_ctx),
-                                  GFP_KERNEL);
-       if (!dma_buf->context) {
-               pci_pool_free(phba->lpfc_drb_pool, dma_buf->dbuf.virt,
-                             dma_buf->dbuf.phys);
-               pci_pool_free(phba->lpfc_hrb_pool, dma_buf->hbuf.virt,
-                             dma_buf->hbuf.phys);
-               kfree(dma_buf);
-               return NULL;
-       }
-
-       dma_buf->iocbq = lpfc_sli_get_iocbq(phba);
-       if (!dma_buf->iocbq) {
-               kfree(dma_buf->context);
-               pci_pool_free(phba->lpfc_drb_pool, dma_buf->dbuf.virt,
-                             dma_buf->dbuf.phys);
-               pci_pool_free(phba->lpfc_hrb_pool, dma_buf->hbuf.virt,
-                             dma_buf->hbuf.phys);
-               kfree(dma_buf);
-               lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
-                               "2621 Ran out of nvmet iocb/WQEs\n");
-               return NULL;
-       }
-       dma_buf->iocbq->iocb_flag = LPFC_IO_NVMET;
-       nvmewqe = dma_buf->iocbq;
-       wqe = (union lpfc_wqe128 *)&nvmewqe->wqe;
-       /* Initialize WQE */
-       memset(wqe, 0, sizeof(union lpfc_wqe));
-       /* Word 7 */
-       bf_set(wqe_ct, &wqe->generic.wqe_com, SLI4_CT_RPI);
-       bf_set(wqe_class, &wqe->generic.wqe_com, CLASS3);
-       bf_set(wqe_pu, &wqe->generic.wqe_com, 1);
-       /* Word 10 */
-       bf_set(wqe_nvme, &wqe->fcp_tsend.wqe_com, 1);
-       bf_set(wqe_ebde_cnt, &wqe->generic.wqe_com, 0);
-       bf_set(wqe_qosd, &wqe->generic.wqe_com, 0);
-
-       dma_buf->iocbq->context1 = NULL;
-       spin_lock(&phba->sli4_hba.sgl_list_lock);
-       dma_buf->sglq = __lpfc_sli_get_nvmet_sglq(phba, dma_buf->iocbq);
-       spin_unlock(&phba->sli4_hba.sgl_list_lock);
-       if (!dma_buf->sglq) {
-               lpfc_sli_release_iocbq(phba, dma_buf->iocbq);
-               kfree(dma_buf->context);
-               pci_pool_free(phba->lpfc_drb_pool, dma_buf->dbuf.virt,
-                             dma_buf->dbuf.phys);
-               pci_pool_free(phba->lpfc_hrb_pool, dma_buf->hbuf.virt,
-                             dma_buf->hbuf.phys);
-               kfree(dma_buf);
-               lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
-                               "6132 Ran out of nvmet XRIs\n");
-               return NULL;
-       }
+       dma_buf->total_size = LPFC_NVMET_DATA_BUF_SIZE;
        return dma_buf;
 }
 
@@ -705,20 +667,9 @@ lpfc_sli4_nvmet_alloc(struct lpfc_hba *phba)
 void
 lpfc_sli4_nvmet_free(struct lpfc_hba *phba, struct rqb_dmabuf *dmab)
 {
-       unsigned long flags;
-
-       __lpfc_clear_active_sglq(phba, dmab->sglq->sli4_lxritag);
-       dmab->sglq->state = SGL_FREED;
-       dmab->sglq->ndlp = NULL;
-
-       spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock, flags);
-       list_add_tail(&dmab->sglq->list, &phba->sli4_hba.lpfc_nvmet_sgl_list);
-       spin_unlock_irqrestore(&phba->sli4_hba.sgl_list_lock, flags);
-
-       lpfc_sli_release_iocbq(phba, dmab->iocbq);
-       kfree(dmab->context);
        pci_pool_free(phba->lpfc_hrb_pool, dmab->hbuf.virt, dmab->hbuf.phys);
-       pci_pool_free(phba->lpfc_drb_pool, dmab->dbuf.virt, dmab->dbuf.phys);
+       pci_pool_free(phba->lpfc_nvmet_drb_pool,
+                     dmab->dbuf.virt, dmab->dbuf.phys);
        kfree(dmab);
 }
 
@@ -803,6 +754,11 @@ lpfc_rq_buf_free(struct lpfc_hba *phba, struct lpfc_dmabuf *mp)
        rc = lpfc_sli4_rq_put(rqb_entry->hrq, rqb_entry->drq, &hrqe, &drqe);
        if (rc < 0) {
                (rqbp->rqb_free_buffer)(phba, rqb_entry);
+               lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                               "6409 Cannot post to RQ %d: %x %x\n",
+                               rqb_entry->hrq->queue_id,
+                               rqb_entry->hrq->host_index,
+                               rqb_entry->hrq->hba_index);
        } else {
                list_add_tail(&rqb_entry->hbuf.list, &rqbp->rqb_buffer_list);
                rqbp->buffer_count++;
index 8777c2d5f50d35ecae18223da67245157811b4be..bff3de053df475365193ea47b153c13795f9c816 100644 (file)
@@ -1944,7 +1944,13 @@ lpfc_cmpl_prli_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 
                /* Target driver cannot solicit NVME FB. */
                if (bf_get_be32(prli_tgt, nvpr)) {
+                       /* Complete the nvme target roles.  The transport
+                        * needs to know if the rport is capable of
+                        * discovery in addition to its role.
+                        */
                        ndlp->nlp_type |= NLP_NVME_TARGET;
+                       if (bf_get_be32(prli_disc, nvpr))
+                               ndlp->nlp_type |= NLP_NVME_DISCOVERY;
                        if ((bf_get_be32(prli_fba, nvpr) == 1) &&
                            (bf_get_be32(prli_fb_sz, nvpr) > 0) &&
                            (phba->cfg_nvme_enable_fb) &&
index 0488580eea12eecd0c2767bc6ad5c11a014ac46d..074a6b5e7763510555d9b7f9f7e34e095af1b0f4 100644 (file)
@@ -142,7 +142,7 @@ out:
 }
 
 /**
- * lpfc_nvmet_rq_post - Repost a NVMET RQ DMA buffer and clean up context
+ * lpfc_nvmet_ctxbuf_post - Repost a NVMET RQ DMA buffer and clean up context
  * @phba: HBA buffer is associated with
- * @ctxp: context to clean up
- * @mp: Buffer to free
+ * @ctx_buf: IO context buffer to repost
@@ -155,24 +155,113 @@ out:
  * Returns: None
  **/
 void
-lpfc_nvmet_rq_post(struct lpfc_hba *phba, struct lpfc_nvmet_rcv_ctx *ctxp,
-                  struct lpfc_dmabuf *mp)
+lpfc_nvmet_ctxbuf_post(struct lpfc_hba *phba, struct lpfc_nvmet_ctxbuf *ctx_buf)
 {
-       if (ctxp) {
-               if (ctxp->flag)
-                       lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
-                               "6314 rq_post ctx xri x%x flag x%x\n",
-                               ctxp->oxid, ctxp->flag);
-
-               if (ctxp->txrdy) {
-                       pci_pool_free(phba->txrdy_payload_pool, ctxp->txrdy,
-                                     ctxp->txrdy_phys);
-                       ctxp->txrdy = NULL;
-                       ctxp->txrdy_phys = 0;
+#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
+       struct lpfc_nvmet_rcv_ctx *ctxp = ctx_buf->context;
+       struct lpfc_nvmet_tgtport *tgtp;
+       struct fc_frame_header *fc_hdr;
+       struct rqb_dmabuf *nvmebuf;
+       uint32_t *payload;
+       uint32_t size, oxid, sid, rc;
+       unsigned long iflag;
+
+       if (ctxp->txrdy) {
+               pci_pool_free(phba->txrdy_payload_pool, ctxp->txrdy,
+                             ctxp->txrdy_phys);
+               ctxp->txrdy = NULL;
+               ctxp->txrdy_phys = 0;
+       }
+       ctxp->state = LPFC_NVMET_STE_FREE;
+
+       spin_lock_irqsave(&phba->sli4_hba.nvmet_io_wait_lock, iflag);
+       if (phba->sli4_hba.nvmet_io_wait_cnt) {
+               list_remove_head(&phba->sli4_hba.lpfc_nvmet_io_wait_list,
+                                nvmebuf, struct rqb_dmabuf,
+                                hbuf.list);
+               phba->sli4_hba.nvmet_io_wait_cnt--;
+               spin_unlock_irqrestore(&phba->sli4_hba.nvmet_io_wait_lock,
+                                      iflag);
+
+               fc_hdr = (struct fc_frame_header *)(nvmebuf->hbuf.virt);
+               oxid = be16_to_cpu(fc_hdr->fh_ox_id);
+               tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
+               payload = (uint32_t *)(nvmebuf->dbuf.virt);
+               size = nvmebuf->bytes_recv;
+               sid = sli4_sid_from_fc_hdr(fc_hdr);
+
+               ctxp = (struct lpfc_nvmet_rcv_ctx *)ctx_buf->context;
+               memset(ctxp, 0, sizeof(ctxp->ctx));
+               ctxp->wqeq = NULL;
+               ctxp->txrdy = NULL;
+               ctxp->offset = 0;
+               ctxp->phba = phba;
+               ctxp->size = size;
+               ctxp->oxid = oxid;
+               ctxp->sid = sid;
+               ctxp->state = LPFC_NVMET_STE_RCV;
+               ctxp->entry_cnt = 1;
+               ctxp->flag = 0;
+               ctxp->ctxbuf = ctx_buf;
+               spin_lock_init(&ctxp->ctxlock);
+
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+               if (phba->ktime_on) {
+                       ctxp->ts_cmd_nvme = ktime_get_ns();
+                       ctxp->ts_isr_cmd = ctxp->ts_cmd_nvme;
+                       ctxp->ts_nvme_data = 0;
+                       ctxp->ts_data_wqput = 0;
+                       ctxp->ts_isr_data = 0;
+                       ctxp->ts_data_nvme = 0;
+                       ctxp->ts_nvme_status = 0;
+                       ctxp->ts_status_wqput = 0;
+                       ctxp->ts_isr_status = 0;
+                       ctxp->ts_status_nvme = 0;
                }
-               ctxp->state = LPFC_NVMET_STE_FREE;
+#endif
+               atomic_inc(&tgtp->rcv_fcp_cmd_in);
+               /*
+                * The calling sequence should be:
+                * nvmet_fc_rcv_fcp_req -> lpfc_nvmet_xmt_fcp_op/cmp -> req->done
+                * lpfc_nvmet_xmt_fcp_op_cmp should free the allocated ctxp.
+                * When we return from nvmet_fc_rcv_fcp_req, all relevant info
+                * in the NVME command / FC header is stored.
+                * A buffer has already been reposted for this IO, so just free
+                * the nvmebuf.
+                */
+               rc = nvmet_fc_rcv_fcp_req(phba->targetport, &ctxp->ctx.fcp_req,
+                                         payload, size);
+
+               /* Process FCP command */
+               if (rc == 0) {
+                       atomic_inc(&tgtp->rcv_fcp_cmd_out);
+                       nvmebuf->hrq->rqbp->rqb_free_buffer(phba, nvmebuf);
+                       return;
+               }
+
+               atomic_inc(&tgtp->rcv_fcp_cmd_drop);
+               lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
+                               "2582 FCP Drop IO x%x: err x%x: x%x x%x x%x\n",
+                               ctxp->oxid, rc,
+                               atomic_read(&tgtp->rcv_fcp_cmd_in),
+                               atomic_read(&tgtp->rcv_fcp_cmd_out),
+                               atomic_read(&tgtp->xmt_fcp_release));
+
+               lpfc_nvmet_defer_release(phba, ctxp);
+               lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, sid, oxid);
+               nvmebuf->hrq->rqbp->rqb_free_buffer(phba, nvmebuf);
+               return;
        }
-       lpfc_rq_buf_free(phba, mp);
+       spin_unlock_irqrestore(&phba->sli4_hba.nvmet_io_wait_lock, iflag);
+
+       spin_lock_irqsave(&phba->sli4_hba.nvmet_io_lock, iflag);
+       list_add_tail(&ctx_buf->list,
+                     &phba->sli4_hba.lpfc_nvmet_ctx_list);
+       phba->sli4_hba.nvmet_ctx_cnt++;
+       spin_unlock_irqrestore(&phba->sli4_hba.nvmet_io_lock, iflag);
+#endif
 }
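
lpfc_nvmet_ctxbuf_post is now the release half of a context recycling scheme: if a received command is parked on the io_wait list, the freed context services it immediately; only otherwise does the context return to the free list. A single-threaded user-space model of that decision (a LIFO stack stands in for the driver's FIFO lists, and the spinlocks are omitted):

#include <stdio.h>
#include <stdlib.h>

struct node { struct node *next; int id; };

static struct node *ctx_free_list;      /* free IO contexts          */
static struct node *io_wait_list;       /* receives parked for a ctx */

static struct node *pop(struct node **list)
{
        struct node *n = *list;
        if (n)
                *list = n->next;
        return n;
}

static void push(struct node **list, struct node *n)
{
        n->next = *list;
        *list = n;
}

/* Release-side logic modeled on lpfc_nvmet_ctxbuf_post() */
static void ctxbuf_post(struct node *ctx)
{
        struct node *waiter = pop(&io_wait_list);

        if (waiter) {
                /* Reuse the context immediately for the deferred IO */
                printf("ctx %d services deferred IO %d\n", ctx->id, waiter->id);
                free(waiter);
                return;                  /* ctx stays in flight */
        }
        push(&ctx_free_list, ctx);       /* nobody waiting: back to the pool */
}

int main(void)
{
        struct node *ctx = malloc(sizeof(*ctx)), *io = malloc(sizeof(*io));
        ctx->id = 1; io->id = 7;

        push(&io_wait_list, io);         /* a receive arrived with no ctx */
        ctxbuf_post(ctx);                /* freed ctx picks it up         */
        free(ctx);
        return 0;
}
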
 
 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
@@ -502,6 +591,7 @@ lpfc_nvmet_xmt_ls_rsp(struct nvmet_fc_target_port *tgtport,
                                "6150 LS Drop IO x%x: Prep\n",
                                ctxp->oxid);
                lpfc_in_buf_free(phba, &nvmebuf->dbuf);
+               atomic_inc(&nvmep->xmt_ls_abort);
                lpfc_nvmet_unsol_ls_issue_abort(phba, ctxp,
                                                ctxp->sid, ctxp->oxid);
                return -ENOMEM;
@@ -545,6 +635,7 @@ lpfc_nvmet_xmt_ls_rsp(struct nvmet_fc_target_port *tgtport,
        lpfc_nlp_put(nvmewqeq->context1);
 
        lpfc_in_buf_free(phba, &nvmebuf->dbuf);
+       atomic_inc(&nvmep->xmt_ls_abort);
        lpfc_nvmet_unsol_ls_issue_abort(phba, ctxp, ctxp->sid, ctxp->oxid);
        return -ENXIO;
 }
@@ -612,9 +703,9 @@ lpfc_nvmet_xmt_fcp_op(struct nvmet_fc_target_port *tgtport,
        lpfc_nvmeio_data(phba, "NVMET FCP CMND: xri x%x op x%x len x%x\n",
                         ctxp->oxid, rsp->op, rsp->rsplen);
 
+       ctxp->flag |= LPFC_NVMET_IO_INP;
        rc = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, nvmewqeq);
        if (rc == WQE_SUCCESS) {
-               ctxp->flag |= LPFC_NVMET_IO_INP;
 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
                if (!phba->ktime_on)
                        return 0;
@@ -692,6 +783,7 @@ static void
 lpfc_nvmet_xmt_fcp_release(struct nvmet_fc_target_port *tgtport,
                           struct nvmefc_tgt_fcp_req *rsp)
 {
+       struct lpfc_nvmet_tgtport *lpfc_nvmep = tgtport->private;
        struct lpfc_nvmet_rcv_ctx *ctxp =
                container_of(rsp, struct lpfc_nvmet_rcv_ctx, ctx.fcp_req);
        struct lpfc_hba *phba = ctxp->phba;
@@ -710,10 +802,12 @@ lpfc_nvmet_xmt_fcp_release(struct nvmet_fc_target_port *tgtport,
        lpfc_nvmeio_data(phba, "NVMET FCP FREE: xri x%x ste %d\n", ctxp->oxid,
                         ctxp->state, 0);
 
+       atomic_inc(&lpfc_nvmep->xmt_fcp_release);
+
        if (aborting)
                return;
 
-       lpfc_nvmet_rq_post(phba, ctxp, &ctxp->rqb_buffer->hbuf);
+       lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
 }
 
 static struct nvmet_fc_target_template lpfc_tgttemplate = {
@@ -734,17 +828,128 @@ static struct nvmet_fc_target_template lpfc_tgttemplate = {
        .target_priv_sz = sizeof(struct lpfc_nvmet_tgtport),
 };
 
+void
+lpfc_nvmet_cleanup_io_context(struct lpfc_hba *phba)
+{
+       struct lpfc_nvmet_ctxbuf *ctx_buf, *next_ctx_buf;
+       unsigned long flags;
+
+       list_for_each_entry_safe(
+               ctx_buf, next_ctx_buf,
+               &phba->sli4_hba.lpfc_nvmet_ctx_list, list) {
+               spin_lock_irqsave(
+                       &phba->sli4_hba.abts_nvme_buf_list_lock, flags);
+               list_del_init(&ctx_buf->list);
+               spin_unlock_irqrestore(
+                       &phba->sli4_hba.abts_nvme_buf_list_lock, flags);
+               __lpfc_clear_active_sglq(phba,
+                                        ctx_buf->sglq->sli4_lxritag);
+               ctx_buf->sglq->state = SGL_FREED;
+               ctx_buf->sglq->ndlp = NULL;
+
+               spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock, flags);
+               list_add_tail(&ctx_buf->sglq->list,
+                             &phba->sli4_hba.lpfc_nvmet_sgl_list);
+               spin_unlock_irqrestore(&phba->sli4_hba.sgl_list_lock,
+                                      flags);
+
+               lpfc_sli_release_iocbq(phba, ctx_buf->iocbq);
+               kfree(ctx_buf->context);
+       }
+}
+
+int
+lpfc_nvmet_setup_io_context(struct lpfc_hba *phba)
+{
+       struct lpfc_nvmet_ctxbuf *ctx_buf;
+       struct lpfc_iocbq *nvmewqe;
+       union lpfc_wqe128 *wqe;
+       int i;
+
+       lpfc_printf_log(phba, KERN_INFO, LOG_NVME,
+                       "6403 Allocate NVMET resources for %d XRIs\n",
+                       phba->sli4_hba.nvmet_xri_cnt);
+
+       /* For all nvmet xris, allocate resources needed to process a
+        * received command on a per xri basis.
+        */
+       for (i = 0; i < phba->sli4_hba.nvmet_xri_cnt; i++) {
+               ctx_buf = kzalloc(sizeof(*ctx_buf), GFP_KERNEL);
+               if (!ctx_buf) {
+                       lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
+                                       "6404 Ran out of memory for NVMET\n");
+                       return -ENOMEM;
+               }
+
+               ctx_buf->context = kzalloc(sizeof(*ctx_buf->context),
+                                          GFP_KERNEL);
+               if (!ctx_buf->context) {
+                       kfree(ctx_buf);
+                       lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
+                                       "6405 Ran out of NVMET "
+                                       "context memory\n");
+                       return -ENOMEM;
+               }
+               ctx_buf->context->ctxbuf = ctx_buf;
+
+               ctx_buf->iocbq = lpfc_sli_get_iocbq(phba);
+               if (!ctx_buf->iocbq) {
+                       kfree(ctx_buf->context);
+                       kfree(ctx_buf);
+                       lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
+                                       "6406 Ran out of NVMET iocb/WQEs\n");
+                       return -ENOMEM;
+               }
+               ctx_buf->iocbq->iocb_flag = LPFC_IO_NVMET;
+               nvmewqe = ctx_buf->iocbq;
+               wqe = (union lpfc_wqe128 *)&nvmewqe->wqe;
+               /* Initialize WQE */
+               memset(wqe, 0, sizeof(union lpfc_wqe));
+               /* Word 7 */
+               bf_set(wqe_ct, &wqe->generic.wqe_com, SLI4_CT_RPI);
+               bf_set(wqe_class, &wqe->generic.wqe_com, CLASS3);
+               bf_set(wqe_pu, &wqe->generic.wqe_com, 1);
+               /* Word 10 */
+               bf_set(wqe_nvme, &wqe->fcp_tsend.wqe_com, 1);
+               bf_set(wqe_ebde_cnt, &wqe->generic.wqe_com, 0);
+               bf_set(wqe_qosd, &wqe->generic.wqe_com, 0);
+
+               ctx_buf->iocbq->context1 = NULL;
+               spin_lock(&phba->sli4_hba.sgl_list_lock);
+               ctx_buf->sglq = __lpfc_sli_get_nvmet_sglq(phba, ctx_buf->iocbq);
+               spin_unlock(&phba->sli4_hba.sgl_list_lock);
+               if (!ctx_buf->sglq) {
+                       lpfc_sli_release_iocbq(phba, ctx_buf->iocbq);
+                       kfree(ctx_buf->context);
+                       kfree(ctx_buf);
+                       lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
+                                       "6407 Ran out of NVMET XRIs\n");
+                       return -ENOMEM;
+               }
+               spin_lock(&phba->sli4_hba.nvmet_io_lock);
+               list_add_tail(&ctx_buf->list,
+                             &phba->sli4_hba.lpfc_nvmet_ctx_list);
+               spin_unlock(&phba->sli4_hba.nvmet_io_lock);
+       }
+       phba->sli4_hba.nvmet_ctx_cnt = phba->sli4_hba.nvmet_xri_cnt;
+       return 0;
+}
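
lpfc_nvmet_setup_io_context pre-allocates a ctx_buf, a context, an iocbq and a sglq for every NVMET XRI, releasing whatever was already taken whenever one allocation fails. A compact user-space model of that allocate-or-unwind step, using goto unwinding in place of the inline error paths above:

#include <stdio.h>
#include <stdlib.h>

struct ctxbuf { void *context; void *iocbq; };

/* Allocate one per-XRI bundle; on any failure free what was taken */
static struct ctxbuf *alloc_ctxbuf(void)
{
        struct ctxbuf *cb = calloc(1, sizeof(*cb));

        if (!cb)
                return NULL;
        cb->context = calloc(1, 64);
        if (!cb->context)
                goto fail_cb;
        cb->iocbq = calloc(1, 64);
        if (!cb->iocbq)
                goto fail_ctx;
        return cb;

fail_ctx:
        free(cb->context);
fail_cb:
        free(cb);
        return NULL;
}

int main(void)
{
        struct ctxbuf *cb = alloc_ctxbuf();

        if (!cb) {
                fprintf(stderr, "ran out of NVMET context memory\n");
                return 1;
        }
        free(cb->iocbq);
        free(cb->context);
        free(cb);
        return 0;
}
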
+
 int
 lpfc_nvmet_create_targetport(struct lpfc_hba *phba)
 {
        struct lpfc_vport  *vport = phba->pport;
        struct lpfc_nvmet_tgtport *tgtp;
        struct nvmet_fc_port_info pinfo;
-       int error = 0;
+       int error;
 
        if (phba->targetport)
                return 0;
 
+       error = lpfc_nvmet_setup_io_context(phba);
+       if (error)
+               return error;
+
        memset(&pinfo, 0, sizeof(struct nvmet_fc_port_info));
        pinfo.node_name = wwn_to_u64(vport->fc_nodename.u.wwn);
        pinfo.port_name = wwn_to_u64(vport->fc_portname.u.wwn);
@@ -772,13 +977,16 @@ lpfc_nvmet_create_targetport(struct lpfc_hba *phba)
                                             &phba->pcidev->dev,
                                             &phba->targetport);
 #else
-       error = -ENOMEM;
+       error = -ENOENT;
 #endif
        if (error) {
                lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC,
                                "6025 Cannot register NVME targetport "
                                "x%x\n", error);
                phba->targetport = NULL;
+
+               lpfc_nvmet_cleanup_io_context(phba);
+
        } else {
                tgtp = (struct lpfc_nvmet_tgtport *)
                        phba->targetport->private;
@@ -795,6 +1003,7 @@ lpfc_nvmet_create_targetport(struct lpfc_hba *phba)
                atomic_set(&tgtp->rcv_ls_req_out, 0);
                atomic_set(&tgtp->rcv_ls_req_drop, 0);
                atomic_set(&tgtp->xmt_ls_abort, 0);
+               atomic_set(&tgtp->xmt_ls_abort_cmpl, 0);
                atomic_set(&tgtp->xmt_ls_rsp, 0);
                atomic_set(&tgtp->xmt_ls_drop, 0);
                atomic_set(&tgtp->xmt_ls_rsp_error, 0);
@@ -802,18 +1011,21 @@ lpfc_nvmet_create_targetport(struct lpfc_hba *phba)
                atomic_set(&tgtp->rcv_fcp_cmd_in, 0);
                atomic_set(&tgtp->rcv_fcp_cmd_out, 0);
                atomic_set(&tgtp->rcv_fcp_cmd_drop, 0);
-               atomic_set(&tgtp->xmt_fcp_abort, 0);
                atomic_set(&tgtp->xmt_fcp_drop, 0);
                atomic_set(&tgtp->xmt_fcp_read_rsp, 0);
                atomic_set(&tgtp->xmt_fcp_read, 0);
                atomic_set(&tgtp->xmt_fcp_write, 0);
                atomic_set(&tgtp->xmt_fcp_rsp, 0);
+               atomic_set(&tgtp->xmt_fcp_release, 0);
                atomic_set(&tgtp->xmt_fcp_rsp_cmpl, 0);
                atomic_set(&tgtp->xmt_fcp_rsp_error, 0);
                atomic_set(&tgtp->xmt_fcp_rsp_drop, 0);
+               atomic_set(&tgtp->xmt_fcp_abort, 0);
+               atomic_set(&tgtp->xmt_fcp_abort_cmpl, 0);
+               atomic_set(&tgtp->xmt_abort_unsol, 0);
+               atomic_set(&tgtp->xmt_abort_sol, 0);
                atomic_set(&tgtp->xmt_abort_rsp, 0);
                atomic_set(&tgtp->xmt_abort_rsp_error, 0);
-               atomic_set(&tgtp->xmt_abort_cmpl, 0);
        }
        return error;
 }
@@ -864,7 +1076,7 @@ lpfc_sli4_nvmet_xri_aborted(struct lpfc_hba *phba,
        list_for_each_entry_safe(ctxp, next_ctxp,
                                 &phba->sli4_hba.lpfc_abts_nvmet_ctx_list,
                                 list) {
-               if (ctxp->rqb_buffer->sglq->sli4_xritag != xri)
+               if (ctxp->ctxbuf->sglq->sli4_xritag != xri)
                        continue;
 
                /* Check if we already received a free context call
@@ -885,7 +1097,7 @@ lpfc_sli4_nvmet_xri_aborted(struct lpfc_hba *phba,
                    (ndlp->nlp_state == NLP_STE_UNMAPPED_NODE ||
                     ndlp->nlp_state == NLP_STE_MAPPED_NODE)) {
                        lpfc_set_rrq_active(phba, ndlp,
-                               ctxp->rqb_buffer->sglq->sli4_lxritag,
+                               ctxp->ctxbuf->sglq->sli4_lxritag,
                                rxid, 1);
                        lpfc_sli4_abts_err_handler(phba, ndlp, axri);
                }
@@ -894,8 +1106,8 @@ lpfc_sli4_nvmet_xri_aborted(struct lpfc_hba *phba,
                                "6318 XB aborted %x flg x%x (%x)\n",
                                ctxp->oxid, ctxp->flag, released);
                if (released)
-                       lpfc_nvmet_rq_post(phba, ctxp,
-                                          &ctxp->rqb_buffer->hbuf);
+                       lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
+
                if (rrq_empty)
                        lpfc_worker_wake_up(phba);
                return;
@@ -923,7 +1135,7 @@ lpfc_nvmet_rcv_unsol_abort(struct lpfc_vport *vport,
        list_for_each_entry_safe(ctxp, next_ctxp,
                                 &phba->sli4_hba.lpfc_abts_nvmet_ctx_list,
                                 list) {
-               if (ctxp->rqb_buffer->sglq->sli4_xritag != xri)
+               if (ctxp->ctxbuf->sglq->sli4_xritag != xri)
                        continue;
 
                spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock);
@@ -975,6 +1187,7 @@ lpfc_nvmet_destroy_targetport(struct lpfc_hba *phba)
                init_completion(&tgtp->tport_unreg_done);
                nvmet_fc_unregister_targetport(phba->targetport);
                wait_for_completion_timeout(&tgtp->tport_unreg_done, 5);
+               lpfc_nvmet_cleanup_io_context(phba);
        }
        phba->targetport = NULL;
 #endif
@@ -1010,6 +1223,7 @@ lpfc_nvmet_unsol_ls_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
                oxid = 0;
                size = 0;
                sid = 0;
+               ctxp = NULL;
                goto dropit;
        }
 
@@ -1104,39 +1318,71 @@ lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba,
        struct lpfc_nvmet_rcv_ctx *ctxp;
        struct lpfc_nvmet_tgtport *tgtp;
        struct fc_frame_header *fc_hdr;
+       struct lpfc_nvmet_ctxbuf *ctx_buf;
        uint32_t *payload;
-       uint32_t size, oxid, sid, rc;
+       uint32_t size, oxid, sid, rc, qno;
+       unsigned long iflag;
 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
        uint32_t id;
 #endif
 
+       ctx_buf = NULL;
        if (!nvmebuf || !phba->targetport) {
                lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
-                               "6157 FCP Drop IO\n");
+                               "6157 NVMET FCP Drop IO\n");
                oxid = 0;
                size = 0;
                sid = 0;
+               ctxp = NULL;
                goto dropit;
        }
 
+       spin_lock_irqsave(&phba->sli4_hba.nvmet_io_lock, iflag);
+       if (phba->sli4_hba.nvmet_ctx_cnt) {
+               list_remove_head(&phba->sli4_hba.lpfc_nvmet_ctx_list,
+                                ctx_buf, struct lpfc_nvmet_ctxbuf, list);
+               phba->sli4_hba.nvmet_ctx_cnt--;
+       }
+       spin_unlock_irqrestore(&phba->sli4_hba.nvmet_io_lock, iflag);
 
-       tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
-       payload = (uint32_t *)(nvmebuf->dbuf.virt);
        fc_hdr = (struct fc_frame_header *)(nvmebuf->hbuf.virt);
-       size = nvmebuf->bytes_recv;
        oxid = be16_to_cpu(fc_hdr->fh_ox_id);
-       sid = sli4_sid_from_fc_hdr(fc_hdr);
+       size = nvmebuf->bytes_recv;
 
-       ctxp = (struct lpfc_nvmet_rcv_ctx *)nvmebuf->context;
-       if (ctxp == NULL) {
-               atomic_inc(&tgtp->rcv_fcp_cmd_drop);
-               lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
-                               "6158 FCP Drop IO x%x: Alloc\n",
-                               oxid);
-               lpfc_nvmet_rq_post(phba, NULL, &nvmebuf->hbuf);
-               /* Cannot send ABTS without context */
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+       if (phba->cpucheck_on & LPFC_CHECK_NVMET_RCV) {
+               id = smp_processor_id();
+               if (id < LPFC_CHECK_CPU_CNT)
+                       phba->cpucheck_rcv_io[id]++;
+       }
+#endif
+
+       lpfc_nvmeio_data(phba, "NVMET FCP  RCV: xri x%x sz %d CPU %02x\n",
+                        oxid, size, smp_processor_id());
+
+       if (!ctx_buf) {
+               /* Queue this NVME IO to process later */
+               spin_lock_irqsave(&phba->sli4_hba.nvmet_io_wait_lock, iflag);
+               list_add_tail(&nvmebuf->hbuf.list,
+                             &phba->sli4_hba.lpfc_nvmet_io_wait_list);
+               phba->sli4_hba.nvmet_io_wait_cnt++;
+               phba->sli4_hba.nvmet_io_wait_total++;
+               spin_unlock_irqrestore(&phba->sli4_hba.nvmet_io_wait_lock,
+                                      iflag);
+
+               /* Post a brand new DMA buffer to RQ */
+               qno = nvmebuf->idx;
+               lpfc_post_rq_buffer(
+                       phba, phba->sli4_hba.nvmet_mrq_hdr[qno],
+                       phba->sli4_hba.nvmet_mrq_data[qno], 1, qno);
                return;
        }
+
+       tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
+       payload = (uint32_t *)(nvmebuf->dbuf.virt);
+       sid = sli4_sid_from_fc_hdr(fc_hdr);
+
+       ctxp = (struct lpfc_nvmet_rcv_ctx *)ctx_buf->context;
        memset(ctxp, 0, sizeof(ctxp->ctx));
        ctxp->wqeq = NULL;
        ctxp->txrdy = NULL;
@@ -1146,9 +1392,9 @@ lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba,
        ctxp->oxid = oxid;
        ctxp->sid = sid;
        ctxp->state = LPFC_NVMET_STE_RCV;
-       ctxp->rqb_buffer = nvmebuf;
        ctxp->entry_cnt = 1;
        ctxp->flag = 0;
+       ctxp->ctxbuf = ctx_buf;
        spin_lock_init(&ctxp->ctxlock);
 
 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
@@ -1164,22 +1410,16 @@ lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba,
                ctxp->ts_isr_status = 0;
                ctxp->ts_status_nvme = 0;
        }
-
-       if (phba->cpucheck_on & LPFC_CHECK_NVMET_RCV) {
-               id = smp_processor_id();
-               if (id < LPFC_CHECK_CPU_CNT)
-                       phba->cpucheck_rcv_io[id]++;
-       }
 #endif
 
-       lpfc_nvmeio_data(phba, "NVMET FCP  RCV: xri x%x sz %d CPU %02x\n",
-                        oxid, size, smp_processor_id());
-
        atomic_inc(&tgtp->rcv_fcp_cmd_in);
        /*
         * The calling sequence should be:
         * nvmet_fc_rcv_fcp_req -> lpfc_nvmet_xmt_fcp_op/cmp -> req->done
         * lpfc_nvmet_xmt_fcp_op_cmp should free the allocated ctxp.
+        * When we return from nvmet_fc_rcv_fcp_req, all relevant info in
+        * the NVME command / FC header is stored, so we are free to repost
+        * the buffer.
         */
        rc = nvmet_fc_rcv_fcp_req(phba->targetport, &ctxp->ctx.fcp_req,
                                  payload, size);
@@ -1187,26 +1427,32 @@ lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba,
        /* Process FCP command */
        if (rc == 0) {
                atomic_inc(&tgtp->rcv_fcp_cmd_out);
+               lpfc_rq_buf_free(phba, &nvmebuf->hbuf); /* repost */
                return;
        }
 
        atomic_inc(&tgtp->rcv_fcp_cmd_drop);
        lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
-                       "6159 FCP Drop IO x%x: err x%x\n",
-                       ctxp->oxid, rc);
+                       "6159 FCP Drop IO x%x: err x%x: x%x x%x x%x\n",
+                       ctxp->oxid, rc,
+                       atomic_read(&tgtp->rcv_fcp_cmd_in),
+                       atomic_read(&tgtp->rcv_fcp_cmd_out),
+                       atomic_read(&tgtp->xmt_fcp_release));
 dropit:
        lpfc_nvmeio_data(phba, "NVMET FCP DROP: xri x%x sz %d from %06x\n",
                         oxid, size, sid);
        if (oxid) {
+               lpfc_nvmet_defer_release(phba, ctxp);
                lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, sid, oxid);
+               lpfc_rq_buf_free(phba, &nvmebuf->hbuf); /* repost */
                return;
        }
 
-       if (nvmebuf) {
-               nvmebuf->iocbq->hba_wqidx = 0;
-               /* We assume a rcv'ed cmd ALWAYs fits into 1 buffer */
-               lpfc_nvmet_rq_post(phba, NULL, &nvmebuf->hbuf);
-       }
+       if (ctx_buf)
+               lpfc_nvmet_ctxbuf_post(phba, ctx_buf);
+
+       if (nvmebuf)
+               lpfc_rq_buf_free(phba, &nvmebuf->hbuf); /* repost */
 #endif
 }
 
@@ -1258,7 +1504,7 @@ lpfc_nvmet_unsol_fcp_event(struct lpfc_hba *phba,
                           uint64_t isr_timestamp)
 {
        if (phba->nvmet_support == 0) {
-               lpfc_nvmet_rq_post(phba, NULL, &nvmebuf->hbuf);
+               lpfc_rq_buf_free(phba, &nvmebuf->hbuf);
                return;
        }
        lpfc_nvmet_unsol_fcp_buffer(phba, pring, nvmebuf,
@@ -1459,7 +1705,7 @@ lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba,
        nvmewqe = ctxp->wqeq;
        if (nvmewqe == NULL) {
                /* Allocate buffer for  command wqe */
-               nvmewqe = ctxp->rqb_buffer->iocbq;
+               nvmewqe = ctxp->ctxbuf->iocbq;
                if (nvmewqe == NULL) {
                        lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
                                        "6110 lpfc_nvmet_prep_fcp_wqe: No "
@@ -1486,7 +1732,7 @@ lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba,
                return NULL;
        }
 
-       sgl  = (struct sli4_sge *)ctxp->rqb_buffer->sglq->sgl;
+       sgl  = (struct sli4_sge *)ctxp->ctxbuf->sglq->sgl;
        switch (rsp->op) {
        case NVMET_FCOP_READDATA:
        case NVMET_FCOP_READDATA_RSP:
@@ -1811,7 +2057,8 @@ lpfc_nvmet_sol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
        result = wcqe->parameter;
 
        tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
-       atomic_inc(&tgtp->xmt_abort_cmpl);
+       if (ctxp->flag & LPFC_NVMET_ABORT_OP)
+               atomic_inc(&tgtp->xmt_fcp_abort_cmpl);
 
        ctxp->state = LPFC_NVMET_STE_DONE;
 
@@ -1826,6 +2073,7 @@ lpfc_nvmet_sol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
        }
        ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
        spin_unlock_irqrestore(&ctxp->ctxlock, flags);
+       atomic_inc(&tgtp->xmt_abort_rsp);
 
        lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
                        "6165 ABORT cmpl: xri x%x flg x%x (%d) "
@@ -1834,15 +2082,16 @@ lpfc_nvmet_sol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
                        wcqe->word0, wcqe->total_data_placed,
                        result, wcqe->word3);
 
+       cmdwqe->context2 = NULL;
+       cmdwqe->context3 = NULL;
        /*
         * if transport has released ctx, then can reuse it. Otherwise,
         * will be recycled by transport release call.
         */
        if (released)
-               lpfc_nvmet_rq_post(phba, ctxp, &ctxp->rqb_buffer->hbuf);
+               lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
 
-       cmdwqe->context2 = NULL;
-       cmdwqe->context3 = NULL;
+       /* This is the iocbq for the abort, not the command */
        lpfc_sli_release_iocbq(phba, cmdwqe);
 
        /* Since iaab/iaar are NOT set, there is no work left.
@@ -1876,7 +2125,8 @@ lpfc_nvmet_unsol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
        result = wcqe->parameter;
 
        tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
-       atomic_inc(&tgtp->xmt_abort_cmpl);
+       if (ctxp && (ctxp->flag & LPFC_NVMET_ABORT_OP))
+               atomic_inc(&tgtp->xmt_fcp_abort_cmpl);
 
        if (!ctxp) {
                /* if context is clear, related io already complete */
@@ -1906,6 +2156,7 @@ lpfc_nvmet_unsol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
        }
        ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
        spin_unlock_irqrestore(&ctxp->ctxlock, flags);
+       atomic_inc(&tgtp->xmt_abort_rsp);
 
        lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
                        "6316 ABTS cmpl xri x%x flg x%x (%x) "
@@ -1913,15 +2164,15 @@ lpfc_nvmet_unsol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
                        ctxp->oxid, ctxp->flag, released,
                        wcqe->word0, wcqe->total_data_placed,
                        result, wcqe->word3);
+
+       cmdwqe->context2 = NULL;
+       cmdwqe->context3 = NULL;
        /*
         * if transport has released ctx, then can reuse it. Otherwise,
         * will be recycled by transport release call.
         */
        if (released)
-               lpfc_nvmet_rq_post(phba, ctxp, &ctxp->rqb_buffer->hbuf);
-
-       cmdwqe->context2 = NULL;
-       cmdwqe->context3 = NULL;
+               lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
 
        /* Since iaab/iaar are NOT set, there is no work left.
         * For LPFC_NVMET_XBUSY, lpfc_sli4_nvmet_xri_aborted
@@ -1952,7 +2203,7 @@ lpfc_nvmet_xmt_ls_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
        result = wcqe->parameter;
 
        tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
-       atomic_inc(&tgtp->xmt_abort_cmpl);
+       atomic_inc(&tgtp->xmt_ls_abort_cmpl);
 
        lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
                        "6083 Abort cmpl: ctx %p WCQE: %08x %08x %08x %08x\n",
@@ -1983,10 +2234,6 @@ lpfc_nvmet_unsol_issue_abort(struct lpfc_hba *phba,
                        sid, xri, ctxp->wqeq->sli4_xritag);
 
        tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
-       if (!ctxp->wqeq) {
-               ctxp->wqeq = ctxp->rqb_buffer->iocbq;
-               ctxp->wqeq->hba_wqidx = 0;
-       }
 
        ndlp = lpfc_findnode_did(phba->pport, sid);
        if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
@@ -2082,7 +2329,7 @@ lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba,
 
        tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
        if (!ctxp->wqeq) {
-               ctxp->wqeq = ctxp->rqb_buffer->iocbq;
+               ctxp->wqeq = ctxp->ctxbuf->iocbq;
                ctxp->wqeq->hba_wqidx = 0;
        }
 
@@ -2103,6 +2350,7 @@ lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba,
        /* Issue ABTS for this WQE based on iotag */
        ctxp->abort_wqeq = lpfc_sli_get_iocbq(phba);
        if (!ctxp->abort_wqeq) {
+               atomic_inc(&tgtp->xmt_abort_rsp_error);
                lpfc_printf_log(phba, KERN_WARNING, LOG_NVME_ABTS,
                                "6161 ABORT failed: No wqeqs: "
                                "xri: x%x\n", ctxp->oxid);
@@ -2127,6 +2375,7 @@ lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba,
        /* driver queued commands are in process of being flushed */
        if (phba->hba_flag & HBA_NVME_IOQ_FLUSH) {
                spin_unlock_irqrestore(&phba->hbalock, flags);
+               atomic_inc(&tgtp->xmt_abort_rsp_error);
                lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
                                "6163 Driver in reset cleanup - flushing "
                                "NVME Req now. hba_flag x%x oxid x%x\n",
@@ -2139,6 +2388,7 @@ lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba,
        /* Outstanding abort is in progress */
        if (abts_wqeq->iocb_flag & LPFC_DRIVER_ABORTED) {
                spin_unlock_irqrestore(&phba->hbalock, flags);
+               atomic_inc(&tgtp->xmt_abort_rsp_error);
                lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
                                "6164 Outstanding NVME I/O Abort Request "
                                "still pending on oxid x%x\n",
@@ -2189,9 +2439,12 @@ lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba,
        abts_wqeq->context2 = ctxp;
        rc = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, abts_wqeq);
        spin_unlock_irqrestore(&phba->hbalock, flags);
-       if (rc == WQE_SUCCESS)
+       if (rc == WQE_SUCCESS) {
+               atomic_inc(&tgtp->xmt_abort_sol);
                return 0;
+       }
 
+       atomic_inc(&tgtp->xmt_abort_rsp_error);
        ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
        lpfc_sli_release_iocbq(phba, abts_wqeq);
        lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
@@ -2214,7 +2467,7 @@ lpfc_nvmet_unsol_fcp_issue_abort(struct lpfc_hba *phba,
 
        tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
        if (!ctxp->wqeq) {
-               ctxp->wqeq = ctxp->rqb_buffer->iocbq;
+               ctxp->wqeq = ctxp->ctxbuf->iocbq;
                ctxp->wqeq->hba_wqidx = 0;
        }
 
@@ -2230,11 +2483,10 @@ lpfc_nvmet_unsol_fcp_issue_abort(struct lpfc_hba *phba,
        rc = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, abts_wqeq);
        spin_unlock_irqrestore(&phba->hbalock, flags);
        if (rc == WQE_SUCCESS) {
-               atomic_inc(&tgtp->xmt_abort_rsp);
                return 0;
        }
 
 aerr:
        ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
        atomic_inc(&tgtp->xmt_abort_rsp_error);
        lpfc_printf_log(phba, KERN_WARNING, LOG_NVME_ABTS,
@@ -2269,6 +2522,7 @@ lpfc_nvmet_unsol_ls_issue_abort(struct lpfc_hba *phba,
        }
        abts_wqeq = ctxp->wqeq;
        wqe_abts = &abts_wqeq->wqe;
+
        lpfc_nvmet_unsol_issue_abort(phba, ctxp, sid, xri);
 
        spin_lock_irqsave(&phba->hbalock, flags);
@@ -2278,7 +2532,7 @@ lpfc_nvmet_unsol_ls_issue_abort(struct lpfc_hba *phba,
        rc = lpfc_sli4_issue_wqe(phba, LPFC_ELS_RING, abts_wqeq);
        spin_unlock_irqrestore(&phba->hbalock, flags);
        if (rc == WQE_SUCCESS) {
-               atomic_inc(&tgtp->xmt_abort_rsp);
+               atomic_inc(&tgtp->xmt_abort_unsol);
                return 0;
        }
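A note on the lpfc_nvmet.c hunks above: the recurring replacement of ctxp->rqb_buffer with ctxp->ctxbuf (and of lpfc_nvmet_rq_post() with lpfc_nvmet_ctxbuf_post()) separates the per-exchange IO resources from the receive-queue buffer, so the RQE can be reposted to hardware immediately while the exchange context lives on. A minimal sketch of the relationship, inferred only from the accessors this patch uses (ctxbuf->iocbq, ctxbuf->sglq); the bookkeeping members are assumptions, not confirmed layout:

    /* Sketch: iocbq/sglq inferred from the hunks above; 'list' and
     * 'context' are assumed free-list bookkeeping.
     */
    struct lpfc_nvmet_ctxbuf {
            struct list_head list;
            struct lpfc_nvmet_rcv_ctx *context;
            struct lpfc_iocbq *iocbq;       /* WQE resources for the exchange */
            struct lpfc_sglq *sglq;         /* SGL backing the exchange */
    };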
 
diff --git a/drivers/scsi/lpfc/lpfc_nvmet.h b/drivers/scsi/lpfc/lpfc_nvmet.h
index 128759fe665058dba133febdaa7140f29469f733..6eb2f5d8d4eda40f931097752c72f175354f14da 100644 (file)
@@ -22,6 +22,7 @@
  ********************************************************************/
 
 #define LPFC_NVMET_DEFAULT_SEGS                (64 + 1)        /* 256K IOs */
+#define LPFC_NVMET_RQE_DEF_COUNT       512
 #define LPFC_NVMET_SUCCESS_LEN 12
 
 /* Used for NVME Target */
@@ -34,6 +35,7 @@ struct lpfc_nvmet_tgtport {
        atomic_t rcv_ls_req_out;
        atomic_t rcv_ls_req_drop;
        atomic_t xmt_ls_abort;
+       atomic_t xmt_ls_abort_cmpl;
 
        /* Stats counters - lpfc_nvmet_xmt_ls_rsp */
        atomic_t xmt_ls_rsp;
@@ -47,9 +49,9 @@ struct lpfc_nvmet_tgtport {
        atomic_t rcv_fcp_cmd_in;
        atomic_t rcv_fcp_cmd_out;
        atomic_t rcv_fcp_cmd_drop;
+       atomic_t xmt_fcp_release;
 
        /* Stats counters - lpfc_nvmet_xmt_fcp_op */
-       atomic_t xmt_fcp_abort;
        atomic_t xmt_fcp_drop;
        atomic_t xmt_fcp_read_rsp;
        atomic_t xmt_fcp_read;
@@ -62,12 +64,13 @@ struct lpfc_nvmet_tgtport {
        atomic_t xmt_fcp_rsp_drop;
 
 
-       /* Stats counters - lpfc_nvmet_unsol_issue_abort */
+       /* Stats counters - lpfc_nvmet_xmt_fcp_abort */
+       atomic_t xmt_fcp_abort;
+       atomic_t xmt_fcp_abort_cmpl;
+       atomic_t xmt_abort_sol;
+       atomic_t xmt_abort_unsol;
        atomic_t xmt_abort_rsp;
        atomic_t xmt_abort_rsp_error;
-
-       /* Stats counters - lpfc_nvmet_xmt_abort_cmp */
-       atomic_t xmt_abort_cmpl;
 };
 
 struct lpfc_nvmet_rcv_ctx {
@@ -103,6 +106,7 @@ struct lpfc_nvmet_rcv_ctx {
 #define LPFC_NVMET_CTX_RLS             0x8  /* ctx free requested */
 #define LPFC_NVMET_ABTS_RCV            0x10  /* ABTS received on exchange */
        struct rqb_dmabuf *rqb_buffer;
+       struct lpfc_nvmet_ctxbuf *ctxbuf;
 
 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
        uint64_t ts_isr_cmd;
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 2a4fc00dfa9bdc7dbe42d5bed00935abd0a9e398..d6b184839bc2ff951233ee8fbcf477d6b133206f 100644 (file)
@@ -74,6 +74,8 @@ static struct lpfc_iocbq *lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *,
                                                         struct lpfc_iocbq *);
 static void lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *,
                                      struct hbq_dmabuf *);
+static void lpfc_sli4_handle_mds_loopback(struct lpfc_vport *vport,
+                                         struct hbq_dmabuf *dmabuf);
 static int lpfc_sli4_fp_handle_cqe(struct lpfc_hba *, struct lpfc_queue *,
                                    struct lpfc_cqe *);
 static int lpfc_sli4_post_sgl_list(struct lpfc_hba *, struct list_head *,
@@ -479,22 +481,23 @@ lpfc_sli4_rq_put(struct lpfc_queue *hq, struct lpfc_queue *dq,
        if (unlikely(!hq) || unlikely(!dq))
                return -ENOMEM;
        put_index = hq->host_index;
-       temp_hrqe = hq->qe[hq->host_index].rqe;
+       temp_hrqe = hq->qe[put_index].rqe;
        temp_drqe = dq->qe[dq->host_index].rqe;
 
        if (hq->type != LPFC_HRQ || dq->type != LPFC_DRQ)
                return -EINVAL;
-       if (hq->host_index != dq->host_index)
+       if (put_index != dq->host_index)
                return -EINVAL;
        /* If the host has not yet processed the next entry then we are done */
-       if (((hq->host_index + 1) % hq->entry_count) == hq->hba_index)
+       if (((put_index + 1) % hq->entry_count) == hq->hba_index)
                return -EBUSY;
        lpfc_sli_pcimem_bcopy(hrqe, temp_hrqe, hq->entry_size);
        lpfc_sli_pcimem_bcopy(drqe, temp_drqe, dq->entry_size);
 
        /* Update the host index to point to the next slot */
-       hq->host_index = ((hq->host_index + 1) % hq->entry_count);
+       hq->host_index = ((put_index + 1) % hq->entry_count);
        dq->host_index = ((dq->host_index + 1) % dq->entry_count);
+       hq->RQ_buf_posted++;
 
        /* Ring The Header Receive Queue Doorbell */
        if (!(hq->host_index % hq->entry_repost)) {
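The lpfc_sli4_rq_put() change above is a read-once fix: hq->host_index is loaded into put_index exactly once, and every later check and the final update work on that snapshot, so a concurrent update of host_index cannot make the validation and the copy disagree. Condensed to its essentials:

    put_index = hq->host_index;                     /* single load */
    temp_hrqe = hq->qe[put_index].rqe;              /* slot picked from the snapshot */
    if (((put_index + 1) % hq->entry_count) == hq->hba_index)
            return -EBUSY;                          /* host has not consumed next slot */
    lpfc_sli_pcimem_bcopy(hrqe, temp_hrqe, hq->entry_size);
    hq->host_index = ((put_index + 1) % hq->entry_count);   /* publish last */

The new RQ_buf_posted counter incremented here pairs with the RQ_buf_posted-- on receive, producing the "posted %d" value printed by the 6401/6402 RQE error messages added later in this patch.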
@@ -5906,7 +5909,7 @@ lpfc_set_features(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox,
                bf_set(lpfc_mbx_set_feature_mds,
                       &mbox->u.mqe.un.set_feature, 1);
                bf_set(lpfc_mbx_set_feature_mds_deep_loopbk,
-                      &mbox->u.mqe.un.set_feature, 0);
+                      &mbox->u.mqe.un.set_feature, 1);
                mbox->u.mqe.un.set_feature.feature = LPFC_SET_MDS_DIAGS;
                mbox->u.mqe.un.set_feature.param_len = 8;
                break;
@@ -6512,6 +6515,50 @@ lpfc_set_host_data(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
                 (phba->hba_flag & HBA_FCOE_MODE) ? "FCoE" : "FC");
 }
 
+int
+lpfc_post_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *hrq,
+                   struct lpfc_queue *drq, int count, int idx)
+{
+       int rc, i;
+       struct lpfc_rqe hrqe;
+       struct lpfc_rqe drqe;
+       struct lpfc_rqb *rqbp;
+       struct rqb_dmabuf *rqb_buffer;
+       LIST_HEAD(rqb_buf_list);
+
+       rqbp = hrq->rqbp;
+       for (i = 0; i < count; i++) {
+               /* If the RQ is already full, don't bother */
+               if (rqbp->buffer_count + i >= rqbp->entry_count - 1)
+                       break;
+               rqb_buffer = rqbp->rqb_alloc_buffer(phba);
+               if (!rqb_buffer)
+                       break;
+               rqb_buffer->hrq = hrq;
+               rqb_buffer->drq = drq;
+               rqb_buffer->idx = idx;
+               list_add_tail(&rqb_buffer->hbuf.list, &rqb_buf_list);
+       }
+       while (!list_empty(&rqb_buf_list)) {
+               list_remove_head(&rqb_buf_list, rqb_buffer, struct rqb_dmabuf,
+                                hbuf.list);
+
+               hrqe.address_lo = putPaddrLow(rqb_buffer->hbuf.phys);
+               hrqe.address_hi = putPaddrHigh(rqb_buffer->hbuf.phys);
+               drqe.address_lo = putPaddrLow(rqb_buffer->dbuf.phys);
+               drqe.address_hi = putPaddrHigh(rqb_buffer->dbuf.phys);
+               rc = lpfc_sli4_rq_put(hrq, drq, &hrqe, &drqe);
+               if (rc < 0) {
+                       rqbp->rqb_free_buffer(phba, rqb_buffer);
+               } else {
+                       list_add_tail(&rqb_buffer->hbuf.list,
+                                     &rqbp->rqb_buffer_list);
+                       rqbp->buffer_count++;
+               }
+       }
+       return 1;
+}
+
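For reference, the helper added above is consumed later in this same patch (see the lpfc_sli4_hba_setup() hunk below); condensed:

    /* Post the default RQE budget to each NVMET MRQ header/data pair */
    for (i = 0; i < phba->cfg_nvmet_mrq; i++)
            lpfc_post_rq_buffer(phba, phba->sli4_hba.nvmet_mrq_hdr[i],
                                phba->sli4_hba.nvmet_mrq_data[i],
                                LPFC_NVMET_RQE_DEF_COUNT, i);

Note the helper treats posting as best-effort: it stops quietly at entry_count - 1 or on allocation failure, and buffers whose RQE could not be placed are freed rather than retried.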
 /**
  * lpfc_sli4_hba_setup - SLI4 device initialization PCI function
  * @phba: Pointer to HBA context object.
@@ -6524,7 +6571,7 @@ lpfc_set_host_data(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
 int
 lpfc_sli4_hba_setup(struct lpfc_hba *phba)
 {
-       int rc, i;
+       int rc, i, cnt;
        LPFC_MBOXQ_t *mboxq;
        struct lpfc_mqe *mqe;
        uint8_t *vpd;
@@ -6875,6 +6922,21 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
                        goto out_destroy_queue;
                }
                phba->sli4_hba.nvmet_xri_cnt = rc;
+
+               cnt = phba->cfg_iocb_cnt * 1024;
+               /* We need 1 iocbq for every SGL, for IO processing */
+               cnt += phba->sli4_hba.nvmet_xri_cnt;
+               /* Initialize and populate the iocb list per host */
+               lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+                               "2821 initialize iocb list %d total %d\n",
+                               phba->cfg_iocb_cnt, cnt);
+               rc = lpfc_init_iocb_list(phba, cnt);
+               if (rc) {
+                       lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                                       "1413 Failed to init iocb list.\n");
+                       goto out_destroy_queue;
+               }
+
                lpfc_nvmet_create_targetport(phba);
        } else {
                /* update host scsi xri-sgl sizes and mappings */
@@ -6894,28 +6956,34 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
                                        "and mapping: %d\n", rc);
                        goto out_destroy_queue;
                }
+
+               cnt = phba->cfg_iocb_cnt * 1024;
+               /* Initialize and populate the iocb list per host */
+               lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+                               "2820 initialize iocb list %d total %d\n",
+                               phba->cfg_iocb_cnt, cnt);
+               rc = lpfc_init_iocb_list(phba, cnt);
+               if (rc) {
+                       lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                                       "6301 Failed to init iocb list.\n");
+                       goto out_destroy_queue;
+               }
        }
 
        if (phba->nvmet_support && phba->cfg_nvmet_mrq) {
-
                /* Post initial buffers to all RQs created */
                for (i = 0; i < phba->cfg_nvmet_mrq; i++) {
                        rqbp = phba->sli4_hba.nvmet_mrq_hdr[i]->rqbp;
                        INIT_LIST_HEAD(&rqbp->rqb_buffer_list);
                        rqbp->rqb_alloc_buffer = lpfc_sli4_nvmet_alloc;
                        rqbp->rqb_free_buffer = lpfc_sli4_nvmet_free;
-                       rqbp->entry_count = 256;
+                       rqbp->entry_count = LPFC_NVMET_RQE_DEF_COUNT;
                        rqbp->buffer_count = 0;
 
-                       /* Divide by 4 and round down to multiple of 16 */
-                       rc = (phba->cfg_nvmet_mrq_post >> 2) & 0xfff8;
-                       phba->sli4_hba.nvmet_mrq_hdr[i]->entry_repost = rc;
-                       phba->sli4_hba.nvmet_mrq_data[i]->entry_repost = rc;
-
                        lpfc_post_rq_buffer(
                                phba, phba->sli4_hba.nvmet_mrq_hdr[i],
                                phba->sli4_hba.nvmet_mrq_data[i],
-                               phba->cfg_nvmet_mrq_post);
+                               LPFC_NVMET_RQE_DEF_COUNT, i);
                }
        }
 
@@ -7082,6 +7150,7 @@ out_unset_queue:
        /* Unset all the queues set up in this routine when error out */
        lpfc_sli4_queue_unset(phba);
 out_destroy_queue:
+       lpfc_free_iocb_list(phba);
        lpfc_sli4_queue_destroy(phba);
 out_stop_timers:
        lpfc_stop_hba_timers(phba);
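The iocb list is now initialized inside lpfc_sli4_hba_setup(), after the XRI accounting is known, so NVMET hosts can size it to cover every SGL; that is also why the out_destroy_queue error path gains lpfc_free_iocb_list() above. A worked example with illustrative numbers only:

    cnt = phba->cfg_iocb_cnt * 1024;        /* e.g. 2 * 1024 = 2048 iocbqs   */
    cnt += phba->sli4_hba.nvmet_xri_cnt;    /* e.g. + 512 = 2560 when NVMET  */
    rc = lpfc_init_iocb_list(phba, cnt);    /* non-NVMET hosts stay at 2048  */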
@@ -8621,8 +8690,11 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
                memset(wqe, 0, sizeof(union lpfc_wqe128));
        /* Some of the fields are in the right position already */
        memcpy(wqe, &iocbq->iocb, sizeof(union lpfc_wqe));
-       wqe->generic.wqe_com.word7 = 0; /* The ct field has moved so reset */
-       wqe->generic.wqe_com.word10 = 0;
+       if (iocbq->iocb.ulpCommand != CMD_SEND_FRAME) {
+               /* The ct field has moved so reset */
+               wqe->generic.wqe_com.word7 = 0;
+               wqe->generic.wqe_com.word10 = 0;
+       }
 
        abort_tag = (uint32_t) iocbq->iotag;
        xritag = iocbq->sli4_xritag;
@@ -9116,6 +9188,10 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
                }
 
                break;
+       case CMD_SEND_FRAME:
+               bf_set(wqe_xri_tag, &wqe->generic.wqe_com, xritag);
+               bf_set(wqe_reqtag, &wqe->generic.wqe_com, iocbq->iotag);
+               return 0;
        case CMD_XRI_ABORTED_CX:
        case CMD_CREATE_XRI_CR: /* Do we expect to use this? */
        case CMD_IOCB_FCP_IBIDIR64_CR: /* bidirectional xfer */
@@ -12788,6 +12864,7 @@ lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_rcqe *rcqe)
        struct fc_frame_header *fc_hdr;
        struct lpfc_queue *hrq = phba->sli4_hba.hdr_rq;
        struct lpfc_queue *drq = phba->sli4_hba.dat_rq;
+       struct lpfc_nvmet_tgtport *tgtp;
        struct hbq_dmabuf *dma_buf;
        uint32_t status, rq_id;
        unsigned long iflags;
@@ -12808,7 +12885,6 @@ lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_rcqe *rcqe)
        case FC_STATUS_RQ_BUF_LEN_EXCEEDED:
                lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
                                "2537 Receive Frame Truncated!!\n");
-               hrq->RQ_buf_trunc++;
        case FC_STATUS_RQ_SUCCESS:
                lpfc_sli4_rq_release(hrq, drq);
                spin_lock_irqsave(&phba->hbalock, iflags);
@@ -12819,6 +12895,7 @@ lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_rcqe *rcqe)
                        goto out;
                }
                hrq->RQ_rcv_buf++;
+               hrq->RQ_buf_posted--;
                memcpy(&dma_buf->cq_event.cqe.rcqe_cmpl, rcqe, sizeof(*rcqe));
 
                /* If a NVME LS event (type 0x28), treat it as Fast path */
@@ -12832,8 +12909,21 @@ lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_rcqe *rcqe)
                spin_unlock_irqrestore(&phba->hbalock, iflags);
                workposted = true;
                break;
-       case FC_STATUS_INSUFF_BUF_NEED_BUF:
        case FC_STATUS_INSUFF_BUF_FRM_DISC:
+               if (phba->nvmet_support) {
+                       tgtp = phba->targetport->private;
+                       lpfc_printf_log(phba, KERN_ERR, LOG_SLI | LOG_NVME,
+                                       "6402 RQE Error x%x, posted %d err_cnt "
+                                       "%d: %x %x %x\n",
+                                       status, hrq->RQ_buf_posted,
+                                       hrq->RQ_no_posted_buf,
+                                       atomic_read(&tgtp->rcv_fcp_cmd_in),
+                                       atomic_read(&tgtp->rcv_fcp_cmd_out),
+                                       atomic_read(&tgtp->xmt_fcp_release));
+               }
+               /* fallthrough */
+
+       case FC_STATUS_INSUFF_BUF_NEED_BUF:
                hrq->RQ_no_posted_buf++;
                /* Post more buffers if possible */
                spin_lock_irqsave(&phba->hbalock, iflags);
@@ -12951,7 +13041,7 @@ lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
                while ((cqe = lpfc_sli4_cq_get(cq))) {
                        workposted |= lpfc_sli4_sp_handle_mcqe(phba, cqe);
                        if (!(++ecount % cq->entry_repost))
-                               lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM);
+                               break;
                        cq->CQ_mbox++;
                }
                break;
@@ -12965,7 +13055,7 @@ lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
                                workposted |= lpfc_sli4_sp_handle_cqe(phba, cq,
                                                                      cqe);
                        if (!(++ecount % cq->entry_repost))
-                               lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM);
+                               break;
                }
 
                /* Track the max number of CQEs processed in 1 EQ */
@@ -13135,6 +13225,7 @@ lpfc_sli4_nvmet_handle_rcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
        struct lpfc_queue *drq;
        struct rqb_dmabuf *dma_buf;
        struct fc_frame_header *fc_hdr;
+       struct lpfc_nvmet_tgtport *tgtp;
        uint32_t status, rq_id;
        unsigned long iflags;
        uint32_t fctl, idx;
@@ -13165,8 +13256,6 @@ lpfc_sli4_nvmet_handle_rcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
        case FC_STATUS_RQ_BUF_LEN_EXCEEDED:
                lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
                                "6126 Receive Frame Truncated!!\n");
-               hrq->RQ_buf_trunc++;
-               break;
        case FC_STATUS_RQ_SUCCESS:
                lpfc_sli4_rq_release(hrq, drq);
                spin_lock_irqsave(&phba->hbalock, iflags);
@@ -13178,6 +13267,7 @@ lpfc_sli4_nvmet_handle_rcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
                }
                spin_unlock_irqrestore(&phba->hbalock, iflags);
                hrq->RQ_rcv_buf++;
+               hrq->RQ_buf_posted--;
                fc_hdr = (struct fc_frame_header *)dma_buf->hbuf.virt;
 
                /* Just some basic sanity checks on FCP Command frame */
@@ -13200,14 +13290,23 @@ lpfc_sli4_nvmet_handle_rcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
 drop:
                lpfc_in_buf_free(phba, &dma_buf->dbuf);
                break;
-       case FC_STATUS_INSUFF_BUF_NEED_BUF:
        case FC_STATUS_INSUFF_BUF_FRM_DISC:
+               if (phba->nvmet_support) {
+                       tgtp = phba->targetport->private;
+                       lpfc_printf_log(phba, KERN_ERR, LOG_SLI | LOG_NVME,
+                                       "6401 RQE Error x%x, posted %d err_cnt "
+                                       "%d: %x %x %x\n",
+                                       status, hrq->RQ_buf_posted,
+                                       hrq->RQ_no_posted_buf,
+                                       atomic_read(&tgtp->rcv_fcp_cmd_in),
+                                       atomic_read(&tgtp->rcv_fcp_cmd_out),
+                                       atomic_read(&tgtp->xmt_fcp_release));
+               }
+               /* fallthrough */
+
+       case FC_STATUS_INSUFF_BUF_NEED_BUF:
                hrq->RQ_no_posted_buf++;
                /* Post more buffers if possible */
-               spin_lock_irqsave(&phba->hbalock, iflags);
-               phba->hba_flag |= HBA_POST_RECEIVE_BUFFER;
-               spin_unlock_irqrestore(&phba->hbalock, iflags);
-               workposted = true;
                break;
        }
 out:
@@ -13361,7 +13460,7 @@ process_cq:
        while ((cqe = lpfc_sli4_cq_get(cq))) {
                workposted |= lpfc_sli4_fp_handle_cqe(phba, cq, cqe);
                if (!(++ecount % cq->entry_repost))
-                       lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM);
+                       break;
        }
 
        /* Track the max number of CQEs processed in 1 EQ */
@@ -13452,7 +13551,7 @@ lpfc_sli4_fof_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe)
        while ((cqe = lpfc_sli4_cq_get(cq))) {
                workposted |= lpfc_sli4_fp_handle_cqe(phba, cq, cqe);
                if (!(++ecount % cq->entry_repost))
-                       lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM);
+                       break;
        }
 
        /* Track the max number of CQEs processed in 1 EQ */
@@ -13534,7 +13633,7 @@ lpfc_sli4_fof_intr_handler(int irq, void *dev_id)
        while ((eqe = lpfc_sli4_eq_get(eq))) {
                lpfc_sli4_fof_handle_eqe(phba, eqe);
                if (!(++ecount % eq->entry_repost))
-                       lpfc_sli4_eq_release(eq, LPFC_QUEUE_NOARM);
+                       break;
                eq->EQ_processed++;
        }
 
@@ -13651,7 +13750,7 @@ lpfc_sli4_hba_intr_handler(int irq, void *dev_id)
 
                lpfc_sli4_hba_handle_eqe(phba, eqe, hba_eqidx);
                if (!(++ecount % fpeq->entry_repost))
-                       lpfc_sli4_eq_release(fpeq, LPFC_QUEUE_NOARM);
+                       break;
                fpeq->EQ_processed++;
        }
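Every lpfc_sli4_cq_release(..., LPFC_QUEUE_NOARM) and lpfc_sli4_eq_release(..., LPFC_QUEUE_NOARM) call converted to a break above is the same change: once entry_repost entries have been consumed in one pass, the handler stops instead of releasing the queue un-armed from inside the loop. The resulting shape, reduced to its essentials (handle_cqe() stands in for the per-queue-type work; the single re-arm after the loop is how these handlers already end):

    ecount = 0;
    while ((cqe = lpfc_sli4_cq_get(cq))) {
            workposted |= handle_cqe(phba, cq, cqe);
            if (!(++ecount % cq->entry_repost))
                    break;          /* budget spent; finish up and re-arm */
    }
    lpfc_sli4_cq_release(cq, LPFC_QUEUE_REARM);

The per-queue-type budgets are the new LPFC_EQ_REPOST/LPFC_CQ_REPOST/LPFC_MQ_REPOST/LPFC_RQ_REPOST constants in the lpfc_sli4.h hunk further down, assigned at queue-create time now that lpfc_sli4_queue_alloc() no longer derives entry_repost from entry_count.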
 
@@ -13832,17 +13931,10 @@ lpfc_sli4_queue_alloc(struct lpfc_hba *phba, uint32_t entry_size,
        }
        queue->entry_size = entry_size;
        queue->entry_count = entry_count;
-
-       /*
-        * entry_repost is calculated based on the number of entries in the
-        * queue. This works out except for RQs. If buffers are NOT initially
-        * posted for every RQE, entry_repost should be adjusted accordingly.
-        */
-       queue->entry_repost = (entry_count >> 3);
-       if (queue->entry_repost < LPFC_QUEUE_MIN_REPOST)
-               queue->entry_repost = LPFC_QUEUE_MIN_REPOST;
        queue->phba = phba;
 
+       /* entry_repost will be set during q creation */
+
        return queue;
 out_fail:
        lpfc_sli4_queue_free(queue);
@@ -14073,6 +14165,7 @@ lpfc_eq_create(struct lpfc_hba *phba, struct lpfc_queue *eq, uint32_t imax)
                status = -ENXIO;
        eq->host_index = 0;
        eq->hba_index = 0;
+       eq->entry_repost = LPFC_EQ_REPOST;
 
        mempool_free(mbox, phba->mbox_mem_pool);
        return status;
@@ -14146,9 +14239,9 @@ lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq,
        default:
                lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
                                "0361 Unsupported CQ count: "
-                               "entry cnt %d sz %d pg cnt %d repost %d\n",
+                               "entry cnt %d sz %d pg cnt %d\n",
                                cq->entry_count, cq->entry_size,
-                               cq->page_count, cq->entry_repost);
+                               cq->page_count);
                if (cq->entry_count < 256) {
                        status = -EINVAL;
                        goto out;
@@ -14201,6 +14294,7 @@ lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq,
        cq->assoc_qid = eq->queue_id;
        cq->host_index = 0;
        cq->hba_index = 0;
+       cq->entry_repost = LPFC_CQ_REPOST;
 
 out:
        mempool_free(mbox, phba->mbox_mem_pool);
@@ -14392,6 +14486,7 @@ lpfc_cq_create_set(struct lpfc_hba *phba, struct lpfc_queue **cqp,
                cq->assoc_qid = eq->queue_id;
                cq->host_index = 0;
                cq->hba_index = 0;
+               cq->entry_repost = LPFC_CQ_REPOST;
 
                rc = 0;
                list_for_each_entry(dmabuf, &cq->page_list, list) {
@@ -14640,6 +14735,7 @@ lpfc_mq_create(struct lpfc_hba *phba, struct lpfc_queue *mq,
        mq->subtype = subtype;
        mq->host_index = 0;
        mq->hba_index = 0;
+       mq->entry_repost = LPFC_MQ_REPOST;
 
        /* link the mq onto the parent cq child list */
        list_add_tail(&mq->list, &cq->child_list);
@@ -14864,34 +14960,6 @@ out:
        return status;
 }
 
-/**
- * lpfc_rq_adjust_repost - Adjust entry_repost for an RQ
- * @phba: HBA structure that indicates port to create a queue on.
- * @rq:   The queue structure to use for the receive queue.
- * @qno:  The associated HBQ number
- *
- *
- * For SLI4 we need to adjust the RQ repost value based on
- * the number of buffers that are initially posted to the RQ.
- */
-void
-lpfc_rq_adjust_repost(struct lpfc_hba *phba, struct lpfc_queue *rq, int qno)
-{
-       uint32_t cnt;
-
-       /* sanity check on queue memory */
-       if (!rq)
-               return;
-       cnt = lpfc_hbq_defs[qno]->entry_count;
-
-       /* Recalc repost for RQs based on buffers initially posted */
-       cnt = (cnt >> 3);
-       if (cnt < LPFC_QUEUE_MIN_REPOST)
-               cnt = LPFC_QUEUE_MIN_REPOST;
-
-       rq->entry_repost = cnt;
-}
-
 /**
  * lpfc_rq_create - Create a Receive Queue on the HBA
  * @phba: HBA structure that indicates port to create a queue on.
@@ -15077,6 +15145,7 @@ lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq,
        hrq->subtype = subtype;
        hrq->host_index = 0;
        hrq->hba_index = 0;
+       hrq->entry_repost = LPFC_RQ_REPOST;
 
        /* now create the data queue */
        lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
@@ -15087,7 +15156,12 @@ lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq,
        if (phba->sli4_hba.pc_sli4_params.rqv == LPFC_Q_CREATE_VERSION_1) {
                bf_set(lpfc_rq_context_rqe_count_1,
                       &rq_create->u.request.context, hrq->entry_count);
-               rq_create->u.request.context.buffer_size = LPFC_DATA_BUF_SIZE;
+               if (subtype == LPFC_NVMET)
+                       rq_create->u.request.context.buffer_size =
+                               LPFC_NVMET_DATA_BUF_SIZE;
+               else
+                       rq_create->u.request.context.buffer_size =
+                               LPFC_DATA_BUF_SIZE;
                bf_set(lpfc_rq_context_rqe_size, &rq_create->u.request.context,
                       LPFC_RQE_SIZE_8);
                bf_set(lpfc_rq_context_page_size, &rq_create->u.request.context,
@@ -15124,8 +15198,14 @@ lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq,
                               LPFC_RQ_RING_SIZE_4096);
                        break;
                }
-               bf_set(lpfc_rq_context_buf_size, &rq_create->u.request.context,
-                      LPFC_DATA_BUF_SIZE);
+               if (subtype == LPFC_NVMET)
+                       bf_set(lpfc_rq_context_buf_size,
+                              &rq_create->u.request.context,
+                              LPFC_NVMET_DATA_BUF_SIZE);
+               else
+                       bf_set(lpfc_rq_context_buf_size,
+                              &rq_create->u.request.context,
+                              LPFC_DATA_BUF_SIZE);
        }
        bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context,
               cq->queue_id);
@@ -15158,6 +15238,7 @@ lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq,
        drq->subtype = subtype;
        drq->host_index = 0;
        drq->hba_index = 0;
+       drq->entry_repost = LPFC_RQ_REPOST;
 
        /* link the header and data RQs onto the parent cq child list */
        list_add_tail(&hrq->list, &cq->child_list);
@@ -15270,7 +15351,7 @@ lpfc_mrq_create(struct lpfc_hba *phba, struct lpfc_queue **hrqp,
                               cq->queue_id);
                        bf_set(lpfc_rq_context_data_size,
                               &rq_create->u.request.context,
-                              LPFC_DATA_BUF_SIZE);
+                              LPFC_NVMET_DATA_BUF_SIZE);
                        bf_set(lpfc_rq_context_hdr_size,
                               &rq_create->u.request.context,
                               LPFC_HDR_BUF_SIZE);
@@ -15315,6 +15396,7 @@ lpfc_mrq_create(struct lpfc_hba *phba, struct lpfc_queue **hrqp,
                hrq->subtype = subtype;
                hrq->host_index = 0;
                hrq->hba_index = 0;
+               hrq->entry_repost = LPFC_RQ_REPOST;
 
                drq->db_format = LPFC_DB_RING_FORMAT;
                drq->db_regaddr = phba->sli4_hba.RQDBregaddr;
@@ -15323,6 +15405,7 @@ lpfc_mrq_create(struct lpfc_hba *phba, struct lpfc_queue **hrqp,
                drq->subtype = subtype;
                drq->host_index = 0;
                drq->hba_index = 0;
+               drq->entry_repost = LPFC_RQ_REPOST;
 
                list_add_tail(&hrq->list, &cq->child_list);
                list_add_tail(&drq->list, &cq->child_list);
@@ -16063,6 +16146,8 @@ lpfc_fc_frame_check(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr)
        struct fc_vft_header *fc_vft_hdr;
        uint32_t *header = (uint32_t *) fc_hdr;
 
+#define FC_RCTL_MDS_DIAGS      0xF4
+
        switch (fc_hdr->fh_r_ctl) {
        case FC_RCTL_DD_UNCAT:          /* uncategorized information */
        case FC_RCTL_DD_SOL_DATA:       /* solicited data */
@@ -16090,6 +16175,7 @@ lpfc_fc_frame_check(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr)
        case FC_RCTL_F_BSY:     /* fabric busy to data frame */
        case FC_RCTL_F_BSYL:    /* fabric busy to link control frame */
        case FC_RCTL_LCR:       /* link credit reset */
+       case FC_RCTL_MDS_DIAGS: /* MDS Diagnostics */
        case FC_RCTL_END:       /* end */
                break;
        case FC_RCTL_VFTH:      /* Virtual Fabric tagging Header */
@@ -16099,12 +16185,16 @@ lpfc_fc_frame_check(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr)
        default:
                goto drop;
        }
+
+#define FC_TYPE_VENDOR_UNIQUE  0xFF
+
        switch (fc_hdr->fh_type) {
        case FC_TYPE_BLS:
        case FC_TYPE_ELS:
        case FC_TYPE_FCP:
        case FC_TYPE_CT:
        case FC_TYPE_NVME:
+       case FC_TYPE_VENDOR_UNIQUE:
                break;
        case FC_TYPE_IP:
        case FC_TYPE_ILS:
@@ -16115,12 +16205,14 @@ lpfc_fc_frame_check(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr)
        lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
                        "2538 Received frame rctl:%s (x%x), type:%s (x%x), "
                        "frame Data:%08x %08x %08x %08x %08x %08x %08x\n",
+                       (fc_hdr->fh_r_ctl == FC_RCTL_MDS_DIAGS) ? "MDS Diags" :
                        lpfc_rctl_names[fc_hdr->fh_r_ctl], fc_hdr->fh_r_ctl,
-                       lpfc_type_names[fc_hdr->fh_type], fc_hdr->fh_type,
-                       be32_to_cpu(header[0]), be32_to_cpu(header[1]),
-                       be32_to_cpu(header[2]), be32_to_cpu(header[3]),
-                       be32_to_cpu(header[4]), be32_to_cpu(header[5]),
-                       be32_to_cpu(header[6]));
+                       (fc_hdr->fh_type == FC_TYPE_VENDOR_UNIQUE) ?
+                       "Vendor Unique" : lpfc_type_names[fc_hdr->fh_type],
+                       fc_hdr->fh_type, be32_to_cpu(header[0]),
+                       be32_to_cpu(header[1]), be32_to_cpu(header[2]),
+                       be32_to_cpu(header[3]), be32_to_cpu(header[4]),
+                       be32_to_cpu(header[5]), be32_to_cpu(header[6]));
        return 0;
 drop:
        lpfc_printf_log(phba, KERN_WARNING, LOG_ELS,
@@ -16926,6 +17018,96 @@ lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *vport,
        lpfc_sli_release_iocbq(phba, iocbq);
 }
 
+static void
+lpfc_sli4_mds_loopback_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+                           struct lpfc_iocbq *rspiocb)
+{
+       struct lpfc_dmabuf *pcmd = cmdiocb->context2;
+
+       if (pcmd && pcmd->virt)
+               pci_pool_free(phba->lpfc_drb_pool, pcmd->virt, pcmd->phys);
+       kfree(pcmd);
+       lpfc_sli_release_iocbq(phba, cmdiocb);
+}
+
+static void
+lpfc_sli4_handle_mds_loopback(struct lpfc_vport *vport,
+                             struct hbq_dmabuf *dmabuf)
+{
+       struct fc_frame_header *fc_hdr;
+       struct lpfc_hba *phba = vport->phba;
+       struct lpfc_iocbq *iocbq = NULL;
+       union  lpfc_wqe *wqe;
+       struct lpfc_dmabuf *pcmd = NULL;
+       uint32_t frame_len;
+       int rc;
+
+       fc_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
+       frame_len = bf_get(lpfc_rcqe_length, &dmabuf->cq_event.cqe.rcqe_cmpl);
+
+       /* Send the received frame back */
+       iocbq = lpfc_sli_get_iocbq(phba);
+       if (!iocbq)
+               goto exit;
+
+       /* Allocate buffer for command payload */
+       pcmd = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
+       if (pcmd)
+               pcmd->virt = pci_pool_alloc(phba->lpfc_drb_pool, GFP_KERNEL,
+                                           &pcmd->phys);
+       if (!pcmd || !pcmd->virt)
+               goto exit;
+
+       INIT_LIST_HEAD(&pcmd->list);
+
+       /* copyin the payload */
+       memcpy(pcmd->virt, dmabuf->dbuf.virt, frame_len);
+
+       /* fill in BDE's for command */
+       iocbq->iocb.un.xseq64.bdl.addrHigh = putPaddrHigh(pcmd->phys);
+       iocbq->iocb.un.xseq64.bdl.addrLow = putPaddrLow(pcmd->phys);
+       iocbq->iocb.un.xseq64.bdl.bdeFlags = BUFF_TYPE_BDE_64;
+       iocbq->iocb.un.xseq64.bdl.bdeSize = frame_len;
+
+       iocbq->context2 = pcmd;
+       iocbq->vport = vport;
+       iocbq->iocb_flag &= ~LPFC_FIP_ELS_ID_MASK;
+       iocbq->iocb_flag |= LPFC_USE_FCPWQIDX;
+
+       /*
+        * Setup rest of the iocb as though it were a WQE
+        * Build the SEND_FRAME WQE
+        */
+       wqe = (union lpfc_wqe *)&iocbq->iocb;
+
+       wqe->send_frame.frame_len = frame_len;
+       wqe->send_frame.fc_hdr_wd0 = be32_to_cpu(*((uint32_t *)fc_hdr));
+       wqe->send_frame.fc_hdr_wd1 = be32_to_cpu(*((uint32_t *)fc_hdr + 1));
+       wqe->send_frame.fc_hdr_wd2 = be32_to_cpu(*((uint32_t *)fc_hdr + 2));
+       wqe->send_frame.fc_hdr_wd3 = be32_to_cpu(*((uint32_t *)fc_hdr + 3));
+       wqe->send_frame.fc_hdr_wd4 = be32_to_cpu(*((uint32_t *)fc_hdr + 4));
+       wqe->send_frame.fc_hdr_wd5 = be32_to_cpu(*((uint32_t *)fc_hdr + 5));
+
+       iocbq->iocb.ulpCommand = CMD_SEND_FRAME;
+       iocbq->iocb.ulpLe = 1;
+       iocbq->iocb_cmpl = lpfc_sli4_mds_loopback_cmpl;
+       rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, iocbq, 0);
+       if (rc == IOCB_ERROR)
+               goto exit;
+
+       lpfc_in_buf_free(phba, &dmabuf->dbuf);
+       return;
+
+exit:
+       lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
+                       "2023 Unable to process MDS loopback frame\n");
+       if (pcmd && pcmd->virt)
+               pci_pool_free(phba->lpfc_drb_pool, pcmd->virt, pcmd->phys);
+       kfree(pcmd);
+       lpfc_sli_release_iocbq(phba, iocbq);
+       lpfc_in_buf_free(phba, &dmabuf->dbuf);
+}
+
 /**
  * lpfc_sli4_handle_received_buffer - Handle received buffers from firmware
  * @phba: Pointer to HBA context object.
@@ -16964,6 +17146,13 @@ lpfc_sli4_handle_received_buffer(struct lpfc_hba *phba,
                fcfi = bf_get(lpfc_rcqe_fcf_id,
                              &dmabuf->cq_event.cqe.rcqe_cmpl);
 
+       if (fc_hdr->fh_r_ctl == 0xF4 && fc_hdr->fh_type == 0xFF) {
+               vport = phba->pport;
+               /* Handle MDS Loopback frames */
+               lpfc_sli4_handle_mds_loopback(vport, dmabuf);
+               return;
+       }
+
        /* d_id this frame is directed to */
        did = sli4_did_from_fc_hdr(fc_hdr);
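The magic numbers in the new check above match the defines this patch introduces inside lpfc_fc_frame_check(): 0xF4 is FC_RCTL_MDS_DIAGS and 0xFF is FC_TYPE_VENDOR_UNIQUE, i.e. an MDS diagnostic frame, which is echoed straight back through lpfc_sli4_handle_mds_loopback() before any D_ID routing. An equivalent form using those names (they are currently local to lpfc_fc_frame_check(), so this would require hoisting them; a suggestion, not part of the patch):

    if (fc_hdr->fh_r_ctl == FC_RCTL_MDS_DIAGS &&
        fc_hdr->fh_type == FC_TYPE_VENDOR_UNIQUE) {
            /* Handle MDS Loopback frames */
            lpfc_sli4_handle_mds_loopback(phba->pport, dmabuf);
            return;
    }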
 
@@ -17137,6 +17326,14 @@ lpfc_sli4_post_rpi_hdr(struct lpfc_hba *phba, struct lpfc_rpi_hdr *rpi_page)
                                "status x%x add_status x%x, mbx status x%x\n",
                                shdr_status, shdr_add_status, rc);
                rc = -ENXIO;
+       } else {
+               /*
+                * The next_rpi stores the next logical modulo-64 rpi value used
+                * to post physical rpis in subsequent rpi postings.
+                */
+               spin_lock_irq(&phba->hbalock);
+               phba->sli4_hba.next_rpi = rpi_page->next_rpi;
+               spin_unlock_irq(&phba->hbalock);
        }
        return rc;
 }
@@ -18717,7 +18914,7 @@ lpfc_sli4_issue_wqe(struct lpfc_hba *phba, uint32_t ring_number,
 
                spin_lock_irqsave(&pring->ring_lock, iflags);
                ctxp = pwqe->context2;
-               sglq = ctxp->rqb_buffer->sglq;
+               sglq = ctxp->ctxbuf->sglq;
                if (pwqe->sli4_xritag ==  NO_XRI) {
                        pwqe->sli4_lxritag = sglq->sli4_lxritag;
                        pwqe->sli4_xritag = sglq->sli4_xritag;
diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h
index da46471337c8a7e3b6c38164bc4afe1a4f0087c2..cf863db27700a2400463bcc65777ec15f9806da4 100644 (file)
@@ -24,7 +24,6 @@
 #define LPFC_XRI_EXCH_BUSY_WAIT_TMO            10000
 #define LPFC_XRI_EXCH_BUSY_WAIT_T1             10
 #define LPFC_XRI_EXCH_BUSY_WAIT_T2              30000
-#define LPFC_RELEASE_NOTIFICATION_INTERVAL     32
 #define LPFC_RPI_LOW_WATER_MARK                        10
 
 #define LPFC_UNREG_FCF                          1
@@ -155,7 +154,11 @@ struct lpfc_queue {
        uint32_t entry_count;   /* Number of entries to support on the queue */
        uint32_t entry_size;    /* Size of each queue entry. */
        uint32_t entry_repost;  /* Count of entries before doorbell is rung */
-#define LPFC_QUEUE_MIN_REPOST  8
+#define LPFC_EQ_REPOST         8
+#define LPFC_MQ_REPOST         8
+#define LPFC_CQ_REPOST         64
+#define LPFC_RQ_REPOST         64
+#define LPFC_RELEASE_NOTIFICATION_INTERVAL     32  /* For WQs */
        uint32_t queue_id;      /* Queue ID assigned by the hardware */
        uint32_t assoc_qid;     /* Queue ID associated with, for CQ/WQ/MQ */
        uint32_t page_count;    /* Number of pages allocated for this queue */
@@ -195,7 +198,7 @@ struct lpfc_queue {
 /* defines for RQ stats */
 #define        RQ_no_posted_buf        q_cnt_1
 #define        RQ_no_buf_found         q_cnt_2
-#define        RQ_buf_trunc            q_cnt_3
+#define        RQ_buf_posted           q_cnt_3
 #define        RQ_rcv_buf              q_cnt_4
 
        uint64_t isr_timestamp;
@@ -617,12 +620,17 @@ struct lpfc_sli4_hba {
        uint16_t scsi_xri_start;
        uint16_t els_xri_cnt;
        uint16_t nvmet_xri_cnt;
+       uint16_t nvmet_ctx_cnt;
+       uint16_t nvmet_io_wait_cnt;
+       uint16_t nvmet_io_wait_total;
        struct list_head lpfc_els_sgl_list;
        struct list_head lpfc_abts_els_sgl_list;
        struct list_head lpfc_nvmet_sgl_list;
        struct list_head lpfc_abts_nvmet_ctx_list;
        struct list_head lpfc_abts_scsi_buf_list;
        struct list_head lpfc_abts_nvme_buf_list;
+       struct list_head lpfc_nvmet_ctx_list;
+       struct list_head lpfc_nvmet_io_wait_list;
        struct lpfc_sglq **lpfc_sglq_active_list;
        struct list_head lpfc_rpi_hdr_list;
        unsigned long *rpi_bmask;
@@ -654,6 +662,7 @@ struct lpfc_sli4_hba {
        spinlock_t abts_scsi_buf_list_lock; /* list of aborted SCSI IOs */
        spinlock_t sgl_list_lock; /* list of aborted els IOs */
        spinlock_t nvmet_io_lock;
+       spinlock_t nvmet_io_wait_lock; /* IOs waiting for ctx resources */
        uint32_t physical_port;
 
        /* CPU to vector mapping information */
@@ -661,8 +670,6 @@ struct lpfc_sli4_hba {
        uint16_t num_online_cpu;
        uint16_t num_present_cpu;
        uint16_t curr_disp_cpu;
-
-       uint16_t nvmet_mrq_post_idx;
 };
 
 enum lpfc_sge_type {
@@ -698,6 +705,7 @@ struct lpfc_rpi_hdr {
        struct lpfc_dmabuf *dmabuf;
        uint32_t page_count;
        uint32_t start_rpi;
+       uint16_t next_rpi;
 };
 
 struct lpfc_rsrc_blks {
@@ -762,7 +770,6 @@ int lpfc_rq_create(struct lpfc_hba *, struct lpfc_queue *,
 int lpfc_mrq_create(struct lpfc_hba *phba, struct lpfc_queue **hrqp,
                        struct lpfc_queue **drqp, struct lpfc_queue **cqp,
                        uint32_t subtype);
-void lpfc_rq_adjust_repost(struct lpfc_hba *, struct lpfc_queue *, int);
 int lpfc_eq_destroy(struct lpfc_hba *, struct lpfc_queue *);
 int lpfc_cq_destroy(struct lpfc_hba *, struct lpfc_queue *);
 int lpfc_mq_destroy(struct lpfc_hba *, struct lpfc_queue *);
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index 1c26dc67151b65e050b729e63a0871cb6846564f..c2653244221cb1b2cb987d962686daeed43094c3 100644 (file)
@@ -20,7 +20,7 @@
  * included with this package.                                     *
  *******************************************************************/
 
-#define LPFC_DRIVER_VERSION "11.2.0.12"
+#define LPFC_DRIVER_VERSION "11.2.0.14"
 #define LPFC_DRIVER_NAME               "lpfc"
 
 /* Used for SLI 2/3 */
diff --git a/drivers/scsi/qedi/qedi.h b/drivers/scsi/qedi/qedi.h
index 5ca3e8c28a3f6af3b4faf1d82ec1ab6f37480127..32632c9b22766d59e223a256c430810a4fa33bec 100644 (file)
@@ -38,7 +38,7 @@ struct qedi_endpoint;
 #define QEDI_MAX_ISCSI_TASK            4096
 #define QEDI_MAX_TASK_NUM              0x0FFF
 #define QEDI_MAX_ISCSI_CONNS_PER_HBA   1024
-#define QEDI_ISCSI_MAX_BDS_PER_CMD     256     /* Firmware max BDs is 256 */
+#define QEDI_ISCSI_MAX_BDS_PER_CMD     255     /* Firmware max BDs is 255 */
 #define MAX_OUSTANDING_TASKS_PER_CON   1024
 
 #define QEDI_MAX_BD_LEN                0xffff
@@ -63,6 +63,7 @@ struct qedi_endpoint;
 #define QEDI_PAGE_MASK         (~((QEDI_PAGE_SIZE) - 1))
 
 #define QEDI_PAGE_SIZE         4096
+#define QEDI_HW_DMA_BOUNDARY   0xfff
 #define QEDI_PATH_HANDLE       0xFE0000000UL
 
 struct qedi_uio_ctrl {
diff --git a/drivers/scsi/qedi/qedi_fw.c b/drivers/scsi/qedi/qedi_fw.c
index d6978cbc56f0586aa8a075191433184c50c93b01..8bc7ee1a8ca81626829329e80831ffa2d57b8c63 100644 (file)
@@ -1494,6 +1494,8 @@ static int qedi_send_iscsi_tmf(struct qedi_conn *qedi_conn,
        tmf_hdr = (struct iscsi_tm *)mtask->hdr;
        qedi_cmd = (struct qedi_cmd *)mtask->dd_data;
        ep = qedi_conn->ep;
+       if (!ep)
+               return -ENODEV;
 
        tid = qedi_get_task_idx(qedi);
        if (tid == -1)
diff --git a/drivers/scsi/qedi/qedi_iscsi.c b/drivers/scsi/qedi/qedi_iscsi.c
index 3548d46f9b275825a9f76765966cf8c943762e88..87f0af358b33ae4563ba2ee83a1aaa4ce9ca7945 100644 (file)
@@ -59,6 +59,7 @@ struct scsi_host_template qedi_host_template = {
        .this_id = -1,
        .sg_tablesize = QEDI_ISCSI_MAX_BDS_PER_CMD,
        .max_sectors = 0xffff,
+       .dma_boundary = QEDI_HW_DMA_BOUNDARY,
        .cmd_per_lun = 128,
        .use_clustering = ENABLE_CLUSTERING,
        .shost_attrs = qedi_shost_attrs,
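Setting .dma_boundary to QEDI_HW_DMA_BOUNDARY (0xfff) tells the SCSI midlayer that no scatter-gather segment may cross a 4 KiB boundary; it complements the QEDI_ISCSI_MAX_BDS_PER_CMD correction above in keeping the host template within firmware limits. The midlayer applies the mask when it configures each device's request queue, roughly:

    /* Done by the SCSI midlayer per scsi_device (simplified sketch) */
    blk_queue_segment_boundary(sdev->request_queue,
                               shost->dma_boundary);    /* 0xfff => 4 KiB */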
@@ -1223,8 +1224,12 @@ static int qedi_set_path(struct Scsi_Host *shost, struct iscsi_path *path_data)
 
        iscsi_cid = (u32)path_data->handle;
        qedi_ep = qedi->ep_tbl[iscsi_cid];
-       QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
+       QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
                  "iscsi_cid=0x%x, qedi_ep=%p\n", iscsi_cid, qedi_ep);
+       if (!qedi_ep) {
+               ret = -EINVAL;
+               goto set_path_exit;
+       }
 
        if (!is_valid_ether_addr(&path_data->mac_addr[0])) {
                QEDI_NOTICE(&qedi->dbg_ctx, "dst mac NOT VALID\n");
diff --git a/drivers/scsi/qedi/qedi_main.c b/drivers/scsi/qedi/qedi_main.c
index 92775a8b74b1cdc068b8d8808b7bd9ccd5b212eb..09a294634bc7e8898a2d209a9a5cef3d50eb8f32 100644 (file)
@@ -151,6 +151,11 @@ static int qedi_uio_close(struct uio_info *uinfo, struct inode *inode)
 
 static void __qedi_free_uio_rings(struct qedi_uio_dev *udev)
 {
+       if (udev->uctrl) {
+               free_page((unsigned long)udev->uctrl);
+               udev->uctrl = NULL;
+       }
+
        if (udev->ll2_ring) {
                free_page((unsigned long)udev->ll2_ring);
                udev->ll2_ring = NULL;
@@ -169,7 +174,6 @@ static void __qedi_free_uio(struct qedi_uio_dev *udev)
        __qedi_free_uio_rings(udev);
 
        pci_dev_put(udev->pdev);
-       kfree(udev->uctrl);
        kfree(udev);
 }
 
@@ -208,6 +212,11 @@ static int __qedi_alloc_uio_rings(struct qedi_uio_dev *udev)
        if (udev->ll2_ring || udev->ll2_buf)
                return rc;
 
+       /* Memory for control area.  */
+       udev->uctrl = (void *)get_zeroed_page(GFP_KERNEL);
+       if (!udev->uctrl)
+               return -ENOMEM;
+
        /* Allocating memory for LL2 ring  */
        udev->ll2_ring_size = QEDI_PAGE_SIZE;
        udev->ll2_ring = (void *)get_zeroed_page(GFP_KERNEL | __GFP_COMP);
@@ -237,7 +246,6 @@ exit_alloc_ring:
 static int qedi_alloc_uio_rings(struct qedi_ctx *qedi)
 {
        struct qedi_uio_dev *udev = NULL;
-       struct qedi_uio_ctrl *uctrl = NULL;
        int rc = 0;
 
        list_for_each_entry(udev, &qedi_udev_list, list) {
@@ -258,21 +266,14 @@ static int qedi_alloc_uio_rings(struct qedi_ctx *qedi)
                goto err_udev;
        }
 
-       uctrl = kzalloc(sizeof(*uctrl), GFP_KERNEL);
-       if (!uctrl) {
-               rc = -ENOMEM;
-               goto err_uctrl;
-       }
-
        udev->uio_dev = -1;
 
        udev->qedi = qedi;
        udev->pdev = qedi->pdev;
-       udev->uctrl = uctrl;
 
        rc = __qedi_alloc_uio_rings(udev);
        if (rc)
-               goto err_uio_rings;
+               goto err_uctrl;
 
        list_add(&udev->list, &qedi_udev_list);
 
@@ -283,8 +284,6 @@ static int qedi_alloc_uio_rings(struct qedi_ctx *qedi)
        udev->rx_pkt = udev->ll2_buf + LL2_SINGLE_BUF_SIZE;
        return 0;
 
- err_uio_rings:
-       kfree(uctrl);
  err_uctrl:
        kfree(udev);
  err_udev:
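Moving uctrl from kzalloc() to get_zeroed_page() (and freeing it in __qedi_free_uio_rings() with the other rings) gives the control area a page of its own, like ll2_ring and ll2_buf; page-backed memory is safe to expose to the userspace iscsiuio process through UIO mappings, which a slab object is not. (That mapping rationale is inferred from the surrounding ring allocations; the patch itself only shows the allocator change.) The pairing after this change:

    /* alloc, in __qedi_alloc_uio_rings(): page-aligned, zeroed */
    udev->uctrl = (void *)get_zeroed_page(GFP_KERNEL);
    if (!udev->uctrl)
            return -ENOMEM;

    /* free, in __qedi_free_uio_rings(): */
    free_page((unsigned long)udev->uctrl);
    udev->uctrl = NULL;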
@@ -828,6 +827,8 @@ static int qedi_set_iscsi_pf_param(struct qedi_ctx *qedi)
        qedi->pf_params.iscsi_pf_params.num_uhq_pages_in_ring = num_sq_pages;
        qedi->pf_params.iscsi_pf_params.num_queues = qedi->num_queues;
        qedi->pf_params.iscsi_pf_params.debug_mode = qedi_fw_debug;
+       qedi->pf_params.iscsi_pf_params.two_msl_timer = 4000;
+       qedi->pf_params.iscsi_pf_params.max_fin_rt = 2;
 
        for (log_page_size = 0 ; log_page_size < 32 ; log_page_size++) {
                if ((1 << log_page_size) == PAGE_SIZE)
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index e31f1cc90b815b28a332d1a6915c82e19519a62f..99e16ac479e365d343840f44c9f7b0c50b4042cc 100644 (file)
@@ -1851,7 +1851,7 @@ static int scsi_mq_prep_fn(struct request *req)
 
        /* zero out the cmd, except for the embedded scsi_request */
        memset((char *)cmd + sizeof(cmd->req), 0,
-               sizeof(*cmd) - sizeof(cmd->req));
+               sizeof(*cmd) - sizeof(cmd->req) + shost->hostt->cmd_size);
 
        req->special = cmd;
 
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index f9d1432d7cc589360354c9d3fdbe4c0967e23ee6..b6bb4e0ce0e3288f321ef1319621063f02d6c868 100644 (file)
@@ -827,21 +827,32 @@ static int sd_setup_write_zeroes_cmnd(struct scsi_cmnd *cmd)
        struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
        u64 sector = blk_rq_pos(rq) >> (ilog2(sdp->sector_size) - 9);
        u32 nr_sectors = blk_rq_sectors(rq) >> (ilog2(sdp->sector_size) - 9);
+       int ret;
 
        if (!(rq->cmd_flags & REQ_NOUNMAP)) {
                switch (sdkp->zeroing_mode) {
                case SD_ZERO_WS16_UNMAP:
-                       return sd_setup_write_same16_cmnd(cmd, true);
+                       ret = sd_setup_write_same16_cmnd(cmd, true);
+                       goto out;
                case SD_ZERO_WS10_UNMAP:
-                       return sd_setup_write_same10_cmnd(cmd, true);
+                       ret = sd_setup_write_same10_cmnd(cmd, true);
+                       goto out;
                }
        }
 
        if (sdp->no_write_same)
                return BLKPREP_INVALID;
+
        if (sdkp->ws16 || sector > 0xffffffff || nr_sectors > 0xffff)
-               return sd_setup_write_same16_cmnd(cmd, false);
-       return sd_setup_write_same10_cmnd(cmd, false);
+               ret = sd_setup_write_same16_cmnd(cmd, false);
+       else
+               ret = sd_setup_write_same10_cmnd(cmd, false);
+
+out:
+       if (sd_is_zoned(sdkp) && ret == BLKPREP_OK)
+               return sd_zbc_write_lock_zone(cmd);
+
+       return ret;
 }
 
 static void sd_config_write_same(struct scsi_disk *sdkp)
@@ -948,6 +959,10 @@ static int sd_setup_write_same_cmnd(struct scsi_cmnd *cmd)
        rq->__data_len = sdp->sector_size;
        ret = scsi_init_io(cmd);
        rq->__data_len = nr_bytes;
+
+       if (sd_is_zoned(sdkp) && ret != BLKPREP_OK)
+               sd_zbc_write_unlock_zone(cmd);
+
        return ret;
 }
 
@@ -1567,17 +1582,21 @@ out:
        return retval;
 }
 
-static int sd_sync_cache(struct scsi_disk *sdkp)
+static int sd_sync_cache(struct scsi_disk *sdkp, struct scsi_sense_hdr *sshdr)
 {
        int retries, res;
        struct scsi_device *sdp = sdkp->device;
        const int timeout = sdp->request_queue->rq_timeout
                * SD_FLUSH_TIMEOUT_MULTIPLIER;
-       struct scsi_sense_hdr sshdr;
+       struct scsi_sense_hdr my_sshdr;
 
        if (!scsi_device_online(sdp))
                return -ENODEV;
 
+       /* caller might not be interested in sense, but we need it */
+       if (!sshdr)
+               sshdr = &my_sshdr;
+
        for (retries = 3; retries > 0; --retries) {
                unsigned char cmd[10] = { 0 };
 
@@ -1586,7 +1605,7 @@ static int sd_sync_cache(struct scsi_disk *sdkp)
                 * Leave the rest of the command zero to indicate
                 * flush everything.
                 */
-               res = scsi_execute(sdp, cmd, DMA_NONE, NULL, 0, NULL, &sshdr,
+               res = scsi_execute(sdp, cmd, DMA_NONE, NULL, 0, NULL, sshdr,
                                timeout, SD_MAX_RETRIES, 0, RQF_PM, NULL);
                if (res == 0)
                        break;
@@ -1596,11 +1615,12 @@ static int sd_sync_cache(struct scsi_disk *sdkp)
                sd_print_result(sdkp, "Synchronize Cache(10) failed", res);
 
                if (driver_byte(res) & DRIVER_SENSE)
-                       sd_print_sense_hdr(sdkp, &sshdr);
+                       sd_print_sense_hdr(sdkp, sshdr);
+
                /* we need to evaluate the error return  */
-               if (scsi_sense_valid(&sshdr) &&
-                       (sshdr.asc == 0x3a ||   /* medium not present */
-                        sshdr.asc == 0x20))    /* invalid command */
+               if (scsi_sense_valid(sshdr) &&
+                       (sshdr->asc == 0x3a ||  /* medium not present */
+                        sshdr->asc == 0x20))   /* invalid command */
                                /* this is no error here */
                                return 0;
 
@@ -3444,7 +3464,7 @@ static void sd_shutdown(struct device *dev)
 
        if (sdkp->WCE && sdkp->media_present) {
                sd_printk(KERN_NOTICE, sdkp, "Synchronizing SCSI cache\n");
-               sd_sync_cache(sdkp);
+               sd_sync_cache(sdkp, NULL);
        }
 
        if (system_state != SYSTEM_RESTART && sdkp->device->manage_start_stop) {
@@ -3456,6 +3476,7 @@ static void sd_shutdown(struct device *dev)
 static int sd_suspend_common(struct device *dev, bool ignore_stop_errors)
 {
        struct scsi_disk *sdkp = dev_get_drvdata(dev);
+       struct scsi_sense_hdr sshdr;
        int ret = 0;
 
        if (!sdkp)      /* E.g.: runtime suspend following sd_remove() */
@@ -3463,12 +3484,23 @@ static int sd_suspend_common(struct device *dev, bool ignore_stop_errors)
 
        if (sdkp->WCE && sdkp->media_present) {
                sd_printk(KERN_NOTICE, sdkp, "Synchronizing SCSI cache\n");
-               ret = sd_sync_cache(sdkp);
+               ret = sd_sync_cache(sdkp, &sshdr);
+
                if (ret) {
                        /* ignore OFFLINE device */
                        if (ret == -ENODEV)
-                               ret = 0;
-                       goto done;
+                               return 0;
+
+                       if (!scsi_sense_valid(&sshdr) ||
+                           sshdr.sense_key != ILLEGAL_REQUEST)
+                               return ret;
+
+                       /*
+                        * sshdr.sense_key == ILLEGAL_REQUEST means this drive
+                        * doesn't support sync. There's not much to do and
+                        * suspend shouldn't fail.
+                        */
+                       ret = 0;
                }
        }
 
@@ -3480,7 +3512,6 @@ static int sd_suspend_common(struct device *dev, bool ignore_stop_errors)
                        ret = 0;
        }
 
-done:
        return ret;
 }
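The new sshdr argument exists so sd_suspend_common() can tell "drive does not implement SYNCHRONIZE CACHE" (ILLEGAL_REQUEST, which is harmless) apart from a real flush failure; callers that do not care pass NULL and sd_sync_cache() falls back to its on-stack my_sshdr. The two call shapes, condensed from the hunks above:

    sd_sync_cache(sdkp, NULL);              /* shutdown: fire and forget */

    struct scsi_sense_hdr sshdr;
    ret = sd_sync_cache(sdkp, &sshdr);      /* suspend: inspect the sense */
    if (ret && scsi_sense_valid(&sshdr) &&
        sshdr.sense_key == ILLEGAL_REQUEST)
            ret = 0;                        /* sync unsupported: don't fail */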
 
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index 0a38ba01b7b4aac2151faf88421c8f2645dbd86a..82c33a6edbeaa7a00e6f7840ef4b5d8cdb8a084f 100644 (file)
@@ -2074,11 +2074,12 @@ sg_get_rq_mark(Sg_fd * sfp, int pack_id)
                if ((1 == resp->done) && (!resp->sg_io_owned) &&
                    ((-1 == pack_id) || (resp->header.pack_id == pack_id))) {
                        resp->done = 2; /* guard against other readers */
-                       break;
+                       write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
+                       return resp;
                }
        }
        write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
-       return resp;
+       return NULL;
 }
 
 /* always adds to end of list */
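The sg_get_rq_mark() fix returns the matched request directly from the write-locked section, and an unmatched walk now returns NULL instead of the list_for_each_entry() cursor, which after a full traversal points at the list head cast to an Sg_request rather than at a real entry. Generalized shape of the safe locked search (is_match() is a stand-in for the done/sg_io_owned/pack_id test):

    write_lock_irqsave(&sfp->rq_list_lock, iflags);
    list_for_each_entry(resp, &sfp->rq_list, entry) {
            if (is_match(resp, pack_id)) {
                    resp->done = 2;         /* claim while still locked */
                    write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
                    return resp;
            }
    }
    write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
    return NULL;        /* the cursor is not a valid entry after a full walk */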
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index abc7e87937cc3087617ca404dc73b01b39ff7822..ffe8d86088181c7da9c30ad58986418842f7016d 100644 (file)
@@ -7698,6 +7698,12 @@ static inline void ufshcd_add_sysfs_nodes(struct ufs_hba *hba)
        ufshcd_add_spm_lvl_sysfs_nodes(hba);
 }
 
+static inline void ufshcd_remove_sysfs_nodes(struct ufs_hba *hba)
+{
+       device_remove_file(hba->dev, &hba->rpm_lvl_attr);
+       device_remove_file(hba->dev, &hba->spm_lvl_attr);
+}
+
 /**
  * ufshcd_shutdown - shutdown routine
  * @hba: per adapter instance
@@ -7735,6 +7741,7 @@ EXPORT_SYMBOL(ufshcd_shutdown);
  */
 void ufshcd_remove(struct ufs_hba *hba)
 {
+       ufshcd_remove_sysfs_nodes(hba);
        scsi_remove_host(hba->host);
        /* disable interrupts */
        ufshcd_disable_intr(hba, hba->intr_mask);
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index 26a9bcd5ee6a40c391195ef13e205d8d936c8a54..0d8f81591bed076fa1f89f7cd27360776488f349 100644 (file)
@@ -3790,6 +3790,8 @@ int iscsi_target_tx_thread(void *arg)
 {
        int ret = 0;
        struct iscsi_conn *conn = arg;
+       bool conn_freed = false;
+
        /*
         * Allow ourselves to be interrupted by SIGINT so that a
         * connection recovery / failure event can be triggered externally.
@@ -3815,12 +3817,14 @@ get_immediate:
                        goto transport_err;
 
                ret = iscsit_handle_response_queue(conn);
-               if (ret == 1)
+               if (ret == 1) {
                        goto get_immediate;
-               else if (ret == -ECONNRESET)
+               } else if (ret == -ECONNRESET) {
+                       conn_freed = true;
                        goto out;
-               else if (ret < 0)
+               } else if (ret < 0) {
                        goto transport_err;
+               }
        }
 
 transport_err:
@@ -3830,8 +3834,13 @@ transport_err:
         * responsible for cleaning up the early connection failure.
         */
        if (conn->conn_state != TARG_CONN_STATE_IN_LOGIN)
-               iscsit_take_action_for_connection_exit(conn);
+               iscsit_take_action_for_connection_exit(conn, &conn_freed);
 out:
+       if (!conn_freed) {
+               while (!kthread_should_stop()) {
+                       msleep(100);
+               }
+       }
        return 0;
 }
 
@@ -4004,6 +4013,7 @@ int iscsi_target_rx_thread(void *arg)
 {
        int rc;
        struct iscsi_conn *conn = arg;
+       bool conn_freed = false;
 
        /*
         * Allow ourselves to be interrupted by SIGINT so that a
@@ -4016,7 +4026,7 @@ int iscsi_target_rx_thread(void *arg)
         */
        rc = wait_for_completion_interruptible(&conn->rx_login_comp);
        if (rc < 0 || iscsi_target_check_conn_state(conn))
-               return 0;
+               goto out;
 
        if (!conn->conn_transport->iscsit_get_rx_pdu)
                return 0;
@@ -4025,7 +4035,15 @@ int iscsi_target_rx_thread(void *arg)
 
        if (!signal_pending(current))
                atomic_set(&conn->transport_failed, 1);
-       iscsit_take_action_for_connection_exit(conn);
+       iscsit_take_action_for_connection_exit(conn, &conn_freed);
+
+out:
+       if (!conn_freed) {
+               while (!kthread_should_stop()) {
+                       msleep(100);
+               }
+       }
+
        return 0;
 }
 
diff --git a/drivers/target/iscsi/iscsi_target_erl0.c b/drivers/target/iscsi/iscsi_target_erl0.c
index 9a96e17bf7cd5f7448c880ffafcaa123730ebe71..7fe2aa73cff69e04f8df8d79e3af1c634fb5ca04 100644 (file)
@@ -930,8 +930,10 @@ static void iscsit_handle_connection_cleanup(struct iscsi_conn *conn)
        }
 }
 
-void iscsit_take_action_for_connection_exit(struct iscsi_conn *conn)
+void iscsit_take_action_for_connection_exit(struct iscsi_conn *conn, bool *conn_freed)
 {
+       *conn_freed = false;
+
        spin_lock_bh(&conn->state_lock);
        if (atomic_read(&conn->connection_exit)) {
                spin_unlock_bh(&conn->state_lock);
@@ -942,6 +944,7 @@ void iscsit_take_action_for_connection_exit(struct iscsi_conn *conn)
        if (conn->conn_state == TARG_CONN_STATE_IN_LOGOUT) {
                spin_unlock_bh(&conn->state_lock);
                iscsit_close_connection(conn);
+               *conn_freed = true;
                return;
        }
 
@@ -955,4 +958,5 @@ void iscsit_take_action_for_connection_exit(struct iscsi_conn *conn)
        spin_unlock_bh(&conn->state_lock);
 
        iscsit_handle_connection_cleanup(conn);
+       *conn_freed = true;
 }
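The bool out-parameter tells the calling kthread whether iscsit_close_connection() ran and therefore freed conn. Only in that case may the thread return immediately; otherwise it must park until kthread_stop() reaps it, or the stopping path could act on a task that has already exited. The caller pattern shared by the RX and TX thread hunks above:

    bool conn_freed = false;

    iscsit_take_action_for_connection_exit(conn, &conn_freed);
    if (!conn_freed) {
            while (!kthread_should_stop())
                    msleep(100);    /* wait to be reaped by kthread_stop() */
    }
    return 0;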
diff --git a/drivers/target/iscsi/iscsi_target_erl0.h b/drivers/target/iscsi/iscsi_target_erl0.h
index 60e69e2af6eda981efb74e4ac313fb0d031093bd..3822d9cd12302071467af03d4920fda601fdd351 100644 (file)
@@ -15,6 +15,6 @@ extern int iscsit_stop_time2retain_timer(struct iscsi_session *);
 extern void iscsit_connection_reinstatement_rcfr(struct iscsi_conn *);
 extern void iscsit_cause_connection_reinstatement(struct iscsi_conn *, int);
 extern void iscsit_fall_back_to_erl0(struct iscsi_session *);
-extern void iscsit_take_action_for_connection_exit(struct iscsi_conn *);
+extern void iscsit_take_action_for_connection_exit(struct iscsi_conn *, bool *);
 
 #endif   /*** ISCSI_TARGET_ERL0_H ***/
diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c
index 66238477137bc46d35cade3167451e19f2d401ce..92b96b51d5068e77c45d85a5a4d16efc9ffa5a93 100644 (file)
@@ -1464,5 +1464,9 @@ int iscsi_target_login_thread(void *arg)
                        break;
        }
 
+       while (!kthread_should_stop()) {
+               msleep(100);
+       }
+
        return 0;
 }
index 7ccc9c1cbfd1a664fb4c37a5dd71f305e735f4bb..6f88b31242b0562b297e60fdf61552719ed7a97c 100644 (file)
@@ -493,14 +493,60 @@ static void iscsi_target_restore_sock_callbacks(struct iscsi_conn *conn)
 
 static int iscsi_target_do_login(struct iscsi_conn *, struct iscsi_login *);
 
-static bool iscsi_target_sk_state_check(struct sock *sk)
+static bool __iscsi_target_sk_check_close(struct sock *sk)
 {
        if (sk->sk_state == TCP_CLOSE_WAIT || sk->sk_state == TCP_CLOSE) {
-               pr_debug("iscsi_target_sk_state_check: TCP_CLOSE_WAIT|TCP_CLOSE,"
+               pr_debug("__iscsi_target_sk_check_close: TCP_CLOSE_WAIT|TCP_CLOSE,"
                        "returning FALSE\n");
-               return false;
+               return true;
        }
-       return true;
+       return false;
+}
+
+static bool iscsi_target_sk_check_close(struct iscsi_conn *conn)
+{
+       bool state = false;
+
+       if (conn->sock) {
+               struct sock *sk = conn->sock->sk;
+
+               read_lock_bh(&sk->sk_callback_lock);
+               state = (__iscsi_target_sk_check_close(sk) ||
+                        test_bit(LOGIN_FLAGS_CLOSED, &conn->login_flags));
+               read_unlock_bh(&sk->sk_callback_lock);
+       }
+       return state;
+}
+
+static bool iscsi_target_sk_check_flag(struct iscsi_conn *conn, unsigned int flag)
+{
+       bool state = false;
+
+       if (conn->sock) {
+               struct sock *sk = conn->sock->sk;
+
+               read_lock_bh(&sk->sk_callback_lock);
+               state = test_bit(flag, &conn->login_flags);
+               read_unlock_bh(&sk->sk_callback_lock);
+       }
+       return state;
+}
+
+static bool iscsi_target_sk_check_and_clear(struct iscsi_conn *conn, unsigned int flag)
+{
+       bool state = false;
+
+       if (conn->sock) {
+               struct sock *sk = conn->sock->sk;
+
+               write_lock_bh(&sk->sk_callback_lock);
+               state = (__iscsi_target_sk_check_close(sk) ||
+                        test_bit(LOGIN_FLAGS_CLOSED, &conn->login_flags));
+               if (!state)
+                       clear_bit(flag, &conn->login_flags);
+               write_unlock_bh(&sk->sk_callback_lock);
+       }
+       return state;
 }
 
 static void iscsi_target_login_drop(struct iscsi_conn *conn, struct iscsi_login *login)
@@ -540,6 +586,20 @@ static void iscsi_target_do_login_rx(struct work_struct *work)
 
        pr_debug("entering iscsi_target_do_login_rx, conn: %p, %s:%d\n",
                        conn, current->comm, current->pid);
+       /*
+        * If iscsi_target_do_login_rx() has been invoked by ->sk_data_ready()
+        * before initial PDU processing in iscsi_target_start_negotiation()
+        * has completed, go ahead and retry until the flag is cleared.
+        *
+        * Otherwise, if the TCP connection drops while this is occurring,
+        * iscsi_target_start_negotiation() will detect the failure, call
+        * cancel_delayed_work_sync(&conn->login_work), and cleanup the
+        * remaining iscsi connection resources from iscsi_np process context.
+        */
+       if (iscsi_target_sk_check_flag(conn, LOGIN_FLAGS_INITIAL_PDU)) {
+               schedule_delayed_work(&conn->login_work, msecs_to_jiffies(10));
+               return;
+       }
 
        spin_lock(&tpg->tpg_state_lock);
        state = (tpg->tpg_state == TPG_STATE_ACTIVE);
@@ -547,26 +607,12 @@ static void iscsi_target_do_login_rx(struct work_struct *work)
 
        if (!state) {
                pr_debug("iscsi_target_do_login_rx: tpg_state != TPG_STATE_ACTIVE\n");
-               iscsi_target_restore_sock_callbacks(conn);
-               iscsi_target_login_drop(conn, login);
-               iscsit_deaccess_np(np, tpg, tpg_np);
-               return;
+               goto err;
        }
 
-       if (conn->sock) {
-               struct sock *sk = conn->sock->sk;
-
-               read_lock_bh(&sk->sk_callback_lock);
-               state = iscsi_target_sk_state_check(sk);
-               read_unlock_bh(&sk->sk_callback_lock);
-
-               if (!state) {
-                       pr_debug("iscsi_target_do_login_rx, TCP state CLOSE\n");
-                       iscsi_target_restore_sock_callbacks(conn);
-                       iscsi_target_login_drop(conn, login);
-                       iscsit_deaccess_np(np, tpg, tpg_np);
-                       return;
-               }
+       if (iscsi_target_sk_check_close(conn)) {
+               pr_debug("iscsi_target_do_login_rx, TCP state CLOSE\n");
+               goto err;
        }
 
        conn->login_kworker = current;
@@ -584,34 +630,29 @@ static void iscsi_target_do_login_rx(struct work_struct *work)
        flush_signals(current);
        conn->login_kworker = NULL;
 
-       if (rc < 0) {
-               iscsi_target_restore_sock_callbacks(conn);
-               iscsi_target_login_drop(conn, login);
-               iscsit_deaccess_np(np, tpg, tpg_np);
-               return;
-       }
+       if (rc < 0)
+               goto err;
 
        pr_debug("iscsi_target_do_login_rx after rx_login_io, %p, %s:%d\n",
                        conn, current->comm, current->pid);
 
        rc = iscsi_target_do_login(conn, login);
        if (rc < 0) {
-               iscsi_target_restore_sock_callbacks(conn);
-               iscsi_target_login_drop(conn, login);
-               iscsit_deaccess_np(np, tpg, tpg_np);
+               goto err;
        } else if (!rc) {
-               if (conn->sock) {
-                       struct sock *sk = conn->sock->sk;
-
-                       write_lock_bh(&sk->sk_callback_lock);
-                       clear_bit(LOGIN_FLAGS_READ_ACTIVE, &conn->login_flags);
-                       write_unlock_bh(&sk->sk_callback_lock);
-               }
+               if (iscsi_target_sk_check_and_clear(conn, LOGIN_FLAGS_READ_ACTIVE))
+                       goto err;
        } else if (rc == 1) {
                iscsi_target_nego_release(conn);
                iscsi_post_login_handler(np, conn, zero_tsih);
                iscsit_deaccess_np(np, tpg, tpg_np);
        }
+       return;
+
+err:
+       iscsi_target_restore_sock_callbacks(conn);
+       iscsi_target_login_drop(conn, login);
+       iscsit_deaccess_np(np, tpg, tpg_np);
 }
 
 static void iscsi_target_do_cleanup(struct work_struct *work)
@@ -659,31 +700,54 @@ static void iscsi_target_sk_state_change(struct sock *sk)
                orig_state_change(sk);
                return;
        }
+       state = __iscsi_target_sk_check_close(sk);
+       pr_debug("__iscsi_target_sk_close_change: state: %d\n", state);
+
        if (test_bit(LOGIN_FLAGS_READ_ACTIVE, &conn->login_flags)) {
                pr_debug("Got LOGIN_FLAGS_READ_ACTIVE=1 sk_state_change"
                         " conn: %p\n", conn);
+               if (state)
+                       set_bit(LOGIN_FLAGS_CLOSED, &conn->login_flags);
                write_unlock_bh(&sk->sk_callback_lock);
                orig_state_change(sk);
                return;
        }
-       if (test_and_set_bit(LOGIN_FLAGS_CLOSED, &conn->login_flags)) {
+       if (test_bit(LOGIN_FLAGS_CLOSED, &conn->login_flags)) {
                pr_debug("Got LOGIN_FLAGS_CLOSED=1 sk_state_change conn: %p\n",
                         conn);
                write_unlock_bh(&sk->sk_callback_lock);
                orig_state_change(sk);
                return;
        }
+       /*
+        * If the TCP connection has dropped, go ahead and set LOGIN_FLAGS_CLOSED,
+        * but only queue conn->login_work -> iscsi_target_do_login_rx()
+        * processing if LOGIN_FLAGS_INITIAL_PDU has already been cleared.
+        *
+        * When iscsi_target_do_login_rx() runs, iscsi_target_sk_check_close()
+        * will detect the dropped TCP connection from delayed workqueue context.
+        *
+        * If LOGIN_FLAGS_INITIAL_PDU is still set, which means the initial
+        * iscsi_target_start_negotiation() is running, iscsi_target_do_login()
+        * via iscsi_target_sk_check_close() or iscsi_target_start_negotiation()
+        * via iscsi_target_sk_check_and_clear() is responsible for detecting the
+        * dropped TCP connection in iscsi_np process context, and cleaning up
+        * the remaining iscsi connection resources.
+        */
+       if (state) {
+               pr_debug("iscsi_target_sk_state_change got failed state\n");
+               set_bit(LOGIN_FLAGS_CLOSED, &conn->login_flags);
+               state = test_bit(LOGIN_FLAGS_INITIAL_PDU, &conn->login_flags);
+               write_unlock_bh(&sk->sk_callback_lock);
 
-       state = iscsi_target_sk_state_check(sk);
-       write_unlock_bh(&sk->sk_callback_lock);
-
-       pr_debug("iscsi_target_sk_state_change: state: %d\n", state);
+               orig_state_change(sk);
 
-       if (!state) {
-               pr_debug("iscsi_target_sk_state_change got failed state\n");
-               schedule_delayed_work(&conn->login_cleanup_work, 0);
+               if (!state)
+                       schedule_delayed_work(&conn->login_work, 0);
                return;
        }
+       write_unlock_bh(&sk->sk_callback_lock);
+
        orig_state_change(sk);
 }
 
@@ -946,6 +1010,15 @@ static int iscsi_target_do_login(struct iscsi_conn *conn, struct iscsi_login *lo
                        if (iscsi_target_handle_csg_one(conn, login) < 0)
                                return -1;
                        if (login_rsp->flags & ISCSI_FLAG_LOGIN_TRANSIT) {
+                               /*
+                                * Check to make sure the TCP connection has not
+                                * dropped asynchronously while session reinstatement
+                                * was occurring in this kthread context, before
+                                * transitioning to full feature phase operation.
+                                */
+                               if (iscsi_target_sk_check_close(conn))
+                                       return -1;
+
                                login->tsih = conn->sess->tsih;
                                login->login_complete = 1;
                                iscsi_target_restore_sock_callbacks(conn);
@@ -972,21 +1045,6 @@ static int iscsi_target_do_login(struct iscsi_conn *conn, struct iscsi_login *lo
                break;
        }
 
-       if (conn->sock) {
-               struct sock *sk = conn->sock->sk;
-               bool state;
-
-               read_lock_bh(&sk->sk_callback_lock);
-               state = iscsi_target_sk_state_check(sk);
-               read_unlock_bh(&sk->sk_callback_lock);
-
-               if (!state) {
-                       pr_debug("iscsi_target_do_login() failed state for"
-                                " conn: %p\n", conn);
-                       return -1;
-               }
-       }
-
        return 0;
 }
 
@@ -1255,10 +1313,22 @@ int iscsi_target_start_negotiation(
 
                write_lock_bh(&sk->sk_callback_lock);
                set_bit(LOGIN_FLAGS_READY, &conn->login_flags);
+               set_bit(LOGIN_FLAGS_INITIAL_PDU, &conn->login_flags);
                write_unlock_bh(&sk->sk_callback_lock);
        }
-
+       /*
+        * If iscsi_target_do_login returns zero to signal more PDU
+        * exchanges are required to complete the login, go ahead and
+        * clear LOGIN_FLAGS_INITIAL_PDU but only if the TCP connection
+        * is still active.
+        *
+        * Otherwise, if the TCP connection dropped asynchronously, go ahead
+        * and perform connection cleanup now.
+        */
        ret = iscsi_target_do_login(conn, login);
+       if (!ret && iscsi_target_sk_check_and_clear(conn, LOGIN_FLAGS_INITIAL_PDU))
+               ret = -1;
+
        if (ret < 0) {
                cancel_delayed_work_sync(&conn->login_work);
                cancel_delayed_work_sync(&conn->login_cleanup_work);
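
LOGIN_FLAGS_INITIAL_PDU is set before the first iscsi_target_do_login() call and cleared afterwards via iscsi_target_sk_check_and_clear(), all under sk->sk_callback_lock, so ->sk_state_change() and the login path agree on which side cleans up a dropped TCP connection. A hedged sketch of the check-and-clear idiom (example_check_and_clear is a hypothetical helper):

#include <linux/bitops.h>
#include <linux/types.h>
#include <net/sock.h>
#include <net/tcp_states.h>

static bool example_check_and_clear(struct sock *sk, unsigned long *flags,
				    unsigned int bit)
{
	bool closed;

	write_lock_bh(&sk->sk_callback_lock);
	closed = (sk->sk_state == TCP_CLOSE_WAIT ||
		  sk->sk_state == TCP_CLOSE);
	if (!closed)
		clear_bit(bit, flags);	/* socket alive: we own the next step */
	write_unlock_bh(&sk->sk_callback_lock);

	return closed;
}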
index 37f57357d4a0827f5669cb89d1619c4651192547..6025935036c976edeeee0d7a91df79a66aa84a2b 100644 (file)
@@ -1160,15 +1160,28 @@ target_cmd_size_check(struct se_cmd *cmd, unsigned int size)
        if (cmd->unknown_data_length) {
                cmd->data_length = size;
        } else if (size != cmd->data_length) {
-               pr_warn("TARGET_CORE[%s]: Expected Transfer Length:"
+               pr_warn_ratelimited("TARGET_CORE[%s]: Expected Transfer Length:"
                        " %u does not match SCSI CDB Length: %u for SAM Opcode:"
                        " 0x%02x\n", cmd->se_tfo->get_fabric_name(),
                                cmd->data_length, size, cmd->t_task_cdb[0]);
 
-               if (cmd->data_direction == DMA_TO_DEVICE &&
-                   cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) {
-                       pr_err("Rejecting underflow/overflow WRITE data\n");
-                       return TCM_INVALID_CDB_FIELD;
+               if (cmd->data_direction == DMA_TO_DEVICE) {
+                       if (cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) {
+                               pr_err_ratelimited("Rejecting underflow/overflow"
+                                                  " for WRITE data CDB\n");
+                               return TCM_INVALID_CDB_FIELD;
+                       }
+                       /*
+                        * Some fabric drivers like iscsi-target still expect to
+                        * always reject overflow writes.  Reject this case until
+                        * full fabric driver level support for overflow writes
+                        * is introduced tree-wide.
+                        */
+                       if (size > cmd->data_length) {
+                               pr_err_ratelimited("Rejecting overflow for"
+                                                  " WRITE control CDB\n");
+                               return TCM_INVALID_CDB_FIELD;
+                       }
                }
                /*
                 * Reject READ_* or WRITE_* with overflow/underflow for
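
The reworked check rejects any length mismatch on WRITEs carrying a data CDB, and additionally rejects overflow (size greater than the CDB-declared length) even for control CDBs, since fabrics such as iscsi-target cannot handle overflow writes yet. A plain-C model of the decision (hypothetical userspace sketch, simplified types):

#include <stdbool.h>

static bool write_len_ok(bool data_cdb, bool to_device,
			 unsigned int size, unsigned int data_length)
{
	if (!to_device || size == data_length)
		return true;
	if (data_cdb)
		return false;		/* reject underflow and overflow */
	return size <= data_length;	/* control CDB: reject overflow only */
}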
index 9045837f748bd3b602256cfa9e83058ae92b8b33..beb5f098f32d6f7bb5851deb810065ab37e4ac4a 100644 (file)
@@ -97,7 +97,7 @@ struct tcmu_hba {
 
 struct tcmu_dev {
        struct list_head node;
-
+       struct kref kref;
        struct se_device se_dev;
 
        char *name;
@@ -969,6 +969,7 @@ static struct se_device *tcmu_alloc_device(struct se_hba *hba, const char *name)
        udev = kzalloc(sizeof(struct tcmu_dev), GFP_KERNEL);
        if (!udev)
                return NULL;
+       kref_init(&udev->kref);
 
        udev->name = kstrdup(name, GFP_KERNEL);
        if (!udev->name) {
@@ -1145,6 +1146,24 @@ static int tcmu_open(struct uio_info *info, struct inode *inode)
        return 0;
 }
 
+static void tcmu_dev_call_rcu(struct rcu_head *p)
+{
+       struct se_device *dev = container_of(p, struct se_device, rcu_head);
+       struct tcmu_dev *udev = TCMU_DEV(dev);
+
+       kfree(udev->uio_info.name);
+       kfree(udev->name);
+       kfree(udev);
+}
+
+static void tcmu_dev_kref_release(struct kref *kref)
+{
+       struct tcmu_dev *udev = container_of(kref, struct tcmu_dev, kref);
+       struct se_device *dev = &udev->se_dev;
+
+       call_rcu(&dev->rcu_head, tcmu_dev_call_rcu);
+}
+
 static int tcmu_release(struct uio_info *info, struct inode *inode)
 {
        struct tcmu_dev *udev = container_of(info, struct tcmu_dev, uio_info);
@@ -1152,7 +1171,8 @@ static int tcmu_release(struct uio_info *info, struct inode *inode)
        clear_bit(TCMU_DEV_BIT_OPEN, &udev->flags);
 
        pr_debug("close\n");
-
+       /* release ref from configure */
+       kref_put(&udev->kref, tcmu_dev_kref_release);
        return 0;
 }
 
@@ -1272,6 +1292,12 @@ static int tcmu_configure_device(struct se_device *dev)
                dev->dev_attrib.hw_max_sectors = 128;
        dev->dev_attrib.hw_queue_depth = 128;
 
+       /*
+        * Get a ref in case userspace closes the uio device before
+        * LIO has initiated tcmu_free_device.
+        */
+       kref_get(&udev->kref);
+
        ret = tcmu_netlink_event(TCMU_CMD_ADDED_DEVICE, udev->uio_info.name,
                                 udev->uio_info.uio_dev->minor);
        if (ret)
@@ -1284,11 +1310,13 @@ static int tcmu_configure_device(struct se_device *dev)
        return 0;
 
 err_netlink:
+       kref_put(&udev->kref, tcmu_dev_kref_release);
        uio_unregister_device(&udev->uio_info);
 err_register:
        vfree(udev->mb_addr);
 err_vzalloc:
        kfree(info->name);
+       info->name = NULL;
 
        return ret;
 }
@@ -1302,14 +1330,6 @@ static int tcmu_check_and_free_pending_cmd(struct tcmu_cmd *cmd)
        return -EINVAL;
 }
 
-static void tcmu_dev_call_rcu(struct rcu_head *p)
-{
-       struct se_device *dev = container_of(p, struct se_device, rcu_head);
-       struct tcmu_dev *udev = TCMU_DEV(dev);
-
-       kfree(udev);
-}
-
 static bool tcmu_dev_configured(struct tcmu_dev *udev)
 {
        return udev->uio_info.uio_dev ? true : false;
@@ -1364,10 +1384,10 @@ static void tcmu_free_device(struct se_device *dev)
                                   udev->uio_info.uio_dev->minor);
 
                uio_unregister_device(&udev->uio_info);
-               kfree(udev->uio_info.name);
-               kfree(udev->name);
        }
-       call_rcu(&dev->rcu_head, tcmu_dev_call_rcu);
+
+       /* release ref from init */
+       kref_put(&udev->kref, tcmu_dev_kref_release);
 }
 
 enum {
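
The tcmu changes take an extra reference in tcmu_configure_device() and drop one from both the uio release path and tcmu_free_device(); whichever put comes last triggers the actual free, deferred through call_rcu(). A kernel-style sketch of the kref-plus-RCU release pattern (hypothetical names):

#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct example_dev {
	struct kref kref;
	struct rcu_head rcu;
};

static void example_free_rcu(struct rcu_head *p)
{
	kfree(container_of(p, struct example_dev, rcu));
}

static void example_release(struct kref *kref)
{
	struct example_dev *d = container_of(kref, struct example_dev, kref);

	/* defer the free so concurrent RCU readers stay safe */
	call_rcu(&d->rcu, example_free_rcu);
}

/* usage: kref_init() at allocation, kref_get() when a second owner
 * appears, and kref_put(&d->kref, example_release) in each owner's
 * teardown path. */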
index ab08af4654ef18c05d1ab9725dd96fb44f9264bf..42c098e86f849393eeb580e85fef1f93a14bdac7 100644 (file)
@@ -9,8 +9,9 @@ config BCM2835_THERMAL
 config BCM_NS_THERMAL
        tristate "Northstar thermal driver"
        depends on ARCH_BCM_IPROC || COMPILE_TEST
+       default y if ARCH_BCM_IPROC
        help
-         Northstar is a family of SoCs that includes e.g. BCM4708, BCM47081,
-         BCM4709 and BCM47094. It contains DMU (Device Management Unit) block
-         with a thermal sensor that allows checking CPU temperature. This
-         driver provides support for it.
+         Support for the Northstar and Northstar Plus family of SoCs (e.g.
+         BCM4708, BCM4709, BCM5301x, BCM95852X, etc). These SoCs contain a
+         DMU (Device Management Unit) block with a thermal sensor that
+         allows checking the CPU temperature.
index 644ba526d9ea09d2b1d0525cbf298cf10893cad0..4362a69ac88dcff8573e88e16fd6f3653432dd68 100644 (file)
@@ -195,7 +195,6 @@ static struct thermal_zone_of_device_ops tmu_tz_ops = {
 static int qoriq_tmu_probe(struct platform_device *pdev)
 {
        int ret;
-       const struct thermal_trip *trip;
        struct qoriq_tmu_data *data;
        struct device_node *np = pdev->dev.of_node;
        u32 site = 0;
@@ -243,8 +242,6 @@ static int qoriq_tmu_probe(struct platform_device *pdev)
                goto err_tmu;
        }
 
-       trip = of_thermal_get_trip_points(data->tz);
-
        /* Enable monitoring */
        site |= 0x1 << (15 - data->sensor_id);
        tmu_write(data, site | TMR_ME | TMR_ALPF, &data->regs->tmr);
index b21b9cc2c8d6412897193a4ce53e295966737e9c..5a51c740e37238b31b9883d689aa67ffaea15704 100644 (file)
@@ -359,7 +359,7 @@ static DECLARE_DELAYED_WORK(thermal_emergency_poweroff_work,
  * This may be called from any critical situation to trigger a system shutdown
  * after a known period of time. By default this is not scheduled.
  */
-void thermal_emergency_poweroff(void)
+static void thermal_emergency_poweroff(void)
 {
        int poweroff_delay_ms = CONFIG_THERMAL_EMERGENCY_POWEROFF_DELAY_MS;
        /*
index ba9c302454fb398490046be46831c0a0f821788c..696ab3046b87d98cff65182f60499c98578175bd 100644 (file)
@@ -1010,7 +1010,7 @@ ti_bandgap_force_single_read(struct ti_bandgap *bgp, int id)
 }
 
 /**
- * ti_bandgap_set_continous_mode() - One time enabling of continuous mode
+ * ti_bandgap_set_continuous_mode() - One time enabling of continuous mode
  * @bgp: pointer to struct ti_bandgap
  *
  * Call this function only if HAS(MODE_CONFIG) is set. As this driver may
@@ -1214,22 +1214,18 @@ static struct ti_bandgap *ti_bandgap_build(struct platform_device *pdev)
        }
 
        bgp = devm_kzalloc(&pdev->dev, sizeof(*bgp), GFP_KERNEL);
-       if (!bgp) {
-               dev_err(&pdev->dev, "Unable to allocate mem for driver ref\n");
+       if (!bgp)
                return ERR_PTR(-ENOMEM);
-       }
 
        of_id = of_match_device(of_ti_bandgap_match, &pdev->dev);
        if (of_id)
                bgp->conf = of_id->data;
 
        /* register shadow for context save and restore */
-       bgp->regval = devm_kzalloc(&pdev->dev, sizeof(*bgp->regval) *
-                                  bgp->conf->sensor_count, GFP_KERNEL);
-       if (!bgp->regval) {
-               dev_err(&pdev->dev, "Unable to allocate mem for driver ref\n");
+       bgp->regval = devm_kcalloc(&pdev->dev, bgp->conf->sensor_count,
+                                  sizeof(*bgp->regval), GFP_KERNEL);
+       if (!bgp->regval)
                return ERR_PTR(-ENOMEM);
-       }
 
        i = 0;
        do {
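
Switching from devm_kzalloc() with an open-coded multiplication to devm_kcalloc() gets overflow checking on count * size for free, and the out-of-memory dev_err() messages are dropped because the allocator core already logs failures. A short sketch (example_alloc_shadow is a hypothetical helper):

#include <linux/device.h>
#include <linux/slab.h>
#include <linux/types.h>

static u32 *example_alloc_shadow(struct device *dev, size_t sensor_count)
{
	/* count-first form lets the allocator reject count * size overflow */
	return devm_kcalloc(dev, sensor_count, sizeof(u32), GFP_KERNEL);
}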
index 7ac9bcdf1e61a551bb8286a48c0afc492e4a95ee..61fe8d6fd24e1bb0cf2cdfd2a4de67ceb05123bf 100644 (file)
@@ -764,7 +764,7 @@ static int __init ehv_bc_init(void)
        ehv_bc_driver = alloc_tty_driver(count);
        if (!ehv_bc_driver) {
                ret = -ENOMEM;
-               goto error;
+               goto err_free_bcs;
        }
 
        ehv_bc_driver->driver_name = "ehv-bc";
@@ -778,24 +778,23 @@ static int __init ehv_bc_init(void)
        ret = tty_register_driver(ehv_bc_driver);
        if (ret) {
                pr_err("ehv-bc: could not register tty driver (ret=%i)\n", ret);
-               goto error;
+               goto err_put_tty_driver;
        }
 
        ret = platform_driver_register(&ehv_bc_tty_driver);
        if (ret) {
                pr_err("ehv-bc: could not register platform driver (ret=%i)\n",
                       ret);
-               goto error;
+               goto err_deregister_tty_driver;
        }
 
        return 0;
 
-error:
-       if (ehv_bc_driver) {
-               tty_unregister_driver(ehv_bc_driver);
-               put_tty_driver(ehv_bc_driver);
-       }
-
+err_deregister_tty_driver:
+       tty_unregister_driver(ehv_bc_driver);
+err_put_tty_driver:
+       put_tty_driver(ehv_bc_driver);
+err_free_bcs:
        kfree(bcs);
 
        return ret;
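
The rewritten ehv_bc_init() error path replaces a single catch-all label, which could call tty_unregister_driver() on a driver that was never registered, with one label per completed step, unwound in reverse order. A standalone sketch of the idiom (step_*/undo_* are hypothetical helpers):

static int step_a(void) { return 0; }	/* hypothetical setup step */
static int step_b(void) { return 0; }
static void undo_a(void) { }

static int example_init(void)
{
	int ret;

	ret = step_a();
	if (ret)
		return ret;		/* nothing to unwind yet */

	ret = step_b();
	if (ret)
		goto err_undo_a;

	return 0;

err_undo_a:
	undo_a();			/* undo exactly what succeeded */
	return ret;
}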
index 433de5ea9b02f53386b9367e64ad880ac6795e07..f71b47334149ace03ff276e41d8ecfd2a7038c6e 100644 (file)
@@ -122,6 +122,18 @@ void serdev_device_write_wakeup(struct serdev_device *serdev)
 }
 EXPORT_SYMBOL_GPL(serdev_device_write_wakeup);
 
+int serdev_device_write_buf(struct serdev_device *serdev,
+                           const unsigned char *buf, size_t count)
+{
+       struct serdev_controller *ctrl = serdev->ctrl;
+
+       if (!ctrl || !ctrl->ops->write_buf)
+               return -EINVAL;
+
+       return ctrl->ops->write_buf(ctrl, buf, count);
+}
+EXPORT_SYMBOL_GPL(serdev_device_write_buf);
+
 int serdev_device_write(struct serdev_device *serdev,
                        const unsigned char *buf, size_t count,
                        unsigned long timeout)
index 487c88f6aa0e36b5f986f5c02477e65f0f55143c..d0a021c93986252ceae888d7c26bde3eba295e9e 100644 (file)
@@ -102,9 +102,6 @@ static int ttyport_open(struct serdev_controller *ctrl)
                return PTR_ERR(tty);
        serport->tty = tty;
 
-       serport->port->client_ops = &client_ops;
-       serport->port->client_data = ctrl;
-
        if (tty->ops->open)
                tty->ops->open(serport->tty, NULL);
        else
@@ -215,6 +212,7 @@ struct device *serdev_tty_port_register(struct tty_port *port,
                                        struct device *parent,
                                        struct tty_driver *drv, int idx)
 {
+       const struct tty_port_client_operations *old_ops;
        struct serdev_controller *ctrl;
        struct serport *serport;
        int ret;
@@ -233,28 +231,37 @@ struct device *serdev_tty_port_register(struct tty_port *port,
 
        ctrl->ops = &ctrl_ops;
 
+       old_ops = port->client_ops;
+       port->client_ops = &client_ops;
+       port->client_data = ctrl;
+
        ret = serdev_controller_add(ctrl);
        if (ret)
-               goto err_controller_put;
+               goto err_reset_data;
 
        dev_info(&ctrl->dev, "tty port %s%d registered\n", drv->name, idx);
        return &ctrl->dev;
 
-err_controller_put:
+err_reset_data:
+       port->client_data = NULL;
+       port->client_ops = old_ops;
        serdev_controller_put(ctrl);
+
        return ERR_PTR(ret);
 }
 
-void serdev_tty_port_unregister(struct tty_port *port)
+int serdev_tty_port_unregister(struct tty_port *port)
 {
        struct serdev_controller *ctrl = port->client_data;
        struct serport *serport = serdev_controller_get_drvdata(ctrl);
 
        if (!serport)
-               return;
+               return -ENODEV;
 
        serdev_controller_remove(ctrl);
        port->client_ops = NULL;
        port->client_data = NULL;
        serdev_controller_put(ctrl);
+
+       return 0;
 }
index 09a65a3ec7f7df0bb3f7355fd7758d797169f960..68fd045a7025047726860547ecd661b95d61ac80 100644 (file)
@@ -47,6 +47,7 @@
 /*
  * These are definitions for the Exar XR17V35X and XR17(C|D)15X
  */
+#define UART_EXAR_INT0         0x80
 #define UART_EXAR_SLEEP                0x8b    /* Sleep mode */
 #define UART_EXAR_DVID         0x8d    /* Device identification */
 
@@ -1337,7 +1338,7 @@ out_lock:
        /*
         * Check if the device is a Fintek F81216A
         */
-       if (port->type == PORT_16550A)
+       if (port->type == PORT_16550A && port->iotype == UPIO_PORT)
                fintek_8250_probe(up);
 
        if (up->capabilities != old_capabilities) {
@@ -1869,17 +1870,13 @@ static int serial8250_default_handle_irq(struct uart_port *port)
 static int exar_handle_irq(struct uart_port *port)
 {
        unsigned int iir = serial_port_in(port, UART_IIR);
-       int ret;
+       int ret = 0;
 
-       ret = serial8250_handle_irq(port, iir);
+       if (((port->type == PORT_XR17V35X) || (port->type == PORT_XR17D15X)) &&
+           serial_port_in(port, UART_EXAR_INT0) != 0)
+               ret = 1;
 
-       if ((port->type == PORT_XR17V35X) ||
-          (port->type == PORT_XR17D15X)) {
-               serial_port_in(port, 0x80);
-               serial_port_in(port, 0x81);
-               serial_port_in(port, 0x82);
-               serial_port_in(port, 0x83);
-       }
+       ret |= serial8250_handle_irq(port, iir);
 
        return ret;
 }
@@ -2177,6 +2174,8 @@ int serial8250_do_startup(struct uart_port *port)
        serial_port_in(port, UART_RX);
        serial_port_in(port, UART_IIR);
        serial_port_in(port, UART_MSR);
+       if ((port->type == PORT_XR17V35X) || (port->type == PORT_XR17D15X))
+               serial_port_in(port, UART_EXAR_INT0);
 
        /*
         * At this point, there's no way the LSR could still be 0xff;
@@ -2335,6 +2334,8 @@ dont_test_tx_en:
        serial_port_in(port, UART_RX);
        serial_port_in(port, UART_IIR);
        serial_port_in(port, UART_MSR);
+       if ((port->type == PORT_XR17V35X) || (port->type == PORT_XR17D15X))
+               serial_port_in(port, UART_EXAR_INT0);
        up->lsr_saved_flags = 0;
        up->msr_saved_flags = 0;
 
index 18e3f8342b8554ae4e7d414dd39653fb3a33b26f..0475f5d261cef6ee60f7bcd2e9cde6572a520c46 100644 (file)
@@ -478,6 +478,7 @@ static int altera_jtaguart_remove(struct platform_device *pdev)
 
        port = &altera_jtaguart_ports[i].port;
        uart_remove_one_port(&altera_jtaguart_driver, port);
+       iounmap(port->membase);
 
        return 0;
 }
index 46d3438a0d27014ce97115e7a17b0eb024f12035..3e4b717670d7432e32d4b66568900773f6e239ce 100644 (file)
@@ -615,6 +615,7 @@ static int altera_uart_remove(struct platform_device *pdev)
        if (port) {
                uart_remove_one_port(&altera_uart_driver, port);
                port->mapbase = 0;
+               iounmap(port->membase);
        }
 
        return 0;
index ebd8569f9ad5da126b2ff15985943e630135925a..9fff25be87f9db91273aa96e5b015705e7fd9118 100644 (file)
@@ -27,6 +27,7 @@
 #define UARTn_FRAME            0x04
 #define UARTn_FRAME_DATABITS__MASK     0x000f
 #define UARTn_FRAME_DATABITS(n)                ((n) - 3)
+#define UARTn_FRAME_PARITY__MASK       0x0300
 #define UARTn_FRAME_PARITY_NONE                0x0000
 #define UARTn_FRAME_PARITY_EVEN                0x0200
 #define UARTn_FRAME_PARITY_ODD         0x0300
@@ -572,12 +573,16 @@ static void efm32_uart_console_get_options(struct efm32_uart_port *efm_port,
                        16 * (4 + (clkdiv >> 6)));
 
        frame = efm32_uart_read32(efm_port, UARTn_FRAME);
-       if (frame & UARTn_FRAME_PARITY_ODD)
+       switch (frame & UARTn_FRAME_PARITY__MASK) {
+       case UARTn_FRAME_PARITY_ODD:
                *parity = 'o';
-       else if (frame & UARTn_FRAME_PARITY_EVEN)
+               break;
+       case UARTn_FRAME_PARITY_EVEN:
                *parity = 'e';
-       else
+               break;
+       default:
                *parity = 'n';
+       }
 
        *bits = (frame & UARTn_FRAME_DATABITS__MASK) -
                        UARTn_FRAME_DATABITS(4) + 4;
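
The old console-options code tested frame & UARTn_FRAME_PARITY_ODD, but the ODD encoding (0x0300) contains the EVEN bit (0x0200), so even parity also matched the odd test. Masking out the two-bit field first and switching on the result decodes the register correctly. A standalone demo of the pitfall:

#include <stdio.h>

#define PARITY_MASK 0x0300
#define PARITY_EVEN 0x0200
#define PARITY_ODD  0x0300

static char parity(unsigned int frame)
{
	switch (frame & PARITY_MASK) {
	case PARITY_ODD:  return 'o';
	case PARITY_EVEN: return 'e';
	default:          return 'n';
	}
}

int main(void)
{
	/* prints "n e o"; (0x0200 & PARITY_ODD) would wrongly be nonzero */
	printf("%c %c %c\n", parity(0x0000), parity(0x0200), parity(0x0300));
	return 0;
}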
index 157883653256da9ebeae89d7dabead3285656c82..f190a84a02465668d64f3b6a57d6462588d781ad 100644 (file)
@@ -1382,9 +1382,9 @@ static struct spi_driver ifx_spi_driver = {
 static void __exit ifx_spi_exit(void)
 {
        /* unregister */
+       spi_unregister_driver(&ifx_spi_driver);
        tty_unregister_driver(tty_drv);
        put_tty_driver(tty_drv);
-       spi_unregister_driver(&ifx_spi_driver);
        unregister_reboot_notifier(&ifx_modem_reboot_notifier_block);
 }
 
index 33509b4beaec237715de1e8febd63e861fbb9c15..bbefddd92bfeae77b637033af8028ac481642931 100644 (file)
@@ -2184,7 +2184,9 @@ static int serial_imx_probe(struct platform_device *pdev)
                 * and DCD (when they are outputs) or enables the respective
                 * irqs. So set this bit early, i.e. before requesting irqs.
                 */
-               writel(UFCR_DCEDTE, sport->port.membase + UFCR);
+               reg = readl(sport->port.membase + UFCR);
+               if (!(reg & UFCR_DCEDTE))
+                       writel(reg | UFCR_DCEDTE, sport->port.membase + UFCR);
 
                /*
                 * Disable UCR3_RI and UCR3_DCD irqs. They are also not
@@ -2195,7 +2197,15 @@ static int serial_imx_probe(struct platform_device *pdev)
                       sport->port.membase + UCR3);
 
        } else {
-               writel(0, sport->port.membase + UFCR);
+               unsigned long ucr3 = UCR3_DSR;
+
+               reg = readl(sport->port.membase + UFCR);
+               if (reg & UFCR_DCEDTE)
+                       writel(reg & ~UFCR_DCEDTE, sport->port.membase + UFCR);
+
+               if (!is_imx1_uart(sport))
+                       ucr3 |= IMX21_UCR3_RXDMUXSEL | UCR3_ADNIMP;
+               writel(ucr3, sport->port.membase + UCR3);
        }
 
        clk_disable_unprepare(sport->clk_ipg);
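
The imx probe path now reads UFCR back and flips only the DCEDTE bit, instead of writing the whole register and clobbering bits (such as FIFO trigger levels) configured earlier. A small kernel-style sketch of the read-modify-write pattern (example_rmw_bit is a hypothetical helper):

#include <linux/io.h>
#include <linux/types.h>

static void example_rmw_bit(void __iomem *reg, u32 bit, bool on)
{
	u32 v = readl(reg);

	/* touch the register only when the bit actually changes */
	if (on && !(v & bit))
		writel(v | bit, reg);
	else if (!on && (v & bit))
		writel(v & ~bit, reg);
}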
index 0f45b7884a2c58a299668591e87bead1f6628f98..13bfd5dcffce5c0bfaee47879277e32aedf308a9 100644 (file)
@@ -2083,7 +2083,7 @@ int uart_suspend_port(struct uart_driver *drv, struct uart_port *uport)
        mutex_lock(&port->mutex);
 
        tty_dev = device_find_child(uport->dev, &match, serial_match_port);
-       if (device_may_wakeup(tty_dev)) {
+       if (tty_dev && device_may_wakeup(tty_dev)) {
                if (!enable_irq_wake(uport->irq))
                        uport->irq_wake = 1;
                put_device(tty_dev);
@@ -2782,7 +2782,7 @@ int uart_add_one_port(struct uart_driver *drv, struct uart_port *uport)
         * Register the port whether it's detected or not.  This allows
         * setserial to be used to alter this port's parameters.
         */
-       tty_dev = tty_port_register_device_attr(port, drv->tty_driver,
+       tty_dev = tty_port_register_device_attr_serdev(port, drv->tty_driver,
                        uport->line, uport->dev, port, uport->tty_groups);
        if (likely(!IS_ERR(tty_dev))) {
                device_set_wakeup_capable(tty_dev, 1);
@@ -2845,7 +2845,7 @@ int uart_remove_one_port(struct uart_driver *drv, struct uart_port *uport)
        /*
         * Remove the devices from the tty layer
         */
-       tty_unregister_device(drv->tty_driver, uport->line);
+       tty_port_unregister_device(port, drv->tty_driver, uport->line);
 
        tty = tty_port_tty_get(port);
        if (tty) {
index 1d21a9c1d33e6e3c5ef007a25e4b4550fb62a51c..6b137194069fee47bc45b1866865ee27adfd33c5 100644 (file)
@@ -128,20 +128,86 @@ struct device *tty_port_register_device_attr(struct tty_port *port,
                struct tty_driver *driver, unsigned index,
                struct device *device, void *drvdata,
                const struct attribute_group **attr_grp)
+{
+       tty_port_link_device(port, driver, index);
+       return tty_register_device_attr(driver, index, device, drvdata,
+                       attr_grp);
+}
+EXPORT_SYMBOL_GPL(tty_port_register_device_attr);
+
+/**
+ * tty_port_register_device_attr_serdev - register tty or serdev device
+ * @port: tty_port of the device
+ * @driver: tty_driver for this device
+ * @index: index of the tty
+ * @device: parent if exists, otherwise NULL
+ * @drvdata: driver data for the device
+ * @attr_grp: attribute group for the device
+ *
+ * Register a serdev or tty device, depending on whether the parent device
+ * has any serdev clients defined.
+ */
+struct device *tty_port_register_device_attr_serdev(struct tty_port *port,
+               struct tty_driver *driver, unsigned index,
+               struct device *device, void *drvdata,
+               const struct attribute_group **attr_grp)
 {
        struct device *dev;
 
        tty_port_link_device(port, driver, index);
 
        dev = serdev_tty_port_register(port, device, driver, index);
-       if (PTR_ERR(dev) != -ENODEV)
+       if (PTR_ERR(dev) != -ENODEV) {
                /* Skip creating cdev if we registered a serdev device */
                return dev;
+       }
 
        return tty_register_device_attr(driver, index, device, drvdata,
                        attr_grp);
 }
-EXPORT_SYMBOL_GPL(tty_port_register_device_attr);
+EXPORT_SYMBOL_GPL(tty_port_register_device_attr_serdev);
+
+/**
+ * tty_port_register_device_serdev - register tty or serdev device
+ * @port: tty_port of the device
+ * @driver: tty_driver for this device
+ * @index: index of the tty
+ * @device: parent if exists, otherwise NULL
+ *
+ * Register a serdev or tty device, depending on whether the parent device
+ * has any serdev clients defined.
+ */
+struct device *tty_port_register_device_serdev(struct tty_port *port,
+               struct tty_driver *driver, unsigned index,
+               struct device *device)
+{
+       return tty_port_register_device_attr_serdev(port, driver, index,
+                       device, NULL, NULL);
+}
+EXPORT_SYMBOL_GPL(tty_port_register_device_serdev);
+
+/**
+ * tty_port_unregister_device - deregister a tty or serdev device
+ * @port: tty_port of the device
+ * @driver: tty_driver for this device
+ * @index: index of the tty
+ *
+ * If a tty or serdev device is registered with a call to
+ * tty_port_register_device_serdev() then this function must be called when
+ * the device is gone.
+ */
+void tty_port_unregister_device(struct tty_port *port,
+               struct tty_driver *driver, unsigned index)
+{
+       int ret;
+
+       ret = serdev_tty_port_unregister(port);
+       if (ret == 0)
+               return;
+
+       tty_unregister_device(driver, index);
+}
+EXPORT_SYMBOL_GPL(tty_port_unregister_device);
 
 int tty_port_alloc_xmit_buf(struct tty_port *port)
 {
@@ -189,9 +255,6 @@ static void tty_port_destructor(struct kref *kref)
        /* check if last port ref was dropped before tty release */
        if (WARN_ON(port->itty))
                return;
-
-       serdev_tty_port_unregister(port);
-
        if (port->xmit_buf)
                free_page((unsigned long)port->xmit_buf);
        tty_port_destroy(port);
index 3fdde0b283c9b86f7fa6aaa9585593699be29fca..29308a80d66ffa118b9aec996874d65a952c29aa 100644 (file)
@@ -1671,8 +1671,12 @@ static long ceph_fallocate(struct file *file, int mode,
        }
 
        size = i_size_read(inode);
-       if (!(mode & FALLOC_FL_KEEP_SIZE))
+       if (!(mode & FALLOC_FL_KEEP_SIZE)) {
                endoff = offset + length;
+               ret = inode_newsize_ok(inode, endoff);
+               if (ret)
+                       goto unlock;
+       }
 
        if (fi->fmode & CEPH_FILE_MODE_LAZY)
                want = CEPH_CAP_FILE_BUFFER | CEPH_CAP_FILE_LAZYIO;
index c22eaf162f95c1456563b31a8362da9e531c9185..2a6889b3585f068c73091d8895639b7e941d702a 100644 (file)
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -1154,6 +1154,17 @@ static int dax_iomap_pte_fault(struct vm_fault *vmf,
                goto out;
        }
 
+       /*
+        * It is possible, particularly with mixed reads & writes to private
+        * mappings, that we have raced with a PMD fault that overlaps with
+        * the PTE we need to set up.  If so just return and the fault will be
+        * retried.
+        */
+       if (pmd_trans_huge(*vmf->pmd) || pmd_devmap(*vmf->pmd)) {
+               vmf_ret = VM_FAULT_NOPAGE;
+               goto unlock_entry;
+       }
+
        /*
         * Note that we don't bother to use iomap_apply here: DAX required
         * the file system block size to be equal the page size, which means
@@ -1397,6 +1408,18 @@ static int dax_iomap_pmd_fault(struct vm_fault *vmf,
        if (IS_ERR(entry))
                goto fallback;
 
+       /*
+        * It is possible, particularly with mixed reads & writes to private
+        * mappings, that we have raced with a PTE fault that overlaps with
+        * the PMD we need to set up.  If so just return and the fault will be
+        * retried.
+        */
+       if (!pmd_none(*vmf->pmd) && !pmd_trans_huge(*vmf->pmd) &&
+                       !pmd_devmap(*vmf->pmd)) {
+               result = 0;
+               goto unlock_entry;
+       }
+
        /*
         * Note that we don't use iomap_apply here.  We aren't doing I/O, only
         * setting up a mapping, so really we're using iomap_begin() as a way
index f865b96374df2b5c40ecfb13663154499ec09b31..d2955daf17a4fcefa2ded3a412f67732315de12a 100644 (file)
@@ -659,7 +659,7 @@ static void log_write_header(struct gfs2_sbd *sdp, u32 flags)
        struct gfs2_log_header *lh;
        unsigned int tail;
        u32 hash;
-       int op_flags = REQ_PREFLUSH | REQ_FUA | REQ_META;
+       int op_flags = REQ_PREFLUSH | REQ_FUA | REQ_META | REQ_SYNC;
        struct page *page = mempool_alloc(gfs2_page_pool, GFP_NOIO);
        enum gfs2_freeze_state state = atomic_read(&sdp->sd_freeze_state);
        lh = page_address(page);
index f5714ee01000de49c5efcd3675226a3a3b7a7074..23542dc44a25c9f398b8a2a69905bf9eafbe5270 100644 (file)
@@ -454,6 +454,7 @@ ff_layout_alloc_lseg(struct pnfs_layout_hdr *lh,
                        goto out_err_free;
 
                /* fh */
+               rc = -EIO;
                p = xdr_inline_decode(&stream, 4);
                if (!p)
                        goto out_err_free;
index e9b4c3320e371a90020ea25f4be44d2d76508db7..3e24392f2caa1296c4475ed84701fdfabfff467f 100644 (file)
@@ -398,7 +398,6 @@ extern struct file_system_type nfs4_referral_fs_type;
 bool nfs_auth_info_match(const struct nfs_auth_info *, rpc_authflavor_t);
 struct dentry *nfs_try_mount(int, const char *, struct nfs_mount_info *,
                        struct nfs_subversion *);
-void nfs_initialise_sb(struct super_block *);
 int nfs_set_sb_security(struct super_block *, struct dentry *, struct nfs_mount_info *);
 int nfs_clone_sb_security(struct super_block *, struct dentry *, struct nfs_mount_info *);
 struct dentry *nfs_fs_mount_common(struct nfs_server *, int, const char *,
@@ -458,7 +457,6 @@ extern void nfs_read_prepare(struct rpc_task *task, void *calldata);
 extern void nfs_pageio_reset_read_mds(struct nfs_pageio_descriptor *pgio);
 
 /* super.c */
-void nfs_clone_super(struct super_block *, struct nfs_mount_info *);
 void nfs_umount_begin(struct super_block *);
 int  nfs_statfs(struct dentry *, struct kstatfs *);
 int  nfs_show_options(struct seq_file *, struct dentry *);
index 1a224a33a6c23c362e1bbacb8150bb9bbf02b3fd..e5686be67be8d361a32344e3aaaae235d739ffd7 100644 (file)
@@ -246,7 +246,7 @@ struct vfsmount *nfs_do_submount(struct dentry *dentry, struct nfs_fh *fh,
 
        devname = nfs_devname(dentry, page, PAGE_SIZE);
        if (IS_ERR(devname))
-               mnt = (struct vfsmount *)devname;
+               mnt = ERR_CAST(devname);
        else
                mnt = nfs_do_clone_mount(NFS_SB(dentry->d_sb), devname, &mountdata);
 
index 929d09a5310ad7df79be527a45a9aa524825b7f0..319a47db218d133d36b749641b63e5fa4489014c 100644 (file)
@@ -177,7 +177,7 @@ static ssize_t _nfs42_proc_copy(struct file *src,
        if (status)
                goto out;
 
-       if (!nfs_write_verifier_cmp(&res->write_res.verifier.verifier,
+       if (nfs_write_verifier_cmp(&res->write_res.verifier.verifier,
                                    &res->commit_res.verf->verifier)) {
                status = -EAGAIN;
                goto out;
index 692a7a8bfc7afd05ad40d6e04a4a53db07e01753..66776f02211131bd003e31748dec6774a02a077f 100644 (file)
@@ -582,7 +582,6 @@ int nfs40_walk_client_list(struct nfs_client *new,
                         */
                        nfs4_schedule_path_down_recovery(pos);
                default:
-                       spin_lock(&nn->nfs_client_lock);
                        goto out;
                }
 
index adc6ec28d4b59d3181c76ca8f33a9fed25d68ce9..c383d0913b54c90fb96020a62faaf989a5d946b3 100644 (file)
@@ -2094,12 +2094,26 @@ pnfs_generic_pg_check_layout(struct nfs_pageio_descriptor *pgio)
 }
 EXPORT_SYMBOL_GPL(pnfs_generic_pg_check_layout);
 
+/*
+ * Check for any intersection between the request and the pgio->pg_lseg,
+ * and if none, put this pgio->pg_lseg away.
+ */
+static void
+pnfs_generic_pg_check_range(struct nfs_pageio_descriptor *pgio, struct nfs_page *req)
+{
+       if (pgio->pg_lseg && !pnfs_lseg_request_intersecting(pgio->pg_lseg, req)) {
+               pnfs_put_lseg(pgio->pg_lseg);
+               pgio->pg_lseg = NULL;
+       }
+}
+
 void
 pnfs_generic_pg_init_read(struct nfs_pageio_descriptor *pgio, struct nfs_page *req)
 {
        u64 rd_size = req->wb_bytes;
 
        pnfs_generic_pg_check_layout(pgio);
+       pnfs_generic_pg_check_range(pgio, req);
        if (pgio->pg_lseg == NULL) {
                if (pgio->pg_dreq == NULL)
                        rd_size = i_size_read(pgio->pg_inode) - req_offset(req);
@@ -2131,6 +2145,7 @@ pnfs_generic_pg_init_write(struct nfs_pageio_descriptor *pgio,
                           struct nfs_page *req, u64 wb_size)
 {
        pnfs_generic_pg_check_layout(pgio);
+       pnfs_generic_pg_check_range(pgio, req);
        if (pgio->pg_lseg == NULL) {
                pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
                                                   req->wb_context,
@@ -2191,16 +2206,10 @@ pnfs_generic_pg_test(struct nfs_pageio_descriptor *pgio,
                seg_end = pnfs_end_offset(pgio->pg_lseg->pls_range.offset,
                                     pgio->pg_lseg->pls_range.length);
                req_start = req_offset(req);
-               WARN_ON_ONCE(req_start >= seg_end);
+
                /* start of request is past the last byte of this segment */
-               if (req_start >= seg_end) {
-                       /* reference the new lseg */
-                       if (pgio->pg_ops->pg_cleanup)
-                               pgio->pg_ops->pg_cleanup(pgio);
-                       if (pgio->pg_ops->pg_init)
-                               pgio->pg_ops->pg_init(pgio, req);
+               if (req_start >= seg_end)
                        return 0;
-               }
 
                /* adjust 'size' iff there are fewer bytes left in the
                 * segment than what nfs_generic_pg_test returned */
index 2d05b756a8d6504e79796d71eb5471a578d9ef5a..99731e3e332f3ec32eec26cca47556193dcf68fe 100644 (file)
@@ -593,6 +593,16 @@ pnfs_lseg_range_intersecting(const struct pnfs_layout_range *l1,
        return pnfs_is_range_intersecting(l1->offset, end1, l2->offset, end2);
 }
 
+static inline bool
+pnfs_lseg_request_intersecting(struct pnfs_layout_segment *lseg, struct nfs_page *req)
+{
+       u64 seg_last = pnfs_end_offset(lseg->pls_range.offset, lseg->pls_range.length);
+       u64 req_last = req_offset(req) + req->wb_bytes;
+
+       return pnfs_is_range_intersecting(lseg->pls_range.offset, seg_last,
+                               req_offset(req), req_last);
+}
+
 extern unsigned int layoutstats_timer;
 
 #ifdef NFS_DEBUG
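
pnfs_lseg_request_intersecting() reduces to an interval-overlap test between the layout segment's byte range and the request's byte range (the kernel helper additionally special-cases the unbounded NFS4_MAX_UINT64 end). A standalone model of the half-open overlap test:

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

static bool ranges_intersect(uint64_t s1, uint64_t e1,
			     uint64_t s2, uint64_t e2)
{
	/* half-open [s, e): overlap iff each starts before the other ends */
	return s1 < e2 && s2 < e1;
}

int main(void)
{
	assert(ranges_intersect(0, 4096, 4095, 8192));	/* one byte shared */
	assert(!ranges_intersect(0, 4096, 4096, 8192));	/* adjacent only */
	return 0;
}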
index 2f3822a4a7d565e9e2e607b61d2c580c1befdd05..eceb4eabb064953f830d1bf97a5d0daeecdd27f0 100644 (file)
@@ -2301,7 +2301,7 @@ EXPORT_SYMBOL_GPL(nfs_remount);
 /*
  * Initialise the common bits of the superblock
  */
-inline void nfs_initialise_sb(struct super_block *sb)
+static void nfs_initialise_sb(struct super_block *sb)
 {
        struct nfs_server *server = NFS_SB(sb);
 
@@ -2348,7 +2348,8 @@ EXPORT_SYMBOL_GPL(nfs_fill_super);
 /*
  * Finish setting up a cloned NFS2/3/4 superblock
  */
-void nfs_clone_super(struct super_block *sb, struct nfs_mount_info *mount_info)
+static void nfs_clone_super(struct super_block *sb,
+                           struct nfs_mount_info *mount_info)
 {
        const struct super_block *old_sb = mount_info->cloned->sb;
        struct nfs_server *server = NFS_SB(sb);
index 12feac6ee2fd461a46c7b06b7a0ed0359fb4dfd1..452334694a5d1f37cc480e5d1cf2873c4246019d 100644 (file)
@@ -334,11 +334,8 @@ nfs3svc_decode_readargs(struct svc_rqst *rqstp, __be32 *p,
        if (!p)
                return 0;
        p = xdr_decode_hyper(p, &args->offset);
-       args->count = ntohl(*p++);
-
-       if (!xdr_argsize_check(rqstp, p))
-               return 0;
 
+       args->count = ntohl(*p++);
        len = min(args->count, max_blocksize);
 
        /* set up the kvec */
@@ -352,7 +349,7 @@ nfs3svc_decode_readargs(struct svc_rqst *rqstp, __be32 *p,
                v++;
        }
        args->vlen = v;
-       return 1;
+       return xdr_argsize_check(rqstp, p);
 }
 
 int
@@ -544,11 +541,9 @@ nfs3svc_decode_readlinkargs(struct svc_rqst *rqstp, __be32 *p,
        p = decode_fh(p, &args->fh);
        if (!p)
                return 0;
-       if (!xdr_argsize_check(rqstp, p))
-               return 0;
        args->buffer = page_address(*(rqstp->rq_next_page++));
 
-       return 1;
+       return xdr_argsize_check(rqstp, p);
 }
 
 int
@@ -574,14 +569,10 @@ nfs3svc_decode_readdirargs(struct svc_rqst *rqstp, __be32 *p,
        args->verf   = p; p += 2;
        args->dircount = ~0;
        args->count  = ntohl(*p++);
-
-       if (!xdr_argsize_check(rqstp, p))
-               return 0;
-
        args->count  = min_t(u32, args->count, PAGE_SIZE);
        args->buffer = page_address(*(rqstp->rq_next_page++));
 
-       return 1;
+       return xdr_argsize_check(rqstp, p);
 }
 
 int
@@ -599,9 +590,6 @@ nfs3svc_decode_readdirplusargs(struct svc_rqst *rqstp, __be32 *p,
        args->dircount = ntohl(*p++);
        args->count    = ntohl(*p++);
 
-       if (!xdr_argsize_check(rqstp, p))
-               return 0;
-
        len = args->count = min(args->count, max_blocksize);
        while (len > 0) {
                struct page *p = *(rqstp->rq_next_page++);
@@ -609,7 +597,8 @@ nfs3svc_decode_readdirplusargs(struct svc_rqst *rqstp, __be32 *p,
                        args->buffer = page_address(p);
                len -= PAGE_SIZE;
        }
-       return 1;
+
+       return xdr_argsize_check(rqstp, p);
 }
 
 int
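
Moving xdr_argsize_check() to the final return means the decode consumes every argument word first and then verifies once that the cursor p never ran past the received data; the page-sized request head keeps the intermediate reads within allocated memory. A plain-C model of the decode-then-validate shape (hypothetical minimal types):

#include <stdbool.h>
#include <stdint.h>

struct xdr_buf_model {
	uint32_t *base;
	uint32_t *end;	/* one past the last received word */
};

static bool argsize_check(const struct xdr_buf_model *b, uint32_t *p)
{
	return p <= b->end;	/* cursor still inside the message */
}

static bool decode_read_args(struct xdr_buf_model *b, uint32_t *p,
			     uint32_t *offset, uint32_t *count)
{
	*offset = *p++;		/* ntohl() on the wire in the real code */
	*count  = *p++;
	return argsize_check(b, p);	/* single check after all reads */
}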
index c453a1998e003d3e900407b266f1a15de5d5d94b..dadb3bf305b22f352a3f91a2df06b30284b4891c 100644 (file)
@@ -1769,6 +1769,12 @@ nfsd4_proc_compound(struct svc_rqst *rqstp,
                        opdesc->op_get_currentstateid(cstate, &op->u);
                op->status = opdesc->op_func(rqstp, cstate, &op->u);
 
+               /* Only from SEQUENCE */
+               if (cstate->status == nfserr_replay_cache) {
+                       dprintk("%s NFS4.1 replay from cache\n", __func__);
+                       status = op->status;
+                       goto out;
+               }
                if (!op->status) {
                        if (opdesc->op_set_currentstateid)
                                opdesc->op_set_currentstateid(cstate, &op->u);
@@ -1779,14 +1785,7 @@ nfsd4_proc_compound(struct svc_rqst *rqstp,
                        if (need_wrongsec_check(rqstp))
                                op->status = check_nfsd_access(current_fh->fh_export, rqstp);
                }
-
 encode_op:
-               /* Only from SEQUENCE */
-               if (cstate->status == nfserr_replay_cache) {
-                       dprintk("%s NFS4.1 replay from cache\n", __func__);
-                       status = op->status;
-                       goto out;
-               }
                if (op->status == nfserr_replay_me) {
                        op->replay = &cstate->replay_owner->so_replay;
                        nfsd4_encode_replay(&resp->xdr, op);
index 6a4947a3f4fa82be4118e4ed538a171118f4baa8..de07ff625777820fefc98bfa56adea81962e8135 100644 (file)
@@ -257,9 +257,6 @@ nfssvc_decode_readargs(struct svc_rqst *rqstp, __be32 *p,
        len = args->count     = ntohl(*p++);
        p++; /* totalcount - unused */
 
-       if (!xdr_argsize_check(rqstp, p))
-               return 0;
-
        len = min_t(unsigned int, len, NFSSVC_MAXBLKSIZE_V2);
 
        /* set up somewhere to store response.
@@ -275,7 +272,7 @@ nfssvc_decode_readargs(struct svc_rqst *rqstp, __be32 *p,
                v++;
        }
        args->vlen = v;
-       return 1;
+       return xdr_argsize_check(rqstp, p);
 }
 
 int
@@ -365,11 +362,9 @@ nfssvc_decode_readlinkargs(struct svc_rqst *rqstp, __be32 *p, struct nfsd_readli
        p = decode_fh(p, &args->fh);
        if (!p)
                return 0;
-       if (!xdr_argsize_check(rqstp, p))
-               return 0;
        args->buffer = page_address(*(rqstp->rq_next_page++));
 
-       return 1;
+       return xdr_argsize_check(rqstp, p);
 }
 
 int
@@ -407,11 +402,9 @@ nfssvc_decode_readdirargs(struct svc_rqst *rqstp, __be32 *p,
        args->cookie = ntohl(*p++);
        args->count  = ntohl(*p++);
        args->count  = min_t(u32, args->count, PAGE_SIZE);
-       if (!xdr_argsize_check(rqstp, p))
-               return 0;
        args->buffer = page_address(*(rqstp->rq_next_page++));
 
-       return 1;
+       return xdr_argsize_check(rqstp, p);
 }
 
 /*
index 358258364616cd3c2fee997daca2a192719cb045..4690cd75d8d7948a056fe899bc4600ade10b8566 100644 (file)
@@ -159,7 +159,7 @@ static struct dentry *ntfs_lookup(struct inode *dir_ino, struct dentry *dent,
                                        PTR_ERR(dent_inode));
                kfree(name);
                /* Return the error code. */
-               return (struct dentry *)dent_inode;
+               return ERR_CAST(dent_inode);
        }
        /* It is guaranteed that @name is no longer allocated at this point. */
        if (MREF_ERR(mref) == -ENOENT) {
index 827fc9809bc271f09b2c3b7abf4019c31d0e1636..9f88188060db9c7fa59e6882ecf33b55cf921788 100644 (file)
@@ -119,7 +119,7 @@ check_err:
 
        if (IS_ERR(inode)) {
                mlog_errno(PTR_ERR(inode));
-               result = (void *)inode;
+               result = ERR_CAST(inode);
                goto bail;
        }
 
index 0daac5112f7a32384b5febd39bd8499f31da8c31..c0c9683934b7a7883ab59eb8bbcd412e07385d0b 100644 (file)
@@ -1,5 +1,6 @@
 config OVERLAY_FS
        tristate "Overlay filesystem support"
+       select EXPORTFS
        help
          An overlay filesystem combines two filesystems - an 'upper' filesystem
          and a 'lower' filesystem.  When a name exists in both filesystems, the
index 9008ab9fbd2ebe89d419c249455eb740c48a9eb1..7a44533f4bbf24134a95bdc030bde5779f28457a 100644 (file)
@@ -300,7 +300,11 @@ static int ovl_set_origin(struct dentry *dentry, struct dentry *lower,
                        return PTR_ERR(fh);
        }
 
-       err = ovl_do_setxattr(upper, OVL_XATTR_ORIGIN, fh, fh ? fh->len : 0, 0);
+       /*
+        * Do not fail when upper doesn't support xattrs.
+        */
+       err = ovl_check_setxattr(dentry, upper, OVL_XATTR_ORIGIN, fh,
+                                fh ? fh->len : 0, 0);
        kfree(fh);
 
        return err;
@@ -342,13 +346,14 @@ static int ovl_copy_up_locked(struct dentry *workdir, struct dentry *upperdir,
        if (tmpfile)
                temp = ovl_do_tmpfile(upperdir, stat->mode);
        else
-               temp = ovl_lookup_temp(workdir, dentry);
-       err = PTR_ERR(temp);
-       if (IS_ERR(temp))
-               goto out1;
-
+               temp = ovl_lookup_temp(workdir);
        err = 0;
-       if (!tmpfile)
+       if (IS_ERR(temp)) {
+               err = PTR_ERR(temp);
+               temp = NULL;
+       }
+
+       if (!err && !tmpfile)
                err = ovl_create_real(wdir, temp, &cattr, NULL, true);
 
        if (new_creds) {
@@ -454,6 +459,11 @@ static int ovl_copy_up_one(struct dentry *parent, struct dentry *dentry,
        ovl_path_upper(parent, &parentpath);
        upperdir = parentpath.dentry;
 
+       /* Mark parent "impure" because it may now contain non-pure upper */
+       err = ovl_set_impure(parent, upperdir);
+       if (err)
+               return err;
+
        err = vfs_getattr(&parentpath, &pstat,
                          STATX_ATIME | STATX_MTIME, AT_STATX_SYNC_AS_STAT);
        if (err)
index 723b98b9069876d1656b74735dabcaf01484e26d..a63a71656e9bdaef6ed5cadf8acdb6d8002fe1b6 100644 (file)
@@ -41,7 +41,7 @@ void ovl_cleanup(struct inode *wdir, struct dentry *wdentry)
        }
 }
 
-struct dentry *ovl_lookup_temp(struct dentry *workdir, struct dentry *dentry)
+struct dentry *ovl_lookup_temp(struct dentry *workdir)
 {
        struct dentry *temp;
        char name[20];
@@ -68,7 +68,7 @@ static struct dentry *ovl_whiteout(struct dentry *workdir,
        struct dentry *whiteout;
        struct inode *wdir = workdir->d_inode;
 
-       whiteout = ovl_lookup_temp(workdir, dentry);
+       whiteout = ovl_lookup_temp(workdir);
        if (IS_ERR(whiteout))
                return whiteout;
 
@@ -127,17 +127,28 @@ int ovl_create_real(struct inode *dir, struct dentry *newdentry,
        return err;
 }
 
-static int ovl_set_opaque(struct dentry *dentry, struct dentry *upperdentry)
+static int ovl_set_opaque_xerr(struct dentry *dentry, struct dentry *upper,
+                              int xerr)
 {
        int err;
 
-       err = ovl_do_setxattr(upperdentry, OVL_XATTR_OPAQUE, "y", 1, 0);
+       err = ovl_check_setxattr(dentry, upper, OVL_XATTR_OPAQUE, "y", 1, xerr);
        if (!err)
                ovl_dentry_set_opaque(dentry);
 
        return err;
 }
 
+static int ovl_set_opaque(struct dentry *dentry, struct dentry *upperdentry)
+{
+       /*
+        * Fail with -EIO when trying to create opaque dir and upper doesn't
+        * support xattrs. ovl_rename() calls ovl_set_opaque_xerr(-EXDEV) to
+        * return a specific error for noxattr case.
+        */
+       return ovl_set_opaque_xerr(dentry, upperdentry, -EIO);
+}
+
 /* Common operations required to be done after creation of file on upper */
 static void ovl_instantiate(struct dentry *dentry, struct inode *inode,
                            struct dentry *newdentry, bool hardlink)
@@ -162,6 +173,11 @@ static bool ovl_type_merge(struct dentry *dentry)
        return OVL_TYPE_MERGE(ovl_path_type(dentry));
 }
 
+static bool ovl_type_origin(struct dentry *dentry)
+{
+       return OVL_TYPE_ORIGIN(ovl_path_type(dentry));
+}
+
 static int ovl_create_upper(struct dentry *dentry, struct inode *inode,
                            struct cattr *attr, struct dentry *hardlink)
 {
@@ -250,7 +266,7 @@ static struct dentry *ovl_clear_empty(struct dentry *dentry,
        if (upper->d_parent->d_inode != udir)
                goto out_unlock;
 
-       opaquedir = ovl_lookup_temp(workdir, dentry);
+       opaquedir = ovl_lookup_temp(workdir);
        err = PTR_ERR(opaquedir);
        if (IS_ERR(opaquedir))
                goto out_unlock;
@@ -382,7 +398,7 @@ static int ovl_create_over_whiteout(struct dentry *dentry, struct inode *inode,
        if (err)
                goto out;
 
-       newdentry = ovl_lookup_temp(workdir, dentry);
+       newdentry = ovl_lookup_temp(workdir);
        err = PTR_ERR(newdentry);
        if (IS_ERR(newdentry))
                goto out_unlock;
@@ -846,18 +862,16 @@ static int ovl_set_redirect(struct dentry *dentry, bool samedir)
        if (IS_ERR(redirect))
                return PTR_ERR(redirect);
 
-       err = ovl_do_setxattr(ovl_dentry_upper(dentry), OVL_XATTR_REDIRECT,
-                             redirect, strlen(redirect), 0);
+       err = ovl_check_setxattr(dentry, ovl_dentry_upper(dentry),
+                                OVL_XATTR_REDIRECT,
+                                redirect, strlen(redirect), -EXDEV);
        if (!err) {
                spin_lock(&dentry->d_lock);
                ovl_dentry_set_redirect(dentry, redirect);
                spin_unlock(&dentry->d_lock);
        } else {
                kfree(redirect);
-               if (err == -EOPNOTSUPP)
-                       ovl_clear_redirect_dir(dentry->d_sb);
-               else
-                       pr_warn_ratelimited("overlay: failed to set redirect (%i)\n", err);
+               pr_warn_ratelimited("overlay: failed to set redirect (%i)\n", err);
                /* Fall back to userspace copy-up */
                err = -EXDEV;
        }
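
The fallback to -EXDEV leans on long-standing userspace behavior: rename(2) failing with EXDEV makes tools such as mv degrade to copy-and-unlink, which routes the move through normal copy-up. A minimal sketch of that convention (copy_and_unlink() is a hypothetical stand-in for the slow path):

    #include <errno.h>
    #include <stdio.h>

    extern int copy_and_unlink(const char *from, const char *to);

    int move_path(const char *from, const char *to)
    {
            if (rename(from, to) == 0)
                    return 0;
            if (errno != EXDEV)
                    return -1;              /* genuine failure */
            /* "cross-device" answer: fall back to copy + unlink */
            return copy_and_unlink(from, to);
    }
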
@@ -943,6 +957,25 @@ static int ovl_rename(struct inode *olddir, struct dentry *old,
        old_upperdir = ovl_dentry_upper(old->d_parent);
        new_upperdir = ovl_dentry_upper(new->d_parent);
 
+       if (!samedir) {
+               /*
+                * When moving a merge dir or non-dir with copy up origin into
+                * a new parent, we are marking the new parent dir "impure".
+                * When ovl_iterate() iterates an "impure" upper dir, it will
+                * lookup the origin inodes of the entries to fill d_ino.
+                */
+               if (ovl_type_origin(old)) {
+                       err = ovl_set_impure(new->d_parent, new_upperdir);
+                       if (err)
+                               goto out_revert_creds;
+               }
+               if (!overwrite && ovl_type_origin(new)) {
+                       err = ovl_set_impure(old->d_parent, old_upperdir);
+                       if (err)
+                               goto out_revert_creds;
+               }
+       }
+
        trap = lock_rename(new_upperdir, old_upperdir);
 
        olddentry = lookup_one_len(old->d_name.name, old_upperdir,
@@ -992,7 +1025,7 @@ static int ovl_rename(struct inode *olddir, struct dentry *old,
                if (ovl_type_merge_or_lower(old))
                        err = ovl_set_redirect(old, samedir);
                else if (!old_opaque && ovl_type_merge(new->d_parent))
-                       err = ovl_set_opaque(old, olddentry);
+                       err = ovl_set_opaque_xerr(old, olddentry, -EXDEV);
                if (err)
                        goto out_dput;
        }
@@ -1000,7 +1033,7 @@ static int ovl_rename(struct inode *olddir, struct dentry *old,
                if (ovl_type_merge_or_lower(new))
                        err = ovl_set_redirect(new, samedir);
                else if (!new_opaque && ovl_type_merge(old->d_parent))
-                       err = ovl_set_opaque(new, newdentry);
+                       err = ovl_set_opaque_xerr(new, newdentry, -EXDEV);
                if (err)
                        goto out_dput;
        }
index ad9547f82da57fa4bd51eb5738cb8f02235e4455..d613e2c41242a52a6c018f43f9987bdbf461e0bb 100644 (file)
@@ -240,6 +240,16 @@ int ovl_xattr_get(struct dentry *dentry, const char *name,
        return res;
 }
 
+static bool ovl_can_list(const char *s)
+{
+       /* List all non-trusted xattrs */
+       if (strncmp(s, XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN) != 0)
+               return true;
+
+       /* Never list trusted.overlay, list other trusted for superuser only */
+       return !ovl_is_private_xattr(s) && capable(CAP_SYS_ADMIN);
+}
+
 ssize_t ovl_listxattr(struct dentry *dentry, char *list, size_t size)
 {
        struct dentry *realdentry = ovl_dentry_real(dentry);
@@ -263,7 +273,7 @@ ssize_t ovl_listxattr(struct dentry *dentry, char *list, size_t size)
                        return -EIO;
 
                len -= slen;
-               if (ovl_is_private_xattr(s)) {
+               if (!ovl_can_list(s)) {
                        res -= slen;
                        memmove(s, s + slen, len);
                } else {
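
What ovl_can_list() means from the other side of the syscall, as a small sketch: listxattr(2) on an overlay file never returns trusted.overlay.* names, and other trusted.* names show up only for a CAP_SYS_ADMIN caller.

    #include <stdio.h>
    #include <string.h>
    #include <sys/xattr.h>

    int dump_xattr_names(const char *path)
    {
            char buf[4096];
            ssize_t len = listxattr(path, buf, sizeof(buf));

            if (len < 0)
                    return -1;
            /* names come back as a NUL-separated sequence */
            for (char *p = buf; p < buf + len; p += strlen(p) + 1)
                    puts(p);
            return 0;
    }
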
index bad0f665a63521efde00b4c488d4ed2ba85a5b75..f3136c31e72af24cbb9949449a12d292fc3bf11b 100644 (file)
@@ -169,17 +169,7 @@ invalid:
 
 static bool ovl_is_opaquedir(struct dentry *dentry)
 {
-       int res;
-       char val;
-
-       if (!d_is_dir(dentry))
-               return false;
-
-       res = vfs_getxattr(dentry, OVL_XATTR_OPAQUE, &val, 1);
-       if (res == 1 && val == 'y')
-               return true;
-
-       return false;
+       return ovl_check_dir_xattr(dentry, OVL_XATTR_OPAQUE);
 }
 
 static int ovl_lookup_single(struct dentry *base, struct ovl_lookup_data *d,
@@ -351,6 +341,7 @@ struct dentry *ovl_lookup(struct inode *dir, struct dentry *dentry,
        unsigned int ctr = 0;
        struct inode *inode = NULL;
        bool upperopaque = false;
+       bool upperimpure = false;
        char *upperredirect = NULL;
        struct dentry *this;
        unsigned int i;
@@ -395,6 +386,8 @@ struct dentry *ovl_lookup(struct inode *dir, struct dentry *dentry,
                                poe = roe;
                }
                upperopaque = d.opaque;
+               if (upperdentry && d.is_dir)
+                       upperimpure = ovl_is_impuredir(upperdentry);
        }
 
        if (!d.stop && poe->numlower) {
@@ -463,6 +456,7 @@ struct dentry *ovl_lookup(struct inode *dir, struct dentry *dentry,
 
        revert_creds(old_cred);
        oe->opaque = upperopaque;
+       oe->impure = upperimpure;
        oe->redirect = upperredirect;
        oe->__upperdentry = upperdentry;
        memcpy(oe->lowerstack, stack, sizeof(struct path) * ctr);
index caa36cb9c46de9838805dc40e21672217407d40e..0623cebeefff8661d49d65a228ceec6290cee877 100644 (file)
@@ -24,6 +24,7 @@ enum ovl_path_type {
 #define OVL_XATTR_OPAQUE OVL_XATTR_PREFIX "opaque"
 #define OVL_XATTR_REDIRECT OVL_XATTR_PREFIX "redirect"
 #define OVL_XATTR_ORIGIN OVL_XATTR_PREFIX "origin"
+#define OVL_XATTR_IMPURE OVL_XATTR_PREFIX "impure"
 
 /*
  * The tuple (fh,uuid) is a universal unique identifier for a copy up origin,
@@ -203,10 +204,10 @@ struct dentry *ovl_dentry_real(struct dentry *dentry);
 struct ovl_dir_cache *ovl_dir_cache(struct dentry *dentry);
 void ovl_set_dir_cache(struct dentry *dentry, struct ovl_dir_cache *cache);
 bool ovl_dentry_is_opaque(struct dentry *dentry);
+bool ovl_dentry_is_impure(struct dentry *dentry);
 bool ovl_dentry_is_whiteout(struct dentry *dentry);
 void ovl_dentry_set_opaque(struct dentry *dentry);
 bool ovl_redirect_dir(struct super_block *sb);
-void ovl_clear_redirect_dir(struct super_block *sb);
 const char *ovl_dentry_get_redirect(struct dentry *dentry);
 void ovl_dentry_set_redirect(struct dentry *dentry, const char *redirect);
 void ovl_dentry_update(struct dentry *dentry, struct dentry *upperdentry);
@@ -219,6 +220,17 @@ bool ovl_is_whiteout(struct dentry *dentry);
 struct file *ovl_path_open(struct path *path, int flags);
 int ovl_copy_up_start(struct dentry *dentry);
 void ovl_copy_up_end(struct dentry *dentry);
+bool ovl_check_dir_xattr(struct dentry *dentry, const char *name);
+int ovl_check_setxattr(struct dentry *dentry, struct dentry *upperdentry,
+                      const char *name, const void *value, size_t size,
+                      int xerr);
+int ovl_set_impure(struct dentry *dentry, struct dentry *upperdentry);
+
+static inline bool ovl_is_impuredir(struct dentry *dentry)
+{
+       return ovl_check_dir_xattr(dentry, OVL_XATTR_IMPURE);
+}
+
 
 /* namei.c */
 int ovl_path_next(int idx, struct dentry *dentry, struct path *path);
@@ -263,7 +275,7 @@ static inline void ovl_copyattr(struct inode *from, struct inode *to)
 
 /* dir.c */
 extern const struct inode_operations ovl_dir_inode_operations;
-struct dentry *ovl_lookup_temp(struct dentry *workdir, struct dentry *dentry);
+struct dentry *ovl_lookup_temp(struct dentry *workdir);
 struct cattr {
        dev_t rdev;
        umode_t mode;
index b2023ddb85323725b8bbfa687f31fc854c1ba5e9..34bc4a9f5c61d95f049b3f34ccd0de243aa27ddd 100644 (file)
@@ -28,6 +28,7 @@ struct ovl_fs {
        /* creds of process who forced instantiation of super block */
        const struct cred *creator_cred;
        bool tmpfile;
+       bool noxattr;
        wait_queue_head_t copyup_wq;
        /* sb common to all layers */
        struct super_block *same_sb;
@@ -42,6 +43,7 @@ struct ovl_entry {
                        u64 version;
                        const char *redirect;
                        bool opaque;
+                       bool impure;
                        bool copying;
                };
                struct rcu_head rcu;
index 9828b7de89992e64a1900a277b58423dde7b992a..4882ffb37baead1c4da41684158d22cbe58e5353 100644 (file)
@@ -891,6 +891,19 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent)
                                dput(temp);
                        else
                                pr_warn("overlayfs: upper fs does not support tmpfile.\n");
+
+                       /*
+                        * Check if upper/work fs supports trusted.overlay.*
+                        * xattr
+                        */
+                       err = ovl_do_setxattr(ufs->workdir, OVL_XATTR_OPAQUE,
+                                             "0", 1, 0);
+                       if (err) {
+                               ufs->noxattr = true;
+                               pr_warn("overlayfs: upper fs does not support xattr.\n");
+                       } else {
+                               vfs_removexattr(ufs->workdir, OVL_XATTR_OPAQUE);
+                       }
                }
        }
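
A userspace analog of the probe above, as a hedged sketch assuming a scratch file on the candidate upper filesystem: set a throwaway xattr and remove it again; ENOTSUP means the filesystem cannot carry overlayfs metadata.

    #include <errno.h>
    #include <sys/xattr.h>

    static int supports_trusted_xattr(const char *probe_path)
    {
            if (setxattr(probe_path, "trusted.overlay.opaque",
                         "0", 1, 0) < 0)
                    return errno == ENOTSUP ? 0 : -1;
            removexattr(probe_path, "trusted.overlay.opaque");
            return 1;
    }
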
 
@@ -961,7 +974,10 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent)
        path_put(&workpath);
        kfree(lowertmp);
 
-       oe->__upperdentry = upperpath.dentry;
+       if (upperpath.dentry) {
+               oe->__upperdentry = upperpath.dentry;
+               oe->impure = ovl_is_impuredir(upperpath.dentry);
+       }
        for (i = 0; i < numlower; i++) {
                oe->lowerstack[i].dentry = stack[i].dentry;
                oe->lowerstack[i].mnt = ufs->lower_mnt[i];
index cfdea47313a10e22a9c06193e4cc422891badaae..809048913889189d083339d1d015ef4cad2af035 100644 (file)
@@ -175,6 +175,13 @@ bool ovl_dentry_is_opaque(struct dentry *dentry)
        return oe->opaque;
 }
 
+bool ovl_dentry_is_impure(struct dentry *dentry)
+{
+       struct ovl_entry *oe = dentry->d_fsdata;
+
+       return oe->impure;
+}
+
 bool ovl_dentry_is_whiteout(struct dentry *dentry)
 {
        return !dentry->d_inode && ovl_dentry_is_opaque(dentry);
@@ -191,14 +198,7 @@ bool ovl_redirect_dir(struct super_block *sb)
 {
        struct ovl_fs *ofs = sb->s_fs_info;
 
-       return ofs->config.redirect_dir;
-}
-
-void ovl_clear_redirect_dir(struct super_block *sb)
-{
-       struct ovl_fs *ofs = sb->s_fs_info;
-
-       ofs->config.redirect_dir = false;
+       return ofs->config.redirect_dir && !ofs->noxattr;
 }
 
 const char *ovl_dentry_get_redirect(struct dentry *dentry)
@@ -303,3 +303,59 @@ void ovl_copy_up_end(struct dentry *dentry)
        wake_up_locked(&ofs->copyup_wq);
        spin_unlock(&ofs->copyup_wq.lock);
 }
+
+bool ovl_check_dir_xattr(struct dentry *dentry, const char *name)
+{
+       int res;
+       char val;
+
+       if (!d_is_dir(dentry))
+               return false;
+
+       res = vfs_getxattr(dentry, name, &val, 1);
+       if (res == 1 && val == 'y')
+               return true;
+
+       return false;
+}
+
+int ovl_check_setxattr(struct dentry *dentry, struct dentry *upperdentry,
+                      const char *name, const void *value, size_t size,
+                      int xerr)
+{
+       int err;
+       struct ovl_fs *ofs = dentry->d_sb->s_fs_info;
+
+       if (ofs->noxattr)
+               return xerr;
+
+       err = ovl_do_setxattr(upperdentry, name, value, size, 0);
+
+       if (err == -EOPNOTSUPP) {
+               pr_warn("overlayfs: cannot set %s xattr on upper\n", name);
+               ofs->noxattr = true;
+               return xerr;
+       }
+
+       return err;
+}
+
+int ovl_set_impure(struct dentry *dentry, struct dentry *upperdentry)
+{
+       int err;
+       struct ovl_entry *oe = dentry->d_fsdata;
+
+       if (oe->impure)
+               return 0;
+
+       /*
+        * Do not fail when upper doesn't support xattrs.
+        * Upper inodes won't have origin nor redirect xattr anyway.
+        */
+       err = ovl_check_setxattr(dentry, upperdentry, OVL_XATTR_IMPURE,
+                                "y", 1, 0);
+       if (!err)
+               oe->impure = true;
+
+       return err;
+}
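
The xerr argument is the whole contract of ovl_check_setxattr(): each caller picks the errno its fallback path understands. A hypothetical caller, mirroring the ovl_set_opaque()/ovl_set_opaque_xerr() split earlier in the series:

    /* sketch only, not part of the patch */
    static int mark_opaque_example(struct dentry *dentry,
                                   struct dentry *upper, bool renaming)
    {
            /* rename can fall back to userspace copy-up; create cannot */
            int xerr = renaming ? -EXDEV : -EIO;

            return ovl_check_setxattr(dentry, upper, OVL_XATTR_OPAQUE,
                                      "y", 1, xerr);
    }
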
index 45f6bf68fff3ed30df0f85d98c0f644c320a9bf7..f1e1927ccd484e7372fe2a38db7455468bbf06e8 100644 (file)
@@ -821,7 +821,7 @@ static ssize_t mem_rw(struct file *file, char __user *buf,
        if (!mmget_not_zero(mm))
                goto free;
 
-       flags = write ? FOLL_WRITE : 0;
+       flags = FOLL_FORCE | (write ? FOLL_WRITE : 0);
 
        while (count > 0) {
                int this_len = min_t(int, count, PAGE_SIZE);
index da01f497180a165d163935c4744ef82521bf9151..39bb1e838d8da683fa64dadf7ff150b9369d699e 100644 (file)
@@ -1112,7 +1112,7 @@ static int flush_commit_list(struct super_block *s,
                depth = reiserfs_write_unlock_nested(s);
                if (reiserfs_barrier_flush(s))
                        __sync_dirty_buffer(jl->j_commit_bh,
-                                       REQ_PREFLUSH | REQ_FUA);
+                                       REQ_SYNC | REQ_PREFLUSH | REQ_FUA);
                else
                        sync_dirty_buffer(jl->j_commit_bh);
                reiserfs_write_lock_nested(s, depth);
@@ -1271,7 +1271,7 @@ static int _update_journal_header_block(struct super_block *sb,
 
                if (reiserfs_barrier_flush(sb))
                        __sync_dirty_buffer(journal->j_header_bh,
-                                       REQ_PREFLUSH | REQ_FUA);
+                                       REQ_SYNC | REQ_PREFLUSH | REQ_FUA);
                else
                        sync_dirty_buffer(journal->j_header_bh);
 
index 131b2b77c8185403dc3cdf0393e8e0c915f3a850..29ecaf739449c4036e6ed3ebed5499b8489079c4 100644 (file)
@@ -812,9 +812,8 @@ static int ufs_fill_super(struct super_block *sb, void *data, int silent)
        uspi->s_dirblksize = UFS_SECTOR_SIZE;
        super_block_offset=UFS_SBLOCK;
 
-       /* Keep 2Gig file limit. Some UFS variants need to override 
-          this but as I don't know which I'll let those in the know loosen
-          the rules */
+       sb->s_maxbytes = MAX_LFS_FILESIZE;
+
        switch (sbi->s_mount_opt & UFS_MOUNT_UFSTYPE) {
        case UFS_MOUNT_UFSTYPE_44BSD:
                UFSD("ufstype=44bsd\n");
index f02eb767339219e8e59418429c666b3934d795f4..a7048eafa8e6d8c92a0c6888017389d8562c75ba 100644 (file)
@@ -1280,7 +1280,6 @@ xfs_bmap_read_extents(
                xfs_bmbt_rec_t  *frp;
                xfs_fsblock_t   nextbno;
                xfs_extnum_t    num_recs;
-               xfs_extnum_t    start;
 
                num_recs = xfs_btree_get_numrecs(block);
                if (unlikely(i + num_recs > room)) {
@@ -1303,7 +1302,6 @@ xfs_bmap_read_extents(
                 * Copy records into the extent records.
                 */
                frp = XFS_BMBT_REC_ADDR(mp, block, 1);
-               start = i;
                for (j = 0; j < num_recs; j++, i++, frp++) {
                        xfs_bmbt_rec_host_t *trp = xfs_iext_get_ext(ifp, i);
                        trp->l0 = be64_to_cpu(frp->l0);
@@ -2065,8 +2063,10 @@ xfs_bmap_add_extent_delay_real(
                }
                temp = xfs_bmap_worst_indlen(bma->ip, temp);
                temp2 = xfs_bmap_worst_indlen(bma->ip, temp2);
-               diff = (int)(temp + temp2 - startblockval(PREV.br_startblock) -
-                       (bma->cur ? bma->cur->bc_private.b.allocated : 0));
+               diff = (int)(temp + temp2 -
+                            (startblockval(PREV.br_startblock) -
+                             (bma->cur ?
+                              bma->cur->bc_private.b.allocated : 0)));
                if (diff > 0) {
                        error = xfs_mod_fdblocks(bma->ip->i_mount,
                                                 -((int64_t)diff), false);
@@ -2123,7 +2123,6 @@ xfs_bmap_add_extent_delay_real(
                temp = da_new;
                if (bma->cur)
                        temp += bma->cur->bc_private.b.allocated;
-               ASSERT(temp <= da_old);
                if (temp < da_old)
                        xfs_mod_fdblocks(bma->ip->i_mount,
                                        (int64_t)(da_old - temp), false);
index 5392674bf8930550d949f6c01d0f47ec03228ba8..3a673ba201aae9f39c8190bfa2b8904120284ce5 100644 (file)
@@ -4395,7 +4395,7 @@ xfs_btree_visit_blocks(
                        xfs_btree_readahead_ptr(cur, ptr, 1);
 
                        /* save for the next iteration of the loop */
-                       lptr = *ptr;
+                       xfs_btree_copy_ptrs(cur, &lptr, ptr, 1);
                }
 
                /* for each buffer in the level */
index b177ef33cd4c3215de5f3bc228a467bcf73031e1..82a38d86ebad83346c441e802340a172402ef532 100644 (file)
@@ -1629,13 +1629,28 @@ xfs_refcount_recover_cow_leftovers(
        if (mp->m_sb.sb_agblocks >= XFS_REFC_COW_START)
                return -EOPNOTSUPP;
 
-       error = xfs_alloc_read_agf(mp, NULL, agno, 0, &agbp);
+       INIT_LIST_HEAD(&debris);
+
+       /*
+        * In this first part, we use an empty transaction to gather up
+        * all the leftover CoW extents so that we can subsequently
+        * delete them.  The empty transaction is used to avoid
+        * a buffer lock deadlock if there happens to be a loop in the
+        * refcountbt because we're allowed to re-grab a buffer that is
+        * already attached to our transaction.  When we're done
+        * recording the CoW debris we cancel the (empty) transaction
+        * and everything goes away cleanly.
+        */
+       error = xfs_trans_alloc_empty(mp, &tp);
        if (error)
                return error;
-       cur = xfs_refcountbt_init_cursor(mp, NULL, agbp, agno, NULL);
+
+       error = xfs_alloc_read_agf(mp, tp, agno, 0, &agbp);
+       if (error)
+               goto out_trans;
+       cur = xfs_refcountbt_init_cursor(mp, tp, agbp, agno, NULL);
 
        /* Find all the leftover CoW staging extents. */
-       INIT_LIST_HEAD(&debris);
        memset(&low, 0, sizeof(low));
        memset(&high, 0, sizeof(high));
        low.rc.rc_startblock = XFS_REFC_COW_START;
@@ -1645,10 +1660,11 @@ xfs_refcount_recover_cow_leftovers(
        if (error)
                goto out_cursor;
        xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
-       xfs_buf_relse(agbp);
+       xfs_trans_brelse(tp, agbp);
+       xfs_trans_cancel(tp);
 
        /* Now iterate the list to free the leftovers */
-       list_for_each_entry(rr, &debris, rr_list) {
+       list_for_each_entry_safe(rr, n, &debris, rr_list) {
                /* Set up transaction. */
                error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, 0, 0, 0, &tp);
                if (error)
@@ -1676,8 +1692,16 @@ xfs_refcount_recover_cow_leftovers(
                error = xfs_trans_commit(tp);
                if (error)
                        goto out_free;
+
+               list_del(&rr->rr_list);
+               kmem_free(rr);
        }
 
+       return error;
+out_defer:
+       xfs_defer_cancel(&dfops);
+out_trans:
+       xfs_trans_cancel(tp);
 out_free:
        /* Free the leftover list */
        list_for_each_entry_safe(rr, n, &debris, rr_list) {
@@ -1688,11 +1712,6 @@ out_free:
 
 out_cursor:
        xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
-       xfs_buf_relse(agbp);
-       goto out_free;
-
-out_defer:
-       xfs_defer_cancel(&dfops);
-       xfs_trans_cancel(tp);
-       goto out_free;
+       xfs_trans_brelse(tp, agbp);
+       goto out_trans;
 }
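
The switch to list_for_each_entry_safe() matters because the loop now list_del()s and frees the entry it is standing on. The same rule in a minimal plain-C sketch (hypothetical node type):

    #include <stdlib.h>

    struct node { struct node *next; };

    void free_all(struct node *head)
    {
            struct node *cur = head;

            while (cur) {
                    struct node *next = cur->next;  /* save before free */
                    free(cur);
                    cur = next;     /* never touch cur again after free */
            }
    }
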
index 2b954308a1d671e9f09d06a5353713297f11be76..9e3cc2146d5b03cdc84a4a0d92ce3c186314525d 100644 (file)
@@ -582,9 +582,13 @@ xfs_getbmap(
                }
                break;
        default:
+               /* Local format data forks report no extents. */
+               if (ip->i_d.di_format == XFS_DINODE_FMT_LOCAL) {
+                       bmv->bmv_entries = 0;
+                       return 0;
+               }
                if (ip->i_d.di_format != XFS_DINODE_FMT_EXTENTS &&
-                   ip->i_d.di_format != XFS_DINODE_FMT_BTREE &&
-                   ip->i_d.di_format != XFS_DINODE_FMT_LOCAL)
+                   ip->i_d.di_format != XFS_DINODE_FMT_BTREE)
                        return -EINVAL;
 
                if (xfs_get_extsz_hint(ip) ||
@@ -712,7 +716,7 @@ xfs_getbmap(
                         * extents.
                         */
                        if (map[i].br_startblock == DELAYSTARTBLOCK &&
-                           map[i].br_startoff <= XFS_B_TO_FSB(mp, XFS_ISIZE(ip)))
+                           map[i].br_startoff < XFS_B_TO_FSB(mp, XFS_ISIZE(ip)))
                                ASSERT((iflags & BMV_IF_DELALLOC) != 0);
 
                         if (map[i].br_startblock == HOLESTARTBLOCK &&
index 62fa39276a24bd91c26e3aa18c9f162f19842b95..07b77b73b0240c5cca4187d29e8e403cbf4f0714 100644 (file)
@@ -97,12 +97,16 @@ static inline void
 xfs_buf_ioacct_inc(
        struct xfs_buf  *bp)
 {
-       if (bp->b_flags & (XBF_NO_IOACCT|_XBF_IN_FLIGHT))
+       if (bp->b_flags & XBF_NO_IOACCT)
                return;
 
        ASSERT(bp->b_flags & XBF_ASYNC);
-       bp->b_flags |= _XBF_IN_FLIGHT;
-       percpu_counter_inc(&bp->b_target->bt_io_count);
+       spin_lock(&bp->b_lock);
+       if (!(bp->b_state & XFS_BSTATE_IN_FLIGHT)) {
+               bp->b_state |= XFS_BSTATE_IN_FLIGHT;
+               percpu_counter_inc(&bp->b_target->bt_io_count);
+       }
+       spin_unlock(&bp->b_lock);
 }
 
 /*
@@ -110,14 +114,24 @@ xfs_buf_ioacct_inc(
  * freed and unaccount from the buftarg.
  */
 static inline void
-xfs_buf_ioacct_dec(
+__xfs_buf_ioacct_dec(
        struct xfs_buf  *bp)
 {
-       if (!(bp->b_flags & _XBF_IN_FLIGHT))
-               return;
+       ASSERT(spin_is_locked(&bp->b_lock));
 
-       bp->b_flags &= ~_XBF_IN_FLIGHT;
-       percpu_counter_dec(&bp->b_target->bt_io_count);
+       if (bp->b_state & XFS_BSTATE_IN_FLIGHT) {
+               bp->b_state &= ~XFS_BSTATE_IN_FLIGHT;
+               percpu_counter_dec(&bp->b_target->bt_io_count);
+       }
+}
+
+static inline void
+xfs_buf_ioacct_dec(
+       struct xfs_buf  *bp)
+{
+       spin_lock(&bp->b_lock);
+       __xfs_buf_ioacct_dec(bp);
+       spin_unlock(&bp->b_lock);
 }
 
 /*
@@ -149,9 +163,9 @@ xfs_buf_stale(
         * unaccounted (released to LRU) before that occurs. Drop in-flight
         * status now to preserve accounting consistency.
         */
-       xfs_buf_ioacct_dec(bp);
-
        spin_lock(&bp->b_lock);
+       __xfs_buf_ioacct_dec(bp);
+
        atomic_set(&bp->b_lru_ref, 0);
        if (!(bp->b_state & XFS_BSTATE_DISPOSE) &&
            (list_lru_del(&bp->b_target->bt_lru, &bp->b_lru)))
@@ -979,12 +993,12 @@ xfs_buf_rele(
                 * ensures the decrement occurs only once per-buf.
                 */
                if ((atomic_read(&bp->b_hold) == 1) && !list_empty(&bp->b_lru))
-                       xfs_buf_ioacct_dec(bp);
+                       __xfs_buf_ioacct_dec(bp);
                goto out_unlock;
        }
 
        /* the last reference has been dropped ... */
-       xfs_buf_ioacct_dec(bp);
+       __xfs_buf_ioacct_dec(bp);
        if (!(bp->b_flags & XBF_STALE) && atomic_read(&bp->b_lru_ref)) {
                /*
                 * If the buffer is added to the LRU take a new reference to the
index 8d1d44f87ce98834ad67cee6840d96d07d5666c0..1508121f29f29191da1a4efc7c8f12cf42eb0107 100644 (file)
@@ -63,7 +63,6 @@ typedef enum {
 #define _XBF_KMEM       (1 << 21)/* backed by heap memory */
 #define _XBF_DELWRI_Q   (1 << 22)/* buffer on a delwri queue */
 #define _XBF_COMPOUND   (1 << 23)/* compound buffer */
-#define _XBF_IN_FLIGHT  (1 << 25) /* I/O in flight, for accounting purposes */
 
 typedef unsigned int xfs_buf_flags_t;
 
@@ -84,14 +83,14 @@ typedef unsigned int xfs_buf_flags_t;
        { _XBF_PAGES,           "PAGES" }, \
        { _XBF_KMEM,            "KMEM" }, \
        { _XBF_DELWRI_Q,        "DELWRI_Q" }, \
-       { _XBF_COMPOUND,        "COMPOUND" }, \
-       { _XBF_IN_FLIGHT,       "IN_FLIGHT" }
+       { _XBF_COMPOUND,        "COMPOUND" }
 
 
 /*
  * Internal state flags.
  */
 #define XFS_BSTATE_DISPOSE      (1 << 0)       /* buffer being discarded */
+#define XFS_BSTATE_IN_FLIGHT    (1 << 1)       /* I/O in flight */
 
 /*
  * The xfs_buftarg contains 2 notions of "sector size" -
index 35703a80137208b81cb38ab91ce50e28d80a19f2..5fb5a0958a1485db46a2f753446c11828ff1913e 100644 (file)
@@ -1043,49 +1043,17 @@ xfs_find_get_desired_pgoff(
 
        index = startoff >> PAGE_SHIFT;
        endoff = XFS_FSB_TO_B(mp, map->br_startoff + map->br_blockcount);
-       end = endoff >> PAGE_SHIFT;
+       end = (endoff - 1) >> PAGE_SHIFT;
        do {
                int             want;
                unsigned        nr_pages;
                unsigned int    i;
 
-               want = min_t(pgoff_t, end - index, PAGEVEC_SIZE);
+               want = min_t(pgoff_t, end - index, PAGEVEC_SIZE - 1) + 1;
                nr_pages = pagevec_lookup(&pvec, inode->i_mapping, index,
                                          want);
-               /*
-                * No page mapped into given range.  If we are searching holes
-                * and if this is the first time we got into the loop, it means
-                * that the given offset is landed in a hole, return it.
-                *
-                * If we have already stepped through some block buffers to find
-                * holes but they all contains data.  In this case, the last
-                * offset is already updated and pointed to the end of the last
-                * mapped page, if it does not reach the endpoint to search,
-                * that means there should be a hole between them.
-                */
-               if (nr_pages == 0) {
-                       /* Data search found nothing */
-                       if (type == DATA_OFF)
-                               break;
-
-                       ASSERT(type == HOLE_OFF);
-                       if (lastoff == startoff || lastoff < endoff) {
-                               found = true;
-                               *offset = lastoff;
-                       }
-                       break;
-               }
-
-               /*
-                * At lease we found one page.  If this is the first time we
-                * step into the loop, and if the first page index offset is
-                * greater than the given search offset, a hole was found.
-                */
-               if (type == HOLE_OFF && lastoff == startoff &&
-                   lastoff < page_offset(pvec.pages[0])) {
-                       found = true;
+               if (nr_pages == 0)
                        break;
-               }
 
                for (i = 0; i < nr_pages; i++) {
                        struct page     *page = pvec.pages[i];
@@ -1098,18 +1066,18 @@ xfs_find_get_desired_pgoff(
                         * file mapping. However, page->index will not change
                         * because we have a reference on the page.
                         *
-                        * Searching done if the page index is out of range.
-                        * If the current offset is not reaches the end of
-                        * the specified search range, there should be a hole
-                        * between them.
+                        * If the current page starts beyond the last offset
+                        * we covered, the gap in between is a hole.
                         */
-                       if (page->index > end) {
-                               if (type == HOLE_OFF && lastoff < endoff) {
-                                       *offset = lastoff;
-                                       found = true;
-                               }
+                       if (type == HOLE_OFF && lastoff < endoff &&
+                           lastoff < page_offset(pvec.pages[i])) {
+                               found = true;
+                               *offset = lastoff;
                                goto out;
                        }
+                       /* Searching done if the page index is out of range. */
+                       if (page->index > end)
+                               goto out;
 
                        lock_page(page);
                        /*
@@ -1151,21 +1119,20 @@ xfs_find_get_desired_pgoff(
 
                /*
                 * The number of returned pages less than our desired, search
-                * done.  In this case, nothing was found for searching data,
-                * but we found a hole behind the last offset.
+                * done.
                 */
-               if (nr_pages < want) {
-                       if (type == HOLE_OFF) {
-                               *offset = lastoff;
-                               found = true;
-                       }
+               if (nr_pages < want)
                        break;
-               }
 
                index = pvec.pages[i - 1]->index + 1;
                pagevec_release(&pvec);
        } while (index <= end);
 
+       /* No page at lastoff and we are not done - we found a hole. */
+       if (type == HOLE_OFF && lastoff < endoff) {
+               *offset = lastoff;
+               found = true;
+       }
 out:
        pagevec_release(&pvec);
        return found;
index 3683819887a5658eff23d6e3258d21567bbdc0b4..814ed729881d9a4305c3dd5646d75ef0f112b87b 100644 (file)
@@ -828,6 +828,7 @@ xfs_getfsmap(
        struct xfs_fsmap                dkeys[2];       /* per-dev keys */
        struct xfs_getfsmap_dev         handlers[XFS_GETFSMAP_DEVS];
        struct xfs_getfsmap_info        info = { NULL };
+       bool                            use_rmap;
        int                             i;
        int                             error = 0;
 
@@ -837,12 +838,14 @@ xfs_getfsmap(
            !xfs_getfsmap_is_valid_device(mp, &head->fmh_keys[1]))
                return -EINVAL;
 
+       use_rmap = capable(CAP_SYS_ADMIN) &&
+                  xfs_sb_version_hasrmapbt(&mp->m_sb);
        head->fmh_entries = 0;
 
        /* Set up our device handlers. */
        memset(handlers, 0, sizeof(handlers));
        handlers[0].dev = new_encode_dev(mp->m_ddev_targp->bt_dev);
-       if (xfs_sb_version_hasrmapbt(&mp->m_sb))
+       if (use_rmap)
                handlers[0].fn = xfs_getfsmap_datadev_rmapbt;
        else
                handlers[0].fn = xfs_getfsmap_datadev_bnobt;
index c0bd0d7651a947bf06407d7c680dde5c377b9b26..bb837310c07e98c529472abceda0a8b426d8c9a5 100644 (file)
@@ -913,4 +913,55 @@ void drm_dp_aux_unregister(struct drm_dp_aux *aux);
 int drm_dp_start_crc(struct drm_dp_aux *aux, struct drm_crtc *crtc);
 int drm_dp_stop_crc(struct drm_dp_aux *aux);
 
+struct drm_dp_dpcd_ident {
+       u8 oui[3];
+       u8 device_id[6];
+       u8 hw_rev;
+       u8 sw_major_rev;
+       u8 sw_minor_rev;
+} __packed;
+
+/**
+ * struct drm_dp_desc - DP branch/sink device descriptor
+ * @ident: DP device identification from DPCD 0x400 (sink) or 0x500 (branch).
+ * @quirks: Quirks; use drm_dp_has_quirk() to query for the quirks.
+ */
+struct drm_dp_desc {
+       struct drm_dp_dpcd_ident ident;
+       u32 quirks;
+};
+
+int drm_dp_read_desc(struct drm_dp_aux *aux, struct drm_dp_desc *desc,
+                    bool is_branch);
+
+/**
+ * enum drm_dp_quirk - Display Port sink/branch device specific quirks
+ *
+ * Display Port sink and branch devices in the wild have a variety of bugs; try
+ * to collect them here. The quirks are shared, but it's up to the drivers to
+ * implement workarounds for them.
+ */
+enum drm_dp_quirk {
+       /**
+        * @DP_DPCD_QUIRK_LIMITED_M_N:
+        *
+        * The device requires main link attributes Mvid and Nvid to be limited
+        * to 16 bits.
+        */
+       DP_DPCD_QUIRK_LIMITED_M_N,
+};
+
+/**
+ * drm_dp_has_quirk() - does the DP device have a specific quirk
+ * @desc: Device descriptor filled by drm_dp_read_desc()
+ * @quirk: Quirk to query for
+ *
+ * Return true if DP device identified by @desc has @quirk.
+ */
+static inline bool
+drm_dp_has_quirk(const struct drm_dp_desc *desc, enum drm_dp_quirk quirk)
+{
+       return desc->quirks & BIT(quirk);
+}
+
 #endif /* _DRM_DP_HELPER_H_ */
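
A hypothetical driver-side sketch of how the new helpers compose: read the descriptor once per detect, then gate a workaround on the quirk bit ("aux" and "is_branch" are assumed to come from the caller):

    struct drm_dp_desc desc;
    bool limit_m_n = false;

    if (!drm_dp_read_desc(aux, &desc, is_branch) &&
        drm_dp_has_quirk(&desc, DP_DPCD_QUIRK_LIMITED_M_N))
            limit_m_n = true;       /* clamp Mvid/Nvid to 16 bits */
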
index c47aa248c640bc5ea806d5112ce7f7b5fb6b7a46..fcd641032f8d3a87162b0e9f1a63e08ae16d10df 100644 (file)
@@ -238,7 +238,6 @@ void blk_mq_add_to_requeue_list(struct request *rq, bool at_head,
                                bool kick_requeue_list);
 void blk_mq_kick_requeue_list(struct request_queue *q);
 void blk_mq_delay_kick_requeue_list(struct request_queue *q, unsigned long msecs);
-void blk_mq_abort_requeue_list(struct request_queue *q);
 void blk_mq_complete_request(struct request *rq);
 
 bool blk_mq_queue_stopped(struct request_queue *q);
index aa2e19182d990eedda70de69bf638db5856811d1..51c5bd64bd0022c8a3f197d846c23448e7bb6a6e 100644 (file)
@@ -3,6 +3,8 @@
 
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
+#include <linux/string.h>
+
 #ifdef CONFIG_CEPH_LIB_PRETTYDEBUG
 
 /*
  */
 
 # if defined(DEBUG) || defined(CONFIG_DYNAMIC_DEBUG)
-extern const char *ceph_file_part(const char *s, int len);
 #  define dout(fmt, ...)                                               \
        pr_debug("%.*s %12.12s:%-4d : " fmt,                            \
                 8 - (int)sizeof(KBUILD_MODNAME), "    ",               \
-                ceph_file_part(__FILE__, sizeof(__FILE__)),            \
-                __LINE__, ##__VA_ARGS__)
+                kbasename(__FILE__), __LINE__, ##__VA_ARGS__)
 # else
 /* faux printk call just to see any compiler warnings. */
 #  define dout(fmt, ...)       do {                            \
index 21745946cae154f53cd87311e9350465388f70a5..ec47101cb1bf80f0867dbcff1d6aa10878df7418 100644 (file)
@@ -48,6 +48,7 @@ enum {
        CSS_ONLINE      = (1 << 1), /* between ->css_online() and ->css_offline() */
        CSS_RELEASED    = (1 << 2), /* refcnt reached zero, released */
        CSS_VISIBLE     = (1 << 3), /* css is visible to userland */
+       CSS_DYING       = (1 << 4), /* css is dying */
 };
 
 /* bits in struct cgroup flags field */
index ed2573e149faf070714e04f8160f7056b2fb1d3e..710a005c6b7a652bb9c32b5457dbd64196f6f4a0 100644 (file)
@@ -343,6 +343,26 @@ static inline bool css_tryget_online(struct cgroup_subsys_state *css)
        return true;
 }
 
+/**
+ * css_is_dying - test whether the specified css is dying
+ * @css: target css
+ *
+ * Test whether @css is in the process of offlining or already offline.  In
+ * most cases, ->css_online() and ->css_offline() callbacks should be
+ * enough; however, the actual offline operations are RCU delayed and this
+ * test returns %true also when @css is scheduled to be offlined.
+ *
+ * This is useful, for example, when the use case requires synchronous
+ * behavior with respect to cgroup removal.  cgroup removal schedules css
+ * offlining but the css can seem alive while the operation is being
+ * delayed.  If the delay affects user visible semantics, this test can be
+ * used to resolve the situation.
+ */
+static inline bool css_is_dying(struct cgroup_subsys_state *css)
+{
+       return !(css->flags & CSS_NO_REF) && percpu_ref_is_dying(&css->refcnt);
+}
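
A hypothetical controller-side sketch of the use case described above: refuse new work against a group userspace has already removed, even though the actual offlining is still RCU-delayed.

    static int example_charge(struct cgroup_subsys_state *css)
    {
            if (css_is_dying(css))
                    return -ENODEV; /* act as if the cgroup were gone */
            /* ... perform the charge against css ... */
            return 0;
    }
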
+
 /**
  * css_put - put a css reference
  * @css: target css
index 56197f82af45fb0f7cf492961fe1eb1956105735..62d948f80730fdd94c587b4f0235c72cf5d3399a 100644 (file)
@@ -272,6 +272,16 @@ struct bpf_prog_aux;
                .off   = OFF,                                   \
                .imm   = IMM })
 
+/* Unconditional jumps, goto pc + off16 */
+
+#define BPF_JMP_A(OFF)                                         \
+       ((struct bpf_insn) {                                    \
+               .code  = BPF_JMP | BPF_JA,                      \
+               .dst_reg = 0,                                   \
+               .src_reg = 0,                                   \
+               .off   = OFF,                                   \
+               .imm   = 0 })
+
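
A minimal sketch of how the new macro composes with the existing insn builders: jump over one instruction, then exit with R0 = 1.

    struct bpf_insn prog[] = {
            BPF_JMP_A(1),                   /* goto pc + 1 */
            BPF_MOV64_IMM(BPF_REG_0, 0),    /* skipped */
            BPF_MOV64_IMM(BPF_REG_0, 1),
            BPF_EXIT_INSN(),
    };
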
 /* Function call */
 
 #define BPF_EMIT_CALL(FUNC)                                    \
index 2b1a44f5bdb60e6a28d874630f71dec7a0f4c40b..a89d37e8b3873cc8e6fa79389a6c0d3a2f4843ab 100644 (file)
@@ -41,7 +41,7 @@ struct vm_area_struct;
 #define ___GFP_WRITE           0x800000u
 #define ___GFP_KSWAPD_RECLAIM  0x1000000u
 #ifdef CONFIG_LOCKDEP
-#define ___GFP_NOLOCKDEP       0x4000000u
+#define ___GFP_NOLOCKDEP       0x2000000u
 #else
 #define ___GFP_NOLOCKDEP       0
 #endif
index c0d712d22b079ebc16129ef2618f41762276cf5e..f738d50cc17d3fcaa0b9b7cf681b70dd2646897d 100644 (file)
@@ -56,7 +56,14 @@ struct gpiod_lookup_table {
        .flags = _flags,                                                  \
 }
 
+#ifdef CONFIG_GPIOLIB
 void gpiod_add_lookup_table(struct gpiod_lookup_table *table);
 void gpiod_remove_lookup_table(struct gpiod_lookup_table *table);
+#else
+static inline
+void gpiod_add_lookup_table(struct gpiod_lookup_table *table) {}
+static inline
+void gpiod_remove_lookup_table(struct gpiod_lookup_table *table) {}
+#endif
 
 #endif /* __LINUX_GPIO_MACHINE_H */
index 8d5fcd6284ce0f4702d9eb28703bf2ae80d7e399..283dc2f5364d77c13df5a16ed0474154ee6724ff 100644 (file)
@@ -614,14 +614,16 @@ static inline bool skb_vlan_tagged_multi(const struct sk_buff *skb)
 static inline netdev_features_t vlan_features_check(const struct sk_buff *skb,
                                                    netdev_features_t features)
 {
-       if (skb_vlan_tagged_multi(skb))
-               features = netdev_intersect_features(features,
-                                                    NETIF_F_SG |
-                                                    NETIF_F_HIGHDMA |
-                                                    NETIF_F_FRAGLIST |
-                                                    NETIF_F_HW_CSUM |
-                                                    NETIF_F_HW_VLAN_CTAG_TX |
-                                                    NETIF_F_HW_VLAN_STAG_TX);
+       if (skb_vlan_tagged_multi(skb)) {
+               /* In the case of multi-tagged packets, use a direct mask
+                * instead of using netdev_intersect_features(), to make
+                * sure that only devices supporting NETIF_F_HW_CSUM will
+                * have checksum offloading support.
+                */
+               features &= NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_HW_CSUM |
+                           NETIF_F_FRAGLIST | NETIF_F_HW_VLAN_CTAG_TX |
+                           NETIF_F_HW_VLAN_STAG_TX;
+       }
 
        return features;
 }
index 36872fbb815d72203e14582e3dab6ba5051ee8a7..734377ad42e9f0e4719edc4750b57245fa2d3f17 100644 (file)
@@ -64,13 +64,17 @@ extern int register_refined_jiffies(long clock_tick_rate);
 /* TICK_USEC is the time between ticks in usec assuming fake USER_HZ */
 #define TICK_USEC ((1000000UL + USER_HZ/2) / USER_HZ)
 
+#ifndef __jiffy_arch_data
+#define __jiffy_arch_data
+#endif
+
 /*
  * The 64-bit value is not atomic - you MUST NOT read it
  * without sampling the sequence number in jiffies_lock.
  * get_jiffies_64() will do this for you as appropriate.
  */
 extern u64 __cacheline_aligned_in_smp jiffies_64;
-extern unsigned long volatile __cacheline_aligned_in_smp jiffies;
+extern unsigned long volatile __cacheline_aligned_in_smp __jiffy_arch_data jiffies;
 
 #if (BITS_PER_LONG < 64)
 u64 get_jiffies_64(void);
index 4ce24a3762627be20e805da3eaab26ba1bc47578..8098695e5d8d9dfba3815fd359823f92833a5d61 100644 (file)
@@ -425,12 +425,20 @@ static inline void early_memtest(phys_addr_t start, phys_addr_t end)
 }
 #endif
 
+extern unsigned long memblock_reserved_memory_within(phys_addr_t start_addr,
+               phys_addr_t end_addr);
 #else
 static inline phys_addr_t memblock_alloc(phys_addr_t size, phys_addr_t align)
 {
        return 0;
 }
 
+static inline unsigned long memblock_reserved_memory_within(phys_addr_t start_addr,
+               phys_addr_t end_addr)
+{
+       return 0;
+}
+
 #endif /* CONFIG_HAVE_MEMBLOCK */
 
 #endif /* __KERNEL__ */
index dd9a263ed368d5476b06a3464bbfada0436cf6e2..a940ec6a046cfd0dc8c3016226c71ea9e2c343e5 100644 (file)
@@ -787,8 +787,14 @@ enum {
 };
 
 enum {
-       CQE_RSS_HTYPE_IP        = 0x3 << 6,
-       CQE_RSS_HTYPE_L4        = 0x3 << 2,
+       CQE_RSS_HTYPE_IP        = 0x3 << 2,
+       /* cqe->rss_hash_type[3:2] - IP destination selected for hash
+        * (00 = none,  01 = IPv4, 10 = IPv6, 11 = Reserved)
+        */
+       CQE_RSS_HTYPE_L4        = 0x3 << 6,
+       /* cqe->rss_hash_type[7:6] - L4 destination selected for hash
+        * (00 = none, 01 = TCP, 10 = UDP, 11 = IPSEC.SPI)
+        */
 };
 
 enum {
index bcdf739ee41a2cf38fe537e48172e9ca9053e3b9..93273d9ea4d145f125846f0c9a56a5a6e74915e4 100644 (file)
@@ -787,7 +787,12 @@ enum {
 
 typedef void (*mlx5_cmd_cbk_t)(int status, void *context);
 
+enum {
+       MLX5_CMD_ENT_STATE_PENDING_COMP,
+};
+
 struct mlx5_cmd_work_ent {
+       unsigned long           state;
        struct mlx5_cmd_msg    *in;
        struct mlx5_cmd_msg    *out;
        void                   *uout;
@@ -976,7 +981,7 @@ void mlx5_cq_completion(struct mlx5_core_dev *dev, u32 cqn);
 void mlx5_rsc_event(struct mlx5_core_dev *dev, u32 rsn, int event_type);
 void mlx5_srq_event(struct mlx5_core_dev *dev, u32 srqn, int event_type);
 struct mlx5_core_srq *mlx5_core_get_srq(struct mlx5_core_dev *dev, u32 srqn);
-void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec);
+void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec, bool forced);
 void mlx5_cq_event(struct mlx5_core_dev *dev, u32 cqn, int event_type);
 int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx,
                       int nent, u64 mask, const char *name,
index 32de0724b40009adc2a802dcc5abafbb5505c0b1..edafedb7b509010c904fccf2cdeffea1afa9317b 100644 (file)
@@ -766,6 +766,12 @@ enum {
        MLX5_CAP_PORT_TYPE_ETH = 0x1,
 };
 
+enum {
+       MLX5_CAP_UMR_FENCE_STRONG       = 0x0,
+       MLX5_CAP_UMR_FENCE_SMALL        = 0x1,
+       MLX5_CAP_UMR_FENCE_NONE         = 0x2,
+};
+
 struct mlx5_ifc_cmd_hca_cap_bits {
        u8         reserved_at_0[0x80];
 
@@ -875,7 +881,9 @@ struct mlx5_ifc_cmd_hca_cap_bits {
        u8         reserved_at_202[0x1];
        u8         ipoib_enhanced_offloads[0x1];
        u8         ipoib_basic_offloads[0x1];
-       u8         reserved_at_205[0xa];
+       u8         reserved_at_205[0x5];
+       u8         umr_fence[0x2];
+       u8         reserved_at_20c[0x3];
        u8         drain_sigerr[0x1];
        u8         cmdif_checksum[0x2];
        u8         sigerr_cqe[0x1];
index 7cb17c6b97de38b1e8d55ca6d8c90b8415a9c3fa..b892e95d4929d311b51877dfb9eb3de67780bbdf 100644 (file)
@@ -2327,6 +2327,17 @@ static inline struct page *follow_page(struct vm_area_struct *vma,
 #define FOLL_REMOTE    0x2000  /* we are working on non-current tsk/mm */
 #define FOLL_COW       0x4000  /* internal GUP flag */
 
+static inline int vm_fault_to_errno(int vm_fault, int foll_flags)
+{
+       if (vm_fault & VM_FAULT_OOM)
+               return -ENOMEM;
+       if (vm_fault & (VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE))
+               return (foll_flags & FOLL_HWPOISON) ? -EHWPOISON : -EFAULT;
+       if (vm_fault & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV))
+               return -EFAULT;
+       return 0;
+}
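
A hypothetical GUP-style caller sketch, showing the translation this helper centralizes: FOLL_* flags in, the errno the syscall should surface out.

    /* sketch only; mirrors the callers this helper was added for */
    static int example_fault(struct vm_area_struct *vma,
                             unsigned long addr, unsigned int foll_flags)
    {
            unsigned int fault_flags = (foll_flags & FOLL_WRITE) ?
                                       FAULT_FLAG_WRITE : 0;
            int ret = handle_mm_fault(vma, addr, fault_flags);

            /* 0 when the fault was handled, else -ENOMEM/-EHWPOISON/-EFAULT */
            return vm_fault_to_errno(ret, foll_flags);
    }
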
+
 typedef int (*pte_fn_t)(pte_t *pte, pgtable_t token, unsigned long addr,
                        void *data);
 extern int apply_to_page_range(struct mm_struct *mm, unsigned long address,
index ebaccd4e7d8cdc5f5ef13fed1475a8b202d93628..ef6a13b7bd3e851385bea32434e207a5cf6eec7f 100644 (file)
@@ -678,6 +678,7 @@ typedef struct pglist_data {
         * is the first PFN that needs to be initialised.
         */
        unsigned long first_deferred_pfn;
+       unsigned long static_init_size;
 #endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */
 
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
index 566fda587fcf7a76af1c6a01e84ce33336606f3f..3f74ef2281e8afac1e4667b4fbf4abcc5f89ff17 100644 (file)
@@ -467,6 +467,7 @@ enum dmi_field {
        DMI_PRODUCT_VERSION,
        DMI_PRODUCT_SERIAL,
        DMI_PRODUCT_UUID,
+       DMI_PRODUCT_FAMILY,
        DMI_BOARD_VENDOR,
        DMI_BOARD_NAME,
        DMI_BOARD_VERSION,
index dc8224ae28d5d9e6106dc0d06a8a9bc2eb85fd0a..e0d1946270f38e5238ddf0a3bb25cf03c3f3ebe4 100644 (file)
@@ -64,6 +64,7 @@ extern struct platform_device *of_platform_device_create(struct device_node *np,
                                                   const char *bus_id,
                                                   struct device *parent);
 
+extern int of_platform_device_destroy(struct device *dev, void *data);
 extern int of_platform_bus_probe(struct device_node *root,
                                 const struct of_device_id *matches,
                                 struct device *parent);
index 33c2b0b77429d09aaa31cb9735458db5762e08a7..8039f9f0ca054ba20fd9992b13b7926859d029b5 100644 (file)
@@ -183,6 +183,11 @@ enum pci_dev_flags {
        PCI_DEV_FLAGS_BRIDGE_XLATE_ROOT = (__force pci_dev_flags_t) (1 << 9),
        /* Do not use FLR even if device advertises PCI_AF_CAP */
        PCI_DEV_FLAGS_NO_FLR_RESET = (__force pci_dev_flags_t) (1 << 10),
+       /*
+        * Resume before calling the driver's system suspend hooks, disabling
+        * the direct_complete optimization.
+        */
+       PCI_DEV_FLAGS_NEEDS_RESUME = (__force pci_dev_flags_t) (1 << 11),
 };
 
 enum pci_irq_reroute_variant {
@@ -1342,9 +1347,9 @@ pci_alloc_irq_vectors_affinity(struct pci_dev *dev, unsigned int min_vecs,
                               unsigned int max_vecs, unsigned int flags,
                               const struct irq_affinity *aff_desc)
 {
-       if (min_vecs > 1)
-               return -EINVAL;
-       return 1;
+       if ((flags & PCI_IRQ_LEGACY) && min_vecs == 1 && dev->irq)
+               return 1;
+       return -ENOSPC;
 }
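
A hypothetical probe-path sketch of what the stricter !CONFIG_PCI_MSI stub preserves: a driver asking for any interrupt type still gets the single legacy IRQ when that is all that is available ("pdev" comes from the driver's probe()):

    int nvec = pci_alloc_irq_vectors(pdev, 1, 8,
                                     PCI_IRQ_MSIX | PCI_IRQ_MSI |
                                     PCI_IRQ_LEGACY);
    if (nvec < 0)
            return nvec;    /* -ENOSPC: not even the legacy IRQ is usable */
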
 
 static inline void pci_free_irq_vectors(struct pci_dev *dev)
index 279e3c5326e3a4e65bf6f86556ea984a69d0ca9c..7620eb127cffc5edbc475457732a042bac357055 100644 (file)
@@ -42,8 +42,6 @@
  * @PIN_CONFIG_BIAS_PULL_UP: the pin will be pulled up (usually with high
  *     impedance to VDD). If the argument is != 0 pull-up is enabled,
  *     if it is 0, pull-up is total, i.e. the pin is connected to VDD.
- * @PIN_CONFIG_BIDIRECTIONAL: the pin will be configured to allow simultaneous
- *     input and output operations.
  * @PIN_CONFIG_DRIVE_OPEN_DRAIN: the pin will be driven with open drain (open
  *     collector) which means it is usually wired with other output ports
  *     which are then pulled up with an external resistor. Setting this
@@ -98,7 +96,6 @@ enum pin_config_param {
        PIN_CONFIG_BIAS_PULL_DOWN,
        PIN_CONFIG_BIAS_PULL_PIN_DEFAULT,
        PIN_CONFIG_BIAS_PULL_UP,
-       PIN_CONFIG_BIDIRECTIONAL,
        PIN_CONFIG_DRIVE_OPEN_DRAIN,
        PIN_CONFIG_DRIVE_OPEN_SOURCE,
        PIN_CONFIG_DRIVE_PUSH_PULL,
index 422bc2e4cb6a6fc47571d28cb0602d59477d7caf..ef3eb8bbfee482e04aa06c83b21fbc2ce02d50b3 100644 (file)
@@ -54,7 +54,8 @@ extern int ptrace_request(struct task_struct *child, long request,
                          unsigned long addr, unsigned long data);
 extern void ptrace_notify(int exit_code);
 extern void __ptrace_link(struct task_struct *child,
-                         struct task_struct *new_parent);
+                         struct task_struct *new_parent,
+                         const struct cred *ptracer_cred);
 extern void __ptrace_unlink(struct task_struct *child);
 extern void exit_ptrace(struct task_struct *tracer, struct list_head *dead);
 #define PTRACE_MODE_READ       0x01
@@ -206,7 +207,7 @@ static inline void ptrace_init_task(struct task_struct *child, bool ptrace)
 
        if (unlikely(ptrace) && current->ptrace) {
                child->ptrace = current->ptrace;
-               __ptrace_link(child, current->parent);
+               __ptrace_link(child, current->parent, current->ptracer_cred);
 
                if (child->ptrace & PT_SEIZED)
                        task_set_jobctl_pending(child, JOBCTL_TRAP_STOP);
@@ -215,6 +216,8 @@ static inline void ptrace_init_task(struct task_struct *child, bool ptrace)
 
                set_tsk_thread_flag(child, TIF_SIGPENDING);
        }
+       else
+               child->ptracer_cred = NULL;
 }
 
 /**
index cda76c6506ca411c42c5e1cdf6d287263d97dde6..e69402d4a8aecbdc6fe4a40e8ad7e01957badbd6 100644 (file)
@@ -195,6 +195,7 @@ int serdev_device_open(struct serdev_device *);
 void serdev_device_close(struct serdev_device *);
 unsigned int serdev_device_set_baudrate(struct serdev_device *, unsigned int);
 void serdev_device_set_flow_control(struct serdev_device *, bool);
+int serdev_device_write_buf(struct serdev_device *, const unsigned char *, size_t);
 void serdev_device_wait_until_sent(struct serdev_device *, long);
 int serdev_device_get_tiocm(struct serdev_device *);
 int serdev_device_set_tiocm(struct serdev_device *, int, int);
@@ -236,6 +237,12 @@ static inline unsigned int serdev_device_set_baudrate(struct serdev_device *sdev
        return 0;
 }
 static inline void serdev_device_set_flow_control(struct serdev_device *sdev, bool enable) {}
+static inline int serdev_device_write_buf(struct serdev_device *serdev,
+                                         const unsigned char *buf,
+                                         size_t count)
+{
+       return -ENODEV;
+}
 static inline void serdev_device_wait_until_sent(struct serdev_device *sdev, long timeout) {}
 static inline int serdev_device_get_tiocm(struct serdev_device *serdev)
 {
@@ -301,7 +308,7 @@ struct tty_driver;
 struct device *serdev_tty_port_register(struct tty_port *port,
                                        struct device *parent,
                                        struct tty_driver *drv, int idx);
-void serdev_tty_port_unregister(struct tty_port *port);
+int serdev_tty_port_unregister(struct tty_port *port);
 #else
 static inline struct device *serdev_tty_port_register(struct tty_port *port,
                                           struct device *parent,
@@ -309,14 +316,10 @@ static inline struct device *serdev_tty_port_register(struct tty_port *port,
 {
        return ERR_PTR(-ENODEV);
 }
-static inline void serdev_tty_port_unregister(struct tty_port *port) {}
-#endif /* CONFIG_SERIAL_DEV_CTRL_TTYPORT */
-
-static inline int serdev_device_write_buf(struct serdev_device *serdev,
-                                         const unsigned char *data,
-                                         size_t count)
+static inline int serdev_tty_port_unregister(struct tty_port *port)
 {
-       return serdev_device_write(serdev, data, count, 0);
+       return -ENODEV;
 }
+#endif /* CONFIG_SERIAL_DEV_CTRL_TTYPORT */
 
 #endif /*_LINUX_SERDEV_H */
index 94631026f79c56f022976a85dcde92379507e87c..11cef5a7bc87a9fe67a4bfbfca9f52e41d0abd88 100644 (file)
@@ -336,7 +336,8 @@ xdr_argsize_check(struct svc_rqst *rqstp, __be32 *p)
 {
        char *cp = (char *)p;
        struct kvec *vec = &rqstp->rq_arg.head[0];
-       return cp == (char *)vec->iov_base + vec->iov_len;
+       return cp >= (char*)vec->iov_base
+               && cp <= (char*)vec->iov_base + vec->iov_len;
 }
 
 static inline int
index d07cd2105a6c6a3bdcac20c918622c7ad4ea0840..eccb4ec30a8a7b94064ff1ede0645234b94ae49c 100644 (file)
@@ -558,6 +558,15 @@ extern struct device *tty_port_register_device_attr(struct tty_port *port,
                struct tty_driver *driver, unsigned index,
                struct device *device, void *drvdata,
                const struct attribute_group **attr_grp);
+extern struct device *tty_port_register_device_serdev(struct tty_port *port,
+               struct tty_driver *driver, unsigned index,
+               struct device *device);
+extern struct device *tty_port_register_device_attr_serdev(struct tty_port *port,
+               struct tty_driver *driver, unsigned index,
+               struct device *device, void *drvdata,
+               const struct attribute_group **attr_grp);
+extern void tty_port_unregister_device(struct tty_port *port,
+               struct tty_driver *driver, unsigned index);
 extern int tty_port_alloc_xmit_buf(struct tty_port *port);
 extern void tty_port_free_xmit_buf(struct tty_port *port);
 extern void tty_port_destroy(struct tty_port *port);
index 7dffa5624ea62bee992e547c44ed4a86a577b0a7..97116379db5ff6a850e078c35047fe995f45b265 100644 (file)
@@ -206,6 +206,7 @@ struct cdc_state {
 };
 
 extern int usbnet_generic_cdc_bind(struct usbnet *, struct usb_interface *);
+extern int usbnet_ether_cdc_bind(struct usbnet *dev, struct usb_interface *intf);
 extern int usbnet_cdc_bind(struct usbnet *, struct usb_interface *);
 extern void usbnet_cdc_unbind(struct usbnet *, struct usb_interface *);
 extern void usbnet_cdc_status(struct usbnet *, struct urb *);
index 049af33da3b6c95897d544670cea65c542317673..cfc0437841665d7ed46a714915c50d723c24901c 100644 (file)
@@ -107,10 +107,16 @@ struct dst_entry {
        };
 };
 
+struct dst_metrics {
+       u32             metrics[RTAX_MAX];
+       atomic_t        refcnt;
+};
+extern const struct dst_metrics dst_default_metrics;
+
 u32 *dst_cow_metrics_generic(struct dst_entry *dst, unsigned long old);
-extern const u32 dst_default_metrics[];
 
 #define DST_METRICS_READ_ONLY          0x1UL
+#define DST_METRICS_REFCOUNTED         0x2UL
 #define DST_METRICS_FLAGS              0x3UL
 #define __DST_METRICS_PTR(Y)   \
        ((u32 *)((Y) & ~DST_METRICS_FLAGS))
index 6692c5758b332d468f1e0611ecc4f3e03ae03b2b..f7f6aa789c6174c41ca9739206d586c559c1f3a1 100644 (file)
@@ -114,11 +114,11 @@ struct fib_info {
        __be32                  fib_prefsrc;
        u32                     fib_tb_id;
        u32                     fib_priority;
-       u32                     *fib_metrics;
-#define fib_mtu fib_metrics[RTAX_MTU-1]
-#define fib_window fib_metrics[RTAX_WINDOW-1]
-#define fib_rtt fib_metrics[RTAX_RTT-1]
-#define fib_advmss fib_metrics[RTAX_ADVMSS-1]
+       struct dst_metrics      *fib_metrics;
+#define fib_mtu fib_metrics->metrics[RTAX_MTU-1]
+#define fib_window fib_metrics->metrics[RTAX_WINDOW-1]
+#define fib_rtt fib_metrics->metrics[RTAX_RTT-1]
+#define fib_advmss fib_metrics->metrics[RTAX_ADVMSS-1]
        int                     fib_nhs;
 #ifdef CONFIG_IP_ROUTE_MULTIPATH
        int                     fib_weight;
index f31fb6331a532eed7258177d5cfbb48fde9a594a..3248beaf16b0513283cd3c863695a77d09793cdc 100644 (file)
@@ -3,6 +3,7 @@
 
 #include <linux/types.h>
 #include <net/act_api.h>
+#include <linux/tc_act/tc_csum.h>
 
 struct tcf_csum {
        struct tc_action common;
@@ -11,4 +12,18 @@ struct tcf_csum {
 };
 #define to_tcf_csum(a) ((struct tcf_csum *)a)
 
+static inline bool is_tcf_csum(const struct tc_action *a)
+{
+#ifdef CONFIG_NET_CLS_ACT
+       if (a->ops && a->ops->type == TCA_ACT_CSUM)
+               return true;
+#endif
+       return false;
+}
+
+static inline u32 tcf_csum_update_flags(const struct tc_action *a)
+{
+       return to_tcf_csum(a)->update_flags;
+}
+
 #endif /* __NET_TC_CSUM_H */
index 6793a30c66b1f054516a27229c29037fcdfa2f6f..7e7e2b0d29157047fa0d3596b3f97cf501d754f9 100644 (file)
@@ -979,10 +979,6 @@ struct xfrm_dst {
        struct flow_cache_object flo;
        struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
        int num_pols, num_xfrms;
-#ifdef CONFIG_XFRM_SUB_POLICY
-       struct flowi *origin;
-       struct xfrm_selector *partner;
-#endif
        u32 xfrm_genid;
        u32 policy_genid;
        u32 route_mtu_cached;
@@ -998,12 +994,6 @@ static inline void xfrm_dst_destroy(struct xfrm_dst *xdst)
        dst_release(xdst->route);
        if (likely(xdst->u.dst.xfrm))
                xfrm_state_put(xdst->u.dst.xfrm);
-#ifdef CONFIG_XFRM_SUB_POLICY
-       kfree(xdst->origin);
-       xdst->origin = NULL;
-       kfree(xdst->partner);
-       xdst->partner = NULL;
-#endif
 }
 #endif
 
index f5f70e345318151356e022ce46b623a357031920..355b81f4242defd82a3802cd47e2a28abfb24ee5 100644 (file)
@@ -158,7 +158,6 @@ enum sa_path_rec_type {
 };
 
 struct sa_path_rec_ib {
-       __be64       service_id;
        __be16       dlid;
        __be16       slid;
        u8           raw_traffic;
@@ -174,7 +173,6 @@ struct sa_path_rec_roce {
 };
 
 struct sa_path_rec_opa {
-       __be64       service_id;
        __be32       dlid;
        __be32       slid;
        u8           raw_traffic;
@@ -189,6 +187,7 @@ struct sa_path_rec_opa {
 struct sa_path_rec {
        union ib_gid dgid;
        union ib_gid sgid;
+       __be64       service_id;
        /* reserved */
        __be32       flow_label;
        u8           hop_limit;
@@ -262,7 +261,7 @@ static inline void path_conv_opa_to_ib(struct sa_path_rec *ib,
                ib->ib.dlid     = htons(ntohl(opa->opa.dlid));
                ib->ib.slid     = htons(ntohl(opa->opa.slid));
        }
-       ib->ib.service_id       = opa->opa.service_id;
+       ib->service_id          = opa->service_id;
        ib->ib.raw_traffic      = opa->opa.raw_traffic;
 }
 
@@ -281,7 +280,7 @@ static inline void path_conv_ib_to_opa(struct sa_path_rec *opa,
        }
        opa->opa.slid           = slid;
        opa->opa.dlid           = dlid;
-       opa->opa.service_id     = ib->ib.service_id;
+       opa->service_id         = ib->service_id;
        opa->opa.raw_traffic    = ib->ib.raw_traffic;
 }
 
@@ -591,15 +590,6 @@ static inline bool sa_path_is_roce(struct sa_path_rec *rec)
                (rec->rec_type == SA_PATH_REC_TYPE_ROCE_V2));
 }
 
-static inline void sa_path_set_service_id(struct sa_path_rec *rec,
-                                         __be64 service_id)
-{
-       if (rec->rec_type == SA_PATH_REC_TYPE_IB)
-               rec->ib.service_id = service_id;
-       else if (rec->rec_type == SA_PATH_REC_TYPE_OPA)
-               rec->opa.service_id = service_id;
-}
-
 static inline void sa_path_set_slid(struct sa_path_rec *rec, __be32 slid)
 {
        if (rec->rec_type == SA_PATH_REC_TYPE_IB)
@@ -625,15 +615,6 @@ static inline void sa_path_set_raw_traffic(struct sa_path_rec *rec,
                rec->opa.raw_traffic = raw_traffic;
 }
 
-static inline __be64 sa_path_get_service_id(struct sa_path_rec *rec)
-{
-       if (rec->rec_type == SA_PATH_REC_TYPE_IB)
-               return rec->ib.service_id;
-       else if (rec->rec_type == SA_PATH_REC_TYPE_OPA)
-               return rec->opa.service_id;
-       return 0;
-}
-
 static inline __be32 sa_path_get_slid(struct sa_path_rec *rec)
 {
        if (rec->rec_type == SA_PATH_REC_TYPE_IB)
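
Since service_id now lives in the type-independent head of struct
sa_path_rec, the per-type accessors removed above collapse into plain field
accesses; an illustrative before/after:

	/* before: had to dispatch on rec_type */
	sa_path_set_service_id(rec, cpu_to_be64(sid));

	/* after: one field, valid for IB, OPA and RoCE records alike */
	rec->service_id = cpu_to_be64(sid);
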
index 5852661443290d52eb8a3e719716452d752b6eaa..348c102cb5f6afdbe9f90a7c788431bac0219226 100644 (file)
@@ -10,9 +10,6 @@ struct ibnl_client_cbs {
        struct module *module;
 };
 
-int ibnl_init(void);
-void ibnl_cleanup(void);
-
 /**
  * Add a client to the list of IB netlink exporters.
  * @index: Index of the added client
@@ -77,11 +74,4 @@ int ibnl_unicast(struct sk_buff *skb, struct nlmsghdr *nlh,
 int ibnl_multicast(struct sk_buff *skb, struct nlmsghdr *nlh,
                        unsigned int group, gfp_t flags);
 
-/**
- * Check if there are any listeners to the netlink group
- * @group: the netlink group ID
- * Returns 0 on success or a negative for no listeners.
- */
-int ibnl_chk_listeners(unsigned int group);
-
 #endif /* _RDMA_NETLINK_H */
index 275581d483ddd90d97c550ee8bf44d705833ecf8..5f17fb770477bbdfa2729a7b35cf21f70493515e 100644 (file)
@@ -557,6 +557,7 @@ struct iscsi_conn {
 #define LOGIN_FLAGS_READ_ACTIVE                1
 #define LOGIN_FLAGS_CLOSED             2
 #define LOGIN_FLAGS_READY              4
+#define LOGIN_FLAGS_INITIAL_PDU                8
        unsigned long           login_flags;
        struct delayed_work     login_work;
        struct delayed_work     login_cleanup_work;
index 5e00b2333c26e3256a6f4dc51a2a58011a2c9db6..172dc8ee0e3bda95c39f7e037e7a33baf8012214 100644 (file)
@@ -86,6 +86,7 @@ static struct bpf_map *array_map_alloc(union bpf_attr *attr)
        array->map.key_size = attr->key_size;
        array->map.value_size = attr->value_size;
        array->map.max_entries = attr->max_entries;
+       array->map.map_flags = attr->map_flags;
        array->elem_size = elem_size;
 
        if (!percpu)
index 39cfafd895b80d2f0fb5054626af001949a8ad98..b09185f0f17d3428ac445de2ed52756004ca5368 100644 (file)
@@ -432,6 +432,7 @@ static struct bpf_map *trie_alloc(union bpf_attr *attr)
        trie->map.key_size = attr->key_size;
        trie->map.value_size = attr->value_size;
        trie->map.max_entries = attr->max_entries;
+       trie->map.map_flags = attr->map_flags;
        trie->data_size = attr->key_size -
                          offsetof(struct bpf_lpm_trie_key, data);
        trie->max_prefixlen = trie->data_size * 8;
index 4dfd6f2ec2f9725681f490971e60acc8033c64b6..31147d730abf532cc8f10efc9a871b5dd498928d 100644 (file)
@@ -88,6 +88,7 @@ static struct bpf_map *stack_map_alloc(union bpf_attr *attr)
        smap->map.key_size = attr->key_size;
        smap->map.value_size = value_size;
        smap->map.max_entries = attr->max_entries;
+       smap->map.map_flags = attr->map_flags;
        smap->n_buckets = n_buckets;
        smap->map.pages = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT;
 
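
All three allocators now record attr->map_flags in the map, so the flags a
map was created with are preserved and reported back correctly (e.g. via
fdinfo). A userspace sketch of where that value originates, using the raw
bpf(2) syscall:

#include <linux/bpf.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

int example_create_lpm_trie(void)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.map_type    = BPF_MAP_TYPE_LPM_TRIE;
	attr.key_size    = 8;	/* 4-byte prefixlen + 4 bytes of data */
	attr.value_size  = 8;
	attr.max_entries = 16;
	attr.map_flags   = BPF_F_NO_PREALLOC;	/* required for LPM tries */

	/* the kernel now keeps this map_flags value in map->map_flags */
	return syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
}
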
index c72cd41f5b8b9c2e05b1f65aaf4730fb9a285ed0..339c8a1371de0201df0f7ac799280168ea4d22e3 100644 (file)
@@ -463,19 +463,22 @@ static const int caller_saved[CALLER_SAVED_REGS] = {
        BPF_REG_0, BPF_REG_1, BPF_REG_2, BPF_REG_3, BPF_REG_4, BPF_REG_5
 };
 
+static void mark_reg_not_init(struct bpf_reg_state *regs, u32 regno)
+{
+       BUG_ON(regno >= MAX_BPF_REG);
+
+       memset(&regs[regno], 0, sizeof(regs[regno]));
+       regs[regno].type = NOT_INIT;
+       regs[regno].min_value = BPF_REGISTER_MIN_RANGE;
+       regs[regno].max_value = BPF_REGISTER_MAX_RANGE;
+}
+
 static void init_reg_state(struct bpf_reg_state *regs)
 {
        int i;
 
-       for (i = 0; i < MAX_BPF_REG; i++) {
-               regs[i].type = NOT_INIT;
-               regs[i].imm = 0;
-               regs[i].min_value = BPF_REGISTER_MIN_RANGE;
-               regs[i].max_value = BPF_REGISTER_MAX_RANGE;
-               regs[i].min_align = 0;
-               regs[i].aux_off = 0;
-               regs[i].aux_off_align = 0;
-       }
+       for (i = 0; i < MAX_BPF_REG; i++)
+               mark_reg_not_init(regs, i);
 
        /* frame pointer */
        regs[BPF_REG_FP].type = FRAME_PTR;
@@ -843,9 +846,6 @@ static int check_ptr_alignment(struct bpf_verifier_env *env,
 {
        bool strict = env->strict_alignment;
 
-       if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS))
-               strict = true;
-
        switch (reg->type) {
        case PTR_TO_PACKET:
                return check_pkt_ptr_alignment(reg, off, size, strict);
@@ -1349,7 +1349,6 @@ static int check_call(struct bpf_verifier_env *env, int func_id, int insn_idx)
        struct bpf_verifier_state *state = &env->cur_state;
        const struct bpf_func_proto *fn = NULL;
        struct bpf_reg_state *regs = state->regs;
-       struct bpf_reg_state *reg;
        struct bpf_call_arg_meta meta;
        bool changes_data;
        int i, err;
@@ -1416,11 +1415,8 @@ static int check_call(struct bpf_verifier_env *env, int func_id, int insn_idx)
        }
 
        /* reset caller saved regs */
-       for (i = 0; i < CALLER_SAVED_REGS; i++) {
-               reg = regs + caller_saved[i];
-               reg->type = NOT_INIT;
-               reg->imm = 0;
-       }
+       for (i = 0; i < CALLER_SAVED_REGS; i++)
+               mark_reg_not_init(regs, caller_saved[i]);
 
        /* update return register */
        if (fn->ret_type == RET_INTEGER) {
@@ -2448,7 +2444,6 @@ static int check_ld_abs(struct bpf_verifier_env *env, struct bpf_insn *insn)
 {
        struct bpf_reg_state *regs = env->cur_state.regs;
        u8 mode = BPF_MODE(insn->code);
-       struct bpf_reg_state *reg;
        int i, err;
 
        if (!may_access_skb(env->prog->type)) {
@@ -2481,11 +2476,8 @@ static int check_ld_abs(struct bpf_verifier_env *env, struct bpf_insn *insn)
        }
 
        /* reset caller saved regs to unreadable */
-       for (i = 0; i < CALLER_SAVED_REGS; i++) {
-               reg = regs + caller_saved[i];
-               reg->type = NOT_INIT;
-               reg->imm = 0;
-       }
+       for (i = 0; i < CALLER_SAVED_REGS; i++)
+               mark_reg_not_init(regs, caller_saved[i]);
 
        /* mark destination R0 register as readable, since it contains
         * the value fetched from the packet
@@ -2696,7 +2688,8 @@ err_free:
 /* the following conditions reduce the number of explored insns
  * from ~140k to ~80k for ultra large programs that use a lot of ptr_to_packet
  */
-static bool compare_ptrs_to_packet(struct bpf_reg_state *old,
+static bool compare_ptrs_to_packet(struct bpf_verifier_env *env,
+                                  struct bpf_reg_state *old,
                                   struct bpf_reg_state *cur)
 {
        if (old->id != cur->id)
@@ -2739,7 +2732,7 @@ static bool compare_ptrs_to_packet(struct bpf_reg_state *old,
         * 'if (R4 > data_end)' and all further insn were already good with r=20,
         * so they will be good with r=30 and we can prune the search.
         */
-       if (old->off <= cur->off &&
+       if (!env->strict_alignment && old->off <= cur->off &&
            old->off >= old->range && cur->off >= cur->range)
                return true;
 
@@ -2810,7 +2803,7 @@ static bool states_equal(struct bpf_verifier_env *env,
                        continue;
 
                if (rold->type == PTR_TO_PACKET && rcur->type == PTR_TO_PACKET &&
-                   compare_ptrs_to_packet(rold, rcur))
+                   compare_ptrs_to_packet(env, rold, rcur))
                        continue;
 
                return false;
@@ -3588,10 +3581,10 @@ int bpf_check(struct bpf_prog **prog, union bpf_attr *attr)
        } else {
                log_level = 0;
        }
-       if (attr->prog_flags & BPF_F_STRICT_ALIGNMENT)
+
+       env->strict_alignment = !!(attr->prog_flags & BPF_F_STRICT_ALIGNMENT);
+       if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS))
                env->strict_alignment = true;
-       else
-               env->strict_alignment = false;
 
        ret = replace_map_fd_with_map_ptr(env);
        if (ret < 0)
@@ -3697,7 +3690,10 @@ int bpf_analyzer(struct bpf_prog *prog, const struct bpf_ext_analyzer_ops *ops,
        mutex_lock(&bpf_verifier_lock);
 
        log_level = 0;
+
        env->strict_alignment = false;
+       if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS))
+               env->strict_alignment = true;
 
        env->explored_states = kcalloc(env->prog->len,
                                       sizeof(struct bpf_verifier_state_list *),
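
For reference, the flag being normalized here is supplied at load time from
userspace; a hedged sketch using the raw syscall, where insns and cnt stand
in for a real instruction array and the usual <linux/bpf.h> definitions are
assumed:

	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.prog_type  = BPF_PROG_TYPE_SOCKET_FILTER;
	attr.insns      = (unsigned long)insns;
	attr.insn_cnt   = cnt;
	attr.license    = (unsigned long)"GPL";
	attr.prog_flags = BPF_F_STRICT_ALIGNMENT;	/* opt in explicitly */

	/* on !CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS architectures the
	 * verifier now enforces strict alignment even without the flag */
	int pfd = syscall(__NR_bpf, BPF_PROG_LOAD, &attr, sizeof(attr));
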
index c3c9a0e1b3c9a474bd80b8cb10ea1049284474b0..8d4e85eae42c08481899e415075ee42c6d12f90f 100644 (file)
@@ -4265,6 +4265,11 @@ static void kill_css(struct cgroup_subsys_state *css)
 {
        lockdep_assert_held(&cgroup_mutex);
 
+       if (css->flags & CSS_DYING)
+               return;
+
+       css->flags |= CSS_DYING;
+
        /*
         * This must happen before css is disassociated with its cgroup.
         * See seq_css() for details.
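
The cpuset hunk below tests the new flag through a css_is_dying() helper
that is not part of this hunk; its assumed definition is along the lines of:

static inline bool css_is_dying(struct cgroup_subsys_state *css)
{
	return css->flags & CSS_DYING;
}
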
index f6501f4f6040b5a9c21e84aeb57e20906ef1c614..ae643412948added94f05d7efb120631f7798b44 100644 (file)
@@ -176,9 +176,9 @@ typedef enum {
 } cpuset_flagbits_t;
 
 /* convenient tests for these bits */
-static inline bool is_cpuset_online(const struct cpuset *cs)
+static inline bool is_cpuset_online(struct cpuset *cs)
 {
-       return test_bit(CS_ONLINE, &cs->flags);
+       return test_bit(CS_ONLINE, &cs->flags) && !css_is_dying(&cs->css);
 }
 
 static inline int is_cpu_exclusive(const struct cpuset *cs)
index f8c27d3ef3a1309a39b7d1c2798d8c8882aa4481..3de0b98c441465976121ec6fcdbba00c7e0d8a28 100644 (file)
@@ -7316,6 +7316,21 @@ int perf_event_account_interrupt(struct perf_event *event)
        return __perf_event_account_interrupt(event, 1);
 }
 
+static bool sample_is_allowed(struct perf_event *event, struct pt_regs *regs)
+{
+       /*
+        * Due to interrupt latency (AKA "skid"), we may enter the
+        * kernel before taking an overflow, even if the PMU is only
+        * counting user events.
+        * To avoid leaking information to userspace, we must always
+        * reject kernel samples when exclude_kernel is set.
+        */
+       if (event->attr.exclude_kernel && !user_mode(regs))
+               return false;
+
+       return true;
+}
+
 /*
  * Generic event overflow handling, sampling.
  */
@@ -7336,6 +7351,12 @@ static int __perf_event_overflow(struct perf_event *event,
 
        ret = __perf_event_account_interrupt(event, throttle);
 
+       /*
+        * For security, drop the skid kernel samples if necessary.
+        */
+       if (!sample_is_allowed(event, regs))
+               return ret;
+
        /*
         * XXX event_limit might not quite work as expected on inherited
         * events
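
The attribute being protected is the ordinary exclude_kernel bit; an
illustrative perf_event_open() call whose samples the new check keeps out of
kernel space (assumes <linux/perf_event.h> and <sys/syscall.h>):

	struct perf_event_attr attr = {
		.type           = PERF_TYPE_HARDWARE,
		.config         = PERF_COUNT_HW_CPU_CYCLES,
		.size           = sizeof(struct perf_event_attr),
		.sample_period  = 100000,
		.sample_type    = PERF_SAMPLE_IP,
		.exclude_kernel = 1,	/* no kernel IPs may leak */
	};
	int fd = syscall(__NR_perf_event_open, &attr,
			 0 /* self */, -1 /* any cpu */,
			 -1 /* no group */, 0 /* no flags */);
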
index aa1076c5e4a9f3a5d9e6f58fef1c6f34e332de8c..e53770d2bf956bf5bf6ec1a42c75121552713da6 100644 (file)
@@ -1577,6 +1577,18 @@ static __latent_entropy struct task_struct *copy_process(
        if (!p)
                goto fork_out;
 
+       /*
+        * This _must_ happen before we call free_task(), i.e. before we jump
+        * to any of the bad_fork_* labels. This is to avoid freeing
+        * p->set_child_tid which is (ab)used as a kthread's data pointer for
+        * kernel threads (PF_KTHREAD).
+        */
+       p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL;
+       /*
+        * Clear TID on mm_release()?
+        */
+       p->clear_child_tid = (clone_flags & CLONE_CHILD_CLEARTID) ? child_tidptr : NULL;
+
        ftrace_graph_init_task(p);
 
        rt_mutex_init_task(p);
@@ -1743,11 +1755,6 @@ static __latent_entropy struct task_struct *copy_process(
                }
        }
 
-       p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL;
-       /*
-        * Clear TID on mm_release()?
-        */
-       p->clear_child_tid = (clone_flags & CLONE_CHILD_CLEARTID) ? child_tidptr : NULL;
 #ifdef CONFIG_BLOCK
        p->plug = NULL;
 #endif
index 2d2d3a568e4e8b3aeedb16711c90e660f698d6cf..adfe3b4cfe05a101bcee0da8e72db6fe6a10cf72 100644 (file)
@@ -122,7 +122,7 @@ static void *alloc_insn_page(void)
        return module_alloc(PAGE_SIZE);
 }
 
-static void free_insn_page(void *page)
+void __weak free_insn_page(void *page)
 {
        module_memfree(page);
 }
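
Marking the function __weak turns it into an overridable default: an
architecture supplies its own behavior simply by providing a strong
definition with the same signature. A hypothetical arch-side override:

/* e.g. in arch/<arch>/kernel/kprobes.c (illustrative only) */
void free_insn_page(void *page)
{
	/* undo arch-specific page-permission tweaks first (hypothetical
	 * helper), then free through the common path */
	arch_unprotect_insn_page(page);
	module_memfree(page);
}
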
index 0450225579367eee10de6695679fffa7b963f013..ec4565122e6553f490dfb2434b6c14102b152bb2 100644 (file)
@@ -10,6 +10,7 @@ config LIVEPATCH
        depends on SYSFS
        depends on KALLSYMS_ALL
        depends on HAVE_LIVEPATCH
+       depends on !TRIM_UNUSED_KSYMS
        help
          Say Y here if you want to support kernel live patching.
          This option has no runtime impact until a kernel "patch"
index b9550941690915bc23323cd772e060cda7d3a704..28cd09e635ed669fe6c93b28eb1d59e46bbf4615 100644 (file)
@@ -1785,12 +1785,14 @@ int rt_mutex_wait_proxy_lock(struct rt_mutex *lock,
        int ret;
 
        raw_spin_lock_irq(&lock->wait_lock);
-
-       set_current_state(TASK_INTERRUPTIBLE);
-
        /* sleep on the mutex */
+       set_current_state(TASK_INTERRUPTIBLE);
        ret = __rt_mutex_slowlock(lock, TASK_INTERRUPTIBLE, to, waiter);
-
+       /*
+        * try_to_take_rt_mutex() sets the waiter bit unconditionally. We might
+        * have to fix that up.
+        */
+       fixup_rt_mutex_waiters(lock);
        raw_spin_unlock_irq(&lock->wait_lock);
 
        return ret;
@@ -1821,16 +1823,26 @@ bool rt_mutex_cleanup_proxy_lock(struct rt_mutex *lock,
        bool cleanup = false;
 
        raw_spin_lock_irq(&lock->wait_lock);
+       /*
+        * Do an unconditional try-lock; this deals with the lock stealing
+        * state where __rt_mutex_futex_unlock() -> mark_wakeup_next_waiter()
+        * sets a NULL owner.
+        *
+        * We're not interested in the return value, because the subsequent
+        * test on rt_mutex_owner() will infer that. If the trylock succeeded,
+        * we will own the lock and it will have removed the waiter. If we
+        * failed the trylock, we're still not owner and we need to remove
+        * ourselves.
+        */
+       try_to_take_rt_mutex(lock, current, waiter);
        /*
         * Unless we're the owner, we're still enqueued on the wait_list.
         * So check if we became owner, if not, take us off the wait_list.
         */
        if (rt_mutex_owner(lock) != current) {
                remove_waiter(lock, waiter);
-               fixup_rt_mutex_waiters(lock);
                cleanup = true;
        }
-
        /*
         * try_to_take_rt_mutex() sets the waiter bit unconditionally. We might
         * have to fix that up.
index 266ddcc1d8bbbc6af7bceda3657618beef2a9c59..60f356d91060c8974268cc5bc02d57c75d359afc 100644 (file)
@@ -60,19 +60,25 @@ int ptrace_access_vm(struct task_struct *tsk, unsigned long addr,
 }
 
 
+void __ptrace_link(struct task_struct *child, struct task_struct *new_parent,
+                  const struct cred *ptracer_cred)
+{
+       BUG_ON(!list_empty(&child->ptrace_entry));
+       list_add(&child->ptrace_entry, &new_parent->ptraced);
+       child->parent = new_parent;
+       child->ptracer_cred = get_cred(ptracer_cred);
+}
+
 /*
  * ptrace a task: make the debugger its new parent and
  * move it to the ptrace list.
  *
  * Must be called with the tasklist lock write-held.
  */
-void __ptrace_link(struct task_struct *child, struct task_struct *new_parent)
+static void ptrace_link(struct task_struct *child, struct task_struct *new_parent)
 {
-       BUG_ON(!list_empty(&child->ptrace_entry));
-       list_add(&child->ptrace_entry, &new_parent->ptraced);
-       child->parent = new_parent;
        rcu_read_lock();
-       child->ptracer_cred = get_cred(__task_cred(new_parent));
+       __ptrace_link(child, new_parent, __task_cred(new_parent));
        rcu_read_unlock();
 }
 
@@ -386,7 +392,7 @@ static int ptrace_attach(struct task_struct *task, long request,
                flags |= PT_SEIZED;
        task->ptrace = flags;
 
-       __ptrace_link(task, current);
+       ptrace_link(task, current);
 
        /* SEIZE doesn't trap tracee on attach */
        if (!seize)
@@ -459,7 +465,7 @@ static int ptrace_traceme(void)
                 */
                if (!ret && !(current->real_parent->flags & PF_EXITING)) {
                        current->ptrace = PT_PTRACED;
-                       __ptrace_link(current, current->real_parent);
+                       ptrace_link(current, current->real_parent);
                }
        }
        write_unlock_irq(&tasklist_lock);
index 1370f067fb51511f26d578cb3954032952a62027..d2a1e6dd02913f4beb58b1d79be12d72761deec3 100644 (file)
@@ -825,8 +825,10 @@ static void check_thread_timers(struct task_struct *tsk,
                         * At the hard limit, we just die.
                         * No need to calculate anything else now.
                         */
-                       pr_info("CPU Watchdog Timeout (hard): %s[%d]\n",
-                               tsk->comm, task_pid_nr(tsk));
+                       if (print_fatal_signals) {
+                               pr_info("CPU Watchdog Timeout (hard): %s[%d]\n",
+                                       tsk->comm, task_pid_nr(tsk));
+                       }
                        __group_send_sig_info(SIGKILL, SEND_SIG_PRIV, tsk);
                        return;
                }
@@ -838,8 +840,10 @@ static void check_thread_timers(struct task_struct *tsk,
                                soft += USEC_PER_SEC;
                                sig->rlim[RLIMIT_RTTIME].rlim_cur = soft;
                        }
-                       pr_info("RT Watchdog Timeout (soft): %s[%d]\n",
-                               tsk->comm, task_pid_nr(tsk));
+                       if (print_fatal_signals) {
+                               pr_info("RT Watchdog Timeout (soft): %s[%d]\n",
+                                       tsk->comm, task_pid_nr(tsk));
+                       }
                        __group_send_sig_info(SIGXCPU, SEND_SIG_PRIV, tsk);
                }
        }
@@ -936,8 +940,10 @@ static void check_process_timers(struct task_struct *tsk,
                         * At the hard limit, we just die.
                         * No need to calculate anything else now.
                         */
-                       pr_info("RT Watchdog Timeout (hard): %s[%d]\n",
-                               tsk->comm, task_pid_nr(tsk));
+                       if (print_fatal_signals) {
+                               pr_info("RT Watchdog Timeout (hard): %s[%d]\n",
+                                       tsk->comm, task_pid_nr(tsk));
+                       }
                        __group_send_sig_info(SIGKILL, SEND_SIG_PRIV, tsk);
                        return;
                }
@@ -945,8 +951,10 @@ static void check_process_timers(struct task_struct *tsk,
                        /*
                         * At the soft limit, send a SIGXCPU every second.
                         */
-                       pr_info("CPU Watchdog Timeout (soft): %s[%d]\n",
-                               tsk->comm, task_pid_nr(tsk));
+                       if (print_fatal_signals) {
+                               pr_info("CPU Watchdog Timeout (soft): %s[%d]\n",
+                                       tsk->comm, task_pid_nr(tsk));
+                       }
                        __group_send_sig_info(SIGXCPU, SEND_SIG_PRIV, tsk);
                        if (soft < hard) {
                                soft++;
index 74fdfe9ed3dba7fa659cb11feafac25919984bbb..9e5841dc14b5fa62e0c1470df704c3dc9b99f8ad 100644 (file)
@@ -5063,7 +5063,7 @@ ftrace_graph_release(struct inode *inode, struct file *file)
        }
 
  out:
-       kfree(fgd->new_hash);
+       free_ftrace_hash(fgd->new_hash);
        kfree(fgd);
 
        return ret;
index 889bc31785bee57901ae969f16a55390db8265c4..be88cbaadde3aeed809b2997e2ef683fba62dba4 100644 (file)
@@ -4504,6 +4504,44 @@ static struct bpf_test tests[] = {
                { },
                { { 0, 1 } },
        },
+       {
+               "JMP_JSGE_K: Signed jump: value walk 1",
+               .u.insns_int = {
+                       BPF_ALU32_IMM(BPF_MOV, R0, 0),
+                       BPF_LD_IMM64(R1, -3),
+                       BPF_JMP_IMM(BPF_JSGE, R1, 0, 6),
+                       BPF_ALU64_IMM(BPF_ADD, R1, 1),
+                       BPF_JMP_IMM(BPF_JSGE, R1, 0, 4),
+                       BPF_ALU64_IMM(BPF_ADD, R1, 1),
+                       BPF_JMP_IMM(BPF_JSGE, R1, 0, 2),
+                       BPF_ALU64_IMM(BPF_ADD, R1, 1),
+                       BPF_JMP_IMM(BPF_JSGE, R1, 0, 1),
+                       BPF_EXIT_INSN(),                /* bad exit */
+                       BPF_ALU32_IMM(BPF_MOV, R0, 1),  /* good exit */
+                       BPF_EXIT_INSN(),
+               },
+               INTERNAL,
+               { },
+               { { 0, 1 } },
+       },
+       {
+               "JMP_JSGE_K: Signed jump: value walk 2",
+               .u.insns_int = {
+                       BPF_ALU32_IMM(BPF_MOV, R0, 0),
+                       BPF_LD_IMM64(R1, -3),
+                       BPF_JMP_IMM(BPF_JSGE, R1, 0, 4),
+                       BPF_ALU64_IMM(BPF_ADD, R1, 2),
+                       BPF_JMP_IMM(BPF_JSGE, R1, 0, 2),
+                       BPF_ALU64_IMM(BPF_ADD, R1, 2),
+                       BPF_JMP_IMM(BPF_JSGE, R1, 0, 1),
+                       BPF_EXIT_INSN(),                /* bad exit */
+                       BPF_ALU32_IMM(BPF_MOV, R0, 1),  /* good exit */
+                       BPF_EXIT_INSN(),
+               },
+               INTERNAL,
+               { },
+               { { 0, 1 } },
+       },
        /* BPF_JMP | BPF_JGT | BPF_K */
        {
                "JMP_JGT_K: if (3 > 2) return 1",
index d9e6fddcc51f06a1286c56a24c510c1a3efa8add..b3c7214d710d5ea8bab8648b5182c53d882f3c31 100644 (file)
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -407,12 +407,10 @@ static int faultin_page(struct task_struct *tsk, struct vm_area_struct *vma,
 
        ret = handle_mm_fault(vma, address, fault_flags);
        if (ret & VM_FAULT_ERROR) {
-               if (ret & VM_FAULT_OOM)
-                       return -ENOMEM;
-               if (ret & (VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE))
-                       return *flags & FOLL_HWPOISON ? -EHWPOISON : -EFAULT;
-               if (ret & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV))
-                       return -EFAULT;
+               int err = vm_fault_to_errno(ret, *flags);
+
+               if (err)
+                       return err;
                BUG();
        }
 
@@ -723,12 +721,10 @@ retry:
        ret = handle_mm_fault(vma, address, fault_flags);
        major |= ret & VM_FAULT_MAJOR;
        if (ret & VM_FAULT_ERROR) {
-               if (ret & VM_FAULT_OOM)
-                       return -ENOMEM;
-               if (ret & (VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE))
-                       return -EHWPOISON;
-               if (ret & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV))
-                       return -EFAULT;
+               int err = vm_fault_to_errno(ret, 0);
+
+               if (err)
+                       return err;
                BUG();
        }
 
index e5828875f7bbd7a770d5c23334a0e3994ffe544f..3eedb187e5496f36f7f3186267f475254bcda5ab 100644 (file)
@@ -4170,6 +4170,11 @@ long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
                        }
                        ret = hugetlb_fault(mm, vma, vaddr, fault_flags);
                        if (ret & VM_FAULT_ERROR) {
+                               int err = vm_fault_to_errno(ret, flags);
+
+                               if (err)
+                                       return err;
+
                                remainder = 0;
                                break;
                        }
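
The vm_fault_to_errno() helper that the gup.c and hugetlb call sites above
switch to is introduced elsewhere in this series, in include/linux/mm.h; a
sketch consistent with the open-coded checks it replaces:

static inline int vm_fault_to_errno(int vm_fault, int foll_flags)
{
	if (vm_fault & VM_FAULT_OOM)
		return -ENOMEM;
	if (vm_fault & (VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE))
		return (foll_flags & FOLL_HWPOISON) ? -EHWPOISON : -EFAULT;
	if (vm_fault & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV))
		return -EFAULT;
	return 0;	/* any other VM_FAULT_ERROR bit makes the caller BUG() */
}
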
index d9fc0e4561283d9a351f6dd7c4cfb74ad26ab566..216184af0e192b5405efc5a594129fa7c53ae953 100644 (file)
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -1028,8 +1028,7 @@ static int try_to_merge_one_page(struct vm_area_struct *vma,
                goto out;
 
        if (PageTransCompound(page)) {
-               err = split_huge_page(page);
-               if (err)
+               if (split_huge_page(page))
                        goto out_unlock;
        }
 
index b049c9b2dba8718a6f57777591c2f2753d8d40ad..7b8a5db76a2fec7331f09048f3c19ebdfddf9f16 100644 (file)
@@ -1739,6 +1739,29 @@ static void __init_memblock memblock_dump(struct memblock_type *type)
        }
 }
 
+extern unsigned long __init_memblock
+memblock_reserved_memory_within(phys_addr_t start_addr, phys_addr_t end_addr)
+{
+       struct memblock_region *rgn;
+       unsigned long size = 0;
+       int idx;
+
+       for_each_memblock_type((&memblock.reserved), rgn) {
+               phys_addr_t start, end;
+
+               if (rgn->base + rgn->size < start_addr)
+                       continue;
+               if (rgn->base > end_addr)
+                       continue;
+
+               start = rgn->base;
+               end = start + rgn->size;
+               size += end - start;
+       }
+
+       return size;
+}
+
 void __init_memblock __memblock_dump_all(void)
 {
        pr_info("MEMBLOCK configuration:\n");
index 2527dfeddb003d245ac2e2bd964134030426f777..342fac9ba89b0da3e207b1fdaef2be71c9837a24 100644 (file)
@@ -1595,12 +1595,8 @@ static int soft_offline_huge_page(struct page *page, int flags)
        if (ret) {
                pr_info("soft offline: %#lx: migration failed %d, type %lx (%pGp)\n",
                        pfn, ret, page->flags, &page->flags);
-               /*
-                * We know that soft_offline_huge_page() tries to migrate
-                * only one hugepage pointed to by hpage, so we need not
-                * run through the pagelist here.
-                */
-               putback_active_hugepage(hpage);
+               if (!list_empty(&pagelist))
+                       putback_movable_pages(&pagelist);
                if (ret > 0)
                        ret = -EIO;
        } else {
index 6ff5d729ded0ecd3a5607d10248697a786091f7e..2e65df1831d941dcd1282c56312bdbd153df0a79 100644 (file)
@@ -3029,6 +3029,17 @@ static int __do_fault(struct vm_fault *vmf)
        return ret;
 }
 
+/*
+ * The ordering of these checks is important for pmds with _PAGE_DEVMAP set.
+ * If we check pmd_trans_unstable() first we will trip the bad_pmd() check
+ * inside of pmd_none_or_trans_huge_or_clear_bad(). This will end up correctly
+ * returning 1 but not before it spams dmesg with the pmd_clear_bad() output.
+ */
+static int pmd_devmap_trans_unstable(pmd_t *pmd)
+{
+       return pmd_devmap(*pmd) || pmd_trans_unstable(pmd);
+}
+
 static int pte_alloc_one_map(struct vm_fault *vmf)
 {
        struct vm_area_struct *vma = vmf->vma;
@@ -3052,18 +3063,27 @@ static int pte_alloc_one_map(struct vm_fault *vmf)
 map_pte:
        /*
         * If a huge pmd materialized under us just retry later.  Use
-        * pmd_trans_unstable() instead of pmd_trans_huge() to ensure the pmd
-        * didn't become pmd_trans_huge under us and then back to pmd_none, as
-        * a result of MADV_DONTNEED running immediately after a huge pmd fault
-        * in a different thread of this mm, in turn leading to a misleading
-        * pmd_trans_huge() retval.  All we have to ensure is that it is a
-        * regular pmd that we can walk with pte_offset_map() and we can do that
-        * through an atomic read in C, which is what pmd_trans_unstable()
-        * provides.
+        * pmd_trans_unstable() via pmd_devmap_trans_unstable() instead of
+        * pmd_trans_huge() to ensure the pmd didn't become pmd_trans_huge
+        * under us and then back to pmd_none, as a result of MADV_DONTNEED
+        * running immediately after a huge pmd fault in a different thread of
+        * this mm, in turn leading to a misleading pmd_trans_huge() retval.
+        * All we have to ensure is that it is a regular pmd that we can walk
+        * with pte_offset_map() and we can do that through an atomic read in
+        * C, which is what pmd_trans_unstable() provides.
         */
-       if (pmd_trans_unstable(vmf->pmd) || pmd_devmap(*vmf->pmd))
+       if (pmd_devmap_trans_unstable(vmf->pmd))
                return VM_FAULT_NOPAGE;
 
+       /*
+        * At this point we know that our vmf->pmd points to a page of ptes
+        * and it cannot become pmd_none(), pmd_devmap() or pmd_trans_huge()
+        * for the duration of the fault.  If a racing MADV_DONTNEED runs and
+        * we zap the ptes pointed to by our vmf->pmd, the vmf->ptl will still
+        * be valid and we will re-check to make sure the vmf->pte isn't
+        * pte_none() under vmf->ptl protection when we return to
+        * alloc_set_pte().
+        */
        vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address,
                        &vmf->ptl);
        return 0;
@@ -3690,7 +3710,7 @@ static int handle_pte_fault(struct vm_fault *vmf)
                vmf->pte = NULL;
        } else {
                /* See comment in pte_alloc_one_map() */
-               if (pmd_trans_unstable(vmf->pmd) || pmd_devmap(*vmf->pmd))
+               if (pmd_devmap_trans_unstable(vmf->pmd))
                        return 0;
                /*
                 * A regular pmd is established and it can't morph into a huge
index c483c5c20b4bd12bcca50972c9f74a0dbd3a713e..b562b5523a6544e6c0ae6e4f792943441f6217a3 100644 (file)
@@ -284,7 +284,7 @@ static void __munlock_pagevec(struct pagevec *pvec, struct zone *zone)
 {
        int i;
        int nr = pagevec_count(pvec);
-       int delta_munlocked;
+       int delta_munlocked = -nr;
        struct pagevec pvec_putback;
        int pgrescued = 0;
 
@@ -304,6 +304,8 @@ static void __munlock_pagevec(struct pagevec *pvec, struct zone *zone)
                                continue;
                        else
                                __munlock_isolation_failed(page);
+               } else {
+                       delta_munlocked++;
                }
 
                /*
@@ -315,7 +317,6 @@ static void __munlock_pagevec(struct pagevec *pvec, struct zone *zone)
                pagevec_add(&pvec_putback, pvec->pages[i]);
                pvec->pages[i] = NULL;
        }
-       delta_munlocked = -nr + pagevec_count(&pvec_putback);
        __mod_zone_page_state(zone, NR_MLOCK, delta_munlocked);
        spin_unlock_irq(zone_lru_lock(zone));
 
index f9e450c6b6e414d61b00d5a61be9cdea3b773e1b..2302f250d6b1ba150e3c2e4e17cfb6c99574ab5b 100644 (file)
@@ -292,6 +292,26 @@ int page_group_by_mobility_disabled __read_mostly;
 #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
 static inline void reset_deferred_meminit(pg_data_t *pgdat)
 {
+       unsigned long max_initialise;
+       unsigned long reserved_lowmem;
+
+       /*
+        * Initialise at least 2G of a node, but also take into account the
+        * two large system hashes that can take up 1GB for 0.25TB/node.
+        */
+       max_initialise = max(2UL << (30 - PAGE_SHIFT),
+               (pgdat->node_spanned_pages >> 8));
+
+       /*
+        * Compensate for all the memblock reservations (e.g. crash kernel)
+        * from the initial estimation to make sure we will initialize enough
+        * memory to boot.
+        */
+       reserved_lowmem = memblock_reserved_memory_within(pgdat->node_start_pfn,
+                       pgdat->node_start_pfn + max_initialise);
+       max_initialise += reserved_lowmem;
+
+       pgdat->static_init_size = min(max_initialise, pgdat->node_spanned_pages);
        pgdat->first_deferred_pfn = ULONG_MAX;
 }
 
@@ -314,20 +334,11 @@ static inline bool update_defer_init(pg_data_t *pgdat,
                                unsigned long pfn, unsigned long zone_end,
                                unsigned long *nr_initialised)
 {
-       unsigned long max_initialise;
-
        /* Always populate low zones for address-constrained allocations */
        if (zone_end < pgdat_end_pfn(pgdat))
                return true;
-       /*
-        * Initialise at least 2G of a node but also take into account that
-        * two large system hashes that can take up 1GB for 0.25TB/node.
-        */
-       max_initialise = max(2UL << (30 - PAGE_SHIFT),
-               (pgdat->node_spanned_pages >> 8));
-
        (*nr_initialised)++;
-       if ((*nr_initialised > max_initialise) &&
+       if ((*nr_initialised > pgdat->static_init_size) &&
            (pfn & (PAGES_PER_SECTION - 1)) == 0) {
                pgdat->first_deferred_pfn = pfn;
                return false;
@@ -3870,7 +3881,9 @@ retry:
                goto got_pg;
 
        /* Avoid allocations with no watermarks from looping endlessly */
-       if (test_thread_flag(TIF_MEMDIE))
+       if (test_thread_flag(TIF_MEMDIE) &&
+           (alloc_flags == ALLOC_NO_WATERMARKS ||
+            (gfp_mask & __GFP_NOMEMALLOC)))
                goto nopage;
 
        /* Retry as long as the OOM killer is making progress */
@@ -6136,7 +6149,6 @@ void __paginginit free_area_init_node(int nid, unsigned long *zones_size,
        /* pg_data_t should be reset to zero when it's allocated */
        WARN_ON(pgdat->nr_zones || pgdat->kswapd_classzone_idx);
 
-       reset_deferred_meminit(pgdat);
        pgdat->node_id = nid;
        pgdat->node_start_pfn = node_start_pfn;
        pgdat->per_cpu_nodestats = NULL;
@@ -6158,6 +6170,7 @@ void __paginginit free_area_init_node(int nid, unsigned long *zones_size,
                (unsigned long)pgdat->node_mem_map);
 #endif
 
+       reset_deferred_meminit(pgdat);
        free_area_init_core(pgdat);
 }
 
index 57e5156f02be6bcc23e70ec801e9cc1c3bbdd631..7449593fca724147cef5b8f7a46752333e5e0585 100644 (file)
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -5512,6 +5512,7 @@ static void memcg_propagate_slab_attrs(struct kmem_cache *s)
                char mbuf[64];
                char *buf;
                struct slab_attribute *attr = to_slab_attr(slab_attrs[i]);
+               ssize_t len;
 
                if (!attr || !attr->store || !attr->show)
                        continue;
@@ -5536,8 +5537,9 @@ static void memcg_propagate_slab_attrs(struct kmem_cache *s)
                        buf = buffer;
                }
 
-               attr->show(root_cache, buf);
-               attr->store(s, buf, strlen(buf));
+               len = attr->show(root_cache, buf);
+               if (len > 0)
+                       attr->store(s, buf, len);
        }
 
        if (buffer)
index 464df34899031d46058b7cbadc1c3be24ae8dc89..26be6407abd7efe452a585d341a8d7e5b53d2b32 100644 (file)
--- a/mm/util.c
+++ b/mm/util.c
@@ -357,8 +357,11 @@ void *kvmalloc_node(size_t size, gfp_t flags, int node)
        WARN_ON_ONCE((flags & GFP_KERNEL) != GFP_KERNEL);
 
        /*
-        * Make sure that larger requests are not too disruptive - no OOM
-        * killer and no allocation failure warnings as we have a fallback
+        * We want to attempt a large physically contiguous block first because
+        * it is less likely to fragment multiple larger blocks and therefore
+        * contributes less to long-term fragmentation than the vmalloc fallback.
+        * However, make sure that larger requests are not too disruptive - no
+        * OOM killer and no allocation failure warnings, as we have a fallback.
         */
        if (size > PAGE_SIZE) {
                kmalloc_flags |= __GFP_NOWARN;
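
The contract the reworded comment spells out is what callers rely on; a
minimal illustrative use, where struct foo and nr_entries are placeholders:

	struct foo *tbl;

	/* the large kmalloc attempt is made quietly (no OOM killer, no
	 * failure warning); on failure kvmalloc falls back to vmalloc */
	tbl = kvmalloc(nr_entries * sizeof(*tbl), GFP_KERNEL);
	if (!tbl)
		return -ENOMEM;
	/* ... */
	kvfree(tbl);
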
index 2034fb9266700e6b456b141db9b5d264aed77d62..8757fb87dab871faaaeccaed0ac11d76521ad594 100644 (file)
@@ -151,7 +151,7 @@ static int process_one_ticket(struct ceph_auth_client *ac,
        struct timespec validity;
        void *tp, *tpend;
        void **ptp;
-       struct ceph_crypto_key new_session_key;
+       struct ceph_crypto_key new_session_key = { 0 };
        struct ceph_buffer *new_ticket_blob;
        unsigned long new_expires, new_renew_after;
        u64 new_secret_id;
@@ -215,6 +215,9 @@ static int process_one_ticket(struct ceph_auth_client *ac,
        dout(" ticket blob is %d bytes\n", dlen);
        ceph_decode_need(ptp, tpend, 1 + sizeof(u64), bad);
        blob_struct_v = ceph_decode_8(ptp);
+       if (blob_struct_v != 1)
+               goto bad;
+
        new_secret_id = ceph_decode_64(ptp);
        ret = ceph_decode_buffer(&new_ticket_blob, ptp, tpend);
        if (ret)
@@ -234,13 +237,13 @@ static int process_one_ticket(struct ceph_auth_client *ac,
             type, ceph_entity_type_name(type), th->secret_id,
             (int)th->ticket_blob->vec.iov_len);
        xi->have_keys |= th->service;
-
-out:
-       return ret;
+       return 0;
 
 bad:
        ret = -EINVAL;
-       goto out;
+out:
+       ceph_crypto_key_destroy(&new_session_key);
+       return ret;
 }
 
 static int ceph_x_proc_ticket_reply(struct ceph_auth_client *ac,
index 4fd02831beed20fb3d98475c7da2a0b4f2818513..47e94b560ba0d37daf2fb801fa11b9aa39720013 100644 (file)
@@ -56,19 +56,6 @@ static const struct kernel_param_ops param_ops_supported_features = {
 module_param_cb(supported_features, &param_ops_supported_features, NULL,
                S_IRUGO);
 
-/*
- * find filename portion of a path (/foo/bar/baz -> baz)
- */
-const char *ceph_file_part(const char *s, int len)
-{
-       const char *e = s + len;
-
-       while (e != s && *(e-1) != '/')
-               e--;
-       return e;
-}
-EXPORT_SYMBOL(ceph_file_part);
-
 const char *ceph_msg_type_name(int type)
 {
        switch (type) {
index 5766a6c896c4fa7290fffdbee239701ca4e2eeef..588a919300514ad2cd4b843c528bb41e601fa7d8 100644 (file)
@@ -1174,8 +1174,8 @@ static struct page *ceph_msg_data_next(struct ceph_msg_data_cursor *cursor,
  * Advances the cursor; cursor->need_crc records whether the result
  * moved on to the next piece of the data item.
  */
-static bool ceph_msg_data_advance(struct ceph_msg_data_cursor *cursor,
-                               size_t bytes)
+static void ceph_msg_data_advance(struct ceph_msg_data_cursor *cursor,
+                                 size_t bytes)
 {
        bool new_piece;
 
@@ -1207,8 +1207,6 @@ static bool ceph_msg_data_advance(struct ceph_msg_data_cursor *cursor,
                new_piece = true;
        }
        cursor->need_crc = new_piece;
-
-       return new_piece;
 }
 
 static size_t sizeof_footer(struct ceph_connection *con)
@@ -1577,7 +1575,6 @@ static int write_partial_message_data(struct ceph_connection *con)
                size_t page_offset;
                size_t length;
                bool last_piece;
-               bool need_crc;
                int ret;
 
                page = ceph_msg_data_next(cursor, &page_offset, &length,
@@ -1592,7 +1589,7 @@ static int write_partial_message_data(struct ceph_connection *con)
                }
                if (do_datacrc && cursor->need_crc)
                        crc = ceph_crc32c_page(crc, page, page_offset, length);
-               need_crc = ceph_msg_data_advance(cursor, (size_t)ret);
+               ceph_msg_data_advance(cursor, (size_t)ret);
        }
 
        dout("%s %p msg %p done\n", __func__, con, msg);
@@ -2231,10 +2228,18 @@ static void process_ack(struct ceph_connection *con)
        struct ceph_msg *m;
        u64 ack = le64_to_cpu(con->in_temp_ack);
        u64 seq;
+       bool reconnect = (con->in_tag == CEPH_MSGR_TAG_SEQ);
+       struct list_head *list = reconnect ? &con->out_queue : &con->out_sent;
 
-       while (!list_empty(&con->out_sent)) {
-               m = list_first_entry(&con->out_sent, struct ceph_msg,
-                                    list_head);
+       /*
+        * In the reconnect case, con_fault() has requeued messages
+        * in out_sent. We should clean up old messages according to
+        * the reconnect seq.
+        */
+       while (!list_empty(list)) {
+               m = list_first_entry(list, struct ceph_msg, list_head);
+               if (reconnect && m->needs_out_seq)
+                       break;
                seq = le64_to_cpu(m->hdr.seq);
                if (seq > ack)
                        break;
@@ -2243,6 +2248,7 @@ static void process_ack(struct ceph_connection *con)
                m->ack_stamp = jiffies;
                ceph_msg_remove(m);
        }
+
        prepare_read_tag(con);
 }
 
@@ -2299,7 +2305,7 @@ static int read_partial_msg_data(struct ceph_connection *con)
 
                if (do_datacrc)
                        crc = ceph_crc32c_page(crc, page, page_offset, ret);
-               (void) ceph_msg_data_advance(cursor, (size_t)ret);
+               ceph_msg_data_advance(cursor, (size_t)ret);
        }
        if (do_datacrc)
                con->in_data_crc = crc;
index 29a0ef351c5e068838fc1ed8f634439853ff0a3a..250f11f786092d902b52da5af9a228d9786acf62 100644 (file)
@@ -43,15 +43,13 @@ struct ceph_monmap *ceph_monmap_decode(void *p, void *end)
        int i, err = -EINVAL;
        struct ceph_fsid fsid;
        u32 epoch, num_mon;
-       u16 version;
        u32 len;
 
        ceph_decode_32_safe(&p, end, len, bad);
        ceph_decode_need(&p, end, len, bad);
 
        dout("monmap_decode %p %p len %d\n", p, end, (int)(end-p));
-
-       ceph_decode_16_safe(&p, end, version, bad);
+       p += sizeof(u16);  /* skip version */
 
        ceph_decode_need(&p, end, sizeof(fsid) + 2*sizeof(u32), bad);
        ceph_decode_copy(&p, &fsid, sizeof(fsid));
index ffe9e904d4d1d130b0353edbe45d50d236b4f74e..55e3a477f92d4cba92e342d13686b6e3b94204c0 100644 (file)
@@ -317,6 +317,7 @@ static struct crush_map *crush_decode(void *pbyval, void *end)
                u32 yes;
                struct crush_rule *r;
 
+               err = -EINVAL;
                ceph_decode_32_safe(p, end, yes, bad);
                if (!yes) {
                        dout("crush_decode NO rule %d off %x %p to %p\n",
index 960e503b5a529a2c4f1866f49c150493ee98d7da..6192f11beec9077de964e2aeff4f78547f08b8da 100644 (file)
@@ -151,13 +151,13 @@ int dst_discard_out(struct net *net, struct sock *sk, struct sk_buff *skb)
 }
 EXPORT_SYMBOL(dst_discard_out);
 
-const u32 dst_default_metrics[RTAX_MAX + 1] = {
+const struct dst_metrics dst_default_metrics = {
         /* This initializer is needed to force the linker to place this
          * variable into the const section; otherwise it might end up in the
          * bss section. We really want to avoid false sharing on this
          * variable, and catch any writes on it.
         */
-       [RTAX_MAX] = 0xdeadbeef,
+       .refcnt = ATOMIC_INIT(1),
 };
 
 void dst_init(struct dst_entry *dst, struct dst_ops *ops,
@@ -169,7 +169,7 @@ void dst_init(struct dst_entry *dst, struct dst_ops *ops,
        if (dev)
                dev_hold(dev);
        dst->ops = ops;
-       dst_init_metrics(dst, dst_default_metrics, true);
+       dst_init_metrics(dst, dst_default_metrics.metrics, true);
        dst->expires = 0UL;
        dst->path = dst;
        dst->from = NULL;
@@ -314,25 +314,30 @@ EXPORT_SYMBOL(dst_release);
 
 u32 *dst_cow_metrics_generic(struct dst_entry *dst, unsigned long old)
 {
-       u32 *p = kmalloc(sizeof(u32) * RTAX_MAX, GFP_ATOMIC);
+       struct dst_metrics *p = kmalloc(sizeof(*p), GFP_ATOMIC);
 
        if (p) {
-               u32 *old_p = __DST_METRICS_PTR(old);
+               struct dst_metrics *old_p = (struct dst_metrics *)__DST_METRICS_PTR(old);
                unsigned long prev, new;
 
-               memcpy(p, old_p, sizeof(u32) * RTAX_MAX);
+               atomic_set(&p->refcnt, 1);
+               memcpy(p->metrics, old_p->metrics, sizeof(p->metrics));
 
                new = (unsigned long) p;
                prev = cmpxchg(&dst->_metrics, old, new);
 
                if (prev != old) {
                        kfree(p);
-                       p = __DST_METRICS_PTR(prev);
+                       p = (struct dst_metrics *)__DST_METRICS_PTR(prev);
                        if (prev & DST_METRICS_READ_ONLY)
                                p = NULL;
+               } else if (prev & DST_METRICS_REFCOUNTED) {
+                       if (atomic_dec_and_test(&old_p->refcnt))
+                               kfree(old_p);
                }
        }
-       return p;
+       BUILD_BUG_ON(offsetof(struct dst_metrics, metrics) != 0);
+       return (u32 *)p;
 }
 EXPORT_SYMBOL(dst_cow_metrics_generic);
 
@@ -341,7 +346,7 @@ void __dst_destroy_metrics_generic(struct dst_entry *dst, unsigned long old)
 {
        unsigned long prev, new;
 
-       new = ((unsigned long) dst_default_metrics) | DST_METRICS_READ_ONLY;
+       new = ((unsigned long) &dst_default_metrics) | DST_METRICS_READ_ONLY;
        prev = cmpxchg(&dst->_metrics, old, new);
        if (prev == old)
                kfree(__DST_METRICS_PTR(old));
index a253a6197e6b37a7ae2fe451c646b01c861a3e22..a6bb95fa87b26a6627ba38cd9a94269c7e59f73d 100644 (file)
@@ -2281,6 +2281,7 @@ bool bpf_helper_changes_pkt_data(void *func)
            func == bpf_skb_change_head ||
            func == bpf_skb_change_tail ||
            func == bpf_skb_pull_data ||
+           func == bpf_clone_redirect ||
            func == bpf_l3_csum_replace ||
            func == bpf_l4_csum_replace ||
            func == bpf_xdp_adjust_head)
index 1934efd4a9d4986f3497abc40968fd08c91896b3..26bbfababff27cecc589bca69e3eb14739187f5b 100644 (file)
@@ -315,6 +315,25 @@ out_undo:
        goto out;
 }
 
+static int __net_init net_defaults_init_net(struct net *net)
+{
+       net->core.sysctl_somaxconn = SOMAXCONN;
+       return 0;
+}
+
+static struct pernet_operations net_defaults_ops = {
+       .init = net_defaults_init_net,
+};
+
+static __init int net_defaults_init(void)
+{
+       if (register_pernet_subsys(&net_defaults_ops))
+               panic("Cannot initialize net default settings");
+
+       return 0;
+}
+
+core_initcall(net_defaults_init);
 
 #ifdef CONFIG_NET_NS
 static struct ucounts *inc_net_namespaces(struct user_namespace *ns)
index 49a279a7cc15b0d2409236f53a0629ccc927e07c..9e2c0a7cb3256e8cb2af1d65aaf293db96b9a418 100644 (file)
@@ -3231,8 +3231,11 @@ static int rtnl_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb)
        int err = 0;
        int fidx = 0;
 
-       if (nlmsg_parse(cb->nlh, sizeof(struct ifinfomsg), tb,
-                       IFLA_MAX, ifla_policy, NULL) == 0) {
+       err = nlmsg_parse(cb->nlh, sizeof(struct ifinfomsg), tb,
+                         IFLA_MAX, ifla_policy, NULL);
+       if (err < 0) {
+               return -EINVAL;
+       } else if (err == 0) {
                if (tb[IFLA_MASTER])
                        br_idx = nla_get_u32(tb[IFLA_MASTER]);
        }
index ea23254b2457cf15eeae495130dab437090f8e2e..b7cd9aafe99e9b9216b13cce617133cdf832eeeb 100644 (file)
@@ -479,8 +479,6 @@ static __net_init int sysctl_core_net_init(struct net *net)
 {
        struct ctl_table *tbl;
 
-       net->core.sysctl_somaxconn = SOMAXCONN;
-
        tbl = netns_core_table;
        if (!net_eq(net, &init_net)) {
                tbl = kmemdup(tbl, sizeof(netns_core_table), GFP_KERNEL);
index ae96e6f3e0cb048e6a9273421343daf3582e9463..e9f3386a528b4a792839d5e50894a68d3097eb42 100644 (file)
@@ -863,8 +863,8 @@ static int arp_process(struct net *net, struct sock *sk, struct sk_buff *skb)
 
        n = __neigh_lookup(&arp_tbl, &sip, dev, 0);
 
+       addr_type = -1;
        if (n || IN_DEV_ARP_ACCEPT(in_dev)) {
-               addr_type = -1;
                is_garp = arp_is_garp(net, dev, &addr_type, arp->ar_op,
                                      sip, tip, sha, tha);
        }
index 65cc02bd82bc87991a29135a58c059c7916a5a35..93322f895eab6136adbe22aa37a4eedc9687a0e4 100644 (file)
@@ -248,6 +248,7 @@ int esp_output_head(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *
        u8 *tail;
        u8 *vaddr;
        int nfrags;
+       int esph_offset;
        struct page *page;
        struct sk_buff *trailer;
        int tailen = esp->tailen;
@@ -313,11 +314,13 @@ int esp_output_head(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *
        }
 
 cow:
+       esph_offset = (unsigned char *)esp->esph - skb_transport_header(skb);
+
        nfrags = skb_cow_data(skb, tailen, &trailer);
        if (nfrags < 0)
                goto out;
        tail = skb_tail_pointer(trailer);
-       esp->esph = ip_esp_hdr(skb);
+       esp->esph = (struct ip_esp_hdr *)(skb_transport_header(skb) + esph_offset);
 
 skip_cow:
        esp_output_fill_trailer(tail, esp->tfclen, esp->plen, esp->proto);
index da449ddb8cc172bd9091c00057a69a095f98b56d..ad9ad4aab5da7c7d11c3b80edbdfcbdd3d7153fe 100644 (file)
@@ -203,6 +203,7 @@ static void rt_fibinfo_free_cpus(struct rtable __rcu * __percpu *rtp)
 static void free_fib_info_rcu(struct rcu_head *head)
 {
        struct fib_info *fi = container_of(head, struct fib_info, rcu);
+       struct dst_metrics *m;
 
        change_nexthops(fi) {
                if (nexthop_nh->nh_dev)
@@ -213,8 +214,9 @@ static void free_fib_info_rcu(struct rcu_head *head)
                rt_fibinfo_free(&nexthop_nh->nh_rth_input);
        } endfor_nexthops(fi);
 
-       if (fi->fib_metrics != (u32 *) dst_default_metrics)
-               kfree(fi->fib_metrics);
+       m = fi->fib_metrics;
+       if (m != &dst_default_metrics && atomic_dec_and_test(&m->refcnt))
+               kfree(m);
        kfree(fi);
 }
 
@@ -971,11 +973,11 @@ fib_convert_metrics(struct fib_info *fi, const struct fib_config *cfg)
                        val = 255;
                if (type == RTAX_FEATURES && (val & ~RTAX_FEATURE_MASK))
                        return -EINVAL;
-               fi->fib_metrics[type - 1] = val;
+               fi->fib_metrics->metrics[type - 1] = val;
        }
 
        if (ecn_ca)
-               fi->fib_metrics[RTAX_FEATURES - 1] |= DST_FEATURE_ECN_CA;
+               fi->fib_metrics->metrics[RTAX_FEATURES - 1] |= DST_FEATURE_ECN_CA;
 
        return 0;
 }
@@ -1033,11 +1035,12 @@ struct fib_info *fib_create_info(struct fib_config *cfg)
                goto failure;
        fib_info_cnt++;
        if (cfg->fc_mx) {
-               fi->fib_metrics = kzalloc(sizeof(u32) * RTAX_MAX, GFP_KERNEL);
+               fi->fib_metrics = kzalloc(sizeof(*fi->fib_metrics), GFP_KERNEL);
                if (!fi->fib_metrics)
                        goto failure;
+               atomic_set(&fi->fib_metrics->refcnt, 1);
        } else
-               fi->fib_metrics = (u32 *) dst_default_metrics;
+               fi->fib_metrics = (struct dst_metrics *)&dst_default_metrics;
 
        fi->fib_net = net;
        fi->fib_protocol = cfg->fc_protocol;
@@ -1238,7 +1241,7 @@ int fib_dump_info(struct sk_buff *skb, u32 portid, u32 seq, int event,
        if (fi->fib_priority &&
            nla_put_u32(skb, RTA_PRIORITY, fi->fib_priority))
                goto nla_put_failure;
-       if (rtnetlink_put_metrics(skb, fi->fib_metrics) < 0)
+       if (rtnetlink_put_metrics(skb, fi->fib_metrics->metrics) < 0)
                goto nla_put_failure;
 
        if (fi->fib_prefsrc &&
index 655d9eebe43e16a59102edcd3ea4bc177c6b341d..6883b3d4ba8f69de2cb924612d60f5671a219a84 100644 (file)
@@ -1385,8 +1385,12 @@ static void rt_add_uncached_list(struct rtable *rt)
 
 static void ipv4_dst_destroy(struct dst_entry *dst)
 {
+       struct dst_metrics *p = (struct dst_metrics *)DST_METRICS_PTR(dst);
        struct rtable *rt = (struct rtable *) dst;
 
+       if (p != &dst_default_metrics && atomic_dec_and_test(&p->refcnt))
+               kfree(p);
+
        if (!list_empty(&rt->rt_uncached)) {
                struct uncached_list *ul = rt->rt_uncached_list;
 
@@ -1438,7 +1442,11 @@ static void rt_set_nexthop(struct rtable *rt, __be32 daddr,
                        rt->rt_gateway = nh->nh_gw;
                        rt->rt_uses_gateway = 1;
                }
-               dst_init_metrics(&rt->dst, fi->fib_metrics, true);
+               dst_init_metrics(&rt->dst, fi->fib_metrics->metrics, true);
+               if (fi->fib_metrics != &dst_default_metrics) {
+                       rt->dst._metrics |= DST_METRICS_REFCOUNTED;
+                       atomic_inc(&fi->fib_metrics->refcnt);
+               }
 #ifdef CONFIG_IP_ROUTE_CLASSID
                rt->dst.tclassid = nh->nh_tclassid;
 #endif
index 842b575f8fdddc41a41aa6f03fb9086cec7ee451..59792d283ff8c19048904cb790dbeebef14da73d 100644 (file)
@@ -1084,9 +1084,12 @@ static int tcp_sendmsg_fastopen(struct sock *sk, struct msghdr *msg,
 {
        struct tcp_sock *tp = tcp_sk(sk);
        struct inet_sock *inet = inet_sk(sk);
+       struct sockaddr *uaddr = msg->msg_name;
        int err, flags;
 
-       if (!(sysctl_tcp_fastopen & TFO_CLIENT_ENABLE))
+       if (!(sysctl_tcp_fastopen & TFO_CLIENT_ENABLE) ||
+           (uaddr && msg->msg_namelen >= sizeof(uaddr->sa_family) &&
+            uaddr->sa_family == AF_UNSPEC))
                return -EOPNOTSUPP;
        if (tp->fastopen_req)
                return -EALREADY; /* Another Fast Open is in progress */
@@ -1108,7 +1111,7 @@ static int tcp_sendmsg_fastopen(struct sock *sk, struct msghdr *msg,
                }
        }
        flags = (msg->msg_flags & MSG_DONTWAIT) ? O_NONBLOCK : 0;
-       err = __inet_stream_connect(sk->sk_socket, msg->msg_name,
+       err = __inet_stream_connect(sk->sk_socket, uaddr,
                                    msg->msg_namelen, flags, 1);
        /* fastopen_req could already be freed in __inet_stream_connect
         * if the connection times out or gets rst
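
The newly rejected case corresponds to userspace handing sendmsg() an
AF_UNSPEC address together with MSG_FASTOPEN; an illustrative trigger that
now fails up front with EOPNOTSUPP instead of reaching
__inet_stream_connect() (assumes <sys/socket.h> and <netinet/in.h>):

	int fd = socket(AF_INET, SOCK_STREAM, 0);
	struct sockaddr addr = { .sa_family = AF_UNSPEC };

	ssize_t n = sendto(fd, "x", 1, MSG_FASTOPEN,
			   &addr, sizeof(addr));
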
index 8d128ba79b66de52d4d60628fe8df6cc9bccbc3b..0c5b4caa19491eb04bc755032611c76f03008acb 100644 (file)
@@ -537,11 +537,10 @@ static inline int ip6gre_xmit_ipv4(struct sk_buff *skb, struct net_device *dev)
 
        memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6));
 
-       dsfield = ipv4_get_dsfield(iph);
-
        if (t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS)
-               fl6.flowlabel |= htonl((__u32)iph->tos << IPV6_TCLASS_SHIFT)
-                                         & IPV6_TCLASS_MASK;
+               dsfield = ipv4_get_dsfield(iph);
+       else
+               dsfield = ip6_tclass(t->parms.flowinfo);
        if (t->parms.flags & IP6_TNL_F_USE_ORIG_FWMARK)
                fl6.flowi6_mark = skb->mark;
        else
@@ -598,9 +597,11 @@ static inline int ip6gre_xmit_ipv6(struct sk_buff *skb, struct net_device *dev)
 
        memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6));
 
-       dsfield = ipv6_get_dsfield(ipv6h);
        if (t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS)
-               fl6.flowlabel |= (*(__be32 *) ipv6h & IPV6_TCLASS_MASK);
+               dsfield = ipv6_get_dsfield(ipv6h);
+       else
+               dsfield = ip6_tclass(t->parms.flowinfo);
+
        if (t->parms.flags & IP6_TNL_F_USE_ORIG_FLOWLABEL)
                fl6.flowlabel |= ip6_flowlabel(ipv6h);
        if (t->parms.flags & IP6_TNL_F_USE_ORIG_FWMARK)
index 6eb2ae507500b30959134aa3ed35c53c7b79aad9..7ae6c503f1ca2b089388598bfceb43e4aa2d2fea 100644 (file)
@@ -1196,7 +1196,7 @@ route_lookup:
        skb_push(skb, sizeof(struct ipv6hdr));
        skb_reset_network_header(skb);
        ipv6h = ipv6_hdr(skb);
-       ip6_flow_hdr(ipv6h, INET_ECN_encapsulate(0, dsfield),
+       ip6_flow_hdr(ipv6h, dsfield,
                     ip6_make_flowlabel(net, skb, fl6->flowlabel, true, fl6));
        ipv6h->hop_limit = hop_limit;
        ipv6h->nexthdr = proto;
@@ -1231,8 +1231,6 @@ ip4ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
        if (tproto != IPPROTO_IPIP && tproto != 0)
                return -1;
 
-       dsfield = ipv4_get_dsfield(iph);
-
        if (t->parms.collect_md) {
                struct ip_tunnel_info *tun_info;
                const struct ip_tunnel_key *key;
@@ -1246,6 +1244,7 @@ ip4ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
                fl6.flowi6_proto = IPPROTO_IPIP;
                fl6.daddr = key->u.ipv6.dst;
                fl6.flowlabel = key->label;
+               dsfield = ip6_tclass(key->label);
        } else {
                if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
                        encap_limit = t->parms.encap_limit;
@@ -1254,8 +1253,9 @@ ip4ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
                fl6.flowi6_proto = IPPROTO_IPIP;
 
                if (t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS)
-                       fl6.flowlabel |= htonl((__u32)iph->tos << IPV6_TCLASS_SHIFT)
-                                        & IPV6_TCLASS_MASK;
+                       dsfield = ipv4_get_dsfield(iph);
+               else
+                       dsfield = ip6_tclass(t->parms.flowinfo);
                if (t->parms.flags & IP6_TNL_F_USE_ORIG_FWMARK)
                        fl6.flowi6_mark = skb->mark;
                else
@@ -1267,6 +1267,8 @@ ip4ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
        if (iptunnel_handle_offloads(skb, SKB_GSO_IPXIP6))
                return -1;
 
+       dsfield = INET_ECN_encapsulate(dsfield, ipv4_get_dsfield(iph));
+
        skb_set_inner_ipproto(skb, IPPROTO_IPIP);
 
        err = ip6_tnl_xmit(skb, dev, dsfield, &fl6, encap_limit, &mtu,
@@ -1300,8 +1302,6 @@ ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
            ip6_tnl_addr_conflict(t, ipv6h))
                return -1;
 
-       dsfield = ipv6_get_dsfield(ipv6h);
-
        if (t->parms.collect_md) {
                struct ip_tunnel_info *tun_info;
                const struct ip_tunnel_key *key;
@@ -1315,6 +1315,7 @@ ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
                fl6.flowi6_proto = IPPROTO_IPV6;
                fl6.daddr = key->u.ipv6.dst;
                fl6.flowlabel = key->label;
+               dsfield = ip6_tclass(key->label);
        } else {
                offset = ip6_tnl_parse_tlv_enc_lim(skb, skb_network_header(skb));
                /* ip6_tnl_parse_tlv_enc_lim() might have reallocated skb->head */
@@ -1337,7 +1338,9 @@ ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
                fl6.flowi6_proto = IPPROTO_IPV6;
 
                if (t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS)
-                       fl6.flowlabel |= (*(__be32 *)ipv6h & IPV6_TCLASS_MASK);
+                       dsfield = ipv6_get_dsfield(ipv6h);
+               else
+                       dsfield = ip6_tclass(t->parms.flowinfo);
                if (t->parms.flags & IP6_TNL_F_USE_ORIG_FLOWLABEL)
                        fl6.flowlabel |= ip6_flowlabel(ipv6h);
                if (t->parms.flags & IP6_TNL_F_USE_ORIG_FWMARK)
@@ -1351,6 +1354,8 @@ ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
        if (iptunnel_handle_offloads(skb, SKB_GSO_IPXIP6))
                return -1;
 
+       dsfield = INET_ECN_encapsulate(dsfield, ipv6_get_dsfield(ipv6h));
+
        skb_set_inner_ipproto(skb, IPPROTO_IPV6);
 
        err = ip6_tnl_xmit(skb, dev, dsfield, &fl6, encap_limit, &mtu,
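Across the GRE and ip6_tnl hunks the pattern is the same: dsfield now starts from either the inner header (IP6_TNL_F_USE_ORIG_TCLASS) or the tunnel's configured flowinfo, and the inner ECN bits are folded in only at transmit time via INET_ECN_encapsulate(), instead of being OR-ed into the flow label. A sketch of the traffic-class extraction these paths share; it mirrors what ip6_tclass() in include/net/ipv6.h computes (bits 20..27 of the flowinfo word):

    #include <arpa/inet.h>

    /* Roughly what ip6_tclass() computes: the 8-bit traffic class sits
     * in bits 20..27 of the 32-bit IPv6 flowinfo word. */
    static unsigned char tclass_of(unsigned int flowinfo_be)
    {
            return (ntohl(flowinfo_be) & 0x0ff00000) >> 20;
    }

Settling the DSCP part first and folding ECN in last, via INET_ECN_encapsulate(dsfield, inner_dsfield), keeps the configured traffic class and the inner packet's ECN marking independent of each other.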
index c1950bb14735bc914b53c0476342f2679ad50b87..512dc43d0ce6814f0a6d0507805025d75234eece 100644 (file)
@@ -3285,7 +3285,7 @@ static struct xfrm_policy *pfkey_compile_policy(struct sock *sk, int opt,
                p += pol->sadb_x_policy_len*8;
                sec_ctx = (struct sadb_x_sec_ctx *)p;
                if (len < pol->sadb_x_policy_len*8 +
-                   sec_ctx->sadb_x_sec_len) {
+                   sec_ctx->sadb_x_sec_len*8) {
                        *dir = -EINVAL;
                        goto out;
                }
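The key.c check corrects a unit mismatch: PF_KEY length fields (RFC 2367) count 64-bit words, so comparing the remaining byte count against a raw sadb_x_sec_len under-estimated the extension size by a factor of eight. The conversion, as a sketch with an illustrative helper name:

    #include <stddef.h>

    /* PF_KEY sadb_*_len fields count 64-bit words. */
    static size_t sadb_len_to_bytes(unsigned int sadb_len)
    {
            return (size_t)sadb_len * 8;
    }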
index 8364fe5b59e4ca01ef8d05b1038dbe96fedf657b..c38d16f22d2a7ff265b8729d43b164312598fd9f 100644 (file)
@@ -311,6 +311,8 @@ static int llc_ui_bind(struct socket *sock, struct sockaddr *uaddr, int addrlen)
        int rc = -EINVAL;
 
        dprintk("%s: binding %02X\n", __func__, addr->sllc_sap);
+
+       lock_sock(sk);
        if (unlikely(!sock_flag(sk, SOCK_ZAPPED) || addrlen != sizeof(*addr)))
                goto out;
        rc = -EAFNOSUPPORT;
@@ -382,6 +384,7 @@ static int llc_ui_bind(struct socket *sock, struct sockaddr *uaddr, int addrlen)
 out_put:
        llc_sap_put(sap);
 out:
+       release_sock(sk);
        return rc;
 }
 
index 35f4c7d7a50089e3998ff9860e86f6d26e1da600..1f75280ba26c78b3ad9864d0b63305cbba43f8fe 100644 (file)
@@ -2492,7 +2492,8 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx)
                if (is_multicast_ether_addr(hdr->addr1)) {
                        mpp_addr = hdr->addr3;
                        proxied_addr = mesh_hdr->eaddr1;
-               } else if (mesh_hdr->flags & MESH_FLAGS_AE_A5_A6) {
+               } else if ((mesh_hdr->flags & MESH_FLAGS_AE) ==
+                           MESH_FLAGS_AE_A5_A6) {
                        /* has_a4 already checked in ieee80211_rx_mesh_check */
                        mpp_addr = hdr->addr4;
                        proxied_addr = mesh_hdr->eaddr2;
index a9708da28eb53ff2987264c6c7d7ca6ec2ff09e9..95238284c422e23fbd834a3aedc54c9243743d0f 100644 (file)
@@ -1176,7 +1176,9 @@ void sctp_assoc_update(struct sctp_association *asoc,
 
                asoc->ctsn_ack_point = asoc->next_tsn - 1;
                asoc->adv_peer_ack_point = asoc->ctsn_ack_point;
-               if (!asoc->stream) {
+
+               if (sctp_state(asoc, COOKIE_WAIT)) {
+                       sctp_stream_free(asoc->stream);
                        asoc->stream = new->stream;
                        new->stream = NULL;
                }
index 0e06a278d2a911e2360e75e983b623e453284b7b..ba9ad32fc44740b9ec45d95e265d3d895148d7a7 100644 (file)
@@ -473,15 +473,14 @@ struct sock *sctp_err_lookup(struct net *net, int family, struct sk_buff *skb,
                             struct sctp_association **app,
                             struct sctp_transport **tpp)
 {
+       struct sctp_init_chunk *chunkhdr, _chunkhdr;
        union sctp_addr saddr;
        union sctp_addr daddr;
        struct sctp_af *af;
        struct sock *sk = NULL;
        struct sctp_association *asoc;
        struct sctp_transport *transport = NULL;
-       struct sctp_init_chunk *chunkhdr;
        __u32 vtag = ntohl(sctphdr->vtag);
-       int len = skb->len - ((void *)sctphdr - (void *)skb->data);
 
        *app = NULL; *tpp = NULL;
 
@@ -516,13 +515,16 @@ struct sock *sctp_err_lookup(struct net *net, int family, struct sk_buff *skb,
         * discard the packet.
         */
        if (vtag == 0) {
-               chunkhdr = (void *)sctphdr + sizeof(struct sctphdr);
-               if (len < sizeof(struct sctphdr) + sizeof(sctp_chunkhdr_t)
-                         + sizeof(__be32) ||
+               /* chunk header + first 4 octets of init header */
+               chunkhdr = skb_header_pointer(skb, skb_transport_offset(skb) +
+                                             sizeof(struct sctphdr),
+                                             sizeof(struct sctp_chunkhdr) +
+                                             sizeof(__be32), &_chunkhdr);
+               if (!chunkhdr ||
                    chunkhdr->chunk_hdr.type != SCTP_CID_INIT ||
-                   ntohl(chunkhdr->init_hdr.init_tag) != asoc->c.my_vtag) {
+                   ntohl(chunkhdr->init_hdr.init_tag) != asoc->c.my_vtag)
                        goto out;
-               }
+
        } else if (vtag != asoc->c.peer_vtag) {
                goto out;
        }
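sctp_err_lookup() now reads the chunk header through skb_header_pointer() instead of doing arithmetic on skb->data, so the lookup also works when the header straddles a non-linear skb. The general idiom, sketched with a hypothetical foo_hdr type:

    #include <linux/errno.h>
    #include <linux/skbuff.h>

    /* Hypothetical wire header, for illustration only. */
    struct foo_hdr {
            __be16 type;
            __be16 len;
    };

    static int parse_foo(struct sk_buff *skb, int offset)
    {
            struct foo_hdr _hdr;
            const struct foo_hdr *hdr;

            /* Returns a pointer into the linear area when possible,
             * otherwise copies the bytes into _hdr; NULL means the
             * packet is too short to hold the header at all. */
            hdr = skb_header_pointer(skb, offset, sizeof(_hdr), &_hdr);
            if (!hdr)
                    return -EINVAL;

            return be16_to_cpu(hdr->type);
    }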
index 8a08f13469c4cc26aeec55de6793b2c2e3c562a6..92e332e173914f16c10ba54038dbb78dee0d0c49 100644 (file)
@@ -2454,16 +2454,11 @@ int sctp_process_init(struct sctp_association *asoc, struct sctp_chunk *chunk,
         * stream sequence number shall be set to 0.
         */
 
-       /* Allocate storage for the negotiated streams if it is not a temporary
-        * association.
-        */
-       if (!asoc->temp) {
-               if (sctp_stream_init(asoc, gfp))
-                       goto clean_up;
+       if (sctp_stream_init(asoc, gfp))
+               goto clean_up;
 
-               if (sctp_assoc_set_id(asoc, gfp))
-                       goto clean_up;
-       }
+       if (!asoc->temp && sctp_assoc_set_id(asoc, gfp))
+               goto clean_up;
 
        /* ADDIP Section 4.1 ASCONF Chunk Procedures
         *
index 4f5e6cfc7f601b4de8db8802668d553f1af8491d..f863b5573e42d6f1c4908bf045af0079edc4b2d9 100644 (file)
@@ -2088,6 +2088,9 @@ sctp_disposition_t sctp_sf_do_5_2_4_dupcook(struct net *net,
                }
        }
 
+       /* Set temp so that it won't be added into hashtable */
+       new_asoc->temp = 1;
+
        /* Compare the tie_tag in cookie with the verification tag of
         * current association.
         */
index 24fedd4b117e8f61cf500f629f2b47f2bc53e76f..03f6b5840764dcc2c486015edd8dc7de4cd84b26 100644 (file)
@@ -119,11 +119,9 @@ int xprt_rdma_bc_setup(struct rpc_xprt *xprt, unsigned int reqs)
 
        for (i = 0; i < (reqs << 1); i++) {
                rqst = kzalloc(sizeof(*rqst), GFP_KERNEL);
-               if (!rqst) {
-                       pr_err("RPC:       %s: Failed to create bc rpc_rqst\n",
-                              __func__);
+               if (!rqst)
                        goto out_free;
-               }
+
                dprintk("RPC:       %s: new rqst %p\n", __func__, rqst);
 
                rqst->rq_xprt = &r_xprt->rx_xprt;
index 16aff8ddc16f8f3e66e31a86ce227b3ac49857bf..d5b54c020decdc2665d671f34d74dd809aa6682a 100644 (file)
@@ -2432,7 +2432,12 @@ static void xs_tcp_setup_socket(struct work_struct *work)
        case -ENETUNREACH:
        case -EADDRINUSE:
        case -ENOBUFS:
-               /* retry with existing socket, after a delay */
+               /*
+                * xs_tcp_force_close() wakes tasks with -EIO.
+                * We need to wake them first to ensure the
+                * correct error code.
+                */
+               xprt_wake_pending_tasks(xprt, status);
                xs_tcp_force_close(xprt);
                goto out;
        }
index 14d5f0c8c45ff07224f2dc8e6310c2edc440874e..9f0901f3e42b60db2c6aee7927b2b0ddc0ab7759 100644 (file)
@@ -322,9 +322,9 @@ cfg80211_find_sched_scan_req(struct cfg80211_registered_device *rdev, u64 reqid)
 {
        struct cfg80211_sched_scan_request *pos;
 
-       ASSERT_RTNL();
+       WARN_ON_ONCE(!rcu_read_lock_held() && !lockdep_rtnl_is_held());
 
-       list_for_each_entry(pos, &rdev->sched_scan_req_list, list) {
+       list_for_each_entry_rcu(pos, &rdev->sched_scan_req_list, list) {
                if (pos->reqid == reqid)
                        return pos;
        }
@@ -398,13 +398,13 @@ void cfg80211_sched_scan_results(struct wiphy *wiphy, u64 reqid)
        trace_cfg80211_sched_scan_results(wiphy, reqid);
        /* ignore if we're not scanning */
 
-       rtnl_lock();
+       rcu_read_lock();
        request = cfg80211_find_sched_scan_req(rdev, reqid);
        if (request) {
                request->report_results = true;
                queue_work(cfg80211_wq, &rdev->sched_scan_res_wk);
        }
-       rtnl_unlock();
+       rcu_read_unlock();
 }
 EXPORT_SYMBOL(cfg80211_sched_scan_results);
 
index 7198373e29206a0f19f006578596fea78e82f66e..4992f1025c9d3749bddd5f22948d2dad791aa9a9 100644 (file)
@@ -454,6 +454,8 @@ int ieee80211_data_to_8023_exthdr(struct sk_buff *skb, struct ethhdr *ehdr,
        if (iftype == NL80211_IFTYPE_MESH_POINT)
                skb_copy_bits(skb, hdrlen, &mesh_flags, 1);
 
+       mesh_flags &= MESH_FLAGS_AE;
+
        switch (hdr->frame_control &
                cpu_to_le16(IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS)) {
        case cpu_to_le16(IEEE80211_FCTL_TODS):
@@ -469,9 +471,9 @@ int ieee80211_data_to_8023_exthdr(struct sk_buff *skb, struct ethhdr *ehdr,
                             iftype != NL80211_IFTYPE_STATION))
                        return -1;
                if (iftype == NL80211_IFTYPE_MESH_POINT) {
-                       if (mesh_flags & MESH_FLAGS_AE_A4)
+                       if (mesh_flags == MESH_FLAGS_AE_A4)
                                return -1;
-                       if (mesh_flags & MESH_FLAGS_AE_A5_A6) {
+                       if (mesh_flags == MESH_FLAGS_AE_A5_A6) {
                                skb_copy_bits(skb, hdrlen +
                                        offsetof(struct ieee80211s_hdr, eaddr1),
                                        tmp.h_dest, 2 * ETH_ALEN);
@@ -487,9 +489,9 @@ int ieee80211_data_to_8023_exthdr(struct sk_buff *skb, struct ethhdr *ehdr,
                     ether_addr_equal(tmp.h_source, addr)))
                        return -1;
                if (iftype == NL80211_IFTYPE_MESH_POINT) {
-                       if (mesh_flags & MESH_FLAGS_AE_A5_A6)
+                       if (mesh_flags == MESH_FLAGS_AE_A5_A6)
                                return -1;
-                       if (mesh_flags & MESH_FLAGS_AE_A4)
+                       if (mesh_flags == MESH_FLAGS_AE_A4)
                                skb_copy_bits(skb, hdrlen +
                                        offsetof(struct ieee80211s_hdr, eaddr1),
                                        tmp.h_source, ETH_ALEN);
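Both mesh fixes (here and in the net/mac80211/rx.c hunk above) hinge on the same detail: the Address Extension bits form a 2-bit field, not independent flags; per ieee80211.h, MESH_FLAGS_AE_A4 is 0x1, MESH_FLAGS_AE_A5_A6 is 0x2, and MESH_FLAGS_AE masks both. A frame carrying the reserved value 0x3 passes a bit test but must not match either case:

    #include <stdbool.h>

    #define MESH_FLAGS_AE_A4        0x1
    #define MESH_FLAGS_AE_A5_A6     0x2
    #define MESH_FLAGS_AE           0x3

    static bool has_eaddr2(unsigned char mesh_flags)
    {
            /* A bit test (mesh_flags & MESH_FLAGS_AE_A5_A6) is also
             * true for the reserved value 0x3; the field compare
             * below is not. */
            return (mesh_flags & MESH_FLAGS_AE) == MESH_FLAGS_AE_A5_A6;
    }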
index 8ec8a3fcf8d4740670b16c2c1af099ef0beaa5c5..574e6f32f94f29a496ef20f1a673a0e718a1a31b 100644 (file)
@@ -170,7 +170,7 @@ static int xfrm_dev_feat_change(struct net_device *dev)
 
 static int xfrm_dev_down(struct net_device *dev)
 {
-       if (dev->hw_features & NETIF_F_HW_ESP)
+       if (dev->features & NETIF_F_HW_ESP)
                xfrm_dev_state_flush(dev_net(dev), dev, true);
 
        xfrm_garbage_collect(dev_net(dev));
index b00a1d5a7f52e54f2ac454da3eeb14447f84d8b2..ed4e52d95172e2d8dcca603cdf62cfb55517f3c9 100644 (file)
@@ -1797,43 +1797,6 @@ free_dst:
        goto out;
 }
 
-#ifdef CONFIG_XFRM_SUB_POLICY
-static int xfrm_dst_alloc_copy(void **target, const void *src, int size)
-{
-       if (!*target) {
-               *target = kmalloc(size, GFP_ATOMIC);
-               if (!*target)
-                       return -ENOMEM;
-       }
-
-       memcpy(*target, src, size);
-       return 0;
-}
-#endif
-
-static int xfrm_dst_update_parent(struct dst_entry *dst,
-                                 const struct xfrm_selector *sel)
-{
-#ifdef CONFIG_XFRM_SUB_POLICY
-       struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
-       return xfrm_dst_alloc_copy((void **)&(xdst->partner),
-                                  sel, sizeof(*sel));
-#else
-       return 0;
-#endif
-}
-
-static int xfrm_dst_update_origin(struct dst_entry *dst,
-                                 const struct flowi *fl)
-{
-#ifdef CONFIG_XFRM_SUB_POLICY
-       struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
-       return xfrm_dst_alloc_copy((void **)&(xdst->origin), fl, sizeof(*fl));
-#else
-       return 0;
-#endif
-}
-
 static int xfrm_expand_policies(const struct flowi *fl, u16 family,
                                struct xfrm_policy **pols,
                                int *num_pols, int *num_xfrms)
@@ -1905,16 +1868,6 @@ xfrm_resolve_and_create_bundle(struct xfrm_policy **pols, int num_pols,
 
        xdst = (struct xfrm_dst *)dst;
        xdst->num_xfrms = err;
-       if (num_pols > 1)
-               err = xfrm_dst_update_parent(dst, &pols[1]->selector);
-       else
-               err = xfrm_dst_update_origin(dst, fl);
-       if (unlikely(err)) {
-               dst_free(dst);
-               XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTBUNDLECHECKERROR);
-               return ERR_PTR(err);
-       }
-
        xdst->num_pols = num_pols;
        memcpy(xdst->pols, pols, sizeof(struct xfrm_policy *) * num_pols);
        xdst->policy_genid = atomic_read(&pols[0]->genid);
index fc3c5aa387543a63bf18955e60309207bf06e961..2e291bc5f1fc1003ca62e40f67da8a6a46875c02 100644 (file)
@@ -1383,6 +1383,8 @@ static struct xfrm_state *xfrm_state_clone(struct xfrm_state *orig)
        x->curlft.add_time = orig->curlft.add_time;
        x->km.state = orig->km.state;
        x->km.seq = orig->km.seq;
+       x->replay = orig->replay;
+       x->preplay = orig->preplay;
 
        return x;
 
index f9b92ece78343a463a37d33e3f73fa8621b84444..5afd1098e33a173a18b2310aa24d205534df711c 100644 (file)
@@ -23,10 +23,11 @@ class LxDmesg(gdb.Command):
         super(LxDmesg, self).__init__("lx-dmesg", gdb.COMMAND_DATA)
 
     def invoke(self, arg, from_tty):
-        log_buf_addr = int(str(gdb.parse_and_eval("log_buf")).split()[0], 16)
-        log_first_idx = int(gdb.parse_and_eval("log_first_idx"))
-        log_next_idx = int(gdb.parse_and_eval("log_next_idx"))
-        log_buf_len = int(gdb.parse_and_eval("log_buf_len"))
+        log_buf_addr = int(str(gdb.parse_and_eval(
+            "'printk.c'::log_buf")).split()[0], 16)
+        log_first_idx = int(gdb.parse_and_eval("'printk.c'::log_first_idx"))
+        log_next_idx = int(gdb.parse_and_eval("'printk.c'::log_next_idx"))
+        log_buf_len = int(gdb.parse_and_eval("'printk.c'::log_buf_len"))
 
         inf = gdb.inferiors()[0]
         start = log_buf_addr + log_first_idx
index 58df440013c54c00da464166f1905bfd4edfbd36..a57988d617e934847bff6b56f08b64b813071875 100644 (file)
@@ -2324,10 +2324,11 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
        SND_PCI_QUIRK(0x106b, 0x4a00, "Macbook 5,2", ALC889_FIXUP_MBA11_VREF),
 
        SND_PCI_QUIRK(0x1071, 0x8258, "Evesham Voyaeger", ALC882_FIXUP_EAPD),
-       SND_PCI_QUIRK(0x1462, 0x7350, "MSI-7350", ALC889_FIXUP_CD),
-       SND_PCI_QUIRK_VENDOR(0x1462, "MSI", ALC882_FIXUP_GPIO3),
        SND_PCI_QUIRK(0x1458, 0xa002, "Gigabyte EP45-DS3/Z87X-UD3H", ALC889_FIXUP_FRONT_HP_NO_PRESENCE),
        SND_PCI_QUIRK(0x1458, 0xa0b8, "Gigabyte AZ370-Gaming", ALC1220_FIXUP_GB_DUAL_CODECS),
+       SND_PCI_QUIRK(0x1462, 0x7350, "MSI-7350", ALC889_FIXUP_CD),
+       SND_PCI_QUIRK(0x1462, 0xda57, "MSI Z270-Gaming", ALC1220_FIXUP_GB_DUAL_CODECS),
+       SND_PCI_QUIRK_VENDOR(0x1462, "MSI", ALC882_FIXUP_GPIO3),
        SND_PCI_QUIRK(0x147b, 0x107a, "Abit AW9D-MAX", ALC882_FIXUP_ABIT_AW9D_MAX),
        SND_PCI_QUIRK_VENDOR(0x1558, "Clevo laptop", ALC882_FIXUP_EAPD),
        SND_PCI_QUIRK(0x161f, 0x2054, "Medion laptop", ALC883_FIXUP_EAPD),
@@ -2342,6 +2343,7 @@ static const struct hda_model_fixup alc882_fixup_models[] = {
        {.id = ALC883_FIXUP_ACER_EAPD, .name = "acer-aspire"},
        {.id = ALC882_FIXUP_INV_DMIC, .name = "inv-dmic"},
        {.id = ALC882_FIXUP_NO_PRIMARY_HP, .name = "no-primary-hp"},
+       {.id = ALC1220_FIXUP_GB_DUAL_CODECS, .name = "dual-codecs"},
        {}
 };
 
@@ -6014,6 +6016,7 @@ static const struct hda_model_fixup alc269_fixup_models[] = {
        {.id = ALC292_FIXUP_TPT440_DOCK, .name = "tpt440-dock"},
        {.id = ALC292_FIXUP_TPT440, .name = "tpt440"},
        {.id = ALC292_FIXUP_TPT460, .name = "tpt460"},
+       {.id = ALC233_FIXUP_LENOVO_MULTI_CODECS, .name = "dual-codecs"},
        {}
 };
 #define ALC225_STANDARD_PINS \
@@ -6465,8 +6468,11 @@ static int patch_alc269(struct hda_codec *codec)
                break;
        case 0x10ec0225:
        case 0x10ec0295:
+               spec->codec_variant = ALC269_TYPE_ALC225;
+               break;
        case 0x10ec0299:
                spec->codec_variant = ALC269_TYPE_ALC225;
+               spec->gen.mixer_nid = 0; /* no loopback on ALC299 */
                break;
        case 0x10ec0234:
        case 0x10ec0274:
@@ -7338,6 +7344,7 @@ static const struct hda_model_fixup alc662_fixup_models[] = {
        {.id = ALC662_FIXUP_ASUS_MODE8, .name = "asus-mode8"},
        {.id = ALC662_FIXUP_INV_DMIC, .name = "inv-dmic"},
        {.id = ALC668_FIXUP_DELL_MIC_NO_PRESENCE, .name = "dell-headset-multi"},
+       {.id = ALC662_FIXUP_LENOVO_MULTI_CODECS, .name = "dual-codecs"},
        {}
 };
 
index faa3d38bac0b7e51ab206440e8d17f4f6d751530..6cefdf6c0b758770e2615126285d7cd83d736d64 100644 (file)
@@ -1559,6 +1559,8 @@ static const struct snd_pci_quirk stac9200_fixup_tbl[] = {
                      "Dell Inspiron 1501", STAC_9200_DELL_M26),
        SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x01f6,
                      "unknown Dell", STAC_9200_DELL_M26),
+       SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x0201,
+                     "Dell Latitude D430", STAC_9200_DELL_M22),
        /* Panasonic */
        SND_PCI_QUIRK(0x10f7, 0x8338, "Panasonic CF-74", STAC_9200_PANASONIC),
        /* Gateway machines needs EAPD to be set on resume */
index dc48eedea92e7aaaba64f4db4053fea1b73d0b77..26ed23b18b7774fd495f7ce90e51ec6dc8022e2d 100644 (file)
@@ -698,16 +698,18 @@ static int snd_us16x08_meter_get(struct snd_kcontrol *kcontrol,
        struct snd_usb_audio *chip = elem->head.mixer->chip;
        struct snd_us16x08_meter_store *store = elem->private_data;
        u8 meter_urb[64];
-       char tmp[sizeof(mix_init_msg2)] = {0};
 
        switch (kcontrol->private_value) {
-       case 0:
-               snd_us16x08_send_urb(chip, (char *)mix_init_msg1,
-                                    sizeof(mix_init_msg1));
+       case 0: {
+               char tmp[sizeof(mix_init_msg1)];
+
+               memcpy(tmp, mix_init_msg1, sizeof(mix_init_msg1));
+               snd_us16x08_send_urb(chip, tmp, 4);
                snd_us16x08_recv_urb(chip, meter_urb,
                        sizeof(meter_urb));
                kcontrol->private_value++;
                break;
+       }
        case 1:
                snd_us16x08_recv_urb(chip, meter_urb,
                        sizeof(meter_urb));
@@ -718,15 +720,18 @@ static int snd_us16x08_meter_get(struct snd_kcontrol *kcontrol,
                        sizeof(meter_urb));
                kcontrol->private_value++;
                break;
-       case 3:
+       case 3: {
+               char tmp[sizeof(mix_init_msg2)];
+
                memcpy(tmp, mix_init_msg2, sizeof(mix_init_msg2));
                tmp[2] = snd_get_meter_comp_index(store);
-               snd_us16x08_send_urb(chip, tmp, sizeof(mix_init_msg2));
+               snd_us16x08_send_urb(chip, tmp, 10);
                snd_us16x08_recv_urb(chip, meter_urb,
                        sizeof(meter_urb));
                kcontrol->private_value = 0;
                break;
        }
+       }
 
        for (set = 0; set < 6; set++)
                get_meter_levels_from_urb(set, store, meter_urb);
@@ -1135,7 +1140,7 @@ static const struct snd_us16x08_control_params eq_controls[] = {
                .control_id = SND_US16X08_ID_EQLOWMIDWIDTH,
                .type = USB_MIXER_U8,
                .num_channels = 16,
-               .name = "EQ MidQLow Q",
+               .name = "EQ MidLow Q",
        },
        { /* EQ mid high gain */
                .kcontrol_new = &snd_us16x08_eq_gain_ctl,
index 01eff6ce6401a3f8c6c8b51da93524f635bba86a..d7b0b0a3a2db55617a908e2fe4a8a2af90082e02 100644 (file)
@@ -1364,7 +1364,7 @@ u64 snd_usb_interface_dsd_format_quirks(struct snd_usb_audio *chip,
        /* Amanero Combo384 USB interface with native DSD support */
        case USB_ID(0x16d0, 0x071a):
                if (fp->altsetting == 2) {
-                       switch (chip->dev->descriptor.bcdDevice) {
+                       switch (le16_to_cpu(chip->dev->descriptor.bcdDevice)) {
                        case 0x199:
                                return SNDRV_PCM_FMTBIT_DSD_U32_LE;
                        case 0x19b:
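The quirks.c hunk byte-swaps bcdDevice before the switch: USB descriptor multibyte fields are little-endian on the wire, so comparing the raw value against host-order constants such as 0x199 only worked on little-endian machines. A userspace-flavoured sketch of the normalization (helper name illustrative):

    #include <endian.h>
    #include <stdint.h>

    /* Descriptor words arrive little-endian; normalize before
     * comparing against constants like 0x199. */
    static uint16_t bcd_device(uint16_t bcd_le)
    {
            return le16toh(bcd_le);
    }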
index 6ebd3e6a1fd12d3202067020b48446fd9bdcff98..5e3c673fa3f4435b027fbc634815be10c26cf39f 100644 (file)
@@ -27,6 +27,8 @@
 #define __KVM_HAVE_IRQ_LINE
 #define __KVM_HAVE_READONLY_MEM
 
+#define KVM_COALESCED_MMIO_PAGE_OFFSET 1
+
 #define KVM_REG_SIZE(id)                                               \
        (1U << (((id) & KVM_REG_SIZE_MASK) >> KVM_REG_SIZE_SHIFT))
 
@@ -114,6 +116,8 @@ struct kvm_debug_exit_arch {
 };
 
 struct kvm_sync_regs {
+       /* Used with KVM_CAP_ARM_USER_IRQ */
+       __u64 device_irq_level;
 };
 
 struct kvm_arch_memory_slot {
@@ -192,13 +196,17 @@ struct kvm_arch_memory_slot {
 #define KVM_DEV_ARM_VGIC_GRP_REDIST_REGS 5
 #define KVM_DEV_ARM_VGIC_GRP_CPU_SYSREGS 6
 #define KVM_DEV_ARM_VGIC_GRP_LEVEL_INFO  7
+#define KVM_DEV_ARM_VGIC_GRP_ITS_REGS  8
 #define KVM_DEV_ARM_VGIC_LINE_LEVEL_INFO_SHIFT 10
 #define KVM_DEV_ARM_VGIC_LINE_LEVEL_INFO_MASK \
                        (0x3fffffULL << KVM_DEV_ARM_VGIC_LINE_LEVEL_INFO_SHIFT)
 #define KVM_DEV_ARM_VGIC_LINE_LEVEL_INTID_MASK 0x3ff
 #define VGIC_LEVEL_INFO_LINE_LEVEL     0
 
-#define   KVM_DEV_ARM_VGIC_CTRL_INIT    0
+#define   KVM_DEV_ARM_VGIC_CTRL_INIT           0
+#define   KVM_DEV_ARM_ITS_SAVE_TABLES          1
+#define   KVM_DEV_ARM_ITS_RESTORE_TABLES       2
+#define   KVM_DEV_ARM_VGIC_SAVE_PENDING_TABLES 3
 
 /* KVM_IRQ_LINE irq field index values */
 #define KVM_ARM_IRQ_TYPE_SHIFT         24
index c2860358ae3e0c3271d9ca4b944351986276e397..70eea2ecc6631cab3d718c3958cd0513a7f7e6a3 100644 (file)
@@ -39,6 +39,8 @@
 #define __KVM_HAVE_IRQ_LINE
 #define __KVM_HAVE_READONLY_MEM
 
+#define KVM_COALESCED_MMIO_PAGE_OFFSET 1
+
 #define KVM_REG_SIZE(id)                                               \
        (1U << (((id) & KVM_REG_SIZE_MASK) >> KVM_REG_SIZE_SHIFT))
 
@@ -143,6 +145,8 @@ struct kvm_debug_exit_arch {
 #define KVM_GUESTDBG_USE_HW            (1 << 17)
 
 struct kvm_sync_regs {
+       /* Used with KVM_CAP_ARM_USER_IRQ */
+       __u64 device_irq_level;
 };
 
 struct kvm_arch_memory_slot {
@@ -212,13 +216,17 @@ struct kvm_arch_memory_slot {
 #define KVM_DEV_ARM_VGIC_GRP_REDIST_REGS 5
 #define KVM_DEV_ARM_VGIC_GRP_CPU_SYSREGS 6
 #define KVM_DEV_ARM_VGIC_GRP_LEVEL_INFO  7
+#define KVM_DEV_ARM_VGIC_GRP_ITS_REGS 8
 #define KVM_DEV_ARM_VGIC_LINE_LEVEL_INFO_SHIFT 10
 #define KVM_DEV_ARM_VGIC_LINE_LEVEL_INFO_MASK \
                        (0x3fffffULL << KVM_DEV_ARM_VGIC_LINE_LEVEL_INFO_SHIFT)
 #define KVM_DEV_ARM_VGIC_LINE_LEVEL_INTID_MASK 0x3ff
 #define VGIC_LEVEL_INFO_LINE_LEVEL     0
 
-#define   KVM_DEV_ARM_VGIC_CTRL_INIT   0
+#define   KVM_DEV_ARM_VGIC_CTRL_INIT           0
+#define   KVM_DEV_ARM_ITS_SAVE_TABLES           1
+#define   KVM_DEV_ARM_ITS_RESTORE_TABLES        2
+#define   KVM_DEV_ARM_VGIC_SAVE_PENDING_TABLES 3
 
 /* Device Control API on vcpu fd */
 #define KVM_ARM_VCPU_PMU_V3_CTRL       0
index 4edbe4bb0e8b0cdd2553c74df9988823990833d9..07fbeb927834f3a96278414aedaa59ea580ae8de 100644 (file)
@@ -29,6 +29,9 @@
 #define __KVM_HAVE_IRQ_LINE
 #define __KVM_HAVE_GUEST_DEBUG
 
+/* Not always available, but if it is, this is the correct offset.  */
+#define KVM_COALESCED_MMIO_PAGE_OFFSET 1
+
 struct kvm_regs {
        __u64 pc;
        __u64 cr;
index 7f4fd65e9208514b99112c51f0e018f3f441712f..3dd2a1d308dd0b92c36c3c5ca5276fd19838f252 100644 (file)
@@ -26,6 +26,8 @@
 #define KVM_DEV_FLIC_ADAPTER_REGISTER  6
 #define KVM_DEV_FLIC_ADAPTER_MODIFY    7
 #define KVM_DEV_FLIC_CLEAR_IO_IRQ      8
+#define KVM_DEV_FLIC_AISM              9
+#define KVM_DEV_FLIC_AIRQ_INJECT       10
 /*
  * We can have up to 4*64k pending subchannels + 8 adapter interrupts,
  * as well as up to ASYNC_PF_PER_VCPU*KVM_MAX_VCPUS pfault done interrupts.
@@ -41,7 +43,14 @@ struct kvm_s390_io_adapter {
        __u8 isc;
        __u8 maskable;
        __u8 swap;
-       __u8 pad;
+       __u8 flags;
+};
+
+#define KVM_S390_ADAPTER_SUPPRESSIBLE 0x01
+
+struct kvm_s390_ais_req {
+       __u8 isc;
+       __u16 mode;
 };
 
 #define KVM_S390_IO_ADAPTER_MASK 1
@@ -110,6 +119,7 @@ struct kvm_s390_vm_cpu_machine {
 #define KVM_S390_VM_CPU_FEAT_CMMA      10
 #define KVM_S390_VM_CPU_FEAT_PFMFI     11
 #define KVM_S390_VM_CPU_FEAT_SIGPIF    12
+#define KVM_S390_VM_CPU_FEAT_KSS       13
 struct kvm_s390_vm_cpu_feat {
        __u64 feat[16];
 };
@@ -198,6 +208,10 @@ struct kvm_guest_debug_arch {
 #define KVM_SYNC_VRS    (1UL << 6)
 #define KVM_SYNC_RICCB  (1UL << 7)
 #define KVM_SYNC_FPRS   (1UL << 8)
+#define KVM_SYNC_GSCB   (1UL << 9)
+/* length and alignment of the sdnx as a power of two */
+#define SDNXC 8
+#define SDNXL (1UL << SDNXC)
 /* definition of registers in kvm_run */
 struct kvm_sync_regs {
        __u64 prefix;   /* prefix register */
@@ -218,8 +232,16 @@ struct kvm_sync_regs {
        };
        __u8  reserved[512];    /* for future vector expansion */
        __u32 fpc;              /* valid on KVM_SYNC_VRS or KVM_SYNC_FPRS */
-       __u8 padding[52];       /* riccb needs to be 64byte aligned */
+       __u8 padding1[52];      /* riccb needs to be 64byte aligned */
        __u8 riccb[64];         /* runtime instrumentation controls block */
+       __u8 padding2[192];     /* sdnx needs to be 256byte aligned */
+       union {
+               __u8 sdnx[SDNXL];  /* state description annex */
+               struct {
+                       __u64 reserved1[2];
+                       __u64 gscb[4];
+               };
+       };
 };
 
 #define KVM_REG_S390_TODPR     (KVM_REG_S390 | KVM_REG_SIZE_U32 | 0x1)
index 0fe00446f9cac8c16e1ae3fcabc4a469b772597b..2701e5f8145bd250c3a35dc12cab5839e16ff30d 100644 (file)
 #define X86_FEATURE_AVX512_4VNNIW (7*32+16) /* AVX-512 Neural Network Instructions */
 #define X86_FEATURE_AVX512_4FMAPS (7*32+17) /* AVX-512 Multiply Accumulation Single precision */
 
+#define X86_FEATURE_MBA         ( 7*32+18) /* Memory Bandwidth Allocation */
+
 /* Virtualization flags: Linux defined, word 8 */
 #define X86_FEATURE_TPR_SHADOW  ( 8*32+ 0) /* Intel TPR Shadow */
 #define X86_FEATURE_VNMI        ( 8*32+ 1) /* Intel Virtual NMI */
index 85599ad4d0247863cef655d02b9a4b3f83c77fb7..5dff775af7cd6456f7177d9ce5888ae78dc6bc10 100644 (file)
 # define DISABLE_OSPKE         (1<<(X86_FEATURE_OSPKE & 31))
 #endif /* CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS */
 
+#ifdef CONFIG_X86_5LEVEL
+# define DISABLE_LA57  0
+#else
+# define DISABLE_LA57  (1<<(X86_FEATURE_LA57 & 31))
+#endif
+
 /*
  * Make sure to add features to the correct mask
  */
@@ -55,7 +61,7 @@
 #define DISABLED_MASK13        0
 #define DISABLED_MASK14        0
 #define DISABLED_MASK15        0
-#define DISABLED_MASK16        (DISABLE_PKU|DISABLE_OSPKE)
+#define DISABLED_MASK16        (DISABLE_PKU|DISABLE_OSPKE|DISABLE_LA57)
 #define DISABLED_MASK17        0
 #define DISABLED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 18)
 
index fac9a5c0abe94b233b72b35bca8c7a665847b694..d91ba04dd00709b7e549ced791759c12cf70d5d0 100644 (file)
 # define NEED_MOVBE    0
 #endif
 
+#ifdef CONFIG_X86_5LEVEL
+# define NEED_LA57     (1<<(X86_FEATURE_LA57 & 31))
+#else
+# define NEED_LA57     0
+#endif
+
 #ifdef CONFIG_X86_64
 #ifdef CONFIG_PARAVIRT
 /* Paravirtualized systems may not have PSE or PGE available */
 #define REQUIRED_MASK13        0
 #define REQUIRED_MASK14        0
 #define REQUIRED_MASK15        0
-#define REQUIRED_MASK16        0
+#define REQUIRED_MASK16        (NEED_LA57)
 #define REQUIRED_MASK17        0
 #define REQUIRED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 18)
 
index 739c0c5940226d7af38d2ab4bc068f7772f1a8c7..c2824d02ba3762553b62a07709058102050e34da 100644 (file)
@@ -9,6 +9,9 @@
 #include <linux/types.h>
 #include <linux/ioctl.h>
 
+#define KVM_PIO_PAGE_OFFSET 1
+#define KVM_COALESCED_MMIO_PAGE_OFFSET 2
+
 #define DE_VECTOR 0
 #define DB_VECTOR 1
 #define BP_VECTOR 3
index 14458658e988bb6c9333e73701ea55e7b6525a92..690a2dcf407860cf54793f03e69d8761b06f068d 100644 (file)
 #define EXIT_REASON_WBINVD              54
 #define EXIT_REASON_XSETBV              55
 #define EXIT_REASON_APIC_WRITE          56
+#define EXIT_REASON_RDRAND              57
 #define EXIT_REASON_INVPCID             58
+#define EXIT_REASON_VMFUNC              59
+#define EXIT_REASON_ENCLS               60
+#define EXIT_REASON_RDSEED              61
 #define EXIT_REASON_PML_FULL            62
 #define EXIT_REASON_XSAVES              63
 #define EXIT_REASON_XRSTORS             64
@@ -90,6 +94,7 @@
        { EXIT_REASON_TASK_SWITCH,           "TASK_SWITCH" }, \
        { EXIT_REASON_CPUID,                 "CPUID" }, \
        { EXIT_REASON_HLT,                   "HLT" }, \
+       { EXIT_REASON_INVD,                  "INVD" }, \
        { EXIT_REASON_INVLPG,                "INVLPG" }, \
        { EXIT_REASON_RDPMC,                 "RDPMC" }, \
        { EXIT_REASON_RDTSC,                 "RDTSC" }, \
        { EXIT_REASON_IO_INSTRUCTION,        "IO_INSTRUCTION" }, \
        { EXIT_REASON_MSR_READ,              "MSR_READ" }, \
        { EXIT_REASON_MSR_WRITE,             "MSR_WRITE" }, \
+       { EXIT_REASON_INVALID_STATE,         "INVALID_STATE" }, \
+       { EXIT_REASON_MSR_LOAD_FAIL,         "MSR_LOAD_FAIL" }, \
        { EXIT_REASON_MWAIT_INSTRUCTION,     "MWAIT_INSTRUCTION" }, \
        { EXIT_REASON_MONITOR_TRAP_FLAG,     "MONITOR_TRAP_FLAG" }, \
        { EXIT_REASON_MONITOR_INSTRUCTION,   "MONITOR_INSTRUCTION" }, \
        { EXIT_REASON_MCE_DURING_VMENTRY,    "MCE_DURING_VMENTRY" }, \
        { EXIT_REASON_TPR_BELOW_THRESHOLD,   "TPR_BELOW_THRESHOLD" }, \
        { EXIT_REASON_APIC_ACCESS,           "APIC_ACCESS" }, \
-       { EXIT_REASON_GDTR_IDTR,             "GDTR_IDTR" }, \
-       { EXIT_REASON_LDTR_TR,               "LDTR_TR" }, \
+       { EXIT_REASON_EOI_INDUCED,           "EOI_INDUCED" }, \
+       { EXIT_REASON_GDTR_IDTR,             "GDTR_IDTR" }, \
+       { EXIT_REASON_LDTR_TR,               "LDTR_TR" }, \
        { EXIT_REASON_EPT_VIOLATION,         "EPT_VIOLATION" }, \
        { EXIT_REASON_EPT_MISCONFIG,         "EPT_MISCONFIG" }, \
        { EXIT_REASON_INVEPT,                "INVEPT" }, \
+       { EXIT_REASON_RDTSCP,                "RDTSCP" }, \
        { EXIT_REASON_PREEMPTION_TIMER,      "PREEMPTION_TIMER" }, \
+       { EXIT_REASON_INVVPID,               "INVVPID" }, \
        { EXIT_REASON_WBINVD,                "WBINVD" }, \
+       { EXIT_REASON_XSETBV,                "XSETBV" }, \
        { EXIT_REASON_APIC_WRITE,            "APIC_WRITE" }, \
-       { EXIT_REASON_EOI_INDUCED,           "EOI_INDUCED" }, \
-       { EXIT_REASON_INVALID_STATE,         "INVALID_STATE" }, \
-       { EXIT_REASON_MSR_LOAD_FAIL,         "MSR_LOAD_FAIL" }, \
-       { EXIT_REASON_INVD,                  "INVD" }, \
-       { EXIT_REASON_INVVPID,               "INVVPID" }, \
+       { EXIT_REASON_RDRAND,                "RDRAND" }, \
        { EXIT_REASON_INVPCID,               "INVPCID" }, \
+       { EXIT_REASON_VMFUNC,                "VMFUNC" }, \
+       { EXIT_REASON_ENCLS,                 "ENCLS" }, \
+       { EXIT_REASON_RDSEED,                "RDSEED" }, \
+       { EXIT_REASON_PML_FULL,              "PML_FULL" }, \
        { EXIT_REASON_XSAVES,                "XSAVES" }, \
        { EXIT_REASON_XRSTORS,               "XRSTORS" }
 
index 390d7c9685fd6107c83be2296ead9cb198b571a3..4ce25d43e8e305df8afcfc6fb1747a58fad20c1b 100644 (file)
                .off   = OFF,                                   \
                .imm   = IMM })
 
+/* Unconditional jumps, goto pc + off16 */
+
+#define BPF_JMP_A(OFF)                                         \
+       ((struct bpf_insn) {                                    \
+               .code  = BPF_JMP | BPF_JA,                      \
+               .dst_reg = 0,                                   \
+               .src_reg = 0,                                   \
+               .off   = OFF,                                   \
+               .imm   = 0 })
+
 /* Function call */
 
 #define BPF_EMIT_CALL(FUNC)                                    \
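BPF_JMP_A() wraps an unconditional BPF_JA with a 16-bit pc-relative offset; the verifier selftest later in this diff uses BPF_JMP_A(-6) to build a backward edge. A usage sketch with the insn macros from the same header, encoding a simple if/else join:

    /* if (r2 == 0) r0 = 0; else r0 = 1; BPF_JMP_A skips the join. */
    struct bpf_insn prog[] = {
            BPF_JMP_IMM(BPF_JEQ, BPF_REG_2, 0, 2), /* if r2 == 0 goto +2 */
            BPF_MOV64_IMM(BPF_REG_0, 1),
            BPF_JMP_A(1),                          /* jump past r0 = 0 */
            BPF_MOV64_IMM(BPF_REG_0, 0),
            BPF_EXIT_INSN(),
    };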
index d538897b8e08bb4358c56148c189fc7b8128a9da..17b10304c393355da9ed2da743107a5c59748290 100644 (file)
  * tv_sec holds the number of seconds before (negative) or after (positive)
  * 00:00:00 1st January 1970 UTC.
  *
- * tv_nsec holds a number of nanoseconds before (0..-999,999,999 if tv_sec is
- * negative) or after (0..999,999,999 if tv_sec is positive) the tv_sec time.
- *
- * Note that if both tv_sec and tv_nsec are non-zero, then the two values must
- * either be both positive or both negative.
+ * tv_nsec holds a number of nanoseconds (0..999,999,999) after the tv_sec time.
  *
  * __reserved is held in case we need a yet finer resolution.
  */
 struct statx_timestamp {
        __s64   tv_sec;
-       __s32   tv_nsec;
+       __u32   tv_nsec;
        __s32   __reserved;
 };
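The statx change replaces the sign-straddling nanosecond convention with a plain unsigned forward offset: a timestamp is always tv_sec plus 0..999,999,999 ns, whatever the sign of tv_sec. That makes conversion to a single signed nanosecond count branch-free; a sketch over a local mirror of the uapi struct (overflow at the int64 extremes ignored for brevity):

    #include <stdint.h>

    struct ts { int64_t tv_sec; uint32_t tv_nsec; };

    static int64_t ts_to_nsec(const struct ts *t)
    {
            /* Valid because 0 <= tv_nsec < 1e9 regardless of the
             * sign of tv_sec. */
            return t->tv_sec * 1000000000LL + t->tv_nsec;
    }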
 
index cb0eda3925e6d8562da9d3091147c5fca602ebbf..3517e204a2b30754e430213c41b1d48e51d9b52c 100644 (file)
@@ -311,6 +311,10 @@ include::itrace.txt[]
        Set the maximum number of program blocks to print with brstackasm for
        each sample.
 
+--inline::
+       If a callgraph address belongs to an inlined function, the inline stack
+       will be printed. Each entry has function name and file/line.
+
 SEE ALSO
 --------
 linkperf:perf-record[1], linkperf:perf-script-perl[1],
index 837067f48a4c54a88b883e8de6dc95df0dd5b3d0..6b40e9f017404f87877668645f7afa8f28649788 100644 (file)
@@ -26,6 +26,7 @@ const char *const arm64_triplets[] = {
 
 const char *const powerpc_triplets[] = {
        "powerpc-unknown-linux-gnu-",
+       "powerpc-linux-gnu-",
        "powerpc64-unknown-linux-gnu-",
        "powerpc64-linux-gnu-",
        "powerpc64le-linux-gnu-",
index d05aec491cff22189d7fe763120884a639cd2cfb..4761b0d7fcb5b2586e7fd86c1ed8b219d0cec987 100644 (file)
@@ -2494,6 +2494,8 @@ int cmd_script(int argc, const char **argv)
                        "Enable kernel symbol demangling"),
        OPT_STRING(0, "time", &script.time_str, "str",
                   "Time span of interest (start,stop)"),
+       OPT_BOOLEAN(0, "inline", &symbol_conf.inline_name,
+                   "Show inline function"),
        OPT_END()
        };
        const char * const script_subcommands[] = { "record", "report", NULL };
index a935b502373253217d6f43680e6b88a6ab143bfe..ad9324d1daf9f29a990a0d8f903563873ac91ef9 100644 (file)
@@ -1578,6 +1578,7 @@ static void print_header(int argc, const char **argv)
 static void print_footer(void)
 {
        FILE *output = stat_config.output;
+       int n;
 
        if (!null_run)
                fprintf(output, "\n");
@@ -1590,7 +1591,9 @@ static void print_footer(void)
        }
        fprintf(output, "\n\n");
 
-       if (print_free_counters_hint)
+       if (print_free_counters_hint &&
+           sysctl__read_int("kernel/nmi_watchdog", &n) >= 0 &&
+           n > 0)
                fprintf(output,
 "Some events weren't counted. Try disabling the NMI watchdog:\n"
 "      echo 0 > /proc/sys/kernel/nmi_watchdog\n"
index d014350adc526722da3a12f29421cd6d3c00f4af..4b2a5d2981970baf86e4e458ef4ced256fc98c0e 100644 (file)
@@ -681,6 +681,10 @@ static struct syscall_fmt {
        { .name     = "mlockall",   .errmsg = true,
          .arg_scnprintf = { [0] = SCA_HEX, /* addr */ }, },
        { .name     = "mmap",       .hexret = true,
+/* The standard mmap maps to old_mmap on s390x */
+#if defined(__s390x__)
+       .alias = "old_mmap",
+#endif
          .arg_scnprintf = { [0] = SCA_HEX,       /* addr */
                             [2] = SCA_MMAP_PROT, /* prot */
                             [3] = SCA_MMAP_FLAGS, /* flags */ }, },
index e7664fe3bd33739fd92be2579c30102e481e8f03..8ba2c4618fe90231d1157e8218bf10a2cb82f6a0 100644 (file)
@@ -288,3 +288,17 @@ int test__bp_signal(int subtest __maybe_unused)
        return count1 == 1 && overflows == 3 && count2 == 3 && overflows_2 == 3 && count3 == 2 ?
                TEST_OK : TEST_FAIL;
 }
+
+bool test__bp_signal_is_supported(void)
+{
+/*
+ * powerpc does not yet support creating instruction breakpoints
+ * through the perf event interface; once that support lands, this
+ * check can be dropped.
+ */
+#ifdef __powerpc__
+       return false;
+#else
+       return true;
+#endif
+}
index 9e08d297f1a905f57554bf6fd7b9555e980ad10a..3ccfd58a8c3cf3e8b16cc513a67324b8f11eae7b 100644 (file)
@@ -97,10 +97,12 @@ static struct test generic_tests[] = {
        {
                .desc = "Breakpoint overflow signal handler",
                .func = test__bp_signal,
+               .is_supported = test__bp_signal_is_supported,
        },
        {
                .desc = "Breakpoint overflow sampling",
                .func = test__bp_signal_overflow,
+               .is_supported = test__bp_signal_is_supported,
        },
        {
                .desc = "Number of exit events of a simple workload",
@@ -401,6 +403,11 @@ static int __cmd_test(int argc, const char *argv[], struct intlist *skiplist)
                if (!perf_test__matches(t, curr, argc, argv))
                        continue;
 
+               if (t->is_supported && !t->is_supported()) {
+                       pr_debug("%2d: %-*s: Disabled\n", i, width, t->desc);
+                       continue;
+               }
+
                pr_info("%2d: %-*s:", i, width, t->desc);
 
                if (intlist__find(skiplist, i)) {
index 6318596294032602b2858acf42c2aa80993fdac9..577363809c9b1b54731f7e80b291278bf2764e78 100644 (file)
@@ -34,6 +34,7 @@ struct test {
                int (*get_nr)(void);
                const char *(*get_desc)(int subtest);
        } subtest;
+       bool (*is_supported)(void);
 };
 
 /* Tests */
@@ -99,6 +100,8 @@ const char *test__clang_subtest_get_desc(int subtest);
 int test__clang_subtest_get_nr(void);
 int test__unit_number__scnprint(int subtest);
 
+bool test__bp_signal_is_supported(void);
+
 #if defined(__arm__) || defined(__aarch64__)
 #ifdef HAVE_DWARF_UNWIND_SUPPORT
 struct thread;
index 59addd52d9cd51ab2d8789989a9d377d2ce23eb5..ddb2c6fbdf919e8124ffb40eee47c801b43bd6c6 100644 (file)
@@ -210,6 +210,8 @@ static int __hpp__sort_acc(struct hist_entry *a, struct hist_entry *b,
                        return 0;
 
                ret = b->callchain->max_depth - a->callchain->max_depth;
+               if (callchain_param.order == ORDER_CALLER)
+                       ret = -ret;
        }
        return ret;
 }
index 683f8340460c1777f82f35860b3bb581a07f222a..1367d7e3524212d45edd0f0807c931f421dad69b 100644 (file)
@@ -239,10 +239,20 @@ static int jump__parse(struct arch *arch __maybe_unused, struct ins_operands *op
        const char *s = strchr(ops->raw, '+');
        const char *c = strchr(ops->raw, ',');
 
-       if (c++ != NULL)
+       /*
+        * skip over up to 2 possible operands to get to the address, e.g.:
+        * tbnz  w0, #26, ffff0000083cd190 <security_file_permission+0xd0>
+        */
+       if (c++ != NULL) {
                ops->target.addr = strtoull(c, NULL, 16);
-       else
+               if (!ops->target.addr) {
+                       c = strchr(c, ',');
+                       if (c++ != NULL)
+                               ops->target.addr = strtoull(c, NULL, 16);
+               }
+       } else {
                ops->target.addr = strtoull(ops->raw, NULL, 16);
+       }
 
        if (s++ != NULL) {
                ops->target.offset = strtoull(s, NULL, 16);
@@ -257,10 +267,27 @@ static int jump__parse(struct arch *arch __maybe_unused, struct ins_operands *op
 static int jump__scnprintf(struct ins *ins, char *bf, size_t size,
                           struct ins_operands *ops)
 {
+       const char *c = strchr(ops->raw, ',');
+
        if (!ops->target.addr || ops->target.offset < 0)
                return ins__raw_scnprintf(ins, bf, size, ops);
 
-       return scnprintf(bf, size, "%-6.6s %" PRIx64, ins->name, ops->target.offset);
+       if (c != NULL) {
+               const char *c2 = strchr(c + 1, ',');
+
+               /* check for 3-op insn */
+               if (c2 != NULL)
+                       c = c2;
+               c++;
+
+               /* mirror arch objdump's space-after-comma style */
+               if (*c == ' ')
+                       c++;
+       }
+
+       return scnprintf(bf, size, "%-6.6s %.*s%" PRIx64,
+                        ins->name, c ? c - ops->raw : 0, ops->raw,
+                        ops->target.offset);
 }
 
 static struct ins_ops jump_ops = {
@@ -1429,7 +1456,7 @@ int symbol__disassemble(struct symbol *sym, struct map *map, const char *arch_na
        snprintf(command, sizeof(command),
                 "%s %s%s --start-address=0x%016" PRIx64
                 " --stop-address=0x%016" PRIx64
-                " -l -d %s %s -C %s 2>/dev/null|grep -v %s:|expand",
+                " -l -d %s %s -C \"%s\" 2>/dev/null|grep -v \"%s:\"|expand",
                 objdump_path ? objdump_path : "objdump",
                 disassembler_style ? "-M " : "",
                 disassembler_style ? disassembler_style : "",
index 81fc29ac798facf789712d81147eacda0c01552d..b4204b43ed58a83c9530ae3b75df4c01004b442e 100644 (file)
@@ -621,14 +621,19 @@ enum match_result {
 static enum match_result match_chain_srcline(struct callchain_cursor_node *node,
                                             struct callchain_list *cnode)
 {
-       char *left = get_srcline(cnode->ms.map->dso,
+       char *left = NULL;
+       char *right = NULL;
+       enum match_result ret = MATCH_EQ;
+       int cmp;
+
+       if (cnode->ms.map)
+               left = get_srcline(cnode->ms.map->dso,
                                 map__rip_2objdump(cnode->ms.map, cnode->ip),
                                 cnode->ms.sym, true, false);
-       char *right = get_srcline(node->map->dso,
+       if (node->map)
+               right = get_srcline(node->map->dso,
                                  map__rip_2objdump(node->map, node->ip),
                                  node->sym, true, false);
-       enum match_result ret = MATCH_EQ;
-       int cmp;
 
        if (left && right)
                cmp = strcmp(left, right);
index a96a99d2369f800634025bcdfa9838d1d6bc9d97..b27d127cdf68f996ee4f96ce24aa51044e31bf9f 100644 (file)
@@ -335,6 +335,21 @@ int __kmod_path__parse(struct kmod_path *m, const char *path,
        return 0;
 }
 
+void dso__set_module_info(struct dso *dso, struct kmod_path *m,
+                         struct machine *machine)
+{
+       if (machine__is_host(machine))
+               dso->symtab_type = DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE;
+       else
+               dso->symtab_type = DSO_BINARY_TYPE__GUEST_KMODULE;
+
+       /* _KMODULE_COMP should be next to _KMODULE */
+       if (m->kmod && m->comp)
+               dso->symtab_type++;
+
+       dso__set_short_name(dso, strdup(m->name), true);
+}
+
 /*
  * Global list of open DSOs and the counter.
  */
index 12350b17172730adf0dffdf324cb9e0b4a4be2ba..5fe2ab5877bd6c30fd66bb81596ac3e564d9ce04 100644 (file)
@@ -259,6 +259,9 @@ int __kmod_path__parse(struct kmod_path *m, const char *path,
 #define kmod_path__parse_name(__m, __p) __kmod_path__parse(__m, __p, true , false)
 #define kmod_path__parse_ext(__m, __p)  __kmod_path__parse(__m, __p, false, true)
 
+void dso__set_module_info(struct dso *dso, struct kmod_path *m,
+                         struct machine *machine);
+
 /*
  * The dso__data_* external interface provides following functions:
  *   dso__data_get_fd
index e415aee6a24520f3c88e9ce70d621b39109896b6..583f3a602506f29f198d7153b8ad8ee8073a6fd9 100644 (file)
@@ -7,6 +7,7 @@
 #include "map.h"
 #include "strlist.h"
 #include "symbol.h"
+#include "srcline.h"
 
 static int comma_fprintf(FILE *fp, bool *first, const char *fmt, ...)
 {
@@ -168,6 +169,38 @@ int sample__fprintf_callchain(struct perf_sample *sample, int left_alignment,
                        if (!print_oneline)
                                printed += fprintf(fp, "\n");
 
+                       if (symbol_conf.inline_name && node->map) {
+                               struct inline_node *inode;
+
+                               addr = map__rip_2objdump(node->map, node->ip);
+                               inode = dso__parse_addr_inlines(node->map->dso, addr);
+
+                               if (inode) {
+                                       struct inline_list *ilist;
+
+                                       list_for_each_entry(ilist, &inode->val, list) {
+                                               if (print_arrow)
+                                                       printed += fprintf(fp, " <-");
+
+                                               /* IP is same, just skip it */
+                                               if (print_ip)
+                                                       printed += fprintf(fp, "%c%16s",
+                                                                          s, "");
+                                               if (print_sym)
+                                                       printed += fprintf(fp, " %s",
+                                                                          ilist->funcname);
+                                               if (print_srcline)
+                                                       printed += fprintf(fp, "\n  %s:%d",
+                                                                          ilist->filename,
+                                                                          ilist->line_nr);
+                                               if (!print_oneline)
+                                                       printed += fprintf(fp, "\n");
+                                       }
+
+                                       inline_node__delete(inode);
+                               }
+                       }
+
                        if (symbol_conf.bt_stop_list &&
                            node->sym &&
                            strlist__has_entry(symbol_conf.bt_stop_list,
index 314a07151fb772377752dae62658b79ffdc87cd6..5cac8d5e009a88ff096d9e2f8026e39e8567c595 100644 (file)
@@ -1469,8 +1469,16 @@ static int __event_process_build_id(struct build_id_event *bev,
 
                dso__set_build_id(dso, &bev->build_id);
 
-               if (!is_kernel_module(filename, cpumode))
-                       dso->kernel = dso_type;
+               if (dso_type != DSO_TYPE_USER) {
+                       struct kmod_path m = { .name = NULL, };
+
+                       if (!kmod_path__parse_name(&m, filename) && m.kmod)
+                               dso__set_module_info(dso, &m, machine);
+                       else
+                               dso->kernel = dso_type;
+
+                       free(m.name);
+               }
 
                build_id__sprintf(dso->build_id, sizeof(dso->build_id),
                                  sbuild_id);
index d97e014c3df395e51e61da31f9624e12927a43b2..d7f31cb0a4cbeb41e6c02d58323a8848ff82cfc4 100644 (file)
@@ -572,16 +572,7 @@ static struct dso *machine__findnew_module_dso(struct machine *machine,
                if (dso == NULL)
                        goto out_unlock;
 
-               if (machine__is_host(machine))
-                       dso->symtab_type = DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE;
-               else
-                       dso->symtab_type = DSO_BINARY_TYPE__GUEST_KMODULE;
-
-               /* _KMODULE_COMP should be next to _KMODULE */
-               if (m->kmod && m->comp)
-                       dso->symtab_type++;
-
-               dso__set_short_name(dso, strdup(m->name), true);
+               dso__set_module_info(dso, m, machine);
                dso__set_long_name(dso, strdup(filename), true);
        }
 
index df051a52393c1de8e85f46d43f567755888133e7..ebc88a74e67b7129cbbd6790c34f50dcefec0de1 100644 (file)
@@ -56,7 +56,10 @@ static int inline_list__append(char *filename, char *funcname, int line_nr,
                }
        }
 
-       list_add_tail(&ilist->list, &node->val);
+       if (callchain_param.order == ORDER_CALLEE)
+               list_add_tail(&ilist->list, &node->val);
+       else
+               list_add(&ilist->list, &node->val);
 
        return 0;
 }
@@ -200,12 +203,14 @@ static void addr2line_cleanup(struct a2l_data *a2l)
 
 #define MAX_INLINE_NEST 1024
 
-static void inline_list__reverse(struct inline_node *node)
+static int inline_list__append_dso_a2l(struct dso *dso,
+                                      struct inline_node *node)
 {
-       struct inline_list *ilist, *n;
+       struct a2l_data *a2l = dso->a2l;
+       char *funcname = a2l->funcname ? strdup(a2l->funcname) : NULL;
+       char *filename = a2l->filename ? strdup(a2l->filename) : NULL;
 
-       list_for_each_entry_safe_reverse(ilist, n, &node->val, list)
-               list_move_tail(&ilist->list, &node->val);
+       return inline_list__append(filename, funcname, a2l->line, node, dso);
 }
 
 static int addr2line(const char *dso_name, u64 addr,
@@ -230,36 +235,36 @@ static int addr2line(const char *dso_name, u64 addr,
 
        bfd_map_over_sections(a2l->abfd, find_address_in_section, a2l);
 
-       if (a2l->found && unwind_inlines) {
+       if (!a2l->found)
+               return 0;
+
+       if (unwind_inlines) {
                int cnt = 0;
 
+               if (node && inline_list__append_dso_a2l(dso, node))
+                       return 0;
+
                while (bfd_find_inliner_info(a2l->abfd, &a2l->filename,
                                             &a2l->funcname, &a2l->line) &&
                       cnt++ < MAX_INLINE_NEST) {
 
                        if (node != NULL) {
-                               if (inline_list__append(strdup(a2l->filename),
-                                                       strdup(a2l->funcname),
-                                                       a2l->line, node,
-                                                       dso) != 0)
+                               if (inline_list__append_dso_a2l(dso, node))
                                        return 0;
+                               /* found at least one inline frame */
+                               ret = 1;
                        }
                }
+       }
 
-               if ((node != NULL) &&
-                   (callchain_param.order != ORDER_CALLEE)) {
-                       inline_list__reverse(node);
-               }
+       if (file) {
+               *file = a2l->filename ? strdup(a2l->filename) : NULL;
+               ret = *file ? 1 : 0;
        }
 
-       if (a2l->found && a2l->filename) {
-               *file = strdup(a2l->filename);
+       if (line)
                *line = a2l->line;
 
-               if (*file)
-                       ret = 1;
-       }
-
        return ret;
 }
 
@@ -278,8 +283,6 @@ void dso__free_a2l(struct dso *dso)
 static struct inline_node *addr2inlines(const char *dso_name, u64 addr,
        struct dso *dso)
 {
-       char *file = NULL;
-       unsigned int line = 0;
        struct inline_node *node;
 
        node = zalloc(sizeof(*node));
@@ -291,7 +294,7 @@ static struct inline_node *addr2inlines(const char *dso_name, u64 addr,
        INIT_LIST_HEAD(&node->val);
        node->addr = addr;
 
-       if (!addr2line(dso_name, addr, &file, &line, dso, TRUE, node))
+       if (!addr2line(dso_name, addr, NULL, NULL, dso, TRUE, node))
                goto out_free_inline_node;
 
        if (list_empty(&node->val))
index e7ee47f7377ab17bbb414be4240cf01565ae863a..1fb2efae4f0254b56eb22abe69b9041bb220e963 100644 (file)
@@ -649,10 +649,7 @@ static int decompress_kmodule(struct dso *dso, const char *name,
            type != DSO_BINARY_TYPE__BUILD_ID_CACHE)
                return -1;
 
-       if (type == DSO_BINARY_TYPE__BUILD_ID_CACHE)
-               name = dso->long_name;
-
-       if (kmod_path__parse_ext(&m, name) || !m.comp)
+       if (kmod_path__parse_ext(&m, dso->long_name) || !m.comp)
                return -1;
 
        fd = mkstemp(tmpbuf);
index f90e11a555b208302f9dd2a4163d73051e0d2f47..da45c4be5fb3e77ee59131602667b4d675bc3a40 100644 (file)
@@ -39,6 +39,14 @@ static int __report_module(struct addr_location *al, u64 ip,
                return 0;
 
        mod = dwfl_addrmodule(ui->dwfl, ip);
+       if (mod) {
+               Dwarf_Addr s;
+
+               dwfl_module_info(mod, NULL, &s, NULL, NULL, NULL, NULL, NULL);
+               if (s != al->map->start)
+                       mod = 0;
+       }
+
        if (!mod)
                mod = dwfl_report_elf(ui->dwfl, dso->short_name,
                                      dso->long_name, -1, al->map->start,
@@ -168,12 +176,16 @@ frame_callback(Dwfl_Frame *state, void *arg)
 {
        struct unwind_info *ui = arg;
        Dwarf_Addr pc;
+       bool isactivation;
 
-       if (!dwfl_frame_pc(state, &pc, NULL)) {
+       if (!dwfl_frame_pc(state, &pc, &isactivation)) {
                pr_err("%s", dwfl_errmsg(-1));
                return DWARF_CB_ABORT;
        }
 
+       if (!isactivation)
+               --pc;
+
        return entry(pc, ui) || !(--ui->max_stack) ?
               DWARF_CB_ABORT : DWARF_CB_OK;
 }
@@ -220,7 +232,7 @@ int unwind__get_entries(unwind_entry_cb_t cb, void *arg,
 
        err = dwfl_getthread_frames(ui->dwfl, thread->tid, frame_callback, ui);
 
-       if (err && !ui->max_stack)
+       if (err && ui->max_stack != max_stack)
                err = 0;
 
        /*
index f8455bed6e653705183b878c40d294991edf4e92..672c2ada9357a25054b4b1ba7eeec643505984e2 100644 (file)
@@ -692,6 +692,17 @@ static int get_entries(struct unwind_info *ui, unwind_entry_cb_t cb,
 
                while (!ret && (unw_step(&c) > 0) && i < max_stack) {
                        unw_get_reg(&c, UNW_REG_IP, &ips[i]);
+
+                       /*
+                        * This is required to properly find the srcline
+                        * this is required to properly find the srcline
+                        * for caller frames.
+                        * See also the documentation for dwfl_frame_pc(),
+                        * which this code tries to replicate.
+                        */
+                       if (unw_is_signal_frame(&c) <= 0)
+                               --ips[i];
+
                        ++i;
                }
 
index 3773562056da267aee91878ed8088b3c577a997e..cabb19b1e3718b289910fcc921c698f44f162768 100644 (file)
@@ -49,6 +49,7 @@
 #define MAX_NR_MAPS    4
 
 #define F_NEEDS_EFFICIENT_UNALIGNED_ACCESS     (1 << 0)
+#define F_LOAD_WITH_STRICT_ALIGNMENT           (1 << 1)
 
 struct bpf_test {
        const char *descr;
@@ -2614,6 +2615,30 @@ static struct bpf_test tests[] = {
                .result = REJECT,
                .prog_type = BPF_PROG_TYPE_SCHED_CLS,
        },
+       {
+               "direct packet access: test17 (pruning, alignment)",
+               .insns = {
+                       BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+                                   offsetof(struct __sk_buff, data)),
+                       BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+                                   offsetof(struct __sk_buff, data_end)),
+                       BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
+                                   offsetof(struct __sk_buff, mark)),
+                       BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 14),
+                       BPF_JMP_IMM(BPF_JGT, BPF_REG_7, 1, 4),
+                       BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
+                       BPF_STX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, -4),
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 1),
+                       BPF_JMP_A(-6),
+               },
+               .errstr = "misaligned packet access off 2+15+-4 size 4",
+               .result = REJECT,
+               .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+               .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
+       },
        {
                "helper access to packet: test1, valid packet_ptr range",
                .insns = {
@@ -3340,6 +3365,70 @@ static struct bpf_test tests[] = {
                .result = ACCEPT,
                .prog_type = BPF_PROG_TYPE_SCHED_CLS
        },
+       {
+               "alu ops on ptr_to_map_value_or_null, 1",
+               .insns = {
+                       BPF_MOV64_IMM(BPF_REG_1, 10),
+                       BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+                       BPF_LD_MAP_FD(BPF_REG_1, 0),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+                                    BPF_FUNC_map_lookup_elem),
+                       BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -2),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 2),
+                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
+                       BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .fixup_map1 = { 4 },
+               .errstr = "R4 invalid mem access",
+               .result = REJECT,
+               .prog_type = BPF_PROG_TYPE_SCHED_CLS
+       },
+       {
+               "alu ops on ptr_to_map_value_or_null, 2",
+               .insns = {
+                       BPF_MOV64_IMM(BPF_REG_1, 10),
+                       BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+                       BPF_LD_MAP_FD(BPF_REG_1, 0),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+                                    BPF_FUNC_map_lookup_elem),
+                       BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
+                       BPF_ALU64_IMM(BPF_AND, BPF_REG_4, -1),
+                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
+                       BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .fixup_map1 = { 4 },
+               .errstr = "R4 invalid mem access",
+               .result = REJECT,
+               .prog_type = BPF_PROG_TYPE_SCHED_CLS
+       },
+       {
+               "alu ops on ptr_to_map_value_or_null, 3",
+               .insns = {
+                       BPF_MOV64_IMM(BPF_REG_1, 10),
+                       BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+                       BPF_LD_MAP_FD(BPF_REG_1, 0),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+                                    BPF_FUNC_map_lookup_elem),
+                       BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
+                       BPF_ALU64_IMM(BPF_LSH, BPF_REG_4, 1),
+                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
+                       BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .fixup_map1 = { 4 },
+               .errstr = "R4 invalid mem access",
+               .result = REJECT,
+               .prog_type = BPF_PROG_TYPE_SCHED_CLS
+       },
        {
                "invalid memory access with multiple map_lookup_elem calls",
                .insns = {
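[Note: what the three map_value_or_null tests above pin down, rewritten
as the restricted C such programs are compiled from (a sketch with
illustrative map and variable names, not part of the patch): a value
returned by bpf_map_lookup_elem() carries type PTR_TO_MAP_VALUE_OR_NULL,
and any ALU op on it before the NULL check must invalidate the register,
even ops that are arithmetically reversible or no-ops (ADD -2/+2,
AND -1):

	long *val = bpf_map_lookup_elem(&my_map, &key);

	/* No arithmetic on val before this point: the verifier would
	 * flag any later use as "R4 invalid mem access", exactly as in
	 * the tests above.
	 */
	if (val)		/* NULL check first ...                */
		*val = 0;	/* ... only then is the pointer usable */
]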
@@ -4937,7 +5026,149 @@ static struct bpf_test tests[] = {
                .fixup_map_in_map = { 3 },
                .errstr = "R1 type=map_value_or_null expected=map_ptr",
                .result = REJECT,
-       }
+       },
+       {
+               "ld_abs: check calling conv, r1",
+               .insns = {
+                       BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+                       BPF_MOV64_IMM(BPF_REG_1, 0),
+                       BPF_LD_ABS(BPF_W, -0x200000),
+                       BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
+                       BPF_EXIT_INSN(),
+               },
+               .errstr = "R1 !read_ok",
+               .result = REJECT,
+       },
+       {
+               "ld_abs: check calling conv, r2",
+               .insns = {
+                       BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+                       BPF_MOV64_IMM(BPF_REG_2, 0),
+                       BPF_LD_ABS(BPF_W, -0x200000),
+                       BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+                       BPF_EXIT_INSN(),
+               },
+               .errstr = "R2 !read_ok",
+               .result = REJECT,
+       },
+       {
+               "ld_abs: check calling conv, r3",
+               .insns = {
+                       BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+                       BPF_MOV64_IMM(BPF_REG_3, 0),
+                       BPF_LD_ABS(BPF_W, -0x200000),
+                       BPF_MOV64_REG(BPF_REG_0, BPF_REG_3),
+                       BPF_EXIT_INSN(),
+               },
+               .errstr = "R3 !read_ok",
+               .result = REJECT,
+       },
+       {
+               "ld_abs: check calling conv, r4",
+               .insns = {
+                       BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+                       BPF_MOV64_IMM(BPF_REG_4, 0),
+                       BPF_LD_ABS(BPF_W, -0x200000),
+                       BPF_MOV64_REG(BPF_REG_0, BPF_REG_4),
+                       BPF_EXIT_INSN(),
+               },
+               .errstr = "R4 !read_ok",
+               .result = REJECT,
+       },
+       {
+               "ld_abs: check calling conv, r5",
+               .insns = {
+                       BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+                       BPF_MOV64_IMM(BPF_REG_5, 0),
+                       BPF_LD_ABS(BPF_W, -0x200000),
+                       BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
+                       BPF_EXIT_INSN(),
+               },
+               .errstr = "R5 !read_ok",
+               .result = REJECT,
+       },
+       {
+               "ld_abs: check calling conv, r7",
+               .insns = {
+                       BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+                       BPF_MOV64_IMM(BPF_REG_7, 0),
+                       BPF_LD_ABS(BPF_W, -0x200000),
+                       BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
+                       BPF_EXIT_INSN(),
+               },
+               .result = ACCEPT,
+       },
+       {
+               "ld_ind: check calling conv, r1",
+               .insns = {
+                       BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+                       BPF_MOV64_IMM(BPF_REG_1, 1),
+                       BPF_LD_IND(BPF_W, BPF_REG_1, -0x200000),
+                       BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
+                       BPF_EXIT_INSN(),
+               },
+               .errstr = "R1 !read_ok",
+               .result = REJECT,
+       },
+       {
+               "ld_ind: check calling conv, r2",
+               .insns = {
+                       BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+                       BPF_MOV64_IMM(BPF_REG_2, 1),
+                       BPF_LD_IND(BPF_W, BPF_REG_2, -0x200000),
+                       BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+                       BPF_EXIT_INSN(),
+               },
+               .errstr = "R2 !read_ok",
+               .result = REJECT,
+       },
+       {
+               "ld_ind: check calling conv, r3",
+               .insns = {
+                       BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+                       BPF_MOV64_IMM(BPF_REG_3, 1),
+                       BPF_LD_IND(BPF_W, BPF_REG_3, -0x200000),
+                       BPF_MOV64_REG(BPF_REG_0, BPF_REG_3),
+                       BPF_EXIT_INSN(),
+               },
+               .errstr = "R3 !read_ok",
+               .result = REJECT,
+       },
+       {
+               "ld_ind: check calling conv, r4",
+               .insns = {
+                       BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+                       BPF_MOV64_IMM(BPF_REG_4, 1),
+                       BPF_LD_IND(BPF_W, BPF_REG_4, -0x200000),
+                       BPF_MOV64_REG(BPF_REG_0, BPF_REG_4),
+                       BPF_EXIT_INSN(),
+               },
+               .errstr = "R4 !read_ok",
+               .result = REJECT,
+       },
+       {
+               "ld_ind: check calling conv, r5",
+               .insns = {
+                       BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+                       BPF_MOV64_IMM(BPF_REG_5, 1),
+                       BPF_LD_IND(BPF_W, BPF_REG_5, -0x200000),
+                       BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
+                       BPF_EXIT_INSN(),
+               },
+               .errstr = "R5 !read_ok",
+               .result = REJECT,
+       },
+       {
+               "ld_ind: check calling conv, r7",
+               .insns = {
+                       BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+                       BPF_MOV64_IMM(BPF_REG_7, 1),
+                       BPF_LD_IND(BPF_W, BPF_REG_7, -0x200000),
+                       BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
+                       BPF_EXIT_INSN(),
+               },
+               .result = ACCEPT,
+       },
 };
 
 static int probe_filter_length(const struct bpf_insn *fp)
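[Note: the ld_abs/ld_ind pairs above encode the calling convention of
these two legacy instructions as described in
Documentation/networking/filter.txt: R6 must hold the skb context, the
result lands in R0, R1-R5 are scratch and undefined afterwards (hence
the "!read_ok" rejections for r1-r5), and callee-saved R7-R9 survive
(hence the r7 ACCEPT). In macro form:

	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),	/* ctx must sit in R6 */
	BPF_LD_ABS(BPF_W, 0),		/* R0 = packet word; R1-R5 now undefined */
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),	/* R7-R9 are preserved */
	BPF_EXIT_INSN(),
]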
@@ -5059,9 +5290,9 @@ static void do_test_single(struct bpf_test *test, bool unpriv,
 
        do_test_fixup(test, prog, map_fds);
 
-       fd_prog = bpf_load_program(prog_type ? : BPF_PROG_TYPE_SOCKET_FILTER,
-                                  prog, prog_len, "GPL", 0, bpf_vlog,
-                                  sizeof(bpf_vlog));
+       fd_prog = bpf_verify_program(prog_type ? : BPF_PROG_TYPE_SOCKET_FILTER,
+                                    prog, prog_len, test->flags & F_LOAD_WITH_STRICT_ALIGNMENT,
+                                    "GPL", 0, bpf_vlog, sizeof(bpf_vlog));
 
        expected_ret = unpriv && test->result_unpriv != UNDEF ?
                       test->result_unpriv : test->result;
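[Note: bpf_verify_program() is the bpf_load_program() variant added in
this series that exposes strict-alignment checking; the extra int maps
the F_LOAD_WITH_STRICT_ALIGNMENT test flag through to the kernel. Its
prototype in tools/lib/bpf/bpf.h, quoted here as an assumption worth
double-checking:

	int bpf_verify_program(enum bpf_prog_type type,
			       const struct bpf_insn *insns, size_t insns_cnt,
			       int strict_alignment, const char *license,
			       __u32 kern_version, char *log_buf,
			       size_t log_buf_sz);
]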
diff --git a/tools/testing/selftests/ftrace/test.d/kprobe/multiple_kprobes.tc b/tools/testing/selftests/ftrace/test.d/kprobe/multiple_kprobes.tc
new file mode 100644 (file)
index 0000000..f4d1ff7
--- /dev/null
@@ -0,0 +1,21 @@
+#!/bin/sh
+# description: Register/unregister many kprobe events
+
+# The ftrace fentry skip size depends on the machine architecture.
+# Currently HAVE_KPROBES_ON_FTRACE is defined only on x86 and powerpc.
+case `uname -m` in
+  x86_64|i[3456]86) OFFS=5;;
+  ppc*) OFFS=4;;
+  *) OFFS=0;;
+esac
+
+echo "Setup up to 256 kprobes"
+grep t /proc/kallsyms | cut -f3 -d" " | grep -v .*\\..* | \
+head -n 256 | while read i; do echo p ${i}+${OFFS} ; done > kprobe_events ||:
+
+echo 1 > events/kprobes/enable
+echo 0 > events/kprobes/enable
+echo > kprobe_events
+echo "Waiting for unoptimizing & freeing"
+sleep 5
+echo "Done"
index d9c49f41515e704994ed1669dbe0a859a6dadd79..e79ccd6aada1a9acf10bb2860e76ddb444415d52 100644 (file)
@@ -42,12 +42,12 @@ int test_body(void)
        printf("Check DSCR TM context switch: ");
        fflush(stdout);
        for (;;) {
-               rv = 1;
                asm __volatile__ (
                        /* set a known value into the DSCR */
                        "ld      3, %[dscr1];"
                        "mtspr   %[sprn_dscr], 3;"
 
+                       "li      %[rv], 1;"
                        /* start and suspend a transaction */
                        "tbegin.;"
                        "beq     1f;"
index c0c48507e44e2992b3624b2374cd08769a8bb607..ad0543e21760562d94bc0a44676700b1179c1787 100644 (file)
@@ -220,6 +220,7 @@ config INITRAMFS_COMPRESSION_LZ4
 endchoice
 
 config INITRAMFS_COMPRESSION
+       depends on INITRAMFS_SOURCE!=""
        string
        default ""      if INITRAMFS_COMPRESSION_NONE
        default ".gz"   if INITRAMFS_COMPRESSION_GZIP