git.proxmox.com Git - mirror_ubuntu-artful-kernel.git/commitdiff
Merge tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi
author Linus Torvalds <torvalds@linux-foundation.org>
Wed, 14 Dec 2016 18:49:33 +0000 (10:49 -0800)
committer Linus Torvalds <torvalds@linux-foundation.org>
Wed, 14 Dec 2016 18:49:33 +0000 (10:49 -0800)
Pull SCSI updates from James Bottomley:
 "This update includes the usual round of major driver updates (ncr5380,
  lpfc, hisi_sas, megaraid_sas, ufs, ibmvscsis, mpt3sas).

  There's also an assortment of minor fixes, mostly in error legs or
  other not very user visible stuff. The major change is the
  pci_alloc_irq_vectors replacement for the old pci_msix_.. calls; this
  effectively makes IRQ mapping generic for the drivers and allows
  blk_mq to use the information"

* tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi: (256 commits)
  scsi: qla4xxx: switch to pci_alloc_irq_vectors
  scsi: hisi_sas: support deferred probe for v2 hw
  scsi: megaraid_sas: switch to pci_alloc_irq_vectors
  scsi: scsi_devinfo: remove synchronous ALUA for NETAPP devices
  scsi: be2iscsi: set errno on error path
  scsi: be2iscsi: set errno on error path
  scsi: hpsa: fallback to use legacy REPORT PHYS command
  scsi: scsi_dh_alua: Fix RCU annotations
  scsi: hpsa: use %phN for short hex dumps
  scsi: hisi_sas: fix free'ing in probe and remove
  scsi: isci: switch to pci_alloc_irq_vectors
  scsi: ipr: Fix runaway IRQs when falling back from MSI to LSI
  scsi: dpt_i2o: double free on error path
  scsi: cxlflash: Migrate scsi command pointer to AFU command
  scsi: cxlflash: Migrate IOARRIN specific routines to function pointers
  scsi: cxlflash: Cleanup queuecommand()
  scsi: cxlflash: Cleanup send_tmf()
  scsi: cxlflash: Remove AFU command lock
  scsi: cxlflash: Wait for active AFU commands to timeout upon tear down
  scsi: cxlflash: Remove private command pool
  ...

17 files changed:
MAINTAINERS
block/blk-mq.h
block/bsg-lib.c
drivers/scsi/Makefile
drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
drivers/scsi/device_handler/scsi_dh_alua.c
drivers/scsi/hpsa.c
drivers/scsi/hpsa.h
drivers/scsi/libfc/fc_lport.c
drivers/scsi/lpfc/lpfc_sli.c
drivers/scsi/megaraid/megaraid_sas.h
drivers/scsi/mpt3sas/mpt3sas_scsih.c
drivers/scsi/pmcraid.c
drivers/scsi/scsi_lib.c
drivers/scsi/sd.c
drivers/scsi/ufs/ufshcd.c
include/linux/blk-mq.h

diff --combined MAINTAINERS
index 8007e2811264205bd76ad7b39f632cae74ea90c1,11fdf455c6142db40e094ea4645affee1fed580d..e376bbd1c555a66cf702141b19446905a0bcf66c
@@@ -35,13 -35,13 +35,13 @@@ trivial patch so apply some common sens
  
        PLEASE check your patch with the automated style checker
        (scripts/checkpatch.pl) to catch trivial style violations.
 -      See Documentation/CodingStyle for guidance here.
 +      See Documentation/process/coding-style.rst for guidance here.
  
        PLEASE CC: the maintainers and mailing lists that are generated
        by scripts/get_maintainer.pl.  The results returned by the
        script will be best if you have git installed and are making
        your changes in a branch derived from Linus' latest git tree.
 -      See Documentation/SubmittingPatches for details.
 +      See Documentation/process/submitting-patches.rst for details.
  
        PLEASE try to include any credit lines you want added with the
        patch. It avoids people being missed off by mistake and makes
@@@ -54,7 -54,7 +54,7 @@@
        of the Linux Foundation certificate of contribution and should
        include a Signed-off-by: line.  The current version of this
        "Developer's Certificate of Origin" (DCO) is listed in the file
 -      Documentation/SubmittingPatches.
 +      Documentation/process/submitting-patches.rst.
  
  6.    Make sure you have the right to send any changes you make. If you
        do changes at work you may find your employer owns the patch
@@@ -74,14 -74,9 +74,14 @@@ Descriptions of section entries
           These reviewers should be CCed on patches.
        L: Mailing list that is relevant to this area
        W: Web-page with status/info
 +      B: URI for where to file bugs. A web-page with detailed bug
 +         filing info, a direct bug tracker link, or a mailto: URI.
 +      C: URI for chat protocol, server and channel where developers
 +         usually hang out, for example irc://server/channel.
        Q: Patchwork web based patch tracking system site
        T: SCM tree type and location.
           Type is one of: git, hg, quilt, stgit, topgit
 +      B: Bug tracking system location.
        S: Status, one of the following:
           Supported:   Someone is actually paid to look after this.
           Maintained:  Someone actually looks after it.
@@@ -260,12 -255,6 +260,12 @@@ L:       linux-gpio@vger.kernel.or
  S:    Maintained
  F:    drivers/gpio/gpio-104-idio-16.c
  
 +ACCES 104-QUAD-8 IIO DRIVER
 +M:    William Breathitt Gray <vilhelm.gray@gmail.com>
 +L:    linux-iio@vger.kernel.org
 +S:    Maintained
 +F:    drivers/iio/counter/104-quad-8.c
 +
  ACENIC DRIVER
  M:    Jes Sorensen <jes@trained-monkey.org>
  L:    linux-acenic@sunsite.dk
@@@ -292,7 -281,6 +292,7 @@@ L: linux-acpi@vger.kernel.or
  W:    https://01.org/linux-acpi
  Q:    https://patchwork.kernel.org/project/linux-acpi/list/
  T:    git git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm
 +B:    https://bugzilla.kernel.org
  S:    Supported
  F:    drivers/acpi/
  F:    drivers/pnp/pnpacpi/
@@@ -316,8 -304,6 +316,8 @@@ W: https://acpica.org
  W:    https://github.com/acpica/acpica/
  Q:    https://patchwork.kernel.org/project/linux-acpi/list/
  T:    git git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm
 +B:    https://bugzilla.kernel.org
 +B:    https://bugs.acpica.org
  S:    Supported
  F:    drivers/acpi/acpica/
  F:    include/acpi/
@@@ -327,7 -313,6 +327,7 @@@ ACPI FAN DRIVE
  M:    Zhang Rui <rui.zhang@intel.com>
  L:    linux-acpi@vger.kernel.org
  W:    https://01.org/linux-acpi
 +B:    https://bugzilla.kernel.org
  S:    Supported
  F:    drivers/acpi/fan.c
  
@@@ -343,7 -328,6 +343,7 @@@ ACPI THERMAL DRIVE
  M:    Zhang Rui <rui.zhang@intel.com>
  L:    linux-acpi@vger.kernel.org
  W:    https://01.org/linux-acpi
 +B:    https://bugzilla.kernel.org
  S:    Supported
  F:    drivers/acpi/*thermal*
  
@@@ -351,7 -335,6 +351,7 @@@ ACPI VIDEO DRIVE
  M:    Zhang Rui <rui.zhang@intel.com>
  L:    linux-acpi@vger.kernel.org
  W:    https://01.org/linux-acpi
 +B:    https://bugzilla.kernel.org
  S:    Supported
  F:    drivers/acpi/acpi_video.c
  
@@@ -587,11 -570,6 +587,11 @@@ T:       git git://linuxtv.org/anttip/media_t
  S:    Maintained
  F:    drivers/media/usb/airspy/
  
 +ALACRITECH GIGABIT ETHERNET DRIVER
 +M:    Lino Sanfilippo <LinoSanfilippo@gmx.de>
 +S:    Maintained
 +F:    drivers/net/ethernet/alacritech/*
 +
  ALCATEL SPEEDTOUCH USB DRIVER
  M:    Duncan Sands <duncan.sands@free.fr>
  L:    linux-usb@vger.kernel.org
@@@ -809,7 -787,7 +809,7 @@@ S: Supporte
  F:    drivers/iio/*/ad*
  X:    drivers/iio/*/adjd*
  F:    drivers/staging/iio/*/ad*
 -F:    staging/iio/trigger/iio-trig-bfin-timer.c
 +F:    drivers/staging/iio/trigger/iio-trig-bfin-timer.c
  
  ANALOG DEVICES INC DMA DRIVERS
  M:    Lars-Peter Clausen <lars@metafoo.de>
@@@ -1058,7 -1036,6 +1058,7 @@@ F:      arch/arm/mach-meson
  F:    arch/arm/boot/dts/meson*
  F:    arch/arm64/boot/dts/amlogic/
  F:    drivers/pinctrl/meson/
 +F:    drivers/mmc/host/meson*
  N:    meson
  
  ARM/Annapurna Labs ALPINE ARCHITECTURE
@@@ -1798,7 -1775,6 +1798,7 @@@ F:      drivers/char/hw_random/st-rng.
  F:    drivers/clocksource/arm_global_timer.c
  F:    drivers/clocksource/clksrc_st_lpc.c
  F:    drivers/cpufreq/sti-cpufreq.c
 +F:    drivers/dma/st_fdma*
  F:    drivers/i2c/busses/i2c-st.c
  F:    drivers/media/rc/st_rc.c
  F:    drivers/media/platform/sti/c8sectpfe/
@@@ -1809,7 -1785,6 +1809,7 @@@ F:      drivers/phy/phy-stih407-usb.
  F:    drivers/phy/phy-stih41x-usb.c
  F:    drivers/pinctrl/pinctrl-st.c
  F:    drivers/remoteproc/st_remoteproc.c
 +F:    drivers/remoteproc/st_slim_rproc.c
  F:    drivers/reset/sti/
  F:    drivers/rtc/rtc-st-lpc.c
  F:    drivers/tty/serial/st-asc.c
@@@ -1818,7 -1793,6 +1818,7 @@@ F:      drivers/usb/host/ehci-st.
  F:    drivers/usb/host/ohci-st.c
  F:    drivers/watchdog/st_lpc_wdt.c
  F:    drivers/ata/ahci_st.c
 +F:    include/linux/remoteproc/st_slim_rproc.h
  
  ARM/STM32 ARCHITECTURE
  M:    Maxime Coquelin <mcoquelin.stm32@gmail.com>
@@@ -2556,8 -2530,6 +2556,8 @@@ L:      netdev@vger.kernel.or
  L:    linux-kernel@vger.kernel.org
  S:    Supported
  F:    kernel/bpf/
 +F:    tools/testing/selftests/bpf/
 +F:    lib/test_bpf.c
  
  BROADCOM B44 10/100 ETHERNET DRIVER
  M:    Michael Chan <michael.chan@broadcom.com>
@@@ -2618,7 -2590,6 +2618,7 @@@ L:      linux-arm-kernel@lists.infradead.or
  T:    git git://git.kernel.org/pub/scm/linux/kernel/git/rpi/linux-rpi.git
  S:    Maintained
  N:    bcm2835
 +F:    drivers/staging/vc04_services
  
  BROADCOM BCM47XX MIPS ARCHITECTURE
  M:    Hauke Mehrtens <hauke@hauke-m.de>
@@@ -2771,14 -2742,6 +2771,14 @@@ L:    bcm-kernel-feedback-list@broadcom.co
  S:    Maintained
  F:    drivers/mtd/nand/brcmnand/
  
 +BROADCOM STB AVS CPUFREQ DRIVER
 +M:    Markus Mayer <mmayer@broadcom.com>
 +M:    bcm-kernel-feedback-list@broadcom.com
 +L:    linux-pm@vger.kernel.org
 +S:    Maintained
 +F:    Documentation/devicetree/bindings/cpufreq/brcm,stb-avs-cpu-freq.txt
 +F:    drivers/cpufreq/brcmstb*
 +
  BROADCOM SPECIFIC AMBA DRIVER (BCMA)
  M:    Rafał Miłecki <zajec5@gmail.com>
  L:    linux-wireless@vger.kernel.org
@@@ -2967,7 -2930,7 +2967,7 @@@ CAPELLA MICROSYSTEMS LIGHT SENSOR DRIVE
  M:    Kevin Tsai <ktsai@capellamicro.com>
  S:    Maintained
  F:    drivers/iio/light/cm*
 -F:    Documentation/devicetree/bindings/i2c/trivial-devices.txt
 +F:    Documentation/devicetree/bindings/i2c/trivial-admin-guide/devices.rst
  
  CAVIUM I2C DRIVER
  M:    Jan Glauber <jglauber@cavium.com>
@@@ -3067,12 -3030,6 +3067,12 @@@ F:    drivers/usb/host/whci
  F:    drivers/usb/wusbcore/
  F:    include/linux/usb/wusb*
  
 +HT16K33 LED CONTROLLER DRIVER
 +M:    Robin van der Gracht <robin@protonic.nl>
 +S:    Maintained
 +F:    drivers/auxdisplay/ht16k33.c
 +F:    Documentation/devicetree/bindings/display/ht16k33.txt
 +
  CFAG12864B LCD DRIVER
  M:    Miguel Ojeda Sandonis <miguel.ojeda.sandonis@gmail.com>
  W:    http://miguelojeda.es/auxdisplay.htm
@@@ -3121,7 -3078,7 +3121,7 @@@ M:      Harry Wei <harryxiyou@gmail.com
  L:    xiyoulinuxkernelgroup@googlegroups.com (subscribers-only)
  L:    linux-kernel@zh-kernel.org (moderated for non-subscribers)
  S:    Maintained
 -F:    Documentation/zh_CN/
 +F:    Documentation/translations/zh_CN/
  
  CHIPIDEA USB HIGH SPEED DUAL ROLE CONTROLLER
  M:    Peter Chen <Peter.Chen@nxp.com>
@@@ -3192,15 -3149,15 +3192,15 @@@ S:   Supporte
  F:    drivers/clocksource
  
  CISCO FCOE HBA DRIVER
- M:    Hiral Patel <hiralpat@cisco.com>
- M:    Suma Ramars <sramars@cisco.com>
- M:    Brian Uchino <buchino@cisco.com>
+ M:    Satish Kharat <satishkh@cisco.com>
+ M:    Sesidhar Baddela <sebaddel@cisco.com>
+ M:    Karan Tilak Kumar <kartilak@cisco.com>
  L:    linux-scsi@vger.kernel.org
  S:    Supported
  F:    drivers/scsi/fnic/
  
  CISCO SCSI HBA DRIVER
- M:    Narsimhulu Musini <nmusini@cisco.com>
+ M:    Karan Tilak Kumar <kartilak@cisco.com>
  M:    Sesidhar Baddela <sebaddel@cisco.com>
  L:    linux-scsi@vger.kernel.org
  S:    Supported
@@@ -3377,7 -3334,6 +3377,7 @@@ L:      linux-pm@vger.kernel.or
  S:    Maintained
  T:    git git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm.git
  T:    git git://git.linaro.org/people/vireshk/linux.git (For ARM Updates)
 +B:    https://bugzilla.kernel.org
  F:    Documentation/cpu-freq/
  F:    drivers/cpufreq/
  F:    include/linux/cpufreq.h
@@@ -3417,7 -3373,6 +3417,7 @@@ M:      Daniel Lezcano <daniel.lezcano@linar
  L:    linux-pm@vger.kernel.org
  S:    Maintained
  T:    git git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm.git
 +B:    https://bugzilla.kernel.org
  F:    drivers/cpuidle/*
  F:    include/linux/cpuidle.h
  
@@@ -3956,7 -3911,7 +3956,7 @@@ F:      include/linux/dma-buf
  F:    include/linux/reservation.h
  F:    include/linux/*fence.h
  F:    Documentation/dma-buf-sharing.txt
 -T:    git git://git.linaro.org/people/sumitsemwal/linux-dma-buf.git
 +T:    git git://anongit.freedesktop.org/drm/drm-misc
  
  SYNC FILE FRAMEWORK
  M:    Sumit Semwal <sumit.semwal@linaro.org>
@@@ -3964,12 -3919,10 +3964,12 @@@ R:   Gustavo Padovan <gustavo@padovan.org
  S:    Maintained
  L:    linux-media@vger.kernel.org
  L:    dri-devel@lists.freedesktop.org
 -F:    drivers/dma-buf/sync_file.c
 +F:    drivers/dma-buf/sync_*
 +F:    drivers/dma-buf/sw_sync.c
  F:    include/linux/sync_file.h
 +F:    include/uapi/linux/sync_file.h
  F:    Documentation/sync_file.txt
 -T:    git git://git.linaro.org/people/sumitsemwal/linux-dma-buf.git
 +T:    git git://anongit.freedesktop.org/drm/drm-misc
  
  DMA GENERIC OFFLOAD ENGINE SUBSYSTEM
  M:    Vinod Koul <vinod.koul@intel.com>
@@@ -4057,8 -4010,6 +4057,8 @@@ DRM DRIVER
  M:    David Airlie <airlied@linux.ie>
  L:    dri-devel@lists.freedesktop.org
  T:    git git://people.freedesktop.org/~airlied/linux
 +B:    https://bugs.freedesktop.org/
 +C:    irc://chat.freenode.net/dri-devel
  S:    Maintained
  F:    drivers/gpu/drm/
  F:    drivers/gpu/vga/
@@@ -4069,30 -4020,11 +4069,30 @@@ F:   Documentation/gpu
  F:    include/drm/
  F:    include/uapi/drm/
  
 +DRM DRIVERS AND MISC GPU PATCHES
 +M:    Daniel Vetter <daniel.vetter@intel.com>
 +M:    Jani Nikula <jani.nikula@linux.intel.com>
 +M:    Sean Paul <seanpaul@chromium.org>
 +W:    https://01.org/linuxgraphics/gfx-docs/maintainer-tools/drm-misc.html
 +S:    Maintained
 +T:    git git://anongit.freedesktop.org/drm/drm-misc
 +F:    Documentation/gpu/
 +F:    drivers/gpu/vga/
 +F:    drivers/gpu/drm/*
 +F:    include/drm/drm*
 +F:    include/uapi/drm/drm*
 +
  DRM DRIVER FOR AST SERVER GRAPHICS CHIPS
  M:    Dave Airlie <airlied@redhat.com>
  S:    Odd Fixes
  F:    drivers/gpu/drm/ast/
  
 +DRM DRIVERS FOR BRIDGE CHIPS
 +M:    Archit Taneja <architt@codeaurora.org>
 +S:    Maintained
 +T:    git git://anongit.freedesktop.org/drm/drm-misc
 +F:    drivers/gpu/drm/bridge/
 +
  DRM DRIVER FOR BOCHS VIRTUAL GPU
  M:    Gerd Hoffmann <kraxel@redhat.com>
  S:    Odd Fixes
@@@ -4128,9 -4060,8 +4128,9 @@@ INTEL DRM DRIVERS (excluding Poulsbo, M
  M:    Daniel Vetter <daniel.vetter@intel.com>
  M:    Jani Nikula <jani.nikula@linux.intel.com>
  L:    intel-gfx@lists.freedesktop.org
 -L:    dri-devel@lists.freedesktop.org
  W:    https://01.org/linuxgraphics/
 +B:    https://01.org/linuxgraphics/documentation/how-report-bugs
 +C:    irc://chat.freenode.net/intel-gfx
  Q:    http://patchwork.freedesktop.org/project/intel-gfx/
  T:    git git://anongit.freedesktop.org/drm-intel
  S:    Supported
@@@ -4139,16 -4070,6 +4139,16 @@@ F:    include/drm/i915
  F:    include/uapi/drm/i915_drm.h
  F:    Documentation/gpu/i915.rst
  
 +INTEL GVT-g DRIVERS (Intel GPU Virtualization)
 +M:      Zhenyu Wang <zhenyuw@linux.intel.com>
 +M:      Zhi Wang <zhi.a.wang@intel.com>
 +L:      igvt-g-dev@lists.01.org
 +L:      intel-gfx@lists.freedesktop.org
 +W:      https://01.org/igvt-g
 +T:      git https://github.com/01org/gvt-linux.git
 +S:      Supported
 +F:      drivers/gpu/drm/i915/gvt/
 +
  DRM DRIVERS FOR ATMEL HLCDC
  M:    Boris Brezillon <boris.brezillon@free-electrons.com>
  L:    dri-devel@lists.freedesktop.org
@@@ -4163,15 -4084,6 +4163,15 @@@ S:    Supporte
  F:    drivers/gpu/drm/sun4i/
  F:    Documentation/devicetree/bindings/display/sunxi/sun4i-drm.txt
  
 +DRM DRIVERS FOR AMLOGIC SOCS
 +M:    Neil Armstrong <narmstrong@baylibre.com>
 +L:    dri-devel@lists.freedesktop.org
 +L:    linux-amlogic@lists.infradead.org
 +W:    http://linux-meson.com/
 +S:    Supported
 +F:    drivers/gpu/drm/meson/
 +F:    Documentation/devicetree/bindings/display/amlogic,meson-vpu.txt
 +
  DRM DRIVERS FOR EXYNOS
  M:    Inki Dae <inki.dae@samsung.com>
  M:    Joonyoung Shim <jy0922.shim@samsung.com>
@@@ -4211,7 -4123,6 +4211,7 @@@ F:      drivers/gpu/drm/gma500
  
  DRM DRIVERS FOR HISILICON
  M:    Xinliang Liu <z.liuxinliang@hisilicon.com>
 +M:    Rongrong Zou <zourongrong@gmail.com>
  R:    Xinwei Kong <kong.kongxinwei@hisilicon.com>
  R:    Chen Feng <puck.chen@hisilicon.com>
  L:    dri-devel@lists.freedesktop.org
@@@ -4336,7 -4247,6 +4336,7 @@@ DRM DRIVERS FOR VIVANTE GPU I
  M:    Lucas Stach <l.stach@pengutronix.de>
  R:    Russell King <linux+etnaviv@armlinux.org.uk>
  R:    Christian Gmeiner <christian.gmeiner@gmail.com>
 +L:    etnaviv@lists.freedesktop.org
  L:    dri-devel@lists.freedesktop.org
  S:    Maintained
  F:    drivers/gpu/drm/etnaviv/
@@@ -4377,13 -4287,6 +4377,13 @@@ S:    Maintaine
  F:    drivers/gpu/drm/tilcdc/
  F:    Documentation/devicetree/bindings/display/tilcdc/
  
 +DRM DRIVERS FOR ZTE ZX
 +M:    Shawn Guo <shawnguo@kernel.org>
 +L:    dri-devel@lists.freedesktop.org
 +S:    Maintained
 +F:    drivers/gpu/drm/zte/
 +F:    Documentation/devicetree/bindings/display/zte,vou.txt
 +
  DSBR100 USB FM RADIO DRIVER
  M:    Alexey Klimov <klimov.linux@gmail.com>
  L:    linux-media@vger.kernel.org
@@@ -4728,14 -4631,12 +4728,14 @@@ L:   linux-efi@vger.kernel.or
  T:    git git://git.kernel.org/pub/scm/linux/kernel/git/efi/efi.git
  S:    Maintained
  F:    Documentation/efi-stub.txt
 -F:    arch/ia64/kernel/efi.c
 +F:    arch/*/kernel/efi.c
  F:    arch/x86/boot/compressed/eboot.[ch]
 -F:    arch/x86/include/asm/efi.h
 +F:    arch/*/include/asm/efi.h
  F:    arch/x86/platform/efi/
  F:    drivers/firmware/efi/
  F:    include/linux/efi*.h
 +F:    arch/arm/boot/compressed/efi-header.S
 +F:    arch/arm64/kernel/efi-entry.S
  
  EFI VARIABLE FILESYSTEM
  M:    Matthew Garrett <matthew.garrett@nebula.com>
@@@ -4787,11 -4688,11 +4787,11 @@@ M:   David Woodhouse <dwmw2@infradead.org
  L:    linux-embedded@vger.kernel.org
  S:    Maintained
  
- EMULEX/AVAGO LPFC FC/FCOE SCSI DRIVER
- M:    James Smart <james.smart@avagotech.com>
- M:    Dick Kennedy <dick.kennedy@avagotech.com>
+ EMULEX/BROADCOM LPFC FC/FCOE SCSI DRIVER
+ M:    James Smart <james.smart@broadcom.com>
+ M:    Dick Kennedy <dick.kennedy@broadcom.com>
  L:    linux-scsi@vger.kernel.org
- W:    http://www.avagotech.com
+ W:    http://www.broadcom.com
  S:    Supported
  F:    drivers/scsi/lpfc/
  
@@@ -5049,9 -4950,7 +5049,9 @@@ K:      fmc_d.*registe
  FPGA MANAGER FRAMEWORK
  M:    Alan Tull <atull@opensource.altera.com>
  R:    Moritz Fischer <moritz.fischer@ettus.com>
 +L:    linux-fpga@vger.kernel.org
  S:    Maintained
 +T:    git git://git.kernel.org/pub/scm/linux/kernel/git/atull/linux-fpga.git
  F:    drivers/fpga/
  F:    include/linux/fpga/fpga-mgr.h
  W:    http://www.rocketboards.org
@@@ -5069,9 -4968,10 +5069,9 @@@ F:     drivers/net/wan/dlci.
  F:    drivers/net/wan/sdla.c
  
  FRAMEBUFFER LAYER
 -M:    Tomi Valkeinen <tomi.valkeinen@ti.com>
  L:    linux-fbdev@vger.kernel.org
  Q:    http://patchwork.kernel.org/project/linux-fbdev/list/
 -S:    Maintained
 +S:    Orphan
  F:    Documentation/fb/
  F:    drivers/video/
  F:    include/video/
@@@ -5144,18 -5044,9 +5144,18 @@@ S:    Maintaine
  F:    drivers/net/ethernet/freescale/fman
  F:    Documentation/devicetree/bindings/powerpc/fsl/fman.txt
  
 +FREESCALE SOC DRIVERS
 +M:    Scott Wood <oss@buserror.net>
 +L:    linuxppc-dev@lists.ozlabs.org
 +L:    linux-arm-kernel@lists.infradead.org
 +S:    Maintained
 +F:    drivers/soc/fsl/
 +F:    include/linux/fsl/
 +
  FREESCALE QUICC ENGINE LIBRARY
 +M:    Qiang Zhao <qiang.zhao@nxp.com>
  L:    linuxppc-dev@lists.ozlabs.org
 -S:    Orphan
 +S:    Maintained
  F:    drivers/soc/fsl/qe/
  F:    include/soc/fsl/*qe*.h
  F:    include/soc/fsl/*ucc*.h
@@@ -5207,6 -5098,13 +5207,6 @@@ F:     sound/soc/fsl/fsl
  F:    sound/soc/fsl/imx*
  F:    sound/soc/fsl/mpc8610_hpcd.c
  
 -FREESCALE QORIQ MANAGEMENT COMPLEX DRIVER
 -M:    "J. German Rivera" <German.Rivera@freescale.com>
 -M:    Stuart Yoder <stuart.yoder@nxp.com>
 -L:    linux-kernel@vger.kernel.org
 -S:    Maintained
 -F:    drivers/staging/fsl-mc/
 -
  FREEVXFS FILESYSTEM
  M:    Christoph Hellwig <hch@infradead.org>
  W:    ftp://ftp.openlinux.org/pub/people/hch/vxfs
@@@ -5240,7 -5138,6 +5240,7 @@@ F:      include/linux/fscache*.
  FS-CRYPTO: FILE SYSTEM LEVEL ENCRYPTION SUPPORT
  M:    Theodore Y. Ts'o <tytso@mit.edu>
  M:    Jaegeuk Kim <jaegeuk@kernel.org>
 +L:    linux-fsdevel@vger.kernel.org
  S:    Supported
  F:    fs/crypto/
  F:    include/linux/fscrypto.h
@@@ -5305,7 -5202,6 +5305,7 @@@ L:      kernel-hardening@lists.openwall.co
  S:    Maintained
  F:    scripts/gcc-plugins/
  F:    scripts/gcc-plugin.sh
 +F:    scripts/Makefile.gcc-plugins
  F:    Documentation/gcc-plugins.txt
  
  GCOV BASED KERNEL PROFILING
@@@ -5717,7 -5613,6 +5717,6 @@@ F:      drivers/watchdog/hpwdt.
  
  HEWLETT-PACKARD SMART ARRAY RAID DRIVER (hpsa)
  M:    Don Brace <don.brace@microsemi.com>
- L:    iss_storagedev@hp.com
  L:    esc.storagedev@microsemi.com
  L:    linux-scsi@vger.kernel.org
  S:    Supported
@@@ -5728,7 -5623,6 +5727,6 @@@ F:      include/uapi/linux/cciss*.
  
  HEWLETT-PACKARD SMART CISS RAID DRIVER (cciss)
  M:    Don Brace <don.brace@microsemi.com>
- L:    iss_storagedev@hp.com
  L:    esc.storagedev@microsemi.com
  L:    linux-scsi@vger.kernel.org
  S:    Supported
@@@ -5767,7 -5661,6 +5765,7 @@@ HIBERNATION (aka Software Suspend, aka 
  M:    "Rafael J. Wysocki" <rjw@rjwysocki.net>
  M:    Pavel Machek <pavel@ucw.cz>
  L:    linux-pm@vger.kernel.org
 +B:    https://bugzilla.kernel.org
  S:    Supported
  F:    arch/x86/power/
  F:    drivers/base/power/
@@@ -5949,7 -5842,6 +5947,7 @@@ F:      drivers/input/serio/hyperv-keyboard.
  F:    drivers/pci/host/pci-hyperv.c
  F:    drivers/net/hyperv/
  F:    drivers/scsi/storvsc_drv.c
 +F:    drivers/uio/uio_hv_generic.c
  F:    drivers/video/fbdev/hyperv_fb.c
  F:    include/linux/hyperv.h
  F:    tools/hv/
@@@ -6193,9 -6085,14 +6191,9 @@@ S:     Maintaine
  F:    Documentation/cdrom/ide-cd
  F:    drivers/ide/ide-cd*
  
 -IDLE-I7300
 -M:    Andy Henroid <andrew.d.henroid@intel.com>
 -L:    linux-pm@vger.kernel.org
 -S:    Supported
 -F:    drivers/idle/i7300_idle.c
 -
  IEEE 802.15.4 SUBSYSTEM
  M:    Alexander Aring <aar@pengutronix.de>
 +M:    Stefan Schmidt <stefan@osg.samsung.com>
  L:    linux-wpan@vger.kernel.org
  W:    http://wpan.cakelab.org/
  T:    git git://git.kernel.org/pub/scm/linux/kernel/git/bluetooth/bluetooth.git
@@@ -6225,22 -6122,6 +6223,22 @@@ L:    linux-media@vger.kernel.or
  S:    Maintained
  F:    drivers/media/rc/iguanair.c
  
 +IIO DIGITAL POTENTIOMETER DAC
 +M:    Peter Rosin <peda@axentia.se>
 +L:    linux-iio@vger.kernel.org
 +S:    Maintained
 +F:    Documentation/ABI/testing/sysfs-bus-iio-dac-dpot-dac
 +F:    Documentation/devicetree/bindings/iio/dac/dpot-dac.txt
 +F:    drivers/iio/dac/dpot-dac.c
 +
 +IIO ENVELOPE DETECTOR
 +M:    Peter Rosin <peda@axentia.se>
 +L:    linux-iio@vger.kernel.org
 +S:    Maintained
 +F:    Documentation/ABI/testing/sysfs-bus-iio-adc-envelope-detector
 +F:    Documentation/devicetree/bindings/iio/adc/envelope-detector.txt
 +F:    drivers/iio/adc/envelope-detector.c
 +
  IIO SUBSYSTEM AND DRIVERS
  M:    Jonathan Cameron <jic23@kernel.org>
  R:    Hartmut Knaack <knaack.h@gmx.de>
@@@ -6398,11 -6279,9 +6396,11 @@@ S:    Maintaine
  F:    drivers/platform/x86/intel-vbtn.c
  
  INTEL IDLE DRIVER
 +M:    Jacob Pan <jacob.jun.pan@linux.intel.com>
  M:    Len Brown <lenb@kernel.org>
  L:    linux-pm@vger.kernel.org
  T:    git git://git.kernel.org/pub/scm/linux/kernel/git/lenb/linux.git
 +B:    https://bugzilla.kernel.org
  S:    Supported
  F:    drivers/idle/intel_idle.c
  
@@@ -6622,13 -6501,6 +6620,13 @@@ S:    Maintaine
  F:    arch/x86/include/asm/pmc_core.h
  F:    drivers/platform/x86/intel_pmc_core*
  
 +INVENSENSE MPU-3050 GYROSCOPE DRIVER
 +M:    Linus Walleij <linus.walleij@linaro.org>
 +L:    linux-iio@vger.kernel.org
 +S:    Maintained
 +F:    drivers/iio/gyro/mpu3050*
 +F:    Documentation/devicetree/bindings/iio/gyroscope/inv,mpu3050.txt
 +
  IOC3 ETHERNET DRIVER
  M:    Ralf Baechle <ralf@linux-mips.org>
  L:    linux-mips@linux-mips.org
@@@ -7210,7 -7082,6 +7208,7 @@@ F:      drivers/scsi/53c700
  LED SUBSYSTEM
  M:    Richard Purdie <rpurdie@rpsys.net>
  M:    Jacek Anaszewski <j.anaszewski@samsung.com>
 +M:    Pavel Machek <pavel@ucw.cz>
  L:    linux-leds@vger.kernel.org
  T:    git git://git.kernel.org/pub/scm/linux/kernel/git/j.anaszewski/linux-leds.git
  S:    Maintained
@@@ -7683,10 -7554,8 +7681,10 @@@ S:    Maintaine
  MARVELL 88E6XXX ETHERNET SWITCH FABRIC DRIVER
  M:    Andrew Lunn <andrew@lunn.ch>
  M:    Vivien Didelot <vivien.didelot@savoirfairelinux.com>
 +L:    netdev@vger.kernel.org
  S:    Maintained
  F:    drivers/net/dsa/mv88e6xxx/
 +F:    Documentation/devicetree/bindings/net/dsa/marvell.txt
  
  MARVELL ARMADA DRM SUPPORT
  M:    Russell King <rmk+kernel@armlinux.org.uk>
@@@ -7836,7 -7705,6 +7834,7 @@@ MCP4531 MICROCHIP DIGITAL POTENTIOMETE
  M:    Peter Rosin <peda@axentia.se>
  L:    linux-iio@vger.kernel.org
  S:    Maintained
 +F:    Documentation/ABI/testing/sysfs-bus-iio-potentiometer-mcp4531
  F:    drivers/iio/potentiometer/mcp4531.c
  
  MEASUREMENT COMPUTING CIO-DAC IIO DRIVER
@@@ -7968,12 -7836,12 +7966,12 @@@ S:   Maintaine
  F:    drivers/net/wireless/mediatek/mt7601u/
  
  MEGARAID SCSI/SAS DRIVERS
- M:    Kashyap Desai <kashyap.desai@avagotech.com>
- M:    Sumit Saxena <sumit.saxena@avagotech.com>
- M:    Uday Lingala <uday.lingala@avagotech.com>
- L:    megaraidlinux.pdl@avagotech.com
+ M:    Kashyap Desai <kashyap.desai@broadcom.com>
+ M:    Sumit Saxena <sumit.saxena@broadcom.com>
+ M:    Shivasharan S <shivasharan.srikanteshwara@broadcom.com>
+ L:    megaraidlinux.pdl@broadcom.com
  L:    linux-scsi@vger.kernel.org
- W:    http://www.lsi.com
+ W:    http://www.avagotech.com/support/
  S:    Maintained
  F:    Documentation/scsi/megaraid.txt
  F:    drivers/scsi/megaraid.*
@@@ -8187,7 -8055,6 +8185,7 @@@ F:      drivers/infiniband/hw/mlx4
  F:    include/linux/mlx4/
  
  MELLANOX MLX5 core VPI driver
 +M:    Saeed Mahameed <saeedm@mellanox.com>
  M:    Matan Barak <matanb@mellanox.com>
  M:    Leon Romanovsky <leonro@mellanox.com>
  L:    netdev@vger.kernel.org
@@@ -8401,12 -8268,6 +8399,12 @@@ T:    git git://linuxtv.org/mkrufky/tuners
  S:    Maintained
  F:    drivers/media/tuners/mxl5007t.*
  
 +MXSFB DRM DRIVER
 +M:    Marek Vasut <marex@denx.de>
 +S:    Supported
 +F:    drivers/gpu/drm/mxsfb/
 +F:    Documentation/devicetree/bindings/display/mxsfb-drm.txt
 +
  MYRICOM MYRI-10G 10GbE DRIVER (MYRI10GE)
  M:    Hyong-Youb Kim <hykim@myri.com>
  L:    netdev@vger.kernel.org
@@@ -8453,7 -8314,6 +8451,6 @@@ F:      drivers/scsi/arm/oak.
  F:    drivers/scsi/atari_scsi.*
  F:    drivers/scsi/dmx3191d.c
  F:    drivers/scsi/g_NCR5380.*
- F:    drivers/scsi/g_NCR5380_mmio.c
  F:    drivers/scsi/mac_scsi.*
  F:    drivers/scsi/sun3_scsi.*
  F:    drivers/scsi/sun3_scsi_vme.c
@@@ -8584,6 -8444,7 +8581,6 @@@ F:      include/uapi/linux/net_namespace.
  F:    tools/net/
  F:    tools/testing/selftests/net/
  F:    lib/random32.c
 -F:    lib/test_bpf.c
  
  NETWORKING [IPv4/IPv6]
  M:    "David S. Miller" <davem@davemloft.net>
@@@ -8812,16 -8673,6 +8809,16 @@@ L:    linux-nvme@lists.infradead.or
  S:    Supported
  F:    drivers/nvme/target/
  
 +NVM EXPRESS FC TRANSPORT DRIVERS
 +M:    James Smart <james.smart@broadcom.com>
 +L:    linux-nvme@lists.infradead.org
 +S:    Supported
 +F:    include/linux/nvme-fc.h
 +F:    include/linux/nvme-fc-driver.h
 +F:    drivers/nvme/host/fc.c
 +F:    drivers/nvme/target/fc.c
 +F:    drivers/nvme/target/fcloop.c
 +
  NVMEM FRAMEWORK
  M:    Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
  M:    Maxime Ripard <maxime.ripard@free-electrons.com>
@@@ -8884,7 -8735,6 +8881,7 @@@ F:      drivers/regulator/tps65217-regulator
  F:    drivers/regulator/tps65218-regulator.c
  F:    drivers/regulator/tps65910-regulator.c
  F:    drivers/regulator/twl-regulator.c
 +F:    drivers/regulator/twl6030-regulator.c
  F:    include/linux/i2c-omap.h
  
  OMAP DEVICE TREE SUPPORT
@@@ -9105,11 -8955,9 +9102,11 @@@ F:    drivers/of/resolver.
  
  OPENRISC ARCHITECTURE
  M:    Jonas Bonn <jonas@southpole.se>
 -W:    http://openrisc.net
 +M:    Stefan Kristiansson <stefan.kristiansson@saunalahti.fi>
 +M:    Stafford Horne <shorne@gmail.com>
 +L:    openrisc@lists.librecores.org
 +W:    http://openrisc.io
  S:    Maintained
 -T:    git git://openrisc.net/~jonas/linux
  F:    arch/openrisc/
  
  OPENVSWITCH
@@@ -9241,7 -9089,7 +9238,7 @@@ F:      drivers/misc/panel.
  
  PARALLEL PORT SUBSYSTEM
  M:    Sudip Mukherjee <sudipm.mukherjee@gmail.com>
 -M:    Sudip Mukherjee <sudip@vectorindia.org>
 +M:    Sudip Mukherjee <sudip.mukherjee@codethink.co.uk>
  L:    linux-parport@lists.infradead.org (subscribers-only)
  S:    Maintained
  F:    drivers/parport/
@@@ -9396,12 -9244,11 +9393,12 @@@ S:   Maintaine
  F:    drivers/pci/host/*layerscape*
  
  PCI DRIVER FOR IMX6
 -M:    Richard Zhu <Richard.Zhu@freescale.com>
 +M:    Richard Zhu <hongxing.zhu@nxp.com>
  M:    Lucas Stach <l.stach@pengutronix.de>
  L:    linux-pci@vger.kernel.org
  L:    linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
  S:    Maintained
 +F:    Documentation/devicetree/bindings/pci/fsl,imx6q-pcie.txt
  F:    drivers/pci/host/*imx6*
  
  PCI DRIVER FOR TI KEYSTONE
@@@ -9460,11 -9307,17 +9457,11 @@@ F:   drivers/pci/host/pci-exynos.
  
  PCI DRIVER FOR SYNOPSIS DESIGNWARE
  M:    Jingoo Han <jingoohan1@gmail.com>
 -M:    Pratyush Anand <pratyush.anand@gmail.com>
 -L:    linux-pci@vger.kernel.org
 -S:    Maintained
 -F:    drivers/pci/host/*designware*
 -
 -PCI DRIVER FOR SYNOPSYS PROTOTYPING DEVICE
 -M:    Jose Abreu <Jose.Abreu@synopsys.com>
 +M:    Joao Pinto <Joao.Pinto@synopsys.com>
  L:    linux-pci@vger.kernel.org
  S:    Maintained
  F:    Documentation/devicetree/bindings/pci/designware-pcie.txt
 -F:    drivers/pci/host/pcie-designware-plat.c
 +F:    drivers/pci/host/*designware*
  
  PCI DRIVER FOR GENERIC OF HOSTS
  M:    Will Deacon <will.deacon@arm.com>
@@@ -9479,7 -9332,7 +9476,7 @@@ PCI DRIVER FOR INTEL VOLUME MANAGEMENT 
  M:    Keith Busch <keith.busch@intel.com>
  L:    linux-pci@vger.kernel.org
  S:    Supported
 -F:    arch/x86/pci/vmd.c
 +F:    drivers/pci/host/vmd.c
  
  PCIE DRIVER FOR ST SPEAR13XX
  M:    Pratyush Anand <pratyush.anand@gmail.com>
@@@ -9712,8 -9565,8 +9709,8 @@@ F:      arch/mips/boot/dts/pistachio
  F:      arch/mips/configs/pistachio*_defconfig
  
  PKTCDVD DRIVER
 -M:    Jiri Kosina <jikos@kernel.org>
 -S:    Maintained
 +S:    Orphan
 +M:    linux-block@vger.kernel.org
  F:    drivers/block/pktcdvd.c
  F:    include/linux/pktcdvd.h
  F:    include/uapi/linux/pktcdvd.h
@@@ -9766,7 -9619,6 +9763,7 @@@ POWER MANAGEMENT COR
  M:    "Rafael J. Wysocki" <rjw@rjwysocki.net>
  L:    linux-pm@vger.kernel.org
  T:    git git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm
 +B:    https://bugzilla.kernel.org
  S:    Supported
  F:    drivers/base/power/
  F:    include/linux/pm.h
@@@ -9948,7 -9800,7 +9945,7 @@@ F:      drivers/media/usb/pwc/
  
  PWM FAN DRIVER
  M:    Kamil Debski <kamil@wypas.org>
 -M:    Lukasz Majewski <l.majewski@samsung.com>
 +M:    Bartlomiej Zolnierkiewicz <b.zolnierkie@samsung.com>
  L:    linux-hwmon@vger.kernel.org
  S:    Supported
  F:    Documentation/devicetree/bindings/hwmon/pwm-fan.txt
@@@ -10090,12 -9942,6 +10087,12 @@@ F:  fs/qnx4
  F:    include/uapi/linux/qnx4_fs.h
  F:    include/uapi/linux/qnxtypes.h
  
 +QORIQ DPAA2 FSL-MC BUS DRIVER
 +M:    Stuart Yoder <stuart.yoder@nxp.com>
 +L:    linux-kernel@vger.kernel.org
 +S:    Maintained
 +F:    drivers/staging/fsl-mc/
 +
  QT1010 MEDIA DRIVER
  M:    Antti Palosaari <crope@iki.fi>
  L:    linux-media@vger.kernel.org
@@@ -10558,7 -10404,7 +10555,7 @@@ F:   arch/s390/pci
  F:    drivers/pci/hotplug/s390_pci_hpc.c
  
  S390 ZCRYPT DRIVER
 -M:    Ingo Tuchscherer <ingo.tuchscherer@de.ibm.com>
 +M:    Harald Freudenberger <freude@de.ibm.com>
  L:    linux-s390@vger.kernel.org
  W:    http://www.ibm.com/developerworks/linux/linux390/
  S:    Supported
@@@ -10725,7 -10571,7 +10722,7 @@@ L:   netdev@vger.kernel.or
  F:    drivers/net/ethernet/samsung/sxgbe/
  
  SAMSUNG THERMAL DRIVER
 -M:    Lukasz Majewski <l.majewski@samsung.com>
 +M:    Bartlomiej Zolnierkiewicz <b.zolnierkie@samsung.com>
  L:    linux-pm@vger.kernel.org
  L:    linux-samsung-soc@vger.kernel.org
  S:    Supported
@@@ -10851,11 -10697,6 +10848,11 @@@ W: http://www.sunplus.co
  S:    Supported
  F:    arch/score/
  
 +SCR24X CHIP CARD INTERFACE DRIVER
 +M:    Lubomir Rintel <lkundrak@v3.sk>
 +S:    Supported
 +F:    drivers/char/pcmcia/scr24x_cs.c
 +
  SYSTEM CONTROL & POWER INTERFACE (SCPI) Message Protocol drivers
  M:    Sudeep Holla <sudeep.holla@arm.com>
  L:    linux-arm-kernel@lists.infradead.org
@@@ -11259,7 -11100,7 +11256,7 @@@ F:   include/media/i2c/ov2659.
  SILICON MOTION SM712 FRAME BUFFER DRIVER
  M:    Sudip Mukherjee <sudipm.mukherjee@gmail.com>
  M:    Teddy Wang <teddy.wang@siliconmotion.com>
 -M:    Sudip Mukherjee <sudip@vectorindia.org>
 +M:    Sudip Mukherjee <sudip.mukherjee@codethink.co.uk>
  L:    linux-fbdev@vger.kernel.org
  S:    Maintained
  F:    drivers/video/fbdev/sm712*
@@@ -11621,7 -11462,7 +11618,7 @@@ STABLE BRANC
  M:    Greg Kroah-Hartman <gregkh@linuxfoundation.org>
  L:    stable@vger.kernel.org
  S:    Supported
 -F:    Documentation/stable_kernel_rules.txt
 +F:    Documentation/process/stable-kernel-rules.rst
  
  STAGING SUBSYSTEM
  M:    Greg Kroah-Hartman <gregkh@linuxfoundation.org>
@@@ -11687,11 -11528,17 +11684,11 @@@ F:        drivers/staging/rtl8712
  STAGING - SILICON MOTION SM750 FRAME BUFFER DRIVER
  M:    Sudip Mukherjee <sudipm.mukherjee@gmail.com>
  M:    Teddy Wang <teddy.wang@siliconmotion.com>
 -M:    Sudip Mukherjee <sudip@vectorindia.org>
 +M:    Sudip Mukherjee <sudip.mukherjee@codethink.co.uk>
  L:    linux-fbdev@vger.kernel.org
  S:    Maintained
  F:    drivers/staging/sm750fb/
  
 -STAGING - SLICOSS
 -M:    Lior Dotan <liodot@gmail.com>
 -M:    Christopher Harrer <charrer@alacritech.com>
 -S:    Odd Fixes
 -F:    drivers/staging/slicoss/
 -
  STAGING - SPEAKUP CONSOLE SPEECH DRIVER
  M:    William Hubbs <w.d.hubbs@gmail.com>
  M:    Chris Brannon <chris@the-brannons.com>
@@@ -11761,7 -11608,6 +11758,7 @@@ M:   "Rafael J. Wysocki" <rjw@rjwysocki.n
  M:    Len Brown <len.brown@intel.com>
  M:    Pavel Machek <pavel@ucw.cz>
  L:    linux-pm@vger.kernel.org
 +B:    https://bugzilla.kernel.org
  S:    Supported
  F:    Documentation/power/
  F:    arch/x86/kernel/acpi/
@@@ -12547,7 -12393,8 +12544,8 @@@ F:   Documentation/scsi/ufs.tx
  F:    drivers/scsi/ufs/
  
  UNIVERSAL FLASH STORAGE HOST CONTROLLER DRIVER DWC HOOKS
- M:    Joao Pinto <Joao.Pinto@synopsys.com>
+ M:    Manjunath M Bettegowda <manjumb@synopsys.com>
+ M:    Prabu Thangamuthu <prabut@synopsys.com>
  L:    linux-scsi@vger.kernel.org
  S:    Supported
  F:    drivers/scsi/ufs/*dwc*
@@@ -12905,15 -12752,6 +12903,15 @@@ F: drivers/vfio
  F:    include/linux/vfio.h
  F:    include/uapi/linux/vfio.h
  
 +VFIO MEDIATED DEVICE DRIVERS
 +M:    Kirti Wankhede <kwankhede@nvidia.com>
 +L:    kvm@vger.kernel.org
 +S:    Maintained
 +F:    Documentation/vfio-mediated-device.txt
 +F:    drivers/vfio/mdev/
 +F:    include/linux/mdev.h
 +F:    samples/vfio-mdev/
 +
  VFIO PLATFORM DRIVER
  M:    Baptiste Reynal <b.reynal@virtualopensystems.com>
  L:    kvm@vger.kernel.org
@@@ -13066,7 -12904,7 +13064,7 @@@ M:   Greg Kroah-Hartman <gregkh@linuxfoun
  L:    devel@driverdev.osuosl.org
  S:    Maintained
  T:    git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/driver-core.git
 -F:    Documentation/vme_api.txt
 +F:    Documentation/driver-api/vme.rst
  F:    drivers/staging/vme/
  F:    drivers/vme/
  F:    include/linux/vme*
@@@ -13290,7 -13128,7 +13288,7 @@@ T:   git git://git.kernel.org/pub/scm/lin
  S:    Maintained
  F:    include/linux/workqueue.h
  F:    kernel/workqueue.c
 -F:    Documentation/workqueue.txt
 +F:    Documentation/core-api/workqueue.rst
  
  X-POWERS MULTIFUNCTION PMIC DEVICE DRIVERS
  M:    Chen-Yu Tsai <wens@csie.org>
@@@ -13355,6 -13193,7 +13353,6 @@@ F:   drivers/media/tuners/tuner-xc2028.
  
  XEN HYPERVISOR INTERFACE
  M:    Boris Ostrovsky <boris.ostrovsky@oracle.com>
 -M:    David Vrabel <david.vrabel@citrix.com>
  M:    Juergen Gross <jgross@suse.com>
  L:    xen-devel@lists.xenproject.org (moderated for non-subscribers)
  T:    git git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip.git
diff --combined block/blk-mq.h
index 3a54dd32a6fc33a55da78237567e5da835f04599,5347f011e90d9044601fca15d70bc99909691287..63e9116cddbd575c9ed85d716df0e4252f89d261
@@@ -1,8 -1,6 +1,8 @@@
  #ifndef INT_BLK_MQ_H
  #define INT_BLK_MQ_H
  
 +#include "blk-stat.h"
 +
  struct blk_mq_tag_set;
  
  struct blk_mq_ctx {
@@@ -20,7 -18,6 +20,7 @@@
  
        /* incremented at completion time */
        unsigned long           ____cacheline_aligned_in_smp rq_completed[2];
 +      struct blk_rq_stat      stat[2];
  
        struct request_queue    *queue;
        struct kobject          kobj;
@@@ -31,7 -28,6 +31,7 @@@ void blk_mq_freeze_queue(struct request
  void blk_mq_free_queue(struct request_queue *q);
  int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr);
  void blk_mq_wake_waiters(struct request_queue *q);
 +bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *, struct list_head *);
  
  /*
   * CPU hotplug helpers
@@@ -42,7 -38,6 +42,6 @@@ void blk_mq_disable_hotplug(void)
  /*
   * CPU -> queue mappings
   */
- int blk_mq_map_queues(struct blk_mq_tag_set *set);
  extern int blk_mq_hw_queue_to_node(unsigned int *map, unsigned int);
  
  static inline struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q,
@@@ -104,11 -99,6 +103,11 @@@ static inline void blk_mq_set_alloc_dat
        data->hctx = hctx;
  }
  
 +static inline bool blk_mq_hctx_stopped(struct blk_mq_hw_ctx *hctx)
 +{
 +      return test_bit(BLK_MQ_S_STOPPED, &hctx->state);
 +}
 +
  static inline bool blk_mq_hw_queue_mapped(struct blk_mq_hw_ctx *hctx)
  {
        return hctx->nr_ctx && hctx->tags;
diff --combined block/bsg-lib.c
index b2a61e3ecb1472ce6287f810979e216530d7920e,2d1df5cc0507b72c785af62668deca82432d9509..9d652a99231615b3f959a8407f866d3d22c51a6a
   * bsg_destroy_job - routine to teardown/delete a bsg job
   * @job: bsg_job that is to be torn down
   */
- static void bsg_destroy_job(struct bsg_job *job)
+ static void bsg_destroy_job(struct kref *kref)
  {
+       struct bsg_job *job = container_of(kref, struct bsg_job, kref);
+       struct request *rq = job->req;
+       blk_end_request_all(rq, rq->errors);
        put_device(job->dev);   /* release reference for the request */
  
        kfree(job->request_payload.sg_list);
        kfree(job);
  }
  
+ void bsg_job_put(struct bsg_job *job)
+ {
+       kref_put(&job->kref, bsg_destroy_job);
+ }
+ EXPORT_SYMBOL_GPL(bsg_job_put);
+ int bsg_job_get(struct bsg_job *job)
+ {
+       return kref_get_unless_zero(&job->kref);
+ }
+ EXPORT_SYMBOL_GPL(bsg_job_get);
  /**
   * bsg_job_done - completion routine for bsg requests
   * @job: bsg_job that is complete
@@@ -83,8 -100,7 +100,7 @@@ static void bsg_softirq_done(struct req
  {
        struct bsg_job *job = rq->special;
  
-       blk_end_request_all(rq, rq->errors);
-       bsg_destroy_job(job);
+       bsg_job_put(job);
  }
  
  static int bsg_map_buffer(struct bsg_buffer *buf, struct request *req)
@@@ -142,6 -158,7 +158,7 @@@ static int bsg_create_job(struct devic
        job->dev = dev;
        /* take a reference for the request */
        get_device(job->dev);
+       kref_init(&job->kref);
        return 0;
  
  failjob_rls_rqst_payload:
@@@ -161,8 -178,6 +178,8 @@@ failjob_rls_job
   * Drivers/subsys should pass this to the queue init function.
   */
  void bsg_request_fn(struct request_queue *q)
 +      __releases(q->queue_lock)
 +      __acquires(q->queue_lock)
  {
        struct device *dev = q->queuedata;
        struct request *req;
diff --combined drivers/scsi/Makefile
index 1520596f54a6c118048d7a2b819f79b8d7fbc41d,2ac1b9fe56ea363e92e3a77c9298f3cc41fb043f..a2d03957cbe2e85626199e854f6ad1e26f5128ee
@@@ -74,7 -74,6 +74,6 @@@ obj-$(CONFIG_SCSI_ISCI)               += isci
  obj-$(CONFIG_SCSI_IPS)                += ips.o
  obj-$(CONFIG_SCSI_FUTURE_DOMAIN)+= fdomain.o
  obj-$(CONFIG_SCSI_GENERIC_NCR5380) += g_NCR5380.o
- obj-$(CONFIG_SCSI_GENERIC_NCR5380_MMIO) += g_NCR5380_mmio.o
  obj-$(CONFIG_SCSI_NCR53C406A) += NCR53c406a.o
  obj-$(CONFIG_SCSI_NCR_D700)   += 53c700.o NCR_D700.o
  obj-$(CONFIG_SCSI_NCR_Q720)   += NCR_Q720_mod.o
@@@ -173,7 -172,6 +172,7 @@@ hv_storvsc-y                       := storvsc_drv.
  
  sd_mod-objs   := sd.o
  sd_mod-$(CONFIG_BLK_DEV_INTEGRITY) += sd_dif.o
 +sd_mod-$(CONFIG_BLK_DEV_ZONED) += sd_zbc.o
  
  sr_mod-objs   := sr.o sr_ioctl.o sr_vendor.o
  ncr53c8xx-flags-$(CONFIG_SCSI_ZALON) \
diff --combined drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
index 4655a9f9dceae8aeefd6857d80a018a4e9c23d98,688fde61d12aa3646bc50acc75b448c3362d935e..9e6f647ff1c16828f48d5abed6ce248155106977
@@@ -85,7 -85,6 +85,7 @@@ static inline int send_tx_flowc_wr(stru
  static const struct cxgb4_uld_info cxgb4i_uld_info = {
        .name = DRV_MODULE_NAME,
        .nrxq = MAX_ULD_QSETS,
 +      .ntxq = MAX_ULD_QSETS,
        .rxq_size = 1024,
        .lro = false,
        .add = t4_uld_add,
@@@ -1411,7 -1410,7 +1411,7 @@@ static int init_act_open(struct cxgbi_s
        csk->atid = cxgb4_alloc_atid(lldi->tids, csk);
        if (csk->atid < 0) {
                pr_err("%s, NO atid available.\n", ndev->name);
-               return -EINVAL;
+               goto rel_resource_without_clip;
        }
        cxgbi_sock_set_flag(csk, CTPF_HAS_ATID);
        cxgbi_sock_get(csk);
diff --combined drivers/scsi/device_handler/scsi_dh_alua.c
index db03c49e23502257ca8e5e5591e2f516cfb3ce06,32e48cc4cf7115ded2beb4f789eca2d5a1d7ed7b..d704752b63329f8094f52ec57cea5d823104c43c
@@@ -95,7 -95,7 +95,7 @@@ struct alua_port_group 
  
  struct alua_dh_data {
        struct list_head        node;
-       struct alua_port_group  *pg;
+       struct alua_port_group __rcu *pg;
        int                     group_id;
        spinlock_t              pg_lock;
        struct scsi_device      *sdev;
@@@ -154,8 -154,7 +154,8 @@@ static int submit_rtpg(struct scsi_devi
        return scsi_execute_req_flags(sdev, cdb, DMA_FROM_DEVICE,
                                      buff, bufflen, sshdr,
                                      ALUA_FAILOVER_TIMEOUT * HZ,
 -                                    ALUA_FAILOVER_RETRIES, NULL, req_flags);
 +                                    ALUA_FAILOVER_RETRIES, NULL,
 +                                    req_flags, 0);
  }
  
  /*
@@@ -188,8 -187,7 +188,8 @@@ static int submit_stpg(struct scsi_devi
        return scsi_execute_req_flags(sdev, cdb, DMA_TO_DEVICE,
                                      stpg_data, stpg_len,
                                      sshdr, ALUA_FAILOVER_TIMEOUT * HZ,
 -                                    ALUA_FAILOVER_RETRIES, NULL, req_flags);
 +                                    ALUA_FAILOVER_RETRIES, NULL,
 +                                    req_flags, 0);
  }
  
  static struct alua_port_group *alua_find_get_pg(char *id_str, size_t id_size,
@@@ -371,7 -369,7 +371,7 @@@ static int alua_check_vpd(struct scsi_d
  
        /* Check for existing port group references */
        spin_lock(&h->pg_lock);
-       old_pg = h->pg;
+       old_pg = rcu_dereference_protected(h->pg, lockdep_is_held(&h->pg_lock));
        if (old_pg != pg) {
                /* port group has changed. Update to new port group */
                if (h->pg) {
                list_add_rcu(&h->node, &pg->dh_list);
        spin_unlock_irqrestore(&pg->lock, flags);
  
-       alua_rtpg_queue(h->pg, sdev, NULL, true);
+       alua_rtpg_queue(rcu_dereference_protected(h->pg,
+                                                 lockdep_is_held(&h->pg_lock)),
+                       sdev, NULL, true);
        spin_unlock(&h->pg_lock);
  
        if (old_pg)
@@@ -795,7 -795,6 +797,7 @@@ static void alua_rtpg_work(struct work_
                WARN_ON(pg->flags & ALUA_PG_RUN_RTPG);
                WARN_ON(pg->flags & ALUA_PG_RUN_STPG);
                spin_unlock_irqrestore(&pg->lock, flags);
 +              kref_put(&pg->kref, release_port_group);
                return;
        }
        if (pg->flags & ALUA_SYNC_STPG)
@@@ -893,7 -892,6 +895,7 @@@ static void alua_rtpg_queue(struct alua
                /* Do not queue if the worker is already running */
                if (!(pg->flags & ALUA_PG_RUNNING)) {
                        kref_get(&pg->kref);
 +                      sdev = NULL;
                        start_queue = 1;
                }
        }
        if (start_queue &&
            !queue_delayed_work(alua_wq, &pg->rtpg_work,
                                msecs_to_jiffies(ALUA_RTPG_DELAY_MSECS))) {
 -              scsi_device_put(sdev);
 +              if (sdev)
 +                      scsi_device_put(sdev);
                kref_put(&pg->kref, release_port_group);
        }
  }
@@@ -942,7 -939,7 +944,7 @@@ static int alua_initialize(struct scsi_
  static int alua_set_params(struct scsi_device *sdev, const char *params)
  {
        struct alua_dh_data *h = sdev->handler_data;
-       struct alua_port_group __rcu *pg = NULL;
+       struct alua_port_group *pg = NULL;
        unsigned int optimize = 0, argc;
        const char *p = params;
        int result = SCSI_DH_OK;
@@@ -989,7 -986,7 +991,7 @@@ static int alua_activate(struct scsi_de
        struct alua_dh_data *h = sdev->handler_data;
        int err = SCSI_DH_OK;
        struct alua_queue_data *qdata;
-       struct alua_port_group __rcu *pg;
+       struct alua_port_group *pg;
  
        qdata = kzalloc(sizeof(*qdata), GFP_KERNEL);
        if (!qdata) {
@@@ -1053,7 -1050,7 +1055,7 @@@ static void alua_check(struct scsi_devi
  static int alua_prep_fn(struct scsi_device *sdev, struct request *req)
  {
        struct alua_dh_data *h = sdev->handler_data;
-       struct alua_port_group __rcu *pg;
+       struct alua_port_group *pg;
        unsigned char state = SCSI_ACCESS_STATE_OPTIMAL;
        int ret = BLKPREP_OK;
  
                 state != SCSI_ACCESS_STATE_ACTIVE &&
                 state != SCSI_ACCESS_STATE_LBA) {
                ret = BLKPREP_KILL;
 -              req->cmd_flags |= REQ_QUIET;
 +              req->rq_flags |= RQF_QUIET;
        }
        return ret;
  
@@@ -1123,7 -1120,7 +1125,7 @@@ static void alua_bus_detach(struct scsi
        struct alua_port_group *pg;
  
        spin_lock(&h->pg_lock);
-       pg = h->pg;
+       pg = rcu_dereference_protected(h->pg, lockdep_is_held(&h->pg_lock));
        rcu_assign_pointer(h->pg, NULL);
        h->sdev = NULL;
        spin_unlock(&h->pg_lock);
diff --combined drivers/scsi/hpsa.c
index a1d6ab76a51418f9a4f26ecbb76529ea1b775835,f5ab690b3091c7b8a7e655182a4cc048355127e4..691a0931695238cf07fcfc544dfe33740cb38a1c
@@@ -276,6 -276,9 +276,9 @@@ static int hpsa_find_cfg_addrs(struct p
  static int hpsa_pci_find_memory_BAR(struct pci_dev *pdev,
                                    unsigned long *memory_bar);
  static int hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id);
+ static int wait_for_device_to_become_ready(struct ctlr_info *h,
+                                          unsigned char lunaddr[],
+                                          int reply_queue);
  static int hpsa_wait_for_board_state(struct pci_dev *pdev, void __iomem *vaddr,
                                     int wait_for_ready);
  static inline void finish_cmd(struct CommandList *c);
@@@ -700,9 -703,7 +703,7 @@@ static ssize_t lunid_show(struct devic
        }
        memcpy(lunid, hdev->scsi3addr, sizeof(lunid));
        spin_unlock_irqrestore(&h->lock, flags);
-       return snprintf(buf, 20, "0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
-               lunid[0], lunid[1], lunid[2], lunid[3],
-               lunid[4], lunid[5], lunid[6], lunid[7]);
+       return snprintf(buf, 20, "0x%8phN\n", lunid);
  }
  
  static ssize_t unique_id_show(struct device *dev,
@@@ -864,6 -865,16 +865,16 @@@ static ssize_t path_info_show(struct de
        return output_len;
  }
  
+ static ssize_t host_show_ctlr_num(struct device *dev,
+       struct device_attribute *attr, char *buf)
+ {
+       struct ctlr_info *h;
+       struct Scsi_Host *shost = class_to_shost(dev);
+       h = shost_to_hba(shost);
+       return snprintf(buf, 20, "%d\n", h->ctlr);
+ }
  static DEVICE_ATTR(raid_level, S_IRUGO, raid_level_show, NULL);
  static DEVICE_ATTR(lunid, S_IRUGO, lunid_show, NULL);
  static DEVICE_ATTR(unique_id, S_IRUGO, unique_id_show, NULL);
@@@ -887,6 -898,8 +898,8 @@@ static DEVICE_ATTR(resettable, S_IRUGO
        host_show_resettable, NULL);
  static DEVICE_ATTR(lockup_detected, S_IRUGO,
        host_show_lockup_detected, NULL);
+ static DEVICE_ATTR(ctlr_num, S_IRUGO,
+       host_show_ctlr_num, NULL);
  
  static struct device_attribute *hpsa_sdev_attrs[] = {
        &dev_attr_raid_level,
@@@ -907,6 -920,7 +920,7 @@@ static struct device_attribute *hpsa_sh
        &dev_attr_hp_ssd_smart_path_status,
        &dev_attr_raid_offload_debug,
        &dev_attr_lockup_detected,
+       &dev_attr_ctlr_num,
        NULL,
  };
  
@@@ -1001,7 -1015,7 +1015,7 @@@ static void set_performant_mode(struct 
  {
        if (likely(h->transMethod & CFGTBL_Trans_Performant)) {
                c->busaddr |= 1 | (h->blockFetchTable[c->Header.SGList] << 1);
-               if (unlikely(!h->msix_vector))
+               if (unlikely(!h->msix_vectors))
                        return;
                if (likely(reply_queue == DEFAULT_REPLY_QUEUE))
                        c->Header.ReplyQueue =
@@@ -2009,7 -2023,7 +2023,7 @@@ static struct hpsa_scsi_dev_t *lookup_h
  
  static int hpsa_slave_alloc(struct scsi_device *sdev)
  {
 -      struct hpsa_scsi_dev_t *sd;
 +      struct hpsa_scsi_dev_t *sd = NULL;
        unsigned long flags;
        struct ctlr_info *h;
  
                        sd->target = sdev_id(sdev);
                        sd->lun = sdev->lun;
                }
 -      } else
 +      }
 +      if (!sd)
                sd = lookup_hpsa_scsi_dev(h, sdev_channel(sdev),
                                        sdev_id(sdev), sdev->lun);
  
@@@ -2541,7 -2554,7 +2555,7 @@@ static void complete_scsi_command(struc
  
        if ((unlikely(hpsa_is_pending_event(cp)))) {
                if (cp->reset_pending)
-                       return hpsa_cmd_resolve_and_free(h, cp);
+                       return hpsa_cmd_free_and_done(h, cp, cmd);
                if (cp->abort_pending)
                        return hpsa_cmd_abort_and_free(h, cp, cmd);
        }
@@@ -2824,14 -2837,8 +2838,8 @@@ static void hpsa_print_cmd(struct ctlr_
        const u8 *cdb = c->Request.CDB;
        const u8 *lun = c->Header.LUN.LunAddrBytes;
  
-       dev_warn(&h->pdev->dev, "%s: LUN:%02x%02x%02x%02x%02x%02x%02x%02x"
-       " CDB:%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x\n",
-               txt, lun[0], lun[1], lun[2], lun[3],
-               lun[4], lun[5], lun[6], lun[7],
-               cdb[0], cdb[1], cdb[2], cdb[3],
-               cdb[4], cdb[5], cdb[6], cdb[7],
-               cdb[8], cdb[9], cdb[10], cdb[11],
-               cdb[12], cdb[13], cdb[14], cdb[15]);
+       dev_warn(&h->pdev->dev, "%s: LUN:%8phN CDB:%16phN\n",
+                txt, lun, cdb);
  }
  
  static void hpsa_scsi_interpret_error(struct ctlr_info *h,
@@@ -3080,6 -3087,8 +3088,8 @@@ static int hpsa_do_reset(struct ctlr_in
  
        if (unlikely(rc))
                atomic_set(&dev->reset_cmds_out, 0);
+       else
+               wait_for_device_to_become_ready(h, scsi3addr, 0);
  
        mutex_unlock(&h->reset_mutex);
        return rc;
  static inline int hpsa_scsi_do_report_phys_luns(struct ctlr_info *h,
                struct ReportExtendedLUNdata *buf, int bufsize)
  {
-       return hpsa_scsi_do_report_luns(h, 0, buf, bufsize,
-                                               HPSA_REPORT_PHYS_EXTENDED);
+       int rc;
+       struct ReportLUNdata *lbuf;
+       rc = hpsa_scsi_do_report_luns(h, 0, buf, bufsize,
+                                     HPSA_REPORT_PHYS_EXTENDED);
+       if (!rc || !hpsa_allow_any)
+               return rc;
+       /* REPORT PHYS EXTENDED is not supported */
+       lbuf = kzalloc(sizeof(*lbuf), GFP_KERNEL);
+       if (!lbuf)
+               return -ENOMEM;
+       rc = hpsa_scsi_do_report_luns(h, 0, lbuf, sizeof(*lbuf), 0);
+       if (!rc) {
+               int i;
+               u32 nphys;
+               /* Copy ReportLUNdata header */
+               memcpy(buf, lbuf, 8);
+               nphys = be32_to_cpu(*((__be32 *)lbuf->LUNListLength)) / 8;
+               for (i = 0; i < nphys; i++)
+                       memcpy(buf->LUN[i].lunid, lbuf->LUN[i], 8);
+       }
+       kfree(lbuf);
+       return rc;
  }
  
  static inline int hpsa_scsi_do_report_log_luns(struct ctlr_info *h,
@@@ -3841,7 -3874,6 +3875,7 @@@ static int hpsa_update_device_info(stru
                sizeof(this_device->vendor));
        memcpy(this_device->model, &inq_buff[16],
                sizeof(this_device->model));
 +      this_device->rev = inq_buff[2];
        memset(this_device->device_id, 0,
                sizeof(this_device->device_id));
        if (hpsa_get_device_id(h, scsi3addr, this_device->device_id, 8,
@@@ -3931,14 -3963,10 +3965,14 @@@ static void figure_bus_target_lun(struc
  
        if (!is_logical_dev_addr_mode(lunaddrbytes)) {
                /* physical device, target and lun filled in later */
 -              if (is_hba_lunid(lunaddrbytes))
 +              if (is_hba_lunid(lunaddrbytes)) {
 +                      int bus = HPSA_HBA_BUS;
 +
 +                      if (!device->rev)
 +                              bus = HPSA_LEGACY_HBA_BUS;
                        hpsa_set_bus_target_lun(device,
 -                                      HPSA_HBA_BUS, 0, lunid & 0x3fff);
 -              else
 +                                      bus, 0, lunid & 0x3fff);
 +              else
                        /* defer target, lun assignment for physical devices */
                        hpsa_set_bus_target_lun(device,
                                        HPSA_PHYSICAL_DEVICE_BUS, -1, -1);
@@@ -5488,7 -5516,7 +5522,7 @@@ static int hpsa_scsi_queue_command(stru
  
        dev = cmd->device->hostdata;
        if (!dev) {
-               cmd->result = NOT_READY << 16; /* host byte */
+               cmd->result = DID_NO_CONNECT << 16;
                cmd->scsi_done(cmd);
                return 0;
        }
@@@ -5569,6 -5597,14 +5603,14 @@@ static void hpsa_scan_start(struct Scsi
        if (unlikely(lockup_detected(h)))
                return hpsa_scan_complete(h);
  
+       /*
+        * Do the scan after a reset completion
+        */
+       if (h->reset_in_progress) {
+               h->drv_req_rescan = 1;
+               return;
+       }
        hpsa_update_scsi_devices(h);
  
        hpsa_scan_complete(h);
@@@ -5624,7 -5660,7 +5666,7 @@@ static int hpsa_scsi_host_alloc(struct 
        sh->sg_tablesize = h->maxsgentries;
        sh->transportt = hpsa_sas_transport_template;
        sh->hostdata[0] = (unsigned long) h;
-       sh->irq = h->intr[h->intr_mode];
+       sh->irq = pci_irq_vector(h->pdev, 0);
        sh->unique_id = sh->irq;
  
        h->scsi_host = sh;
@@@ -5999,11 -6035,9 +6041,9 @@@ static int hpsa_send_reset_as_abort_ioa
  
        if (h->raid_offload_debug > 0)
                dev_info(&h->pdev->dev,
-                       "scsi %d:%d:%d:%d %s scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
+                       "scsi %d:%d:%d:%d %s scsi3addr 0x%8phN\n",
                        h->scsi_host->host_no, dev->bus, dev->target, dev->lun,
-                       "Reset as abort",
-                       scsi3addr[0], scsi3addr[1], scsi3addr[2], scsi3addr[3],
-                       scsi3addr[4], scsi3addr[5], scsi3addr[6], scsi3addr[7]);
+                       "Reset as abort", scsi3addr);
  
        if (!dev->offload_enabled) {
                dev_warn(&h->pdev->dev,
        /* send the reset */
        if (h->raid_offload_debug > 0)
                dev_info(&h->pdev->dev,
-                       "Reset as abort: Resetting physical device at scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
-                       psa[0], psa[1], psa[2], psa[3],
-                       psa[4], psa[5], psa[6], psa[7]);
+                       "Reset as abort: Resetting physical device at scsi3addr 0x%8phN\n",
+                       psa);
        rc = hpsa_do_reset(h, dev, psa, HPSA_PHYS_TARGET_RESET, reply_queue);
        if (rc != 0) {
                dev_warn(&h->pdev->dev,
-                       "Reset as abort: Failed on physical device at scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
-                       psa[0], psa[1], psa[2], psa[3],
-                       psa[4], psa[5], psa[6], psa[7]);
+                       "Reset as abort: Failed on physical device at scsi3addr 0x%8phN\n",
+                       psa);
                return rc; /* failed to reset */
        }
  
        /* wait for device to recover */
        if (wait_for_device_to_become_ready(h, psa, reply_queue) != 0) {
                dev_warn(&h->pdev->dev,
-                       "Reset as abort: Failed: Device never recovered from reset: 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
-                       psa[0], psa[1], psa[2], psa[3],
-                       psa[4], psa[5], psa[6], psa[7]);
+                       "Reset as abort: Failed: Device never recovered from reset: 0x%8phN\n",
+                       psa);
                return -1;  /* failed to recover */
        }
  
        /* device recovered */
        dev_info(&h->pdev->dev,
-               "Reset as abort: Device recovered from reset: scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
-               psa[0], psa[1], psa[2], psa[3],
-               psa[4], psa[5], psa[6], psa[7]);
+               "Reset as abort: Device recovered from reset: scsi3addr 0x%8phN\n",
+               psa);
  
        return rc; /* success */
  }
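
The three hunks above replace open-coded byte-by-byte hex formatting with the kernel's %phN printk extension, where the field width selects how many bytes of the buffer are printed. Below is a minimal before/after sketch; the device pointer and buffer name are hypothetical and this is illustrative, not hpsa code.

/*
 * Illustrative only, not hpsa code: how the %8phN specifier condenses an
 * eight-byte hex dump. The device pointer and buffer name are hypothetical.
 */
#include <linux/device.h>
#include <linux/types.h>

static void demo_dump_scsi3addr(struct device *dev, const u8 addr[8])
{
	/* Old style: one %02x conversion and one argument per byte. */
	dev_info(dev, "scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
		 addr[0], addr[1], addr[2], addr[3],
		 addr[4], addr[5], addr[6], addr[7]);

	/*
	 * New style: %ph prints hex bytes from a pointer, the N suffix drops
	 * the separators, and the field width (8 here) is the byte count.
	 */
	dev_info(dev, "scsi3addr 0x%8phN\n", addr);
}

Passing the pointer once also removes the risk of the argument list drifting out of sync with the format string.
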
@@@ -6663,8 -6693,7 +6699,7 @@@ static int hpsa_big_passthru_ioctl(stru
                return -EINVAL;
        if (!capable(CAP_SYS_RAWIO))
                return -EPERM;
-       ioc = (BIG_IOCTL_Command_struct *)
-           kmalloc(sizeof(*ioc), GFP_KERNEL);
+       ioc = kmalloc(sizeof(*ioc), GFP_KERNEL);
        if (!ioc) {
                status = -ENOMEM;
                goto cleanup1;
@@@ -7658,67 -7687,41 +7693,41 @@@ static int find_PCI_BAR_index(struct pc
  
  static void hpsa_disable_interrupt_mode(struct ctlr_info *h)
  {
-       if (h->msix_vector) {
-               if (h->pdev->msix_enabled)
-                       pci_disable_msix(h->pdev);
-               h->msix_vector = 0;
-       } else if (h->msi_vector) {
-               if (h->pdev->msi_enabled)
-                       pci_disable_msi(h->pdev);
-               h->msi_vector = 0;
-       }
+       pci_free_irq_vectors(h->pdev);
+       h->msix_vectors = 0;
  }
  
  /* If MSI/MSI-X is supported by the kernel we will try to enable it on
   * controllers that are capable. If not, we use legacy INTx mode.
   */
- static void hpsa_interrupt_mode(struct ctlr_info *h)
+ static int hpsa_interrupt_mode(struct ctlr_info *h)
  {
- #ifdef CONFIG_PCI_MSI
-       int err, i;
-       struct msix_entry hpsa_msix_entries[MAX_REPLY_QUEUES];
-       for (i = 0; i < MAX_REPLY_QUEUES; i++) {
-               hpsa_msix_entries[i].vector = 0;
-               hpsa_msix_entries[i].entry = i;
-       }
+       unsigned int flags = PCI_IRQ_LEGACY;
+       int ret;
  
        /* Some boards advertise MSI but don't really support it */
-       if ((h->board_id == 0x40700E11) || (h->board_id == 0x40800E11) ||
-           (h->board_id == 0x40820E11) || (h->board_id == 0x40830E11))
-               goto default_int_mode;
-       if (pci_find_capability(h->pdev, PCI_CAP_ID_MSIX)) {
-               dev_info(&h->pdev->dev, "MSI-X capable controller\n");
-               h->msix_vector = MAX_REPLY_QUEUES;
-               if (h->msix_vector > num_online_cpus())
-                       h->msix_vector = num_online_cpus();
-               err = pci_enable_msix_range(h->pdev, hpsa_msix_entries,
-                                           1, h->msix_vector);
-               if (err < 0) {
-                       dev_warn(&h->pdev->dev, "MSI-X init failed %d\n", err);
-                       h->msix_vector = 0;
-                       goto single_msi_mode;
-               } else if (err < h->msix_vector) {
-                       dev_warn(&h->pdev->dev, "only %d MSI-X vectors "
-                              "available\n", err);
+       switch (h->board_id) {
+       case 0x40700E11:
+       case 0x40800E11:
+       case 0x40820E11:
+       case 0x40830E11:
+               break;
+       default:
+               ret = pci_alloc_irq_vectors(h->pdev, 1, MAX_REPLY_QUEUES,
+                               PCI_IRQ_MSIX | PCI_IRQ_AFFINITY);
+               if (ret > 0) {
+                       h->msix_vectors = ret;
+                       return 0;
                }
-               h->msix_vector = err;
-               for (i = 0; i < h->msix_vector; i++)
-                       h->intr[i] = hpsa_msix_entries[i].vector;
-               return;
-       }
- single_msi_mode:
-       if (pci_find_capability(h->pdev, PCI_CAP_ID_MSI)) {
-               dev_info(&h->pdev->dev, "MSI capable controller\n");
-               if (!pci_enable_msi(h->pdev))
-                       h->msi_vector = 1;
-               else
-                       dev_warn(&h->pdev->dev, "MSI init failed\n");
+               flags |= PCI_IRQ_MSI;
+               break;
        }
- default_int_mode:
- #endif                                /* CONFIG_PCI_MSI */
-       /* if we get here we're going to use the default interrupt mode */
-       h->intr[h->intr_mode] = h->pdev->irq;
+       ret = pci_alloc_irq_vectors(h->pdev, 1, 1, flags);
+       if (ret < 0)
+               return ret;
+       return 0;
  }
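
The rewritten hpsa_interrupt_mode() above is one instance of the series-wide move from pci_enable_msix_range()/pci_enable_msi() plus driver-private vector tables to pci_alloc_irq_vectors() and pci_irq_vector(). A hedged sketch of the general pattern follows; the function name, handler, cookie and queue count are hypothetical, and real drivers add their own fallback policy on top.

/*
 * Hedged sketch of the pci_alloc_irq_vectors() pattern, not hpsa-specific.
 * my_setup_irqs(), my_handler, my_dev and MY_MAX_QUEUES are hypothetical.
 */
#include <linux/interrupt.h>
#include <linux/pci.h>

#define MY_MAX_QUEUES 8

static int my_setup_irqs(struct pci_dev *pdev, irq_handler_t my_handler,
			 void *my_dev)
{
	int nvec, i, rc;

	/*
	 * Ask for up to MY_MAX_QUEUES vectors; the PCI core tries MSI-X,
	 * then MSI, then legacy INTx, and returns how many it allocated.
	 */
	nvec = pci_alloc_irq_vectors(pdev, 1, MY_MAX_QUEUES,
				     PCI_IRQ_ALL_TYPES | PCI_IRQ_AFFINITY);
	if (nvec < 0)
		return nvec;

	for (i = 0; i < nvec; i++) {
		/*
		 * pci_irq_vector() maps a vector index to a Linux IRQ number,
		 * replacing driver-private msix_entry bookkeeping.
		 */
		rc = request_irq(pci_irq_vector(pdev, i), my_handler, 0,
				 "my-driver", my_dev);
		if (rc)
			goto out_free;
	}
	return 0;

out_free:
	while (--i >= 0)
		free_irq(pci_irq_vector(pdev, i), my_dev);
	pci_free_irq_vectors(pdev);
	return rc;
}

Because the core handles the MSI-X to MSI to INTx fallback itself, the hand-rolled single_msi_mode/default_int_mode labels removed above are no longer needed.
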
  
  static int hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id)
@@@ -8074,7 -8077,9 +8083,9 @@@ static int hpsa_pci_init(struct ctlr_in
  
        pci_set_master(h->pdev);
  
-       hpsa_interrupt_mode(h);
+       err = hpsa_interrupt_mode(h);
+       if (err)
+               goto clean1;
        err = hpsa_pci_find_memory_BAR(h->pdev, &h->paddr);
        if (err)
                goto clean2;    /* intmode+region, pci */
@@@ -8110,6 -8115,7 +8121,7 @@@ clean3: /* vaddr, intmode+region, pci *
        h->vaddr = NULL;
  clean2:       /* intmode+region, pci */
        hpsa_disable_interrupt_mode(h);
+ clean1:
        /*
         * call pci_disable_device before pci_release_regions per
         * Documentation/PCI/pci.txt
@@@ -8243,34 -8249,20 +8255,20 @@@ clean_up
        return -ENOMEM;
  }
  
- static void hpsa_irq_affinity_hints(struct ctlr_info *h)
- {
-       int i, cpu;
-       cpu = cpumask_first(cpu_online_mask);
-       for (i = 0; i < h->msix_vector; i++) {
-               irq_set_affinity_hint(h->intr[i], get_cpu_mask(cpu));
-               cpu = cpumask_next(cpu, cpu_online_mask);
-       }
- }
  /* clear affinity hints and free MSI-X, MSI, or legacy INTx vectors */
  static void hpsa_free_irqs(struct ctlr_info *h)
  {
        int i;
  
-       if (!h->msix_vector || h->intr_mode != PERF_MODE_INT) {
+       if (!h->msix_vectors || h->intr_mode != PERF_MODE_INT) {
                /* Single reply queue, only one irq to free */
-               i = h->intr_mode;
-               irq_set_affinity_hint(h->intr[i], NULL);
-               free_irq(h->intr[i], &h->q[i]);
-               h->q[i] = 0;
+               free_irq(pci_irq_vector(h->pdev, 0), &h->q[h->intr_mode]);
+               h->q[h->intr_mode] = 0;
                return;
        }
  
-       for (i = 0; i < h->msix_vector; i++) {
-               irq_set_affinity_hint(h->intr[i], NULL);
-               free_irq(h->intr[i], &h->q[i]);
+       for (i = 0; i < h->msix_vectors; i++) {
+               free_irq(pci_irq_vector(h->pdev, i), &h->q[i]);
                h->q[i] = 0;
        }
        for (; i < MAX_REPLY_QUEUES; i++)
@@@ -8291,11 -8283,11 +8289,11 @@@ static int hpsa_request_irqs(struct ctl
        for (i = 0; i < MAX_REPLY_QUEUES; i++)
                h->q[i] = (u8) i;
  
-       if (h->intr_mode == PERF_MODE_INT && h->msix_vector > 0) {
+       if (h->intr_mode == PERF_MODE_INT && h->msix_vectors > 0) {
                /* If performant mode and MSI-X, use multiple reply queues */
-               for (i = 0; i < h->msix_vector; i++) {
+               for (i = 0; i < h->msix_vectors; i++) {
                        sprintf(h->intrname[i], "%s-msix%d", h->devname, i);
-                       rc = request_irq(h->intr[i], msixhandler,
+                       rc = request_irq(pci_irq_vector(h->pdev, i), msixhandler,
                                        0, h->intrname[i],
                                        &h->q[i]);
                        if (rc) {
  
                                dev_err(&h->pdev->dev,
                                        "failed to get irq %d for %s\n",
-                                      h->intr[i], h->devname);
+                                      pci_irq_vector(h->pdev, i), h->devname);
                                for (j = 0; j < i; j++) {
-                                       free_irq(h->intr[j], &h->q[j]);
+                                       free_irq(pci_irq_vector(h->pdev, j), &h->q[j]);
                                        h->q[j] = 0;
                                }
                                for (; j < MAX_REPLY_QUEUES; j++)
                                return rc;
                        }
                }
-               hpsa_irq_affinity_hints(h);
        } else {
                /* Use single reply pool */
-               if (h->msix_vector > 0 || h->msi_vector) {
-                       if (h->msix_vector)
-                               sprintf(h->intrname[h->intr_mode],
-                                       "%s-msix", h->devname);
-                       else
-                               sprintf(h->intrname[h->intr_mode],
-                                       "%s-msi", h->devname);
-                       rc = request_irq(h->intr[h->intr_mode],
+               if (h->msix_vectors > 0 || h->pdev->msi_enabled) {
+                       sprintf(h->intrname[0], "%s-msi%s", h->devname,
+                               h->msix_vectors ? "x" : "");
+                       rc = request_irq(pci_irq_vector(h->pdev, 0),
                                msixhandler, 0,
-                               h->intrname[h->intr_mode],
+                               h->intrname[0],
                                &h->q[h->intr_mode]);
                } else {
                        sprintf(h->intrname[h->intr_mode],
                                "%s-intx", h->devname);
-                       rc = request_irq(h->intr[h->intr_mode],
+                       rc = request_irq(pci_irq_vector(h->pdev, 0),
                                intxhandler, IRQF_SHARED,
-                               h->intrname[h->intr_mode],
+                               h->intrname[0],
                                &h->q[h->intr_mode]);
                }
-               irq_set_affinity_hint(h->intr[h->intr_mode], NULL);
        }
        if (rc) {
                dev_err(&h->pdev->dev, "failed to get irq %d for %s\n",
-                      h->intr[h->intr_mode], h->devname);
+                      pci_irq_vector(h->pdev, 0), h->devname);
                hpsa_free_irqs(h);
                return -ENODEV;
        }
@@@ -8640,6 -8626,14 +8632,14 @@@ static void hpsa_rescan_ctlr_worker(str
        if (h->remove_in_progress)
                return;
  
+       /*
+        * Do the scan after the reset
+        */
+       if (h->reset_in_progress) {
+               h->drv_req_rescan = 1;
+               return;
+       }
        if (hpsa_ctlr_needs_rescan(h) || hpsa_offline_devices_ready(h)) {
                scsi_host_get(h->scsi_host);
                hpsa_ack_ctlr_events(h);
@@@ -9525,7 -9519,7 +9525,7 @@@ static int hpsa_put_ctlr_into_performan
                        return rc;
        }
  
-       h->nreply_queues = h->msix_vector > 0 ? h->msix_vector : 1;
+       h->nreply_queues = h->msix_vectors > 0 ? h->msix_vectors : 1;
        hpsa_get_max_perf_mode_cmds(h);
        /* Performant mode ring buffer and supporting data structures */
        h->reply_queue_size = h->max_commands * sizeof(u64);
diff --combined drivers/scsi/hpsa.h
index 9ea162de80dcfa976b737cf209c4570de6003888,3faf6cff95ee9294d4aa29988c37c92447395e9d..64e98295b70703bdf1d9a07a483f472da75a35c5
@@@ -69,7 -69,6 +69,7 @@@ struct hpsa_scsi_dev_t 
        u64 sas_address;
        unsigned char vendor[8];        /* bytes 8-15 of inquiry data */
        unsigned char model[16];        /* bytes 16-31 of inquiry data */
 +      unsigned char rev;              /* byte 2 of inquiry data */
        unsigned char raid_level;       /* from inquiry page 0xC1 */
        unsigned char volume_offline;   /* discovered via TUR or VPD */
        u16 queue_depth;                /* max queue_depth for this device */
@@@ -176,9 -175,7 +176,7 @@@ struct ctlr_info 
  #     define DOORBELL_INT     1
  #     define SIMPLE_MODE_INT  2
  #     define MEMQ_MODE_INT    3
-       unsigned int intr[MAX_REPLY_QUEUES];
-       unsigned int msix_vector;
-       unsigned int msi_vector;
+       unsigned int msix_vectors;
        int intr_mode; /* either PERF_MODE_INT or SIMPLE_MODE_INT */
        struct access_method access;
  
@@@ -403,7 -400,6 +401,7 @@@ struct offline_device_entry 
  #define HPSA_RAID_VOLUME_BUS          1
  #define HPSA_EXTERNAL_RAID_VOLUME_BUS 2
  #define HPSA_HBA_BUS                  0
 +#define HPSA_LEGACY_HBA_BUS           3
  
  /*
        Send the command to the hardware
@@@ -466,7 -462,7 +464,7 @@@ static unsigned long SA5_performant_com
        unsigned long register_value = FIFO_EMPTY;
  
        /* msi auto clears the interrupt pending bit. */
-       if (unlikely(!(h->msi_vector || h->msix_vector))) {
+       if (unlikely(!(h->pdev->msi_enabled || h->msix_vectors))) {
                /* flush the controller write of the reply queue by reading
                 * outbound doorbell status register.
                 */
diff --combined drivers/scsi/libfc/fc_lport.c
index 50c71678a156e8f32d833575de49379e5cd3a151,2be7015498fdd394fe46857c49556a8b0b2beacb..919736a74ffa6e8b46e1f87b042968f42eae71b8
@@@ -149,7 -149,7 +149,7 @@@ static const char *fc_lport_state_names
   * @offset:   The offset into the response data
   */
  struct fc_bsg_info {
-       struct fc_bsg_job *job;
+       struct bsg_job *job;
        struct fc_lport *lport;
        u16 rsp_code;
        struct scatterlist *sg;
@@@ -200,7 -200,7 +200,7 @@@ static void fc_lport_rport_callback(str
                                     "in the DNS or FDMI state, it's in the "
                                     "%d state", rdata->ids.port_id,
                                     lport->state);
-                       lport->tt.rport_logoff(rdata);
+                       fc_rport_logoff(rdata);
                }
                break;
        case RPORT_EV_LOGO:
@@@ -237,23 -237,26 +237,26 @@@ static const char *fc_lport_state(struc
   * @remote_fid:        The FID of the ptp rport
   * @remote_wwpn: The WWPN of the ptp rport
   * @remote_wwnn: The WWNN of the ptp rport
+  *
+  * Locking Note: The lport lock is expected to be held before calling
+  * this routine.
   */
  static void fc_lport_ptp_setup(struct fc_lport *lport,
                               u32 remote_fid, u64 remote_wwpn,
                               u64 remote_wwnn)
  {
-       mutex_lock(&lport->disc.disc_mutex);
        if (lport->ptp_rdata) {
-               lport->tt.rport_logoff(lport->ptp_rdata);
-               kref_put(&lport->ptp_rdata->kref, lport->tt.rport_destroy);
+               fc_rport_logoff(lport->ptp_rdata);
+               kref_put(&lport->ptp_rdata->kref, fc_rport_destroy);
        }
-       lport->ptp_rdata = lport->tt.rport_create(lport, remote_fid);
+       mutex_lock(&lport->disc.disc_mutex);
+       lport->ptp_rdata = fc_rport_create(lport, remote_fid);
        kref_get(&lport->ptp_rdata->kref);
        lport->ptp_rdata->ids.port_name = remote_wwpn;
        lport->ptp_rdata->ids.node_name = remote_wwnn;
        mutex_unlock(&lport->disc.disc_mutex);
  
-       lport->tt.rport_login(lport->ptp_rdata);
+       fc_rport_login(lport->ptp_rdata);
  
        fc_lport_enter_ready(lport);
  }
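
The hunk above moves the rport logoff outside the discovery mutex and documents, in the new locking note, that the caller is expected to hold the lport lock. When such a contract lives only in a comment, lockdep can check it at runtime; below is a small hedged sketch of that idea with a hypothetical structure, not libfc code.

/*
 * Hedged sketch: turning a "caller must hold the lock" comment into a
 * runtime check with lockdep. struct my_port and my_port_setup() are
 * hypothetical and not libfc code.
 */
#include <linux/lockdep.h>
#include <linux/mutex.h>

struct my_port {
	struct mutex lp_mutex;
	int state;
};

static void my_port_setup(struct my_port *port)
{
	/*
	 * With CONFIG_PROVE_LOCKING this warns if the caller did not take
	 * lp_mutex; without it the check compiles away to nothing.
	 */
	lockdep_assert_held(&port->lp_mutex);

	port->state = 1;
}
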
@@@ -308,7 -311,7 +311,7 @@@ struct fc_host_statistics *fc_get_host_
        fc_stats = &lport->host_stats;
        memset(fc_stats, 0, sizeof(struct fc_host_statistics));
  
 -      fc_stats->seconds_since_last_reset = (lport->boot_time - jiffies) / HZ;
 +      fc_stats->seconds_since_last_reset = (jiffies - lport->boot_time) / HZ;
  
        for_each_possible_cpu(cpu) {
                struct fc_stats *stats;
@@@ -409,7 -412,7 +412,7 @@@ static void fc_lport_recv_rlir_req(stru
        FC_LPORT_DBG(lport, "Received RLIR request while in state %s\n",
                     fc_lport_state(lport));
  
-       lport->tt.seq_els_rsp_send(fp, ELS_LS_ACC, NULL);
+       fc_seq_els_rsp_send(fp, ELS_LS_ACC, NULL);
        fc_frame_free(fp);
  }
  
@@@ -478,7 -481,7 +481,7 @@@ static void fc_lport_recv_rnid_req(stru
        if (!req) {
                rjt_data.reason = ELS_RJT_LOGIC;
                rjt_data.explan = ELS_EXPL_NONE;
-               lport->tt.seq_els_rsp_send(in_fp, ELS_LS_RJT, &rjt_data);
+               fc_seq_els_rsp_send(in_fp, ELS_LS_RJT, &rjt_data);
        } else {
                fmt = req->rnid_fmt;
                len = sizeof(*rp);
   */
  static void fc_lport_recv_logo_req(struct fc_lport *lport, struct fc_frame *fp)
  {
-       lport->tt.seq_els_rsp_send(fp, ELS_LS_ACC, NULL);
+       fc_seq_els_rsp_send(fp, ELS_LS_ACC, NULL);
        fc_lport_enter_reset(lport);
        fc_frame_free(fp);
  }
@@@ -620,9 -623,9 +623,9 @@@ int fc_fabric_logoff(struct fc_lport *l
        lport->tt.disc_stop_final(lport);
        mutex_lock(&lport->lp_mutex);
        if (lport->dns_rdata)
-               lport->tt.rport_logoff(lport->dns_rdata);
+               fc_rport_logoff(lport->dns_rdata);
        mutex_unlock(&lport->lp_mutex);
-       lport->tt.rport_flush_queue();
+       fc_rport_flush_queue();
        mutex_lock(&lport->lp_mutex);
        fc_lport_enter_logo(lport);
        mutex_unlock(&lport->lp_mutex);
@@@ -899,7 -902,7 +902,7 @@@ static void fc_lport_recv_els_req(struc
                /*
                 * Check opcode.
                 */
-               recv = lport->tt.rport_recv_req;
+               recv = fc_rport_recv_req;
                switch (fc_frame_payload_op(fp)) {
                case ELS_FLOGI:
                        if (!lport->point_to_multipoint)
@@@ -941,15 -944,14 +944,14 @@@ struct fc4_prov fc_lport_els_prov = 
  };
  
  /**
-  * fc_lport_recv_req() - The generic lport request handler
+  * fc_lport_recv() - The generic lport request handler
   * @lport: The lport that received the request
   * @fp: The frame the request is in
   *
   * Locking Note: This function should not be called with the lport
   *             lock held because it may grab the lock.
   */
- static void fc_lport_recv_req(struct fc_lport *lport,
-                             struct fc_frame *fp)
+ void fc_lport_recv(struct fc_lport *lport, struct fc_frame *fp)
  {
        struct fc_frame_header *fh = fc_frame_header_get(fp);
        struct fc_seq *sp = fr_seq(fp);
@@@ -978,8 -980,9 +980,9 @@@ drop
        FC_LPORT_DBG(lport, "dropping unexpected frame type %x\n", fh->fh_type);
        fc_frame_free(fp);
        if (sp)
-               lport->tt.exch_done(sp);
+               fc_exch_done(sp);
  }
+ EXPORT_SYMBOL(fc_lport_recv);
  
  /**
   * fc_lport_reset() - Reset a local port
@@@ -1007,12 -1010,14 +1010,14 @@@ EXPORT_SYMBOL(fc_lport_reset)
   */
  static void fc_lport_reset_locked(struct fc_lport *lport)
  {
-       if (lport->dns_rdata)
-               lport->tt.rport_logoff(lport->dns_rdata);
+       if (lport->dns_rdata) {
+               fc_rport_logoff(lport->dns_rdata);
+               lport->dns_rdata = NULL;
+       }
  
        if (lport->ptp_rdata) {
-               lport->tt.rport_logoff(lport->ptp_rdata);
-               kref_put(&lport->ptp_rdata->kref, lport->tt.rport_destroy);
+               fc_rport_logoff(lport->ptp_rdata);
+               kref_put(&lport->ptp_rdata->kref, fc_rport_destroy);
                lport->ptp_rdata = NULL;
        }
  
@@@ -1426,13 -1431,13 +1431,13 @@@ static void fc_lport_enter_dns(struct f
        fc_lport_state_enter(lport, LPORT_ST_DNS);
  
        mutex_lock(&lport->disc.disc_mutex);
-       rdata = lport->tt.rport_create(lport, FC_FID_DIR_SERV);
+       rdata = fc_rport_create(lport, FC_FID_DIR_SERV);
        mutex_unlock(&lport->disc.disc_mutex);
        if (!rdata)
                goto err;
  
        rdata->ops = &fc_lport_rport_ops;
-       lport->tt.rport_login(rdata);
+       fc_rport_login(rdata);
        return;
  
  err:
@@@ -1543,13 -1548,13 +1548,13 @@@ static void fc_lport_enter_fdmi(struct 
        fc_lport_state_enter(lport, LPORT_ST_FDMI);
  
        mutex_lock(&lport->disc.disc_mutex);
-       rdata = lport->tt.rport_create(lport, FC_FID_MGMT_SERV);
+       rdata = fc_rport_create(lport, FC_FID_MGMT_SERV);
        mutex_unlock(&lport->disc.disc_mutex);
        if (!rdata)
                goto err;
  
        rdata->ops = &fc_lport_rport_ops;
-       lport->tt.rport_login(rdata);
+       fc_rport_login(rdata);
        return;
  
  err:
@@@ -1772,7 -1777,7 +1777,7 @@@ void fc_lport_flogi_resp(struct fc_seq 
        if ((csp_flags & FC_SP_FT_FPORT) == 0) {
                if (e_d_tov > lport->e_d_tov)
                        lport->e_d_tov = e_d_tov;
-               lport->r_a_tov = 2 * e_d_tov;
+               lport->r_a_tov = 2 * lport->e_d_tov;
                fc_lport_set_port_id(lport, did, fp);
                printk(KERN_INFO "host%d: libfc: "
                       "Port (%6.6x) entered "
                                   get_unaligned_be64(
                                           &flp->fl_wwnn));
        } else {
-               lport->e_d_tov = e_d_tov;
-               lport->r_a_tov = r_a_tov;
+               if (e_d_tov > lport->e_d_tov)
+                       lport->e_d_tov = e_d_tov;
+               if (r_a_tov > lport->r_a_tov)
+                       lport->r_a_tov = r_a_tov;
                fc_host_fabric_name(lport->host) =
                        get_unaligned_be64(&flp->fl_wwnn);
                fc_lport_set_port_id(lport, did, fp);
@@@ -1858,12 -1865,6 +1865,6 @@@ EXPORT_SYMBOL(fc_lport_config)
   */
  int fc_lport_init(struct fc_lport *lport)
  {
-       if (!lport->tt.lport_recv)
-               lport->tt.lport_recv = fc_lport_recv_req;
-       if (!lport->tt.lport_reset)
-               lport->tt.lport_reset = fc_lport_reset;
        fc_host_port_type(lport->host) = FC_PORTTYPE_NPORT;
        fc_host_node_name(lport->host) = lport->wwnn;
        fc_host_port_name(lport->host) = lport->wwpn;
@@@ -1900,18 -1901,19 +1901,19 @@@ static void fc_lport_bsg_resp(struct fc
                              void *info_arg)
  {
        struct fc_bsg_info *info = info_arg;
-       struct fc_bsg_job *job = info->job;
+       struct bsg_job *job = info->job;
+       struct fc_bsg_reply *bsg_reply = job->reply;
        struct fc_lport *lport = info->lport;
        struct fc_frame_header *fh;
        size_t len;
        void *buf;
  
        if (IS_ERR(fp)) {
-               job->reply->result = (PTR_ERR(fp) == -FC_EX_CLOSED) ?
+               bsg_reply->result = (PTR_ERR(fp) == -FC_EX_CLOSED) ?
                        -ECONNABORTED : -ETIMEDOUT;
                job->reply_len = sizeof(uint32_t);
-               job->state_flags |= FC_RQST_STATE_DONE;
-               job->job_done(job);
+               bsg_job_done(job, bsg_reply->result,
+                              bsg_reply->reply_payload_rcv_len);
                kfree(info);
                return;
        }
                        (unsigned short)fc_frame_payload_op(fp);
  
                /* Save the reply status of the job */
-               job->reply->reply_data.ctels_reply.status =
+               bsg_reply->reply_data.ctels_reply.status =
                        (cmd == info->rsp_code) ?
                        FC_CTELS_STATUS_OK : FC_CTELS_STATUS_REJECT;
        }
  
-       job->reply->reply_payload_rcv_len +=
+       bsg_reply->reply_payload_rcv_len +=
                fc_copy_buffer_to_sglist(buf, len, info->sg, &info->nents,
                                         &info->offset, NULL);
  
        if (fr_eof(fp) == FC_EOF_T &&
            (ntoh24(fh->fh_f_ctl) & (FC_FC_LAST_SEQ | FC_FC_END_SEQ)) ==
            (FC_FC_LAST_SEQ | FC_FC_END_SEQ)) {
-               if (job->reply->reply_payload_rcv_len >
+               if (bsg_reply->reply_payload_rcv_len >
                    job->reply_payload.payload_len)
-                       job->reply->reply_payload_rcv_len =
+                       bsg_reply->reply_payload_rcv_len =
                                job->reply_payload.payload_len;
-               job->reply->result = 0;
-               job->state_flags |= FC_RQST_STATE_DONE;
-               job->job_done(job);
+               bsg_reply->result = 0;
+               bsg_job_done(job, bsg_reply->result,
+                              bsg_reply->reply_payload_rcv_len);
                kfree(info);
        }
        fc_frame_free(fp);
   * Locking Note: The lport lock is expected to be held before calling
   * this routine.
   */
- static int fc_lport_els_request(struct fc_bsg_job *job,
+ static int fc_lport_els_request(struct bsg_job *job,
                                struct fc_lport *lport,
                                u32 did, u32 tov)
  {
        info->nents = job->reply_payload.sg_cnt;
        info->sg = job->reply_payload.sg_list;
  
-       if (!lport->tt.exch_seq_send(lport, fp, fc_lport_bsg_resp,
-                                    NULL, info, tov)) {
+       if (!fc_exch_seq_send(lport, fp, fc_lport_bsg_resp,
+                             NULL, info, tov)) {
                kfree(info);
                return -ECOMM;
        }
   * Locking Note: The lport lock is expected to be held before calling
   * this routine.
   */
- static int fc_lport_ct_request(struct fc_bsg_job *job,
+ static int fc_lport_ct_request(struct bsg_job *job,
                               struct fc_lport *lport, u32 did, u32 tov)
  {
        struct fc_bsg_info *info;
        info->nents = job->reply_payload.sg_cnt;
        info->sg = job->reply_payload.sg_list;
  
-       if (!lport->tt.exch_seq_send(lport, fp, fc_lport_bsg_resp,
-                                    NULL, info, tov)) {
+       if (!fc_exch_seq_send(lport, fp, fc_lport_bsg_resp,
+                             NULL, info, tov)) {
                kfree(info);
                return -ECOMM;
        }
   *                        FC Passthrough requests
   * @job: The BSG passthrough job
   */
- int fc_lport_bsg_request(struct fc_bsg_job *job)
+ int fc_lport_bsg_request(struct bsg_job *job)
  {
+       struct fc_bsg_request *bsg_request = job->request;
+       struct fc_bsg_reply *bsg_reply = job->reply;
        struct request *rsp = job->req->next_rq;
-       struct Scsi_Host *shost = job->shost;
+       struct Scsi_Host *shost = fc_bsg_to_shost(job);
        struct fc_lport *lport = shost_priv(shost);
        struct fc_rport *rport;
        struct fc_rport_priv *rdata;
        int rc = -EINVAL;
        u32 did, tov;
  
-       job->reply->reply_payload_rcv_len = 0;
+       bsg_reply->reply_payload_rcv_len = 0;
        if (rsp)
                rsp->resid_len = job->reply_payload.payload_len;
  
        mutex_lock(&lport->lp_mutex);
  
-       switch (job->request->msgcode) {
+       switch (bsg_request->msgcode) {
        case FC_BSG_RPT_ELS:
-               rport = job->rport;
+               rport = fc_bsg_to_rport(job);
                if (!rport)
                        break;
  
                break;
  
        case FC_BSG_RPT_CT:
-               rport = job->rport;
+               rport = fc_bsg_to_rport(job);
                if (!rport)
                        break;
  
                break;
  
        case FC_BSG_HST_CT:
-               did = ntoh24(job->request->rqst_data.h_ct.port_id);
+               did = ntoh24(bsg_request->rqst_data.h_ct.port_id);
                if (did == FC_FID_DIR_SERV) {
                        rdata = lport->dns_rdata;
                        if (!rdata)
                                break;
                        tov = rdata->e_d_tov;
                } else {
-                       rdata = lport->tt.rport_lookup(lport, did);
+                       rdata = fc_rport_lookup(lport, did);
                        if (!rdata)
                                break;
                        tov = rdata->e_d_tov;
-                       kref_put(&rdata->kref, lport->tt.rport_destroy);
+                       kref_put(&rdata->kref, fc_rport_destroy);
                }
  
                rc = fc_lport_ct_request(job, lport, did, tov);
                break;
  
        case FC_BSG_HST_ELS_NOLOGIN:
-               did = ntoh24(job->request->rqst_data.h_els.port_id);
+               did = ntoh24(bsg_request->rqst_data.h_els.port_id);
                rc = fc_lport_els_request(job, lport, did, lport->e_d_tov);
                break;
        }
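
The fc_lport hunks above convert libfc's passthrough path from the FC-private struct fc_bsg_job to the generic struct bsg_job, where the request and reply are reached through void pointers and completion goes through bsg_job_done() instead of job->job_done(). A minimal hedged sketch of the completion idiom follows; the helper name and the timeout result value are illustrative only.

/*
 * Hedged sketch of the generic bsg_job completion idiom after the
 * conversion. The helper name and the -ETIMEDOUT result are illustrative.
 */
#include <linux/bsg-lib.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <scsi/scsi_bsg_fc.h>

static void my_complete_fc_bsg(struct bsg_job *job, bool timed_out)
{
	/* The FC-specific reply now sits behind a void pointer. */
	struct fc_bsg_reply *bsg_reply = job->reply;

	bsg_reply->result = timed_out ? -ETIMEDOUT : 0;
	job->reply_len = sizeof(u32);

	/* Replaces job->state_flags |= FC_RQST_STATE_DONE; job->job_done(job). */
	bsg_job_done(job, bsg_reply->result,
		     bsg_reply->reply_payload_rcv_len);
}
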
diff --combined drivers/scsi/lpfc/lpfc_sli.c
index f4f77c5b0c83da0520eb868b79095f836179cecf,27cbd68535245cae768291234a898028a575c88e..4faa7672fc1d80add7e603e7bda066e5b98fd34b
@@@ -47,6 -47,7 +47,7 @@@
  #include "lpfc_compat.h"
  #include "lpfc_debugfs.h"
  #include "lpfc_vport.h"
+ #include "lpfc_version.h"
  
  /* There are only four IOCB completion types. */
  typedef enum _lpfc_iocb_type {
@@@ -1323,20 -1324,18 +1324,20 @@@ lpfc_sli_ringtxcmpl_put(struct lpfc_hb
  {
        lockdep_assert_held(&phba->hbalock);
  
 -      BUG_ON(!piocb || !piocb->vport);
 +      BUG_ON(!piocb);
  
        list_add_tail(&piocb->list, &pring->txcmplq);
        piocb->iocb_flag |= LPFC_IO_ON_TXCMPLQ;
  
        if ((unlikely(pring->ringno == LPFC_ELS_RING)) &&
           (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) &&
 -         (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN) &&
 -          (!(piocb->vport->load_flag & FC_UNLOADING)))
 -              mod_timer(&piocb->vport->els_tmofunc,
 -                        jiffies +
 -                        msecs_to_jiffies(1000 * (phba->fc_ratov << 1)));
 +         (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) {
 +              BUG_ON(!piocb->vport);
 +              if (!(piocb->vport->load_flag & FC_UNLOADING))
 +                      mod_timer(&piocb->vport->els_tmofunc,
 +                                jiffies +
 +                                msecs_to_jiffies(1000 * (phba->fc_ratov << 1)));
 +      }
  
        return 0;
  }
@@@ -2678,15 -2677,16 +2679,16 @@@ lpfc_sli_iocbq_lookup(struct lpfc_hba *
  
        if (iotag != 0 && iotag <= phba->sli.last_iotag) {
                cmd_iocb = phba->sli.iocbq_lookup[iotag];
-               list_del_init(&cmd_iocb->list);
                if (cmd_iocb->iocb_flag & LPFC_IO_ON_TXCMPLQ) {
+                       /* remove from txcmpl queue list */
+                       list_del_init(&cmd_iocb->list);
                        cmd_iocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
+                       return cmd_iocb;
                }
-               return cmd_iocb;
        }
  
        lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
-                       "0317 iotag x%x is out off "
+                       "0317 iotag x%x is out of "
                        "range: max iotag x%x wd0 x%x\n",
                        iotag, phba->sli.last_iotag,
                        *(((uint32_t *) &prspiocb->iocb) + 7));
@@@ -2721,8 -2721,9 +2723,9 @@@ lpfc_sli_iocbq_lookup_by_tag(struct lpf
                        return cmd_iocb;
                }
        }
        lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
-                       "0372 iotag x%x is out off range: max iotag (x%x)\n",
+                       "0372 iotag x%x is out of range: max iotag (x%x)\n",
                        iotag, phba->sli.last_iotag);
        return NULL;
  }
@@@ -6291,6 -6292,25 +6294,25 @@@ lpfc_sli4_repost_els_sgl_list(struct lp
        return 0;
  }
  
+ void
+ lpfc_set_host_data(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
+ {
+       uint32_t len;
+       len = sizeof(struct lpfc_mbx_set_host_data) -
+               sizeof(struct lpfc_sli4_cfg_mhdr);
+       lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
+                        LPFC_MBOX_OPCODE_SET_HOST_DATA, len,
+                        LPFC_SLI4_MBX_EMBED);
+       mbox->u.mqe.un.set_host_data.param_id = LPFC_SET_HOST_OS_DRIVER_VERSION;
+       mbox->u.mqe.un.set_host_data.param_len = 8;
+       snprintf(mbox->u.mqe.un.set_host_data.data,
+                LPFC_HOST_OS_DRIVER_VERSION_SIZE,
+                "Linux %s v"LPFC_DRIVER_VERSION,
+                (phba->hba_flag & HBA_FCOE_MODE) ? "FCoE" : "FC");
+ }
  /**
   * lpfc_sli4_hba_setup - SLI4 device initialization PCI function
   * @phba: Pointer to HBA context object.
@@@ -6542,6 -6562,15 +6564,15 @@@ lpfc_sli4_hba_setup(struct lpfc_hba *ph
                goto out_free_mbox;
        }
  
+       lpfc_set_host_data(phba, mboxq);
+       rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
+       if (rc) {
+               lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
+                               "2134 Failed to set host os driver version %x",
+                               rc);
+       }
        /* Read the port's service parameters. */
        rc = lpfc_read_sparam(phba, mboxq, vport->vpi);
        if (rc) {
@@@ -11781,6 -11810,8 +11812,8 @@@ lpfc_sli4_els_wcqe_to_rspiocbq(struct l
        /* Look up the ELS command IOCB and create pseudo response IOCB */
        cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring,
                                bf_get(lpfc_wcqe_c_request_tag, wcqe));
+       /* Put the iocb back on the txcmplq */
+       lpfc_sli_ringtxcmpl_put(phba, pring, cmdiocbq);
        spin_unlock_irqrestore(&pring->ring_lock, iflags);
  
        if (unlikely(!cmdiocbq)) {
diff --combined drivers/scsi/megaraid/megaraid_sas.h
index 3aaea713bf3712b2ad8874aaecf8a6337b7a2119,757ddda1d63ecdd4c96767f164d9a8960a7d9b43..fdd519c1dd5753bc59dff5a11b7e2c19f89d52bd
@@@ -35,8 -35,8 +35,8 @@@
  /*
   * MegaRAID SAS Driver meta data
   */
- #define MEGASAS_VERSION                               "06.811.02.00-rc1"
- #define MEGASAS_RELDATE                               "April 12, 2016"
+ #define MEGASAS_VERSION                               "06.812.07.00-rc1"
+ #define MEGASAS_RELDATE                               "August 22, 2016"
  
  /*
   * Device IDs
@@@ -1429,6 -1429,8 +1429,8 @@@ enum FW_BOOT_CONTEXT 
  #define MR_MAX_REPLY_QUEUES_EXT_OFFSET_SHIFT    14
  #define MR_MAX_MSIX_REG_ARRAY                   16
  #define MR_RDPQ_MODE_OFFSET                   0X00800000
+ #define MR_CAN_HANDLE_SYNC_CACHE_OFFSET               0X01000000
  /*
  * register set for both 1068 and 1078 controllers
  * structure extended for 1078 registers
@@@ -2118,7 -2120,6 +2120,6 @@@ struct megasas_instance 
        u32 ctrl_context_pages;
        struct megasas_ctrl_info *ctrl_info;
        unsigned int msix_vectors;
-       struct msix_entry msixentry[MEGASAS_MAX_MSIX_QUEUES];
        struct megasas_irq_context irq_context[MEGASAS_MAX_MSIX_QUEUES];
        u64 map_id;
        u64 pd_seq_map_id;
        u8 is_imr;
        u8 is_rdpq;
        bool dev_handle;
+       bool fw_sync_cache_support;
  };
  struct MR_LD_VF_MAP {
        u32 size;
@@@ -2233,7 -2235,7 +2235,7 @@@ struct megasas_instance_template 
  };
  
  #define MEGASAS_IS_LOGICAL(scp)                                               \
 -      (scp->device->channel < MEGASAS_MAX_PD_CHANNELS) ? 0 : 1
 +      ((scp->device->channel < MEGASAS_MAX_PD_CHANNELS) ? 0 : 1)
  
  #define MEGASAS_DEV_INDEX(scp)                                                \
        (((scp->device->channel % 2) * MEGASAS_MAX_DEV_PER_CHANNEL) +   \
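
The MEGASAS_IS_LOGICAL change above only adds a pair of outer parentheses, but without them the trailing ?: binds against whatever operator follows the macro at its use site. The stand-alone C illustration below uses toy values and is not MegaRAID code.

/* Stand-alone illustration of the precedence hazard; not MegaRAID code. */
#include <stdio.h>

#define IS_LOGICAL_OLD(ch)	(ch) < 8 ? 0 : 1	/* unparenthesized ?: */
#define IS_LOGICAL_NEW(ch)	(((ch) < 8) ? 0 : 1)	/* fully parenthesized */

int main(void)
{
	int channel = 2;	/* toy value: channels below 8 count as logical */

	/* Expands to: (channel) < 8 ? 0 : (1 == 0)  ->  prints 0 (wrong). */
	printf("old: %d\n", IS_LOGICAL_OLD(channel) == 0);

	/* Expands to: (((channel) < 8) ? 0 : 1) == 0  ->  prints 1 (intended). */
	printf("new: %d\n", IS_LOGICAL_NEW(channel) == 0);

	return 0;
}

With the old form, the comparison against 0 silently becomes part of the else branch of the conditional, so the surrounding expression never sees the intended 0/1 result.
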
diff --combined drivers/scsi/mpt3sas/mpt3sas_scsih.c
index 1c4744e78173bc0f78383b8f6cf901715b946d34,4f28963b50741b667033d2fc70faffbe82f7676c..5c8f75247d739489313613e2f18965fd63b80e86
@@@ -423,7 -423,7 +423,7 @@@ _scsih_get_sas_address(struct MPT3SAS_A
                return 0;
        }
  
-       /* we hit this becuase the given parent handle doesn't exist */
+       /* we hit this because the given parent handle doesn't exist */
        if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE)
                return -ENXIO;
  
@@@ -788,6 -788,11 +788,11 @@@ _scsih_sas_device_add(struct MPT3SAS_AD
        list_add_tail(&sas_device->list, &ioc->sas_device_list);
        spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
  
+       if (ioc->hide_drives) {
+               clear_bit(sas_device->handle, ioc->pend_os_device_add);
+               return;
+       }
        if (!mpt3sas_transport_port_add(ioc, sas_device->handle,
             sas_device->sas_address_parent)) {
                _scsih_sas_device_remove(ioc, sas_device);
                            sas_device->sas_address_parent);
                        _scsih_sas_device_remove(ioc, sas_device);
                }
-       }
+       } else
+               clear_bit(sas_device->handle, ioc->pend_os_device_add);
  }
  
  /**
@@@ -1273,9 -1279,9 +1279,9 @@@ scsih_target_alloc(struct scsi_target *
                        sas_target_priv_data->handle = raid_device->handle;
                        sas_target_priv_data->sas_address = raid_device->wwid;
                        sas_target_priv_data->flags |= MPT_TARGET_FLAGS_VOLUME;
 -                      sas_target_priv_data->raid_device = raid_device;
                        if (ioc->is_warpdrive)
 -                              raid_device->starget = starget;
 +                              sas_target_priv_data->raid_device = raid_device;
 +                      raid_device->starget = starget;
                }
                spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
                return 0;
@@@ -1517,7 -1523,7 +1523,7 @@@ _scsih_display_sata_capabilities(struc
  /*
   * raid transport support -
   * Enabled for SLES11 and newer, in older kernels the driver will panic when
-  * unloading the driver followed by a load - I beleive that the subroutine
+  * unloading the driver followed by a load - I believe that the subroutine
   * raid_class_release() is not cleaning up properly.
   */
  
@@@ -2279,7 -2285,7 +2285,7 @@@ mpt3sas_scsih_issue_tm(struct MPT3SAS_A
                msix_task = scsi_lookup->msix_io;
        else
                msix_task = 0;
-       mpt3sas_base_put_smid_hi_priority(ioc, smid, msix_task);
+       ioc->put_smid_hi_priority(ioc, smid, msix_task);
        wait_for_completion_timeout(&ioc->tm_cmds.done, timeout*HZ);
        if (!(ioc->tm_cmds.status & MPT3_CMD_COMPLETE)) {
                pr_err(MPT3SAS_FMT "%s: timeout\n",
@@@ -2837,7 -2843,7 +2843,7 @@@ _scsih_internal_device_block(struct scs
        if (r == -EINVAL)
                sdev_printk(KERN_WARNING, sdev,
                    "device_block failed with return(%d) for handle(0x%04x)\n",
-                   sas_device_priv_data->sas_target->handle, r);
+                   r, sas_device_priv_data->sas_target->handle);
  }
  
  /**
@@@ -2867,20 -2873,20 +2873,20 @@@ _scsih_internal_device_unblock(struct s
                sdev_printk(KERN_WARNING, sdev,
                    "device_unblock failed with return(%d) for handle(0x%04x) "
                    "performing a block followed by an unblock\n",
-                   sas_device_priv_data->sas_target->handle, r);
+                   r, sas_device_priv_data->sas_target->handle);
                sas_device_priv_data->block = 1;
                r = scsi_internal_device_block(sdev);
                if (r)
                        sdev_printk(KERN_WARNING, sdev, "retried device_block "
                            "failed with return(%d) for handle(0x%04x)\n",
-                           sas_device_priv_data->sas_target->handle, r);
+                           r, sas_device_priv_data->sas_target->handle);
  
                sas_device_priv_data->block = 0;
                r = scsi_internal_device_unblock(sdev, SDEV_RUNNING);
                if (r)
                        sdev_printk(KERN_WARNING, sdev, "retried device_unblock"
                            " failed with return(%d) for handle(0x%04x)\n",
-                           sas_device_priv_data->sas_target->handle, r);
+                           r, sas_device_priv_data->sas_target->handle);
        }
  }
  
@@@ -2942,7 -2948,7 +2948,7 @@@ _scsih_ublock_io_device(struct MPT3SAS_
   * @ioc: per adapter object
   * @handle: device handle
   *
-  * During device pull we need to appropiately set the sdev state.
+  * During device pull we need to appropriately set the sdev state.
   */
  static void
  _scsih_block_io_all_device(struct MPT3SAS_ADAPTER *ioc)
   * @ioc: per adapter object
   * @handle: device handle
   *
-  * During device pull we need to appropiately set the sdev state.
+  * During device pull we need to appropriately set the sdev state.
   */
  static void
  _scsih_block_io_device(struct MPT3SAS_ADAPTER *ioc, u16 handle)
@@@ -3138,6 -3144,8 +3144,8 @@@ _scsih_tm_tr_send(struct MPT3SAS_ADAPTE
        if (test_bit(handle, ioc->pd_handles))
                return;
  
+       clear_bit(handle, ioc->pend_os_device_add);
        spin_lock_irqsave(&ioc->sas_device_lock, flags);
        sas_device = __mpt3sas_get_sdev_by_handle(ioc, handle);
        if (sas_device && sas_device->starget &&
        mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
        mpi_request->DevHandle = cpu_to_le16(handle);
        mpi_request->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
-       mpt3sas_base_put_smid_hi_priority(ioc, smid, 0);
+       set_bit(handle, ioc->device_remove_in_progress);
+       ioc->put_smid_hi_priority(ioc, smid, 0);
        mpt3sas_trigger_master(ioc, MASTER_TRIGGER_DEVICE_REMOVAL);
  
  out:
@@@ -3291,7 -3300,7 +3300,7 @@@ _scsih_tm_tr_complete(struct MPT3SAS_AD
        mpi_request->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
        mpi_request->Operation = MPI2_SAS_OP_REMOVE_DEVICE;
        mpi_request->DevHandle = mpi_request_tm->DevHandle;
-       mpt3sas_base_put_smid_default(ioc, smid_sas_ctrl);
+       ioc->put_smid_default(ioc, smid_sas_ctrl);
  
        return _scsih_check_for_pending_tm(ioc, smid);
  }
@@@ -3326,6 -3335,11 +3335,11 @@@ _scsih_sas_control_complete(struct MPT3
                ioc->name, le16_to_cpu(mpi_reply->DevHandle), smid,
                le16_to_cpu(mpi_reply->IOCStatus),
                le32_to_cpu(mpi_reply->IOCLogInfo)));
+               if (le16_to_cpu(mpi_reply->IOCStatus) ==
+                    MPI2_IOCSTATUS_SUCCESS) {
+                       clear_bit(le16_to_cpu(mpi_reply->DevHandle),
+                           ioc->device_remove_in_progress);
+               }
        } else {
                pr_err(MPT3SAS_FMT "mpi_reply not valid at %s:%d/%s()!\n",
                    ioc->name, __FILE__, __LINE__, __func__);
@@@ -3381,7 -3395,7 +3395,7 @@@ _scsih_tm_tr_volume_send(struct MPT3SAS
        mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
        mpi_request->DevHandle = cpu_to_le16(handle);
        mpi_request->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
-       mpt3sas_base_put_smid_hi_priority(ioc, smid, 0);
+       ioc->put_smid_hi_priority(ioc, smid, 0);
  }
  
  /**
@@@ -3473,7 -3487,7 +3487,7 @@@ _scsih_issue_delayed_event_ack(struct M
        ack_request->EventContext = event_context;
        ack_request->VF_ID = 0;  /* TODO */
        ack_request->VP_ID = 0;
-       mpt3sas_base_put_smid_default(ioc, smid);
+       ioc->put_smid_default(ioc, smid);
  }
  
  /**
@@@ -3530,7 -3544,7 +3544,7 @@@ _scsih_issue_delayed_sas_io_unit_ctrl(s
        mpi_request->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
        mpi_request->Operation = MPI2_SAS_OP_REMOVE_DEVICE;
        mpi_request->DevHandle = handle;
-       mpt3sas_base_put_smid_default(ioc, smid);
+       ioc->put_smid_default(ioc, smid);
  }
  
  /**
@@@ -3885,11 -3899,6 +3899,11 @@@ _scsih_temp_threshold_events(struct MPT
        }
  }
  
 +static inline bool ata_12_16_cmd(struct scsi_cmnd *scmd)
 +{
 +      return (scmd->cmnd[0] == ATA_12 || scmd->cmnd[0] == ATA_16);
 +}
 +
  /**
   * _scsih_flush_running_cmds - completing outstanding commands.
   * @ioc: per adapter object
@@@ -3911,9 -3920,6 +3925,9 @@@ _scsih_flush_running_cmds(struct MPT3SA
                if (!scmd)
                        continue;
                count++;
 +              if (ata_12_16_cmd(scmd))
 +                      scsi_internal_device_unblock(scmd->device,
 +                                                      SDEV_RUNNING);
                mpt3sas_base_free_smid(ioc, smid);
                scsi_dma_unmap(scmd);
                if (ioc->pci_error_recovery)
   * _scsih_setup_eedp - setup MPI request for EEDP transfer
   * @ioc: per adapter object
   * @scmd: pointer to scsi command object
-  * @mpi_request: pointer to the SCSI_IO reqest message frame
+  * @mpi_request: pointer to the SCSI_IO request message frame
   *
   * Supporting protection 1 and 3.
   *
@@@ -3983,6 -3989,9 +3997,9 @@@ _scsih_setup_eedp(struct MPT3SAS_ADAPTE
  
        mpi_request_3v->EEDPBlockSize =
            cpu_to_le16(scmd->device->sector_size);
+       if (ioc->is_gen35_ioc)
+               eedp_flags |= MPI25_SCSIIO_EEDPFLAGS_APPTAG_DISABLE_MODE;
        mpi_request->EEDPFlags = cpu_to_le16(eedp_flags);
  }
  
@@@ -4018,6 -4027,8 +4035,6 @@@ _scsih_eedp_error_handling(struct scsi_
            SAM_STAT_CHECK_CONDITION;
  }
  
 -
 -
  /**
   * scsih_qcmd - main scsi request entry point
   * @scmd: pointer to scsi command object
@@@ -4044,13 -4055,6 +4061,13 @@@ scsih_qcmd(struct Scsi_Host *shost, str
        if (ioc->logging_level & MPT_DEBUG_SCSI)
                scsi_print_command(scmd);
  
 +      /*
 +       * Lock the device for any subsequent command until command is
 +       * done.
 +       */
 +      if (ata_12_16_cmd(scmd))
 +              scsi_internal_device_block(scmd->device);
 +
        sas_device_priv_data = scmd->device->hostdata;
        if (!sas_device_priv_data || !sas_device_priv_data->sas_target) {
                scmd->result = DID_NO_CONNECT << 16;
                scmd->result = DID_NO_CONNECT << 16;
                scmd->scsi_done(scmd);
                return 0;
-       /* device busy with task managment */
+       /* device busy with task management */
        } else if (sas_target_priv_data->tm_busy ||
            sas_device_priv_data->block)
                return SCSI_MLQUEUE_DEVICE_BUSY;
                if (sas_target_priv_data->flags & MPT_TARGET_FASTPATH_IO) {
                        mpi_request->IoFlags = cpu_to_le16(scmd->cmd_len |
                            MPI25_SCSIIO_IOFLAGS_FAST_PATH);
-                       mpt3sas_base_put_smid_fast_path(ioc, smid, handle);
+                       ioc->put_smid_fast_path(ioc, smid, handle);
                } else
-                       mpt3sas_base_put_smid_scsi_io(ioc, smid,
+                       ioc->put_smid_scsi_io(ioc, smid,
                            le16_to_cpu(mpi_request->DevHandle));
        } else
-               mpt3sas_base_put_smid_default(ioc, smid);
+               ioc->put_smid_default(ioc, smid);
        return 0;
  
   out:
@@@ -4626,9 -4630,6 +4643,9 @@@ _scsih_io_done(struct MPT3SAS_ADAPTER *
        if (scmd == NULL)
                return 1;
  
 +      if (ata_12_16_cmd(scmd))
 +              scsi_internal_device_unblock(scmd->device, SDEV_RUNNING);
 +
        mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
  
        if (mpi_reply == NULL) {
                memcpy(mpi_request->CDB.CDB32, scmd->cmnd, scmd->cmd_len);
                mpi_request->DevHandle =
                    cpu_to_le16(sas_device_priv_data->sas_target->handle);
-               mpt3sas_base_put_smid_scsi_io(ioc, smid,
+               ioc->put_smid_scsi_io(ioc, smid,
                    sas_device_priv_data->sas_target->handle);
                return 0;
        }
@@@ -5383,10 -5384,10 +5400,10 @@@ _scsih_check_device(struct MPT3SAS_ADAP
                        sas_device->handle, handle);
                sas_target_priv_data->handle = handle;
                sas_device->handle = handle;
-               if (sas_device_pg0.Flags &
+               if (le16_to_cpu(sas_device_pg0.Flags) &
                     MPI2_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID) {
                        sas_device->enclosure_level =
-                               le16_to_cpu(sas_device_pg0.EnclosureLevel);
+                               sas_device_pg0.EnclosureLevel;
                        memcpy(sas_device->connector_name,
                                sas_device_pg0.ConnectorName, 4);
                        sas_device->connector_name[4] = '\0';
@@@ -5465,6 -5466,7 +5482,7 @@@ _scsih_add_device(struct MPT3SAS_ADAPTE
        device_info = le32_to_cpu(sas_device_pg0.DeviceInfo);
        if (!(_scsih_is_end_device(device_info)))
                return -1;
+       set_bit(handle, ioc->pend_os_device_add);
        sas_address = le64_to_cpu(sas_device_pg0.SASAddress);
  
        /* check if device is present */
        sas_device = mpt3sas_get_sdev_by_addr(ioc,
                                        sas_address);
        if (sas_device) {
+               clear_bit(handle, ioc->pend_os_device_add);
                sas_device_put(sas_device);
                return -1;
        }
        sas_device->fast_path = (le16_to_cpu(sas_device_pg0.Flags) &
            MPI25_SAS_DEVICE0_FLAGS_FAST_PATH_CAPABLE) ? 1 : 0;
  
-       if (sas_device_pg0.Flags & MPI2_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID) {
+       if (le16_to_cpu(sas_device_pg0.Flags)
+               & MPI2_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID) {
                sas_device->enclosure_level =
-                       le16_to_cpu(sas_device_pg0.EnclosureLevel);
+                       sas_device_pg0.EnclosureLevel;
                memcpy(sas_device->connector_name,
                        sas_device_pg0.ConnectorName, 4);
                sas_device->connector_name[4] = '\0';
@@@ -5806,6 -5810,9 +5826,9 @@@ _scsih_sas_topology_change_event(struc
                        _scsih_check_device(ioc, sas_address, handle,
                            phy_number, link_rate);
  
+                       if (!test_bit(handle, ioc->pend_os_device_add))
+                               break;
  
                case MPI2_EVENT_SAS_TOPO_RC_TARG_ADDED:
  
@@@ -6267,7 -6274,7 +6290,7 @@@ _scsih_ir_fastpath(struct MPT3SAS_ADAPT
            handle, phys_disk_num));
  
        init_completion(&ioc->scsih_cmds.done);
-       mpt3sas_base_put_smid_default(ioc, smid);
+       ioc->put_smid_default(ioc, smid);
        wait_for_completion_timeout(&ioc->scsih_cmds.done, 10*HZ);
  
        if (!(ioc->scsih_cmds.status & MPT3_CMD_COMPLETE)) {
@@@ -6320,7 -6327,7 +6343,7 @@@ _scsih_reprobe_lun(struct scsi_device *
  {
        sdev->no_uld_attach = no_uld_attach ? 1 : 0;
        sdev_printk(KERN_INFO, sdev, "%s raid component\n",
-           sdev->no_uld_attach ? "hidding" : "exposing");
+           sdev->no_uld_attach ? "hiding" : "exposing");
        WARN_ON(scsi_device_reprobe(sdev));
  }
  
@@@ -7050,7 -7057,7 +7073,7 @@@ Mpi2SasDevicePage0_t *sas_device_pg0
                        if (sas_device_pg0->Flags &
                              MPI2_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID) {
                                sas_device->enclosure_level =
-                                  le16_to_cpu(sas_device_pg0->EnclosureLevel);
+                                  sas_device_pg0->EnclosureLevel;
                                memcpy(&sas_device->connector_name[0],
                                        &sas_device_pg0->ConnectorName[0], 4);
                        } else {
@@@ -7112,6 -7119,7 +7135,7 @@@ _scsih_search_responding_sas_devices(st
                sas_device_pg0.SASAddress =
                                le64_to_cpu(sas_device_pg0.SASAddress);
                sas_device_pg0.Slot = le16_to_cpu(sas_device_pg0.Slot);
+               sas_device_pg0.Flags = le16_to_cpu(sas_device_pg0.Flags);
                _scsih_mark_responding_sas_device(ioc, &sas_device_pg0);
        }
  
@@@ -7723,6 -7731,9 +7747,9 @@@ mpt3sas_scsih_reset_handler(struct MPT3
                        complete(&ioc->tm_cmds.done);
                }
  
+               memset(ioc->pend_os_device_add, 0, ioc->pend_os_device_add_sz);
+               memset(ioc->device_remove_in_progress, 0,
+                      ioc->device_remove_in_progress_sz);
                _scsih_fw_event_cleanup_queue(ioc);
                _scsih_flush_running_cmds(ioc);
                break;
@@@ -8113,7 -8124,7 +8140,7 @@@ _scsih_ir_shutdown(struct MPT3SAS_ADAPT
        if (!ioc->hide_ir_msg)
                pr_info(MPT3SAS_FMT "IR shutdown (sending)\n", ioc->name);
        init_completion(&ioc->scsih_cmds.done);
-       mpt3sas_base_put_smid_default(ioc, smid);
+       ioc->put_smid_default(ioc, smid);
        wait_for_completion_timeout(&ioc->scsih_cmds.done, 10*HZ);
  
        if (!(ioc->scsih_cmds.status & MPT3_CMD_COMPLETE)) {
@@@ -8654,6 -8665,12 +8681,12 @@@ _scsih_determine_hba_mpi_version(struc
        case MPI26_MFGPAGE_DEVID_SAS3324_2:
        case MPI26_MFGPAGE_DEVID_SAS3324_3:
        case MPI26_MFGPAGE_DEVID_SAS3324_4:
+       case MPI26_MFGPAGE_DEVID_SAS3508:
+       case MPI26_MFGPAGE_DEVID_SAS3508_1:
+       case MPI26_MFGPAGE_DEVID_SAS3408:
+       case MPI26_MFGPAGE_DEVID_SAS3516:
+       case MPI26_MFGPAGE_DEVID_SAS3516_1:
+       case MPI26_MFGPAGE_DEVID_SAS3416:
                return MPI26_VERSION;
        }
        return 0;
@@@ -8722,10 -8739,29 +8755,29 @@@ _scsih_probe(struct pci_dev *pdev, cons
                ioc->hba_mpi_version_belonged = hba_mpi_version;
                ioc->id = mpt3_ids++;
                sprintf(ioc->driver_name, "%s", MPT3SAS_DRIVER_NAME);
+               switch (pdev->device) {
+               case MPI26_MFGPAGE_DEVID_SAS3508:
+               case MPI26_MFGPAGE_DEVID_SAS3508_1:
+               case MPI26_MFGPAGE_DEVID_SAS3408:
+               case MPI26_MFGPAGE_DEVID_SAS3516:
+               case MPI26_MFGPAGE_DEVID_SAS3516_1:
+               case MPI26_MFGPAGE_DEVID_SAS3416:
+                       ioc->is_gen35_ioc = 1;
+                       break;
+               default:
+                       ioc->is_gen35_ioc = 0;
+               }
                if ((ioc->hba_mpi_version_belonged == MPI25_VERSION &&
                        pdev->revision >= SAS3_PCI_DEVICE_C0_REVISION) ||
-                       (ioc->hba_mpi_version_belonged == MPI26_VERSION))
-                       ioc->msix96_vector = 1;
+                       (ioc->hba_mpi_version_belonged == MPI26_VERSION)) {
+                       ioc->combined_reply_queue = 1;
+                       if (ioc->is_gen35_ioc)
+                               ioc->combined_reply_index_count =
+                                MPT3_SUP_REPLY_POST_HOST_INDEX_REG_COUNT_G35;
+                       else
+                               ioc->combined_reply_index_count =
+                                MPT3_SUP_REPLY_POST_HOST_INDEX_REG_COUNT_G3;
+               }
                break;
        default:
                return -ENODEV;
@@@ -9128,6 -9164,19 +9180,19 @@@ static const struct pci_device_id mpt3s
                PCI_ANY_ID, PCI_ANY_ID },
        { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3324_4,
                PCI_ANY_ID, PCI_ANY_ID },
+       /* Ventura, Crusader, Harpoon & Tomcat ~ 3516, 3416, 3508 & 3408*/
+       { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3508,
+               PCI_ANY_ID, PCI_ANY_ID },
+       { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3508_1,
+               PCI_ANY_ID, PCI_ANY_ID },
+       { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3408,
+               PCI_ANY_ID, PCI_ANY_ID },
+       { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3516,
+               PCI_ANY_ID, PCI_ANY_ID },
+       { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3516_1,
+               PCI_ANY_ID, PCI_ANY_ID },
+       { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3416,
+               PCI_ANY_ID, PCI_ANY_ID },
        {0}     /* Terminating entry */
  };
  MODULE_DEVICE_TABLE(pci, mpt3sas_pci_table);
@@@ -9168,7 -9217,7 +9233,7 @@@ scsih_init(void
         /* queuecommand callback handler */
        scsi_io_cb_idx = mpt3sas_base_register_callback_handler(_scsih_io_done);
  
-       /* task managment callback handler */
+       /* task management callback handler */
        tm_cb_idx = mpt3sas_base_register_callback_handler(_scsih_tm_done);
  
        /* base internal commands callback handler */
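
Several of the mpt3sas hunks above introduce the same bookkeeping pattern: a per-controller bitmap (pend_os_device_add, device_remove_in_progress) where a bit is set when work is issued for a device handle, tested before follow-up work, and cleared on completion or controller reset. A hedged sketch of that pattern follows, with hypothetical names rather than mpt3sas fields.

/*
 * Hedged sketch of the bitmap bookkeeping pattern: set a bit when work is
 * issued for a handle, test it before acting later, clear it on completion.
 * struct my_ioc and MY_MAX_HANDLES are hypothetical, not mpt3sas fields.
 */
#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/types.h>

#define MY_MAX_HANDLES 1024

struct my_ioc {
	unsigned long *pending;		/* one bit per device handle */
};

static int my_ioc_init(struct my_ioc *ioc)
{
	ioc->pending = kcalloc(BITS_TO_LONGS(MY_MAX_HANDLES),
			       sizeof(unsigned long), GFP_KERNEL);
	return ioc->pending ? 0 : -ENOMEM;
}

static void my_mark_pending(struct my_ioc *ioc, u16 handle)
{
	set_bit(handle, ioc->pending);		/* atomic, usable from IRQ context */
}

static bool my_still_pending(struct my_ioc *ioc, u16 handle)
{
	return test_bit(handle, ioc->pending);
}

static void my_complete(struct my_ioc *ioc, u16 handle)
{
	clear_bit(handle, ioc->pending);	/* also cleared wholesale on reset */
}
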
diff --combined drivers/scsi/pmcraid.c
index 845affa112f711d8df861d3dd36299377a6ff34a,cb12b7bad8e606d37ac8e57e65f0ca635959373a..337982cf3d638198c1d674ab974d696b582ab160
@@@ -1368,8 -1368,13 +1368,8 @@@ static struct genl_multicast_group pmcr
        { .name = "events", /* not really used - see ID discussion below */ },
  };
  
 -static struct genl_family pmcraid_event_family = {
 -      /*
 -       * Due to prior multicast group abuse (the code having assumed that
 -       * the family ID can be used as a multicast group ID) we need to
 -       * statically allocate a family (and thus group) ID.
 -       */
 -      .id = GENL_ID_PMCRAID,
 +static struct genl_family pmcraid_event_family __ro_after_init = {
 +      .module = THIS_MODULE,
        .name = "pmcraid",
        .version = 1,
        .maxattr = PMCRAID_AEN_ATTR_MAX,
   *    0 if the pmcraid_event_family is successfully registered
   *    with netlink generic, non-zero otherwise
   */
 -static int pmcraid_netlink_init(void)
 +static int __init pmcraid_netlink_init(void)
  {
        int result;
  
@@@ -3787,11 -3792,11 +3787,11 @@@ static long pmcraid_ioctl_passthrough
                                                      direction);
                if (rc) {
                        pmcraid_err("couldn't build passthrough ioadls\n");
-                       goto out_free_buffer;
+                       goto out_free_cmd;
                }
        } else if (request_size < 0) {
                rc = -EINVAL;
-               goto out_free_buffer;
+               goto out_free_cmd;
        }
  
        /* If data is being written into the device, copy the data from user
@@@ -3908,6 -3913,8 +3908,8 @@@ out_handle_response
  
  out_free_sglist:
        pmcraid_release_passthrough_ioadls(cmd, request_size, direction);
+ out_free_cmd:
        pmcraid_return_cmd(cmd);
  
  out_free_buffer:
@@@ -6018,8 -6025,10 +6020,10 @@@ static int __init pmcraid_init(void
  
        error = pmcraid_netlink_init();
  
-       if (error)
+       if (error) {
+               class_destroy(pmcraid_class);
                goto out_unreg_chrdev;
+       }
  
        error = pci_register_driver(&pmcraid_driver);
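The pmcraid changes above are mostly error-leg hygiene: the ioctl passthrough path gains an out_free_cmd label so the command is returned before the buffer is freed, and pmcraid_init() now destroys the freshly created class when netlink registration fails. A minimal user-space sketch of that goto-based unwind ordering follows; the resource names are invented for illustration and only the cleanup ordering mirrors the hunks.

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-ins for the chrdev, class and netlink resources that
 * the driver's init path sets up; only the unwind ordering matters here. */
static void *chrdev_reg(void)        { return malloc(1); }
static void chrdev_unreg(void *p)    { free(p); }
static void *class_create_x(void)    { return malloc(1); }
static void class_destroy_x(void *p) { free(p); }
static void *netlink_init(void)      { return NULL; /* simulate failure */ }

static int init_sketch(void)
{
	void *chrdev, *class, *nl;
	int err = -ENOMEM;

	chrdev = chrdev_reg();
	if (!chrdev)
		goto out;

	class = class_create_x();
	if (!class)
		goto out_unreg_chrdev;

	nl = netlink_init();
	if (!nl) {
		/* As in the hunk: tear down the class before jumping to the
		 * shared chrdev-unregistration label. */
		class_destroy_x(class);
		goto out_unreg_chrdev;
	}
	return 0;

out_unreg_chrdev:
	chrdev_unreg(chrdev);
out:
	return err;
}

int main(void)
{
	printf("init_sketch() = %d\n", init_sketch());
	return 0;
}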
  
diff --combined drivers/scsi/scsi_lib.c
index 9a8ccff1121fb02638a4ef70b355546c2ca7a4dd,0f81add6025ed1b9f38dbe9fa9af1e3b0fd70062..c35b6de4ca643297d1908341421c865c2cb93e84
@@@ -86,8 -86,10 +86,8 @@@ scsi_set_blocked(struct scsi_cmnd *cmd
  static void scsi_mq_requeue_cmd(struct scsi_cmnd *cmd)
  {
        struct scsi_device *sdev = cmd->device;
 -      struct request_queue *q = cmd->request->q;
  
 -      blk_mq_requeue_request(cmd->request);
 -      blk_mq_kick_requeue_list(q);
 +      blk_mq_requeue_request(cmd->request, true);
        put_device(&sdev->sdev_gendev);
  }
  
@@@ -161,11 -163,26 +161,11 @@@ void scsi_queue_insert(struct scsi_cmn
  {
        __scsi_queue_insert(cmd, reason, 1);
  }
 -/**
 - * scsi_execute - insert request and wait for the result
 - * @sdev:     scsi device
 - * @cmd:      scsi command
 - * @data_direction: data direction
 - * @buffer:   data buffer
 - * @bufflen:  len of buffer
 - * @sense:    optional sense buffer
 - * @timeout:  request timeout in seconds
 - * @retries:  number of times to retry request
 - * @flags:    or into request flags;
 - * @resid:    optional residual length
 - *
 - * returns the req->errors value which is the scsi_cmnd result
 - * field.
 - */
 -int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
 +
 +static int __scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
                 int data_direction, void *buffer, unsigned bufflen,
                 unsigned char *sense, int timeout, int retries, u64 flags,
 -               int *resid)
 +               req_flags_t rq_flags, int *resid)
  {
        struct request *req;
        int write = (data_direction == DMA_TO_DEVICE);
        req->sense_len = 0;
        req->retries = retries;
        req->timeout = timeout;
 -      req->cmd_flags |= flags | REQ_QUIET | REQ_PREEMPT;
 +      req->cmd_flags |= flags;
 +      req->rq_flags |= rq_flags | RQF_QUIET | RQF_PREEMPT;
  
        /*
         * head injection *required* here otherwise quiesce won't work
  
        return ret;
  }
 +
 +/**
 + * scsi_execute - insert request and wait for the result
 + * @sdev:     scsi device
 + * @cmd:      scsi command
 + * @data_direction: data direction
 + * @buffer:   data buffer
 + * @bufflen:  len of buffer
 + * @sense:    optional sense buffer
 + * @timeout:  request timeout in seconds
 + * @retries:  number of times to retry request
 + * @flags:    or into request flags;
 + * @resid:    optional residual length
 + *
 + * returns the req->errors value which is the scsi_cmnd result
 + * field.
 + */
 +int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
 +               int data_direction, void *buffer, unsigned bufflen,
 +               unsigned char *sense, int timeout, int retries, u64 flags,
 +               int *resid)
 +{
 +      return __scsi_execute(sdev, cmd, data_direction, buffer, bufflen, sense,
 +                      timeout, retries, flags, 0, resid);
 +}
  EXPORT_SYMBOL(scsi_execute);
  
  int scsi_execute_req_flags(struct scsi_device *sdev, const unsigned char *cmd,
                     int data_direction, void *buffer, unsigned bufflen,
                     struct scsi_sense_hdr *sshdr, int timeout, int retries,
 -                   int *resid, u64 flags)
 +                   int *resid, u64 flags, req_flags_t rq_flags)
  {
        char *sense = NULL;
        int result;
                if (!sense)
                        return DRIVER_ERROR << 24;
        }
 -      result = scsi_execute(sdev, cmd, data_direction, buffer, bufflen,
 -                            sense, timeout, retries, flags, resid);
 +      result = __scsi_execute(sdev, cmd, data_direction, buffer, bufflen,
 +                            sense, timeout, retries, flags, rq_flags, resid);
        if (sshdr)
                scsi_normalize_sense(sense, SCSI_SENSE_BUFFERSIZE, sshdr);
  
@@@ -822,7 -813,7 +822,7 @@@ void scsi_io_completion(struct scsi_cmn
                 */
                if ((sshdr.asc == 0x0) && (sshdr.ascq == 0x1d))
                        ;
 -              else if (!(req->cmd_flags & REQ_QUIET))
 +              else if (!(req->rq_flags & RQF_QUIET))
                        scsi_print_sense(cmd);
                result = 0;
                /* BLOCK_PC may have set error */
        switch (action) {
        case ACTION_FAIL:
                /* Give up and fail the remainder of the request */
 -              if (!(req->cmd_flags & REQ_QUIET)) {
 +              if (!(req->rq_flags & RQF_QUIET)) {
                        static DEFINE_RATELIMIT_STATE(_rs,
                                        DEFAULT_RATELIMIT_INTERVAL,
                                        DEFAULT_RATELIMIT_BURST);
                 * A new command will be prepared and issued.
                 */
                if (q->mq_ops) {
 -                      cmd->request->cmd_flags &= ~REQ_DONTPREP;
 +                      cmd->request->rq_flags &= ~RQF_DONTPREP;
                        scsi_mq_uninit_cmd(cmd);
                        scsi_mq_requeue_cmd(cmd);
                } else {
@@@ -1007,8 -998,8 +1007,8 @@@ static int scsi_init_sgtable(struct req
        /*
         * If sg table allocation fails, requeue request later.
         */
 -      if (unlikely(sg_alloc_table_chained(&sdb->table, req->nr_phys_segments,
 -                                      sdb->table.sgl)))
 +      if (unlikely(sg_alloc_table_chained(&sdb->table,
 +                      blk_rq_nr_phys_segments(req), sdb->table.sgl)))
                return BLKPREP_DEFER;
  
        /* 
@@@ -1040,7 -1031,7 +1040,7 @@@ int scsi_init_io(struct scsi_cmnd *cmd
        bool is_mq = (rq->mq_ctx != NULL);
        int error;
  
 -      BUG_ON(!rq->nr_phys_segments);
 +      BUG_ON(!blk_rq_nr_phys_segments(rq));
  
        error = scsi_init_sgtable(rq, &cmd->sdb);
        if (error)
@@@ -1243,7 -1234,7 +1243,7 @@@ scsi_prep_state_check(struct scsi_devic
                        /*
                         * If the devices is blocked we defer normal commands.
                         */
 -                      if (!(req->cmd_flags & REQ_PREEMPT))
 +                      if (!(req->rq_flags & RQF_PREEMPT))
                                ret = BLKPREP_DEFER;
                        break;
                default:
                         * special commands.  In particular any user initiated
                         * command is not allowed.
                         */
 -                      if (!(req->cmd_flags & REQ_PREEMPT))
 +                      if (!(req->rq_flags & RQF_PREEMPT))
                                ret = BLKPREP_KILL;
                        break;
                }
@@@ -1288,7 -1279,7 +1288,7 @@@ scsi_prep_return(struct request_queue *
                        blk_delay_queue(q, SCSI_QUEUE_DELAY);
                break;
        default:
 -              req->cmd_flags |= REQ_DONTPREP;
 +              req->rq_flags |= RQF_DONTPREP;
        }
  
        return ret;
@@@ -1745,7 -1736,7 +1745,7 @@@ static void scsi_request_fn(struct requ
                 * we add the dev to the starved list so it eventually gets
                 * a run when a tag is freed.
                 */
 -              if (blk_queue_tagged(q) && !(req->cmd_flags & REQ_QUEUED)) {
 +              if (blk_queue_tagged(q) && !(req->rq_flags & RQF_QUEUED)) {
                        spin_lock_irq(shost->host_lock);
                        if (list_empty(&sdev->starved_entry))
                                list_add_tail(&sdev->starved_entry,
@@@ -1810,7 -1801,7 +1810,7 @@@ static inline int prep_to_mq(int ret
  {
        switch (ret) {
        case BLKPREP_OK:
 -              return 0;
 +              return BLK_MQ_RQ_QUEUE_OK;
        case BLKPREP_DEFER:
                return BLK_MQ_RQ_QUEUE_BUSY;
        default:
@@@ -1897,7 -1888,7 +1897,7 @@@ static int scsi_queue_rq(struct blk_mq_
        int reason;
  
        ret = prep_to_mq(scsi_prep_state_check(sdev, req));
 -      if (ret)
 +      if (ret != BLK_MQ_RQ_QUEUE_OK)
                goto out;
  
        ret = BLK_MQ_RQ_QUEUE_BUSY;
                goto out_dec_target_busy;
  
  
 -      if (!(req->cmd_flags & REQ_DONTPREP)) {
 +      if (!(req->rq_flags & RQF_DONTPREP)) {
                ret = prep_to_mq(scsi_mq_prep_fn(req));
 -              if (ret)
 +              if (ret != BLK_MQ_RQ_QUEUE_OK)
                        goto out_dec_host_busy;
 -              req->cmd_flags |= REQ_DONTPREP;
 +              req->rq_flags |= RQF_DONTPREP;
        } else {
                blk_mq_start_request(req);
        }
@@@ -1950,6 -1941,7 +1950,6 @@@ out_put_device
  out:
        switch (ret) {
        case BLK_MQ_RQ_QUEUE_BUSY:
 -              blk_mq_stop_hw_queue(hctx);
                if (atomic_read(&sdev->device_busy) == 0 &&
                    !scsi_device_blocked(sdev))
                        blk_mq_delay_queue(hctx, SCSI_QUEUE_DELAY);
                 * we hit an error, as we will never see this command
                 * again.
                 */
 -              if (req->cmd_flags & REQ_DONTPREP)
 +              if (req->rq_flags & RQF_DONTPREP)
                        scsi_mq_uninit_cmd(cmd);
                break;
        default:
@@@ -1998,6 -1990,15 +1998,15 @@@ static void scsi_exit_request(void *dat
        kfree(cmd->sense_buffer);
  }
  
+ static int scsi_map_queues(struct blk_mq_tag_set *set)
+ {
+       struct Scsi_Host *shost = container_of(set, struct Scsi_Host, tag_set);
+       if (shost->hostt->map_queues)
+               return shost->hostt->map_queues(shost);
+       return blk_mq_map_queues(set);
+ }
  static u64 scsi_calculate_bounce_limit(struct Scsi_Host *shost)
  {
        struct device *host_dev;
@@@ -2090,6 -2091,7 +2099,7 @@@ static struct blk_mq_ops scsi_mq_ops = 
        .timeout        = scsi_timeout,
        .init_request   = scsi_init_request,
        .exit_request   = scsi_exit_request,
+       .map_queues     = scsi_map_queues,
  };
  
  struct request_queue *scsi_mq_alloc_queue(struct scsi_device *sdev)
@@@ -2731,6 -2733,39 +2741,39 @@@ void sdev_evt_send_simple(struct scsi_d
  }
  EXPORT_SYMBOL_GPL(sdev_evt_send_simple);
  
+ /**
+  * scsi_request_fn_active() - number of kernel threads inside scsi_request_fn()
+  * @sdev: SCSI device to count the number of scsi_request_fn() callers for.
+  */
+ static int scsi_request_fn_active(struct scsi_device *sdev)
+ {
+       struct request_queue *q = sdev->request_queue;
+       int request_fn_active;
+       WARN_ON_ONCE(sdev->host->use_blk_mq);
+       spin_lock_irq(q->queue_lock);
+       request_fn_active = q->request_fn_active;
+       spin_unlock_irq(q->queue_lock);
+       return request_fn_active;
+ }
+ /**
+  * scsi_wait_for_queuecommand() - wait for ongoing queuecommand() calls
+  * @sdev: SCSI device pointer.
+  *
+  * Wait until the ongoing shost->hostt->queuecommand() calls that are
+  * invoked from scsi_request_fn() have finished.
+  */
+ static void scsi_wait_for_queuecommand(struct scsi_device *sdev)
+ {
+       WARN_ON_ONCE(sdev->host->use_blk_mq);
+       while (scsi_request_fn_active(sdev))
+               msleep(20);
+ }
  /**
   *    scsi_device_quiesce - Block user issued commands.
   *    @sdev:  scsi device to quiesce.
@@@ -2815,8 -2850,7 +2858,7 @@@ EXPORT_SYMBOL(scsi_target_resume)
   * @sdev:     device to block
   *
   * Block request made by scsi lld's to temporarily stop all
-  * scsi commands on the specified device.  Called from interrupt
-  * or normal process context.
+  * scsi commands on the specified device. May sleep.
   *
   * Returns zero if successful or error if not
   *
   *    (which must be a legal transition).  When the device is in this
   *    state, all commands are deferred until the scsi lld reenables
   *    the device with scsi_device_unblock or device_block_tmo fires.
+  *
+  * To do: avoid that scsi_send_eh_cmnd() calls queuecommand() after
+  * scsi_internal_device_block() has blocked a SCSI device and also
+  * remove the rport mutex lock and unlock calls from srp_queuecommand().
   */
  int
  scsi_internal_device_block(struct scsi_device *sdev)
                spin_lock_irqsave(q->queue_lock, flags);
                blk_stop_queue(q);
                spin_unlock_irqrestore(q->queue_lock, flags);
+               scsi_wait_for_queuecommand(sdev);
        }
  
        return 0;
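The scsi_lib.c portion of the merge splits the old request flags into cmd_flags and rq_flags, and funnels both scsi_execute() and scsi_execute_req_flags() through a private __scsi_execute() that takes the extra req_flags_t argument. The sketch below shows that thin-wrapper pattern in isolation; the types and flag values are simplified stand-ins, not the real block-layer definitions.

#include <stdint.h>
#include <stdio.h>

/* Simplified stand-ins for the u64 command flags and req_flags_t. */
typedef uint64_t cmd_flags_t;
typedef uint32_t rq_flags_t;

#define RQF_QUIET   (1u << 0)
#define RQF_PREEMPT (1u << 1)

/* Full-featured core helper: every caller is routed through here. */
static int __execute(const char *cmd, cmd_flags_t flags, rq_flags_t rq_flags)
{
	rq_flags |= RQF_QUIET | RQF_PREEMPT;	/* always set, as in the hunk */
	printf("%s: cmd_flags=%#llx rq_flags=%#x\n",
	       cmd, (unsigned long long)flags, (unsigned)rq_flags);
	return 0;
}

/* Thin wrapper keeping the historical signature (no rq_flags argument). */
static int execute(const char *cmd, cmd_flags_t flags)
{
	return __execute(cmd, flags, 0);
}

int main(void)
{
	execute("TEST UNIT READY", 0);
	__execute("START STOP UNIT", 0, 1u << 2 /* e.g. an RQF_PM-like flag */);
	return 0;
}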
diff --combined drivers/scsi/sd.c
index 079c2d9759fb8a98c5283eea53b01b9ced80662e,b4933afe08a155f7e7d0fcb1e1e19e3d8ef18caa..1622e23138e0f430d77fe7db10c840941ef02aef
@@@ -93,7 -93,6 +93,7 @@@ MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK15
  MODULE_ALIAS_SCSI_DEVICE(TYPE_DISK);
  MODULE_ALIAS_SCSI_DEVICE(TYPE_MOD);
  MODULE_ALIAS_SCSI_DEVICE(TYPE_RBC);
 +MODULE_ALIAS_SCSI_DEVICE(TYPE_ZBC);
  
  #if !defined(CONFIG_DEBUG_BLOCK_EXT_DEVT)
  #define SD_MINORS     16
@@@ -164,7 -163,7 +164,7 @@@ cache_type_store(struct device *dev, st
        static const char temp[] = "temporary ";
        int len;
  
 -      if (sdp->type != TYPE_DISK)
 +      if (sdp->type != TYPE_DISK && sdp->type != TYPE_ZBC)
                /* no cache control on RBC devices; theoretically they
                 * can do it, but there's probably so many exceptions
                 * it's not worth the risk */
@@@ -263,7 -262,7 +263,7 @@@ allow_restart_store(struct device *dev
        if (!capable(CAP_SYS_ADMIN))
                return -EACCES;
  
 -      if (sdp->type != TYPE_DISK)
 +      if (sdp->type != TYPE_DISK && sdp->type != TYPE_ZBC)
                return -EINVAL;
  
        sdp->allow_restart = simple_strtoul(buf, NULL, 10);
@@@ -393,11 -392,6 +393,11 @@@ provisioning_mode_store(struct device *
        if (!capable(CAP_SYS_ADMIN))
                return -EACCES;
  
 +      if (sd_is_zoned(sdkp)) {
 +              sd_config_discard(sdkp, SD_LBP_DISABLE);
 +              return count;
 +      }
 +
        if (sdp->type != TYPE_DISK)
                return -EINVAL;
  
@@@ -465,7 -459,7 +465,7 @@@ max_write_same_blocks_store(struct devi
        if (!capable(CAP_SYS_ADMIN))
                return -EACCES;
  
 -      if (sdp->type != TYPE_DISK)
 +      if (sdp->type != TYPE_DISK && sdp->type != TYPE_ZBC)
                return -EINVAL;
  
        err = kstrtoul(buf, 10, &max);
@@@ -716,6 -710,7 +716,6 @@@ static int sd_setup_discard_cmnd(struc
        struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
        sector_t sector = blk_rq_pos(rq);
        unsigned int nr_sectors = blk_rq_sectors(rq);
 -      unsigned int nr_bytes = blk_rq_bytes(rq);
        unsigned int len;
        int ret;
        char *buf;
                goto out;
        }
  
 -      rq->completion_data = page;
        rq->timeout = SD_TIMEOUT;
  
        cmd->transfersize = len;
        cmd->allowed = SD_MAX_RETRIES;
  
 -      /*
 -       * Initially __data_len is set to the amount of data that needs to be
 -       * transferred to the target. This amount depends on whether WRITE SAME
 -       * or UNMAP is being used. After the scatterlist has been mapped by
 -       * scsi_init_io() we set __data_len to the size of the area to be
 -       * discarded on disk. This allows us to report completion on the full
 -       * amount of blocks described by the request.
 -       */
 -      blk_add_request_payload(rq, page, 0, len);
 -      ret = scsi_init_io(cmd);
 -      rq->__data_len = nr_bytes;
 +      rq->special_vec.bv_page = page;
 +      rq->special_vec.bv_offset = 0;
 +      rq->special_vec.bv_len = len;
 +
 +      rq->rq_flags |= RQF_SPECIAL_PAYLOAD;
 +      rq->resid_len = len;
  
 +      ret = scsi_init_io(cmd);
  out:
        if (ret != BLKPREP_OK)
                __free_page(page);
@@@ -844,12 -844,6 +844,12 @@@ static int sd_setup_write_same_cmnd(str
  
        BUG_ON(bio_offset(bio) || bio_iovec(bio).bv_len != sdp->sector_size);
  
 +      if (sd_is_zoned(sdkp)) {
 +              ret = sd_zbc_setup_write_cmnd(cmd);
 +              if (ret != BLKPREP_OK)
 +                      return ret;
 +      }
 +
        sector >>= ilog2(sdp->sector_size) - 9;
        nr_sectors >>= ilog2(sdp->sector_size) - 9;
  
@@@ -907,25 -901,19 +907,25 @@@ static int sd_setup_read_write_cmnd(str
        struct request *rq = SCpnt->request;
        struct scsi_device *sdp = SCpnt->device;
        struct gendisk *disk = rq->rq_disk;
 -      struct scsi_disk *sdkp;
 +      struct scsi_disk *sdkp = scsi_disk(disk);
        sector_t block = blk_rq_pos(rq);
        sector_t threshold;
        unsigned int this_count = blk_rq_sectors(rq);
        unsigned int dif, dix;
 +      bool zoned_write = sd_is_zoned(sdkp) && rq_data_dir(rq) == WRITE;
        int ret;
        unsigned char protect;
  
 +      if (zoned_write) {
 +              ret = sd_zbc_setup_write_cmnd(SCpnt);
 +              if (ret != BLKPREP_OK)
 +                      return ret;
 +      }
 +
        ret = scsi_init_io(SCpnt);
        if (ret != BLKPREP_OK)
                goto out;
        SCpnt = rq->special;
 -      sdkp = scsi_disk(disk);
  
        /* from here on until we're complete, any goto out
         * is used for a killable error condition */
        } else if (rq_data_dir(rq) == READ) {
                SCpnt->cmnd[0] = READ_6;
        } else {
 -              scmd_printk(KERN_ERR, SCpnt, "Unknown command %llu,%llx\n",
 -                          req_op(rq), (unsigned long long) rq->cmd_flags);
 +              scmd_printk(KERN_ERR, SCpnt, "Unknown command %d\n", req_op(rq));
                goto out;
        }
  
         */
        ret = BLKPREP_OK;
   out:
 +      if (zoned_write && ret != BLKPREP_OK)
 +              sd_zbc_cancel_write_cmnd(SCpnt);
 +
        return ret;
  }
  
@@@ -1163,10 -1149,6 +1163,10 @@@ static int sd_init_command(struct scsi_
        case REQ_OP_READ:
        case REQ_OP_WRITE:
                return sd_setup_read_write_cmnd(cmd);
 +      case REQ_OP_ZONE_REPORT:
 +              return sd_zbc_setup_report_cmnd(cmd);
 +      case REQ_OP_ZONE_RESET:
 +              return sd_zbc_setup_reset_cmnd(cmd);
        default:
                BUG();
        }
@@@ -1176,8 -1158,8 +1176,8 @@@ static void sd_uninit_command(struct sc
  {
        struct request *rq = SCpnt->request;
  
 -      if (req_op(rq) == REQ_OP_DISCARD)
 -              __free_page(rq->completion_data);
 +      if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
 +              __free_page(rq->special_vec.bv_page);
  
        if (SCpnt->cmnd != rq->cmd) {
                mempool_free(SCpnt->cmnd, sd_cdb_pool);
@@@ -1513,7 -1495,7 +1513,7 @@@ static int sd_sync_cache(struct scsi_di
                 */
                res = scsi_execute_req_flags(sdp, cmd, DMA_NONE, NULL, 0,
                                             &sshdr, timeout, SD_MAX_RETRIES,
 -                                           NULL, REQ_PM);
 +                                           NULL, 0, RQF_PM);
                if (res == 0)
                        break;
        }
@@@ -1798,10 -1780,7 +1798,10 @@@ static int sd_done(struct scsi_cmnd *SC
        unsigned char op = SCpnt->cmnd[0];
        unsigned char unmap = SCpnt->cmnd[1] & 8;
  
 -      if (req_op(req) == REQ_OP_DISCARD || req_op(req) == REQ_OP_WRITE_SAME) {
 +      switch (req_op(req)) {
 +      case REQ_OP_DISCARD:
 +      case REQ_OP_WRITE_SAME:
 +      case REQ_OP_ZONE_RESET:
                if (!result) {
                        good_bytes = blk_rq_bytes(req);
                        scsi_set_resid(SCpnt, 0);
                        good_bytes = 0;
                        scsi_set_resid(SCpnt, blk_rq_bytes(req));
                }
 +              break;
 +      case REQ_OP_ZONE_REPORT:
 +              if (!result) {
 +                      good_bytes = scsi_bufflen(SCpnt)
 +                              - scsi_get_resid(SCpnt);
 +                      scsi_set_resid(SCpnt, 0);
 +              } else {
 +                      good_bytes = 0;
 +                      scsi_set_resid(SCpnt, blk_rq_bytes(req));
 +              }
 +              break;
        }
  
        if (result) {
  
                                        good_bytes = 0;
                                        req->__data_len = blk_rq_bytes(req);
 -                                      req->cmd_flags |= REQ_QUIET;
 +                                      req->rq_flags |= RQF_QUIET;
                                }
                        }
                }
        default:
                break;
        }
 +
   out:
 +      if (sd_is_zoned(sdkp))
 +              sd_zbc_complete(SCpnt, good_bytes, &sshdr);
 +
        SCSI_LOG_HLCOMPLETE(1, scmd_printk(KERN_INFO, SCpnt,
                                           "sd_done: completed %d of %d bytes\n",
                                           good_bytes, scsi_bufflen(SCpnt)));
@@@ -2019,6 -1983,7 +2019,6 @@@ sd_spinup_disk(struct scsi_disk *sdkp
        }
  }
  
 -
  /*
   * Determine whether disk supports Data Integrity Field.
   */
@@@ -2168,9 -2133,6 +2168,9 @@@ static int read_capacity_16(struct scsi
        /* Logical blocks per physical block exponent */
        sdkp->physical_block_size = (1 << (buffer[13] & 0xf)) * sector_size;
  
 +      /* RC basis */
 +      sdkp->rc_basis = (buffer[12] >> 4) & 0x3;
 +
        /* Lowest aligned logical block */
        alignment = ((buffer[14] & 0x3f) << 8 | buffer[15]) * sector_size;
        blk_queue_alignment_offset(sdp->request_queue, alignment);
@@@ -2280,6 -2242,7 +2280,6 @@@ sd_read_capacity(struct scsi_disk *sdkp
  {
        int sector_size;
        struct scsi_device *sdp = sdkp->device;
 -      sector_t old_capacity = sdkp->capacity;
  
        if (sd_try_rc16_first(sdp)) {
                sector_size = read_capacity_16(sdkp, sdp, buffer);
@@@ -2360,44 -2323,35 +2360,44 @@@ got_data
                sector_size = 512;
        }
        blk_queue_logical_block_size(sdp->request_queue, sector_size);
 +      blk_queue_physical_block_size(sdp->request_queue,
 +                                    sdkp->physical_block_size);
 +      sdkp->device->sector_size = sector_size;
  
 -      {
 -              char cap_str_2[10], cap_str_10[10];
 +      if (sdkp->capacity > 0xffffffff)
 +              sdp->use_16_for_rw = 1;
  
 -              string_get_size(sdkp->capacity, sector_size,
 -                              STRING_UNITS_2, cap_str_2, sizeof(cap_str_2));
 -              string_get_size(sdkp->capacity, sector_size,
 -                              STRING_UNITS_10, cap_str_10,
 -                              sizeof(cap_str_10));
 +}
  
 -              if (sdkp->first_scan || old_capacity != sdkp->capacity) {
 -                      sd_printk(KERN_NOTICE, sdkp,
 -                                "%llu %d-byte logical blocks: (%s/%s)\n",
 -                                (unsigned long long)sdkp->capacity,
 -                                sector_size, cap_str_10, cap_str_2);
 +/*
 + * Print disk capacity
 + */
 +static void
 +sd_print_capacity(struct scsi_disk *sdkp,
 +                sector_t old_capacity)
 +{
 +      int sector_size = sdkp->device->sector_size;
 +      char cap_str_2[10], cap_str_10[10];
  
 -                      if (sdkp->physical_block_size != sector_size)
 -                              sd_printk(KERN_NOTICE, sdkp,
 -                                        "%u-byte physical blocks\n",
 -                                        sdkp->physical_block_size);
 -              }
 -      }
 +      string_get_size(sdkp->capacity, sector_size,
 +                      STRING_UNITS_2, cap_str_2, sizeof(cap_str_2));
 +      string_get_size(sdkp->capacity, sector_size,
 +                      STRING_UNITS_10, cap_str_10,
 +                      sizeof(cap_str_10));
  
 -      if (sdkp->capacity > 0xffffffff)
 -              sdp->use_16_for_rw = 1;
 +      if (sdkp->first_scan || old_capacity != sdkp->capacity) {
 +              sd_printk(KERN_NOTICE, sdkp,
 +                        "%llu %d-byte logical blocks: (%s/%s)\n",
 +                        (unsigned long long)sdkp->capacity,
 +                        sector_size, cap_str_10, cap_str_2);
  
 -      blk_queue_physical_block_size(sdp->request_queue,
 -                                    sdkp->physical_block_size);
 -      sdkp->device->sector_size = sector_size;
 +              if (sdkp->physical_block_size != sector_size)
 +                      sd_printk(KERN_NOTICE, sdkp,
 +                                "%u-byte physical blocks\n",
 +                                sdkp->physical_block_size);
 +
 +              sd_zbc_print_zones(sdkp);
 +      }
  }
  
  /* called with buffer of length 512 */
@@@ -2465,9 -2419,7 +2465,7 @@@ sd_read_write_protect_flag(struct scsi_
                if (sdkp->first_scan || old_wp != sdkp->write_prot) {
                        sd_printk(KERN_NOTICE, sdkp, "Write Protect is %s\n",
                                  sdkp->write_prot ? "on" : "off");
-                       sd_printk(KERN_DEBUG, sdkp,
-                                 "Mode Sense: %02x %02x %02x %02x\n",
-                                 buffer[0], buffer[1], buffer[2], buffer[3]);
+                       sd_printk(KERN_DEBUG, sdkp, "Mode Sense: %4ph\n", buffer);
                }
        }
  }
@@@ -2659,7 -2611,7 +2657,7 @@@ static void sd_read_app_tag_own(struct 
        struct scsi_mode_data data;
        struct scsi_sense_hdr sshdr;
  
 -      if (sdp->type != TYPE_DISK)
 +      if (sdp->type != TYPE_DISK && sdp->type != TYPE_ZBC)
                return;
  
        if (sdkp->protection_type == 0)
@@@ -2766,7 -2718,6 +2764,7 @@@ static void sd_read_block_limits(struc
   */
  static void sd_read_block_characteristics(struct scsi_disk *sdkp)
  {
 +      struct request_queue *q = sdkp->disk->queue;
        unsigned char *buffer;
        u16 rot;
        const int vpd_len = 64;
        rot = get_unaligned_be16(&buffer[4]);
  
        if (rot == 1) {
 -              queue_flag_set_unlocked(QUEUE_FLAG_NONROT, sdkp->disk->queue);
 -              queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, sdkp->disk->queue);
 +              queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
 +              queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, q);
        }
  
 +      sdkp->zoned = (buffer[8] >> 4) & 3;
 +      if (sdkp->zoned == 1)
 +              q->limits.zoned = BLK_ZONED_HA;
 +      else if (sdkp->device->type == TYPE_ZBC)
 +              q->limits.zoned = BLK_ZONED_HM;
 +      else
 +              q->limits.zoned = BLK_ZONED_NONE;
 +      if (blk_queue_is_zoned(q) && sdkp->first_scan)
 +              sd_printk(KERN_NOTICE, sdkp, "Host-%s zoned block device\n",
 +                    q->limits.zoned == BLK_ZONED_HM ? "managed" : "aware");
 +
   out:
        kfree(buffer);
  }
@@@ -2867,7 -2807,6 +2865,7 @@@ static int sd_revalidate_disk(struct ge
        struct scsi_disk *sdkp = scsi_disk(disk);
        struct scsi_device *sdp = sdkp->device;
        struct request_queue *q = sdkp->disk->queue;
 +      sector_t old_capacity = sdkp->capacity;
        unsigned char *buffer;
        unsigned int dev_max, rw_max;
  
                        sd_read_block_provisioning(sdkp);
                        sd_read_block_limits(sdkp);
                        sd_read_block_characteristics(sdkp);
 +                      sd_zbc_read_zones(sdkp, buffer);
                }
  
 +              sd_print_capacity(sdkp, old_capacity);
 +
                sd_read_write_protect_flag(sdkp, buffer);
                sd_read_cache_type(sdkp, buffer);
                sd_read_app_tag_own(sdkp, buffer);
@@@ -3103,16 -3039,9 +3101,16 @@@ static int sd_probe(struct device *dev
  
        scsi_autopm_get_device(sdp);
        error = -ENODEV;
 -      if (sdp->type != TYPE_DISK && sdp->type != TYPE_MOD && sdp->type != TYPE_RBC)
 +      if (sdp->type != TYPE_DISK &&
 +          sdp->type != TYPE_ZBC &&
 +          sdp->type != TYPE_MOD &&
 +          sdp->type != TYPE_RBC)
                goto out;
  
 +#ifndef CONFIG_BLK_DEV_ZONED
 +      if (sdp->type == TYPE_ZBC)
 +              goto out;
 +#endif
        SCSI_LOG_HLQUEUE(3, sdev_printk(KERN_INFO, sdp,
                                        "sd_probe\n"));
  
@@@ -3216,8 -3145,6 +3214,8 @@@ static int sd_remove(struct device *dev
        del_gendisk(sdkp->disk);
        sd_shutdown(dev);
  
 +      sd_zbc_remove(sdkp);
 +
        blk_register_region(devt, SD_MINORS, NULL,
                            sd_default_probe, NULL, NULL);
  
@@@ -3271,7 -3198,7 +3269,7 @@@ static int sd_start_stop_device(struct 
                return -ENODEV;
  
        res = scsi_execute_req_flags(sdp, cmd, DMA_NONE, NULL, 0, &sshdr,
 -                             SD_TIMEOUT, SD_MAX_RETRIES, NULL, REQ_PM);
 +                             SD_TIMEOUT, SD_MAX_RETRIES, NULL, 0, RQF_PM);
        if (res) {
                sd_print_result(sdkp, "Start/Stop Unit failed", res);
                if (driver_byte(res) & DRIVER_SENSE)
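The sd.c hunks wire up zoned block devices: TYPE_ZBC is accepted at probe time, REQ_OP_ZONE_REPORT and REQ_OP_ZONE_RESET get setup and completion handling, and sd_read_block_characteristics() derives the zoned model from bits 5:4 of byte 8 of the Block Device Characteristics VPD page. A standalone sketch of that last decode step, assuming the same byte layout as the hunk:

#include <stdint.h>
#include <stdio.h>

enum zoned_model { ZONED_NONE, ZONED_HOST_AWARE, ZONED_HOST_MANAGED };

/*
 * Decode the ZONED field exactly as the hunk does:
 *     zoned = (buffer[8] >> 4) & 3;
 * A value of 01b means host-aware; a host-managed device is signalled by
 * the TYPE_ZBC peripheral device type rather than by this field.
 */
static enum zoned_model decode_zoned(const uint8_t *vpd_b1, int is_type_zbc)
{
	unsigned int zoned = (vpd_b1[8] >> 4) & 3;

	if (zoned == 1)
		return ZONED_HOST_AWARE;
	if (is_type_zbc)
		return ZONED_HOST_MANAGED;
	return ZONED_NONE;
}

int main(void)
{
	uint8_t vpd[64] = {0};

	vpd[8] = 1 << 4;				/* ZONED = 01b */
	printf("model=%d\n", decode_zoned(vpd, 0));	/* host-aware */
	vpd[8] = 0;
	printf("model=%d\n", decode_zoned(vpd, 1));	/* host-managed */
	return 0;
}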
diff --combined drivers/scsi/ufs/ufshcd.c
index cf549871c1ee147296cd03d1384bf37fb3db4534,0c75c75217f8f6ca73e3af39b1017d6e0aa87f00..ef8548c3a423d213dd04537753190b48f820cb83
@@@ -45,6 -45,8 +45,8 @@@
  #include "ufs_quirks.h"
  #include "unipro.h"
  
+ #define UFSHCD_REQ_SENSE_SIZE 18
  #define UFSHCD_ENABLE_INTRS   (UTP_TRANSFER_REQ_COMPL |\
                                 UTP_TASK_REQ_COMPL |\
                                 UFSHCD_ERROR_MASK)
  #define NOP_OUT_TIMEOUT    30 /* msecs */
  
  /* Query request retries */
- #define QUERY_REQ_RETRIES 10
+ #define QUERY_REQ_RETRIES 3
  /* Query request timeout */
- #define QUERY_REQ_TIMEOUT 30 /* msec */
- /*
-  * Query request timeout for fDeviceInit flag
-  * fDeviceInit query response time for some devices is too large that default
-  * QUERY_REQ_TIMEOUT may not be enough for such devices.
-  */
- #define QUERY_FDEVICEINIT_REQ_TIMEOUT 600 /* msec */
+ #define QUERY_REQ_TIMEOUT 1500 /* 1.5 seconds */
  
  /* Task management command timeout */
  #define TM_CMD_TIMEOUT        100 /* msecs */
@@@ -123,6 -119,7 +119,7 @@@ enum 
        UFSHCD_STATE_RESET,
        UFSHCD_STATE_ERROR,
        UFSHCD_STATE_OPERATIONAL,
+       UFSHCD_STATE_EH_SCHEDULED,
  };
  
  /* UFSHCD error handling flags */
@@@ -598,6 -595,20 +595,20 @@@ static bool ufshcd_is_unipro_pa_params_
                return false;
  }
  
+ static void ufshcd_suspend_clkscaling(struct ufs_hba *hba)
+ {
+       if (ufshcd_is_clkscaling_enabled(hba)) {
+               devfreq_suspend_device(hba->devfreq);
+               hba->clk_scaling.window_start_t = 0;
+       }
+ }
+ static void ufshcd_resume_clkscaling(struct ufs_hba *hba)
+ {
+       if (ufshcd_is_clkscaling_enabled(hba))
+               devfreq_resume_device(hba->devfreq);
+ }
  static void ufshcd_ungate_work(struct work_struct *work)
  {
        int ret;
                hba->clk_gating.is_suspended = false;
        }
  unblock_reqs:
-       if (ufshcd_is_clkscaling_enabled(hba))
-               devfreq_resume_device(hba->devfreq);
+       ufshcd_resume_clkscaling(hba);
        scsi_unblock_requests(hba->host);
  }
  
@@@ -660,6 -670,21 +670,21 @@@ int ufshcd_hold(struct ufs_hba *hba, bo
  start:
        switch (hba->clk_gating.state) {
        case CLKS_ON:
+               /*
+                * Wait for the ungate work to complete if in progress.
+                * Though the clocks may be in ON state, the link could
+                * still be in hibner8 state if hibern8 is allowed
+                * during clock gating.
+                * Make sure we exit hibern8 state also in addition to
+                * clocks being ON.
+                */
+               if (ufshcd_can_hibern8_during_gating(hba) &&
+                   ufshcd_is_link_hibern8(hba)) {
+                       spin_unlock_irqrestore(hba->host->host_lock, flags);
+                       flush_work(&hba->clk_gating.ungate_work);
+                       spin_lock_irqsave(hba->host->host_lock, flags);
+                       goto start;
+               }
                break;
        case REQ_CLKS_OFF:
                if (cancel_delayed_work(&hba->clk_gating.gate_work)) {
@@@ -709,7 -734,14 +734,14 @@@ static void ufshcd_gate_work(struct wor
        unsigned long flags;
  
        spin_lock_irqsave(hba->host->host_lock, flags);
-       if (hba->clk_gating.is_suspended) {
+       /*
+        * In case you are here to cancel this work the gating state
+        * would be marked as REQ_CLKS_ON. In this case save time by
+        * skipping the gating work and exit after changing the clock
+        * state to CLKS_ON.
+        */
+       if (hba->clk_gating.is_suspended ||
+               (hba->clk_gating.state == REQ_CLKS_ON)) {
                hba->clk_gating.state = CLKS_ON;
                goto rel_lock;
        }
                ufshcd_set_link_hibern8(hba);
        }
  
-       if (ufshcd_is_clkscaling_enabled(hba)) {
-               devfreq_suspend_device(hba->devfreq);
-               hba->clk_scaling.window_start_t = 0;
-       }
+       ufshcd_suspend_clkscaling(hba);
  
        if (!ufshcd_is_link_active(hba))
                ufshcd_setup_clocks(hba, false);
@@@ -878,6 -907,8 +907,8 @@@ void ufshcd_send_command(struct ufs_hb
        ufshcd_clk_scaling_start_busy(hba);
        __set_bit(task_tag, &hba->outstanding_reqs);
        ufshcd_writel(hba, 1 << task_tag, REG_UTP_TRANSFER_REQ_DOOR_BELL);
+       /* Make sure that doorbell is committed immediately */
+       wmb();
  }
  
  /**
@@@ -889,10 -920,14 +920,14 @@@ static inline void ufshcd_copy_sense_da
        int len;
        if (lrbp->sense_buffer &&
            ufshcd_get_rsp_upiu_data_seg_len(lrbp->ucd_rsp_ptr)) {
+               int len_to_copy;
                len = be16_to_cpu(lrbp->ucd_rsp_ptr->sr.sense_data_len);
+               len_to_copy = min_t(int, RESPONSE_UPIU_SENSE_DATA_LENGTH, len);
                memcpy(lrbp->sense_buffer,
                        lrbp->ucd_rsp_ptr->sr.sense_data,
-                       min_t(int, len, SCSI_SENSE_BUFFERSIZE));
+                       min_t(int, len_to_copy, UFSHCD_REQ_SENSE_SIZE));
        }
  }
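One small but easy-to-miss change in the hunk above: the copied sense length is now clamped twice, first to the UPIU sense-data field size and then to the driver's 18-byte UFSHCD_REQ_SENSE_SIZE buffer. A tiny standalone sketch of that double clamp (the sizes are illustrative; only the pattern matches the hunk):

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>
#include <string.h>

/* Illustrative sizes standing in for RESPONSE_UPIU_SENSE_DATA_LENGTH and
 * UFSHCD_REQ_SENSE_SIZE (18). */
#define UPIU_SENSE_FIELD_LEN 20
#define REQ_SENSE_SIZE       18

static size_t copy_sense(uint8_t *dst, const uint8_t *src, size_t reported_len)
{
	size_t len = reported_len;

	if (len > UPIU_SENSE_FIELD_LEN)		/* clamp to the UPIU field */
		len = UPIU_SENSE_FIELD_LEN;
	if (len > REQ_SENSE_SIZE)		/* clamp to the local buffer */
		len = REQ_SENSE_SIZE;
	memcpy(dst, src, len);
	return len;
}

int main(void)
{
	uint8_t src[32] = {0}, dst[REQ_SENSE_SIZE];

	printf("copied %zu bytes\n", copy_sense(dst, src, sizeof(src)));
	return 0;
}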
  
@@@ -1088,7 -1123,7 +1123,7 @@@ ufshcd_send_uic_cmd(struct ufs_hba *hba
   *
   * Returns 0 in case of success, non-zero value in case of failure
   */
- static int ufshcd_map_sg(struct ufshcd_lrb *lrbp)
+ static int ufshcd_map_sg(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
  {
        struct ufshcd_sg_entry *prd_table;
        struct scatterlist *sg;
                return sg_segments;
  
        if (sg_segments) {
-               lrbp->utr_descriptor_ptr->prd_table_length =
-                                       cpu_to_le16((u16) (sg_segments));
+               if (hba->quirks & UFSHCD_QUIRK_PRDT_BYTE_GRAN)
+                       lrbp->utr_descriptor_ptr->prd_table_length =
+                               cpu_to_le16((u16)(sg_segments *
+                                       sizeof(struct ufshcd_sg_entry)));
+               else
+                       lrbp->utr_descriptor_ptr->prd_table_length =
+                               cpu_to_le16((u16) (sg_segments));
  
                prd_table = (struct ufshcd_sg_entry *)lrbp->ucd_prdt_ptr;
  
@@@ -1410,6 -1450,7 +1450,7 @@@ static int ufshcd_queuecommand(struct S
        switch (hba->ufshcd_state) {
        case UFSHCD_STATE_OPERATIONAL:
                break;
+       case UFSHCD_STATE_EH_SCHEDULED:
        case UFSHCD_STATE_RESET:
                err = SCSI_MLQUEUE_HOST_BUSY;
                goto out_unlock;
  
        WARN_ON(lrbp->cmd);
        lrbp->cmd = cmd;
-       lrbp->sense_bufflen = SCSI_SENSE_BUFFERSIZE;
+       lrbp->sense_bufflen = UFSHCD_REQ_SENSE_SIZE;
        lrbp->sense_buffer = cmd->sense_buffer;
        lrbp->task_tag = tag;
        lrbp->lun = ufshcd_scsi_to_upiu_lun(cmd->device->lun);
  
        ufshcd_comp_scsi_upiu(hba, lrbp);
  
-       err = ufshcd_map_sg(lrbp);
+       err = ufshcd_map_sg(hba, lrbp);
        if (err) {
                lrbp->cmd = NULL;
                clear_bit_unlock(tag, &hba->lrb_in_use);
                goto out;
        }
+       /* Make sure descriptors are ready before ringing the doorbell */
+       wmb();
  
        /* issue command to the controller */
        spin_lock_irqsave(hba->host->host_lock, flags);
+       ufshcd_vops_setup_xfer_req(hba, tag, (lrbp->cmd ? true : false));
        ufshcd_send_command(hba, tag);
  out_unlock:
        spin_unlock_irqrestore(hba->host->host_lock, flags);
@@@ -1581,6 -1625,8 +1625,8 @@@ static int ufshcd_wait_for_dev_cmd(stru
        time_left = wait_for_completion_timeout(hba->dev_cmd.complete,
                        msecs_to_jiffies(max_timeout));
  
+       /* Make sure descriptors are ready before ringing the doorbell */
+       wmb();
        spin_lock_irqsave(hba->host->host_lock, flags);
        hba->dev_cmd.complete = NULL;
        if (likely(time_left)) {
@@@ -1683,6 -1729,7 +1729,7 @@@ static int ufshcd_exec_dev_cmd(struct u
        /* Make sure descriptors are ready before ringing the doorbell */
        wmb();
        spin_lock_irqsave(hba->host->host_lock, flags);
+       ufshcd_vops_setup_xfer_req(hba, tag, (lrbp->cmd ? true : false));
        ufshcd_send_command(hba, tag);
        spin_unlock_irqrestore(hba->host->host_lock, flags);
  
@@@ -1789,9 -1836,6 +1836,6 @@@ int ufshcd_query_flag(struct ufs_hba *h
                goto out_unlock;
        }
  
-       if (idn == QUERY_FLAG_IDN_FDEVICEINIT)
-               timeout = QUERY_FDEVICEINIT_REQ_TIMEOUT;
        err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, timeout);
  
        if (err) {
@@@ -1861,8 -1905,8 +1905,8 @@@ static int ufshcd_query_attr(struct ufs
        err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);
  
        if (err) {
-               dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, err = %d\n",
-                               __func__, opcode, idn, err);
+               dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, index %d, err = %d\n",
+                               __func__, opcode, idn, index, err);
                goto out_unlock;
        }
  
@@@ -1961,8 -2005,8 +2005,8 @@@ static int __ufshcd_query_descriptor(st
        err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);
  
        if (err) {
-               dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, err = %d\n",
-                               __func__, opcode, idn, err);
+               dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, index %d, err = %d\n",
+                               __func__, opcode, idn, index, err);
                goto out_unlock;
        }
  
@@@ -2055,18 -2099,41 +2099,41 @@@ static int ufshcd_read_desc_param(struc
                                        desc_id, desc_index, 0, desc_buf,
                                        &buff_len);
  
-       if (ret || (buff_len < ufs_query_desc_max_size[desc_id]) ||
-           (desc_buf[QUERY_DESC_LENGTH_OFFSET] !=
-            ufs_query_desc_max_size[desc_id])
-           || (desc_buf[QUERY_DESC_DESC_TYPE_OFFSET] != desc_id)) {
-               dev_err(hba->dev, "%s: Failed reading descriptor. desc_id %d param_offset %d buff_len %d ret %d",
-                       __func__, desc_id, param_offset, buff_len, ret);
-               if (!ret)
-                       ret = -EINVAL;
+       if (ret) {
+               dev_err(hba->dev, "%s: Failed reading descriptor. desc_id %d, desc_index %d, param_offset %d, ret %d",
+                       __func__, desc_id, desc_index, param_offset, ret);
  
                goto out;
        }
  
+       /* Sanity check */
+       if (desc_buf[QUERY_DESC_DESC_TYPE_OFFSET] != desc_id) {
+               dev_err(hba->dev, "%s: invalid desc_id %d in descriptor header",
+                       __func__, desc_buf[QUERY_DESC_DESC_TYPE_OFFSET]);
+               ret = -EINVAL;
+               goto out;
+       }
+       /*
+        * While reading variable size descriptors (like string descriptor),
+        * some UFS devices may report the "LENGTH" (field in "Transaction
+        * Specific fields" of Query Response UPIU) same as what was requested
+        * in Query Request UPIU instead of reporting the actual size of the
+        * variable size descriptor.
+        * Although it's safe to ignore the "LENGTH" field for variable size
+        * descriptors as we can always derive the length of the descriptor from
+        * the descriptor header fields. Hence this change impose the length
+        * match check only for fixed size descriptors (for which we always
+        * request the correct size as part of Query Request UPIU).
+        */
+       if ((desc_id != QUERY_DESC_IDN_STRING) &&
+           (buff_len != desc_buf[QUERY_DESC_LENGTH_OFFSET])) {
+               dev_err(hba->dev, "%s: desc_buf length mismatch: buff_len %d, buff_len(desc_header) %d",
+                       __func__, buff_len, desc_buf[QUERY_DESC_LENGTH_OFFSET]);
+               ret = -EINVAL;
+               goto out;
+       }
        if (is_kmalloc)
                memcpy(param_read_buf, &desc_buf[param_offset], param_size);
  out:
@@@ -2088,7 -2155,18 +2155,18 @@@ static inline int ufshcd_read_power_des
                                         u8 *buf,
                                         u32 size)
  {
-       return ufshcd_read_desc(hba, QUERY_DESC_IDN_POWER, 0, buf, size);
+       int err = 0;
+       int retries;
+       for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
+               /* Read descriptor*/
+               err = ufshcd_read_desc(hba, QUERY_DESC_IDN_POWER, 0, buf, size);
+               if (!err)
+                       break;
+               dev_dbg(hba->dev, "%s: error %d retrying\n", __func__, err);
+       }
+       return err;
  }
  
  int ufshcd_read_device_desc(struct ufs_hba *hba, u8 *buf, u32 size)
@@@ -2320,12 -2398,21 +2398,21 @@@ static void ufshcd_host_memory_configur
                                cpu_to_le32(upper_32_bits(cmd_desc_element_addr));
  
                /* Response upiu and prdt offset should be in double words */
-               utrdlp[i].response_upiu_offset =
+               if (hba->quirks & UFSHCD_QUIRK_PRDT_BYTE_GRAN) {
+                       utrdlp[i].response_upiu_offset =
+                               cpu_to_le16(response_offset);
+                       utrdlp[i].prd_table_offset =
+                               cpu_to_le16(prdt_offset);
+                       utrdlp[i].response_upiu_length =
+                               cpu_to_le16(ALIGNED_UPIU_SIZE);
+               } else {
+                       utrdlp[i].response_upiu_offset =
                                cpu_to_le16((response_offset >> 2));
-               utrdlp[i].prd_table_offset =
+                       utrdlp[i].prd_table_offset =
                                cpu_to_le16((prdt_offset >> 2));
-               utrdlp[i].response_upiu_length =
+                       utrdlp[i].response_upiu_length =
                                cpu_to_le16(ALIGNED_UPIU_SIZE >> 2);
+               }
  
                hba->lrb[i].utr_descriptor_ptr = (utrdlp + i);
                hba->lrb[i].ucd_req_ptr =
@@@ -2429,10 -2516,10 +2516,10 @@@ int ufshcd_dme_set_attr(struct ufs_hba 
                                set, UIC_GET_ATTR_ID(attr_sel), mib_val, ret);
        } while (ret && peer && --retries);
  
-       if (!retries)
+       if (ret)
                dev_err(hba->dev, "%s: attr-id 0x%x val 0x%x failed %d retries\n",
-                               set, UIC_GET_ATTR_ID(attr_sel), mib_val,
-                               retries);
+                       set, UIC_GET_ATTR_ID(attr_sel), mib_val,
+                       UFS_UIC_COMMAND_RETRIES - retries);
  
        return ret;
  }
@@@ -2496,9 -2583,10 +2583,10 @@@ int ufshcd_dme_get_attr(struct ufs_hba 
                                get, UIC_GET_ATTR_ID(attr_sel), ret);
        } while (ret && peer && --retries);
  
-       if (!retries)
+       if (ret)
                dev_err(hba->dev, "%s: attr-id 0x%x failed %d retries\n",
-                               get, UIC_GET_ATTR_ID(attr_sel), retries);
+                       get, UIC_GET_ATTR_ID(attr_sel),
+                       UFS_UIC_COMMAND_RETRIES - retries);
  
        if (mib_val && !ret)
                *mib_val = uic_cmd.argument3;
@@@ -2651,6 -2739,8 +2739,8 @@@ static int __ufshcd_uic_hibern8_enter(s
        int ret;
        struct uic_command uic_cmd = {0};
  
+       ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_ENTER, PRE_CHANGE);
        uic_cmd.command = UIC_CMD_DME_HIBER_ENTER;
        ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
  
                 */
                if (ufshcd_link_recovery(hba))
                        ret = -ENOLINK;
-       }
+       } else
+               ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_ENTER,
+                                                               POST_CHANGE);
  
        return ret;
  }
@@@ -2687,13 -2779,17 +2779,17 @@@ static int ufshcd_uic_hibern8_exit(stru
        struct uic_command uic_cmd = {0};
        int ret;
  
+       ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_EXIT, PRE_CHANGE);
        uic_cmd.command = UIC_CMD_DME_HIBER_EXIT;
        ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
        if (ret) {
                dev_err(hba->dev, "%s: hibern8 exit failed. ret = %d\n",
                        __func__, ret);
                ret = ufshcd_link_recovery(hba);
-       }
+       } else
+               ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_EXIT,
+                                                               POST_CHANGE);
  
        return ret;
  }
@@@ -2725,8 -2821,8 +2821,8 @@@ static int ufshcd_get_max_pwr_mode(stru
        if (hba->max_pwr_info.is_valid)
                return 0;
  
-       pwr_info->pwr_tx = FASTAUTO_MODE;
-       pwr_info->pwr_rx = FASTAUTO_MODE;
+       pwr_info->pwr_tx = FAST_MODE;
+       pwr_info->pwr_rx = FAST_MODE;
        pwr_info->hs_rate = PA_HS_MODE_B;
  
        /* Get the connected lane count */
                                __func__, pwr_info->gear_rx);
                        return -EINVAL;
                }
-               pwr_info->pwr_rx = SLOWAUTO_MODE;
+               pwr_info->pwr_rx = SLOW_MODE;
        }
  
        ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR),
                                __func__, pwr_info->gear_tx);
                        return -EINVAL;
                }
-               pwr_info->pwr_tx = SLOWAUTO_MODE;
+               pwr_info->pwr_tx = SLOW_MODE;
        }
  
        hba->max_pwr_info.is_valid = true;
@@@ -3090,7 -3186,16 +3186,16 @@@ static int ufshcd_link_startup(struct u
  {
        int ret;
        int retries = DME_LINKSTARTUP_RETRIES;
+       bool link_startup_again = false;
+       /*
+        * If UFS device isn't active then we will have to issue link startup
+        * 2 times to make sure the device state move to active.
+        */
+       if (!ufshcd_is_ufs_dev_active(hba))
+               link_startup_again = true;
  
+ link_startup:
        do {
                ufshcd_vops_link_startup_notify(hba, PRE_CHANGE);
  
                /* failed to get the link up... retire */
                goto out;
  
+       if (link_startup_again) {
+               link_startup_again = false;
+               retries = DME_LINKSTARTUP_RETRIES;
+               goto link_startup;
+       }
        if (hba->quirks & UFSHCD_QUIRK_BROKEN_LCC) {
                ret = ufshcd_disable_device_tx_lcc(hba);
                if (ret)
@@@ -3181,16 -3292,24 +3292,24 @@@ static void ufshcd_set_queue_depth(stru
  {
        int ret = 0;
        u8 lun_qdepth;
+       int retries;
        struct ufs_hba *hba;
  
        hba = shost_priv(sdev->host);
  
        lun_qdepth = hba->nutrs;
-       ret = ufshcd_read_unit_desc_param(hba,
-                                         ufshcd_scsi_to_upiu_lun(sdev->lun),
-                                         UNIT_DESC_PARAM_LU_Q_DEPTH,
-                                         &lun_qdepth,
-                                         sizeof(lun_qdepth));
+       for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
+               /* Read descriptor*/
+               ret = ufshcd_read_unit_desc_param(hba,
+                                 ufshcd_scsi_to_upiu_lun(sdev->lun),
+                                 UNIT_DESC_PARAM_LU_Q_DEPTH,
+                                 &lun_qdepth,
+                                 sizeof(lun_qdepth));
+               if (!ret || ret == -ENOTSUPP)
+                       break;
+               dev_dbg(hba->dev, "%s: error %d retrying\n", __func__, ret);
+       }
  
        /* Some WLUN doesn't support unit descriptor */
        if (ret == -EOPNOTSUPP)
@@@ -4097,6 -4216,17 +4216,17 @@@ static void ufshcd_update_uic_error(str
  {
        u32 reg;
  
+       /* PHY layer lane error */
+       reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_PHY_ADAPTER_LAYER);
+       /* Ignore LINERESET indication, as this is not an error */
+       if ((reg & UIC_PHY_ADAPTER_LAYER_ERROR) &&
+                       (reg & UIC_PHY_ADAPTER_LAYER_LANE_ERR_MASK))
+               /*
+                * To know whether this error is fatal or not, DB timeout
+                * must be checked but this error is handled separately.
+                */
+               dev_dbg(hba->dev, "%s: UIC Lane error reported\n", __func__);
        /* PA_INIT_ERROR is fatal and needs UIC reset */
        reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DATA_LINK_LAYER);
        if (reg & UIC_DATA_LINK_LAYER_ERROR_PA_INIT)
@@@ -4158,7 -4288,7 +4288,7 @@@ static void ufshcd_check_errors(struct 
                        /* block commands from scsi mid-layer */
                        scsi_block_requests(hba->host);
  
-                       hba->ufshcd_state = UFSHCD_STATE_ERROR;
+                       hba->ufshcd_state = UFSHCD_STATE_EH_SCHEDULED;
                        schedule_work(&hba->eh_work);
                }
        }
@@@ -4311,6 -4441,8 +4441,8 @@@ static int ufshcd_issue_tm_cmd(struct u
        task_req_upiup->input_param1 = cpu_to_be32(lun_id);
        task_req_upiup->input_param2 = cpu_to_be32(task_id);
  
+       ufshcd_vops_setup_task_mgmt(hba, free_slot, tm_function);
        /* send command to the controller */
        __set_bit(free_slot, &hba->outstanding_tasks);
  
        wmb();
  
        ufshcd_writel(hba, 1 << free_slot, REG_UTP_TASK_REQ_DOOR_BELL);
+       /* Make sure that doorbell is committed immediately */
+       wmb();
  
        spin_unlock_irqrestore(host->host_lock, flags);
  
        return icc_level;
  }
  
+ static int ufshcd_set_icc_levels_attr(struct ufs_hba *hba, u32 icc_level)
+ {
+       int ret = 0;
+       int retries;
+       for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
+               /* write attribute */
+               ret = ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
+                       QUERY_ATTR_IDN_ACTIVE_ICC_LVL, 0, 0, &icc_level);
+               if (!ret)
+                       break;
+               dev_dbg(hba->dev, "%s: failed with error %d\n", __func__, ret);
+       }
+       return ret;
+ }
  static void ufshcd_init_icc_levels(struct ufs_hba *hba)
  {
        int ret;
        dev_dbg(hba->dev, "%s: setting icc_level 0x%x",
                        __func__, hba->init_prefetch_data.icc_level);
  
-       ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
-               QUERY_ATTR_IDN_ACTIVE_ICC_LVL, 0, 0,
-               &hba->init_prefetch_data.icc_level);
+       ret = ufshcd_set_icc_levels_attr(hba,
+                                hba->init_prefetch_data.icc_level);
  
        if (ret)
                dev_err(hba->dev,
        return ret;
  }
  
+ /**
+  * ufshcd_quirk_tune_host_pa_tactivate - Ensures that host PA_TACTIVATE is
+  * less than device PA_TACTIVATE time.
+  * @hba: per-adapter instance
+  *
+  * Some UFS devices require host PA_TACTIVATE to be lower than device
+  * PA_TACTIVATE, we need to enable UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE quirk
+  * for such devices.
+  *
+  * Returns zero on success, non-zero error value on failure.
+  */
+ static int ufshcd_quirk_tune_host_pa_tactivate(struct ufs_hba *hba)
+ {
+       int ret = 0;
+       u32 granularity, peer_granularity;
+       u32 pa_tactivate, peer_pa_tactivate;
+       u32 pa_tactivate_us, peer_pa_tactivate_us;
+       u8 gran_to_us_table[] = {1, 4, 8, 16, 32, 100};
+       ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_GRANULARITY),
+                                 &granularity);
+       if (ret)
+               goto out;
+       ret = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_GRANULARITY),
+                                 &peer_granularity);
+       if (ret)
+               goto out;
+       if ((granularity < PA_GRANULARITY_MIN_VAL) ||
+           (granularity > PA_GRANULARITY_MAX_VAL)) {
+               dev_err(hba->dev, "%s: invalid host PA_GRANULARITY %d",
+                       __func__, granularity);
+               return -EINVAL;
+       }
+       if ((peer_granularity < PA_GRANULARITY_MIN_VAL) ||
+           (peer_granularity > PA_GRANULARITY_MAX_VAL)) {
+               dev_err(hba->dev, "%s: invalid device PA_GRANULARITY %d",
+                       __func__, peer_granularity);
+               return -EINVAL;
+       }
+       ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_TACTIVATE), &pa_tactivate);
+       if (ret)
+               goto out;
+       ret = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_TACTIVATE),
+                                 &peer_pa_tactivate);
+       if (ret)
+               goto out;
+       pa_tactivate_us = pa_tactivate * gran_to_us_table[granularity - 1];
+       peer_pa_tactivate_us = peer_pa_tactivate *
+                            gran_to_us_table[peer_granularity - 1];
+       if (pa_tactivate_us > peer_pa_tactivate_us) {
+               u32 new_peer_pa_tactivate;
+               new_peer_pa_tactivate = pa_tactivate_us /
+                                     gran_to_us_table[peer_granularity - 1];
+               new_peer_pa_tactivate++;
+               ret = ufshcd_dme_peer_set(hba, UIC_ARG_MIB(PA_TACTIVATE),
+                                         new_peer_pa_tactivate);
+       }
+ out:
+       return ret;
+ }
  static void ufshcd_tune_unipro_params(struct ufs_hba *hba)
  {
        if (ufshcd_is_unipro_pa_params_tuning_req(hba)) {
        if (hba->dev_quirks & UFS_DEVICE_QUIRK_PA_TACTIVATE)
                /* set 1ms timeout for PA_TACTIVATE */
                ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE), 10);
+       if (hba->dev_quirks & UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE)
+               ufshcd_quirk_tune_host_pa_tactivate(hba);
  }
  
  /**
@@@ -5027,9 -5251,11 +5251,11 @@@ static int ufshcd_probe_hba(struct ufs_
                        __func__);
        } else {
                ret = ufshcd_config_pwr_mode(hba, &hba->max_pwr_info.info);
-               if (ret)
+               if (ret) {
                        dev_err(hba->dev, "%s: Failed setting power mode, err = %d\n",
                                        __func__, ret);
+                       goto out;
+               }
        }
  
        /* set the state as operational after switching to desired gear */
                hba->is_init_prefetch = true;
  
        /* Resume devfreq after UFS device is detected */
-       if (ufshcd_is_clkscaling_enabled(hba))
-               devfreq_resume_device(hba->devfreq);
+       ufshcd_resume_clkscaling(hba);
  
  out:
        /*
@@@ -5389,6 -5614,10 +5614,10 @@@ static int __ufshcd_setup_clocks(struc
        if (!head || list_empty(head))
                goto out;
  
+       ret = ufshcd_vops_setup_clocks(hba, on, PRE_CHANGE);
+       if (ret)
+               return ret;
        list_for_each_entry(clki, head, list) {
                if (!IS_ERR_OR_NULL(clki->clk)) {
                        if (skip_ref_clk && !strcmp(clki->name, "ref_clk"))
                }
        }
  
-       ret = ufshcd_vops_setup_clocks(hba, on);
+       ret = ufshcd_vops_setup_clocks(hba, on, POST_CHANGE);
+       if (ret)
+               return ret;
  out:
        if (ret) {
                list_for_each_entry(clki, head, list) {
@@@ -5500,8 -5732,6 +5732,6 @@@ static void ufshcd_variant_hba_exit(str
        if (!hba->vops)
                return;
  
-       ufshcd_vops_setup_clocks(hba, false);
        ufshcd_vops_setup_regulators(hba, false);
  
        ufshcd_vops_exit(hba);
@@@ -5564,6 -5794,7 +5794,7 @@@ static void ufshcd_hba_exit(struct ufs_
        if (hba->is_powered) {
                ufshcd_variant_hba_exit(hba);
                ufshcd_setup_vreg(hba, false);
+               ufshcd_suspend_clkscaling(hba);
                ufshcd_setup_clocks(hba, false);
                ufshcd_setup_hba_vreg(hba, false);
                hba->is_powered = false;
@@@ -5577,20 -5808,20 +5808,20 @@@ ufshcd_send_request_sense(struct ufs_hb
                                0,
                                0,
                                0,
-                               SCSI_SENSE_BUFFERSIZE,
+                               UFSHCD_REQ_SENSE_SIZE,
                                0};
        char *buffer;
        int ret;
  
-       buffer = kzalloc(SCSI_SENSE_BUFFERSIZE, GFP_KERNEL);
+       buffer = kzalloc(UFSHCD_REQ_SENSE_SIZE, GFP_KERNEL);
        if (!buffer) {
                ret = -ENOMEM;
                goto out;
        }
  
        ret = scsi_execute_req_flags(sdp, cmd, DMA_FROM_DEVICE, buffer,
-                               SCSI_SENSE_BUFFERSIZE, NULL,
+                               UFSHCD_REQ_SENSE_SIZE, NULL,
 -                              msecs_to_jiffies(1000), 3, NULL, REQ_PM);
 +                              msecs_to_jiffies(1000), 3, NULL, 0, RQF_PM);
        if (ret)
                pr_err("%s: failed with err %d\n", __func__, ret);
  
@@@ -5652,11 -5883,11 +5883,11 @@@ static int ufshcd_set_dev_pwr_mode(stru
  
        /*
         * Current function would be generally called from the power management
 -       * callbacks hence set the REQ_PM flag so that it doesn't resume the
 +       * callbacks hence set the RQF_PM flag so that it doesn't resume the
         * already suspended children.
         */
        ret = scsi_execute_req_flags(sdp, cmd, DMA_NONE, NULL, 0, &sshdr,
 -                                   START_STOP_TIMEOUT, 0, NULL, REQ_PM);
 +                                   START_STOP_TIMEOUT, 0, NULL, 0, RQF_PM);
        if (ret) {
                sdev_printk(KERN_WARNING, sdp,
                            "START_STOP failed for power mode: %d, result %x\n",
@@@ -5766,7 -5997,6 +5997,6 @@@ static int ufshcd_vreg_set_hpm(struct u
            !hba->dev_info.is_lu_power_on_wp) {
                ret = ufshcd_setup_vreg(hba, true);
        } else if (!ufshcd_is_ufs_dev_active(hba)) {
-               ret = ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, true);
                if (!ret && !ufshcd_is_link_active(hba)) {
                        ret = ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq);
                        if (ret)
                        if (ret)
                                goto vccq_lpm;
                }
+               ret = ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, true);
        }
        goto out;
  
@@@ -5839,6 -6070,8 +6070,8 @@@ static int ufshcd_suspend(struct ufs_hb
        ufshcd_hold(hba, false);
        hba->clk_gating.is_suspended = true;
  
+       ufshcd_suspend_clkscaling(hba);
        if (req_dev_pwr_mode == UFS_ACTIVE_PWR_MODE &&
                        req_link_state == UIC_LINK_ACTIVE_STATE) {
                goto disable_clks;
  
        if ((req_dev_pwr_mode == hba->curr_dev_pwr_mode) &&
            (req_link_state == hba->uic_link_state))
-               goto out;
+               goto enable_gating;
  
        /* UFS device & link must be active before we enter in this function */
        if (!ufshcd_is_ufs_dev_active(hba) || !ufshcd_is_link_active(hba)) {
                ret = -EINVAL;
-               goto out;
+               goto enable_gating;
        }
  
        if (ufshcd_is_runtime_pm(pm_op)) {
        ufshcd_vreg_set_lpm(hba);
  
  disable_clks:
-       /*
-        * The clock scaling needs access to controller registers. Hence, Wait
-        * for pending clock scaling work to be done before clocks are
-        * turned off.
-        */
-       if (ufshcd_is_clkscaling_enabled(hba)) {
-               devfreq_suspend_device(hba->devfreq);
-               hba->clk_scaling.window_start_t = 0;
-       }
        /*
         * Call vendor specific suspend callback. As these callbacks may access
         * vendor specific host controller register space call them before the
        if (ret)
                goto set_link_active;
  
-       ret = ufshcd_vops_setup_clocks(hba, false);
-       if (ret)
-               goto vops_resume;
        if (!ufshcd_is_link_active(hba))
                ufshcd_setup_clocks(hba, false);
        else
        ufshcd_hba_vreg_set_lpm(hba);
        goto out;
  
- vops_resume:
-       ufshcd_vops_resume(hba, pm_op);
  set_link_active:
+       ufshcd_resume_clkscaling(hba);
        ufshcd_vreg_set_hpm(hba);
        if (ufshcd_is_link_hibern8(hba) && !ufshcd_uic_hibern8_exit(hba))
                ufshcd_set_link_active(hba);
@@@ -5937,6 -6156,7 +6156,7 @@@ set_dev_active
        if (!ufshcd_set_dev_pwr_mode(hba, UFS_ACTIVE_PWR_MODE))
                ufshcd_disable_auto_bkops(hba);
  enable_gating:
+       ufshcd_resume_clkscaling(hba);
        hba->clk_gating.is_suspended = false;
        ufshcd_release(hba);
  out:
@@@ -6015,8 -6235,7 +6235,7 @@@ static int ufshcd_resume(struct ufs_hb
        ufshcd_urgent_bkops(hba);
        hba->clk_gating.is_suspended = false;
  
-       if (ufshcd_is_clkscaling_enabled(hba))
-               devfreq_resume_device(hba->devfreq);
+       ufshcd_resume_clkscaling(hba);
  
        /* Schedule clock gating in case of no access to UFS device yet */
        ufshcd_release(hba);
@@@ -6030,6 -6249,7 +6249,7 @@@ disable_vreg
        ufshcd_vreg_set_lpm(hba);
  disable_irq_and_vops_clks:
        ufshcd_disable_irq(hba);
+       ufshcd_suspend_clkscaling(hba);
        ufshcd_setup_clocks(hba, false);
  out:
        hba->pm_op_in_progress = 0;
@@@ -6052,16 -6272,13 +6272,13 @@@ int ufshcd_system_suspend(struct ufs_hb
        if (!hba || !hba->is_powered)
                return 0;
  
-       if (pm_runtime_suspended(hba->dev)) {
-               if (hba->rpm_lvl == hba->spm_lvl)
-                       /*
-                        * There is possibility that device may still be in
-                        * active state during the runtime suspend.
-                        */
-                       if ((ufs_get_pm_lvl_to_dev_pwr_mode(hba->spm_lvl) ==
-                           hba->curr_dev_pwr_mode) && !hba->auto_bkops_enabled)
-                               goto out;
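+       /*
+        * If the device and the link are already in the power states
+        * requested for system suspend, there is nothing more to do.
+        */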
+       if ((ufs_get_pm_lvl_to_dev_pwr_mode(hba->spm_lvl) ==
+            hba->curr_dev_pwr_mode) &&
+           (ufs_get_pm_lvl_to_link_pwr_state(hba->spm_lvl) ==
+            hba->uic_link_state))
+               goto out;
  
+       if (pm_runtime_suspended(hba->dev)) {
                /*
                 * UFS device and/or UFS link low power states during runtime
                 * suspend seems to be different than what is expected during
@@@ -6092,7 -6309,10 +6309,10 @@@ EXPORT_SYMBOL(ufshcd_system_suspend)
  
  int ufshcd_system_resume(struct ufs_hba *hba)
  {
-       if (!hba || !hba->is_powered || pm_runtime_suspended(hba->dev))
+       if (!hba)
+               return -EINVAL;
+       if (!hba->is_powered || pm_runtime_suspended(hba->dev))
                /*
                 * Let the runtime resume take care of resuming
                 * if runtime suspended.
@@@ -6113,7 -6333,10 +6333,10 @@@ EXPORT_SYMBOL(ufshcd_system_resume)
   */
  int ufshcd_runtime_suspend(struct ufs_hba *hba)
  {
-       if (!hba || !hba->is_powered)
+       if (!hba)
+               return -EINVAL;
+       if (!hba->is_powered)
                return 0;
  
        return ufshcd_suspend(hba, UFS_RUNTIME_PM);
@@@ -6143,10 -6366,13 +6366,13 @@@ EXPORT_SYMBOL(ufshcd_runtime_suspend)
   */
  int ufshcd_runtime_resume(struct ufs_hba *hba)
  {
-       if (!hba || !hba->is_powered)
+       if (!hba)
+               return -EINVAL;
+       if (!hba->is_powered)
                return 0;
-       else
-               return ufshcd_resume(hba, UFS_RUNTIME_PM);
+       return ufshcd_resume(hba, UFS_RUNTIME_PM);
  }
  EXPORT_SYMBOL(ufshcd_runtime_resume);
  
@@@ -6198,11 -6424,7 +6424,7 @@@ void ufshcd_remove(struct ufs_hba *hba
        ufshcd_disable_intr(hba, hba->intr_mask);
        ufshcd_hba_stop(hba, true);
  
-       scsi_host_put(hba->host);
        ufshcd_exit_clk_gating(hba);
-       if (ufshcd_is_clkscaling_enabled(hba))
-               devfreq_remove_device(hba->devfreq);
        ufshcd_hba_exit(hba);
  }
  EXPORT_SYMBOL_GPL(ufshcd_remove);
@@@ -6324,15 -6546,47 +6546,47 @@@ static int ufshcd_devfreq_target(struc
  {
        int err = 0;
        struct ufs_hba *hba = dev_get_drvdata(dev);
+       bool release_clk_hold = false;
+       unsigned long irq_flags;
  
        if (!ufshcd_is_clkscaling_enabled(hba))
                return -EINVAL;
  
+       spin_lock_irqsave(hba->host->host_lock, irq_flags);
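+       /* skip clock scaling while UFS error handling is in progress */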
+       if (ufshcd_eh_in_progress(hba)) {
+               spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
+               return 0;
+       }
+       if (ufshcd_is_clkgating_allowed(hba) &&
+           (hba->clk_gating.state != CLKS_ON)) {
+               if (cancel_delayed_work(&hba->clk_gating.gate_work)) {
+                       /* hold the vote until the scaling work is completed */
+                       hba->clk_gating.active_reqs++;
+                       release_clk_hold = true;
+                       hba->clk_gating.state = CLKS_ON;
+               } else {
+                       /*
+                        * Clock gating work seems to be running in parallel,
+                        * so skip the scaling work to avoid a deadlock between
+                        * the current scaling work and the gating work.
+                        */
+                       spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
+                       return 0;
+               }
+       }
+       spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
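+       /* devfreq passes UINT_MAX to scale the clocks up and 0 to scale down */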
        if (*freq == UINT_MAX)
                err = ufshcd_scale_clks(hba, true);
        else if (*freq == 0)
                err = ufshcd_scale_clks(hba, false);
  
+       spin_lock_irqsave(hba->host->host_lock, irq_flags);
+       if (release_clk_hold)
+               __ufshcd_release(hba);
+       spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
        return err;
  }
  
@@@ -6498,7 -6752,7 +6752,7 @@@ int ufshcd_init(struct ufs_hba *hba, vo
        }
  
        if (ufshcd_is_clkscaling_enabled(hba)) {
-               hba->devfreq = devfreq_add_device(dev, &ufs_devfreq_profile,
+               hba->devfreq = devm_devfreq_add_device(dev, &ufs_devfreq_profile,
                                                   "simple_ondemand", NULL);
                if (IS_ERR(hba->devfreq)) {
                        dev_err(hba->dev, "Unable to register with devfreq %ld\n",
                        goto out_remove_scsi_host;
                }
                /* Suspend devfreq until the UFS device is detected */
-               devfreq_suspend_device(hba->devfreq);
-               hba->clk_scaling.window_start_t = 0;
+               ufshcd_suspend_clkscaling(hba);
        }
  
        /* Hold auto suspend until async scan completes */
        pm_runtime_get_sync(dev);
  
        /*
-        * The device-initialize-sequence hasn't been invoked yet.
-        * Set the device to power-off state
+        * We assume that the boot stage (before the kernel starts) has not
+        * left the device in the sleep/power-down state. This assumption
+        * helps avoid doing link startup twice during ufshcd_probe_hba().
         */
-       ufshcd_set_ufs_dev_poweroff(hba);
+       ufshcd_set_ufs_dev_active(hba);
  
        async_schedule(ufshcd_async_scan, hba);
  
@@@ -6530,7 -6785,6 +6785,6 @@@ exit_gating
        ufshcd_exit_clk_gating(hba);
  out_disable:
        hba->is_irq_enabled = false;
-       scsi_host_put(host);
        ufshcd_hba_exit(hba);
  out_error:
        return err;
diff --combined include/linux/blk-mq.h
index 87e404aae267e29308d784d66ed8c15fbfdede0e,6c0fb259581f0e00a1375e7ce26be99e4b0d7ad4..4a2ab5d99ff7ed8a64baa99ae708b30edca00e4d
@@@ -3,7 -3,6 +3,7 @@@
  
  #include <linux/blkdev.h>
  #include <linux/sbitmap.h>
 +#include <linux/srcu.h>
  
  struct blk_mq_tags;
  struct blk_flush_queue;
@@@ -36,8 -35,6 +36,8 @@@ struct blk_mq_hw_ctx 
  
        struct blk_mq_tags      *tags;
  
 +      struct srcu_struct      queue_rq_srcu;
 +
        unsigned long           queued;
        unsigned long           run;
  #define BLK_MQ_MAX_DISPATCH_ORDER     7
@@@ -218,20 -215,18 +218,20 @@@ void blk_mq_start_request(struct reques
  void blk_mq_end_request(struct request *rq, int error);
  void __blk_mq_end_request(struct request *rq, int error);
  
 -void blk_mq_requeue_request(struct request *rq);
 -void blk_mq_add_to_requeue_list(struct request *rq, bool at_head);
 -void blk_mq_cancel_requeue_work(struct request_queue *q);
 +void blk_mq_requeue_request(struct request *rq, bool kick_requeue_list);
 +void blk_mq_add_to_requeue_list(struct request *rq, bool at_head,
 +                              bool kick_requeue_list);
  void blk_mq_kick_requeue_list(struct request_queue *q);
  void blk_mq_delay_kick_requeue_list(struct request_queue *q, unsigned long msecs);
  void blk_mq_abort_requeue_list(struct request_queue *q);
  void blk_mq_complete_request(struct request *rq, int error);
  
 +bool blk_mq_queue_stopped(struct request_queue *q);
  void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx);
  void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx);
  void blk_mq_stop_hw_queues(struct request_queue *q);
  void blk_mq_start_hw_queues(struct request_queue *q);
 +void blk_mq_start_stopped_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
  void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async);
  void blk_mq_run_hw_queues(struct request_queue *q, bool async);
  void blk_mq_delay_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs);
@@@ -242,6 -237,7 +242,7 @@@ void blk_mq_unfreeze_queue(struct reque
  void blk_mq_freeze_queue_start(struct request_queue *q);
  int blk_mq_reinit_tagset(struct blk_mq_tag_set *set);
  
+ int blk_mq_map_queues(struct blk_mq_tag_set *set);
  void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues);
  
  /*