git.proxmox.com Git - mirror_ubuntu-bionic-kernel.git/commitdiff
Merge tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi
author	Linus Torvalds <torvalds@linux-foundation.org>
	Fri, 8 Sep 2017 04:11:05 +0000 (21:11 -0700)
committer	Linus Torvalds <torvalds@linux-foundation.org>
	Fri, 8 Sep 2017 04:11:05 +0000 (21:11 -0700)
Pull SCSI updates from James Bottomley:
 "This is mostly updates of the usual suspects: lpfc, qla2xxx, hisi_sas,
  megaraid_sas, zfcp and a host of minor updates.

  The major driver change here is the elimination of the block based
  cciss driver in favour of the SCSI based hpsa driver (which now drives
  all the legacy cases cciss used to be required for). Plus a reset
  handler clean up and the redo of the SAS SMP handler to use bsg lib"
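
The bsg-lib rework mentioned in the pull message changes the library so that struct bsg_job is preallocated as the request payload data and the device release callback is passed directly to bsg_setup_queue() (see the block/bsg-lib.c hunks further down). The following is only a rough sketch of what an SMP passthrough handler looks like against the reworked API; the names example_smp_handler, example_release and example_attach_bsg are hypothetical and merely illustrate the call signatures visible in this diff.

#include <linux/device.h>
#include <linux/err.h>
#include <linux/bsg-lib.h>

/* The handler now receives a ready-made bsg_job instead of a raw request. */
static int example_smp_handler(struct bsg_job *job)
{
	/*
	 * job->request / job->request_payload carry the SMP frame;
	 * a real driver would hand them to its hardware here.
	 */
	bsg_job_done(job, 0, job->reply_payload.payload_len);
	return 0;
}

/* The release callback is now threaded through bsg_setup_queue() itself. */
static void example_release(struct device *dev)
{
	/* drop the final reference on the transport-private parent here */
}

static int example_attach_bsg(struct device *dev)
{
	struct request_queue *q;

	q = bsg_setup_queue(dev, dev_name(dev), example_smp_handler,
			    0 /* dd_job_size */, example_release);
	if (IS_ERR(q))
		return PTR_ERR(q);

	return 0;
}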

* tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi: (279 commits)
  scsi: scsi-mq: Always unprepare before requeuing a request
  scsi: Show .retries and .jiffies_at_alloc in debugfs
  scsi: Improve requeuing behavior
  scsi: Call scsi_initialize_rq() for filesystem requests
  scsi: qla2xxx: Reset the logo flag, after target re-login.
  scsi: qla2xxx: Fix slow mem alloc behind lock
  scsi: qla2xxx: Clear fc4f_nvme flag
  scsi: qla2xxx: add missing includes for qla_isr
  scsi: qla2xxx: Fix an integer overflow in sysfs code
  scsi: aacraid: report -ENOMEM to upper layer from aac_convert_sgraw2()
  scsi: aacraid: get rid of one level of indentation
  scsi: aacraid: fix indentation errors
  scsi: storvsc: fix memory leak on ring buffer busy
  scsi: scsi_transport_sas: switch to bsg-lib for SMP passthrough
  scsi: smartpqi: remove the smp_handler stub
  scsi: hpsa: remove the smp_handler stub
  scsi: bsg-lib: pass the release callback through bsg_setup_queue
  scsi: Rework handling of scsi_device.vpd_pg8[03]
  scsi: Rework the code for caching Vital Product Data (VPD)
  scsi: rcu: Introduce rcu_swap_protected()
  ...

12 files changed:
MAINTAINERS
block/bsg-lib.c
drivers/block/Kconfig
drivers/scsi/lpfc/lpfc_attr.c
drivers/scsi/lpfc/lpfc_debugfs.c
drivers/scsi/lpfc/lpfc_nvmet.c
drivers/scsi/lpfc/lpfc_nvmet.h
drivers/staging/rts5208/rtsx.c
drivers/staging/unisys/visorhba/visorhba_main.c
drivers/virtio/virtio_ring.c
include/linux/bsg-lib.h
include/linux/rcupdate.h

diff --combined MAINTAINERS
index 259d32d8c84b4e3c47bb947d6d365afa1151b0f3,4507c25637c3a718800424a724d046bcffd5b706..bf206bd9f056c6fdc9d49f2d24d9f7a634c12db5
@@@ -301,7 -301,6 +301,7 @@@ S: Supporte
  F:    drivers/acpi/
  F:    drivers/pnp/pnpacpi/
  F:    include/linux/acpi.h
 +F:    include/linux/fwnode.h
  F:    include/acpi/
  F:    Documentation/acpi/
  F:    Documentation/ABI/testing/sysfs-bus-acpi
@@@ -311,14 -310,6 +311,14 @@@ F:       drivers/pci/*/*acpi
  F:    drivers/pci/*/*/*acpi*
  F:    tools/power/acpi/
  
 +ACPI APEI
 +M:    "Rafael J. Wysocki" <rjw@rjwysocki.net>
 +M:    Len Brown <lenb@kernel.org>
 +L:    linux-acpi@vger.kernel.org
 +R:    Tony Luck <tony.luck@intel.com>
 +R:    Borislav Petkov <bp@alien8.de>
 +F:    drivers/acpi/apei/
 +
  ACPI COMPONENT ARCHITECTURE (ACPICA)
  M:    Robert Moore <robert.moore@intel.com>
  M:    Lv Zheng <lv.zheng@intel.com>
@@@ -778,12 -769,6 +778,12 @@@ W:       http://ez.analog.com/community/linux
  S:    Supported
  F:    drivers/media/i2c/adv7180.c
  
 +ANALOG DEVICES INC ADV748X DRIVER
 +M:    Kieran Bingham <kieran.bingham@ideasonboard.com>
 +L:    linux-media@vger.kernel.org
 +S:    Maintained
 +F:    drivers/media/i2c/adv748x/*
 +
  ANALOG DEVICES INC ADV7511 DRIVER
  M:    Hans Verkuil <hans.verkuil@cisco.com>
  L:    linux-media@vger.kernel.org
@@@ -1168,7 -1153,6 +1168,7 @@@ L:      linux-arm-kernel@axis.co
  F:    arch/arm/mach-artpec
  F:    arch/arm/boot/dts/artpec6*
  F:    drivers/clk/axis
 +F:    drivers/crypto/axis
  F:    drivers/pinctrl/pinctrl-artpec*
  F:    Documentation/devicetree/bindings/pinctrl/axis,artpec6-pinctrl.txt
  
@@@ -1177,7 -1161,7 +1177,7 @@@ M:      Brendan Higgins <brendanhiggins@goog
  R:    Benjamin Herrenschmidt <benh@kernel.crashing.org>
  R:    Joel Stanley <joel@jms.id.au>
  L:    linux-i2c@vger.kernel.org
 -L:    openbmc@lists.ozlabs.org
 +L:    openbmc@lists.ozlabs.org (moderated for non-subscribers)
  S:    Maintained
  F:    drivers/irqchip/irq-aspeed-i2c-ic.c
  F:    drivers/i2c/busses/i2c-aspeed.c
@@@ -1298,15 -1282,10 +1298,15 @@@ S:   Maintaine
  
  ARM/CORTINA SYSTEMS GEMINI ARM ARCHITECTURE
  M:    Hans Ulli Kroll <ulli.kroll@googlemail.com>
 +M:    Linus Walleij <linus.walleij@linaro.org>
  L:    linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
  T:    git git://github.com/ulli-kroll/linux.git
  S:    Maintained
 +F:    Documentation/devicetree/bindings/arm/gemini.txt
 +F:    Documentation/devicetree/bindings/pinctrl/cortina,gemini-pinctrl.txt
 +F:    Documentation/devicetree/bindings/rtc/faraday,ftrtc010.txt
  F:    arch/arm/mach-gemini/
 +F:    drivers/pinctrl/pinctrl-gemini.c
  F:    drivers/rtc/rtc-ftrtc010.c
  
  ARM/CSR SIRFPRIMA2 MACHINE SUPPORT
@@@ -1591,7 -1570,7 +1591,7 @@@ M:      Chunfeng Yun <chunfeng.yun@mediatek.
  L:    linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
  L:    linux-mediatek@lists.infradead.org (moderated for non-subscribers)
  S:    Maintained
 -F:    drivers/phy/phy-mt65xx-usb3.c
 +F:    drivers/phy/mediatek/phy-mtk-tphy.c
  
  ARM/MICREL KS8695 ARCHITECTURE
  M:    Greg Ungerer <gerg@uclinux.org>
@@@ -2014,7 -1993,6 +2014,7 @@@ F:      arch/arm64/boot/dts/socionext
  F:    drivers/bus/uniphier-system-bus.c
  F:    drivers/clk/uniphier/
  F:    drivers/i2c/busses/i2c-uniphier*
 +F:    drivers/irqchip/irq-uniphier-aidet.c
  F:    drivers/pinctrl/uniphier/
  F:    drivers/reset/reset-uniphier.c
  F:    drivers/tty/serial/8250/8250_uniphier.c
@@@ -2139,12 -2117,6 +2139,12 @@@ S:    Maintaine
  F:    arch/arm64/
  F:    Documentation/arm64/
  
 +AS3645A LED FLASH CONTROLLER DRIVER
 +M:    Sakari Ailus <sakari.ailus@iki.fi>
 +L:    linux-leds@vger.kernel.org
 +S:    Maintained
 +F:    drivers/leds/leds-as3645a.c
 +
  AS3645A LED FLASH CONTROLLER DRIVER
  M:    Laurent Pinchart <laurent.pinchart@ideasonboard.com>
  L:    linux-media@vger.kernel.org
@@@ -2413,10 -2385,9 +2413,10 @@@ AUDIT SUBSYSTE
  M:    Paul Moore <paul@paul-moore.com>
  M:    Eric Paris <eparis@redhat.com>
  L:    linux-audit@redhat.com (moderated for non-subscribers)
 -W:    http://people.redhat.com/sgrubb/audit/
 -T:    git git://git.infradead.org/users/pcmoore/audit
 -S:    Maintained
 +W:    https://github.com/linux-audit
 +W:    https://people.redhat.com/sgrubb/audit
 +T:    git git://git.kernel.org/pub/scm/linux/kernel/git/pcmoore/audit.git
 +S:    Supported
  F:    include/linux/audit.h
  F:    include/uapi/linux/audit.h
  F:    kernel/audit*
@@@ -2506,7 -2477,7 +2506,7 @@@ Q:      https://patchwork.open-mesh.org/proj
  S:    Maintained
  F:    Documentation/ABI/testing/sysfs-class-net-batman-adv
  F:    Documentation/ABI/testing/sysfs-class-net-mesh
 -F:    Documentation/networking/batman-adv.txt
 +F:    Documentation/networking/batman-adv.rst
  F:    include/uapi/linux/batman_adv.h
  F:    net/batman-adv/
  
@@@ -4137,9 -4108,7 +4137,9 @@@ F:      include/linux/dax.
  F:    include/trace/events/fs_dax.h
  
  DIRECTORY NOTIFICATION (DNOTIFY)
 -M:    Eric Paris <eparis@parisplace.org>
 +M:    Jan Kara <jack@suse.cz>
 +R:    Amir Goldstein <amir73il@gmail.com>
 +L:    linux-fsdevel@vger.kernel.org
  S:    Maintained
  F:    Documentation/filesystems/dnotify.txt
  F:    fs/notify/dnotify/
@@@ -4390,12 -4359,6 +4390,12 @@@ S:    Maintaine
  F:    drivers/gpu/drm/qxl/
  F:    include/uapi/drm/qxl_drm.h
  
 +DRM DRIVER FOR PERVASIVE DISPLAYS REPAPER PANELS
 +M:    Noralf Trønnes <noralf@tronnes.org>
 +S:    Maintained
 +F:    drivers/gpu/drm/tinydrm/repaper.c
 +F:    Documentation/devicetree/bindings/display/repaper.txt
 +
  DRM DRIVER FOR RAGE 128 VIDEO CARDS
  S:    Orphan / Obsolete
  F:    drivers/gpu/drm/r128/
@@@ -4411,12 -4374,6 +4411,12 @@@ S:    Orphan / Obsolet
  F:    drivers/gpu/drm/sis/
  F:    include/uapi/drm/sis_drm.h
  
 +DRM DRIVER FOR SITRONIX ST7586 PANELS
 +M:    David Lechner <david@lechnology.com>
 +S:    Maintained
 +F:    drivers/gpu/drm/tinydrm/st7586.c
 +F:    Documentation/devicetree/bindings/display/st7586.txt
 +
  DRM DRIVER FOR TDFX VIDEO CARDS
  S:    Orphan / Obsolete
  F:    drivers/gpu/drm/tdfx/
@@@ -4665,14 -4622,6 +4665,14 @@@ F:    drivers/gpu/drm/panel
  F:    include/drm/drm_panel.h
  F:    Documentation/devicetree/bindings/display/panel/
  
 +DRM TINYDRM DRIVERS
 +M:    Noralf Trønnes <noralf@tronnes.org>
 +W:    https://github.com/notro/tinydrm/wiki/Development
 +T:    git git://anongit.freedesktop.org/drm/drm-misc
 +S:    Maintained
 +F:    drivers/gpu/drm/tinydrm/
 +F:    include/drm/tinydrm/
 +
  DSBR100 USB FM RADIO DRIVER
  M:    Alexey Klimov <klimov.linux@gmail.com>
  L:    linux-media@vger.kernel.org
@@@ -5152,7 -5101,6 +5152,7 @@@ F:      include/linux/of_net.
  F:    include/linux/phy.h
  F:    include/linux/phy_fixed.h
  F:    include/linux/platform_data/mdio-gpio.h
 +F:    include/linux/platform_data/mdio-bcm-unimac.h
  F:    include/trace/events/mdio.h
  F:    include/uapi/linux/mdio.h
  F:    include/uapi/linux/mii.h
@@@ -5250,9 -5198,7 +5250,9 @@@ F:      Documentation/hwmon/f71805
  F:    drivers/hwmon/f71805f.c
  
  FANOTIFY
 -M:    Eric Paris <eparis@redhat.com>
 +M:    Jan Kara <jack@suse.cz>
 +R:    Amir Goldstein <amir73il@gmail.com>
 +L:    linux-fsdevel@vger.kernel.org
  S:    Maintained
  F:    fs/notify/fanotify/
  F:    include/linux/fanotify.h
@@@ -5395,11 -5341,10 +5395,11 @@@ K:   fmc_d.*registe
  
  FPGA MANAGER FRAMEWORK
  M:    Alan Tull <atull@kernel.org>
 -R:    Moritz Fischer <moritz.fischer@ettus.com>
 +R:    Moritz Fischer <mdf@kernel.org>
  L:    linux-fpga@vger.kernel.org
  S:    Maintained
  T:    git git://git.kernel.org/pub/scm/linux/kernel/git/atull/linux-fpga.git
 +Q:    http://patchwork.kernel.org/project/linux-fpga/list/
  F:    Documentation/fpga/
  F:    Documentation/devicetree/bindings/fpga/
  F:    drivers/fpga/
@@@ -5818,12 -5763,6 +5818,12 @@@ S:    Maintaine
  F:    Documentation/acpi/gpio-properties.txt
  F:    drivers/gpio/gpiolib-acpi.c
  
 +GPIO IR Transmitter
 +M:    Sean Young <sean@mess.org>
 +L:    linux-media@vger.kernel.org
 +S:    Maintained
 +F:    drivers/media/rc/gpio-ir-tx.c
 +
  GPIO MOCKUP DRIVER
  M:    Bamvor Jian Zhang <bamvor.zhangjian@linaro.org>
  L:    linux-gpio@vger.kernel.org
@@@ -5895,7 -5834,7 +5895,7 @@@ F:      drivers/staging/greybus/spi.
  F:    drivers/staging/greybus/spilib.c
  F:    drivers/staging/greybus/spilib.h
  
 -GREYBUS LOOBACK/TIME PROTOCOLS DRIVERS
 +GREYBUS LOOPBACK/TIME PROTOCOLS DRIVERS
  M:    Bryan O'Donoghue <pure.logic@nexus-software.ie>
  S:    Maintained
  F:    drivers/staging/greybus/loopback.c
@@@ -6093,16 -6032,6 +6093,6 @@@ F:     drivers/scsi/hpsa*.[ch
  F:    include/linux/cciss*.h
  F:    include/uapi/linux/cciss*.h
  
- HEWLETT-PACKARD SMART CISS RAID DRIVER (cciss)
- M:    Don Brace <don.brace@microsemi.com>
- L:    esc.storagedev@microsemi.com
- L:    linux-scsi@vger.kernel.org
- S:    Supported
- F:    Documentation/blockdev/cciss.txt
- F:    drivers/block/cciss*
- F:    include/linux/cciss_ioctl.h
- F:    include/uapi/linux/cciss_ioctl.h
  HFI1 DRIVER
  M:    Mike Marciniszyn <mike.marciniszyn@intel.com>
  M:    Dennis Dalessandro <dennis.dalessandro@intel.com>
@@@ -6208,14 -6137,6 +6198,14 @@@ S:    Maintaine
  F:    drivers/net/ethernet/hisilicon/
  F:    Documentation/devicetree/bindings/net/hisilicon*.txt
  
 +HISILICON NETWORK SUBSYSTEM 3 DRIVER (HNS3)
 +M:    Yisen Zhuang <yisen.zhuang@huawei.com>
 +M:    Salil Mehta <salil.mehta@huawei.com>
 +L:    netdev@vger.kernel.org
 +W:    http://www.hisilicon.com
 +S:    Maintained
 +F:    drivers/net/ethernet/hisilicon/hns3/
 +
  HISILICON ROCE DRIVER
  M:    Lijun Ou <oulijun@huawei.com>
  M:    Wei Hu(Xavier) <xavier.huwei@huawei.com>
@@@ -6300,13 -6221,6 +6290,13 @@@ L:    linux-input@vger.kernel.or
  S:    Maintained
  F:    drivers/input/touchscreen/htcpen.c
  
 +HUAWEI ETHERNET DRIVER
 +M:    Aviad Krawczyk <aviad.krawczyk@huawei.com>
 +L:    netdev@vger.kernel.org
 +S:    Supported
 +F:    Documentation/networking/hinic.txt
 +F:    drivers/net/ethernet/huawei/hinic/
 +
  HUGETLB FILESYSTEM
  M:    Nadia Yvette Chambers <nyc@holomorphy.com>
  S:    Maintained
@@@ -6333,9 -6247,7 +6323,9 @@@ M:      Haiyang Zhang <haiyangz@microsoft.co
  M:    Stephen Hemminger <sthemmin@microsoft.com>
  L:    devel@linuxdriverproject.org
  S:    Maintained
 +F:    Documentation/networking/netvsc.txt
  F:    arch/x86/include/asm/mshyperv.h
 +F:    arch/x86/include/asm/trace/hyperv.h
  F:    arch/x86/include/uapi/asm/hyperv.h
  F:    arch/x86/kernel/cpu/mshyperv.c
  F:    arch/x86/hyperv
@@@ -6347,9 -6259,7 +6337,9 @@@ F:      drivers/net/hyperv
  F:    drivers/scsi/storvsc_drv.c
  F:    drivers/uio/uio_hv_generic.c
  F:    drivers/video/fbdev/hyperv_fb.c
 +F:    net/vmw_vsock/hyperv_transport.c
  F:    include/linux/hyperv.h
 +F:    include/uapi/linux/hyperv.h
  F:    tools/hv/
  F:    Documentation/ABI/stable/sysfs-bus-vmbus
  
@@@ -6517,15 -6427,6 +6507,15 @@@ L:    netdev@vger.kernel.or
  S:    Supported
  F:    drivers/net/ethernet/ibm/ibmvnic.*
  
 +IBM Power Virtual Accelerator Switchboard
 +M:    Sukadev Bhattiprolu
 +L:    linuxppc-dev@lists.ozlabs.org
 +S:    Supported
 +F:    arch/powerpc/platforms/powernv/vas*
 +F:    arch/powerpc/platforms/powernv/copy-paste.h
 +F:    arch/powerpc/include/asm/vas.h
 +F:    arch/powerpc/include/uapi/asm/vas.h
 +
  IBM Power Virtual Ethernet Device Driver
  M:    Thomas Falcon <tlfalcon@linux.vnet.ibm.com>
  L:    netdev@vger.kernel.org
@@@ -6762,9 -6663,9 +6752,9 @@@ S:      Maintaine
  F:    drivers/mtd/nand/jz4780_*
  
  INOTIFY
 -M:    John McCutchan <john@johnmccutchan.com>
 -M:    Robert Love <rlove@rlove.org>
 -M:    Eric Paris <eparis@parisplace.org>
 +M:    Jan Kara <jack@suse.cz>
 +R:    Amir Goldstein <amir73il@gmail.com>
 +L:    linux-fsdevel@vger.kernel.org
  S:    Maintained
  F:    Documentation/filesystems/inotify.txt
  F:    fs/notify/inotify/
@@@ -6833,9 -6734,8 +6823,9 @@@ S:      Supporte
  F:    drivers/scsi/isci/
  
  INTEL DRM DRIVERS (excluding Poulsbo, Moorestown and derivative chipsets)
 -M:    Daniel Vetter <daniel.vetter@intel.com>
  M:    Jani Nikula <jani.nikula@linux.intel.com>
 +M:    Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
 +M:    Rodrigo Vivi <rodrigo.vivi@intel.com>
  L:    intel-gfx@lists.freedesktop.org
  W:    https://01.org/linuxgraphics/
  B:    https://01.org/linuxgraphics/documentation/how-report-bugs
@@@ -7173,7 -7073,9 +7163,7 @@@ W:      http://irda.sourceforge.net
  S:    Maintained
  T:    git git://git.kernel.org/pub/scm/linux/kernel/git/sameo/irda-2.6.git
  F:    Documentation/networking/irda.txt
 -F:    drivers/net/irda/
 -F:    include/net/irda/
 -F:    net/irda/
 +F:    drivers/staging/irda/
  
  IRQ DOMAINS (IRQ NUMBER MAPPING LIBRARY)
  M:    Marc Zyngier <marc.zyngier@arm.com>
@@@ -7198,6 -7100,7 +7188,6 @@@ M:      Marc Zyngier <marc.zyngier@arm.com
  L:    linux-kernel@vger.kernel.org
  S:    Maintained
  T:    git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git irq/core
 -T:    git git://git.infradead.org/users/jcooper/linux.git irqchip/core
  F:    Documentation/devicetree/bindings/interrupt-controller/
  F:    drivers/irqchip/
  
@@@ -7727,6 -7630,17 +7717,6 @@@ T:     git git://linuxtv.org/mkrufky/tuners
  S:    Maintained
  F:    drivers/media/dvb-frontends/lgdt3305.*
  
 -LGUEST
 -M:    Rusty Russell <rusty@rustcorp.com.au>
 -L:    lguest@lists.ozlabs.org
 -W:    http://lguest.ozlabs.org/
 -S:    Odd Fixes
 -F:    arch/x86/include/asm/lguest*.h
 -F:    arch/x86/lguest/
 -F:    drivers/lguest/
 -F:    include/linux/lguest*.h
 -F:    tools/lguest/
 -
  LIBATA PATA ARASAN COMPACT FLASH CONTROLLER
  M:    Viresh Kumar <vireshk@kernel.org>
  L:    linux-ide@vger.kernel.org
@@@ -7862,7 -7776,6 +7852,7 @@@ F:      drivers/pci/hotplug/rpa
  F:    drivers/rtc/rtc-opal.c
  F:    drivers/scsi/ibmvscsi/
  F:    drivers/tty/hvc/hvc_opal.c
 +F:    drivers/watchdog/wdrtas.c
  F:    tools/testing/selftests/powerpc
  N:    /pmac
  N:    powermac
@@@ -8420,14 -8333,6 +8410,14 @@@ T:    git git://linuxtv.org/media_tree.gi
  S:    Supported
  F:    drivers/media/dvb-frontends/lnbh25*
  
 +MEDIA DRIVERS FOR MXL5XX TUNER DEMODULATORS
 +M:    Daniel Scheller <d.scheller.oss@gmail.com>
 +L:    linux-media@vger.kernel.org
 +W:    https://linuxtv.org
 +T:    git git://linuxtv.org/media_tree.git
 +S:    Maintained
 +F:    drivers/media/dvb-frontends/mxl5xx*
 +
  MEDIA DRIVERS FOR NETUP PCI UNIVERSAL DVB devices
  M:    Sergey Kozlov <serjk@netup.ru>
  M:    Abylay Ospan <aospan@netup.ru>
@@@ -8484,30 -8389,6 +8474,30 @@@ S:    Supporte
  F:    Documentation/devicetree/bindings/media/renesas,vsp1.txt
  F:    drivers/media/platform/vsp1/
  
 +MEDIA DRIVERS FOR ST STV0910 DEMODULATOR ICs
 +M:    Daniel Scheller <d.scheller.oss@gmail.com>
 +L:    linux-media@vger.kernel.org
 +W:    https://linuxtv.org
 +T:    git git://linuxtv.org/media_tree.git
 +S:    Maintained
 +F:    drivers/media/dvb-frontends/stv0910*
 +
 +MEDIA DRIVERS FOR ST STV6111 TUNER ICs
 +M:    Daniel Scheller <d.scheller.oss@gmail.com>
 +L:    linux-media@vger.kernel.org
 +W:    https://linuxtv.org
 +T:    git git://linuxtv.org/media_tree.git
 +S:    Maintained
 +F:    drivers/media/dvb-frontends/stv6111*
 +
 +MEDIA DRIVERS FOR DIGITAL DEVICES PCIE DEVICES
 +M:    Daniel Scheller <d.scheller.oss@gmail.com>
 +L:    linux-media@vger.kernel.org
 +W:    https://linuxtv.org
 +T:    git git://linuxtv.org/media_tree.git
 +S:    Maintained
 +F:    drivers/media/pci/ddbridge/*
 +
  MEDIA INPUT INFRASTRUCTURE (V4L/DVB)
  M:    Mauro Carvalho Chehab <mchehab@s-opensource.com>
  M:    Mauro Carvalho Chehab <mchehab@kernel.org>
@@@ -8533,9 -8414,7 +8523,9 @@@ F:      include/uapi/linux/uvcvideo.
  
  MEDIATEK ETHERNET DRIVER
  M:    Felix Fietkau <nbd@openwrt.org>
 -M:    John Crispin <blogic@openwrt.org>
 +M:    John Crispin <john@phrozen.org>
 +M:    Sean Wang <sean.wang@mediatek.com>
 +M:    Nelson Chang <nelson.chang@mediatek.com>
  L:    netdev@vger.kernel.org
  S:    Maintained
  F:    drivers/net/ethernet/mediatek/
@@@ -8571,24 -8450,11 +8561,24 @@@ L:   linux-wireless@vger.kernel.or
  S:    Maintained
  F:    drivers/net/wireless/mediatek/mt7601u/
  
 +MEDIATEK CIR DRIVER
 +M:    Sean Wang <sean.wang@mediatek.com>
 +S:    Maintained
 +F:    drivers/media/rc/mtk-cir.c
 +
  MEDIATEK RANDOM NUMBER GENERATOR SUPPORT
  M:    Sean Wang <sean.wang@mediatek.com>
  S:    Maintained
  F:    drivers/char/hw_random/mtk-rng.c
  
 +MEDIATEK USB3 DRD IP DRIVER
 +M:    Chunfeng Yun <chunfeng.yun@mediatek.com>
 +L:    linux-usb@vger.kernel.org (moderated for non-subscribers)
 +L:    linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 +L:    linux-mediatek@lists.infradead.org (moderated for non-subscribers)
 +S:    Maintained
 +F:    drivers/usb/mtu3/
 +
  MEGACHIPS STDPXXXX-GE-B850V3-FW LVDS/DP++ BRIDGES
  M:    Peter Senna Tschudin <peter.senna@collabora.com>
  M:    Martin Donnelly <martin.donnelly@ge.com>
@@@ -8753,7 -8619,7 +8743,7 @@@ M:      Mathieu Desnoyers <mathieu.desnoyers
  M:    "Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
  L:    linux-kernel@vger.kernel.org
  S:    Supported
 -F:    kernel/membarrier.c
 +F:    kernel/sched/membarrier.c
  F:    include/uapi/linux/membarrier.h
  
  MEMORY MANAGEMENT
@@@ -8807,16 -8673,6 +8797,16 @@@ F:    drivers/leds/leds-menf21bmc.
  F:    drivers/hwmon/menf21bmc_hwmon.c
  F:    Documentation/hwmon/menf21bmc
  
 +MESON AO CEC DRIVER FOR AMLOGIC SOCS
 +M:    Neil Armstrong <narmstrong@baylibre.com>
 +L:    linux-media@lists.freedesktop.org
 +L:    linux-amlogic@lists.infradead.org
 +W:    http://linux-meson.com/
 +S:    Supported
 +F:    drivers/media/platform/meson/ao-cec.c
 +F:    Documentation/devicetree/bindings/media/meson-ao-cec.txt
 +T:    git git://linuxtv.org/media_tree.git
 +
  METAG ARCHITECTURE
  M:    James Hogan <james.hogan@imgtec.com>
  L:    linux-metag@vger.kernel.org
@@@ -8853,12 -8709,6 +8843,12 @@@ F:    drivers/dma/at_hdmac.
  F:    drivers/dma/at_hdmac_regs.h
  F:    include/linux/platform_data/dma-atmel.h
  
 +MICROCHIP / ATMEL ECC DRIVER
 +M:    Tudor Ambarus <tudor.ambarus@microchip.com>
 +L:    linux-crypto@vger.kernel.org
 +S:    Maintained
 +F:    drivers/crypto/atmel-ecc.*
 +
  MICROCHIP / ATMEL ISC DRIVER
  M:    Songjun Wu <songjun.wu@microchip.com>
  L:    linux-media@vger.kernel.org
@@@ -9606,7 -9456,6 +9596,7 @@@ M:      Srinivas Kandagatla <srinivas.kandag
  S:    Maintained
  F:    drivers/nvmem/
  F:    Documentation/devicetree/bindings/nvmem/
 +F:    Documentation/ABI/stable/sysfs-bus-nvmem
  F:    include/linux/nvmem-consumer.h
  F:    include/linux/nvmem-provider.h
  
@@@ -9850,7 -9699,7 +9840,7 @@@ S:      Maintaine
  F:    drivers/media/i2c/ov5640.c
  
  OMNIVISION OV5647 SENSOR DRIVER
 -M:    Ramiro Oliveira <roliveir@synopsys.com>
 +M:    Luis Oliveira <lolivei@synopsys.com>
  L:    linux-media@vger.kernel.org
  T:    git git://linuxtv.org/media_tree.git
  S:    Maintained
@@@ -10524,7 -10373,7 +10514,7 @@@ L:   linux-gpio@vger.kernel.or
  T:    git git://git.kernel.org/pub/scm/linux/kernel/git/linusw/linux-pinctrl.git
  S:    Maintained
  F:    Documentation/devicetree/bindings/pinctrl/
 -F:    Documentation/pinctrl.txt
 +F:    Documentation/driver-api/pinctl.rst
  F:    drivers/pinctrl/
  F:    include/linux/pinctrl/
  
@@@ -10848,7 -10697,6 +10838,7 @@@ L:   linux-media@vger.kernel.or
  T:    git git://linuxtv.org/media_tree.git
  S:    Maintained
  F:    drivers/media/usb/pulse8-cec/*
 +F:    Documentation/media/cec-drivers/pulse8-cec.rst
  
  PVRUSB2 VIDEO4LINUX DRIVER
  M:    Mike Isely <isely@pobox.com>
@@@ -10876,12 -10724,6 +10866,12 @@@ F: Documentation/devicetree/bindings/hw
  F:    Documentation/hwmon/pwm-fan
  F:    drivers/hwmon/pwm-fan.c
  
 +PWM IR Transmitter
 +M:    Sean Young <sean@mess.org>
 +L:    linux-media@vger.kernel.org
 +S:    Maintained
 +F:    drivers/media/rc/pwm-ir-tx.c
 +
  PWM SUBSYSTEM
  M:    Thierry Reding <thierry.reding@gmail.com>
  L:    linux-pwm@vger.kernel.org
@@@ -11076,14 -10918,6 +11066,14 @@@ W: http://wireless.kernel.org/en/users/
  S:    Supported
  F:    drivers/net/wireless/ath/ath9k/
  
 +QUALCOMM CAMERA SUBSYSTEM DRIVER
 +M:    Todor Tomov <todor.tomov@linaro.org>
 +L:    linux-media@vger.kernel.org
 +S:    Maintained
 +F:    Documentation/devicetree/bindings/media/qcom,camss.txt
 +F:    Documentation/media/v4l-drivers/qcom_camss.rst
 +F:    drivers/media/platform/qcom/camss-8x16/
 +
  QUALCOMM EMAC GIGABIT ETHERNET DRIVER
  M:    Timur Tabi <timur@codeaurora.org>
  L:    netdev@vger.kernel.org
@@@ -11267,7 -11101,7 +11257,7 @@@ M:   Fenghua Yu <fenghua.yu@intel.com
  L:    linux-kernel@vger.kernel.org
  S:    Supported
  F:    arch/x86/kernel/cpu/intel_rdt*
 -F:    arch/x86/include/asm/intel_rdt*
 +F:    arch/x86/include/asm/intel_rdt_sched.h
  F:    Documentation/x86/intel_rdt*
  
  READ-COPY UPDATE (RCU)
@@@ -11437,17 -11271,6 +11427,17 @@@ L: linux-serial@vger.kernel.or
  S:    Odd Fixes
  F:    drivers/tty/serial/rp2.*
  
 +ROHM MULTIFUNCTION BD9571MWV-M PMIC DEVICE DRIVERS
 +M:    Marek Vasut <marek.vasut+renesas@gmail.com>
 +L:    linux-kernel@vger.kernel.org
 +L:    linux-renesas-soc@vger.kernel.org
 +S:    Supported
 +F:    drivers/mfd/bd9571mwv.c
 +F:    drivers/regulator/bd9571mwv-regulator.c
 +F:    drivers/gpio/gpio-bd9571mwv.c
 +F:    include/linux/mfd/bd9571mwv.h
 +F:    Documentation/devicetree/bindings/mfd/bd9571mwv.txt
 +
  ROSE NETWORK LAYER
  M:    Ralf Baechle <ralf@linux-mips.org>
  L:    linux-hams@vger.kernel.org
@@@ -11609,6 -11432,7 +11599,7 @@@ F:   drivers/s390/crypto
  
  S390 ZFCP DRIVER
  M:    Steffen Maier <maier@linux.vnet.ibm.com>
+ M:    Benjamin Block <bblock@linux.vnet.ibm.com>
  L:    linux-s390@vger.kernel.org
  W:    http://www.ibm.com/developerworks/linux/linux390/
  S:    Supported
@@@ -12657,12 -12481,6 +12648,12 @@@ M: Ion Badulescu <ionut@badula.org
  S:    Odd Fixes
  F:    drivers/net/ethernet/adaptec/starfire*
  
 +STEC S1220 SKD DRIVER
 +M:    Bart Van Assche <bart.vanassche@wdc.com>
 +L:    linux-block@vger.kernel.org
 +S:    Maintained
 +F:    drivers/block/skd*[ch]
 +
  STI CEC DRIVER
  M:    Benjamin Gaignard <benjamin.gaignard@linaro.org>
  S:    Maintained
@@@ -13166,11 -12984,6 +13157,11 @@@ M: Yehezkel Bernat <yehezkel.bernat@int
  S:    Maintained
  F:    drivers/thunderbolt/
  
 +THUNDERX GPIO DRIVER
 +M:    David Daney <david.daney@cavium.com>
 +S:    Maintained
 +F:    drivers/gpio/gpio-thunderx.c
 +
  TI AM437X VPFE DRIVER
  M:    "Lad, Prabhakar" <prabhakar.csengg@gmail.com>
  L:    linux-media@vger.kernel.org
@@@ -13695,8 -13508,7 +13686,7 @@@ F:   Documentation/scsi/ufs.tx
  F:    drivers/scsi/ufs/
  
  UNIVERSAL FLASH STORAGE HOST CONTROLLER DRIVER DWC HOOKS
- M:    Manjunath M Bettegowda <manjumb@synopsys.com>
- M:    Prabu Thangamuthu <prabut@synopsys.com>
+ M:    Joao Pinto <jpinto@synopsys.com>
  L:    linux-scsi@vger.kernel.org
  S:    Supported
  F:    drivers/scsi/ufs/*dwc*
@@@ -14182,7 -13994,6 +14172,7 @@@ F:   drivers/block/virtio_blk.
  F:    include/linux/virtio*.h
  F:    include/uapi/linux/virtio_*.h
  F:    drivers/crypto/virtio/
 +F:    mm/balloon_compaction.c
  
  VIRTIO CRYPTO DRIVER
  M:    Gonglei <arei.gonglei@huawei.com>
diff --combined block/bsg-lib.c
index dd56d7460cb91d504aa22ae9215d00f97c844359,4752dbc3dc4966ece9c30f25b949245cea2bcff9..c82408c7cc3c91181f91d32c0147e18781506691
  #include <scsi/scsi_cmnd.h>
  
  /**
 - * bsg_destroy_job - routine to teardown/delete a bsg job
 + * bsg_teardown_job - routine to teardown a bsg job
   * @job: bsg_job that is to be torn down
   */
 -static void bsg_destroy_job(struct kref *kref)
 +static void bsg_teardown_job(struct kref *kref)
  {
        struct bsg_job *job = container_of(kref, struct bsg_job, kref);
        struct request *rq = job->req;
  
 -      blk_end_request_all(rq, BLK_STS_OK);
 -
        put_device(job->dev);   /* release reference for the request */
  
        kfree(job->request_payload.sg_list);
        kfree(job->reply_payload.sg_list);
 -      kfree(job);
 +
 +      blk_end_request_all(rq, BLK_STS_OK);
  }
  
  void bsg_job_put(struct bsg_job *job)
  {
 -      kref_put(&job->kref, bsg_destroy_job);
 +      kref_put(&job->kref, bsg_teardown_job);
  }
  EXPORT_SYMBOL_GPL(bsg_job_put);
  
@@@ -99,7 -100,7 +99,7 @@@ EXPORT_SYMBOL_GPL(bsg_job_done)
   */
  static void bsg_softirq_done(struct request *rq)
  {
 -      struct bsg_job *job = rq->special;
 +      struct bsg_job *job = blk_mq_rq_to_pdu(rq);
  
        bsg_job_put(job);
  }
@@@ -121,20 -122,33 +121,20 @@@ static int bsg_map_buffer(struct bsg_bu
  }
  
  /**
 - * bsg_create_job - create the bsg_job structure for the bsg request
 + * bsg_prepare_job - create the bsg_job structure for the bsg request
   * @dev: device that is being sent the bsg request
   * @req: BSG request that needs a job structure
   */
 -static int bsg_create_job(struct device *dev, struct request *req)
 +static int bsg_prepare_job(struct device *dev, struct request *req)
  {
        struct request *rsp = req->next_rq;
 -      struct request_queue *q = req->q;
        struct scsi_request *rq = scsi_req(req);
 -      struct bsg_job *job;
 +      struct bsg_job *job = blk_mq_rq_to_pdu(req);
        int ret;
  
 -      BUG_ON(req->special);
 -
 -      job = kzalloc(sizeof(struct bsg_job) + q->bsg_job_size, GFP_KERNEL);
 -      if (!job)
 -              return -ENOMEM;
 -
 -      req->special = job;
 -      job->req = req;
 -      if (q->bsg_job_size)
 -              job->dd_data = (void *)&job[1];
        job->request = rq->cmd;
        job->request_len = rq->cmd_len;
 -      job->reply = rq->sense;
 -      job->reply_len = SCSI_SENSE_BUFFERSIZE; /* Size of sense buffer
 -                                               * allocated */
 +
        if (req->bio) {
                ret = bsg_map_buffer(&job->request_payload, req);
                if (ret)
@@@ -173,6 -187,7 +173,6 @@@ static void bsg_request_fn(struct reque
  {
        struct device *dev = q->queuedata;
        struct request *req;
 -      struct bsg_job *job;
        int ret;
  
        if (!get_device(dev))
                        break;
                spin_unlock_irq(q->queue_lock);
  
 -              ret = bsg_create_job(dev, req);
 +              ret = bsg_prepare_job(dev, req);
                if (ret) {
                        scsi_req(req)->result = ret;
                        blk_end_request_all(req, BLK_STS_OK);
                        continue;
                }
  
 -              job = req->special;
 -              ret = q->bsg_job_fn(job);
 +              ret = q->bsg_job_fn(blk_mq_rq_to_pdu(req));
                spin_lock_irq(q->queue_lock);
                if (ret)
                        break;
        spin_lock_irq(q->queue_lock);
  }
  
 +static int bsg_init_rq(struct request_queue *q, struct request *req, gfp_t gfp)
 +{
 +      struct bsg_job *job = blk_mq_rq_to_pdu(req);
 +      struct scsi_request *sreq = &job->sreq;
 +
 +      memset(job, 0, sizeof(*job));
 +
 +      scsi_req_init(sreq);
 +      sreq->sense_len = SCSI_SENSE_BUFFERSIZE;
 +      sreq->sense = kzalloc(sreq->sense_len, gfp);
 +      if (!sreq->sense)
 +              return -ENOMEM;
 +
 +      job->req = req;
 +      job->reply = sreq->sense;
 +      job->reply_len = sreq->sense_len;
 +      job->dd_data = job + 1;
 +
 +      return 0;
 +}
 +
 +static void bsg_exit_rq(struct request_queue *q, struct request *req)
 +{
 +      struct bsg_job *job = blk_mq_rq_to_pdu(req);
 +      struct scsi_request *sreq = &job->sreq;
 +
 +      kfree(sreq->sense);
 +}
 +
  /**
   * bsg_setup_queue - Create and add the bsg hooks so we can receive requests
   * @dev: device to attach bsg device to
   * @job_fn: bsg job handler
   * @dd_job_size: size of LLD data needed for each job
   */
- struct request_queue *bsg_setup_queue(struct device *dev, char *name,
-               bsg_job_fn *job_fn, int dd_job_size)
+ struct request_queue *bsg_setup_queue(struct device *dev, const char *name,
+               bsg_job_fn *job_fn, int dd_job_size,
+               void (*release)(struct device *))
  {
        struct request_queue *q;
        int ret;
        q = blk_alloc_queue(GFP_KERNEL);
        if (!q)
                return ERR_PTR(-ENOMEM);
 -      q->cmd_size = sizeof(struct scsi_request);
 +      q->cmd_size = sizeof(struct bsg_job) + dd_job_size;
 +      q->init_rq_fn = bsg_init_rq;
 +      q->exit_rq_fn = bsg_exit_rq;
        q->request_fn = bsg_request_fn;
  
        ret = blk_init_allocated_queue(q);
                goto out_cleanup_queue;
  
        q->queuedata = dev;
 -      q->bsg_job_size = dd_job_size;
        q->bsg_job_fn = job_fn;
        queue_flag_set_unlocked(QUEUE_FLAG_BIDI, q);
        queue_flag_set_unlocked(QUEUE_FLAG_SCSI_PASSTHROUGH, q);
        blk_queue_softirq_done(q, bsg_softirq_done);
        blk_queue_rq_timeout(q, BLK_DEFAULT_SG_TIMEOUT);
  
-       ret = bsg_register_queue(q, dev, name, NULL);
+       ret = bsg_register_queue(q, dev, name, release);
        if (ret) {
                printk(KERN_ERR "%s: bsg interface failed to "
                       "initialize - register queue\n", dev->kobj.name);
diff --combined drivers/block/Kconfig
index 104180e3c55e317e76480c8384ca4297acc6b8f3,898e4861488b1a532537bab2c7856c263f3378b8..4a438b8abe27ad9c1774c89f608bd47fecc295a3
@@@ -17,7 -17,6 +17,7 @@@ if BLK_DE
  
  config BLK_DEV_NULL_BLK
        tristate "Null test block driver"
 +      depends on CONFIGFS_FS
  
  config BLK_DEV_FD
        tristate "Normal floppy disk support"
@@@ -112,33 -111,6 +112,6 @@@ source "drivers/block/mtip32xx/Kconfig
  
  source "drivers/block/zram/Kconfig"
  
- config BLK_CPQ_CISS_DA
-       tristate "Compaq Smart Array 5xxx support"
-       depends on PCI
-       select CHECK_SIGNATURE
-       select BLK_SCSI_REQUEST
-       help
-         This is the driver for Compaq Smart Array 5xxx controllers.
-         Everyone using these boards should say Y here.
-         See <file:Documentation/blockdev/cciss.txt> for the current list of
-         boards supported by this driver, and for further information
-         on the use of this driver.
- config CISS_SCSI_TAPE
-       bool "SCSI tape drive support for Smart Array 5xxx"
-       depends on BLK_CPQ_CISS_DA && PROC_FS
-       depends on SCSI=y || SCSI=BLK_CPQ_CISS_DA
-       help
-         When enabled (Y), this option allows SCSI tape drives and SCSI medium
-         changers (tape robots) to be accessed via a Compaq 5xxx array 
-         controller.  (See <file:Documentation/blockdev/cciss.txt> for more details.)
-         "SCSI support" and "SCSI tape support" must also be enabled for this 
-         option to work.
-         When this option is disabled (N), the SCSI portion of the driver 
-         is not compiled.
  config BLK_DEV_DAC960
        tristate "Mylex DAC960/DAC1100 PCI RAID Controller support"
        depends on PCI
@@@ -471,7 -443,7 +444,7 @@@ config VIRTIO_BL
        depends on VIRTIO
        ---help---
          This is the virtual block driver for virtio.  It can be used with
 -          lguest or QEMU based VMMs (like KVM or Xen).  Say Y or M.
 +          QEMU based VMMs (like KVM or Xen).  Say Y or M.
  
  config VIRTIO_BLK_SCSI
        bool "SCSI passthrough request for the Virtio block driver"
diff --combined drivers/scsi/lpfc/lpfc_attr.c
index 7ee1a94c0b33eefd57a6889df66649477ad4713b,0806323829e651925616983cb982b99bed54185a..c17677f494afe7f9baf741732511c62d4ea0bde7
@@@ -205,10 -205,8 +205,10 @@@ lpfc_nvme_info_show(struct device *dev
                                atomic_read(&tgtp->xmt_ls_rsp_error));
  
                len += snprintf(buf+len, PAGE_SIZE-len,
 -                              "FCP: Rcv %08x Release %08x Drop %08x\n",
 +                              "FCP: Rcv %08x Defer %08x Release %08x "
 +                              "Drop %08x\n",
                                atomic_read(&tgtp->rcv_fcp_cmd_in),
 +                              atomic_read(&tgtp->rcv_fcp_cmd_defer),
                                atomic_read(&tgtp->xmt_fcp_release),
                                atomic_read(&tgtp->rcv_fcp_cmd_drop));
  
                                atomic_read(&tgtp->xmt_abort_rsp),
                                atomic_read(&tgtp->xmt_abort_rsp_error));
  
-               spin_lock(&phba->sli4_hba.nvmet_ctx_get_lock);
-               spin_lock(&phba->sli4_hba.nvmet_ctx_put_lock);
-               tot = phba->sli4_hba.nvmet_xri_cnt -
-                       (phba->sli4_hba.nvmet_ctx_get_cnt +
-                       phba->sli4_hba.nvmet_ctx_put_cnt);
-               spin_unlock(&phba->sli4_hba.nvmet_ctx_put_lock);
-               spin_unlock(&phba->sli4_hba.nvmet_ctx_get_lock);
+               /* Calculate outstanding IOs */
+               tot = atomic_read(&tgtp->rcv_fcp_cmd_drop);
+               tot += atomic_read(&tgtp->xmt_fcp_release);
+               tot = atomic_read(&tgtp->rcv_fcp_cmd_in) - tot;
  
                len += snprintf(buf + len, PAGE_SIZE - len,
                                "IO_CTX: %08x  WAIT: cur %08x tot %08x\n"
@@@ -1893,6 -1888,36 +1890,36 @@@ static inline bool lpfc_rangecheck(uin
        return val >= min && val <= max;
  }
  
+ /**
+  * lpfc_enable_bbcr_set: Sets an attribute value.
+  * @phba: pointer to the adapter structure.
+  * @val: integer attribute value.
+  *
+  * Description:
+  * Validates the min and max values then sets the
+  * adapter config field if in the valid range. prints error message
+  * and does not set the parameter if invalid.
+  *
+  * Returns:
+  * zero on success
+  * -EINVAL if val is invalid
+  */
+ static ssize_t
+ lpfc_enable_bbcr_set(struct lpfc_hba *phba, uint val)
+ {
+       if (lpfc_rangecheck(val, 0, 1) && phba->sli_rev == LPFC_SLI_REV4) {
+               lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                               "3068 %s_enable_bbcr changed from %d to %d\n",
+                               LPFC_DRIVER_NAME, phba->cfg_enable_bbcr, val);
+               phba->cfg_enable_bbcr = val;
+               return 0;
+       }
+       lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                       "0451 %s_enable_bbcr cannot set to %d, range is 0, 1\n",
+                       LPFC_DRIVER_NAME, val);
+       return -EINVAL;
+ }
  /**
   * lpfc_param_show - Return a cfg attribute value in decimal
   *
@@@ -5116,6 -5141,14 +5143,14 @@@ LPFC_ATTR_R(sg_seg_cnt, LPFC_DEFAULT_SG
   */
  LPFC_ATTR_R(enable_mds_diags, 0, 0, 1, "Enable MDS Diagnostics");
  
+ /*
+  * lpfc_enable_bbcr: Enable BB Credit Recovery
+  *       0  = BB Credit Recovery disabled
+  *       1  = BB Credit Recovery enabled (default)
+  * Value range is [0,1]. Default value is 1.
+  */
+ LPFC_BBCR_ATTR_RW(enable_bbcr, 1, 0, 1, "Enable BBC Recovery");
  struct device_attribute *lpfc_hba_attrs[] = {
        &dev_attr_nvme_info,
        &dev_attr_bg_info,
        &dev_attr_protocol,
        &dev_attr_lpfc_xlane_supported,
        &dev_attr_lpfc_enable_mds_diags,
+       &dev_attr_lpfc_enable_bbcr,
        NULL,
  };
  
@@@ -6234,11 -6268,13 +6270,13 @@@ lpfc_get_cfgparam(struct lpfc_hba *phba
        lpfc_nvmet_fb_size_init(phba, lpfc_nvmet_fb_size);
        lpfc_fcp_io_channel_init(phba, lpfc_fcp_io_channel);
        lpfc_nvme_io_channel_init(phba, lpfc_nvme_io_channel);
+       lpfc_enable_bbcr_init(phba, lpfc_enable_bbcr);
  
        if (phba->sli_rev != LPFC_SLI_REV4) {
                /* NVME only supported on SLI4 */
                phba->nvmet_support = 0;
                phba->cfg_enable_fc4_type = LPFC_ENABLE_FCP;
+               phba->cfg_enable_bbcr = 0;
        } else {
                /* We MUST have FCP support */
                if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP))
diff --combined drivers/scsi/lpfc/lpfc_debugfs.c
index 744f3f395b64852a294a9300adb64a496087aed7,c292264aa687b787351f0389f896f56bd1068462..d50c481ec41ccb0f8e3258c16dd9ec5897a6669c
@@@ -782,11 -782,8 +782,11 @@@ lpfc_debugfs_nvmestat_data(struct lpfc_
                                atomic_read(&tgtp->xmt_ls_rsp_error));
  
                len += snprintf(buf + len, size - len,
 -                              "FCP: Rcv %08x Drop %08x\n",
 +                              "FCP: Rcv %08x Defer %08x Release %08x "
 +                              "Drop %08x\n",
                                atomic_read(&tgtp->rcv_fcp_cmd_in),
 +                              atomic_read(&tgtp->rcv_fcp_cmd_defer),
 +                              atomic_read(&tgtp->xmt_fcp_release),
                                atomic_read(&tgtp->rcv_fcp_cmd_drop));
  
                if (atomic_read(&tgtp->rcv_fcp_cmd_in) !=
                        spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock);
                }
  
-               spin_lock(&phba->sli4_hba.nvmet_ctx_get_lock);
-               spin_lock(&phba->sli4_hba.nvmet_ctx_put_lock);
-               tot = phba->sli4_hba.nvmet_xri_cnt -
-                       (phba->sli4_hba.nvmet_ctx_get_cnt +
-                       phba->sli4_hba.nvmet_ctx_put_cnt);
-               spin_unlock(&phba->sli4_hba.nvmet_ctx_put_lock);
-               spin_unlock(&phba->sli4_hba.nvmet_ctx_get_lock);
+               /* Calculate outstanding IOs */
+               tot = atomic_read(&tgtp->rcv_fcp_cmd_drop);
+               tot += atomic_read(&tgtp->xmt_fcp_release);
+               tot = atomic_read(&tgtp->rcv_fcp_cmd_in) - tot;
  
                len += snprintf(buf + len, size - len,
                                "IO_CTX: %08x  WAIT: cur %08x tot %08x\n"
diff --combined drivers/scsi/lpfc/lpfc_nvmet.c
index bbbd0f84160d36563008a212afd8252f86ef15c8,346af470f36012c43767d08db3e4ce0e2574f8ac..0b7c1a49e203f8f31024d8756474084f8cacb529
@@@ -170,12 -170,14 +170,14 @@@ lpfc_nvmet_ctxbuf_post(struct lpfc_hba 
        struct lpfc_nvmet_tgtport *tgtp;
        struct fc_frame_header *fc_hdr;
        struct rqb_dmabuf *nvmebuf;
+       struct lpfc_nvmet_ctx_info *infop;
        uint32_t *payload;
        uint32_t size, oxid, sid, rc;
+       int cpu;
        unsigned long iflag;
  
        if (ctxp->txrdy) {
-               pci_pool_free(phba->txrdy_payload_pool, ctxp->txrdy,
+               dma_pool_free(phba->txrdy_payload_pool, ctxp->txrdy,
                              ctxp->txrdy_phys);
                ctxp->txrdy = NULL;
                ctxp->txrdy_phys = 0;
        }
        spin_unlock_irqrestore(&phba->sli4_hba.nvmet_io_wait_lock, iflag);
  
-       spin_lock_irqsave(&phba->sli4_hba.nvmet_ctx_put_lock, iflag);
-       list_add_tail(&ctx_buf->list,
-                     &phba->sli4_hba.lpfc_nvmet_ctx_put_list);
-       phba->sli4_hba.nvmet_ctx_put_cnt++;
-       spin_unlock_irqrestore(&phba->sli4_hba.nvmet_ctx_put_lock, iflag);
+       /*
+        * Use the CPU context list, from the MRQ the IO was received on
+        * (ctxp->idx), to save context structure.
+        */
+       cpu = smp_processor_id();
+       infop = lpfc_get_ctx_list(phba, cpu, ctxp->idx);
+       spin_lock_irqsave(&infop->nvmet_ctx_list_lock, iflag);
+       list_add_tail(&ctx_buf->list, &infop->nvmet_ctx_list);
+       infop->nvmet_ctx_list_cnt++;
+       spin_unlock_irqrestore(&infop->nvmet_ctx_list_lock, iflag);
  #endif
  }
  
@@@ -552,7 -559,7 +559,7 @@@ lpfc_nvmet_xmt_fcp_op_cmp(struct lpfc_h
                /* lpfc_nvmet_xmt_fcp_release() will recycle the context */
        } else {
                ctxp->entry_cnt++;
-               start_clean = offsetof(struct lpfc_iocbq, wqe);
+               start_clean = offsetof(struct lpfc_iocbq, iocb_flag);
                memset(((char *)cmdwqe) + start_clean, 0,
                       (sizeof(struct lpfc_iocbq) - start_clean));
  #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
@@@ -841,31 -848,12 +848,31 @@@ lpfc_nvmet_xmt_fcp_release(struct nvmet
        lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
  }
  
 +static void
 +lpfc_nvmet_defer_rcv(struct nvmet_fc_target_port *tgtport,
 +                   struct nvmefc_tgt_fcp_req *rsp)
 +{
 +      struct lpfc_nvmet_tgtport *tgtp;
 +      struct lpfc_nvmet_rcv_ctx *ctxp =
 +              container_of(rsp, struct lpfc_nvmet_rcv_ctx, ctx.fcp_req);
 +      struct rqb_dmabuf *nvmebuf = ctxp->rqb_buffer;
 +      struct lpfc_hba *phba = ctxp->phba;
 +
 +      lpfc_nvmeio_data(phba, "NVMET DEFERRCV: xri x%x sz %d CPU %02x\n",
 +                       ctxp->oxid, ctxp->size, smp_processor_id());
 +
 +      tgtp = phba->targetport->private;
 +      atomic_inc(&tgtp->rcv_fcp_cmd_defer);
 +      lpfc_rq_buf_free(phba, &nvmebuf->hbuf); /* repost */
 +}
 +
  static struct nvmet_fc_target_template lpfc_tgttemplate = {
        .targetport_delete = lpfc_nvmet_targetport_delete,
        .xmt_ls_rsp     = lpfc_nvmet_xmt_ls_rsp,
        .fcp_op         = lpfc_nvmet_xmt_fcp_op,
        .fcp_abort      = lpfc_nvmet_xmt_fcp_abort,
        .fcp_req_release = lpfc_nvmet_xmt_fcp_release,
 +      .defer_rcv      = lpfc_nvmet_defer_rcv,
  
        .max_hw_queues  = 1,
        .max_sgl_segments = LPFC_NVMET_DEFAULT_SEGS,
  };
  
  static void
- lpfc_nvmet_cleanup_io_context(struct lpfc_hba *phba)
+ __lpfc_nvmet_clean_io_for_cpu(struct lpfc_hba *phba,
+               struct lpfc_nvmet_ctx_info *infop)
  {
        struct lpfc_nvmet_ctxbuf *ctx_buf, *next_ctx_buf;
        unsigned long flags;
  
-       spin_lock_irqsave(&phba->sli4_hba.nvmet_ctx_get_lock, flags);
-       spin_lock(&phba->sli4_hba.nvmet_ctx_put_lock);
+       spin_lock_irqsave(&infop->nvmet_ctx_list_lock, flags);
        list_for_each_entry_safe(ctx_buf, next_ctx_buf,
-                       &phba->sli4_hba.lpfc_nvmet_ctx_get_list, list) {
+                               &infop->nvmet_ctx_list, list) {
                spin_lock(&phba->sli4_hba.abts_nvme_buf_list_lock);
                list_del_init(&ctx_buf->list);
                spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock);
-               __lpfc_clear_active_sglq(phba,
-                                        ctx_buf->sglq->sli4_lxritag);
+               __lpfc_clear_active_sglq(phba, ctx_buf->sglq->sli4_lxritag);
                ctx_buf->sglq->state = SGL_FREED;
                ctx_buf->sglq->ndlp = NULL;
  
                spin_lock(&phba->sli4_hba.sgl_list_lock);
                list_add_tail(&ctx_buf->sglq->list,
-                             &phba->sli4_hba.lpfc_nvmet_sgl_list);
+                               &phba->sli4_hba.lpfc_nvmet_sgl_list);
                spin_unlock(&phba->sli4_hba.sgl_list_lock);
  
                lpfc_sli_release_iocbq(phba, ctx_buf->iocbq);
                kfree(ctx_buf->context);
        }
-       list_for_each_entry_safe(ctx_buf, next_ctx_buf,
-                       &phba->sli4_hba.lpfc_nvmet_ctx_put_list, list) {
-               spin_lock(&phba->sli4_hba.abts_nvme_buf_list_lock);
-               list_del_init(&ctx_buf->list);
-               spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock);
-               __lpfc_clear_active_sglq(phba,
-                                        ctx_buf->sglq->sli4_lxritag);
-               ctx_buf->sglq->state = SGL_FREED;
-               ctx_buf->sglq->ndlp = NULL;
+       spin_unlock_irqrestore(&infop->nvmet_ctx_list_lock, flags);
+ }
  
-               spin_lock(&phba->sli4_hba.sgl_list_lock);
-               list_add_tail(&ctx_buf->sglq->list,
-                             &phba->sli4_hba.lpfc_nvmet_sgl_list);
-               spin_unlock(&phba->sli4_hba.sgl_list_lock);
+ static void
+ lpfc_nvmet_cleanup_io_context(struct lpfc_hba *phba)
+ {
+       struct lpfc_nvmet_ctx_info *infop;
+       int i, j;
  
-               lpfc_sli_release_iocbq(phba, ctx_buf->iocbq);
-               kfree(ctx_buf->context);
+       /* The first context list, MRQ 0 CPU 0 */
+       infop = phba->sli4_hba.nvmet_ctx_info;
+       if (!infop)
+               return;
+       /* Cycle the entire CPU context list for every MRQ */
+       for (i = 0; i < phba->cfg_nvmet_mrq; i++) {
+               for (j = 0; j < phba->sli4_hba.num_present_cpu; j++) {
+                       __lpfc_nvmet_clean_io_for_cpu(phba, infop);
+                       infop++; /* next */
+               }
        }
-       spin_unlock(&phba->sli4_hba.nvmet_ctx_put_lock);
-       spin_unlock_irqrestore(&phba->sli4_hba.nvmet_ctx_get_lock, flags);
+       kfree(phba->sli4_hba.nvmet_ctx_info);
+       phba->sli4_hba.nvmet_ctx_info = NULL;
  }
  
  static int
@@@ -932,15 -923,71 +942,71 @@@ lpfc_nvmet_setup_io_context(struct lpfc
        struct lpfc_nvmet_ctxbuf *ctx_buf;
        struct lpfc_iocbq *nvmewqe;
        union lpfc_wqe128 *wqe;
-       int i;
+       struct lpfc_nvmet_ctx_info *last_infop;
+       struct lpfc_nvmet_ctx_info *infop;
+       int i, j, idx;
  
        lpfc_printf_log(phba, KERN_INFO, LOG_NVME,
                        "6403 Allocate NVMET resources for %d XRIs\n",
                        phba->sli4_hba.nvmet_xri_cnt);
  
+       phba->sli4_hba.nvmet_ctx_info = kcalloc(
+               phba->sli4_hba.num_present_cpu * phba->cfg_nvmet_mrq,
+               sizeof(struct lpfc_nvmet_ctx_info), GFP_KERNEL);
+       if (!phba->sli4_hba.nvmet_ctx_info) {
+               lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                               "6419 Failed allocate memory for "
+                               "nvmet context lists\n");
+               return -ENOMEM;
+       }
+       /*
+        * Assuming X CPUs in the system, and Y MRQs, allocate some
+        * lpfc_nvmet_ctx_info structures as follows:
+        *
+        * cpu0/mrq0 cpu1/mrq0 ... cpuX/mrq0
+        * cpu0/mrq1 cpu1/mrq1 ... cpuX/mrq1
+        * ...
+        * cpuX/mrqY cpuX/mrqY ... cpuX/mrqY
+        *
+        * Each line represents a MRQ "silo" containing an entry for
+        * every CPU.
+        *
+        * MRQ X is initially assumed to be associated with CPU X, thus
+        * contexts are initially distributed across all MRQs using
+        * the MRQ index (N) as follows cpuN/mrqN. When contexts are
+        * freed, they are freed to the MRQ silo based on the CPU number
+        * of the IO completion. Thus a context that was allocated for MRQ A
+        * whose IO completed on CPU B will be freed to cpuB/mrqA.
+        */
+       infop = phba->sli4_hba.nvmet_ctx_info;
+       for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
+               for (j = 0; j < phba->cfg_nvmet_mrq; j++) {
+                       INIT_LIST_HEAD(&infop->nvmet_ctx_list);
+                       spin_lock_init(&infop->nvmet_ctx_list_lock);
+                       infop->nvmet_ctx_list_cnt = 0;
+                       infop++;
+               }
+       }
+       /*
+        * Setup the next CPU context info ptr for each MRQ.
+        * MRQ 0 will cycle thru CPUs 0 - X separately from
+        * MRQ 1 cycling thru CPUs 0 - X, and so on.
+        */
+       for (j = 0; j < phba->cfg_nvmet_mrq; j++) {
+               last_infop = lpfc_get_ctx_list(phba, 0, j);
+               for (i = phba->sli4_hba.num_present_cpu - 1;  i >= 0; i--) {
+                       infop = lpfc_get_ctx_list(phba, i, j);
+                       infop->nvmet_ctx_next_cpu = last_infop;
+                       last_infop = infop;
+               }
+       }
        /* For all nvmet xris, allocate resources needed to process a
         * received command on a per xri basis.
         */
+       idx = 0;
        for (i = 0; i < phba->sli4_hba.nvmet_xri_cnt; i++) {
                ctx_buf = kzalloc(sizeof(*ctx_buf), GFP_KERNEL);
                if (!ctx_buf) {
                /* Word 7 */
                bf_set(wqe_ct, &wqe->generic.wqe_com, SLI4_CT_RPI);
                bf_set(wqe_class, &wqe->generic.wqe_com, CLASS3);
-               bf_set(wqe_pu, &wqe->generic.wqe_com, 1);
                /* Word 10 */
                bf_set(wqe_nvme, &wqe->fcp_tsend.wqe_com, 1);
                bf_set(wqe_ebde_cnt, &wqe->generic.wqe_com, 0);
                                        "6407 Ran out of NVMET XRIs\n");
                        return -ENOMEM;
                }
-               spin_lock(&phba->sli4_hba.nvmet_ctx_get_lock);
-               list_add_tail(&ctx_buf->list,
-                             &phba->sli4_hba.lpfc_nvmet_ctx_get_list);
-               spin_unlock(&phba->sli4_hba.nvmet_ctx_get_lock);
+               /*
+                * Add ctx to MRQidx context list. Our initial assumption
+                * is MRQidx will be associated with CPUidx. This association
+                * can change on the fly.
+                */
+               infop = lpfc_get_ctx_list(phba, idx, idx);
+               spin_lock(&infop->nvmet_ctx_list_lock);
+               list_add_tail(&ctx_buf->list, &infop->nvmet_ctx_list);
+               infop->nvmet_ctx_list_cnt++;
+               spin_unlock(&infop->nvmet_ctx_list_lock);
+               /* Spread ctx structures evenly across all MRQs */
+               idx++;
+               if (idx >= phba->cfg_nvmet_mrq)
+                       idx = 0;
+       }
+       infop = phba->sli4_hba.nvmet_ctx_info;
+       for (j = 0; j < phba->cfg_nvmet_mrq; j++) {
+               for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
+                       lpfc_printf_log(phba, KERN_INFO, LOG_NVME | LOG_INIT,
+                                       "6408 TOTAL NVMET ctx for CPU %d "
+                                       "MRQ %d: cnt %d nextcpu %p\n",
+                                       i, j, infop->nvmet_ctx_list_cnt,
+                                       infop->nvmet_ctx_next_cpu);
+                       infop++;
+               }
        }
-       phba->sli4_hba.nvmet_ctx_get_cnt = phba->sli4_hba.nvmet_xri_cnt;
        return 0;
  }
  
@@@ -1365,10 -1434,65 +1453,65 @@@ dropit
  #endif
  }
  
+ static struct lpfc_nvmet_ctxbuf *
+ lpfc_nvmet_replenish_context(struct lpfc_hba *phba,
+                            struct lpfc_nvmet_ctx_info *current_infop)
+ {
+       struct lpfc_nvmet_ctxbuf *ctx_buf = NULL;
+       struct lpfc_nvmet_ctx_info *get_infop;
+       int i;
+       /*
+        * The current_infop for the MRQ a NVME command IU was received
+        * on is empty. Our goal is to replenish this MRQ's context
+        * list from another CPU.
+        *
+        * First we need to pick a context list to start looking on.
+        * nvmet_ctx_start_cpu has available context the last time
+        * we needed to replenish this CPU where nvmet_ctx_next_cpu
+        * is just the next sequential CPU for this MRQ.
+        */
+       if (current_infop->nvmet_ctx_start_cpu)
+               get_infop = current_infop->nvmet_ctx_start_cpu;
+       else
+               get_infop = current_infop->nvmet_ctx_next_cpu;
+       for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
+               if (get_infop == current_infop) {
+                       get_infop = get_infop->nvmet_ctx_next_cpu;
+                       continue;
+               }
+               spin_lock(&get_infop->nvmet_ctx_list_lock);
+               /* Just take the entire context list, if there are any */
+               if (get_infop->nvmet_ctx_list_cnt) {
+                       list_splice_init(&get_infop->nvmet_ctx_list,
+                                   &current_infop->nvmet_ctx_list);
+                       current_infop->nvmet_ctx_list_cnt =
+                               get_infop->nvmet_ctx_list_cnt - 1;
+                       get_infop->nvmet_ctx_list_cnt = 0;
+                       spin_unlock(&get_infop->nvmet_ctx_list_lock);
+                       current_infop->nvmet_ctx_start_cpu = get_infop;
+                       list_remove_head(&current_infop->nvmet_ctx_list,
+                                        ctx_buf, struct lpfc_nvmet_ctxbuf,
+                                        list);
+                       return ctx_buf;
+               }
+               /* Otherwise, move on to the next CPU for this MRQ */
+               spin_unlock(&get_infop->nvmet_ctx_list_lock);
+               get_infop = get_infop->nvmet_ctx_next_cpu;
+       }
+       /* Nothing found, all contexts for the MRQ are in-flight */
+       return NULL;
+ }
  /**
   * lpfc_nvmet_unsol_fcp_buffer - Process an unsolicited event data buffer
   * @phba: pointer to lpfc hba data structure.
-  * @pring: pointer to a SLI ring.
+  * @idx: relative index of MRQ vector
   * @nvmebuf: pointer to lpfc nvme command HBQ data structure.
   *
   * This routine is used for processing the WQE associated with a unsolicited
   **/
  static void
  lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba,
-                           struct lpfc_sli_ring *pring,
+                           uint32_t idx,
                            struct rqb_dmabuf *nvmebuf,
                            uint64_t isr_timestamp)
  {
- #if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
        struct lpfc_nvmet_rcv_ctx *ctxp;
        struct lpfc_nvmet_tgtport *tgtp;
        struct fc_frame_header *fc_hdr;
        struct lpfc_nvmet_ctxbuf *ctx_buf;
+       struct lpfc_nvmet_ctx_info *current_infop;
        uint32_t *payload;
        uint32_t size, oxid, sid, rc, qno;
        unsigned long iflag;
+       int current_cpu;
  #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
        uint32_t id;
  #endif
  
+       if (!IS_ENABLED(CONFIG_NVME_TARGET_FC))
+               return;
        ctx_buf = NULL;
        if (!nvmebuf || !phba->targetport) {
                lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
                goto dropit;
        }
  
-       spin_lock_irqsave(&phba->sli4_hba.nvmet_ctx_get_lock, iflag);
-       if (phba->sli4_hba.nvmet_ctx_get_cnt) {
-               list_remove_head(&phba->sli4_hba.lpfc_nvmet_ctx_get_list,
+       /*
+        * Get a pointer to the context list for this MRQ based on
+        * the CPU this MRQ IRQ is associated with. If the CPU association
+        * changes from our initial assumption, the context list could
+        * be empty, thus it would need to be replenished with the
+        * context list from another CPU for this MRQ.
+        */
+       current_cpu = smp_processor_id();
+       current_infop = lpfc_get_ctx_list(phba, current_cpu, idx);
+       spin_lock_irqsave(&current_infop->nvmet_ctx_list_lock, iflag);
+       if (current_infop->nvmet_ctx_list_cnt) {
+               list_remove_head(&current_infop->nvmet_ctx_list,
                                 ctx_buf, struct lpfc_nvmet_ctxbuf, list);
-               phba->sli4_hba.nvmet_ctx_get_cnt--;
+               current_infop->nvmet_ctx_list_cnt--;
        } else {
-               spin_lock(&phba->sli4_hba.nvmet_ctx_put_lock);
-               if (phba->sli4_hba.nvmet_ctx_put_cnt) {
-                       list_splice(&phba->sli4_hba.lpfc_nvmet_ctx_put_list,
-                                   &phba->sli4_hba.lpfc_nvmet_ctx_get_list);
-                       INIT_LIST_HEAD(&phba->sli4_hba.lpfc_nvmet_ctx_put_list);
-                       phba->sli4_hba.nvmet_ctx_get_cnt =
-                               phba->sli4_hba.nvmet_ctx_put_cnt;
-                       phba->sli4_hba.nvmet_ctx_put_cnt = 0;
-                       spin_unlock(&phba->sli4_hba.nvmet_ctx_put_lock);
-                       list_remove_head(
-                               &phba->sli4_hba.lpfc_nvmet_ctx_get_list,
-                               ctx_buf, struct lpfc_nvmet_ctxbuf, list);
-                       phba->sli4_hba.nvmet_ctx_get_cnt--;
-               } else {
-                       spin_unlock(&phba->sli4_hba.nvmet_ctx_put_lock);
-               }
+               ctx_buf = lpfc_nvmet_replenish_context(phba, current_infop);
        }
-       spin_unlock_irqrestore(&phba->sli4_hba.nvmet_ctx_get_lock, iflag);
+       spin_unlock_irqrestore(&current_infop->nvmet_ctx_list_lock, iflag);
  
        fc_hdr = (struct fc_frame_header *)(nvmebuf->hbuf.virt);
        oxid = be16_to_cpu(fc_hdr->fh_ox_id);
        ctxp->size = size;
        ctxp->oxid = oxid;
        ctxp->sid = sid;
+       ctxp->idx = idx;
        ctxp->state = LPFC_NVMET_STE_RCV;
        ctxp->entry_cnt = 1;
        ctxp->flag = 0;
                return;
        }
  
 +      /* Processing of FCP command is deferred */
 +      if (rc == -EOVERFLOW) {
 +              lpfc_nvmeio_data(phba,
 +                               "NVMET RCV BUSY: xri x%x sz %d from %06x\n",
 +                               oxid, size, sid);
 +              /* defer reposting rcv buffer till .defer_rcv callback */
 +              ctxp->rqb_buffer = nvmebuf;
 +              atomic_inc(&tgtp->rcv_fcp_cmd_out);
 +              return;
 +      }
 +
        atomic_inc(&tgtp->rcv_fcp_cmd_drop);
        lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
                        "6159 FCP Drop IO x%x: err x%x: x%x x%x x%x\n",
@@@ -1556,7 -1667,6 +1697,6 @@@ dropit
  
        if (nvmebuf)
                lpfc_rq_buf_free(phba, &nvmebuf->hbuf); /* repost */
- #endif
  }
  
  /**
@@@ -1591,7 -1701,7 +1731,7 @@@ lpfc_nvmet_unsol_ls_event(struct lpfc_h
  /**
   * lpfc_nvmet_unsol_fcp_event - Process an unsolicited event from an nvme nport
   * @phba: pointer to lpfc hba data structure.
-  * @pring: pointer to a SLI ring.
+  * @idx: relative index of MRQ vector
   * @nvmebuf: pointer to received nvme data structure.
   *
   * This routine is used to process an unsolicited event received from a SLI
   **/
  void
  lpfc_nvmet_unsol_fcp_event(struct lpfc_hba *phba,
-                          struct lpfc_sli_ring *pring,
+                          uint32_t idx,
                           struct rqb_dmabuf *nvmebuf,
                           uint64_t isr_timestamp)
  {
                lpfc_rq_buf_free(phba, &nvmebuf->hbuf);
                return;
        }
-       lpfc_nvmet_unsol_fcp_buffer(phba, pring, nvmebuf,
+       lpfc_nvmet_unsol_fcp_buffer(phba, idx, nvmebuf,
                                    isr_timestamp);
  }
  
@@@ -1863,6 -1973,7 +2003,7 @@@ lpfc_nvmet_prep_fcp_wqe(struct lpfc_hb
                       nvmewqe->sli4_xritag);
  
                /* Word 7 */
+               bf_set(wqe_pu, &wqe->fcp_tsend.wqe_com, 1);
                bf_set(wqe_cmnd, &wqe->fcp_tsend.wqe_com, CMD_FCP_TSEND64_WQE);
  
                /* Word 8 */
  
        case NVMET_FCOP_WRITEDATA:
                /* Words 0 - 2 : The first sg segment */
-               txrdy = pci_pool_alloc(phba->txrdy_payload_pool,
+               txrdy = dma_pool_alloc(phba->txrdy_payload_pool,
                                       GFP_KERNEL, &physaddr);
                if (!txrdy) {
                        lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
                       nvmewqe->sli4_xritag);
  
                /* Word 7 */
+               bf_set(wqe_pu, &wqe->fcp_treceive.wqe_com, 1);
                bf_set(wqe_ar, &wqe->fcp_treceive.wqe_com, 0);
                bf_set(wqe_cmnd, &wqe->fcp_treceive.wqe_com,
                       CMD_FCP_TRECEIVE64_WQE);
                       nvmewqe->sli4_xritag);
  
                /* Word 7 */
+               bf_set(wqe_pu, &wqe->fcp_trsp.wqe_com, 0);
                bf_set(wqe_ag, &wqe->fcp_trsp.wqe_com, 1);
                bf_set(wqe_cmnd, &wqe->fcp_trsp.wqe_com, CMD_FCP_TRSP64_WQE);
  
index 48a76788b003cb746afa45376272af362c4446ce,88cd62a11c3377b301928203e2a5b5a2e0741406..25a65b0bb7f33a8f764c8d0ac816fa98fcd85434
@@@ -49,7 -49,6 +49,7 @@@ struct lpfc_nvmet_tgtport 
        atomic_t rcv_fcp_cmd_in;
        atomic_t rcv_fcp_cmd_out;
        atomic_t rcv_fcp_cmd_drop;
 +      atomic_t rcv_fcp_cmd_defer;
        atomic_t xmt_fcp_release;
  
        /* Stats counters - lpfc_nvmet_xmt_fcp_op */
        atomic_t xmt_abort_rsp_error;
  };
  
+ struct lpfc_nvmet_ctx_info {
+       struct list_head nvmet_ctx_list;
+       spinlock_t      nvmet_ctx_list_lock; /* lock per CPU */
+       struct lpfc_nvmet_ctx_info *nvmet_ctx_next_cpu;
+       struct lpfc_nvmet_ctx_info *nvmet_ctx_start_cpu;
+       uint16_t        nvmet_ctx_list_cnt;
+       char pad[16];  /* pad to a cache-line */
+ };
+ /* This retrieves the context info associated with the specified cpu / mrq */
+ #define lpfc_get_ctx_list(phba, cpu, mrq)  \
+       (phba->sli4_hba.nvmet_ctx_info + ((cpu * phba->cfg_nvmet_mrq) + mrq))
  struct lpfc_nvmet_rcv_ctx {
        union {
                struct nvmefc_tgt_ls_req ls_req;
        uint16_t size;
        uint16_t entry_cnt;
        uint16_t cpu;
+       uint16_t idx;
        uint16_t state;
        /* States */
  #define LPFC_NVMET_STE_LS_RCV         1
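
The per-CPU context machinery above (struct lpfc_nvmet_ctx_info plus the
lpfc_get_ctx_list() macro) boils down to one free list per CPU per MRQ, with
stealing from a neighbouring CPU when the local list runs dry. A minimal
sketch of a consumer, distilled from lpfc_nvmet_unsol_fcp_buffer() in the
lpfc_nvmet.c hunk above; the wrapper name my_get_ctxbuf() is illustrative
only and is not part of the driver:

        /* Take one context buffer from the free list owned by this CPU for
         * the given MRQ, replenishing from another CPU if the list is empty.
         */
        static struct lpfc_nvmet_ctxbuf *my_get_ctxbuf(struct lpfc_hba *phba,
                                                       uint32_t mrq_idx)
        {
                struct lpfc_nvmet_ctx_info *infop;
                struct lpfc_nvmet_ctxbuf *ctx_buf = NULL;
                unsigned long iflag;

                infop = lpfc_get_ctx_list(phba, smp_processor_id(), mrq_idx);
                spin_lock_irqsave(&infop->nvmet_ctx_list_lock, iflag);
                if (infop->nvmet_ctx_list_cnt) {
                        list_remove_head(&infop->nvmet_ctx_list, ctx_buf,
                                         struct lpfc_nvmet_ctxbuf, list);
                        infop->nvmet_ctx_list_cnt--;
                } else {
                        /* steal a batch from another CPU serving this MRQ */
                        ctx_buf = lpfc_nvmet_replenish_context(phba, infop);
                }
                spin_unlock_irqrestore(&infop->nvmet_ctx_list_lock, iflag);
                return ctx_buf;
        }
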
index 53748d61e20e7fb20877b4569a9b68c7caa66699,e719b1e8b9ce52988ede52623c011e1e289b4d6d..89e2cfe7d1cc0ffdef1334ca73a59729ca69f70b
@@@ -205,16 -205,6 +205,6 @@@ static int device_reset(struct scsi_cmn
        return SUCCESS;
  }
  
- /* Simulate a SCSI bus reset by resetting the device's USB port. */
- static int bus_reset(struct scsi_cmnd *srb)
- {
-       struct rtsx_dev *dev = host_to_rtsx(srb->device->host);
-       dev_info(&dev->pci->dev, "%s called\n", __func__);
-       return SUCCESS;
- }
  /*
   * this defines our host template, with which we'll allocate hosts
   */
@@@ -231,7 -221,6 +221,6 @@@ static struct scsi_host_template rtsx_h
        /* error and abort handlers */
        .eh_abort_handler =             command_abort,
        .eh_device_reset_handler =      device_reset,
-       .eh_bus_reset_handler =         bus_reset,
  
        /* queue commands only, only one command per LUN */
        .can_queue =                    1,
@@@ -999,7 -988,7 +988,7 @@@ static int rtsx_probe(struct pci_dev *p
  
        /* We come here if there are any problems */
  errout:
 -      dev_err(&pci->dev, "rtsx_probe() failed\n");
 +      dev_err(&pci->dev, "%s failed\n", __func__);
        release_everything(dev);
  
        return err;
@@@ -1009,7 -998,7 +998,7 @@@ static void rtsx_remove(struct pci_dev 
  {
        struct rtsx_dev *dev = pci_get_drvdata(pci);
  
 -      dev_info(&pci->dev, "rtsx_remove() called\n");
 +      dev_info(&pci->dev, "%s called\n", __func__);
  
        quiesce_and_remove_host(dev);
        release_everything(dev);
index 8567e447891e8434c6ba5a9e7197ff56b47abaaa,ddce92552ff57052a81ed8375b25ff67a8d9354a..419dba89af065b431d7923cbad0de4c309262efa
@@@ -1,5 -1,4 +1,5 @@@
 -/* Copyright (c) 2012 - 2015 UNISYS CORPORATION
 +/*
 + * Copyright (c) 2012 - 2015 UNISYS CORPORATION
   * All rights reserved.
   *
   * This program is free software; you can redistribute it and/or modify
@@@ -15,6 -14,7 +15,6 @@@
   */
  
  #include <linux/debugfs.h>
 -#include <linux/skbuff.h>
  #include <linux/kthread.h>
  #include <linux/idr.h>
  #include <linux/seq_file.h>
@@@ -39,17 -39,16 +39,16 @@@ static struct visor_channeltype_descrip
        /* Note that the only channel type we expect to be reported by the
         * bus driver is the VISOR_VHBA channel.
         */
 -      { VISOR_VHBA_CHANNEL_UUID, "sparvhba" },
 -      { NULL_UUID_LE, NULL }
 +      { VISOR_VHBA_CHANNEL_GUID, "sparvhba" },
 +      {}
  };
  
  MODULE_DEVICE_TABLE(visorbus, visorhba_channel_types);
 -MODULE_ALIAS("visorbus:" VISOR_VHBA_CHANNEL_UUID_STR);
 +MODULE_ALIAS("visorbus:" VISOR_VHBA_CHANNEL_GUID_STR);
  
  struct visordisk_info {
+       struct scsi_device *sdev;
        u32 valid;
-       /* Disk Path */
-       u32 channel, id, lun;
        atomic_t ios_threshold;
        atomic_t error_count;
        struct visordisk_info *next;
  
  struct scsipending {
        struct uiscmdrsp cmdrsp;
 -      void *sent;             /* The Data being tracked */
 -      char cmdtype;           /* Type of pointer that is being stored */
 +      /* The Data being tracked */
 +      void *sent;
 +      /* Type of pointer that is being stored */
 +      char cmdtype;
  };
  
  /* Each scsi_host has a host_data area that contains this struct. */
@@@ -74,8 -71,7 +73,8 @@@ struct visorhba_devdata 
        struct scsipending pending[MAX_PENDING_REQUESTS];
        /* Start search for next pending free slot here */
        unsigned int nextinsert;
 -      spinlock_t privlock; /* lock to protect data in devdata */
 +      /* lock to protect data in devdata */
 +      spinlock_t privlock;
        bool serverdown;
        bool serverchangingstate;
        unsigned long long acquire_failed_cnt;
@@@ -105,25 -101,19 +104,19 @@@ struct visorhba_devices_open 
        struct visorhba_devdata *devdata;
  };
  
- #define for_each_vdisk_match(iter, list, match) \
-       for (iter = &list->head; iter->next; iter = iter->next) \
-               if ((iter->channel == match->channel) && \
-                   (iter->id == match->id) && \
-                   (iter->lun == match->lun))
  /*
 - *    visor_thread_start - starts a thread for the device
 - *    @threadfn: Function the thread starts
 - *    @thrcontext: Context to pass to the thread, i.e. devdata
 - *    @name: string describing name of thread
 + * visor_thread_start - Starts a thread for the device
 + * @threadfn:   Function the thread starts
 + * @thrcontext: Context to pass to the thread, i.e. devdata
 + * @name:     String describing name of thread
   *
 - *    Starts a thread for the device.
 + * Starts a thread for the device.
   *
 - *    Return the task_struct * denoting the thread on success,
 - *             or NULL on failure
 + * Return: The task_struct * denoting the thread on success,
 + *       or NULL on failure
   */
 -static struct task_struct *visor_thread_start
 -(int (*threadfn)(void *), void *thrcontext, char *name)
 +static struct task_struct *visor_thread_start(int (*threadfn)(void *),
 +                                            void *thrcontext, char *name)
  {
        struct task_struct *task;
  
  }
  
  /*
 - *      visor_thread_stop - stops the thread if it is running
 + * visor_thread_stop - Stops the thread if it is running
 + * @task: Description of process to stop
   */
  static void visor_thread_stop(struct task_struct *task)
  {
 -      if (!task)
 -              return;  /* no thread running */
        kthread_stop(task);
  }
  
  /*
 - *    add_scsipending_entry - save off io command that is pending in
 - *                            Service Partition
 - *    @devdata: Pointer to devdata
 - *    @cmdtype: Specifies the type of command pending
 - *    @new:   The command to be saved
 + * add_scsipending_entry - Save off io command that is pending in
 + *                       Service Partition
 + * @devdata: Pointer to devdata
 + * @cmdtype: Specifies the type of command pending
 + * @new:     The command to be saved
   *
 - *    Saves off the io command that is being handled by the Service
 - *    Partition so that it can be handled when it completes. If new is
 - *    NULL it is assumed the entry refers only to the cmdrsp.
 - *    Returns insert_location where entry was added,
 - *    -EBUSY if it can't
 + * Saves off the io command that is being handled by the Service
 + * Partition so that it can be handled when it completes. If new is
 + * NULL it is assumed the entry refers only to the cmdrsp.
 + *
 + * Return: The insert_location where the entry was added on success,
 + *        -EBUSY if no free slot is available
   */
  static int add_scsipending_entry(struct visorhba_devdata *devdata,
                                 char cmdtype, void *new)
        entry->cmdtype = cmdtype;
        if (new)
                entry->sent = new;
 -      else /* wants to send cmdrsp */
 +      /* wants to send cmdrsp */
 +      else
                entry->sent = &entry->cmdrsp;
        devdata->nextinsert = (insert_location + 1) % MAX_PENDING_REQUESTS;
        spin_unlock_irqrestore(&devdata->privlock, flags);
  }
  
  /*
 - *    del_scsipending_ent - removes an entry from the pending array
 - *    @devdata: Device holding the pending array
 - *    @del: Entry to remove
 + * del_scsipending_ent - Removes an entry from the pending array
 + * @devdata: Device holding the pending array
 + * @del:     Entry to remove
   *
 - *    Removes the entry pointed at by del and returns it.
 - *    Returns the scsipending entry pointed at
 + * Removes the entry pointed at by del and returns it.
 + *
 + * Return: The scsipending entry pointed to on success, NULL on failure
   */
 -static void *del_scsipending_ent(struct visorhba_devdata *devdata,
 -                               int del)
 +static void *del_scsipending_ent(struct visorhba_devdata *devdata, int del)
  {
        unsigned long flags;
        void *sent;
  
        spin_lock_irqsave(&devdata->privlock, flags);
        sent = devdata->pending[del].sent;
 -
        devdata->pending[del].cmdtype = 0;
        devdata->pending[del].sent = NULL;
        spin_unlock_irqrestore(&devdata->privlock, flags);
  }
  
  /*
 - *    get_scsipending_cmdrsp - return the cmdrsp stored in a pending entry
 - *    @ddata: Device holding the pending array
 - *    @ent: Entry that stores the cmdrsp
 + * get_scsipending_cmdrsp - Return the cmdrsp stored in a pending entry
 + * @ddata: Device holding the pending array
 + * @ent:   Entry that stores the cmdrsp
   *
 - *    Each scsipending entry has a cmdrsp in it. The cmdrsp is only valid
 - *    if the "sent" field is not NULL
 - *    Returns a pointer to the cmdrsp.
 + * Each scsipending entry has a cmdrsp in it. The cmdrsp is only valid
 + * if the "sent" field is not NULL.
 + *
 + * Return: A pointer to the cmdrsp, NULL on failure
   */
  static struct uiscmdrsp *get_scsipending_cmdrsp(struct visorhba_devdata *ddata,
                                                int ent)
  }
  
  /*
 - *      simple_idr_get - associate a provided pointer with an int value
 - *                       1 <= value <= INT_MAX, and return this int value;
 - *                       the pointer value can be obtained later by passing
 - *                       this int value to idr_find()
 - *      @idrtable: the data object maintaining the pointer<-->int mappings
 - *      @p: the pointer value to be remembered
 - *      @lock: a spinlock used when exclusive access to idrtable is needed
 + * simple_idr_get - Associate a provided pointer with an int value
 + *                1 <= value <= INT_MAX, and return this int value;
 + *                the pointer value can be obtained later by passing
 + *                this int value to idr_find()
 + * @idrtable: The data object maintaining the pointer<-->int mappings
 + * @p:              The pointer value to be remembered
 + * @lock:     A spinlock used when exclusive access to idrtable is needed
 + *
 + * Return: The id number mapped to pointer 'p', 0 on failure
   */
  static unsigned int simple_idr_get(struct idr *idrtable, void *p,
                                   spinlock_t *lock)
        id = idr_alloc(idrtable, p, 1, INT_MAX, GFP_NOWAIT);
        spin_unlock_irqrestore(lock, flags);
        idr_preload_end();
 +      /* failure */
        if (id < 0)
 -              return 0;  /* failure */
 -      return (unsigned int)(id);  /* idr_alloc() guarantees > 0 */
 +              return 0;
 +      /* idr_alloc() guarantees > 0 */
 +      return (unsigned int)(id);
  }
  
  /*
 - *      setup_scsitaskmgmt_handles - stash the necessary handles so that the
 - *                                   completion processing logic for a taskmgmt
 - *                                   cmd will be able to find who to wake up
 - *                                   and where to stash the result
 + * setup_scsitaskmgmt_handles - Stash the necessary handles so that the
 + *                            completion processing logic for a taskmgmt
 + *                            cmd will be able to find who to wake up
 + *                            and where to stash the result
 + * @idrtable: The data object maintaining the pointer<-->int mappings
 + * @lock:     A spinlock used when exclusive access to idrtable is needed
 + * @cmdrsp:   Response from the IOVM
 + * @event:    The event handle to associate with an id
 + * @result:   The location to place the result of the event handle into
   */
  static void setup_scsitaskmgmt_handles(struct idr *idrtable, spinlock_t *lock,
                                       struct uiscmdrsp *cmdrsp,
  }
  
  /*
 - *      cleanup_scsitaskmgmt_handles - forget handles created by
 - *                                     setup_scsitaskmgmt_handles()
 + * cleanup_scsitaskmgmt_handles - Forget handles created by
 + *                              setup_scsitaskmgmt_handles()
 + * @idrtable: The data object maintaining the pointer<-->int mappings
 + * @cmdrsp:   Response from the IOVM
   */
  static void cleanup_scsitaskmgmt_handles(struct idr *idrtable,
                                         struct uiscmdrsp *cmdrsp)
  }
  
  /*
 - *    forward_taskmgmt_command - send taskmegmt command to the Service
 - *                               Partition
 - *    @tasktype: Type of taskmgmt command
 - *    @scsidev: Scsidev that issued command
 + * forward_taskmgmt_command - Send taskmgmt command to the Service
 + *                          Partition
 + * @tasktype: Type of taskmgmt command
 + * @scsidev:  Scsidev that issued command
   *
 - *    Create a cmdrsp packet and send it to the Serivce Partition
 - *    that will service this request.
 - *    Returns whether the command was queued successfully or not.
 + * Create a cmdrsp packet and send it to the Service Partition
 + * that will service this request.
 + *
 + * Return: SUCCESS if the command was queued, FAILED otherwise
   */
  static int forward_taskmgmt_command(enum task_mgmt_types tasktype,
-                                   struct scsi_cmnd *scsicmd)
+                                   struct scsi_device *scsidev)
  {
        struct uiscmdrsp *cmdrsp;
-       struct scsi_device *scsidev = scsicmd->device;
        struct visorhba_devdata *devdata =
                (struct visorhba_devdata *)scsidev->host->hostdata;
        int notifyresult = 0xffff;
        dev_dbg(&scsidev->sdev_gendev,
                "visorhba: taskmgmt type=%d success; result=0x%x\n",
                 tasktype, notifyresult);
-       if (tasktype == TASK_MGMT_ABORT_TASK)
-               scsicmd->result = DID_ABORT << 16;
-       else
-               scsicmd->result = DID_RESET << 16;
-       scsicmd->scsi_done(scsicmd);
        cleanup_scsitaskmgmt_handles(&devdata->idr, cmdrsp);
        return SUCCESS;
  
@@@ -382,94 -352,108 +368,105 @@@ err_del_scsipending_ent
  }
  
  /*
 - *    visorhba_abort_handler - Send TASK_MGMT_ABORT_TASK
 - *    @scsicmd: The scsicmd that needs aborted
 - *
 - *    Returns SUCCESS if inserted, failure otherwise
 + * visorhba_abort_handler - Send TASK_MGMT_ABORT_TASK
 + * @scsicmd: The scsicmd that needs to be aborted
   *
 + * Return: SUCCESS if inserted, FAILED otherwise
   */
  static int visorhba_abort_handler(struct scsi_cmnd *scsicmd)
  {
        /* issue TASK_MGMT_ABORT_TASK */
        struct scsi_device *scsidev;
        struct visordisk_info *vdisk;
-       struct visorhba_devdata *devdata;
+       int rtn;
  
        scsidev = scsicmd->device;
-       devdata = (struct visorhba_devdata *)scsidev->host->hostdata;
-       for_each_vdisk_match(vdisk, devdata, scsidev) {
-               if (atomic_read(&vdisk->error_count) < VISORHBA_ERROR_COUNT)
-                       atomic_inc(&vdisk->error_count);
-               else
-                       atomic_set(&vdisk->ios_threshold, IOS_ERROR_THRESHOLD);
+       vdisk = scsidev->hostdata;
+       if (atomic_read(&vdisk->error_count) < VISORHBA_ERROR_COUNT)
+               atomic_inc(&vdisk->error_count);
+       else
+               atomic_set(&vdisk->ios_threshold, IOS_ERROR_THRESHOLD);
+       rtn = forward_taskmgmt_command(TASK_MGMT_ABORT_TASK, scsidev);
+       if (rtn == SUCCESS) {
+               scsicmd->result = DID_ABORT << 16;
+               scsicmd->scsi_done(scsicmd);
        }
-       return forward_taskmgmt_command(TASK_MGMT_ABORT_TASK, scsicmd);
+       return rtn;
  }
  
  /*
 - *    visorhba_device_reset_handler - Send TASK_MGMT_LUN_RESET
 - *    @scsicmd: The scsicmd that needs aborted
 + * visorhba_device_reset_handler - Send TASK_MGMT_LUN_RESET
 + * @scsicmd: The scsicmd that needs to be aborted
   *
 - *    Returns SUCCESS if inserted, failure otherwise
 + * Return: SUCCESS if inserted, FAILED otherwise
   */
  static int visorhba_device_reset_handler(struct scsi_cmnd *scsicmd)
  {
        /* issue TASK_MGMT_LUN_RESET */
        struct scsi_device *scsidev;
        struct visordisk_info *vdisk;
-       struct visorhba_devdata *devdata;
+       int rtn;
  
        scsidev = scsicmd->device;
-       devdata = (struct visorhba_devdata *)scsidev->host->hostdata;
-       for_each_vdisk_match(vdisk, devdata, scsidev) {
-               if (atomic_read(&vdisk->error_count) < VISORHBA_ERROR_COUNT)
-                       atomic_inc(&vdisk->error_count);
-               else
-                       atomic_set(&vdisk->ios_threshold, IOS_ERROR_THRESHOLD);
+       vdisk = scsidev->hostdata;
+       if (atomic_read(&vdisk->error_count) < VISORHBA_ERROR_COUNT)
+               atomic_inc(&vdisk->error_count);
+       else
+               atomic_set(&vdisk->ios_threshold, IOS_ERROR_THRESHOLD);
+       rtn = forward_taskmgmt_command(TASK_MGMT_LUN_RESET, scsidev);
+       if (rtn == SUCCESS) {
+               scsicmd->result = DID_RESET << 16;
+               scsicmd->scsi_done(scsicmd);
        }
-       return forward_taskmgmt_command(TASK_MGMT_LUN_RESET, scsicmd);
+       return rtn;
  }
  
  /*
 - *    visorhba_bus_reset_handler - Send TASK_MGMT_TARGET_RESET for each
 - *                                 target on the bus
 - *    @scsicmd: The scsicmd that needs aborted
 + * visorhba_bus_reset_handler - Send TASK_MGMT_TARGET_RESET for each
 + *                            target on the bus
 + * @scsicmd: The scsicmd that needs to be aborted
   *
 - *    Returns SUCCESS
 + * Return: SUCCESS if inserted, FAILED otherwise
   */
  static int visorhba_bus_reset_handler(struct scsi_cmnd *scsicmd)
  {
        struct scsi_device *scsidev;
        struct visordisk_info *vdisk;
-       struct visorhba_devdata *devdata;
+       int rtn;
  
        scsidev = scsicmd->device;
-       devdata = (struct visorhba_devdata *)scsidev->host->hostdata;
-       for_each_vdisk_match(vdisk, devdata, scsidev) {
+       shost_for_each_device(scsidev, scsidev->host) {
+               vdisk = scsidev->hostdata;
                if (atomic_read(&vdisk->error_count) < VISORHBA_ERROR_COUNT)
                        atomic_inc(&vdisk->error_count);
                else
                        atomic_set(&vdisk->ios_threshold, IOS_ERROR_THRESHOLD);
        }
-       return forward_taskmgmt_command(TASK_MGMT_BUS_RESET, scsicmd);
+       rtn = forward_taskmgmt_command(TASK_MGMT_BUS_RESET, scsidev);
+       if (rtn == SUCCESS) {
+               scsicmd->result = DID_RESET << 16;
+               scsicmd->scsi_done(scsicmd);
+       }
+       return rtn;
  }
  
  /*
 - *    visorhba_host_reset_handler - Not supported
 - *    @scsicmd: The scsicmd that needs aborted
 + * visorhba_host_reset_handler - Not supported
 + * @scsicmd: The scsicmd that needs to be aborted
   *
 - *    Not supported, return SUCCESS
 - *    Returns SUCCESS
 + * Return: SUCCESS (operation not supported)
   */
 -static int
 -visorhba_host_reset_handler(struct scsi_cmnd *scsicmd)
 +static int visorhba_host_reset_handler(struct scsi_cmnd *scsicmd)
  {
        /* issue TASK_MGMT_TARGET_RESET for each target on each bus for host */
        return SUCCESS;
  }
  
  /*
 - *    visorhba_get_info
 - *    @shp: Scsi host that is requesting information
 + * visorhba_get_info - Get information about SCSI device
 + * @shp: Scsi host that is requesting information
   *
 - *    Returns string with info
 + * Return: String with visorhba information
   */
  static const char *visorhba_get_info(struct Scsi_Host *shp)
  {
  }
  
  /*
 - *    visorhba_queue_command_lck -- queues command to the Service Partition
 - *    @scsicmd: Command to be queued
 - *    @vsiorhba_cmnd_done: Done command to call when scsicmd is returned
 + * dma_data_dir_linux_to_spar - convert dma_data_direction value to
 + *                            Unisys-specific equivalent
 + * @d: dma direction value to convert
 + *
 + * Returns the Unisys-specific dma direction value corresponding to @d
 + */
 +static u32 dma_data_dir_linux_to_spar(enum dma_data_direction d)
 +{
 +      switch (d) {
 +      case DMA_BIDIRECTIONAL:
 +              return UIS_DMA_BIDIRECTIONAL;
 +      case DMA_TO_DEVICE:
 +              return UIS_DMA_TO_DEVICE;
 +      case DMA_FROM_DEVICE:
 +              return UIS_DMA_FROM_DEVICE;
 +      case DMA_NONE:
 +              return UIS_DMA_NONE;
 +      default:
 +              return UIS_DMA_NONE;
 +      }
 +}
 +
 +/*
 + * visorhba_queue_command_lck - Queues command to the Service Partition
 + * @scsicmd:          Command to be queued
 + * @visorhba_cmnd_done: Completion routine to call when the scsicmd is returned
   *
 - *    Queues to scsicmd to the ServicePartition after converting it to a
 - *    uiscmdrsp structure.
 + * Queues the scsicmd to the Service Partition after converting it to a
 + * uiscmdrsp structure.
   *
 - *    Returns success if queued to the Service Partition, otherwise
 - *    failure.
 + * Return: 0 if successfully queued to the Service Partition, otherwise
 + *       error code
   */
 -static int
 -visorhba_queue_command_lck(struct scsi_cmnd *scsicmd,
 -                         void (*visorhba_cmnd_done)(struct scsi_cmnd *))
 +static int visorhba_queue_command_lck(struct scsi_cmnd *scsicmd,
 +                                    void (*visorhba_cmnd_done)
 +                                         (struct scsi_cmnd *))
  {
        struct uiscmdrsp *cmdrsp;
        struct scsi_device *scsidev = scsicmd->device;
  
        insert_location = add_scsipending_entry(devdata, CMD_SCSI_TYPE,
                                                (void *)scsicmd);
 -
        if (insert_location < 0)
                return SCSI_MLQUEUE_DEVICE_BUSY;
  
        cmdrsp = get_scsipending_cmdrsp(devdata, insert_location);
 -
        cmdrsp->cmdtype = CMD_SCSI_TYPE;
        /* save the pending insertion location. Deletion from pending
         * will return the scsicmd pointer for completion
        cmdrsp->scsi.vdest.id = scsidev->id;
        cmdrsp->scsi.vdest.lun = scsidev->lun;
        /* save datadir */
 -      cmdrsp->scsi.data_dir = scsicmd->sc_data_direction;
 +      cmdrsp->scsi.data_dir =
 +              dma_data_dir_linux_to_spar(scsicmd->sc_data_direction);
        memcpy(cmdrsp->scsi.cmnd, cdb, MAX_CMND_SIZE);
 -
        cmdrsp->scsi.bufflen = scsi_bufflen(scsicmd);
  
        /* keep track of the max buffer length so far. */
@@@ -590,13 -553,13 +587,13 @@@ static DEF_SCSI_QCMD(visorhba_queue_com
  #endif
  
  /*
 - *    visorhba_slave_alloc - called when new disk is discovered
 - *    @scsidev: New disk
 + * visorhba_slave_alloc - Called when new disk is discovered
 + * @scsidev: New disk
   *
 - *    Create a new visordisk_info structure and add it to our
 - *    list of vdisks.
 + * Create a new visordisk_info structure and add it to our
 + * list of vdisks.
   *
 - *    Returns success when created, otherwise error.
 + * Return: 0 on success, -ENOMEM on failure.
   */
  static int visorhba_slave_alloc(struct scsi_device *scsidev)
  {
         * LLD can alloc any struct & do init if needed.
         */
        struct visordisk_info *vdisk;
-       struct visordisk_info *tmpvdisk;
        struct visorhba_devdata *devdata;
        struct Scsi_Host *scsihost = (struct Scsi_Host *)scsidev->host;
  
 -              return 0; /* already allocated return success */
++      /* already allocated return success */
+       if (scsidev->hostdata)
++              return 0;
 +      /* even though we errored, treat as success */
        devdata = (struct visorhba_devdata *)scsihost->hostdata;
        if (!devdata)
 -              return 0; /* even though we errored, treat as success */
 +              return 0;
  
-       /* already allocated return success */
-       for_each_vdisk_match(vdisk, devdata, scsidev)
-               return 0;
-       tmpvdisk = kzalloc(sizeof(*tmpvdisk), GFP_ATOMIC);
-       if (!tmpvdisk)
+       vdisk = kzalloc(sizeof(*vdisk), GFP_ATOMIC);
+       if (!vdisk)
                return -ENOMEM;
  
-       tmpvdisk->channel = scsidev->channel;
-       tmpvdisk->id = scsidev->id;
-       tmpvdisk->lun = scsidev->lun;
-       vdisk->next = tmpvdisk;
+       vdisk->sdev = scsidev;
+       scsidev->hostdata = vdisk;
        return 0;
  }
  
  /*
 - *    visorhba_slave_destroy - disk is going away
 - *    @scsidev: scsi device going away
 - *
 - *    Disk is going away, clean up resources.
 - *    Returns void.
 + * visorhba_slave_destroy - Disk is going away, clean up resources.
 + * @scsidev: Scsi device to destroy
   */
  static void visorhba_slave_destroy(struct scsi_device *scsidev)
  {
        /* midlevel calls this after device has been quiesced and
         * before it is to be deleted.
         */
-       struct visordisk_info *vdisk, *delvdisk;
-       struct visorhba_devdata *devdata;
-       struct Scsi_Host *scsihost = (struct Scsi_Host *)scsidev->host;
+       struct visordisk_info *vdisk;
  
-       devdata = (struct visorhba_devdata *)scsihost->hostdata;
-       for_each_vdisk_match(vdisk, devdata, scsidev) {
-               delvdisk = vdisk->next;
-               vdisk->next = delvdisk->next;
-               kfree(delvdisk);
-               return;
-       }
+       vdisk = scsidev->hostdata;
+       scsidev->hostdata = NULL;
+       kfree(vdisk);
  }
  
  static struct scsi_host_template visorhba_driver_template = {
  };
  
  /*
 - *    info_debugfs_show - debugfs interface to dump visorhba states
 + * info_debugfs_show - Debugfs interface to dump visorhba states
 + * @seq: The sequence file to write information to
 + * @v:   Unused, but needed for use with seq file single_open invocation
 + *
 + * Presents a file in the debugfs tree named: /visorhba/vbus<x>:dev<y>/info.
   *
 - *      This presents a file in the debugfs tree named:
 - *          /visorhba/vbus<x>:dev<y>/info
 + * Return: SUCCESS
   */
  static int info_debugfs_show(struct seq_file *seq, void *v)
  {
@@@ -716,13 -668,12 +704,13 @@@ static const struct file_operations inf
  };
  
  /*
 - *    complete_taskmgmt_command - complete task management
 - *    @cmdrsp: Response from the IOVM
 + * complete_taskmgmt_command - Complete task management
 + * @idrtable: The data object maintaining the pointer<-->int mappings
 + * @cmdrsp:   Response from the IOVM
 + * @result:   The result of the task management command
   *
 - *    Service Partition returned the result of the task management
 - *    command. Wake up anyone waiting for it.
 - *    Returns void
 + * Service Partition returned the result of the task management
 + * command. Wake up anyone waiting for it.
   */
  static void complete_taskmgmt_command(struct idr *idrtable,
                                      struct uiscmdrsp *cmdrsp, int result)
                idr_find(idrtable, cmdrsp->scsitaskmgmt.notify_handle);
        int *scsi_result_ptr =
                idr_find(idrtable, cmdrsp->scsitaskmgmt.notifyresult_handle);
 -
        if (unlikely(!(wq && scsi_result_ptr))) {
                pr_err("visorhba: no completion context; cmd will time out\n");
                return;
  }
  
  /*
 - *    visorhba_serverdown_complete - Called when we are done cleaning up
 - *                                   from serverdown
 - *    @work: work structure for this serverdown request
 + * visorhba_serverdown_complete - Called when we are done cleaning up
 + *                              from serverdown
 + * @devdata: Visorhba instance on which to complete serverdown
   *
 - *    Called when we are done cleanning up from serverdown, stop processing
 - *    queue, fail pending IOs.
 - *    Returns void when finished cleaning up
 + * Called when we are done cleaning up from serverdown: stop processing
 + * the queue and fail pending IOs.
   */
  static void visorhba_serverdown_complete(struct visorhba_devdata *devdata)
  {
  }
  
  /*
 - *    visorhba_serverdown - Got notified that the IOVM is down
 - *    @devdata: visorhba that is being serviced by downed IOVM.
 + * visorhba_serverdown - Got notified that the IOVM is down
 + * @devdata: Visorhba that is being serviced by downed IOVM
 + *
 + * Something happened to the IOVM, return immediately and
 + * schedule cleanup work.
   *
 - *    Something happened to the IOVM, return immediately and
 - *    schedule work cleanup work.
 - *    Return SUCCESS or EINVAL
 + * Return: 0 on success, -EINVAL on failure
   */
  static int visorhba_serverdown(struct visorhba_devdata *devdata)
  {
  }
  
  /*
 - *    do_scsi_linuxstat - scsi command returned linuxstat
 - *    @cmdrsp: response from IOVM
 - *    @scsicmd: Command issued.
 + * do_scsi_linuxstat - Scsi command returned linuxstat
 + * @cmdrsp:  Response from IOVM
 + * @scsicmd: Command issued
   *
 - *    Don't log errors for disk-not-present inquiries
 - *    Returns void
 + * Don't log errors for disk-not-present inquiries.
   */
 -static void
 -do_scsi_linuxstat(struct uiscmdrsp *cmdrsp, struct scsi_cmnd *scsicmd)
 +static void do_scsi_linuxstat(struct uiscmdrsp *cmdrsp,
 +                            struct scsi_cmnd *scsicmd)
  {
-       struct visorhba_devdata *devdata;
        struct visordisk_info *vdisk;
        struct scsi_device *scsidev;
  
            (cmdrsp->scsi.addlstat == ADDL_SEL_TIMEOUT))
                return;
        /* Okay see what our error_count is here.... */
-       devdata = (struct visorhba_devdata *)scsidev->host->hostdata;
-       for_each_vdisk_match(vdisk, devdata, scsidev) {
-               if (atomic_read(&vdisk->error_count) < VISORHBA_ERROR_COUNT) {
-                       atomic_inc(&vdisk->error_count);
-                       atomic_set(&vdisk->ios_threshold, IOS_ERROR_THRESHOLD);
-               }
+       vdisk = scsidev->hostdata;
+       if (atomic_read(&vdisk->error_count) < VISORHBA_ERROR_COUNT) {
+               atomic_inc(&vdisk->error_count);
+               atomic_set(&vdisk->ios_threshold, IOS_ERROR_THRESHOLD);
        }
  }
  
 -static int set_no_disk_inquiry_result(unsigned char *buf,
 -                                    size_t len, bool is_lun0)
 +static int set_no_disk_inquiry_result(unsigned char *buf, size_t len,
 +                                    bool is_lun0)
  {
 -      if (!buf || len < NO_DISK_INQUIRY_RESULT_LEN)
 +      if (len < NO_DISK_INQUIRY_RESULT_LEN)
                return -EINVAL;
        memset(buf, 0, NO_DISK_INQUIRY_RESULT_LEN);
        buf[2] = SCSI_SPC2_VER;
  }
  
  /*
 - *    do_scsi_nolinuxstat - scsi command didn't have linuxstat
 - *    @cmdrsp: response from IOVM
 - *    @scsicmd: Command issued.
 + * do_scsi_nolinuxstat - Scsi command didn't have linuxstat
 + * @cmdrsp:  Response from IOVM
 + * @scsicmd: Command issued
   *
 - *    Handle response when no linuxstat was returned
 - *    Returns void
 + * Handle response when no linuxstat was returned.
   */
 -static void
 -do_scsi_nolinuxstat(struct uiscmdrsp *cmdrsp, struct scsi_cmnd *scsicmd)
 +static void do_scsi_nolinuxstat(struct uiscmdrsp *cmdrsp,
 +                              struct scsi_cmnd *scsicmd)
  {
        struct scsi_device *scsidev;
        unsigned char *buf;
        char *this_page_orig;
        int bufind = 0;
        struct visordisk_info *vdisk;
-       struct visorhba_devdata *devdata;
  
        scsidev = scsicmd->device;
        if ((cmdrsp->scsi.cmnd[0] == INQUIRY) &&
                }
                kfree(buf);
        } else {
-               devdata = (struct visorhba_devdata *)scsidev->host->hostdata;
-               for_each_vdisk_match(vdisk, devdata, scsidev) {
-                       if (atomic_read(&vdisk->ios_threshold) > 0) {
-                               atomic_dec(&vdisk->ios_threshold);
-                               if (atomic_read(&vdisk->ios_threshold) == 0)
-                                       atomic_set(&vdisk->error_count, 0);
-                       }
+               vdisk = scsidev->hostdata;
+               if (atomic_read(&vdisk->ios_threshold) > 0) {
+                       atomic_dec(&vdisk->ios_threshold);
+                       if (atomic_read(&vdisk->ios_threshold) == 0)
+                               atomic_set(&vdisk->error_count, 0);
                }
        }
  }
  
  /*
 - *    complete_scsi_command - complete a scsi command
 - *    @uiscmdrsp: Response from Service Partition
 - *    @scsicmd: The scsi command
 + * complete_scsi_command - Complete a scsi command
 + * @uiscmdrsp: Response from Service Partition
 + * @scsicmd:   The scsi command
   *
 - *    Response returned by the Service Partition, finish it and send
 - *    completion to the scsi midlayer.
 - *    Returns void.
 + * Response was returned by the Service Partition. Finish it and send
 + * completion to the scsi midlayer.
   */
 -static void
 -complete_scsi_command(struct uiscmdrsp *cmdrsp, struct scsi_cmnd *scsicmd)
 +static void complete_scsi_command(struct uiscmdrsp *cmdrsp,
 +                                struct scsi_cmnd *scsicmd)
  {
        /* take what we need out of cmdrsp and complete the scsicmd */
        scsicmd->result = cmdrsp->scsi.linuxstat;
  }
  
  /*
 - *    drain_queue - pull responses out of iochannel
 - *    @cmdrsp: Response from the IOSP
 - *    @devdata: device that owns this iochannel
 + * drain_queue - Pull responses out of iochannel
 + * @cmdrsp:  Response from the IOSP
 + * @devdata: Device that owns this iochannel
   *
 - *    Pulls responses out of the iochannel and process the responses.
 - *    Restuns void
 + * Pulls responses out of the iochannel and processes them.
   */
 -static void
 -drain_queue(struct uiscmdrsp *cmdrsp, struct visorhba_devdata *devdata)
 +static void drain_queue(struct uiscmdrsp *cmdrsp,
 +                      struct visorhba_devdata *devdata)
  {
        struct scsi_cmnd *scsicmd;
  
        while (1) {
 +              /* queue empty */
                if (visorchannel_signalremove(devdata->dev->visorchannel,
                                              IOCHAN_FROM_IOPART,
                                              cmdrsp))
 -                      break; /* queue empty */
 -
 +                      break;
                if (cmdrsp->cmdtype == CMD_SCSI_TYPE) {
                        /* scsicmd location is returned by the
                         * deletion
  }
  
  /*
 - *    process_incoming_rsps - Process responses from IOSP
 - *    @v: void pointer to visorhba_devdata
 + * process_incoming_rsps - Process responses from IOSP
 + * @v:  Void pointer to visorhba_devdata
   *
 - *    Main function for the thread that processes the responses
 - *    from the IO Service Partition. When the queue is empty, wait
 - *    to check to see if it is full again.
 + * Main function for the thread that processes the responses
 + * from the IO Service Partition. When the queue is empty, wait
 + * before checking it again for new responses.
 + *
 + * Return: 0 on success, -ENOMEM on failure
   */
  static int process_incoming_rsps(void *v)
  {
  }
  
  /*
 - *    visorhba_pause - function to handle visorbus pause messages
 - *    @dev: device that is pausing.
 - *    @complete_func: function to call when finished
 + * visorhba_pause - Function to handle visorbus pause messages
 + * @dev:         Device that is pausing
 + * @complete_func: Function to call when finished
 + *
 + * Something has happened to the IO Service Partition that is
 + * handling this device. Quiet this device and reset commands
 + * so that the Service Partition can be corrected.
   *
 - *    Something has happened to the IO Service Partition that is
 - *    handling this device. Quiet this device and reset commands
 - *    so that the Service Partition can be corrected.
 - *    Returns SUCCESS
 + * Return: SUCCESS
   */
  static int visorhba_pause(struct visor_device *dev,
                          visorbus_state_complete_func complete_func)
  }
  
  /*
 - *    visorhba_resume - function called when the IO Service Partition is back
 - *    @dev: device that is pausing.
 - *    @complete_func: function to call when finished
 + * visorhba_resume - Function called when the IO Service Partition is back
 + * @dev:         Device that is pausing
 + * @complete_func: Function to call when finished
   *
 - *    Yay! The IO Service Partition is back, the channel has been wiped
 - *    so lets re-establish connection and start processing responses.
 - *    Returns 0 on success, error on failure.
 + * Yay! The IO Service Partition is back and the channel has been wiped,
 + * so let's re-establish the connection and start processing responses.
 + *
 + * Return: 0 on success, -EINVAL on failure
   */
  static int visorhba_resume(struct visor_device *dev,
                           visorbus_state_complete_func complete_func)
  
        devdata->thread = visor_thread_start(process_incoming_rsps, devdata,
                                             "vhba_incming");
 -
        devdata->serverdown = false;
        devdata->serverchangingstate = false;
  
  }
  
  /*
 - *    visorhba_probe - device has been discovered, do acquire
 - *    @dev: visor_device that was discovered
 + * visorhba_probe - Device has been discovered; do acquire
 + * @dev: visor_device that was discovered
   *
 - *    A new HBA was discovered, do the initial connections of it.
 - *    Return 0 on success, otherwise error.
 + * A new HBA was discovered; set up its initial connections.
 + *
 + * Return: 0 on success, otherwise error code
   */
  static int visorhba_probe(struct visor_device *dev)
  {
@@@ -1176,10 -1122,11 +1158,10 @@@ err_scsi_host_put
  }
  
  /*
 - *    visorhba_remove - remove a visorhba device
 - *    @dev: Device to remove
 + * visorhba_remove - Remove a visorhba device
 + * @dev: Device to remove
   *
 - *    Removes the visorhba device.
 - *    Returns void.
 + * Removes the visorhba device.
   */
  static void visorhba_remove(struct visor_device *dev)
  {
@@@ -1217,12 -1164,10 +1199,12 @@@ static struct visor_driver visorhba_dri
  };
  
  /*
 - *    visorhba_init           - driver init routine
 + * visorhba_init - Driver init routine
 + *
 + * Initialize the visorhba driver and register it with visorbus
 + * to handle the s-Par virtual host bus adapter.
   *
 - *    Initialize the visorhba driver and register it with visorbus
 - *    to handle s-Par virtual host bus adapter.
 + * Return: 0 on success, error code otherwise
   */
  static int visorhba_init(void)
  {
@@@ -1245,9 -1190,9 +1227,9 @@@ cleanup_debugfs
  }
  
  /*
 - *    visorhba_exit   - driver exit routine
 + * visorhba_exit - Driver exit routine
   *
 - *    Unregister driver from the bus and free up memory.
 + * Unregister driver from the bus and free up memory.
   */
  static void visorhba_exit(void)
  {
index 9aaa177e8209731cbe63aef3c7f1479ab2497851,27cbc1eab868c9134269eb555f0ea74615681667..eb30f3e09a4775b3f046ccc698f18a05210f17ad
@@@ -296,7 -296,6 +296,6 @@@ static inline int virtqueue_add(struct 
        }
  #endif
  
-       BUG_ON(total_sg > vq->vring.num);
        BUG_ON(total_sg == 0);
  
        head = vq->free_head;
         * buffers, then go indirect. FIXME: tune this threshold */
        if (vq->indirect && total_sg > 1 && vq->vq.num_free)
                desc = alloc_indirect(_vq, total_sg, gfp);
-       else
+       else {
                desc = NULL;
+               WARN_ON_ONCE(total_sg > vq->vring.num && !vq->indirect);
+       }
  
        if (desc) {
                /* Use a single buffer which doesn't continue */
        vq->desc_state[head].data = data;
        if (indirect)
                vq->desc_state[head].indir_desc = desc;
 -      if (ctx)
 +      else
                vq->desc_state[head].indir_desc = ctx;
  
        /* Put entry in available array (but don't update avail->idx until they
diff --combined include/linux/bsg-lib.h
index 637a20cfb237db4ab76a480172e918e6e48f849b,1062f08e1a553077bba5cf1cdf68987ad3d27707..b1be0233ce353d93241bf1d1776d901637e9b95a
@@@ -24,7 -24,6 +24,7 @@@
  #define _BLK_BSG_
  
  #include <linux/blkdev.h>
 +#include <scsi/scsi_request.h>
  
  struct request;
  struct device;
@@@ -38,7 -37,6 +38,7 @@@ struct bsg_buffer 
  };
  
  struct bsg_job {
 +      struct scsi_request sreq;
        struct device *dev;
        struct request *req;
  
@@@ -68,8 -66,9 +68,9 @@@
  
  void bsg_job_done(struct bsg_job *job, int result,
                  unsigned int reply_payload_rcv_len);
- struct request_queue *bsg_setup_queue(struct device *dev, char *name,
-               bsg_job_fn *job_fn, int dd_job_size);
+ struct request_queue *bsg_setup_queue(struct device *dev, const char *name,
+               bsg_job_fn *job_fn, int dd_job_size,
+               void (*release)(struct device *));
  void bsg_job_put(struct bsg_job *job);
  int __must_check bsg_job_get(struct bsg_job *job);
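
For reference, a hedged sketch of how a transport class or LLD might call the
updated bsg_setup_queue() prototype above, now that the release callback is
passed through. The names my_setup_bsg() and my_dev_release() are
placeholders, and the ERR_PTR-style error return is an assumption about the
bsg-lib implementation in this kernel:

        #include <linux/bsg-lib.h>

        /* Placeholder release hook: invoked when the bsg class device for
         * this queue goes away; drop the reference taken in my_setup_bsg().
         */
        static void my_dev_release(struct device *dev)
        {
                put_device(dev);
        }

        static int my_setup_bsg(struct device *dev, bsg_job_fn *job_fn)
        {
                struct request_queue *q;

                get_device(dev);        /* balanced by my_dev_release() */
                q = bsg_setup_queue(dev, dev_name(dev), job_fn,
                                    0 /* dd_job_size */, my_dev_release);
                if (IS_ERR(q)) {        /* assumed error convention */
                        put_device(dev);
                        return PTR_ERR(q);
                }
                return 0;
        }
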
  
diff --combined include/linux/rcupdate.h
index 96f1baf62ab8cf72216a97b5df7eb3fd386cbc3d,8e920f0ecb07ed0b7fa72e7612e4641bc379616e..de50d8a4cf414121a9f83e50df7738a5f530494a
@@@ -58,6 -58,8 +58,6 @@@ void call_rcu(struct rcu_head *head, rc
  void call_rcu_bh(struct rcu_head *head, rcu_callback_t func);
  void call_rcu_sched(struct rcu_head *head, rcu_callback_t func);
  void synchronize_sched(void);
 -void call_rcu_tasks(struct rcu_head *head, rcu_callback_t func);
 -void synchronize_rcu_tasks(void);
  void rcu_barrier_tasks(void);
  
  #ifdef CONFIG_PREEMPT_RCU
@@@ -103,13 -105,11 +103,13 @@@ static inline int rcu_preempt_depth(voi
  
  /* Internal to kernel */
  void rcu_init(void);
 +extern int rcu_scheduler_active __read_mostly;
  void rcu_sched_qs(void);
  void rcu_bh_qs(void);
  void rcu_check_callbacks(int user);
  void rcu_report_dead(unsigned int cpu);
  void rcu_cpu_starting(unsigned int cpu);
 +void rcutree_migrate_callbacks(int cpu);
  
  #ifdef CONFIG_RCU_STALL_COMMON
  void rcu_sysrq_start(void);
@@@ -164,6 -164,8 +164,6 @@@ static inline void rcu_init_nohz(void) 
   * macro rather than an inline function to avoid #include hell.
   */
  #ifdef CONFIG_TASKS_RCU
 -#define TASKS_RCU(x) x
 -extern struct srcu_struct tasks_rcu_exit_srcu;
  #define rcu_note_voluntary_context_switch_lite(t) \
        do { \
                if (READ_ONCE((t)->rcu_tasks_holdout)) \
                rcu_all_qs(); \
                rcu_note_voluntary_context_switch_lite(t); \
        } while (0)
 +void call_rcu_tasks(struct rcu_head *head, rcu_callback_t func);
 +void synchronize_rcu_tasks(void);
 +void exit_tasks_rcu_start(void);
 +void exit_tasks_rcu_finish(void);
  #else /* #ifdef CONFIG_TASKS_RCU */
 -#define TASKS_RCU(x) do { } while (0)
  #define rcu_note_voluntary_context_switch_lite(t)     do { } while (0)
  #define rcu_note_voluntary_context_switch(t)          rcu_all_qs()
 +#define call_rcu_tasks call_rcu_sched
 +#define synchronize_rcu_tasks synchronize_sched
 +static inline void exit_tasks_rcu_start(void) { }
 +static inline void exit_tasks_rcu_finish(void) { }
  #endif /* #else #ifdef CONFIG_TASKS_RCU */
  
  /**
@@@ -412,6 -407,22 +412,22 @@@ static inline void rcu_preempt_sleep_ch
        _r_a_p__v;                                                            \
  })
  
+ /**
+  * rcu_swap_protected() - swap an RCU and a regular pointer
+  * @rcu_ptr: RCU pointer
+  * @ptr: regular pointer
+  * @c: the conditions under which the dereference will take place
+  *
+  * Perform swap(@rcu_ptr, @ptr) where @rcu_ptr is an RCU-annotated pointer and
+  * @c is the argument that is passed to the rcu_dereference_protected() call
+  * used to read that pointer.
+  */
+ #define rcu_swap_protected(rcu_ptr, ptr, c) do {                      \
+       typeof(ptr) __tmp = rcu_dereference_protected((rcu_ptr), (c));  \
+       rcu_assign_pointer((rcu_ptr), (ptr));                           \
+       (ptr) = __tmp;                                                  \
+ } while (0)
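
A minimal usage sketch for rcu_swap_protected(), assuming a hypothetical
object whose RCU-visible pointer is updated under a spinlock (struct my_obj
and struct my_cfg are illustrative, not kernel types). After the swap the
regular pointer holds the old value, which can be reclaimed once a grace
period has elapsed:

        struct my_cfg {
                int val;
        };

        struct my_obj {
                struct my_cfg __rcu *cfg;
                spinlock_t lock;
        };

        static void my_obj_set_cfg(struct my_obj *obj, struct my_cfg *new_cfg)
        {
                spin_lock(&obj->lock);
                rcu_swap_protected(obj->cfg, new_cfg,
                                   lockdep_is_held(&obj->lock));
                spin_unlock(&obj->lock);

                /* new_cfg now points at the previous configuration */
                synchronize_rcu();
                kfree(new_cfg);
        }
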
  /**
   * rcu_access_pointer() - fetch RCU pointer with no dereferencing
   * @p: The pointer to read