Merge branch 'perf-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel...
author Linus Torvalds <torvalds@linux-foundation.org>
Fri, 14 Apr 2017 23:58:38 +0000 (16:58 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Fri, 14 Apr 2017 23:58:38 +0000 (16:58 -0700)
Pull perf fixes from Thomas Gleixner:
 "Two small fixes for perf:

   - the move to support cross-arch annotation introduced per-arch
     initialization requirements; fulfill them for s/390 (Christian
     Borntraeger)

   - add the missing initialization to the LBR entries to avoid exposing
     random or stale data"

* 'perf-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  perf/x86: Avoid exposing wrong/stale data in intel_pmu_lbr_read_32()
  perf annotate s390: Fix perf annotate error -95 (4.10 regression)
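
For context on the first fix: intel_pmu_lbr_read_32() copies last-branch
records out of per-CPU state, and the fields the legacy 32-bit LBR MSRs do
not supply were left uninitialized, exposing whatever the buffer last held.
A minimal sketch of the corrected fill pattern (illustrative, not the
verbatim hunk from arch/x86/events/intel/lbr.c):

    cpuc->lbr_entries[i].from      = msr_lastbranch.from;
    cpuc->lbr_entries[i].to        = msr_lastbranch.to;
    /* Zero everything the 32-bit MSR pair cannot provide, so no
     * random or stale kernel data reaches userspace through the
     * perf_branch_entry records. */
    cpuc->lbr_entries[i].mispred   = 0;
    cpuc->lbr_entries[i].predicted = 0;
    cpuc->lbr_entries[i].in_tx     = 0;
    cpuc->lbr_entries[i].abort     = 0;
    cpuc->lbr_entries[i].cycles    = 0;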

104 files changed:
.mailmap
MAINTAINERS
arch/ia64/include/asm/asm-prototypes.h [new file with mode: 0644]
arch/ia64/lib/Makefile
arch/x86/mm/init.c
arch/x86/platform/efi/quirks.c
drivers/acpi/acpica/utresrc.c
drivers/acpi/scan.c
drivers/ata/pata_atiixp.c
drivers/ata/sata_via.c
drivers/block/zram/zram_drv.c
drivers/char/mem.c
drivers/char/virtio_console.c
drivers/cpufreq/cpufreq.c
drivers/firmware/efi/libstub/gop.c
drivers/gpu/drm/etnaviv/etnaviv_gpu.c
drivers/gpu/drm/i915/gvt/cfg_space.c
drivers/gpu/drm/i915/gvt/execlist.c
drivers/gpu/drm/i915/gvt/firmware.c
drivers/gpu/drm/i915/gvt/gvt.c
drivers/gpu/drm/i915/gvt/gvt.h
drivers/gpu/drm/i915/gvt/kvmgt.c
drivers/gpu/drm/i915/gvt/vgpu.c
drivers/gpu/drm/i915/i915_drv.c
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/i915_gem.c
drivers/gpu/drm/i915/i915_gem_execbuffer.c
drivers/gpu/drm/i915/i915_gem_gtt.c
drivers/gpu/drm/i915/i915_gem_request.c
drivers/gpu/drm/i915/i915_gem_shrinker.c
drivers/gpu/drm/i915/i915_pci.c
drivers/gpu/drm/i915/i915_perf.c
drivers/gpu/drm/i915/intel_lrc.c
drivers/gpu/drm/i915/intel_ringbuffer.h
drivers/gpu/drm/nouveau/nv50_display.c
drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv31.c
drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv44.c
drivers/gpu/drm/udl/udl_transfer.c
drivers/hid/hid-core.c
drivers/hid/hid-ids.h
drivers/hid/hid-uclogic.c
drivers/infiniband/ulp/isert/ib_isert.c
drivers/infiniband/ulp/isert/ib_isert.h
drivers/irqchip/irq-imx-gpcv2.c
drivers/net/virtio_net.c
drivers/pinctrl/intel/pinctrl-cherryview.c
drivers/pinctrl/samsung/pinctrl-exynos.c
drivers/pinctrl/samsung/pinctrl-exynos.h
drivers/pwm/pwm-lpss-pci.c
drivers/pwm/pwm-lpss-platform.c
drivers/pwm/pwm-lpss.c
drivers/pwm/pwm-lpss.h
drivers/pwm/pwm-rockchip.c
drivers/target/iscsi/iscsi_target.c
drivers/target/iscsi/iscsi_target_configfs.c
drivers/target/iscsi/iscsi_target_parameters.c
drivers/target/iscsi/iscsi_target_util.c
drivers/target/iscsi/iscsi_target_util.h
drivers/target/target_core_alua.c
drivers/target/target_core_configfs.c
drivers/target/target_core_fabric_configfs.c
drivers/target/target_core_tpg.c
drivers/target/target_core_transport.c
drivers/target/target_core_user.c
drivers/usb/gadget/function/f_tcm.c
drivers/video/fbdev/efifb.c
drivers/video/fbdev/omap/omapfb_main.c
drivers/video/fbdev/ssd1307fb.c
drivers/video/fbdev/xen-fbfront.c
drivers/virtio/virtio.c
drivers/virtio/virtio_pci_common.c
drivers/virtio/virtio_pci_common.h
drivers/virtio/virtio_pci_legacy.c
drivers/virtio/virtio_pci_modern.c
fs/btrfs/inode.c
fs/btrfs/super.c
fs/btrfs/volumes.c
fs/cifs/cifsglob.h
fs/cifs/cifsproto.h
fs/cifs/cifssmb.c
fs/cifs/connect.c
fs/cifs/file.c
fs/cifs/smb2ops.c
fs/cifs/smb2pdu.c
fs/hugetlbfs/inode.c
fs/proc/task_mmu.c
include/linux/cgroup.h
include/linux/mmu_notifier.h
include/linux/sched.h
include/linux/virtio.h
include/target/target_core_base.h
include/uapi/linux/virtio_pci.h
kernel/audit.c
kernel/cgroup/cgroup-v1.c
kernel/cgroup/cgroup.c
kernel/irq/affinity.c
kernel/kthread.c
mm/huge_memory.c
mm/z3fold.c
mm/zsmalloc.c
tools/power/cpupower/utils/helpers/cpuid.c
tools/power/x86/turbostat/turbostat.8
tools/power/x86/turbostat/turbostat.c

index e229922dc7f0a30cefab74d0f75313af513965e1..1d6f4e7280dc67f630b79456b3f1e5a4c0a41343 100644 (file)
--- a/.mailmap
+++ b/.mailmap
@@ -99,6 +99,8 @@ Linas Vepstas <linas@austin.ibm.com>
 Linus Lüssing <linus.luessing@c0d3.blue> <linus.luessing@web.de>
 Linus Lüssing <linus.luessing@c0d3.blue> <linus.luessing@ascom.ch>
 Mark Brown <broonie@sirena.org.uk>
+Martin Kepplinger <martink@posteo.de> <martin.kepplinger@theobroma-systems.com>
+Martin Kepplinger <martink@posteo.de> <martin.kepplinger@ginzinger.com>
 Matthieu CASTET <castet.matthieu@free.fr>
 Mauro Carvalho Chehab <mchehab@kernel.org> <mchehab@brturbo.com.br>
 Mauro Carvalho Chehab <mchehab@kernel.org> <maurochehab@gmail.com>
index fdd5350fe261f27a99d4a5c43c1e8a99eb498e26..676c139bc883ef7906e854174b8177ef282e0c51 100644 (file)
@@ -13305,7 +13305,7 @@ F:      drivers/virtio/
 F:     tools/virtio/
 F:     drivers/net/virtio_net.c
 F:     drivers/block/virtio_blk.c
-F:     include/linux/virtio_*.h
+F:     include/linux/virtio*.h
 F:     include/uapi/linux/virtio_*.h
 F:     drivers/crypto/virtio/
 
diff --git a/arch/ia64/include/asm/asm-prototypes.h b/arch/ia64/include/asm/asm-prototypes.h
new file mode 100644 (file)
index 0000000..a2c1398
--- /dev/null
@@ -0,0 +1,29 @@
+#ifndef _ASM_IA64_ASM_PROTOTYPES_H
+#define _ASM_IA64_ASM_PROTOTYPES_H
+
+#include <asm/cacheflush.h>
+#include <asm/checksum.h>
+#include <asm/esi.h>
+#include <asm/ftrace.h>
+#include <asm/page.h>
+#include <asm/pal.h>
+#include <asm/string.h>
+#include <asm/uaccess.h>
+#include <asm/unwind.h>
+#include <asm/xor.h>
+
+extern const char ia64_ivt[];
+
+signed int __divsi3(signed int, unsigned int);
+signed int __modsi3(signed int, unsigned int);
+
+signed long long __divdi3(signed long long, unsigned long long);
+signed long long __moddi3(signed long long, unsigned long long);
+
+unsigned int __udivsi3(unsigned int, unsigned int);
+unsigned int __umodsi3(unsigned int, unsigned int);
+
+unsigned long long __udivdi3(unsigned long long, unsigned long long);
+unsigned long long __umoddi3(unsigned long long, unsigned long long);
+
+#endif /* _ASM_IA64_ASM_PROTOTYPES_H */
index 1f3d3877618fdc934ab20f07695476206fe35e00..0a40b14407b1692c7a684bb2ea689d130df36805 100644 (file)
@@ -24,25 +24,25 @@ AFLAGS___modsi3.o   =            -DMODULO
 AFLAGS___umodsi3.o     = -DUNSIGNED -DMODULO
 
 $(obj)/__divdi3.o: $(src)/idiv64.S FORCE
-       $(call if_changed_dep,as_o_S)
+       $(call if_changed_rule,as_o_S)
 
 $(obj)/__udivdi3.o: $(src)/idiv64.S FORCE
-       $(call if_changed_dep,as_o_S)
+       $(call if_changed_rule,as_o_S)
 
 $(obj)/__moddi3.o: $(src)/idiv64.S FORCE
-       $(call if_changed_dep,as_o_S)
+       $(call if_changed_rule,as_o_S)
 
 $(obj)/__umoddi3.o: $(src)/idiv64.S FORCE
-       $(call if_changed_dep,as_o_S)
+       $(call if_changed_rule,as_o_S)
 
 $(obj)/__divsi3.o: $(src)/idiv32.S FORCE
-       $(call if_changed_dep,as_o_S)
+       $(call if_changed_rule,as_o_S)
 
 $(obj)/__udivsi3.o: $(src)/idiv32.S FORCE
-       $(call if_changed_dep,as_o_S)
+       $(call if_changed_rule,as_o_S)
 
 $(obj)/__modsi3.o: $(src)/idiv32.S FORCE
-       $(call if_changed_dep,as_o_S)
+       $(call if_changed_rule,as_o_S)
 
 $(obj)/__umodsi3.o: $(src)/idiv32.S FORCE
-       $(call if_changed_dep,as_o_S)
+       $(call if_changed_rule,as_o_S)
index 22af912d66d258f413414c7355ac82da2814b074..889e7619a0914d87ad49dbb5b960933e5dd316ad 100644 (file)
@@ -643,21 +643,40 @@ void __init init_mem_mapping(void)
  * devmem_is_allowed() checks to see if /dev/mem access to a certain address
  * is valid. The argument is a physical page number.
  *
- *
- * On x86, access has to be given to the first megabyte of ram because that area
- * contains BIOS code and data regions used by X and dosemu and similar apps.
- * Access has to be given to non-kernel-ram areas as well, these contain the PCI
- * mmio resources as well as potential bios/acpi data regions.
+ * On x86, access has to be given to the first megabyte of RAM because that
+ * area traditionally contains BIOS code and data regions used by X, dosemu,
+ * and similar apps. Since they map the entire memory range, the whole range
+ * must be allowed (for mapping), but any areas that would otherwise be
+ * disallowed are flagged as being "zero filled" instead of rejected.
+ * Access has to be given to non-kernel-ram areas as well, these contain the
+ * PCI mmio resources as well as potential bios/acpi data regions.
  */
 int devmem_is_allowed(unsigned long pagenr)
 {
-       if (pagenr < 256)
-               return 1;
-       if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
+       if (page_is_ram(pagenr)) {
+               /*
+                * For disallowed memory regions in the low 1MB range,
+                * request that the page be shown as all zeros.
+                */
+               if (pagenr < 256)
+                       return 2;
+
+               return 0;
+       }
+
+       /*
+        * This must follow RAM test, since System RAM is considered a
+        * restricted resource under CONFIG_STRICT_IOMEM.
+        */
+       if (iomem_is_exclusive(pagenr << PAGE_SHIFT)) {
+               /* Low 1MB bypasses iomem restrictions. */
+               if (pagenr < 256)
+                       return 1;
+
                return 0;
-       if (!page_is_ram(pagenr))
-               return 1;
-       return 0;
+       }
+
+       return 1;
 }
 
 void free_init_pages(char *what, unsigned long begin, unsigned long end)
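
devmem_is_allowed() now returns a tri-state rather than a boolean; the
drivers/char/mem.c hunks further down consume it. A hedged summary of the
contract as this diff implements it:

    /* Return values assumed by the /dev/mem changes below:
     *   0 - access denied; read/write fail with -EPERM
     *   1 - access allowed; real memory contents are used
     *   2 - access "allowed", but reads are satisfied with zeros
     *       (clear_user) and writes are silently skipped -- the new
     *       treatment of System RAM pages in the low 1MB
     */
    int allowed = page_is_allowed(pfn);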
index 30031d5293c483202c526d5045cda23be6617359..cdfe8c62895981029b8f69218b717b05d0b8961a 100644 (file)
@@ -201,6 +201,10 @@ void __init efi_arch_mem_reserve(phys_addr_t addr, u64 size)
                return;
        }
 
+       /* No need to reserve regions that will never be freed. */
+       if (md.attribute & EFI_MEMORY_RUNTIME)
+               return;
+
        size += addr % EFI_PAGE_SIZE;
        size = round_up(size, EFI_PAGE_SIZE);
        addr = round_down(addr, EFI_PAGE_SIZE);
index c86bae7b1d0fc4d0fce5832a1f2716f59d91a7e6..ff096d9755b925d9f72105f42993ebcc7c0522e1 100644 (file)
@@ -421,10 +421,8 @@ acpi_ut_walk_aml_resources(struct acpi_walk_state *walk_state,
 
        ACPI_FUNCTION_TRACE(ut_walk_aml_resources);
 
-       /*
-        * The absolute minimum resource template is one end_tag descriptor.
-        * However, we will treat a lone end_tag as just a simple buffer.
-        */
+       /* The absolute minimum resource template is one end_tag descriptor */
+
        if (aml_length < sizeof(struct aml_resource_end_tag)) {
                return_ACPI_STATUS(AE_AML_NO_RESOURCE_END_TAG);
        }
@@ -456,8 +454,9 @@ acpi_ut_walk_aml_resources(struct acpi_walk_state *walk_state,
                /* Invoke the user function */
 
                if (user_function) {
-                       status = user_function(aml, length, offset,
-                                              resource_index, context);
+                       status =
+                           user_function(aml, length, offset, resource_index,
+                                         context);
                        if (ACPI_FAILURE(status)) {
                                return_ACPI_STATUS(status);
                        }
@@ -481,12 +480,6 @@ acpi_ut_walk_aml_resources(struct acpi_walk_state *walk_state,
                                *context = aml;
                        }
 
-                       /* Check if buffer is defined to be longer than the resource length */
-
-                       if (aml_length > (offset + length)) {
-                               return_ACPI_STATUS(AE_AML_NO_RESOURCE_END_TAG);
-                       }
-
                        /* Normal exit */
 
                        return_ACPI_STATUS(AE_OK);
index 192691880d55c9499e1d77e68058a56d8e5318f3..2433569b02ef5cf40dd82cebec6fc83186f4dc3b 100644 (file)
@@ -1857,15 +1857,20 @@ static void acpi_bus_attach(struct acpi_device *device)
                return;
 
        device->flags.match_driver = true;
-       if (!ret) {
-               ret = device_attach(&device->dev);
-               if (ret < 0)
-                       return;
-
-               if (!ret && device->pnp.type.platform_id)
-                       acpi_default_enumeration(device);
+       if (ret > 0) {
+               acpi_device_set_enumerated(device);
+               goto ok;
        }
 
+       ret = device_attach(&device->dev);
+       if (ret < 0)
+               return;
+
+       if (ret > 0 || !device->pnp.type.platform_id)
+               acpi_device_set_enumerated(device);
+       else
+               acpi_default_enumeration(device);
+
  ok:
        list_for_each_entry(child, &device->children, node)
                acpi_bus_attach(child);
index 6c9aa95a9a050cc070ab222a382f5dabd55ec7ea..49d705c9f0f7b9c6b2ef2549769b6901438c2854 100644 (file)
@@ -278,11 +278,6 @@ static int atiixp_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
        };
        const struct ata_port_info *ppi[] = { &info, &info };
 
-       /* SB600/700 don't have secondary port wired */
-       if ((pdev->device == PCI_DEVICE_ID_ATI_IXP600_IDE) ||
-               (pdev->device == PCI_DEVICE_ID_ATI_IXP700_IDE))
-               ppi[1] = &ata_dummy_port_info;
-
        return ata_pci_bmdma_init_one(pdev, ppi, &atiixp_sht, NULL,
                                      ATA_HOST_PARALLEL_SCAN);
 }
index 0636d84fbefe0acc889004b39e83c0137fc0f6a0..f3f538eec7b3bb85b682368ffb42496989c97263 100644 (file)
@@ -644,14 +644,16 @@ static void svia_configure(struct pci_dev *pdev, int board_id,
                pci_write_config_byte(pdev, SATA_NATIVE_MODE, tmp8);
        }
 
-       /* enable IRQ on hotplug */
-       pci_read_config_byte(pdev, SVIA_MISC_3, &tmp8);
-       if ((tmp8 & SATA_HOTPLUG) != SATA_HOTPLUG) {
-               dev_dbg(&pdev->dev,
-                       "enabling SATA hotplug (0x%x)\n",
-                       (int) tmp8);
-               tmp8 |= SATA_HOTPLUG;
-               pci_write_config_byte(pdev, SVIA_MISC_3, tmp8);
+       if (board_id == vt6421) {
+               /* enable IRQ on hotplug */
+               pci_read_config_byte(pdev, SVIA_MISC_3, &tmp8);
+               if ((tmp8 & SATA_HOTPLUG) != SATA_HOTPLUG) {
+                       dev_dbg(&pdev->dev,
+                               "enabling SATA hotplug (0x%x)\n",
+                               (int) tmp8);
+                       tmp8 |= SATA_HOTPLUG;
+                       pci_write_config_byte(pdev, SVIA_MISC_3, tmp8);
+               }
        }
 
        /*
index dceb5edd1e5455f4c1b101e8ad3ce4dba46ac22f..0c09d42561081ebb393398ae21a6900283d8f007 100644 (file)
@@ -523,7 +523,7 @@ static int zram_decompress_page(struct zram *zram, char *mem, u32 index)
 
        cmem = zs_map_object(meta->mem_pool, handle, ZS_MM_RO);
        if (size == PAGE_SIZE) {
-               copy_page(mem, cmem);
+               memcpy(mem, cmem, PAGE_SIZE);
        } else {
                struct zcomp_strm *zstrm = zcomp_stream_get(zram->comp);
 
@@ -717,7 +717,7 @@ compress_again:
 
        if ((clen == PAGE_SIZE) && !is_partial_io(bvec)) {
                src = kmap_atomic(page);
-               copy_page(cmem, src);
+               memcpy(cmem, src, PAGE_SIZE);
                kunmap_atomic(src);
        } else {
                memcpy(cmem, src, clen);
@@ -928,7 +928,7 @@ static int zram_rw_page(struct block_device *bdev, sector_t sector,
        }
 
        index = sector >> SECTORS_PER_PAGE_SHIFT;
-       offset = sector & (SECTORS_PER_PAGE - 1) << SECTOR_SHIFT;
+       offset = (sector & (SECTORS_PER_PAGE - 1)) << SECTOR_SHIFT;
 
        bv.bv_page = page;
        bv.bv_len = PAGE_SIZE;
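
Two independent zram hazards are fixed above: copy_page() assumes
page-aligned source and destination, which zsmalloc-mapped objects do not
guarantee, hence the switch to memcpy(); and the offset computation had an
operator-precedence bug, since << binds tighter than & in C. A worked
example of the latter, assuming 4K pages (SECTORS_PER_PAGE = 8,
SECTOR_SHIFT = 9):

    /* sector = 10: the intended byte offset within the page is
     *   (10 & 7) << 9 = 2 << 9 = 1024.
     * The old, unparenthesized form computed
     *   10 & ((8 - 1) << 9) = 10 & 3584 = 0  -- wrong.
     */
    offset = (sector & (SECTORS_PER_PAGE - 1)) << SECTOR_SHIFT;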
index 6d9cc2d39d22306fd68f30bac6f4a60e6cfa5a87..7e4a9d1296bb7fb666f6b37ced2757a8585b7d75 100644 (file)
@@ -60,6 +60,10 @@ static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
 #endif
 
 #ifdef CONFIG_STRICT_DEVMEM
+static inline int page_is_allowed(unsigned long pfn)
+{
+       return devmem_is_allowed(pfn);
+}
 static inline int range_is_allowed(unsigned long pfn, unsigned long size)
 {
        u64 from = ((u64)pfn) << PAGE_SHIFT;
@@ -75,6 +79,10 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
        return 1;
 }
 #else
+static inline int page_is_allowed(unsigned long pfn)
+{
+       return 1;
+}
 static inline int range_is_allowed(unsigned long pfn, unsigned long size)
 {
        return 1;
@@ -122,23 +130,31 @@ static ssize_t read_mem(struct file *file, char __user *buf,
 
        while (count > 0) {
                unsigned long remaining;
+               int allowed;
 
                sz = size_inside_page(p, count);
 
-               if (!range_is_allowed(p >> PAGE_SHIFT, count))
+               allowed = page_is_allowed(p >> PAGE_SHIFT);
+               if (!allowed)
                        return -EPERM;
+               if (allowed == 2) {
+                       /* Show zeros for restricted memory. */
+                       remaining = clear_user(buf, sz);
+               } else {
+                       /*
+                        * On ia64 if a page has been mapped somewhere as
+                        * uncached, then it must also be accessed uncached
+                        * by the kernel or data corruption may occur.
+                        */
+                       ptr = xlate_dev_mem_ptr(p);
+                       if (!ptr)
+                               return -EFAULT;
 
-               /*
-                * On ia64 if a page has been mapped somewhere as uncached, then
-                * it must also be accessed uncached by the kernel or data
-                * corruption may occur.
-                */
-               ptr = xlate_dev_mem_ptr(p);
-               if (!ptr)
-                       return -EFAULT;
+                       remaining = copy_to_user(buf, ptr, sz);
+
+                       unxlate_dev_mem_ptr(p, ptr);
+               }
 
-               remaining = copy_to_user(buf, ptr, sz);
-               unxlate_dev_mem_ptr(p, ptr);
                if (remaining)
                        return -EFAULT;
 
@@ -181,30 +197,36 @@ static ssize_t write_mem(struct file *file, const char __user *buf,
 #endif
 
        while (count > 0) {
+               int allowed;
+
                sz = size_inside_page(p, count);
 
-               if (!range_is_allowed(p >> PAGE_SHIFT, sz))
+               allowed = page_is_allowed(p >> PAGE_SHIFT);
+               if (!allowed)
                        return -EPERM;
 
-               /*
-                * On ia64 if a page has been mapped somewhere as uncached, then
-                * it must also be accessed uncached by the kernel or data
-                * corruption may occur.
-                */
-               ptr = xlate_dev_mem_ptr(p);
-               if (!ptr) {
-                       if (written)
-                               break;
-                       return -EFAULT;
-               }
+               /* Skip actual writing when a page is marked as restricted. */
+               if (allowed == 1) {
+                       /*
+                        * On ia64 if a page has been mapped somewhere as
+                        * uncached, then it must also be accessed uncached
+                        * by the kernel or data corruption may occur.
+                        */
+                       ptr = xlate_dev_mem_ptr(p);
+                       if (!ptr) {
+                               if (written)
+                                       break;
+                               return -EFAULT;
+                       }
 
-               copied = copy_from_user(ptr, buf, sz);
-               unxlate_dev_mem_ptr(p, ptr);
-               if (copied) {
-                       written += sz - copied;
-                       if (written)
-                               break;
-                       return -EFAULT;
+                       copied = copy_from_user(ptr, buf, sz);
+                       unxlate_dev_mem_ptr(p, ptr);
+                       if (copied) {
+                               written += sz - copied;
+                               if (written)
+                                       break;
+                               return -EFAULT;
+                       }
                }
 
                buf += sz;
index e9b7e0b3cabe60d3be3ab8a092159b137854d8ec..87fe111d0be6b03ec8157f0a688046d490d7e887 100644 (file)
@@ -2202,14 +2202,16 @@ static int virtcons_freeze(struct virtio_device *vdev)
 
        vdev->config->reset(vdev);
 
-       virtqueue_disable_cb(portdev->c_ivq);
+       if (use_multiport(portdev))
+               virtqueue_disable_cb(portdev->c_ivq);
        cancel_work_sync(&portdev->control_work);
        cancel_work_sync(&portdev->config_work);
        /*
         * Once more: if control_work_handler() was running, it would
         * enable the cb as the last step.
         */
-       virtqueue_disable_cb(portdev->c_ivq);
+       if (use_multiport(portdev))
+               virtqueue_disable_cb(portdev->c_ivq);
        remove_controlq_data(portdev);
 
        list_for_each_entry(port, &portdev->ports, list) {
index bc96d423781aa8a300725f8fbe0a052be12cd4b5..0e3f6496524d92c7c1717d8d2259684952d7acb8 100644 (file)
@@ -2398,6 +2398,20 @@ EXPORT_SYMBOL_GPL(cpufreq_boost_enabled);
  *********************************************************************/
 static enum cpuhp_state hp_online;
 
+static int cpuhp_cpufreq_online(unsigned int cpu)
+{
+       cpufreq_online(cpu);
+
+       return 0;
+}
+
+static int cpuhp_cpufreq_offline(unsigned int cpu)
+{
+       cpufreq_offline(cpu);
+
+       return 0;
+}
+
 /**
  * cpufreq_register_driver - register a CPU Frequency driver
  * @driver_data: A struct cpufreq_driver containing the values#
@@ -2460,8 +2474,8 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
        }
 
        ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "cpufreq:online",
-                                       cpufreq_online,
-                                       cpufreq_offline);
+                                       cpuhp_cpufreq_online,
+                                       cpuhp_cpufreq_offline);
        if (ret < 0)
                goto err_if_unreg;
        hp_online = ret;
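
Why trivial wrappers instead of registering cpufreq_online() and
cpufreq_offline() directly (a reading of this hunk, not upstream
commentary):

    /* cpuhp callbacks that return non-zero abort CPU bring-up or
     * tear-down. Discarding the cpufreq return value and always
     * returning 0 keeps a cpufreq failure from blocking CPU hotplug
     * itself. */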
index 932742e4cf23147e304d6df2a1c8f37c8f89d296..24c461dea7afb146a509e097b581aa2fdaede132 100644 (file)
@@ -149,7 +149,8 @@ setup_gop32(efi_system_table_t *sys_table_arg, struct screen_info *si,
 
                status = __gop_query32(sys_table_arg, gop32, &info, &size,
                                       &current_fb_base);
-               if (status == EFI_SUCCESS && (!first_gop || conout_found)) {
+               if (status == EFI_SUCCESS && (!first_gop || conout_found) &&
+                   info->pixel_format != PIXEL_BLT_ONLY) {
                        /*
                         * Systems that use the UEFI Console Splitter may
                         * provide multiple GOP devices, not all of which are
@@ -266,7 +267,8 @@ setup_gop64(efi_system_table_t *sys_table_arg, struct screen_info *si,
 
                status = __gop_query64(sys_table_arg, gop64, &info, &size,
                                       &current_fb_base);
-               if (status == EFI_SUCCESS && (!first_gop || conout_found)) {
+               if (status == EFI_SUCCESS && (!first_gop || conout_found) &&
+                   info->pixel_format != PIXEL_BLT_ONLY) {
                        /*
                         * Systems that use the UEFI Console Splitter may
                         * provide multiple GOP devices, not all of which are
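
The added check matters because a GOP instance reporting PIXEL_BLT_ONLY
exposes no CPU-addressable linear framebuffer, so it cannot back
screen_info/efifb. For reference, the UEFI pixel formats (spec values;
kernel names assumed from include/linux/efi.h):

    /* Only the first three describe a linear framebuffer the kernel
     * can map: */
    PIXEL_RGB_RESERVED_8BIT_PER_COLOR = 0,
    PIXEL_BGR_RESERVED_8BIT_PER_COLOR = 1,
    PIXEL_BIT_MASK                    = 2,
    PIXEL_BLT_ONLY                    = 3,  /* Blt() service only */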
index da48819ff2e6550c0a7d6206569d85e8a880c0c5..b78d9239e48fb0fc3b02129fe97795a6b94bed70 100644 (file)
@@ -1317,7 +1317,7 @@ int etnaviv_gpu_submit(struct etnaviv_gpu *gpu,
        if (!fence) {
                event_free(gpu, event);
                ret = -ENOMEM;
-               goto out_pm_put;
+               goto out_unlock;
        }
 
        gpu->event[event].fence = fence;
@@ -1357,6 +1357,7 @@ int etnaviv_gpu_submit(struct etnaviv_gpu *gpu,
        hangcheck_timer_reset(gpu);
        ret = 0;
 
+out_unlock:
        mutex_unlock(&gpu->lock);
 
 out_pm_put:
index b7d7721e72faddc2a2d4fc76d69d795b8053cfad..40af17ec6312533d4080cc1581faa4683a0405b9 100644 (file)
@@ -285,9 +285,6 @@ int intel_vgpu_emulate_cfg_write(struct intel_vgpu *vgpu, unsigned int offset,
 {
        int ret;
 
-       if (vgpu->failsafe)
-               return 0;
-
        if (WARN_ON(bytes > 4))
                return -EINVAL;
 
index f1f426a97aa9d43826010d7be90f6bffdbe59426..d186c157f65fefe3c64b45b86f0ffa6ab824df2f 100644 (file)
@@ -775,7 +775,8 @@ static void init_vgpu_execlist(struct intel_vgpu *vgpu, int ring_id)
                        _EL_OFFSET_STATUS_PTR);
 
        ctx_status_ptr.dw = vgpu_vreg(vgpu, ctx_status_ptr_reg);
-       ctx_status_ptr.read_ptr = ctx_status_ptr.write_ptr = 0x7;
+       ctx_status_ptr.read_ptr = 0;
+       ctx_status_ptr.write_ptr = 0x7;
        vgpu_vreg(vgpu, ctx_status_ptr_reg) = ctx_status_ptr.dw;
 }
 
index 933a7c211a1c29ab77357119e37b0de2bb3dd521..dce8d15f706f58b4cf1019ddc1d5b609c903980c 100644 (file)
@@ -75,11 +75,11 @@ static int expose_firmware_sysfs(struct intel_gvt *gvt)
        struct gvt_firmware_header *h;
        void *firmware;
        void *p;
-       unsigned long size;
+       unsigned long size, crc32_start;
        int i;
        int ret;
 
-       size = sizeof(*h) + info->mmio_size + info->cfg_space_size - 1;
+       size = sizeof(*h) + info->mmio_size + info->cfg_space_size;
        firmware = vzalloc(size);
        if (!firmware)
                return -ENOMEM;
@@ -112,6 +112,9 @@ static int expose_firmware_sysfs(struct intel_gvt *gvt)
 
        memcpy(gvt->firmware.mmio, p, info->mmio_size);
 
+       crc32_start = offsetof(struct gvt_firmware_header, crc32) + 4;
+       h->crc32 = crc32_le(0, firmware + crc32_start, size - crc32_start);
+
        firmware_attr.size = size;
        firmware_attr.private = firmware;
 
@@ -234,7 +237,7 @@ int intel_gvt_load_firmware(struct intel_gvt *gvt)
 
        firmware->mmio = mem;
 
-       sprintf(path, "%s/vid_0x%04x_did_0x%04x_rid_0x%04x.golden_hw_state",
+       sprintf(path, "%s/vid_0x%04x_did_0x%04x_rid_0x%02x.golden_hw_state",
                 GVT_FIRMWARE_PATH, pdev->vendor, pdev->device,
                 pdev->revision);
 
index 3b9d59e457ba7dbf2a1baffff7cd4f4c9aa75f3f..ef3baa0c4754566319a4706d50a77d2e1c6e2255 100644 (file)
@@ -52,6 +52,8 @@ static const struct intel_gvt_ops intel_gvt_ops = {
        .vgpu_create = intel_gvt_create_vgpu,
        .vgpu_destroy = intel_gvt_destroy_vgpu,
        .vgpu_reset = intel_gvt_reset_vgpu,
+       .vgpu_activate = intel_gvt_activate_vgpu,
+       .vgpu_deactivate = intel_gvt_deactivate_vgpu,
 };
 
 /**
index 6dfc48b63b718b4c4e6f5c62794db5ce279b18a4..becae2fa3b29d9956cf69469968c6a1c7c74b770 100644 (file)
@@ -382,7 +382,8 @@ void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu);
 void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr,
                                 unsigned int engine_mask);
 void intel_gvt_reset_vgpu(struct intel_vgpu *vgpu);
-
+void intel_gvt_activate_vgpu(struct intel_vgpu *vgpu);
+void intel_gvt_deactivate_vgpu(struct intel_vgpu *vgpu);
 
 /* validating GM functions */
 #define vgpu_gmadr_is_aperture(vgpu, gmadr) \
@@ -449,6 +450,8 @@ struct intel_gvt_ops {
                                struct intel_vgpu_type *);
        void (*vgpu_destroy)(struct intel_vgpu *);
        void (*vgpu_reset)(struct intel_vgpu *);
+       void (*vgpu_activate)(struct intel_vgpu *);
+       void (*vgpu_deactivate)(struct intel_vgpu *);
 };
 
 
index d641214578a7dc6631e866bbc91c2d38f3e95a76..e466259034e24b2c62b82265978298c3500b79c5 100644 (file)
@@ -544,6 +544,8 @@ static int intel_vgpu_open(struct mdev_device *mdev)
        if (ret)
                goto undo_group;
 
+       intel_gvt_ops->vgpu_activate(vgpu);
+
        atomic_set(&vgpu->vdev.released, 0);
        return ret;
 
@@ -569,6 +571,8 @@ static void __intel_vgpu_release(struct intel_vgpu *vgpu)
        if (atomic_cmpxchg(&vgpu->vdev.released, 0, 1))
                return;
 
+       intel_gvt_ops->vgpu_deactivate(vgpu);
+
        ret = vfio_unregister_notifier(mdev_dev(vgpu->vdev.mdev), VFIO_IOMMU_NOTIFY,
                                        &vgpu->vdev.iommu_notifier);
        WARN(ret, "vfio_unregister_notifier for iommu failed: %d\n", ret);
@@ -1340,13 +1344,6 @@ static int kvmgt_guest_init(struct mdev_device *mdev)
 
 static bool kvmgt_guest_exit(struct kvmgt_guest_info *info)
 {
-       struct intel_vgpu *vgpu = info->vgpu;
-
-       if (!info) {
-               gvt_vgpu_err("kvmgt_guest_info invalid\n");
-               return false;
-       }
-
        kvm_page_track_unregister_notifier(info->kvm, &info->track_node);
        kvm_put_kvm(info->kvm);
        kvmgt_protect_table_destroy(info);
index 41cfa5ccae84ce4020c6b2ff4a051ce8e17f6298..649ef280cc9a5bc10f4bebdc2f43c27e5249bd7f 100644 (file)
@@ -72,7 +72,7 @@ static struct {
        char *name;
 } vgpu_types[] = {
 /* Fixed vGPU type table */
-       { MB_TO_BYTES(64), MB_TO_BYTES(512), 4, GVT_EDID_1024_768, "8" },
+       { MB_TO_BYTES(64), MB_TO_BYTES(384), 4, GVT_EDID_1024_768, "8" },
        { MB_TO_BYTES(128), MB_TO_BYTES(512), 4, GVT_EDID_1920_1200, "4" },
        { MB_TO_BYTES(256), MB_TO_BYTES(1024), 4, GVT_EDID_1920_1200, "2" },
        { MB_TO_BYTES(512), MB_TO_BYTES(2048), 4, GVT_EDID_1920_1200, "1" },
@@ -179,20 +179,34 @@ static void intel_gvt_update_vgpu_types(struct intel_gvt *gvt)
 }
 
 /**
- * intel_gvt_destroy_vgpu - destroy a virtual GPU
+ * intel_gvt_active_vgpu - activate a virtual GPU
  * @vgpu: virtual GPU
  *
- * This function is called when user wants to destroy a virtual GPU.
+ * This function is called when user wants to activate a virtual GPU.
  *
  */
-void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu)
+void intel_gvt_activate_vgpu(struct intel_vgpu *vgpu)
+{
+       mutex_lock(&vgpu->gvt->lock);
+       vgpu->active = true;
+       mutex_unlock(&vgpu->gvt->lock);
+}
+
+/**
+ * intel_gvt_deactive_vgpu - deactivate a virtual GPU
+ * @vgpu: virtual GPU
+ *
+ * This function is called when user wants to deactivate a virtual GPU.
+ * All virtual GPU runtime information will be destroyed.
+ *
+ */
+void intel_gvt_deactivate_vgpu(struct intel_vgpu *vgpu)
 {
        struct intel_gvt *gvt = vgpu->gvt;
 
        mutex_lock(&gvt->lock);
 
        vgpu->active = false;
-       idr_remove(&gvt->vgpu_idr, vgpu->id);
 
        if (atomic_read(&vgpu->running_workload_num)) {
                mutex_unlock(&gvt->lock);
@@ -201,6 +215,26 @@ void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu)
        }
 
        intel_vgpu_stop_schedule(vgpu);
+
+       mutex_unlock(&gvt->lock);
+}
+
+/**
+ * intel_gvt_destroy_vgpu - destroy a virtual GPU
+ * @vgpu: virtual GPU
+ *
+ * This function is called when user wants to destroy a virtual GPU.
+ *
+ */
+void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu)
+{
+       struct intel_gvt *gvt = vgpu->gvt;
+
+       mutex_lock(&gvt->lock);
+
+       WARN(vgpu->active, "vGPU is still active!\n");
+
+       idr_remove(&gvt->vgpu_idr, vgpu->id);
        intel_vgpu_clean_sched_policy(vgpu);
        intel_vgpu_clean_gvt_context(vgpu);
        intel_vgpu_clean_execlist(vgpu);
@@ -277,7 +311,6 @@ static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt,
        if (ret)
                goto out_clean_shadow_ctx;
 
-       vgpu->active = true;
        mutex_unlock(&gvt->lock);
 
        return vgpu;
index 1c75402a59c1377e7abd410f2dc58dcec0df7094..5c089b3c2a7efdb29343de77a43dfc1e1f720057 100644 (file)
@@ -1434,8 +1434,6 @@ static int i915_drm_suspend(struct drm_device *dev)
                goto out;
        }
 
-       intel_guc_suspend(dev_priv);
-
        intel_display_suspend(dev);
 
        intel_dp_mst_suspend(dev);
index 1e53c31b6826ec996b2d153e1dd32232b77dd9d7..46fcd8b7080aafca8d589ca25ef6d57a9dc27a48 100644 (file)
@@ -806,6 +806,7 @@ struct intel_csr {
        func(has_resource_streamer); \
        func(has_runtime_pm); \
        func(has_snoop); \
+       func(unfenced_needs_alignment); \
        func(cursor_needs_physical); \
        func(hws_needs_physical); \
        func(overlay_needs_physical); \
index 67b1fc5a03313b80bc9459543b8dec3743ea953b..fe531f90406241bfa2cfc89e6155f1ef25802404 100644 (file)
@@ -4348,6 +4348,8 @@ int i915_gem_suspend(struct drm_i915_private *dev_priv)
        i915_gem_context_lost(dev_priv);
        mutex_unlock(&dev->struct_mutex);
 
+       intel_guc_suspend(dev_priv);
+
        cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
        cancel_delayed_work_sync(&dev_priv->gt.retire_work);
 
index 30e0675fd7dab7949d3cb3cb498be852d7448ac9..15a15d00a6bfa07cbe93ac2669cefaee3cb5ed2c 100644 (file)
@@ -888,6 +888,7 @@ i915_gem_execbuffer_reserve(struct intel_engine_cs *engine,
        struct list_head ordered_vmas;
        struct list_head pinned_vmas;
        bool has_fenced_gpu_access = INTEL_GEN(engine->i915) < 4;
+       bool needs_unfenced_map = INTEL_INFO(engine->i915)->unfenced_needs_alignment;
        int retry;
 
        vm = list_first_entry(vmas, struct i915_vma, exec_list)->vm;
@@ -908,7 +909,8 @@ i915_gem_execbuffer_reserve(struct intel_engine_cs *engine,
                if (!has_fenced_gpu_access)
                        entry->flags &= ~EXEC_OBJECT_NEEDS_FENCE;
                need_fence =
-                       entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
+                       (entry->flags & EXEC_OBJECT_NEEDS_FENCE ||
+                        needs_unfenced_map) &&
                        i915_gem_object_is_tiled(obj);
                need_mappable = need_fence || need_reloc_mappable(vma);
 
index 2801a4d5632491787009c4ae62130dc732215136..96e45a4d54410b085191e09ab33c6f4ceca0100a 100644 (file)
@@ -2704,7 +2704,7 @@ void i915_gem_gtt_finish_pages(struct drm_i915_gem_object *obj,
        struct i915_ggtt *ggtt = &dev_priv->ggtt;
 
        if (unlikely(ggtt->do_idle_maps)) {
-               if (i915_gem_wait_for_idle(dev_priv, I915_WAIT_LOCKED)) {
+               if (i915_gem_wait_for_idle(dev_priv, 0)) {
                        DRM_ERROR("Failed to wait for idle; VT'd may hang.\n");
                        /* Wait a bit, in hopes it avoids the hang */
                        udelay(10);
index e7c3c0318ff60f2bf60b3c5afce405d11ce54a5c..da70bfe97ec5843adbdac276e5c86fa608266087 100644 (file)
@@ -37,6 +37,17 @@ static const char *i915_fence_get_driver_name(struct dma_fence *fence)
 
 static const char *i915_fence_get_timeline_name(struct dma_fence *fence)
 {
+       /* The timeline struct (as part of the ppgtt underneath a context)
+        * may be freed when the request is no longer in use by the GPU.
+        * We could extend the life of a context to beyond that of all
+        * fences, possibly keeping the hw resource around indefinitely,
+        * or we just give them a false name. Since
+        * dma_fence_ops.get_timeline_name is a debug feature, the occasional
+        * lie seems justifiable.
+        */
+       if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
+               return "signaled";
+
        return to_request(fence)->timeline->common->name;
 }
 
index d5d2b4c6ed382d687719a088d943580ccacbba15..70b3832a79dd40d066a6f2deb34cbc6d436559e1 100644 (file)
@@ -53,6 +53,17 @@ static bool i915_gem_shrinker_lock(struct drm_device *dev, bool *unlock)
        BUG();
 }
 
+static void i915_gem_shrinker_unlock(struct drm_device *dev, bool unlock)
+{
+       if (!unlock)
+               return;
+
+       mutex_unlock(&dev->struct_mutex);
+
+       /* expedite the RCU grace period to free some request slabs */
+       synchronize_rcu_expedited();
+}
+
 static bool any_vma_pinned(struct drm_i915_gem_object *obj)
 {
        struct i915_vma *vma;
@@ -232,11 +243,8 @@ i915_gem_shrink(struct drm_i915_private *dev_priv,
                intel_runtime_pm_put(dev_priv);
 
        i915_gem_retire_requests(dev_priv);
-       if (unlock)
-               mutex_unlock(&dev_priv->drm.struct_mutex);
 
-       /* expedite the RCU grace period to free some request slabs */
-       synchronize_rcu_expedited();
+       i915_gem_shrinker_unlock(&dev_priv->drm, unlock);
 
        return count;
 }
@@ -293,8 +301,7 @@ i915_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
                        count += obj->base.size >> PAGE_SHIFT;
        }
 
-       if (unlock)
-               mutex_unlock(&dev->struct_mutex);
+       i915_gem_shrinker_unlock(dev, unlock);
 
        return count;
 }
@@ -321,8 +328,8 @@ i915_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
                                         sc->nr_to_scan - freed,
                                         I915_SHRINK_BOUND |
                                         I915_SHRINK_UNBOUND);
-       if (unlock)
-               mutex_unlock(&dev->struct_mutex);
+
+       i915_gem_shrinker_unlock(dev, unlock);
 
        return freed;
 }
@@ -364,8 +371,7 @@ i915_gem_shrinker_unlock_uninterruptible(struct drm_i915_private *dev_priv,
                                         struct shrinker_lock_uninterruptible *slu)
 {
        dev_priv->mm.interruptible = slu->was_interruptible;
-       if (slu->unlock)
-               mutex_unlock(&dev_priv->drm.struct_mutex);
+       i915_gem_shrinker_unlock(&dev_priv->drm, slu->unlock);
 }
 
 static int
index ecb487b5356fe68696b19d3054dc61339a61f406..9bbbd4e83e3c5d99cb4cdc9d4422f586159826bf 100644 (file)
@@ -60,6 +60,7 @@
        .has_overlay = 1, .overlay_needs_physical = 1, \
        .has_gmch_display = 1, \
        .hws_needs_physical = 1, \
+       .unfenced_needs_alignment = 1, \
        .ring_mask = RENDER_RING, \
        GEN_DEFAULT_PIPEOFFSETS, \
        CURSOR_OFFSETS
@@ -101,6 +102,7 @@ static const struct intel_device_info intel_i915g_info = {
        .platform = INTEL_I915G, .cursor_needs_physical = 1,
        .has_overlay = 1, .overlay_needs_physical = 1,
        .hws_needs_physical = 1,
+       .unfenced_needs_alignment = 1,
 };
 
 static const struct intel_device_info intel_i915gm_info = {
@@ -112,6 +114,7 @@ static const struct intel_device_info intel_i915gm_info = {
        .supports_tv = 1,
        .has_fbc = 1,
        .hws_needs_physical = 1,
+       .unfenced_needs_alignment = 1,
 };
 
 static const struct intel_device_info intel_i945g_info = {
@@ -120,6 +123,7 @@ static const struct intel_device_info intel_i945g_info = {
        .has_hotplug = 1, .cursor_needs_physical = 1,
        .has_overlay = 1, .overlay_needs_physical = 1,
        .hws_needs_physical = 1,
+       .unfenced_needs_alignment = 1,
 };
 
 static const struct intel_device_info intel_i945gm_info = {
@@ -130,6 +134,7 @@ static const struct intel_device_info intel_i945gm_info = {
        .supports_tv = 1,
        .has_fbc = 1,
        .hws_needs_physical = 1,
+       .unfenced_needs_alignment = 1,
 };
 
 static const struct intel_device_info intel_g33_info = {
index a1b7eec58be2742e6d94e5566b09fbb61e54df9d..70964ca9251e04939225c87a773e9a6bf760b1ac 100644 (file)
@@ -1705,7 +1705,7 @@ i915_perf_open_ioctl_locked(struct drm_i915_private *dev_priv,
         */
        if (WARN_ON(stream->sample_flags != props->sample_flags)) {
                ret = -ENODEV;
-               goto err_alloc;
+               goto err_flags;
        }
 
        list_add(&stream->link, &dev_priv->perf.streams);
@@ -1728,6 +1728,7 @@ i915_perf_open_ioctl_locked(struct drm_i915_private *dev_priv,
 
 err_open:
        list_del(&stream->link);
+err_flags:
        if (stream->ops->destroy)
                stream->ops->destroy(stream);
 err_alloc:
@@ -1793,6 +1794,11 @@ static int read_properties_unlocked(struct drm_i915_private *dev_priv,
                if (ret)
                        return ret;
 
+               if (id == 0 || id >= DRM_I915_PERF_PROP_MAX) {
+                       DRM_DEBUG("Unknown i915 perf property ID\n");
+                       return -EINVAL;
+               }
+
                switch ((enum drm_i915_perf_property_id)id) {
                case DRM_I915_PERF_PROP_CTX_HANDLE:
                        props->single_context = 1;
@@ -1862,9 +1868,8 @@ static int read_properties_unlocked(struct drm_i915_private *dev_priv,
                        props->oa_periodic = true;
                        props->oa_period_exponent = value;
                        break;
-               default:
+               case DRM_I915_PERF_PROP_MAX:
                        MISSING_CASE(id);
-                       DRM_DEBUG("Unknown i915 perf property ID\n");
                        return -EINVAL;
                }
 
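
A reading of the two property-ID hunks above:

    /* Validating "id" against DRM_I915_PERF_PROP_MAX before the
     * switch makes the enum cast always in-range, so the default:
     * label can be dropped; the compiler can then warn when a new
     * property is added without a case, and only the sentinel
     * DRM_I915_PERF_PROP_MAX is left as a MISSING_CASE(). */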
index 471af3b480adc38a3a48c27d1999805836cf5630..47517a02f0a439125b3b3a769e6848a4c4928ca2 100644 (file)
@@ -670,15 +670,14 @@ static void execlists_submit_request(struct drm_i915_gem_request *request)
 static struct intel_engine_cs *
 pt_lock_engine(struct i915_priotree *pt, struct intel_engine_cs *locked)
 {
-       struct intel_engine_cs *engine;
+       struct intel_engine_cs *engine =
+               container_of(pt, struct drm_i915_gem_request, priotree)->engine;
+
+       GEM_BUG_ON(!locked);
 
-       engine = container_of(pt,
-                             struct drm_i915_gem_request,
-                             priotree)->engine;
        if (engine != locked) {
-               if (locked)
-                       spin_unlock_irq(&locked->timeline->lock);
-               spin_lock_irq(&engine->timeline->lock);
+               spin_unlock(&locked->timeline->lock);
+               spin_lock(&engine->timeline->lock);
        }
 
        return engine;
@@ -686,7 +685,7 @@ pt_lock_engine(struct i915_priotree *pt, struct intel_engine_cs *locked)
 
 static void execlists_schedule(struct drm_i915_gem_request *request, int prio)
 {
-       struct intel_engine_cs *engine = NULL;
+       struct intel_engine_cs *engine;
        struct i915_dependency *dep, *p;
        struct i915_dependency stack;
        LIST_HEAD(dfs);
@@ -720,26 +719,23 @@ static void execlists_schedule(struct drm_i915_gem_request *request, int prio)
        list_for_each_entry_safe(dep, p, &dfs, dfs_link) {
                struct i915_priotree *pt = dep->signaler;
 
-               list_for_each_entry(p, &pt->signalers_list, signal_link)
+               /* Within an engine, there can be no cycle, but we may
+                * refer to the same dependency chain multiple times
+                * (redundant dependencies are not eliminated) and across
+                * engines.
+                */
+               list_for_each_entry(p, &pt->signalers_list, signal_link) {
+                       GEM_BUG_ON(p->signaler->priority < pt->priority);
                        if (prio > READ_ONCE(p->signaler->priority))
                                list_move_tail(&p->dfs_link, &dfs);
+               }
 
                list_safe_reset_next(dep, p, dfs_link);
-               if (!RB_EMPTY_NODE(&pt->node))
-                       continue;
-
-               engine = pt_lock_engine(pt, engine);
-
-               /* If it is not already in the rbtree, we can update the
-                * priority inplace and skip over it (and its dependencies)
-                * if it is referenced *again* as we descend the dfs.
-                */
-               if (prio > pt->priority && RB_EMPTY_NODE(&pt->node)) {
-                       pt->priority = prio;
-                       list_del_init(&dep->dfs_link);
-               }
        }
 
+       engine = request->engine;
+       spin_lock_irq(&engine->timeline->lock);
+
        /* Fifo and depth-first replacement ensure our deps execute before us */
        list_for_each_entry_safe_reverse(dep, p, &dfs, dfs_link) {
                struct i915_priotree *pt = dep->signaler;
@@ -751,16 +747,15 @@ static void execlists_schedule(struct drm_i915_gem_request *request, int prio)
                if (prio <= pt->priority)
                        continue;
 
-               GEM_BUG_ON(RB_EMPTY_NODE(&pt->node));
-
                pt->priority = prio;
-               rb_erase(&pt->node, &engine->execlist_queue);
-               if (insert_request(pt, &engine->execlist_queue))
-                       engine->execlist_first = &pt->node;
+               if (!RB_EMPTY_NODE(&pt->node)) {
+                       rb_erase(&pt->node, &engine->execlist_queue);
+                       if (insert_request(pt, &engine->execlist_queue))
+                               engine->execlist_first = &pt->node;
+               }
        }
 
-       if (engine)
-               spin_unlock_irq(&engine->timeline->lock);
+       spin_unlock_irq(&engine->timeline->lock);
 
        /* XXX Do we need to preempt to make room for us and our deps? */
 }
@@ -1440,7 +1435,9 @@ static void reset_common_ring(struct intel_engine_cs *engine,
        GEM_BUG_ON(request->ctx != port[0].request->ctx);
 
        /* Reset WaIdleLiteRestore:bdw,skl as well */
-       request->tail = request->wa_tail - WA_TAIL_DWORDS * sizeof(u32);
+       request->tail =
+               intel_ring_wrap(request->ring,
+                               request->wa_tail - WA_TAIL_DWORDS*sizeof(u32));
 }
 
 static int intel_logical_ring_emit_pdps(struct drm_i915_gem_request *req)
index 13dccb18cd43ed85aee58a6a15618c515094a83b..8cb2078c5bfc4abc7aeaa7fe51974266edcc8016 100644 (file)
@@ -521,11 +521,17 @@ static inline void intel_ring_advance(struct intel_ring *ring)
         */
 }
 
+static inline u32
+intel_ring_wrap(const struct intel_ring *ring, u32 pos)
+{
+       return pos & (ring->size - 1);
+}
+
 static inline u32 intel_ring_offset(struct intel_ring *ring, void *addr)
 {
        /* Don't write ring->size (equivalent to 0) as that hangs some GPUs. */
        u32 offset = addr - ring->vaddr;
-       return offset & (ring->size - 1);
+       return intel_ring_wrap(ring, offset);
 }
 
 int __intel_ring_space(int head, int tail, int size);
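
intel_ring_wrap() is the usual power-of-two mask and relies on ring->size
being a power of two. A quick sketch of why the reset path above needs it:

    /* With ring->size = 4096:
     *   intel_ring_wrap(ring, 4100) == 4100 & 4095 == 4
     * In reset_common_ring(), wa_tail - WA_TAIL_DWORDS*sizeof(u32)
     * can go "negative" (i.e. wrap as unsigned arithmetic) when
     * wa_tail sits near the start of the ring; the mask folds the
     * result back into [0, size). */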
index 0b4440ffbeae21a3d33e67ed1c727e00cf3884b3..a9182d5e60117321a1a354b9b97288d11cc13cf4 100644 (file)
@@ -995,7 +995,6 @@ nv50_wndw_atomic_destroy_state(struct drm_plane *plane,
 {
        struct nv50_wndw_atom *asyw = nv50_wndw_atom(state);
        __drm_atomic_helper_plane_destroy_state(&asyw->state);
-       dma_fence_put(asyw->state.fence);
        kfree(asyw);
 }
 
@@ -1007,7 +1006,6 @@ nv50_wndw_atomic_duplicate_state(struct drm_plane *plane)
        if (!(asyw = kmalloc(sizeof(*asyw), GFP_KERNEL)))
                return NULL;
        __drm_atomic_helper_plane_duplicate_state(plane, &asyw->state);
-       asyw->state.fence = NULL;
        asyw->interval = 1;
        asyw->sema = armw->sema;
        asyw->ntfy = armw->ntfy;
@@ -2036,6 +2034,7 @@ nv50_head_atomic_check_mode(struct nv50_head *head, struct nv50_head_atom *asyh)
        u32 vbackp  = (mode->vtotal - mode->vsync_end) * vscan / ilace;
        u32 hfrontp =  mode->hsync_start - mode->hdisplay;
        u32 vfrontp = (mode->vsync_start - mode->vdisplay) * vscan / ilace;
+       u32 blankus;
        struct nv50_head_mode *m = &asyh->mode;
 
        m->h.active = mode->htotal;
@@ -2049,9 +2048,10 @@ nv50_head_atomic_check_mode(struct nv50_head *head, struct nv50_head_atom *asyh)
        m->v.blanks = m->v.active - vfrontp - 1;
 
        /*XXX: Safe underestimate, even "0" works */
-       m->v.blankus = (m->v.active - mode->vdisplay - 2) * m->h.active;
-       m->v.blankus *= 1000;
-       m->v.blankus /= mode->clock;
+       blankus = (m->v.active - mode->vdisplay - 2) * m->h.active;
+       blankus *= 1000;
+       blankus /= mode->clock;
+       m->v.blankus = blankus;
 
        if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
                m->v.blank2e =  m->v.active + m->v.synce + vbackp;
index 273562dd6bbdb1a138c2e73417fa0e071716749b..3b86a73995672220b5e71b0f39898129782abf28 100644 (file)
@@ -714,7 +714,7 @@ nv4a_chipset = {
        .i2c = nv04_i2c_new,
        .imem = nv40_instmem_new,
        .mc = nv44_mc_new,
-       .mmu = nv44_mmu_new,
+       .mmu = nv04_mmu_new,
        .pci = nv40_pci_new,
        .therm = nv40_therm_new,
        .timer = nv41_timer_new,
@@ -2271,6 +2271,35 @@ nv136_chipset = {
        .fifo = gp100_fifo_new,
 };
 
+static const struct nvkm_device_chip
+nv137_chipset = {
+       .name = "GP107",
+       .bar = gf100_bar_new,
+       .bios = nvkm_bios_new,
+       .bus = gf100_bus_new,
+       .devinit = gm200_devinit_new,
+       .fb = gp102_fb_new,
+       .fuse = gm107_fuse_new,
+       .gpio = gk104_gpio_new,
+       .i2c = gm200_i2c_new,
+       .ibus = gm200_ibus_new,
+       .imem = nv50_instmem_new,
+       .ltc = gp100_ltc_new,
+       .mc = gp100_mc_new,
+       .mmu = gf100_mmu_new,
+       .pci = gp100_pci_new,
+       .pmu = gp102_pmu_new,
+       .timer = gk20a_timer_new,
+       .top = gk104_top_new,
+       .ce[0] = gp102_ce_new,
+       .ce[1] = gp102_ce_new,
+       .ce[2] = gp102_ce_new,
+       .ce[3] = gp102_ce_new,
+       .disp = gp102_disp_new,
+       .dma = gf119_dma_new,
+       .fifo = gp100_fifo_new,
+};
+
 static int
 nvkm_device_event_ctor(struct nvkm_object *object, void *data, u32 size,
                       struct nvkm_notify *notify)
@@ -2708,6 +2737,7 @@ nvkm_device_ctor(const struct nvkm_device_func *func,
                case 0x132: device->chip = &nv132_chipset; break;
                case 0x134: device->chip = &nv134_chipset; break;
                case 0x136: device->chip = &nv136_chipset; break;
+               case 0x137: device->chip = &nv137_chipset; break;
                default:
                        nvdev_error(device, "unknown chipset (%08x)\n", boot0);
                        goto done;
index 003ac915eaadad44c5b0ad2c1bbab9d18377596e..8a8895246d26a23be65eb6fb29cbb001f71f728b 100644 (file)
@@ -198,7 +198,7 @@ nv31_mpeg_intr(struct nvkm_engine *engine)
                }
 
                if (type == 0x00000010) {
-                       if (!nv31_mpeg_mthd(mpeg, mthd, data))
+                       if (nv31_mpeg_mthd(mpeg, mthd, data))
                                show &= ~0x01000000;
                }
        }
index e536f37e24b0c75fbf7882eccae37c5b1d0aeae8..c3cf02ed468ea1ccf6212a5e9c42dbee00fa612e 100644 (file)
@@ -172,7 +172,7 @@ nv44_mpeg_intr(struct nvkm_engine *engine)
                }
 
                if (type == 0x00000010) {
-                       if (!nv44_mpeg_mthd(subdev->device, mthd, data))
+                       if (nv44_mpeg_mthd(subdev->device, mthd, data))
                                show &= ~0x01000000;
                }
        }
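
Both the nv31 and nv44 hunks track the same convention change (per the
series this comes from, the mthd helpers now return true on success), so
the handled-method bit is cleared from "show" only when the method was
actually consumed:

    if (type == 0x00000010) {
        /* true return == method handled; only then suppress the
         * error report for this interrupt source */
        if (nv31_mpeg_mthd(mpeg, mthd, data))
            show &= ~0x01000000;
    }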
index 917dcb978c2ccc921c1dfcf90329973ff3044b59..0c87b1ac6b68f0d41cfd01851a14b9a092455f4f 100644 (file)
@@ -14,6 +14,7 @@
 #include <linux/slab.h>
 #include <linux/fb.h>
 #include <linux/prefetch.h>
+#include <asm/unaligned.h>
 
 #include <drm/drmP.h>
 #include "udl_drv.h"
@@ -163,7 +164,7 @@ static void udl_compress_hline16(
                        const u8 *const start = pixel;
                        const uint16_t repeating_pixel_val16 = pixel_val16;
 
-                       *(uint16_t *)cmd = cpu_to_be16(pixel_val16);
+                       put_unaligned_be16(pixel_val16, cmd);
 
                        cmd += 2;
                        pixel += bpp;
index 63ec1993eaaa905af1583f7169e6be812cf0486d..d162f0dc76e3f44e2134eafa5509137ddf0cc411 100644 (file)
@@ -819,8 +819,7 @@ static int hid_scan_report(struct hid_device *hid)
                hid->group = HID_GROUP_WACOM;
                break;
        case USB_VENDOR_ID_SYNAPTICS:
-               if (hid->group == HID_GROUP_GENERIC ||
-                   hid->group == HID_GROUP_MULTITOUCH_WIN_8)
+               if (hid->group == HID_GROUP_GENERIC)
                        if ((parser->scan_flags & HID_SCAN_FLAG_VENDOR_SPECIFIC)
                            && (parser->scan_flags & HID_SCAN_FLAG_GD_POINTER))
                                /*
@@ -2096,6 +2095,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
        { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UGEE_TABLET_45) },
        { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_DRAWIMAGE_G3) },
        { HID_USB_DEVICE(USB_VENDOR_ID_UGTIZER, USB_DEVICE_ID_UGTIZER_TABLET_GP0610) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_UGEE, USB_DEVICE_ID_UGEE_TABLET_EX07S) },
        { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_SMARTJOY_PLUS) },
        { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_SUPER_JOY_BOX_3) },
        { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_DUAL_USB_JOYPAD) },
index 4e2648c86c8c56142cd06b6a269433f72c5c07c9..b26c030926c188aff2dd054cd2ae9e9910837887 100644 (file)
 #define USB_DEVICE_ID_UGEE_TABLET_45           0x0045
 #define USB_DEVICE_ID_YIYNOVA_TABLET           0x004d
 
+#define USB_VENDOR_ID_UGEE             0x28bd
+#define USB_DEVICE_ID_UGEE_TABLET_EX07S                0x0071
+
 #define USB_VENDOR_ID_UNITEC   0x227d
 #define USB_DEVICE_ID_UNITEC_USB_TOUCH_0709    0x0709
 #define USB_DEVICE_ID_UNITEC_USB_TOUCH_0A19    0x0a19
index 1509d7287ff3e60e79c845214e47e901fae650f8..e3e6e5c893cc05e0c934c8c9f505de9fdd06e26a 100644 (file)
@@ -977,6 +977,7 @@ static int uclogic_probe(struct hid_device *hdev,
                }
                break;
        case USB_DEVICE_ID_UGTIZER_TABLET_GP0610:
+       case USB_DEVICE_ID_UGEE_TABLET_EX07S:
                /* If this is the pen interface */
                if (intf->cur_altsetting->desc.bInterfaceNumber == 1) {
                        rc = uclogic_tablet_enable(hdev);
@@ -1069,6 +1070,7 @@ static const struct hid_device_id uclogic_devices[] = {
        { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UGEE_TABLET_45) },
        { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_DRAWIMAGE_G3) },
        { HID_USB_DEVICE(USB_VENDOR_ID_UGTIZER, USB_DEVICE_ID_UGTIZER_TABLET_GP0610) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_UGEE, USB_DEVICE_ID_UGEE_TABLET_EX07S) },
        { }
 };
 MODULE_DEVICE_TABLE(hid, uclogic_devices);
index 91cbe86b25c8ec2d693bea5452c04d0ffa73ba0b..fcbed35e95a824979bb48fba9f05614591e97491 100644 (file)
@@ -817,6 +817,7 @@ isert_post_recvm(struct isert_conn *isert_conn, u32 count)
                rx_wr->sg_list = &rx_desc->rx_sg;
                rx_wr->num_sge = 1;
                rx_wr->next = rx_wr + 1;
+               rx_desc->in_use = false;
        }
        rx_wr--;
        rx_wr->next = NULL; /* mark end of work requests list */
@@ -835,6 +836,15 @@ isert_post_recv(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc)
        struct ib_recv_wr *rx_wr_failed, rx_wr;
        int ret;
 
+       if (!rx_desc->in_use) {
+               /*
+                * if the descriptor is not in-use we already reposted it
+                * for recv, so just silently return
+                */
+               return 0;
+       }
+
+       rx_desc->in_use = false;
        rx_wr.wr_cqe = &rx_desc->rx_cqe;
        rx_wr.sg_list = &rx_desc->rx_sg;
        rx_wr.num_sge = 1;
@@ -1397,6 +1407,8 @@ isert_recv_done(struct ib_cq *cq, struct ib_wc *wc)
                return;
        }
 
+       rx_desc->in_use = true;
+
        ib_dma_sync_single_for_cpu(ib_dev, rx_desc->dma_addr,
                        ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
 
@@ -1659,10 +1671,23 @@ isert_rdma_write_done(struct ib_cq *cq, struct ib_wc *wc)
        ret = isert_check_pi_status(cmd, isert_cmd->rw.sig->sig_mr);
        isert_rdma_rw_ctx_destroy(isert_cmd, isert_conn);
 
-       if (ret)
-               transport_send_check_condition_and_sense(cmd, cmd->pi_err, 0);
-       else
-               isert_put_response(isert_conn->conn, isert_cmd->iscsi_cmd);
+       if (ret) {
+               /*
+                * transport_generic_request_failure() expects to have
+                * plus two references to handle queue-full, so re-add
+                * one here as target-core will have already dropped
+                * it after the first isert_put_datain() callback.
+                */
+               kref_get(&cmd->cmd_kref);
+               transport_generic_request_failure(cmd, cmd->pi_err);
+       } else {
+               /*
+                * XXX: isert_put_response() failure is not retried.
+                */
+               ret = isert_put_response(isert_conn->conn, isert_cmd->iscsi_cmd);
+               if (ret)
+                       pr_warn_ratelimited("isert_put_response() ret: %d\n", ret);
+       }
 }
 
 static void
@@ -1699,13 +1724,15 @@ isert_rdma_read_done(struct ib_cq *cq, struct ib_wc *wc)
        cmd->i_state = ISTATE_RECEIVED_LAST_DATAOUT;
        spin_unlock_bh(&cmd->istate_lock);
 
-       if (ret) {
-               target_put_sess_cmd(se_cmd);
-               transport_send_check_condition_and_sense(se_cmd,
-                                                        se_cmd->pi_err, 0);
-       } else {
+       /*
+        * transport_generic_request_failure() will drop the extra
+        * se_cmd->cmd_kref reference after a T10-PI error, and handle
+        * retries for any non-zero ->queue_status() callback error.
+        */
+       if (ret)
+               transport_generic_request_failure(se_cmd, se_cmd->pi_err);
+       else
                target_execute_cmd(se_cmd);
-       }
 }
 
 static void
@@ -2171,26 +2198,28 @@ isert_put_datain(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
                chain_wr = &isert_cmd->tx_desc.send_wr;
        }
 
-       isert_rdma_rw_ctx_post(isert_cmd, isert_conn, cqe, chain_wr);
-       isert_dbg("Cmd: %p posted RDMA_WRITE for iSER Data READ\n", isert_cmd);
-       return 1;
+       rc = isert_rdma_rw_ctx_post(isert_cmd, isert_conn, cqe, chain_wr);
+       isert_dbg("Cmd: %p posted RDMA_WRITE for iSER Data READ rc: %d\n",
+                 isert_cmd, rc);
+       return rc;
 }
 
 static int
 isert_get_dataout(struct iscsi_conn *conn, struct iscsi_cmd *cmd, bool recovery)
 {
        struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
+       int ret;
 
        isert_dbg("Cmd: %p RDMA_READ data_length: %u write_data_done: %u\n",
                 isert_cmd, cmd->se_cmd.data_length, cmd->write_data_done);
 
        isert_cmd->tx_desc.tx_cqe.done = isert_rdma_read_done;
-       isert_rdma_rw_ctx_post(isert_cmd, conn->context,
-                       &isert_cmd->tx_desc.tx_cqe, NULL);
+       ret = isert_rdma_rw_ctx_post(isert_cmd, conn->context,
+                                    &isert_cmd->tx_desc.tx_cqe, NULL);
 
-       isert_dbg("Cmd: %p posted RDMA_READ memory for ISER Data WRITE\n",
-                isert_cmd);
-       return 0;
+       isert_dbg("Cmd: %p posted RDMA_READ memory for ISER Data WRITE rc: %d\n",
+                isert_cmd, ret);
+       return ret;
 }
 
 static int
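
With isert_put_datain() and isert_get_dataout() now returning the
isert_rdma_rw_ctx_post() result instead of a hardcoded 1/0, a failed post
finally reaches target-core. A hypothetical fabric callback shows why the
return value matters (sketch only; example_post_rdma_write() is invented):

    static int example_post_rdma_write(struct se_cmd *se_cmd);  /* invented */

    /* Sketch: a fabric ->queue_data_in() surfacing a post failure so
     * target-core can park the command for retry instead of losing it. */
    static int example_queue_data_in(struct se_cmd *se_cmd)
    {
            int rc = example_post_rdma_write(se_cmd);   /* may be -ENOMEM */

            return rc;  /* non-zero feeds transport_handle_queue_full() */
    }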
index c02ada57d7f5c4fa5b765a6401ac8e7bbd6096bb..87d994de8c910d998c12a4205c0e66eb1220b12d 100644 (file)
@@ -60,7 +60,7 @@
 
 #define ISER_RX_PAD_SIZE       (ISCSI_DEF_MAX_RECV_SEG_LEN + 4096 - \
                (ISER_RX_PAYLOAD_SIZE + sizeof(u64) + sizeof(struct ib_sge) + \
-                sizeof(struct ib_cqe)))
+                sizeof(struct ib_cqe) + sizeof(bool)))
 
 #define ISCSI_ISER_SG_TABLESIZE                256
 
@@ -85,6 +85,7 @@ struct iser_rx_desc {
        u64             dma_addr;
        struct ib_sge   rx_sg;
        struct ib_cqe   rx_cqe;
+       bool            in_use;
        char            pad[ISER_RX_PAD_SIZE];
 } __packed;
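
The ISER_RX_PAD_SIZE change is the bookkeeping half of the new field: the pad
shrinks by sizeof(bool) so the overall descriptor footprint stays at
ISCSI_DEF_MAX_RECV_SEG_LEN + 4096 bytes. A build-time guard along these lines
(hypothetical, not part of the driver; it would live in any init function)
would catch a future field addition that forgets to adjust the pad:

    /* Hypothetical compile-time guard for the fixed descriptor size. */
    BUILD_BUG_ON(sizeof(struct iser_rx_desc) !=
                 ISCSI_DEF_MAX_RECV_SEG_LEN + 4096);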
 
index 15af9a9753e582b797a58de3c9713226800999f8..2d203b422129e53554d5be023595c6946877037c 100644 (file)
@@ -230,6 +230,8 @@ static int __init imx_gpcv2_irqchip_init(struct device_node *node,
                return -ENOMEM;
        }
 
+       raw_spin_lock_init(&cd->rlock);
+
        cd->gpc_base = of_iomap(node, 0);
        if (!cd->gpc_base) {
                pr_err("fsl-gpcv2: unable to map gpc registers\n");
index ea9890d619670e1abfba75fe608c2925d824cb1c..f36584616e7d6825c7e69137b4a31a3d55779688 100644 (file)
@@ -2230,14 +2230,8 @@ static bool virtnet_validate_features(struct virtio_device *vdev)
 #define MIN_MTU ETH_MIN_MTU
 #define MAX_MTU ETH_MAX_MTU
 
-static int virtnet_probe(struct virtio_device *vdev)
+static int virtnet_validate(struct virtio_device *vdev)
 {
-       int i, err;
-       struct net_device *dev;
-       struct virtnet_info *vi;
-       u16 max_queue_pairs;
-       int mtu;
-
        if (!vdev->config->get) {
                dev_err(&vdev->dev, "%s failure: config access disabled\n",
                        __func__);
@@ -2247,6 +2241,25 @@ static int virtnet_probe(struct virtio_device *vdev)
        if (!virtnet_validate_features(vdev))
                return -EINVAL;
 
+       if (virtio_has_feature(vdev, VIRTIO_NET_F_MTU)) {
+               int mtu = virtio_cread16(vdev,
+                                        offsetof(struct virtio_net_config,
+                                                 mtu));
+               if (mtu < MIN_MTU)
+                       __virtio_clear_bit(vdev, VIRTIO_NET_F_MTU);
+       }
+
+       return 0;
+}
+
+static int virtnet_probe(struct virtio_device *vdev)
+{
+       int i, err;
+       struct net_device *dev;
+       struct virtnet_info *vi;
+       u16 max_queue_pairs;
+       int mtu;
+
        /* Find if host supports multiqueue virtio_net device */
        err = virtio_cread_feature(vdev, VIRTIO_NET_F_MQ,
                                   struct virtio_net_config,
@@ -2362,11 +2375,20 @@ static int virtnet_probe(struct virtio_device *vdev)
                                     offsetof(struct virtio_net_config,
                                              mtu));
                if (mtu < dev->min_mtu) {
-                       __virtio_clear_bit(vdev, VIRTIO_NET_F_MTU);
-               } else {
-                       dev->mtu = mtu;
-                       dev->max_mtu = mtu;
+                       /* Should never trigger: MTU was previously validated
+                        * in virtnet_validate.
+                        */
+                       dev_err(&vdev->dev, "device MTU appears to have changed, "
+                               "it is now %d < %d\n", mtu, dev->min_mtu);
+                       goto free_stats;
                }
+
+               dev->mtu = mtu;
+               dev->max_mtu = mtu;
+
+               /* TODO: size buffers correctly in this case. */
+               if (dev->mtu > ETH_DATA_LEN)
+                       vi->big_packets = true;
        }
 
        if (vi->any_header_sg)
@@ -2544,6 +2566,7 @@ static struct virtio_driver virtio_net_driver = {
        .driver.name =  KBUILD_MODNAME,
        .driver.owner = THIS_MODULE,
        .id_table =     id_table,
+       .validate =     virtnet_validate,
        .probe =        virtnet_probe,
        .remove =       virtnet_remove,
        .config_changed = virtnet_config_changed,
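
The structural change here is the new ->validate() driver callback: config
sanity checks are hoisted out of probe so a nonsensical device MTU can be
rejected, by clearing VIRTIO_NET_F_MTU, before feature negotiation is
finalized. A skeletal driver using the same hook might look like this
(VIRTIO_FOO_F_LIMIT and struct virtio_foo_config are invented for the
sketch):

    /* Sketch: a virtio driver vetoing a bad config field in ->validate(). */
    static int foo_validate(struct virtio_device *vdev)
    {
            if (virtio_has_feature(vdev, VIRTIO_FOO_F_LIMIT)) {
                    u16 limit = virtio_cread16(vdev,
                                    offsetof(struct virtio_foo_config, limit));
                    if (limit == 0) /* nonsense value: drop the feature bit */
                            __virtio_clear_bit(vdev, VIRTIO_FOO_F_LIMIT);
            }
            return 0;       /* non-zero would abort the device probe */
    }

    static int foo_probe(struct virtio_device *vdev)
    {
            return 0;       /* device setup elided */
    }

    static struct virtio_driver foo_driver = {
            .validate = foo_validate,       /* runs before ->probe() */
            .probe    = foo_probe,
    };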
index f80134e3e0b68aba9f8b94771702cdd3df83a678..9ff790174906e46962ec9b9fa00f4f04de14aa18 100644 (file)
@@ -13,6 +13,7 @@
  * published by the Free Software Foundation.
  */
 
+#include <linux/dmi.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/init.h>
@@ -1524,10 +1525,31 @@ static void chv_gpio_irq_handler(struct irq_desc *desc)
        chained_irq_exit(chip, desc);
 }
 
+/*
+ * Certain machines seem to hardcode Linux IRQ numbers in their ACPI
+ * tables. Since we leave GPIOs that are not capable of generating
+ * interrupts out of the irqdomain, the numbering will be different and
+ * cause devices using the hardcoded IRQ numbers to fail. In order not to
+ * break such machines, we will only mask pins from the irqdomain if the
+ * machine is not listed below.
+ */
+static const struct dmi_system_id chv_no_valid_mask[] = {
+       {
+               /* See https://bugzilla.kernel.org/show_bug.cgi?id=194945 */
+               .ident = "Acer Chromebook (CYAN)",
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "GOOGLE"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "Edgar"),
+                       DMI_MATCH(DMI_BIOS_DATE, "05/21/2016"),
+               },
+       },
+       { }
+};
+
 static int chv_gpio_probe(struct chv_pinctrl *pctrl, int irq)
 {
        const struct chv_gpio_pinrange *range;
        struct gpio_chip *chip = &pctrl->chip;
+       bool need_valid_mask = !dmi_check_system(chv_no_valid_mask);
        int ret, i, offset;
 
        *chip = chv_gpio_chip;
@@ -1536,7 +1558,7 @@ static int chv_gpio_probe(struct chv_pinctrl *pctrl, int irq)
        chip->label = dev_name(pctrl->dev);
        chip->parent = pctrl->dev;
        chip->base = -1;
-       chip->irq_need_valid_mask = true;
+       chip->irq_need_valid_mask = need_valid_mask;
 
        ret = devm_gpiochip_add_data(pctrl->dev, chip, pctrl);
        if (ret) {
@@ -1567,7 +1589,7 @@ static int chv_gpio_probe(struct chv_pinctrl *pctrl, int irq)
                intsel &= CHV_PADCTRL0_INTSEL_MASK;
                intsel >>= CHV_PADCTRL0_INTSEL_SHIFT;
 
-               if (intsel >= pctrl->community->nirqs)
+               if (need_valid_mask && intsel >= pctrl->community->nirqs)
                        clear_bit(i, chip->irq_valid_mask);
        }
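
Extending the quirk is a matter of appending another dmi_system_id entry
before the empty terminator; dmi_check_system() returns the number of
matching entries, so any hit keeps the valid-mask filtering disabled. A
hypothetical second machine would be added like so (vendor and product
strings invented):

    static const struct dmi_system_id chv_no_valid_mask[] = {
            {
                    /* existing Acer Chromebook (CYAN) entry */
            },
            {
                    /* hypothetical machine with hardcoded IRQ numbers */
                    .ident = "Example Vendor Board",
                    .matches = {
                            DMI_MATCH(DMI_SYS_VENDOR, "EXAMPLE"),
                            DMI_MATCH(DMI_PRODUCT_NAME, "Board1"),
                    },
            },
            { }     /* terminator required by dmi_check_system() */
    };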
 
index f9b49967f512b52cec5447ae0baee9e146745d1b..63e51b56a22a94c528489a8d18aaf38795f0072e 100644 (file)
@@ -1468,82 +1468,82 @@ const struct samsung_pin_ctrl exynos5420_pin_ctrl[] __initconst = {
 
 /* pin banks of exynos5433 pin-controller - ALIVE */
 static const struct samsung_pin_bank_data exynos5433_pin_banks0[] __initconst = {
-       EXYNOS_PIN_BANK_EINTW(8, 0x000, "gpa0", 0x00),
-       EXYNOS_PIN_BANK_EINTW(8, 0x020, "gpa1", 0x04),
-       EXYNOS_PIN_BANK_EINTW(8, 0x040, "gpa2", 0x08),
-       EXYNOS_PIN_BANK_EINTW(8, 0x060, "gpa3", 0x0c),
-       EXYNOS_PIN_BANK_EINTW_EXT(8, 0x020, "gpf1", 0x1004, 1),
-       EXYNOS_PIN_BANK_EINTW_EXT(4, 0x040, "gpf2", 0x1008, 1),
-       EXYNOS_PIN_BANK_EINTW_EXT(4, 0x060, "gpf3", 0x100c, 1),
-       EXYNOS_PIN_BANK_EINTW_EXT(8, 0x080, "gpf4", 0x1010, 1),
-       EXYNOS_PIN_BANK_EINTW_EXT(8, 0x0a0, "gpf5", 0x1014, 1),
+       EXYNOS5433_PIN_BANK_EINTW(8, 0x000, "gpa0", 0x00),
+       EXYNOS5433_PIN_BANK_EINTW(8, 0x020, "gpa1", 0x04),
+       EXYNOS5433_PIN_BANK_EINTW(8, 0x040, "gpa2", 0x08),
+       EXYNOS5433_PIN_BANK_EINTW(8, 0x060, "gpa3", 0x0c),
+       EXYNOS5433_PIN_BANK_EINTW_EXT(8, 0x020, "gpf1", 0x1004, 1),
+       EXYNOS5433_PIN_BANK_EINTW_EXT(4, 0x040, "gpf2", 0x1008, 1),
+       EXYNOS5433_PIN_BANK_EINTW_EXT(4, 0x060, "gpf3", 0x100c, 1),
+       EXYNOS5433_PIN_BANK_EINTW_EXT(8, 0x080, "gpf4", 0x1010, 1),
+       EXYNOS5433_PIN_BANK_EINTW_EXT(8, 0x0a0, "gpf5", 0x1014, 1),
 };
 
 /* pin banks of exynos5433 pin-controller - AUD */
 static const struct samsung_pin_bank_data exynos5433_pin_banks1[] __initconst = {
-       EXYNOS_PIN_BANK_EINTG(7, 0x000, "gpz0", 0x00),
-       EXYNOS_PIN_BANK_EINTG(4, 0x020, "gpz1", 0x04),
+       EXYNOS5433_PIN_BANK_EINTG(7, 0x000, "gpz0", 0x00),
+       EXYNOS5433_PIN_BANK_EINTG(4, 0x020, "gpz1", 0x04),
 };
 
 /* pin banks of exynos5433 pin-controller - CPIF */
 static const struct samsung_pin_bank_data exynos5433_pin_banks2[] __initconst = {
-       EXYNOS_PIN_BANK_EINTG(2, 0x000, "gpv6", 0x00),
+       EXYNOS5433_PIN_BANK_EINTG(2, 0x000, "gpv6", 0x00),
 };
 
 /* pin banks of exynos5433 pin-controller - eSE */
 static const struct samsung_pin_bank_data exynos5433_pin_banks3[] __initconst = {
-       EXYNOS_PIN_BANK_EINTG(3, 0x000, "gpj2", 0x00),
+       EXYNOS5433_PIN_BANK_EINTG(3, 0x000, "gpj2", 0x00),
 };
 
 /* pin banks of exynos5433 pin-controller - FINGER */
 static const struct samsung_pin_bank_data exynos5433_pin_banks4[] __initconst = {
-       EXYNOS_PIN_BANK_EINTG(4, 0x000, "gpd5", 0x00),
+       EXYNOS5433_PIN_BANK_EINTG(4, 0x000, "gpd5", 0x00),
 };
 
 /* pin banks of exynos5433 pin-controller - FSYS */
 static const struct samsung_pin_bank_data exynos5433_pin_banks5[] __initconst = {
-       EXYNOS_PIN_BANK_EINTG(6, 0x000, "gph1", 0x00),
-       EXYNOS_PIN_BANK_EINTG(7, 0x020, "gpr4", 0x04),
-       EXYNOS_PIN_BANK_EINTG(5, 0x040, "gpr0", 0x08),
-       EXYNOS_PIN_BANK_EINTG(8, 0x060, "gpr1", 0x0c),
-       EXYNOS_PIN_BANK_EINTG(2, 0x080, "gpr2", 0x10),
-       EXYNOS_PIN_BANK_EINTG(8, 0x0a0, "gpr3", 0x14),
+       EXYNOS5433_PIN_BANK_EINTG(6, 0x000, "gph1", 0x00),
+       EXYNOS5433_PIN_BANK_EINTG(7, 0x020, "gpr4", 0x04),
+       EXYNOS5433_PIN_BANK_EINTG(5, 0x040, "gpr0", 0x08),
+       EXYNOS5433_PIN_BANK_EINTG(8, 0x060, "gpr1", 0x0c),
+       EXYNOS5433_PIN_BANK_EINTG(2, 0x080, "gpr2", 0x10),
+       EXYNOS5433_PIN_BANK_EINTG(8, 0x0a0, "gpr3", 0x14),
 };
 
 /* pin banks of exynos5433 pin-controller - IMEM */
 static const struct samsung_pin_bank_data exynos5433_pin_banks6[] __initconst = {
-       EXYNOS_PIN_BANK_EINTG(8, 0x000, "gpf0", 0x00),
+       EXYNOS5433_PIN_BANK_EINTG(8, 0x000, "gpf0", 0x00),
 };
 
 /* pin banks of exynos5433 pin-controller - NFC */
 static const struct samsung_pin_bank_data exynos5433_pin_banks7[] __initconst = {
-       EXYNOS_PIN_BANK_EINTG(3, 0x000, "gpj0", 0x00),
+       EXYNOS5433_PIN_BANK_EINTG(3, 0x000, "gpj0", 0x00),
 };
 
 /* pin banks of exynos5433 pin-controller - PERIC */
 static const struct samsung_pin_bank_data exynos5433_pin_banks8[] __initconst = {
-       EXYNOS_PIN_BANK_EINTG(6, 0x000, "gpv7", 0x00),
-       EXYNOS_PIN_BANK_EINTG(5, 0x020, "gpb0", 0x04),
-       EXYNOS_PIN_BANK_EINTG(8, 0x040, "gpc0", 0x08),
-       EXYNOS_PIN_BANK_EINTG(2, 0x060, "gpc1", 0x0c),
-       EXYNOS_PIN_BANK_EINTG(6, 0x080, "gpc2", 0x10),
-       EXYNOS_PIN_BANK_EINTG(8, 0x0a0, "gpc3", 0x14),
-       EXYNOS_PIN_BANK_EINTG(2, 0x0c0, "gpg0", 0x18),
-       EXYNOS_PIN_BANK_EINTG(4, 0x0e0, "gpd0", 0x1c),
-       EXYNOS_PIN_BANK_EINTG(6, 0x100, "gpd1", 0x20),
-       EXYNOS_PIN_BANK_EINTG(8, 0x120, "gpd2", 0x24),
-       EXYNOS_PIN_BANK_EINTG(5, 0x140, "gpd4", 0x28),
-       EXYNOS_PIN_BANK_EINTG(2, 0x160, "gpd8", 0x2c),
-       EXYNOS_PIN_BANK_EINTG(7, 0x180, "gpd6", 0x30),
-       EXYNOS_PIN_BANK_EINTG(3, 0x1a0, "gpd7", 0x34),
-       EXYNOS_PIN_BANK_EINTG(5, 0x1c0, "gpg1", 0x38),
-       EXYNOS_PIN_BANK_EINTG(2, 0x1e0, "gpg2", 0x3c),
-       EXYNOS_PIN_BANK_EINTG(8, 0x200, "gpg3", 0x40),
+       EXYNOS5433_PIN_BANK_EINTG(6, 0x000, "gpv7", 0x00),
+       EXYNOS5433_PIN_BANK_EINTG(5, 0x020, "gpb0", 0x04),
+       EXYNOS5433_PIN_BANK_EINTG(8, 0x040, "gpc0", 0x08),
+       EXYNOS5433_PIN_BANK_EINTG(2, 0x060, "gpc1", 0x0c),
+       EXYNOS5433_PIN_BANK_EINTG(6, 0x080, "gpc2", 0x10),
+       EXYNOS5433_PIN_BANK_EINTG(8, 0x0a0, "gpc3", 0x14),
+       EXYNOS5433_PIN_BANK_EINTG(2, 0x0c0, "gpg0", 0x18),
+       EXYNOS5433_PIN_BANK_EINTG(4, 0x0e0, "gpd0", 0x1c),
+       EXYNOS5433_PIN_BANK_EINTG(6, 0x100, "gpd1", 0x20),
+       EXYNOS5433_PIN_BANK_EINTG(8, 0x120, "gpd2", 0x24),
+       EXYNOS5433_PIN_BANK_EINTG(5, 0x140, "gpd4", 0x28),
+       EXYNOS5433_PIN_BANK_EINTG(2, 0x160, "gpd8", 0x2c),
+       EXYNOS5433_PIN_BANK_EINTG(7, 0x180, "gpd6", 0x30),
+       EXYNOS5433_PIN_BANK_EINTG(3, 0x1a0, "gpd7", 0x34),
+       EXYNOS5433_PIN_BANK_EINTG(5, 0x1c0, "gpg1", 0x38),
+       EXYNOS5433_PIN_BANK_EINTG(2, 0x1e0, "gpg2", 0x3c),
+       EXYNOS5433_PIN_BANK_EINTG(8, 0x200, "gpg3", 0x40),
 };
 
 /* pin banks of exynos5433 pin-controller - TOUCH */
 static const struct samsung_pin_bank_data exynos5433_pin_banks9[] __initconst = {
-       EXYNOS_PIN_BANK_EINTG(3, 0x000, "gpj1", 0x00),
+       EXYNOS5433_PIN_BANK_EINTG(3, 0x000, "gpj1", 0x00),
 };
 
 /*
index a473092fb8d2362f11a1a48beb7f545b7c9f25ce..cd046eb7d705682fae1293abf432b9d04465328f 100644 (file)
                .name           = id                    \
        }
 
-#define EXYNOS_PIN_BANK_EINTW_EXT(pins, reg, id, offs, pctl_idx) \
-       {                                               \
-               .type           = &bank_type_alive,     \
-               .pctl_offset    = reg,                  \
-               .nr_pins        = pins,                 \
-               .eint_type      = EINT_TYPE_WKUP,       \
-               .eint_offset    = offs,                 \
-               .name           = id,                   \
-               .pctl_res_idx   = pctl_idx,             \
-       }                                               \
-
 #define EXYNOS5433_PIN_BANK_EINTG(pins, reg, id, offs)         \
        {                                                       \
                .type           = &exynos5433_bank_type_off,    \
index 053088b9b66edb4c50c3b47c5e942b6e2e343c00..c1527cb645be8d0c038742559e270d1914ac8726 100644 (file)
@@ -36,6 +36,14 @@ static const struct pwm_lpss_boardinfo pwm_lpss_bxt_info = {
        .clk_rate = 19200000,
        .npwm = 4,
        .base_unit_bits = 22,
+       .bypass = true,
+};
+
+/* Tangier */
+static const struct pwm_lpss_boardinfo pwm_lpss_tng_info = {
+       .clk_rate = 19200000,
+       .npwm = 4,
+       .base_unit_bits = 22,
 };
 
 static int pwm_lpss_probe_pci(struct pci_dev *pdev,
@@ -97,7 +105,7 @@ static const struct pci_device_id pwm_lpss_pci_ids[] = {
        { PCI_VDEVICE(INTEL, 0x0ac8), (unsigned long)&pwm_lpss_bxt_info},
        { PCI_VDEVICE(INTEL, 0x0f08), (unsigned long)&pwm_lpss_byt_info},
        { PCI_VDEVICE(INTEL, 0x0f09), (unsigned long)&pwm_lpss_byt_info},
-       { PCI_VDEVICE(INTEL, 0x11a5), (unsigned long)&pwm_lpss_bxt_info},
+       { PCI_VDEVICE(INTEL, 0x11a5), (unsigned long)&pwm_lpss_tng_info},
        { PCI_VDEVICE(INTEL, 0x1ac8), (unsigned long)&pwm_lpss_bxt_info},
        { PCI_VDEVICE(INTEL, 0x2288), (unsigned long)&pwm_lpss_bsw_info},
        { PCI_VDEVICE(INTEL, 0x2289), (unsigned long)&pwm_lpss_bsw_info},
index b22b6fdadb9ae14e0e55f28ecbfcece7e971eb1f..5d6ed1507d29284f2ba28f2cc781f4b797067f01 100644 (file)
@@ -37,6 +37,7 @@ static const struct pwm_lpss_boardinfo pwm_lpss_bxt_info = {
        .clk_rate = 19200000,
        .npwm = 4,
        .base_unit_bits = 22,
+       .bypass = true,
 };
 
 static int pwm_lpss_probe_platform(struct platform_device *pdev)
index 689d2c1cbead80f5b6540dac13f1f0e56edfecfa..8db0d40ccacde84a61d292936f6bbdeeed7ac358 100644 (file)
@@ -57,7 +57,7 @@ static inline void pwm_lpss_write(const struct pwm_device *pwm, u32 value)
        writel(value, lpwm->regs + pwm->hwpwm * PWM_SIZE + PWM);
 }
 
-static int pwm_lpss_update(struct pwm_device *pwm)
+static int pwm_lpss_wait_for_update(struct pwm_device *pwm)
 {
        struct pwm_lpss_chip *lpwm = to_lpwm(pwm->chip);
        const void __iomem *addr = lpwm->regs + pwm->hwpwm * PWM_SIZE + PWM;
@@ -65,8 +65,6 @@ static int pwm_lpss_update(struct pwm_device *pwm)
        u32 val;
        int err;
 
-       pwm_lpss_write(pwm, pwm_lpss_read(pwm) | PWM_SW_UPDATE);
-
        /*
         * PWM Configuration register has SW_UPDATE bit that is set when a new
         * configuration is written to the register. The bit is automatically
@@ -122,6 +120,12 @@ static void pwm_lpss_prepare(struct pwm_lpss_chip *lpwm, struct pwm_device *pwm,
        pwm_lpss_write(pwm, ctrl);
 }
 
+static inline void pwm_lpss_cond_enable(struct pwm_device *pwm, bool cond)
+{
+       if (cond)
+               pwm_lpss_write(pwm, pwm_lpss_read(pwm) | PWM_ENABLE);
+}
+
 static int pwm_lpss_apply(struct pwm_chip *chip, struct pwm_device *pwm,
                          struct pwm_state *state)
 {
@@ -137,18 +141,21 @@ static int pwm_lpss_apply(struct pwm_chip *chip, struct pwm_device *pwm,
                                return ret;
                        }
                        pwm_lpss_prepare(lpwm, pwm, state->duty_cycle, state->period);
-                       ret = pwm_lpss_update(pwm);
+                       pwm_lpss_write(pwm, pwm_lpss_read(pwm) | PWM_SW_UPDATE);
+                       pwm_lpss_cond_enable(pwm, lpwm->info->bypass == false);
+                       ret = pwm_lpss_wait_for_update(pwm);
                        if (ret) {
                                pm_runtime_put(chip->dev);
                                return ret;
                        }
-                       pwm_lpss_write(pwm, pwm_lpss_read(pwm) | PWM_ENABLE);
+                       pwm_lpss_cond_enable(pwm, lpwm->info->bypass == true);
                } else {
                        ret = pwm_lpss_is_updating(pwm);
                        if (ret)
                                return ret;
                        pwm_lpss_prepare(lpwm, pwm, state->duty_cycle, state->period);
-                       return pwm_lpss_update(pwm);
+                       pwm_lpss_write(pwm, pwm_lpss_read(pwm) | PWM_SW_UPDATE);
+                       return pwm_lpss_wait_for_update(pwm);
                }
        } else if (pwm_is_enabled(pwm)) {
                pwm_lpss_write(pwm, pwm_lpss_read(pwm) & ~PWM_ENABLE);
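
The bypass flag encodes a hardware quirk in when PWM_ENABLE may be set
relative to the SW_UPDATE handshake: Broxton (bypass = true) only latches the
new configuration if the PWM is enabled after the update completes, while Bay
Trail, Cherry Trail and Tangier want the enable bit set while the update is
still latching. Condensed into one illustrative function (signatures
simplified from the code above):

    /* Illustrative condensation of the enable path in pwm_lpss_apply(). */
    static int lpss_enable_path(struct pwm_lpss_chip *lpwm,
                                struct pwm_device *pwm,
                                int duty_ns, int period_ns)
    {
            int err;

            pwm_lpss_prepare(lpwm, pwm, duty_ns, period_ns);
            pwm_lpss_write(pwm, pwm_lpss_read(pwm) | PWM_SW_UPDATE);

            if (!lpwm->info->bypass)    /* BYT/CHT/Tangier: enable first */
                    pwm_lpss_write(pwm, pwm_lpss_read(pwm) | PWM_ENABLE);

            err = pwm_lpss_wait_for_update(pwm);
            if (err)
                    return err;

            if (lpwm->info->bypass)     /* Broxton: enable after the wait */
                    pwm_lpss_write(pwm, pwm_lpss_read(pwm) | PWM_ENABLE);
            return 0;
    }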
index c94cd7c2695da72181830f35d52c2ce12c6afc18..98306bb02cfe71c0775eb430e7cf623fdc431889 100644 (file)
@@ -22,6 +22,7 @@ struct pwm_lpss_boardinfo {
        unsigned long clk_rate;
        unsigned int npwm;
        unsigned long base_unit_bits;
+       bool bypass;
 };
 
 struct pwm_lpss_chip *pwm_lpss_probe(struct device *dev, struct resource *r,
index ef89df1f7336c77a70c3a4de42b6d71ec96162ba..744d56197286a45eb148de437d31a3c3ca8fbd1a 100644 (file)
@@ -191,6 +191,28 @@ static int rockchip_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
        return 0;
 }
 
+static int rockchip_pwm_enable(struct pwm_chip *chip,
+                        struct pwm_device *pwm,
+                        bool enable,
+                        enum pwm_polarity polarity)
+{
+       struct rockchip_pwm_chip *pc = to_rockchip_pwm_chip(chip);
+       int ret;
+
+       if (enable) {
+               ret = clk_enable(pc->clk);
+               if (ret)
+                       return ret;
+       }
+
+       pc->data->set_enable(chip, pwm, enable, polarity);
+
+       if (!enable)
+               clk_disable(pc->clk);
+
+       return 0;
+}
+
 static int rockchip_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
                              struct pwm_state *state)
 {
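
The new wrapper keeps the PWM bus clock reference count balanced with the
enable bit: a reference is taken before the hardware is enabled and dropped
after it is disabled, so the clock can actually gate while the PWM is off.
The underlying pattern, reduced to a generic sketch:

    /* Sketch: balancing a clock reference around a hardware enable bit. */
    static int hw_set_enabled(struct clk *clk, bool enable,
                              void (*apply_enable)(bool))
    {
            int ret;

            if (enable) {
                    ret = clk_enable(clk);  /* take ref before HW touch */
                    if (ret)
                            return ret;
            }

            apply_enable(enable);

            if (!enable)
                    clk_disable(clk);       /* drop ref after HW touch */

            return 0;
    }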
@@ -207,22 +229,26 @@ static int rockchip_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
                return ret;
 
        if (state->polarity != curstate.polarity && enabled) {
-               pc->data->set_enable(chip, pwm, false, state->polarity);
+               ret = rockchip_pwm_enable(chip, pwm, false, state->polarity);
+               if (ret)
+                       goto out;
                enabled = false;
        }
 
        ret = rockchip_pwm_config(chip, pwm, state->duty_cycle, state->period);
        if (ret) {
                if (enabled != curstate.enabled)
-                       pc->data->set_enable(chip, pwm, !enabled,
-                                            state->polarity);
-
+                       rockchip_pwm_enable(chip, pwm, !enabled,
+                                     state->polarity);
                goto out;
        }
 
-       if (state->enabled != enabled)
-               pc->data->set_enable(chip, pwm, state->enabled,
-                                    state->polarity);
+       if (state->enabled != enabled) {
+               ret = rockchip_pwm_enable(chip, pwm, state->enabled,
+                                   state->polarity);
+               if (ret)
+                       goto out;
+       }
 
        /*
         * Update the state with the real hardware, which can differ a bit
index a91802432f2f47d1b163ba9f8e2da90dabe28e62..e3f9ed3690b7a86103472de987c03fd76becd59b 100644 (file)
@@ -485,8 +485,7 @@ static void iscsit_get_rx_pdu(struct iscsi_conn *);
 
 int iscsit_queue_rsp(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
 {
-       iscsit_add_cmd_to_response_queue(cmd, cmd->conn, cmd->i_state);
-       return 0;
+       return iscsit_add_cmd_to_response_queue(cmd, cmd->conn, cmd->i_state);
 }
 EXPORT_SYMBOL(iscsit_queue_rsp);
 
index bf40f03755ddc50697652ccde864d40df840fa0b..344e8448869c15c9e078ef708debf98cac1aae1a 100644 (file)
@@ -1398,11 +1398,10 @@ static u32 lio_sess_get_initiator_sid(
 static int lio_queue_data_in(struct se_cmd *se_cmd)
 {
        struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd);
+       struct iscsi_conn *conn = cmd->conn;
 
        cmd->i_state = ISTATE_SEND_DATAIN;
-       cmd->conn->conn_transport->iscsit_queue_data_in(cmd->conn, cmd);
-
-       return 0;
+       return conn->conn_transport->iscsit_queue_data_in(conn, cmd);
 }
 
 static int lio_write_pending(struct se_cmd *se_cmd)
@@ -1431,16 +1430,14 @@ static int lio_write_pending_status(struct se_cmd *se_cmd)
 static int lio_queue_status(struct se_cmd *se_cmd)
 {
        struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd);
+       struct iscsi_conn *conn = cmd->conn;
 
        cmd->i_state = ISTATE_SEND_STATUS;
 
        if (cmd->se_cmd.scsi_status || cmd->sense_reason) {
-               iscsit_add_cmd_to_response_queue(cmd, cmd->conn, cmd->i_state);
-               return 0;
+               return iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
        }
-       cmd->conn->conn_transport->iscsit_queue_status(cmd->conn, cmd);
-
-       return 0;
+       return conn->conn_transport->iscsit_queue_status(conn, cmd);
 }
 
 static void lio_queue_tm_rsp(struct se_cmd *se_cmd)
index e65bf78ceef3740fc1923c1b3ed446aa2996b82d..fce627628200cf9917a8212da7a17ccff3d6136c 100644 (file)
@@ -781,22 +781,6 @@ static void iscsi_check_proposer_for_optional_reply(struct iscsi_param *param)
        } else if (IS_TYPE_NUMBER(param)) {
                if (!strcmp(param->name, MAXRECVDATASEGMENTLENGTH))
                        SET_PSTATE_REPLY_OPTIONAL(param);
-               /*
-                * The GlobalSAN iSCSI Initiator for MacOSX does
-                * not respond to MaxBurstLength, FirstBurstLength,
-                * DefaultTime2Wait or DefaultTime2Retain parameter keys.
-                * So, we set them to 'reply optional' here, and assume
-                * the defaults from iscsi_parameters.h if the initiator
-                * is not RFC compliant and the keys are not negotiated.
-                */
-               if (!strcmp(param->name, MAXBURSTLENGTH))
-                       SET_PSTATE_REPLY_OPTIONAL(param);
-               if (!strcmp(param->name, FIRSTBURSTLENGTH))
-                       SET_PSTATE_REPLY_OPTIONAL(param);
-               if (!strcmp(param->name, DEFAULTTIME2WAIT))
-                       SET_PSTATE_REPLY_OPTIONAL(param);
-               if (!strcmp(param->name, DEFAULTTIME2RETAIN))
-                       SET_PSTATE_REPLY_OPTIONAL(param);
                /*
                 * Required for gPXE iSCSI boot client
                 */
index 5041a9c8bdcbfd9bf9eb9368e850bdb8792a6be9..7d3e2fcc26a0da82629102693a99750622afed95 100644 (file)
@@ -567,7 +567,7 @@ static void iscsit_remove_cmd_from_immediate_queue(
        }
 }
 
-void iscsit_add_cmd_to_response_queue(
+int iscsit_add_cmd_to_response_queue(
        struct iscsi_cmd *cmd,
        struct iscsi_conn *conn,
        u8 state)
@@ -578,7 +578,7 @@ void iscsit_add_cmd_to_response_queue(
        if (!qr) {
                pr_err("Unable to allocate memory for"
                        " struct iscsi_queue_req\n");
-               return;
+               return -ENOMEM;
        }
        INIT_LIST_HEAD(&qr->qr_list);
        qr->cmd = cmd;
@@ -590,6 +590,7 @@ void iscsit_add_cmd_to_response_queue(
        spin_unlock_bh(&conn->response_queue_lock);
 
        wake_up(&conn->queues_wq);
+       return 0;
 }
 
 struct iscsi_queue_req *iscsit_get_cmd_from_response_queue(struct iscsi_conn *conn)
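
Returning int from iscsit_add_cmd_to_response_queue() is what lets the
lio_queue_status()/lio_queue_data_in() changes earlier in this series
propagate -ENOMEM instead of swallowing it. A minimal caller showing the
contract (the wrapper itself is illustrative):

    /* Sketch: a response-queue caller surfacing allocation failure. */
    static int example_queue_rsp(struct iscsi_conn *conn,
                                 struct iscsi_cmd *cmd)
    {
            /* -ENOMEM here reaches target-core, which parks the command
             * on the device queue-full list instead of dropping it. */
            return iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
    }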
@@ -737,21 +738,23 @@ void iscsit_free_cmd(struct iscsi_cmd *cmd, bool shutdown)
 {
        struct se_cmd *se_cmd = NULL;
        int rc;
+       bool op_scsi = false;
        /*
         * Determine if a struct se_cmd is associated with
         * this struct iscsi_cmd.
         */
        switch (cmd->iscsi_opcode) {
        case ISCSI_OP_SCSI_CMD:
-               se_cmd = &cmd->se_cmd;
-               __iscsit_free_cmd(cmd, true, shutdown);
+               op_scsi = true;
                /*
                 * Fallthrough
                 */
        case ISCSI_OP_SCSI_TMFUNC:
-               rc = transport_generic_free_cmd(&cmd->se_cmd, shutdown);
-               if (!rc && shutdown && se_cmd && se_cmd->se_sess) {
-                       __iscsit_free_cmd(cmd, true, shutdown);
+               se_cmd = &cmd->se_cmd;
+               __iscsit_free_cmd(cmd, op_scsi, shutdown);
+               rc = transport_generic_free_cmd(se_cmd, shutdown);
+               if (!rc && shutdown && se_cmd->se_sess) {
+                       __iscsit_free_cmd(cmd, op_scsi, shutdown);
                        target_put_sess_cmd(se_cmd);
                }
                break;
index 8ff08856516aba68394fc07661ec71b635c8b6a2..9e4197af8708e1056a08f4c01d423581a075bef1 100644 (file)
@@ -31,7 +31,7 @@ extern int iscsit_find_cmd_for_recovery(struct iscsi_session *, struct iscsi_cmd
                        struct iscsi_conn_recovery **, itt_t);
 extern void iscsit_add_cmd_to_immediate_queue(struct iscsi_cmd *, struct iscsi_conn *, u8);
 extern struct iscsi_queue_req *iscsit_get_cmd_from_immediate_queue(struct iscsi_conn *);
-extern void iscsit_add_cmd_to_response_queue(struct iscsi_cmd *, struct iscsi_conn *, u8);
+extern int iscsit_add_cmd_to_response_queue(struct iscsi_cmd *, struct iscsi_conn *, u8);
 extern struct iscsi_queue_req *iscsit_get_cmd_from_response_queue(struct iscsi_conn *);
 extern void iscsit_remove_cmd_from_tx_queues(struct iscsi_cmd *, struct iscsi_conn *);
 extern bool iscsit_conn_all_queues_empty(struct iscsi_conn *);
index fd7c16a7ca6e06ad53e6d6df54ab739550ae4a4a..fc4a9c303d559f95b1216857efd8ae6ce77b96ca 100644 (file)
@@ -197,8 +197,7 @@ target_emulate_report_target_port_groups(struct se_cmd *cmd)
                /*
                 * Set the ASYMMETRIC ACCESS State
                 */
-               buf[off++] |= (atomic_read(
-                       &tg_pt_gp->tg_pt_gp_alua_access_state) & 0xff);
+               buf[off++] |= tg_pt_gp->tg_pt_gp_alua_access_state & 0xff;
                /*
                 * Set supported ASYMMETRIC ACCESS State bits
                 */
@@ -710,7 +709,7 @@ target_alua_state_check(struct se_cmd *cmd)
 
        spin_lock(&lun->lun_tg_pt_gp_lock);
        tg_pt_gp = lun->lun_tg_pt_gp;
-       out_alua_state = atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state);
+       out_alua_state = tg_pt_gp->tg_pt_gp_alua_access_state;
        nonop_delay_msecs = tg_pt_gp->tg_pt_gp_nonop_delay_msecs;
 
        // XXX: keeps using tg_pt_gp without reference after unlock
@@ -911,7 +910,7 @@ static int core_alua_write_tpg_metadata(
 }
 
 /*
- * Called with tg_pt_gp->tg_pt_gp_md_mutex held
+ * Called with tg_pt_gp->tg_pt_gp_transition_mutex held
  */
 static int core_alua_update_tpg_primary_metadata(
        struct t10_alua_tg_pt_gp *tg_pt_gp)
@@ -934,7 +933,7 @@ static int core_alua_update_tpg_primary_metadata(
                        "alua_access_state=0x%02x\n"
                        "alua_access_status=0x%02x\n",
                        tg_pt_gp->tg_pt_gp_id,
-                       tg_pt_gp->tg_pt_gp_alua_pending_state,
+                       tg_pt_gp->tg_pt_gp_alua_access_state,
                        tg_pt_gp->tg_pt_gp_alua_access_status);
 
        snprintf(path, ALUA_METADATA_PATH_LEN,
@@ -1013,93 +1012,41 @@ static void core_alua_queue_state_change_ua(struct t10_alua_tg_pt_gp *tg_pt_gp)
        spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
 }
 
-static void core_alua_do_transition_tg_pt_work(struct work_struct *work)
-{
-       struct t10_alua_tg_pt_gp *tg_pt_gp = container_of(work,
-               struct t10_alua_tg_pt_gp, tg_pt_gp_transition_work);
-       struct se_device *dev = tg_pt_gp->tg_pt_gp_dev;
-       bool explicit = (tg_pt_gp->tg_pt_gp_alua_access_status ==
-                        ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG);
-
-       /*
-        * Update the ALUA metadata buf that has been allocated in
-        * core_alua_do_port_transition(), this metadata will be written
-        * to struct file.
-        *
-        * Note that there is the case where we do not want to update the
-        * metadata when the saved metadata is being parsed in userspace
-        * when setting the existing port access state and access status.
-        *
-        * Also note that the failure to write out the ALUA metadata to
-        * struct file does NOT affect the actual ALUA transition.
-        */
-       if (tg_pt_gp->tg_pt_gp_write_metadata) {
-               mutex_lock(&tg_pt_gp->tg_pt_gp_md_mutex);
-               core_alua_update_tpg_primary_metadata(tg_pt_gp);
-               mutex_unlock(&tg_pt_gp->tg_pt_gp_md_mutex);
-       }
-       /*
-        * Set the current primary ALUA access state to the requested new state
-        */
-       atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state,
-                  tg_pt_gp->tg_pt_gp_alua_pending_state);
-
-       pr_debug("Successful %s ALUA transition TG PT Group: %s ID: %hu"
-               " from primary access state %s to %s\n", (explicit) ? "explicit" :
-               "implicit", config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item),
-               tg_pt_gp->tg_pt_gp_id,
-               core_alua_dump_state(tg_pt_gp->tg_pt_gp_alua_previous_state),
-               core_alua_dump_state(tg_pt_gp->tg_pt_gp_alua_pending_state));
-
-       core_alua_queue_state_change_ua(tg_pt_gp);
-
-       spin_lock(&dev->t10_alua.tg_pt_gps_lock);
-       atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt);
-       spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
-
-       if (tg_pt_gp->tg_pt_gp_transition_complete)
-               complete(tg_pt_gp->tg_pt_gp_transition_complete);
-}
-
 static int core_alua_do_transition_tg_pt(
        struct t10_alua_tg_pt_gp *tg_pt_gp,
        int new_state,
        int explicit)
 {
-       struct se_device *dev = tg_pt_gp->tg_pt_gp_dev;
-       DECLARE_COMPLETION_ONSTACK(wait);
+       int prev_state;
 
+       mutex_lock(&tg_pt_gp->tg_pt_gp_transition_mutex);
        /* Nothing to be done here */
-       if (atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state) == new_state)
+       if (tg_pt_gp->tg_pt_gp_alua_access_state == new_state) {
+               mutex_unlock(&tg_pt_gp->tg_pt_gp_transition_mutex);
                return 0;
+       }
 
-       if (explicit && new_state == ALUA_ACCESS_STATE_TRANSITION)
+       if (explicit && new_state == ALUA_ACCESS_STATE_TRANSITION) {
+               mutex_unlock(&tg_pt_gp->tg_pt_gp_transition_mutex);
                return -EAGAIN;
-
-       /*
-        * Flush any pending transitions
-        */
-       if (!explicit)
-               flush_work(&tg_pt_gp->tg_pt_gp_transition_work);
+       }
 
        /*
         * Save the old primary ALUA access state, and set the current state
         * to ALUA_ACCESS_STATE_TRANSITION.
         */
-       atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state,
-                       ALUA_ACCESS_STATE_TRANSITION);
+       prev_state = tg_pt_gp->tg_pt_gp_alua_access_state;
+       tg_pt_gp->tg_pt_gp_alua_access_state = ALUA_ACCESS_STATE_TRANSITION;
        tg_pt_gp->tg_pt_gp_alua_access_status = (explicit) ?
                                ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG :
                                ALUA_STATUS_ALTERED_BY_IMPLICIT_ALUA;
 
        core_alua_queue_state_change_ua(tg_pt_gp);
 
-       if (new_state == ALUA_ACCESS_STATE_TRANSITION)
+       if (new_state == ALUA_ACCESS_STATE_TRANSITION) {
+               mutex_unlock(&tg_pt_gp->tg_pt_gp_transition_mutex);
                return 0;
-
-       tg_pt_gp->tg_pt_gp_alua_previous_state =
-               atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state);
-       tg_pt_gp->tg_pt_gp_alua_pending_state = new_state;
+       }
 
        /*
         * Check for the optional ALUA primary state transition delay
@@ -1108,19 +1055,36 @@ static int core_alua_do_transition_tg_pt(
                msleep_interruptible(tg_pt_gp->tg_pt_gp_trans_delay_msecs);
 
        /*
-        * Take a reference for workqueue item
+        * Set the current primary ALUA access state to the requested new state
         */
-       spin_lock(&dev->t10_alua.tg_pt_gps_lock);
-       atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt);
-       spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
+       tg_pt_gp->tg_pt_gp_alua_access_state = new_state;
 
-       schedule_work(&tg_pt_gp->tg_pt_gp_transition_work);
-       if (explicit) {
-               tg_pt_gp->tg_pt_gp_transition_complete = &wait;
-               wait_for_completion(&wait);
-               tg_pt_gp->tg_pt_gp_transition_complete = NULL;
+       /*
+        * Update the ALUA metadata buf that has been allocated in
+        * core_alua_do_port_transition(); this metadata will be written
+        * to struct file.
+        *
+        * Note that there is a case where we do not want to update the
+        * metadata: when the saved metadata is being parsed in userspace
+        * while setting the existing port access state and access status.
+        *
+        * Also note that the failure to write out the ALUA metadata to
+        * struct file does NOT affect the actual ALUA transition.
+        */
+       if (tg_pt_gp->tg_pt_gp_write_metadata) {
+               core_alua_update_tpg_primary_metadata(tg_pt_gp);
        }
 
+       pr_debug("Successful %s ALUA transition TG PT Group: %s ID: %hu"
+               " from primary access state %s to %s\n", (explicit) ? "explicit" :
+               "implicit", config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item),
+               tg_pt_gp->tg_pt_gp_id,
+               core_alua_dump_state(prev_state),
+               core_alua_dump_state(new_state));
+
+       core_alua_queue_state_change_ua(tg_pt_gp);
+
+       mutex_unlock(&tg_pt_gp->tg_pt_gp_transition_mutex);
        return 0;
 }
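
The net effect of this rework: the workqueue, completion and atomic state are
gone, and a single mutex serializes the whole transition, so the access state
can be a plain int. The generic shape of the conversion (sketch, with an
invented three-state enum):

    #include <linux/mutex.h>

    enum { STATE_READY, STATE_TRANSITION, STATE_STANDBY };

    struct stateful {
            struct mutex lock;
            int state;      /* plain int: all writers hold the mutex */
    };

    static int do_transition(struct stateful *s, int new_state)
    {
            mutex_lock(&s->lock);
            if (s->state == new_state) {    /* nothing to be done */
                    mutex_unlock(&s->lock);
                    return 0;
            }
            s->state = STATE_TRANSITION;    /* intermediate, visible state */
            /* ... slow work: delays, metadata write, notifications ... */
            s->state = new_state;
            mutex_unlock(&s->lock);
            return 0;
    }

Readers that do not take the mutex, such as target_alua_state_check(), see
either the old state, ALUA_ACCESS_STATE_TRANSITION, or the new state, each of
which is a valid snapshot.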
 
@@ -1685,14 +1649,12 @@ struct t10_alua_tg_pt_gp *core_alua_allocate_tg_pt_gp(struct se_device *dev,
        }
        INIT_LIST_HEAD(&tg_pt_gp->tg_pt_gp_list);
        INIT_LIST_HEAD(&tg_pt_gp->tg_pt_gp_lun_list);
-       mutex_init(&tg_pt_gp->tg_pt_gp_md_mutex);
+       mutex_init(&tg_pt_gp->tg_pt_gp_transition_mutex);
        spin_lock_init(&tg_pt_gp->tg_pt_gp_lock);
        atomic_set(&tg_pt_gp->tg_pt_gp_ref_cnt, 0);
-       INIT_WORK(&tg_pt_gp->tg_pt_gp_transition_work,
-                 core_alua_do_transition_tg_pt_work);
        tg_pt_gp->tg_pt_gp_dev = dev;
-       atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state,
-               ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED);
+       tg_pt_gp->tg_pt_gp_alua_access_state =
+                       ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED;
        /*
         * Enable both explicit and implicit ALUA support by default
         */
@@ -1797,8 +1759,6 @@ void core_alua_free_tg_pt_gp(
        dev->t10_alua.alua_tg_pt_gps_counter--;
        spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
 
-       flush_work(&tg_pt_gp->tg_pt_gp_transition_work);
-
        /*
         * Allow a struct t10_alua_tg_pt_gp_member * referenced by
         * core_alua_get_tg_pt_gp_by_name() in
@@ -1938,8 +1898,8 @@ ssize_t core_alua_show_tg_pt_gp_info(struct se_lun *lun, char *page)
                        "Primary Access Status: %s\nTG Port Secondary Access"
                        " State: %s\nTG Port Secondary Access Status: %s\n",
                        config_item_name(tg_pt_ci), tg_pt_gp->tg_pt_gp_id,
-                       core_alua_dump_state(atomic_read(
-                                       &tg_pt_gp->tg_pt_gp_alua_access_state)),
+                       core_alua_dump_state(
+                               tg_pt_gp->tg_pt_gp_alua_access_state),
                        core_alua_dump_status(
                                tg_pt_gp->tg_pt_gp_alua_access_status),
                        atomic_read(&lun->lun_tg_pt_secondary_offline) ?
index 38b5025e4c7a877f9e5c0bcfa6995262b6330e32..70657fd564406b3c137b02c3f61824f387f554aa 100644 (file)
@@ -2392,7 +2392,7 @@ static ssize_t target_tg_pt_gp_alua_access_state_show(struct config_item *item,
                char *page)
 {
        return sprintf(page, "%d\n",
-               atomic_read(&to_tg_pt_gp(item)->tg_pt_gp_alua_access_state));
+                      to_tg_pt_gp(item)->tg_pt_gp_alua_access_state);
 }
 
 static ssize_t target_tg_pt_gp_alua_access_state_store(struct config_item *item,
index d8a16ca6baa507b235cbec2fbff56a648874fd2e..d1e6cab8e3d3f0a95cb801f2680c2b8e2474de1d 100644 (file)
@@ -92,6 +92,11 @@ static int target_fabric_mappedlun_link(
                pr_err("Source se_lun->lun_se_dev does not exist\n");
                return -EINVAL;
        }
+       if (lun->lun_shutdown) {
+               pr_err("Unable to create mappedlun symlink because"
+                       " lun->lun_shutdown=true\n");
+               return -EINVAL;
+       }
        se_tpg = lun->lun_tpg;
 
        nacl_ci = &lun_acl_ci->ci_parent->ci_group->cg_item;
index 6fb191914f458f7889508652e19b860355387491..dfaef4d3b2d2698088754c08f1846bc893815237 100644 (file)
@@ -642,6 +642,8 @@ void core_tpg_remove_lun(
         */
        struct se_device *dev = rcu_dereference_raw(lun->lun_se_dev);
 
+       lun->lun_shutdown = true;
+
        core_clear_lun_from_tpg(lun, tpg);
        /*
         * Wait for any active I/O references to percpu se_lun->lun_ref to
@@ -663,6 +665,8 @@ void core_tpg_remove_lun(
        }
        if (!(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE))
                hlist_del_rcu(&lun->link);
+
+       lun->lun_shutdown = false;
        mutex_unlock(&tpg->tpg_lun_mutex);
 
        percpu_ref_exit(&lun->lun_ref);
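
These two hunks close a configfs race: core_tpg_remove_lun() raises
lun->lun_shutdown while the LUN is being torn down, and
target_fabric_mappedlun_link() refuses to create a new mappedlun symlink
against such a LUN. The shutdown-gate shape, reduced to a sketch:

    /* Illustrative shutdown gate; the real code does this for se_lun. */
    struct obj {
            bool shutdown;
    };

    static int link_obj(struct obj *o)
    {
            if (o->shutdown)        /* teardown already in flight */
                    return -EINVAL;
            /* ... take references, create the symlink ... */
            return 0;
    }

    static void remove_obj(struct obj *o)
    {
            o->shutdown = true;     /* block new links */
            /* ... drain active references, unlink ... */
            o->shutdown = false;
    }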
index b1a3cdb29468cf84e7eb48d6c8c41934c0b5b4cb..a0cd56ee5fe984f7ddf27c41157f7314343d23c1 100644 (file)
@@ -64,8 +64,9 @@ struct kmem_cache *t10_alua_lba_map_cache;
 struct kmem_cache *t10_alua_lba_map_mem_cache;
 
 static void transport_complete_task_attr(struct se_cmd *cmd);
+static int translate_sense_reason(struct se_cmd *cmd, sense_reason_t reason);
 static void transport_handle_queue_full(struct se_cmd *cmd,
-               struct se_device *dev);
+               struct se_device *dev, int err, bool write_pending);
 static int transport_put_cmd(struct se_cmd *cmd);
 static void target_complete_ok_work(struct work_struct *work);
 
@@ -804,7 +805,8 @@ void target_qf_do_work(struct work_struct *work)
 
                if (cmd->t_state == TRANSPORT_COMPLETE_QF_WP)
                        transport_write_pending_qf(cmd);
-               else if (cmd->t_state == TRANSPORT_COMPLETE_QF_OK)
+               else if (cmd->t_state == TRANSPORT_COMPLETE_QF_OK ||
+                        cmd->t_state == TRANSPORT_COMPLETE_QF_ERR)
                        transport_complete_qf(cmd);
        }
 }
@@ -1719,7 +1721,7 @@ void transport_generic_request_failure(struct se_cmd *cmd,
                }
                trace_target_cmd_complete(cmd);
                ret = cmd->se_tfo->queue_status(cmd);
-               if (ret == -EAGAIN || ret == -ENOMEM)
+               if (ret)
                        goto queue_full;
                goto check_stop;
        default:
@@ -1730,7 +1732,7 @@ void transport_generic_request_failure(struct se_cmd *cmd,
        }
 
        ret = transport_send_check_condition_and_sense(cmd, sense_reason, 0);
-       if (ret == -EAGAIN || ret == -ENOMEM)
+       if (ret)
                goto queue_full;
 
 check_stop:
@@ -1739,8 +1741,7 @@ check_stop:
        return;
 
 queue_full:
-       cmd->t_state = TRANSPORT_COMPLETE_QF_OK;
-       transport_handle_queue_full(cmd, cmd->se_dev);
+       transport_handle_queue_full(cmd, cmd->se_dev, ret, false);
 }
 EXPORT_SYMBOL(transport_generic_request_failure);
 
@@ -1977,13 +1978,29 @@ static void transport_complete_qf(struct se_cmd *cmd)
        int ret = 0;
 
        transport_complete_task_attr(cmd);
+       /*
+        * If a fabric driver ->write_pending() or ->queue_data_in() callback
+        * has returned neither -ENOMEM nor -EAGAIN, assume it's fatal and
+        * the same callback should not be retried.  Return CHECK_CONDITION
+        * if a scsi_status is not already set.
+        *
+        * If a fabric driver ->queue_status() has returned non-zero, always
+        * keep retrying no matter what.
+        */
+       if (cmd->t_state == TRANSPORT_COMPLETE_QF_ERR) {
+               if (cmd->scsi_status)
+                       goto queue_status;
 
-       if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) {
-               trace_target_cmd_complete(cmd);
-               ret = cmd->se_tfo->queue_status(cmd);
-               goto out;
+               cmd->se_cmd_flags |= SCF_EMULATED_TASK_SENSE;
+               cmd->scsi_status = SAM_STAT_CHECK_CONDITION;
+               cmd->scsi_sense_length  = TRANSPORT_SENSE_BUFFER;
+               translate_sense_reason(cmd, TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE);
+               goto queue_status;
        }
 
+       if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE)
+               goto queue_status;
+
        switch (cmd->data_direction) {
        case DMA_FROM_DEVICE:
                if (cmd->scsi_status)
@@ -2007,19 +2024,33 @@ queue_status:
                break;
        }
 
-out:
        if (ret < 0) {
-               transport_handle_queue_full(cmd, cmd->se_dev);
+               transport_handle_queue_full(cmd, cmd->se_dev, ret, false);
                return;
        }
        transport_lun_remove_cmd(cmd);
        transport_cmd_check_stop_to_fabric(cmd);
 }
 
-static void transport_handle_queue_full(
-       struct se_cmd *cmd,
-       struct se_device *dev)
+static void transport_handle_queue_full(struct se_cmd *cmd, struct se_device *dev,
+                                       int err, bool write_pending)
 {
+       /*
+        * -EAGAIN or -ENOMEM signals retry of ->write_pending() and/or
+        * ->queue_data_in() callbacks from new process context.
+        *
+        * Otherwise for other errors, transport_complete_qf() will send
+        * CHECK_CONDITION via ->queue_status() instead of attempting to
+        * retry associated fabric driver data-transfer callbacks.
+        */
+       if (err == -EAGAIN || err == -ENOMEM) {
+               cmd->t_state = (write_pending) ? TRANSPORT_COMPLETE_QF_WP :
+                                                TRANSPORT_COMPLETE_QF_OK;
+       } else {
+               pr_warn_ratelimited("Got unknown fabric queue status: %d\n", err);
+               cmd->t_state = TRANSPORT_COMPLETE_QF_ERR;
+       }
+
        spin_lock_irq(&dev->qf_cmd_lock);
        list_add_tail(&cmd->se_qf_node, &cmd->se_dev->qf_cmd_list);
        atomic_inc_mb(&dev->dev_qf_count);
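
transport_handle_queue_full() now classifies the fabric return code instead
of expecting the caller to have set t_state. The dispatch rule it implements,
mirrored as a stand-alone sketch (the TRANSPORT_COMPLETE_QF_* values are
target-core's t_state constants):

    /* Sketch of the classification transport_handle_queue_full() applies. */
    static int classify_qf(int err, bool write_pending)
    {
            if (err == -EAGAIN || err == -ENOMEM)   /* transient: retry */
                    return write_pending ? TRANSPORT_COMPLETE_QF_WP :
                                           TRANSPORT_COMPLETE_QF_OK;
            return TRANSPORT_COMPLETE_QF_ERR;       /* fatal path */
    }

On the next target_qf_do_work() pass, QF_OK/QF_WP retry the original
callback, while QF_ERR makes transport_complete_qf() send a
LOGICAL UNIT COMMUNICATION FAILURE check condition if no scsi_status is
already set.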
@@ -2083,7 +2114,7 @@ static void target_complete_ok_work(struct work_struct *work)
                WARN_ON(!cmd->scsi_status);
                ret = transport_send_check_condition_and_sense(
                                        cmd, 0, 1);
-               if (ret == -EAGAIN || ret == -ENOMEM)
+               if (ret)
                        goto queue_full;
 
                transport_lun_remove_cmd(cmd);
@@ -2109,7 +2140,7 @@ static void target_complete_ok_work(struct work_struct *work)
                } else if (rc) {
                        ret = transport_send_check_condition_and_sense(cmd,
                                                rc, 0);
-                       if (ret == -EAGAIN || ret == -ENOMEM)
+                       if (ret)
                                goto queue_full;
 
                        transport_lun_remove_cmd(cmd);
@@ -2134,7 +2165,7 @@ queue_rsp:
                if (target_read_prot_action(cmd)) {
                        ret = transport_send_check_condition_and_sense(cmd,
                                                cmd->pi_err, 0);
-                       if (ret == -EAGAIN || ret == -ENOMEM)
+                       if (ret)
                                goto queue_full;
 
                        transport_lun_remove_cmd(cmd);
@@ -2144,7 +2175,7 @@ queue_rsp:
 
                trace_target_cmd_complete(cmd);
                ret = cmd->se_tfo->queue_data_in(cmd);
-               if (ret == -EAGAIN || ret == -ENOMEM)
+               if (ret)
                        goto queue_full;
                break;
        case DMA_TO_DEVICE:
@@ -2157,7 +2188,7 @@ queue_rsp:
                        atomic_long_add(cmd->data_length,
                                        &cmd->se_lun->lun_stats.tx_data_octets);
                        ret = cmd->se_tfo->queue_data_in(cmd);
-                       if (ret == -EAGAIN || ret == -ENOMEM)
+                       if (ret)
                                goto queue_full;
                        break;
                }
@@ -2166,7 +2197,7 @@ queue_rsp:
 queue_status:
                trace_target_cmd_complete(cmd);
                ret = cmd->se_tfo->queue_status(cmd);
-               if (ret == -EAGAIN || ret == -ENOMEM)
+               if (ret)
                        goto queue_full;
                break;
        default:
@@ -2180,8 +2211,8 @@ queue_status:
 queue_full:
        pr_debug("Handling complete_ok QUEUE_FULL: se_cmd: %p,"
                " data_direction: %d\n", cmd, cmd->data_direction);
-       cmd->t_state = TRANSPORT_COMPLETE_QF_OK;
-       transport_handle_queue_full(cmd, cmd->se_dev);
+
+       transport_handle_queue_full(cmd, cmd->se_dev, ret, false);
 }
 
 void target_free_sgl(struct scatterlist *sgl, int nents)
@@ -2449,18 +2480,14 @@ transport_generic_new_cmd(struct se_cmd *cmd)
        spin_unlock_irqrestore(&cmd->t_state_lock, flags);
 
        ret = cmd->se_tfo->write_pending(cmd);
-       if (ret == -EAGAIN || ret == -ENOMEM)
+       if (ret)
                goto queue_full;
 
-       /* fabric drivers should only return -EAGAIN or -ENOMEM as error */
-       WARN_ON(ret);
-
-       return (!ret) ? 0 : TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+       return 0;
 
 queue_full:
        pr_debug("Handling write_pending QUEUE__FULL: se_cmd: %p\n", cmd);
-       cmd->t_state = TRANSPORT_COMPLETE_QF_WP;
-       transport_handle_queue_full(cmd, cmd->se_dev);
+       transport_handle_queue_full(cmd, cmd->se_dev, ret, true);
        return 0;
 }
 EXPORT_SYMBOL(transport_generic_new_cmd);
@@ -2470,10 +2497,10 @@ static void transport_write_pending_qf(struct se_cmd *cmd)
        int ret;
 
        ret = cmd->se_tfo->write_pending(cmd);
-       if (ret == -EAGAIN || ret == -ENOMEM) {
+       if (ret) {
                pr_debug("Handling write_pending QUEUE__FULL: se_cmd: %p\n",
                         cmd);
-               transport_handle_queue_full(cmd, cmd->se_dev);
+               transport_handle_queue_full(cmd, cmd->se_dev, ret, true);
        }
 }
 
@@ -3011,6 +3038,8 @@ static int __transport_check_aborted_status(struct se_cmd *cmd, int send_status)
        __releases(&cmd->t_state_lock)
        __acquires(&cmd->t_state_lock)
 {
+       int ret;
+
        assert_spin_locked(&cmd->t_state_lock);
        WARN_ON_ONCE(!irqs_disabled());
 
@@ -3034,7 +3063,9 @@ static int __transport_check_aborted_status(struct se_cmd *cmd, int send_status)
        trace_target_cmd_complete(cmd);
 
        spin_unlock_irq(&cmd->t_state_lock);
-       cmd->se_tfo->queue_status(cmd);
+       ret = cmd->se_tfo->queue_status(cmd);
+       if (ret)
+               transport_handle_queue_full(cmd, cmd->se_dev, ret, false);
        spin_lock_irq(&cmd->t_state_lock);
 
        return 1;
@@ -3055,6 +3086,7 @@ EXPORT_SYMBOL(transport_check_aborted_status);
 void transport_send_task_abort(struct se_cmd *cmd)
 {
        unsigned long flags;
+       int ret;
 
        spin_lock_irqsave(&cmd->t_state_lock, flags);
        if (cmd->se_cmd_flags & (SCF_SENT_CHECK_CONDITION)) {
@@ -3090,7 +3122,9 @@ send_abort:
                 cmd->t_task_cdb[0], cmd->tag);
 
        trace_target_cmd_complete(cmd);
-       cmd->se_tfo->queue_status(cmd);
+       ret = cmd->se_tfo->queue_status(cmd);
+       if (ret)
+               transport_handle_queue_full(cmd, cmd->se_dev, ret, false);
 }
 
 static void target_tmr_work(struct work_struct *work)
index c6874c38a10bc45e86beae58ddfed175664d51cf..f615c3bbb73e8b7a2a7bf3f5039efd84c724cdf2 100644 (file)
@@ -311,24 +311,50 @@ static void free_data_area(struct tcmu_dev *udev, struct tcmu_cmd *cmd)
                   DATA_BLOCK_BITS);
 }
 
-static void gather_data_area(struct tcmu_dev *udev, unsigned long *cmd_bitmap,
-               struct scatterlist *data_sg, unsigned int data_nents)
+static void gather_data_area(struct tcmu_dev *udev, struct tcmu_cmd *cmd,
+                            bool bidi)
 {
+       struct se_cmd *se_cmd = cmd->se_cmd;
        int i, block;
        int block_remaining = 0;
        void *from, *to;
        size_t copy_bytes, from_offset;
-       struct scatterlist *sg;
+       struct scatterlist *sg, *data_sg;
+       unsigned int data_nents;
+       DECLARE_BITMAP(bitmap, DATA_BLOCK_BITS);
+
+       bitmap_copy(bitmap, cmd->data_bitmap, DATA_BLOCK_BITS);
+
+       if (!bidi) {
+               data_sg = se_cmd->t_data_sg;
+               data_nents = se_cmd->t_data_nents;
+       } else {
+               uint32_t count;
+
+               /*
+                * For the bidi case, the first count blocks belong to the
+                * Data-Out buffer, so they must be skipped before gathering
+                * the Data-In buffer.
+                */
+               count = DIV_ROUND_UP(se_cmd->data_length, DATA_BLOCK_SIZE);
+               while (count--) {
+                       block = find_first_bit(bitmap, DATA_BLOCK_BITS);
+                       clear_bit(block, bitmap);
+               }
+
+               data_sg = se_cmd->t_bidi_data_sg;
+               data_nents = se_cmd->t_bidi_data_nents;
+       }
 
        for_each_sg(data_sg, sg, data_nents, i) {
                int sg_remaining = sg->length;
                to = kmap_atomic(sg_page(sg)) + sg->offset;
                while (sg_remaining > 0) {
                        if (block_remaining == 0) {
-                               block = find_first_bit(cmd_bitmap,
+                               block = find_first_bit(bitmap,
                                                DATA_BLOCK_BITS);
                                block_remaining = DATA_BLOCK_SIZE;
-                               clear_bit(block, cmd_bitmap);
+                               clear_bit(block, bitmap);
                        }
                        copy_bytes = min_t(size_t, sg_remaining,
                                        block_remaining);
@@ -394,6 +420,27 @@ static bool is_ring_space_avail(struct tcmu_dev *udev, size_t cmd_size, size_t d
        return true;
 }
 
+static inline size_t tcmu_cmd_get_data_length(struct tcmu_cmd *tcmu_cmd)
+{
+       struct se_cmd *se_cmd = tcmu_cmd->se_cmd;
+       size_t data_length = round_up(se_cmd->data_length, DATA_BLOCK_SIZE);
+
+       if (se_cmd->se_cmd_flags & SCF_BIDI) {
+               BUG_ON(!(se_cmd->t_bidi_data_sg && se_cmd->t_bidi_data_nents));
+               data_length += round_up(se_cmd->t_bidi_data_sg->length,
+                               DATA_BLOCK_SIZE);
+       }
+
+       return data_length;
+}
+
+static inline uint32_t tcmu_cmd_get_block_cnt(struct tcmu_cmd *tcmu_cmd)
+{
+       size_t data_length = tcmu_cmd_get_data_length(tcmu_cmd);
+
+       return data_length / DATA_BLOCK_SIZE;
+}
+
 static sense_reason_t
 tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
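
The helpers above make the ring-space accounting block-based rather than
SG-entry-based. Worked numbers, assuming DATA_BLOCK_SIZE is 4096 as elsewhere
in this file:

    /* Worked example (illustrative values):
     *   se_cmd->data_length        = 3000 -> round_up -> 4096
     *   t_bidi_data_sg[0].length   = 6000 -> round_up -> 8192
     *   tcmu_cmd_get_data_length() = 4096 + 8192 = 12288
     *   tcmu_cmd_get_block_cnt()   = 12288 / 4096 = 3 blocks,
     *   so base_command_size reserves 3 iovecs instead of
     *   t_data_nents + t_bidi_data_nents entries.
     */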
 {
@@ -407,7 +454,7 @@ tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
        uint32_t cmd_head;
        uint64_t cdb_off;
        bool copy_to_data_area;
-       size_t data_length;
+       size_t data_length = tcmu_cmd_get_data_length(tcmu_cmd);
        DECLARE_BITMAP(old_bitmap, DATA_BLOCK_BITS);
 
        if (test_bit(TCMU_DEV_BIT_BROKEN, &udev->flags))
@@ -421,8 +468,7 @@ tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
         * expensive to tell how many regions are freed in the bitmap
         */
        base_command_size = max(offsetof(struct tcmu_cmd_entry,
-                               req.iov[se_cmd->t_bidi_data_nents +
-                                       se_cmd->t_data_nents]),
+                               req.iov[tcmu_cmd_get_block_cnt(tcmu_cmd)]),
                                sizeof(struct tcmu_cmd_entry));
        command_size = base_command_size
                + round_up(scsi_command_size(se_cmd->t_task_cdb), TCMU_OP_ALIGN_SIZE);
@@ -433,11 +479,6 @@ tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
 
        mb = udev->mb_addr;
        cmd_head = mb->cmd_head % udev->cmdr_size; /* UAM */
-       data_length = se_cmd->data_length;
-       if (se_cmd->se_cmd_flags & SCF_BIDI) {
-               BUG_ON(!(se_cmd->t_bidi_data_sg && se_cmd->t_bidi_data_nents));
-               data_length += se_cmd->t_bidi_data_sg->length;
-       }
        if ((command_size > (udev->cmdr_size / 2)) ||
            data_length > udev->data_size) {
                pr_warn("TCMU: Request of size %zu/%zu is too big for %u/%zu "
@@ -511,11 +552,14 @@ tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
        entry->req.iov_dif_cnt = 0;
 
        /* Handle BIDI commands */
-       iov_cnt = 0;
-       alloc_and_scatter_data_area(udev, se_cmd->t_bidi_data_sg,
-               se_cmd->t_bidi_data_nents, &iov, &iov_cnt, false);
-       entry->req.iov_bidi_cnt = iov_cnt;
-
+       if (se_cmd->se_cmd_flags & SCF_BIDI) {
+               iov_cnt = 0;
+               iov++;
+               alloc_and_scatter_data_area(udev, se_cmd->t_bidi_data_sg,
+                               se_cmd->t_bidi_data_nents, &iov, &iov_cnt,
+                               false);
+               entry->req.iov_bidi_cnt = iov_cnt;
+       }
        /* cmd's data_bitmap is what changed in process */
        bitmap_xor(tcmu_cmd->data_bitmap, old_bitmap, udev->data_bitmap,
                        DATA_BLOCK_BITS);
@@ -592,19 +636,11 @@ static void tcmu_handle_completion(struct tcmu_cmd *cmd, struct tcmu_cmd_entry *
                               se_cmd->scsi_sense_length);
                free_data_area(udev, cmd);
        } else if (se_cmd->se_cmd_flags & SCF_BIDI) {
-               DECLARE_BITMAP(bitmap, DATA_BLOCK_BITS);
-
                /* Get Data-In buffer before clean up */
-               bitmap_copy(bitmap, cmd->data_bitmap, DATA_BLOCK_BITS);
-               gather_data_area(udev, bitmap,
-                       se_cmd->t_bidi_data_sg, se_cmd->t_bidi_data_nents);
+               gather_data_area(udev, cmd, true);
                free_data_area(udev, cmd);
        } else if (se_cmd->data_direction == DMA_FROM_DEVICE) {
-               DECLARE_BITMAP(bitmap, DATA_BLOCK_BITS);
-
-               bitmap_copy(bitmap, cmd->data_bitmap, DATA_BLOCK_BITS);
-               gather_data_area(udev, bitmap,
-                       se_cmd->t_data_sg, se_cmd->t_data_nents);
+               gather_data_area(udev, cmd, false);
                free_data_area(udev, cmd);
        } else if (se_cmd->data_direction == DMA_TO_DEVICE) {
                free_data_area(udev, cmd);
@@ -1196,11 +1232,6 @@ static ssize_t tcmu_cmd_time_out_store(struct config_item *item, const char *pag
        if (ret < 0)
                return ret;
 
-       if (!val) {
-               pr_err("Illegal value for cmd_time_out\n");
-               return -EINVAL;
-       }
-
        udev->cmd_time_out = val * MSEC_PER_SEC;
        return count;
 }
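
The two helpers above size everything in whole DATA_BLOCK_SIZE units: each direction of the transfer is rounded up separately, and the iov array is sized from the resulting block count rather than from the scatterlist entry counts. A standalone sketch of the same arithmetic, assuming a hypothetical 4096-byte block size (the driver defines its own constant):

	#include <stddef.h>
	#include <stdint.h>

	#define DATA_BLOCK_SIZE 4096	/* assumed; target_core_user.c defines its own */

	/* Round a byte count up to whole data blocks, as tcmu_cmd_get_data_length() does. */
	static size_t round_up_blocks(size_t len)
	{
		return (len + DATA_BLOCK_SIZE - 1) / DATA_BLOCK_SIZE * DATA_BLOCK_SIZE;
	}

	/* Worst-case iov slots for a (possibly BIDI) command: one per data block. */
	static uint32_t block_cnt(size_t data_len, size_t bidi_len)
	{
		size_t total = round_up_blocks(data_len);

		if (bidi_len)	/* SCF_BIDI: Data-Out plus Data-In areas */
			total += round_up_blocks(bidi_len);
		return total / DATA_BLOCK_SIZE;
	}
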
index d2351139342f6200209078e769e04f5ea1eb2d1f..a82e2bd5ea34d97996cb79ba5b72d09aed86e09a 100644 (file)
@@ -373,7 +373,7 @@ static void bot_cleanup_old_alt(struct f_uas *fu)
        usb_ep_free_request(fu->ep_in, fu->bot_req_in);
        usb_ep_free_request(fu->ep_out, fu->bot_req_out);
        usb_ep_free_request(fu->ep_out, fu->cmd.req);
-       usb_ep_free_request(fu->ep_out, fu->bot_status.req);
+       usb_ep_free_request(fu->ep_in, fu->bot_status.req);
 
        kfree(fu->cmd.buf);
 
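
The one-line fix above matters because the gadget API requires a request to be freed on the endpoint it was allocated from; bot_status.req is allocated on ep_in, so freeing it on ep_out was an API misuse. An illustrative fragment of the pairing rule (the function name is hypothetical; struct f_uas is the driver's own type):

	#include <linux/usb/gadget.h>

	/* Illustration only: a request must be freed on the endpoint that
	 * allocated it.  bot_status.req is an ep_in request. */
	static void example_status_req(struct f_uas *fu)
	{
		struct usb_request *req;

		req = usb_ep_alloc_request(fu->ep_in, GFP_KERNEL);
		if (!req)
			return;
		/* ... queue the IN status stage ... */
		usb_ep_free_request(fu->ep_in, req);	/* same endpoint as alloc */
	}
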
index 8c4dc1e1f94fdb53ad7acf2808fa400c03787857..b827a8113e26803d8caee69e4ea59d6ceae56ea0 100644 (file)
@@ -10,6 +10,7 @@
 #include <linux/efi.h>
 #include <linux/errno.h>
 #include <linux/fb.h>
+#include <linux/pci.h>
 #include <linux/platform_device.h>
 #include <linux/screen_info.h>
 #include <video/vga.h>
@@ -143,6 +144,8 @@ static struct attribute *efifb_attrs[] = {
 };
 ATTRIBUTE_GROUPS(efifb);
 
+static bool pci_dev_disabled;  /* FB base matches BAR of a disabled device */
+
 static int efifb_probe(struct platform_device *dev)
 {
        struct fb_info *info;
@@ -152,7 +155,7 @@ static int efifb_probe(struct platform_device *dev)
        unsigned int size_total;
        char *option = NULL;
 
-       if (screen_info.orig_video_isVGA != VIDEO_TYPE_EFI)
+       if (screen_info.orig_video_isVGA != VIDEO_TYPE_EFI || pci_dev_disabled)
                return -ENODEV;
 
        if (fb_get_options("efifb", &option))
@@ -360,3 +363,64 @@ static struct platform_driver efifb_driver = {
 };
 
 builtin_platform_driver(efifb_driver);
+
+#if defined(CONFIG_PCI) && !defined(CONFIG_X86)
+
+static bool pci_bar_found;     /* did we find a BAR matching the efifb base? */
+
+static void claim_efifb_bar(struct pci_dev *dev, int idx)
+{
+       u16 word;
+
+       pci_bar_found = true;
+
+       pci_read_config_word(dev, PCI_COMMAND, &word);
+       if (!(word & PCI_COMMAND_MEMORY)) {
+               pci_dev_disabled = true;
+               dev_err(&dev->dev,
+                       "BAR %d: assigned to efifb but device is disabled!\n",
+                       idx);
+               return;
+       }
+
+       if (pci_claim_resource(dev, idx)) {
+               pci_dev_disabled = true;
+               dev_err(&dev->dev,
+                       "BAR %d: failed to claim resource for efifb!\n", idx);
+               return;
+       }
+
+       dev_info(&dev->dev, "BAR %d: assigned to efifb\n", idx);
+}
+
+static void efifb_fixup_resources(struct pci_dev *dev)
+{
+       u64 base = screen_info.lfb_base;
+       u64 size = screen_info.lfb_size;
+       int i;
+
+       if (pci_bar_found || screen_info.orig_video_isVGA != VIDEO_TYPE_EFI)
+               return;
+
+       if (screen_info.capabilities & VIDEO_CAPABILITY_64BIT_BASE)
+               base |= (u64)screen_info.ext_lfb_base << 32;
+
+       if (!base)
+               return;
+
+       for (i = 0; i < PCI_STD_RESOURCE_END; i++) {
+               struct resource *res = &dev->resource[i];
+
+               if (!(res->flags & IORESOURCE_MEM))
+                       continue;
+
+               if (res->start <= base && res->end >= base + size - 1) {
+                       claim_efifb_bar(dev, i);
+                       break;
+               }
+       }
+}
+DECLARE_PCI_FIXUP_CLASS_HEADER(PCI_ANY_ID, PCI_ANY_ID, PCI_BASE_CLASS_DISPLAY,
+                              16, efifb_fixup_resources);
+
+#endif
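
The quirk only examines display-class devices, reconstructs the possibly 64-bit framebuffer base, and claims the first memory BAR that covers it. A minimal userspace-style sketch of just the interval test, under the assumption that the capability flag simply gates the high 32 bits:

	#include <stdbool.h>
	#include <stdint.h>

	/* Does the BAR [start, end] fully cover the EFI framebuffer? */
	static bool bar_covers_lfb(uint64_t start, uint64_t end,
				   uint64_t lfb_base, uint64_t lfb_size,
				   uint32_t ext_lfb_base, bool cap_64bit)
	{
		uint64_t base = lfb_base;

		if (cap_64bit)		/* VIDEO_CAPABILITY_64BIT_BASE */
			base |= (uint64_t)ext_lfb_base << 32;

		if (!base)
			return false;

		return start <= base && end >= base + lfb_size - 1;
	}
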
index 1abba07b84b3efd014b28bfd0ef61f3cad7cbeda..f4cbfb3b8a0980030e2a0c2bbd78348968d6b8a1 100644 (file)
@@ -1608,19 +1608,6 @@ static int omapfb_find_ctrl(struct omapfb_device *fbdev)
        return 0;
 }
 
-static void check_required_callbacks(struct omapfb_device *fbdev)
-{
-#define _C(x) (fbdev->ctrl->x != NULL)
-#define _P(x) (fbdev->panel->x != NULL)
-       BUG_ON(fbdev->ctrl == NULL || fbdev->panel == NULL);
-       BUG_ON(!(_C(init) && _C(cleanup) && _C(get_caps) &&
-                _C(set_update_mode) && _C(setup_plane) && _C(enable_plane) &&
-                _P(init) && _P(cleanup) && _P(enable) && _P(disable) &&
-                _P(get_caps)));
-#undef _P
-#undef _C
-}
-
 /*
  * Called by LDM binding to probe and attach a new device.
  * Initialization sequence:
@@ -1705,8 +1692,6 @@ static int omapfb_do_probe(struct platform_device *pdev,
                omapfb_ops.fb_mmap = omapfb_mmap;
        init_state++;
 
-       check_required_callbacks(fbdev);
-
        r = planes_init(fbdev);
        if (r)
                goto cleanup;
index bd017b57c47f8af4ff1558cedaa8589a5f0ce9ff..f599520374ddf575bba1236b81bec2c4c2d21c49 100644 (file)
@@ -578,10 +578,14 @@ static int ssd1307fb_probe(struct i2c_client *client,
 
        par->vbat_reg = devm_regulator_get_optional(&client->dev, "vbat");
        if (IS_ERR(par->vbat_reg)) {
-               dev_err(&client->dev, "failed to get VBAT regulator: %ld\n",
-                       PTR_ERR(par->vbat_reg));
                ret = PTR_ERR(par->vbat_reg);
-               goto fb_alloc_error;
+               if (ret == -ENODEV) {
+                       par->vbat_reg = NULL;
+               } else {
+                       dev_err(&client->dev, "failed to get VBAT regulator: %d\n",
+                               ret);
+                       goto fb_alloc_error;
+               }
        }
 
        if (of_property_read_u32(node, "solomon,width", &par->width))
@@ -668,10 +672,13 @@ static int ssd1307fb_probe(struct i2c_client *client,
                udelay(4);
        }
 
-       ret = regulator_enable(par->vbat_reg);
-       if (ret) {
-               dev_err(&client->dev, "failed to enable VBAT: %d\n", ret);
-               goto reset_oled_error;
+       if (par->vbat_reg) {
+               ret = regulator_enable(par->vbat_reg);
+               if (ret) {
+                       dev_err(&client->dev, "failed to enable VBAT: %d\n",
+                               ret);
+                       goto reset_oled_error;
+               }
        }
 
        ret = ssd1307fb_init(par);
@@ -710,7 +717,8 @@ panel_init_error:
                pwm_put(par->pwm);
        };
 regulator_enable_error:
-       regulator_disable(par->vbat_reg);
+       if (par->vbat_reg)
+               regulator_disable(par->vbat_reg);
 reset_oled_error:
        fb_deferred_io_cleanup(info);
 fb_alloc_error:
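
devm_regulator_get_optional() returns -ENODEV when the devicetree simply omits the supply, so the probe now records a NULL regulator for that case and guards every later enable/disable. A condensed restatement of the optional-regulator pattern used above:

	par->vbat_reg = devm_regulator_get_optional(&client->dev, "vbat");
	if (IS_ERR(par->vbat_reg)) {
		ret = PTR_ERR(par->vbat_reg);
		if (ret == -ENODEV)
			par->vbat_reg = NULL;	/* supply not described: optional */
		else
			return ret;		/* real failure, e.g. -EPROBE_DEFER */
	}

	if (par->vbat_reg) {
		ret = regulator_enable(par->vbat_reg);
		if (ret)
			return ret;
	}
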
index d0115a7af0a9ae0f3171a66bf716f0f21c3e91b1..3ee309c50b2d015fb3d463dd88f6b99253beea12 100644 (file)
@@ -643,7 +643,6 @@ static void xenfb_backend_changed(struct xenbus_device *dev,
                break;
 
        case XenbusStateInitWait:
-InitWait:
                xenbus_switch_state(dev, XenbusStateConnected);
                break;
 
@@ -654,7 +653,8 @@ InitWait:
                 * get Connected twice here.
                 */
                if (dev->state != XenbusStateConnected)
-                       goto InitWait; /* no InitWait seen yet, fudge it */
+                       /* no InitWait seen yet, fudge it */
+                       xenbus_switch_state(dev, XenbusStateConnected);
 
                if (xenbus_read_unsigned(info->xbdev->otherend,
                                         "request-update", 0))
index 400d70b6937948cc5a8aac698efc64fcf49c1dde..48230a5e12f262b67d28d87adc713f462e8ec5fc 100644 (file)
@@ -232,6 +232,12 @@ static int virtio_dev_probe(struct device *_d)
                if (device_features & (1ULL << i))
                        __virtio_set_bit(dev, i);
 
+       if (drv->validate) {
+               err = drv->validate(dev);
+               if (err)
+                       goto err;
+       }
+
        err = virtio_finalize_features(dev);
        if (err)
                goto err;
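
The new ->validate() hook runs after the device's feature bits have been read but before virtio_finalize_features(), so a driver can veto a device early. A hedged sketch of a driver wiring up the hook; the foo_* names are hypothetical:

	#include <linux/virtio.h>
	#include <linux/virtio_config.h>

	/* Hypothetical driver: refuse devices that did not offer VERSION_1. */
	static int foo_validate(struct virtio_device *vdev)
	{
		if (!virtio_has_feature(vdev, VIRTIO_F_VERSION_1))
			return -ENODEV;
		return 0;
	}

	static int foo_probe(struct virtio_device *vdev) { return 0; }
	static void foo_remove(struct virtio_device *vdev) { }

	static struct virtio_driver foo_driver = {
		.driver.name	= "foo",
		.validate	= foo_validate,	/* runs before finalize_features */
		.probe		= foo_probe,
		.remove		= foo_remove,
	};
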
index 590534910dc617836e18c91b6576410a0299de26..698d5d06fa039ca1a27b151a3dcf5d322784e3f0 100644 (file)
@@ -33,8 +33,10 @@ void vp_synchronize_vectors(struct virtio_device *vdev)
        struct virtio_pci_device *vp_dev = to_vp_device(vdev);
        int i;
 
-       synchronize_irq(pci_irq_vector(vp_dev->pci_dev, 0));
-       for (i = 1; i < vp_dev->msix_vectors; i++)
+       if (vp_dev->intx_enabled)
+               synchronize_irq(vp_dev->pci_dev->irq);
+
+       for (i = 0; i < vp_dev->msix_vectors; ++i)
                synchronize_irq(pci_irq_vector(vp_dev->pci_dev, i));
 }
 
@@ -60,13 +62,16 @@ static irqreturn_t vp_config_changed(int irq, void *opaque)
 static irqreturn_t vp_vring_interrupt(int irq, void *opaque)
 {
        struct virtio_pci_device *vp_dev = opaque;
+       struct virtio_pci_vq_info *info;
        irqreturn_t ret = IRQ_NONE;
-       struct virtqueue *vq;
+       unsigned long flags;
 
-       list_for_each_entry(vq, &vp_dev->vdev.vqs, list) {
-               if (vq->callback && vring_interrupt(irq, vq) == IRQ_HANDLED)
+       spin_lock_irqsave(&vp_dev->lock, flags);
+       list_for_each_entry(info, &vp_dev->virtqueues, node) {
+               if (vring_interrupt(irq, info->vq) == IRQ_HANDLED)
                        ret = IRQ_HANDLED;
        }
+       spin_unlock_irqrestore(&vp_dev->lock, flags);
 
        return ret;
 }
@@ -97,186 +102,244 @@ static irqreturn_t vp_interrupt(int irq, void *opaque)
        return vp_vring_interrupt(irq, opaque);
 }
 
-static void vp_remove_vqs(struct virtio_device *vdev)
+static int vp_request_msix_vectors(struct virtio_device *vdev, int nvectors,
+                                  bool per_vq_vectors, struct irq_affinity *desc)
 {
        struct virtio_pci_device *vp_dev = to_vp_device(vdev);
-       struct virtqueue *vq, *n;
+       const char *name = dev_name(&vp_dev->vdev.dev);
+       unsigned i, v;
+       int err = -ENOMEM;
 
-       list_for_each_entry_safe(vq, n, &vdev->vqs, list) {
-               if (vp_dev->msix_vector_map) {
-                       int v = vp_dev->msix_vector_map[vq->index];
+       vp_dev->msix_vectors = nvectors;
 
-                       if (v != VIRTIO_MSI_NO_VECTOR)
-                               free_irq(pci_irq_vector(vp_dev->pci_dev, v),
-                                       vq);
-               }
-               vp_dev->del_vq(vq);
+       vp_dev->msix_names = kmalloc(nvectors * sizeof *vp_dev->msix_names,
+                                    GFP_KERNEL);
+       if (!vp_dev->msix_names)
+               goto error;
+       vp_dev->msix_affinity_masks
+               = kzalloc(nvectors * sizeof *vp_dev->msix_affinity_masks,
+                         GFP_KERNEL);
+       if (!vp_dev->msix_affinity_masks)
+               goto error;
+       for (i = 0; i < nvectors; ++i)
+               if (!alloc_cpumask_var(&vp_dev->msix_affinity_masks[i],
+                                       GFP_KERNEL))
+                       goto error;
+
+       err = pci_alloc_irq_vectors_affinity(vp_dev->pci_dev, nvectors,
+                                            nvectors, PCI_IRQ_MSIX |
+                                            (desc ? PCI_IRQ_AFFINITY : 0),
+                                            desc);
+       if (err < 0)
+               goto error;
+       vp_dev->msix_enabled = 1;
+
+       /* Set the vector used for configuration */
+       v = vp_dev->msix_used_vectors;
+       snprintf(vp_dev->msix_names[v], sizeof *vp_dev->msix_names,
+                "%s-config", name);
+       err = request_irq(pci_irq_vector(vp_dev->pci_dev, v),
+                         vp_config_changed, 0, vp_dev->msix_names[v],
+                         vp_dev);
+       if (err)
+               goto error;
+       ++vp_dev->msix_used_vectors;
+
+       v = vp_dev->config_vector(vp_dev, v);
+       /* Verify we had enough resources to assign the vector */
+       if (v == VIRTIO_MSI_NO_VECTOR) {
+               err = -EBUSY;
+               goto error;
        }
+
+       if (!per_vq_vectors) {
+               /* Shared vector for all VQs */
+               v = vp_dev->msix_used_vectors;
+               snprintf(vp_dev->msix_names[v], sizeof *vp_dev->msix_names,
+                        "%s-virtqueues", name);
+               err = request_irq(pci_irq_vector(vp_dev->pci_dev, v),
+                                 vp_vring_interrupt, 0, vp_dev->msix_names[v],
+                                 vp_dev);
+               if (err)
+                       goto error;
+               ++vp_dev->msix_used_vectors;
+       }
+       return 0;
+error:
+       return err;
+}
+
+static struct virtqueue *vp_setup_vq(struct virtio_device *vdev, unsigned index,
+                                    void (*callback)(struct virtqueue *vq),
+                                    const char *name,
+                                    u16 msix_vec)
+{
+       struct virtio_pci_device *vp_dev = to_vp_device(vdev);
+       struct virtio_pci_vq_info *info = kmalloc(sizeof *info, GFP_KERNEL);
+       struct virtqueue *vq;
+       unsigned long flags;
+
+       /* fill out our structure that represents an active queue */
+       if (!info)
+               return ERR_PTR(-ENOMEM);
+
+       vq = vp_dev->setup_vq(vp_dev, info, index, callback, name,
+                             msix_vec);
+       if (IS_ERR(vq))
+               goto out_info;
+
+       info->vq = vq;
+       if (callback) {
+               spin_lock_irqsave(&vp_dev->lock, flags);
+               list_add(&info->node, &vp_dev->virtqueues);
+               spin_unlock_irqrestore(&vp_dev->lock, flags);
+       } else {
+               INIT_LIST_HEAD(&info->node);
+       }
+
+       vp_dev->vqs[index] = info;
+       return vq;
+
+out_info:
+       kfree(info);
+       return vq;
+}
+
+static void vp_del_vq(struct virtqueue *vq)
+{
+       struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev);
+       struct virtio_pci_vq_info *info = vp_dev->vqs[vq->index];
+       unsigned long flags;
+
+       spin_lock_irqsave(&vp_dev->lock, flags);
+       list_del(&info->node);
+       spin_unlock_irqrestore(&vp_dev->lock, flags);
+
+       vp_dev->del_vq(info);
+       kfree(info);
 }
 
 /* the config->del_vqs() implementation */
 void vp_del_vqs(struct virtio_device *vdev)
 {
        struct virtio_pci_device *vp_dev = to_vp_device(vdev);
+       struct virtqueue *vq, *n;
        int i;
 
-       if (WARN_ON_ONCE(list_empty_careful(&vdev->vqs)))
-               return;
+       list_for_each_entry_safe(vq, n, &vdev->vqs, list) {
+               if (vp_dev->per_vq_vectors) {
+                       int v = vp_dev->vqs[vq->index]->msix_vector;
 
-       vp_remove_vqs(vdev);
+                       if (v != VIRTIO_MSI_NO_VECTOR) {
+                               int irq = pci_irq_vector(vp_dev->pci_dev, v);
+
+                               irq_set_affinity_hint(irq, NULL);
+                               free_irq(irq, vq);
+                       }
+               }
+               vp_del_vq(vq);
+       }
+       vp_dev->per_vq_vectors = false;
+
+       if (vp_dev->intx_enabled) {
+               free_irq(vp_dev->pci_dev->irq, vp_dev);
+               vp_dev->intx_enabled = 0;
+       }
 
-       if (vp_dev->pci_dev->msix_enabled) {
-               for (i = 0; i < vp_dev->msix_vectors; i++)
+       for (i = 0; i < vp_dev->msix_used_vectors; ++i)
+               free_irq(pci_irq_vector(vp_dev->pci_dev, i), vp_dev);
+
+       for (i = 0; i < vp_dev->msix_vectors; i++)
+               if (vp_dev->msix_affinity_masks[i])
                        free_cpumask_var(vp_dev->msix_affinity_masks[i]);
 
+       if (vp_dev->msix_enabled) {
                /* Disable the vector used for configuration */
                vp_dev->config_vector(vp_dev, VIRTIO_MSI_NO_VECTOR);
 
-               kfree(vp_dev->msix_affinity_masks);
-               kfree(vp_dev->msix_names);
-               kfree(vp_dev->msix_vector_map);
+               pci_free_irq_vectors(vp_dev->pci_dev);
+               vp_dev->msix_enabled = 0;
        }
 
-       free_irq(pci_irq_vector(vp_dev->pci_dev, 0), vp_dev);
-       pci_free_irq_vectors(vp_dev->pci_dev);
+       vp_dev->msix_vectors = 0;
+       vp_dev->msix_used_vectors = 0;
+       kfree(vp_dev->msix_names);
+       vp_dev->msix_names = NULL;
+       kfree(vp_dev->msix_affinity_masks);
+       vp_dev->msix_affinity_masks = NULL;
+       kfree(vp_dev->vqs);
+       vp_dev->vqs = NULL;
 }
 
 static int vp_find_vqs_msix(struct virtio_device *vdev, unsigned nvqs,
                struct virtqueue *vqs[], vq_callback_t *callbacks[],
-               const char * const names[], struct irq_affinity *desc)
+               const char * const names[], bool per_vq_vectors,
+               struct irq_affinity *desc)
 {
        struct virtio_pci_device *vp_dev = to_vp_device(vdev);
-       const char *name = dev_name(&vp_dev->vdev.dev);
-       int i, j, err = -ENOMEM, allocated_vectors, nvectors;
-       unsigned flags = PCI_IRQ_MSIX;
-       bool shared = false;
        u16 msix_vec;
+       int i, err, nvectors, allocated_vectors;
 
-       if (desc) {
-               flags |= PCI_IRQ_AFFINITY;
-               desc->pre_vectors++; /* virtio config vector */
-       }
-
-       nvectors = 1;
-       for (i = 0; i < nvqs; i++)
-               if (callbacks[i])
-                       nvectors++;
-
-       /* Try one vector per queue first. */
-       err = pci_alloc_irq_vectors_affinity(vp_dev->pci_dev, nvectors,
-                       nvectors, flags, desc);
-       if (err < 0) {
-               /* Fallback to one vector for config, one shared for queues. */
-               shared = true;
-               err = pci_alloc_irq_vectors(vp_dev->pci_dev, 2, 2,
-                               PCI_IRQ_MSIX);
-               if (err < 0)
-                       return err;
-       }
-       if (err < 0)
-               return err;
-
-       vp_dev->msix_vectors = nvectors;
-       vp_dev->msix_names = kmalloc_array(nvectors,
-                       sizeof(*vp_dev->msix_names), GFP_KERNEL);
-       if (!vp_dev->msix_names)
-               goto out_free_irq_vectors;
-
-       vp_dev->msix_affinity_masks = kcalloc(nvectors,
-                       sizeof(*vp_dev->msix_affinity_masks), GFP_KERNEL);
-       if (!vp_dev->msix_affinity_masks)
-               goto out_free_msix_names;
+       vp_dev->vqs = kcalloc(nvqs, sizeof(*vp_dev->vqs), GFP_KERNEL);
+       if (!vp_dev->vqs)
+               return -ENOMEM;
 
-       for (i = 0; i < nvectors; ++i) {
-               if (!alloc_cpumask_var(&vp_dev->msix_affinity_masks[i],
-                               GFP_KERNEL))
-                       goto out_free_msix_affinity_masks;
+       if (per_vq_vectors) {
+               /* Best option: one for change interrupt, one per vq. */
+               nvectors = 1;
+               for (i = 0; i < nvqs; ++i)
+                       if (callbacks[i])
+                               ++nvectors;
+       } else {
+               /* Second best: one for change, shared for all vqs. */
+               nvectors = 2;
        }
 
-       /* Set the vector used for configuration */
-       snprintf(vp_dev->msix_names[0], sizeof(*vp_dev->msix_names),
-                "%s-config", name);
-       err = request_irq(pci_irq_vector(vp_dev->pci_dev, 0), vp_config_changed,
-                       0, vp_dev->msix_names[0], vp_dev);
+       err = vp_request_msix_vectors(vdev, nvectors, per_vq_vectors,
+                                     per_vq_vectors ? desc : NULL);
        if (err)
-               goto out_free_msix_affinity_masks;
+               goto error_find;
 
-       /* Verify we had enough resources to assign the vector */
-       if (vp_dev->config_vector(vp_dev, 0) == VIRTIO_MSI_NO_VECTOR) {
-               err = -EBUSY;
-               goto out_free_config_irq;
-       }
-
-       vp_dev->msix_vector_map = kmalloc_array(nvqs,
-                       sizeof(*vp_dev->msix_vector_map), GFP_KERNEL);
-       if (!vp_dev->msix_vector_map)
-               goto out_disable_config_irq;
-
-       allocated_vectors = j = 1; /* vector 0 is the config interrupt */
+       vp_dev->per_vq_vectors = per_vq_vectors;
+       allocated_vectors = vp_dev->msix_used_vectors;
        for (i = 0; i < nvqs; ++i) {
                if (!names[i]) {
                        vqs[i] = NULL;
                        continue;
                }
 
-               if (callbacks[i])
-                       msix_vec = allocated_vectors;
-               else
+               if (!callbacks[i])
                        msix_vec = VIRTIO_MSI_NO_VECTOR;
-
-               vqs[i] = vp_dev->setup_vq(vp_dev, i, callbacks[i], names[i],
-                               msix_vec);
+               else if (vp_dev->per_vq_vectors)
+                       msix_vec = allocated_vectors++;
+               else
+                       msix_vec = VP_MSIX_VQ_VECTOR;
+               vqs[i] = vp_setup_vq(vdev, i, callbacks[i], names[i],
+                                    msix_vec);
                if (IS_ERR(vqs[i])) {
                        err = PTR_ERR(vqs[i]);
-                       goto out_remove_vqs;
+                       goto error_find;
                }
 
-               if (msix_vec == VIRTIO_MSI_NO_VECTOR) {
-                       vp_dev->msix_vector_map[i] = VIRTIO_MSI_NO_VECTOR;
+               if (!vp_dev->per_vq_vectors || msix_vec == VIRTIO_MSI_NO_VECTOR)
                        continue;
-               }
 
-               snprintf(vp_dev->msix_names[j],
-                        sizeof(*vp_dev->msix_names), "%s-%s",
+               /* allocate per-vq irq if available and necessary */
+               snprintf(vp_dev->msix_names[msix_vec],
+                        sizeof *vp_dev->msix_names,
+                        "%s-%s",
                         dev_name(&vp_dev->vdev.dev), names[i]);
                err = request_irq(pci_irq_vector(vp_dev->pci_dev, msix_vec),
-                                 vring_interrupt, IRQF_SHARED,
-                                 vp_dev->msix_names[j], vqs[i]);
-               if (err) {
-                       /* don't free this irq on error */
-                       vp_dev->msix_vector_map[i] = VIRTIO_MSI_NO_VECTOR;
-                       goto out_remove_vqs;
-               }
-               vp_dev->msix_vector_map[i] = msix_vec;
-               j++;
-
-               /*
-                * Use a different vector for each queue if they are available,
-                * else share the same vector for all VQs.
-                */
-               if (!shared)
-                       allocated_vectors++;
+                                 vring_interrupt, 0,
+                                 vp_dev->msix_names[msix_vec],
+                                 vqs[i]);
+               if (err)
+                       goto error_find;
        }
-
        return 0;
 
-out_remove_vqs:
-       vp_remove_vqs(vdev);
-       kfree(vp_dev->msix_vector_map);
-out_disable_config_irq:
-       vp_dev->config_vector(vp_dev, VIRTIO_MSI_NO_VECTOR);
-out_free_config_irq:
-       free_irq(pci_irq_vector(vp_dev->pci_dev, 0), vp_dev);
-out_free_msix_affinity_masks:
-       for (i = 0; i < nvectors; i++) {
-               if (vp_dev->msix_affinity_masks[i])
-                       free_cpumask_var(vp_dev->msix_affinity_masks[i]);
-       }
-       kfree(vp_dev->msix_affinity_masks);
-out_free_msix_names:
-       kfree(vp_dev->msix_names);
-out_free_irq_vectors:
-       pci_free_irq_vectors(vp_dev->pci_dev);
+error_find:
+       vp_del_vqs(vdev);
        return err;
 }
 
@@ -287,29 +350,33 @@ static int vp_find_vqs_intx(struct virtio_device *vdev, unsigned nvqs,
        struct virtio_pci_device *vp_dev = to_vp_device(vdev);
        int i, err;
 
+       vp_dev->vqs = kcalloc(nvqs, sizeof(*vp_dev->vqs), GFP_KERNEL);
+       if (!vp_dev->vqs)
+               return -ENOMEM;
+
        err = request_irq(vp_dev->pci_dev->irq, vp_interrupt, IRQF_SHARED,
                        dev_name(&vdev->dev), vp_dev);
        if (err)
-               return err;
+               goto out_del_vqs;
 
+       vp_dev->intx_enabled = 1;
+       vp_dev->per_vq_vectors = false;
        for (i = 0; i < nvqs; ++i) {
                if (!names[i]) {
                        vqs[i] = NULL;
                        continue;
                }
-               vqs[i] = vp_dev->setup_vq(vp_dev, i, callbacks[i], names[i],
-                               VIRTIO_MSI_NO_VECTOR);
+               vqs[i] = vp_setup_vq(vdev, i, callbacks[i], names[i],
+                                    VIRTIO_MSI_NO_VECTOR);
                if (IS_ERR(vqs[i])) {
                        err = PTR_ERR(vqs[i]);
-                       goto out_remove_vqs;
+                       goto out_del_vqs;
                }
        }
 
        return 0;
-
-out_remove_vqs:
-       vp_remove_vqs(vdev);
-       free_irq(pci_irq_vector(vp_dev->pci_dev, 0), vp_dev);
+out_del_vqs:
+       vp_del_vqs(vdev);
        return err;
 }
 
@@ -320,9 +387,15 @@ int vp_find_vqs(struct virtio_device *vdev, unsigned nvqs,
 {
        int err;
 
-       err = vp_find_vqs_msix(vdev, nvqs, vqs, callbacks, names, desc);
+       /* Try MSI-X with one vector per queue. */
+       err = vp_find_vqs_msix(vdev, nvqs, vqs, callbacks, names, true, desc);
        if (!err)
                return 0;
+       /* Fallback: MSI-X with one vector for config, one shared for queues. */
+       err = vp_find_vqs_msix(vdev, nvqs, vqs, callbacks, names, false, desc);
+       if (!err)
+               return 0;
+       /* Finally fall back to regular interrupts. */
        return vp_find_vqs_intx(vdev, nvqs, vqs, callbacks, names);
 }
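
After this rewrite vp_find_vqs() encodes a three-tier strategy: per-queue MSI-X vectors, then one shared MSI-X vector for all queues, then legacy INTx. Callers are unaffected; a hypothetical driver still just asks for its queues, as in this sketch (foo_* names assumed):

	#include <linux/virtio.h>
	#include <linux/virtio_config.h>

	static void foo_recv_done(struct virtqueue *vq) { }

	/* Two queues; only the receive queue wants a callback.  The core now
	 * tries per-vq MSI-X, then a shared vector, then INTx on our behalf. */
	static int foo_init_vqs(struct virtio_device *vdev)
	{
		struct virtqueue *vqs[2];
		vq_callback_t *cbs[] = { NULL, foo_recv_done };
		static const char * const names[] = { "foo-ctrl", "foo-recv" };

		return virtio_find_vqs(vdev, 2, vqs, cbs, names, NULL);
	}
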
 
@@ -342,15 +415,16 @@ int vp_set_vq_affinity(struct virtqueue *vq, int cpu)
 {
        struct virtio_device *vdev = vq->vdev;
        struct virtio_pci_device *vp_dev = to_vp_device(vdev);
+       struct virtio_pci_vq_info *info = vp_dev->vqs[vq->index];
+       struct cpumask *mask;
+       unsigned int irq;
 
        if (!vq->callback)
                return -EINVAL;
 
-       if (vp_dev->pci_dev->msix_enabled) {
-               int vec = vp_dev->msix_vector_map[vq->index];
-               struct cpumask *mask = vp_dev->msix_affinity_masks[vec];
-               unsigned int irq = pci_irq_vector(vp_dev->pci_dev, vec);
-
+       if (vp_dev->msix_enabled) {
+               mask = vp_dev->msix_affinity_masks[info->msix_vector];
+               irq = pci_irq_vector(vp_dev->pci_dev, info->msix_vector);
                if (cpu == -1)
                        irq_set_affinity_hint(irq, NULL);
                else {
@@ -365,12 +439,13 @@ int vp_set_vq_affinity(struct virtqueue *vq, int cpu)
 const struct cpumask *vp_get_vq_affinity(struct virtio_device *vdev, int index)
 {
        struct virtio_pci_device *vp_dev = to_vp_device(vdev);
-       unsigned int *map = vp_dev->msix_vector_map;
 
-       if (!map || map[index] == VIRTIO_MSI_NO_VECTOR)
+       if (!vp_dev->per_vq_vectors ||
+           vp_dev->vqs[index]->msix_vector == VIRTIO_MSI_NO_VECTOR)
                return NULL;
 
-       return pci_irq_get_affinity(vp_dev->pci_dev, map[index]);
+       return pci_irq_get_affinity(vp_dev->pci_dev,
+                                   vp_dev->vqs[index]->msix_vector);
 }
 
 #ifdef CONFIG_PM_SLEEP
@@ -441,6 +516,8 @@ static int virtio_pci_probe(struct pci_dev *pci_dev,
        vp_dev->vdev.dev.parent = &pci_dev->dev;
        vp_dev->vdev.dev.release = virtio_pci_release_dev;
        vp_dev->pci_dev = pci_dev;
+       INIT_LIST_HEAD(&vp_dev->virtqueues);
+       spin_lock_init(&vp_dev->lock);
 
        /* enable the device */
        rc = pci_enable_device(pci_dev);
index ac8c9d7889646ab3cc28bb51accd0d3840d5a34f..e96334aec1e0d70842d1a9fc53462ab728be87c0 100644 (file)
 #include <linux/highmem.h>
 #include <linux/spinlock.h>
 
+struct virtio_pci_vq_info {
+       /* the actual virtqueue */
+       struct virtqueue *vq;
+
+       /* the list node for the virtqueues list */
+       struct list_head node;
+
+       /* MSI-X vector (or none) */
+       unsigned msix_vector;
+};
+
 /* Our device structure */
 struct virtio_pci_device {
        struct virtio_device vdev;
@@ -64,25 +75,47 @@ struct virtio_pci_device {
        /* the IO mapping for the PCI config space */
        void __iomem *ioaddr;
 
+       /* a list of queues so we can dispatch IRQs */
+       spinlock_t lock;
+       struct list_head virtqueues;
+
+       /* array of all queues for house-keeping */
+       struct virtio_pci_vq_info **vqs;
+
+       /* MSI-X support */
+       int msix_enabled;
+       int intx_enabled;
        cpumask_var_t *msix_affinity_masks;
        /* Name strings for interrupts. This size should be enough,
         * and I'm too lazy to allocate each name separately. */
        char (*msix_names)[256];
-       /* Total Number of MSI-X vectors (including per-VQ ones). */
-       int msix_vectors;
-       /* Map of per-VQ MSI-X vectors, may be NULL */
-       unsigned *msix_vector_map;
+       /* Number of available vectors */
+       unsigned msix_vectors;
+       /* Vectors allocated, excluding per-vq vectors if any */
+       unsigned msix_used_vectors;
+
+       /* Whether we have vector per vq */
+       bool per_vq_vectors;
 
        struct virtqueue *(*setup_vq)(struct virtio_pci_device *vp_dev,
+                                     struct virtio_pci_vq_info *info,
                                      unsigned idx,
                                      void (*callback)(struct virtqueue *vq),
                                      const char *name,
                                      u16 msix_vec);
-       void (*del_vq)(struct virtqueue *vq);
+       void (*del_vq)(struct virtio_pci_vq_info *info);
 
        u16 (*config_vector)(struct virtio_pci_device *vp_dev, u16 vector);
 };
 
+/* Constants for MSI-X */
+/* Use first vector for configuration changes, second and the rest for
+ * virtqueues. Thus, we need at least 2 vectors for MSI-X. */
+enum {
+       VP_MSIX_CONFIG_VECTOR = 0,
+       VP_MSIX_VQ_VECTOR = 1,
+};
+
 /* Convert a generic virtio device to our structure */
 static struct virtio_pci_device *to_vp_device(struct virtio_device *vdev)
 {
index f7362c5fe18a96a902bc81138b8ea796e03e79d9..4bfa48fb1324660f82ae6272d2e1ecc33522ba3e 100644 (file)
@@ -112,6 +112,7 @@ static u16 vp_config_vector(struct virtio_pci_device *vp_dev, u16 vector)
 }
 
 static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev,
+                                 struct virtio_pci_vq_info *info,
                                  unsigned index,
                                  void (*callback)(struct virtqueue *vq),
                                  const char *name,
@@ -129,6 +130,8 @@ static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev,
        if (!num || ioread32(vp_dev->ioaddr + VIRTIO_PCI_QUEUE_PFN))
                return ERR_PTR(-ENOENT);
 
+       info->msix_vector = msix_vec;
+
        /* create the vring */
        vq = vring_create_virtqueue(index, num,
                                    VIRTIO_PCI_VRING_ALIGN, &vp_dev->vdev,
@@ -159,13 +162,14 @@ out_deactivate:
        return ERR_PTR(err);
 }
 
-static void del_vq(struct virtqueue *vq)
+static void del_vq(struct virtio_pci_vq_info *info)
 {
+       struct virtqueue *vq = info->vq;
        struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev);
 
        iowrite16(vq->index, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_SEL);
 
-       if (vp_dev->pci_dev->msix_enabled) {
+       if (vp_dev->msix_enabled) {
                iowrite16(VIRTIO_MSI_NO_VECTOR,
                          vp_dev->ioaddr + VIRTIO_MSI_QUEUE_VECTOR);
                /* Flush the write out to device */
index 7bc3004b840ef3e3dabb5c2e24af8a935f552eba..8978f109d2d79828e5b0c12649debc481dfacd7f 100644 (file)
@@ -293,6 +293,7 @@ static u16 vp_config_vector(struct virtio_pci_device *vp_dev, u16 vector)
 }
 
 static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev,
+                                 struct virtio_pci_vq_info *info,
                                  unsigned index,
                                  void (*callback)(struct virtqueue *vq),
                                  const char *name,
@@ -322,6 +323,8 @@ static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev,
        /* get offset of notification word for this vq */
        off = vp_ioread16(&cfg->queue_notify_off);
 
+       info->msix_vector = msix_vec;
+
        /* create the vring */
        vq = vring_create_virtqueue(index, num,
                                    SMP_CACHE_BYTES, &vp_dev->vdev,
@@ -405,13 +408,14 @@ static int vp_modern_find_vqs(struct virtio_device *vdev, unsigned nvqs,
        return 0;
 }
 
-static void del_vq(struct virtqueue *vq)
+static void del_vq(struct virtio_pci_vq_info *info)
 {
+       struct virtqueue *vq = info->vq;
        struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev);
 
        vp_iowrite16(vq->index, &vp_dev->common->queue_select);
 
-       if (vp_dev->pci_dev->msix_enabled) {
+       if (vp_dev->msix_enabled) {
                vp_iowrite16(VIRTIO_MSI_NO_VECTOR,
                             &vp_dev->common->queue_msix_vector);
                /* Flush the write out to device */
index a18510be76c141e5d4b4d687c2eb5498cc273c56..5e71f1ea3391b034dc8e6f55f62d82dbe76e9811 100644 (file)
@@ -7910,7 +7910,6 @@ struct btrfs_retry_complete {
 static void btrfs_retry_endio_nocsum(struct bio *bio)
 {
        struct btrfs_retry_complete *done = bio->bi_private;
-       struct inode *inode;
        struct bio_vec *bvec;
        int i;
 
@@ -7918,12 +7917,12 @@ static void btrfs_retry_endio_nocsum(struct bio *bio)
                goto end;
 
        ASSERT(bio->bi_vcnt == 1);
-       inode = bio->bi_io_vec->bv_page->mapping->host;
-       ASSERT(bio->bi_io_vec->bv_len == btrfs_inode_sectorsize(inode));
+       ASSERT(bio->bi_io_vec->bv_len == btrfs_inode_sectorsize(done->inode));
 
        done->uptodate = 1;
        bio_for_each_segment_all(bvec, bio, i)
-       clean_io_failure(BTRFS_I(done->inode), done->start, bvec->bv_page, 0);
+               clean_io_failure(BTRFS_I(done->inode), done->start,
+                                bvec->bv_page, 0);
 end:
        complete(&done->done);
        bio_put(bio);
@@ -7973,8 +7972,10 @@ next_block_or_try_again:
 
                start += sectorsize;
 
-               if (nr_sectors--) {
+               nr_sectors--;
+               if (nr_sectors) {
                        pgoff += sectorsize;
+                       ASSERT(pgoff < PAGE_SIZE);
                        goto next_block_or_try_again;
                }
        }
@@ -7986,9 +7987,7 @@ static void btrfs_retry_endio(struct bio *bio)
 {
        struct btrfs_retry_complete *done = bio->bi_private;
        struct btrfs_io_bio *io_bio = btrfs_io_bio(bio);
-       struct inode *inode;
        struct bio_vec *bvec;
-       u64 start;
        int uptodate;
        int ret;
        int i;
@@ -7998,11 +7997,8 @@ static void btrfs_retry_endio(struct bio *bio)
 
        uptodate = 1;
 
-       start = done->start;
-
        ASSERT(bio->bi_vcnt == 1);
-       inode = bio->bi_io_vec->bv_page->mapping->host;
-       ASSERT(bio->bi_io_vec->bv_len == btrfs_inode_sectorsize(inode));
+       ASSERT(bio->bi_io_vec->bv_len == btrfs_inode_sectorsize(done->inode));
 
        bio_for_each_segment_all(bvec, bio, i) {
                ret = __readpage_endio_check(done->inode, io_bio, i,
@@ -8080,8 +8076,10 @@ next:
 
                ASSERT(nr_sectors);
 
-               if (--nr_sectors) {
+               nr_sectors--;
+               if (nr_sectors) {
                        pgoff += sectorsize;
+                       ASSERT(pgoff < PAGE_SIZE);
                        goto next_block;
                }
        }
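
Both hunks replace a decrement folded into the test with an explicit decrement followed by a check, and add a page-offset assertion. The first form, if (nr_sectors--), was a genuine off-by-one: it tested the old value, so the loop ran one extra time. A tiny standalone program demonstrating the difference:

	#include <stdio.h>

	int main(void)
	{
		int a = 1, b = 1;

		if (a--)	/* tests 1, then decrements: branch taken */
			printf("post-decrement loops again (a now %d)\n", a);

		b--;
		if (b)		/* decrement first, then test: branch not taken */
			printf("never printed\n");
		return 0;
	}
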
index da687dc79cce6155a278038a15775637c97ce3cc..9530a333d302c0c13c3e8463814b642a023afc23 100644 (file)
@@ -549,16 +549,19 @@ int btrfs_parse_options(struct btrfs_fs_info *info, char *options,
                case Opt_ssd:
                        btrfs_set_and_info(info, SSD,
                                           "use ssd allocation scheme");
+                       btrfs_clear_opt(info->mount_opt, NOSSD);
                        break;
                case Opt_ssd_spread:
                        btrfs_set_and_info(info, SSD_SPREAD,
                                           "use spread ssd allocation scheme");
                        btrfs_set_opt(info->mount_opt, SSD);
+                       btrfs_clear_opt(info->mount_opt, NOSSD);
                        break;
                case Opt_nossd:
                        btrfs_set_and_info(info, NOSSD,
                                             "not using ssd allocation scheme");
                        btrfs_clear_opt(info->mount_opt, SSD);
+                       btrfs_clear_opt(info->mount_opt, SSD_SPREAD);
                        break;
                case Opt_barrier:
                        btrfs_clear_and_info(info, NOBARRIER,
index 73d56eef5e60f311225b06ad7adccedeed54a0db..ab8a66d852f91cb04206361a551b8c57760c9c40 100644 (file)
@@ -6213,7 +6213,7 @@ int btrfs_map_bio(struct btrfs_fs_info *fs_info, struct bio *bio,
        for (dev_nr = 0; dev_nr < total_devs; dev_nr++) {
                dev = bbio->stripes[dev_nr].dev;
                if (!dev || !dev->bdev ||
-                   (bio_op(bio) == REQ_OP_WRITE && !dev->writeable)) {
+                   (bio_op(first_bio) == REQ_OP_WRITE && !dev->writeable)) {
                        bbio_error(bbio, first_bio, logical);
                        continue;
                }
index d07f13a63369df6a177a2236b9498cfefee3cb97..37f5a41cc50cc523cd76c790100398d4db9e80ca 100644 (file)
@@ -948,7 +948,6 @@ struct cifs_tcon {
        bool use_persistent:1; /* use persistent instead of durable handles */
 #ifdef CONFIG_CIFS_SMB2
        bool print:1;           /* set if connection to printer share */
-       bool bad_network_name:1; /* set if ret status STATUS_BAD_NETWORK_NAME */
        __le32 capabilities;
        __u32 share_flags;
        __u32 maximal_access;
index ec5e5e514fdd4a2556fa15974e869cdba5e3bd91..97e5d236d26559806ca8bc278f7c248e0579ef77 100644 (file)
@@ -79,8 +79,7 @@ extern void cifs_delete_mid(struct mid_q_entry *mid);
 extern void cifs_wake_up_task(struct mid_q_entry *mid);
 extern int cifs_handle_standard(struct TCP_Server_Info *server,
                                struct mid_q_entry *mid);
-extern int cifs_discard_remaining_data(struct TCP_Server_Info *server,
-                                      char *buf);
+extern int cifs_discard_remaining_data(struct TCP_Server_Info *server);
 extern int cifs_call_async(struct TCP_Server_Info *server,
                        struct smb_rqst *rqst,
                        mid_receive_t *receive, mid_callback_t *callback,
index 967b92631807a95786a264acf80833f0e0dbb147..5d21f00ae341b4ec002eb33dd79c7a16a1e1dfa4 100644 (file)
@@ -1400,9 +1400,9 @@ openRetry:
  * current bigbuf.
  */
 int
-cifs_discard_remaining_data(struct TCP_Server_Info *server, char *buf)
+cifs_discard_remaining_data(struct TCP_Server_Info *server)
 {
-       unsigned int rfclen = get_rfc1002_length(buf);
+       unsigned int rfclen = get_rfc1002_length(server->smallbuf);
        int remaining = rfclen + 4 - server->total_read;
 
        while (remaining > 0) {
@@ -1426,8 +1426,10 @@ cifs_readv_discard(struct TCP_Server_Info *server, struct mid_q_entry *mid)
        int length;
        struct cifs_readdata *rdata = mid->callback_data;
 
-       length = cifs_discard_remaining_data(server, mid->resp_buf);
+       length = cifs_discard_remaining_data(server);
        dequeue_mid(mid, rdata->result);
+       mid->resp_buf = server->smallbuf;
+       server->smallbuf = NULL;
        return length;
 }
 
@@ -1459,7 +1461,7 @@ cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid)
 
        if (server->ops->is_status_pending &&
            server->ops->is_status_pending(buf, server, 0)) {
-               cifs_discard_remaining_data(server, buf);
+               cifs_discard_remaining_data(server);
                return -1;
        }
 
@@ -1519,9 +1521,6 @@ cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid)
        cifs_dbg(FYI, "0: iov_base=%p iov_len=%u\n",
                 rdata->iov[0].iov_base, server->total_read);
 
-       mid->resp_buf = server->smallbuf;
-       server->smallbuf = NULL;
-
        /* how much data is in the response? */
        data_len = server->ops->read_data_length(buf);
        if (data_offset + data_len > buflen) {
@@ -1544,6 +1543,8 @@ cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid)
                return cifs_readv_discard(server, mid);
 
        dequeue_mid(mid, false);
+       mid->resp_buf = server->smallbuf;
+       server->smallbuf = NULL;
        return length;
 }
 
index 0c7596cef4b88639c50b3efdb86a511f349ac451..d82467cfb0e2df5ef7e655bc601f4f17f3b890b1 100644 (file)
@@ -3753,6 +3753,9 @@ try_mount_again:
        if (IS_ERR(tcon)) {
                rc = PTR_ERR(tcon);
                tcon = NULL;
+               if (rc == -EACCES)
+                       goto mount_fail_check;
+
                goto remote_path_check;
        }
 
index aa3debbba82648944a1e1426516abebb268b7ca0..21d4045357397de647b8fe0282ed1a436b7e3a7f 100644 (file)
@@ -2597,7 +2597,7 @@ cifs_write_from_iter(loff_t offset, size_t len, struct iov_iter *from,
                wdata->credits = credits;
 
                if (!wdata->cfile->invalidHandle ||
-                   !cifs_reopen_file(wdata->cfile, false))
+                   !(rc = cifs_reopen_file(wdata->cfile, false)))
                        rc = server->ops->async_writev(wdata,
                                        cifs_uncached_writedata_release);
                if (rc) {
@@ -3022,7 +3022,7 @@ cifs_send_async_read(loff_t offset, size_t len, struct cifsFileInfo *open_file,
                rdata->credits = credits;
 
                if (!rdata->cfile->invalidHandle ||
-                   !cifs_reopen_file(rdata->cfile, true))
+                   !(rc = cifs_reopen_file(rdata->cfile, true)))
                        rc = server->ops->async_readv(rdata);
 error:
                if (rc) {
@@ -3617,7 +3617,7 @@ static int cifs_readpages(struct file *file, struct address_space *mapping,
                }
 
                if (!rdata->cfile->invalidHandle ||
-                   !cifs_reopen_file(rdata->cfile, true))
+                   !(rc = cifs_reopen_file(rdata->cfile, true)))
                        rc = server->ops->async_readv(rdata);
                if (rc) {
                        add_credits_and_wake_if(server, rdata->credits, 0);
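
All three call sites previously threw away the return value of cifs_reopen_file(), so a failed reopen left rc at 0 and the error path was never taken. Capturing rc inside the condition propagates the failure; a runnable toy version of the pattern (names hypothetical):

	#include <stdio.h>

	static int reopen_fails(void) { return -1; }	/* stand-in for cifs_reopen_file() */
	static int send_io(void) { return 0; }

	int main(void)
	{
		int invalid = 1, rc = 0;

		if (!invalid || !(rc = reopen_fails()))
			rc = send_io();
		printf("rc = %d\n", rc);	/* -1: the failure now propagates */
		return 0;
	}
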
index 7b12a727947efc48df3ce8ea7698936d0702da21..152e37f2ad9213462a2ae439296a6edb49d653f9 100644 (file)
@@ -2195,7 +2195,7 @@ receive_encrypted_read(struct TCP_Server_Info *server, struct mid_q_entry **mid)
        if (rc)
                goto free_pages;
 
-       rc = cifs_discard_remaining_data(server, buf);
+       rc = cifs_discard_remaining_data(server);
        if (rc)
                goto free_pages;
 
@@ -2221,7 +2221,7 @@ free_pages:
        kfree(pages);
        return rc;
 discard_data:
-       cifs_discard_remaining_data(server, buf);
+       cifs_discard_remaining_data(server);
        goto free_pages;
 }
 
index 66fa1b941cdf02f9d837b077ed1f1de3a0a94a79..02da648041fcd58d6e37431a60d596a5bfd4ca06 100644 (file)
@@ -562,8 +562,10 @@ SMB2_negotiate(const unsigned int xid, struct cifs_ses *ses)
         * but for time being this is our only auth choice so doesn't matter.
         * We just found a server which sets blob length to zero expecting raw.
         */
-       if (blob_length == 0)
+       if (blob_length == 0) {
                cifs_dbg(FYI, "missing security blob on negprot\n");
+               server->sec_ntlmssp = true;
+       }
 
        rc = cifs_enable_signing(server, ses->sign);
        if (rc)
@@ -1171,9 +1173,6 @@ SMB2_tcon(const unsigned int xid, struct cifs_ses *ses, const char *tree,
        else
                return -EIO;
 
-       if (tcon && tcon->bad_network_name)
-               return -ENOENT;
-
        unc_path = kmalloc(MAX_SHARENAME_LENGTH * 2, GFP_KERNEL);
        if (unc_path == NULL)
                return -ENOMEM;
@@ -1277,8 +1276,6 @@ tcon_exit:
 tcon_error_exit:
        if (rsp->hdr.sync_hdr.Status == STATUS_BAD_NETWORK_NAME) {
                cifs_dbg(VFS, "BAD_NETWORK_NAME: %s\n", tree);
-               if (tcon)
-                       tcon->bad_network_name = true;
        }
        goto tcon_exit;
 }
@@ -2181,6 +2178,9 @@ void smb2_reconnect_server(struct work_struct *work)
        struct cifs_tcon *tcon, *tcon2;
        struct list_head tmp_list;
        int tcon_exist = false;
+       int rc;
+       int resched = false;
+
        /* Prevent simultaneous reconnects that can corrupt tcon->rlist list */
        mutex_lock(&server->reconnect_mutex);
@@ -2208,13 +2208,18 @@ void smb2_reconnect_server(struct work_struct *work)
        spin_unlock(&cifs_tcp_ses_lock);
 
        list_for_each_entry_safe(tcon, tcon2, &tmp_list, rlist) {
-               if (!smb2_reconnect(SMB2_INTERNAL_CMD, tcon))
+               rc = smb2_reconnect(SMB2_INTERNAL_CMD, tcon);
+               if (!rc)
                        cifs_reopen_persistent_handles(tcon);
+               else
+                       resched = true;
                list_del_init(&tcon->rlist);
                cifs_put_tcon(tcon);
        }
 
        cifs_dbg(FYI, "Reconnecting tcons finished\n");
+       if (resched)
+               queue_delayed_work(cifsiod_wq, &server->reconnect, 2 * HZ);
        mutex_unlock(&server->reconnect_mutex);
 
        /* now we can safely release srv struct */
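
Instead of abandoning a tcon whose reconnect failed, the loop now remembers the failure and requeues the delayed work two seconds out, so the whole batch is retried. A condensed restatement of the reschedule-on-partial-failure pattern:

	bool resched = false;

	list_for_each_entry_safe(tcon, tcon2, &tmp_list, rlist) {
		if (smb2_reconnect(SMB2_INTERNAL_CMD, tcon))
			resched = true;		/* remember the failure */
		else
			cifs_reopen_persistent_handles(tcon);
		list_del_init(&tcon->rlist);
		cifs_put_tcon(tcon);
	}

	if (resched)	/* try the whole batch again shortly */
		queue_delayed_work(cifsiod_wq, &server->reconnect, 2 * HZ);
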
index 7163fe014b57f4e15813c1969958d5764b18e5af..dde861387a407810b35f73ff37c3ce6389048326 100644 (file)
@@ -136,17 +136,26 @@ static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma)
        vma->vm_flags |= VM_HUGETLB | VM_DONTEXPAND;
        vma->vm_ops = &hugetlb_vm_ops;
 
+       /*
+        * Offset passed to mmap (before page shift) could have been
+        * negative when represented as a (l)off_t.
+        */
+       if (((loff_t)vma->vm_pgoff << PAGE_SHIFT) < 0)
+               return -EINVAL;
+
        if (vma->vm_pgoff & (~huge_page_mask(h) >> PAGE_SHIFT))
                return -EINVAL;
 
        vma_len = (loff_t)(vma->vm_end - vma->vm_start);
+       len = vma_len + ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
+       /* check for overflow */
+       if (len < vma_len)
+               return -EINVAL;
 
        inode_lock(inode);
        file_accessed(file);
 
        ret = -ENOMEM;
-       len = vma_len + ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
-
        if (hugetlb_reserve_pages(inode,
                                vma->vm_pgoff >> huge_page_order(h),
                                len >> huge_page_shift(h), vma,
@@ -155,7 +164,7 @@ static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma)
 
        ret = 0;
        if (vma->vm_flags & VM_WRITE && inode->i_size < len)
-               inode->i_size = len;
+               i_size_write(inode, len);
 out:
        inode_unlock(inode);
 
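
Two new checks: the byte offset derived from vm_pgoff must still be non-negative as a loff_t, and vma_len plus that offset must not wrap. A standalone sketch of the same arithmetic, assuming a 12-bit page shift for illustration:

	#include <stdbool.h>
	#include <stdint.h>

	#define PAGE_SHIFT 12	/* assumed for illustration */

	/* Mirror of the new checks: reject offsets that would be negative as a
	 * loff_t, and sums that wrap. */
	static bool mmap_len_ok(uint64_t vm_pgoff, uint64_t vma_len, uint64_t *len_out)
	{
		int64_t off = (int64_t)(vm_pgoff << PAGE_SHIFT);
		uint64_t len;

		if (off < 0)
			return false;	/* offset went negative */

		len = vma_len + (uint64_t)off;
		if (len < vma_len)
			return false;	/* addition wrapped */

		*len_out = len;
		return true;
	}
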
index f08bd31c1081cc0536602db9d865f4bf491440c9..312578089544dbd652aac303d0cbabee8fbcb968 100644 (file)
@@ -900,7 +900,14 @@ static inline void clear_soft_dirty(struct vm_area_struct *vma,
 static inline void clear_soft_dirty_pmd(struct vm_area_struct *vma,
                unsigned long addr, pmd_t *pmdp)
 {
-       pmd_t pmd = pmdp_huge_get_and_clear(vma->vm_mm, addr, pmdp);
+       pmd_t pmd = *pmdp;
+
+       /* See comment in change_huge_pmd() */
+       pmdp_invalidate(vma, addr, pmdp);
+       if (pmd_dirty(*pmdp))
+               pmd = pmd_mkdirty(pmd);
+       if (pmd_young(*pmdp))
+               pmd = pmd_mkyoung(pmd);
 
        pmd = pmd_wrprotect(pmd);
        pmd = pmd_clear_soft_dirty(pmd);
index f6b43fbb141c9ad03c1e05880e8c2228743d076e..af9c86e958bdad3be90cfb5f19aad99ede457339 100644 (file)
@@ -570,6 +570,25 @@ static inline void pr_cont_cgroup_path(struct cgroup *cgrp)
        pr_cont_kernfs_path(cgrp->kn);
 }
 
+static inline void cgroup_init_kthreadd(void)
+{
+       /*
+        * kthreadd is inherited by all kthreads, keep it in the root so
+        * that the new kthreads are guaranteed to stay in the root until
+        * initialization is finished.
+        */
+       current->no_cgroup_migration = 1;
+}
+
+static inline void cgroup_kthread_ready(void)
+{
+       /*
+        * This kthread finished initialization.  The creator should have
+        * set PF_NO_SETAFFINITY if this kthread should stay in the root.
+        */
+       current->no_cgroup_migration = 0;
+}
+
 #else /* !CONFIG_CGROUPS */
 
 struct cgroup_subsys_state;
@@ -590,6 +609,8 @@ static inline void cgroup_free(struct task_struct *p) {}
 
 static inline int cgroup_init_early(void) { return 0; }
 static inline int cgroup_init(void) { return 0; }
+static inline void cgroup_init_kthreadd(void) {}
+static inline void cgroup_kthread_ready(void) {}
 
 static inline bool task_under_cgroup_hierarchy(struct task_struct *task,
                                               struct cgroup *ancestor)
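
The helpers are meant to bracket kthread startup: kthreadd marks itself, and therefore every kthread it forks, as non-migratable, and each kthread clears the flag once its own initialization is done. An abridged sketch of the assumed pairing; the function bodies here are placeholders, not the real implementations in kernel/kthread.c:

	/* Abridged, assumed pairing; real bodies live in kernel/kthread.c. */
	int kthreadd(void *unused)
	{
		cgroup_init_kthreadd();	/* children start out non-migratable */

		for (;;)
			schedule();	/* placeholder for the real main loop */
	}

	static int kthread(void *data)
	{
		/* ... per-thread setup ... */
		cgroup_kthread_ready();	/* init done: migration allowed again */
		return 0;		/* placeholder */
	}
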
index 51891fb0d3ce075e9343495e54de6d8d03211ac9..c91b3bcd158f8fa8b2dbc3d4738ade5fdcc600cb 100644 (file)
@@ -394,18 +394,6 @@ static inline void mmu_notifier_mm_destroy(struct mm_struct *mm)
        ___pud;                                                         \
 })
 
-#define pmdp_huge_get_and_clear_notify(__mm, __haddr, __pmd)           \
-({                                                                     \
-       unsigned long ___haddr = __haddr & HPAGE_PMD_MASK;              \
-       pmd_t ___pmd;                                                   \
-                                                                       \
-       ___pmd = pmdp_huge_get_and_clear(__mm, __haddr, __pmd);         \
-       mmu_notifier_invalidate_range(__mm, ___haddr,                   \
-                                     ___haddr + HPAGE_PMD_SIZE);       \
-                                                                       \
-       ___pmd;                                                         \
-})
-
 /*
  * set_pte_at_notify() sets the pte _after_ running the notifier.
  * This is safe to start by updating the secondary MMUs, because the primary MMU
@@ -489,7 +477,6 @@ static inline void mmu_notifier_mm_destroy(struct mm_struct *mm)
 #define        ptep_clear_flush_notify ptep_clear_flush
 #define pmdp_huge_clear_flush_notify pmdp_huge_clear_flush
 #define pudp_huge_clear_flush_notify pudp_huge_clear_flush
-#define pmdp_huge_get_and_clear_notify pmdp_huge_get_and_clear
 #define set_pte_at_notify set_pte_at
 
 #endif /* CONFIG_MMU_NOTIFIER */
index d67eee84fd430f3c44b77d4ba007ec5d2dcabb2b..4cf9a59a4d08ed181f30d7cefa56db92f48408d2 100644 (file)
@@ -604,6 +604,10 @@ struct task_struct {
 #ifdef CONFIG_COMPAT_BRK
        unsigned                        brk_randomized:1;
 #endif
+#ifdef CONFIG_CGROUPS
+       /* disallow userland-initiated cgroup migration */
+       unsigned                        no_cgroup_migration:1;
+#endif
 
        unsigned long                   atomic_flags; /* Flags requiring atomic access. */
 
index 04b0d3f95043c66856c6a6f4cab078c4791d35e8..7edfbdb55a995d436bf9e999ce202d0ca0bf2550 100644 (file)
@@ -167,6 +167,7 @@ struct virtio_driver {
        unsigned int feature_table_size;
        const unsigned int *feature_table_legacy;
        unsigned int feature_table_size_legacy;
+       int (*validate)(struct virtio_device *dev);
        int (*probe)(struct virtio_device *dev);
        void (*scan)(struct virtio_device *dev);
        void (*remove)(struct virtio_device *dev);
index 4b784b6e21c0d9cb533b31997883d7dd447343bf..ccfad0e9c2cdbd68f13c809c7ed6414b2c0c97c1 100644 (file)
@@ -117,6 +117,7 @@ enum transport_state_table {
        TRANSPORT_ISTATE_PROCESSING = 11,
        TRANSPORT_COMPLETE_QF_WP = 18,
        TRANSPORT_COMPLETE_QF_OK = 19,
+       TRANSPORT_COMPLETE_QF_ERR = 20,
 };
 
 /* Used for struct se_cmd->se_cmd_flags */
@@ -279,8 +280,6 @@ struct t10_alua_tg_pt_gp {
        u16     tg_pt_gp_id;
        int     tg_pt_gp_valid_id;
        int     tg_pt_gp_alua_supported_states;
-       int     tg_pt_gp_alua_pending_state;
-       int     tg_pt_gp_alua_previous_state;
        int     tg_pt_gp_alua_access_status;
        int     tg_pt_gp_alua_access_type;
        int     tg_pt_gp_nonop_delay_msecs;
@@ -289,18 +288,16 @@ struct t10_alua_tg_pt_gp {
        int     tg_pt_gp_pref;
        int     tg_pt_gp_write_metadata;
        u32     tg_pt_gp_members;
-       atomic_t tg_pt_gp_alua_access_state;
+       int     tg_pt_gp_alua_access_state;
        atomic_t tg_pt_gp_ref_cnt;
        spinlock_t tg_pt_gp_lock;
-       struct mutex tg_pt_gp_md_mutex;
+       struct mutex tg_pt_gp_transition_mutex;
        struct se_device *tg_pt_gp_dev;
        struct config_group tg_pt_gp_group;
        struct list_head tg_pt_gp_list;
        struct list_head tg_pt_gp_lun_list;
        struct se_lun *tg_pt_gp_alua_lun;
        struct se_node_acl *tg_pt_gp_alua_nacl;
-       struct work_struct tg_pt_gp_transition_work;
-       struct completion *tg_pt_gp_transition_complete;
 };
 
 struct t10_vpd {
@@ -705,6 +702,7 @@ struct se_lun {
        u64                     unpacked_lun;
 #define SE_LUN_LINK_MAGIC                      0xffff7771
        u32                     lun_link_magic;
+       bool                    lun_shutdown;
        bool                    lun_access_ro;
        u32                     lun_index;
 
index 15b4385a2be169e9b99221f31a444820feadae3d..90007a1abcab144ac3d6ac7d6e6f4001d58abb14 100644 (file)
@@ -79,7 +79,7 @@
  * configuration space */
 #define VIRTIO_PCI_CONFIG_OFF(msix_enabled)    ((msix_enabled) ? 24 : 20)
 /* Deprecated: please use VIRTIO_PCI_CONFIG_OFF instead */
-#define VIRTIO_PCI_CONFIG(dev) VIRTIO_PCI_CONFIG_OFF((dev)->pci_dev->msix_enabled)
+#define VIRTIO_PCI_CONFIG(dev) VIRTIO_PCI_CONFIG_OFF((dev)->msix_enabled)
 
 /* Virtio ABI version, this must match exactly */
 #define VIRTIO_PCI_ABI_VERSION         0
index 2f4964cfde0b4f142778c199e606dc3d519ebbf0..a871bf80fde1adc79040936475b7045971e72d76 100644 (file)
@@ -160,7 +160,6 @@ static LIST_HEAD(audit_freelist);
 
 /* queue msgs to send via kauditd_task */
 static struct sk_buff_head audit_queue;
-static void kauditd_hold_skb(struct sk_buff *skb);
 /* queue msgs due to temporary unicast send problems */
 static struct sk_buff_head audit_retry_queue;
 /* queue msgs waiting for new auditd connection */
@@ -453,30 +452,6 @@ static void auditd_set(int pid, u32 portid, struct net *net)
        spin_unlock_irqrestore(&auditd_conn.lock, flags);
 }
 
-/**
- * auditd_reset - Disconnect the auditd connection
- *
- * Description:
- * Break the auditd/kauditd connection and move all the queued records into the
- * hold queue in case auditd reconnects.
- */
-static void auditd_reset(void)
-{
-       struct sk_buff *skb;
-
-       /* if it isn't already broken, break the connection */
-       rcu_read_lock();
-       if (auditd_conn.pid)
-               auditd_set(0, 0, NULL);
-       rcu_read_unlock();
-
-       /* flush all of the main and retry queues to the hold queue */
-       while ((skb = skb_dequeue(&audit_retry_queue)))
-               kauditd_hold_skb(skb);
-       while ((skb = skb_dequeue(&audit_queue)))
-               kauditd_hold_skb(skb);
-}
-
 /**
  * kauditd_print_skb - Print the audit record to the ring buffer
  * @skb: audit record
@@ -505,9 +480,6 @@ static void kauditd_rehold_skb(struct sk_buff *skb)
 {
        /* put the record back in the queue at the same place */
        skb_queue_head(&audit_hold_queue, skb);
-
-       /* fail the auditd connection */
-       auditd_reset();
 }
 
 /**
@@ -544,9 +516,6 @@ static void kauditd_hold_skb(struct sk_buff *skb)
        /* we have no other options - drop the message */
        audit_log_lost("kauditd hold queue overflow");
        kfree_skb(skb);
-
-       /* fail the auditd connection */
-       auditd_reset();
 }
 
 /**
@@ -566,6 +535,30 @@ static void kauditd_retry_skb(struct sk_buff *skb)
        skb_queue_tail(&audit_retry_queue, skb);
 }
 
+/**
+ * auditd_reset - Disconnect the auditd connection
+ *
+ * Description:
+ * Break the auditd/kauditd connection and move all the queued records into the
+ * hold queue in case auditd reconnects.
+ */
+static void auditd_reset(void)
+{
+       struct sk_buff *skb;
+
+       /* if it isn't already broken, break the connection */
+       rcu_read_lock();
+       if (auditd_conn.pid)
+               auditd_set(0, 0, NULL);
+       rcu_read_unlock();
+
+       /* flush all of the main and retry queues to the hold queue */
+       while ((skb = skb_dequeue(&audit_retry_queue)))
+               kauditd_hold_skb(skb);
+       while ((skb = skb_dequeue(&audit_queue)))
+               kauditd_hold_skb(skb);
+}
+
 /**
  * auditd_send_unicast_skb - Send a record via unicast to auditd
  * @skb: audit record
@@ -758,6 +751,7 @@ static int kauditd_thread(void *dummy)
                                        NULL, kauditd_rehold_skb);
                if (rc < 0) {
                        sk = NULL;
+                       auditd_reset();
                        goto main_queue;
                }
 
@@ -767,6 +761,7 @@ static int kauditd_thread(void *dummy)
                                        NULL, kauditd_hold_skb);
                if (rc < 0) {
                        sk = NULL;
+                       auditd_reset();
                        goto main_queue;
                }
 
@@ -775,16 +770,18 @@ main_queue:
                 * unicast, dump failed record sends to the retry queue; if
                 * sk == NULL due to previous failures we will just do the
                 * multicast send and move the record to the retry queue */
-               kauditd_send_queue(sk, portid, &audit_queue, 1,
-                                  kauditd_send_multicast_skb,
-                                  kauditd_retry_skb);
+               rc = kauditd_send_queue(sk, portid, &audit_queue, 1,
+                                       kauditd_send_multicast_skb,
+                                       kauditd_retry_skb);
+               if (sk == NULL || rc < 0)
+                       auditd_reset();
+               sk = NULL;
 
                /* drop our netns reference, no auditd sends past this line */
                if (net) {
                        put_net(net);
                        net = NULL;
                }
-               sk = NULL;
 
                /* we have processed all the queues so wake everyone */
                wake_up(&audit_backlog_wait);
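Moving auditd_reset() out of the queue callbacks matters because auditd_reset() itself drains the retry and main queues through kauditd_hold_skb(); a reset fired from inside kauditd_hold_skb() or kauditd_rehold_skb() could therefore re-enter the flush path. After this change the callbacks only queue or drop records, and kauditd_thread() decides at most once per pass to fail the connection. Condensed from the hunks above:

        /* error handling is hoisted to the thread loop */
        rc = kauditd_send_queue(sk, portid, &audit_queue, 1,
                                kauditd_send_multicast_skb, kauditd_retry_skb);
        if (sk == NULL || rc < 0)
                auditd_reset();  /* the only place the connection is failed */
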
index 1dc22f6b49f5e06c4af22222dfb1b32c885ce16a..12e19f0636ea8366b190ab2ef41a0de84e0829f5 100644 (file)
@@ -1146,7 +1146,7 @@ struct dentry *cgroup1_mount(struct file_system_type *fs_type, int flags,
                 * path is super cold.  Let's just sleep a bit and retry.
                 */
                pinned_sb = kernfs_pin_sb(root->kf_root, NULL);
-               if (IS_ERR(pinned_sb) ||
+               if (IS_ERR_OR_NULL(pinned_sb) ||
                    !percpu_ref_tryget_live(&root->cgrp.self.refcnt)) {
                        mutex_unlock(&cgroup_mutex);
                        if (!IS_ERR_OR_NULL(pinned_sb))
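kernfs_pin_sb() reports failure two ways: an ERR_PTR() when pinning fails and, per its kerneldoc, NULL when the root has no superblock at all. The old IS_ERR()-only test let the NULL case continue as if a superblock had been pinned; IS_ERR_OR_NULL() folds both into the retry path, mirroring the check already guarding deactivate_super() two lines below. For reference, the helper from <linux/err.h> is roughly:

        static inline bool __must_check IS_ERR_OR_NULL(__force const void *ptr)
        {
                return unlikely(!ptr) || IS_ERR_VALUE((unsigned long)ptr);
        }
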
index 48851327a15e18e8ba151a3a45c5126c5023ddb8..687f5e0194efccbadce199c0570cd08b8c96181a 100644 (file)
@@ -2425,11 +2425,12 @@ ssize_t __cgroup_procs_write(struct kernfs_open_file *of, char *buf,
                tsk = tsk->group_leader;
 
        /*
-        * Workqueue threads may acquire PF_NO_SETAFFINITY and become
-        * trapped in a cpuset, or RT worker may be born in a cgroup
-        * with no rt_runtime allocated.  Just say no.
+        * kthreads may acquire PF_NO_SETAFFINITY during initialization.
+        * If userland migrates such a kthread to a non-root cgroup, it can
+        * become trapped in a cpuset, or an RT kthread may be born in a
+        * cgroup with no rt_runtime allocated.  Just say no.
         */
-       if (tsk == kthreadd_task || (tsk->flags & PF_NO_SETAFFINITY)) {
+       if (tsk->no_cgroup_migration || (tsk->flags & PF_NO_SETAFFINITY)) {
                ret = -EINVAL;
                goto out_unlock_rcu;
        }
index 4544b115f5eb85d4b01ec966f933f191cd2cae1e..d052947fe78507ac80bbe58a09cc612bca7bab73 100644 (file)
@@ -59,7 +59,7 @@ static int get_nodes_in_cpumask(const struct cpumask *mask, nodemask_t *nodemsk)
 struct cpumask *
 irq_create_affinity_masks(int nvecs, const struct irq_affinity *affd)
 {
-       int n, nodes, vecs_per_node, cpus_per_vec, extra_vecs, curvec;
+       int n, nodes, cpus_per_vec, extra_vecs, curvec;
        int affv = nvecs - affd->pre_vectors - affd->post_vectors;
        int last_affv = affv + affd->pre_vectors;
        nodemask_t nodemsk = NODE_MASK_NONE;
@@ -94,19 +94,21 @@ irq_create_affinity_masks(int nvecs, const struct irq_affinity *affd)
                goto done;
        }
 
-       /* Spread the vectors per node */
-       vecs_per_node = affv / nodes;
-       /* Account for rounding errors */
-       extra_vecs = affv - (nodes * vecs_per_node);
-
        for_each_node_mask(n, nodemsk) {
-               int ncpus, v, vecs_to_assign = vecs_per_node;
+               int ncpus, v, vecs_to_assign, vecs_per_node;
+
+               /* Spread the vectors per node */
+               vecs_per_node = (affv - curvec) / nodes;
 
                /* Get the cpus on this node which are in the mask */
                cpumask_and(nmsk, cpu_online_mask, cpumask_of_node(n));
 
                /* Calculate the number of cpus per vector */
                ncpus = cpumask_weight(nmsk);
+               vecs_to_assign = min(vecs_per_node, ncpus);
+
+               /* Account for rounding errors */
+               extra_vecs = ncpus - vecs_to_assign * (ncpus / vecs_to_assign);
 
                for (v = 0; curvec < last_affv && v < vecs_to_assign;
                     curvec++, v++) {
@@ -115,14 +117,14 @@ irq_create_affinity_masks(int nvecs, const struct irq_affinity *affd)
                        /* Account for extra vectors to compensate rounding errors */
                        if (extra_vecs) {
                                cpus_per_vec++;
-                               if (!--extra_vecs)
-                                       vecs_per_node++;
+                               --extra_vecs;
                        }
                        irq_spread_init_one(masks + curvec, nmsk, cpus_per_vec);
                }
 
                if (curvec >= last_affv)
                        break;
+               --nodes;
        }
 
 done:
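The rewrite recomputes the per-node share from what is still unassigned, caps it at the node's online CPU count, and decrements nodes after each pass, so CPU-poor nodes no longer strand vectors. A worked example under assumed counts (affv = 8 vectors, 3 nodes with 2, 2 and 16 online CPUs, pre_vectors = 0):

        node 0: vecs_per_node = (8 - 0) / 3 = 2, ncpus = 2  -> assign min(2, 2)  = 2
        node 1: vecs_per_node = (8 - 2) / 2 = 3, ncpus = 2  -> assign min(3, 2)  = 2
        node 2: vecs_per_node = (8 - 4) / 1 = 4, ncpus = 16 -> assign min(4, 16) = 4

All 8 vectors land. The old fixed vecs_per_node = 8 / 3 = 2 would have assigned 2 to every node, including the 16-CPU one, and left 2 vectors unused.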
index 2f26adea0f84d21f4dae6d1ffdbbf94a73f40c67..26db528c1d881bf371ea5b53b7ade0815c990bf1 100644 (file)
@@ -20,6 +20,7 @@
 #include <linux/freezer.h>
 #include <linux/ptrace.h>
 #include <linux/uaccess.h>
+#include <linux/cgroup.h>
 #include <trace/events/sched.h>
 
 static DEFINE_SPINLOCK(kthread_create_lock);
@@ -225,6 +226,7 @@ static int kthread(void *_create)
 
        ret = -EINTR;
        if (!test_bit(KTHREAD_SHOULD_STOP, &self->flags)) {
+               cgroup_kthread_ready();
                __kthread_parkme(self);
                ret = threadfn(data);
        }
@@ -538,6 +540,7 @@ int kthreadd(void *unused)
        set_mems_allowed(node_states[N_MEMORY]);
 
        current->flags |= PF_NOFREEZE;
+       cgroup_init_kthreadd();
 
        for (;;) {
                set_current_state(TASK_INTERRUPTIBLE);
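The two calls added above bracket a kthread's early life: kthreadd marks itself, and therefore every kthread it forks, as non-migratable, and each kthread clears the mark once its creator has had a chance to set PF_NO_SETAFFINITY. The helpers live in include/linux/cgroup.h and are essentially flag flips on the new task_struct bit tested in __cgroup_procs_write() above (a sketch, not verbatim):

        static inline void cgroup_init_kthreadd(void)
        {
                /* kthreadd is inherited by all kthreads; keep them in the
                 * root cgroup until initialization finishes */
                current->no_cgroup_migration = 1;
        }

        static inline void cgroup_kthread_ready(void)
        {
                /* init done; PF_NO_SETAFFINITY now covers any kthread
                 * that must stay put */
                current->no_cgroup_migration = 0;
        }
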
index fef4cf210cc7f0df1889a01532bb32215d154e60..f3c4f9d22821f889104340332eee93c5e124df4d 100644 (file)
@@ -1568,8 +1568,7 @@ bool madvise_free_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
                deactivate_page(page);
 
        if (pmd_young(orig_pmd) || pmd_dirty(orig_pmd)) {
-               orig_pmd = pmdp_huge_get_and_clear_full(tlb->mm, addr, pmd,
-                       tlb->fullmm);
+               pmdp_invalidate(vma, addr, pmd);
                orig_pmd = pmd_mkold(orig_pmd);
                orig_pmd = pmd_mkclean(orig_pmd);
 
@@ -1724,37 +1723,69 @@ int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
 {
        struct mm_struct *mm = vma->vm_mm;
        spinlock_t *ptl;
-       int ret = 0;
+       pmd_t entry;
+       bool preserve_write;
+       int ret;
 
        ptl = __pmd_trans_huge_lock(pmd, vma);
-       if (ptl) {
-               pmd_t entry;
-               bool preserve_write = prot_numa && pmd_write(*pmd);
-               ret = 1;
+       if (!ptl)
+               return 0;
 
-               /*
-                * Avoid trapping faults against the zero page. The read-only
-                * data is likely to be read-cached on the local CPU and
-                * local/remote hits to the zero page are not interesting.
-                */
-               if (prot_numa && is_huge_zero_pmd(*pmd)) {
-                       spin_unlock(ptl);
-                       return ret;
-               }
+       preserve_write = prot_numa && pmd_write(*pmd);
+       ret = 1;
 
-               if (!prot_numa || !pmd_protnone(*pmd)) {
-                       entry = pmdp_huge_get_and_clear_notify(mm, addr, pmd);
-                       entry = pmd_modify(entry, newprot);
-                       if (preserve_write)
-                               entry = pmd_mk_savedwrite(entry);
-                       ret = HPAGE_PMD_NR;
-                       set_pmd_at(mm, addr, pmd, entry);
-                       BUG_ON(vma_is_anonymous(vma) && !preserve_write &&
-                                       pmd_write(entry));
-               }
-               spin_unlock(ptl);
-       }
+       /*
+        * Avoid trapping faults against the zero page. The read-only
+        * data is likely to be read-cached on the local CPU and
+        * local/remote hits to the zero page are not interesting.
+        */
+       if (prot_numa && is_huge_zero_pmd(*pmd))
+               goto unlock;
+
+       if (prot_numa && pmd_protnone(*pmd))
+               goto unlock;
+
+       /*
+        * In case prot_numa, we are under down_read(mmap_sem). It's critical
+        * to not clear pmd intermittently to avoid race with MADV_DONTNEED
+        * which is also under down_read(mmap_sem):
+        *
+        *      CPU0:                           CPU1:
+        *                              change_huge_pmd(prot_numa=1)
+        *                               pmdp_huge_get_and_clear_notify()
+        * madvise_dontneed()
+        *  zap_pmd_range()
+        *   pmd_trans_huge(*pmd) == 0 (without ptl)
+        *   // skip the pmd
+        *                               set_pmd_at();
+        *                               // pmd is re-established
+        *
+        * The race makes MADV_DONTNEED miss the huge pmd and not clear it,
+        * which may break userspace.
+        *
+        * pmdp_invalidate() is required to make sure we don't miss
+        * dirty/young flags set by hardware.
+        */
+       entry = *pmd;
+       pmdp_invalidate(vma, addr, pmd);
+
+       /*
+        * Recover dirty/young flags.  It relies on pmdp_invalidate to not
+        * corrupt them.
+        */
+       if (pmd_dirty(*pmd))
+               entry = pmd_mkdirty(entry);
+       if (pmd_young(*pmd))
+               entry = pmd_mkyoung(entry);
 
+       entry = pmd_modify(entry, newprot);
+       if (preserve_write)
+               entry = pmd_mk_savedwrite(entry);
+       ret = HPAGE_PMD_NR;
+       set_pmd_at(mm, addr, pmd, entry);
+       BUG_ON(vma_is_anonymous(vma) && !preserve_write && pmd_write(entry));
+unlock:
+       spin_unlock(ptl);
        return ret;
 }
 
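For contrast, the same interleaving with the fix applied: pmdp_invalidate() leaves a non-none (and still pmd_trans_huge()) entry in place, so the lockless check in zap_pmd_range() no longer skips the pmd and MADV_DONTNEED simply serializes on the page-table lock (a sketch; arch details such as x86 keeping _PAGE_PSE set in the invalidated entry are assumptions):

         *      CPU0:                           CPU1:
         *                              change_huge_pmd(prot_numa=1)
         *                               pmdp_invalidate()  // pmd stays non-none
         * madvise_dontneed()
         *  zap_pmd_range()
         *   pmd_trans_huge(*pmd) != 0  // not skipped; waits on ptl
         *                               set_pmd_at();
         *                               spin_unlock(ptl);
         *   // zap proceeds and clears the huge pmd as expected
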
index f9492bccfd794a1983eabbc4bff32df35b31cea8..54f63c4a809ae123248200ee84642629c6db8ffc 100644 (file)
@@ -185,6 +185,12 @@ static inline void z3fold_page_lock(struct z3fold_header *zhdr)
        spin_lock(&zhdr->page_lock);
 }
 
+/* Try to lock a z3fold page */
+static inline int z3fold_page_trylock(struct z3fold_header *zhdr)
+{
+       return spin_trylock(&zhdr->page_lock);
+}
+
 /* Unlock a z3fold page */
 static inline void z3fold_page_unlock(struct z3fold_header *zhdr)
 {
@@ -385,7 +391,7 @@ static int z3fold_alloc(struct z3fold_pool *pool, size_t size, gfp_t gfp,
                        spin_lock(&pool->lock);
                        zhdr = list_first_entry_or_null(&pool->unbuddied[i],
                                                struct z3fold_header, buddy);
-                       if (!zhdr) {
+                       if (!zhdr || !z3fold_page_trylock(zhdr)) {
                                spin_unlock(&pool->lock);
                                continue;
                        }
@@ -394,7 +400,6 @@ static int z3fold_alloc(struct z3fold_pool *pool, size_t size, gfp_t gfp,
                        spin_unlock(&pool->lock);
 
                        page = virt_to_page(zhdr);
-                       z3fold_page_lock(zhdr);
                        if (zhdr->first_chunks == 0) {
                                if (zhdr->middle_chunks != 0 &&
                                    chunks >= zhdr->start_middle)
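The point of the new trylock is ordering: the old code dropped pool->lock and only then took the page lock, leaving a window in which the header just plucked off unbuddied[] could be freed or reused. Taking the page lock by trylock while pool->lock is still held, and skipping the entry on contention, closes that window; the allocation path now reads roughly:

        spin_lock(&pool->lock);
        zhdr = list_first_entry_or_null(&pool->unbuddied[i],
                                        struct z3fold_header, buddy);
        if (!zhdr || !z3fold_page_trylock(zhdr)) {
                spin_unlock(&pool->lock);
                continue;       /* contended or empty: try the next class */
        }
        /* both locks held: safe to unlink, then drop the pool lock */
        list_del_init(&zhdr->buddy);
        spin_unlock(&pool->lock);
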
index b7ee9c34dbd678fc984db3295a70c117ee2fc2f2..d41edd28298b68ff335e6324df7e4e793a481d93 100644 (file)
@@ -276,7 +276,7 @@ struct zs_pool {
 struct zspage {
        struct {
                unsigned int fullness:FULLNESS_BITS;
-               unsigned int class:CLASS_BITS;
+               unsigned int class:CLASS_BITS + 1;
                unsigned int isolated:ISOLATED_BITS;
                unsigned int magic:MAGIC_VAL_BITS;
        };
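The extra bit exists because the class index can exceed 8 bits on large-page systems. With 64K pages and the 4.11-era constants (a sketch of the arithmetic, assuming ZS_MIN_ALLOC_SIZE = 32):

        /* ZS_SIZE_CLASS_DELTA = PAGE_SIZE >> CLASS_BITS = 65536 >> 8 = 256
         * nr_classes = DIV_ROUND_UP(ZS_MAX_ALLOC_SIZE - ZS_MIN_ALLOC_SIZE,
         *                           ZS_SIZE_CLASS_DELTA) + 1
         *            = DIV_ROUND_UP(65536 - 32, 256) + 1 = 256 + 1 = 257
         * 257 > 2^8 = 256, so an 8-bit class field wraps; CLASS_BITS + 1
         * gives 9 bits (512 values), which is enough. */
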
index 93b0aa74ca03bada7db24ae827b902ce09591bef..39c2c7d067bba55cae5cf19983128369af1bb81c 100644 (file)
@@ -156,6 +156,7 @@ out:
                                         */
                        case 0x2C:      /* Westmere EP - Gulftown */
                                cpu_info->caps |= CPUPOWER_CAP_HAS_TURBO_RATIO;
+                               break;
                        case 0x2A:      /* SNB */
                        case 0x2D:      /* SNB Xeon */
                        case 0x3A:      /* IVB */
index fedca32853262152cb7e1028139728c4aa677b11..ccf2a69365ccbb2a7cd27fa03089be54ce506c1a 100644 (file)
@@ -100,6 +100,8 @@ The system configuration dump (if --quiet is not used) is followed by statistics
 \fBCPU%c1, CPU%c3, CPU%c6, CPU%c7\fP show the percentage residency in hardware core idle states.  These numbers are from hardware residency counters.
 \fBCoreTmp\fP Degrees Celsius reported by the per-core Digital Thermal Sensor.
 \fBPkgTmp\fP Degrees Celsius reported by the per-package Package Thermal Monitor.
+\fBGFX%rc6\fP The percentage of time the GPU is in the "render C6" state, rc6, during the measurement interval. From /sys/class/drm/card0/power/rc6_residency_ms.
+\fBGFXMHz\fP Instantaneous snapshot of what sysfs presents at the end of the measurement interval. From /sys/class/graphics/fb0/device/drm/card0/gt_cur_freq_mhz.
 \fBPkg%pc2, Pkg%pc3, Pkg%pc6, Pkg%pc7\fP percentage residency in hardware package idle states.  These numbers are from hardware residency counters.
 \fBPkgWatt\fP Watts consumed by the whole package.
 \fBCorWatt\fP Watts consumed by the core part of the package.
index 828dccd3f01eaf324bf2d3d2c674a585417cd07a..b11294730771bed6766f77138460813f8abbfce8 100644 (file)
@@ -1142,7 +1142,7 @@ delta_thread(struct thread_data *new, struct thread_data *old,
                 * it is possible for mperf's non-halted cycles + idle states
                 * to exceed TSC's all cycles: show c1 = 0% in that case.
                 */
-               if ((old->mperf + core_delta->c3 + core_delta->c6 + core_delta->c7) > old->tsc)
+               if ((old->mperf + core_delta->c3 + core_delta->c6 + core_delta->c7) > (old->tsc * tsc_tweak))
                        old->c1 = 0;
                else {
                        /* normal case, derive c1 */
@@ -2485,8 +2485,10 @@ int snapshot_gfx_mhz(void)
 
        if (fp == NULL)
                fp = fopen_or_die("/sys/class/graphics/fb0/device/drm/card0/gt_cur_freq_mhz", "r");
-       else
+       else {
                rewind(fp);
+               fflush(fp);
+       }
 
        retval = fscanf(fp, "%d", &gfx_cur_mhz);
        if (retval != 1)
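rewind() alone repositions the stream but can leave stale bytes in the stdio buffer, so a sysfs attribute that changes between samples may be re-read from cache; following the rewind with fflush() discards the buffer (POSIX defines fflush() on seekable input streams; plain ISO C leaves it undefined, which is fine for the Linux-only turbostat). A self-contained userspace sketch of the idiom, with an illustrative wrapper name:

        #include <stdio.h>

        static int read_gfx_mhz(void)
        {
                static FILE *fp;
                int mhz;

                if (fp == NULL) {
                        fp = fopen("/sys/class/graphics/fb0/device/drm/card0/gt_cur_freq_mhz", "r");
                        if (fp == NULL)
                                return -1;
                } else {
                        rewind(fp);
                        fflush(fp);     /* drop stale buffered input */
                }
                if (fscanf(fp, "%d", &mhz) != 1)
                        return -1;
                return mhz;
        }
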
@@ -3111,7 +3113,7 @@ int print_hwp(struct thread_data *t, struct core_data *c, struct pkg_data *p)
                return 0;
 
        fprintf(outf, "cpu%d: MSR_HWP_CAPABILITIES: 0x%08llx "
-                       "(high 0x%x guar 0x%x eff 0x%x low 0x%x)\n",
+                       "(high %d guar %d eff %d low %d)\n",
                        cpu, msr,
                        (unsigned int)HWP_HIGHEST_PERF(msr),
                        (unsigned int)HWP_GUARANTEED_PERF(msr),
@@ -3122,7 +3124,7 @@ int print_hwp(struct thread_data *t, struct core_data *c, struct pkg_data *p)
                return 0;
 
        fprintf(outf, "cpu%d: MSR_HWP_REQUEST: 0x%08llx "
-                       "(min 0x%x max 0x%x des 0x%x epp 0x%x window 0x%x pkg 0x%x)\n",
+                       "(min %d max %d des %d epp 0x%x window 0x%x pkg 0x%x)\n",
                        cpu, msr,
                        (unsigned int)(((msr) >> 0) & 0xff),
                        (unsigned int)(((msr) >> 8) & 0xff),
@@ -3136,7 +3138,7 @@ int print_hwp(struct thread_data *t, struct core_data *c, struct pkg_data *p)
                        return 0;
 
                fprintf(outf, "cpu%d: MSR_HWP_REQUEST_PKG: 0x%08llx "
-                       "(min 0x%x max 0x%x des 0x%x epp 0x%x window 0x%x)\n",
+                       "(min %d max %d des %d epp 0x%x window 0x%x)\n",
                        cpu, msr,
                        (unsigned int)(((msr) >> 0) & 0xff),
                        (unsigned int)(((msr) >> 8) & 0xff),
@@ -3353,17 +3355,19 @@ void rapl_probe(unsigned int family, unsigned int model)
        case INTEL_FAM6_SKYLAKE_DESKTOP:        /* SKL */
        case INTEL_FAM6_KABYLAKE_MOBILE:        /* KBL */
        case INTEL_FAM6_KABYLAKE_DESKTOP:       /* KBL */
-               do_rapl = RAPL_PKG | RAPL_DRAM | RAPL_DRAM_PERF_STATUS | RAPL_PKG_PERF_STATUS | RAPL_PKG_POWER_INFO;
+               do_rapl = RAPL_PKG | RAPL_CORES | RAPL_CORE_POLICY | RAPL_DRAM | RAPL_DRAM_PERF_STATUS | RAPL_PKG_PERF_STATUS | RAPL_GFX | RAPL_PKG_POWER_INFO;
                BIC_PRESENT(BIC_PKG__);
                BIC_PRESENT(BIC_RAM__);
                if (rapl_joules) {
                        BIC_PRESENT(BIC_Pkg_J);
                        BIC_PRESENT(BIC_Cor_J);
                        BIC_PRESENT(BIC_RAM_J);
+                       BIC_PRESENT(BIC_GFX_J);
                } else {
                        BIC_PRESENT(BIC_PkgWatt);
                        BIC_PRESENT(BIC_CorWatt);
                        BIC_PRESENT(BIC_RAMWatt);
+                       BIC_PRESENT(BIC_GFXWatt);
                }
                break;
        case INTEL_FAM6_HASWELL_X:      /* HSX */
@@ -3478,7 +3482,7 @@ void perf_limit_reasons_probe(unsigned int family, unsigned int model)
 int print_thermal(struct thread_data *t, struct core_data *c, struct pkg_data *p)
 {
        unsigned long long msr;
-       unsigned int dts;
+       unsigned int dts, dts2;
        int cpu;
 
        if (!(do_dts || do_ptm))
@@ -3503,7 +3507,6 @@ int print_thermal(struct thread_data *t, struct core_data *c, struct pkg_data *p
                fprintf(outf, "cpu%d: MSR_IA32_PACKAGE_THERM_STATUS: 0x%08llx (%d C)\n",
                        cpu, msr, tcc_activation_temp - dts);
 
-#ifdef THERM_DEBUG
                if (get_msr(cpu, MSR_IA32_PACKAGE_THERM_INTERRUPT, &msr))
                        return 0;
 
@@ -3511,11 +3514,10 @@ int print_thermal(struct thread_data *t, struct core_data *c, struct pkg_data *p
                dts2 = (msr >> 8) & 0x7F;
                fprintf(outf, "cpu%d: MSR_IA32_PACKAGE_THERM_INTERRUPT: 0x%08llx (%d C, %d C)\n",
                        cpu, msr, tcc_activation_temp - dts, tcc_activation_temp - dts2);
-#endif
        }
 
 
-       if (do_dts) {
+       if (do_dts && debug) {
                unsigned int resolution;
 
                if (get_msr(cpu, MSR_IA32_THERM_STATUS, &msr))
@@ -3526,7 +3528,6 @@ int print_thermal(struct thread_data *t, struct core_data *c, struct pkg_data *p
                fprintf(outf, "cpu%d: MSR_IA32_THERM_STATUS: 0x%08llx (%d C +/- %d)\n",
                        cpu, msr, tcc_activation_temp - dts, resolution);
 
-#ifdef THERM_DEBUG
                if (get_msr(cpu, MSR_IA32_THERM_INTERRUPT, &msr))
                        return 0;
 
@@ -3534,7 +3535,6 @@ int print_thermal(struct thread_data *t, struct core_data *c, struct pkg_data *p
                dts2 = (msr >> 8) & 0x7F;
                fprintf(outf, "cpu%d: MSR_IA32_THERM_INTERRUPT: 0x%08llx (%d C, %d C)\n",
                        cpu, msr, tcc_activation_temp - dts, tcc_activation_temp - dts2);
-#endif
        }
 
        return 0;
@@ -4578,7 +4578,7 @@ int get_and_dump_counters(void)
 }
 
 void print_version() {
-       fprintf(outf, "turbostat version 17.02.24"
+       fprintf(outf, "turbostat version 17.04.12"
                " - Len Brown <lenb@kernel.org>\n");
 }