Merge branch 'for-linus' of git://git.kernel.dk/linux-block
author    Linus Torvalds <torvalds@linux-foundation.org>
          Fri, 27 Jan 2017 20:36:39 +0000 (12:36 -0800)
committer Linus Torvalds <torvalds@linux-foundation.org>
          Fri, 27 Jan 2017 20:36:39 +0000 (12:36 -0800)
Pull block fixes from Jens Axboe:
 "A set of fixes for this series. This contains:

   - Set of fixes for the nvme target code

   - A revert of a patch from this merge window that was causing a
     regression with WRITE_SAME, at least on iSCSI targets.

   - A fix for a use-after-free in the new O_DIRECT bdev code (see the
     sketch after the shortlog below).

   - Two fixes for the xen-blkfront driver"

* 'for-linus' of git://git.kernel.dk/linux-block:
  Revert "sd: remove __data_len hack for WRITE SAME"
  nvme-fc: use blk_rq_nr_phys_segments
  nvmet-rdma: Fix missing dma sync to nvme data structures
  nvmet: Call fatal_error from keep-alive timeout expiration
  nvmet: cancel fatal error and flush async work before free controller
  nvmet: delete controllers deletion upon subsystem release
  nvmet_fc: correct logic in disconnect queue LS handling
  block: fix use after free in __blkdev_direct_IO
  xen-blkfront: correct maximum segment accounting
  xen-blkfront: feature flags handling adjustments
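
For context on the __blkdev_direct_IO fix above: the bug class is reading a
request structure after its final submission, when an asynchronous completion
path may already have freed it. A minimal, hedged sketch of the pattern and
its fix -- hypothetical names, not the kernel's actual code:

    /* Illustrative only, not __blkdev_direct_IO.  For async I/O the
     * completion handler frees the context, so nothing may touch it
     * after the last submit. */
    #include <stdbool.h>

    struct dio_like {
            bool is_sync;              /* freed by completion for async I/O */
    };

    /* hypothetical: hands the request to the hardware; completion may
     * run -- and free 'dio' -- before this even returns */
    extern void do_submit(struct dio_like *dio);

    long submit_last(struct dio_like *dio)
    {
            bool is_sync = dio->is_sync;   /* copy BEFORE handing it off */

            do_submit(dio);                /* 'dio' may be gone from here on */

            if (!is_sync)
                    return -529;           /* -EIOCBQUEUED: async, back off */
            return 0;                      /* sync path still owns 'dio' */
    }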

125 files changed:
Documentation/filesystems/proc.txt
Documentation/power/states.txt
MAINTAINERS
arch/frv/include/asm/atomic.h
arch/mn10300/include/asm/switch_to.h
arch/s390/kernel/ptrace.c
arch/s390/mm/pgtable.c
drivers/acpi/acpica/tbdata.c
drivers/acpi/acpica/tbinstal.c
drivers/acpi/sleep.c
drivers/acpi/video_detect.c
drivers/base/memory.c
drivers/cpufreq/intel_pstate.c
drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
drivers/gpu/drm/amd/amdgpu/dce_virtual.c
drivers/gpu/drm/ast/ast_drv.h
drivers/gpu/drm/ast/ast_main.c
drivers/gpu/drm/ast/ast_post.c
drivers/gpu/drm/drm_atomic.c
drivers/gpu/drm/drm_probe_helper.c
drivers/gpu/drm/i915/gvt/cmd_parser.c
drivers/gpu/drm/i915/gvt/execlist.c
drivers/gpu/drm/i915/gvt/kvmgt.c
drivers/gpu/drm/i915/gvt/scheduler.h
drivers/gpu/drm/i915/i915_drv.c
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/i915_vma.c
drivers/gpu/drm/i915/intel_crt.c
drivers/gpu/drm/i915/intel_display.c
drivers/gpu/drm/i915/intel_drv.h
drivers/gpu/drm/i915/intel_fbdev.c
drivers/gpu/drm/i915/intel_hotplug.c
drivers/gpu/drm/nouveau/nouveau_display.c
drivers/gpu/drm/nouveau/nouveau_drm.c
drivers/gpu/drm/nouveau/nouveau_drv.h
drivers/gpu/drm/nouveau/nouveau_fbcon.c
drivers/gpu/drm/radeon/radeon_drv.c
drivers/gpu/drm/vc4/vc4_crtc.c
drivers/gpu/drm/vc4/vc4_gem.c
drivers/gpu/drm/vc4/vc4_render_cl.c
drivers/infiniband/core/cma.c
drivers/infiniband/core/umem.c
drivers/infiniband/hw/cxgb3/iwch_provider.c
drivers/infiniband/hw/cxgb4/cm.c
drivers/infiniband/hw/cxgb4/cq.c
drivers/infiniband/hw/cxgb4/device.c
drivers/infiniband/hw/cxgb4/iw_cxgb4.h
drivers/infiniband/hw/cxgb4/provider.c
drivers/infiniband/hw/cxgb4/qp.c
drivers/infiniband/hw/cxgb4/t4.h
drivers/infiniband/hw/i40iw/i40iw_verbs.c
drivers/infiniband/hw/nes/nes_verbs.c
drivers/infiniband/hw/qedr/main.c
drivers/infiniband/hw/qedr/qedr.h
drivers/infiniband/hw/qedr/qedr_cm.c
drivers/infiniband/hw/qedr/verbs.c
drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c
drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c
drivers/infiniband/sw/rxe/rxe_net.c
drivers/infiniband/sw/rxe/rxe_qp.c
drivers/infiniband/ulp/iser/iscsi_iser.c
drivers/infiniband/ulp/iser/iscsi_iser.h
drivers/infiniband/ulp/iser/iser_verbs.c
drivers/infiniband/ulp/srp/ib_srp.c
drivers/media/cec/cec-adap.c
drivers/media/dvb-core/dvb_net.c
drivers/media/i2c/Kconfig
drivers/media/i2c/smiapp/smiapp-core.c
drivers/media/i2c/tvp5150.c
drivers/media/i2c/tvp5150_reg.h
drivers/media/pci/cobalt/cobalt-driver.c
drivers/media/pci/cobalt/cobalt-driver.h
drivers/media/usb/dvb-usb/pctv452e.c
drivers/memstick/core/memstick.c
drivers/mmc/host/dw_mmc.c
drivers/pinctrl/intel/pinctrl-baytrail.c
drivers/pinctrl/intel/pinctrl-broxton.c
drivers/pinctrl/intel/pinctrl-intel.c
drivers/pinctrl/meson/pinctrl-meson-gxbb.c
drivers/pinctrl/meson/pinctrl-meson-gxl.c
drivers/pinctrl/pinctrl-amd.c
drivers/pinctrl/uniphier/pinctrl-uniphier-ld20.c
drivers/platform/x86/ideapad-laptop.c
drivers/platform/x86/intel_mid_powerbtn.c
drivers/platform/x86/mlx-platform.c
drivers/platform/x86/surface3-wmi.c
drivers/thermal/thermal_hwmon.c
drivers/vfio/vfio_iommu_spapr_tce.c
drivers/vhost/vsock.c
drivers/video/fbdev/core/fbcmap.c
drivers/virtio/virtio_mmio.c
drivers/virtio/virtio_ring.c
drivers/xen/swiotlb-xen.c
fs/Kconfig
fs/dax.c
fs/ext2/Kconfig
fs/ext4/Kconfig
fs/proc/base.c
fs/romfs/super.c
fs/userfaultfd.c
include/drm/drm_atomic.h
include/drm/drm_crtc_helper.h
include/drm/drm_mode_config.h
include/linux/memory_hotplug.h
include/linux/mmzone.h
include/linux/nmi.h
include/linux/suspend.h
include/rdma/ib_verbs.h
include/uapi/linux/cec-funcs.h
include/uapi/rdma/Kbuild
include/uapi/rdma/cxgb3-abi.h
kernel/panic.c
kernel/power/suspend.c
kernel/sysctl.c
kernel/ucount.c
kernel/watchdog.c
kernel/watchdog_hld.c
lib/ioremap.c
lib/radix-tree.c
mm/huge_memory.c
mm/memcontrol.c
mm/memory_hotplug.c
mm/mempolicy.c
mm/page_alloc.c
mm/slub.c

index 72624a16b79284c0f2484741144e6b90ac084064..c94b4675d021ffd374de22d7d83df61dbb6c34dd 100644 (file)
@@ -212,10 +212,11 @@ asynchronous manner and the value may not be very precise. To see a precise
 snapshot of a moment, you can see /proc/<pid>/smaps file and scan page table.
 It's slow but very precise.
 
-Table 1-2: Contents of the status files (as of 4.1)
+Table 1-2: Contents of the status files (as of 4.8)
 ..............................................................................
  Field                       Content
  Name                        filename of the executable
+ Umask                       file mode creation mask
  State                       state (R is running, S is sleeping, D is sleeping
                              in an uninterruptible wait, Z is zombie,
                             T is traced or stopped)
@@ -226,7 +227,6 @@ Table 1-2: Contents of the status files (as of 4.1)
  TracerPid                   PID of process tracing this process (0 if not)
  Uid                         Real, effective, saved set, and  file system UIDs
  Gid                         Real, effective, saved set, and  file system GIDs
- Umask                       file mode creation mask
  FDSize                      number of file descriptor slots currently allocated
  Groups                      supplementary group list
  NStgid                      descendant namespace thread group ID hierarchy
@@ -236,6 +236,7 @@ Table 1-2: Contents of the status files (as of 4.1)
  VmPeak                      peak virtual memory size
  VmSize                      total program size
  VmLck                       locked memory size
+ VmPin                       pinned memory size
  VmHWM                       peak resident set size ("high water mark")
  VmRSS                       size of memory portions. It contains the three
                              following parts (VmRSS = RssAnon + RssFile + RssShmem)
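
The two fields the hunk above adds to the table (Umask, VmPin) can be checked
from userspace by scanning /proc/<pid>/status. A small illustrative reader,
not part of the patch:

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
            char line[256];
            FILE *f = fopen("/proc/self/status", "r");

            if (!f)
                    return 1;
            /* print only the Umask and VmPin lines described above */
            while (fgets(line, sizeof(line), f))
                    if (!strncmp(line, "Umask:", 6) ||
                        !strncmp(line, "VmPin:", 6))
                            fputs(line, stdout);
            fclose(f);
            return 0;
    }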
index 8a39ce45d8a01c298ec3b62b878e2623cba5a958..008ecb588317bc1d354bb5f50513605d6154237c 100644 (file)
@@ -35,9 +35,7 @@ only one way to cause the system to go into the Suspend-To-RAM state (write
 The default suspend mode (ie. the one to be used without writing anything into
 /sys/power/mem_sleep) is either "deep" (if Suspend-To-RAM is supported) or
 "s2idle", but it can be overridden by the value of the "mem_sleep_default"
-parameter in the kernel command line.  On some ACPI-based systems, depending on
-the information in the FADT, the default may be "s2idle" even if Suspend-To-RAM
-is supported.
+parameter in the kernel command line.
 
 The properties of all of the sleep states are described below.
 
index 2fd11b439ab39cb2994d4fd3885ab9c45dc3958c..24a31b500a6c001221fc69ebe00b97ff16948573 100644 (file)
@@ -4153,7 +4153,7 @@ F:        Documentation/gpu/i915.rst
 INTEL GVT-g DRIVERS (Intel GPU Virtualization)
 M:      Zhenyu Wang <zhenyuw@linux.intel.com>
 M:      Zhi Wang <zhi.a.wang@intel.com>
-L:      igvt-g-dev@lists.01.org
+L:      intel-gvt-dev@lists.freedesktop.org
 L:      intel-gfx@lists.freedesktop.org
 W:      https://01.org/igvt-g
 T:      git https://github.com/01org/gvt-linux.git
@@ -13453,6 +13453,7 @@ F:      arch/x86/
 
 X86 PLATFORM DRIVERS
 M:     Darren Hart <dvhart@infradead.org>
+M:     Andy Shevchenko <andy@infradead.org>
 L:     platform-driver-x86@vger.kernel.org
 T:     git git://git.infradead.org/users/dvhart/linux-platform-drivers-x86.git
 S:     Maintained
@@ -13624,6 +13625,7 @@ F:      drivers/net/hamradio/z8530.h
 
 ZBUD COMPRESSED PAGE ALLOCATOR
 M:     Seth Jennings <sjenning@redhat.com>
+M:     Dan Streetman <ddstreet@ieee.org>
 L:     linux-mm@kvack.org
 S:     Maintained
 F:     mm/zbud.c
@@ -13679,6 +13681,7 @@ F:      Documentation/vm/zsmalloc.txt
 
 ZSWAP COMPRESSED SWAP CACHING
 M:     Seth Jennings <sjenning@redhat.com>
+M:     Dan Streetman <ddstreet@ieee.org>
 L:     linux-mm@kvack.org
 S:     Maintained
 F:     mm/zswap.c
index 1c2a5e264fc71cfd52f2acb0b24ddb1aff792be7..e93c9494503ac8fc3cfaa8167ea3523abb3e2925 100644 (file)
@@ -139,7 +139,7 @@ static inline void atomic64_dec(atomic64_t *v)
 #define atomic64_sub_and_test(i,v)     (atomic64_sub_return((i), (v)) == 0)
 #define atomic64_dec_and_test(v)       (atomic64_dec_return((v)) == 0)
 #define atomic64_inc_and_test(v)       (atomic64_inc_return((v)) == 0)
-
+#define atomic64_inc_not_zero(v)       atomic64_add_unless((v), 1, 0)
 
 #define atomic_cmpxchg(v, old, new)    (cmpxchg(&(v)->counter, old, new))
 #define atomic_xchg(v, new)            (xchg(&(v)->counter, new))
@@ -161,6 +161,39 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
        return c;
 }
 
+static inline int atomic64_add_unless(atomic64_t *v, long long i, long long u)
+{
+       long long c, old;
+
+       c = atomic64_read(v);
+       for (;;) {
+               if (unlikely(c == u))
+                       break;
+               old = atomic64_cmpxchg(v, c, c + i);
+               if (likely(old == c))
+                       break;
+               c = old;
+       }
+       return c != u;
+}
+
+static inline long long atomic64_dec_if_positive(atomic64_t *v)
+{
+       long long c, old, dec;
+
+       c = atomic64_read(v);
+       for (;;) {
+               dec = c - 1;
+               if (unlikely(dec < 0))
+                       break;
+               old = atomic64_cmpxchg((v), c, dec);
+               if (likely(old == c))
+                       break;
+               c = old;
+       }
+       return dec;
+}
+
 #define ATOMIC_OP(op)                                                  \
 static inline int atomic_fetch_##op(int i, atomic_t *v)                        \
 {                                                                      \
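
The frv hunk above fills in atomic64_add_unless() and wires up
atomic64_inc_not_zero() with the standard cmpxchg retry loop. The classic
consumer is "take a reference unless the object is already dead"; a hedged
sketch of the same loop shape, using C11 atomics in place of atomic64_t
('struct obj' is hypothetical):

    #include <stdatomic.h>
    #include <stdbool.h>

    struct obj {
            _Atomic long long refcount;    /* 0 means being torn down */
    };

    /* add 'i' unless the counter equals 'u'; true if it changed --
     * the same contract as the kernel's atomic64_add_unless() */
    static bool add_unless(_Atomic long long *v, long long i, long long u)
    {
            long long c = atomic_load(v);

            while (c != u)
                    if (atomic_compare_exchange_weak(v, &c, c + i))
                            return true;   /* won the race */
            return false;                  /* already at 'u'; leave it alone */
    }

    static bool obj_get(struct obj *o)
    {
            return add_unless(&o->refcount, 1, 0);   /* inc_not_zero */
    }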
index 393d311735c8b573bd5702eac1dcaaac1103600e..67e333aa7629c406745564cb24acc5903733ec41 100644 (file)
@@ -16,7 +16,7 @@
 struct task_struct;
 struct thread_struct;
 
-#if !defined(CONFIG_LAZY_SAVE_FPU)
+#if defined(CONFIG_FPU) && !defined(CONFIG_LAZY_SAVE_FPU)
 struct fpu_state_struct;
 extern asmlinkage void fpu_save(struct fpu_state_struct *);
 #define switch_fpu(prev, next)                                         \
index 7447ba509c30eb0b409598d062ea408603f313af..12020b55887bfd258e6545e687ec4a9de4fdb214 100644 (file)
@@ -963,6 +963,11 @@ static int s390_fpregs_set(struct task_struct *target,
        if (target == current)
                save_fpu_regs();
 
+       if (MACHINE_HAS_VX)
+               convert_vx_to_fp(fprs, target->thread.fpu.vxrs);
+       else
+               memcpy(&fprs, target->thread.fpu.fprs, sizeof(fprs));
+
        /* If setting FPC, must validate it first. */
        if (count > 0 && pos < offsetof(s390_fp_regs, fprs)) {
                u32 ufpc[2] = { target->thread.fpu.fpc, 0 };
@@ -1067,6 +1072,9 @@ static int s390_vxrs_low_set(struct task_struct *target,
        if (target == current)
                save_fpu_regs();
 
+       for (i = 0; i < __NUM_VXRS_LOW; i++)
+               vxrs[i] = *((__u64 *)(target->thread.fpu.vxrs + i) + 1);
+
        rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf, vxrs, 0, -1);
        if (rc == 0)
                for (i = 0; i < __NUM_VXRS_LOW; i++)
index 7a1897c51c5495f3f2b13d86ba8f6344e2234788..d56ef26d46816b834068609ceb940ce01901d731 100644 (file)
@@ -202,7 +202,7 @@ static inline pgste_t ptep_xchg_start(struct mm_struct *mm,
        return pgste;
 }
 
-static inline void ptep_xchg_commit(struct mm_struct *mm,
+static inline pte_t ptep_xchg_commit(struct mm_struct *mm,
                                    unsigned long addr, pte_t *ptep,
                                    pgste_t pgste, pte_t old, pte_t new)
 {
@@ -220,6 +220,7 @@ static inline void ptep_xchg_commit(struct mm_struct *mm,
        } else {
                *ptep = new;
        }
+       return old;
 }
 
 pte_t ptep_xchg_direct(struct mm_struct *mm, unsigned long addr,
@@ -231,7 +232,7 @@ pte_t ptep_xchg_direct(struct mm_struct *mm, unsigned long addr,
        preempt_disable();
        pgste = ptep_xchg_start(mm, addr, ptep);
        old = ptep_flush_direct(mm, addr, ptep);
-       ptep_xchg_commit(mm, addr, ptep, pgste, old, new);
+       old = ptep_xchg_commit(mm, addr, ptep, pgste, old, new);
        preempt_enable();
        return old;
 }
@@ -246,7 +247,7 @@ pte_t ptep_xchg_lazy(struct mm_struct *mm, unsigned long addr,
        preempt_disable();
        pgste = ptep_xchg_start(mm, addr, ptep);
        old = ptep_flush_lazy(mm, addr, ptep);
-       ptep_xchg_commit(mm, addr, ptep, pgste, old, new);
+       old = ptep_xchg_commit(mm, addr, ptep, pgste, old, new);
        preempt_enable();
        return old;
 }
index 82b0b571097960919ce6ed36a703402422ac3cc7..b0399e8f6d27df774b175cb2a3aa7c8f3cce7189 100644 (file)
@@ -852,23 +852,18 @@ acpi_tb_install_and_load_table(acpi_physical_address address,
 
        ACPI_FUNCTION_TRACE(tb_install_and_load_table);
 
-       (void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
-
        /* Install the table and load it into the namespace */
 
        status = acpi_tb_install_standard_table(address, flags, TRUE,
                                                override, &i);
        if (ACPI_FAILURE(status)) {
-               goto unlock_and_exit;
+               goto exit;
        }
 
-       (void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
        status = acpi_tb_load_table(i, acpi_gbl_root_node);
-       (void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
 
-unlock_and_exit:
+exit:
        *table_index = i;
-       (void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
        return_ACPI_STATUS(status);
 }
 
index 5fdf251a9f9797a2a00f479880d6449dfa6dd940..01e1b3d63fc0dc8ae0e0b17767dae535be2bec68 100644 (file)
@@ -217,6 +217,10 @@ acpi_tb_install_standard_table(acpi_physical_address address,
                goto release_and_exit;
        }
 
+       /* Acquire the table lock */
+
+       (void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
+
        if (reload) {
                /*
                 * Validate the incoming table signature.
@@ -244,7 +248,7 @@ acpi_tb_install_standard_table(acpi_physical_address address,
                                         new_table_desc.signature.integer));
 
                        status = AE_BAD_SIGNATURE;
-                       goto release_and_exit;
+                       goto unlock_and_exit;
                }
 
                /* Check if table is already registered */
@@ -279,7 +283,7 @@ acpi_tb_install_standard_table(acpi_physical_address address,
                                /* Table is still loaded, this is an error */
 
                                status = AE_ALREADY_EXISTS;
-                               goto release_and_exit;
+                               goto unlock_and_exit;
                        } else {
                                /*
                                 * Table was unloaded, allow it to be reloaded.
@@ -290,6 +294,7 @@ acpi_tb_install_standard_table(acpi_physical_address address,
                                 * indicate the re-installation.
                                 */
                                acpi_tb_uninstall_table(&new_table_desc);
+                               (void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
                                *table_index = i;
                                return_ACPI_STATUS(AE_OK);
                        }
@@ -303,11 +308,19 @@ acpi_tb_install_standard_table(acpi_physical_address address,
 
        /* Invoke table handler if present */
 
+       (void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
        if (acpi_gbl_table_handler) {
                (void)acpi_gbl_table_handler(ACPI_TABLE_EVENT_INSTALL,
                                             new_table_desc.pointer,
                                             acpi_gbl_table_handler_context);
        }
+       (void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
+
+unlock_and_exit:
+
+       /* Release the table lock */
+
+       (void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
 
 release_and_exit:
 
index 9b6cebe227a08562985ce304463dcbe1f1ee1154..54abb26b736639ca54aa7051ae742d6657a501bc 100644 (file)
@@ -674,14 +674,6 @@ static void acpi_sleep_suspend_setup(void)
                if (acpi_sleep_state_supported(i))
                        sleep_states[i] = 1;
 
-       /*
-        * Use suspend-to-idle by default if ACPI_FADT_LOW_POWER_S0 is set and
-        * the default suspend mode was not selected from the command line.
-        */
-       if (acpi_gbl_FADT.flags & ACPI_FADT_LOW_POWER_S0 &&
-           mem_sleep_default > PM_SUSPEND_MEM)
-               mem_sleep_default = PM_SUSPEND_FREEZE;
-
        suspend_set_ops(old_suspend_ordering ?
                &acpi_suspend_ops_old : &acpi_suspend_ops);
        freeze_set_ops(&acpi_freeze_ops);
index 02ded25c82e4a06e1e79bf2f0a4855aa933b3df1..7f48156cbc0c0b47a22943b60bf374d8a86ea6e3 100644 (file)
@@ -305,17 +305,6 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
                DMI_MATCH(DMI_PRODUCT_NAME, "Dell System XPS L702X"),
                },
        },
-       {
-       /* https://bugzilla.redhat.com/show_bug.cgi?id=1204476 */
-       /* https://bugs.launchpad.net/ubuntu/+source/linux-lts-trusty/+bug/1416940 */
-       .callback = video_detect_force_native,
-       .ident = "HP Pavilion dv6",
-       .matches = {
-               DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
-               DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion dv6 Notebook PC"),
-               },
-       },
-
        { },
 };
 
index 8ab8ea1253e62310a68d9e6bf039d8d866ee4019..dacb6a8418aa927e8d75a86470b35b414bf48598 100644 (file)
@@ -408,14 +408,14 @@ static ssize_t show_valid_zones(struct device *dev,
        sprintf(buf, "%s", zone->name);
 
        /* MMOP_ONLINE_KERNEL */
-       zone_shift = zone_can_shift(start_pfn, nr_pages, ZONE_NORMAL);
+       zone_can_shift(start_pfn, nr_pages, ZONE_NORMAL, &zone_shift);
        if (zone_shift) {
                strcat(buf, " ");
                strcat(buf, (zone + zone_shift)->name);
        }
 
        /* MMOP_ONLINE_MOVABLE */
-       zone_shift = zone_can_shift(start_pfn, nr_pages, ZONE_MOVABLE);
+       zone_can_shift(start_pfn, nr_pages, ZONE_MOVABLE, &zone_shift);
        if (zone_shift) {
                strcat(buf, " ");
                strcat(buf, (zone + zone_shift)->name);
index f91c25718d164c9d9339acf671d67937995fe076..a54d65aa776d064f025036cf9aa35790d83d82e8 100644 (file)
@@ -2005,7 +2005,8 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
                        limits = &performance_limits;
                        perf_limits = limits;
                }
-               if (policy->max >= policy->cpuinfo.max_freq) {
+               if (policy->max >= policy->cpuinfo.max_freq &&
+                   !limits->no_turbo) {
                        pr_debug("set performance\n");
                        intel_pstate_set_performance_limits(perf_limits);
                        goto out;
@@ -2047,6 +2048,17 @@ static int intel_pstate_verify_policy(struct cpufreq_policy *policy)
            policy->policy != CPUFREQ_POLICY_PERFORMANCE)
                return -EINVAL;
 
+       /* When per-CPU limits are used, sysfs limits are not used */
+       if (!per_cpu_limits) {
+               unsigned int max_freq, min_freq;
+
+               max_freq = policy->cpuinfo.max_freq *
+                                               limits->max_sysfs_pct / 100;
+               min_freq = policy->cpuinfo.max_freq *
+                                               limits->min_sysfs_pct / 100;
+               cpufreq_verify_within_limits(policy, min_freq, max_freq);
+       }
+
        return 0;
 }
 
index 29d6d84d1c28b1e847715c08b6fac8f9d2a9aac3..41e41f90265df07af3299f65022feaf731e99ce7 100644 (file)
@@ -83,6 +83,13 @@ int amdgpu_cs_get_ring(struct amdgpu_device *adev, u32 ip_type,
                }
                break;
        }
+
+       if (!(*out_ring && (*out_ring)->adev)) {
+               DRM_ERROR("Ring %d is not initialized on IP %d\n",
+                         ring, ip_type);
+               return -EINVAL;
+       }
+
        return 0;
 }
 
index 762f8e82ceb7465f56aa8cfcd7124ce0fc28acda..e9a176891e13319d77e10b643a44b9e893e562ac 100644 (file)
@@ -627,11 +627,8 @@ static const struct drm_encoder_helper_funcs dce_virtual_encoder_helper_funcs =
 
 static void dce_virtual_encoder_destroy(struct drm_encoder *encoder)
 {
-       struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
-
-       kfree(amdgpu_encoder->enc_priv);
        drm_encoder_cleanup(encoder);
-       kfree(amdgpu_encoder);
+       kfree(encoder);
 }
 
 static const struct drm_encoder_funcs dce_virtual_encoder_funcs = {
index 908011d2c8f5200e92cc8db772b85315d5c16343..7abda94fc2cf3bb43ec686ac4e699a43b77a11d0 100644 (file)
@@ -113,6 +113,7 @@ struct ast_private {
        struct ttm_bo_kmap_obj cache_kmap;
        int next_cursor;
        bool support_wide_screen;
+       bool DisableP2A;
 
        enum ast_tx_chip tx_chip_type;
        u8 dp501_maxclk;
index f75c6421db6239c9435ed39dc7d6244d13894920..533e762d036dc272afbdf4d2bce146b6f1b450d9 100644 (file)
@@ -124,6 +124,12 @@ static int ast_detect_chip(struct drm_device *dev, bool *need_post)
        } else
                *need_post = false;
 
+       /* Check P2A Access */
+       ast->DisableP2A = true;
+       data = ast_read32(ast, 0xf004);
+       if (data != 0xFFFFFFFF)
+               ast->DisableP2A = false;
+
        /* Check if we support wide screen */
        switch (ast->chip) {
        case AST1180:
@@ -140,15 +146,17 @@ static int ast_detect_chip(struct drm_device *dev, bool *need_post)
                        ast->support_wide_screen = true;
                else {
                        ast->support_wide_screen = false;
-                       /* Read SCU7c (silicon revision register) */
-                       ast_write32(ast, 0xf004, 0x1e6e0000);
-                       ast_write32(ast, 0xf000, 0x1);
-                       data = ast_read32(ast, 0x1207c);
-                       data &= 0x300;
-                       if (ast->chip == AST2300 && data == 0x0) /* ast1300 */
-                               ast->support_wide_screen = true;
-                       if (ast->chip == AST2400 && data == 0x100) /* ast1400 */
-                               ast->support_wide_screen = true;
+                       if (ast->DisableP2A == false) {
+                               /* Read SCU7c (silicon revision register) */
+                               ast_write32(ast, 0xf004, 0x1e6e0000);
+                               ast_write32(ast, 0xf000, 0x1);
+                               data = ast_read32(ast, 0x1207c);
+                               data &= 0x300;
+                               if (ast->chip == AST2300 && data == 0x0) /* ast1300 */
+                                       ast->support_wide_screen = true;
+                               if (ast->chip == AST2400 && data == 0x100) /* ast1400 */
+                                       ast->support_wide_screen = true;
+                       }
                }
                break;
        }
@@ -216,80 +224,81 @@ static int ast_get_dram_info(struct drm_device *dev)
        uint32_t data, data2;
        uint32_t denum, num, div, ref_pll;
 
-       ast_write32(ast, 0xf004, 0x1e6e0000);
-       ast_write32(ast, 0xf000, 0x1);
-
-
-       ast_write32(ast, 0x10000, 0xfc600309);
-
-       do {
-               if (pci_channel_offline(dev->pdev))
-                       return -EIO;
-       } while (ast_read32(ast, 0x10000) != 0x01);
-       data = ast_read32(ast, 0x10004);
-
-       if (data & 0x40)
+       if (ast->DisableP2A)
+       {
                ast->dram_bus_width = 16;
+               ast->dram_type = AST_DRAM_1Gx16;
+               ast->mclk = 396;
+       }
        else
-               ast->dram_bus_width = 32;
+       {
+               ast_write32(ast, 0xf004, 0x1e6e0000);
+               ast_write32(ast, 0xf000, 0x1);
+               data = ast_read32(ast, 0x10004);
+
+               if (data & 0x40)
+                       ast->dram_bus_width = 16;
+               else
+                       ast->dram_bus_width = 32;
+
+               if (ast->chip == AST2300 || ast->chip == AST2400) {
+                       switch (data & 0x03) {
+                       case 0:
+                               ast->dram_type = AST_DRAM_512Mx16;
+                               break;
+                       default:
+                       case 1:
+                               ast->dram_type = AST_DRAM_1Gx16;
+                               break;
+                       case 2:
+                               ast->dram_type = AST_DRAM_2Gx16;
+                               break;
+                       case 3:
+                               ast->dram_type = AST_DRAM_4Gx16;
+                               break;
+                       }
+               } else {
+                       switch (data & 0x0c) {
+                       case 0:
+                       case 4:
+                               ast->dram_type = AST_DRAM_512Mx16;
+                               break;
+                       case 8:
+                               if (data & 0x40)
+                                       ast->dram_type = AST_DRAM_1Gx16;
+                               else
+                                       ast->dram_type = AST_DRAM_512Mx32;
+                               break;
+                       case 0xc:
+                               ast->dram_type = AST_DRAM_1Gx32;
+                               break;
+                       }
+               }
 
-       if (ast->chip == AST2300 || ast->chip == AST2400) {
-               switch (data & 0x03) {
-               case 0:
-                       ast->dram_type = AST_DRAM_512Mx16;
-                       break;
-               default:
-               case 1:
-                       ast->dram_type = AST_DRAM_1Gx16;
-                       break;
-               case 2:
-                       ast->dram_type = AST_DRAM_2Gx16;
-                       break;
+               data = ast_read32(ast, 0x10120);
+               data2 = ast_read32(ast, 0x10170);
+               if (data2 & 0x2000)
+                       ref_pll = 14318;
+               else
+                       ref_pll = 12000;
+
+               denum = data & 0x1f;
+               num = (data & 0x3fe0) >> 5;
+               data = (data & 0xc000) >> 14;
+               switch (data) {
                case 3:
-                       ast->dram_type = AST_DRAM_4Gx16;
-                       break;
-               }
-       } else {
-               switch (data & 0x0c) {
-               case 0:
-               case 4:
-                       ast->dram_type = AST_DRAM_512Mx16;
+                       div = 0x4;
                        break;
-               case 8:
-                       if (data & 0x40)
-                               ast->dram_type = AST_DRAM_1Gx16;
-                       else
-                               ast->dram_type = AST_DRAM_512Mx32;
+               case 2:
+               case 1:
+                       div = 0x2;
                        break;
-               case 0xc:
-                       ast->dram_type = AST_DRAM_1Gx32;
+               default:
+                       div = 0x1;
                        break;
                }
+               ast->mclk = ref_pll * (num + 2) / (denum + 2) * (div * 1000);
        }
-
-       data = ast_read32(ast, 0x10120);
-       data2 = ast_read32(ast, 0x10170);
-       if (data2 & 0x2000)
-               ref_pll = 14318;
-       else
-               ref_pll = 12000;
-
-       denum = data & 0x1f;
-       num = (data & 0x3fe0) >> 5;
-       data = (data & 0xc000) >> 14;
-       switch (data) {
-       case 3:
-               div = 0x4;
-               break;
-       case 2:
-       case 1:
-               div = 0x2;
-               break;
-       default:
-               div = 0x1;
-               break;
-       }
-       ast->mclk = ref_pll * (num + 2) / (denum + 2) * (div * 1000);
        return 0;
 }
 
index 810c51d92b99f81a441f37d02e319ed25b7768e5..5331ee1df086e7ae3950e33efc38975b544abdd4 100644 (file)
@@ -379,12 +379,20 @@ void ast_post_gpu(struct drm_device *dev)
        ast_open_key(ast);
        ast_set_def_ext_reg(dev);
 
-       if (ast->chip == AST2300 || ast->chip == AST2400)
-               ast_init_dram_2300(dev);
-       else
-               ast_init_dram_reg(dev);
+       if (ast->DisableP2A == false)
+       {
+               if (ast->chip == AST2300 || ast->chip == AST2400)
+                       ast_init_dram_2300(dev);
+               else
+                       ast_init_dram_reg(dev);
 
-       ast_init_3rdtx(dev);
+               ast_init_3rdtx(dev);
+       }
+       else
+       {
+               if (ast->tx_chip_type != AST_TX_NONE)
+                       ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xa3, 0xcf, 0x80);        /* Enable DVO */
+       }
 }
 
 /* AST 2300 DRAM settings */
index 60697482b94c8136ea2720dbf3b9f81c94e1d823..50f5cf7b69d1dc55fd427efa61e7e8e5eeff80d0 100644 (file)
@@ -291,15 +291,15 @@ drm_atomic_get_crtc_state(struct drm_atomic_state *state,
 EXPORT_SYMBOL(drm_atomic_get_crtc_state);
 
 static void set_out_fence_for_crtc(struct drm_atomic_state *state,
-                                  struct drm_crtc *crtc, s64 __user *fence_ptr)
+                                  struct drm_crtc *crtc, s32 __user *fence_ptr)
 {
        state->crtcs[drm_crtc_index(crtc)].out_fence_ptr = fence_ptr;
 }
 
-static s64 __user *get_out_fence_for_crtc(struct drm_atomic_state *state,
+static s32 __user *get_out_fence_for_crtc(struct drm_atomic_state *state,
                                          struct drm_crtc *crtc)
 {
-       s64 __user *fence_ptr;
+       s32 __user *fence_ptr;
 
        fence_ptr = state->crtcs[drm_crtc_index(crtc)].out_fence_ptr;
        state->crtcs[drm_crtc_index(crtc)].out_fence_ptr = NULL;
@@ -512,7 +512,7 @@ int drm_atomic_crtc_set_property(struct drm_crtc *crtc,
                state->color_mgmt_changed |= replaced;
                return ret;
        } else if (property == config->prop_out_fence_ptr) {
-               s64 __user *fence_ptr = u64_to_user_ptr(val);
+               s32 __user *fence_ptr = u64_to_user_ptr(val);
 
                if (!fence_ptr)
                        return 0;
@@ -1915,7 +1915,7 @@ EXPORT_SYMBOL(drm_atomic_clean_old_fb);
  */
 
 struct drm_out_fence_state {
-       s64 __user *out_fence_ptr;
+       s32 __user *out_fence_ptr;
        struct sync_file *sync_file;
        int fd;
 };
@@ -1952,7 +1952,7 @@ static int prepare_crtc_signaling(struct drm_device *dev,
                return 0;
 
        for_each_crtc_in_state(state, crtc, crtc_state, i) {
-               u64 __user *fence_ptr;
+               s32 __user *fence_ptr;
 
                fence_ptr = get_out_fence_for_crtc(crtc_state->state, crtc);
 
index b452a7ccd84b8ca4752f1dfb6828bd697eeed6b7..cf8f0128c161ed6e1034322066c49d53a80b9e92 100644 (file)
@@ -115,27 +115,24 @@ static int drm_helper_probe_add_cmdline_mode(struct drm_connector *connector)
 
 #define DRM_OUTPUT_POLL_PERIOD (10*HZ)
 /**
- * drm_kms_helper_poll_enable - re-enable output polling.
+ * drm_kms_helper_poll_enable_locked - re-enable output polling.
  * @dev: drm_device
  *
- * This function re-enables the output polling work, after it has been
- * temporarily disabled using drm_kms_helper_poll_disable(), for example over
- * suspend/resume.
+ * This function re-enables the output polling work without
+ * locking the mode_config mutex.
  *
- * Drivers can call this helper from their device resume implementation. It is
- * an error to call this when the output polling support has not yet been set
- * up.
- *
- * Note that calls to enable and disable polling must be strictly ordered, which
- * is automatically the case when they're only call from suspend/resume
- * callbacks.
+ * This is like drm_kms_helper_poll_enable() however it is to be
+ * called from a context where the mode_config mutex is locked
+ * already.
  */
-void drm_kms_helper_poll_enable(struct drm_device *dev)
+void drm_kms_helper_poll_enable_locked(struct drm_device *dev)
 {
        bool poll = false;
        struct drm_connector *connector;
        unsigned long delay = DRM_OUTPUT_POLL_PERIOD;
 
+       WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
+
        if (!dev->mode_config.poll_enabled || !drm_kms_helper_poll)
                return;
 
@@ -163,7 +160,7 @@ void drm_kms_helper_poll_enable(struct drm_device *dev)
        if (poll)
                schedule_delayed_work(&dev->mode_config.output_poll_work, delay);
 }
-EXPORT_SYMBOL(drm_kms_helper_poll_enable);
+EXPORT_SYMBOL(drm_kms_helper_poll_enable_locked);
 
 static enum drm_connector_status
 drm_connector_detect(struct drm_connector *connector, bool force)
@@ -290,7 +287,7 @@ int drm_helper_probe_single_connector_modes(struct drm_connector *connector,
 
        /* Re-enable polling in case the global poll config changed. */
        if (drm_kms_helper_poll != dev->mode_config.poll_running)
-               drm_kms_helper_poll_enable(dev);
+               drm_kms_helper_poll_enable_locked(dev);
 
        dev->mode_config.poll_running = drm_kms_helper_poll;
 
@@ -482,12 +479,8 @@ out:
  * This function disables the output polling work.
  *
  * Drivers can call this helper from their device suspend implementation. It is
- * not an error to call this even when output polling isn't enabled or already
- * disabled. Polling is re-enabled by calling drm_kms_helper_poll_enable().
- *
- * Note that calls to enable and disable polling must be strictly ordered, which
- * is automatically the case when they're only call from suspend/resume
- * callbacks.
+ * not an error to call this even when output polling isn't enabled or already
+ * disabled.
  */
 void drm_kms_helper_poll_disable(struct drm_device *dev)
 {
@@ -497,6 +490,24 @@ void drm_kms_helper_poll_disable(struct drm_device *dev)
 }
 EXPORT_SYMBOL(drm_kms_helper_poll_disable);
 
+/**
+ * drm_kms_helper_poll_enable - re-enable output polling.
+ * @dev: drm_device
+ *
+ * This function re-enables the output polling work.
+ *
+ * Drivers can call this helper from their device resume implementation. It is
+ * an error to call this when the output polling support has not yet been set
+ * up.
+ */
+void drm_kms_helper_poll_enable(struct drm_device *dev)
+{
+       mutex_lock(&dev->mode_config.mutex);
+       drm_kms_helper_poll_enable_locked(dev);
+       mutex_unlock(&dev->mode_config.mutex);
+}
+EXPORT_SYMBOL(drm_kms_helper_poll_enable);
+
 /**
  * drm_kms_helper_poll_init - initialize and enable output polling
  * @dev: drm_device
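
The probe-helper hunk above reintroduces the locked/unlocked split:
drm_kms_helper_poll_enable() takes mode_config.mutex itself, while the
_locked() variant is for callers (such as the i915 hotplug paths changed
later in this diff) that already hold it. A hedged sketch of that wrapper
pattern, with a pthread mutex standing in for mode_config.mutex:

    #include <pthread.h>

    static pthread_mutex_t cfg_lock = PTHREAD_MUTEX_INITIALIZER;

    /* _locked variant: caller must already hold cfg_lock; the kernel
     * version asserts this with WARN_ON(!mutex_is_locked(...)) */
    static void poll_enable_locked(void)
    {
            /* ... re-arm the delayed poll work ... */
    }

    /* unlocked variant: safe from resume paths that hold no locks */
    static void poll_enable(void)
    {
            pthread_mutex_lock(&cfg_lock);
            poll_enable_locked();
            pthread_mutex_unlock(&cfg_lock);
    }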
index d26a092c70e8c8fe2a14df28dd22e4253f739a16..e4563984cb1e8106cda73e8d0e12f12d10f6a1d2 100644 (file)
@@ -481,7 +481,6 @@ struct parser_exec_state {
        (s->vgpu->gvt->device_info.gmadr_bytes_in_cmd >> 2)
 
 static unsigned long bypass_scan_mask = 0;
-static bool bypass_batch_buffer_scan = true;
 
 /* ring ALL, type = 0 */
 static struct sub_op_bits sub_op_mi[] = {
@@ -1525,9 +1524,6 @@ static int batch_buffer_needs_scan(struct parser_exec_state *s)
 {
        struct intel_gvt *gvt = s->vgpu->gvt;
 
-       if (bypass_batch_buffer_scan)
-               return 0;
-
        if (IS_BROADWELL(gvt->dev_priv) || IS_SKYLAKE(gvt->dev_priv)) {
                /* BDW decides privilege based on address space */
                if (cmd_val(s, 0) & (1 << 8))
index f32bb6f6495ce0aafddf35d920298c5d315af9bc..34083731669d8cbe55b94de2e5b3585aa73a7039 100644 (file)
@@ -364,58 +364,30 @@ static void free_workload(struct intel_vgpu_workload *workload)
 #define get_desc_from_elsp_dwords(ed, i) \
        ((struct execlist_ctx_descriptor_format *)&((ed)->data[i * 2]))
 
-
-#define BATCH_BUFFER_ADDR_MASK ((1UL << 32) - (1U << 2))
-#define BATCH_BUFFER_ADDR_HIGH_MASK ((1UL << 16) - (1U))
-static int set_gma_to_bb_cmd(struct intel_shadow_bb_entry *entry_obj,
-                            unsigned long add, int gmadr_bytes)
-{
-       if (WARN_ON(gmadr_bytes != 4 && gmadr_bytes != 8))
-               return -1;
-
-       *((u32 *)(entry_obj->bb_start_cmd_va + (1 << 2))) = add &
-               BATCH_BUFFER_ADDR_MASK;
-       if (gmadr_bytes == 8) {
-               *((u32 *)(entry_obj->bb_start_cmd_va + (2 << 2))) =
-                       add & BATCH_BUFFER_ADDR_HIGH_MASK;
-       }
-
-       return 0;
-}
-
 static void prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload)
 {
-       int gmadr_bytes = workload->vgpu->gvt->device_info.gmadr_bytes_in_cmd;
+       const int gmadr_bytes = workload->vgpu->gvt->device_info.gmadr_bytes_in_cmd;
+       struct intel_shadow_bb_entry *entry_obj;
 
        /* pin the gem object to ggtt */
-       if (!list_empty(&workload->shadow_bb)) {
-               struct intel_shadow_bb_entry *entry_obj =
-                       list_first_entry(&workload->shadow_bb,
-                                        struct intel_shadow_bb_entry,
-                                        list);
-               struct intel_shadow_bb_entry *temp;
+       list_for_each_entry(entry_obj, &workload->shadow_bb, list) {
+               struct i915_vma *vma;
 
-               list_for_each_entry_safe(entry_obj, temp, &workload->shadow_bb,
-                               list) {
-                       struct i915_vma *vma;
-
-                       vma = i915_gem_object_ggtt_pin(entry_obj->obj, NULL, 0,
-                                                      4, 0);
-                       if (IS_ERR(vma)) {
-                               gvt_err("Cannot pin\n");
-                               return;
-                       }
-
-                       /* FIXME: we are not tracking our pinned VMA leaving it
-                        * up to the core to fix up the stray pin_count upon
-                        * free.
-                        */
-
-                       /* update the relocate gma with shadow batch buffer*/
-                       set_gma_to_bb_cmd(entry_obj,
-                                         i915_ggtt_offset(vma),
-                                         gmadr_bytes);
+               vma = i915_gem_object_ggtt_pin(entry_obj->obj, NULL, 0, 4, 0);
+               if (IS_ERR(vma)) {
+                       gvt_err("Cannot pin\n");
+                       return;
                }
+
+               /* FIXME: we are not tracking our pinned VMA leaving it
+                * up to the core to fix up the stray pin_count upon
+                * free.
+                */
+
+               /* update the relocate gma with shadow batch buffer*/
+               entry_obj->bb_start_cmd_va[1] = i915_ggtt_offset(vma);
+               if (gmadr_bytes == 8)
+                       entry_obj->bb_start_cmd_va[2] = 0;
        }
 }
 
@@ -826,7 +798,7 @@ int intel_vgpu_init_execlist(struct intel_vgpu *vgpu)
                INIT_LIST_HEAD(&vgpu->workload_q_head[i]);
        }
 
-       vgpu->workloads = kmem_cache_create("gvt-g vgpu workload",
+       vgpu->workloads = kmem_cache_create("gvt-g_vgpu_workload",
                        sizeof(struct intel_vgpu_workload), 0,
                        SLAB_HWCACHE_ALIGN,
                        NULL);
index 0c9234a87a20b3d0d6a1f7ddf9e789719a6244cd..3f656e3a6e5a79a598934381d7f3a59e09eb8cd7 100644 (file)
@@ -230,8 +230,8 @@ static struct intel_vgpu_type *intel_gvt_find_vgpu_type(struct intel_gvt *gvt,
        return NULL;
 }
 
-static ssize_t available_instance_show(struct kobject *kobj, struct device *dev,
-               char *buf)
+static ssize_t available_instances_show(struct kobject *kobj,
+                                       struct device *dev, char *buf)
 {
        struct intel_vgpu_type *type;
        unsigned int num = 0;
@@ -269,12 +269,12 @@ static ssize_t description_show(struct kobject *kobj, struct device *dev,
                                type->fence);
 }
 
-static MDEV_TYPE_ATTR_RO(available_instance);
+static MDEV_TYPE_ATTR_RO(available_instances);
 static MDEV_TYPE_ATTR_RO(device_api);
 static MDEV_TYPE_ATTR_RO(description);
 
 static struct attribute *type_attrs[] = {
-       &mdev_type_attr_available_instance.attr,
+       &mdev_type_attr_available_instances.attr,
        &mdev_type_attr_device_api.attr,
        &mdev_type_attr_description.attr,
        NULL,
index 3b30c28bff515f0fd1ce7e385ea89d6f17ccc834..2833dfa8c9aed8e9b6c8f86bbd2ad9a45c461b0c 100644 (file)
@@ -113,7 +113,7 @@ struct intel_shadow_bb_entry {
        struct drm_i915_gem_object *obj;
        void *va;
        unsigned long len;
-       void *bb_start_cmd_va;
+       u32 *bb_start_cmd_va;
 };
 
 #define workload_q_head(vgpu, ring_id) \
index 445fec9c2841ad61282e538d4cfdbe1436e04fb3..b2c4a0b8a627e39c5828922083b78dfd667ad047 100644 (file)
@@ -2378,7 +2378,7 @@ static int intel_runtime_suspend(struct device *kdev)
 
        assert_forcewakes_inactive(dev_priv);
 
-       if (!IS_VALLEYVIEW(dev_priv) || !IS_CHERRYVIEW(dev_priv))
+       if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
                intel_hpd_poll_init(dev_priv);
 
        DRM_DEBUG_KMS("Device suspended\n");
index 243224aeabf82f111ab14c4d6995ce868c0d36c1..69bc3b0c43905eccf19ad142cf32c7064a09feb0 100644 (file)
@@ -1977,6 +1977,11 @@ struct drm_i915_private {
 
        struct i915_frontbuffer_tracking fb_tracking;
 
+       struct intel_atomic_helper {
+               struct llist_head free_list;
+               struct work_struct free_work;
+       } atomic_helper;
+
        u16 orig_clock;
 
        bool mchbar_need_disable;
index a792dcb902b51d337f46f2c1ad7ad1ea737f4c8a..e924a95160796d1c8aa708e1c9ac0312b1015eb8 100644 (file)
@@ -185,6 +185,7 @@ int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
                        return ret;
        }
 
+       trace_i915_vma_bind(vma, bind_flags);
        ret = vma->vm->bind_vma(vma, cache_level, bind_flags);
        if (ret)
                return ret;
index 86ecec5601d42dd59f937f7c8c2203b3aeb105bc..588470eb8d395df2719fe2e172f93f414ba88e92 100644 (file)
@@ -499,6 +499,7 @@ static bool intel_crt_detect_ddc(struct drm_connector *connector)
        struct drm_i915_private *dev_priv = to_i915(crt->base.base.dev);
        struct edid *edid;
        struct i2c_adapter *i2c;
+       bool ret = false;
 
        BUG_ON(crt->base.type != INTEL_OUTPUT_ANALOG);
 
@@ -515,17 +516,17 @@ static bool intel_crt_detect_ddc(struct drm_connector *connector)
                 */
                if (!is_digital) {
                        DRM_DEBUG_KMS("CRT detected via DDC:0x50 [EDID]\n");
-                       return true;
+                       ret = true;
+               } else {
+                       DRM_DEBUG_KMS("CRT not detected via DDC:0x50 [EDID reports a digital panel]\n");
                }
-
-               DRM_DEBUG_KMS("CRT not detected via DDC:0x50 [EDID reports a digital panel]\n");
        } else {
                DRM_DEBUG_KMS("CRT not detected via DDC:0x50 [no valid EDID found]\n");
        }
 
        kfree(edid);
 
-       return false;
+       return ret;
 }
 
 static enum drm_connector_status
index 8d702cf1a616ad4cd06b21593204e20634d6a7fb..77f7b1d849a4e3fe28f321c133efc44383a2e0b7 100644 (file)
@@ -2585,8 +2585,9 @@ intel_fill_fb_info(struct drm_i915_private *dev_priv,
                         * We only keep the x/y offsets, so push all of the
                         * gtt offset into the x/y offsets.
                         */
-                       _intel_adjust_tile_offset(&x, &y, tile_size,
-                                                 tile_width, tile_height, pitch_tiles,
+                       _intel_adjust_tile_offset(&x, &y,
+                                                 tile_width, tile_height,
+                                                 tile_size, pitch_tiles,
                                                  gtt_offset_rotated * tile_size, 0);
 
                        gtt_offset_rotated += rot_info->plane[i].width * rot_info->plane[i].height;
@@ -6849,6 +6850,12 @@ static void intel_crtc_disable_noatomic(struct drm_crtc *crtc)
        }
 
        state = drm_atomic_state_alloc(crtc->dev);
+       if (!state) {
+               DRM_DEBUG_KMS("failed to disable [CRTC:%d:%s], out of memory",
+                             crtc->base.id, crtc->name);
+               return;
+       }
+
        state->acquire_ctx = crtc->dev->mode_config.acquire_ctx;
 
        /* Everything's already locked, -EDEADLK can't happen. */
@@ -11246,6 +11253,7 @@ found:
        }
 
        old->restore_state = restore_state;
+       drm_atomic_state_put(state);
 
        /* let the connector get through one full cycle before testing */
        intel_wait_for_vblank(dev_priv, intel_crtc->pipe);
@@ -14515,8 +14523,14 @@ intel_atomic_commit_ready(struct i915_sw_fence *fence,
                break;
 
        case FENCE_FREE:
-               drm_atomic_state_put(&state->base);
-               break;
+               {
+                       struct intel_atomic_helper *helper =
+                               &to_i915(state->base.dev)->atomic_helper;
+
+                       if (llist_add(&state->freed, &helper->free_list))
+                               schedule_work(&helper->free_work);
+                       break;
+               }
        }
 
        return NOTIFY_DONE;
@@ -16395,6 +16409,18 @@ fail:
        drm_modeset_acquire_fini(&ctx);
 }
 
+static void intel_atomic_helper_free_state(struct work_struct *work)
+{
+       struct drm_i915_private *dev_priv =
+               container_of(work, typeof(*dev_priv), atomic_helper.free_work);
+       struct intel_atomic_state *state, *next;
+       struct llist_node *freed;
+
+       freed = llist_del_all(&dev_priv->atomic_helper.free_list);
+       llist_for_each_entry_safe(state, next, freed, freed)
+               drm_atomic_state_put(&state->base);
+}
+
 int intel_modeset_init(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = to_i915(dev);
@@ -16414,6 +16440,9 @@ int intel_modeset_init(struct drm_device *dev)
 
        dev->mode_config.funcs = &intel_mode_funcs;
 
+       INIT_WORK(&dev_priv->atomic_helper.free_work,
+                 intel_atomic_helper_free_state);
+
        intel_init_quirks(dev);
 
        intel_init_pm(dev_priv);
@@ -17027,7 +17056,8 @@ void intel_display_resume(struct drm_device *dev)
 
        if (ret)
                DRM_ERROR("Restoring old state failed with %i\n", ret);
-       drm_atomic_state_put(state);
+       if (state)
+               drm_atomic_state_put(state);
 }
 
 void intel_modeset_gem_init(struct drm_device *dev)
@@ -17097,6 +17127,9 @@ void intel_modeset_cleanup(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = to_i915(dev);
 
+       flush_work(&dev_priv->atomic_helper.free_work);
+       WARN_ON(!llist_empty(&dev_priv->atomic_helper.free_list));
+
        intel_disable_gt_powersave(dev_priv);
 
        /*
index cd132c216a67decc86e4b1fbc22d544a8230b0ad..cd72ae171eeb673de11f1ca6dcc6f1ab06fde81c 100644 (file)
@@ -370,6 +370,8 @@ struct intel_atomic_state {
        struct skl_wm_values wm_results;
 
        struct i915_sw_fence commit_ready;
+
+       struct llist_node freed;
 };
 
 struct intel_plane_state {
index beb08982dc0b5c3ea9711de7220c383e6ea41b92..8cf2d80f22540a35dc4245a842d8473ce7d852d9 100644 (file)
@@ -742,6 +742,9 @@ void intel_fbdev_initial_config_async(struct drm_device *dev)
 {
        struct intel_fbdev *ifbdev = to_i915(dev)->fbdev;
 
+       if (!ifbdev)
+               return;
+
        ifbdev->cookie = async_schedule(intel_fbdev_initial_config, ifbdev);
 }
 
index b62e3f8ad415f6173470c90a3cb1b35b04f91c4b..3d546c019de058c4a925d2a689f2bf4b9ff2c6f7 100644 (file)
@@ -180,7 +180,7 @@ static void intel_hpd_irq_storm_disable(struct drm_i915_private *dev_priv)
 
        /* Enable polling and queue hotplug re-enabling. */
        if (hpd_disabled) {
-               drm_kms_helper_poll_enable(dev);
+               drm_kms_helper_poll_enable_locked(dev);
                mod_delayed_work(system_wq, &dev_priv->hotplug.reenable_work,
                                 msecs_to_jiffies(HPD_STORM_REENABLE_DELAY));
        }
@@ -511,7 +511,7 @@ static void i915_hpd_poll_init_work(struct work_struct *work)
        }
 
        if (enabled)
-               drm_kms_helper_poll_enable(dev);
+               drm_kms_helper_poll_enable_locked(dev);
 
        mutex_unlock(&dev->mode_config.mutex);
 
index cef08da1da4e0bcb6a22fa73033562cdeea55006..6a157763dfc38f672f2b9384792679faf83b95d4 100644 (file)
@@ -411,7 +411,8 @@ nouveau_display_init(struct drm_device *dev)
                return ret;
 
        /* enable polling for external displays */
-       drm_kms_helper_poll_enable(dev);
+       if (!dev->mode_config.poll_enabled)
+               drm_kms_helper_poll_enable(dev);
 
        /* enable hotplug interrupts */
        list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
index 59348fc41c77b0edf725e01655fe3ef1722a4342..bc85a45f91cde756d9763e34108ab53c711b9bd4 100644 (file)
@@ -773,7 +773,10 @@ nouveau_pmops_runtime_resume(struct device *dev)
        pci_set_master(pdev);
 
        ret = nouveau_do_resume(drm_dev, true);
-       drm_kms_helper_poll_enable(drm_dev);
+
+       if (!drm_dev->mode_config.poll_enabled)
+               drm_kms_helper_poll_enable(drm_dev);
+
        /* do magic */
        nvif_mask(&device->object, 0x088488, (1 << 25), (1 << 25));
        vga_switcheroo_set_dynamic_switch(pdev, VGA_SWITCHEROO_ON);
index 8d5ed5bfdacb1d6c59e9db9df47a74be41462a9a..42c1fa53d4314f195b637c3de4323c86b095ae0f 100644 (file)
@@ -165,6 +165,8 @@ struct nouveau_drm {
        struct backlight_device *backlight;
        struct list_head bl_connectors;
        struct work_struct hpd_work;
+       struct work_struct fbcon_work;
+       int fbcon_new_state;
 #ifdef CONFIG_ACPI
        struct notifier_block acpi_nb;
 #endif
index 2f2a3dcd4ad777addbdcbfbb8cdc14922c7082eb..fa2d0a978cccbaac7a5640b5b6911779b6e8328f 100644 (file)
@@ -470,19 +470,43 @@ static const struct drm_fb_helper_funcs nouveau_fbcon_helper_funcs = {
        .fb_probe = nouveau_fbcon_create,
 };
 
+static void
+nouveau_fbcon_set_suspend_work(struct work_struct *work)
+{
+       struct nouveau_drm *drm = container_of(work, typeof(*drm), fbcon_work);
+       int state = READ_ONCE(drm->fbcon_new_state);
+
+       if (state == FBINFO_STATE_RUNNING)
+               pm_runtime_get_sync(drm->dev->dev);
+
+       console_lock();
+       if (state == FBINFO_STATE_RUNNING)
+               nouveau_fbcon_accel_restore(drm->dev);
+       drm_fb_helper_set_suspend(&drm->fbcon->helper, state);
+       if (state != FBINFO_STATE_RUNNING)
+               nouveau_fbcon_accel_save_disable(drm->dev);
+       console_unlock();
+
+       if (state == FBINFO_STATE_RUNNING) {
+               pm_runtime_mark_last_busy(drm->dev->dev);
+               pm_runtime_put_sync(drm->dev->dev);
+       }
+}
+
 void
 nouveau_fbcon_set_suspend(struct drm_device *dev, int state)
 {
        struct nouveau_drm *drm = nouveau_drm(dev);
-       if (drm->fbcon) {
-               console_lock();
-               if (state == FBINFO_STATE_RUNNING)
-                       nouveau_fbcon_accel_restore(dev);
-               drm_fb_helper_set_suspend(&drm->fbcon->helper, state);
-               if (state != FBINFO_STATE_RUNNING)
-                       nouveau_fbcon_accel_save_disable(dev);
-               console_unlock();
-       }
+
+       if (!drm->fbcon)
+               return;
+
+       drm->fbcon_new_state = state;
+       /* Since runtime resume can happen as a result of a sysfs operation,
+        * it's possible we already have the console locked. So handle fbcon
+        * init/deinit from a separate work thread
+        */
+       schedule_work(&drm->fbcon_work);
 }
 
 int
@@ -502,6 +526,7 @@ nouveau_fbcon_init(struct drm_device *dev)
                return -ENOMEM;
 
        drm->fbcon = fbcon;
+       INIT_WORK(&drm->fbcon_work, nouveau_fbcon_set_suspend_work);
 
        drm_fb_helper_prepare(dev, &fbcon->helper, &nouveau_fbcon_helper_funcs);
 
index 00ea0002b539b9e9b5b0a063f62deb3b7638fd56..e0c143b865f39cb36074b5e524638530cadeede9 100644 (file)
@@ -366,11 +366,10 @@ static void
 radeon_pci_shutdown(struct pci_dev *pdev)
 {
        /* if we are running in a VM, make sure the device
-        * torn down properly on reboot/shutdown.
-        * unfortunately we can't detect certain
-        * hypervisors so just do this all the time.
+        * torn down properly on reboot/shutdown
         */
-       radeon_pci_remove(pdev);
+       if (radeon_device_is_virtual())
+               radeon_pci_remove(pdev);
 }
 
 static int radeon_pmops_suspend(struct device *dev)
index a0fd3e66bc4b39c1f5092c340ad2c705bbf431f3..7aadce1f7e7a0e56b302bded53ad8c0d5977cc22 100644 (file)
@@ -839,7 +839,7 @@ static void vc4_crtc_destroy_state(struct drm_crtc *crtc,
 
        }
 
-       __drm_atomic_helper_crtc_destroy_state(state);
+       drm_atomic_helper_crtc_destroy_state(crtc, state);
 }
 
 static const struct drm_crtc_funcs vc4_crtc_funcs = {
index db920771bfb5641c9d5d8e53a8f73cf7ee501bb7..ab3016982466c3ca35ba479050ee107d26eb50ac 100644 (file)
@@ -594,12 +594,14 @@ vc4_get_bcl(struct drm_device *dev, struct vc4_exec_info *exec)
                                          args->shader_rec_count);
        struct vc4_bo *bo;
 
-       if (uniforms_offset < shader_rec_offset ||
+       if (shader_rec_offset < args->bin_cl_size ||
+           uniforms_offset < shader_rec_offset ||
            exec_size < uniforms_offset ||
            args->shader_rec_count >= (UINT_MAX /
                                          sizeof(struct vc4_shader_state)) ||
            temp_size < exec_size) {
                DRM_ERROR("overflow in exec arguments\n");
+               ret = -EINVAL;
                goto fail;
        }
 
index 08886a3097577242f5c9e025fd6446d81bcc6dec..5cdd003605f57c99faf31832e3f3dd38a75b7402 100644 (file)
@@ -461,7 +461,7 @@ static int vc4_rcl_surface_setup(struct vc4_exec_info *exec,
                }
 
                ret = vc4_full_res_bounds_check(exec, *obj, surf);
-               if (!ret)
+               if (ret)
                        return ret;
 
                return 0;
index e7dcfac877ca2eb7b86601a9115b4c2fc27f4c01..3e70a9c5d79d5a50ba3be228cb4174d1f66c98d7 100644 (file)
@@ -2811,7 +2811,8 @@ static int cma_bind_addr(struct rdma_cm_id *id, struct sockaddr *src_addr,
        if (!src_addr || !src_addr->sa_family) {
                src_addr = (struct sockaddr *) &id->route.addr.src_addr;
                src_addr->sa_family = dst_addr->sa_family;
-               if (dst_addr->sa_family == AF_INET6) {
+               if (IS_ENABLED(CONFIG_IPV6) &&
+                   dst_addr->sa_family == AF_INET6) {
                        struct sockaddr_in6 *src_addr6 = (struct sockaddr_in6 *) src_addr;
                        struct sockaddr_in6 *dst_addr6 = (struct sockaddr_in6 *) dst_addr;
                        src_addr6->sin6_scope_id = dst_addr6->sin6_scope_id;
index 1e62a5f0cb28203e0732b9915840fd9f02d45701..4609b921f899c9d7481b86825f18fe076a6f732c 100644 (file)
@@ -134,6 +134,7 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
                 IB_ACCESS_REMOTE_ATOMIC | IB_ACCESS_MW_BIND));
 
        if (access & IB_ACCESS_ON_DEMAND) {
+               put_pid(umem->pid);
                ret = ib_umem_odp_get(context, umem);
                if (ret) {
                        kfree(umem);
@@ -149,6 +150,7 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
 
        page_list = (struct page **) __get_free_page(GFP_KERNEL);
        if (!page_list) {
+               put_pid(umem->pid);
                kfree(umem);
                return ERR_PTR(-ENOMEM);
        }
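
Both hunks plug the same class of leak: a pid reference taken earlier in
ib_umem_get() was never dropped on these early-exit paths. The general rule
-- every exit path must release what the function acquired -- sketched with
goto unwinding (struct and helper names hypothetical):

    #include <linux/pid.h>
    #include <linux/sched.h>

    struct ctx {
            struct pid *pid;
    };

    static int allocate_buffers(struct ctx *c);     /* hypothetical */

    static int ctx_setup(struct ctx *c)
    {
            int ret;

            c->pid = get_task_pid(current, PIDTYPE_PID);    /* +1 ref */

            ret = allocate_buffers(c);
            if (ret)
                    goto err_put_pid;

            return 0;

    err_put_pid:
            put_pid(c->pid);        /* drop the ref on every failure path */
            return ret;
    }
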
index 9d5fe1853da46e54e9d635c5d101b688f4a8119a..6262dc035f3cea4c9613d96f67ec13e76a18643e 100644 (file)
@@ -1135,16 +1135,7 @@ static int iwch_query_port(struct ib_device *ibdev,
 
        memset(props, 0, sizeof(struct ib_port_attr));
        props->max_mtu = IB_MTU_4096;
-       if (netdev->mtu >= 4096)
-               props->active_mtu = IB_MTU_4096;
-       else if (netdev->mtu >= 2048)
-               props->active_mtu = IB_MTU_2048;
-       else if (netdev->mtu >= 1024)
-               props->active_mtu = IB_MTU_1024;
-       else if (netdev->mtu >= 512)
-               props->active_mtu = IB_MTU_512;
-       else
-               props->active_mtu = IB_MTU_256;
+       props->active_mtu = ib_mtu_int_to_enum(netdev->mtu);
 
        if (!netif_carrier_ok(netdev))
                props->state = IB_PORT_DOWN;
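
This series replaces four copies of the same if/else ladder (here and in
cxgb4, i40iw and nes below) with a single ib_mtu_int_to_enum() helper, which
presumably reduces to exactly the code it removes:

    /* Round an interface MTU down to the nearest IB MTU enum value. */
    static inline enum ib_mtu ib_mtu_int_to_enum(int mtu)
    {
            if (mtu >= 4096)
                    return IB_MTU_4096;
            else if (mtu >= 2048)
                    return IB_MTU_2048;
            else if (mtu >= 1024)
                    return IB_MTU_1024;
            else if (mtu >= 512)
                    return IB_MTU_512;
            else
                    return IB_MTU_256;
    }
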
index f1510cc76d2dbe7027e81f5495f53bd7b6588536..9398143d7c5e93a01d5b35c67ae0fdd4e0126483 100644 (file)
@@ -1804,20 +1804,21 @@ static int rx_data(struct c4iw_dev *dev, struct sk_buff *skb)
        skb_trim(skb, dlen);
        mutex_lock(&ep->com.mutex);
 
-       /* update RX credits */
-       update_rx_credits(ep, dlen);
-
        switch (ep->com.state) {
        case MPA_REQ_SENT:
+               update_rx_credits(ep, dlen);
                ep->rcv_seq += dlen;
                disconnect = process_mpa_reply(ep, skb);
                break;
        case MPA_REQ_WAIT:
+               update_rx_credits(ep, dlen);
                ep->rcv_seq += dlen;
                disconnect = process_mpa_request(ep, skb);
                break;
        case FPDU_MODE: {
                struct c4iw_qp_attributes attrs;
+
+               update_rx_credits(ep, dlen);
                BUG_ON(!ep->com.qp);
                if (status)
                        pr_err("%s Unexpected streaming data." \
index 19c6477af19f1416d17c15363e239307542b438d..bec82a600d77c7990432c756ecb8124df6a52852 100644 (file)
@@ -504,6 +504,15 @@ static int poll_cq(struct t4_wq *wq, struct t4_cq *cq, struct t4_cqe *cqe,
                goto skip_cqe;
        }
 
+       /*
+        * Special cqe for drain WR completions...
+        */
+       if (CQE_OPCODE(hw_cqe) == C4IW_DRAIN_OPCODE) {
+               *cookie = CQE_DRAIN_COOKIE(hw_cqe);
+               *cqe = *hw_cqe;
+               goto skip_cqe;
+       }
+
        /*
         * Gotta tweak READ completions:
         *      1) the cqe doesn't contain the sq_wptr from the wr.
@@ -753,6 +762,9 @@ static int c4iw_poll_cq_one(struct c4iw_cq *chp, struct ib_wc *wc)
                                c4iw_invalidate_mr(qhp->rhp,
                                                   CQE_WRID_FR_STAG(&cqe));
                        break;
+               case C4IW_DRAIN_OPCODE:
+                       wc->opcode = IB_WC_SEND;
+                       break;
                default:
                        printk(KERN_ERR MOD "Unexpected opcode %d "
                               "in the CQE received for QPID=0x%0x\n",
@@ -817,15 +829,8 @@ static int c4iw_poll_cq_one(struct c4iw_cq *chp, struct ib_wc *wc)
                }
        }
 out:
-       if (wq) {
-               if (unlikely(qhp->attr.state != C4IW_QP_STATE_RTS)) {
-                       if (t4_sq_empty(wq))
-                               complete(&qhp->sq_drained);
-                       if (t4_rq_empty(wq))
-                               complete(&qhp->rq_drained);
-               }
+       if (wq)
                spin_unlock(&qhp->lock);
-       }
        return ret;
 }
 
index 516b0ae6dc3f6d061cc3c7d8ba412196785a599c..40c0e7b9fc6e4e671eb4fb03682f9130594c90cb 100644 (file)
@@ -846,9 +846,17 @@ static int c4iw_rdev_open(struct c4iw_rdev *rdev)
                }
        }
 
+       rdev->free_workq = create_singlethread_workqueue("iw_cxgb4_free");
+       if (!rdev->free_workq) {
+               err = -ENOMEM;
+               goto err_free_status_page;
+       }
+
        rdev->status_page->db_off = 0;
 
        return 0;
+err_free_status_page:
+       free_page((unsigned long)rdev->status_page);
 destroy_ocqp_pool:
        c4iw_ocqp_pool_destroy(rdev);
 destroy_rqtpool:
@@ -862,6 +870,7 @@ destroy_resource:
 
 static void c4iw_rdev_close(struct c4iw_rdev *rdev)
 {
+       destroy_workqueue(rdev->free_workq);
        kfree(rdev->wr_log);
        free_page((unsigned long)rdev->status_page);
        c4iw_pblpool_destroy(rdev);
index 4788e1a46fdee23cce2956cc17ba8d09b0f3eb56..8cd4d054a87ed0ea27f323d47f82fdc7b3e10bff 100644 (file)
@@ -45,6 +45,7 @@
 #include <linux/kref.h>
 #include <linux/timer.h>
 #include <linux/io.h>
+#include <linux/workqueue.h>
 
 #include <asm/byteorder.h>
 
@@ -107,6 +108,7 @@ struct c4iw_dev_ucontext {
        struct list_head qpids;
        struct list_head cqids;
        struct mutex lock;
+       struct kref kref;
 };
 
 enum c4iw_rdev_flags {
@@ -183,6 +185,7 @@ struct c4iw_rdev {
        atomic_t wr_log_idx;
        struct wr_log_entry *wr_log;
        int wr_log_size;
+       struct workqueue_struct *free_workq;
 };
 
 static inline int c4iw_fatal_error(struct c4iw_rdev *rdev)
@@ -480,8 +483,8 @@ struct c4iw_qp {
        wait_queue_head_t wait;
        struct timer_list timer;
        int sq_sig_all;
-       struct completion rq_drained;
-       struct completion sq_drained;
+       struct work_struct free_work;
+       struct c4iw_ucontext *ucontext;
 };
 
 static inline struct c4iw_qp *to_c4iw_qp(struct ib_qp *ibqp)
@@ -495,6 +498,7 @@ struct c4iw_ucontext {
        u32 key;
        spinlock_t mmap_lock;
        struct list_head mmaps;
+       struct kref kref;
 };
 
 static inline struct c4iw_ucontext *to_c4iw_ucontext(struct ib_ucontext *c)
@@ -502,6 +506,18 @@ static inline struct c4iw_ucontext *to_c4iw_ucontext(struct ib_ucontext *c)
        return container_of(c, struct c4iw_ucontext, ibucontext);
 }
 
+void _c4iw_free_ucontext(struct kref *kref);
+
+static inline void c4iw_put_ucontext(struct c4iw_ucontext *ucontext)
+{
+       kref_put(&ucontext->kref, _c4iw_free_ucontext);
+}
+
+static inline void c4iw_get_ucontext(struct c4iw_ucontext *ucontext)
+{
+       kref_get(&ucontext->kref);
+}
+
 struct c4iw_mm_entry {
        struct list_head entry;
        u64 addr;
@@ -615,6 +631,8 @@ static inline int to_ib_qp_state(int c4iw_qp_state)
        return IB_QPS_ERR;
 }
 
+#define C4IW_DRAIN_OPCODE FW_RI_SGE_EC_CR_RETURN
+
 static inline u32 c4iw_ib_to_tpt_access(int a)
 {
        return (a & IB_ACCESS_REMOTE_WRITE ? FW_RI_MEM_ACCESS_REM_WRITE : 0) |
@@ -997,8 +1015,6 @@ extern int c4iw_wr_log;
 extern int db_fc_threshold;
 extern int db_coalescing_threshold;
 extern int use_dsgl;
-void c4iw_drain_rq(struct ib_qp *qp);
-void c4iw_drain_sq(struct ib_qp *qp);
 void c4iw_invalidate_mr(struct c4iw_dev *rhp, u32 rkey);
 
 #endif
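
The kref added to the ucontext, together with the inline get/put wrappers,
is the standard kernel refcounting shape. A generic, self-contained version
of the pattern for reference:

    #include <linux/kref.h>
    #include <linux/slab.h>

    struct obj {
            struct kref kref;
            /* ... payload ... */
    };

    /* Runs exactly once, when the last reference is dropped. */
    static void obj_release(struct kref *kref)
    {
            kfree(container_of(kref, struct obj, kref));
    }

    static inline void obj_get(struct obj *o)
    {
            kref_get(&o->kref);
    }

    static inline void obj_put(struct obj *o)
    {
            kref_put(&o->kref, obj_release);
    }

In this driver each QP takes a ucontext reference at create time (see qp.c
below), so the ucontext can outlive c4iw_dealloc_ucontext() until the last
QP that uses it is gone.
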
index 49b51b7e0fd786bf49dc2187e6c661f14087f3ae..3345e1c312f771cfaa8e31858624ca9892267467 100644 (file)
@@ -93,17 +93,28 @@ static int c4iw_process_mad(struct ib_device *ibdev, int mad_flags,
        return -ENOSYS;
 }
 
-static int c4iw_dealloc_ucontext(struct ib_ucontext *context)
+void _c4iw_free_ucontext(struct kref *kref)
 {
-       struct c4iw_dev *rhp = to_c4iw_dev(context->device);
-       struct c4iw_ucontext *ucontext = to_c4iw_ucontext(context);
+       struct c4iw_ucontext *ucontext;
+       struct c4iw_dev *rhp;
        struct c4iw_mm_entry *mm, *tmp;
 
-       PDBG("%s context %p\n", __func__, context);
+       ucontext = container_of(kref, struct c4iw_ucontext, kref);
+       rhp = to_c4iw_dev(ucontext->ibucontext.device);
+
+       PDBG("%s ucontext %p\n", __func__, ucontext);
        list_for_each_entry_safe(mm, tmp, &ucontext->mmaps, entry)
                kfree(mm);
        c4iw_release_dev_ucontext(&rhp->rdev, &ucontext->uctx);
        kfree(ucontext);
+}
+
+static int c4iw_dealloc_ucontext(struct ib_ucontext *context)
+{
+       struct c4iw_ucontext *ucontext = to_c4iw_ucontext(context);
+
+       PDBG("%s context %p\n", __func__, context);
+       c4iw_put_ucontext(ucontext);
        return 0;
 }
 
@@ -127,6 +138,7 @@ static struct ib_ucontext *c4iw_alloc_ucontext(struct ib_device *ibdev,
        c4iw_init_dev_ucontext(&rhp->rdev, &context->uctx);
        INIT_LIST_HEAD(&context->mmaps);
        spin_lock_init(&context->mmap_lock);
+       kref_init(&context->kref);
 
        if (udata->outlen < sizeof(uresp) - sizeof(uresp.reserved)) {
                if (!warned++)
@@ -361,16 +373,7 @@ static int c4iw_query_port(struct ib_device *ibdev, u8 port,
 
        memset(props, 0, sizeof(struct ib_port_attr));
        props->max_mtu = IB_MTU_4096;
-       if (netdev->mtu >= 4096)
-               props->active_mtu = IB_MTU_4096;
-       else if (netdev->mtu >= 2048)
-               props->active_mtu = IB_MTU_2048;
-       else if (netdev->mtu >= 1024)
-               props->active_mtu = IB_MTU_1024;
-       else if (netdev->mtu >= 512)
-               props->active_mtu = IB_MTU_512;
-       else
-               props->active_mtu = IB_MTU_256;
+       props->active_mtu = ib_mtu_int_to_enum(netdev->mtu);
 
        if (!netif_carrier_ok(netdev))
                props->state = IB_PORT_DOWN;
@@ -607,8 +610,6 @@ int c4iw_register_device(struct c4iw_dev *dev)
        dev->ibdev.uverbs_abi_ver = C4IW_UVERBS_ABI_VERSION;
        dev->ibdev.get_port_immutable = c4iw_port_immutable;
        dev->ibdev.get_dev_fw_str = get_dev_fw_str;
-       dev->ibdev.drain_sq = c4iw_drain_sq;
-       dev->ibdev.drain_rq = c4iw_drain_rq;
 
        dev->ibdev.iwcm = kmalloc(sizeof(struct iw_cm_verbs), GFP_KERNEL);
        if (!dev->ibdev.iwcm)
index cda5542e13a206347447a49f18f9e8cb930e7c8c..04c1c382dedb42bda631f3c62c80395df848a3c9 100644 (file)
@@ -715,13 +715,32 @@ static int build_inv_stag(union t4_wr *wqe, struct ib_send_wr *wr, u8 *len16)
        return 0;
 }
 
-static void _free_qp(struct kref *kref)
+static void free_qp_work(struct work_struct *work)
+{
+       struct c4iw_ucontext *ucontext;
+       struct c4iw_qp *qhp;
+       struct c4iw_dev *rhp;
+
+       qhp = container_of(work, struct c4iw_qp, free_work);
+       ucontext = qhp->ucontext;
+       rhp = qhp->rhp;
+
+       PDBG("%s qhp %p ucontext %p\n", __func__, qhp, ucontext);
+       destroy_qp(&rhp->rdev, &qhp->wq,
+                  ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
+
+       if (ucontext)
+               c4iw_put_ucontext(ucontext);
+       kfree(qhp);
+}
+
+static void queue_qp_free(struct kref *kref)
 {
        struct c4iw_qp *qhp;
 
        qhp = container_of(kref, struct c4iw_qp, kref);
        PDBG("%s qhp %p\n", __func__, qhp);
-       kfree(qhp);
+       queue_work(qhp->rhp->rdev.free_workq, &qhp->free_work);
 }
 
 void c4iw_qp_add_ref(struct ib_qp *qp)
@@ -733,7 +752,7 @@ void c4iw_qp_add_ref(struct ib_qp *qp)
 void c4iw_qp_rem_ref(struct ib_qp *qp)
 {
        PDBG("%s ib_qp %p\n", __func__, qp);
-       kref_put(&to_c4iw_qp(qp)->kref, _free_qp);
+       kref_put(&to_c4iw_qp(qp)->kref, queue_qp_free);
 }
 
 static void add_to_fc_list(struct list_head *head, struct list_head *entry)
@@ -776,6 +795,64 @@ static int ring_kernel_rq_db(struct c4iw_qp *qhp, u16 inc)
        return 0;
 }
 
+static void complete_sq_drain_wr(struct c4iw_qp *qhp, struct ib_send_wr *wr)
+{
+       struct t4_cqe cqe = {};
+       struct c4iw_cq *schp;
+       unsigned long flag;
+       struct t4_cq *cq;
+
+       schp = to_c4iw_cq(qhp->ibqp.send_cq);
+       cq = &schp->cq;
+
+       cqe.u.drain_cookie = wr->wr_id;
+       cqe.header = cpu_to_be32(CQE_STATUS_V(T4_ERR_SWFLUSH) |
+                                CQE_OPCODE_V(C4IW_DRAIN_OPCODE) |
+                                CQE_TYPE_V(1) |
+                                CQE_SWCQE_V(1) |
+                                CQE_QPID_V(qhp->wq.sq.qid));
+
+       spin_lock_irqsave(&schp->lock, flag);
+       cqe.bits_type_ts = cpu_to_be64(CQE_GENBIT_V((u64)cq->gen));
+       cq->sw_queue[cq->sw_pidx] = cqe;
+       t4_swcq_produce(cq);
+       spin_unlock_irqrestore(&schp->lock, flag);
+
+       spin_lock_irqsave(&schp->comp_handler_lock, flag);
+       (*schp->ibcq.comp_handler)(&schp->ibcq,
+                                  schp->ibcq.cq_context);
+       spin_unlock_irqrestore(&schp->comp_handler_lock, flag);
+}
+
+static void complete_rq_drain_wr(struct c4iw_qp *qhp, struct ib_recv_wr *wr)
+{
+       struct t4_cqe cqe = {};
+       struct c4iw_cq *rchp;
+       unsigned long flag;
+       struct t4_cq *cq;
+
+       rchp = to_c4iw_cq(qhp->ibqp.recv_cq);
+       cq = &rchp->cq;
+
+       cqe.u.drain_cookie = wr->wr_id;
+       cqe.header = cpu_to_be32(CQE_STATUS_V(T4_ERR_SWFLUSH) |
+                                CQE_OPCODE_V(C4IW_DRAIN_OPCODE) |
+                                CQE_TYPE_V(0) |
+                                CQE_SWCQE_V(1) |
+                                CQE_QPID_V(qhp->wq.sq.qid));
+
+       spin_lock_irqsave(&rchp->lock, flag);
+       cqe.bits_type_ts = cpu_to_be64(CQE_GENBIT_V((u64)cq->gen));
+       cq->sw_queue[cq->sw_pidx] = cqe;
+       t4_swcq_produce(cq);
+       spin_unlock_irqrestore(&rchp->lock, flag);
+
+       spin_lock_irqsave(&rchp->comp_handler_lock, flag);
+       (*rchp->ibcq.comp_handler)(&rchp->ibcq,
+                                  rchp->ibcq.cq_context);
+       spin_unlock_irqrestore(&rchp->comp_handler_lock, flag);
+}
+
 int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
                   struct ib_send_wr **bad_wr)
 {
@@ -794,8 +871,8 @@ int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
        spin_lock_irqsave(&qhp->lock, flag);
        if (t4_wq_in_error(&qhp->wq)) {
                spin_unlock_irqrestore(&qhp->lock, flag);
-               *bad_wr = wr;
-               return -EINVAL;
+               complete_sq_drain_wr(qhp, wr);
+               return err;
        }
        num_wrs = t4_sq_avail(&qhp->wq);
        if (num_wrs == 0) {
@@ -937,8 +1014,8 @@ int c4iw_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
        spin_lock_irqsave(&qhp->lock, flag);
        if (t4_wq_in_error(&qhp->wq)) {
                spin_unlock_irqrestore(&qhp->lock, flag);
-               *bad_wr = wr;
-               return -EINVAL;
+               complete_rq_drain_wr(qhp, wr);
+               return err;
        }
        num_wrs = t4_rq_avail(&qhp->wq);
        if (num_wrs == 0) {
@@ -1550,7 +1627,12 @@ int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
                }
                break;
        case C4IW_QP_STATE_CLOSING:
-               if (!internal) {
+
+               /*
+                * Allow kernel users to move to ERROR for qp draining.
+                */
+               if (!internal && (qhp->ibqp.uobject || attrs->next_state !=
+                                 C4IW_QP_STATE_ERROR)) {
                        ret = -EINVAL;
                        goto out;
                }
@@ -1643,7 +1725,6 @@ int c4iw_destroy_qp(struct ib_qp *ib_qp)
        struct c4iw_dev *rhp;
        struct c4iw_qp *qhp;
        struct c4iw_qp_attributes attrs;
-       struct c4iw_ucontext *ucontext;
 
        qhp = to_c4iw_qp(ib_qp);
        rhp = qhp->rhp;
@@ -1663,11 +1744,6 @@ int c4iw_destroy_qp(struct ib_qp *ib_qp)
        spin_unlock_irq(&rhp->lock);
        free_ird(rhp, qhp->attr.max_ird);
 
-       ucontext = ib_qp->uobject ?
-                  to_c4iw_ucontext(ib_qp->uobject->context) : NULL;
-       destroy_qp(&rhp->rdev, &qhp->wq,
-                  ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
-
        c4iw_qp_rem_ref(ib_qp);
 
        PDBG("%s ib_qp %p qpid 0x%0x\n", __func__, ib_qp, qhp->wq.sq.qid);
@@ -1763,11 +1839,10 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
        qhp->attr.max_ird = 0;
        qhp->sq_sig_all = attrs->sq_sig_type == IB_SIGNAL_ALL_WR;
        spin_lock_init(&qhp->lock);
-       init_completion(&qhp->sq_drained);
-       init_completion(&qhp->rq_drained);
        mutex_init(&qhp->mutex);
        init_waitqueue_head(&qhp->wait);
        kref_init(&qhp->kref);
+       INIT_WORK(&qhp->free_work, free_qp_work);
 
        ret = insert_handle(rhp, &rhp->qpidr, qhp, qhp->wq.sq.qid);
        if (ret)
@@ -1854,6 +1929,9 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
                        ma_sync_key_mm->len = PAGE_SIZE;
                        insert_mmap(ucontext, ma_sync_key_mm);
                }
+
+               c4iw_get_ucontext(ucontext);
+               qhp->ucontext = ucontext;
        }
        qhp->ibqp.qp_num = qhp->wq.sq.qid;
        init_timer(&(qhp->timer));
@@ -1958,40 +2036,3 @@ int c4iw_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
        init_attr->sq_sig_type = qhp->sq_sig_all ? IB_SIGNAL_ALL_WR : 0;
        return 0;
 }
-
-static void move_qp_to_err(struct c4iw_qp *qp)
-{
-       struct c4iw_qp_attributes attrs = { .next_state = C4IW_QP_STATE_ERROR };
-
-       (void)c4iw_modify_qp(qp->rhp, qp, C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
-}
-
-void c4iw_drain_sq(struct ib_qp *ibqp)
-{
-       struct c4iw_qp *qp = to_c4iw_qp(ibqp);
-       unsigned long flag;
-       bool need_to_wait;
-
-       move_qp_to_err(qp);
-       spin_lock_irqsave(&qp->lock, flag);
-       need_to_wait = !t4_sq_empty(&qp->wq);
-       spin_unlock_irqrestore(&qp->lock, flag);
-
-       if (need_to_wait)
-               wait_for_completion(&qp->sq_drained);
-}
-
-void c4iw_drain_rq(struct ib_qp *ibqp)
-{
-       struct c4iw_qp *qp = to_c4iw_qp(ibqp);
-       unsigned long flag;
-       bool need_to_wait;
-
-       move_qp_to_err(qp);
-       spin_lock_irqsave(&qp->lock, flag);
-       need_to_wait = !t4_rq_empty(&qp->wq);
-       spin_unlock_irqrestore(&qp->lock, flag);
-
-       if (need_to_wait)
-               wait_for_completion(&qp->rq_drained);
-}
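
The structural change in this file: the final kref_put() on a QP can happen
in a context that must not sleep, while the actual teardown can, so the kref
release callback now only queues work and the sleeping part runs on the
dedicated workqueue created in c4iw_rdev_open(). The deferred-free shape,
reduced to a sketch (names hypothetical; INIT_WORK() is assumed done at
create time and the workqueue at init time):

    #include <linux/kref.h>
    #include <linux/slab.h>
    #include <linux/workqueue.h>

    struct qp_obj {
            struct kref kref;
            struct work_struct free_work;
    };

    static struct workqueue_struct *free_workq;

    static void qp_free_work(struct work_struct *work)
    {
            struct qp_obj *qp = container_of(work, struct qp_obj, free_work);

            /* may sleep: HW teardown, dropping the ucontext ref, ... */
            kfree(qp);
    }

    /* kref release callback: must not sleep, so only defer. */
    static void qp_queue_free(struct kref *kref)
    {
            struct qp_obj *qp = container_of(kref, struct qp_obj, kref);

            queue_work(free_workq, &qp->free_work);
    }

A single-threaded queue presumably also serializes QP frees, which keeps the
teardown ordering simple.
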
index 862381aa83c824bb8712e408df39e49f47f11975..640d22148a3eeb86fdc2d5c795dcf02271bf57fb 100644 (file)
@@ -179,6 +179,7 @@ struct t4_cqe {
                        __be32 wrid_hi;
                        __be32 wrid_low;
                } gen;
+               u64 drain_cookie;
        } u;
        __be64 reserved;
        __be64 bits_type_ts;
@@ -238,6 +239,7 @@ struct t4_cqe {
 /* generic accessor macros */
 #define CQE_WRID_HI(x)         (be32_to_cpu((x)->u.gen.wrid_hi))
 #define CQE_WRID_LOW(x)                (be32_to_cpu((x)->u.gen.wrid_low))
+#define CQE_DRAIN_COOKIE(x)    ((x)->u.drain_cookie)
 
 /* macros for flit 3 of the cqe */
 #define CQE_GENBIT_S   63
index 29e97df9e1a7f87c784ebf33f4ebccfae217f433..4c000d60d5c6f865ae17aa28654497e3dbbb913c 100644 (file)
@@ -100,16 +100,7 @@ static int i40iw_query_port(struct ib_device *ibdev,
        memset(props, 0, sizeof(*props));
 
        props->max_mtu = IB_MTU_4096;
-       if (netdev->mtu >= 4096)
-               props->active_mtu = IB_MTU_4096;
-       else if (netdev->mtu >= 2048)
-               props->active_mtu = IB_MTU_2048;
-       else if (netdev->mtu >= 1024)
-               props->active_mtu = IB_MTU_1024;
-       else if (netdev->mtu >= 512)
-               props->active_mtu = IB_MTU_512;
-       else
-               props->active_mtu = IB_MTU_256;
+       props->active_mtu = ib_mtu_int_to_enum(netdev->mtu);
 
        props->lid = 1;
        if (netif_carrier_ok(iwdev->netdev))
index aff9fb14768be9006e05145b2bdd7b6f7dbee8ff..5a31f3c6a4211d507cc4634c49df53021bba505b 100644 (file)
@@ -478,17 +478,7 @@ static int nes_query_port(struct ib_device *ibdev, u8 port, struct ib_port_attr
        memset(props, 0, sizeof(*props));
 
        props->max_mtu = IB_MTU_4096;
-
-       if (netdev->mtu  >= 4096)
-               props->active_mtu = IB_MTU_4096;
-       else if (netdev->mtu  >= 2048)
-               props->active_mtu = IB_MTU_2048;
-       else if (netdev->mtu  >= 1024)
-               props->active_mtu = IB_MTU_1024;
-       else if (netdev->mtu  >= 512)
-               props->active_mtu = IB_MTU_512;
-       else
-               props->active_mtu = IB_MTU_256;
+       props->active_mtu = ib_mtu_int_to_enum(netdev->mtu);
 
        props->lid = 1;
        props->lmc = 0;
index 7b74d09a8217ca0f30de8d5065bb4b829100c56f..3ac8aa5ef37de2c5242125077eef78035d565901 100644 (file)
@@ -576,8 +576,7 @@ static int qedr_set_device_attr(struct qedr_dev *dev)
        return 0;
 }
 
-void qedr_unaffiliated_event(void *context,
-                            u8 event_code)
+void qedr_unaffiliated_event(void *context, u8 event_code)
 {
        pr_err("unaffiliated event not implemented yet\n");
 }
@@ -792,6 +791,9 @@ static struct qedr_dev *qedr_add(struct qed_dev *cdev, struct pci_dev *pdev,
                if (device_create_file(&dev->ibdev.dev, qedr_attributes[i]))
                        goto sysfs_err;
 
+       if (!test_and_set_bit(QEDR_ENET_STATE_BIT, &dev->enet_state))
+               qedr_ib_dispatch_event(dev, QEDR_PORT, IB_EVENT_PORT_ACTIVE);
+
        DP_DEBUG(dev, QEDR_MSG_INIT, "qedr driver loaded successfully\n");
        return dev;
 
@@ -824,11 +826,10 @@ static void qedr_remove(struct qedr_dev *dev)
        ib_dealloc_device(&dev->ibdev);
 }
 
-static int qedr_close(struct qedr_dev *dev)
+static void qedr_close(struct qedr_dev *dev)
 {
-       qedr_ib_dispatch_event(dev, 1, IB_EVENT_PORT_ERR);
-
-       return 0;
+       if (test_and_clear_bit(QEDR_ENET_STATE_BIT, &dev->enet_state))
+               qedr_ib_dispatch_event(dev, QEDR_PORT, IB_EVENT_PORT_ERR);
 }
 
 static void qedr_shutdown(struct qedr_dev *dev)
@@ -837,6 +838,12 @@ static void qedr_shutdown(struct qedr_dev *dev)
        qedr_remove(dev);
 }
 
+static void qedr_open(struct qedr_dev *dev)
+{
+       if (!test_and_set_bit(QEDR_ENET_STATE_BIT, &dev->enet_state))
+               qedr_ib_dispatch_event(dev, QEDR_PORT, IB_EVENT_PORT_ACTIVE);
+}
+
 static void qedr_mac_address_change(struct qedr_dev *dev)
 {
        union ib_gid *sgid = &dev->sgid_tbl[0];
@@ -863,7 +870,7 @@ static void qedr_mac_address_change(struct qedr_dev *dev)
 
        ether_addr_copy(dev->gsi_ll2_mac_address, dev->ndev->dev_addr);
 
-       qedr_ib_dispatch_event(dev, 1, IB_EVENT_GID_CHANGE);
+       qedr_ib_dispatch_event(dev, QEDR_PORT, IB_EVENT_GID_CHANGE);
 
        if (rc)
                DP_ERR(dev, "Error updating mac filter\n");
@@ -877,7 +884,7 @@ static void qedr_notify(struct qedr_dev *dev, enum qede_roce_event event)
 {
        switch (event) {
        case QEDE_UP:
-               qedr_ib_dispatch_event(dev, 1, IB_EVENT_PORT_ACTIVE);
+               qedr_open(dev);
                break;
        case QEDE_DOWN:
                qedr_close(dev);
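
The new QEDR_ENET_STATE_BIT turns repeated link notifications into
edge-triggered IB events: PORT_ACTIVE fires only on a 0->1 transition of the
bit and PORT_ERR only on 1->0. The pattern in isolation (dispatch helper
hypothetical):

    #include <linux/bitops.h>
    #include <rdma/ib_verbs.h>

    #define STATE_UP_BIT    0

    static unsigned long enet_state;                        /* per-device */
    static void dispatch_event(enum ib_event_type ev);      /* hypothetical */

    static void link_up(void)
    {
            /* test_and_set_bit() atomically returns the old bit, so only
             * the first UP after a DOWN dispatches an event. */
            if (!test_and_set_bit(STATE_UP_BIT, &enet_state))
                    dispatch_event(IB_EVENT_PORT_ACTIVE);
    }

    static void link_down(void)
    {
            if (test_and_clear_bit(STATE_UP_BIT, &enet_state))
                    dispatch_event(IB_EVENT_PORT_ERR);
    }
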
index 620badd7d4fbd7f6ceaa61aff95ef1e41161f0be..bb32e4792ec9f022d201c0585bcce7a7cbae179c 100644 (file)
@@ -113,6 +113,8 @@ struct qedr_device_attr {
        struct qed_rdma_events events;
 };
 
+#define QEDR_ENET_STATE_BIT    (0)
+
 struct qedr_dev {
        struct ib_device        ibdev;
        struct qed_dev          *cdev;
@@ -153,6 +155,8 @@ struct qedr_dev {
        struct qedr_cq          *gsi_sqcq;
        struct qedr_cq          *gsi_rqcq;
        struct qedr_qp          *gsi_qp;
+
+       unsigned long enet_state;
 };
 
 #define QEDR_MAX_SQ_PBL                        (0x8000)
@@ -188,6 +192,7 @@ struct qedr_dev {
 #define QEDR_ROCE_MAX_CNQ_SIZE         (0x4000)
 
 #define QEDR_MAX_PORT                  (1)
+#define QEDR_PORT                      (1)
 
 #define QEDR_UVERBS(CMD_NAME) (1ull << IB_USER_VERBS_CMD_##CMD_NAME)
 
@@ -251,9 +256,6 @@ struct qedr_cq {
 
        u16 icid;
 
-       /* Lock to protect completion handler */
-       spinlock_t comp_handler_lock;
-
        /* Lock to protect multiple CQs */
        spinlock_t cq_lock;
        u8 arm_flags;
index 63890ebb72bdff1c525e87786f59b124b2ea8d3f..a9a8d8745d2e7f9ca20a5c849e042fdc6af261d6 100644 (file)
@@ -87,11 +87,8 @@ void qedr_ll2_tx_cb(void *_qdev, struct qed_roce_ll2_packet *pkt)
        qedr_inc_sw_gsi_cons(&qp->sq);
        spin_unlock_irqrestore(&qp->q_lock, flags);
 
-       if (cq->ibcq.comp_handler) {
-               spin_lock_irqsave(&cq->comp_handler_lock, flags);
+       if (cq->ibcq.comp_handler)
                (*cq->ibcq.comp_handler) (&cq->ibcq, cq->ibcq.cq_context);
-               spin_unlock_irqrestore(&cq->comp_handler_lock, flags);
-       }
 }
 
 void qedr_ll2_rx_cb(void *_dev, struct qed_roce_ll2_packet *pkt,
@@ -113,11 +110,8 @@ void qedr_ll2_rx_cb(void *_dev, struct qed_roce_ll2_packet *pkt,
 
        spin_unlock_irqrestore(&qp->q_lock, flags);
 
-       if (cq->ibcq.comp_handler) {
-               spin_lock_irqsave(&cq->comp_handler_lock, flags);
+       if (cq->ibcq.comp_handler)
                (*cq->ibcq.comp_handler) (&cq->ibcq, cq->ibcq.cq_context);
-               spin_unlock_irqrestore(&cq->comp_handler_lock, flags);
-       }
 }
 
 static void qedr_destroy_gsi_cq(struct qedr_dev *dev,
@@ -404,9 +398,9 @@ static inline int qedr_gsi_build_packet(struct qedr_dev *dev,
        }
 
        if (ether_addr_equal(udh.eth.smac_h, udh.eth.dmac_h))
-               packet->tx_dest = QED_ROCE_LL2_TX_DEST_NW;
-       else
                packet->tx_dest = QED_ROCE_LL2_TX_DEST_LB;
+       else
+               packet->tx_dest = QED_ROCE_LL2_TX_DEST_NW;
 
        packet->roce_mode = roce_mode;
        memcpy(packet->header.vaddr, ud_header_buffer, header_size);
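
Two fixes share this file: the completion handler is now invoked without the
extra per-CQ lock (the handler apparently needs no driver-side
serialization, so the lock and its field in qedr.h above are dropped), and
the loopback test was inverted. The corrected decision as a one-liner:

    /* A GSI packet whose source MAC equals its destination MAC is
     * addressed to this same port: loop it back (LB) rather than
     * sending it out to the network (NW). */
    packet->tx_dest = ether_addr_equal(udh.eth.smac_h, udh.eth.dmac_h) ?
                      QED_ROCE_LL2_TX_DEST_LB : QED_ROCE_LL2_TX_DEST_NW;
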
index 57c8de2080773b161272774a69eaebf02cc411ed..c7d6c9a783bd615627e720eb8f043444c99c5f3c 100644 (file)
@@ -471,8 +471,6 @@ struct ib_pd *qedr_alloc_pd(struct ib_device *ibdev,
                            struct ib_ucontext *context, struct ib_udata *udata)
 {
        struct qedr_dev *dev = get_qedr_dev(ibdev);
-       struct qedr_ucontext *uctx = NULL;
-       struct qedr_alloc_pd_uresp uresp;
        struct qedr_pd *pd;
        u16 pd_id;
        int rc;
@@ -489,21 +487,33 @@ struct ib_pd *qedr_alloc_pd(struct ib_device *ibdev,
        if (!pd)
                return ERR_PTR(-ENOMEM);
 
-       dev->ops->rdma_alloc_pd(dev->rdma_ctx, &pd_id);
+       rc = dev->ops->rdma_alloc_pd(dev->rdma_ctx, &pd_id);
+       if (rc)
+               goto err;
 
-       uresp.pd_id = pd_id;
        pd->pd_id = pd_id;
 
        if (udata && context) {
+               struct qedr_alloc_pd_uresp uresp;
+
+               uresp.pd_id = pd_id;
+
                rc = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
-               if (rc)
+               if (rc) {
                        DP_ERR(dev, "copy error pd_id=0x%x.\n", pd_id);
-               uctx = get_qedr_ucontext(context);
-               uctx->pd = pd;
-               pd->uctx = uctx;
+                       dev->ops->rdma_dealloc_pd(dev->rdma_ctx, pd_id);
+                       goto err;
+               }
+
+               pd->uctx = get_qedr_ucontext(context);
+               pd->uctx->pd = pd;
        }
 
        return &pd->ibpd;
+
+err:
+       kfree(pd);
+       return ERR_PTR(rc);
 }
 
 int qedr_dealloc_pd(struct ib_pd *ibpd)
@@ -1600,7 +1610,7 @@ err0:
        return ERR_PTR(-EFAULT);
 }
 
-enum ib_qp_state qedr_get_ibqp_state(enum qed_roce_qp_state qp_state)
+static enum ib_qp_state qedr_get_ibqp_state(enum qed_roce_qp_state qp_state)
 {
        switch (qp_state) {
        case QED_ROCE_QP_STATE_RESET:
@@ -1621,7 +1631,8 @@ enum ib_qp_state qedr_get_ibqp_state(enum qed_roce_qp_state qp_state)
        return IB_QPS_ERR;
 }
 
-enum qed_roce_qp_state qedr_get_state_from_ibqp(enum ib_qp_state qp_state)
+static enum qed_roce_qp_state qedr_get_state_from_ibqp(
+                                       enum ib_qp_state qp_state)
 {
        switch (qp_state) {
        case IB_QPS_RESET:
@@ -1657,7 +1668,7 @@ static int qedr_update_qp_state(struct qedr_dev *dev,
        int status = 0;
 
        if (new_state == qp->state)
-               return 1;
+               return 0;
 
        switch (qp->state) {
        case QED_ROCE_QP_STATE_RESET:
@@ -1733,6 +1744,14 @@ static int qedr_update_qp_state(struct qedr_dev *dev,
                /* ERR->XXX */
                switch (new_state) {
                case QED_ROCE_QP_STATE_RESET:
+                       if ((qp->rq.prod != qp->rq.cons) ||
+                           (qp->sq.prod != qp->sq.cons)) {
+                               DP_NOTICE(dev,
+                                         "Error->Reset with rq/sq not empty rq.prod=%x rq.cons=%x sq.prod=%x sq.cons=%x\n",
+                                         qp->rq.prod, qp->rq.cons, qp->sq.prod,
+                                         qp->sq.cons);
+                               status = -EINVAL;
+                       }
                        break;
                default:
                        status = -EINVAL;
@@ -1865,7 +1884,6 @@ int qedr_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
                         qp_params.sgid.dwords[2], qp_params.sgid.dwords[3]);
                DP_DEBUG(dev, QEDR_MSG_QP, "remote_mac=[%pM]\n",
                         qp_params.remote_mac_addr);
-;
 
                qp_params.mtu = qp->mtu;
                qp_params.lb_indication = false;
@@ -2016,7 +2034,7 @@ int qedr_query_qp(struct ib_qp *ibqp,
 
        qp_attr->qp_state = qedr_get_ibqp_state(params.state);
        qp_attr->cur_qp_state = qedr_get_ibqp_state(params.state);
-       qp_attr->path_mtu = iboe_get_mtu(params.mtu);
+       qp_attr->path_mtu = ib_mtu_int_to_enum(params.mtu);
        qp_attr->path_mig_state = IB_MIG_MIGRATED;
        qp_attr->rq_psn = params.rq_psn;
        qp_attr->sq_psn = params.sq_psn;
@@ -2028,7 +2046,7 @@ int qedr_query_qp(struct ib_qp *ibqp,
        qp_attr->cap.max_recv_wr = qp->rq.max_wr;
        qp_attr->cap.max_send_sge = qp->sq.max_sges;
        qp_attr->cap.max_recv_sge = qp->rq.max_sges;
-       qp_attr->cap.max_inline_data = qp->max_inline_data;
+       qp_attr->cap.max_inline_data = ROCE_REQ_MAX_INLINE_DATA_SIZE;
        qp_init_attr->cap = qp_attr->cap;
 
        memcpy(&qp_attr->ah_attr.grh.dgid.raw[0], &params.dgid.bytes[0],
@@ -2302,7 +2320,8 @@ int qedr_dereg_mr(struct ib_mr *ib_mr)
        return rc;
 }
 
-struct qedr_mr *__qedr_alloc_mr(struct ib_pd *ibpd, int max_page_list_len)
+static struct qedr_mr *__qedr_alloc_mr(struct ib_pd *ibpd,
+                                      int max_page_list_len)
 {
        struct qedr_pd *pd = get_qedr_pd(ibpd);
        struct qedr_dev *dev = get_qedr_dev(ibpd->device);
@@ -2704,7 +2723,7 @@ static int qedr_prepare_reg(struct qedr_qp *qp,
        return 0;
 }
 
-enum ib_wc_opcode qedr_ib_to_wc_opcode(enum ib_wr_opcode opcode)
+static enum ib_wc_opcode qedr_ib_to_wc_opcode(enum ib_wr_opcode opcode)
 {
        switch (opcode) {
        case IB_WR_RDMA_WRITE:
@@ -2729,7 +2748,7 @@ enum ib_wc_opcode qedr_ib_to_wc_opcode(enum ib_wr_opcode opcode)
        }
 }
 
-inline bool qedr_can_post_send(struct qedr_qp *qp, struct ib_send_wr *wr)
+static inline bool qedr_can_post_send(struct qedr_qp *qp, struct ib_send_wr *wr)
 {
        int wq_is_full, err_wr, pbl_is_full;
        struct qedr_dev *dev = qp->dev;
@@ -2766,7 +2785,7 @@ inline bool qedr_can_post_send(struct qedr_qp *qp, struct ib_send_wr *wr)
        return true;
 }
 
-int __qedr_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
+static int __qedr_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
                     struct ib_send_wr **bad_wr)
 {
        struct qedr_dev *dev = get_qedr_dev(ibqp->device);
@@ -3234,9 +3253,10 @@ static int qedr_poll_cq_req(struct qedr_dev *dev,
                                  IB_WC_SUCCESS, 0);
                break;
        case RDMA_CQE_REQ_STS_WORK_REQUEST_FLUSHED_ERR:
-               DP_ERR(dev,
-                      "Error: POLL CQ with RDMA_CQE_REQ_STS_WORK_REQUEST_FLUSHED_ERR. CQ icid=0x%x, QP icid=0x%x\n",
-                      cq->icid, qp->icid);
+               if (qp->state != QED_ROCE_QP_STATE_ERR)
+                       DP_ERR(dev,
+                              "Error: POLL CQ with RDMA_CQE_REQ_STS_WORK_REQUEST_FLUSHED_ERR. CQ icid=0x%x, QP icid=0x%x\n",
+                              cq->icid, qp->icid);
                cnt = process_req(dev, qp, cq, num_entries, wc, req->sq_cons,
                                  IB_WC_WR_FLUSH_ERR, 1);
                break;
index 231a1ce1f4bec845d6ecfc58f53cd0d4ea5762a1..bd8fbd3d2032d390cc41268e7d32544ff0c98308 100644 (file)
@@ -1029,7 +1029,7 @@ static int pvrdma_pci_probe(struct pci_dev *pdev,
        if (ret) {
                dev_err(&pdev->dev, "failed to allocate interrupts\n");
                ret = -ENOMEM;
-               goto err_netdevice;
+               goto err_free_cq_ring;
        }
 
        /* Allocate UAR table. */
@@ -1092,8 +1092,6 @@ err_free_uar_table:
 err_free_intrs:
        pvrdma_free_irq(dev);
        pvrdma_disable_msi_all(dev);
-err_netdevice:
-       unregister_netdevice_notifier(&dev->nb_netdev);
 err_free_cq_ring:
        pvrdma_page_dir_cleanup(dev, &dev->cq_pdir);
 err_free_async_ring:
index 54891370d18a5beef151816c882b40fb705a9ec5..c2aa52638dcb81ea4539b61c43d61f55edefb2b3 100644 (file)
@@ -306,7 +306,7 @@ struct ib_ucontext *pvrdma_alloc_ucontext(struct ib_device *ibdev,
        union pvrdma_cmd_resp rsp;
        struct pvrdma_cmd_create_uc *cmd = &req.create_uc;
        struct pvrdma_cmd_create_uc_resp *resp = &rsp.create_uc_resp;
-       struct pvrdma_alloc_ucontext_resp uresp;
+       struct pvrdma_alloc_ucontext_resp uresp = {0};
        int ret;
        void *ptr;
 
index 342e78163613dfdc719b171e1396d01fd44432eb..4abdeb359fb4f52cbacde5c3e1f488296c6064c5 100644 (file)
@@ -555,7 +555,7 @@ struct rxe_dev *rxe_net_add(struct net_device *ndev)
        }
 
        spin_lock_bh(&dev_list_lock);
-       list_add_tail(&rxe_dev_list, &rxe->list);
+       list_add_tail(&rxe->list, &rxe_dev_list);
        spin_unlock_bh(&dev_list_lock);
        return rxe;
 }
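
A classic swapped-arguments bug: list_add_tail() takes the new entry first
and the list head second, so the reversed call was splicing the global
device list head into each new rxe device instead of the other way around.
For reference:

    /* From <linux/list.h>:
     *   void list_add_tail(struct list_head *new, struct list_head *head);
     * inserts @new before @head, i.e. at the tail of the list. */
    list_add_tail(&rxe->list, &rxe_dev_list);
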
index 486d576e55bc016dda1f8ddad6b8f00941f66727..44b2108253bd988ec1f5222da999575ed37d3bed 100644 (file)
@@ -813,8 +813,7 @@ void rxe_qp_destroy(struct rxe_qp *qp)
        del_timer_sync(&qp->rnr_nak_timer);
 
        rxe_cleanup_task(&qp->req.task);
-       if (qp_type(qp) == IB_QPT_RC)
-               rxe_cleanup_task(&qp->comp.task);
+       rxe_cleanup_task(&qp->comp.task);
 
        /* flush out any receive wr's or pending requests */
        __rxe_do_task(&qp->req.task);
index 9104e6b8cac9f7b66322d7ebc573ab6a32fa525a..e71af717e71b0f5e6d82eb36b65d887c6005dc09 100644 (file)
@@ -651,13 +651,6 @@ iscsi_iser_session_create(struct iscsi_endpoint *ep,
                                                   SHOST_DIX_GUARD_CRC);
                }
 
-               /*
-                * Limit the sg_tablesize and max_sectors based on the device
-                * max fastreg page list length.
-                */
-               shost->sg_tablesize = min_t(unsigned short, shost->sg_tablesize,
-                       ib_conn->device->ib_device->attrs.max_fast_reg_page_list_len);
-
                if (iscsi_host_add(shost,
                                   ib_conn->device->ib_device->dma_device)) {
                        mutex_unlock(&iser_conn->state_mutex);
@@ -679,6 +672,10 @@ iscsi_iser_session_create(struct iscsi_endpoint *ep,
        max_fr_sectors = ((shost->sg_tablesize - 1) * PAGE_SIZE) >> 9;
        shost->max_sectors = min(iser_max_sectors, max_fr_sectors);
 
+       iser_dbg("iser_conn %p, sg_tablesize %u, max_sectors %u\n",
+                iser_conn, shost->sg_tablesize,
+                shost->max_sectors);
+
        if (cmds_max > max_cmds) {
                iser_info("cmds_max changed from %u to %u\n",
                          cmds_max, max_cmds);
index 0be6a7c5ddb5aea294be7c10a8247895adbc2dde..9d0b22ad58c15759c3b92472083da15af5c42bac 100644 (file)
@@ -496,7 +496,6 @@ struct ib_conn {
  * @rx_descs:         rx buffers array (cyclic buffer)
  * @num_rx_descs:     number of rx descriptors
  * @scsi_sg_tablesize: scsi host sg_tablesize
- * @scsi_max_sectors: scsi host max sectors
  */
 struct iser_conn {
        struct ib_conn               ib_conn;
@@ -519,7 +518,6 @@ struct iser_conn {
        struct iser_rx_desc          *rx_descs;
        u32                          num_rx_descs;
        unsigned short               scsi_sg_tablesize;
-       unsigned int                 scsi_max_sectors;
        bool                         snd_w_inv;
 };
 
index 8ae7a3beddb728ee22825e28382b37bc467bbe94..6a9d1cb548ee8f7f34cfe1a1f9ad7c54de133271 100644 (file)
@@ -707,18 +707,7 @@ iser_calc_scsi_params(struct iser_conn *iser_conn,
        sup_sg_tablesize = min_t(unsigned, ISCSI_ISER_MAX_SG_TABLESIZE,
                                 device->ib_device->attrs.max_fast_reg_page_list_len);
 
-       if (sg_tablesize > sup_sg_tablesize) {
-               sg_tablesize = sup_sg_tablesize;
-               iser_conn->scsi_max_sectors = sg_tablesize * SIZE_4K / 512;
-       } else {
-               iser_conn->scsi_max_sectors = max_sectors;
-       }
-
-       iser_conn->scsi_sg_tablesize = sg_tablesize;
-
-       iser_dbg("iser_conn %p, sg_tablesize %u, max_sectors %u\n",
-                iser_conn, iser_conn->scsi_sg_tablesize,
-                iser_conn->scsi_max_sectors);
+       iser_conn->scsi_sg_tablesize = min(sg_tablesize, sup_sg_tablesize);
 }
 
 /**
index 8ddc071231931157ec459a6db4a0947d7b05f539..79bf48477ddb104097471a7a6040bcef2dfa0533 100644 (file)
@@ -371,6 +371,7 @@ static struct srp_fr_pool *srp_create_fr_pool(struct ib_device *device,
        struct srp_fr_desc *d;
        struct ib_mr *mr;
        int i, ret = -EINVAL;
+       enum ib_mr_type mr_type;
 
        if (pool_size <= 0)
                goto err;
@@ -384,9 +385,13 @@ static struct srp_fr_pool *srp_create_fr_pool(struct ib_device *device,
        spin_lock_init(&pool->lock);
        INIT_LIST_HEAD(&pool->free_list);
 
+       if (device->attrs.device_cap_flags & IB_DEVICE_SG_GAPS_REG)
+               mr_type = IB_MR_TYPE_SG_GAPS;
+       else
+               mr_type = IB_MR_TYPE_MEM_REG;
+
        for (i = 0, d = &pool->desc[0]; i < pool->size; i++, d++) {
-               mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG,
-                                max_page_list_len);
+               mr = ib_alloc_mr(pd, mr_type, max_page_list_len);
                if (IS_ERR(mr)) {
                        ret = PTR_ERR(mr);
                        if (ret == -ENOMEM)
@@ -3694,6 +3699,12 @@ static int __init srp_init_module(void)
                indirect_sg_entries = cmd_sg_entries;
        }
 
+       if (indirect_sg_entries > SG_MAX_SEGMENTS) {
+               pr_warn("Clamping indirect_sg_entries to %u\n",
+                       SG_MAX_SEGMENTS);
+               indirect_sg_entries = SG_MAX_SEGMENTS;
+       }
+
        srp_remove_wq = create_workqueue("srp_remove");
        if (!srp_remove_wq) {
                ret = -ENOMEM;
index 0ea4efb3de6683ee2dca71b3b7e20bc7f5f1e761..ebb5e391b800e0bbbca2fd9da926a4ff191839e0 100644 (file)
@@ -30,8 +30,9 @@
 
 #include "cec-priv.h"
 
-static int cec_report_features(struct cec_adapter *adap, unsigned int la_idx);
-static int cec_report_phys_addr(struct cec_adapter *adap, unsigned int la_idx);
+static void cec_fill_msg_report_features(struct cec_adapter *adap,
+                                        struct cec_msg *msg,
+                                        unsigned int la_idx);
 
 /*
  * 400 ms is the time it takes for one 16 byte message to be
@@ -288,10 +289,10 @@ static void cec_data_cancel(struct cec_data *data)
 
        /* Mark it as an error */
        data->msg.tx_ts = ktime_get_ns();
-       data->msg.tx_status = CEC_TX_STATUS_ERROR |
-                             CEC_TX_STATUS_MAX_RETRIES;
+       data->msg.tx_status |= CEC_TX_STATUS_ERROR |
+                              CEC_TX_STATUS_MAX_RETRIES;
+       data->msg.tx_error_cnt++;
        data->attempts = 0;
-       data->msg.tx_error_cnt = 1;
        /* Queue transmitted message for monitoring purposes */
        cec_queue_msg_monitor(data->adap, &data->msg, 1);
 
@@ -851,7 +852,7 @@ static const u8 cec_msg_size[256] = {
        [CEC_MSG_REQUEST_ARC_TERMINATION] = 2 | DIRECTED,
        [CEC_MSG_TERMINATE_ARC] = 2 | DIRECTED,
        [CEC_MSG_REQUEST_CURRENT_LATENCY] = 4 | BCAST,
-       [CEC_MSG_REPORT_CURRENT_LATENCY] = 7 | BCAST,
+       [CEC_MSG_REPORT_CURRENT_LATENCY] = 6 | BCAST,
        [CEC_MSG_CDC_MESSAGE] = 2 | BCAST,
 };
 
@@ -1250,30 +1251,49 @@ configured:
                for (i = 1; i < las->num_log_addrs; i++)
                        las->log_addr[i] = CEC_LOG_ADDR_INVALID;
        }
+       for (i = las->num_log_addrs; i < CEC_MAX_LOG_ADDRS; i++)
+               las->log_addr[i] = CEC_LOG_ADDR_INVALID;
        adap->is_configured = true;
        adap->is_configuring = false;
        cec_post_state_event(adap);
-       mutex_unlock(&adap->lock);
 
+       /*
+        * Now post the Report Features and Report Physical Address broadcast
+        * messages. Note that these are non-blocking transmits, meaning that
+        * they are just queued up and once adap->lock is unlocked the main
+        * thread will kick in and start transmitting these.
+        *
+        * If after this function is done (but before one or more of these
+        * messages are actually transmitted) the CEC adapter is unconfigured,
+        * then any remaining messages will be dropped by the main thread.
+        */
        for (i = 0; i < las->num_log_addrs; i++) {
+               struct cec_msg msg = {};
+
                if (las->log_addr[i] == CEC_LOG_ADDR_INVALID ||
                    (las->flags & CEC_LOG_ADDRS_FL_CDC_ONLY))
                        continue;
 
-               /*
-                * Report Features must come first according
-                * to CEC 2.0
-                */
-               if (las->log_addr[i] != CEC_LOG_ADDR_UNREGISTERED)
-                       cec_report_features(adap, i);
-               cec_report_phys_addr(adap, i);
+               msg.msg[0] = (las->log_addr[i] << 4) | 0x0f;
+
+               /* Report Features must come first according to CEC 2.0 */
+               if (las->log_addr[i] != CEC_LOG_ADDR_UNREGISTERED &&
+                   adap->log_addrs.cec_version >= CEC_OP_CEC_VERSION_2_0) {
+                       cec_fill_msg_report_features(adap, &msg, i);
+                       cec_transmit_msg_fh(adap, &msg, NULL, false);
+               }
+
+               /* Report Physical Address */
+               cec_msg_report_physical_addr(&msg, adap->phys_addr,
+                                            las->primary_device_type[i]);
+               dprintk(2, "config: la %d pa %x.%x.%x.%x\n",
+                       las->log_addr[i],
+                       cec_phys_addr_exp(adap->phys_addr));
+               cec_transmit_msg_fh(adap, &msg, NULL, false);
        }
-       for (i = las->num_log_addrs; i < CEC_MAX_LOG_ADDRS; i++)
-               las->log_addr[i] = CEC_LOG_ADDR_INVALID;
-       mutex_lock(&adap->lock);
        adap->kthread_config = NULL;
-       mutex_unlock(&adap->lock);
        complete(&adap->config_completion);
+       mutex_unlock(&adap->lock);
        return 0;
 
 unconfigure:
@@ -1526,52 +1546,32 @@ EXPORT_SYMBOL_GPL(cec_s_log_addrs);
 
 /* High-level core CEC message handling */
 
-/* Transmit the Report Features message */
-static int cec_report_features(struct cec_adapter *adap, unsigned int la_idx)
+/* Fill in the Report Features message */
+static void cec_fill_msg_report_features(struct cec_adapter *adap,
+                                        struct cec_msg *msg,
+                                        unsigned int la_idx)
 {
-       struct cec_msg msg = { };
        const struct cec_log_addrs *las = &adap->log_addrs;
        const u8 *features = las->features[la_idx];
        bool op_is_dev_features = false;
        unsigned int idx;
 
-       /* This is 2.0 and up only */
-       if (adap->log_addrs.cec_version < CEC_OP_CEC_VERSION_2_0)
-               return 0;
-
        /* Report Features */
-       msg.msg[0] = (las->log_addr[la_idx] << 4) | 0x0f;
-       msg.len = 4;
-       msg.msg[1] = CEC_MSG_REPORT_FEATURES;
-       msg.msg[2] = adap->log_addrs.cec_version;
-       msg.msg[3] = las->all_device_types[la_idx];
+       msg->msg[0] = (las->log_addr[la_idx] << 4) | 0x0f;
+       msg->len = 4;
+       msg->msg[1] = CEC_MSG_REPORT_FEATURES;
+       msg->msg[2] = adap->log_addrs.cec_version;
+       msg->msg[3] = las->all_device_types[la_idx];
 
        /* Write RC Profiles first, then Device Features */
        for (idx = 0; idx < ARRAY_SIZE(las->features[0]); idx++) {
-               msg.msg[msg.len++] = features[idx];
+               msg->msg[msg->len++] = features[idx];
                if ((features[idx] & CEC_OP_FEAT_EXT) == 0) {
                        if (op_is_dev_features)
                                break;
                        op_is_dev_features = true;
                }
        }
-       return cec_transmit_msg(adap, &msg, false);
-}
-
-/* Transmit the Report Physical Address message */
-static int cec_report_phys_addr(struct cec_adapter *adap, unsigned int la_idx)
-{
-       const struct cec_log_addrs *las = &adap->log_addrs;
-       struct cec_msg msg = { };
-
-       /* Report Physical Address */
-       msg.msg[0] = (las->log_addr[la_idx] << 4) | 0x0f;
-       cec_msg_report_physical_addr(&msg, adap->phys_addr,
-                                    las->primary_device_type[la_idx]);
-       dprintk(2, "config: la %d pa %x.%x.%x.%x\n",
-               las->log_addr[la_idx],
-                       cec_phys_addr_exp(adap->phys_addr));
-       return cec_transmit_msg(adap, &msg, false);
 }
 
 /* Transmit the Feature Abort message */
@@ -1777,9 +1777,10 @@ static int cec_receive_notify(struct cec_adapter *adap, struct cec_msg *msg,
        }
 
        case CEC_MSG_GIVE_FEATURES:
-               if (adap->log_addrs.cec_version >= CEC_OP_CEC_VERSION_2_0)
-                       return cec_report_features(adap, la_idx);
-               return 0;
+               if (adap->log_addrs.cec_version < CEC_OP_CEC_VERSION_2_0)
+                       return cec_feature_abort(adap, msg);
+               cec_fill_msg_report_features(adap, &tx_cec_msg, la_idx);
+               return cec_transmit_msg(adap, &tx_cec_msg, false);
 
        default:
                /*
index bc5e8cfe7ca235134cfcaa5723f27fa146d5d0c5..8f11d7e459931bb5a8ed74569781dc83d4bd6422 100644 (file)
@@ -719,6 +719,9 @@ static void dvb_net_ule_check_crc(struct dvb_net_ule_handle *h,
                skb_copy_from_linear_data(h->priv->ule_skb, dest_addr,
                                          ETH_ALEN);
                skb_pull(h->priv->ule_skb, ETH_ALEN);
+       } else {
+               /* dest_addr buffer is only valid if h->priv->ule_dbit == 0 */
+               eth_zero_addr(dest_addr);
        }
 
        /* Handle ULE Extension Headers. */
@@ -750,16 +753,8 @@ static void dvb_net_ule_check_crc(struct dvb_net_ule_handle *h,
        if (!h->priv->ule_bridged) {
                skb_push(h->priv->ule_skb, ETH_HLEN);
                h->ethh = (struct ethhdr *)h->priv->ule_skb->data;
-               if (!h->priv->ule_dbit) {
-                       /*
-                        * dest_addr buffer is only valid if
-                        * h->priv->ule_dbit == 0
-                        */
-                       memcpy(h->ethh->h_dest, dest_addr, ETH_ALEN);
-                       eth_zero_addr(h->ethh->h_source);
-               } else /* zeroize source and dest */
-                       memset(h->ethh, 0, ETH_ALEN * 2);
-
+               memcpy(h->ethh->h_dest, dest_addr, ETH_ALEN);
+               eth_zero_addr(h->ethh->h_source);
                h->ethh->h_proto = htons(h->priv->ule_sndu_type);
        }
        /* else:  skb is in correct state; nothing to do. */
index b31fa6fae009171a8edda132b224a52631a24b39..b979ea148251deab48fbfca7a0141aa408010f5e 100644 (file)
@@ -655,6 +655,7 @@ config VIDEO_S5K6A3
 config VIDEO_S5K4ECGX
         tristate "Samsung S5K4ECGX sensor support"
         depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
+       select CRC32
         ---help---
           This is a V4L2 sensor-level driver for Samsung S5K4ECGX 5M
           camera sensor with an embedded SoC image signal processor.
index 59872b31f832cb7983337e1ce1290a5eea1aa36a..f4e92bdfe1926cb71be5a9dc29de7b7608903950 100644 (file)
@@ -2741,9 +2741,7 @@ static const struct v4l2_subdev_internal_ops smiapp_internal_ops = {
  * I2C Driver
  */
 
-#ifdef CONFIG_PM
-
-static int smiapp_suspend(struct device *dev)
+static int __maybe_unused smiapp_suspend(struct device *dev)
 {
        struct i2c_client *client = to_i2c_client(dev);
        struct v4l2_subdev *subdev = i2c_get_clientdata(client);
@@ -2768,7 +2766,7 @@ static int smiapp_suspend(struct device *dev)
        return 0;
 }
 
-static int smiapp_resume(struct device *dev)
+static int __maybe_unused smiapp_resume(struct device *dev)
 {
        struct i2c_client *client = to_i2c_client(dev);
        struct v4l2_subdev *subdev = i2c_get_clientdata(client);
@@ -2783,13 +2781,6 @@ static int smiapp_resume(struct device *dev)
        return rval;
 }
 
-#else
-
-#define smiapp_suspend NULL
-#define smiapp_resume  NULL
-
-#endif /* CONFIG_PM */
-
 static struct smiapp_hwconfig *smiapp_get_hwconfig(struct device *dev)
 {
        struct smiapp_hwconfig *hwcfg;
@@ -2913,13 +2904,9 @@ static int smiapp_probe(struct i2c_client *client,
        if (IS_ERR(sensor->xshutdown))
                return PTR_ERR(sensor->xshutdown);
 
-       pm_runtime_enable(&client->dev);
-
-       rval = pm_runtime_get_sync(&client->dev);
-       if (rval < 0) {
-               rval = -ENODEV;
-               goto out_power_off;
-       }
+       rval = smiapp_power_on(&client->dev);
+       if (rval < 0)
+               return rval;
 
        rval = smiapp_identify_module(sensor);
        if (rval) {
@@ -3100,6 +3087,9 @@ static int smiapp_probe(struct i2c_client *client,
        if (rval < 0)
                goto out_media_entity_cleanup;
 
+       pm_runtime_set_active(&client->dev);
+       pm_runtime_get_noresume(&client->dev);
+       pm_runtime_enable(&client->dev);
        pm_runtime_set_autosuspend_delay(&client->dev, 1000);
        pm_runtime_use_autosuspend(&client->dev);
        pm_runtime_put_autosuspend(&client->dev);
@@ -3113,8 +3103,7 @@ out_cleanup:
        smiapp_cleanup(sensor);
 
 out_power_off:
-       pm_runtime_put(&client->dev);
-       pm_runtime_disable(&client->dev);
+       smiapp_power_off(&client->dev);
 
        return rval;
 }
@@ -3127,8 +3116,10 @@ static int smiapp_remove(struct i2c_client *client)
 
        v4l2_async_unregister_subdev(subdev);
 
-       pm_runtime_suspend(&client->dev);
        pm_runtime_disable(&client->dev);
+       if (!pm_runtime_status_suspended(&client->dev))
+               smiapp_power_off(&client->dev);
+       pm_runtime_set_suspended(&client->dev);
 
        for (i = 0; i < sensor->ssds_used; i++) {
                v4l2_device_unregister_subdev(&sensor->ssds[i].sd);
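
Replacing the #ifdef CONFIG_PM block with __maybe_unused means the
suspend/resume callbacks are compiled in every configuration (so they can no
longer bit-rot unnoticed) and are simply discarded when nothing references
them. The idiom in a self-contained form (hypothetical driver):

    #include <linux/pm.h>

    static int __maybe_unused foo_suspend(struct device *dev)
    {
            /* quiesce the hardware */
            return 0;
    }

    static int __maybe_unused foo_resume(struct device *dev)
    {
            /* bring the hardware back up */
            return 0;
    }

    /* SET_SYSTEM_SLEEP_PM_OPS() inside this macro expands to nothing
     * when CONFIG_PM_SLEEP is off, at which point __maybe_unused
     * silences the resulting unused-function warnings. */
    static SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);
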
index 3a0fe8cc64e94c5199730e00a6ce0e6fcb41ffb2..48646a7f3fb00c2e35944e656089a9031b2eaf01 100644 (file)
@@ -291,8 +291,12 @@ static void tvp5150_selmux(struct v4l2_subdev *sd)
        tvp5150_write(sd, TVP5150_OP_MODE_CTL, opmode);
        tvp5150_write(sd, TVP5150_VD_IN_SRC_SEL_1, input);
 
-       /* Svideo should enable YCrCb output and disable GPCL output
-        * For Composite and TV, it should be the reverse
+       /*
+        * Setup the FID/GLCO/VLK/HVLK and INTREQ/GPCL/VBLK output signals. For
+        * S-Video we output the vertical lock (VLK) signal on FID/GLCO/VLK/HVLK
+        * and set INTREQ/GPCL/VBLK to logic 0. For composite we output the
+        * field indicator (FID) signal on FID/GLCO/VLK/HVLK and set
+        * INTREQ/GPCL/VBLK to logic 1.
         */
        val = tvp5150_read(sd, TVP5150_MISC_CTL);
        if (val < 0) {
@@ -301,9 +305,9 @@ static void tvp5150_selmux(struct v4l2_subdev *sd)
        }
 
        if (decoder->input == TVP5150_SVIDEO)
-               val = (val & ~0x40) | 0x10;
+               val = (val & ~TVP5150_MISC_CTL_GPCL) | TVP5150_MISC_CTL_HVLK;
        else
-               val = (val & ~0x10) | 0x40;
+               val = (val & ~TVP5150_MISC_CTL_HVLK) | TVP5150_MISC_CTL_GPCL;
        tvp5150_write(sd, TVP5150_MISC_CTL, val);
 };
 
@@ -455,7 +459,12 @@ static const struct i2c_reg_value tvp5150_init_enable[] = {
        },{     /* Automatic offset and AGC enabled */
                TVP5150_ANAL_CHL_CTL, 0x15
        },{     /* Activate YCrCb output 0x9 or 0xd ? */
-               TVP5150_MISC_CTL, 0x6f
+               TVP5150_MISC_CTL, TVP5150_MISC_CTL_GPCL |
+                                 TVP5150_MISC_CTL_INTREQ_OE |
+                                 TVP5150_MISC_CTL_YCBCR_OE |
+                                 TVP5150_MISC_CTL_SYNC_OE |
+                                 TVP5150_MISC_CTL_VBLANK |
+                                 TVP5150_MISC_CTL_CLOCK_OE,
        },{     /* Activates video std autodetection for all standards */
                TVP5150_AUTOSW_MSK, 0x0
        },{     /* Default format: 0x47. For 4:2:2: 0x40 */
@@ -861,8 +870,6 @@ static int tvp5150_fill_fmt(struct v4l2_subdev *sd,
 
        f = &format->format;
 
-       tvp5150_reset(sd, 0);
-
        f->width = decoder->rect.width;
        f->height = decoder->rect.height / 2;
 
@@ -1051,21 +1058,27 @@ static const struct media_entity_operations tvp5150_sd_media_ops = {
 static int tvp5150_s_stream(struct v4l2_subdev *sd, int enable)
 {
        struct tvp5150 *decoder = to_tvp5150(sd);
-       /* Output format: 8-bit ITU-R BT.656 with embedded syncs */
-       int val = 0x09;
-
-       /* Output format: 8-bit 4:2:2 YUV with discrete sync */
-       if (decoder->mbus_type == V4L2_MBUS_PARALLEL)
-               val = 0x0d;
+       int val;
 
-       /* Initializes TVP5150 to its default values */
-       /* # set PCLK (27MHz) */
-       tvp5150_write(sd, TVP5150_CONF_SHARED_PIN, 0x00);
+       /* Enable or disable the video output signals. */
+       val = tvp5150_read(sd, TVP5150_MISC_CTL);
+       if (val < 0)
+               return val;
+
+       val &= ~(TVP5150_MISC_CTL_YCBCR_OE | TVP5150_MISC_CTL_SYNC_OE |
+                TVP5150_MISC_CTL_CLOCK_OE);
+
+       if (enable) {
+               /*
+                * Enable the YCbCr and clock outputs. In discrete sync mode
+                * (non-BT.656) additionally enable the sync outputs.
+                */
+               val |= TVP5150_MISC_CTL_YCBCR_OE | TVP5150_MISC_CTL_CLOCK_OE;
+               if (decoder->mbus_type == V4L2_MBUS_PARALLEL)
+                       val |= TVP5150_MISC_CTL_SYNC_OE;
+       }
 
-       if (enable)
-               tvp5150_write(sd, TVP5150_MISC_CTL, val);
-       else
-               tvp5150_write(sd, TVP5150_MISC_CTL, 0x00);
+       tvp5150_write(sd, TVP5150_MISC_CTL, val);
 
        return 0;
 }
@@ -1524,7 +1537,6 @@ static int tvp5150_probe(struct i2c_client *c,
                res = core->hdl.error;
                goto err;
        }
-       v4l2_ctrl_handler_setup(&core->hdl);
 
        /* Default is no cropping */
        core->rect.top = 0;
@@ -1535,6 +1547,8 @@ static int tvp5150_probe(struct i2c_client *c,
        core->rect.left = 0;
        core->rect.width = TVP5150_H_MAX;
 
+       tvp5150_reset(sd, 0);   /* Calls v4l2_ctrl_handler_setup() */
+
        res = v4l2_async_register_subdev(sd);
        if (res < 0)
                goto err;
index 25a994944918703f064eee85a6017ca7010dca4b..30a48c28d05ab5d46d9eff505005ebf3f90e6409 100644 (file)
@@ -9,6 +9,15 @@
 #define TVP5150_ANAL_CHL_CTL         0x01 /* Analog channel controls */
 #define TVP5150_OP_MODE_CTL          0x02 /* Operation mode controls */
 #define TVP5150_MISC_CTL             0x03 /* Miscellaneous controls */
+#define TVP5150_MISC_CTL_VBLK_GPCL     BIT(7)
+#define TVP5150_MISC_CTL_GPCL          BIT(6)
+#define TVP5150_MISC_CTL_INTREQ_OE     BIT(5)
+#define TVP5150_MISC_CTL_HVLK          BIT(4)
+#define TVP5150_MISC_CTL_YCBCR_OE      BIT(3)
+#define TVP5150_MISC_CTL_SYNC_OE       BIT(2)
+#define TVP5150_MISC_CTL_VBLANK                BIT(1)
+#define TVP5150_MISC_CTL_CLOCK_OE      BIT(0)
+
 #define TVP5150_AUTOSW_MSK           0x04 /* Autoswitch mask: TVP5150A / TVP5150AM */
 
 /* Reserved 05h */
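
The named bits make the register writes in tvp5150.c self-documenting, and
the arithmetic checks out against the magic numbers they replace:

    /* BIT(n) is (1UL << n) from <linux/bitops.h>, so the old init value
     *   TVP5150_MISC_CTL, 0x6f
     * decomposes exactly into the new named form:
     *   GPCL    (0x40) | INTREQ_OE (0x20) | YCBCR_OE (0x08) |
     *   SYNC_OE (0x04) | VBLANK    (0x02) | CLOCK_OE (0x01) == 0x6f
     * and the old selmux masks 0x40 and 0x10 are GPCL and HVLK. */
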
index 979634000597f79124befabb48af909d4b8f6a6e..d5c911c09e2b792e767970f32c463862617cec19 100644 (file)
@@ -308,9 +308,7 @@ static void cobalt_pci_iounmap(struct cobalt *cobalt, struct pci_dev *pci_dev)
 static void cobalt_free_msi(struct cobalt *cobalt, struct pci_dev *pci_dev)
 {
        free_irq(pci_dev->irq, (void *)cobalt);
-
-       if (cobalt->msi_enabled)
-               pci_disable_msi(pci_dev);
+       pci_free_irq_vectors(pci_dev);
 }
 
 static int cobalt_setup_pci(struct cobalt *cobalt, struct pci_dev *pci_dev,
@@ -387,14 +385,12 @@ static int cobalt_setup_pci(struct cobalt *cobalt, struct pci_dev *pci_dev,
           from being generated. */
        cobalt_set_interrupt(cobalt, false);
 
-       if (pci_enable_msi_range(pci_dev, 1, 1) < 1) {
+       if (pci_alloc_irq_vectors(pci_dev, 1, 1, PCI_IRQ_MSI) < 1) {
                cobalt_err("Could not enable MSI\n");
-               cobalt->msi_enabled = false;
                ret = -EIO;
                goto err_release;
        }
        msi_config_show(cobalt, pci_dev);
-       cobalt->msi_enabled = true;
 
        /* Register IRQ */
        if (request_irq(pci_dev->irq, cobalt_irq_handler, IRQF_SHARED,
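
The cobalt change above is the standard migration from pci_enable_msi_range() to the pci_alloc_irq_vectors() API: the PCI core now remembers which vector type was granted, so the driver-side msi_enabled flag goes away and teardown becomes a single pci_free_irq_vectors() call. A minimal sketch of the pattern for a hypothetical driver (my_irq_handler, priv and "mydrv" are placeholders):

	ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI);
	if (ret < 1)
		return ret < 0 ? ret : -EIO;

	ret = request_irq(pci_irq_vector(pdev, 0), my_irq_handler,
			  IRQF_SHARED, "mydrv", priv);

	/* ... and on teardown, no bookkeeping needed: */
	free_irq(pci_irq_vector(pdev, 0), priv);
	pci_free_irq_vectors(pdev);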
index ed00dc9d93995e03ddf128dfb476dbc73ae086a3..00f773ec359ad954a15859329fb541fd5ab2a848 100644 (file)
@@ -287,8 +287,6 @@ struct cobalt {
        u32 irq_none;
        u32 irq_full_fifo;
 
-       bool msi_enabled;
-
        /* omnitek dma */
        int dma_channels;
        int first_fifo_channel;
index 07fa08be9e994a3f7d5952251b73f871fe4ecdca..d54ebe7e02150f7240f58da51a32d025026bdf34 100644 (file)
@@ -97,14 +97,13 @@ struct pctv452e_state {
        u8 c;      /* transaction counter, wraps around...  */
        u8 initialized; /* set to 1 if 0x15 has been sent */
        u16 last_rc_key;
-
-       unsigned char data[80];
 };
 
 static int tt3650_ci_msg(struct dvb_usb_device *d, u8 cmd, u8 *data,
                         unsigned int write_len, unsigned int read_len)
 {
        struct pctv452e_state *state = (struct pctv452e_state *)d->priv;
+       u8 *buf;
        u8 id;
        unsigned int rlen;
        int ret;
@@ -114,36 +113,39 @@ static int tt3650_ci_msg(struct dvb_usb_device *d, u8 cmd, u8 *data,
                return -EIO;
        }
 
-       mutex_lock(&state->ca_mutex);
+       buf = kmalloc(64, GFP_KERNEL);
+       if (!buf)
+               return -ENOMEM;
+
        id = state->c++;
 
-       state->data[0] = SYNC_BYTE_OUT;
-       state->data[1] = id;
-       state->data[2] = cmd;
-       state->data[3] = write_len;
+       buf[0] = SYNC_BYTE_OUT;
+       buf[1] = id;
+       buf[2] = cmd;
+       buf[3] = write_len;
 
-       memcpy(state->data + 4, data, write_len);
+       memcpy(buf + 4, data, write_len);
 
        rlen = (read_len > 0) ? 64 : 0;
-       ret = dvb_usb_generic_rw(d, state->data, 4 + write_len,
-                                 state->data, rlen, /* delay_ms */ 0);
+       ret = dvb_usb_generic_rw(d, buf, 4 + write_len,
+                                 buf, rlen, /* delay_ms */ 0);
        if (0 != ret)
                goto failed;
 
        ret = -EIO;
-       if (SYNC_BYTE_IN != state->data[0] || id != state->data[1])
+       if (SYNC_BYTE_IN != buf[0] || id != buf[1])
                goto failed;
 
-       memcpy(data, state->data + 4, read_len);
+       memcpy(data, buf + 4, read_len);
 
-       mutex_unlock(&state->ca_mutex);
+       kfree(buf);
        return 0;
 
 failed:
        err("CI error %d; %02X %02X %02X -> %*ph.",
-            ret, SYNC_BYTE_OUT, id, cmd, 3, state->data);
+            ret, SYNC_BYTE_OUT, id, cmd, 3, buf);
 
-       mutex_unlock(&state->ca_mutex);
+       kfree(buf);
        return ret;
 }
 
@@ -410,53 +412,57 @@ static int pctv452e_i2c_msg(struct dvb_usb_device *d, u8 addr,
                                u8 *rcv_buf, u8 rcv_len)
 {
        struct pctv452e_state *state = (struct pctv452e_state *)d->priv;
+       u8 *buf;
        u8 id;
        int ret;
 
-       mutex_lock(&state->ca_mutex);
+       buf = kmalloc(64, GFP_KERNEL);
+       if (!buf)
+               return -ENOMEM;
+
        id = state->c++;
 
        ret = -EINVAL;
        if (snd_len > 64 - 7 || rcv_len > 64 - 7)
                goto failed;
 
-       state->data[0] = SYNC_BYTE_OUT;
-       state->data[1] = id;
-       state->data[2] = PCTV_CMD_I2C;
-       state->data[3] = snd_len + 3;
-       state->data[4] = addr << 1;
-       state->data[5] = snd_len;
-       state->data[6] = rcv_len;
+       buf[0] = SYNC_BYTE_OUT;
+       buf[1] = id;
+       buf[2] = PCTV_CMD_I2C;
+       buf[3] = snd_len + 3;
+       buf[4] = addr << 1;
+       buf[5] = snd_len;
+       buf[6] = rcv_len;
 
-       memcpy(state->data + 7, snd_buf, snd_len);
+       memcpy(buf + 7, snd_buf, snd_len);
 
-       ret = dvb_usb_generic_rw(d, state->data, 7 + snd_len,
-                                 state->data, /* rcv_len */ 64,
+       ret = dvb_usb_generic_rw(d, buf, 7 + snd_len,
+                                 buf, /* rcv_len */ 64,
                                  /* delay_ms */ 0);
        if (ret < 0)
                goto failed;
 
        /* TT USB protocol error. */
        ret = -EIO;
-       if (SYNC_BYTE_IN != state->data[0] || id != state->data[1])
+       if (SYNC_BYTE_IN != buf[0] || id != buf[1])
                goto failed;
 
        /* I2C device didn't respond as expected. */
        ret = -EREMOTEIO;
-       if (state->data[5] < snd_len || state->data[6] < rcv_len)
+       if (buf[5] < snd_len || buf[6] < rcv_len)
                goto failed;
 
-       memcpy(rcv_buf, state->data + 7, rcv_len);
-       mutex_unlock(&state->ca_mutex);
+       memcpy(rcv_buf, buf + 7, rcv_len);
 
+       kfree(buf);
        return rcv_len;
 
 failed:
        err("I2C error %d; %02X %02X  %02X %02X %02X -> %*ph",
             ret, SYNC_BYTE_OUT, id, addr << 1, snd_len, rcv_len,
-            7, state->data);
+            7, buf);
 
-       mutex_unlock(&state->ca_mutex);
+       kfree(buf);
        return ret;
 }
 
@@ -505,7 +511,7 @@ static u32 pctv452e_i2c_func(struct i2c_adapter *adapter)
 static int pctv452e_power_ctrl(struct dvb_usb_device *d, int i)
 {
        struct pctv452e_state *state = (struct pctv452e_state *)d->priv;
-       u8 *rx;
+       u8 *b0, *rx;
        int ret;
 
        info("%s: %d\n", __func__, i);
@@ -516,11 +522,12 @@ static int pctv452e_power_ctrl(struct dvb_usb_device *d, int i)
        if (state->initialized)
                return 0;
 
-       rx = kmalloc(PCTV_ANSWER_LEN, GFP_KERNEL);
-       if (!rx)
+       b0 = kmalloc(5 + PCTV_ANSWER_LEN, GFP_KERNEL);
+       if (!b0)
                return -ENOMEM;
 
-       mutex_lock(&state->ca_mutex);
+       rx = b0 + 5;
+
        /* hmm, where should this go? */
        ret = usb_set_interface(d->udev, 0, ISOC_INTERFACE_ALTERNATIVE);
        if (ret != 0)
@@ -528,66 +535,70 @@ static int pctv452e_power_ctrl(struct dvb_usb_device *d, int i)
                        __func__, ret);
 
        /* this is a one-time initialization, don't know where to put it */
-       state->data[0] = 0xaa;
-       state->data[1] = state->c++;
-       state->data[2] = PCTV_CMD_RESET;
-       state->data[3] = 1;
-       state->data[4] = 0;
+       b0[0] = 0xaa;
+       b0[1] = state->c++;
+       b0[2] = PCTV_CMD_RESET;
+       b0[3] = 1;
+       b0[4] = 0;
        /* reset board */
-       ret = dvb_usb_generic_rw(d, state->data, 5, rx, PCTV_ANSWER_LEN, 0);
+       ret = dvb_usb_generic_rw(d, b0, 5, rx, PCTV_ANSWER_LEN, 0);
        if (ret)
                goto ret;
 
-       state->data[1] = state->c++;
-       state->data[4] = 1;
+       b0[1] = state->c++;
+       b0[4] = 1;
        /* reset board (again?) */
-       ret = dvb_usb_generic_rw(d, state->data, 5, rx, PCTV_ANSWER_LEN, 0);
+       ret = dvb_usb_generic_rw(d, b0, 5, rx, PCTV_ANSWER_LEN, 0);
        if (ret)
                goto ret;
 
        state->initialized = 1;
 
 ret:
-       mutex_unlock(&state->ca_mutex);
-       kfree(rx);
+       kfree(b0);
        return ret;
 }
 
 static int pctv452e_rc_query(struct dvb_usb_device *d)
 {
        struct pctv452e_state *state = (struct pctv452e_state *)d->priv;
+       u8 *b, *rx;
        int ret, i;
        u8 id;
 
-       mutex_lock(&state->ca_mutex);
+       b = kmalloc(CMD_BUFFER_SIZE + PCTV_ANSWER_LEN, GFP_KERNEL);
+       if (!b)
+               return -ENOMEM;
+
+       rx = b + CMD_BUFFER_SIZE;
+
        id = state->c++;
 
        /* prepare command header  */
-       state->data[0] = SYNC_BYTE_OUT;
-       state->data[1] = id;
-       state->data[2] = PCTV_CMD_IR;
-       state->data[3] = 0;
+       b[0] = SYNC_BYTE_OUT;
+       b[1] = id;
+       b[2] = PCTV_CMD_IR;
+       b[3] = 0;
 
        /* send ir request */
-       ret = dvb_usb_generic_rw(d, state->data, 4,
-                                state->data, PCTV_ANSWER_LEN, 0);
+       ret = dvb_usb_generic_rw(d, b, 4, rx, PCTV_ANSWER_LEN, 0);
        if (ret != 0)
                goto ret;
 
        if (debug > 3) {
-               info("%s: read: %2d: %*ph: ", __func__, ret, 3, state->data);
-               for (i = 0; (i < state->data[3]) && ((i + 3) < PCTV_ANSWER_LEN); i++)
-                       info(" %02x", state->data[i + 3]);
+               info("%s: read: %2d: %*ph: ", __func__, ret, 3, rx);
+               for (i = 0; (i < rx[3]) && ((i+3) < PCTV_ANSWER_LEN); i++)
+                       info(" %02x", rx[i+3]);
 
                info("\n");
        }
 
-       if ((state->data[3] == 9) &&  (state->data[12] & 0x01)) {
+       if ((rx[3] == 9) &&  (rx[12] & 0x01)) {
                /* got a "press" event */
-               state->last_rc_key = RC_SCANCODE_RC5(state->data[7], state->data[6]);
+               state->last_rc_key = RC_SCANCODE_RC5(rx[7], rx[6]);
                if (debug > 2)
                        info("%s: cmd=0x%02x sys=0x%02x\n",
-                               __func__, state->data[6], state->data[7]);
+                               __func__, rx[6], rx[7]);
 
                rc_keydown(d->rc_dev, RC_TYPE_RC5, state->last_rc_key, 0);
        } else if (state->last_rc_key) {
@@ -595,7 +606,7 @@ static int pctv452e_rc_query(struct dvb_usb_device *d)
                state->last_rc_key = 0;
        }
 ret:
-       mutex_unlock(&state->ca_mutex);
+       kfree(b);
        return ret;
 }
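
All of the pctv452e hunks apply one pattern: the transfer buffer moves out of the long-lived device state (state->data, serialized by ca_mutex) into a per-call kmalloc() allocation. USB transfer buffers must be DMA-able, which forbids stack buffers outright (they cannot be DMA-mapped at all with CONFIG_VMAP_STACK, new in v4.9), and a private heap buffer per transaction also removes the need for the mutex. Each converted function reduces to this sketch:

	u8 *buf = kmalloc(64, GFP_KERNEL);	/* 64 = device packet size */
	if (!buf)
		return -ENOMEM;

	/* build the command header in buf[], then: */
	ret = dvb_usb_generic_rw(d, buf, wlen, buf, rlen, 0);

	kfree(buf);
	return ret;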
 
index a0547dbf980645104d862fdc39e2ad740107ad15..76382c858c35435b98e061a7dda49d5dacad9585 100644 (file)
@@ -330,7 +330,7 @@ static int h_memstick_read_dev_id(struct memstick_dev *card,
        struct ms_id_register id_reg;
 
        if (!(*mrq)) {
-               memstick_init_req(&card->current_mrq, MS_TPC_READ_REG, NULL,
+               memstick_init_req(&card->current_mrq, MS_TPC_READ_REG, &id_reg,
                                  sizeof(struct ms_id_register));
                *mrq = &card->current_mrq;
                return 0;
index b44306b886cb6d7a383abae4d985cd18ea1d48e3..73db08558e4dd6d100d44e04cd58649e63ee94e8 100644 (file)
@@ -3354,10 +3354,11 @@ int dw_mci_runtime_resume(struct device *dev)
 
                if (!slot)
                        continue;
-               if (slot->mmc->pm_flags & MMC_PM_KEEP_POWER) {
+               if (slot->mmc->pm_flags & MMC_PM_KEEP_POWER)
                        dw_mci_set_ios(slot->mmc, &slot->mmc->ios);
-                       dw_mci_setup_bus(slot, true);
-               }
+
+               /* Force setup bus to guarantee available clock output */
+               dw_mci_setup_bus(slot, true);
        }
 
        /* Now that slots are all setup, we can enable card detect */
index 37300634b7d2c853a05f5dcecce6b9d46d187b59..c123488266ce74883ed8ba972b43103d136bb66e 100644 (file)
@@ -1092,6 +1092,7 @@ static int byt_pin_config_get(struct pinctrl_dev *pctl_dev, unsigned int offset,
        enum pin_config_param param = pinconf_to_config_param(*config);
        void __iomem *conf_reg = byt_gpio_reg(vg, offset, BYT_CONF0_REG);
        void __iomem *val_reg = byt_gpio_reg(vg, offset, BYT_VAL_REG);
+       void __iomem *db_reg = byt_gpio_reg(vg, offset, BYT_DEBOUNCE_REG);
        unsigned long flags;
        u32 conf, pull, val, debounce;
        u16 arg = 0;
@@ -1128,7 +1129,7 @@ static int byt_pin_config_get(struct pinctrl_dev *pctl_dev, unsigned int offset,
                        return -EINVAL;
 
                raw_spin_lock_irqsave(&vg->lock, flags);
-               debounce = readl(byt_gpio_reg(vg, offset, BYT_DEBOUNCE_REG));
+               debounce = readl(db_reg);
                raw_spin_unlock_irqrestore(&vg->lock, flags);
 
                switch (debounce & BYT_DEBOUNCE_PULSE_MASK) {
@@ -1176,6 +1177,7 @@ static int byt_pin_config_set(struct pinctrl_dev *pctl_dev,
        unsigned int param, arg;
        void __iomem *conf_reg = byt_gpio_reg(vg, offset, BYT_CONF0_REG);
        void __iomem *val_reg = byt_gpio_reg(vg, offset, BYT_VAL_REG);
+       void __iomem *db_reg = byt_gpio_reg(vg, offset, BYT_DEBOUNCE_REG);
        unsigned long flags;
        u32 conf, val, debounce;
        int i, ret = 0;
@@ -1238,36 +1240,40 @@ static int byt_pin_config_set(struct pinctrl_dev *pctl_dev,
 
                        break;
                case PIN_CONFIG_INPUT_DEBOUNCE:
-                       debounce = readl(byt_gpio_reg(vg, offset,
-                                                     BYT_DEBOUNCE_REG));
-                       conf &= ~BYT_DEBOUNCE_PULSE_MASK;
+                       debounce = readl(db_reg);
+                       debounce &= ~BYT_DEBOUNCE_PULSE_MASK;
 
                        switch (arg) {
+                       case 0:
+                               conf &= BYT_DEBOUNCE_EN;
+                               break;
                        case 375:
-                               conf |= BYT_DEBOUNCE_PULSE_375US;
+                               debounce |= BYT_DEBOUNCE_PULSE_375US;
                                break;
                        case 750:
-                               conf |= BYT_DEBOUNCE_PULSE_750US;
+                               debounce |= BYT_DEBOUNCE_PULSE_750US;
                                break;
                        case 1500:
-                               conf |= BYT_DEBOUNCE_PULSE_1500US;
+                               debounce |= BYT_DEBOUNCE_PULSE_1500US;
                                break;
                        case 3000:
-                               conf |= BYT_DEBOUNCE_PULSE_3MS;
+                               debounce |= BYT_DEBOUNCE_PULSE_3MS;
                                break;
                        case 6000:
-                               conf |= BYT_DEBOUNCE_PULSE_6MS;
+                               debounce |= BYT_DEBOUNCE_PULSE_6MS;
                                break;
                        case 12000:
-                               conf |= BYT_DEBOUNCE_PULSE_12MS;
+                               debounce |= BYT_DEBOUNCE_PULSE_12MS;
                                break;
                        case 24000:
-                               conf |= BYT_DEBOUNCE_PULSE_24MS;
+                               debounce |= BYT_DEBOUNCE_PULSE_24MS;
                                break;
                        default:
                                ret = -EINVAL;
                        }
 
+                       if (!ret)
+                               writel(debounce, db_reg);
                        break;
                default:
                        ret = -ENOTSUPP;
@@ -1617,6 +1623,8 @@ static void byt_gpio_irq_handler(struct irq_desc *desc)
 
 static void byt_gpio_irq_init_hw(struct byt_gpio *vg)
 {
+       struct gpio_chip *gc = &vg->chip;
+       struct device *dev = &vg->pdev->dev;
        void __iomem *reg;
        u32 base, value;
        int i;
@@ -1638,10 +1646,12 @@ static void byt_gpio_irq_init_hw(struct byt_gpio *vg)
                }
 
                value = readl(reg);
-               if ((value & BYT_PIN_MUX) == byt_get_gpio_mux(vg, i) &&
-                   !(value & BYT_DIRECT_IRQ_EN)) {
+               if (value & BYT_DIRECT_IRQ_EN) {
+                       clear_bit(i, gc->irq_valid_mask);
+                       dev_dbg(dev, "excluding GPIO %d from IRQ domain\n", i);
+               } else if ((value & BYT_PIN_MUX) == byt_get_gpio_mux(vg, i)) {
                        byt_gpio_clear_triggering(vg, i);
-                       dev_dbg(&vg->pdev->dev, "disabling GPIO %d\n", i);
+                       dev_dbg(dev, "disabling GPIO %d\n", i);
                }
        }
 
@@ -1680,6 +1690,7 @@ static int byt_gpio_probe(struct byt_gpio *vg)
        gc->can_sleep   = false;
        gc->parent      = &vg->pdev->dev;
        gc->ngpio       = vg->soc_data->npins;
+       gc->irq_need_valid_mask = true;
 
 #ifdef CONFIG_PM_SLEEP
        vg->saved_context = devm_kcalloc(&vg->pdev->dev, gc->ngpio,
index 59cb7a6fc5bef316d042f93da72792edca2ea8d9..901b356b09d71679a2b4a03f7cd57b30a22fa6f4 100644 (file)
@@ -19,7 +19,7 @@
 
 #define BXT_PAD_OWN    0x020
 #define BXT_HOSTSW_OWN 0x080
-#define BXT_PADCFGLOCK 0x090
+#define BXT_PADCFGLOCK 0x060
 #define BXT_GPI_IE     0x110
 
 #define BXT_COMMUNITY(s, e)                            \
index 1e139672f1af9da0fa7ff4af1a919395e2ea6957..6df35dcb29aea68c0ddec6cbd29bb1c9a3abd56c 100644 (file)
@@ -353,6 +353,21 @@ static int intel_pinmux_set_mux(struct pinctrl_dev *pctldev, unsigned function,
        return 0;
 }
 
+static void __intel_gpio_set_direction(void __iomem *padcfg0, bool input)
+{
+       u32 value;
+
+       value = readl(padcfg0);
+       if (input) {
+               value &= ~PADCFG0_GPIORXDIS;
+               value |= PADCFG0_GPIOTXDIS;
+       } else {
+               value &= ~PADCFG0_GPIOTXDIS;
+               value |= PADCFG0_GPIORXDIS;
+       }
+       writel(value, padcfg0);
+}
+
 static int intel_gpio_request_enable(struct pinctrl_dev *pctldev,
                                     struct pinctrl_gpio_range *range,
                                     unsigned pin)
@@ -375,11 +390,11 @@ static int intel_gpio_request_enable(struct pinctrl_dev *pctldev,
        /* Disable SCI/SMI/NMI generation */
        value &= ~(PADCFG0_GPIROUTIOXAPIC | PADCFG0_GPIROUTSCI);
        value &= ~(PADCFG0_GPIROUTSMI | PADCFG0_GPIROUTNMI);
-       /* Disable TX buffer and enable RX (this will be input) */
-       value &= ~PADCFG0_GPIORXDIS;
-       value |= PADCFG0_GPIOTXDIS;
        writel(value, padcfg0);
 
+       /* Disable TX buffer and enable RX (this will be input) */
+       __intel_gpio_set_direction(padcfg0, true);
+
        raw_spin_unlock_irqrestore(&pctrl->lock, flags);
 
        return 0;
@@ -392,18 +407,11 @@ static int intel_gpio_set_direction(struct pinctrl_dev *pctldev,
        struct intel_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
        void __iomem *padcfg0;
        unsigned long flags;
-       u32 value;
 
        raw_spin_lock_irqsave(&pctrl->lock, flags);
 
        padcfg0 = intel_get_padcfg(pctrl, pin, PADCFG0);
-
-       value = readl(padcfg0);
-       if (input)
-               value |= PADCFG0_GPIOTXDIS;
-       else
-               value &= ~PADCFG0_GPIOTXDIS;
-       writel(value, padcfg0);
+       __intel_gpio_set_direction(padcfg0, input);
 
        raw_spin_unlock_irqrestore(&pctrl->lock, flags);
 
index c3928aa3fefa9a1d24b0214e877bbac2bc15f67e..e0bca4df2a2f3188da0d559a29013893a5bea528 100644 (file)
@@ -253,9 +253,8 @@ static const unsigned int uart_tx_ao_a_pins[]       = { PIN(GPIOAO_0, 0) };
 static const unsigned int uart_rx_ao_a_pins[]  = { PIN(GPIOAO_1, 0) };
 static const unsigned int uart_cts_ao_a_pins[] = { PIN(GPIOAO_2, 0) };
 static const unsigned int uart_rts_ao_a_pins[] = { PIN(GPIOAO_3, 0) };
-static const unsigned int uart_tx_ao_b_pins[]  = { PIN(GPIOAO_0, 0) };
-static const unsigned int uart_rx_ao_b_pins[]  = { PIN(GPIOAO_1, 0),
-                                                   PIN(GPIOAO_5, 0) };
+static const unsigned int uart_tx_ao_b_pins[]  = { PIN(GPIOAO_4, 0) };
+static const unsigned int uart_rx_ao_b_pins[]  = { PIN(GPIOAO_5, 0) };
 static const unsigned int uart_cts_ao_b_pins[] = { PIN(GPIOAO_2, 0) };
 static const unsigned int uart_rts_ao_b_pins[] = { PIN(GPIOAO_3, 0) };
 
@@ -498,7 +497,7 @@ static struct meson_pmx_group meson_gxbb_aobus_groups[] = {
        GPIO_GROUP(GPIOAO_13, 0),
 
        /* bank AO */
-       GROUP(uart_tx_ao_b,     0,      26),
+       GROUP(uart_tx_ao_b,     0,      24),
        GROUP(uart_rx_ao_b,     0,      25),
        GROUP(uart_tx_ao_a,     0,      12),
        GROUP(uart_rx_ao_a,     0,      11),
index 25694f7094c714bbf35eee2ae7b51e2b4ce5b1e9..b69743b07a1d591ace36d410583231319234d4f0 100644 (file)
@@ -214,9 +214,8 @@ static const unsigned int uart_tx_ao_a_pins[]       = { PIN(GPIOAO_0, 0) };
 static const unsigned int uart_rx_ao_a_pins[]  = { PIN(GPIOAO_1, 0) };
 static const unsigned int uart_cts_ao_a_pins[] = { PIN(GPIOAO_2, 0) };
 static const unsigned int uart_rts_ao_a_pins[] = { PIN(GPIOAO_3, 0) };
-static const unsigned int uart_tx_ao_b_pins[]  = { PIN(GPIOAO_0, 0) };
-static const unsigned int uart_rx_ao_b_pins[]  = { PIN(GPIOAO_1, 0),
-                                                   PIN(GPIOAO_5, 0) };
+static const unsigned int uart_tx_ao_b_pins[]  = { PIN(GPIOAO_4, 0) };
+static const unsigned int uart_rx_ao_b_pins[]  = { PIN(GPIOAO_5, 0) };
 static const unsigned int uart_cts_ao_b_pins[] = { PIN(GPIOAO_2, 0) };
 static const unsigned int uart_rts_ao_b_pins[] = { PIN(GPIOAO_3, 0) };
 
@@ -409,7 +408,7 @@ static struct meson_pmx_group meson_gxl_aobus_groups[] = {
        GPIO_GROUP(GPIOAO_9, 0),
 
        /* bank AO */
-       GROUP(uart_tx_ao_b,     0,      26),
+       GROUP(uart_tx_ao_b,     0,      24),
        GROUP(uart_rx_ao_b,     0,      25),
        GROUP(uart_tx_ao_a,     0,      12),
        GROUP(uart_rx_ao_a,     0,      11),
index c9a146948192dba19ca5da1587791c25b315d628..537b52055756645a8f225dd7e96b191d7d841e96 100644 (file)
@@ -202,6 +202,8 @@ static void amd_gpio_dbg_show(struct seq_file *s, struct gpio_chip *gc)
                        i = 128;
                        pin_num = AMD_GPIO_PINS_BANK2 + i;
                        break;
+               default:
+                       return;
                }
 
                for (; i < pin_num; i++) {
index aa8bd9794683b715013c82aa9220d11cfb0ea595..96686336e3a396254b9473f01f1776e0297301ce 100644 (file)
@@ -561,7 +561,7 @@ static const int ether_rgmii_muxvals[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                                          0, 0, 0, 0};
 static const unsigned ether_rmii_pins[] = {30, 31, 32, 33, 34, 35, 36, 37, 39,
                                           41, 42, 45};
-static const int ether_rmii_muxvals[] = {1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1};
+static const int ether_rmii_muxvals[] = {0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1};
 static const unsigned i2c0_pins[] = {63, 64};
 static const int i2c0_muxvals[] = {0, 0};
 static const unsigned i2c1_pins[] = {65, 66};
index 410741acb3c92dabe36417800f564a943c5d42ec..f46ece2ce3c4d48086c73b0e2d0c63ee1fe35893 100644 (file)
@@ -813,6 +813,7 @@ static void ideapad_acpi_notify(acpi_handle handle, u32 event, void *data)
                        case 8:
                        case 7:
                        case 6:
+                       case 1:
                                ideapad_input_report(priv, vpc_bit);
                                break;
                        case 5:
index 1fc0de870ff826e8b90956ab557cc83008e1ce68..361770568ad03a6e7a3bc7e6d579ccacae1725b7 100644 (file)
@@ -77,7 +77,7 @@ static int mfld_pb_probe(struct platform_device *pdev)
 
        input_set_capability(input, EV_KEY, KEY_POWER);
 
-       error = request_threaded_irq(irq, NULL, mfld_pb_isr, 0,
+       error = request_threaded_irq(irq, NULL, mfld_pb_isr, IRQF_ONESHOT,
                                     DRIVER_NAME, input);
        if (error) {
                dev_err(&pdev->dev, "Unable to request irq %d for mfld power"
index 97b4c3a219c0c79f3a3ed9359bc30bb13a439ce6..25f15df5c2d7b3c37b82e099f301831c403caa7d 100644 (file)
@@ -326,7 +326,7 @@ static int __init mlxplat_init(void)
        return 0;
 
 fail_platform_mux_register:
-       for (i--; i > 0 ; i--)
+       while (--i >= 0)
                platform_device_unregister(priv->pdev_mux[i]);
        platform_device_unregister(priv->pdev_i2c);
 fail_alloc:
index cbf4d83a727106ee0f7e42ca1b868616d994c0c3..25b176996cb793a789214a1a1237910b01cd3673 100644 (file)
@@ -139,7 +139,7 @@ static acpi_status s3_wmi_attach_spi_device(acpi_handle handle,
 
 static int s3_wmi_check_platform_device(struct device *dev, void *data)
 {
-       struct acpi_device *adev, *ts_adev;
+       struct acpi_device *adev, *ts_adev = NULL;
        acpi_handle handle;
        acpi_status status;
 
@@ -244,13 +244,11 @@ static int s3_wmi_remove(struct platform_device *device)
        return 0;
 }
 
-#ifdef CONFIG_PM
-static int s3_wmi_resume(struct device *dev)
+static int __maybe_unused s3_wmi_resume(struct device *dev)
 {
        s3_wmi_send_lid_state();
        return 0;
 }
-#endif
 static SIMPLE_DEV_PM_OPS(s3_wmi_pm, NULL, s3_wmi_resume);
 
 static struct platform_driver s3_wmi_driver = {
index c4a508a124dc2b9dca9e7147c3d8dedc69c5016d..541af5946203bf08e9a2c38a269661e1df81d417 100644 (file)
@@ -58,6 +58,14 @@ static LIST_HEAD(thermal_hwmon_list);
 
 static DEFINE_MUTEX(thermal_hwmon_list_lock);
 
+static ssize_t
+name_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+       struct thermal_hwmon_device *hwmon = dev_get_drvdata(dev);
+       return sprintf(buf, "%s\n", hwmon->type);
+}
+static DEVICE_ATTR_RO(name);
+
 static ssize_t
 temp_input_show(struct device *dev, struct device_attribute *attr, char *buf)
 {
@@ -157,12 +165,15 @@ int thermal_add_hwmon_sysfs(struct thermal_zone_device *tz)
 
        INIT_LIST_HEAD(&hwmon->tz_list);
        strlcpy(hwmon->type, tz->type, THERMAL_NAME_LENGTH);
-       hwmon->device = hwmon_device_register_with_info(NULL, hwmon->type,
-                                                       hwmon, NULL, NULL);
+       hwmon->device = hwmon_device_register(NULL);
        if (IS_ERR(hwmon->device)) {
                result = PTR_ERR(hwmon->device);
                goto free_mem;
        }
+       dev_set_drvdata(hwmon->device, hwmon);
+       result = device_create_file(hwmon->device, &dev_attr_name);
+       if (result)
+               goto free_mem;
 
  register_sys_interface:
        temp = kzalloc(sizeof(*temp), GFP_KERNEL);
@@ -211,8 +222,10 @@ int thermal_add_hwmon_sysfs(struct thermal_zone_device *tz)
  free_temp_mem:
        kfree(temp);
  unregister_name:
-       if (new_hwmon_device)
+       if (new_hwmon_device) {
+               device_remove_file(hwmon->device, &dev_attr_name);
                hwmon_device_unregister(hwmon->device);
+       }
  free_mem:
        if (new_hwmon_device)
                kfree(hwmon);
@@ -254,6 +267,7 @@ void thermal_remove_hwmon_sysfs(struct thermal_zone_device *tz)
        list_del(&hwmon->node);
        mutex_unlock(&thermal_hwmon_list_lock);
 
+       device_remove_file(hwmon->device, &dev_attr_name);
        hwmon_device_unregister(hwmon->device);
        kfree(hwmon);
 }
index c8823578a1b2afd3ae7a36c2f526fd071116876b..128d10282d1632693dc40819ff8b39485ba1e1de 100644 (file)
@@ -1270,6 +1270,10 @@ static int tce_iommu_attach_group(void *iommu_data,
        /* pr_debug("tce_vfio: Attaching group #%u to iommu %p\n",
                        iommu_group_id(iommu_group), iommu_group); */
        table_group = iommu_group_get_iommudata(iommu_group);
+       if (!table_group) {
+               ret = -ENODEV;
+               goto unlock_exit;
+       }
 
        if (tce_groups_attached(container) && (!table_group->ops ||
                        !table_group->ops->take_ownership ||
index bbbf588540ed71d82ed63deb355b77736a2a9628..ce5e63d2c66aac7d019c422ec294cab025e94e5e 100644 (file)
@@ -373,6 +373,7 @@ static void vhost_vsock_handle_rx_kick(struct vhost_work *work)
 
 static int vhost_vsock_start(struct vhost_vsock *vsock)
 {
+       struct vhost_virtqueue *vq;
        size_t i;
        int ret;
 
@@ -383,19 +384,20 @@ static int vhost_vsock_start(struct vhost_vsock *vsock)
                goto err;
 
        for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) {
-               struct vhost_virtqueue *vq = &vsock->vqs[i];
+               vq = &vsock->vqs[i];
 
                mutex_lock(&vq->mutex);
 
                if (!vhost_vq_access_ok(vq)) {
                        ret = -EFAULT;
-                       mutex_unlock(&vq->mutex);
                        goto err_vq;
                }
 
                if (!vq->private_data) {
                        vq->private_data = vsock;
-                       vhost_vq_init_access(vq);
+                       ret = vhost_vq_init_access(vq);
+                       if (ret)
+                               goto err_vq;
                }
 
                mutex_unlock(&vq->mutex);
@@ -405,8 +407,11 @@ static int vhost_vsock_start(struct vhost_vsock *vsock)
        return 0;
 
 err_vq:
+       vq->private_data = NULL;
+       mutex_unlock(&vq->mutex);
+
        for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) {
-               struct vhost_virtqueue *vq = &vsock->vqs[i];
+               vq = &vsock->vqs[i];
 
                mutex_lock(&vq->mutex);
                vq->private_data = NULL;
index f89245b8ba8e9a28483c4ff5edb03b80a1a9b2e3..68a113594808f220aa818424cd6e342897806a74 100644 (file)
@@ -163,17 +163,18 @@ void fb_dealloc_cmap(struct fb_cmap *cmap)
 
 int fb_copy_cmap(const struct fb_cmap *from, struct fb_cmap *to)
 {
-       int tooff = 0, fromoff = 0;
-       int size;
+       unsigned int tooff = 0, fromoff = 0;
+       size_t size;
 
        if (to->start > from->start)
                fromoff = to->start - from->start;
        else
                tooff = from->start - to->start;
-       size = to->len - tooff;
-       if (size > (int) (from->len - fromoff))
-               size = from->len - fromoff;
-       if (size <= 0)
+       if (fromoff >= from->len || tooff >= to->len)
+               return -EINVAL;
+
+       size = min_t(size_t, to->len - tooff, from->len - fromoff);
+       if (size == 0)
                return -EINVAL;
        size *= sizeof(u16);
 
@@ -187,17 +188,18 @@ int fb_copy_cmap(const struct fb_cmap *from, struct fb_cmap *to)
 
 int fb_cmap_to_user(const struct fb_cmap *from, struct fb_cmap_user *to)
 {
-       int tooff = 0, fromoff = 0;
-       int size;
+       unsigned int tooff = 0, fromoff = 0;
+       size_t size;
 
        if (to->start > from->start)
                fromoff = to->start - from->start;
        else
                tooff = from->start - to->start;
-       size = to->len - tooff;
-       if (size > (int) (from->len - fromoff))
-               size = from->len - fromoff;
-       if (size <= 0)
+       if (fromoff >= from->len || tooff >= to->len)
+               return -EINVAL;
+
+       size = min_t(size_t, to->len - tooff, from->len - fromoff);
+       if (size == 0)
                return -EINVAL;
        size *= sizeof(u16);
 
index d47a2fcef818f3cea1ce7f09160061b3c8d2f0a6..c71fde5fe835c48d1ce4611b29108f8cf7fb44f3 100644 (file)
@@ -59,6 +59,7 @@
 #define pr_fmt(fmt) "virtio-mmio: " fmt
 
 #include <linux/acpi.h>
+#include <linux/dma-mapping.h>
 #include <linux/highmem.h>
 #include <linux/interrupt.h>
 #include <linux/io.h>
@@ -498,6 +499,7 @@ static int virtio_mmio_probe(struct platform_device *pdev)
        struct virtio_mmio_device *vm_dev;
        struct resource *mem;
        unsigned long magic;
+       int rc;
 
        mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (!mem)
@@ -547,9 +549,25 @@ static int virtio_mmio_probe(struct platform_device *pdev)
        }
        vm_dev->vdev.id.vendor = readl(vm_dev->base + VIRTIO_MMIO_VENDOR_ID);
 
-       if (vm_dev->version == 1)
+       if (vm_dev->version == 1) {
                writel(PAGE_SIZE, vm_dev->base + VIRTIO_MMIO_GUEST_PAGE_SIZE);
 
+               rc = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
+               /*
+                * In the legacy case, ensure our coherently-allocated virtio
+                * ring will be at an address expressable as a 32-bit PFN.
+                */
+               if (!rc)
+                       dma_set_coherent_mask(&pdev->dev,
+                                             DMA_BIT_MASK(32 + PAGE_SHIFT));
+       } else {
+               rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+       }
+       if (rc)
+               rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+       if (rc)
+               dev_warn(&pdev->dev, "Failed to enable 64-bit or 32-bit DMA.  Trying to continue, but this might not work.\n");
+
        platform_set_drvdata(pdev, vm_dev);
 
        return register_virtio_device(&vm_dev->vdev);
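
This is the usual DMA-mask ladder: ask for 64-bit addressing first, constrain the coherent mask for legacy devices (the legacy register interface stores the ring address as a 32-bit PFN, hence 32 + PAGE_SHIFT bits), and fall back to 32-bit before warning. Reduced to its generic form for a hypothetical device:

	if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)) &&
	    dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)))
		dev_warn(dev, "no usable DMA mask, continuing anyway\n");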
index 409aeaa49246a0edd7c6da07ca38b58c3f876109..7e38ed79c3fc0f2c095164d480f75b31630a6694 100644 (file)
@@ -159,6 +159,13 @@ static bool vring_use_dma_api(struct virtio_device *vdev)
        if (xen_domain())
                return true;
 
+       /*
+        * On ARM-based machines, the DMA ops will do the right thing,
+        * so always use them with legacy devices.
+        */
+       if (IS_ENABLED(CONFIG_ARM) || IS_ENABLED(CONFIG_ARM64))
+               return !virtio_has_feature(vdev, VIRTIO_F_VERSION_1);
+
        return false;
 }
 
index f905d6eeb0482ee481cb24d9714bc6081a852d1e..f8afc6dcc29f2769694308092a4b543e5e0bed49 100644 (file)
@@ -414,9 +414,9 @@ dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
        if (map == SWIOTLB_MAP_ERROR)
                return DMA_ERROR_CODE;
 
+       dev_addr = xen_phys_to_bus(map);
        xen_dma_map_page(dev, pfn_to_page(map >> PAGE_SHIFT),
                                        dev_addr, map & ~PAGE_MASK, size, dir, attrs);
-       dev_addr = xen_phys_to_bus(map);
 
        /*
         * Ensure that the address returned is DMA'ble
@@ -575,13 +575,14 @@ xen_swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
                                sg_dma_len(sgl) = 0;
                                return 0;
                        }
+                       dev_addr = xen_phys_to_bus(map);
                        xen_dma_map_page(hwdev, pfn_to_page(map >> PAGE_SHIFT),
                                                dev_addr,
                                                map & ~PAGE_MASK,
                                                sg->length,
                                                dir,
                                                attrs);
-                       sg->dma_address = xen_phys_to_bus(map);
+                       sg->dma_address = dev_addr;
                } else {
                        /* we are not interested in the dma_addr returned by
                         * xen_dma_map_page, only in the potential cache flushes executed
index c2a377cdda2b03d6efe8768e4ef7894a06ebe853..83eab52fb3f69a75aa06a9f2a31760a384508f41 100644 (file)
@@ -38,6 +38,7 @@ config FS_DAX
        bool "Direct Access (DAX) support"
        depends on MMU
        depends on !(ARM || MIPS || SPARC)
+       select FS_IOMAP
        help
          Direct Access (DAX) can be used on memory-backed block devices.
          If the block device supports DAX and the filesystem supports DAX,
index ddcddfeaa03bd942e83738d34c4abaed06fa2709..3af2da5e64ce77fa8ae4b3f294c82882d350120f 100644 (file)
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -990,7 +990,6 @@ int __dax_zero_page_range(struct block_device *bdev, sector_t sector,
 }
 EXPORT_SYMBOL_GPL(__dax_zero_page_range);
 
-#ifdef CONFIG_FS_IOMAP
 static sector_t dax_iomap_sector(struct iomap *iomap, loff_t pos)
 {
        return iomap->blkno + (((pos & PAGE_MASK) - iomap->offset) >> 9);
@@ -1428,4 +1427,3 @@ int dax_iomap_pmd_fault(struct vm_area_struct *vma, unsigned long address,
 }
 EXPORT_SYMBOL_GPL(dax_iomap_pmd_fault);
 #endif /* CONFIG_FS_DAX_PMD */
-#endif /* CONFIG_FS_IOMAP */
index 36bea5adcabaa735056b20290ce23cdc7dfc0851..c634874e12d969fbd0b00ad8da745553168876e6 100644 (file)
@@ -1,6 +1,5 @@
 config EXT2_FS
        tristate "Second extended fs support"
-       select FS_IOMAP if FS_DAX
        help
          Ext2 is a standard Linux file system for hard disks.
 
index 7b90691e98c4f5fdd3b2d162285b00e71ac51b63..e38039fd96ff59ab59ce17407abcf26de4c5a950 100644 (file)
@@ -37,7 +37,6 @@ config EXT4_FS
        select CRC16
        select CRYPTO
        select CRYPTO_CRC32C
-       select FS_IOMAP if FS_DAX
        help
          This is the next generation of the ext3 filesystem.
 
index 8e7e61b28f31c037961c081d09a7be5f818013ef..87c9a9aacda3601e2686e239243f447728137943 100644 (file)
@@ -3179,6 +3179,8 @@ int proc_pid_readdir(struct file *file, struct dir_context *ctx)
             iter.tgid += 1, iter = next_tgid(ns, iter)) {
                char name[PROC_NUMBUF];
                int len;
+
+               cond_resched();
                if (!has_pid_permissions(ns, iter.task, 2))
                        continue;
 
index d0f8a38dfafacd8f3d524d1ff69ae8f621eea278..0186fe6d39f3b4d2e77497d4d34a7691204ae9fa 100644 (file)
@@ -74,6 +74,7 @@
 #include <linux/highmem.h>
 #include <linux/pagemap.h>
 #include <linux/uaccess.h>
+#include <linux/major.h>
 #include "internal.h"
 
 static struct kmem_cache *romfs_inode_cachep;
@@ -416,7 +417,22 @@ static void romfs_destroy_inode(struct inode *inode)
 static int romfs_statfs(struct dentry *dentry, struct kstatfs *buf)
 {
        struct super_block *sb = dentry->d_sb;
-       u64 id = huge_encode_dev(sb->s_bdev->bd_dev);
+       u64 id = 0;
+
+       /* When calling huge_encode_dev(),
+        * use sb->s_bdev->bd_dev when,
+        *   - CONFIG_ROMFS_ON_BLOCK defined
+        * use sb->s_dev when,
+        *   - CONFIG_ROMFS_ON_BLOCK undefined and
+        *   - CONFIG_ROMFS_ON_MTD defined
+        * leave id as 0 when,
+        *   - CONFIG_ROMFS_ON_BLOCK undefined and
+        *   - CONFIG_ROMFS_ON_MTD undefined
+        */
+       if (sb->s_bdev)
+               id = huge_encode_dev(sb->s_bdev->bd_dev);
+       else if (sb->s_dev)
+               id = huge_encode_dev(sb->s_dev);
 
        buf->f_type = ROMFS_MAGIC;
        buf->f_namelen = ROMFS_MAXFN;
@@ -489,6 +505,11 @@ static int romfs_fill_super(struct super_block *sb, void *data, int silent)
        sb->s_flags |= MS_RDONLY | MS_NOATIME;
        sb->s_op = &romfs_super_ops;
 
+#ifdef CONFIG_ROMFS_ON_MTD
+       /* Use same dev ID from the underlying mtdblock device */
+       if (sb->s_mtd)
+               sb->s_dev = MKDEV(MTD_BLOCK_MAJOR, sb->s_mtd->index);
+#endif
        /* read the image superblock and check it */
        rsb = kmalloc(512, GFP_KERNEL);
        if (!rsb)
index d96e2f30084bcfab552ffe3005af090abfe319c9..43953e03c35682723c6658dfe9b8cceed9de22ef 100644 (file)
@@ -63,6 +63,7 @@ struct userfaultfd_wait_queue {
        struct uffd_msg msg;
        wait_queue_t wq;
        struct userfaultfd_ctx *ctx;
+       bool waken;
 };
 
 struct userfaultfd_wake_range {
@@ -86,6 +87,12 @@ static int userfaultfd_wake_function(wait_queue_t *wq, unsigned mode,
        if (len && (start > uwq->msg.arg.pagefault.address ||
                    start + len <= uwq->msg.arg.pagefault.address))
                goto out;
+       WRITE_ONCE(uwq->waken, true);
+       /*
+        * The implicit smp_mb__before_spinlock in try_to_wake_up()
+        * renders uwq->waken visible to other CPUs before the task is
+        * woken.
+        */
        ret = wake_up_state(wq->private, mode);
        if (ret)
                /*
@@ -264,6 +271,7 @@ int handle_userfault(struct vm_fault *vmf, unsigned long reason)
        struct userfaultfd_wait_queue uwq;
        int ret;
        bool must_wait, return_to_userland;
+       long blocking_state;
 
        BUG_ON(!rwsem_is_locked(&mm->mmap_sem));
 
@@ -334,10 +342,13 @@ int handle_userfault(struct vm_fault *vmf, unsigned long reason)
        uwq.wq.private = current;
        uwq.msg = userfault_msg(vmf->address, vmf->flags, reason);
        uwq.ctx = ctx;
+       uwq.waken = false;
 
        return_to_userland =
                (vmf->flags & (FAULT_FLAG_USER|FAULT_FLAG_KILLABLE)) ==
                (FAULT_FLAG_USER|FAULT_FLAG_KILLABLE);
+       blocking_state = return_to_userland ? TASK_INTERRUPTIBLE :
+                        TASK_KILLABLE;
 
        spin_lock(&ctx->fault_pending_wqh.lock);
        /*
@@ -350,8 +361,7 @@ int handle_userfault(struct vm_fault *vmf, unsigned long reason)
         * following the spin_unlock to happen before the list_add in
         * __add_wait_queue.
         */
-       set_current_state(return_to_userland ? TASK_INTERRUPTIBLE :
-                         TASK_KILLABLE);
+       set_current_state(blocking_state);
        spin_unlock(&ctx->fault_pending_wqh.lock);
 
        must_wait = userfaultfd_must_wait(ctx, vmf->address, vmf->flags,
@@ -364,6 +374,29 @@ int handle_userfault(struct vm_fault *vmf, unsigned long reason)
                wake_up_poll(&ctx->fd_wqh, POLLIN);
                schedule();
                ret |= VM_FAULT_MAJOR;
+
+               /*
+                * False wakeups can originate even from rwsem before
+                * up_read() however userfaults will wait either for a
+                * targeted wakeup on the specific uwq waitqueue from
+                * wake_userfault() or for signals or for uffd
+                * release.
+                */
+               while (!READ_ONCE(uwq.waken)) {
+                       /*
+                        * This needs the full smp_store_mb()
+                        * guarantee as the state write must be
+                        * visible to other CPUs before reading
+                        * uwq.waken from other CPUs.
+                        */
+                       set_current_state(blocking_state);
+                       if (READ_ONCE(uwq.waken) ||
+                           READ_ONCE(ctx->released) ||
+                           (return_to_userland ? signal_pending(current) :
+                            fatal_signal_pending(current)))
+                               break;
+                       schedule();
+               }
        }
 
        __set_current_state(TASK_RUNNING);
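
The loop added above is the classic lost-wakeup-safe wait: the task state is set before the condition is re-read, and the waker (userfaultfd_wake_function above) sets uwq->waken before calling wake_up_state(), so one side is always guaranteed to observe the other. Its generic shape, with done standing in for the wake condition:

	while (!READ_ONCE(done)) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (READ_ONCE(done) || signal_pending(current))
			break;
		schedule();
	}
	__set_current_state(TASK_RUNNING);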
index d6d241f63b9f8e4c3a9310008f17b9439f7fa2ed..56814e8ae7ea91a4c9aeed59a2c6f9d2263fb2e4 100644 (file)
@@ -144,7 +144,7 @@ struct __drm_crtcs_state {
        struct drm_crtc *ptr;
        struct drm_crtc_state *state;
        struct drm_crtc_commit *commit;
-       s64 __user *out_fence_ptr;
+       s32 __user *out_fence_ptr;
 };
 
 struct __drm_connnectors_state {
index d026f5017c33cea2c9766b28b5ccb67d635fbe85..982c299e435a09703de12d32b4deec08a54d44e0 100644 (file)
@@ -73,5 +73,6 @@ extern void drm_kms_helper_hotplug_event(struct drm_device *dev);
 
 extern void drm_kms_helper_poll_disable(struct drm_device *dev);
 extern void drm_kms_helper_poll_enable(struct drm_device *dev);
+extern void drm_kms_helper_poll_enable_locked(struct drm_device *dev);
 
 #endif
index bf9991b20611a666d05445152d587446c5a834f5..137432386310aa8a9449d28f655a60b78bdd654a 100644 (file)
@@ -488,7 +488,7 @@ struct drm_mode_config {
        /**
         * @prop_out_fence_ptr: Sync File fd pointer representing the
         * outgoing fences for a CRTC. Userspace should provide a pointer to a
-        * value of type s64, and then cast that pointer to u64.
+        * value of type s32, and then cast that pointer to u64.
         */
        struct drm_property *prop_out_fence_ptr;
        /**
index 01033fadea4766d5e6efddc78ca595d68c021464..c1784c0b4f3585e0d20ca8253813c31d47f11c04 100644 (file)
@@ -284,7 +284,7 @@ extern void sparse_remove_one_section(struct zone *zone, struct mem_section *ms,
                unsigned long map_offset);
 extern struct page *sparse_decode_mem_map(unsigned long coded_mem_map,
                                          unsigned long pnum);
-extern int zone_can_shift(unsigned long pfn, unsigned long nr_pages,
-                         enum zone_type target);
+extern bool zone_can_shift(unsigned long pfn, unsigned long nr_pages,
+                         enum zone_type target, int *zone_shift);
 
 #endif /* __LINUX_MEMORY_HOTPLUG_H */
index 36d9896fbc1eb0d12e60682f15e96648c13ebf98..f4aac87adcc3555014f6215d6599b604c70388e6 100644 (file)
@@ -972,12 +972,16 @@ static __always_inline struct zoneref *next_zones_zonelist(struct zoneref *z,
  * @zonelist - The zonelist to search for a suitable zone
  * @highest_zoneidx - The zone index of the highest zone to return
  * @nodes - An optional nodemask to filter the zonelist with
- * @zone - The first suitable zone found is returned via this parameter
+ * @return - Zoneref pointer for the first suitable zone found (see below)
  *
  * This function returns the first zone at or below a given zone index that is
  * within the allowed nodemask. The zoneref returned is a cursor that can be
  * used to iterate the zonelist with next_zones_zonelist by advancing it by
  * one before calling.
+ *
+ * When no eligible zone is found, zoneref->zone is NULL (zoneref itself is
+ * never NULL). This may happen either genuinely, or due to concurrent nodemask
+ * update due to cpuset modification.
  */
 static inline struct zoneref *first_zones_zonelist(struct zonelist *zonelist,
                                        enum zone_type highest_zoneidx,
index aacca824a6aef4fcc4d2480aa9eeefd2fe82d6f9..0a3fadc32693a9cf869693f4c406eee5d168e36b 100644 (file)
@@ -110,6 +110,7 @@ extern int watchdog_user_enabled;
 extern int watchdog_thresh;
 extern unsigned long watchdog_enabled;
 extern unsigned long *watchdog_cpumask_bits;
+extern atomic_t watchdog_park_in_progress;
 #ifdef CONFIG_SMP
 extern int sysctl_softlockup_all_cpu_backtrace;
 extern int sysctl_hardlockup_all_cpu_backtrace;
index 0c729c3c85499a1e51f79ac7252d60d548787fb4..d9718378a8bee0b327d08c2e80a6fd3b5490b967 100644 (file)
@@ -194,8 +194,6 @@ struct platform_freeze_ops {
 };
 
 #ifdef CONFIG_SUSPEND
-extern suspend_state_t mem_sleep_default;
-
 /**
  * suspend_set_ops - set platform dependent suspend operations
  * @ops: The new suspend operations to set.
index 958a24d8fae794547c486b5b025f3815c96f82e7..b567e4452a4733de98b720e2c0d9060f21cc92e2 100644 (file)
@@ -352,6 +352,20 @@ static inline int ib_mtu_enum_to_int(enum ib_mtu mtu)
        }
 }
 
+static inline enum ib_mtu ib_mtu_int_to_enum(int mtu)
+{
+       if (mtu >= 4096)
+               return IB_MTU_4096;
+       else if (mtu >= 2048)
+               return IB_MTU_2048;
+       else if (mtu >= 1024)
+               return IB_MTU_1024;
+       else if (mtu >= 512)
+               return IB_MTU_512;
+       else
+               return IB_MTU_256;
+}
+
 enum ib_port_state {
        IB_PORT_NOP             = 0,
        IB_PORT_DOWN            = 1,
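
The new ib_mtu_int_to_enum() rounds a byte-count MTU down to the nearest IB enum step, which is what drivers need when mapping a netdev MTU onto an InfiniBand port attribute. For instance:

	ib_mtu_int_to_enum(9000);	/* IB_MTU_4096 */
	ib_mtu_int_to_enum(1500);	/* IB_MTU_1024, rounds down */
	ib_mtu_int_to_enum(256);	/* IB_MTU_256, the floor */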
index 3cbc327801d6dc625f3f7786730fb07d065ac760..c451eec42a83101a6eea0219788165600d128e49 100644 (file)
@@ -1665,14 +1665,15 @@ static inline void cec_msg_report_current_latency(struct cec_msg *msg,
                                                  __u8 audio_out_compensated,
                                                  __u8 audio_out_delay)
 {
-       msg->len = 7;
+       msg->len = 6;
        msg->msg[0] |= 0xf; /* broadcast */
        msg->msg[1] = CEC_MSG_REPORT_CURRENT_LATENCY;
        msg->msg[2] = phys_addr >> 8;
        msg->msg[3] = phys_addr & 0xff;
        msg->msg[4] = video_latency;
        msg->msg[5] = (low_latency_mode << 2) | audio_out_compensated;
-       msg->msg[6] = audio_out_delay;
+       if (audio_out_compensated == 3)
+               msg->msg[msg->len++] = audio_out_delay;
 }
 
 static inline void cec_ops_report_current_latency(const struct cec_msg *msg,
@@ -1686,7 +1687,10 @@ static inline void cec_ops_report_current_latency(const struct cec_msg *msg,
        *video_latency = msg->msg[4];
        *low_latency_mode = (msg->msg[5] >> 2) & 1;
        *audio_out_compensated = msg->msg[5] & 3;
-       *audio_out_delay = msg->msg[6];
+       if (*audio_out_compensated == 3 && msg->len >= 7)
+               *audio_out_delay = msg->msg[6];
+       else
+               *audio_out_delay = 0;
 }
 
 static inline void cec_msg_request_current_latency(struct cec_msg *msg,
index 82bdf5626859989085f831ee3a3a70c1b26ff066..bb68cb1b04ed3893faccac5eed815f50521f3813 100644 (file)
@@ -16,3 +16,4 @@ header-y += nes-abi.h
 header-y += ocrdma-abi.h
 header-y += hns-abi.h
 header-y += vmw_pvrdma-abi.h
+header-y += qedr-abi.h
index 48a19bda071b8db5396991dcd97cd9e0f3872ecf..d24eee12128fc5cb7d1f5612e85a7fb9b194520f 100644 (file)
@@ -30,7 +30,7 @@
  * SOFTWARE.
  */
 #ifndef CXGB3_ABI_USER_H
-#define CXBG3_ABI_USER_H
+#define CXGB3_ABI_USER_H
 
 #include <linux/types.h>
 
index 901c4fb46002e38c98394110c49aa50230d58180..08aa88dde7de806d4cb2b14fd93e87be8dd94501 100644 (file)
@@ -249,7 +249,7 @@ void panic(const char *fmt, ...)
                 * Delay timeout seconds before rebooting the machine.
                 * We can't use the "normal" timers since we just panicked.
                 */
-               pr_emerg("Rebooting in %d seconds..", panic_timeout);
+               pr_emerg("Rebooting in %d seconds..\n", panic_timeout);
 
                for (i = 0; i < panic_timeout * 1000; i += PANIC_TIMER_STEP) {
                        touch_nmi_watchdog();
index f67ceb7768b82ac4e183b22042f1f0784011262b..15e6baef5c73f90b6817c0b1c4e871ea40e30318 100644 (file)
@@ -46,7 +46,7 @@ static const char * const mem_sleep_labels[] = {
 const char *mem_sleep_states[PM_SUSPEND_MAX];
 
 suspend_state_t mem_sleep_current = PM_SUSPEND_FREEZE;
-suspend_state_t mem_sleep_default = PM_SUSPEND_MAX;
+static suspend_state_t mem_sleep_default = PM_SUSPEND_MEM;
 
 unsigned int pm_suspend_global_flags;
 EXPORT_SYMBOL_GPL(pm_suspend_global_flags);
@@ -168,7 +168,7 @@ void suspend_set_ops(const struct platform_suspend_ops *ops)
        }
        if (valid_state(PM_SUSPEND_MEM)) {
                mem_sleep_states[PM_SUSPEND_MEM] = mem_sleep_labels[PM_SUSPEND_MEM];
-               if (mem_sleep_default >= PM_SUSPEND_MEM)
+               if (mem_sleep_default == PM_SUSPEND_MEM)
                        mem_sleep_current = PM_SUSPEND_MEM;
        }
 
index 8dbaec0e4f7f079b87f50ea67c82341304387783..1aea594a54dbdac604ca950fdaf93508e5b6e6a7 100644 (file)
@@ -2475,6 +2475,7 @@ static int __do_proc_doulongvec_minmax(void *data, struct ctl_table *table, int
                                break;
                        if (neg)
                                continue;
+                       val = convmul * val / convdiv;
                        if ((min && val < *min) || (max && val > *max))
                                continue;
                        *i = val;
index 9d20d5dd298af25d0cd95635e217180601703959..4bbd38ec37886d3d104e3d37dc80d101ab3767ac 100644 (file)
@@ -128,10 +128,10 @@ static struct ucounts *get_ucounts(struct user_namespace *ns, kuid_t uid)
        struct hlist_head *hashent = ucounts_hashentry(ns, uid);
        struct ucounts *ucounts, *new;
 
-       spin_lock(&ucounts_lock);
+       spin_lock_irq(&ucounts_lock);
        ucounts = find_ucounts(ns, uid, hashent);
        if (!ucounts) {
-               spin_unlock(&ucounts_lock);
+               spin_unlock_irq(&ucounts_lock);
 
                new = kzalloc(sizeof(*new), GFP_KERNEL);
                if (!new)
@@ -141,7 +141,7 @@ static struct ucounts *get_ucounts(struct user_namespace *ns, kuid_t uid)
                new->uid = uid;
                atomic_set(&new->count, 0);
 
-               spin_lock(&ucounts_lock);
+               spin_lock_irq(&ucounts_lock);
                ucounts = find_ucounts(ns, uid, hashent);
                if (ucounts) {
                        kfree(new);
@@ -152,16 +152,18 @@ static struct ucounts *get_ucounts(struct user_namespace *ns, kuid_t uid)
        }
        if (!atomic_add_unless(&ucounts->count, 1, INT_MAX))
                ucounts = NULL;
-       spin_unlock(&ucounts_lock);
+       spin_unlock_irq(&ucounts_lock);
        return ucounts;
 }
 
 static void put_ucounts(struct ucounts *ucounts)
 {
+       unsigned long flags;
+
        if (atomic_dec_and_test(&ucounts->count)) {
-               spin_lock(&ucounts_lock);
+               spin_lock_irqsave(&ucounts_lock, flags);
                hlist_del_init(&ucounts->node);
-               spin_unlock(&ucounts_lock);
+               spin_unlock_irqrestore(&ucounts_lock, flags);
 
                kfree(ucounts);
        }
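
The rule applied here: once a spinlock can be taken with interrupts disabled (or from interrupt context), every acquisition must disable interrupts, or an interrupt arriving while the lock is held can deadlock on it. get_ucounts() runs in process context with interrupts known to be enabled, so plain _irq is enough; put_ucounts() can be reached with the interrupt state unknown, so it must save and restore:

	unsigned long flags;

	spin_lock_irq(&ucounts_lock);		/* IRQs known enabled */
	/* ... */
	spin_unlock_irq(&ucounts_lock);

	spin_lock_irqsave(&ucounts_lock, flags);	/* any IRQ state */
	/* ... */
	spin_unlock_irqrestore(&ucounts_lock, flags);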
index d4b0fa01cae39cd720661d7a62f50a7926f9db69..63177be0159e9493f6d6ade90efae743aaf117b7 100644 (file)
@@ -49,6 +49,8 @@ unsigned long *watchdog_cpumask_bits = cpumask_bits(&watchdog_cpumask);
 #define for_each_watchdog_cpu(cpu) \
        for_each_cpu_and((cpu), cpu_online_mask, &watchdog_cpumask)
 
+atomic_t watchdog_park_in_progress = ATOMIC_INIT(0);
+
 /*
  * The 'watchdog_running' variable is set to 1 when the watchdog threads
  * are registered/started and is set to 0 when the watchdog threads are
@@ -260,6 +262,9 @@ static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
        int duration;
        int softlockup_all_cpu_backtrace = sysctl_softlockup_all_cpu_backtrace;
 
+       if (atomic_read(&watchdog_park_in_progress) != 0)
+               return HRTIMER_NORESTART;
+
        /* kick the hardlockup detector */
        watchdog_interrupt_count();
 
@@ -467,12 +472,16 @@ static int watchdog_park_threads(void)
 {
        int cpu, ret = 0;
 
+       atomic_set(&watchdog_park_in_progress, 1);
+
        for_each_watchdog_cpu(cpu) {
                ret = kthread_park(per_cpu(softlockup_watchdog, cpu));
                if (ret)
                        break;
        }
 
+       atomic_set(&watchdog_park_in_progress, 0);
+
        return ret;
 }
 
index 84016c8aee6b5d2769495a8c6eee0b4ac559b1a6..12b8dd64078655dd9004d03caa8167da16b57cf5 100644 (file)
@@ -84,6 +84,9 @@ static void watchdog_overflow_callback(struct perf_event *event,
        /* Ensure the watchdog never gets throttled */
        event->hw.interrupts = 0;
 
+       if (atomic_read(&watchdog_park_in_progress) != 0)
+               return;
+
        if (__this_cpu_read(watchdog_nmi_touch) == true) {
                __this_cpu_write(watchdog_nmi_touch, false);
                return;
index 86c8911b0e3a6fff02b9e52faa11816cfe508362..a3e14ce92a5684a662c2c8f80f97e6fef95943b7 100644 (file)
@@ -144,4 +144,3 @@ int ioremap_page_range(unsigned long addr,
 
        return err;
 }
-EXPORT_SYMBOL_GPL(ioremap_page_range);
index 0b92d605fb69cc805a96c8333dab36174f755e22..84812a9fb16fbbd1409315ea3752fb9a1e3e39ef 100644 (file)
@@ -769,7 +769,7 @@ static void radix_tree_free_nodes(struct radix_tree_node *node)
                        struct radix_tree_node *old = child;
                        offset = child->offset + 1;
                        child = child->parent;
-                       WARN_ON_ONCE(!list_empty(&node->private_list));
+                       WARN_ON_ONCE(!list_empty(&old->private_list));
                        radix_tree_node_free(old);
                        if (old == entry_to_node(node))
                                return;
index 9a6bd6c8d55a6691047e516a46c2cf6b931b912d..5f3ad65c85de01fa6e4c8a07ef9494410bf2b133 100644 (file)
@@ -783,6 +783,12 @@ struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr,
 
        assert_spin_locked(pmd_lockptr(mm, pmd));
 
+       /*
+        * When we COW a devmap PMD entry, we split it into PTEs, so we should
+        * not be in this function with `flags & FOLL_COW` set.
+        */
+       WARN_ONCE(flags & FOLL_COW, "mm: In follow_devmap_pmd with FOLL_COW set");
+
        if (flags & FOLL_WRITE && !pmd_write(*pmd))
                return NULL;
 
@@ -1128,6 +1134,16 @@ out_unlock:
        return ret;
 }
 
+/*
+ * FOLL_FORCE can write to even unwritable pmd's, but only
+ * after we've gone through a COW cycle and they are dirty.
+ */
+static inline bool can_follow_write_pmd(pmd_t pmd, unsigned int flags)
+{
+       return pmd_write(pmd) ||
+              ((flags & FOLL_FORCE) && (flags & FOLL_COW) && pmd_dirty(pmd));
+}
+
 struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
                                   unsigned long addr,
                                   pmd_t *pmd,
@@ -1138,7 +1154,7 @@ struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
 
        assert_spin_locked(pmd_lockptr(mm, pmd));
 
-       if (flags & FOLL_WRITE && !pmd_write(*pmd))
+       if (flags & FOLL_WRITE && !can_follow_write_pmd(*pmd, flags))
                goto out;
 
        /* Avoid dumping huge zero page */
index a63a8f8326647b92bdc63810c8a93be96047f748..b822e158b319e8f2f02ecbfe76c31b6466be51f1 100644 (file)
@@ -4353,9 +4353,9 @@ static int mem_cgroup_do_precharge(unsigned long count)
                return ret;
        }
 
-       /* Try charges one by one with reclaim */
+       /* Try charges one by one with reclaim, but do not retry */
        while (count--) {
-               ret = try_charge(mc.to, GFP_KERNEL & ~__GFP_NORETRY, 1);
+               ret = try_charge(mc.to, GFP_KERNEL | __GFP_NORETRY, 1);
                if (ret)
                        return ret;
                mc.precharge++;
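
GFP_KERNEL never contains __GFP_NORETRY, so the removed "& ~__GFP_NORETRY" masked off a bit that was not set and the precharge loop kept retrying; the flag has to be ORed in to get the single-reclaim-pass behavior the new comment describes:

	gfp_t gfp = GFP_KERNEL | __GFP_NORETRY;	/* reclaim once, then fail */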
index e43142c15631fefdf5a605ced247a6429825252f..ca2723d4733849eab01b323a50e6b1bc609e308c 100644 (file)
@@ -1033,36 +1033,39 @@ static void node_states_set_node(int node, struct memory_notify *arg)
        node_set_state(node, N_MEMORY);
 }
 
-int zone_can_shift(unsigned long pfn, unsigned long nr_pages,
-                  enum zone_type target)
+bool zone_can_shift(unsigned long pfn, unsigned long nr_pages,
+                  enum zone_type target, int *zone_shift)
 {
        struct zone *zone = page_zone(pfn_to_page(pfn));
        enum zone_type idx = zone_idx(zone);
        int i;
 
+       *zone_shift = 0;
+
        if (idx < target) {
                /* pages must be at end of current zone */
                if (pfn + nr_pages != zone_end_pfn(zone))
-                       return 0;
+                       return false;
 
                /* no zones in use between current zone and target */
                for (i = idx + 1; i < target; i++)
                        if (zone_is_initialized(zone - idx + i))
-                               return 0;
+                               return false;
        }
 
        if (target < idx) {
                /* pages must be at beginning of current zone */
                if (pfn != zone->zone_start_pfn)
-                       return 0;
+                       return false;
 
                /* no zones in use between current zone and target */
                for (i = target + 1; i < idx; i++)
                        if (zone_is_initialized(zone - idx + i))
-                               return 0;
+                               return false;
        }
 
-       return target - idx;
+       *zone_shift = target - idx;
+       return true;
 }
 
 /* Must be protected by mem_hotplug_begin() */
@@ -1089,10 +1092,13 @@ int __ref online_pages(unsigned long pfn, unsigned long nr_pages, int online_typ
            !can_online_high_movable(zone))
                return -EINVAL;
 
-       if (online_type == MMOP_ONLINE_KERNEL)
-               zone_shift = zone_can_shift(pfn, nr_pages, ZONE_NORMAL);
-       else if (online_type == MMOP_ONLINE_MOVABLE)
-               zone_shift = zone_can_shift(pfn, nr_pages, ZONE_MOVABLE);
+       if (online_type == MMOP_ONLINE_KERNEL) {
+               if (!zone_can_shift(pfn, nr_pages, ZONE_NORMAL, &zone_shift))
+                       return -EINVAL;
+       } else if (online_type == MMOP_ONLINE_MOVABLE) {
+               if (!zone_can_shift(pfn, nr_pages, ZONE_MOVABLE, &zone_shift))
+                       return -EINVAL;
+       }
 
        zone = move_pfn_range(zone_shift, pfn, pfn + nr_pages);
        if (!zone)
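
Splitting validity from magnitude removes an ambiguity in the old int return: 0 could mean either "shifting is impossible" or "target equals the current zone, shift by zero", and callers treated every 0 as success. With the bool return plus out-parameter, a zero shift is a legal answer and failure is unmistakable; the caller shape (mirrored by the online_pages() hunk above) becomes:

    int zone_shift;

    if (!zone_can_shift(pfn, nr_pages, ZONE_NORMAL, &zone_shift))
            return -EINVAL;         /* cannot shift at all */
    /* zone_shift may legitimately be 0 here */
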
index 2e346645eb80d6bb8f97761c30aed6a512017e59..1e7873e40c9a16e922d4800e6dc41486eee23540 100644 (file)
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -2017,8 +2017,8 @@ retry_cpuset:
 
        nmask = policy_nodemask(gfp, pol);
        zl = policy_zonelist(gfp, pol, node);
-       mpol_cond_put(pol);
        page = __alloc_pages_nodemask(gfp, order, zl, nmask);
+       mpol_cond_put(pol);
 out:
        if (unlikely(!page && read_mems_allowed_retry(cpuset_mems_cookie)))
                goto retry_cpuset;
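
The reorder closes a use-after-free window: for MPOL_BIND policies, policy_nodemask() can return a pointer into the mempolicy itself, so dropping the reference with mpol_cond_put() before __alloc_pages_nodemask() dereferences nmask leaves the allocator reading freed memory. The ordering rule, restated over the calls above:

    nmask = policy_nodemask(gfp, pol);      /* may point into *pol */
    zl = policy_zonelist(gfp, pol, node);
    page = __alloc_pages_nodemask(gfp, order, zl, nmask);
    mpol_cond_put(pol);                     /* drop the ref only after the last use */
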
index d604d2596b7bed41b9748ee3242571b771db4d5e..f3e0c69a97b76997d9fa65cda0b7e1b1fb8fa29a 100644 (file)
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -3523,12 +3523,13 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
        struct page *page = NULL;
        unsigned int alloc_flags;
        unsigned long did_some_progress;
-       enum compact_priority compact_priority = DEF_COMPACT_PRIORITY;
+       enum compact_priority compact_priority;
        enum compact_result compact_result;
-       int compaction_retries = 0;
-       int no_progress_loops = 0;
+       int compaction_retries;
+       int no_progress_loops;
        unsigned long alloc_start = jiffies;
        unsigned int stall_timeout = 10 * HZ;
+       unsigned int cpuset_mems_cookie;
 
        /*
         * In the slowpath, we sanity check order to avoid ever trying to
@@ -3549,6 +3550,23 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
                                (__GFP_ATOMIC|__GFP_DIRECT_RECLAIM)))
                gfp_mask &= ~__GFP_ATOMIC;
 
+retry_cpuset:
+       compaction_retries = 0;
+       no_progress_loops = 0;
+       compact_priority = DEF_COMPACT_PRIORITY;
+       cpuset_mems_cookie = read_mems_allowed_begin();
+       /*
+        * We need to recalculate the starting point for the zonelist iterator
+        * because we might have used different nodemask in the fast path, or
+        * there was a cpuset modification and we are retrying - otherwise we
+        * could end up iterating over non-eligible zones endlessly.
+        */
+       ac->preferred_zoneref = first_zones_zonelist(ac->zonelist,
+                                       ac->high_zoneidx, ac->nodemask);
+       if (!ac->preferred_zoneref->zone)
+               goto nopage;
+
+
        /*
         * The fast path uses conservative alloc_flags to succeed only until
         * kswapd needs to be woken up, and to avoid the cost of setting up
@@ -3708,6 +3726,13 @@ retry:
                                &compaction_retries))
                goto retry;
 
+       /*
+        * It's possible we raced with cpuset update so the OOM would be
+        * premature (see below the nopage: label for full explanation).
+        */
+       if (read_mems_allowed_retry(cpuset_mems_cookie))
+               goto retry_cpuset;
+
        /* Reclaim has failed us, start killing things */
        page = __alloc_pages_may_oom(gfp_mask, order, ac, &did_some_progress);
        if (page)
@@ -3720,6 +3745,16 @@ retry:
        }
 
 nopage:
+       /*
+        * When updating a task's mems_allowed or mempolicy nodemask, it is
+        * possible to race with parallel threads in such a way that our
+        * allocation can fail while the mask is being updated. If we are about
+        * to fail, check if the cpuset changed during allocation and if so,
+        * retry.
+        */
+       if (read_mems_allowed_retry(cpuset_mems_cookie))
+               goto retry_cpuset;
+
        warn_alloc(gfp_mask,
                        "page allocation failure: order:%u", order);
 got_pg:
@@ -3734,7 +3769,6 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
                        struct zonelist *zonelist, nodemask_t *nodemask)
 {
        struct page *page;
-       unsigned int cpuset_mems_cookie;
        unsigned int alloc_flags = ALLOC_WMARK_LOW;
        gfp_t alloc_mask = gfp_mask; /* The gfp_t that was actually used for allocation */
        struct alloc_context ac = {
@@ -3771,9 +3805,6 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
        if (IS_ENABLED(CONFIG_CMA) && ac.migratetype == MIGRATE_MOVABLE)
                alloc_flags |= ALLOC_CMA;
 
-retry_cpuset:
-       cpuset_mems_cookie = read_mems_allowed_begin();
-
        /* Dirty zone balancing only done in the fast path */
        ac.spread_dirty_pages = (gfp_mask & __GFP_WRITE);
 
@@ -3784,8 +3815,13 @@ retry_cpuset:
         */
        ac.preferred_zoneref = first_zones_zonelist(ac.zonelist,
                                        ac.high_zoneidx, ac.nodemask);
-       if (!ac.preferred_zoneref) {
+       if (!ac.preferred_zoneref->zone) {
                page = NULL;
+               /*
+                * This might be due to race with cpuset_current_mems_allowed
+                * update, so make sure we retry with original nodemask in the
+                * slow path.
+                */
                goto no_zone;
        }
 
@@ -3794,6 +3830,7 @@ retry_cpuset:
        if (likely(page))
                goto out;
 
+no_zone:
        /*
         * Runtime PM, block IO and its error handling path can deadlock
         * because I/O on the device might not complete.
@@ -3805,21 +3842,10 @@ retry_cpuset:
         * Restore the original nodemask if it was potentially replaced with
         * &cpuset_current_mems_allowed to optimize the fast-path attempt.
         */
-       if (cpusets_enabled())
+       if (unlikely(ac.nodemask != nodemask))
                ac.nodemask = nodemask;
-       page = __alloc_pages_slowpath(alloc_mask, order, &ac);
 
-no_zone:
-       /*
-        * When updating a task's mems_allowed, it is possible to race with
-        * parallel threads in such a way that an allocation can fail while
-        * the mask is being updated. If a page allocation is about to fail,
-        * check if the cpuset changed during allocation and if so, retry.
-        */
-       if (unlikely(!page && read_mems_allowed_retry(cpuset_mems_cookie))) {
-               alloc_mask = gfp_mask;
-               goto retry_cpuset;
-       }
+       page = __alloc_pages_slowpath(alloc_mask, order, &ac);
 
 out:
        if (memcg_kmem_enabled() && (gfp_mask & __GFP_ACCOUNT) && page &&
@@ -7248,6 +7274,7 @@ int alloc_contig_range(unsigned long start, unsigned long end,
                .zone = page_zone(pfn_to_page(start)),
                .mode = MIGRATE_SYNC,
                .ignore_skip_hint = true,
+               .gfp_mask = GFP_KERNEL,
        };
        INIT_LIST_HEAD(&cc.migratepages);
 
index 067598a008493fabb68d48120a904943fff4e08c..7aa6f433f4de554d308e774d9e9b40507c6ab48a 100644 (file)
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -496,10 +496,11 @@ static inline int check_valid_pointer(struct kmem_cache *s,
        return 1;
 }
 
-static void print_section(char *text, u8 *addr, unsigned int length)
+static void print_section(char *level, char *text, u8 *addr,
+                         unsigned int length)
 {
        metadata_access_enable();
-       print_hex_dump(KERN_ERR, text, DUMP_PREFIX_ADDRESS, 16, 1, addr,
+       print_hex_dump(level, text, DUMP_PREFIX_ADDRESS, 16, 1, addr,
                        length, 1);
        metadata_access_disable();
 }
@@ -636,14 +637,15 @@ static void print_trailer(struct kmem_cache *s, struct page *page, u8 *p)
               p, p - addr, get_freepointer(s, p));
 
        if (s->flags & SLAB_RED_ZONE)
-               print_section("Redzone ", p - s->red_left_pad, s->red_left_pad);
+               print_section(KERN_ERR, "Redzone ", p - s->red_left_pad,
+                             s->red_left_pad);
        else if (p > addr + 16)
-               print_section("Bytes b4 ", p - 16, 16);
+               print_section(KERN_ERR, "Bytes b4 ", p - 16, 16);
 
-       print_section("Object ", p, min_t(unsigned long, s->object_size,
-                               PAGE_SIZE));
+       print_section(KERN_ERR, "Object ", p,
+                     min_t(unsigned long, s->object_size, PAGE_SIZE));
        if (s->flags & SLAB_RED_ZONE)
-               print_section("Redzone ", p + s->object_size,
+               print_section(KERN_ERR, "Redzone ", p + s->object_size,
                        s->inuse - s->object_size);
 
        if (s->offset)
@@ -658,7 +660,8 @@ static void print_trailer(struct kmem_cache *s, struct page *page, u8 *p)
 
        if (off != size_from_object(s))
                /* Beginning of the filler is the free pointer */
-               print_section("Padding ", p + off, size_from_object(s) - off);
+               print_section(KERN_ERR, "Padding ", p + off,
+                             size_from_object(s) - off);
 
        dump_stack();
 }
@@ -820,7 +823,7 @@ static int slab_pad_check(struct kmem_cache *s, struct page *page)
                end--;
 
        slab_err(s, page, "Padding overwritten. 0x%p-0x%p", fault, end - 1);
-       print_section("Padding ", end - remainder, remainder);
+       print_section(KERN_ERR, "Padding ", end - remainder, remainder);
 
        restore_bytes(s, "slab padding", POISON_INUSE, end - remainder, end);
        return 0;
@@ -973,7 +976,7 @@ static void trace(struct kmem_cache *s, struct page *page, void *object,
                        page->freelist);
 
                if (!alloc)
-                       print_section("Object ", (void *)object,
+                       print_section(KERN_INFO, "Object ", (void *)object,
                                        s->object_size);
 
                dump_stack();
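
Threading the printk level through print_section() lets the allocation-tracing path in trace() demote its hex dumps to KERN_INFO while genuine corruption reports stay at KERN_ERR, rather than hard-coding KERN_ERR for both. print_hex_dump() already takes the level as its first argument, so the wrapper only forwards it; one call at each severity:

    /* corruption report: keep it loud */
    print_hex_dump(KERN_ERR, "Redzone ", DUMP_PREFIX_ADDRESS, 16, 1,
                   addr, length, 1);

    /* allocation tracing: informational only */
    print_hex_dump(KERN_INFO, "Object ", DUMP_PREFIX_ADDRESS, 16, 1,
                   addr, length, 1);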