git.proxmox.com Git - mirror_ubuntu-artful-kernel.git/commitdiff
Merge branch 'perf-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel...
author Linus Torvalds <torvalds@linux-foundation.org>
Sat, 26 Aug 2017 15:59:50 +0000 (08:59 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Sat, 26 Aug 2017 15:59:50 +0000 (08:59 -0700)
Pull perf fix from Ingo Molnar:
 "A single fix to not allow nonsensical event groups that result in
  kernel warnings"

* 'perf-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  perf/core: Fix group {cpu,task} validation
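For reference, a minimal userspace sketch of the call pattern this fix rejects (the attributes and the exact errno are illustrative assumptions, not taken from the commit): a sibling event whose {cpu,task} target differs from its group leader's context now fails at perf_event_open() time instead of tripping a kernel warning.

#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <stdio.h>
#include <string.h>

static int perf_open(struct perf_event_attr *attr, pid_t pid, int cpu,
                     int group_fd)
{
        return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, 0);
}

int main(void)
{
        struct perf_event_attr attr;

        memset(&attr, 0, sizeof(attr));
        attr.type = PERF_TYPE_HARDWARE;
        attr.size = sizeof(attr);
        attr.config = PERF_COUNT_HW_CPU_CYCLES;

        /* Leader counts the current task on any CPU. */
        int leader = perf_open(&attr, 0, -1, -1);

        /* Sibling asks for a CPU-wide context (pid == -1, cpu == 0),
         * which does not match the leader's task context; after this
         * fix the kernel fails the call instead of warning. */
        int sibling = perf_open(&attr, -1, 0, leader);
        if (sibling < 0)
                perror("perf_event_open (rejected group)");
        return 0;
}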

63 files changed:
arch/powerpc/include/asm/mmu_context.h
arch/powerpc/include/asm/pgtable-be-types.h
arch/powerpc/include/asm/pgtable-types.h
arch/powerpc/kvm/book3s_64_vio.c
arch/powerpc/kvm/book3s_hv_rmhandlers.S
arch/powerpc/kvm/book3s_xive_template.c
arch/s390/kvm/sthyi.c
arch/x86/include/asm/fpu/internal.h
arch/x86/include/asm/kvm_host.h
arch/x86/kvm/cpuid.c
arch/x86/kvm/kvm_cache_regs.h
arch/x86/kvm/mmu.h
arch/x86/kvm/svm.c
arch/x86/kvm/vmx.c
arch/x86/kvm/x86.c
block/blk-mq-debugfs.c
block/blk-throttle.c
block/bsg-lib.c
drivers/block/loop.c
drivers/block/loop.h
drivers/block/virtio_blk.c
drivers/dma/tegra210-adma.c
drivers/gpu/drm/drm_atomic.c
drivers/gpu/drm/drm_gem.c
drivers/gpu/drm/drm_plane.c
drivers/gpu/drm/i915/gvt/cmd_parser.c
drivers/gpu/drm/i915/intel_bios.c
drivers/gpu/drm/i915/intel_dsi_dcs_backlight.c
drivers/gpu/drm/i915/intel_dsi_vbt.c
drivers/gpu/drm/i915/intel_lrc.c
drivers/gpu/drm/i915/intel_lspcon.c
drivers/gpu/drm/imx/ipuv3-plane.c
drivers/gpu/drm/rockchip/rockchip_drm_drv.c
drivers/gpu/drm/sun4i/sun4i_drv.c
drivers/gpu/ipu-v3/Kconfig
drivers/i2c/busses/i2c-aspeed.c
drivers/i2c/busses/i2c-designware-platdrv.c
drivers/i2c/busses/i2c-designware-slave.c
drivers/i2c/busses/i2c-simtec.c
drivers/i2c/i2c-core-base.c
drivers/mmc/core/block.c
drivers/mtd/nand/atmel/nand-controller.c
drivers/mtd/nand/nandsim.c
drivers/virtio/virtio_pci_common.c
fs/cifs/dir.c
fs/cifs/smb2pdu.c
fs/dax.c
fs/nfsd/nfs4xdr.c
include/linux/blkdev.h
include/linux/bsg-lib.h
include/uapi/linux/loop.h
kernel/fork.c
mm/madvise.c
mm/memblock.c
mm/page_alloc.c
mm/shmem.c
net/sunrpc/svcsock.c
sound/core/control.c
sound/firewire/iso-resources.c
sound/firewire/motu/motu.c
sound/pci/hda/patch_conexant.c
sound/soc/codecs/rt5677.c
sound/usb/quirks.c

index 0c76675394c5930d5cecf2ac01a2718c8e9b6c1f..35bec1c5bd5aa63567be4847c2073be53fa0390c 100644 (file)
@@ -90,6 +90,24 @@ static inline void switch_mm_irqs_off(struct mm_struct *prev,
        /* Mark this context has been used on the new CPU */
        if (!cpumask_test_cpu(smp_processor_id(), mm_cpumask(next))) {
                cpumask_set_cpu(smp_processor_id(), mm_cpumask(next));
+
+               /*
+                * This full barrier orders the store to the cpumask above vs
+                * a subsequent operation which allows this CPU to begin loading
+                * translations for next.
+                *
+                * When using the radix MMU that operation is the load of the
+                * MMU context id, which is then moved to SPRN_PID.
+                *
+                * For the hash MMU it is either the first load from slb_cache
+                * in switch_slb(), and/or the store of paca->mm_ctx_id in
+                * copy_mm_to_paca().
+                *
+                * On the read side the barrier is in pte_xchg(), which orders
+                * the store to the PTE vs the load of mm_cpumask.
+                */
+               smp_mb();
+
                new_on_cpu = true;
        }
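A hedged sketch of the pairing the new comment describes — not code from this commit, just the two sides of the publish/observe pattern laid out side by side:

/*
 * CPU A: switch_mm_irqs_off()         CPU B: unmapper calling pte_xchg()
 * ---------------------------         ----------------------------------
 * store: set this CPU in mm_cpumask   store: write the new PTE (cmpxchg)
 * smp_mb()                            full barrier implied by cmpxchg
 * load: context id / slb_cache        load: mm_cpumask (CPUs to flush)
 *
 * Either CPU A's load observes the new PTE, or CPU B's load observes
 * A's cpumask bit, so a CPU that may start caching translations for
 * "next" is never missed by the flush.
 */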
 
index 9c0f5db5cf461a92e72185701b4cbc1df168dfcc..67e7e3d990f44ef495ee02b6fcb3ba16053bd5c5 100644 (file)
@@ -87,6 +87,7 @@ static inline bool pte_xchg(pte_t *ptep, pte_t old, pte_t new)
        unsigned long *p = (unsigned long *)ptep;
        __be64 prev;
 
+       /* See comment in switch_mm_irqs_off() */
        prev = (__force __be64)__cmpxchg_u64(p, (__force unsigned long)pte_raw(old),
                                             (__force unsigned long)pte_raw(new));
 
index 8bd3b13fe2fb2e8bd1c5762c4e080c9cd921edaa..369a164b545c09087e0740fd6c32ea282a7b5245 100644 (file)
@@ -62,6 +62,7 @@ static inline bool pte_xchg(pte_t *ptep, pte_t old, pte_t new)
 {
        unsigned long *p = (unsigned long *)ptep;
 
+       /* See comment in switch_mm_irqs_off() */
        return pte_val(old) == __cmpxchg_u64(p, pte_val(old), pte_val(new));
 }
 #endif
index a160c14304eba22bd83ee98b8f052a2ed8bf7a3f..53766e2bc029e25efc87270f6983e3b7975be31d 100644 (file)
@@ -294,32 +294,26 @@ long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm,
                                   struct kvm_create_spapr_tce_64 *args)
 {
        struct kvmppc_spapr_tce_table *stt = NULL;
+       struct kvmppc_spapr_tce_table *siter;
        unsigned long npages, size;
        int ret = -ENOMEM;
        int i;
+       int fd = -1;
 
        if (!args->size)
                return -EINVAL;
 
-       /* Check this LIOBN hasn't been previously allocated */
-       list_for_each_entry(stt, &kvm->arch.spapr_tce_tables, list) {
-               if (stt->liobn == args->liobn)
-                       return -EBUSY;
-       }
-
        size = _ALIGN_UP(args->size, PAGE_SIZE >> 3);
        npages = kvmppc_tce_pages(size);
        ret = kvmppc_account_memlimit(kvmppc_stt_pages(npages), true);
-       if (ret) {
-               stt = NULL;
-               goto fail;
-       }
+       if (ret)
+               return ret;
 
        ret = -ENOMEM;
        stt = kzalloc(sizeof(*stt) + npages * sizeof(struct page *),
                      GFP_KERNEL);
        if (!stt)
-               goto fail;
+               goto fail_acct;
 
        stt->liobn = args->liobn;
        stt->page_shift = args->page_shift;
@@ -334,24 +328,42 @@ long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm,
                        goto fail;
        }
 
-       kvm_get_kvm(kvm);
+       ret = fd = anon_inode_getfd("kvm-spapr-tce", &kvm_spapr_tce_fops,
+                                   stt, O_RDWR | O_CLOEXEC);
+       if (ret < 0)
+               goto fail;
 
        mutex_lock(&kvm->lock);
-       list_add_rcu(&stt->list, &kvm->arch.spapr_tce_tables);
+
+       /* Check this LIOBN hasn't been previously allocated */
+       ret = 0;
+       list_for_each_entry(siter, &kvm->arch.spapr_tce_tables, list) {
+               if (siter->liobn == args->liobn) {
+                       ret = -EBUSY;
+                       break;
+               }
+       }
+
+       if (!ret) {
+               list_add_rcu(&stt->list, &kvm->arch.spapr_tce_tables);
+               kvm_get_kvm(kvm);
+       }
 
        mutex_unlock(&kvm->lock);
 
-       return anon_inode_getfd("kvm-spapr-tce", &kvm_spapr_tce_fops,
-                               stt, O_RDWR | O_CLOEXEC);
+       if (!ret)
+               return fd;
 
-fail:
-       if (stt) {
-               for (i = 0; i < npages; i++)
-                       if (stt->pages[i])
-                               __free_page(stt->pages[i]);
+       put_unused_fd(fd);
 
-               kfree(stt);
-       }
+ fail:
+       for (i = 0; i < npages; i++)
+               if (stt->pages[i])
+                       __free_page(stt->pages[i]);
+
+       kfree(stt);
+ fail_acct:
+       kvmppc_account_memlimit(kvmppc_stt_pages(npages), false);
        return ret;
 }
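The restructured function follows a common pattern: build the object and its file descriptor outside the lock, then take the lock only to check for a duplicate key and publish. A self-contained, hedged sketch of that shape (a pthread-based userspace analogue; all names invented):

#include <errno.h>
#include <pthread.h>

struct table { long liobn; struct table *next; };

static struct table *tables;
static pthread_mutex_t tables_lock = PTHREAD_MUTEX_INITIALIZER;

/* Returns fd on success, -EBUSY if the key is already taken. */
static int publish_table(struct table *stt, long liobn, int fd)
{
        struct table *it;
        int ret = 0;

        pthread_mutex_lock(&tables_lock);
        for (it = tables; it; it = it->next)
                if (it->liobn == liobn)
                        ret = -EBUSY;           /* duplicate key */
        if (!ret) {
                stt->liobn = liobn;
                stt->next = tables;
                tables = stt;                   /* publish only if unique */
        }
        pthread_mutex_unlock(&tables_lock);
        return ret ? ret : fd;
}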
 
index c52184a8efdf025c1efffc6dd7310e9374dca630..9c9c983b864f8d64a81eac456adc405cc0252e5d 100644 (file)
@@ -1291,6 +1291,9 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
        /* Hypervisor doorbell - exit only if host IPI flag set */
        cmpwi   r12, BOOK3S_INTERRUPT_H_DOORBELL
        bne     3f
+BEGIN_FTR_SECTION
+       PPC_MSGSYNC
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
        lbz     r0, HSTATE_HOST_IPI(r13)
        cmpwi   r0, 0
        beq     4f
index 4636ca6e7d383b7d3ce26b18df01152b8087a8ec..d1ed2c41b5d246dde6d53c34530bc30dc63656b9 100644 (file)
@@ -16,7 +16,22 @@ static void GLUE(X_PFX,ack_pending)(struct kvmppc_xive_vcpu *xc)
        u8 cppr;
        u16 ack;
 
-       /* XXX DD1 bug workaround: Check PIPR vs. CPPR first ! */
+       /*
+        * Ensure any previous store to CPPR is ordered vs.
+        * the subsequent loads from PIPR or ACK.
+        */
+       eieio();
+
+       /*
+        * DD1 bug workaround: If PIPR is less favored than CPPR
+        * ignore the interrupt or we might incorrectly lose an IPB
+        * bit.
+        */
+       if (cpu_has_feature(CPU_FTR_POWER9_DD1)) {
+               u8 pipr = __x_readb(__x_tima + TM_QW1_OS + TM_PIPR);
+               if (pipr >= xc->hw_cppr)
+                       return;
+       }
 
        /* Perform the acknowledge OS to register cycle. */
        ack = be16_to_cpu(__x_readw(__x_tima + TM_SPC_ACK_OS_REG));
@@ -235,6 +250,11 @@ skip_ipi:
        /*
         * If we found an interrupt, adjust what the guest CPPR should
         * be as if we had just fetched that interrupt from HW.
+        *
+        * Note: This can only make xc->cppr smaller as the previous
+        * loop will only exit with hirq != 0 if prio is lower than
+        * the current xc->cppr. Thus we don't need to re-check xc->mfrr
+        * for pending IPIs.
         */
        if (hirq)
                xc->cppr = prio;
@@ -380,6 +400,12 @@ X_STATIC int GLUE(X_PFX,h_cppr)(struct kvm_vcpu *vcpu, unsigned long cppr)
        old_cppr = xc->cppr;
        xc->cppr = cppr;
 
+       /*
+        * Order the above update of xc->cppr with the subsequent
+        * read of xc->mfrr inside push_pending_to_hw()
+        */
+       smp_mb();
+
        /*
         * We are masking less, we need to look for pending things
         * to deliver and set VP pending bits accordingly to trigger
@@ -420,21 +446,37 @@ X_STATIC int GLUE(X_PFX,h_eoi)(struct kvm_vcpu *vcpu, unsigned long xirr)
         * used to signal MFRR changes is EOId when fetched from
         * the queue.
         */
-       if (irq == XICS_IPI || irq == 0)
+       if (irq == XICS_IPI || irq == 0) {
+               /*
+                * This barrier orders the setting of xc->cppr vs.
+                * subsquent test of xc->mfrr done inside
+                * subsequent test of xc->mfrr done inside
+                */
+               smp_mb();
                goto bail;
+       }
 
        /* Find interrupt source */
        sb = kvmppc_xive_find_source(xive, irq, &src);
        if (!sb) {
                pr_devel(" source not found !\n");
                rc = H_PARAMETER;
+               /* Same as above */
+               smp_mb();
                goto bail;
        }
        state = &sb->irq_state[src];
        kvmppc_xive_select_irq(state, &hw_num, &xd);
 
        state->in_eoi = true;
-       mb();
+
+       /*
+        * This barrier orders both the setting of in_eoi above vs.
+        * the subsequent test of guest_priority, and the setting
+        * of xc->cppr vs. the subsequent test of xc->mfrr done inside
+        * scan_interrupts and push_pending_to_hw
+        */
+       smp_mb();
 
 again:
        if (state->guest_priority == MASKED) {
@@ -461,6 +503,14 @@ again:
 
        }
 
+       /*
+        * This barrier orders the above guest_priority check
+        * and spin_lock/unlock with clearing in_eoi below.
+        *
+        * It also has to be a full mb() as it must ensure
+        * the MMIOs done in source_eoi() are completed before
+        * state->in_eoi is visible.
+        */
        mb();
        state->in_eoi = false;
 bail:
@@ -495,6 +545,18 @@ X_STATIC int GLUE(X_PFX,h_ipi)(struct kvm_vcpu *vcpu, unsigned long server,
        /* Locklessly write over MFRR */
        xc->mfrr = mfrr;
 
+       /*
+        * The load of xc->cppr below and the subsequent MMIO store
+        * to the IPI must happen after the above mfrr update is
+        * globally visible so that:
+        *
+        * - Synchronize with another CPU doing an H_EOI or a H_CPPR
+        *   updating xc->cppr then reading xc->mfrr.
+        *
+        * - The target of the IPI sees the xc->mfrr update
+        */
+       mb();
+
        /* Shoot the IPI if more favored than the target cppr */
        if (mfrr < xc->cppr)
                __x_writeq(0, __x_trig_page(&xc->vp_ipi_data));
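Taken together, the mb() here and the smp_mb() calls added to h_cppr/h_eoi close a classic store-buffering race. A hedged sketch of the two sides (conditions simplified):

/*
 * H_IPI (this hunk)                  H_CPPR / H_EOI (earlier hunks)
 * -----------------                  ------------------------------
 * xc->mfrr = mfrr;                   xc->cppr = cppr;
 * mb();                              smp_mb();
 * if (mfrr < xc->cppr)               scan/push: read xc->mfrr and
 *         trigger IPI in HW;                 raise the IPI if favored;
 *
 * Without a barrier on each side, both CPUs can read the other's
 * stale value: H_IPI sees the old, more restrictive cppr and skips
 * the trigger, while H_CPPR sees the old mfrr and finds nothing
 * pending, so the IPI is lost.
 */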
index 926b5244263efd3dd1fadf637ce4790251367e60..a2e5c24f47a7471a1e7730f654b8ce4599434ef0 100644 (file)
@@ -394,7 +394,7 @@ static int sthyi(u64 vaddr)
                "srl     %[cc],28\n"
                : [cc] "=d" (cc)
                : [code] "d" (code), [addr] "a" (addr)
-               : "memory", "cc");
+               : "3", "memory", "cc");
        return cc;
 }
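A hedged note on the one-character fix above (the reasoning is reconstructed, not quoted from the diff): STHYI stores its return code in the register following the one holding the response buffer address, so with the address pinned to r2 the instruction silently writes r3:

/*
 * Assumed register setup, reconstructed from the "3" clobber:
 *
 *   register unsigned long code asm("0") = sthyi_fc;
 *   register unsigned long addr asm("2") = address;
 *
 * STHYI writes its return code into R2+1, i.e. r3. Without "3" in
 * the clobber list the compiler may keep a live value in r3 across
 * the asm statement and read back garbage afterwards.
 */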
 
@@ -425,7 +425,7 @@ int handle_sthyi(struct kvm_vcpu *vcpu)
        VCPU_EVENT(vcpu, 3, "STHYI: fc: %llu addr: 0x%016llx", code, addr);
        trace_kvm_s390_handle_sthyi(vcpu, code, addr);
 
-       if (reg1 == reg2 || reg1 & 1 || reg2 & 1 || addr & ~PAGE_MASK)
+       if (reg1 == reg2 || reg1 & 1 || reg2 & 1)
                return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
 
        if (code & 0xffff) {
@@ -433,6 +433,9 @@ int handle_sthyi(struct kvm_vcpu *vcpu)
                goto out;
        }
 
+       if (addr & ~PAGE_MASK)
+               return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
+
        /*
         * If the page has not yet been faulted in, we want to do that
         * now and not after all the expensive calculations.
index 255645f60ca2b4be333de67a10c6780f364e6fb2..554cdb205d17586887e6b599c991b2fc090ba38a 100644 (file)
@@ -450,10 +450,10 @@ static inline int copy_fpregs_to_fpstate(struct fpu *fpu)
        return 0;
 }
 
-static inline void __copy_kernel_to_fpregs(union fpregs_state *fpstate)
+static inline void __copy_kernel_to_fpregs(union fpregs_state *fpstate, u64 mask)
 {
        if (use_xsave()) {
-               copy_kernel_to_xregs(&fpstate->xsave, -1);
+               copy_kernel_to_xregs(&fpstate->xsave, mask);
        } else {
                if (use_fxsr())
                        copy_kernel_to_fxregs(&fpstate->fxsave);
@@ -477,7 +477,7 @@ static inline void copy_kernel_to_fpregs(union fpregs_state *fpstate)
                        : : [addr] "m" (fpstate));
        }
 
-       __copy_kernel_to_fpregs(fpstate);
+       __copy_kernel_to_fpregs(fpstate, -1);
 }
 
 extern int copy_fpstate_to_sigframe(void __user *buf, void __user *fp, int size);
index 87ac4fba6d8e12f07e8a9f191bdb028a1c3e6234..f4d120a3e22e8aee8326bebeedeaa506c2291b47 100644 (file)
@@ -492,6 +492,7 @@ struct kvm_vcpu_arch {
        unsigned long cr4;
        unsigned long cr4_guest_owned_bits;
        unsigned long cr8;
+       u32 pkru;
        u32 hflags;
        u64 efer;
        u64 apic_base;
index 59ca2eea522c466937287c3e660b8f38e49c12aa..19adbb4184439dd6c40c39cd6dec2afe5a9ae83a 100644 (file)
@@ -469,7 +469,7 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
                        entry->ecx &= kvm_cpuid_7_0_ecx_x86_features;
                        cpuid_mask(&entry->ecx, CPUID_7_ECX);
                        /* PKU is not yet implemented for shadow paging. */
-                       if (!tdp_enabled)
+                       if (!tdp_enabled || !boot_cpu_has(X86_FEATURE_OSPKE))
                                entry->ecx &= ~F(PKU);
                        entry->edx &= kvm_cpuid_7_0_edx_x86_features;
                        entry->edx &= get_scattered_cpuid_leaf(7, 0, CPUID_EDX);
index 762cdf2595f992fd4ac8bb1e4c2c8914b344db04..e1e89ee4af750dc51f78cfbf8aa71e22d77b4cd1 100644 (file)
@@ -84,11 +84,6 @@ static inline u64 kvm_read_edx_eax(struct kvm_vcpu *vcpu)
                | ((u64)(kvm_register_read(vcpu, VCPU_REGS_RDX) & -1u) << 32);
 }
 
-static inline u32 kvm_read_pkru(struct kvm_vcpu *vcpu)
-{
-       return kvm_x86_ops->get_pkru(vcpu);
-}
-
 static inline void enter_guest_mode(struct kvm_vcpu *vcpu)
 {
        vcpu->arch.hflags |= HF_GUEST_MASK;
index d7d248a000dd6772681f3f5541e344f9677a2d1d..4b9a3ae6b725d37bdeeb5aeb24c0a9c2716228f4 100644 (file)
@@ -185,7 +185,7 @@ static inline u8 permission_fault(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
                * index of the protection domain, so pte_pkey * 2
                * is the index of the first bit for the domain.
                */
-               pkru_bits = (kvm_read_pkru(vcpu) >> (pte_pkey * 2)) & 3;
+               pkru_bits = (vcpu->arch.pkru >> (pte_pkey * 2)) & 3;
 
                /* clear present bit, replace PFEC.RSVD with ACC_USER_MASK. */
                offset = (pfec & ~1) +
index 56ba05312759d3ed4568e546492d9d8bfad05b71..af256b786a70cccd14906fe926e2b790156967a4 100644 (file)
@@ -1777,11 +1777,6 @@ static void svm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
        to_svm(vcpu)->vmcb->save.rflags = rflags;
 }
 
-static u32 svm_get_pkru(struct kvm_vcpu *vcpu)
-{
-       return 0;
-}
-
 static void svm_cache_reg(struct kvm_vcpu *vcpu, enum kvm_reg reg)
 {
        switch (reg) {
@@ -5413,8 +5408,6 @@ static struct kvm_x86_ops svm_x86_ops __ro_after_init = {
        .get_rflags = svm_get_rflags,
        .set_rflags = svm_set_rflags,
 
-       .get_pkru = svm_get_pkru,
-
        .tlb_flush = svm_flush_tlb,
 
        .run = svm_vcpu_run,
index 9b21b12230354e334900e6536b7612285f75b7e3..c6ef2940119bfdfb00f0547c321697280ebae0fa 100644 (file)
@@ -636,8 +636,6 @@ struct vcpu_vmx {
 
        u64 current_tsc_ratio;
 
-       bool guest_pkru_valid;
-       u32 guest_pkru;
        u32 host_pkru;
 
        /*
@@ -2383,11 +2381,6 @@ static void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
                to_vmx(vcpu)->emulation_required = emulation_required(vcpu);
 }
 
-static u32 vmx_get_pkru(struct kvm_vcpu *vcpu)
-{
-       return to_vmx(vcpu)->guest_pkru;
-}
-
 static u32 vmx_get_interrupt_shadow(struct kvm_vcpu *vcpu)
 {
        u32 interruptibility = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO);
@@ -9020,8 +9013,10 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
        if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
                vmx_set_interrupt_shadow(vcpu, 0);
 
-       if (vmx->guest_pkru_valid)
-               __write_pkru(vmx->guest_pkru);
+       if (static_cpu_has(X86_FEATURE_PKU) &&
+           kvm_read_cr4_bits(vcpu, X86_CR4_PKE) &&
+           vcpu->arch.pkru != vmx->host_pkru)
+               __write_pkru(vcpu->arch.pkru);
 
        atomic_switch_perf_msrs(vmx);
        debugctlmsr = get_debugctlmsr();
@@ -9169,13 +9164,11 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
         * back on host, so it is safe to read guest PKRU from current
         * XSAVE.
         */
-       if (boot_cpu_has(X86_FEATURE_OSPKE)) {
-               vmx->guest_pkru = __read_pkru();
-               if (vmx->guest_pkru != vmx->host_pkru) {
-                       vmx->guest_pkru_valid = true;
+       if (static_cpu_has(X86_FEATURE_PKU) &&
+           kvm_read_cr4_bits(vcpu, X86_CR4_PKE)) {
+               vcpu->arch.pkru = __read_pkru();
+               if (vcpu->arch.pkru != vmx->host_pkru)
                        __write_pkru(vmx->host_pkru);
-               } else
-                       vmx->guest_pkru_valid = false;
        }
 
        /*
@@ -11682,8 +11675,6 @@ static struct kvm_x86_ops vmx_x86_ops __ro_after_init = {
        .get_rflags = vmx_get_rflags,
        .set_rflags = vmx_set_rflags,
 
-       .get_pkru = vmx_get_pkru,
-
        .tlb_flush = vmx_flush_tlb,
 
        .run = vmx_vcpu_run,
index d734aa8c5b4f7290e365badd00ea962fd0af9acd..05a5e57c6f39770e81e2af3f760f9ce44783f9b3 100644 (file)
@@ -3245,7 +3245,12 @@ static void fill_xsave(u8 *dest, struct kvm_vcpu *vcpu)
                        u32 size, offset, ecx, edx;
                        cpuid_count(XSTATE_CPUID, index,
                                    &size, &offset, &ecx, &edx);
-                       memcpy(dest + offset, src, size);
+                       if (feature == XFEATURE_MASK_PKRU)
+                               memcpy(dest + offset, &vcpu->arch.pkru,
+                                      sizeof(vcpu->arch.pkru));
+                       else
+                               memcpy(dest + offset, src, size);
+
                }
 
                valid -= feature;
@@ -3283,7 +3288,11 @@ static void load_xsave(struct kvm_vcpu *vcpu, u8 *src)
                        u32 size, offset, ecx, edx;
                        cpuid_count(XSTATE_CPUID, index,
                                    &size, &offset, &ecx, &edx);
-                       memcpy(dest, src + offset, size);
+                       if (feature == XFEATURE_MASK_PKRU)
+                               memcpy(&vcpu->arch.pkru, src + offset,
+                                      sizeof(vcpu->arch.pkru));
+                       else
+                               memcpy(dest, src + offset, size);
                }
 
                valid -= feature;
@@ -7633,7 +7642,9 @@ void kvm_load_guest_fpu(struct kvm_vcpu *vcpu)
         */
        vcpu->guest_fpu_loaded = 1;
        __kernel_fpu_begin();
-       __copy_kernel_to_fpregs(&vcpu->arch.guest_fpu.state);
+       /* PKRU is separately restored in kvm_x86_ops->run.  */
+       __copy_kernel_to_fpregs(&vcpu->arch.guest_fpu.state,
+                               ~XFEATURE_MASK_PKRU);
        trace_kvm_fpu(1);
 }
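A hedged summary of the new PKRU switching flow across these x86 hunks (condensed pseudocode, not any single function from the diff):

/*
 * vmx_vcpu_run():
 *     if (PKU supported && CR4.PKE set && vcpu->arch.pkru != host_pkru)
 *             __write_pkru(vcpu->arch.pkru);   // enter with guest PKRU
 *     ... guest runs ...
 *     vcpu->arch.pkru = __read_pkru();         // snapshot guest value
 *     if (vcpu->arch.pkru != vmx->host_pkru)
 *             __write_pkru(vmx->host_pkru);    // restore host PKRU
 *
 * kvm_load_guest_fpu():
 *     __copy_kernel_to_fpregs(&guest_fpu.state, ~XFEATURE_MASK_PKRU);
 *     // everything except PKRU is loaded from the guest XSAVE image;
 *     // PKRU itself is switched in vmx_vcpu_run() as above, and
 *     // fill_xsave()/load_xsave() read/write it from vcpu->arch.pkru
 */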
 
index 9ebc2945f991e9ff5d50c5191de45ca1025f1201..4f927a58dff86183a95baf9c7f4eb474c218f0f2 100644 (file)
@@ -75,6 +75,8 @@ static const char *const blk_queue_flag_name[] = {
        QUEUE_FLAG_NAME(STATS),
        QUEUE_FLAG_NAME(POLL_STATS),
        QUEUE_FLAG_NAME(REGISTERED),
+       QUEUE_FLAG_NAME(SCSI_PASSTHROUGH),
+       QUEUE_FLAG_NAME(QUIESCED),
 };
 #undef QUEUE_FLAG_NAME
 
@@ -265,6 +267,7 @@ static const char *const cmd_flag_name[] = {
        CMD_FLAG_NAME(RAHEAD),
        CMD_FLAG_NAME(BACKGROUND),
        CMD_FLAG_NAME(NOUNMAP),
+       CMD_FLAG_NAME(NOWAIT),
 };
 #undef CMD_FLAG_NAME
 
index a7285bf2831c7bdbb89b753fccb198f8640e8780..80f5481fe9f6c1603e477349732dac62c3baf204 100644 (file)
@@ -382,6 +382,14 @@ static unsigned int tg_iops_limit(struct throtl_grp *tg, int rw)
        }                                                               \
 } while (0)
 
+static inline unsigned int throtl_bio_data_size(struct bio *bio)
+{
+       /* assume it's one sector */
+       if (unlikely(bio_op(bio) == REQ_OP_DISCARD))
+               return 512;
+       return bio->bi_iter.bi_size;
+}
+
 static void throtl_qnode_init(struct throtl_qnode *qn, struct throtl_grp *tg)
 {
        INIT_LIST_HEAD(&qn->node);
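A worked example of why throtl_bio_data_size() matters, used by the charging and limit checks in the hunks below (the limit value is assumed for illustration):

/*
 * Assume tg_bps_limit(tg, rw) = 1 MiB/s and a 1 GiB REQ_OP_DISCARD:
 *
 *   charged before: bio->bi_iter.bi_size = 1073741824 bytes
 *                   -> one bio consumes ~1024 s of byte budget
 *   charged now:    throtl_bio_data_size(bio) = 512 bytes
 *                   -> negligible, matching the data actually moved
 */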
@@ -934,6 +942,7 @@ static bool tg_with_in_bps_limit(struct throtl_grp *tg, struct bio *bio,
        bool rw = bio_data_dir(bio);
        u64 bytes_allowed, extra_bytes, tmp;
        unsigned long jiffy_elapsed, jiffy_wait, jiffy_elapsed_rnd;
+       unsigned int bio_size = throtl_bio_data_size(bio);
 
        jiffy_elapsed = jiffy_elapsed_rnd = jiffies - tg->slice_start[rw];
 
@@ -947,14 +956,14 @@ static bool tg_with_in_bps_limit(struct throtl_grp *tg, struct bio *bio,
        do_div(tmp, HZ);
        bytes_allowed = tmp;
 
-       if (tg->bytes_disp[rw] + bio->bi_iter.bi_size <= bytes_allowed) {
+       if (tg->bytes_disp[rw] + bio_size <= bytes_allowed) {
                if (wait)
                        *wait = 0;
                return true;
        }
 
        /* Calc approx time to dispatch */
-       extra_bytes = tg->bytes_disp[rw] + bio->bi_iter.bi_size - bytes_allowed;
+       extra_bytes = tg->bytes_disp[rw] + bio_size - bytes_allowed;
        jiffy_wait = div64_u64(extra_bytes * HZ, tg_bps_limit(tg, rw));
 
        if (!jiffy_wait)
@@ -1034,11 +1043,12 @@ static bool tg_may_dispatch(struct throtl_grp *tg, struct bio *bio,
 static void throtl_charge_bio(struct throtl_grp *tg, struct bio *bio)
 {
        bool rw = bio_data_dir(bio);
+       unsigned int bio_size = throtl_bio_data_size(bio);
 
        /* Charge the bio to the group */
-       tg->bytes_disp[rw] += bio->bi_iter.bi_size;
+       tg->bytes_disp[rw] += bio_size;
        tg->io_disp[rw]++;
-       tg->last_bytes_disp[rw] += bio->bi_iter.bi_size;
+       tg->last_bytes_disp[rw] += bio_size;
        tg->last_io_disp[rw]++;
 
        /*
index c4513b23f57a6438af6ae38367c072931edf138c..dd56d7460cb91d504aa22ae9215d00f97c844359 100644 (file)
 #include <scsi/scsi_cmnd.h>
 
 /**
- * bsg_destroy_job - routine to teardown/delete a bsg job
+ * bsg_teardown_job - routine to teardown a bsg job
  * @job: bsg_job that is to be torn down
  */
-static void bsg_destroy_job(struct kref *kref)
+static void bsg_teardown_job(struct kref *kref)
 {
        struct bsg_job *job = container_of(kref, struct bsg_job, kref);
        struct request *rq = job->req;
 
-       blk_end_request_all(rq, BLK_STS_OK);
-
        put_device(job->dev);   /* release reference for the request */
 
        kfree(job->request_payload.sg_list);
        kfree(job->reply_payload.sg_list);
-       kfree(job);
+
+       blk_end_request_all(rq, BLK_STS_OK);
 }
 
 void bsg_job_put(struct bsg_job *job)
 {
-       kref_put(&job->kref, bsg_destroy_job);
+       kref_put(&job->kref, bsg_teardown_job);
 }
 EXPORT_SYMBOL_GPL(bsg_job_put);
 
@@ -100,7 +99,7 @@ EXPORT_SYMBOL_GPL(bsg_job_done);
  */
 static void bsg_softirq_done(struct request *rq)
 {
-       struct bsg_job *job = rq->special;
+       struct bsg_job *job = blk_mq_rq_to_pdu(rq);
 
        bsg_job_put(job);
 }
@@ -122,33 +121,20 @@ static int bsg_map_buffer(struct bsg_buffer *buf, struct request *req)
 }
 
 /**
- * bsg_create_job - create the bsg_job structure for the bsg request
+ * bsg_prepare_job - create the bsg_job structure for the bsg request
  * @dev: device that is being sent the bsg request
  * @req: BSG request that needs a job structure
  */
-static int bsg_create_job(struct device *dev, struct request *req)
+static int bsg_prepare_job(struct device *dev, struct request *req)
 {
        struct request *rsp = req->next_rq;
-       struct request_queue *q = req->q;
        struct scsi_request *rq = scsi_req(req);
-       struct bsg_job *job;
+       struct bsg_job *job = blk_mq_rq_to_pdu(req);
        int ret;
 
-       BUG_ON(req->special);
-
-       job = kzalloc(sizeof(struct bsg_job) + q->bsg_job_size, GFP_KERNEL);
-       if (!job)
-               return -ENOMEM;
-
-       req->special = job;
-       job->req = req;
-       if (q->bsg_job_size)
-               job->dd_data = (void *)&job[1];
        job->request = rq->cmd;
        job->request_len = rq->cmd_len;
-       job->reply = rq->sense;
-       job->reply_len = SCSI_SENSE_BUFFERSIZE; /* Size of sense buffer
-                                                * allocated */
+
        if (req->bio) {
                ret = bsg_map_buffer(&job->request_payload, req);
                if (ret)
@@ -187,7 +173,6 @@ static void bsg_request_fn(struct request_queue *q)
 {
        struct device *dev = q->queuedata;
        struct request *req;
-       struct bsg_job *job;
        int ret;
 
        if (!get_device(dev))
@@ -199,7 +184,7 @@ static void bsg_request_fn(struct request_queue *q)
                        break;
                spin_unlock_irq(q->queue_lock);
 
-               ret = bsg_create_job(dev, req);
+               ret = bsg_prepare_job(dev, req);
                if (ret) {
                        scsi_req(req)->result = ret;
                        blk_end_request_all(req, BLK_STS_OK);
@@ -207,8 +192,7 @@ static void bsg_request_fn(struct request_queue *q)
                        continue;
                }
 
-               job = req->special;
-               ret = q->bsg_job_fn(job);
+               ret = q->bsg_job_fn(blk_mq_rq_to_pdu(req));
                spin_lock_irq(q->queue_lock);
                if (ret)
                        break;
@@ -219,6 +203,35 @@ static void bsg_request_fn(struct request_queue *q)
        spin_lock_irq(q->queue_lock);
 }
 
+static int bsg_init_rq(struct request_queue *q, struct request *req, gfp_t gfp)
+{
+       struct bsg_job *job = blk_mq_rq_to_pdu(req);
+       struct scsi_request *sreq = &job->sreq;
+
+       memset(job, 0, sizeof(*job));
+
+       scsi_req_init(sreq);
+       sreq->sense_len = SCSI_SENSE_BUFFERSIZE;
+       sreq->sense = kzalloc(sreq->sense_len, gfp);
+       if (!sreq->sense)
+               return -ENOMEM;
+
+       job->req = req;
+       job->reply = sreq->sense;
+       job->reply_len = sreq->sense_len;
+       job->dd_data = job + 1;
+
+       return 0;
+}
+
+static void bsg_exit_rq(struct request_queue *q, struct request *req)
+{
+       struct bsg_job *job = blk_mq_rq_to_pdu(req);
+       struct scsi_request *sreq = &job->sreq;
+
+       kfree(sreq->sense);
+}
+
 /**
  * bsg_setup_queue - Create and add the bsg hooks so we can receive requests
  * @dev: device to attach bsg device to
@@ -235,7 +248,9 @@ struct request_queue *bsg_setup_queue(struct device *dev, char *name,
        q = blk_alloc_queue(GFP_KERNEL);
        if (!q)
                return ERR_PTR(-ENOMEM);
-       q->cmd_size = sizeof(struct scsi_request);
+       q->cmd_size = sizeof(struct bsg_job) + dd_job_size;
+       q->init_rq_fn = bsg_init_rq;
+       q->exit_rq_fn = bsg_exit_rq;
        q->request_fn = bsg_request_fn;
 
        ret = blk_init_allocated_queue(q);
@@ -243,7 +258,6 @@ struct request_queue *bsg_setup_queue(struct device *dev, char *name,
                goto out_cleanup_queue;
 
        q->queuedata = dev;
-       q->bsg_job_size = dd_job_size;
        q->bsg_job_fn = job_fn;
        queue_flag_set_unlocked(QUEUE_FLAG_BIDI, q);
        queue_flag_set_unlocked(QUEUE_FLAG_SCSI_PASSTHROUGH, q);
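With struct bsg_job now embedded in the request PDU, a transport driver's side of the reworked API looks roughly like this hedged sketch (my_handler, struct my_data, and my_setup are invented names; dd_data is wired up by bsg_init_rq() above):

#include <linux/bsg-lib.h>

struct my_data { int state; };                  /* invented per-job data */

static int my_handler(struct bsg_job *job)
{
        struct my_data *dd = job->dd_data;      /* points just past the job */

        dd->state = 1;
        /* ... issue the transport command, then complete it: */
        bsg_job_done(job, 0, 0);
        return 0;
}

/* bsg_setup_queue() now sizes every request PDU up front, so per-job
 * allocation failures can no longer occur on the dispatch path: */
static struct request_queue *my_setup(struct device *dev)
{
        return bsg_setup_queue(dev, "my_bsg", my_handler,
                               sizeof(struct my_data));
}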
index ef8334949b4217d9d0f1f964432c16a56ed244c4..f321b96405f55746490b50132f583c0124b192bb 100644 (file)
@@ -221,8 +221,7 @@ static void __loop_update_dio(struct loop_device *lo, bool dio)
 }
 
 static int
-figure_loop_size(struct loop_device *lo, loff_t offset, loff_t sizelimit,
-                loff_t logical_blocksize)
+figure_loop_size(struct loop_device *lo, loff_t offset, loff_t sizelimit)
 {
        loff_t size = get_size(offset, sizelimit, lo->lo_backing_file);
        sector_t x = (sector_t)size;
@@ -234,12 +233,6 @@ figure_loop_size(struct loop_device *lo, loff_t offset, loff_t sizelimit,
                lo->lo_offset = offset;
        if (lo->lo_sizelimit != sizelimit)
                lo->lo_sizelimit = sizelimit;
-       if (lo->lo_flags & LO_FLAGS_BLOCKSIZE) {
-               lo->lo_logical_blocksize = logical_blocksize;
-               blk_queue_physical_block_size(lo->lo_queue, lo->lo_blocksize);
-               blk_queue_logical_block_size(lo->lo_queue,
-                                            lo->lo_logical_blocksize);
-       }
        set_capacity(lo->lo_disk, x);
        bd_set_size(bdev, (loff_t)get_capacity(bdev->bd_disk) << 9);
        /* let user-space know about the new size */
@@ -820,7 +813,6 @@ static void loop_config_discard(struct loop_device *lo)
        struct file *file = lo->lo_backing_file;
        struct inode *inode = file->f_mapping->host;
        struct request_queue *q = lo->lo_queue;
-       int lo_bits = 9;
 
        /*
         * We use punch hole to reclaim the free space used by the
@@ -840,11 +832,9 @@ static void loop_config_discard(struct loop_device *lo)
 
        q->limits.discard_granularity = inode->i_sb->s_blocksize;
        q->limits.discard_alignment = 0;
-       if (lo->lo_flags & LO_FLAGS_BLOCKSIZE)
-               lo_bits = blksize_bits(lo->lo_logical_blocksize);
 
-       blk_queue_max_discard_sectors(q, UINT_MAX >> lo_bits);
-       blk_queue_max_write_zeroes_sectors(q, UINT_MAX >> lo_bits);
+       blk_queue_max_discard_sectors(q, UINT_MAX >> 9);
+       blk_queue_max_write_zeroes_sectors(q, UINT_MAX >> 9);
        queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
 }
 
@@ -938,7 +928,6 @@ static int loop_set_fd(struct loop_device *lo, fmode_t mode,
 
        lo->use_dio = false;
        lo->lo_blocksize = lo_blocksize;
-       lo->lo_logical_blocksize = 512;
        lo->lo_device = bdev;
        lo->lo_flags = lo_flags;
        lo->lo_backing_file = file;
@@ -1104,7 +1093,6 @@ loop_set_status(struct loop_device *lo, const struct loop_info64 *info)
        int err;
        struct loop_func_table *xfer;
        kuid_t uid = current_uid();
-       int lo_flags = lo->lo_flags;
 
        if (lo->lo_encrypt_key_size &&
            !uid_eq(lo->lo_key_owner, uid) &&
@@ -1137,26 +1125,9 @@ loop_set_status(struct loop_device *lo, const struct loop_info64 *info)
        if (err)
                goto exit;
 
-       if (info->lo_flags & LO_FLAGS_BLOCKSIZE) {
-               if (!(lo->lo_flags & LO_FLAGS_BLOCKSIZE))
-                       lo->lo_logical_blocksize = 512;
-               lo->lo_flags |= LO_FLAGS_BLOCKSIZE;
-               if (LO_INFO_BLOCKSIZE(info) != 512 &&
-                   LO_INFO_BLOCKSIZE(info) != 1024 &&
-                   LO_INFO_BLOCKSIZE(info) != 2048 &&
-                   LO_INFO_BLOCKSIZE(info) != 4096)
-                       return -EINVAL;
-               if (LO_INFO_BLOCKSIZE(info) > lo->lo_blocksize)
-                       return -EINVAL;
-       }
-
        if (lo->lo_offset != info->lo_offset ||
-           lo->lo_sizelimit != info->lo_sizelimit ||
-           lo->lo_flags != lo_flags ||
-           ((lo->lo_flags & LO_FLAGS_BLOCKSIZE) &&
-            lo->lo_logical_blocksize != LO_INFO_BLOCKSIZE(info))) {
-               if (figure_loop_size(lo, info->lo_offset, info->lo_sizelimit,
-                                    LO_INFO_BLOCKSIZE(info))) {
+           lo->lo_sizelimit != info->lo_sizelimit) {
+               if (figure_loop_size(lo, info->lo_offset, info->lo_sizelimit)) {
                        err = -EFBIG;
                        goto exit;
                }
@@ -1348,8 +1319,7 @@ static int loop_set_capacity(struct loop_device *lo)
        if (unlikely(lo->lo_state != Lo_bound))
                return -ENXIO;
 
-       return figure_loop_size(lo, lo->lo_offset, lo->lo_sizelimit,
-                               lo->lo_logical_blocksize);
+       return figure_loop_size(lo, lo->lo_offset, lo->lo_sizelimit);
 }
 
 static int loop_set_dio(struct loop_device *lo, unsigned long arg)
index 2c096b9a17b8ccd756065d5102b293796a5833ad..fecd3f97ef8c7cd9f825e6c58777a1a2bc6f3461 100644 (file)
@@ -49,7 +49,6 @@ struct loop_device {
        struct file *   lo_backing_file;
        struct block_device *lo_device;
        unsigned        lo_blocksize;
-       unsigned        lo_logical_blocksize;
        void            *key_data; 
 
        gfp_t           old_gfp_mask;
index 1498b899a593e31951c835f4f537d1bb03d0e596..d3d5523862c227d86166a56fba230467b9b545fc 100644 (file)
@@ -381,6 +381,7 @@ static void virtblk_config_changed_work(struct work_struct *work)
        struct request_queue *q = vblk->disk->queue;
        char cap_str_2[10], cap_str_10[10];
        char *envp[] = { "RESIZE=1", NULL };
+       unsigned long long nblocks;
        u64 capacity;
 
        /* Host must always specify the capacity. */
@@ -393,16 +394,19 @@ static void virtblk_config_changed_work(struct work_struct *work)
                capacity = (sector_t)-1;
        }
 
-       string_get_size(capacity, queue_logical_block_size(q),
+       nblocks = DIV_ROUND_UP_ULL(capacity, queue_logical_block_size(q) >> 9);
+
+       string_get_size(nblocks, queue_logical_block_size(q),
                        STRING_UNITS_2, cap_str_2, sizeof(cap_str_2));
-       string_get_size(capacity, queue_logical_block_size(q),
+       string_get_size(nblocks, queue_logical_block_size(q),
                        STRING_UNITS_10, cap_str_10, sizeof(cap_str_10));
 
        dev_notice(&vdev->dev,
-                 "new size: %llu %d-byte logical blocks (%s/%s)\n",
-                 (unsigned long long)capacity,
-                 queue_logical_block_size(q),
-                 cap_str_10, cap_str_2);
+                  "new size: %llu %d-byte logical blocks (%s/%s)\n",
+                  nblocks,
+                  queue_logical_block_size(q),
+                  cap_str_10,
+                  cap_str_2);
 
        set_capacity(vblk->disk, capacity);
        revalidate_disk(vblk->disk);
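The capacity config field is always in 512-byte sectors, so printing it as a count of logical blocks overstated the size for any block size above 512. A worked example under an assumed 4096-byte logical block size:

/*
 * Assume 4096-byte logical blocks and capacity = 20971520 sectors
 * (20971520 * 512 B = 10 GiB):
 *
 *   before: "new size: 20971520 4096-byte logical blocks" (reads as 80 GiB)
 *   after:  nblocks = DIV_ROUND_UP_ULL(20971520, 4096 >> 9)
 *                   = 20971520 / 8 = 2621440
 *           "new size: 2621440 4096-byte logical blocks"  (correct 10 GiB)
 */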
index b10cbaa82ff537b1cfb68d93d80aecdfebcd6c22..b26256f23d67fbdf58206adf2a253fcf40c50091 100644 (file)
@@ -717,8 +717,8 @@ static int tegra_adma_probe(struct platform_device *pdev)
                tdc->chan_addr = tdma->base_addr + ADMA_CH_REG_OFFSET(i);
 
                tdc->irq = of_irq_get(pdev->dev.of_node, i);
-               if (tdc->irq < 0) {
-                       ret = tdc->irq;
+               if (tdc->irq <= 0) {
+                       ret = tdc->irq ?: -ENXIO;
                        goto irq_dispose;
                }
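A hedged note on the return-value convention handled here: of_irq_get() can return a negative errno, zero when no interrupt mapping exists, or a valid IRQ number, and zero must not be treated as success:

/*
 *   tdc->irq < 0   -> propagate the errno (e.g. -EPROBE_DEFER)
 *   tdc->irq == 0  -> no interrupt mapping found; map to -ENXIO
 *   tdc->irq > 0   -> valid Linux IRQ number, continue probing
 *
 * ret = tdc->irq ?: -ENXIO;   // GNU "?:": keep a non-zero lhs as-is
 */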
 
index c0f336d23f9ccab1d375eda63f8381d62ccf0f00..aed25c4183bb0e5df17a751e25a33c907b91d9b1 100644 (file)
@@ -1655,6 +1655,9 @@ int drm_atomic_check_only(struct drm_atomic_state *state)
        if (config->funcs->atomic_check)
                ret = config->funcs->atomic_check(state->dev, state);
 
+       if (ret)
+               return ret;
+
        if (!state->allow_modeset) {
                for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
                        if (drm_atomic_crtc_needs_modeset(crtc_state)) {
@@ -1665,7 +1668,7 @@ int drm_atomic_check_only(struct drm_atomic_state *state)
                }
        }
 
-       return ret;
+       return 0;
 }
 EXPORT_SYMBOL(drm_atomic_check_only);
 
@@ -2167,10 +2170,10 @@ int drm_mode_atomic_ioctl(struct drm_device *dev,
        struct drm_atomic_state *state;
        struct drm_modeset_acquire_ctx ctx;
        struct drm_plane *plane;
-       struct drm_out_fence_state *fence_state = NULL;
+       struct drm_out_fence_state *fence_state;
        unsigned plane_mask;
        int ret = 0;
-       unsigned int i, j, num_fences = 0;
+       unsigned int i, j, num_fences;
 
        /* disallow for drivers not supporting atomic: */
        if (!drm_core_check_feature(dev, DRIVER_ATOMIC))
@@ -2211,6 +2214,8 @@ retry:
        plane_mask = 0;
        copied_objs = 0;
        copied_props = 0;
+       fence_state = NULL;
+       num_fences = 0;
 
        for (i = 0; i < arg->count_objs; i++) {
                uint32_t obj_id, count_props;
index 8dc11064253d9e5ed58f8c817a471b36c25c5951..cdaac37907b1e4577565625f6485a7cf3de25088 100644 (file)
@@ -255,13 +255,13 @@ drm_gem_object_release_handle(int id, void *ptr, void *data)
        struct drm_gem_object *obj = ptr;
        struct drm_device *dev = obj->dev;
 
+       if (dev->driver->gem_close_object)
+               dev->driver->gem_close_object(obj, file_priv);
+
        if (drm_core_check_feature(dev, DRIVER_PRIME))
                drm_gem_remove_prime_handles(obj, file_priv);
        drm_vma_node_revoke(&obj->vma_node, file_priv);
 
-       if (dev->driver->gem_close_object)
-               dev->driver->gem_close_object(obj, file_priv);
-
        drm_gem_object_handle_put_unlocked(obj);
 
        return 0;
index 5dc8c4350602a561fe4cfd77fce77e26770696fb..e40c12fabbdeaf9a325811d25e17c30990637e12 100644 (file)
@@ -601,6 +601,7 @@ int drm_mode_setplane(struct drm_device *dev, void *data,
 
                crtc = drm_crtc_find(dev, plane_req->crtc_id);
                if (!crtc) {
+                       drm_framebuffer_put(fb);
                        DRM_DEBUG_KMS("Unknown crtc ID %d\n",
                                      plane_req->crtc_id);
                        return -ENOENT;
index 713848c3634946bea6c9805a3a705c22c1b8c08c..e556a46cd4c292773b5e5dfca629c21625b05c43 100644 (file)
@@ -2714,7 +2714,7 @@ static int shadow_indirect_ctx(struct intel_shadow_wa_ctx *wa_ctx)
 unmap_src:
        i915_gem_object_unpin_map(obj);
 put_obj:
-       i915_gem_object_put(wa_ctx->indirect_ctx.obj);
+       i915_gem_object_put(obj);
        return ret;
 }
 
index 639d45c1dd2e6ac24013431aa6da84abb9f0b089..7ea7fd1e8856dc7bf98f3a6a6fdce0366db1f089 100644 (file)
@@ -1120,8 +1120,8 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port,
        bool is_dvi, is_hdmi, is_dp, is_edp, is_crt;
        uint8_t aux_channel, ddc_pin;
        /* Each DDI port can have more than one value on the "DVO Port" field,
-        * so look for all the possible values for each port and abort if more
-        * than one is found. */
+        * so look for all the possible values for each port.
+        */
        int dvo_ports[][3] = {
                {DVO_PORT_HDMIA, DVO_PORT_DPA, -1},
                {DVO_PORT_HDMIB, DVO_PORT_DPB, -1},
@@ -1130,7 +1130,10 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port,
                {DVO_PORT_CRT, DVO_PORT_HDMIE, DVO_PORT_DPE},
        };
 
-       /* Find the child device to use, abort if more than one found. */
+       /*
+        * Find the first child device to reference the port, report if more
+        * than one found.
+        */
        for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
                it = dev_priv->vbt.child_dev + i;
 
@@ -1140,11 +1143,11 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port,
 
                        if (it->common.dvo_port == dvo_ports[port][j]) {
                                if (child) {
-                                       DRM_DEBUG_KMS("More than one child device for port %c in VBT.\n",
+                                       DRM_DEBUG_KMS("More than one child device for port %c in VBT, using the first.\n",
                                                      port_name(port));
-                                       return;
+                               } else {
+                                       child = it;
                                }
-                               child = it;
                        }
                }
        }
index 6e09ceb71500ceeedce5836c3772861f47aeda21..150a156f3b1e9a2bff7f32bbf1ed1905eb71ca8b 100644 (file)
@@ -46,7 +46,7 @@ static u32 dcs_get_backlight(struct intel_connector *connector)
        struct intel_encoder *encoder = connector->encoder;
        struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
        struct mipi_dsi_device *dsi_device;
-       u8 data;
+       u8 data = 0;
        enum port port;
 
        /* FIXME: Need to take care of 16 bit brightness level */
index 7158c7ce9c0941a05654261e2443bac8b2315b08..91c07b0c8db912dbb24a08fc045dee7a899b237b 100644 (file)
@@ -306,7 +306,7 @@ static void bxt_exec_gpio(struct drm_i915_private *dev_priv,
 
        if (!gpio_desc) {
                gpio_desc = devm_gpiod_get_index(dev_priv->drm.dev,
-                                                "panel", gpio_index,
+                                                NULL, gpio_index,
                                                 value ? GPIOD_OUT_LOW :
                                                 GPIOD_OUT_HIGH);
 
index 7404cf2aac28690e2e5367de5a93164062a909b4..2afa4daa88e8cc695085c99d7a6d8b33c87a869a 100644 (file)
@@ -1221,6 +1221,14 @@ static int intel_init_workaround_bb(struct intel_engine_cs *engine)
        return ret;
 }
 
+static u8 gtiir[] = {
+       [RCS] = 0,
+       [BCS] = 0,
+       [VCS] = 1,
+       [VCS2] = 1,
+       [VECS] = 3,
+};
+
 static int gen8_init_common_ring(struct intel_engine_cs *engine)
 {
        struct drm_i915_private *dev_priv = engine->i915;
@@ -1245,9 +1253,22 @@ static int gen8_init_common_ring(struct intel_engine_cs *engine)
 
        DRM_DEBUG_DRIVER("Execlists enabled for %s\n", engine->name);
 
-       /* After a GPU reset, we may have requests to replay */
+       GEM_BUG_ON(engine->id >= ARRAY_SIZE(gtiir));
+
+       /*
+        * Clear any pending interrupt state.
+        *
+        * We do it twice out of paranoia that some of the IIR are double
+        * buffered, and if we only reset it once there may still be
+        * an interrupt pending.
+        */
+       I915_WRITE(GEN8_GT_IIR(gtiir[engine->id]),
+                  GT_CONTEXT_SWITCH_INTERRUPT << engine->irq_shift);
+       I915_WRITE(GEN8_GT_IIR(gtiir[engine->id]),
+                  GT_CONTEXT_SWITCH_INTERRUPT << engine->irq_shift);
        clear_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted);
 
+       /* After a GPU reset, we may have requests to replay */
        submit = false;
        for (n = 0; n < ARRAY_SIZE(engine->execlist_port); n++) {
                if (!port_isset(&port[n]))
index 5abef482eacf1b24780edea4c40ab7e593a42dc6..beb9baaf2f2e4e573956f1ea842c2963dce95bd7 100644 (file)
@@ -210,8 +210,8 @@ bool lspcon_init(struct intel_digital_port *intel_dig_port)
        struct drm_device *dev = intel_dig_port->base.base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
 
-       if (!IS_GEN9(dev_priv)) {
-               DRM_ERROR("LSPCON is supported on GEN9 only\n");
+       if (!HAS_LSPCON(dev_priv)) {
+               DRM_ERROR("LSPCON is not supported on this platform\n");
                return false;
        }
 
index 6276bb834b4fe15529652e73c07e901617abc194..d3845989a29dfcba4bf4b03f5740f9945d1bdcdf 100644 (file)
@@ -545,15 +545,13 @@ static void ipu_plane_atomic_update(struct drm_plane *plane,
                return;
        }
 
+       ics = ipu_drm_fourcc_to_colorspace(fb->format->format);
        switch (ipu_plane->dp_flow) {
        case IPU_DP_FLOW_SYNC_BG:
-               ipu_dp_setup_channel(ipu_plane->dp,
-                                       IPUV3_COLORSPACE_RGB,
-                                       IPUV3_COLORSPACE_RGB);
+               ipu_dp_setup_channel(ipu_plane->dp, ics, IPUV3_COLORSPACE_RGB);
                ipu_dp_set_global_alpha(ipu_plane->dp, true, 0, true);
                break;
        case IPU_DP_FLOW_SYNC_FG:
-               ics = ipu_drm_fourcc_to_colorspace(state->fb->format->format);
                ipu_dp_setup_channel(ipu_plane->dp, ics,
                                        IPUV3_COLORSPACE_UNKNOWN);
                /* Enable local alpha on partial plane */
index c6b1b7f3a2a397740d5545671113def5bb2e5519..c16bc0a7115b1b41db4ccca2b74061e3e7ccceb2 100644 (file)
@@ -275,11 +275,15 @@ static void rockchip_drm_fb_resume(struct drm_device *drm)
 static int rockchip_drm_sys_suspend(struct device *dev)
 {
        struct drm_device *drm = dev_get_drvdata(dev);
-       struct rockchip_drm_private *priv = drm->dev_private;
+       struct rockchip_drm_private *priv;
+
+       if (!drm)
+               return 0;
 
        drm_kms_helper_poll_disable(drm);
        rockchip_drm_fb_suspend(drm);
 
+       priv = drm->dev_private;
        priv->state = drm_atomic_helper_suspend(drm);
        if (IS_ERR(priv->state)) {
                rockchip_drm_fb_resume(drm);
@@ -293,8 +297,12 @@ static int rockchip_drm_sys_suspend(struct device *dev)
 static int rockchip_drm_sys_resume(struct device *dev)
 {
        struct drm_device *drm = dev_get_drvdata(dev);
-       struct rockchip_drm_private *priv = drm->dev_private;
+       struct rockchip_drm_private *priv;
+
+       if (!drm)
+               return 0;
 
+       priv = drm->dev_private;
        drm_atomic_helper_resume(drm, priv->state);
        rockchip_drm_fb_resume(drm);
        drm_kms_helper_poll_enable(drm);
index abc7d8fe06b450084bcd20ec560b62405415ff40..a45a627283a149c79693ec90dbd9da5fd8f42056 100644 (file)
 #include "sun4i_framebuffer.h"
 #include "sun4i_tcon.h"
 
+static void sun4i_drv_lastclose(struct drm_device *dev)
+{
+       struct sun4i_drv *drv = dev->dev_private;
+
+       drm_fbdev_cma_restore_mode(drv->fbdev);
+}
+
 DEFINE_DRM_GEM_CMA_FOPS(sun4i_drv_fops);
 
 static struct drm_driver sun4i_drv_driver = {
        .driver_features        = DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME | DRIVER_ATOMIC,
 
        /* Generic Operations */
+       .lastclose              = sun4i_drv_lastclose,
        .fops                   = &sun4i_drv_fops,
        .name                   = "sun4i-drm",
        .desc                   = "Allwinner sun4i Display Engine",
index 08766c6e7856b31f6183dba84f47c27980fb49c2..87a20b3dcf7a53a64fc464a3c8834d5c0a167781 100644 (file)
@@ -1,6 +1,7 @@
 config IMX_IPUV3_CORE
        tristate "IPUv3 core support"
        depends on SOC_IMX5 || SOC_IMX6Q || ARCH_MULTIPLATFORM
+       depends on DRM || !DRM # if DRM=m, this can't be 'y'
        select GENERIC_IRQ_CHIP
        help
          Choose this if you have an i.MX5/6 system and want to use the Image
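A hedged note on the added dependency line: in Kconfig's tristate logic !m evaluates to m, so "DRM || !DRM" caps this symbol at m whenever DRM=m, which is exactly the link-time constraint being enforced:

/*
 * DRM=y -> max(y, !y) = max(y, n) = y  -> IMX_IPUV3_CORE may be y or m
 * DRM=m -> max(m, !m) = max(m, m) = m  -> IMX_IPUV3_CORE capped at m
 * DRM=n -> max(n, !n) = max(n, y) = y  -> no restriction
 *
 * A built-in (y) IPUv3 core can therefore never reference symbols in
 * a modular (m) DRM.
 */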
index f19348328a715580aa37043abed6d1ee6b061cf4..6fdf9231c23cb0a4f4c736d95662495c2237e24f 100644 (file)
@@ -410,10 +410,11 @@ static bool aspeed_i2c_master_irq(struct aspeed_i2c_bus *bus)
        }
 
        /* We are in an invalid state; reset bus to a known state. */
-       if (!bus->msgs && bus->master_state != ASPEED_I2C_MASTER_STOP) {
+       if (!bus->msgs) {
                dev_err(bus->dev, "bus in unknown state");
                bus->cmd_err = -EIO;
-               aspeed_i2c_do_stop(bus);
+               if (bus->master_state != ASPEED_I2C_MASTER_STOP)
+                       aspeed_i2c_do_stop(bus);
                goto out_no_complete;
        }
        msg = &bus->msgs[bus->msgs_index];
index 143a8fd582b4aeb905ea25b416261a5c1f44a6e9..57248bccadbcb73df29b224594b66bf8088cfb30 100644 (file)
@@ -198,8 +198,7 @@ static void i2c_dw_configure_slave(struct dw_i2c_dev *dev)
        dev->functionality = I2C_FUNC_SLAVE | DW_IC_DEFAULT_FUNCTIONALITY;
 
        dev->slave_cfg = DW_IC_CON_RX_FIFO_FULL_HLD_CTRL |
-                        DW_IC_CON_RESTART_EN | DW_IC_CON_STOP_DET_IFADDRESSED |
-                        DW_IC_CON_SPEED_FAST;
+                        DW_IC_CON_RESTART_EN | DW_IC_CON_STOP_DET_IFADDRESSED;
 
        dev->mode = DW_IC_SLAVE;
 
@@ -430,7 +429,7 @@ static void dw_i2c_plat_complete(struct device *dev)
 #endif
 
 #ifdef CONFIG_PM
-static int dw_i2c_plat_suspend(struct device *dev)
+static int dw_i2c_plat_runtime_suspend(struct device *dev)
 {
        struct platform_device *pdev = to_platform_device(dev);
        struct dw_i2c_dev *i_dev = platform_get_drvdata(pdev);
@@ -452,11 +451,21 @@ static int dw_i2c_plat_resume(struct device *dev)
        return 0;
 }
 
+#ifdef CONFIG_PM_SLEEP
+static int dw_i2c_plat_suspend(struct device *dev)
+{
+       pm_runtime_resume(dev);
+       return dw_i2c_plat_runtime_suspend(dev);
+}
+#endif
+
 static const struct dev_pm_ops dw_i2c_dev_pm_ops = {
        .prepare = dw_i2c_plat_prepare,
        .complete = dw_i2c_plat_complete,
        SET_SYSTEM_SLEEP_PM_OPS(dw_i2c_plat_suspend, dw_i2c_plat_resume)
-       SET_RUNTIME_PM_OPS(dw_i2c_plat_suspend, dw_i2c_plat_resume, NULL)
+       SET_RUNTIME_PM_OPS(dw_i2c_plat_runtime_suspend,
+                          dw_i2c_plat_resume,
+                          NULL)
 };
 
 #define DW_I2C_DEV_PMOPS (&dw_i2c_dev_pm_ops)
index 0548c7ea578c9752f7c4cc1efebef37b41bca68a..78d8fb73927d853ebec3061685696db646236e36 100644 (file)
@@ -177,6 +177,8 @@ static int i2c_dw_reg_slave(struct i2c_client *slave)
                return -EBUSY;
        if (slave->flags & I2C_CLIENT_TEN)
                return -EAFNOSUPPORT;
+       pm_runtime_get_sync(dev->dev);
+
        /*
         * Set slave address in the IC_SAR register,
         * the address to which the DW_apb_i2c responds.
@@ -205,6 +207,7 @@ static int i2c_dw_unreg_slave(struct i2c_client *slave)
        dev->disable_int(dev);
        dev->disable(dev);
        dev->slave = NULL;
+       pm_runtime_put(dev->dev);
 
        return 0;
 }
@@ -272,7 +275,7 @@ static int i2c_dw_irq_handler_slave(struct dw_i2c_dev *dev)
        slave_activity = ((dw_readl(dev, DW_IC_STATUS) &
                DW_IC_STATUS_SLAVE_ACTIVITY) >> 6);
 
-       if (!enabled || !(raw_stat & ~DW_IC_INTR_ACTIVITY))
+       if (!enabled || !(raw_stat & ~DW_IC_INTR_ACTIVITY) || !dev->slave)
                return 0;
 
        dev_dbg(dev->dev,
@@ -382,7 +385,6 @@ int i2c_dw_probe_slave(struct dw_i2c_dev *dev)
        ret = i2c_add_numbered_adapter(adap);
        if (ret)
                dev_err(dev->dev, "failure adding adapter: %d\n", ret);
-       pm_runtime_put_noidle(dev->dev);
 
        return ret;
 }
index b4685bb9b5d7356f97067a80c1ff9bd0f2d92090..adca51a99487cfb4d2a6d8843047018265e3295a 100644 (file)
@@ -127,8 +127,7 @@ static int simtec_i2c_probe(struct platform_device *dev)
        iounmap(pd->reg);
 
  err_res:
-       release_resource(pd->ioarea);
-       kfree(pd->ioarea);
+       release_mem_region(pd->ioarea->start, size);
 
  err:
        kfree(pd);
@@ -142,8 +141,7 @@ static int simtec_i2c_remove(struct platform_device *dev)
        i2c_del_adapter(&pd->adap);
 
        iounmap(pd->reg);
-       release_resource(pd->ioarea);
-       kfree(pd->ioarea);
+       release_mem_region(pd->ioarea->start, resource_size(pd->ioarea));
        kfree(pd);
 
        return 0;
index 12822a4b8f8f09b5c080f7338a89e0ea00cbb4f2..56e46581b84bdb03eeb07ddaa8d83cec1aa76341 100644 (file)
@@ -353,8 +353,8 @@ static int i2c_device_probe(struct device *dev)
        }
 
        /*
-        * An I2C ID table is not mandatory, if and only if, a suitable Device
-        * Tree match table entry is supplied for the probing device.
+        * An I2C ID table is not mandatory if, and only if, a suitable OF
+        * or ACPI ID table is supplied for the probing device.
         */
        if (!driver->id_table &&
            !i2c_acpi_match_device(dev->driver->acpi_match_table, client) &&
index f1bbfd389367ff4530137be199c4063c65f97f5c..80d1ec693d2d7d798b1f3315b3c205b529f9a382 100644 (file)
@@ -1371,12 +1371,46 @@ static inline void mmc_apply_rel_rw(struct mmc_blk_request *brq,
         R1_CC_ERROR |          /* Card controller error */             \
         R1_ERROR)              /* General/unknown error */
 
-static bool mmc_blk_has_cmd_err(struct mmc_command *cmd)
+static void mmc_blk_eval_resp_error(struct mmc_blk_request *brq)
 {
-       if (!cmd->error && cmd->resp[0] & CMD_ERRORS)
-               cmd->error = -EIO;
+       u32 val;
 
-       return cmd->error;
+       /*
+        * Per the SD specification (physical layer version 4.10)[1],
+        * section 4.3.3, it explicitly states that "When the last
+        * block of user area is read using CMD18, the host should
+        * ignore OUT_OF_RANGE error that may occur even the sequence
+        * is correct". And JESD84-B51 for eMMC also has a similar
+        * statement on section 6.8.3.
+        *
+        * Multiple block read/write could be done by either predefined
+        * method, namely CMD23, or open-ending mode. For open-ending mode,
+        * we should ignore the OUT_OF_RANGE error as it's normal behaviour.
+        *
+        * However the spec[1] doesn't tell us whether we should also
+        * ignore that for predefined method. But per the spec[1], section
+        * 4.15 Set Block Count Command, it says "If illegal block count
+        * is set, out of range error will be indicated during read/write
+        * operation (For example, data transfer is stopped at user area
+        * boundary)." In other words, we can expect an out of range error
+        * in the response for the following CMD18/25. And if the argument of
+        * CMD23 plus the argument of CMD18/25 exceeds the max number of
+        * blocks, we can also expect a -ETIMEDOUT or another error number
+        * from the host drivers due to a missing data response (for write)
+        * or data (for read), as the card will stop the data transfer by
+        * itself per the spec. So we only need to check R1_OUT_OF_RANGE
+        * for open-ending mode.
+        */
+
+       if (!brq->stop.error) {
+               bool oor_with_open_end;
+               /* If there is no error yet, check R1 response */
+
+               val = brq->stop.resp[0] & CMD_ERRORS;
+               oor_with_open_end = val & R1_OUT_OF_RANGE && !brq->mrq.sbc;
+
+               if (val && !oor_with_open_end)
+                       brq->stop.error = -EIO;
+       }
 }
 
 static enum mmc_blk_status mmc_blk_err_check(struct mmc_card *card,
@@ -1400,8 +1434,11 @@ static enum mmc_blk_status mmc_blk_err_check(struct mmc_card *card,
         * stop.error indicates a problem with the stop command.  Data
         * may have been transferred, or may still be transferring.
         */
-       if (brq->sbc.error || brq->cmd.error || mmc_blk_has_cmd_err(&brq->stop) ||
-           brq->data.error) {
+
+       mmc_blk_eval_resp_error(brq);
+
+       if (brq->sbc.error || brq->cmd.error ||
+           brq->stop.error || brq->data.error) {
                switch (mmc_blk_cmd_recovery(card, req, brq, &ecc_err, &gen_err)) {
                case ERR_RETRY:
                        return MMC_BLK_RETRY;
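A hedged illustration of the open-ended distinction the new helper encodes (scenario invented): an open-ended CMD18 read of a card's final blocks, issued without CMD23, may legitimately carry R1_OUT_OF_RANGE in the CMD12 response:

/*
 *   val = brq->stop.resp[0] & CMD_ERRORS;
 *
 *   open-ended read ending at the card boundary:
 *       val contains R1_OUT_OF_RANGE and brq->mrq.sbc == NULL
 *       -> oor_with_open_end is true -> stop.error stays 0 (success)
 *
 *   pre-defined (CMD23) transfer with the same bit set:
 *       brq->mrq.sbc != NULL
 *       -> oor_with_open_end is false -> stop.error = -EIO
 */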
index 2c8baa0c2c4e11f2b5d1c39d4c3ad795d634135a..ceec21bd30c4fc5c49ad4c4bdbfe9bd031005809 100644 (file)
@@ -1364,7 +1364,18 @@ static int atmel_smc_nand_prepare_smcconf(struct atmel_nand *nand,
        ret = atmel_smc_cs_conf_set_timing(smcconf,
                                           ATMEL_HSMC_TIMINGS_TADL_SHIFT,
                                           ncycles);
-       if (ret)
+       /*
+        * Version 4 of the ONFI spec mandates that tADL be at least 400
+        * nanoseconds, but, depending on the master clock rate, 400 ns may not
+        * fit in the tADL field of the SMC reg. We need to relax the check and
+        * accept the -ERANGE return code.
+        *
+        * Note that previous versions of the ONFI spec had a lower tADL_min
+        * (100 or 200 ns). It's not clear why this timing constraint was
+        * increased, but most NANDs seem to be fine with values lower
+        * than 400 ns, so we should be safe.
+        */
+       if (ret && ret != -ERANGE)
                return ret;
 
        ncycles = DIV_ROUND_UP(conf->timings.sdr.tAR_min, mckperiodps);
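A worked example of why the -ERANGE relaxation is needed, with assumed numbers (a 133 MHz master clock, i.e. a period of roughly 7519 ps): encoding tADL = 400 ns then takes DIV_ROUND_UP(400000, 7519) = 54 cycles, which may not fit in the SMC's TADL bitfield, so atmel_smc_cs_conf_set_timing() reports -ERANGE:

	#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

	/* 400 ns = 400000 ps; at ~7519 ps per master-clock cycle this
	 * rounds up to 54 cycles, more than a narrow bitfield can hold. */
	unsigned int ncycles = DIV_ROUND_UP(400000, 7519);	/* == 54 */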
index 03a0d057bf2f80b3ea4c3f397eb1f607697aad3c..e4211c3cc49b2ac054a080fd34d83db56f9556da 100644 (file)
@@ -2373,6 +2373,7 @@ static int __init ns_init_module(void)
         return 0;
 
 err_exit:
+       nandsim_debugfs_remove(nand);
        free_nandsim(nand);
        nand_release(nsmtd);
        for (i = 0;i < ARRAY_SIZE(nand->partitions); ++i)
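The nandsim fix follows the usual error-unwind convention: tear down in reverse order of setup, so nothing initialized before the failure point (here the debugfs entries) is leaked. A generic sketch of that shape, with hypothetical setup_*/teardown_* helpers:

	/* Each failure label undoes everything set up before it,
	 * in reverse order of initialization. */
	int init_module_sketch(void)
	{
		int err;

		err = setup_a();
		if (err)
			return err;
		err = setup_b();		/* e.g. debugfs entries */
		if (err)
			goto err_a;
		err = setup_c();
		if (err)
			goto err_b;
		return 0;

	err_b:
		teardown_b();			/* the step the fix adds */
	err_a:
		teardown_a();
		return err;
	}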
index 007a4f3660862e1aa6e9a16207ae54bbbab61d58..1c4797e53f686b03323e4316d443c256854268dc 100644 (file)
@@ -107,6 +107,7 @@ static int vp_request_msix_vectors(struct virtio_device *vdev, int nvectors,
 {
        struct virtio_pci_device *vp_dev = to_vp_device(vdev);
        const char *name = dev_name(&vp_dev->vdev.dev);
+       unsigned flags = PCI_IRQ_MSIX;
        unsigned i, v;
        int err = -ENOMEM;
 
@@ -126,10 +127,13 @@ static int vp_request_msix_vectors(struct virtio_device *vdev, int nvectors,
                                        GFP_KERNEL))
                        goto error;
 
+       if (desc) {
+               flags |= PCI_IRQ_AFFINITY;
+               desc->pre_vectors++; /* virtio config vector */
+       }
+
        err = pci_alloc_irq_vectors_affinity(vp_dev->pci_dev, nvectors,
-                                            nvectors, PCI_IRQ_MSIX |
-                                            (desc ? PCI_IRQ_AFFINITY : 0),
-                                            desc);
+                                            nvectors, flags, desc);
        if (err < 0)
                goto error;
        vp_dev->msix_enabled = 1;
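The reshuffled virtio code builds the flags and the affinity descriptor up front. A sketch of the call pattern (pdev is a hypothetical struct pci_dev pointer; error handling trimmed): pre_vectors = 1 keeps vector 0, the config interrupt, out of the automatic affinity spreading.

	struct irq_affinity desc = { .pre_vectors = 1 };
	int nvec;

	nvec = pci_alloc_irq_vectors_affinity(pdev, nvectors, nvectors,
					      PCI_IRQ_MSIX | PCI_IRQ_AFFINITY,
					      &desc);
	if (nvec < 0)
		return nvec;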
index 56366e9840769dd8a45250ec3a7b65097c979ff9..569d3fb736be070d2693f600f867bd836aeb1dc4 100644 (file)
@@ -194,15 +194,20 @@ cifs_bp_rename_retry:
 }
 
 /*
+ * Don't allow path components longer than the server max.
  * Don't allow the separator character in a path component.
  * The VFS will not allow "/", but "\" is allowed by posix.
  */
 static int
-check_name(struct dentry *direntry)
+check_name(struct dentry *direntry, struct cifs_tcon *tcon)
 {
        struct cifs_sb_info *cifs_sb = CIFS_SB(direntry->d_sb);
        int i;
 
+       if (unlikely(direntry->d_name.len >
+                    tcon->fsAttrInfo.MaxPathNameComponentLength))
+               return -ENAMETOOLONG;
+
        if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIX_PATHS)) {
                for (i = 0; i < direntry->d_name.len; i++) {
                        if (direntry->d_name.name[i] == '\\') {
@@ -500,10 +505,6 @@ cifs_atomic_open(struct inode *inode, struct dentry *direntry,
                return finish_no_open(file, res);
        }
 
-       rc = check_name(direntry);
-       if (rc)
-               return rc;
-
        xid = get_xid();
 
        cifs_dbg(FYI, "parent inode = 0x%p name is: %pd and dentry = 0x%p\n",
@@ -516,6 +517,11 @@ cifs_atomic_open(struct inode *inode, struct dentry *direntry,
        }
 
        tcon = tlink_tcon(tlink);
+
+       rc = check_name(direntry, tcon);
+       if (rc)
+               goto out_free_xid;
+
        server = tcon->ses->server;
 
        if (server->ops->new_lease_key)
@@ -776,7 +782,7 @@ cifs_lookup(struct inode *parent_dir_inode, struct dentry *direntry,
        }
        pTcon = tlink_tcon(tlink);
 
-       rc = check_name(direntry);
+       rc = check_name(direntry, pTcon);
        if (rc)
                goto lookup_out;
 
index 5fb2fc2d0080b6e62fd0ad10e1a9c90c08308f2d..97edb4d376cd40e1e6044ed6b5ff7072e0d1b10c 100644 (file)
@@ -3219,8 +3219,8 @@ copy_fs_info_to_kstatfs(struct smb2_fs_full_size_info *pfs_inf,
        kst->f_bsize = le32_to_cpu(pfs_inf->BytesPerSector) *
                          le32_to_cpu(pfs_inf->SectorsPerAllocationUnit);
        kst->f_blocks = le64_to_cpu(pfs_inf->TotalAllocationUnits);
-       kst->f_bfree  = le64_to_cpu(pfs_inf->ActualAvailableAllocationUnits);
-       kst->f_bavail = le64_to_cpu(pfs_inf->CallerAvailableAllocationUnits);
+       kst->f_bfree  = kst->f_bavail =
+                       le64_to_cpu(pfs_inf->CallerAvailableAllocationUnits);
        return;
 }
 
index 306c2b603fb8aa8845558a2bf8226523e5282cdb..865d42c63e23e4746c6658fbe2746fd17ade11f9 100644 (file)
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -1383,6 +1383,16 @@ static int dax_iomap_pmd_fault(struct vm_fault *vmf,
 
        trace_dax_pmd_fault(inode, vmf, max_pgoff, 0);
 
+       /*
+        * Make sure that the faulting address's PMD offset (color) matches
+        * the PMD offset from the start of the file.  This is necessary so
+        * that a PMD range in the page table overlaps exactly with a PMD
+        * range in the radix tree.
+        */
+       if ((vmf->pgoff & PG_PMD_COLOUR) !=
+           ((vmf->address >> PAGE_SHIFT) & PG_PMD_COLOUR))
+               goto fallback;
+
        /* Fall back to PTEs if we're going to COW */
        if (write && !(vma->vm_flags & VM_SHARED))
                goto fallback;
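To make the colour check concrete, assume x86-64 geometry (4 KiB pages, 2 MiB PMDs) and the PG_PMD_COLOUR definition used in fs/dax.c, (PMD_SIZE >> PAGE_SHIFT) - 1 = 511: a PMD can only map the range when the file page offset and the virtual page index agree in their low nine bits, i.e. sit at the same position within a 2 MiB window.

	#define PAGE_SHIFT	12
	#define PG_PMD_COLOUR	511UL	/* (PMD_SIZE >> PAGE_SHIFT) - 1 */

	static int pmd_alignment_ok(unsigned long pgoff, unsigned long address)
	{
		/* e.g. pgoff with colour 3 matches a virtual page index of
		 * colour 3; colour 5 would force the PTE fallback path. */
		return (pgoff & PG_PMD_COLOUR) ==
		       ((address >> PAGE_SHIFT) & PG_PMD_COLOUR);
	}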
index 20fbcab977531bee75501a9403f4c8f60ee404d4..5f940d2a136b72a8e5ffcbff2f094b9f0ebfdaa3 100644 (file)
@@ -144,7 +144,7 @@ static void next_decode_page(struct nfsd4_compoundargs *argp)
        argp->p = page_address(argp->pagelist[0]);
        argp->pagelist++;
        if (argp->pagelen < PAGE_SIZE) {
-               argp->end = argp->p + (argp->pagelen>>2);
+               argp->end = argp->p + XDR_QUADLEN(argp->pagelen);
                argp->pagelen = 0;
        } else {
                argp->end = argp->p + (PAGE_SIZE>>2);
@@ -1279,9 +1279,7 @@ nfsd4_decode_write(struct nfsd4_compoundargs *argp, struct nfsd4_write *write)
                argp->pagelen -= pages * PAGE_SIZE;
                len -= pages * PAGE_SIZE;
 
-               argp->p = (__be32 *)page_address(argp->pagelist[0]);
-               argp->pagelist++;
-               argp->end = argp->p + XDR_QUADLEN(PAGE_SIZE);
+               next_decode_page(argp);
        }
        argp->p += XDR_QUADLEN(len);
 
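XDR operates on 4-byte quads, and argp->p/argp->end are __be32 pointers. XDR_QUADLEN (from include/linux/sunrpc/xdr.h) rounds a byte count up to whole quads, whereas the old pagelen>>2 truncated, cutting one to three trailing bytes off a page remainder that is not a multiple of four:

	#define XDR_QUADLEN(l)	(((l) + 3) >> 2)

	/* len = 6 bytes: XDR_QUADLEN(6) = 2 quads (8 bytes, padded),
	 * while 6 >> 2 = 1 quad would lose the last two bytes. */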
index 25f6a0cb27d3e9644c1ef42e7f1ff2b5d817293d..2a5d52fa90f5da353086a0faf5c8a5cdea63128a 100644 (file)
@@ -568,7 +568,6 @@ struct request_queue {
 
 #if defined(CONFIG_BLK_DEV_BSG)
        bsg_job_fn              *bsg_job_fn;
-       int                     bsg_job_size;
        struct bsg_class_device bsg_dev;
 #endif
 
index e34dde2da0ef57c3692ce7001fa66fe37c4dc578..637a20cfb237db4ab76a480172e918e6e48f849b 100644 (file)
@@ -24,6 +24,7 @@
 #define _BLK_BSG_
 
 #include <linux/blkdev.h>
+#include <scsi/scsi_request.h>
 
 struct request;
 struct device;
@@ -37,6 +38,7 @@ struct bsg_buffer {
 };
 
 struct bsg_job {
+       struct scsi_request sreq;
        struct device *dev;
        struct request *req;
 
index a3960f98679c13567c3db4ecb7919cd14a70cfb0..c8125ec1f4f2270a5a01a358832425cd57edb11a 100644 (file)
@@ -22,7 +22,6 @@ enum {
        LO_FLAGS_AUTOCLEAR      = 4,
        LO_FLAGS_PARTSCAN       = 8,
        LO_FLAGS_DIRECT_IO      = 16,
-       LO_FLAGS_BLOCKSIZE      = 32,
 };
 
 #include <asm/posix_types.h>   /* for __kernel_old_dev_t */
@@ -60,8 +59,6 @@ struct loop_info64 {
        __u64              lo_init[2];
 };
 
-#define LO_INFO_BLOCKSIZE(l) (l)->lo_init[0]
-
 /*
  * Loop filter types
  */
index e075b7780421dee1d8243b9dc178248398c5f189..cbbea277b3fba7a8a1ff73ffaf620202a706036a 100644 (file)
@@ -806,6 +806,7 @@ static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p,
        mm_init_cpumask(mm);
        mm_init_aio(mm);
        mm_init_owner(mm, p);
+       RCU_INIT_POINTER(mm->exe_file, NULL);
        mmu_notifier_mm_init(mm);
        init_tlb_flush_pending(mm);
 #if defined(CONFIG_TRANSPARENT_HUGEPAGE) && !USE_SPLIT_PMD_PTLOCKS
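RCU_INIT_POINTER() is the cheap initializer from the RCU API: unlike rcu_assign_pointer() it performs a plain store with no ordering, which is legitimate when the stored value is NULL or, as in mm_init(), when the enclosing object is not yet visible to any reader. For contrast (new_exe_file is a hypothetical value):

	RCU_INIT_POINTER(mm->exe_file, NULL);		/* mm not yet published */
	/* later, once the mm is live and readers may hold rcu_read_lock(): */
	rcu_assign_pointer(mm->exe_file, new_exe_file);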
index 47d8d8a25eae49604f81bcffe40e45ef9b8e4c6c..23ed525bc2bc1367dde306cd6a1fd24a5d6b5bae 100644 (file)
@@ -368,8 +368,8 @@ static int madvise_free_pte_range(pmd_t *pmd, unsigned long addr,
                                pte_offset_map_lock(mm, pmd, addr, &ptl);
                                goto out;
                        }
-                       put_page(page);
                        unlock_page(page);
+                       put_page(page);
                        pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
                        pte--;
                        addr -= PAGE_SIZE;
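The two-line swap in madvise_free_pte_range() fixes an ordering bug: put_page() may drop the final reference, after which the page can be freed, and a page must never be freed while still locked. The rule, annotated:

	unlock_page(page);	/* release PG_locked while a reference is held */
	put_page(page);		/* only then drop the reference, which may free */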
index bf14aea6ab709dc61666c1994718d9d244291e22..91205780e6b151f8239574c5f97b8d5ebc89b59f 100644 (file)
@@ -299,7 +299,7 @@ void __init memblock_discard(void)
                __memblock_free_late(addr, size);
        }
 
-       if (memblock.memory.regions == memblock_memory_init_regions) {
+       if (memblock.memory.regions != memblock_memory_init_regions) {
                addr = __pa(memblock.memory.regions);
                size = PAGE_ALIGN(sizeof(struct memblock_region) *
                                  memblock.memory.max);
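The inverted comparison is the whole fix: memblock.memory.regions initially points at a static __initdata array and is only redirected when the array is grown into dynamically allocated memory, so the discard path must free it precisely when the pointer differs from the static array. The old '==' test freed the static array and leaked the dynamic one. A lifecycle sketch, using the names from mm/memblock.c:

	/* boot:    memblock.memory.regions == memblock_memory_init_regions
	 * growth:  memblock_double_array() switches .regions to a new,
	 *          dynamically placed array
	 * discard: free .regions only if it was switched, i.e. "!=" */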
index 1bad301820c7a2e2729fc2f7c04e4b3694131576..7a58eb5757e3bd61d9dadfd8d851b9641454552d 100644 (file)
@@ -66,6 +66,7 @@
 #include <linux/kthread.h>
 #include <linux/memcontrol.h>
 #include <linux/ftrace.h>
+#include <linux/nmi.h>
 
 #include <asm/sections.h>
 #include <asm/tlbflush.h>
@@ -2535,9 +2536,14 @@ void drain_all_pages(struct zone *zone)
 
 #ifdef CONFIG_HIBERNATION
 
+/*
+ * Touch the watchdog once every WD_PAGE_COUNT pages.
+ */
+#define WD_PAGE_COUNT  (128*1024)
+
 void mark_free_pages(struct zone *zone)
 {
-       unsigned long pfn, max_zone_pfn;
+       unsigned long pfn, max_zone_pfn, page_count = WD_PAGE_COUNT;
        unsigned long flags;
        unsigned int order, t;
        struct page *page;
@@ -2552,6 +2558,11 @@ void mark_free_pages(struct zone *zone)
                if (pfn_valid(pfn)) {
                        page = pfn_to_page(pfn);
 
+                       if (!--page_count) {
+                               touch_nmi_watchdog();
+                               page_count = WD_PAGE_COUNT;
+                       }
+
                        if (page_zone(page) != zone)
                                continue;
 
@@ -2565,8 +2576,13 @@ void mark_free_pages(struct zone *zone)
                        unsigned long i;
 
                        pfn = page_to_pfn(page);
-                       for (i = 0; i < (1UL << order); i++)
+                       for (i = 0; i < (1UL << order); i++) {
+                               if (!--page_count) {
+                                       touch_nmi_watchdog();
+                                       page_count = WD_PAGE_COUNT;
+                               }
                                swsusp_set_page_free(pfn_to_page(pfn + i));
+                       }
                }
        }
        spin_unlock_irqrestore(&zone->lock, flags);
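mark_free_pages() walks every PFN in a zone with interrupts off under zone->lock; on large machines that can exceed the NMI watchdog threshold. The generic shape of the remedy is a countdown that pets the watchdog every WD_PAGE_COUNT iterations (start, end and the per-page work are placeholders):

	#include <linux/nmi.h>

	unsigned long budget = WD_PAGE_COUNT;
	unsigned long pfn;

	for (pfn = start; pfn < end; pfn++) {
		if (!--budget) {
			touch_nmi_watchdog();	/* keep the watchdog quiet */
			budget = WD_PAGE_COUNT;
		}
		/* ... per-page work ... */
	}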
index 6540e598244412023db650412062604b704b58b3..fbcb3c96a186e8bcf9758189e4d1f17f1bc00cdd 100644 (file)
@@ -3967,7 +3967,7 @@ int __init shmem_init(void)
        }
 
 #ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE
-       if (has_transparent_hugepage() && shmem_huge < SHMEM_HUGE_DENY)
+       if (has_transparent_hugepage() && shmem_huge > SHMEM_HUGE_DENY)
                SHMEM_SB(shm_mnt->mnt_sb)->huge = shmem_huge;
        else
                shmem_huge = 0; /* just in case it was patched */
@@ -4028,7 +4028,7 @@ static ssize_t shmem_enabled_store(struct kobject *kobj,
                return -EINVAL;
 
        shmem_huge = huge;
-       if (shmem_huge < SHMEM_HUGE_DENY)
+       if (shmem_huge > SHMEM_HUGE_DENY)
                SHMEM_SB(shm_mnt->mnt_sb)->huge = shmem_huge;
        return count;
 }
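The sign flip makes sense given the constants in mm/shmem.c (reproduced below as an assumption): the user-selectable modes are non-negative, while "deny" and "force" are negative sentinels. '> SHMEM_HUGE_DENY' therefore selects every ordinary setting, whereas the old '<' matched only SHMEM_HUGE_FORCE:

	#define SHMEM_HUGE_NEVER	0
	#define SHMEM_HUGE_ALWAYS	1
	#define SHMEM_HUGE_WITHIN_SIZE	2
	#define SHMEM_HUGE_ADVISE	3
	/* negative sentinels: "deny" and "force" */
	#define SHMEM_HUGE_DENY		(-1)
	#define SHMEM_HUGE_FORCE	(-2)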
index 2b720fa35c4ff7c2ae906e9e76d13d27a2b2f008..e18500151236ed3b162cebcf24966957e58be484 100644 (file)
@@ -421,6 +421,9 @@ static void svc_data_ready(struct sock *sk)
                dprintk("svc: socket %p(inet %p), busy=%d\n",
                        svsk, sk,
                        test_bit(XPT_BUSY, &svsk->sk_xprt.xpt_flags));
+
+               /* Refer to svc_setup_socket() for details. */
+               rmb();
                svsk->sk_odata(sk);
                if (!test_and_set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags))
                        svc_xprt_enqueue(&svsk->sk_xprt);
@@ -437,6 +440,9 @@ static void svc_write_space(struct sock *sk)
        if (svsk) {
                dprintk("svc: socket %p(inet %p), write_space busy=%d\n",
                        svsk, sk, test_bit(XPT_BUSY, &svsk->sk_xprt.xpt_flags));
+
+               /* Refer to svc_setup_socket() for details. */
+               rmb();
                svsk->sk_owspace(sk);
                svc_xprt_enqueue(&svsk->sk_xprt);
        }
@@ -760,8 +766,12 @@ static void svc_tcp_listen_data_ready(struct sock *sk)
        dprintk("svc: socket %p TCP (listen) state change %d\n",
                sk, sk->sk_state);
 
-       if (svsk)
+       if (svsk) {
+               /* Refer to svc_setup_socket() for details. */
+               rmb();
                svsk->sk_odata(sk);
+       }
+
        /*
         * This callback may be called twice when a new connection
         * is established, as a child socket inherits everything
@@ -794,6 +804,8 @@ static void svc_tcp_state_change(struct sock *sk)
        if (!svsk)
                printk("svc: socket %p: no user data\n", sk);
        else {
+               /* Refer to svc_setup_socket() for details. */
+               rmb();
                svsk->sk_ostate(sk);
                if (sk->sk_state != TCP_ESTABLISHED) {
                        set_bit(XPT_CLOSE, &svsk->sk_xprt.xpt_flags);
@@ -1381,12 +1393,18 @@ static struct svc_sock *svc_setup_socket(struct svc_serv *serv,
                return ERR_PTR(err);
        }
 
-       inet->sk_user_data = svsk;
        svsk->sk_sock = sock;
        svsk->sk_sk = inet;
        svsk->sk_ostate = inet->sk_state_change;
        svsk->sk_odata = inet->sk_data_ready;
        svsk->sk_owspace = inet->sk_write_space;
+       /*
+        * This barrier pairs with the rmb() in svc_data_ready(),
+        * svc_tcp_listen_data_ready() and the other callbacks above:
+        * the saved callback pointers must be visible before
+        * sk_user_data is set, or a callback could run before its
+        * saved pointer is valid.
+        */
+       wmb();
+       inet->sk_user_data = svsk;
 
        /* Initialize the socket */
        if (sock->type == SOCK_DGRAM)
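The svcsock changes implement the classic publish/consume pairing: the writer saves the original callbacks, issues wmb(), and only then publishes svsk through sk_user_data; each reader loads the pointer, issues the matching rmb(), and only then trusts the saved fields. A condensed sketch (current kernels would likely use smp_store_release()/smp_load_acquire() for the same effect):

	/* writer, svc_setup_socket() */
	svsk->sk_odata = inet->sk_data_ready;	/* 1: save old callback   */
	wmb();					/* 2: order save first    */
	inet->sk_user_data = svsk;		/* 3: publish svsk        */

	/* reader, e.g. svc_data_ready() on another CPU */
	struct svc_sock *svsk = sk->sk_user_data;
	if (svsk) {
		rmb();				/* pairs with wmb() above */
		svsk->sk_odata(sk);		/* saved pointer is valid */
	}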
index 3c6be1452e35dfc48e74ad23e1668c633ce57b24..4525e127afd904e62f34af00e26ccee52252b460 100644 (file)
@@ -1137,7 +1137,7 @@ static int snd_ctl_elem_user_tlv(struct snd_kcontrol *kcontrol,
                mutex_lock(&ue->card->user_ctl_lock);
                change = ue->tlv_data_size != size;
                if (!change)
-                       change = memcmp(ue->tlv_data, new_data, size);
+                       change = memcmp(ue->tlv_data, new_data, size) != 0;
                kfree(ue->tlv_data);
                ue->tlv_data = new_data;
                ue->tlv_data_size = size;
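memcmp() returns an ordering (<0, 0, >0), not a boolean, so normalizing with '!= 0' states the intent explicitly and stays correct even if the result is later stored in a narrower type. As a standalone idiom:

	#include <stdbool.h>
	#include <string.h>

	static bool tlv_changed(const void *a, const void *b, size_t size)
	{
		/* "contents differ", independent of memcmp()'s sign */
		return memcmp(a, b, size) != 0;
	}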
index f0e4d502d60482ae8374cf9f830b739eb9e38753..066b5df666f42d2259b40ee5d21492f9165e4470 100644 (file)
@@ -210,9 +210,14 @@ EXPORT_SYMBOL(fw_iso_resources_update);
  */
 void fw_iso_resources_free(struct fw_iso_resources *r)
 {
-       struct fw_card *card = fw_parent_device(r->unit)->card;
+       struct fw_card *card;
        int bandwidth, channel;
 
+       /* Not initialized. */
+       if (r->unit == NULL)
+               return;
+       card = fw_parent_device(r->unit)->card;
+
        mutex_lock(&r->mutex);
 
        if (r->allocated) {
index bf779cfeef0dfaea62ea5684997314a9fe02d4c0..59a270406353d71563f2473ff1675116729acda9 100644 (file)
@@ -128,6 +128,7 @@ static void do_registration(struct work_struct *work)
        return;
 error:
        snd_motu_transaction_unregister(motu);
+       snd_motu_stream_destroy_duplex(motu);
        snd_card_free(motu->card);
        dev_info(&motu->unit->device,
                 "Sound card registration failed: %d\n", err);
index 8c1289963c802b34783a8a8b4af42ed55bbcd71c..a81aacf684b26341ec9257366d7c83a47962a16d 100644 (file)
@@ -947,6 +947,7 @@ static const struct snd_pci_quirk cxt5066_fixups[] = {
        SND_PCI_QUIRK(0x17aa, 0x390b, "Lenovo G50-80", CXT_FIXUP_STEREO_DMIC),
        SND_PCI_QUIRK(0x17aa, 0x3975, "Lenovo U300s", CXT_FIXUP_STEREO_DMIC),
        SND_PCI_QUIRK(0x17aa, 0x3977, "Lenovo IdeaPad U310", CXT_FIXUP_STEREO_DMIC),
+       SND_PCI_QUIRK(0x17aa, 0x3978, "Lenovo G50-70", CXT_FIXUP_STEREO_DMIC),
        SND_PCI_QUIRK(0x17aa, 0x397b, "Lenovo S205", CXT_FIXUP_STEREO_DMIC),
        SND_PCI_QUIRK_VENDOR(0x17aa, "Thinkpad", CXT_FIXUP_THINKPAD_ACPI),
        SND_PCI_QUIRK(0x1c06, 0x2011, "Lemote A1004", CXT_PINCFG_LEMOTE_A1004),
index 36e530a36c8241d5005c8e8f5f933f2a405c3434..6f629278d982d23e53baf1bd7819e2103e7e02b7 100644 (file)
@@ -5021,6 +5021,7 @@ static const struct regmap_config rt5677_regmap = {
 static const struct i2c_device_id rt5677_i2c_id[] = {
        { "rt5677", RT5677 },
        { "rt5676", RT5676 },
+       { "RT5677CE:00", RT5677 },
        { }
 };
 MODULE_DEVICE_TABLE(i2c, rt5677_i2c_id);
index 6a03f9697039cc3d157bba92d5253b328e6ad45d..5d2a63248b1d4e77b1bf3edde93ba86710d08f38 100644 (file)
@@ -1309,10 +1309,13 @@ void snd_usb_ctl_msg_quirk(struct usb_device *dev, unsigned int pipe,
            && (requesttype & USB_TYPE_MASK) == USB_TYPE_CLASS)
                mdelay(20);
 
-       /* Zoom R16/24 needs a tiny delay here, otherwise requests like
-        * get/set frequency return as failed despite actually succeeding.
+       /* The Zoom R16/24, Logitech H650e and Jabra 550a need a tiny
+        * delay here, otherwise requests like get/set frequency return
+        * as failed despite actually succeeding.
         */
-       if (chip->usb_id == USB_ID(0x1686, 0x00dd) &&
+       if ((chip->usb_id == USB_ID(0x1686, 0x00dd) ||
+            chip->usb_id == USB_ID(0x046d, 0x0a46) ||
+            chip->usb_id == USB_ID(0x0b0e, 0x0349)) &&
            (requesttype & USB_TYPE_MASK) == USB_TYPE_CLASS)
                mdelay(1);
 }