git.proxmox.com Git - mirror_ubuntu-artful-kernel.git/commitdiff
Merge tag 'mmc-v4.13-rc7' of git://git.kernel.org/pub/scm/linux/kernel/git/ulfh/mmc
author Linus Torvalds <torvalds@linux-foundation.org>
Fri, 1 Sep 2017 17:41:02 +0000 (10:41 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Fri, 1 Sep 2017 17:41:02 +0000 (10:41 -0700)
Pull two more MMC fixes from Ulf Hansson:
 "MMC core:
   - Fix block status codes

  MMC host:
   - sdhci-xenon: Fix SD bus voltage select"

* tag 'mmc-v4.13-rc7' of git://git.kernel.org/pub/scm/linux/kernel/git/ulfh/mmc:
  mmc: sdhci-xenon: add set_power callback
  mmc: block: Fix block status codes

87 files changed:
arch/alpha/include/asm/io.h
arch/alpha/include/asm/types.h
arch/alpha/include/asm/unistd.h
arch/alpha/include/uapi/asm/types.h
arch/alpha/include/uapi/asm/unistd.h
arch/alpha/kernel/core_marvel.c
arch/alpha/kernel/core_titan.c
arch/alpha/kernel/module.c
arch/alpha/kernel/smp.c
arch/alpha/kernel/systbls.S
arch/alpha/lib/Makefile
arch/alpha/lib/copy_user.S
arch/alpha/lib/ev6-copy_user.S
arch/arc/kernel/intc-arcv2.c
arch/arc/kernel/intc-compact.c
arch/arm/include/asm/kvm_host.h
arch/arm64/include/asm/kvm_host.h
arch/c6x/configs/dsk6455_defconfig
arch/c6x/configs/evmc6457_defconfig
arch/c6x/configs/evmc6472_defconfig
arch/c6x/configs/evmc6474_defconfig
arch/c6x/configs/evmc6678_defconfig
arch/c6x/platforms/megamod-pic.c
arch/c6x/platforms/plldata.c
arch/c6x/platforms/timer64.c
arch/mips/include/asm/kvm_host.h
arch/powerpc/include/asm/kvm_host.h
arch/powerpc/platforms/powernv/npu-dma.c
arch/s390/include/asm/mmu_context.h
arch/s390/mm/mmap.c
arch/x86/include/asm/kvm_host.h
arch/x86/kvm/x86.c
arch/x86/um/user-offsets.c
crypto/algif_skcipher.c
crypto/chacha20_generic.c
crypto/testmgr.h
drivers/ata/ahci_da850.c
drivers/ata/libata-core.c
drivers/block/xen-blkback/xenbus.c
drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
drivers/gpu/drm/bridge/sil-sii8620.c
drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
drivers/infiniband/core/umem_odp.c
drivers/infiniband/hw/hfi1/mmu_rb.c
drivers/iommu/amd_iommu_v2.c
drivers/iommu/intel-svm.c
drivers/md/dm-mpath.c
drivers/md/dm.c
drivers/misc/mic/scif/scif_dma.c
drivers/misc/sgi-gru/grutlbpurge.c
drivers/nvme/host/pci.c
drivers/nvme/host/rdma.c
drivers/s390/cio/vfio_ccw_cp.c
drivers/scsi/ipr.c
drivers/scsi/qedf/qedf_els.c
drivers/scsi/sg.c
drivers/xen/gntdev.c
fs/cifs/dir.c
fs/cifs/smb2pdu.h
fs/dax.c
fs/jfs/super.c
fs/select.c
include/asm-generic/topology.h
include/linux/ata.h
include/linux/compiler.h
include/linux/device-mapper.h
include/linux/mm.h
include/linux/mmu_notifier.h
include/linux/nvme.h
include/uapi/linux/ndctl.h
kernel/cgroup/cpuset.c
kernel/events/uprobes.c
kernel/fork.c
kernel/kthread.c
lib/mpi/mpicoder.c
mm/filemap.c
mm/madvise.c
mm/memory.c
mm/mmu_notifier.c
mm/page_alloc.c
mm/rmap.c
scripts/dtc/checks.c
sound/core/pcm_native.c
sound/soc/codecs/rt5670.c
sound/soc/generic/simple-card-utils.c
sound/soc/intel/boards/cht_bsw_rt5672.c
virt/kvm/kvm_main.c

index ff4049155c840c2fc4bc9e8015b5282dadb57469..4d61d2a50c525aab2531e3bf670227cbd5e66bca 100644
--- a/arch/alpha/include/asm/io.h
+++ b/arch/alpha/include/asm/io.h
@@ -299,6 +299,7 @@ static inline void __iomem * ioremap_nocache(unsigned long offset,
        return ioremap(offset, size);
 }
 
+#define ioremap_wc ioremap_nocache
 #define ioremap_uc ioremap_nocache
 
 static inline void iounmap(volatile void __iomem *addr)
index 4cb4b6d3452c0b3439c3aa3c0f928f74de09fb3a..0bc66e1d3a7e9c81f1cca84943f60cd91d85a625 100644
--- a/arch/alpha/include/asm/types.h
+++ b/arch/alpha/include/asm/types.h
@@ -1,6 +1,6 @@
 #ifndef _ALPHA_TYPES_H
 #define _ALPHA_TYPES_H
 
-#include <asm-generic/int-ll64.h>
+#include <uapi/asm/types.h>
 
 #endif /* _ALPHA_TYPES_H */
index b37153ecf2ac33e19b7df4b061afddf4238f7ebb..db7fc0f511e2ae9cc905a6bae64f9195562c013f 100644
--- a/arch/alpha/include/asm/unistd.h
+++ b/arch/alpha/include/asm/unistd.h
@@ -3,7 +3,7 @@
 
 #include <uapi/asm/unistd.h>
 
-#define NR_SYSCALLS                    514
+#define NR_SYSCALLS                    523
 
 #define __ARCH_WANT_OLD_READDIR
 #define __ARCH_WANT_STAT64
index 9fd3cd459777767e7c80c4c761ee88278f62b17d..8d1024d7be0546bc744adf163e87f9dc723539fe 100644
--- a/arch/alpha/include/uapi/asm/types.h
+++ b/arch/alpha/include/uapi/asm/types.h
@@ -9,8 +9,18 @@
  * need to be careful to avoid a name clashes.
  */
 
-#ifndef __KERNEL__
+/*
+ * This is here because we used to use l64 for alpha
+ * and we don't want to impact user mode with our change to ll64
+ * in the kernel.
+ *
+ * However, some user programs are fine with this.  They can
+ * flag __SANE_USERSPACE_TYPES__ to get int-ll64.h here.
+ */
+#if !defined(__SANE_USERSPACE_TYPES__) && !defined(__KERNEL__)
 #include <asm-generic/int-l64.h>
+#else
+#include <asm-generic/int-ll64.h>
 #endif
 
 #endif /* _UAPI_ALPHA_TYPES_H */
index aa33bf5aacb6c1666203e38700939750c90cb5c5..a2945fea6c868096e6094780bd96384205806a74 100644
--- a/arch/alpha/include/uapi/asm/unistd.h
+++ b/arch/alpha/include/uapi/asm/unistd.h
 #define __NR_getrandom                 511
 #define __NR_memfd_create              512
 #define __NR_execveat                  513
+#define __NR_seccomp                   514
+#define __NR_bpf                       515
+#define __NR_userfaultfd               516
+#define __NR_membarrier                        517
+#define __NR_mlock2                    518
+#define __NR_copy_file_range           519
+#define __NR_preadv2                   520
+#define __NR_pwritev2                  521
+#define __NR_statx                     522
+
+/* Alpha doesn't have protection keys. */
+#define __IGNORE_pkey_mprotect
+#define __IGNORE_pkey_alloc
+#define __IGNORE_pkey_free
 
 #endif /* _UAPI_ALPHA_UNISTD_H */
index d5f0580746a5d632452816b00427c212230b0810..03ff832b1cb4483e7d717def1c4f10ebe6c63619 100644
--- a/arch/alpha/kernel/core_marvel.c
+++ b/arch/alpha/kernel/core_marvel.c
@@ -351,7 +351,7 @@ marvel_init_io7(struct io7 *io7)
        }
 }
 
-void
+void __init
 marvel_io7_present(gct6_node *node)
 {
        int pe;
@@ -369,6 +369,7 @@ marvel_io7_present(gct6_node *node)
 static void __init
 marvel_find_console_vga_hose(void)
 {
+#ifdef CONFIG_VGA_HOSE
        u64 *pu64 = (u64 *)((u64)hwrpb + hwrpb->ctbt_offset);
 
        if (pu64[7] == 3) {     /* TERM_TYPE == graphics */
@@ -402,9 +403,10 @@ marvel_find_console_vga_hose(void)
                        pci_vga_hose = hose;
                }
        }
+#endif
 }
 
-gct6_search_struct gct_wanted_node_list[] = {
+gct6_search_struct gct_wanted_node_list[] __initdata = {
        { GCT_TYPE_HOSE, GCT_SUBTYPE_IO_PORT_MODULE, marvel_io7_present },
        { 0, 0, NULL }
 };
index 219bf271c0ba2e5f2d668af707df57fbbd00ccfd..b532d925443d50b02f3a54481de122c36d948f79 100644
--- a/arch/alpha/kernel/core_titan.c
+++ b/arch/alpha/kernel/core_titan.c
@@ -461,6 +461,7 @@ titan_ioremap(unsigned long addr, unsigned long size)
        unsigned long *ptes;
        unsigned long pfn;
 
+#ifdef CONFIG_VGA_HOSE
        /*
         * Adjust the address and hose, if necessary.
         */ 
@@ -468,6 +469,7 @@ titan_ioremap(unsigned long addr, unsigned long size)
                h = pci_vga_hose->index;
                addr += pci_vga_hose->mem_space->start;
        }
+#endif
 
        /*
         * Find the hose.
index 936bc8f89a679f2f8aeb2f7e3cb41f11190a2d77..47632fa8c24e02418cdbac8d5c139f101aa51945 100644
--- a/arch/alpha/kernel/module.c
+++ b/arch/alpha/kernel/module.c
@@ -181,6 +181,9 @@ apply_relocate_add(Elf64_Shdr *sechdrs, const char *strtab,
                switch (r_type) {
                case R_ALPHA_NONE:
                        break;
+               case R_ALPHA_REFLONG:
+                       *(u32 *)location = value;
+                       break;
                case R_ALPHA_REFQUAD:
                        /* BUG() can produce misaligned relocations. */
                        ((u32 *)location)[0] = value;
index 9fc560459ebd64ad3735f9b4dcd5ce6596b7bfee..f6726a74642703381c2151e9d927e267fcba0975 100644
--- a/arch/alpha/kernel/smp.c
+++ b/arch/alpha/kernel/smp.c
@@ -115,7 +115,7 @@ wait_boot_cpu_to_stop(int cpuid)
 /*
  * Where secondaries begin a life of C.
  */
-void
+void __init
 smp_callin(void)
 {
        int cpuid = hard_smp_processor_id();
index 9b62e3fd4f038a925657beb15de3de89f8548473..5b4514abb23450998515530ebc824ae1857f27b9 100644
--- a/arch/alpha/kernel/systbls.S
+++ b/arch/alpha/kernel/systbls.S
@@ -532,6 +532,15 @@ sys_call_table:
        .quad sys_getrandom
        .quad sys_memfd_create
        .quad sys_execveat
+       .quad sys_seccomp
+       .quad sys_bpf                           /* 515 */
+       .quad sys_userfaultfd
+       .quad sys_membarrier
+       .quad sys_mlock2
+       .quad sys_copy_file_range
+       .quad sys_preadv2                       /* 520 */
+       .quad sys_pwritev2
+       .quad sys_statx
 
        .size sys_call_table, . - sys_call_table
        .type sys_call_table, @object
index 7083434dd2419b7b3ef6ef8e74c204522d4a3257..a8081596036457af2149b37d356c64f2f1e7acc6 100644
--- a/arch/alpha/lib/Makefile
+++ b/arch/alpha/lib/Makefile
@@ -20,12 +20,8 @@ lib-y =      __divqu.o __remqu.o __divlu.o __remlu.o \
        checksum.o \
        csum_partial_copy.o \
        $(ev67-y)strlen.o \
-       $(ev67-y)strcat.o \
-       strcpy.o \
-       $(ev67-y)strncat.o \
-       strncpy.o \
-       $(ev6-y)stxcpy.o \
-       $(ev6-y)stxncpy.o \
+       stycpy.o \
+       styncpy.o \
        $(ev67-y)strchr.o \
        $(ev67-y)strrchr.o \
        $(ev6-y)memchr.o \
@@ -49,3 +45,17 @@ AFLAGS___remlu.o =       -DREM -DINTSIZE
 $(addprefix $(obj)/,__divqu.o __remqu.o __divlu.o __remlu.o): \
                                                $(src)/$(ev6-y)divide.S FORCE
        $(call if_changed_rule,as_o_S)
+
+# There are direct branches between {str*cpy,str*cat} and stx*cpy.
+# Ensure the branches are within range by merging these objects.
+
+LDFLAGS_stycpy.o := -r
+LDFLAGS_styncpy.o := -r
+
+$(obj)/stycpy.o: $(obj)/strcpy.o $(obj)/$(ev67-y)strcat.o \
+                $(obj)/$(ev6-y)stxcpy.o FORCE
+       $(call if_changed,ld)
+
+$(obj)/styncpy.o: $(obj)/strncpy.o $(obj)/$(ev67-y)strncat.o \
+                $(obj)/$(ev6-y)stxncpy.o FORCE
+       $(call if_changed,ld)
index 159f1b7e6e495f098a28dc818c7c383ede2dda76..c277a1a4383e5fbb4bbb76b8c2c7802b8bbd6a26 100644
--- a/arch/alpha/lib/copy_user.S
+++ b/arch/alpha/lib/copy_user.S
@@ -34,7 +34,7 @@
        .ent __copy_user
 __copy_user:
        .prologue 0
-       and $18,$18,$0
+       mov $18,$0
        and $16,7,$3
        beq $0,$35
        beq $3,$36
index 35e6710d070054b1c9987d0f5c10f1c29510b1a2..954ca03ebebef371a4e1dd873efce4493de53309 100644
--- a/arch/alpha/lib/ev6-copy_user.S
+++ b/arch/alpha/lib/ev6-copy_user.S
                                # Pipeline info: Slotting & Comments
 __copy_user:
        .prologue 0
-       andq $18, $18, $0
-       subq $18, 32, $1        # .. E  .. ..   : Is this going to be a small copy?
-       beq $0, $zerolength     # U  .. .. ..   : U L U L
+       mov $18, $0             # .. .. .. E
+       subq $18, 32, $1        # .. .. E. ..   : Is this going to be a small copy?
+       nop                     # .. E  .. ..
+       beq $18, $zerolength    # U  .. .. ..   : U L U L
 
        and $16,7,$3            # .. .. .. E    : is leading dest misalignment
        ble $1, $onebyteloop    # .. .. U  ..   : 1st branch : small amount of data
index cf90714a676d6baabbdc1beb2119fe530e1c84f3..067ea362fb3efc3bc3a9217aaf197763eafb275e 100644
--- a/arch/arc/kernel/intc-arcv2.c
+++ b/arch/arc/kernel/intc-arcv2.c
@@ -75,13 +75,20 @@ void arc_init_IRQ(void)
         * Set a default priority for all available interrupts to prevent
         * switching of register banks if Fast IRQ and multiple register banks
         * are supported by CPU.
-        * Also disable all IRQ lines so faulty external hardware won't
+        * Also disable private-per-core IRQ lines so faulty external HW won't
         * trigger interrupt that kernel is not ready to handle.
         */
        for (i = NR_EXCEPTIONS; i < irq_bcr.irqs + NR_EXCEPTIONS; i++) {
                write_aux_reg(AUX_IRQ_SELECT, i);
                write_aux_reg(AUX_IRQ_PRIORITY, ARCV2_IRQ_DEF_PRIO);
-               write_aux_reg(AUX_IRQ_ENABLE, 0);
+
+               /*
+                * Only mask cpu private IRQs here.
+                * "common" interrupts are masked at IDU, otherwise it would
+                * need to be unmasked at each cpu, with IPIs
+                */
+               if (i < FIRST_EXT_IRQ)
+                       write_aux_reg(AUX_IRQ_ENABLE, 0);
        }
 
        /* setup status32, don't enable intr yet as kernel doesn't want */
index cef388025adf43aed091869f102e01e1bee72092..47b421fa0147be9604b1c808d378a0108f735b34 100644
--- a/arch/arc/kernel/intc-compact.c
+++ b/arch/arc/kernel/intc-compact.c
@@ -27,7 +27,7 @@
  */
 void arc_init_IRQ(void)
 {
-       int level_mask = 0, i;
+       unsigned int level_mask = 0, i;
 
        /* Is timer high priority Interrupt (Level2 in ARCompact jargon) */
        level_mask |= IS_ENABLED(CONFIG_ARC_COMPACT_IRQ_LEVELS) << TIMER0_IRQ;
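
A stand-alone C sketch of one plausible reason the mask above is now unsigned (hypothetical interrupt number, not the ARC code): left-shifting a bit into position 31 of a signed int is undefined behaviour, while the same shift on an unsigned int is well defined.

#include <stdio.h>

int main(void)
{
	unsigned int level_mask = 0;
	unsigned int irq = 31;	/* hypothetical interrupt number */

	/* well defined for unsigned; UB if level_mask were a signed int */
	level_mask |= 1u << irq;

	printf("level_mask = 0x%08x\n", level_mask);
	return 0;
}
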
index 127e2dd2e21ce12c7daa21e682ce72d36f6b1afd..4a879f6ff13bea92d189eec9370be73cd616705c 100644
--- a/arch/arm/include/asm/kvm_host.h
+++ b/arch/arm/include/asm/kvm_host.h
@@ -225,12 +225,6 @@ int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices);
 int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end);
 int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);
 
-/* We do not have shadow page tables, hence the empty hooks */
-static inline void kvm_arch_mmu_notifier_invalidate_page(struct kvm *kvm,
-                                                        unsigned long address)
-{
-}
-
 struct kvm_vcpu *kvm_arm_get_running_vcpu(void);
 struct kvm_vcpu __percpu **kvm_get_running_vcpus(void);
 void kvm_arm_halt_guest(struct kvm *kvm);
index d68630007b14f542865df7e49bc0efacefe9a39d..e923b58606e2bf8e33a76bd4a8a9a4c1336199c6 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -326,12 +326,6 @@ void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
 int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end);
 int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);
 
-/* We do not have shadow page tables, hence the empty hooks */
-static inline void kvm_arch_mmu_notifier_invalidate_page(struct kvm *kvm,
-                                                        unsigned long address)
-{
-}
-
 struct kvm_vcpu *kvm_arm_get_running_vcpu(void);
 struct kvm_vcpu * __percpu *kvm_get_running_vcpus(void);
 void kvm_arm_halt_guest(struct kvm *kvm);
index 4663487c67a176472469348c957137ed12c68412..d764ea4cce7f51c739cc65408df3d469b87d66a1 100644
--- a/arch/c6x/configs/dsk6455_defconfig
+++ b/arch/c6x/configs/dsk6455_defconfig
@@ -1,5 +1,4 @@
 CONFIG_SOC_TMS320C6455=y
-CONFIG_EXPERIMENTAL=y
 # CONFIG_LOCALVERSION_AUTO is not set
 CONFIG_SYSVIPC=y
 CONFIG_SPARSE_IRQ=y
@@ -25,7 +24,6 @@ CONFIG_BLK_DEV_LOOP=y
 CONFIG_BLK_DEV_RAM=y
 CONFIG_BLK_DEV_RAM_COUNT=2
 CONFIG_BLK_DEV_RAM_SIZE=17000
-CONFIG_MISC_DEVICES=y
 # CONFIG_INPUT is not set
 # CONFIG_SERIO is not set
 # CONFIG_VT is not set
index bba40e195ec43d1e70d9c2ccc6a4876f4a96abfd..05d0b4a25ab1fcba6500106cc68a7e546aaeeb78 100644
--- a/arch/c6x/configs/evmc6457_defconfig
+++ b/arch/c6x/configs/evmc6457_defconfig
@@ -1,5 +1,4 @@
 CONFIG_SOC_TMS320C6457=y
-CONFIG_EXPERIMENTAL=y
 # CONFIG_LOCALVERSION_AUTO is not set
 CONFIG_SYSVIPC=y
 CONFIG_SPARSE_IRQ=y
@@ -26,7 +25,6 @@ CONFIG_BLK_DEV_LOOP=y
 CONFIG_BLK_DEV_RAM=y
 CONFIG_BLK_DEV_RAM_COUNT=2
 CONFIG_BLK_DEV_RAM_SIZE=17000
-CONFIG_MISC_DEVICES=y
 # CONFIG_INPUT is not set
 # CONFIG_SERIO is not set
 # CONFIG_VT is not set
index 8c46155f6d311befd6b2b6d90369149290c5fd4c..8d81fcf86b0e0e86ca9d8bca1a260e9217706b68 100644
--- a/arch/c6x/configs/evmc6472_defconfig
+++ b/arch/c6x/configs/evmc6472_defconfig
@@ -1,5 +1,4 @@
 CONFIG_SOC_TMS320C6472=y
-CONFIG_EXPERIMENTAL=y
 # CONFIG_LOCALVERSION_AUTO is not set
 CONFIG_SYSVIPC=y
 CONFIG_SPARSE_IRQ=y
@@ -27,7 +26,6 @@ CONFIG_BLK_DEV_LOOP=y
 CONFIG_BLK_DEV_RAM=y
 CONFIG_BLK_DEV_RAM_COUNT=2
 CONFIG_BLK_DEV_RAM_SIZE=17000
-CONFIG_MISC_DEVICES=y
 # CONFIG_INPUT is not set
 # CONFIG_SERIO is not set
 # CONFIG_VT is not set
index 15533f63231315d7282a0442d9ebd48ef35f0dd3..8156a98f3958be3af5d8a407f6dbae8d6f26bfd0 100644
--- a/arch/c6x/configs/evmc6474_defconfig
+++ b/arch/c6x/configs/evmc6474_defconfig
@@ -1,5 +1,4 @@
 CONFIG_SOC_TMS320C6474=y
-CONFIG_EXPERIMENTAL=y
 # CONFIG_LOCALVERSION_AUTO is not set
 CONFIG_SYSVIPC=y
 CONFIG_SPARSE_IRQ=y
@@ -27,7 +26,6 @@ CONFIG_BLK_DEV_LOOP=y
 CONFIG_BLK_DEV_RAM=y
 CONFIG_BLK_DEV_RAM_COUNT=2
 CONFIG_BLK_DEV_RAM_SIZE=17000
-CONFIG_MISC_DEVICES=y
 # CONFIG_INPUT is not set
 # CONFIG_SERIO is not set
 # CONFIG_VT is not set
index 5f126d4905b1973f502010959592b24a94d71aa5..c4f433c25b69d94618779049f9f38fa5ac872437 100644
--- a/arch/c6x/configs/evmc6678_defconfig
+++ b/arch/c6x/configs/evmc6678_defconfig
@@ -1,5 +1,4 @@
 CONFIG_SOC_TMS320C6678=y
-CONFIG_EXPERIMENTAL=y
 # CONFIG_LOCALVERSION_AUTO is not set
 CONFIG_SYSVIPC=y
 CONFIG_SPARSE_IRQ=y
@@ -27,7 +26,6 @@ CONFIG_BLK_DEV_LOOP=y
 CONFIG_BLK_DEV_RAM=y
 CONFIG_BLK_DEV_RAM_COUNT=2
 CONFIG_BLK_DEV_RAM_SIZE=17000
-CONFIG_MISC_DEVICES=y
 # CONFIG_INPUT is not set
 # CONFIG_SERIO is not set
 # CONFIG_VT is not set
index 43afc03e41251d45c2ccd0f0a00656ad5e368fe9..9519fa5f97d0c224b83d65abdacd7495a613de14 100644
--- a/arch/c6x/platforms/megamod-pic.c
+++ b/arch/c6x/platforms/megamod-pic.c
@@ -208,14 +208,14 @@ static struct megamod_pic * __init init_megamod_pic(struct device_node *np)
 
        pic = kzalloc(sizeof(struct megamod_pic), GFP_KERNEL);
        if (!pic) {
-               pr_err("%s: Could not alloc PIC structure.\n", np->full_name);
+               pr_err("%pOF: Could not alloc PIC structure.\n", np);
                return NULL;
        }
 
        pic->irqhost = irq_domain_add_linear(np, NR_COMBINERS * 32,
                                             &megamod_domain_ops, pic);
        if (!pic->irqhost) {
-               pr_err("%s: Could not alloc host.\n", np->full_name);
+               pr_err("%pOF: Could not alloc host.\n", np);
                goto error_free;
        }
 
@@ -225,7 +225,7 @@ static struct megamod_pic * __init init_megamod_pic(struct device_node *np)
 
        pic->regs = of_iomap(np, 0);
        if (!pic->regs) {
-               pr_err("%s: Could not map registers.\n", np->full_name);
+               pr_err("%pOF: Could not map registers.\n", np);
                goto error_free;
        }
 
@@ -253,8 +253,8 @@ static struct megamod_pic * __init init_megamod_pic(struct device_node *np)
 
                irq_data = irq_get_irq_data(irq);
                if (!irq_data) {
-                       pr_err("%s: combiner-%d no irq_data for virq %d!\n",
-                              np->full_name, i, irq);
+                       pr_err("%pOF: combiner-%d no irq_data for virq %d!\n",
+                              np, i, irq);
                        continue;
                }
 
@@ -265,16 +265,16 @@ static struct megamod_pic * __init init_megamod_pic(struct device_node *np)
                 * of the core priority interrupts (4 - 15).
                 */
                if (hwirq < 4 || hwirq >= NR_PRIORITY_IRQS) {
-                       pr_err("%s: combiner-%d core irq %ld out of range!\n",
-                              np->full_name, i, hwirq);
+                       pr_err("%pOF: combiner-%d core irq %ld out of range!\n",
+                              np, i, hwirq);
                        continue;
                }
 
                /* record the mapping */
                mapping[hwirq - 4] = i;
 
-               pr_debug("%s: combiner-%d cascading to hwirq %ld\n",
-                        np->full_name, i, hwirq);
+               pr_debug("%pOF: combiner-%d cascading to hwirq %ld\n",
+                        np, i, hwirq);
 
                cascade_data[i].pic = pic;
                cascade_data[i].index = i;
@@ -290,8 +290,8 @@ static struct megamod_pic * __init init_megamod_pic(struct device_node *np)
        /* Finally, set up the MUX registers */
        for (i = 0; i < NR_MUX_OUTPUTS; i++) {
                if (mapping[i] != IRQ_UNMAPPED) {
-                       pr_debug("%s: setting mux %d to priority %d\n",
-                                np->full_name, mapping[i], i + 4);
+                       pr_debug("%pOF: setting mux %d to priority %d\n",
+                                np, mapping[i], i + 4);
                        set_megamod_mux(pic, mapping[i], i);
                }
        }
index 755359eb628622ffd60d2bdd6ebb3a060159620b..e8b6cc6a7b5ac4e97f20877e05817e209fff7e91 100644
--- a/arch/c6x/platforms/plldata.c
+++ b/arch/c6x/platforms/plldata.c
@@ -436,8 +436,8 @@ void __init c64x_setup_clocks(void)
 
        err = of_property_read_u32(node, "clock-frequency", &val);
        if (err || val == 0) {
-               pr_err("%s: no clock-frequency found! Using %dMHz\n",
-                      node->full_name, (int)val / 1000000);
+               pr_err("%pOF: no clock-frequency found! Using %dMHz\n",
+                      node, (int)val / 1000000);
                val = 25000000;
        }
        clkin1.rate = val;
index 0bd0452ded80d53698dd1673e15be770d89f8ae8..241a9a607193a00a726fcda8da4d2921d9bcbc83 100644
--- a/arch/c6x/platforms/timer64.c
+++ b/arch/c6x/platforms/timer64.c
@@ -204,14 +204,14 @@ void __init timer64_init(void)
 
        timer = of_iomap(np, 0);
        if (!timer) {
-               pr_debug("%s: Cannot map timer registers.\n", np->full_name);
+               pr_debug("%pOF: Cannot map timer registers.\n", np);
                goto out;
        }
-       pr_debug("%s: Timer registers=%p.\n", np->full_name, timer);
+       pr_debug("%pOF: Timer registers=%p.\n", np, timer);
 
        cd->irq = irq_of_parse_and_map(np, 0);
        if (cd->irq == NO_IRQ) {
-               pr_debug("%s: Cannot find interrupt.\n", np->full_name);
+               pr_debug("%pOF: Cannot find interrupt.\n", np);
                iounmap(timer);
                goto out;
        }
@@ -229,7 +229,7 @@ void __init timer64_init(void)
                dscr_set_devstate(timer64_devstate_id, DSCR_DEVSTATE_ENABLED);
        }
 
-       pr_debug("%s: Timer irq=%d.\n", np->full_name, cd->irq);
+       pr_debug("%pOF: Timer irq=%d.\n", np, cd->irq);
 
        clockevents_calc_mult_shift(cd, c6x_core_freq / TIMER_DIVISOR, 5);
 
index 2998479fd4e83f0ac4c6ccd7d89938c7cc9a6f5f..a9af1d2dcd699114d00a55689c29137cef384841 100644
--- a/arch/mips/include/asm/kvm_host.h
+++ b/arch/mips/include/asm/kvm_host.h
@@ -938,11 +938,6 @@ void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
 int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end);
 int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);
 
-static inline void kvm_arch_mmu_notifier_invalidate_page(struct kvm *kvm,
-                                                        unsigned long address)
-{
-}
-
 /* Emulation */
 int kvm_get_inst(u32 *opc, struct kvm_vcpu *vcpu, u32 *out);
 enum emulation_result update_pc(struct kvm_vcpu *vcpu, u32 cause);
index 8b3f1238d07f18eda49eb9326934427cf2d83736..e372ed871c513b00e78f69898601c598ee5a026b 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -67,11 +67,6 @@ extern int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end);
 extern int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);
 extern void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
 
-static inline void kvm_arch_mmu_notifier_invalidate_page(struct kvm *kvm,
-                                                        unsigned long address)
-{
-}
-
 #define HPTEG_CACHE_NUM                        (1 << 15)
 #define HPTEG_HASH_BITS_PTE            13
 #define HPTEG_HASH_BITS_PTE_LONG       12
index b5d960d6db3d0b18d33273b31ac67e03caebd02b..4c7b8591f7379931a1d319c8edd5995610ca6d8c 100644
--- a/arch/powerpc/platforms/powernv/npu-dma.c
+++ b/arch/powerpc/platforms/powernv/npu-dma.c
@@ -614,15 +614,6 @@ static void pnv_npu2_mn_change_pte(struct mmu_notifier *mn,
        mmio_invalidate(npu_context, 1, address, true);
 }
 
-static void pnv_npu2_mn_invalidate_page(struct mmu_notifier *mn,
-                                       struct mm_struct *mm,
-                                       unsigned long address)
-{
-       struct npu_context *npu_context = mn_to_npu_context(mn);
-
-       mmio_invalidate(npu_context, 1, address, true);
-}
-
 static void pnv_npu2_mn_invalidate_range(struct mmu_notifier *mn,
                                        struct mm_struct *mm,
                                        unsigned long start, unsigned long end)
@@ -640,7 +631,6 @@ static void pnv_npu2_mn_invalidate_range(struct mmu_notifier *mn,
 static const struct mmu_notifier_ops nv_nmmu_notifier_ops = {
        .release = pnv_npu2_mn_release,
        .change_pte = pnv_npu2_mn_change_pte,
-       .invalidate_page = pnv_npu2_mn_invalidate_page,
        .invalidate_range = pnv_npu2_mn_invalidate_range,
 };
 
index 4541ac44b35f0e39b831b3eba2e5669d29dd9f89..24bc41622a983cf353c6d414d521cbd94cd43cbb 100644
--- a/arch/s390/include/asm/mmu_context.h
+++ b/arch/s390/include/asm/mmu_context.h
@@ -44,6 +44,11 @@ static inline int init_new_context(struct task_struct *tsk,
                mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
                                   _ASCE_USER_BITS | _ASCE_TYPE_REGION3;
                break;
+       case -PAGE_SIZE:
+               /* forked 5-level task, set new asce with new_mm->pgd */
+               mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
+                       _ASCE_USER_BITS | _ASCE_TYPE_REGION1;
+               break;
        case 1UL << 53:
                /* forked 4-level task, set new asce with new mm->pgd */
                mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
index 2e10d2b8ad359607981ce381e27c3bf22e2e184d..5bea139517a2edc21dc50074d2c2e9a94dabb19e 100644
--- a/arch/s390/mm/mmap.c
+++ b/arch/s390/mm/mmap.c
@@ -119,7 +119,8 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
                return addr;
 
 check_asce_limit:
-       if (addr + len > current->mm->context.asce_limit) {
+       if (addr + len > current->mm->context.asce_limit &&
+           addr + len <= TASK_SIZE) {
                rc = crst_table_upgrade(mm, addr + len);
                if (rc)
                        return (unsigned long) rc;
@@ -183,7 +184,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
        }
 
 check_asce_limit:
-       if (addr + len > current->mm->context.asce_limit) {
+       if (addr + len > current->mm->context.asce_limit &&
+           addr + len <= TASK_SIZE) {
                rc = crst_table_upgrade(mm, addr + len);
                if (rc)
                        return (unsigned long) rc;
index f4d120a3e22e8aee8326bebeedeaa506c2291b47..92c9032502d87b3291268f2c98b04ec4cb59854d 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1375,8 +1375,6 @@ int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu);
 int kvm_cpu_get_interrupt(struct kvm_vcpu *v);
 void kvm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event);
 void kvm_vcpu_reload_apic_access_page(struct kvm_vcpu *vcpu);
-void kvm_arch_mmu_notifier_invalidate_page(struct kvm *kvm,
-                                          unsigned long address);
 
 void kvm_define_shared_msr(unsigned index, u32 msr);
 int kvm_set_shared_msr(unsigned index, u64 val, u64 mask);
index 05a5e57c6f39770e81e2af3f760f9ce44783f9b3..272320eb328c9bc8c2d0cf839a097a30ca27491f 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -6734,17 +6734,6 @@ void kvm_vcpu_reload_apic_access_page(struct kvm_vcpu *vcpu)
 }
 EXPORT_SYMBOL_GPL(kvm_vcpu_reload_apic_access_page);
 
-void kvm_arch_mmu_notifier_invalidate_page(struct kvm *kvm,
-                                          unsigned long address)
-{
-       /*
-        * The physical address of apic access page is stored in the VMCS.
-        * Update it when it becomes invalid.
-        */
-       if (address == gfn_to_hva(kvm, APIC_DEFAULT_PHYS_BASE >> PAGE_SHIFT))
-               kvm_make_all_cpus_request(kvm, KVM_REQ_APIC_PAGE_RELOAD);
-}
-
 /*
  * Returns 1 to let vcpu_run() continue the guest execution loop without
  * exiting to the userspace.  Otherwise, the value will be returned to the
index ae4cd58c0c7a403c8ba40f978e5d4221ce94bfc6..02250b2633b839c7fa56704d804dd4953f244121 100644
--- a/arch/x86/um/user-offsets.c
+++ b/arch/x86/um/user-offsets.c
@@ -50,7 +50,7 @@ void foo(void)
        DEFINE(HOST_GS, GS);
        DEFINE(HOST_ORIG_AX, ORIG_EAX);
 #else
-#if defined(PTRACE_GETREGSET) && defined(PTRACE_SETREGSET)
+#ifdef FP_XSTATE_MAGIC1
        DEFINE(HOST_FP_SIZE, sizeof(struct _xstate) / sizeof(unsigned long));
 #else
        DEFINE(HOST_FP_SIZE, sizeof(struct _fpstate) / sizeof(unsigned long));
index 43839b00fe6c42fff5f8afbc04ffdff4e61a147d..903605dbc1a50282e8e3d7ced0bba148ec7ebe9c 100644
--- a/crypto/algif_skcipher.c
+++ b/crypto/algif_skcipher.c
@@ -87,8 +87,13 @@ static void skcipher_free_async_sgls(struct skcipher_async_req *sreq)
        }
        sgl = sreq->tsg;
        n = sg_nents(sgl);
-       for_each_sg(sgl, sg, n, i)
-               put_page(sg_page(sg));
+       for_each_sg(sgl, sg, n, i) {
+               struct page *page = sg_page(sg);
+
+               /* some SGs may not have a page mapped */
+               if (page && page_ref_count(page))
+                       put_page(page);
+       }
 
        kfree(sreq->tsg);
 }
index 8b3c04d625c3bb0ee667a8a61a0c1e4499b0b486..4a45fa4890c0e45eb74124d39b9fee35823ac4dd 100644
--- a/crypto/chacha20_generic.c
+++ b/crypto/chacha20_generic.c
@@ -91,9 +91,14 @@ int crypto_chacha20_crypt(struct skcipher_request *req)
        crypto_chacha20_init(state, ctx, walk.iv);
 
        while (walk.nbytes > 0) {
+               unsigned int nbytes = walk.nbytes;
+
+               if (nbytes < walk.total)
+                       nbytes = round_down(nbytes, walk.stride);
+
                chacha20_docrypt(state, walk.dst.virt.addr, walk.src.virt.addr,
-                                walk.nbytes);
-               err = skcipher_walk_done(&walk, 0);
+                                nbytes);
+               err = skcipher_walk_done(&walk, walk.nbytes - nbytes);
        }
 
        return err;
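
A stand-alone sketch of the stride rounding introduced above (hypothetical sizes; round_down here is the power-of-two variant the kernel macro computes): every walk step except the final one must process a whole number of 64-byte ChaCha20 blocks, with the remainder handed back via skcipher_walk_done().

#include <stdio.h>

#define CHACHA20_BLOCK_SIZE 64	/* walk.stride for this cipher */

/* power-of-two round_down, as the kernel macro computes it */
static unsigned int round_down_p2(unsigned int x, unsigned int y)
{
	return x & ~(y - 1);
}

int main(void)
{
	unsigned int walk_nbytes = 100;	/* hypothetical step size */
	unsigned int walk_total = 375;	/* hypothetical request size */
	unsigned int nbytes = walk_nbytes;

	if (nbytes < walk_total)	/* not the final step */
		nbytes = round_down_p2(nbytes, CHACHA20_BLOCK_SIZE);

	printf("encrypt %u bytes now, hand %u back to the walk\n",
	       nbytes, walk_nbytes - nbytes);
	return 0;
}
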
index 6ceb0e2758bbccd4f0542a5f9628e9f3e987ce7c..d54971d2d1c8694805bb8618b8673c8490f9e8bb 100644
--- a/crypto/testmgr.h
+++ b/crypto/testmgr.h
@@ -32675,6 +32675,10 @@ static const struct cipher_testvec chacha20_enc_tv_template[] = {
                          "\x5b\x86\x2f\x37\x30\xe3\x7c\xfd"
                          "\xc4\xfd\x80\x6c\x22\xf2\x21",
                .rlen   = 375,
+               .also_non_np = 1,
+               .np     = 3,
+               .tap    = { 375 - 20, 4, 16 },
+
        }, { /* RFC7539 A.2. Test Vector #3 */
                .key    = "\x1c\x92\x40\xa5\xeb\x55\xd3\x8a"
                          "\xf3\x33\x88\x86\x04\xf6\xb5\xf0"
@@ -33049,6 +33053,9 @@ static const struct cipher_testvec chacha20_enc_tv_template[] = {
                          "\xa1\xed\xad\xd5\x76\xfa\x24\x8f"
                          "\x98",
                .rlen   = 1281,
+               .also_non_np = 1,
+               .np     = 3,
+               .tap    = { 1200, 1, 80 },
        },
 };
 
index 1a50cd3b4233bdf467049f3308c636fe32c0caf4..9b34dff6453633fcc9cf63393eb6fe553af29dbe 100644
--- a/drivers/ata/ahci_da850.c
+++ b/drivers/ata/ahci_da850.c
@@ -216,12 +216,16 @@ static int ahci_da850_probe(struct platform_device *pdev)
                return rc;
 
        res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
-       if (!res)
+       if (!res) {
+               rc = -ENODEV;
                goto disable_resources;
+       }
 
        pwrdn_reg = devm_ioremap(dev, res->start, resource_size(res));
-       if (!pwrdn_reg)
+       if (!pwrdn_reg) {
+               rc = -ENOMEM;
                goto disable_resources;
+       }
 
        da850_sata_init(dev, pwrdn_reg, hpriv->mmio, mpy);
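
A toy version of the bug fixed above (hypothetical function, not the driver code): jumping to the cleanup label without assigning rc returns whatever an earlier call left there — often 0, i.e. spurious success.

#include <stdio.h>

#define ENODEV 19

static int probe(int have_resource)
{
	int rc = 0;	/* an earlier successful call left 0 here */

	if (!have_resource) {
		rc = -ENODEV;	/* the fix: set an error before the goto */
		goto disable_resources;
	}
	return 0;

disable_resources:
	/* ... release previously acquired resources ... */
	return rc;	/* without the fix this reported success */
}

int main(void)
{
	printf("probe() = %d\n", probe(0));
	return 0;
}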
 
index fa7dd4394c02b642c00119bd00a4b59b18921a0d..1945a8ea20998490b48aa70dfe56c4fbaad744ea 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -2411,6 +2411,9 @@ static void ata_dev_config_trusted(struct ata_device *dev)
        u64 trusted_cap;
        unsigned int err;
 
+       if (!ata_id_has_trusted(dev->id))
+               return;
+
        if (!ata_identify_page_supported(dev, ATA_LOG_SECURITY)) {
                ata_dev_warn(dev,
                             "Security Log not supported\n");
index 792da683e70dafafa6f69e224b8e57272d3e6be1..2adb8599be93147bf7e9e89d3d65d43b6305bff0 100644
--- a/drivers/block/xen-blkback/xenbus.c
+++ b/drivers/block/xen-blkback/xenbus.c
@@ -244,6 +244,7 @@ static int xen_blkif_disconnect(struct xen_blkif *blkif)
 {
        struct pending_req *req, *n;
        unsigned int j, r;
+       bool busy = false;
 
        for (r = 0; r < blkif->nr_rings; r++) {
                struct xen_blkif_ring *ring = &blkif->rings[r];
@@ -261,8 +262,10 @@ static int xen_blkif_disconnect(struct xen_blkif *blkif)
                 * don't have any discard_io or other_io requests. So, checking
                 * for inflight IO is enough.
                 */
-               if (atomic_read(&ring->inflight) > 0)
-                       return -EBUSY;
+               if (atomic_read(&ring->inflight) > 0) {
+                       busy = true;
+                       continue;
+               }
 
                if (ring->irq) {
                        unbind_from_irqhandler(ring->irq, ring);
@@ -300,6 +303,9 @@ static int xen_blkif_disconnect(struct xen_blkif *blkif)
                WARN_ON(i != (XEN_BLKIF_REQS_PER_PAGE * blkif->nr_ring_pages));
                ring->active = false;
        }
+       if (busy)
+               return -EBUSY;
+
        blkif->nr_ring_pages = 0;
        /*
         * blkif->rings was allocated in connect_ring, so we should free it in
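
A stand-alone sketch of the reordering above (hypothetical ring state): instead of returning -EBUSY at the first busy ring and leaving the rest untouched, the loop now drains every idle ring first and reports busy only at the end, so each retry has less work remaining.

#include <stdio.h>

#define NR_RINGS 4

int main(void)
{
	int inflight[NR_RINGS] = { 0, 2, 0, 1 };	/* hypothetical I/O counts */
	int busy = 0;

	for (int r = 0; r < NR_RINGS; r++) {
		if (inflight[r] > 0) {	/* old code returned -EBUSY right here */
			busy = 1;
			continue;	/* fix: keep cleaning the other rings */
		}
		printf("ring %d cleaned up\n", r);
	}
	if (busy)
		printf("still busy, caller retries later\n");
	return 0;
}
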
index 6558a3ed57a7f6a1127f81aa0e1b5a861aa21b8d..e1cde6b80027fe1cad56b6169c38aadac1974e3e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
@@ -146,36 +146,6 @@ static void amdgpu_mn_invalidate_node(struct amdgpu_mn_node *node,
        }
 }
 
-/**
- * amdgpu_mn_invalidate_page - callback to notify about mm change
- *
- * @mn: our notifier
- * @mn: the mm this callback is about
- * @address: address of invalidate page
- *
- * Invalidation of a single page. Blocks for all BOs mapping it
- * and unmap them by move them into system domain again.
- */
-static void amdgpu_mn_invalidate_page(struct mmu_notifier *mn,
-                                     struct mm_struct *mm,
-                                     unsigned long address)
-{
-       struct amdgpu_mn *rmn = container_of(mn, struct amdgpu_mn, mn);
-       struct interval_tree_node *it;
-
-       mutex_lock(&rmn->lock);
-
-       it = interval_tree_iter_first(&rmn->objects, address, address);
-       if (it) {
-               struct amdgpu_mn_node *node;
-
-               node = container_of(it, struct amdgpu_mn_node, it);
-               amdgpu_mn_invalidate_node(node, address, address);
-       }
-
-       mutex_unlock(&rmn->lock);
-}
-
 /**
  * amdgpu_mn_invalidate_range_start - callback to notify about mm change
  *
@@ -215,7 +185,6 @@ static void amdgpu_mn_invalidate_range_start(struct mmu_notifier *mn,
 
 static const struct mmu_notifier_ops amdgpu_mn_ops = {
        .release = amdgpu_mn_release,
-       .invalidate_page = amdgpu_mn_invalidate_page,
        .invalidate_range_start = amdgpu_mn_invalidate_range_start,
 };
 
index 2d51a2269fc610ffc705abcb7a49c1bdcaffbae9..5131bfb94f065ceb20ba61713f990b76f11103cb 100644
--- a/drivers/gpu/drm/bridge/sil-sii8620.c
+++ b/drivers/gpu/drm/bridge/sil-sii8620.c
@@ -597,9 +597,9 @@ static void sii8620_mt_read_devcap(struct sii8620 *ctx, bool xdevcap)
 static void sii8620_mt_read_devcap_reg_recv(struct sii8620 *ctx,
                struct sii8620_mt_msg *msg)
 {
-       u8 reg = msg->reg[0] & 0x7f;
+       u8 reg = msg->reg[1] & 0x7f;
 
-       if (msg->reg[0] & 0x80)
+       if (msg->reg[1] & 0x80)
                ctx->xdevcap[reg] = msg->ret;
        else
                ctx->devcap[reg] = msg->ret;
index 61e06f0e8cd3b43890e807a2ab2bb9037ac95e99..625ba24f143f22c3369cecde2abf4927ba6199f5 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -1567,10 +1567,34 @@ vmw_kms_atomic_check_modeset(struct drm_device *dev,
 }
 
 
+/**
+ * vmw_kms_atomic_commit - Perform an atomic state commit
+ *
+ * @dev: DRM device
+ * @state: the driver state object
+ * @nonblock: Whether nonblocking behaviour is requested
+ *
+ * This is a simple wrapper around drm_atomic_helper_commit() for
+ * us to clear the nonblocking value.
+ *
+ * Nonblocking commits currently cause synchronization issues
+ * for vmwgfx.
+ *
+ * RETURNS
+ * Zero for success or negative error code on failure.
+ */
+int vmw_kms_atomic_commit(struct drm_device *dev,
+                         struct drm_atomic_state *state,
+                         bool nonblock)
+{
+       return drm_atomic_helper_commit(dev, state, false);
+}
+
+
 static const struct drm_mode_config_funcs vmw_kms_funcs = {
        .fb_create = vmw_kms_fb_create,
        .atomic_check = vmw_kms_atomic_check_modeset,
-       .atomic_commit = drm_atomic_helper_commit,
+       .atomic_commit = vmw_kms_atomic_commit,
 };
 
 static int vmw_kms_generic_present(struct vmw_private *dev_priv,
index 8c4ec564e49583f6d05eab8df5e57e4378582f08..55e8f5ed8b3c69563033c50fc963446837284475 100644
--- a/drivers/infiniband/core/umem_odp.c
+++ b/drivers/infiniband/core/umem_odp.c
@@ -166,24 +166,6 @@ static int invalidate_page_trampoline(struct ib_umem *item, u64 start,
        return 0;
 }
 
-static void ib_umem_notifier_invalidate_page(struct mmu_notifier *mn,
-                                            struct mm_struct *mm,
-                                            unsigned long address)
-{
-       struct ib_ucontext *context = container_of(mn, struct ib_ucontext, mn);
-
-       if (!context->invalidate_range)
-               return;
-
-       ib_ucontext_notifier_start_account(context);
-       down_read(&context->umem_rwsem);
-       rbt_ib_umem_for_each_in_range(&context->umem_tree, address,
-                                     address + PAGE_SIZE,
-                                     invalidate_page_trampoline, NULL);
-       up_read(&context->umem_rwsem);
-       ib_ucontext_notifier_end_account(context);
-}
-
 static int invalidate_range_start_trampoline(struct ib_umem *item, u64 start,
                                             u64 end, void *cookie)
 {
@@ -237,7 +219,6 @@ static void ib_umem_notifier_invalidate_range_end(struct mmu_notifier *mn,
 
 static const struct mmu_notifier_ops ib_umem_notifiers = {
        .release                    = ib_umem_notifier_release,
-       .invalidate_page            = ib_umem_notifier_invalidate_page,
        .invalidate_range_start     = ib_umem_notifier_invalidate_range_start,
        .invalidate_range_end       = ib_umem_notifier_invalidate_range_end,
 };
index ccbf52c8ff6f037a485060e9d78f66c3b7fe79e6..e4b56a0dd6d08ec7eb36dac1e303353f3b0095f7 100644
--- a/drivers/infiniband/hw/hfi1/mmu_rb.c
+++ b/drivers/infiniband/hw/hfi1/mmu_rb.c
@@ -67,8 +67,6 @@ struct mmu_rb_handler {
 
 static unsigned long mmu_node_start(struct mmu_rb_node *);
 static unsigned long mmu_node_last(struct mmu_rb_node *);
-static inline void mmu_notifier_page(struct mmu_notifier *, struct mm_struct *,
-                                    unsigned long);
 static inline void mmu_notifier_range_start(struct mmu_notifier *,
                                            struct mm_struct *,
                                            unsigned long, unsigned long);
@@ -82,7 +80,6 @@ static void do_remove(struct mmu_rb_handler *handler,
 static void handle_remove(struct work_struct *work);
 
 static const struct mmu_notifier_ops mn_opts = {
-       .invalidate_page = mmu_notifier_page,
        .invalidate_range_start = mmu_notifier_range_start,
 };
 
@@ -285,12 +282,6 @@ void hfi1_mmu_rb_remove(struct mmu_rb_handler *handler,
        handler->ops->remove(handler->ops_arg, node);
 }
 
-static inline void mmu_notifier_page(struct mmu_notifier *mn,
-                                    struct mm_struct *mm, unsigned long addr)
-{
-       mmu_notifier_mem_invalidate(mn, mm, addr, addr + PAGE_SIZE);
-}
-
 static inline void mmu_notifier_range_start(struct mmu_notifier *mn,
                                            struct mm_struct *mm,
                                            unsigned long start,
index 6629c472eafd828bb45a9e7fe3a4260c2e199de2..dccf5b76eff24bc1bdd82397d2fd516c9226fae2 100644
--- a/drivers/iommu/amd_iommu_v2.c
+++ b/drivers/iommu/amd_iommu_v2.c
@@ -391,13 +391,6 @@ static int mn_clear_flush_young(struct mmu_notifier *mn,
        return 0;
 }
 
-static void mn_invalidate_page(struct mmu_notifier *mn,
-                              struct mm_struct *mm,
-                              unsigned long address)
-{
-       __mn_flush_page(mn, address);
-}
-
 static void mn_invalidate_range(struct mmu_notifier *mn,
                                struct mm_struct *mm,
                                unsigned long start, unsigned long end)
@@ -436,7 +429,6 @@ static void mn_release(struct mmu_notifier *mn, struct mm_struct *mm)
 static const struct mmu_notifier_ops iommu_mn = {
        .release                = mn_release,
        .clear_flush_young      = mn_clear_flush_young,
-       .invalidate_page        = mn_invalidate_page,
        .invalidate_range       = mn_invalidate_range,
 };
 
index f167c0d84ebfb7f5937eb798c9cdbd8bd9abe6c6..f620dccec8ee3d9782b3c1322984e3b26078722a 100644
--- a/drivers/iommu/intel-svm.c
+++ b/drivers/iommu/intel-svm.c
@@ -223,14 +223,6 @@ static void intel_change_pte(struct mmu_notifier *mn, struct mm_struct *mm,
        intel_flush_svm_range(svm, address, 1, 1, 0);
 }
 
-static void intel_invalidate_page(struct mmu_notifier *mn, struct mm_struct *mm,
-                                 unsigned long address)
-{
-       struct intel_svm *svm = container_of(mn, struct intel_svm, notifier);
-
-       intel_flush_svm_range(svm, address, 1, 1, 0);
-}
-
 /* Pages have been freed at this point */
 static void intel_invalidate_range(struct mmu_notifier *mn,
                                   struct mm_struct *mm,
@@ -285,7 +277,6 @@ static void intel_mm_release(struct mmu_notifier *mn, struct mm_struct *mm)
 static const struct mmu_notifier_ops intel_mmuops = {
        .release = intel_mm_release,
        .change_pte = intel_change_pte,
-       .invalidate_page = intel_invalidate_page,
        .invalidate_range = intel_invalidate_range,
 };
 
index 0e8ab5bb3575fccf24a5734d1f5fe8149210b6aa..d24e4b05f5dacefe5c0528b0ff3b75444bb216e6 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -504,7 +504,6 @@ static int multipath_clone_and_map(struct dm_target *ti, struct request *rq,
                if (queue_dying) {
                        atomic_inc(&m->pg_init_in_progress);
                        activate_or_offline_path(pgpath);
-                       return DM_MAPIO_REQUEUE;
                }
                return DM_MAPIO_DELAY_REQUEUE;
        }
@@ -1458,7 +1457,6 @@ static int noretry_error(blk_status_t error)
        case BLK_STS_TARGET:
        case BLK_STS_NEXUS:
        case BLK_STS_MEDIUM:
-       case BLK_STS_RESOURCE:
                return 1;
        }
 
index 2edbcc2d7d3f6274b689447859470d2773ea4e3b..d669fddd9290d027a39fa2e65b01b0a1b24afc0a 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
 
 #define DM_MSG_PREFIX "core"
 
-#ifdef CONFIG_PRINTK
-/*
- * ratelimit state to be used in DMXXX_LIMIT().
- */
-DEFINE_RATELIMIT_STATE(dm_ratelimit_state,
-                      DEFAULT_RATELIMIT_INTERVAL,
-                      DEFAULT_RATELIMIT_BURST);
-EXPORT_SYMBOL(dm_ratelimit_state);
-#endif
-
 /*
  * Cookies are numeric values sent with CHANGE and REMOVE
  * uevents while resuming, removing or renaming the device.
@@ -1523,7 +1513,7 @@ static void __split_and_process_bio(struct mapped_device *md,
        }
 
        /* drop the extra reference count */
-       dec_pending(ci.io, error);
+       dec_pending(ci.io, errno_to_blk_status(error));
 }
 /*-----------------------------------------------------------------
  * CRUD END
index 64d5760d069ab3887682e5358f009c9a4767b374..63d6246d6dff3caf8b6f7099257f902d50d2de20 100644
--- a/drivers/misc/mic/scif/scif_dma.c
+++ b/drivers/misc/mic/scif/scif_dma.c
@@ -200,16 +200,6 @@ static void scif_mmu_notifier_release(struct mmu_notifier *mn,
        schedule_work(&scif_info.misc_work);
 }
 
-static void scif_mmu_notifier_invalidate_page(struct mmu_notifier *mn,
-                                             struct mm_struct *mm,
-                                             unsigned long address)
-{
-       struct scif_mmu_notif   *mmn;
-
-       mmn = container_of(mn, struct scif_mmu_notif, ep_mmu_notifier);
-       scif_rma_destroy_tcw(mmn, address, PAGE_SIZE);
-}
-
 static void scif_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
                                                     struct mm_struct *mm,
                                                     unsigned long start,
@@ -235,7 +225,6 @@ static void scif_mmu_notifier_invalidate_range_end(struct mmu_notifier *mn,
 static const struct mmu_notifier_ops scif_mmu_notifier_ops = {
        .release = scif_mmu_notifier_release,
        .clear_flush_young = NULL,
-       .invalidate_page = scif_mmu_notifier_invalidate_page,
        .invalidate_range_start = scif_mmu_notifier_invalidate_range_start,
        .invalidate_range_end = scif_mmu_notifier_invalidate_range_end};
 
index e936d43895d2579965c9a8d6127cc9111c9cc716..9918eda0e05f649882b8db5f0828db2e3bcfe31e 100644
--- a/drivers/misc/sgi-gru/grutlbpurge.c
+++ b/drivers/misc/sgi-gru/grutlbpurge.c
@@ -247,17 +247,6 @@ static void gru_invalidate_range_end(struct mmu_notifier *mn,
        gru_dbg(grudev, "gms %p, start 0x%lx, end 0x%lx\n", gms, start, end);
 }
 
-static void gru_invalidate_page(struct mmu_notifier *mn, struct mm_struct *mm,
-                               unsigned long address)
-{
-       struct gru_mm_struct *gms = container_of(mn, struct gru_mm_struct,
-                                                ms_notifier);
-
-       STAT(mmu_invalidate_page);
-       gru_flush_tlb_range(gms, address, PAGE_SIZE);
-       gru_dbg(grudev, "gms %p, address 0x%lx\n", gms, address);
-}
-
 static void gru_release(struct mmu_notifier *mn, struct mm_struct *mm)
 {
        struct gru_mm_struct *gms = container_of(mn, struct gru_mm_struct,
@@ -269,7 +258,6 @@ static void gru_release(struct mmu_notifier *mn, struct mm_struct *mm)
 
 
 static const struct mmu_notifier_ops gru_mmuops = {
-       .invalidate_page        = gru_invalidate_page,
        .invalidate_range_start = gru_invalidate_range_start,
        .invalidate_range_end   = gru_invalidate_range_end,
        .release                = gru_release,
index 925467b31a333940dc62d62c4c5f3376316e4120..ea892e732268fe37bd4ea0c52b6d89fa055b691d 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -109,6 +109,7 @@ struct nvme_dev {
        /* host memory buffer support: */
        u64 host_mem_size;
        u32 nr_host_mem_descs;
+       dma_addr_t host_mem_descs_dma;
        struct nvme_host_mem_buf_desc *host_mem_descs;
        void **host_mem_desc_bufs;
 };
@@ -1565,16 +1566,10 @@ static inline void nvme_release_cmb(struct nvme_dev *dev)
 
 static int nvme_set_host_mem(struct nvme_dev *dev, u32 bits)
 {
-       size_t len = dev->nr_host_mem_descs * sizeof(*dev->host_mem_descs);
+       u64 dma_addr = dev->host_mem_descs_dma;
        struct nvme_command c;
-       u64 dma_addr;
        int ret;
 
-       dma_addr = dma_map_single(dev->dev, dev->host_mem_descs, len,
-                       DMA_TO_DEVICE);
-       if (dma_mapping_error(dev->dev, dma_addr))
-               return -ENOMEM;
-
        memset(&c, 0, sizeof(c));
        c.features.opcode       = nvme_admin_set_features;
        c.features.fid          = cpu_to_le32(NVME_FEAT_HOST_MEM_BUF);
@@ -1591,7 +1586,6 @@ static int nvme_set_host_mem(struct nvme_dev *dev, u32 bits)
                         "failed to set host mem (err %d, flags %#x).\n",
                         ret, bits);
        }
-       dma_unmap_single(dev->dev, dma_addr, len, DMA_TO_DEVICE);
        return ret;
 }
 
@@ -1609,7 +1603,9 @@ static void nvme_free_host_mem(struct nvme_dev *dev)
 
        kfree(dev->host_mem_desc_bufs);
        dev->host_mem_desc_bufs = NULL;
-       kfree(dev->host_mem_descs);
+       dma_free_coherent(dev->dev,
+                       dev->nr_host_mem_descs * sizeof(*dev->host_mem_descs),
+                       dev->host_mem_descs, dev->host_mem_descs_dma);
        dev->host_mem_descs = NULL;
 }
 
@@ -1617,6 +1613,7 @@ static int nvme_alloc_host_mem(struct nvme_dev *dev, u64 min, u64 preferred)
 {
        struct nvme_host_mem_buf_desc *descs;
        u32 chunk_size, max_entries, len;
+       dma_addr_t descs_dma;
        int i = 0;
        void **bufs;
        u64 size = 0, tmp;
@@ -1627,7 +1624,8 @@ retry:
        tmp = (preferred + chunk_size - 1);
        do_div(tmp, chunk_size);
        max_entries = tmp;
-       descs = kcalloc(max_entries, sizeof(*descs), GFP_KERNEL);
+       descs = dma_zalloc_coherent(dev->dev, max_entries * sizeof(*descs),
+                       &descs_dma, GFP_KERNEL);
        if (!descs)
                goto out;
 
@@ -1661,6 +1659,7 @@ retry:
        dev->nr_host_mem_descs = i;
        dev->host_mem_size = size;
        dev->host_mem_descs = descs;
+       dev->host_mem_descs_dma = descs_dma;
        dev->host_mem_desc_bufs = bufs;
        return 0;
 
@@ -1674,7 +1673,8 @@ out_free_bufs:
 
        kfree(bufs);
 out_free_descs:
-       kfree(descs);
+       dma_free_coherent(dev->dev, max_entries * sizeof(*descs), descs,
+                       descs_dma);
 out:
        /* try a smaller chunk size if we failed early */
        if (chunk_size >= PAGE_SIZE * 2 && (i == 0 || size < min)) {
index da04df1af231758cb4965735c417a215d736ec94..a03299d779229de271eb28704a73515f1020ffe1 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -920,7 +920,11 @@ static int nvme_rdma_map_sg_fr(struct nvme_rdma_queue *queue,
        struct nvme_keyed_sgl_desc *sg = &c->common.dptr.ksgl;
        int nr;
 
-       nr = ib_map_mr_sg(req->mr, req->sg_table.sgl, count, NULL, PAGE_SIZE);
+       /*
+        * Align the MR to a 4K page size to match the ctrl page size and
+        * the block virtual boundary.
+        */
+       nr = ib_map_mr_sg(req->mr, req->sg_table.sgl, count, NULL, SZ_4K);
        if (nr < count) {
                if (nr < 0)
                        return nr;
@@ -1583,7 +1587,7 @@ static int nvme_rdma_configure_admin_queue(struct nvme_rdma_ctrl *ctrl)
                goto out_cleanup_queue;
 
        ctrl->ctrl.max_hw_sectors =
-               (ctrl->max_fr_pages - 1) << (PAGE_SHIFT - 9);
+               (ctrl->max_fr_pages - 1) << (ilog2(SZ_4K) - 9);
 
        error = nvme_init_identify(&ctrl->ctrl);
        if (error)
index ba6ac83a6c2500c874f6a64518e72398bf5d52cb..5ccfdc80d0ec942d2e4dde4113830ab9eb2ba735 100644
--- a/drivers/s390/cio/vfio_ccw_cp.c
+++ b/drivers/s390/cio/vfio_ccw_cp.c
@@ -481,7 +481,7 @@ static int ccwchain_fetch_tic(struct ccwchain *chain,
                ccw_tail = ccw_head + (iter->ch_len - 1) * sizeof(struct ccw1);
 
                if ((ccw_head <= ccw->cda) && (ccw->cda <= ccw_tail)) {
-                       ccw->cda = (__u32) (addr_t) (iter->ch_ccw +
+                       ccw->cda = (__u32) (addr_t) (((char *)iter->ch_ccw) +
                                                     (ccw->cda - ccw_head));
                        return 0;
                }
index da5bdbdcce527262489cae939761b9cf1834eeb0..f838bd73befa8f3b2fe915b8bbbe1990c563de1f 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -4945,6 +4945,7 @@ static int ipr_slave_configure(struct scsi_device *sdev)
                }
                if (ipr_is_vset_device(res)) {
                        sdev->scsi_level = SCSI_SPC_3;
+                       sdev->no_report_opcodes = 1;
                        blk_queue_rq_timeout(sdev->request_queue,
                                             IPR_VSET_RW_TIMEOUT);
                        blk_queue_max_hw_sectors(sdev->request_queue, IPR_VSET_MAX_SECTORS);
index eb07f1de8afa5316be9c038b881d076327c36bb3..59c18ca4cda98e59285853d14e93c1a79a80f3f0 100644
--- a/drivers/scsi/qedf/qedf_els.c
+++ b/drivers/scsi/qedf/qedf_els.c
@@ -489,7 +489,7 @@ static void qedf_srr_compl(struct qedf_els_cb_arg *cb_arg)
 
        /* If a SRR times out, simply free resources */
        if (srr_req->event == QEDF_IOREQ_EV_ELS_TMO)
-               goto out_free;
+               goto out_put;
 
        /* Normalize response data into struct fc_frame */
        mp_req = &(srr_req->mp_req);
@@ -501,7 +501,7 @@ static void qedf_srr_compl(struct qedf_els_cb_arg *cb_arg)
        if (!fp) {
                QEDF_ERR(&(qedf->dbg_ctx),
                    "fc_frame_alloc failure.\n");
-               goto out_free;
+               goto out_put;
        }
 
        /* Copy frame header from firmware into fp */
@@ -526,9 +526,10 @@ static void qedf_srr_compl(struct qedf_els_cb_arg *cb_arg)
        }
 
        fc_frame_free(fp);
-out_free:
+out_put:
        /* Put reference for original command since SRR completed */
        kref_put(&orig_io_req->refcount, qedf_release_cmd);
+out_free:
        kfree(cb_arg);
 }
 
@@ -780,7 +781,7 @@ static void qedf_rec_compl(struct qedf_els_cb_arg *cb_arg)
 
        /* If a REC times out, free resources */
        if (rec_req->event == QEDF_IOREQ_EV_ELS_TMO)
-               goto out_free;
+               goto out_put;
 
        /* Normalize response data into struct fc_frame */
        mp_req = &(rec_req->mp_req);
@@ -792,7 +793,7 @@ static void qedf_rec_compl(struct qedf_els_cb_arg *cb_arg)
        if (!fp) {
                QEDF_ERR(&(qedf->dbg_ctx),
                    "fc_frame_alloc failure.\n");
-               goto out_free;
+               goto out_put;
        }
 
        /* Copy frame header from firmware into fp */
@@ -884,9 +885,10 @@ static void qedf_rec_compl(struct qedf_els_cb_arg *cb_arg)
 
 out_free_frame:
        fc_frame_free(fp);
-out_free:
+out_put:
        /* Put reference for original command since REC completed */
        kref_put(&orig_io_req->refcount, qedf_release_cmd);
+out_free:
        kfree(cb_arg);
 }
 
index d7ff71e0c85c6ecd525d0d59d3f3f0da63952b47..84e782d8e7c3f0cb8dd4c3bdedb46b7060b18f1e 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -1021,7 +1021,7 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
                        read_lock_irqsave(&sfp->rq_list_lock, iflags);
                        val = 0;
                        list_for_each_entry(srp, &sfp->rq_list, entry) {
-                               if (val > SG_MAX_QUEUE)
+                               if (val >= SG_MAX_QUEUE)
                                        break;
                                memset(&rinfo[val], 0, SZ_SG_REQ_INFO);
                                rinfo[val].req_state = srp->done + 1;
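
A stand-alone sketch of the off-by-one fixed above (toy array; SG_MAX_QUEUE stands in for the driver's constant): valid indices for an N-entry array are 0..N-1, so the guard must be >= — the old > test allowed one write past the end of rinfo[].

#include <stdio.h>
#include <string.h>

#define SG_MAX_QUEUE 16	/* stand-in for the driver's queue depth */

int main(void)
{
	int rinfo[SG_MAX_QUEUE];
	int val;

	for (val = 0; val < 100; val++) {	/* more requests than slots */
		if (val >= SG_MAX_QUEUE)	/* with ">" this writes rinfo[16] */
			break;
		memset(&rinfo[val], 0, sizeof(rinfo[val]));
	}
	printf("recorded %d of 100 requests\n", val);
	return 0;
}
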
index f3bf8f4e2d6cef09101b53aa9f1a69563b206287..82360594fa8e49bcbad179a6bb349564286cf203 100644
--- a/drivers/xen/gntdev.c
+++ b/drivers/xen/gntdev.c
@@ -484,13 +484,6 @@ static void mn_invl_range_start(struct mmu_notifier *mn,
        mutex_unlock(&priv->lock);
 }
 
-static void mn_invl_page(struct mmu_notifier *mn,
-                        struct mm_struct *mm,
-                        unsigned long address)
-{
-       mn_invl_range_start(mn, mm, address, address + PAGE_SIZE);
-}
-
 static void mn_release(struct mmu_notifier *mn,
                       struct mm_struct *mm)
 {
@@ -522,7 +515,6 @@ static void mn_release(struct mmu_notifier *mn,
 
 static const struct mmu_notifier_ops gntdev_mmu_ops = {
        .release                = mn_release,
-       .invalidate_page        = mn_invl_page,
        .invalidate_range_start = mn_invl_range_start,
 };
 
index 569d3fb736be070d2693f600f867bd836aeb1dc4..e702d48bd023411f3bbed69c6cc6a571f2fc059c 100644
--- a/fs/cifs/dir.c
+++ b/fs/cifs/dir.c
@@ -205,7 +205,7 @@ check_name(struct dentry *direntry, struct cifs_tcon *tcon)
        int i;
 
        if (unlikely(direntry->d_name.len >
-                    tcon->fsAttrInfo.MaxPathNameComponentLength))
+                    le32_to_cpu(tcon->fsAttrInfo.MaxPathNameComponentLength)))
                return -ENAMETOOLONG;
 
        if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIX_PATHS)) {
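
A stand-alone sketch of the conversion added above (hand-rolled decoder; hypothetical wire value): MaxPathNameComponentLength arrives little-endian on the wire, so comparing the raw bytes as a native u32 on a big-endian host tests against a garbage limit — le32_to_cpu() makes the check byte-order independent.

#include <stdint.h>
#include <stdio.h>

/* portable little-endian decode — what le32_to_cpu() boils down to */
static uint32_t le32_to_host(const uint8_t b[4])
{
	return (uint32_t)b[0]       | (uint32_t)b[1] << 8 |
	       (uint32_t)b[2] << 16 | (uint32_t)b[3] << 24;
}

int main(void)
{
	/* hypothetical on-wire encoding of the limit 255 */
	uint8_t wire[4] = { 0xff, 0x00, 0x00, 0x00 };

	printf("decoded limit: %u\n", le32_to_host(wire));	/* 255 on any host */
	return 0;
}
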
index 18700fd25a0b3f6ceebce14d59716dc4cf2d3cb0..2826882c81d14f138b393824fac9c9091311cd1f 100644
--- a/fs/cifs/smb2pdu.h
+++ b/fs/cifs/smb2pdu.h
@@ -84,8 +84,8 @@
 
 #define NUMBER_OF_SMB2_COMMANDS        0x0013
 
-/* BB FIXME - analyze following length BB */
-#define MAX_SMB2_HDR_SIZE 0x78 /* 4 len + 64 hdr + (2*24 wct) + 2 bct + 2 pad */
+/* 4 len + 52 transform hdr + 64 hdr + 56 create rsp */
+#define MAX_SMB2_HDR_SIZE 0x00b0
 
 #define SMB2_PROTO_NUMBER cpu_to_le32(0x424d53fe)
 #define SMB2_TRANSFORM_PROTO_NUM cpu_to_le32(0x424d53fd)
index 865d42c63e23e4746c6658fbe2746fd17ade11f9..ab925dc6647ac764c5a2620a9ef21c53305f90dc 100644
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -646,11 +646,10 @@ static void dax_mapping_entry_mkclean(struct address_space *mapping,
        pte_t pte, *ptep = NULL;
        pmd_t *pmdp = NULL;
        spinlock_t *ptl;
-       bool changed;
 
        i_mmap_lock_read(mapping);
        vma_interval_tree_foreach(vma, &mapping->i_mmap, index, index) {
-               unsigned long address;
+               unsigned long address, start, end;
 
                cond_resched();
 
@@ -658,8 +657,13 @@ static void dax_mapping_entry_mkclean(struct address_space *mapping,
                        continue;
 
                address = pgoff_address(index, vma);
-               changed = false;
-               if (follow_pte_pmd(vma->vm_mm, address, &ptep, &pmdp, &ptl))
+
+               /*
+                * Note because we provide start/end to follow_pte_pmd it will
+                * call mmu_notifier_invalidate_range_start() on our behalf
+                * before taking any lock.
+                */
+               if (follow_pte_pmd(vma->vm_mm, address, &start, &end, &ptep, &pmdp, &ptl))
                        continue;
 
                if (pmdp) {
@@ -676,7 +680,7 @@ static void dax_mapping_entry_mkclean(struct address_space *mapping,
                        pmd = pmd_wrprotect(pmd);
                        pmd = pmd_mkclean(pmd);
                        set_pmd_at(vma->vm_mm, address, pmdp, pmd);
-                       changed = true;
+                       mmu_notifier_invalidate_range(vma->vm_mm, start, end);
 unlock_pmd:
                        spin_unlock(ptl);
 #endif
@@ -691,13 +695,12 @@ unlock_pmd:
                        pte = pte_wrprotect(pte);
                        pte = pte_mkclean(pte);
                        set_pte_at(vma->vm_mm, address, ptep, pte);
-                       changed = true;
+                       mmu_notifier_invalidate_range(vma->vm_mm, start, end);
 unlock_pte:
                        pte_unmap_unlock(ptep, ptl);
                }
 
-               if (changed)
-                       mmu_notifier_invalidate_page(vma->vm_mm, address);
+               mmu_notifier_invalidate_range_end(vma->vm_mm, start, end);
        }
        i_mmap_unlock_read(mapping);
 }
index 78b41e1d5c67151744fb1c98bf1af54560593457..60726ae7cf26ef9d5b4af4f67f764d3c282a37b8 100644
--- a/fs/jfs/super.c
+++ b/fs/jfs/super.c
@@ -619,16 +619,10 @@ static int jfs_fill_super(struct super_block *sb, void *data, int silent)
        if (!sb->s_root)
                goto out_no_root;
 
-       /* logical blocks are represented by 40 bits in pxd_t, etc. */
-       sb->s_maxbytes = ((u64) sb->s_blocksize) << 40;
-#if BITS_PER_LONG == 32
-       /*
-        * Page cache is indexed by long.
-        * I would use MAX_LFS_FILESIZE, but it's only half as big
+       /* logical blocks are represented by 40 bits in pxd_t, etc.
+        * and page cache is indexed by long
         */
-       sb->s_maxbytes = min(((u64) PAGE_SIZE << 32) - 1,
-                            (u64)sb->s_maxbytes);
-#endif
+       sb->s_maxbytes = min(((loff_t)sb->s_blocksize) << 40, MAX_LFS_FILESIZE);
        sb->s_time_gran = 1;
        return 0;
 
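A quick check of the single-expression replacement with illustrative numbers (4 KiB blocks assumed): (loff_t)4096 << 40 is 2^52 bytes, the 40-bit pxd_t limit, while MAX_LFS_FILESIZE already encodes the page-cache limit on 32-bit and is effectively unbounded on 64-bit, so min() makes the old #if unnecessary.

    /* illustrative values, not from the diff */
    sb->s_maxbytes = min(((loff_t)4096) << 40,  /* 2^52: 40-bit block count */
                         MAX_LFS_FILESIZE);     /* page-cache limit (32-bit) */
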
index 9d5f15ed87fe7999eaaad22497899e696ad34147..c6362e38ae92dcfafd76f2b10541cb9e4464e230 100644 (file)
@@ -1164,11 +1164,7 @@ int compat_get_fd_set(unsigned long nr, compat_ulong_t __user *ufdset,
        if (ufdset) {
                return compat_get_bitmap(fdset, ufdset, nr);
        } else {
-               /* Tricky, must clear full unsigned long in the
-                * kernel fdset at the end, ALIGN makes sure that
-                * actually happens.
-                */
-               memset(fdset, 0, ALIGN(nr, BITS_PER_LONG));
+               zero_fd_set(nr, fdset);
                return 0;
        }
 }
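zero_fd_set() clears exactly the longs-rounded number of bytes; a paraphrase of its definition in include/linux/poll.h:

    static inline void zero_fd_set(unsigned long nr, unsigned long *fdset)
    {
            /* FDS_BYTES(nr) rounds nr bits up to whole longs and converts to
             * bytes -- note the removed open-coded ALIGN(nr, BITS_PER_LONG)
             * produced a rounded bit count but used it as a byte count */
            memset(fdset, 0, FDS_BYTES(nr));
    }
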
index fc824e2828f3cacad21cd7d3d9d98261cf1d78fb..5d2add1a6c964870b212f848879338f7b9b3ba18 100644 (file)
 #define parent_node(node)      ((void)(node),0)
 #endif
 #ifndef cpumask_of_node
-#define cpumask_of_node(node)  ((void)node, cpu_online_mask)
+  #ifdef CONFIG_NEED_MULTIPLE_NODES
+    #define cpumask_of_node(node)      ((node) == 0 ? cpu_online_mask : cpu_none_mask)
+  #else
+    #define cpumask_of_node(node)      ((void)node, cpu_online_mask)
+  #endif
 #endif
 #ifndef pcibus_to_node
 #define pcibus_to_node(bus)    ((void)(bus), -1)
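The effect of the new CONFIG_NEED_MULTIPLE_NODES branch, assuming no architecture override is in play:

    const struct cpumask *m0 = cpumask_of_node(0);  /* cpu_online_mask */
    const struct cpumask *m1 = cpumask_of_node(1);  /* cpu_none_mask   */
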
index e65ae4b2ed485a67a26d7c45d542a76d70ab9d97..c7a353825450a950a2f53f93774ef4c5bce934a0 100644 (file)
@@ -60,7 +60,8 @@ enum {
        ATA_ID_FW_REV           = 23,
        ATA_ID_PROD             = 27,
        ATA_ID_MAX_MULTSECT     = 47,
-       ATA_ID_DWORD_IO         = 48,
+       ATA_ID_DWORD_IO         = 48,   /* before ATA-8 */
+       ATA_ID_TRUSTED          = 48,   /* ATA-8 and later */
        ATA_ID_CAPABILITY       = 49,
        ATA_ID_OLD_PIO_MODES    = 51,
        ATA_ID_OLD_DMA_MODES    = 52,
@@ -889,6 +890,13 @@ static inline bool ata_id_has_dword_io(const u16 *id)
        return id[ATA_ID_DWORD_IO] & (1 << 0);
 }
 
+static inline bool ata_id_has_trusted(const u16 *id)
+{
+       if (ata_id_major_version(id) <= 7)
+               return false;
+       return id[ATA_ID_TRUSTED] & (1 << 0);
+}
+
 static inline bool ata_id_has_unload(const u16 *id)
 {
        if (ata_id_major_version(id) >= 7 &&
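Word 48 was redefined in ATA-8, hence two names for the same index. A hedged sketch of a caller, modeled on the libata-core.c change elsewhere in this commit (the flag name is an assumption here):

    /* gate TRUSTED COMPUTING (security protocol) commands on the ATA-8 bit */
    if (ata_id_has_trusted(dev->id))
            dev->flags |= ATA_DFLAG_TRUSTED;  /* assumed flag name */
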
index eca8ad75e28b054db4657d5e562b3904120b042e..043b60de041e3a03731ef99c4a635ea992d5e6ae 100644 (file)
@@ -517,7 +517,8 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s
 # define __compiletime_error_fallback(condition) do { } while (0)
 #endif
 
-#define __compiletime_assert(condition, msg, prefix, suffix)           \
+#ifdef __OPTIMIZE__
+# define __compiletime_assert(condition, msg, prefix, suffix)          \
        do {                                                            \
                bool __cond = !(condition);                             \
                extern void prefix ## suffix(void) __compiletime_error(msg); \
@@ -525,6 +526,9 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s
                        prefix ## suffix();                             \
                __compiletime_error_fallback(__cond);                   \
        } while (0)
+#else
+# define __compiletime_assert(condition, msg, prefix, suffix) do { } while (0)
+#endif
 
 #define _compiletime_assert(condition, msg, prefix, suffix) \
        __compiletime_assert(condition, msg, prefix, suffix)
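The trick relies on the optimizer proving __cond false and deleting the call to the undefined prefix##suffix() function; at -O0 the call always survives and breaks the link even for correct code, so the assertion is now compiled out unless __OPTIMIZE__ is defined. BUILD_BUG_ON() funnels into this macro:

    /* fails the build iff the condition is true at compile time -- but only
     * when optimizing; 'struct foo' is a made-up example */
    BUILD_BUG_ON(sizeof(struct foo) > 64);
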
index 1473455d0341bed5b157cab1cd95d8a08980e711..4f2b3b2076c42556da4d7d244ba786c2d0c045c3 100644 (file)
@@ -549,46 +549,29 @@ void *dm_vcalloc(unsigned long nmemb, unsigned long elem_size);
  *---------------------------------------------------------------*/
 #define DM_NAME "device-mapper"
 
-#ifdef CONFIG_PRINTK
-extern struct ratelimit_state dm_ratelimit_state;
-
-#define dm_ratelimit() __ratelimit(&dm_ratelimit_state)
-#else
-#define dm_ratelimit() 0
-#endif
+#define DM_RATELIMIT(pr_func, fmt, ...)                                        \
+do {                                                                   \
+       static DEFINE_RATELIMIT_STATE(rs, DEFAULT_RATELIMIT_INTERVAL,   \
+                                     DEFAULT_RATELIMIT_BURST);         \
+                                                                       \
+       if (__ratelimit(&rs))                                           \
+               pr_func(DM_FMT(fmt), ##__VA_ARGS__);                    \
+} while (0)
 
 #define DM_FMT(fmt) DM_NAME ": " DM_MSG_PREFIX ": " fmt "\n"
 
 #define DMCRIT(fmt, ...) pr_crit(DM_FMT(fmt), ##__VA_ARGS__)
 
 #define DMERR(fmt, ...) pr_err(DM_FMT(fmt), ##__VA_ARGS__)
-#define DMERR_LIMIT(fmt, ...)                                          \
-do {                                                                   \
-       if (dm_ratelimit())                                             \
-               DMERR(fmt, ##__VA_ARGS__);                              \
-} while (0)
-
+#define DMERR_LIMIT(fmt, ...) DM_RATELIMIT(pr_err, fmt, ##__VA_ARGS__)
 #define DMWARN(fmt, ...) pr_warn(DM_FMT(fmt), ##__VA_ARGS__)
-#define DMWARN_LIMIT(fmt, ...)                                         \
-do {                                                                   \
-       if (dm_ratelimit())                                             \
-               DMWARN(fmt, ##__VA_ARGS__);                             \
-} while (0)
-
+#define DMWARN_LIMIT(fmt, ...) DM_RATELIMIT(pr_warn, fmt, ##__VA_ARGS__)
 #define DMINFO(fmt, ...) pr_info(DM_FMT(fmt), ##__VA_ARGS__)
-#define DMINFO_LIMIT(fmt, ...)                                         \
-do {                                                                   \
-       if (dm_ratelimit())                                             \
-               DMINFO(fmt, ##__VA_ARGS__);                             \
-} while (0)
+#define DMINFO_LIMIT(fmt, ...) DM_RATELIMIT(pr_info, fmt, ##__VA_ARGS__)
 
 #ifdef CONFIG_DM_DEBUG
 #define DMDEBUG(fmt, ...) printk(KERN_DEBUG DM_FMT(fmt), ##__VA_ARGS__)
-#define DMDEBUG_LIMIT(fmt, ...)                                                \
-do {                                                                   \
-       if (dm_ratelimit())                                             \
-               DMDEBUG(fmt, ##__VA_ARGS__);                            \
-} while (0)
+#define DMDEBUG_LIMIT(fmt, ...) DM_RATELIMIT(pr_debug, fmt, ##__VA_ARGS__)
 #else
 #define DMDEBUG(fmt, ...) no_printk(fmt, ##__VA_ARGS__)
 #define DMDEBUG_LIMIT(fmt, ...) no_printk(fmt, ##__VA_ARGS__)
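The rewrite trades the single global ratelimit for an independent static state per call site; after preprocessing, a use such as DMERR_LIMIT("io error") behaves like this expansion:

    do {
            static DEFINE_RATELIMIT_STATE(rs, DEFAULT_RATELIMIT_INTERVAL,
                                          DEFAULT_RATELIMIT_BURST);

            if (__ratelimit(&rs))                 /* per-callsite state */
                    pr_err(DM_FMT("io error"));
    } while (0);
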
index 46b9ac5e856923ec77125cf5c468585167bf0ff6..c1f6c95f34963177b13018e8299ddcd88e72e625 100644 (file)
@@ -1260,6 +1260,7 @@ int copy_page_range(struct mm_struct *dst, struct mm_struct *src,
 void unmap_mapping_range(struct address_space *mapping,
                loff_t const holebegin, loff_t const holelen, int even_cows);
 int follow_pte_pmd(struct mm_struct *mm, unsigned long address,
+                            unsigned long *start, unsigned long *end,
                             pte_t **ptepp, pmd_t **pmdpp, spinlock_t **ptlp);
 int follow_pfn(struct vm_area_struct *vma, unsigned long address,
        unsigned long *pfn);
index c91b3bcd158f8fa8b2dbc3d4738ade5fdcc600cb..7b2e31b1745aaf3a7d3e8af0ca915c6c2dc04516 100644 (file)
@@ -94,17 +94,6 @@ struct mmu_notifier_ops {
                           unsigned long address,
                           pte_t pte);
 
-       /*
-        * Before this is invoked any secondary MMU is still ok to
-        * read/write to the page previously pointed to by the Linux
-        * pte because the page hasn't been freed yet and it won't be
-        * freed until this returns. If required set_page_dirty has to
-        * be called internally to this method.
-        */
-       void (*invalidate_page)(struct mmu_notifier *mn,
-                               struct mm_struct *mm,
-                               unsigned long address);
-
        /*
         * invalidate_range_start() and invalidate_range_end() must be
         * paired and are called only when the mmap_sem and/or the
@@ -220,8 +209,6 @@ extern int __mmu_notifier_test_young(struct mm_struct *mm,
                                     unsigned long address);
 extern void __mmu_notifier_change_pte(struct mm_struct *mm,
                                      unsigned long address, pte_t pte);
-extern void __mmu_notifier_invalidate_page(struct mm_struct *mm,
-                                         unsigned long address);
 extern void __mmu_notifier_invalidate_range_start(struct mm_struct *mm,
                                  unsigned long start, unsigned long end);
 extern void __mmu_notifier_invalidate_range_end(struct mm_struct *mm,
@@ -268,13 +255,6 @@ static inline void mmu_notifier_change_pte(struct mm_struct *mm,
                __mmu_notifier_change_pte(mm, address, pte);
 }
 
-static inline void mmu_notifier_invalidate_page(struct mm_struct *mm,
-                                         unsigned long address)
-{
-       if (mm_has_notifiers(mm))
-               __mmu_notifier_invalidate_page(mm, address);
-}
-
 static inline void mmu_notifier_invalidate_range_start(struct mm_struct *mm,
                                  unsigned long start, unsigned long end)
 {
@@ -442,11 +422,6 @@ static inline void mmu_notifier_change_pte(struct mm_struct *mm,
 {
 }
 
-static inline void mmu_notifier_invalidate_page(struct mm_struct *mm,
-                                         unsigned long address)
-{
-}
-
 static inline void mmu_notifier_invalidate_range_start(struct mm_struct *mm,
                                  unsigned long start, unsigned long end)
 {
index 25d8225dbd046d4dcac62e8261d1e49981b72df9..8efff888bd9bccdd4794bfaf4be66a3631e90cf1 100644 (file)
@@ -254,7 +254,7 @@ enum {
        NVME_CTRL_VWC_PRESENT                   = 1 << 0,
        NVME_CTRL_OACS_SEC_SUPP                 = 1 << 0,
        NVME_CTRL_OACS_DIRECTIVES               = 1 << 5,
-       NVME_CTRL_OACS_DBBUF_SUPP               = 1 << 7,
+       NVME_CTRL_OACS_DBBUF_SUPP               = 1 << 8,
 };
 
 struct nvme_lbaf {
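In the NVMe 1.3 Optional Admin Command Support field, Doorbell Buffer Config is bit 8 (bit 7 belongs to a different capability), so the old value tested the wrong feature. The consumer-side check keeps its shape (caller name illustrative, oacs assumed already CPU-endian):

    if (ctrl->oacs & NVME_CTRL_OACS_DBBUF_SUPP)   /* now really bit 8 */
            nvme_dbbuf_dma_alloc(dev);            /* illustrative caller */
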
index 6d3c54264d8e46f662e3a0caa416538042727fb3..3f03567631cb092dc8f51aae740dd2a3472730e9 100644 (file)
@@ -145,43 +145,6 @@ struct nd_cmd_clear_error {
        __u64 cleared;
 } __packed;
 
-struct nd_cmd_trans_spa {
-       __u64 spa;
-       __u32 status;
-       __u8  flags;
-       __u8  _reserved[3];
-       __u64 trans_length;
-       __u32 num_nvdimms;
-       struct nd_nvdimm_device {
-               __u32 nfit_device_handle;
-               __u32 _reserved;
-               __u64 dpa;
-       } __packed devices[0];
-
-} __packed;
-
-struct nd_cmd_ars_err_inj {
-       __u64 err_inj_spa_range_base;
-       __u64 err_inj_spa_range_length;
-       __u8  err_inj_options;
-       __u32 status;
-} __packed;
-
-struct nd_cmd_ars_err_inj_clr {
-       __u64 err_inj_clr_spa_range_base;
-       __u64 err_inj_clr_spa_range_length;
-       __u32 status;
-} __packed;
-
-struct nd_cmd_ars_err_inj_stat {
-       __u32 status;
-       __u32 inj_err_rec_count;
-       struct nd_error_stat_query_record {
-               __u64 err_inj_stat_spa_range_base;
-               __u64 err_inj_stat_spa_range_length;
-       } __packed record[0];
-} __packed;
-
 enum {
        ND_CMD_IMPLEMENTED = 0,
 
index 8d51516885047c7807cb050979c1ebbdaec6dc07..87a1213dd32678fba0487b7411dc4af5661cf01e 100644 (file)
@@ -1892,6 +1892,7 @@ static struct cftype files[] = {
        {
                .name = "memory_pressure",
                .read_u64 = cpuset_read_u64,
+               .private = FILE_MEMORY_PRESSURE,
        },
 
        {
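Without an explicit .private the field is zero, so reads of memory_pressure were dispatched as whichever FILE_* constant happens to be 0. A sketch of the dispatcher this feeds (details elided):

    static u64 cpuset_read_u64(struct cgroup_subsys_state *css,
                               struct cftype *cft)
    {
            switch (cft->private) {       /* 0 unless .private is set */
            case FILE_MEMORY_PRESSURE:
                    return fmeter_getrate(&css_cs(css)->fmeter);
            /* other FILE_* cases elided */
            default:
                    return 0;
            }
    }
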
index 0e137f98a50c30db936a8deecedc1f84455aca23..267f6ef91d9709e2f53c35e053a792d3fcede64d 100644 (file)
@@ -1262,8 +1262,6 @@ void uprobe_end_dup_mmap(void)
 
 void uprobe_dup_mmap(struct mm_struct *oldmm, struct mm_struct *newmm)
 {
-       newmm->uprobes_state.xol_area = NULL;
-
        if (test_bit(MMF_HAS_UPROBES, &oldmm->flags)) {
                set_bit(MMF_HAS_UPROBES, &newmm->flags);
                /* unconditionally, dup_mmap() skips VM_DONTCOPY vmas */
index cbbea277b3fba7a8a1ff73ffaf620202a706036a..b7e9e57b71eaef65bd56b409b32b582e9b16ed4a 100644 (file)
@@ -785,6 +785,13 @@ static void mm_init_owner(struct mm_struct *mm, struct task_struct *p)
 #endif
 }
 
+static void mm_init_uprobes_state(struct mm_struct *mm)
+{
+#ifdef CONFIG_UPROBES
+       mm->uprobes_state.xol_area = NULL;
+#endif
+}
+
 static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p,
        struct user_namespace *user_ns)
 {
@@ -812,6 +819,7 @@ static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p,
 #if defined(CONFIG_TRANSPARENT_HUGEPAGE) && !USE_SPLIT_PMD_PTLOCKS
        mm->pmd_huge_pte = NULL;
 #endif
+       mm_init_uprobes_state(mm);
 
        if (current->mm) {
                mm->flags = current->mm->flags & MMF_INIT_MASK;
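Together with the kernel/events/uprobes.c hunk above, this moves the xol_area reset from uprobe_dup_mmap() into mm_init(), so every new mm starts with a NULL area before it becomes visible, not just mms duplicated from an MMF_HAS_UPROBES parent. The resulting call path, in outline:

    /* every mm allocation now passes through: */
    mm_init(mm, p, user_ns)
            -> mm_init_uprobes_state(mm)  /* xol_area = NULL (CONFIG_UPROBES) */
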
index 26db528c1d881bf371ea5b53b7ade0815c990bf1..1c19edf824272db47a48730478af5f3b582bbf67 100644 (file)
@@ -637,6 +637,7 @@ repeat:
                schedule();
 
        try_to_freeze();
+       cond_resched();
        goto repeat;
 }
 EXPORT_SYMBOL_GPL(kthread_worker_fn);
index 5a0f75a3bf01c1b259fb125fbe7266c186d83a71..eead4b339466854f51db30a17f17ee0472ebe6d3 100644 (file)
@@ -364,11 +364,11 @@ MPI mpi_read_raw_from_sgl(struct scatterlist *sgl, unsigned int nbytes)
        }
 
        miter.consumed = lzeros;
-       sg_miter_stop(&miter);
 
        nbytes -= lzeros;
        nbits = nbytes * 8;
        if (nbits > MAX_EXTERN_MPI_BITS) {
+               sg_miter_stop(&miter);
                pr_info("MPI: mpi too large (%u bits)\n", nbits);
                return NULL;
        }
@@ -376,6 +376,8 @@ MPI mpi_read_raw_from_sgl(struct scatterlist *sgl, unsigned int nbytes)
        if (nbytes > 0)
                nbits -= count_leading_zeros(*buff) - (BITS_PER_LONG - 8);
 
+       sg_miter_stop(&miter);
+
        nlimbs = DIV_ROUND_UP(nbytes, BYTES_PER_MPI_LIMB);
        val = mpi_alloc(nlimbs);
        if (!val)
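The reordering matters because *buff points into the scatterlist page mapped by the iterator: calling sg_miter_stop() before the count_leading_zeros(*buff) read would dereference a mapping that may already be torn down. The general sg_miter rule, as a sketch:

    sg_miter_start(&miter, sgl, ents, SG_MITER_FROM_SG);
    while (sg_miter_next(&miter)) {
            /* miter.addr is valid only here ... */
    }
    sg_miter_stop(&miter);  /* ... so stop only after the last dereference */
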
index 0b41c8cbeabc7a6b4e2533be1dfd74b9f10e29cd..65b4b6e7f7bde69620b73af705ecca33629a8f79 100644 (file)
@@ -1041,7 +1041,7 @@ void add_page_wait_queue(struct page *page, wait_queue_entry_t *waiter)
        unsigned long flags;
 
        spin_lock_irqsave(&q->lock, flags);
-       __add_wait_queue(q, waiter);
+       __add_wait_queue_entry_tail(q, waiter);
        SetPageWaiters(page);
        spin_unlock_irqrestore(&q->lock, flags);
 }
index 23ed525bc2bc1367dde306cd6a1fd24a5d6b5bae..4d7d1e5ddba9d9b26583b6ca9f05774247d9b1d8 100644 (file)
@@ -613,6 +613,7 @@ static int madvise_inject_error(int behavior,
                unsigned long start, unsigned long end)
 {
        struct page *page;
+       struct zone *zone;
 
        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;
@@ -646,6 +647,11 @@ static int madvise_inject_error(int behavior,
                if (ret)
                        return ret;
        }
+
+       /* Ensure that all poisoned pages are removed from per-cpu lists */
+       for_each_populated_zone(zone)
+               drain_all_pages(zone);
+
        return 0;
 }
 #endif
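Offlined pages can still sit on per-CPU free lists and be handed out again before the poisoning is honoured; draining every populated zone closes that window:

    /* drain_all_pages(zone) flushes each CPU's pcp lists for the zone back to
     * the buddy allocator; for_each_populated_zone() visits every zone that
     * has managed pages */
    for_each_populated_zone(zone)
            drain_all_pages(zone);
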
index fe2fba27ded2fab229d0ef7a4908551343d31b89..56e48e4593cb76b1f89be8d2afd72762ebbfff4d 100644 (file)
@@ -4008,7 +4008,8 @@ int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
 #endif /* __PAGETABLE_PMD_FOLDED */
 
 static int __follow_pte_pmd(struct mm_struct *mm, unsigned long address,
-               pte_t **ptepp, pmd_t **pmdpp, spinlock_t **ptlp)
+                           unsigned long *start, unsigned long *end,
+                           pte_t **ptepp, pmd_t **pmdpp, spinlock_t **ptlp)
 {
        pgd_t *pgd;
        p4d_t *p4d;
@@ -4035,17 +4036,29 @@ static int __follow_pte_pmd(struct mm_struct *mm, unsigned long address,
                if (!pmdpp)
                        goto out;
 
+               if (start && end) {
+                       *start = address & PMD_MASK;
+                       *end = *start + PMD_SIZE;
+                       mmu_notifier_invalidate_range_start(mm, *start, *end);
+               }
                *ptlp = pmd_lock(mm, pmd);
                if (pmd_huge(*pmd)) {
                        *pmdpp = pmd;
                        return 0;
                }
                spin_unlock(*ptlp);
+               if (start && end)
+                       mmu_notifier_invalidate_range_end(mm, *start, *end);
        }
 
        if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
                goto out;
 
+       if (start && end) {
+               *start = address & PAGE_MASK;
+               *end = *start + PAGE_SIZE;
+               mmu_notifier_invalidate_range_start(mm, *start, *end);
+       }
        ptep = pte_offset_map_lock(mm, pmd, address, ptlp);
        if (!pte_present(*ptep))
                goto unlock;
@@ -4053,6 +4066,8 @@ static int __follow_pte_pmd(struct mm_struct *mm, unsigned long address,
        return 0;
 unlock:
        pte_unmap_unlock(ptep, *ptlp);
+       if (start && end)
+               mmu_notifier_invalidate_range_end(mm, *start, *end);
 out:
        return -EINVAL;
 }
@@ -4064,20 +4079,21 @@ static inline int follow_pte(struct mm_struct *mm, unsigned long address,
 
        /* (void) is needed to make gcc happy */
        (void) __cond_lock(*ptlp,
-                          !(res = __follow_pte_pmd(mm, address, ptepp, NULL,
-                                          ptlp)));
+                          !(res = __follow_pte_pmd(mm, address, NULL, NULL,
+                                                   ptepp, NULL, ptlp)));
        return res;
 }
 
 int follow_pte_pmd(struct mm_struct *mm, unsigned long address,
+                            unsigned long *start, unsigned long *end,
                             pte_t **ptepp, pmd_t **pmdpp, spinlock_t **ptlp)
 {
        int res;
 
        /* (void) is needed to make gcc happy */
        (void) __cond_lock(*ptlp,
-                          !(res = __follow_pte_pmd(mm, address, ptepp, pmdpp,
-                                          ptlp)));
+                          !(res = __follow_pte_pmd(mm, address, start, end,
+                                                   ptepp, pmdpp, ptlp)));
        return res;
 }
 EXPORT_SYMBOL(follow_pte_pmd);
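The start/end pair is optional (follow_pte() passes NULLs). When provided, the contract is asymmetric, as a sketch:

    unsigned long start, end;

    if (follow_pte_pmd(mm, addr, &start, &end, &ptep, &pmdp, &ptl))
            return;  /* failure paths have already issued _range_end() */
    /* success: _range_start(start, end) has fired and the page-table lock is
     * held; after unlocking, the caller must issue
     * mmu_notifier_invalidate_range_end(mm, start, end) itself */
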
index 54ca545629286223a16ef931830a3757b29da77d..314285284e6e6a4c764d0f8126dd0fecf8f7f84e 100644 (file)
@@ -174,20 +174,6 @@ void __mmu_notifier_change_pte(struct mm_struct *mm, unsigned long address,
        srcu_read_unlock(&srcu, id);
 }
 
-void __mmu_notifier_invalidate_page(struct mm_struct *mm,
-                                         unsigned long address)
-{
-       struct mmu_notifier *mn;
-       int id;
-
-       id = srcu_read_lock(&srcu);
-       hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) {
-               if (mn->ops->invalidate_page)
-                       mn->ops->invalidate_page(mn, mm, address);
-       }
-       srcu_read_unlock(&srcu, id);
-}
-
 void __mmu_notifier_invalidate_range_start(struct mm_struct *mm,
                                  unsigned long start, unsigned long end)
 {
index 7a58eb5757e3bd61d9dadfd8d851b9641454552d..1423da8dd16f5bdc83e20ddf6665b2022a9a6492 100644 (file)
@@ -3291,10 +3291,13 @@ __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order,
        /*
         * Go through the zonelist yet one more time, keep very high watermark
         * here, this is only to catch a parallel oom killing, we must fail if
-        * we're still under heavy pressure.
+        * we're still under heavy pressure. But make sure this reclaim
+        * attempt is not itself a __GFP_DIRECT_RECLAIM && !__GFP_NORETRY
+        * allocation, which would never fail while oom_lock is held.
         */
-       page = get_page_from_freelist(gfp_mask | __GFP_HARDWALL, order,
-                                       ALLOC_WMARK_HIGH|ALLOC_CPUSET, ac);
+       page = get_page_from_freelist((gfp_mask | __GFP_HARDWALL) &
+                                     ~__GFP_DIRECT_RECLAIM, order,
+                                     ALLOC_WMARK_HIGH|ALLOC_CPUSET, ac);
        if (page)
                goto out;
 
index c1286d47aa1fad7fee7ea5bb865a2dc7efd672f2..c570f82e6827153316465b9e18f0fca376a1c1a1 100644 (file)
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -887,11 +887,21 @@ static bool page_mkclean_one(struct page *page, struct vm_area_struct *vma,
                .address = address,
                .flags = PVMW_SYNC,
        };
+       unsigned long start = address, end;
        int *cleaned = arg;
-       bool invalidation_needed = false;
+
+       /*
+        * We have to assume the worst case, i.e. a pmd, for the invalidation
+        * range. Note that the page cannot be freed from this function.
+        */
+       end = min(vma->vm_end, start + (PAGE_SIZE << compound_order(page)));
+       mmu_notifier_invalidate_range_start(vma->vm_mm, start, end);
 
        while (page_vma_mapped_walk(&pvmw)) {
+               unsigned long cstart, cend;
                int ret = 0;
+
+               cstart = address = pvmw.address;
                if (pvmw.pte) {
                        pte_t entry;
                        pte_t *pte = pvmw.pte;
@@ -899,11 +909,12 @@ static bool page_mkclean_one(struct page *page, struct vm_area_struct *vma,
                        if (!pte_dirty(*pte) && !pte_write(*pte))
                                continue;
 
-                       flush_cache_page(vma, pvmw.address, pte_pfn(*pte));
-                       entry = ptep_clear_flush(vma, pvmw.address, pte);
+                       flush_cache_page(vma, address, pte_pfn(*pte));
+                       entry = ptep_clear_flush(vma, address, pte);
                        entry = pte_wrprotect(entry);
                        entry = pte_mkclean(entry);
-                       set_pte_at(vma->vm_mm, pvmw.address, pte, entry);
+                       set_pte_at(vma->vm_mm, address, pte, entry);
+                       cend = cstart + PAGE_SIZE;
                        ret = 1;
                } else {
 #ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE
@@ -913,11 +924,13 @@ static bool page_mkclean_one(struct page *page, struct vm_area_struct *vma,
                        if (!pmd_dirty(*pmd) && !pmd_write(*pmd))
                                continue;
 
-                       flush_cache_page(vma, pvmw.address, page_to_pfn(page));
-                       entry = pmdp_huge_clear_flush(vma, pvmw.address, pmd);
+                       flush_cache_page(vma, address, page_to_pfn(page));
+                       entry = pmdp_huge_clear_flush(vma, address, pmd);
                        entry = pmd_wrprotect(entry);
                        entry = pmd_mkclean(entry);
-                       set_pmd_at(vma->vm_mm, pvmw.address, pmd, entry);
+                       set_pmd_at(vma->vm_mm, address, pmd, entry);
+                       cstart &= PMD_MASK;
+                       cend = cstart + PMD_SIZE;
                        ret = 1;
 #else
                        /* unexpected pmd-mapped page? */
@@ -926,15 +939,12 @@ static bool page_mkclean_one(struct page *page, struct vm_area_struct *vma,
                }
 
                if (ret) {
+                       mmu_notifier_invalidate_range(vma->vm_mm, cstart, cend);
                        (*cleaned)++;
-                       invalidation_needed = true;
                }
        }
 
-       if (invalidation_needed) {
-               mmu_notifier_invalidate_range(vma->vm_mm, address,
-                               address + (1UL << compound_order(page)));
-       }
+       mmu_notifier_invalidate_range_end(vma->vm_mm, start, end);
 
        return true;
 }
@@ -1328,7 +1338,8 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
        };
        pte_t pteval;
        struct page *subpage;
-       bool ret = true, invalidation_needed = false;
+       bool ret = true;
+       unsigned long start = address, end;
        enum ttu_flags flags = (enum ttu_flags)arg;
 
        /* munlock has nothing to gain from examining un-locked vmas */
@@ -1340,6 +1351,14 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
                                flags & TTU_MIGRATION, page);
        }
 
+       /*
+        * We have to assume the worst case, i.e. a pmd, for the invalidation
+        * range. Note that the page cannot be freed in this function, as the
+        * caller of try_to_unmap() must hold a reference on the page.
+        */
+       end = min(vma->vm_end, start + (PAGE_SIZE << compound_order(page)));
+       mmu_notifier_invalidate_range_start(vma->vm_mm, start, end);
+
        while (page_vma_mapped_walk(&pvmw)) {
                /*
                 * If the page is mlock()d, we cannot swap it out.
@@ -1368,9 +1387,11 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
                VM_BUG_ON_PAGE(!pvmw.pte, page);
 
                subpage = page - page_to_pfn(page) + pte_pfn(*pvmw.pte);
+               address = pvmw.address;
+
 
                if (!(flags & TTU_IGNORE_ACCESS)) {
-                       if (ptep_clear_flush_young_notify(vma, pvmw.address,
+                       if (ptep_clear_flush_young_notify(vma, address,
                                                pvmw.pte)) {
                                ret = false;
                                page_vma_mapped_walk_done(&pvmw);
@@ -1379,7 +1400,7 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
                }
 
                /* Nuke the page table entry. */
-               flush_cache_page(vma, pvmw.address, pte_pfn(*pvmw.pte));
+               flush_cache_page(vma, address, pte_pfn(*pvmw.pte));
                if (should_defer_flush(mm, flags)) {
                        /*
                         * We clear the PTE but do not flush so potentially
@@ -1389,12 +1410,11 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
                         * transition on a cached TLB entry is written through
                         * and traps if the PTE is unmapped.
                         */
-                       pteval = ptep_get_and_clear(mm, pvmw.address,
-                                                   pvmw.pte);
+                       pteval = ptep_get_and_clear(mm, address, pvmw.pte);
 
                        set_tlb_ubc_flush_pending(mm, pte_dirty(pteval));
                } else {
-                       pteval = ptep_clear_flush(vma, pvmw.address, pvmw.pte);
+                       pteval = ptep_clear_flush(vma, address, pvmw.pte);
                }
 
                /* Move the dirty bit to the page. Now the pte is gone. */
@@ -1409,12 +1429,12 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
                        if (PageHuge(page)) {
                                int nr = 1 << compound_order(page);
                                hugetlb_count_sub(nr, mm);
-                               set_huge_swap_pte_at(mm, pvmw.address,
+                               set_huge_swap_pte_at(mm, address,
                                                     pvmw.pte, pteval,
                                                     vma_mmu_pagesize(vma));
                        } else {
                                dec_mm_counter(mm, mm_counter(page));
-                               set_pte_at(mm, pvmw.address, pvmw.pte, pteval);
+                               set_pte_at(mm, address, pvmw.pte, pteval);
                        }
 
                } else if (pte_unused(pteval)) {
@@ -1438,7 +1458,7 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
                        swp_pte = swp_entry_to_pte(entry);
                        if (pte_soft_dirty(pteval))
                                swp_pte = pte_swp_mksoft_dirty(swp_pte);
-                       set_pte_at(mm, pvmw.address, pvmw.pte, swp_pte);
+                       set_pte_at(mm, address, pvmw.pte, swp_pte);
                } else if (PageAnon(page)) {
                        swp_entry_t entry = { .val = page_private(subpage) };
                        pte_t swp_pte;
@@ -1449,6 +1469,7 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
                        if (unlikely(PageSwapBacked(page) != PageSwapCache(page))) {
                                WARN_ON_ONCE(1);
                                ret = false;
+                               /* We have to invalidate as we cleared the pte */
                                page_vma_mapped_walk_done(&pvmw);
                                break;
                        }
@@ -1464,7 +1485,7 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
                                 * If the page was redirtied, it cannot be
                                 * discarded. Remap the page to page table.
                                 */
-                               set_pte_at(mm, pvmw.address, pvmw.pte, pteval);
+                               set_pte_at(mm, address, pvmw.pte, pteval);
                                SetPageSwapBacked(page);
                                ret = false;
                                page_vma_mapped_walk_done(&pvmw);
@@ -1472,7 +1493,7 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
                        }
 
                        if (swap_duplicate(entry) < 0) {
-                               set_pte_at(mm, pvmw.address, pvmw.pte, pteval);
+                               set_pte_at(mm, address, pvmw.pte, pteval);
                                ret = false;
                                page_vma_mapped_walk_done(&pvmw);
                                break;
@@ -1488,18 +1509,18 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
                        swp_pte = swp_entry_to_pte(entry);
                        if (pte_soft_dirty(pteval))
                                swp_pte = pte_swp_mksoft_dirty(swp_pte);
-                       set_pte_at(mm, pvmw.address, pvmw.pte, swp_pte);
+                       set_pte_at(mm, address, pvmw.pte, swp_pte);
                } else
                        dec_mm_counter(mm, mm_counter_file(page));
 discard:
                page_remove_rmap(subpage, PageHuge(page));
                put_page(page);
-               invalidation_needed = true;
+               mmu_notifier_invalidate_range(mm, address,
+                                             address + PAGE_SIZE);
        }
 
-       if (invalidation_needed)
-               mmu_notifier_invalidate_range(mm, address,
-                               address + (1UL << compound_order(page)));
+       mmu_notifier_invalidate_range_end(vma->vm_mm, start, end);
+
        return ret;
 }
 
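Both rmap walkers now share one shape: bracket the whole page_vma_mapped_walk() with a conservative, pmd-sized invalidate_range_start/end, and emit a targeted mmu_notifier_invalidate_range() for each entry actually cleared. In outline:

    end = min(vma->vm_end, start + (PAGE_SIZE << compound_order(page)));
    mmu_notifier_invalidate_range_start(vma->vm_mm, start, end);
    while (page_vma_mapped_walk(&pvmw)) {
            /* clear or write-protect one pte/pmd, then: */
            mmu_notifier_invalidate_range(vma->vm_mm, cstart, cend);
    }
    mmu_notifier_invalidate_range_end(vma->vm_mm, start, end);
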
index 4b72b530c84f1f4b4e98d8342d9d62fefe8d34d6..62ea8f83d4a023a9780c6dc219d8f29ba244c506 100644 (file)
@@ -873,7 +873,7 @@ static void check_simple_bus_reg(struct check *c, struct dt_info *dti, struct no
        while (size--)
                reg = (reg << 32) | fdt32_to_cpu(*(cells++));
 
-       snprintf(unit_addr, sizeof(unit_addr), "%zx", reg);
+       snprintf(unit_addr, sizeof(unit_addr), "%llx", (unsigned long long)reg);
        if (!streq(unitname, unit_addr))
                FAIL(c, dti, "Node %s simple-bus unit address format error, expected \"%s\"",
                     node->fullpath, unit_addr);
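Here reg is a uint64_t, and %zx expects a size_t, which is only 32 bits wide on 32-bit hosts; casting to unsigned long long with %llx is the portable spelling. The same idiom applies to any fixed-width 64-bit value handed to printf:

    uint64_t reg = 0x12345678abcdULL;   /* illustrative value */
    char unit_addr[17];                 /* 16 hex digits + NUL */

    snprintf(unit_addr, sizeof(unit_addr), "%llx", (unsigned long long)reg);
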
index 22995cb3bd447ee0744e142b9a6500b706d85853..cf0433f8006772392061b0cf95514a266a44f948 100644 (file)
@@ -3064,6 +3064,7 @@ int snd_pcm_kernel_ioctl(struct snd_pcm_substream *substream,
 {
        snd_pcm_uframes_t *frames = arg;
        snd_pcm_sframes_t result;
+       int err;
        
        switch (cmd) {
        case SNDRV_PCM_IOCTL_FORWARD:
@@ -3083,7 +3084,10 @@ int snd_pcm_kernel_ioctl(struct snd_pcm_substream *substream,
        case SNDRV_PCM_IOCTL_START:
                return snd_pcm_start_lock_irq(substream);
        case SNDRV_PCM_IOCTL_DRAIN:
-               return snd_pcm_drain(substream, NULL);
+               snd_power_lock(substream->pcm->card);
+               err = snd_pcm_drain(substream, NULL);
+               snd_power_unlock(substream->pcm->card);
+               return err;
        case SNDRV_PCM_IOCTL_DROP:
                return snd_pcm_drop(substream);
        case SNDRV_PCM_IOCTL_DELAY:
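snd_pcm_drain() assumes its caller holds the card's power lock (it drops and re-takes that lock while waiting), which the native ioctl path already guarantees; the in-kernel entry point used by the OSS layer did not, producing an unlock imbalance. The required calling pattern:

    /* hold the power lock across the drain; it is released internally while
     * the drain sleeps and re-acquired before returning */
    snd_power_lock(card);
    err = snd_pcm_drain(substream, NULL);
    snd_power_unlock(card);
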
index 0ec7985ed306612db13bd6314ccc94733e835cce..054b613cb0d04c9dabe3e67e647448845708b29e 100644 (file)
@@ -567,7 +567,7 @@ int rt5670_set_jack_detect(struct snd_soc_codec *codec,
 
        rt5670->jack = jack;
        rt5670->hp_gpio.gpiod_dev = codec->dev;
-       rt5670->hp_gpio.name = "headphone detect";
+       rt5670->hp_gpio.name = "headset";
        rt5670->hp_gpio.report = SND_JACK_HEADSET |
                SND_JACK_BTN_0 | SND_JACK_BTN_1 | SND_JACK_BTN_2;
        rt5670->hp_gpio.debounce_time = 150;
index 7d7ab4aee42e3ab133926f2dc5b11489b93a28ae..d72f7d58102f7666740866eb3b8b213604df4e54 100644 (file)
@@ -132,7 +132,7 @@ int asoc_simple_card_parse_card_name(struct snd_soc_card *card,
 
        /* Parse the card name from DT */
        ret = snd_soc_of_parse_card_name(card, "label");
-       if (ret < 0) {
+       if (ret < 0 || !card->name) {
                char prop[128];
 
                snprintf(prop, sizeof(prop), "%sname", prefix);
index bc2a52de06a39729ac6aae83c38e08e822575445..f597d558222388e0b52302395ecfbf22a5a127d7 100644 (file)
@@ -184,6 +184,13 @@ static int cht_aif1_hw_params(struct snd_pcm_substream *substream,
        return 0;
 }
 
+static const struct acpi_gpio_params headset_gpios = { 0, 0, false };
+
+static const struct acpi_gpio_mapping cht_rt5672_gpios[] = {
+       { "headset-gpios", &headset_gpios, 1 },
+       {},
+};
+
 static int cht_codec_init(struct snd_soc_pcm_runtime *runtime)
 {
        int ret;
@@ -191,6 +198,9 @@ static int cht_codec_init(struct snd_soc_pcm_runtime *runtime)
        struct snd_soc_codec *codec = codec_dai->codec;
        struct cht_mc_private *ctx = snd_soc_card_get_drvdata(runtime->card);
 
+       if (devm_acpi_dev_add_driver_gpios(codec->dev, cht_rt5672_gpios))
+               dev_warn(runtime->dev, "Unable to add GPIO mapping table\n");
+
        /* TDM 4 slots 24 bit, set Rx & Tx bitmask to 4 active slots */
        ret = snd_soc_dai_set_tdm_slot(codec_dai, 0xF, 0xF, 4, 24);
        if (ret < 0) {
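This pairs with the rt5670 hunk above: the codec requests its jack-detect GPIO under the con_id "headset", and on these boards the GPIO exists only as an unnamed ACPI entry, so the machine driver publishes the name-to-index mapping. Roughly how the lookup then resolves (consumer side is a sketch):

    /* gpiod_get(dev, "headset", ...) now matches the
     * { "headset-gpios", &headset_gpios, 1 } entry registered above and
     * resolves to ACPI GPIO index 0 of the codec device */
    struct gpio_desc *desc = devm_gpiod_get(codec->dev, "headset", GPIOD_IN);
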
index 15252d723b54e196d864d29a7aac2040686dc59a..4d81f6ded88e823c064e72eff3a98a37b68125a8 100644 (file)
@@ -322,47 +322,6 @@ static inline struct kvm *mmu_notifier_to_kvm(struct mmu_notifier *mn)
        return container_of(mn, struct kvm, mmu_notifier);
 }
 
-static void kvm_mmu_notifier_invalidate_page(struct mmu_notifier *mn,
-                                            struct mm_struct *mm,
-                                            unsigned long address)
-{
-       struct kvm *kvm = mmu_notifier_to_kvm(mn);
-       int need_tlb_flush, idx;
-
-       /*
-        * When ->invalidate_page runs, the linux pte has been zapped
-        * already but the page is still allocated until
-        * ->invalidate_page returns. So if we increase the sequence
-        * here the kvm page fault will notice if the spte can't be
-        * established because the page is going to be freed. If
-        * instead the kvm page fault establishes the spte before
-        * ->invalidate_page runs, kvm_unmap_hva will release it
-        * before returning.
-        *
-        * The sequence increase only need to be seen at spin_unlock
-        * time, and not at spin_lock time.
-        *
-        * Increasing the sequence after the spin_unlock would be
-        * unsafe because the kvm page fault could then establish the
-        * pte after kvm_unmap_hva returned, without noticing the page
-        * is going to be freed.
-        */
-       idx = srcu_read_lock(&kvm->srcu);
-       spin_lock(&kvm->mmu_lock);
-
-       kvm->mmu_notifier_seq++;
-       need_tlb_flush = kvm_unmap_hva(kvm, address) | kvm->tlbs_dirty;
-       /* we've to flush the tlb before the pages can be freed */
-       if (need_tlb_flush)
-               kvm_flush_remote_tlbs(kvm);
-
-       spin_unlock(&kvm->mmu_lock);
-
-       kvm_arch_mmu_notifier_invalidate_page(kvm, address);
-
-       srcu_read_unlock(&kvm->srcu, idx);
-}
-
 static void kvm_mmu_notifier_change_pte(struct mmu_notifier *mn,
                                        struct mm_struct *mm,
                                        unsigned long address,
@@ -510,7 +469,6 @@ static void kvm_mmu_notifier_release(struct mmu_notifier *mn,
 }
 
 static const struct mmu_notifier_ops kvm_mmu_notifier_ops = {
-       .invalidate_page        = kvm_mmu_notifier_invalidate_page,
        .invalidate_range_start = kvm_mmu_notifier_invalidate_range_start,
        .invalidate_range_end   = kvm_mmu_notifier_invalidate_range_end,
        .clear_flush_young      = kvm_mmu_notifier_clear_flush_young,
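With the last in-tree callers (fs/dax.c, mm/rmap.c) converted above, ->invalidate_page is dead and KVM's protection against racing page faults rests entirely on the range pair it already implements. The surviving protocol, in outline:

    /* invalidate_range_start(): mmu_notifier_count++  -- blocks new sptes,
     *                           kvm_unmap_hva_range() -- zap + TLB flush
     * invalidate_range_end():   mmu_notifier_seq++    -- fault retry check,
     *                           mmu_notifier_count--  */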