git.proxmox.com Git - mirror_ubuntu-jammy-kernel.git/commitdiff
Merge branch 'akpm' (patches from Andrew)
authorLinus Torvalds <torvalds@linux-foundation.org>
Sun, 24 Jan 2021 20:16:34 +0000 (12:16 -0800)
committerLinus Torvalds <torvalds@linux-foundation.org>
Sun, 24 Jan 2021 20:16:34 +0000 (12:16 -0800)
Merge misc fixes from Andrew Morton:
 "18 patches.

  Subsystems affected by this patch series: mm (pagealloc, memcg, kasan,
  memory-failure, and highmem), ubsan, proc, and MAINTAINERS"

* emailed patches from Andrew Morton <akpm@linux-foundation.org>:
  MAINTAINERS: add a couple more files to the Clang/LLVM section
  proc_sysctl: fix oops caused by incorrect command parameters
  powerpc/mm/highmem: use __set_pte_at() for kmap_local()
  mips/mm/highmem: use set_pte() for kmap_local()
  mm/highmem: prepare for overriding set_pte_at()
  sparc/mm/highmem: flush cache and TLB
  mm: fix page reference leak in soft_offline_page()
  ubsan: disable unsigned-overflow check for i386
  kasan, mm: fix resetting page_alloc tags for HW_TAGS
  kasan, mm: fix conflicts with init_on_alloc/free
  kasan: fix HW_TAGS boot parameters
  kasan: fix incorrect arguments passing in kasan_add_zero_shadow
  kasan: fix unaligned address is unhandled in kasan_remove_zero_shadow
  mm: fix numa stats for thp migration
  mm: memcg: fix memcg file_dirty numa stat
  mm: memcg/slab: optimize objcg stock draining
  mm: fix initialization of struct page for holes in memory layout
  x86/setup: don't remove E820_TYPE_RAM for pfn 0

89 files changed:
Documentation/ABI/testing/sysfs-class-devlink
Documentation/ABI/testing/sysfs-devices-consumer
Documentation/ABI/testing/sysfs-devices-supplier
Documentation/devicetree/bindings/iio/accel/bosch,bma255.yaml
MAINTAINERS
arch/powerpc/include/asm/exception-64s.h
arch/powerpc/include/asm/feature-fixups.h
arch/powerpc/kernel/entry_64.S
arch/powerpc/kernel/exceptions-64s.S
arch/powerpc/kernel/vmlinux.lds.S
arch/powerpc/lib/feature-fixups.c
arch/x86/entry/common.c
arch/x86/include/asm/fpu/api.h
arch/x86/include/asm/intel-family.h
arch/x86/include/asm/msr.h
arch/x86/include/asm/topology.h
arch/x86/kernel/cpu/amd.c
arch/x86/kernel/cpu/mce/core.c
arch/x86/kernel/cpu/topology.c
arch/x86/kernel/fpu/core.c
arch/x86/kernel/sev-es.c
arch/x86/kernel/smpboot.c
arch/x86/lib/mmx_32.c
drivers/base/core.c
drivers/base/dd.c
drivers/base/platform.c
drivers/counter/ti-eqep.c
drivers/hwtracing/intel_th/pci.c
drivers/hwtracing/stm/heartbeat.c
drivers/iio/adc/ti_am335x_adc.c
drivers/iio/common/st_sensors/st_sensors_trigger.c
drivers/iio/dac/ad5504.c
drivers/iio/proximity/sx9310.c
drivers/iio/temperature/mlx90632.c
drivers/irqchip/Kconfig
drivers/irqchip/irq-bcm2836.c
drivers/irqchip/irq-loongson-liointc.c
drivers/irqchip/irq-mips-cpu.c
drivers/irqchip/irq-sl28cpld.c
drivers/misc/cardreader/rtsx_pcr.c
drivers/misc/habanalabs/common/device.c
drivers/misc/habanalabs/common/firmware_if.c
drivers/misc/habanalabs/common/habanalabs.h
drivers/misc/habanalabs/common/habanalabs_ioctl.c
drivers/misc/habanalabs/common/memory.c
drivers/misc/habanalabs/common/mmu.c
drivers/misc/habanalabs/common/mmu_v1.c
drivers/misc/habanalabs/gaudi/gaudi.c
drivers/misc/habanalabs/goya/goya.c
drivers/phy/ingenic/Makefile
drivers/phy/mediatek/Kconfig
drivers/phy/motorola/phy-cpcap-usb.c
drivers/thunderbolt/icm.c
drivers/tty/serial/mvebu-uart.c
drivers/tty/tty_io.c
drivers/usb/cdns3/cdns3-imx.c
drivers/usb/gadget/udc/aspeed-vhub/epn.c
drivers/usb/gadget/udc/bdc/Kconfig
drivers/usb/gadget/udc/core.c
drivers/usb/gadget/udc/dummy_hcd.c
drivers/usb/host/ehci-hcd.c
drivers/usb/host/ehci-hub.c
drivers/usb/host/xhci-ring.c
drivers/usb/host/xhci-tegra.c
fs/cifs/connect.c
fs/cifs/transport.c
fs/kernfs/file.c
include/linux/device.h
include/linux/kthread.h
include/linux/ktime.h
include/linux/timekeeping32.h [deleted file]
include/trace/events/sched.h
kernel/fork.c
kernel/irq/manage.c
kernel/irq/msi.c
kernel/kthread.c
kernel/locking/lockdep.c
kernel/sched/core.c
kernel/sched/sched.h
kernel/signal.c
kernel/smpboot.c
kernel/time/ntp.c
kernel/time/timekeeping.c
kernel/workqueue.c
tools/objtool/check.c
tools/objtool/elf.c
tools/testing/selftests/powerpc/alignment/alignment_handler.c
tools/testing/selftests/powerpc/mm/pkey_exec_prot.c
tools/testing/selftests/powerpc/mm/pkey_siginfo.c

index b662f747c83ebd99eac633e92a034ca407ed5354..8a21ce515f61fb2cf6680508c15445fcb4c1991e 100644 (file)
@@ -5,8 +5,8 @@ Description:
                Provide a place in sysfs for the device link objects in the
                kernel at any given time.  The name of a device link directory,
                denoted as ... above, is of the form <supplier>--<consumer>
-               where <supplier> is the supplier device name and <consumer> is
-               the consumer device name.
+               where <supplier> is the supplier bus:device name and <consumer>
+               is the consumer bus:device name.
 
 What:          /sys/class/devlink/.../auto_remove_on
 Date:          May 2020
index 1f06d74d1c3ccca76cb67e48c456d269ca3b2435..0809fda092e668e81535100630d9b653869fa4e8 100644 (file)
@@ -4,5 +4,6 @@ Contact:        Saravana Kannan <saravanak@google.com>
 Description:
                The /sys/devices/.../consumer:<consumer> are symlinks to device
                links where this device is the supplier. <consumer> denotes the
-               name of the consumer in that device link. There can be zero or
-               more of these symlinks for a given device.
+               name of the consumer in that device link and is of the form
+               bus:device name. There can be zero or more of these symlinks
+               for a given device.
index a919e0db5e902ca572ddde6560663a2ef05ad15d..207f5972e98d8c1cd847fddbf91e88759a4a581c 100644 (file)
@@ -4,5 +4,6 @@ Contact:        Saravana Kannan <saravanak@google.com>
 Description:
                The /sys/devices/.../supplier:<supplier> are symlinks to device
                links where this device is the consumer. <supplier> denotes the
-               name of the supplier in that device link. There can be zero or
-               more of these symlinks for a given device.
+               name of the supplier in that device link and is of the form
+               bus:device name. There can be zero or more of these symlinks
+               for a given device.
index 6eef3480ea8fc0730f6b5e1277b2e7e38b062a84..c2efbb813ca275562044aa154699e03e87e7ca9c 100644 (file)
@@ -16,8 +16,8 @@ description:
 properties:
   compatible:
     enum:
-      - bosch,bmc150
-      - bosch,bmi055
+      - bosch,bmc150_accel
+      - bosch,bmi055_accel
       - bosch,bma255
       - bosch,bma250e
       - bosch,bma222
index 91c6dee7850e92fcfe33a73b88cb28788b80380a..992fe3b0900af299f799b6a1c776d1bda6ae1013 100644 (file)
@@ -3879,7 +3879,7 @@ F:        Documentation/devicetree/bindings/mtd/cadence-nand-controller.txt
 F:     drivers/mtd/nand/raw/cadence-nand-controller.c
 
 CADENCE USB3 DRD IP DRIVER
-M:     Peter Chen <peter.chen@nxp.com>
+M:     Peter Chen <peter.chen@kernel.org>
 M:     Pawel Laszczak <pawell@cadence.com>
 R:     Roger Quadros <rogerq@kernel.org>
 R:     Aswath Govindraju <a-govindraju@ti.com>
@@ -4161,7 +4161,7 @@ S:        Maintained
 F:     Documentation/translations/zh_CN/
 
 CHIPIDEA USB HIGH SPEED DUAL ROLE CONTROLLER
-M:     Peter Chen <Peter.Chen@nxp.com>
+M:     Peter Chen <peter.chen@kernel.org>
 L:     linux-usb@vger.kernel.org
 S:     Maintained
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/peter.chen/usb.git
@@ -18422,7 +18422,7 @@ F:      Documentation/usb/ohci.rst
 F:     drivers/usb/host/ohci*
 
 USB OTG FSM (Finite State Machine)
-M:     Peter Chen <Peter.Chen@nxp.com>
+M:     Peter Chen <peter.chen@kernel.org>
 L:     linux-usb@vger.kernel.org
 S:     Maintained
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/peter.chen/usb.git
index 1d32b174ab6aec3fb5403c11bddd7f938a78c2e8..c1a8aac01cf91d2917e923044cef69886f6eb367 100644 (file)
        nop;                                                            \
        nop;
 
+#define SCV_ENTRY_FLUSH_SLOT                                           \
+       SCV_ENTRY_FLUSH_FIXUP_SECTION;                                  \
+       nop;                                                            \
+       nop;                                                            \
+       nop;
+
 /*
  * r10 must be free to use, r13 must be paca
  */
        STF_ENTRY_BARRIER_SLOT;                                         \
        ENTRY_FLUSH_SLOT
 
+/*
+ * r10, ctr must be free to use, r13 must be paca
+ */
+#define SCV_INTERRUPT_TO_KERNEL                                                \
+       STF_ENTRY_BARRIER_SLOT;                                         \
+       SCV_ENTRY_FLUSH_SLOT
+
 /*
  * Macros for annotating the expected destination of (h)rfid
  *
index f6d2acb574252e4810c8cff1455b23cf8cb2cedf..ac605fc369c42326d8537d0be9239e41dc937c6f 100644 (file)
@@ -240,6 +240,14 @@ label##3:                                          \
        FTR_ENTRY_OFFSET 957b-958b;                     \
        .popsection;
 
+#define SCV_ENTRY_FLUSH_FIXUP_SECTION                  \
+957:                                                   \
+       .pushsection __scv_entry_flush_fixup,"a";       \
+       .align 2;                                       \
+958:                                                   \
+       FTR_ENTRY_OFFSET 957b-958b;                     \
+       .popsection;
+
 #define RFI_FLUSH_FIXUP_SECTION                                \
 951:                                                   \
        .pushsection __rfi_flush_fixup,"a";             \
@@ -273,10 +281,12 @@ label##3:                                         \
 
 extern long stf_barrier_fallback;
 extern long entry_flush_fallback;
+extern long scv_entry_flush_fallback;
 extern long __start___stf_entry_barrier_fixup, __stop___stf_entry_barrier_fixup;
 extern long __start___stf_exit_barrier_fixup, __stop___stf_exit_barrier_fixup;
 extern long __start___uaccess_flush_fixup, __stop___uaccess_flush_fixup;
 extern long __start___entry_flush_fixup, __stop___entry_flush_fixup;
+extern long __start___scv_entry_flush_fixup, __stop___scv_entry_flush_fixup;
 extern long __start___rfi_flush_fixup, __stop___rfi_flush_fixup;
 extern long __start___barrier_nospec_fixup, __stop___barrier_nospec_fixup;
 extern long __start__btb_flush_fixup, __stop__btb_flush_fixup;
index aa1af139d947297fdac16126fdb7b48d4d189a2b..33ddfeef4fe9ed4a8eb13f38964cc195e896d7c4 100644 (file)
@@ -75,7 +75,7 @@ BEGIN_FTR_SECTION
        bne     .Ltabort_syscall
 END_FTR_SECTION_IFSET(CPU_FTR_TM)
 #endif
-       INTERRUPT_TO_KERNEL
+       SCV_INTERRUPT_TO_KERNEL
        mr      r10,r1
        ld      r1,PACAKSAVE(r13)
        std     r10,0(r1)
index e02ad6fefa46cc4232199fddac38353c5433246b..6e53f76387374bc7f0485246bbc018f7ed21947f 100644 (file)
@@ -2993,6 +2993,25 @@ TRAMP_REAL_BEGIN(entry_flush_fallback)
        ld      r11,PACA_EXRFI+EX_R11(r13)
        blr
 
+/*
+ * The SCV entry flush happens with interrupts enabled, so it must disable
+ * to prevent EXRFI being clobbered by NMIs (e.g., soft_nmi_common). r10
+ * (containing LR) does not need to be preserved here because scv entry
+ * puts 0 in the pt_regs, CTR can be clobbered for the same reason.
+ */
+TRAMP_REAL_BEGIN(scv_entry_flush_fallback)
+       li      r10,0
+       mtmsrd  r10,1
+       lbz     r10,PACAIRQHAPPENED(r13)
+       ori     r10,r10,PACA_IRQ_HARD_DIS
+       stb     r10,PACAIRQHAPPENED(r13)
+       std     r11,PACA_EXRFI+EX_R11(r13)
+       L1D_DISPLACEMENT_FLUSH
+       ld      r11,PACA_EXRFI+EX_R11(r13)
+       li      r10,MSR_RI
+       mtmsrd  r10,1
+       blr
+
 TRAMP_REAL_BEGIN(rfi_flush_fallback)
        SET_SCRATCH0(r13);
        GET_PACA(r13);
index 4ab426b8b0e02276423241b25d2386eedc24b90e..72fa3c00229a56ebfab492c563a2678c8fbc8bc2 100644 (file)
@@ -145,6 +145,13 @@ SECTIONS
                __stop___entry_flush_fixup = .;
        }
 
+       . = ALIGN(8);
+       __scv_entry_flush_fixup : AT(ADDR(__scv_entry_flush_fixup) - LOAD_OFFSET) {
+               __start___scv_entry_flush_fixup = .;
+               *(__scv_entry_flush_fixup)
+               __stop___scv_entry_flush_fixup = .;
+       }
+
        . = ALIGN(8);
        __stf_exit_barrier_fixup : AT(ADDR(__stf_exit_barrier_fixup) - LOAD_OFFSET) {
                __start___stf_exit_barrier_fixup = .;
index 47821055b94c933851a3a213f25238707bbc3d9a..1fd31b4b0e139e36715f49fbd2928f5384d6ed3b 100644 (file)
@@ -290,9 +290,6 @@ void do_entry_flush_fixups(enum l1d_flush_type types)
        long *start, *end;
        int i;
 
-       start = PTRRELOC(&__start___entry_flush_fixup);
-       end = PTRRELOC(&__stop___entry_flush_fixup);
-
        instrs[0] = 0x60000000; /* nop */
        instrs[1] = 0x60000000; /* nop */
        instrs[2] = 0x60000000; /* nop */
@@ -312,6 +309,8 @@ void do_entry_flush_fixups(enum l1d_flush_type types)
        if (types & L1D_FLUSH_MTTRIG)
                instrs[i++] = 0x7c12dba6; /* mtspr TRIG2,r0 (SPR #882) */
 
+       start = PTRRELOC(&__start___entry_flush_fixup);
+       end = PTRRELOC(&__stop___entry_flush_fixup);
        for (i = 0; start < end; start++, i++) {
                dest = (void *)start + *start;
 
@@ -328,6 +327,25 @@ void do_entry_flush_fixups(enum l1d_flush_type types)
                patch_instruction((struct ppc_inst *)(dest + 2), ppc_inst(instrs[2]));
        }
 
+       start = PTRRELOC(&__start___scv_entry_flush_fixup);
+       end = PTRRELOC(&__stop___scv_entry_flush_fixup);
+       for (; start < end; start++, i++) {
+               dest = (void *)start + *start;
+
+               pr_devel("patching dest %lx\n", (unsigned long)dest);
+
+               patch_instruction((struct ppc_inst *)dest, ppc_inst(instrs[0]));
+
+               if (types == L1D_FLUSH_FALLBACK)
+                       patch_branch((struct ppc_inst *)(dest + 1), (unsigned long)&scv_entry_flush_fallback,
+                                    BRANCH_SET_LINK);
+               else
+                       patch_instruction((struct ppc_inst *)(dest + 1), ppc_inst(instrs[1]));
+
+               patch_instruction((struct ppc_inst *)(dest + 2), ppc_inst(instrs[2]));
+       }
+
+
        printk(KERN_DEBUG "entry-flush: patched %d locations (%s flush)\n", i,
                (types == L1D_FLUSH_NONE)       ? "no" :
                (types == L1D_FLUSH_FALLBACK)   ? "fallback displacement" :
index 18d8f17f755c4fab63fd71eadfcc632228aa1c0d..0904f5676e4d8880562ad9d00532c12d2fb79b8c 100644 (file)
@@ -73,10 +73,8 @@ static __always_inline void do_syscall_32_irqs_on(struct pt_regs *regs,
                                                  unsigned int nr)
 {
        if (likely(nr < IA32_NR_syscalls)) {
-               instrumentation_begin();
                nr = array_index_nospec(nr, IA32_NR_syscalls);
                regs->ax = ia32_sys_call_table[nr](regs);
-               instrumentation_end();
        }
 }
 
@@ -91,8 +89,11 @@ __visible noinstr void do_int80_syscall_32(struct pt_regs *regs)
         * or may not be necessary, but it matches the old asm behavior.
         */
        nr = (unsigned int)syscall_enter_from_user_mode(regs, nr);
+       instrumentation_begin();
 
        do_syscall_32_irqs_on(regs, nr);
+
+       instrumentation_end();
        syscall_exit_to_user_mode(regs);
 }
 
@@ -121,11 +122,12 @@ static noinstr bool __do_fast_syscall_32(struct pt_regs *regs)
                res = get_user(*(u32 *)&regs->bp,
                       (u32 __user __force *)(unsigned long)(u32)regs->sp);
        }
-       instrumentation_end();
 
        if (res) {
                /* User code screwed up. */
                regs->ax = -EFAULT;
+
+               instrumentation_end();
                syscall_exit_to_user_mode(regs);
                return false;
        }
@@ -135,6 +137,8 @@ static noinstr bool __do_fast_syscall_32(struct pt_regs *regs)
 
        /* Now this is just like a normal syscall. */
        do_syscall_32_irqs_on(regs, nr);
+
+       instrumentation_end();
        syscall_exit_to_user_mode(regs);
        return true;
 }
index a5aba4ab02248c5423c2d30576d137115cdd3046..67a4f1cb2aac58cf196fd4cd645b9736a0d76aaf 100644 (file)
  * Use kernel_fpu_begin/end() if you intend to use FPU in kernel context. It
  * disables preemption so be careful if you intend to use it for long periods
  * of time.
- * If you intend to use the FPU in softirq you need to check first with
+ * If you intend to use the FPU in irq/softirq you need to check first with
  * irq_fpu_usable() if it is possible.
  */
-extern void kernel_fpu_begin(void);
+
+/* Kernel FPU states to initialize in kernel_fpu_begin_mask() */
+#define KFPU_387       _BITUL(0)       /* 387 state will be initialized */
+#define KFPU_MXCSR     _BITUL(1)       /* MXCSR will be initialized */
+
+extern void kernel_fpu_begin_mask(unsigned int kfpu_mask);
 extern void kernel_fpu_end(void);
 extern bool irq_fpu_usable(void);
 extern void fpregs_mark_activate(void);
 
+/* Code that is unaware of kernel_fpu_begin_mask() can use this */
+static inline void kernel_fpu_begin(void)
+{
+       kernel_fpu_begin_mask(KFPU_387 | KFPU_MXCSR);
+}
+
 /*
  * Use fpregs_lock() while editing CPU's FPU registers or fpu->state.
  * A context switch will (and softirq might) save CPU's FPU registers to
index 5e658ba2654a7f687886276b504657b4d035e22a..9abe842dbd843a2d569a10e0131c937282ed1b46 100644 (file)
@@ -97,6 +97,7 @@
 
 #define        INTEL_FAM6_LAKEFIELD            0x8A
 #define INTEL_FAM6_ALDERLAKE           0x97
+#define INTEL_FAM6_ALDERLAKE_L         0x9A
 
 /* "Small Core" Processors (Atom) */
 
index 0b4920a7238e389c0eb1414fa0e1b5fa392f8f52..e16cccdd04207c8a9a0268663c6ded1d9c5f7904 100644 (file)
@@ -86,7 +86,7 @@ static inline void do_trace_rdpmc(unsigned int msr, u64 val, int failed) {}
  * think of extending them - you will be slapped with a stinking trout or a frozen
  * shark will reach you, wherever you are! You've been warned.
  */
-static inline unsigned long long notrace __rdmsr(unsigned int msr)
+static __always_inline unsigned long long __rdmsr(unsigned int msr)
 {
        DECLARE_ARGS(val, low, high);
 
@@ -98,7 +98,7 @@ static inline unsigned long long notrace __rdmsr(unsigned int msr)
        return EAX_EDX_VAL(val, low, high);
 }
 
-static inline void notrace __wrmsr(unsigned int msr, u32 low, u32 high)
+static __always_inline void __wrmsr(unsigned int msr, u32 low, u32 high)
 {
        asm volatile("1: wrmsr\n"
                     "2:\n"
index 488a8e8487548b7bc34ae6567b8a81528ccd3a41..9239399e54914537c5453c77c234d4af7877b403 100644 (file)
@@ -110,6 +110,8 @@ extern const struct cpumask *cpu_coregroup_mask(int cpu);
 #define topology_die_id(cpu)                   (cpu_data(cpu).cpu_die_id)
 #define topology_core_id(cpu)                  (cpu_data(cpu).cpu_core_id)
 
+extern unsigned int __max_die_per_package;
+
 #ifdef CONFIG_SMP
 #define topology_die_cpumask(cpu)              (per_cpu(cpu_die_map, cpu))
 #define topology_core_cpumask(cpu)             (per_cpu(cpu_core_map, cpu))
@@ -118,8 +120,6 @@ extern const struct cpumask *cpu_coregroup_mask(int cpu);
 extern unsigned int __max_logical_packages;
 #define topology_max_packages()                        (__max_logical_packages)
 
-extern unsigned int __max_die_per_package;
-
 static inline int topology_max_die_per_package(void)
 {
        return __max_die_per_package;
index f8ca66f3d8617ddd14fb508c6fe0fda603e50376..347a956f71ca098b8423ba96d64ce394352b16c3 100644 (file)
@@ -542,12 +542,12 @@ static void bsp_init_amd(struct cpuinfo_x86 *c)
                u32 ecx;
 
                ecx = cpuid_ecx(0x8000001e);
-               nodes_per_socket = ((ecx >> 8) & 7) + 1;
+               __max_die_per_package = nodes_per_socket = ((ecx >> 8) & 7) + 1;
        } else if (boot_cpu_has(X86_FEATURE_NODEID_MSR)) {
                u64 value;
 
                rdmsrl(MSR_FAM10H_NODE_ID, value);
-               nodes_per_socket = ((value >> 3) & 7) + 1;
+               __max_die_per_package = nodes_per_socket = ((value >> 3) & 7) + 1;
        }
 
        if (!boot_cpu_has(X86_FEATURE_AMD_SSBD) &&
index 13d3f1cbda176711069606c6dacbe2900579c208..e133ce1e562b381468e897d2c7b9dc708d35f4ac 100644 (file)
@@ -1992,10 +1992,9 @@ static __always_inline void exc_machine_check_kernel(struct pt_regs *regs)
         * that out because it's an indirect call. Annotate it.
         */
        instrumentation_begin();
-       trace_hardirqs_off_finish();
+
        machine_check_vector(regs);
-       if (regs->flags & X86_EFLAGS_IF)
-               trace_hardirqs_on_prepare();
+
        instrumentation_end();
        irqentry_nmi_exit(regs, irq_state);
 }
@@ -2004,7 +2003,9 @@ static __always_inline void exc_machine_check_user(struct pt_regs *regs)
 {
        irqentry_enter_from_user_mode(regs);
        instrumentation_begin();
+
        machine_check_vector(regs);
+
        instrumentation_end();
        irqentry_exit_to_user_mode(regs);
 }
index 1068002c8532391beaed43a7f1125e96a2d929c5..8678864ce7123c35cbb5d0ab09e9ffcd91095e16 100644 (file)
 #define BITS_SHIFT_NEXT_LEVEL(eax)     ((eax) & 0x1f)
 #define LEVEL_MAX_SIBLINGS(ebx)                ((ebx) & 0xffff)
 
-#ifdef CONFIG_SMP
 unsigned int __max_die_per_package __read_mostly = 1;
 EXPORT_SYMBOL(__max_die_per_package);
 
+#ifdef CONFIG_SMP
 /*
  * Check if given CPUID extended toplogy "leaf" is implemented
  */
index eb86a2b831b15a74588b302ccf8eed58e7490331..571220ac8beaa7020cb1a98a41ffb0e79c759484 100644 (file)
@@ -121,7 +121,7 @@ int copy_fpregs_to_fpstate(struct fpu *fpu)
 }
 EXPORT_SYMBOL(copy_fpregs_to_fpstate);
 
-void kernel_fpu_begin(void)
+void kernel_fpu_begin_mask(unsigned int kfpu_mask)
 {
        preempt_disable();
 
@@ -141,13 +141,14 @@ void kernel_fpu_begin(void)
        }
        __cpu_invalidate_fpregs_state();
 
-       if (boot_cpu_has(X86_FEATURE_XMM))
+       /* Put sane initial values into the control registers. */
+       if (likely(kfpu_mask & KFPU_MXCSR) && boot_cpu_has(X86_FEATURE_XMM))
                ldmxcsr(MXCSR_DEFAULT);
 
-       if (boot_cpu_has(X86_FEATURE_FPU))
+       if (unlikely(kfpu_mask & KFPU_387) && boot_cpu_has(X86_FEATURE_FPU))
                asm volatile ("fninit");
 }
-EXPORT_SYMBOL_GPL(kernel_fpu_begin);
+EXPORT_SYMBOL_GPL(kernel_fpu_begin_mask);
 
 void kernel_fpu_end(void)
 {
index 0bd1a0fc587e0f830e25e5b4199972ad1471ff96..84c1821819afb8fb3da3b09b8d429930bd1d4da3 100644 (file)
@@ -225,7 +225,7 @@ static inline u64 sev_es_rd_ghcb_msr(void)
        return __rdmsr(MSR_AMD64_SEV_ES_GHCB);
 }
 
-static inline void sev_es_wr_ghcb_msr(u64 val)
+static __always_inline void sev_es_wr_ghcb_msr(u64 val)
 {
        u32 low, high;
 
@@ -286,6 +286,12 @@ static enum es_result vc_write_mem(struct es_em_ctxt *ctxt,
        u16 d2;
        u8  d1;
 
+       /* If instruction ran in kernel mode and the I/O buffer is in kernel space */
+       if (!user_mode(ctxt->regs) && !access_ok(target, size)) {
+               memcpy(dst, buf, size);
+               return ES_OK;
+       }
+
        switch (size) {
        case 1:
                memcpy(&d1, buf, 1);
@@ -335,6 +341,12 @@ static enum es_result vc_read_mem(struct es_em_ctxt *ctxt,
        u16 d2;
        u8  d1;
 
+       /* If instruction ran in kernel mode and the I/O buffer is in kernel space */
+       if (!user_mode(ctxt->regs) && !access_ok(s, size)) {
+               memcpy(buf, src, size);
+               return ES_OK;
+       }
+
        switch (size) {
        case 1:
                if (get_user(d1, s))
index 8ca66af96a547ddc6606e3783ec33950f7716e93..117e24fbfd8a07587532c613fd3386872273a2ad 100644 (file)
@@ -56,6 +56,7 @@
 #include <linux/numa.h>
 #include <linux/pgtable.h>
 #include <linux/overflow.h>
+#include <linux/syscore_ops.h>
 
 #include <asm/acpi.h>
 #include <asm/desc.h>
@@ -2083,6 +2084,23 @@ static void init_counter_refs(void)
        this_cpu_write(arch_prev_mperf, mperf);
 }
 
+#ifdef CONFIG_PM_SLEEP
+static struct syscore_ops freq_invariance_syscore_ops = {
+       .resume = init_counter_refs,
+};
+
+static void register_freq_invariance_syscore_ops(void)
+{
+       /* Bail out if registered already. */
+       if (freq_invariance_syscore_ops.node.prev)
+               return;
+
+       register_syscore_ops(&freq_invariance_syscore_ops);
+}
+#else
+static inline void register_freq_invariance_syscore_ops(void) {}
+#endif
+
 static void init_freq_invariance(bool secondary, bool cppc_ready)
 {
        bool ret = false;
@@ -2109,6 +2127,7 @@ static void init_freq_invariance(bool secondary, bool cppc_ready)
        if (ret) {
                init_counter_refs();
                static_branch_enable(&arch_scale_freq_key);
+               register_freq_invariance_syscore_ops();
                pr_info("Estimated ratio of average max frequency by base frequency (times 1024): %llu\n", arch_max_freq_ratio);
        } else {
                pr_debug("Couldn't determine max cpu frequency, necessary for scale-invariant accounting.\n");
index 4321fa02e18df07368689469049cad31b87eca04..419365c48b2ada2b40094552affd0e4cd4ba433c 100644 (file)
 #include <asm/fpu/api.h>
 #include <asm/asm.h>
 
+/*
+ * Use KFPU_387.  MMX instructions are not affected by MXCSR,
+ * but both AMD and Intel documentation states that even integer MMX
+ * operations will result in #MF if an exception is pending in FCW.
+ *
+ * EMMS is not needed afterwards because, after calling kernel_fpu_end(),
+ * any subsequent user of the 387 stack will reinitialize it using
+ * KFPU_387.
+ */
+
 void *_mmx_memcpy(void *to, const void *from, size_t len)
 {
        void *p;
@@ -37,7 +47,7 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
        p = to;
        i = len >> 6; /* len/64 */
 
-       kernel_fpu_begin();
+       kernel_fpu_begin_mask(KFPU_387);
 
        __asm__ __volatile__ (
                "1: prefetch (%0)\n"            /* This set is 28 bytes */
@@ -127,7 +137,7 @@ static void fast_clear_page(void *page)
 {
        int i;
 
-       kernel_fpu_begin();
+       kernel_fpu_begin_mask(KFPU_387);
 
        __asm__ __volatile__ (
                "  pxor %%mm0, %%mm0\n" : :
@@ -160,7 +170,7 @@ static void fast_copy_page(void *to, void *from)
 {
        int i;
 
-       kernel_fpu_begin();
+       kernel_fpu_begin_mask(KFPU_387);
 
        /*
         * maybe the prefetch stuff can go before the expensive fnsave...
@@ -247,7 +257,7 @@ static void fast_clear_page(void *page)
 {
        int i;
 
-       kernel_fpu_begin();
+       kernel_fpu_begin_mask(KFPU_387);
 
        __asm__ __volatile__ (
                "  pxor %%mm0, %%mm0\n" : :
@@ -282,7 +292,7 @@ static void fast_copy_page(void *to, void *from)
 {
        int i;
 
-       kernel_fpu_begin();
+       kernel_fpu_begin_mask(KFPU_387);
 
        __asm__ __volatile__ (
                "1: prefetch (%0)\n"
index 14f1658167425a5d20f26cd3d73bcf4ad1a17948..6eb4c7a904c560d9ebaf6e7e871c364ffac123d1 100644 (file)
@@ -208,6 +208,16 @@ int device_links_read_lock_held(void)
 #endif
 #endif /* !CONFIG_SRCU */
 
+static bool device_is_ancestor(struct device *dev, struct device *target)
+{
+       while (target->parent) {
+               target = target->parent;
+               if (dev == target)
+                       return true;
+       }
+       return false;
+}
+
 /**
  * device_is_dependent - Check if one device depends on another one
  * @dev: Device to check dependencies for.
@@ -221,7 +231,12 @@ int device_is_dependent(struct device *dev, void *target)
        struct device_link *link;
        int ret;
 
-       if (dev == target)
+       /*
+        * The "ancestors" check is needed to catch the case when the target
+        * device has not been completely initialized yet and it is still
+        * missing from the list of children of its parent device.
+        */
+       if (dev == target || device_is_ancestor(dev, target))
                return 1;
 
        ret = device_for_each_child(dev, target, device_is_dependent);
@@ -456,7 +471,9 @@ static int devlink_add_symlinks(struct device *dev,
        struct device *con = link->consumer;
        char *buf;
 
-       len = max(strlen(dev_name(sup)), strlen(dev_name(con)));
+       len = max(strlen(dev_bus_name(sup)) + strlen(dev_name(sup)),
+                 strlen(dev_bus_name(con)) + strlen(dev_name(con)));
+       len += strlen(":");
        len += strlen("supplier:") + 1;
        buf = kzalloc(len, GFP_KERNEL);
        if (!buf)
@@ -470,12 +487,12 @@ static int devlink_add_symlinks(struct device *dev,
        if (ret)
                goto err_con;
 
-       snprintf(buf, len, "consumer:%s", dev_name(con));
+       snprintf(buf, len, "consumer:%s:%s", dev_bus_name(con), dev_name(con));
        ret = sysfs_create_link(&sup->kobj, &link->link_dev.kobj, buf);
        if (ret)
                goto err_con_dev;
 
-       snprintf(buf, len, "supplier:%s", dev_name(sup));
+       snprintf(buf, len, "supplier:%s:%s", dev_bus_name(sup), dev_name(sup));
        ret = sysfs_create_link(&con->kobj, &link->link_dev.kobj, buf);
        if (ret)
                goto err_sup_dev;
@@ -483,7 +500,7 @@ static int devlink_add_symlinks(struct device *dev,
        goto out;
 
 err_sup_dev:
-       snprintf(buf, len, "consumer:%s", dev_name(con));
+       snprintf(buf, len, "consumer:%s:%s", dev_bus_name(con), dev_name(con));
        sysfs_remove_link(&sup->kobj, buf);
 err_con_dev:
        sysfs_remove_link(&link->link_dev.kobj, "consumer");
@@ -506,7 +523,9 @@ static void devlink_remove_symlinks(struct device *dev,
        sysfs_remove_link(&link->link_dev.kobj, "consumer");
        sysfs_remove_link(&link->link_dev.kobj, "supplier");
 
-       len = max(strlen(dev_name(sup)), strlen(dev_name(con)));
+       len = max(strlen(dev_bus_name(sup)) + strlen(dev_name(sup)),
+                 strlen(dev_bus_name(con)) + strlen(dev_name(con)));
+       len += strlen(":");
        len += strlen("supplier:") + 1;
        buf = kzalloc(len, GFP_KERNEL);
        if (!buf) {
@@ -514,9 +533,9 @@ static void devlink_remove_symlinks(struct device *dev,
                return;
        }
 
-       snprintf(buf, len, "supplier:%s", dev_name(sup));
+       snprintf(buf, len, "supplier:%s:%s", dev_bus_name(sup), dev_name(sup));
        sysfs_remove_link(&con->kobj, buf);
-       snprintf(buf, len, "consumer:%s", dev_name(con));
+       snprintf(buf, len, "consumer:%s:%s", dev_bus_name(con), dev_name(con));
        sysfs_remove_link(&sup->kobj, buf);
        kfree(buf);
 }
@@ -737,8 +756,9 @@ struct device_link *device_link_add(struct device *consumer,
 
        link->link_dev.class = &devlink_class;
        device_set_pm_not_required(&link->link_dev);
-       dev_set_name(&link->link_dev, "%s--%s",
-                    dev_name(supplier), dev_name(consumer));
+       dev_set_name(&link->link_dev, "%s:%s--%s:%s",
+                    dev_bus_name(supplier), dev_name(supplier),
+                    dev_bus_name(consumer), dev_name(consumer));
        if (device_register(&link->link_dev)) {
                put_device(consumer);
                put_device(supplier);
@@ -1808,9 +1828,7 @@ const char *dev_driver_string(const struct device *dev)
         * never change once they are set, so they don't need special care.
         */
        drv = READ_ONCE(dev->driver);
-       return drv ? drv->name :
-                       (dev->bus ? dev->bus->name :
-                       (dev->class ? dev->class->name : ""));
+       return drv ? drv->name : dev_bus_name(dev);
 }
 EXPORT_SYMBOL(dev_driver_string);
 
index 2f32f38a11ed0b1c176bffeaf94005216152418c..9179825ff646f4e3aeb2fe87455caaec8db233de 100644 (file)
@@ -370,13 +370,6 @@ static void driver_bound(struct device *dev)
 
        device_pm_check_callbacks(dev);
 
-       /*
-        * Reorder successfully probed devices to the end of the device list.
-        * This ensures that suspend/resume order matches probe order, which
-        * is usually what drivers rely on.
-        */
-       device_pm_move_to_tail(dev);
-
        /*
         * Make sure the device is no longer in one of the deferred lists and
         * kick off retrying all pending devices
@@ -619,6 +612,8 @@ dev_groups_failed:
        else if (drv->remove)
                drv->remove(dev);
 probe_failed:
+       kfree(dev->dma_range_map);
+       dev->dma_range_map = NULL;
        if (dev->bus)
                blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
                                             BUS_NOTIFY_DRIVER_NOT_BOUND, dev);
index 95fd1549f87de38dcbad0846e923b06ee25bdbd2..8456d8384ac8e14842c603554516aede25239f6c 100644 (file)
@@ -366,6 +366,8 @@ int devm_platform_get_irqs_affinity(struct platform_device *dev,
                return -ERANGE;
 
        nvec = platform_irq_count(dev);
+       if (nvec < 0)
+               return nvec;
 
        if (nvec < minvec)
                return -ENOSPC;
index a60aee1a1a29150a94fd4a43d69f4154023baab4..65df9ef5b5bc053d8b85d56063fdf92a833f05aa 100644 (file)
@@ -235,36 +235,6 @@ static ssize_t ti_eqep_position_ceiling_write(struct counter_device *counter,
        return len;
 }
 
-static ssize_t ti_eqep_position_floor_read(struct counter_device *counter,
-                                          struct counter_count *count,
-                                          void *ext_priv, char *buf)
-{
-       struct ti_eqep_cnt *priv = counter->priv;
-       u32 qposinit;
-
-       regmap_read(priv->regmap32, QPOSINIT, &qposinit);
-
-       return sprintf(buf, "%u\n", qposinit);
-}
-
-static ssize_t ti_eqep_position_floor_write(struct counter_device *counter,
-                                           struct counter_count *count,
-                                           void *ext_priv, const char *buf,
-                                           size_t len)
-{
-       struct ti_eqep_cnt *priv = counter->priv;
-       int err;
-       u32 res;
-
-       err = kstrtouint(buf, 0, &res);
-       if (err < 0)
-               return err;
-
-       regmap_write(priv->regmap32, QPOSINIT, res);
-
-       return len;
-}
-
 static ssize_t ti_eqep_position_enable_read(struct counter_device *counter,
                                            struct counter_count *count,
                                            void *ext_priv, char *buf)
@@ -301,11 +271,6 @@ static struct counter_count_ext ti_eqep_position_ext[] = {
                .read   = ti_eqep_position_ceiling_read,
                .write  = ti_eqep_position_ceiling_write,
        },
-       {
-               .name   = "floor",
-               .read   = ti_eqep_position_floor_read,
-               .write  = ti_eqep_position_floor_write,
-       },
        {
                .name   = "enable",
                .read   = ti_eqep_position_enable_read,
index 52acd77438ede98280f0ab6e3364f4e28c8becc7..251e75c9ba9d0dda26e17d2558715a00a7a923bb 100644 (file)
@@ -268,6 +268,11 @@ static const struct pci_device_id intel_th_pci_id_table[] = {
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7aa6),
                .driver_data = (kernel_ulong_t)&intel_th_2x,
        },
+       {
+               /* Alder Lake-P */
+               PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x51a6),
+               .driver_data = (kernel_ulong_t)&intel_th_2x,
+       },
        {
                /* Alder Lake CPU */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x466f),
index 3e7df1c0477f75a6e75680c0e55816e672202dce..81d7b21d31ec27312ab39b853f28593781d005d8 100644 (file)
@@ -64,7 +64,7 @@ static void stm_heartbeat_unlink(struct stm_source_data *data)
 
 static int stm_heartbeat_init(void)
 {
-       int i, ret = -ENOMEM;
+       int i, ret;
 
        if (nr_devs < 0 || nr_devs > STM_HEARTBEAT_MAX)
                return -EINVAL;
@@ -72,8 +72,10 @@ static int stm_heartbeat_init(void)
        for (i = 0; i < nr_devs; i++) {
                stm_heartbeat[i].data.name =
                        kasprintf(GFP_KERNEL, "heartbeat.%d", i);
-               if (!stm_heartbeat[i].data.name)
+               if (!stm_heartbeat[i].data.name) {
+                       ret = -ENOMEM;
                        goto fail_unregister;
+               }
 
                stm_heartbeat[i].data.nr_chans  = 1;
                stm_heartbeat[i].data.link      = stm_heartbeat_link;
index b11c8c47ba2aaee658e6d68709062910736e0cf4..e946903b099367b02faafbc46e5e15da646071ba 100644 (file)
@@ -397,16 +397,12 @@ static int tiadc_iio_buffered_hardware_setup(struct device *dev,
        ret = devm_request_threaded_irq(dev, irq, pollfunc_th, pollfunc_bh,
                                flags, indio_dev->name, indio_dev);
        if (ret)
-               goto error_kfifo_free;
+               return ret;
 
        indio_dev->setup_ops = setup_ops;
        indio_dev->modes |= INDIO_BUFFER_SOFTWARE;
 
        return 0;
-
-error_kfifo_free:
-       iio_kfifo_free(indio_dev->buffer);
-       return ret;
 }
 
 static const char * const chan_name_ain[] = {
index 0507283bd4c1dedd78780f9c20dcca39beef0748..2dbd2646e44e97f3edd899143b5380245156ebe7 100644 (file)
  * @sdata: Sensor data.
  *
  * returns:
- * 0 - no new samples available
- * 1 - new samples available
- * negative - error or unknown
+ * false - no new samples available or read error
+ * true - new samples available
  */
-static int st_sensors_new_samples_available(struct iio_dev *indio_dev,
-                                           struct st_sensor_data *sdata)
+static bool st_sensors_new_samples_available(struct iio_dev *indio_dev,
+                                            struct st_sensor_data *sdata)
 {
        int ret, status;
 
        /* How would I know if I can't check it? */
        if (!sdata->sensor_settings->drdy_irq.stat_drdy.addr)
-               return -EINVAL;
+               return true;
 
        /* No scan mask, no interrupt */
        if (!indio_dev->active_scan_mask)
-               return 0;
+               return false;
 
        ret = regmap_read(sdata->regmap,
                          sdata->sensor_settings->drdy_irq.stat_drdy.addr,
                          &status);
        if (ret < 0) {
                dev_err(sdata->dev, "error checking samples available\n");
-               return ret;
+               return false;
        }
 
-       if (status & sdata->sensor_settings->drdy_irq.stat_drdy.mask)
-               return 1;
-
-       return 0;
+       return !!(status & sdata->sensor_settings->drdy_irq.stat_drdy.mask);
 }
 
 /**
@@ -180,9 +176,15 @@ int st_sensors_allocate_trigger(struct iio_dev *indio_dev,
 
        /* Tell the interrupt handler that we're dealing with edges */
        if (irq_trig == IRQF_TRIGGER_FALLING ||
-           irq_trig == IRQF_TRIGGER_RISING)
+           irq_trig == IRQF_TRIGGER_RISING) {
+               if (!sdata->sensor_settings->drdy_irq.stat_drdy.addr) {
+                       dev_err(&indio_dev->dev,
+                               "edge IRQ not supported w/o stat register.\n");
+                       err = -EOPNOTSUPP;
+                       goto iio_trigger_free;
+               }
                sdata->edge_irq = true;
-       else
+       } else {
                /*
                 * If we're not using edges (i.e. level interrupts) we
                 * just mask off the IRQ, handle one interrupt, then
@@ -190,6 +192,7 @@ int st_sensors_allocate_trigger(struct iio_dev *indio_dev,
                 * interrupt handler top half again and start over.
                 */
                irq_trig |= IRQF_ONESHOT;
+       }
 
        /*
         * If the interrupt pin is Open Drain, by definition this
index 28921b62e64203427a79272798ac9cea4f3c93ce..e9297c25d4ef63b38c5407baadf47a10d8b0e716 100644 (file)
@@ -187,9 +187,9 @@ static ssize_t ad5504_write_dac_powerdown(struct iio_dev *indio_dev,
                return ret;
 
        if (pwr_down)
-               st->pwr_down_mask |= (1 << chan->channel);
-       else
                st->pwr_down_mask &= ~(1 << chan->channel);
+       else
+               st->pwr_down_mask |= (1 << chan->channel);
 
        ret = ad5504_spi_write(st, AD5504_ADDR_CTRL,
                                AD5504_DAC_PWRDWN_MODE(st->pwr_down_mode) |
index a2f820997afc2a13f7960093e0d3c3570c2076b4..37fd0b65a0140085a9672269bd02adf8da391d6f 100644 (file)
@@ -601,7 +601,7 @@ static int sx9310_read_thresh(struct sx9310_data *data,
                return ret;
 
        regval = FIELD_GET(SX9310_REG_PROX_CTRL8_9_PTHRESH_MASK, regval);
-       if (regval > ARRAY_SIZE(sx9310_pthresh_codes))
+       if (regval >= ARRAY_SIZE(sx9310_pthresh_codes))
                return -EINVAL;
 
        *val = sx9310_pthresh_codes[regval];
@@ -1305,7 +1305,8 @@ sx9310_get_default_reg(struct sx9310_data *data, int i,
                if (ret)
                        break;
 
-               pos = min(max(ilog2(pos), 3), 10) - 3;
+               /* Powers of 2, except for a gap between 16 and 64 */
+               pos = clamp(ilog2(pos), 3, 11) - (pos >= 32 ? 4 : 3);
                reg_def->def &= ~SX9310_REG_PROX_CTRL7_AVGPOSFILT_MASK;
                reg_def->def |= FIELD_PREP(SX9310_REG_PROX_CTRL7_AVGPOSFILT_MASK,
                                           pos);
index 503fe54a0bb937ad9684b001cf3dd59dbf2741b1..608ccb1d8bc82feb28eef57a1a31f58f27b1851a 100644 (file)
@@ -248,6 +248,12 @@ static int mlx90632_set_meas_type(struct regmap *regmap, u8 type)
        if (ret < 0)
                return ret;
 
+       /*
+        * Give the mlx90632 some time to reset properly before sending a new I2C command
+        * if this is not done, the following I2C command(s) will not be accepted.
+        */
+       usleep_range(150, 200);
+
        ret = regmap_write_bits(regmap, MLX90632_REG_CONTROL,
                                 (MLX90632_CFG_MTYP_MASK | MLX90632_CFG_PWR_MASK),
                                 (MLX90632_MTYP_STATUS(type) | MLX90632_PWR_STATUS_HALT));
index 94920a51c6286375ea055d74b13c1c11073782c2..b147f22a78f484509f4205ab9fb50715387f40cb 100644 (file)
@@ -493,8 +493,9 @@ config TI_SCI_INTA_IRQCHIP
          TI System Controller, say Y here. Otherwise, say N.
 
 config TI_PRUSS_INTC
-       tristate "TI PRU-ICSS Interrupt Controller"
-       depends on ARCH_DAVINCI || SOC_AM33XX || SOC_AM43XX || SOC_DRA7XX || ARCH_KEYSTONE || ARCH_K3
+       tristate
+       depends on TI_PRUSS
+       default TI_PRUSS
        select IRQ_DOMAIN
        help
          This enables support for the PRU-ICSS Local Interrupt Controller
index 5f5eb8877c4134849d1c484a723274dfd467cb74..25c9a9c06e4100d727e8a192e69b3c31387181b8 100644 (file)
@@ -167,7 +167,7 @@ static void bcm2836_arm_irqchip_handle_ipi(struct irq_desc *desc)
        chained_irq_exit(chip, desc);
 }
 
-static void bcm2836_arm_irqchip_ipi_eoi(struct irq_data *d)
+static void bcm2836_arm_irqchip_ipi_ack(struct irq_data *d)
 {
        int cpu = smp_processor_id();
 
@@ -195,7 +195,7 @@ static struct irq_chip bcm2836_arm_irqchip_ipi = {
        .name           = "IPI",
        .irq_mask       = bcm2836_arm_irqchip_dummy_op,
        .irq_unmask     = bcm2836_arm_irqchip_dummy_op,
-       .irq_eoi        = bcm2836_arm_irqchip_ipi_eoi,
+       .irq_ack        = bcm2836_arm_irqchip_ipi_ack,
        .ipi_send_mask  = bcm2836_arm_irqchip_ipi_send_mask,
 };
 
index 9ed1bc47366348b5b9066bc4873a317d82c011af..09b91b81851cca97df7c3a7784b0e683152c2e42 100644 (file)
@@ -142,8 +142,8 @@ static void liointc_resume(struct irq_chip_generic *gc)
 
 static const char * const parent_names[] = {"int0", "int1", "int2", "int3"};
 
-int __init liointc_of_init(struct device_node *node,
-                               struct device_node *parent)
+static int __init liointc_of_init(struct device_node *node,
+                                 struct device_node *parent)
 {
        struct irq_chip_generic *gc;
        struct irq_domain *domain;
index 95d4fd8f7a96818e2b9687503e3521317a939da8..0bbb0b2d0dd5f72f79e078061744af225bd83aa7 100644 (file)
@@ -197,6 +197,13 @@ static int mips_cpu_ipi_alloc(struct irq_domain *domain, unsigned int virq,
                if (ret)
                        return ret;
 
+               ret = irq_domain_set_hwirq_and_chip(domain->parent, virq + i, hwirq,
+                                                   &mips_mt_cpu_irq_controller,
+                                                   NULL);
+
+               if (ret)
+                       return ret;
+
                ret = irq_set_irq_type(virq + i, IRQ_TYPE_LEVEL_HIGH);
                if (ret)
                        return ret;
index 0aa50d025ef6c2f877750b7d7b981bdc1d7fd8f3..fbb354413ffa139a31844559a0fb0db56c1390f1 100644 (file)
@@ -66,7 +66,7 @@ static int sl28cpld_intc_probe(struct platform_device *pdev)
        irqchip->chip.num_regs = 1;
        irqchip->chip.status_base = base + INTC_IP;
        irqchip->chip.mask_base = base + INTC_IE;
-       irqchip->chip.mask_invert = true,
+       irqchip->chip.mask_invert = true;
        irqchip->chip.ack_base = base + INTC_IP;
 
        return devm_regmap_add_irq_chip_fwnode(dev, dev_fwnode(dev),
index 2aa6648fa41f953051e839f37664ad08dbb9734e..5a491d2cd1ae61f930583e2fb968f74c06ddb14d 100644 (file)
@@ -1512,6 +1512,7 @@ static int rtsx_pci_probe(struct pci_dev *pcidev,
        struct pcr_handle *handle;
        u32 base, len;
        int ret, i, bar = 0;
+       u8 val;
 
        dev_dbg(&(pcidev->dev),
                ": Realtek PCI-E Card Reader found at %s [%04x:%04x] (rev %x)\n",
@@ -1577,7 +1578,11 @@ static int rtsx_pci_probe(struct pci_dev *pcidev,
        pcr->host_cmds_addr = pcr->rtsx_resv_buf_addr;
        pcr->host_sg_tbl_ptr = pcr->rtsx_resv_buf + HOST_CMDS_BUF_LEN;
        pcr->host_sg_tbl_addr = pcr->rtsx_resv_buf_addr + HOST_CMDS_BUF_LEN;
-
+       rtsx_pci_read_register(pcr, ASPM_FORCE_CTL, &val);
+       if (val & FORCE_ASPM_CTL0 && val & FORCE_ASPM_CTL1)
+               pcr->aspm_enabled = false;
+       else
+               pcr->aspm_enabled = true;
        pcr->card_inserted = 0;
        pcr->card_removed = 0;
        INIT_DELAYED_WORK(&pcr->carddet_work, rtsx_pci_card_detect);
index 1456eabf96010f94584e17db22dfda4263195801..69d04eca767f5ff61ed709e6910d3f411e2cbb44 100644 (file)
@@ -1037,7 +1037,7 @@ kill_processes:
 
        if (hard_reset) {
                /* Release kernel context */
-               if (hl_ctx_put(hdev->kernel_ctx) == 1)
+               if (hdev->kernel_ctx && hl_ctx_put(hdev->kernel_ctx) == 1)
                        hdev->kernel_ctx = NULL;
                hl_vm_fini(hdev);
                hl_mmu_fini(hdev);
@@ -1487,6 +1487,15 @@ void hl_device_fini(struct hl_device *hdev)
                }
        }
 
+       /* Disable PCI access from device F/W so it won't send us additional
+        * interrupts. We disable MSI/MSI-X at the halt_engines function and we
+        * can't have the F/W sending us interrupts after that. We need to
+        * disable the access here because if the device is marked disable, the
+        * message won't be send. Also, in case of heartbeat, the device CPU is
+        * marked as disable so this message won't be sent
+        */
+       hl_fw_send_pci_access_msg(hdev, CPUCP_PACKET_DISABLE_PCI_ACCESS);
+
        /* Mark device as disabled */
        hdev->disabled = true;
 
index 20f77f58edef5573577592110289f3937202208f..c9a12980218ac55545c5f6a31684d85a039f419f 100644 (file)
@@ -402,6 +402,10 @@ int hl_fw_cpucp_pci_counters_get(struct hl_device *hdev,
        }
        counters->rx_throughput = result;
 
+       memset(&pkt, 0, sizeof(pkt));
+       pkt.ctl = cpu_to_le32(CPUCP_PACKET_PCIE_THROUGHPUT_GET <<
+                       CPUCP_PKT_CTL_OPCODE_SHIFT);
+
        /* Fetch PCI tx counter */
        pkt.index = cpu_to_le32(cpucp_pcie_throughput_tx);
        rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
@@ -414,6 +418,7 @@ int hl_fw_cpucp_pci_counters_get(struct hl_device *hdev,
        counters->tx_throughput = result;
 
        /* Fetch PCI replay counter */
+       memset(&pkt, 0, sizeof(pkt));
        pkt.ctl = cpu_to_le32(CPUCP_PACKET_PCIE_REPLAY_CNT_GET <<
                        CPUCP_PKT_CTL_OPCODE_SHIFT);
 
index e0d7f5fbaa5c3f2950068d4618b2d5fb5b540a9c..60e16dc4bcac33b0f2bd4dc073d2f483e3197d00 100644 (file)
@@ -2182,6 +2182,7 @@ void hl_mmu_v1_set_funcs(struct hl_device *hdev, struct hl_mmu_funcs *mmu);
 int hl_mmu_va_to_pa(struct hl_ctx *ctx, u64 virt_addr, u64 *phys_addr);
 int hl_mmu_get_tlb_info(struct hl_ctx *ctx, u64 virt_addr,
                        struct hl_mmu_hop_info *hops);
+bool hl_is_dram_va(struct hl_device *hdev, u64 virt_addr);
 
 int hl_fw_load_fw_to_device(struct hl_device *hdev, const char *fw_name,
                                void __iomem *dst, u32 src_offset, u32 size);
index 12efbd9d2e3a0b7459dcb8298e69248bef1e1750..d25892d61ec9ddc2ceda987b39d45d0ec6ec75d9 100644 (file)
@@ -133,6 +133,8 @@ static int hw_idle(struct hl_device *hdev, struct hl_info_args *args)
 
        hw_idle.is_idle = hdev->asic_funcs->is_device_idle(hdev,
                                        &hw_idle.busy_engines_mask_ext, NULL);
+       hw_idle.busy_engines_mask =
+                       lower_32_bits(hw_idle.busy_engines_mask_ext);
 
        return copy_to_user(out, &hw_idle,
                min((size_t) max_size, sizeof(hw_idle))) ? -EFAULT : 0;
index cbe9da4e0211b5118b112b2bdf92f91cfef46360..5d4fbdcaefe3f2bb0c4522d47e68372720c95c41 100644 (file)
@@ -886,8 +886,10 @@ static void unmap_phys_pg_pack(struct hl_ctx *ctx, u64 vaddr,
 {
        struct hl_device *hdev = ctx->hdev;
        u64 next_vaddr, i;
+       bool is_host_addr;
        u32 page_size;
 
+       is_host_addr = !hl_is_dram_va(hdev, vaddr);
        page_size = phys_pg_pack->page_size;
        next_vaddr = vaddr;
 
@@ -900,9 +902,13 @@ static void unmap_phys_pg_pack(struct hl_ctx *ctx, u64 vaddr,
                /*
                 * unmapping on Palladium can be really long, so avoid a CPU
                 * soft lockup bug by sleeping a little between unmapping pages
+                *
+                * In addition, when unmapping host memory we pass through
+                * the Linux kernel to unpin the pages and that takes a long
+                * time. Therefore, sleep every 32K pages to avoid soft lockup
                 */
-               if (hdev->pldm)
-                       usleep_range(500, 1000);
+               if (hdev->pldm || (is_host_addr && (i & 0x7FFF) == 0))
+                       usleep_range(50, 200);
        }
 }
 
index 33ae953d3a3680126cbe42090ea677fb25b9f582..28a4638741d8881d4589a3023297e6e0718ece2f 100644 (file)
@@ -9,7 +9,7 @@
 
 #include "habanalabs.h"
 
-static bool is_dram_va(struct hl_device *hdev, u64 virt_addr)
+bool hl_is_dram_va(struct hl_device *hdev, u64 virt_addr)
 {
        struct asic_fixed_properties *prop = &hdev->asic_prop;
 
@@ -156,7 +156,7 @@ int hl_mmu_unmap_page(struct hl_ctx *ctx, u64 virt_addr, u32 page_size,
        if (!hdev->mmu_enable)
                return 0;
 
-       is_dram_addr = is_dram_va(hdev, virt_addr);
+       is_dram_addr = hl_is_dram_va(hdev, virt_addr);
 
        if (is_dram_addr)
                mmu_prop = &prop->dmmu;
@@ -236,7 +236,7 @@ int hl_mmu_map_page(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr,
        if (!hdev->mmu_enable)
                return 0;
 
-       is_dram_addr = is_dram_va(hdev, virt_addr);
+       is_dram_addr = hl_is_dram_va(hdev, virt_addr);
 
        if (is_dram_addr)
                mmu_prop = &prop->dmmu;
index 2ce6ea89d4fa22930aef8e325e2db9ce1b193d49..06d8a44dd5d428e0bfdbb12a5571afef8c7163c8 100644 (file)
@@ -467,8 +467,16 @@ static void hl_mmu_v1_fini(struct hl_device *hdev)
 {
        /* MMU H/W fini was already done in device hw_fini() */
 
-       kvfree(hdev->mmu_priv.dr.mmu_shadow_hop0);
-       gen_pool_destroy(hdev->mmu_priv.dr.mmu_pgt_pool);
+       if (!ZERO_OR_NULL_PTR(hdev->mmu_priv.hr.mmu_shadow_hop0)) {
+               kvfree(hdev->mmu_priv.dr.mmu_shadow_hop0);
+               gen_pool_destroy(hdev->mmu_priv.dr.mmu_pgt_pool);
+       }
+
+       /* Make sure that if we arrive here again without init was called we
+        * won't cause kernel panic. This can happen for example if we fail
+        * during hard reset code at certain points
+        */
+       hdev->mmu_priv.dr.mmu_shadow_hop0 = NULL;
 }
 
 /**
index 8c09e4466af8ce72eff7787b1986389f68bdb595..b328ddaa64ee51efde1a58188c2dfd965c87be46 100644 (file)
@@ -4002,7 +4002,8 @@ static int gaudi_cb_mmap(struct hl_device *hdev, struct vm_area_struct *vma,
        vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP |
                        VM_DONTCOPY | VM_NORESERVE;
 
-       rc = dma_mmap_coherent(hdev->dev, vma, cpu_addr, dma_addr, size);
+       rc = dma_mmap_coherent(hdev->dev, vma, cpu_addr,
+                               (dma_addr - HOST_PHYS_BASE), size);
        if (rc)
                dev_err(hdev->dev, "dma_mmap_coherent error %d", rc);
 
index b8b4aa636b7cb652a4e7e5790ef26cfb6792fee6..63679a747d2cd285e74f8f5681e3688873efbf32 100644 (file)
@@ -2719,7 +2719,8 @@ static int goya_cb_mmap(struct hl_device *hdev, struct vm_area_struct *vma,
        vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP |
                        VM_DONTCOPY | VM_NORESERVE;
 
-       rc = dma_mmap_coherent(hdev->dev, vma, cpu_addr, dma_addr, size);
+       rc = dma_mmap_coherent(hdev->dev, vma, cpu_addr,
+                               (dma_addr - HOST_PHYS_BASE), size);
        if (rc)
                dev_err(hdev->dev, "dma_mmap_coherent error %d", rc);
 
index 65d5ea00fc9d4bb8eac784605f3b15527afdfe16..1cb158d7233f483bb26cd5fe10607042e11aec5e 100644 (file)
@@ -1,2 +1,2 @@
 # SPDX-License-Identifier: GPL-2.0
-obj-y          += phy-ingenic-usb.o
+obj-$(CONFIG_PHY_INGENIC_USB)          += phy-ingenic-usb.o
index d38def43b1bf6214303deef36d6066b848005f5d..55f8e6c048ab3ac26f60055d0814cfd9dd8683e7 100644 (file)
@@ -49,7 +49,9 @@ config PHY_MTK_HDMI
 
 config PHY_MTK_MIPI_DSI
        tristate "MediaTek MIPI-DSI Driver"
-       depends on ARCH_MEDIATEK && OF
+       depends on ARCH_MEDIATEK || COMPILE_TEST
+       depends on COMMON_CLK
+       depends on OF
        select GENERIC_PHY
        help
          Support MIPI DSI for Mediatek SoCs.
index 442522ba487f07c1aa7d87b179202cd9f7923c7a..4728e2bff6620bea2d6971cdba4445df34b1ff36 100644 (file)
@@ -662,35 +662,42 @@ static int cpcap_usb_phy_probe(struct platform_device *pdev)
        generic_phy = devm_phy_create(ddata->dev, NULL, &ops);
        if (IS_ERR(generic_phy)) {
                error = PTR_ERR(generic_phy);
-               return PTR_ERR(generic_phy);
+               goto out_reg_disable;
        }
 
        phy_set_drvdata(generic_phy, ddata);
 
        phy_provider = devm_of_phy_provider_register(ddata->dev,
                                                     of_phy_simple_xlate);
-       if (IS_ERR(phy_provider))
-               return PTR_ERR(phy_provider);
+       if (IS_ERR(phy_provider)) {
+               error = PTR_ERR(phy_provider);
+               goto out_reg_disable;
+       }
 
        error = cpcap_usb_init_optional_pins(ddata);
        if (error)
-               return error;
+               goto out_reg_disable;
 
        cpcap_usb_init_optional_gpios(ddata);
 
        error = cpcap_usb_init_iio(ddata);
        if (error)
-               return error;
+               goto out_reg_disable;
 
        error = cpcap_usb_init_interrupts(pdev, ddata);
        if (error)
-               return error;
+               goto out_reg_disable;
 
        usb_add_phy_dev(&ddata->phy);
        atomic_set(&ddata->active, 1);
        schedule_delayed_work(&ddata->detect_work, msecs_to_jiffies(1));
 
        return 0;
+
+out_reg_disable:
+       regulator_disable(ddata->vusb);
+
+       return error;
 }
 
 static int cpcap_usb_phy_remove(struct platform_device *pdev)
index 8b7f941a9bb7f104f79f205cd236053e34cdc293..b8c4159bc32d0163c0454fc01dd761f860d0ed24 100644 (file)
@@ -2316,7 +2316,7 @@ static int icm_usb4_switch_nvm_authenticate_status(struct tb_switch *sw,
 
        if (auth && auth->reply.route_hi == sw->config.route_hi &&
            auth->reply.route_lo == sw->config.route_lo) {
-               tb_dbg(tb, "NVM_AUTH found for %llx flags 0x%#x status %#x\n",
+               tb_dbg(tb, "NVM_AUTH found for %llx flags %#x status %#x\n",
                       tb_route(sw), auth->reply.hdr.flags, auth->reply.status);
                if (auth->reply.hdr.flags & ICM_FLAGS_ERROR)
                        ret = -EIO;
index 118b29912289845328e91cba182ecc56ad586473..e0c00a1b07639b5a12a583059af4150c3f8d44aa 100644 (file)
@@ -648,6 +648,14 @@ static void wait_for_xmitr(struct uart_port *port)
                                  (val & STAT_TX_RDY(port)), 1, 10000);
 }
 
+static void wait_for_xmite(struct uart_port *port)
+{
+       u32 val;
+
+       readl_poll_timeout_atomic(port->membase + UART_STAT, val,
+                                 (val & STAT_TX_EMP), 1, 10000);
+}
+
 static void mvebu_uart_console_putchar(struct uart_port *port, int ch)
 {
        wait_for_xmitr(port);
@@ -675,7 +683,7 @@ static void mvebu_uart_console_write(struct console *co, const char *s,
 
        uart_console_write(port, s, count, mvebu_uart_console_putchar);
 
-       wait_for_xmitr(port);
+       wait_for_xmite(port);
 
        if (ier)
                writel(ier, port->membase + UART_CTRL(port));
index 8034489337d75241b1a42d781839679446e83d82..4a208a95e9219ba34f6b539d0e84af2d9556ea69 100644 (file)
@@ -143,9 +143,8 @@ LIST_HEAD(tty_drivers);                     /* linked list of tty drivers */
 DEFINE_MUTEX(tty_mutex);
 
 static ssize_t tty_read(struct file *, char __user *, size_t, loff_t *);
-static ssize_t tty_write(struct file *, const char __user *, size_t, loff_t *);
-ssize_t redirected_tty_write(struct file *, const char __user *,
-                                                       size_t, loff_t *);
+static ssize_t tty_write(struct kiocb *, struct iov_iter *);
+ssize_t redirected_tty_write(struct kiocb *, struct iov_iter *);
 static __poll_t tty_poll(struct file *, poll_table *);
 static int tty_open(struct inode *, struct file *);
 long tty_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
@@ -438,8 +437,7 @@ static ssize_t hung_up_tty_read(struct file *file, char __user *buf,
        return 0;
 }
 
-static ssize_t hung_up_tty_write(struct file *file, const char __user *buf,
-                                size_t count, loff_t *ppos)
+static ssize_t hung_up_tty_write(struct kiocb *iocb, struct iov_iter *from)
 {
        return -EIO;
 }
@@ -478,7 +476,8 @@ static void tty_show_fdinfo(struct seq_file *m, struct file *file)
 static const struct file_operations tty_fops = {
        .llseek         = no_llseek,
        .read           = tty_read,
-       .write          = tty_write,
+       .write_iter     = tty_write,
+       .splice_write   = iter_file_splice_write,
        .poll           = tty_poll,
        .unlocked_ioctl = tty_ioctl,
        .compat_ioctl   = tty_compat_ioctl,
@@ -491,7 +490,8 @@ static const struct file_operations tty_fops = {
 static const struct file_operations console_fops = {
        .llseek         = no_llseek,
        .read           = tty_read,
-       .write          = redirected_tty_write,
+       .write_iter     = redirected_tty_write,
+       .splice_write   = iter_file_splice_write,
        .poll           = tty_poll,
        .unlocked_ioctl = tty_ioctl,
        .compat_ioctl   = tty_compat_ioctl,
@@ -503,7 +503,7 @@ static const struct file_operations console_fops = {
 static const struct file_operations hung_up_tty_fops = {
        .llseek         = no_llseek,
        .read           = hung_up_tty_read,
-       .write          = hung_up_tty_write,
+       .write_iter     = hung_up_tty_write,
        .poll           = hung_up_tty_poll,
        .unlocked_ioctl = hung_up_tty_ioctl,
        .compat_ioctl   = hung_up_tty_compat_ioctl,
@@ -606,9 +606,9 @@ static void __tty_hangup(struct tty_struct *tty, int exit_session)
        /* This breaks for file handles being sent over AF_UNIX sockets ? */
        list_for_each_entry(priv, &tty->tty_files, list) {
                filp = priv->file;
-               if (filp->f_op->write == redirected_tty_write)
+               if (filp->f_op->write_iter == redirected_tty_write)
                        cons_filp = filp;
-               if (filp->f_op->write != tty_write)
+               if (filp->f_op->write_iter != tty_write)
                        continue;
                closecount++;
                __tty_fasync(-1, filp, 0);      /* can't block */
@@ -901,9 +901,9 @@ static inline ssize_t do_tty_write(
        ssize_t (*write)(struct tty_struct *, struct file *, const unsigned char *, size_t),
        struct tty_struct *tty,
        struct file *file,
-       const char __user *buf,
-       size_t count)
+       struct iov_iter *from)
 {
+       size_t count = iov_iter_count(from);
        ssize_t ret, written = 0;
        unsigned int chunk;
 
@@ -955,14 +955,20 @@ static inline ssize_t do_tty_write(
                size_t size = count;
                if (size > chunk)
                        size = chunk;
+
                ret = -EFAULT;
-               if (copy_from_user(tty->write_buf, buf, size))
+               if (copy_from_iter(tty->write_buf, size, from) != size)
                        break;
+
                ret = write(tty, file, tty->write_buf, size);
                if (ret <= 0)
                        break;
+
+               /* FIXME! Have Al check this! */
+               if (ret != size)
+                       iov_iter_revert(from, size-ret);
+
                written += ret;
-               buf += ret;
                count -= ret;
                if (!count)
                        break;
@@ -1022,9 +1028,9 @@ void tty_write_message(struct tty_struct *tty, char *msg)
  *     write method will not be invoked in parallel for each device.
  */
 
-static ssize_t tty_write(struct file *file, const char __user *buf,
-                                               size_t count, loff_t *ppos)
+static ssize_t tty_write(struct kiocb *iocb, struct iov_iter *from)
 {
+       struct file *file = iocb->ki_filp;
        struct tty_struct *tty = file_tty(file);
        struct tty_ldisc *ld;
        ssize_t ret;
@@ -1038,17 +1044,16 @@ static ssize_t tty_write(struct file *file, const char __user *buf,
                tty_err(tty, "missing write_room method\n");
        ld = tty_ldisc_ref_wait(tty);
        if (!ld)
-               return hung_up_tty_write(file, buf, count, ppos);
+               return hung_up_tty_write(iocb, from);
        if (!ld->ops->write)
                ret = -EIO;
        else
-               ret = do_tty_write(ld->ops->write, tty, file, buf, count);
+               ret = do_tty_write(ld->ops->write, tty, file, from);
        tty_ldisc_deref(ld);
        return ret;
 }
 
-ssize_t redirected_tty_write(struct file *file, const char __user *buf,
-                                               size_t count, loff_t *ppos)
+ssize_t redirected_tty_write(struct kiocb *iocb, struct iov_iter *iter)
 {
        struct file *p = NULL;
 
@@ -1059,11 +1064,11 @@ ssize_t redirected_tty_write(struct file *file, const char __user *buf,
 
        if (p) {
                ssize_t res;
-               res = vfs_write(p, buf, count, &p->f_pos);
+               res = vfs_iocb_iter_write(p, iocb, iter);
                fput(p);
                return res;
        }
-       return tty_write(file, buf, count, ppos);
+       return tty_write(iocb, iter);
 }
 
 /*
@@ -2295,7 +2300,7 @@ static int tioccons(struct file *file)
 {
        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;
-       if (file->f_op->write == redirected_tty_write) {
+       if (file->f_op->write_iter == redirected_tty_write) {
                struct file *f;
                spin_lock(&redirect_lock);
                f = redirect;
index 22a56c4dce678d507a80a27378412b1f91a30a8d..7990fee03fe4bb03f102a0cdb550771131e75be4 100644 (file)
@@ -185,7 +185,11 @@ static int cdns_imx_probe(struct platform_device *pdev)
        }
 
        data->num_clks = ARRAY_SIZE(imx_cdns3_core_clks);
-       data->clks = (struct clk_bulk_data *)imx_cdns3_core_clks;
+       data->clks = devm_kmemdup(dev, imx_cdns3_core_clks,
+                               sizeof(imx_cdns3_core_clks), GFP_KERNEL);
+       if (!data->clks)
+               return -ENOMEM;
+
        ret = devm_clk_bulk_get(dev, data->num_clks, data->clks);
        if (ret)
                return ret;
@@ -214,20 +218,16 @@ err:
        return ret;
 }
 
-static int cdns_imx_remove_core(struct device *dev, void *data)
-{
-       struct platform_device *pdev = to_platform_device(dev);
-
-       platform_device_unregister(pdev);
-
-       return 0;
-}
-
 static int cdns_imx_remove(struct platform_device *pdev)
 {
        struct device *dev = &pdev->dev;
+       struct cdns_imx *data = dev_get_drvdata(dev);
 
-       device_for_each_child(dev, NULL, cdns_imx_remove_core);
+       pm_runtime_get_sync(dev);
+       of_platform_depopulate(dev);
+       clk_bulk_disable_unprepare(data->num_clks, data->clks);
+       pm_runtime_disable(dev);
+       pm_runtime_put_noidle(dev);
        platform_set_drvdata(pdev, NULL);
 
        return 0;
index 0bd6b20435b8a547a1b4dd09cff6be3dbc985f3a..02d8bfae58fb141dfe4aac8720df258fdb0ab44b 100644 (file)
@@ -420,7 +420,10 @@ static void ast_vhub_stop_active_req(struct ast_vhub_ep *ep,
        u32 state, reg, loops;
 
        /* Stop DMA activity */
-       writel(0, ep->epn.regs + AST_VHUB_EP_DMA_CTLSTAT);
+       if (ep->epn.desc_mode)
+               writel(VHUB_EP_DMA_CTRL_RESET, ep->epn.regs + AST_VHUB_EP_DMA_CTLSTAT);
+       else
+               writel(0, ep->epn.regs + AST_VHUB_EP_DMA_CTLSTAT);
 
        /* Wait for it to complete */
        for (loops = 0; loops < 1000; loops++) {
index 3e88c7670b2ed40b019a2d3410236f07f2447079..fb01ff47b64cf63dafda5ab40692d9cf5d18ffc1 100644 (file)
@@ -17,7 +17,7 @@ if USB_BDC_UDC
 comment "Platform Support"
 config USB_BDC_PCI
        tristate "BDC support for PCIe based platforms"
-       depends on USB_PCI
+       depends on USB_PCI && BROKEN
        default USB_BDC_UDC
        help
                Enable support for platforms which have BDC connected through PCIe, such as Lego3 FPGA platform.
index 6a62bbd01324f5bb99998a489188a5949a72dd3e..ea114f922ccf6c0336e25d3a7e76c8c2cd0b19ab 100644 (file)
@@ -1529,10 +1529,13 @@ static ssize_t soft_connect_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t n)
 {
        struct usb_udc          *udc = container_of(dev, struct usb_udc, dev);
+       ssize_t                 ret;
 
+       mutex_lock(&udc_lock);
        if (!udc->driver) {
                dev_err(dev, "soft-connect without a gadget driver\n");
-               return -EOPNOTSUPP;
+               ret = -EOPNOTSUPP;
+               goto out;
        }
 
        if (sysfs_streq(buf, "connect")) {
@@ -1543,10 +1546,14 @@ static ssize_t soft_connect_store(struct device *dev,
                usb_gadget_udc_stop(udc);
        } else {
                dev_err(dev, "unsupported command '%s'\n", buf);
-               return -EINVAL;
+               ret = -EINVAL;
+               goto out;
        }
 
-       return n;
+       ret = n;
+out:
+       mutex_unlock(&udc_lock);
+       return ret;
 }
 static DEVICE_ATTR_WO(soft_connect);
 
index 1a953f44183aaf0cb99a89c8d18784eefead39e2..57067763b1005f7fcd021b5012cb7d1b11b97639 100644 (file)
@@ -2270,17 +2270,20 @@ static int dummy_hub_control(
                        }
                        fallthrough;
                case USB_PORT_FEAT_RESET:
+                       if (!(dum_hcd->port_status & USB_PORT_STAT_CONNECTION))
+                               break;
                        /* if it's already enabled, disable */
                        if (hcd->speed == HCD_USB3) {
-                               dum_hcd->port_status = 0;
                                dum_hcd->port_status =
                                        (USB_SS_PORT_STAT_POWER |
                                         USB_PORT_STAT_CONNECTION |
                                         USB_PORT_STAT_RESET);
-                       } else
+                       } else {
                                dum_hcd->port_status &= ~(USB_PORT_STAT_ENABLE
                                        | USB_PORT_STAT_LOW_SPEED
                                        | USB_PORT_STAT_HIGH_SPEED);
+                               dum_hcd->port_status |= USB_PORT_STAT_RESET;
+                       }
                        /*
                         * We want to reset device status. All but the
                         * Self powered feature
@@ -2292,7 +2295,8 @@ static int dummy_hub_control(
                         * interval? Is it still 50msec as for HS?
                         */
                        dum_hcd->re_timeout = jiffies + msecs_to_jiffies(50);
-                       fallthrough;
+                       set_link_state(dum_hcd);
+                       break;
                case USB_PORT_FEAT_C_CONNECTION:
                case USB_PORT_FEAT_C_RESET:
                case USB_PORT_FEAT_C_ENABLE:
index e358ae17d51e7c1ae0e13ca80c8ba0e1f317593e..1926b328b6aa7a2e5a7726561cd44ae111f40d76 100644 (file)
@@ -574,6 +574,7 @@ static int ehci_run (struct usb_hcd *hcd)
        struct ehci_hcd         *ehci = hcd_to_ehci (hcd);
        u32                     temp;
        u32                     hcc_params;
+       int                     rc;
 
        hcd->uses_new_polling = 1;
 
@@ -629,9 +630,20 @@ static int ehci_run (struct usb_hcd *hcd)
        down_write(&ehci_cf_port_reset_rwsem);
        ehci->rh_state = EHCI_RH_RUNNING;
        ehci_writel(ehci, FLAG_CF, &ehci->regs->configured_flag);
+
+       /* Wait until HC become operational */
        ehci_readl(ehci, &ehci->regs->command); /* unblock posted writes */
        msleep(5);
+       rc = ehci_handshake(ehci, &ehci->regs->status, STS_HALT, 0, 100 * 1000);
+
        up_write(&ehci_cf_port_reset_rwsem);
+
+       if (rc) {
+               ehci_err(ehci, "USB %x.%x, controller refused to start: %d\n",
+                        ((ehci->sbrn & 0xf0)>>4), (ehci->sbrn & 0x0f), rc);
+               return rc;
+       }
+
        ehci->last_periodic_enable = ktime_get_real();
 
        temp = HC_VERSION(ehci, ehci_readl(ehci, &ehci->caps->hc_capbase));
index 087402aec5cbeb43ab851364d6a99fff96a2b53a..9f9ab5ccea889df6e8aebf630d941a1c82c392c8 100644 (file)
@@ -345,6 +345,9 @@ static int ehci_bus_suspend (struct usb_hcd *hcd)
 
        unlink_empty_async_suspended(ehci);
 
+       /* Some Synopsys controllers mistakenly leave IAA turned on */
+       ehci_writel(ehci, STS_IAA, &ehci->regs->status);
+
        /* Any IAA cycle that started before the suspend is now invalid */
        end_iaa_cycle(ehci);
        ehci_handle_start_intr_unlinks(ehci);
index 5677b81c0915551581efd60caa50b2ef13007dfc..cf0c93a90200f8c487dbd0e3fa728c7c552d40d4 100644 (file)
@@ -2931,6 +2931,8 @@ static void queue_trb(struct xhci_hcd *xhci, struct xhci_ring *ring,
        trb->field[0] = cpu_to_le32(field1);
        trb->field[1] = cpu_to_le32(field2);
        trb->field[2] = cpu_to_le32(field3);
+       /* make sure TRB is fully written before giving it to the controller */
+       wmb();
        trb->field[3] = cpu_to_le32(field4);
 
        trace_xhci_queue_trb(ring, trb);
index 934be168635230b3dbc557559c8f78cfceab1f16..50bb91b6a4b8d8e4a39ba7e58d010470d5423776 100644 (file)
@@ -623,6 +623,13 @@ static void tegra_xusb_mbox_handle(struct tegra_xusb *tegra,
                                                                     enable);
                        if (err < 0)
                                break;
+
+                       /*
+                        * wait 500us for LFPS detector to be disabled before
+                        * sending ACK
+                        */
+                       if (!enable)
+                               usleep_range(500, 1000);
                }
 
                if (err < 0) {
index 5d39129406ea66668a62a4d9d990bb89f8ad3d49..c8ef24bac94fc7015df7358f52a2478ec8355f74 100644 (file)
@@ -2195,7 +2195,7 @@ cifs_get_tcon(struct cifs_ses *ses, struct smb3_fs_context *ctx)
        if (ses->server->capabilities & SMB2_GLOBAL_CAP_DIRECTORY_LEASING)
                tcon->nohandlecache = ctx->nohandlecache;
        else
-               tcon->nohandlecache = 1;
+               tcon->nohandlecache = true;
        tcon->nodelete = ctx->nodelete;
        tcon->local_lease = ctx->local_lease;
        INIT_LIST_HEAD(&tcon->pending_opens);
@@ -2628,7 +2628,7 @@ void reset_cifs_unix_caps(unsigned int xid, struct cifs_tcon *tcon,
        } else if (ctx)
                tcon->unix_ext = 1; /* Unix Extensions supported */
 
-       if (tcon->unix_ext == 0) {
+       if (!tcon->unix_ext) {
                cifs_dbg(FYI, "Unix extensions disabled so not set on reconnect\n");
                return;
        }
index e9abb41aa89bcc12002c61d777d4c4f728e9ffb8..95ef26b555b911c620a5c357bea82bb25e3c8f11 100644 (file)
@@ -338,7 +338,7 @@ __smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
        if (ssocket == NULL)
                return -EAGAIN;
 
-       if (signal_pending(current)) {
+       if (fatal_signal_pending(current)) {
                cifs_dbg(FYI, "signal pending before send request\n");
                return -ERESTARTSYS;
        }
@@ -429,7 +429,7 @@ unmask:
 
        if (signal_pending(current) && (total_len != send_length)) {
                cifs_dbg(FYI, "signal is pending after attempt to send\n");
-               rc = -EINTR;
+               rc = -ERESTARTSYS;
        }
 
        /* uncork it */
index f277d023ebcd14b44454906cc5b776600151013b..c7571931214751b67d3ee3a8c9932d364831ac80 100644 (file)
@@ -14,6 +14,7 @@
 #include <linux/pagemap.h>
 #include <linux/sched/mm.h>
 #include <linux/fsnotify.h>
+#include <linux/uio.h>
 
 #include "kernfs-internal.h"
 
@@ -180,11 +181,10 @@ static const struct seq_operations kernfs_seq_ops = {
  * it difficult to use seq_file.  Implement simplistic custom buffering for
  * bin files.
  */
-static ssize_t kernfs_file_direct_read(struct kernfs_open_file *of,
-                                      char __user *user_buf, size_t count,
-                                      loff_t *ppos)
+static ssize_t kernfs_file_read_iter(struct kiocb *iocb, struct iov_iter *iter)
 {
-       ssize_t len = min_t(size_t, count, PAGE_SIZE);
+       struct kernfs_open_file *of = kernfs_of(iocb->ki_filp);
+       ssize_t len = min_t(size_t, iov_iter_count(iter), PAGE_SIZE);
        const struct kernfs_ops *ops;
        char *buf;
 
@@ -210,7 +210,7 @@ static ssize_t kernfs_file_direct_read(struct kernfs_open_file *of,
        of->event = atomic_read(&of->kn->attr.open->event);
        ops = kernfs_ops(of->kn);
        if (ops->read)
-               len = ops->read(of, buf, len, *ppos);
+               len = ops->read(of, buf, len, iocb->ki_pos);
        else
                len = -EINVAL;
 
@@ -220,12 +220,12 @@ static ssize_t kernfs_file_direct_read(struct kernfs_open_file *of,
        if (len < 0)
                goto out_free;
 
-       if (copy_to_user(user_buf, buf, len)) {
+       if (copy_to_iter(buf, len, iter) != len) {
                len = -EFAULT;
                goto out_free;
        }
 
-       *ppos += len;
+       iocb->ki_pos += len;
 
  out_free:
        if (buf == of->prealloc_buf)
@@ -235,31 +235,14 @@ static ssize_t kernfs_file_direct_read(struct kernfs_open_file *of,
        return len;
 }
 
-/**
- * kernfs_fop_read - kernfs vfs read callback
- * @file: file pointer
- * @user_buf: data to write
- * @count: number of bytes
- * @ppos: starting offset
- */
-static ssize_t kernfs_fop_read(struct file *file, char __user *user_buf,
-                              size_t count, loff_t *ppos)
+static ssize_t kernfs_fop_read_iter(struct kiocb *iocb, struct iov_iter *iter)
 {
-       struct kernfs_open_file *of = kernfs_of(file);
-
-       if (of->kn->flags & KERNFS_HAS_SEQ_SHOW)
-               return seq_read(file, user_buf, count, ppos);
-       else
-               return kernfs_file_direct_read(of, user_buf, count, ppos);
+       if (kernfs_of(iocb->ki_filp)->kn->flags & KERNFS_HAS_SEQ_SHOW)
+               return seq_read_iter(iocb, iter);
+       return kernfs_file_read_iter(iocb, iter);
 }
 
-/**
- * kernfs_fop_write - kernfs vfs write callback
- * @file: file pointer
- * @user_buf: data to write
- * @count: number of bytes
- * @ppos: starting offset
- *
+/*
  * Copy data in from userland and pass it to the matching kernfs write
  * operation.
  *
@@ -269,20 +252,18 @@ static ssize_t kernfs_fop_read(struct file *file, char __user *user_buf,
  * modify only the the value you're changing, then write entire buffer
  * back.
  */
-static ssize_t kernfs_fop_write(struct file *file, const char __user *user_buf,
-                               size_t count, loff_t *ppos)
+static ssize_t kernfs_fop_write_iter(struct kiocb *iocb, struct iov_iter *iter)
 {
-       struct kernfs_open_file *of = kernfs_of(file);
+       struct kernfs_open_file *of = kernfs_of(iocb->ki_filp);
+       ssize_t len = iov_iter_count(iter);
        const struct kernfs_ops *ops;
-       ssize_t len;
        char *buf;
 
        if (of->atomic_write_len) {
-               len = count;
                if (len > of->atomic_write_len)
                        return -E2BIG;
        } else {
-               len = min_t(size_t, count, PAGE_SIZE);
+               len = min_t(size_t, len, PAGE_SIZE);
        }
 
        buf = of->prealloc_buf;
@@ -293,7 +274,7 @@ static ssize_t kernfs_fop_write(struct file *file, const char __user *user_buf,
        if (!buf)
                return -ENOMEM;
 
-       if (copy_from_user(buf, user_buf, len)) {
+       if (copy_from_iter(buf, len, iter) != len) {
                len = -EFAULT;
                goto out_free;
        }
@@ -312,7 +293,7 @@ static ssize_t kernfs_fop_write(struct file *file, const char __user *user_buf,
 
        ops = kernfs_ops(of->kn);
        if (ops->write)
-               len = ops->write(of, buf, len, *ppos);
+               len = ops->write(of, buf, len, iocb->ki_pos);
        else
                len = -EINVAL;
 
@@ -320,7 +301,7 @@ static ssize_t kernfs_fop_write(struct file *file, const char __user *user_buf,
        mutex_unlock(&of->mutex);
 
        if (len > 0)
-               *ppos += len;
+               iocb->ki_pos += len;
 
 out_free:
        if (buf == of->prealloc_buf)
@@ -673,7 +654,7 @@ static int kernfs_fop_open(struct inode *inode, struct file *file)
 
        /*
         * Write path needs to atomic_write_len outside active reference.
-        * Cache it in open_file.  See kernfs_fop_write() for details.
+        * Cache it in open_file.  See kernfs_fop_write_iter() for details.
         */
        of->atomic_write_len = ops->atomic_write_len;
 
@@ -960,14 +941,16 @@ void kernfs_notify(struct kernfs_node *kn)
 EXPORT_SYMBOL_GPL(kernfs_notify);
 
 const struct file_operations kernfs_file_fops = {
-       .read           = kernfs_fop_read,
-       .write          = kernfs_fop_write,
+       .read_iter      = kernfs_fop_read_iter,
+       .write_iter     = kernfs_fop_write_iter,
        .llseek         = generic_file_llseek,
        .mmap           = kernfs_fop_mmap,
        .open           = kernfs_fop_open,
        .release        = kernfs_fop_release,
        .poll           = kernfs_fop_poll,
        .fsync          = noop_fsync,
+       .splice_read    = generic_file_splice_read,
+       .splice_write   = iter_file_splice_write,
 };
 
 /**
index 89bb8b84173e4ad0c8ca683384419b6027e890b8..1779f90eeb4cb4b3daf2a99b2246148de0c407a7 100644 (file)
@@ -609,6 +609,18 @@ static inline const char *dev_name(const struct device *dev)
        return kobject_name(&dev->kobj);
 }
 
+/**
+ * dev_bus_name - Return a device's bus/class name, if at all possible
+ * @dev: struct device to get the bus/class name of
+ *
+ * Will return the name of the bus/class the device is attached to.  If it is
+ * not attached to a bus/class, an empty string will be returned.
+ */
+static inline const char *dev_bus_name(const struct device *dev)
+{
+       return dev->bus ? dev->bus->name : (dev->class ? dev->class->name : "");
+}
+
 __printf(2, 3) int dev_set_name(struct device *dev, const char *name, ...);
 
 #ifdef CONFIG_NUMA
index 65b81e0c494d207dc27c9dd526c7146262eae5d9..2484ed97e72f586b7258ec76c392343f7ef4bf03 100644 (file)
@@ -33,6 +33,9 @@ struct task_struct *kthread_create_on_cpu(int (*threadfn)(void *data),
                                          unsigned int cpu,
                                          const char *namefmt);
 
+void kthread_set_per_cpu(struct task_struct *k, int cpu);
+bool kthread_is_per_cpu(struct task_struct *k);
+
 /**
  * kthread_run - create and wake a thread.
  * @threadfn: the function to run until signal_pending(current).
index a12b5523cc18e699fea0f36ca49b65dbfdfeb015..73f20deb497d5a86f58b3823615e2b0cd7920201 100644 (file)
@@ -230,6 +230,5 @@ static inline ktime_t ms_to_ktime(u64 ms)
 }
 
 # include <linux/timekeeping.h>
-# include <linux/timekeeping32.h>
 
 #endif
diff --git a/include/linux/timekeeping32.h b/include/linux/timekeeping32.h
deleted file mode 100644 (file)
index 266017f..0000000
+++ /dev/null
@@ -1,14 +0,0 @@
-#ifndef _LINUX_TIMEKEEPING32_H
-#define _LINUX_TIMEKEEPING32_H
-/*
- * These interfaces are all based on the old timespec type
- * and should get replaced with the timespec64 based versions
- * over time so we can remove the file here.
- */
-
-static inline unsigned long get_seconds(void)
-{
-       return ktime_get_real_seconds();
-}
-
-#endif
index 5039af667645d64813b0fa151393163f8918cb2f..cbe3e152d24c007c89ae6e6a2e03ca10d9aab390 100644 (file)
@@ -366,7 +366,7 @@ TRACE_EVENT(sched_process_wait,
 );
 
 /*
- * Tracepoint for do_fork:
+ * Tracepoint for kernel_clone:
  */
 TRACE_EVENT(sched_process_fork,
 
index 37720a6d04eaa8c61cdae87092df4a75daa59c75..d66cd1014211b92de6e9f881089175395845265c 100644 (file)
@@ -819,9 +819,8 @@ void __init fork_init(void)
        init_task.signal->rlim[RLIMIT_SIGPENDING] =
                init_task.signal->rlim[RLIMIT_NPROC];
 
-       for (i = 0; i < UCOUNT_COUNTS; i++) {
+       for (i = 0; i < UCOUNT_COUNTS; i++)
                init_user_ns.ucount_max[i] = max_threads/2;
-       }
 
 #ifdef CONFIG_VMAP_STACK
        cpuhp_setup_state(CPUHP_BP_PREPARE_DYN, "fork:vm_stack_cache",
@@ -1654,9 +1653,8 @@ static inline void init_task_pid_links(struct task_struct *task)
 {
        enum pid_type type;
 
-       for (type = PIDTYPE_PID; type < PIDTYPE_MAX; ++type) {
+       for (type = PIDTYPE_PID; type < PIDTYPE_MAX; ++type)
                INIT_HLIST_NODE(&task->pid_links[type]);
-       }
 }
 
 static inline void
index ab8567f32501f6e0c2dbab9065fa6946f30bc65e..dec3f73e8db92e87e7cf2647d75ad5f7afaf2c62 100644 (file)
@@ -2859,3 +2859,4 @@ bool irq_check_status_bit(unsigned int irq, unsigned int bitmask)
        rcu_read_unlock();
        return res;
 }
+EXPORT_SYMBOL_GPL(irq_check_status_bit);
index 2c0c4d6d0f83afcc0d70b696e4b216ed06086106..dc0e2d7fbdfd927c98aade002389549f89477a0c 100644 (file)
@@ -402,7 +402,7 @@ int __msi_domain_alloc_irqs(struct irq_domain *domain, struct device *dev,
        struct msi_domain_ops *ops = info->ops;
        struct irq_data *irq_data;
        struct msi_desc *desc;
-       msi_alloc_info_t arg;
+       msi_alloc_info_t arg = { };
        int i, ret, virq;
        bool can_reserve;
 
index a5eceecd4513c431f24eb72b33c45089037b3413..1578973c57409cceab891f4f36820a2025dc6b1c 100644 (file)
@@ -294,7 +294,7 @@ static int kthread(void *_create)
        do_exit(ret);
 }
 
-/* called from do_fork() to get node information for about to be created task */
+/* called from kernel_clone() to get node information for about to be created task */
 int tsk_fork_get_node(struct task_struct *tsk)
 {
 #ifdef CONFIG_NUMA
@@ -493,11 +493,36 @@ struct task_struct *kthread_create_on_cpu(int (*threadfn)(void *data),
                return p;
        kthread_bind(p, cpu);
        /* CPU hotplug need to bind once again when unparking the thread. */
-       set_bit(KTHREAD_IS_PER_CPU, &to_kthread(p)->flags);
        to_kthread(p)->cpu = cpu;
        return p;
 }
 
+void kthread_set_per_cpu(struct task_struct *k, int cpu)
+{
+       struct kthread *kthread = to_kthread(k);
+       if (!kthread)
+               return;
+
+       WARN_ON_ONCE(!(k->flags & PF_NO_SETAFFINITY));
+
+       if (cpu < 0) {
+               clear_bit(KTHREAD_IS_PER_CPU, &kthread->flags);
+               return;
+       }
+
+       kthread->cpu = cpu;
+       set_bit(KTHREAD_IS_PER_CPU, &kthread->flags);
+}
+
+bool kthread_is_per_cpu(struct task_struct *k)
+{
+       struct kthread *kthread = to_kthread(k);
+       if (!kthread)
+               return false;
+
+       return test_bit(KTHREAD_IS_PER_CPU, &kthread->flags);
+}
+
 /**
  * kthread_unpark - unpark a thread created by kthread_create().
  * @k:         thread created by kthread_create().
index c1418b47f625a25fc32d8b18852057dc48b6cc93..bdaf4829098c026e096e88be59df9d198d5b1c9b 100644 (file)
@@ -79,7 +79,7 @@ module_param(lock_stat, int, 0644);
 DEFINE_PER_CPU(unsigned int, lockdep_recursion);
 EXPORT_PER_CPU_SYMBOL_GPL(lockdep_recursion);
 
-static inline bool lockdep_enabled(void)
+static __always_inline bool lockdep_enabled(void)
 {
        if (!debug_locks)
                return false;
@@ -5271,12 +5271,15 @@ static void __lock_unpin_lock(struct lockdep_map *lock, struct pin_cookie cookie
 /*
  * Check whether we follow the irq-flags state precisely:
  */
-static void check_flags(unsigned long flags)
+static noinstr void check_flags(unsigned long flags)
 {
 #if defined(CONFIG_PROVE_LOCKING) && defined(CONFIG_DEBUG_LOCKDEP)
        if (!debug_locks)
                return;
 
+       /* Get the warning out..  */
+       instrumentation_begin();
+
        if (irqs_disabled_flags(flags)) {
                if (DEBUG_LOCKS_WARN_ON(lockdep_hardirqs_enabled())) {
                        printk("possible reason: unannotated irqs-off.\n");
@@ -5304,6 +5307,8 @@ static void check_flags(unsigned long flags)
 
        if (!debug_locks)
                print_irqtrace_events(current);
+
+       instrumentation_end();
 #endif
 }
 
index 15d2562118d1727aa197bd5f9cc6314cfebace6c..ff74fca39ed21693428e2f5276839581808c693b 100644 (file)
@@ -1796,13 +1796,28 @@ static inline bool rq_has_pinned_tasks(struct rq *rq)
  */
 static inline bool is_cpu_allowed(struct task_struct *p, int cpu)
 {
+       /* When not in the task's cpumask, no point in looking further. */
        if (!cpumask_test_cpu(cpu, p->cpus_ptr))
                return false;
 
-       if (is_per_cpu_kthread(p) || is_migration_disabled(p))
+       /* migrate_disabled() must be allowed to finish. */
+       if (is_migration_disabled(p))
                return cpu_online(cpu);
 
-       return cpu_active(cpu);
+       /* Non kernel threads are not allowed during either online or offline. */
+       if (!(p->flags & PF_KTHREAD))
+               return cpu_active(cpu);
+
+       /* KTHREAD_IS_PER_CPU is always allowed. */
+       if (kthread_is_per_cpu(p))
+               return cpu_online(cpu);
+
+       /* Regular kernel threads don't get to stay during offline. */
+       if (cpu_rq(cpu)->balance_push)
+               return false;
+
+       /* But are allowed during online. */
+       return cpu_online(cpu);
 }
 
 /*
@@ -2327,7 +2342,9 @@ static int __set_cpus_allowed_ptr(struct task_struct *p,
 
        if (p->flags & PF_KTHREAD || is_migration_disabled(p)) {
                /*
-                * Kernel threads are allowed on online && !active CPUs.
+                * Kernel threads are allowed on online && !active CPUs,
+                * however, during cpu-hot-unplug, even these might get pushed
+                * away if not KTHREAD_IS_PER_CPU.
                 *
                 * Specifically, migration_disabled() tasks must not fail the
                 * cpumask_any_and_distribute() pick below, esp. so on
@@ -2371,16 +2388,6 @@ static int __set_cpus_allowed_ptr(struct task_struct *p,
 
        __do_set_cpus_allowed(p, new_mask, flags);
 
-       if (p->flags & PF_KTHREAD) {
-               /*
-                * For kernel threads that do indeed end up on online &&
-                * !active we want to ensure they are strict per-CPU threads.
-                */
-               WARN_ON(cpumask_intersects(new_mask, cpu_online_mask) &&
-                       !cpumask_intersects(new_mask, cpu_active_mask) &&
-                       p->nr_cpus_allowed != 1);
-       }
-
        return affine_move_task(rq, p, &rf, dest_cpu, flags);
 
 out:
@@ -3121,6 +3128,13 @@ bool cpus_share_cache(int this_cpu, int that_cpu)
 
 static inline bool ttwu_queue_cond(int cpu, int wake_flags)
 {
+       /*
+        * Do not complicate things with the async wake_list while the CPU is
+        * in hotplug state.
+        */
+       if (!cpu_active(cpu))
+               return false;
+
        /*
         * If the CPU does not share cache, then queue the task on the
         * remote rqs wakelist to avoid accessing remote data.
@@ -7276,8 +7290,14 @@ static void balance_push(struct rq *rq)
        /*
         * Both the cpu-hotplug and stop task are in this case and are
         * required to complete the hotplug process.
+        *
+        * XXX: the idle task does not match kthread_is_per_cpu() due to
+        * histerical raisins.
         */
-       if (is_per_cpu_kthread(push_task) || is_migration_disabled(push_task)) {
+       if (rq->idle == push_task ||
+           ((push_task->flags & PF_KTHREAD) && kthread_is_per_cpu(push_task)) ||
+           is_migration_disabled(push_task)) {
+
                /*
                 * If this is the idle task on the outgoing CPU try to wake
                 * up the hotplug control thread which might wait for the
@@ -7309,7 +7329,7 @@ static void balance_push(struct rq *rq)
        /*
         * At this point need_resched() is true and we'll take the loop in
         * schedule(). The next pick is obviously going to be the stop task
-        * which is_per_cpu_kthread() and will push this task away.
+        * which kthread_is_per_cpu() and will push this task away.
         */
        raw_spin_lock(&rq->lock);
 }
@@ -7320,10 +7340,13 @@ static void balance_push_set(int cpu, bool on)
        struct rq_flags rf;
 
        rq_lock_irqsave(rq, &rf);
-       if (on)
+       rq->balance_push = on;
+       if (on) {
+               WARN_ON_ONCE(rq->balance_callback);
                rq->balance_callback = &balance_push_callback;
-       else
+       } else if (rq->balance_callback == &balance_push_callback) {
                rq->balance_callback = NULL;
+       }
        rq_unlock_irqrestore(rq, &rf);
 }
 
@@ -7441,6 +7464,10 @@ int sched_cpu_activate(unsigned int cpu)
        struct rq *rq = cpu_rq(cpu);
        struct rq_flags rf;
 
+       /*
+        * Make sure that when the hotplug state machine does a roll-back
+        * we clear balance_push. Ideally that would happen earlier...
+        */
        balance_push_set(cpu, false);
 
 #ifdef CONFIG_SCHED_SMT
@@ -7483,17 +7510,27 @@ int sched_cpu_deactivate(unsigned int cpu)
        int ret;
 
        set_cpu_active(cpu, false);
+
+       /*
+        * From this point forward, this CPU will refuse to run any task that
+        * is not: migrate_disable() or KTHREAD_IS_PER_CPU, and will actively
+        * push those tasks away until this gets cleared, see
+        * sched_cpu_dying().
+        */
+       balance_push_set(cpu, true);
+
        /*
-        * We've cleared cpu_active_mask, wait for all preempt-disabled and RCU
-        * users of this state to go away such that all new such users will
-        * observe it.
+        * We've cleared cpu_active_mask / set balance_push, wait for all
+        * preempt-disabled and RCU users of this state to go away such that
+        * all new such users will observe it.
+        *
+        * Specifically, we rely on ttwu to no longer target this CPU, see
+        * ttwu_queue_cond() and is_cpu_allowed().
         *
         * Do sync before park smpboot threads to take care the rcu boost case.
         */
        synchronize_rcu();
 
-       balance_push_set(cpu, true);
-
        rq_lock_irqsave(rq, &rf);
        if (rq->rd) {
                update_rq_clock(rq);
@@ -7574,6 +7611,25 @@ static void calc_load_migrate(struct rq *rq)
                atomic_long_add(delta, &calc_load_tasks);
 }
 
+static void dump_rq_tasks(struct rq *rq, const char *loglvl)
+{
+       struct task_struct *g, *p;
+       int cpu = cpu_of(rq);
+
+       lockdep_assert_held(&rq->lock);
+
+       printk("%sCPU%d enqueued tasks (%u total):\n", loglvl, cpu, rq->nr_running);
+       for_each_process_thread(g, p) {
+               if (task_cpu(p) != cpu)
+                       continue;
+
+               if (!task_on_rq_queued(p))
+                       continue;
+
+               printk("%s\tpid: %d, name: %s\n", loglvl, p->pid, p->comm);
+       }
+}
+
 int sched_cpu_dying(unsigned int cpu)
 {
        struct rq *rq = cpu_rq(cpu);
@@ -7583,9 +7639,18 @@ int sched_cpu_dying(unsigned int cpu)
        sched_tick_stop(cpu);
 
        rq_lock_irqsave(rq, &rf);
-       BUG_ON(rq->nr_running != 1 || rq_has_pinned_tasks(rq));
+       if (rq->nr_running != 1 || rq_has_pinned_tasks(rq)) {
+               WARN(true, "Dying CPU not properly vacated!");
+               dump_rq_tasks(rq, KERN_WARNING);
+       }
        rq_unlock_irqrestore(rq, &rf);
 
+       /*
+        * Now that the CPU is offline, make sure we're welcome
+        * to new tasks once we come back up.
+        */
+       balance_push_set(cpu, false);
+
        calc_load_migrate(rq);
        update_max_interval();
        nohz_balance_exit_idle(rq);
index 12ada79d40f338209e40244b156a69883ffbe0e9..bb09988451a041e526957d4de46a09b99d83fa63 100644 (file)
@@ -975,6 +975,7 @@ struct rq {
        unsigned long           cpu_capacity_orig;
 
        struct callback_head    *balance_callback;
+       unsigned char           balance_push;
 
        unsigned char           nohz_idle_balance;
        unsigned char           idle_balance;
index 6b9c431da08fe92d7d9c13866b4ddb2d767385b8..5ad8566534e7604d275e60cd4bd2ba78a1eb0b3e 100644 (file)
@@ -3704,7 +3704,8 @@ static bool access_pidfd_pidns(struct pid *pid)
        return true;
 }
 
-static int copy_siginfo_from_user_any(kernel_siginfo_t *kinfo, siginfo_t *info)
+static int copy_siginfo_from_user_any(kernel_siginfo_t *kinfo,
+               siginfo_t __user *info)
 {
 #ifdef CONFIG_COMPAT
        /*
index 2efe1e206167ccfbade03bf0b76b8e5cb657c207..f25208e8df8365e090cedf887638b3e0b00e92bc 100644 (file)
@@ -188,6 +188,7 @@ __smpboot_create_thread(struct smp_hotplug_thread *ht, unsigned int cpu)
                kfree(td);
                return PTR_ERR(tsk);
        }
+       kthread_set_per_cpu(tsk, cpu);
        /*
         * Park the thread so that it could start right on the CPU
         * when it is available.
index 7404d38315276a96fa69f78d67a856c4867dbf95..87389b9e21abaa5f4a876eeec52840b5ad65e403 100644 (file)
@@ -498,7 +498,7 @@ out:
 static void sync_hw_clock(struct work_struct *work);
 static DECLARE_WORK(sync_work, sync_hw_clock);
 static struct hrtimer sync_hrtimer;
-#define SYNC_PERIOD_NS (11UL * 60 * NSEC_PER_SEC)
+#define SYNC_PERIOD_NS (11ULL * 60 * NSEC_PER_SEC)
 
 static enum hrtimer_restart sync_timer_callback(struct hrtimer *timer)
 {
@@ -512,7 +512,7 @@ static void sched_sync_hw_clock(unsigned long offset_nsec, bool retry)
        ktime_t exp = ktime_set(ktime_get_real_seconds(), 0);
 
        if (retry)
-               exp = ktime_add_ns(exp, 2 * NSEC_PER_SEC - offset_nsec);
+               exp = ktime_add_ns(exp, 2ULL * NSEC_PER_SEC - offset_nsec);
        else
                exp = ktime_add_ns(exp, SYNC_PERIOD_NS - offset_nsec);
 
index a45cedda93a7cc26b9ba8e92b7f763a5c8c0700e..6aee5768c86ff7dbd4acd58cc96d4fac949f9437 100644 (file)
@@ -991,8 +991,7 @@ EXPORT_SYMBOL_GPL(ktime_get_seconds);
 /**
  * ktime_get_real_seconds - Get the seconds portion of CLOCK_REALTIME
  *
- * Returns the wall clock seconds since 1970. This replaces the
- * get_seconds() interface which is not y2038 safe on 32bit systems.
+ * Returns the wall clock seconds since 1970.
  *
  * For 64bit systems the fast access to tk->xtime_sec is preserved. On
  * 32bit systems the access must be protected with the sequence
index 9880b6c0e2721fe5c0758eda58d82c1f36576d19..894bb885b40b146ef5c13d007427bb0156490bf5 100644 (file)
@@ -1848,12 +1848,6 @@ static void worker_attach_to_pool(struct worker *worker,
 {
        mutex_lock(&wq_pool_attach_mutex);
 
-       /*
-        * set_cpus_allowed_ptr() will fail if the cpumask doesn't have any
-        * online CPUs.  It'll be re-applied when any of the CPUs come up.
-        */
-       set_cpus_allowed_ptr(worker->task, pool->attrs->cpumask);
-
        /*
         * The wq_pool_attach_mutex ensures %POOL_DISASSOCIATED remains
         * stable across this function.  See the comments above the flag
@@ -1861,6 +1855,11 @@ static void worker_attach_to_pool(struct worker *worker,
         */
        if (pool->flags & POOL_DISASSOCIATED)
                worker->flags |= WORKER_UNBOUND;
+       else
+               kthread_set_per_cpu(worker->task, pool->cpu);
+
+       if (worker->rescue_wq)
+               set_cpus_allowed_ptr(worker->task, pool->attrs->cpumask);
 
        list_add_tail(&worker->node, &pool->workers);
        worker->pool = pool;
@@ -1883,6 +1882,7 @@ static void worker_detach_from_pool(struct worker *worker)
 
        mutex_lock(&wq_pool_attach_mutex);
 
+       kthread_set_per_cpu(worker->task, -1);
        list_del(&worker->node);
        worker->pool = NULL;
 
@@ -4919,8 +4919,10 @@ static void unbind_workers(int cpu)
 
                raw_spin_unlock_irq(&pool->lock);
 
-               for_each_pool_worker(worker, pool)
-                       WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task, cpu_active_mask) < 0);
+               for_each_pool_worker(worker, pool) {
+                       kthread_set_per_cpu(worker->task, -1);
+                       WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task, cpu_possible_mask) < 0);
+               }
 
                mutex_unlock(&wq_pool_attach_mutex);
 
@@ -4972,9 +4974,11 @@ static void rebind_workers(struct worker_pool *pool)
         * of all workers first and then clear UNBOUND.  As we're called
         * from CPU_ONLINE, the following shouldn't fail.
         */
-       for_each_pool_worker(worker, pool)
+       for_each_pool_worker(worker, pool) {
+               kthread_set_per_cpu(worker->task, pool->cpu);
                WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task,
                                                  pool->attrs->cpumask) < 0);
+       }
 
        raw_spin_lock_irq(&pool->lock);
 
index 5f8d3eed78a1869e3f294e27b7522508a82bf17c..4bd30315eb62b792c85e52559de94cd38380a42d 100644 (file)
@@ -2928,14 +2928,10 @@ int check(struct objtool_file *file)
        warnings += ret;
 
 out:
-       if (ret < 0) {
-               /*
-                *  Fatal error.  The binary is corrupt or otherwise broken in
-                *  some way, or objtool itself is broken.  Fail the kernel
-                *  build.
-                */
-               return ret;
-       }
-
+       /*
+        *  For now, don't fail the kernel build on fatal warnings.  These
+        *  errors are still fairly common due to the growing matrix of
+        *  supported toolchains and their recent pace of change.
+        */
        return 0;
 }
index be89c741ba9a09441e80566f370b7f44102d0c33..d8421e1d06bed33f79e8e46d14f7253aa0256102 100644 (file)
@@ -380,8 +380,11 @@ static int read_symbols(struct elf *elf)
 
        symtab = find_section_by_name(elf, ".symtab");
        if (!symtab) {
-               WARN("missing symbol table");
-               return -1;
+               /*
+                * A missing symbol table is actually possible if it's an empty
+                * .o file.  This can happen for thunk_64.o.
+                */
+               return 0;
        }
 
        symtab_shndx = find_section_by_name(elf, ".symtab_shndx");
@@ -448,6 +451,13 @@ static int read_symbols(struct elf *elf)
                list_add(&sym->list, entry);
                elf_hash_add(elf->symbol_hash, &sym->hash, sym->idx);
                elf_hash_add(elf->symbol_name_hash, &sym->name_hash, str_hash(sym->name));
+
+               /*
+                * Don't store empty STT_NOTYPE symbols in the rbtree.  They
+                * can exist within a function, confusing the sorting.
+                */
+               if (!sym->len)
+                       rb_erase(&sym->node, &sym->sec->symbol_tree);
        }
 
        if (stats)
index cb53a8b777e68a7fc49c5c7ed1d249cb13c0e1d1..c25cf7cd45e9fdb8ec328c5e5a8dff16bc993e05 100644 (file)
@@ -443,7 +443,6 @@ int test_alignment_handler_integer(void)
        LOAD_DFORM_TEST(ldu);
        LOAD_XFORM_TEST(ldx);
        LOAD_XFORM_TEST(ldux);
-       LOAD_DFORM_TEST(lmw);
        STORE_DFORM_TEST(stb);
        STORE_XFORM_TEST(stbx);
        STORE_DFORM_TEST(stbu);
@@ -462,7 +461,11 @@ int test_alignment_handler_integer(void)
        STORE_XFORM_TEST(stdx);
        STORE_DFORM_TEST(stdu);
        STORE_XFORM_TEST(stdux);
+
+#ifdef __BIG_ENDIAN__
+       LOAD_DFORM_TEST(lmw);
        STORE_DFORM_TEST(stmw);
+#endif
 
        return rc;
 }
index 9e5c7f3f498a7937e13614413019588666671c87..0af4f02669a115f547d33b8ed8261201876dbc57 100644 (file)
@@ -290,5 +290,5 @@ static int test(void)
 
 int main(void)
 {
-       test_harness(test, "pkey_exec_prot");
+       return test_harness(test, "pkey_exec_prot");
 }
index 4f815d7c12145a61ba37ba83488b04c1f90e8ee2..2db76e56d4cb99ee9897817fb3557a2f3e8800fc 100644 (file)
@@ -329,5 +329,5 @@ static int test(void)
 
 int main(void)
 {
-       test_harness(test, "pkey_siginfo");
+       return test_harness(test, "pkey_siginfo");
 }