git.proxmox.com Git - mirror_ubuntu-artful-kernel.git/commitdiff
Merge tag 'gpio-v4.12-4' of git://git.kernel.org/pub/scm/linux/kernel/git/linusw...
authorLinus Torvalds <torvalds@linux-foundation.org>
Sat, 1 Jul 2017 15:24:54 +0000 (08:24 -0700)
committerLinus Torvalds <torvalds@linux-foundation.org>
Sat, 1 Jul 2017 15:24:54 +0000 (08:24 -0700)
Pull last minute fixes for GPIO from Linus Walleij:

 - Fix another ACPI problem with broken BIOSes.

 - Filter out the right GPIO events, making a very user-visible bug go
   away.

* tag 'gpio-v4.12-4' of git://git.kernel.org/pub/scm/linux/kernel/git/linusw/linux-gpio:
  gpio: acpi: Skip _AEI entries without a handler rather than aborting the scan
  gpiolib: fix filtering out unwanted events

124 files changed:
MAINTAINERS
arch/arc/include/asm/processor.h
arch/arm/Kconfig
arch/arm/boot/compressed/efi-header.S
arch/arm/kernel/setup.c
arch/blackfin/include/asm/processor.h
arch/c6x/include/asm/processor.h
arch/cris/arch-v10/kernel/process.c
arch/cris/arch-v32/kernel/process.c
arch/cris/include/asm/processor.h
arch/frv/include/asm/processor.h
arch/frv/kernel/process.c
arch/h8300/include/asm/processor.h
arch/h8300/kernel/process.c
arch/hexagon/include/asm/processor.h
arch/hexagon/kernel/process.c
arch/ia64/include/asm/processor.h
arch/m32r/include/asm/processor.h
arch/m32r/kernel/process.c
arch/m68k/include/asm/processor.h
arch/m68k/kernel/process.c
arch/microblaze/include/asm/processor.h
arch/microblaze/kernel/process.c
arch/mn10300/include/asm/processor.h
arch/mn10300/kernel/process.c
arch/nios2/include/asm/processor.h
arch/openrisc/include/asm/processor.h
arch/openrisc/kernel/process.c
arch/parisc/include/asm/processor.h
arch/parisc/kernel/process.c
arch/powerpc/include/asm/processor.h
arch/powerpc/include/asm/uaccess.h
arch/s390/include/asm/processor.h
arch/s390/kernel/ipl.c
arch/s390/kernel/process.c
arch/score/include/asm/processor.h
arch/score/kernel/process.c
arch/sparc/include/asm/processor_32.h
arch/sparc/include/asm/processor_64.h
arch/sparc/kernel/process_32.c
arch/sparc/kernel/process_64.c
arch/tile/include/asm/processor.h
arch/um/include/asm/processor-generic.h
arch/um/kernel/um_arch.c
arch/x86/include/asm/processor.h
arch/x86/kernel/process.c
arch/x86/kernel/tboot.c
arch/xtensa/include/asm/processor.h
block/bio.c
drivers/gpu/drm/etnaviv/etnaviv_gem.h
drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
drivers/gpu/drm/i915/i915_debugfs.c
drivers/gpu/drm/i915/i915_gem_execbuffer.c
drivers/gpu/drm/i915/i915_vma.c
drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c
drivers/iommu/amd_iommu.c
drivers/md/dm-raid.c
drivers/md/dm-thin.c
drivers/misc/cxl/context.c
drivers/misc/cxl/cxl.h
drivers/misc/cxl/fault.c
drivers/misc/cxl/main.c
drivers/misc/cxl/native.c
drivers/misc/cxl/pci.c
drivers/net/arcnet/arcnet.c
drivers/net/arcnet/capmode.c
drivers/net/arcnet/com20020-pci.c
drivers/net/arcnet/com20020.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
drivers/net/ethernet/broadcom/bnxt/bnxt.c
drivers/net/ethernet/broadcom/bnxt/bnxt.h
drivers/net/ethernet/freescale/fman/Kconfig
drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
drivers/net/ethernet/rocker/rocker_ofdpa.c
drivers/net/ethernet/sfc/ef10.c
drivers/net/ethernet/ti/cpsw-common.c
drivers/net/hyperv/netvsc_drv.c
drivers/net/macvlan.c
drivers/net/phy/dp83640.c
drivers/net/phy/micrel.c
drivers/net/usb/ax88179_178a.c
drivers/net/veth.c
drivers/net/virtio_net.c
drivers/net/xen-netback/common.h
drivers/net/xen-netback/interface.c
drivers/net/xen-netback/netback.c
drivers/nvme/host/pci.c
fs/block_dev.c
fs/nfs/callback_xdr.c
fs/nfs/dir.c
fs/nfs/nfs4proc.c
fs/nfs/nfs4state.c
fs/overlayfs/copy_up.c
include/linux/bio.h
include/linux/hashtable.h
include/net/xfrm.h
include/uapi/linux/a.out.h
kernel/bpf/verifier.c
kernel/trace/ftrace.c
kernel/trace/trace.c
kernel/trace/trace_functions.c
kernel/trace/trace_kprobe.c
kernel/trace/trace_stack.c
net/core/dev.c
net/ipv4/ip_output.c
net/ipv4/tcp.c
net/ipv6/addrconf.c
net/ipv6/datagram.c
net/ipv6/esp6_offload.c
net/ipv6/ip6_output.c
net/ipv6/route.c
net/ipv6/sit.c
net/ipv6/udp.c
net/ipv6/xfrm6_input.c
net/key/af_key.c
net/sched/sch_api.c
net/xfrm/Makefile
net/xfrm/xfrm_device.c
net/xfrm/xfrm_policy.c
net/xfrm/xfrm_user.c
sound/pci/hda/hda_codec.h
sound/pci/hda/hda_controller.c
sound/pci/hda/hda_generic.c
tools/testing/selftests/bpf/test_verifier.c

index 09b5ab6a8a5ce8fd66bed85f30b9ca7db26c14c2..767e9d202adf889d5ae0a746dc6dd7a9561c4a83 100644 (file)
@@ -2964,7 +2964,7 @@ F:        sound/pci/oxygen/
 
 C6X ARCHITECTURE
 M:     Mark Salter <msalter@redhat.com>
-M:     Aurelien Jacquiot <a-jacquiot@ti.com>
+M:     Aurelien Jacquiot <jacquiot.aurelien@gmail.com>
 L:     linux-c6x-dev@linux-c6x.org
 W:     http://www.linux-c6x.org/wiki/index.php/Main_Page
 S:     Maintained
index 6e1242da0159e274b7e161f9648e031cbddd7ef4..4104a08392146f6c479710557010135aa4774a15 100644 (file)
@@ -86,8 +86,6 @@ struct task_struct;
 #define TSK_K_BLINK(tsk)       TSK_K_REG(tsk, 4)
 #define TSK_K_FP(tsk)          TSK_K_REG(tsk, 0)
 
-#define thread_saved_pc(tsk)   TSK_K_BLINK(tsk)
-
 extern void start_thread(struct pt_regs * regs, unsigned long pc,
                         unsigned long usp);
 
index 4c1a35f1583872d2ce39db5c1cacce28be48ccb1..c0fcab6a550473859b4a2ca525cefc16624e1710 100644 (file)
@@ -1416,6 +1416,7 @@ choice
        config VMSPLIT_3G
                bool "3G/1G user/kernel split"
        config VMSPLIT_3G_OPT
+               depends on !ARM_LPAE
                bool "3G/1G user/kernel split (for full 1G low memory)"
        config VMSPLIT_2G
                bool "2G/2G user/kernel split"
index 3f7d1b74c5e02bd46730c58b0a66756c89b904ab..a17ca8d78656d1012910ffb8a37b3433720a8d44 100644 (file)
@@ -17,7 +17,8 @@
                @ there.
                .inst   'M' | ('Z' << 8) | (0x1310 << 16)   @ tstne r0, #0x4d000
 #else
-               W(mov)  r0, r0
+ AR_CLASS(     mov     r0, r0          )
+  M_CLASS(     nop.w                   )
 #endif
                .endm
 
index 32e1a9513dc70eba4787ca1af0ba3e32b9f29d7e..4e80bf7420d4e65fb30e0c68e7bef53932f765b3 100644 (file)
@@ -315,7 +315,7 @@ static void __init cacheid_init(void)
        if (arch >= CPU_ARCH_ARMv6) {
                unsigned int cachetype = read_cpuid_cachetype();
 
-               if ((arch == CPU_ARCH_ARMv7M) && !cachetype) {
+               if ((arch == CPU_ARCH_ARMv7M) && !(cachetype & 0xf000f)) {
                        cacheid = 0;
                } else if ((cachetype & (7 << 29)) == 4 << 29) {
                        /* ARMv7 register format */
index 85d4af97c986aee4a7eff90b4347aca444ee1ed9..dbdbb8a558df4acb548b71dbc52f63b3e7373d7f 100644 (file)
@@ -75,11 +75,6 @@ static inline void release_thread(struct task_struct *dead_task)
 {
 }
 
-/*
- * Return saved PC of a blocked thread.
- */
-#define thread_saved_pc(tsk)   (tsk->thread.pc)
-
 unsigned long get_wchan(struct task_struct *p);
 
 #define        KSTK_EIP(tsk)                                                   \
index b9eb3da7f278dac858bfb2191094f9fc837ac5c8..7c87b5be53b5b74c76fbe03829034b1e96ac3c7c 100644 (file)
@@ -95,11 +95,6 @@ static inline void release_thread(struct task_struct *dead_task)
 #define copy_segments(tsk, mm)         do { } while (0)
 #define release_segments(mm)           do { } while (0)
 
-/*
- * saved PC of a blocked thread.
- */
-#define thread_saved_pc(tsk) (task_pt_regs(tsk)->pc)
-
 /*
  * saved kernel SP and DP of a blocked thread.
  */
index e299d30105b53bf5ad0e8b8df7f036f39267ad66..a2cdb1521aca4db4069d449f56094ee822e0313d 100644 (file)
@@ -69,14 +69,6 @@ void hard_reset_now (void)
        while(1) /* waiting for RETRIBUTION! */ ;
 }
 
-/*
- * Return saved PC of a blocked thread.
- */
-unsigned long thread_saved_pc(struct task_struct *t)
-{
-       return task_pt_regs(t)->irp;
-}
-
 /* setup the child's kernel stack with a pt_regs and switch_stack on it.
  * it will be un-nested during _resume and _ret_from_sys_call when the
  * new thread is scheduled.
index c530a8fa87ceb751a0c275885e34852dbcd5b6c2..fe87b383fbf3fc45522d8ce74a122c8469347993 100644 (file)
@@ -84,14 +84,6 @@ hard_reset_now(void)
                ; /* Wait for reset. */
 }
 
-/*
- * Return saved PC of a blocked thread.
- */
-unsigned long thread_saved_pc(struct task_struct *t)
-{
-       return task_pt_regs(t)->erp;
-}
-
 /*
  * Setup the child's kernel stack with a pt_regs and call switch_stack() on it.
  * It will be unnested during _resume and _ret_from_sys_call when the new thread
index 15b815df29c165809c4e6e229ede6a077d9e8e71..bc2729e4b2c97e89b5a9dd3daf30df555dee1e0f 100644 (file)
@@ -52,8 +52,6 @@ unsigned long get_wchan(struct task_struct *p);
 
 #define KSTK_ESP(tsk)   ((tsk) == current ? rdusp() : (tsk)->thread.usp)
 
-extern unsigned long thread_saved_pc(struct task_struct *tsk);
-
 /* Free all resources held by a thread. */
 static inline void release_thread(struct task_struct *dead_task)
 {
index ddaeb9cc9143333d8f00de60291298f6dce5530c..e4d08d74ed9f8dc4f81140fcf60442378f02cf58 100644 (file)
@@ -96,11 +96,6 @@ extern asmlinkage void *restore_user_regs(const struct user_context *target, ...
 #define release_segments(mm)           do { } while (0)
 #define forget_segments()              do { } while (0)
 
-/*
- * Return saved PC of a blocked thread.
- */
-extern unsigned long thread_saved_pc(struct task_struct *tsk);
-
 unsigned long get_wchan(struct task_struct *p);
 
 #define        KSTK_EIP(tsk)   ((tsk)->thread.frame0->pc)
index 5a4c92abc99ec320b54ef80841bc6f3333a9d3dd..a957b374e3a66b28a30bf5d30b623bfccbc2fb0a 100644 (file)
@@ -198,15 +198,6 @@ unsigned long get_wchan(struct task_struct *p)
        return 0;
 }
 
-unsigned long thread_saved_pc(struct task_struct *tsk)
-{
-       /* Check whether the thread is blocked in resume() */
-       if (in_sched_functions(tsk->thread.pc))
-               return ((unsigned long *)tsk->thread.fp)[2];
-       else
-               return tsk->thread.pc;
-}
-
 int elf_check_arch(const struct elf32_hdr *hdr)
 {
        unsigned long hsr0 = __get_HSR(0);
index 65132d7ae9e5b66fb484014f7930fd91f75cc944..afa53147e66a82e24d8d9e100b4706a9ce2ee760 100644 (file)
@@ -110,10 +110,6 @@ static inline void release_thread(struct task_struct *dead_task)
 {
 }
 
-/*
- * Return saved PC of a blocked thread.
- */
-unsigned long thread_saved_pc(struct task_struct *tsk);
 unsigned long get_wchan(struct task_struct *p);
 
 #define        KSTK_EIP(tsk)   \
index 0f5db5bb561b75cb30bb2871ffa7c00545f84539..d1ddcabbbe8383fc6160db3946a334286676b5ae 100644 (file)
@@ -129,11 +129,6 @@ int copy_thread(unsigned long clone_flags,
        return 0;
 }
 
-unsigned long thread_saved_pc(struct task_struct *tsk)
-{
-       return ((struct pt_regs *)tsk->thread.esp0)->pc;
-}
-
 unsigned long get_wchan(struct task_struct *p)
 {
        unsigned long fp, pc;
index 45a825402f634ee4b650a08a60efc5c60d50e261..ce67940860a536dce66bea9b59daeab1fcaa103e 100644 (file)
@@ -33,9 +33,6 @@
 /*  task_struct, defined elsewhere, is the "process descriptor" */
 struct task_struct;
 
-/*  this is defined in arch/process.c  */
-extern unsigned long thread_saved_pc(struct task_struct *tsk);
-
 extern void start_thread(struct pt_regs *, unsigned long, unsigned long);
 
 /*
index de715bab7956c7e38df0b8ab689d11cca8fceb16..656050c2e6a06ab7f60ad6c1a65351dfd29f544f 100644 (file)
@@ -60,14 +60,6 @@ void arch_cpu_idle(void)
        local_irq_enable();
 }
 
-/*
- *  Return saved PC of a blocked thread
- */
-unsigned long thread_saved_pc(struct task_struct *tsk)
-{
-       return 0;
-}
-
 /*
  * Copy architecture-specific thread state
  */
index 26a63d69c599addab0486c2fa08d548dd229ccf9..ab982f07ea681253d42f351c230c976a83258f95 100644 (file)
@@ -601,23 +601,6 @@ ia64_set_unat (__u64 *unat, void *spill_addr, unsigned long nat)
        *unat = (*unat & ~mask) | (nat << bit);
 }
 
-/*
- * Return saved PC of a blocked thread.
- * Note that the only way T can block is through a call to schedule() -> switch_to().
- */
-static inline unsigned long
-thread_saved_pc (struct task_struct *t)
-{
-       struct unw_frame_info info;
-       unsigned long ip;
-
-       unw_init_from_blocked_task(&info, t);
-       if (unw_unwind(&info) < 0)
-               return 0;
-       unw_get_ip(&info, &ip);
-       return ip;
-}
-
 /*
  * Get the current instruction/program counter value.
  */
index 5767367550c69637a0b51c7a037b7a5e0c4fbfb4..657874eeeccc262c11268094e0f0ba530e05dd92 100644 (file)
@@ -122,8 +122,6 @@ extern void release_thread(struct task_struct *);
 extern void copy_segments(struct task_struct *p, struct mm_struct * mm);
 extern void release_segments(struct mm_struct * mm);
 
-extern unsigned long thread_saved_pc(struct task_struct *);
-
 /* Copy and release all segment info associated with a VM */
 #define copy_segments(p, mm)  do { } while (0)
 #define release_segments(mm)  do { } while (0)
index d8ffcfec599cb6a10a6eb649f15d628a97a377f9..8cd7e03f4370c06a633d2a12b5db70299ffa41ae 100644 (file)
 
 #include <linux/err.h>
 
-/*
- * Return saved PC of a blocked thread.
- */
-unsigned long thread_saved_pc(struct task_struct *tsk)
-{
-       return tsk->thread.lr;
-}
-
 void (*pm_power_off)(void) = NULL;
 EXPORT_SYMBOL(pm_power_off);
 
index 77239e81379b16b52e39cff979d1da4ceb2e1965..94c36030440cc825c4688244a0b973ba60be3863 100644 (file)
@@ -130,8 +130,6 @@ static inline void release_thread(struct task_struct *dead_task)
 {
 }
 
-extern unsigned long thread_saved_pc(struct task_struct *tsk);
-
 unsigned long get_wchan(struct task_struct *p);
 
 #define        KSTK_EIP(tsk)   \
index e475c945c8b2bf199e6147a16fcf42c2d7c0d714..7df92f8b0781dd2651096f83f8c45185a26be803 100644 (file)
 asmlinkage void ret_from_fork(void);
 asmlinkage void ret_from_kernel_thread(void);
 
-
-/*
- * Return saved PC from a blocked thread
- */
-unsigned long thread_saved_pc(struct task_struct *tsk)
-{
-       struct switch_stack *sw = (struct switch_stack *)tsk->thread.ksp;
-       /* Check whether the thread is blocked in resume() */
-       if (in_sched_functions(sw->retpc))
-               return ((unsigned long *)sw->a6)[1];
-       else
-               return sw->retpc;
-}
-
 void arch_cpu_idle(void)
 {
 #if defined(MACH_ATARI_ONLY)
index 37ef196e45191adb481450c8648e9e9acbb05106..330d556860ba7a8211b767cd4ac03275adcab9f0 100644 (file)
@@ -69,8 +69,6 @@ static inline void release_thread(struct task_struct *dead_task)
 {
 }
 
-extern unsigned long thread_saved_pc(struct task_struct *t);
-
 extern unsigned long get_wchan(struct task_struct *p);
 
 # define KSTK_EIP(tsk) (0)
@@ -121,10 +119,6 @@ static inline void release_thread(struct task_struct *dead_task)
 {
 }
 
-/* Return saved (kernel) PC of a blocked thread.  */
-#  define thread_saved_pc(tsk) \
-       ((tsk)->thread.regs ? (tsk)->thread.regs->r15 : 0)
-
 unsigned long get_wchan(struct task_struct *p);
 
 /* The size allocated for kernel stacks. This _must_ be a power of two! */
index e92a817e645fac7bf8782e782b2525429572d5b3..6527ec22f158f16acef89a0b78310c518866ae73 100644 (file)
@@ -119,23 +119,6 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
        return 0;
 }
 
-#ifndef CONFIG_MMU
-/*
- * Return saved PC of a blocked thread.
- */
-unsigned long thread_saved_pc(struct task_struct *tsk)
-{
-       struct cpu_context *ctx =
-               &(((struct thread_info *)(tsk->stack))->cpu_context);
-
-       /* Check whether the thread is blocked in resume() */
-       if (in_sched_functions(ctx->r15))
-               return (unsigned long)ctx->r15;
-       else
-               return ctx->r14;
-}
-#endif
-
 unsigned long get_wchan(struct task_struct *p)
 {
 /* TBD (used by procfs) */
index 18e17abf7664e51c807e3d5649b0585bfd89a3be..3ae479117b42efd07d719282ee1dd287abecb1a4 100644 (file)
@@ -132,11 +132,6 @@ static inline void start_thread(struct pt_regs *regs,
 /* Free all resources held by a thread. */
 extern void release_thread(struct task_struct *);
 
-/*
- * Return saved PC of a blocked thread.
- */
-extern unsigned long thread_saved_pc(struct task_struct *tsk);
-
 unsigned long get_wchan(struct task_struct *p);
 
 #define task_pt_regs(task) ((task)->thread.uregs)
index c9fa42619c6a9aa4f5fa3244b3ce45efdfaeea11..89e8027e07fb327d39de0170c0da61af177c7160 100644 (file)
 #include <asm/gdb-stub.h>
 #include "internal.h"
 
-/*
- * return saved PC of a blocked thread.
- */
-unsigned long thread_saved_pc(struct task_struct *tsk)
-{
-       return ((unsigned long *) tsk->thread.sp)[3];
-}
-
 /*
  * power off function, if any
  */
index 3bbbc3d798e5f4738548690eb8511834050baa76..4944e2e1d8b0677d48c30ebba98629d43db90933 100644 (file)
@@ -75,9 +75,6 @@ static inline void release_thread(struct task_struct *dead_task)
 {
 }
 
-/* Return saved PC of a blocked thread. */
-#define thread_saved_pc(tsk)   ((tsk)->thread.kregs->ea)
-
 extern unsigned long get_wchan(struct task_struct *p);
 
 #define task_pt_regs(p) \
index a908e6c30a001e14860fe70f174f1451c6e4ae09..396d8f306c21b6c24f872780b9500cb8ebc1b963 100644 (file)
@@ -84,11 +84,6 @@ void start_thread(struct pt_regs *regs, unsigned long nip, unsigned long sp);
 void release_thread(struct task_struct *);
 unsigned long get_wchan(struct task_struct *p);
 
-/*
- * Return saved PC of a blocked thread. For now, this is the "user" PC
- */
-extern unsigned long thread_saved_pc(struct task_struct *t);
-
 #define init_stack      (init_thread_union.stack)
 
 #define cpu_relax()     barrier()
index 106859ae27ffba114f9f4b0011151db0f65f98d4..f9b77003f1138ce42dff841814cb280d562481e0 100644 (file)
@@ -110,11 +110,6 @@ void show_regs(struct pt_regs *regs)
        show_registers(regs);
 }
 
-unsigned long thread_saved_pc(struct task_struct *t)
-{
-       return (unsigned long)user_regs(t->stack)->pc;
-}
-
 void release_thread(struct task_struct *dead_task)
 {
 }
index a3661ee6b060c1d258ab740e5468cfffb665f8d5..4c6694b4e77e0dd12ef6aed95508ea035ccdb10c 100644 (file)
@@ -163,12 +163,7 @@ struct thread_struct {
        .flags          = 0 \
        }
 
-/*
- * Return saved PC of a blocked thread.  This is used by ps mostly.
- */
-
 struct task_struct;
-unsigned long thread_saved_pc(struct task_struct *t);
 void show_trace(struct task_struct *task, unsigned long *stack);
 
 /*
index 4516a5b53f38ef651c038e4231effa00fd6db19d..b64d7d21646ed50c4a5c1f046b0f8ca758abfcc6 100644 (file)
@@ -239,11 +239,6 @@ copy_thread(unsigned long clone_flags, unsigned long usp,
        return 0;
 }
 
-unsigned long thread_saved_pc(struct task_struct *t)
-{
-       return t->thread.regs.kpc;
-}
-
 unsigned long
 get_wchan(struct task_struct *p)
 {
index bb99b651085aaf292e5f98ee23c7cdc53d443cd2..1189d04f3bd1ce6db0f6ed5da3414f25dc3f4c38 100644 (file)
@@ -378,12 +378,6 @@ struct thread_struct {
 }
 #endif
 
-/*
- * Return saved PC of a blocked thread. For now, this is the "user" PC
- */
-#define thread_saved_pc(tsk)    \
-        ((tsk)->thread.regs? (tsk)->thread.regs->nip: 0)
-
 #define task_pt_regs(tsk)      ((struct pt_regs *)(tsk)->thread.regs)
 
 unsigned long get_wchan(struct task_struct *p);
index 5c0d8a8cdae5b588ac1254882c90787f81c40052..41e88d3ce36bfbbe0b4e2ffdc614194c3b797449 100644 (file)
@@ -267,13 +267,7 @@ do {                                                               \
 extern unsigned long __copy_tofrom_user(void __user *to,
                const void __user *from, unsigned long size);
 
-#ifndef __powerpc64__
-
-#define INLINE_COPY_FROM_USER
-#define INLINE_COPY_TO_USER
-
-#else /* __powerpc64__ */
-
+#ifdef __powerpc64__
 static inline unsigned long
 raw_copy_in_user(void __user *to, const void __user *from, unsigned long n)
 {
index 60d395fdc86438e55f49ddf853dca7b6f99582b3..aeac013968f2a00fd0050e772d50a8959dd57e04 100644 (file)
@@ -221,11 +221,6 @@ extern void release_thread(struct task_struct *);
 /* Free guarded storage control block for current */
 void exit_thread_gs(void);
 
-/*
- * Return saved PC of a blocked thread.
- */
-extern unsigned long thread_saved_pc(struct task_struct *t);
-
 unsigned long get_wchan(struct task_struct *p);
 #define task_pt_regs(tsk) ((struct pt_regs *) \
         (task_stack_page(tsk) + THREAD_SIZE) - 1)
index e545ffe5155ab0179327cfe4f9f66e677c604041..8e622bb52f7a95fd59c2f89aec95f04c616633cf 100644 (file)
@@ -564,8 +564,6 @@ static struct kset *ipl_kset;
 
 static void __ipl_run(void *unused)
 {
-       if (MACHINE_IS_LPAR && ipl_info.type == IPL_TYPE_CCW)
-               diag308(DIAG308_LOAD_NORMAL_DUMP, NULL);
        diag308(DIAG308_LOAD_CLEAR, NULL);
        if (MACHINE_IS_VM)
                __cpcmd("IPL", NULL, 0, NULL);
@@ -1088,10 +1086,7 @@ static void __reipl_run(void *unused)
                break;
        case REIPL_METHOD_CCW_DIAG:
                diag308(DIAG308_SET, reipl_block_ccw);
-               if (MACHINE_IS_LPAR)
-                       diag308(DIAG308_LOAD_NORMAL_DUMP, NULL);
-               else
-                       diag308(DIAG308_LOAD_CLEAR, NULL);
+               diag308(DIAG308_LOAD_CLEAR, NULL);
                break;
        case REIPL_METHOD_FCP_RW_DIAG:
                diag308(DIAG308_SET, reipl_block_fcp);
index 999d7154bbdcd0891f6e2d5e6c55ea4ab62d0554..bb32b8618bf61836888d383c5b3fe1c9a352c0d2 100644 (file)
 
 asmlinkage void ret_from_fork(void) asm ("ret_from_fork");
 
-/*
- * Return saved PC of a blocked thread. used in kernel/sched.
- * resume in entry.S does not create a new stack frame, it
- * just stores the registers %r6-%r15 to the frame given by
- * schedule. We want to return the address of the caller of
- * schedule, so we have to walk the backchain one time to
- * find the frame schedule() store its return address.
- */
-unsigned long thread_saved_pc(struct task_struct *tsk)
-{
-       struct stack_frame *sf, *low, *high;
-
-       if (!tsk || !task_stack_page(tsk))
-               return 0;
-       low = task_stack_page(tsk);
-       high = (struct stack_frame *) task_pt_regs(tsk);
-       sf = (struct stack_frame *) tsk->thread.ksp;
-       if (sf <= low || sf > high)
-               return 0;
-       sf = (struct stack_frame *) sf->back_chain;
-       if (sf <= low || sf > high)
-               return 0;
-       return sf->gprs[8];
-}
-
 extern void kernel_thread_starter(void);
 
 /*
index d9a922d8711b2155a2225699e5958236dbb70adb..299274581968d1c850f56341615d81bddae19003 100644 (file)
@@ -13,7 +13,6 @@ struct task_struct;
  */
 extern void (*cpu_wait)(void);
 
-extern unsigned long thread_saved_pc(struct task_struct *tsk);
 extern void start_thread(struct pt_regs *regs,
                        unsigned long pc, unsigned long sp);
 extern unsigned long get_wchan(struct task_struct *p);
index eb64d7a677cb9525afc874a1cb3a21872ad4bddb..6e20241a1ed45c428d7f5bfe9e76c55ecd5e0d07 100644 (file)
@@ -101,11 +101,6 @@ int dump_fpu(struct pt_regs *regs, elf_fpregset_t *r)
        return 1;
 }
 
-unsigned long thread_saved_pc(struct task_struct *tsk)
-{
-       return task_pt_regs(tsk)->cp0_epc;
-}
-
 unsigned long get_wchan(struct task_struct *task)
 {
        if (!task || task == current || task->state == TASK_RUNNING)
index dd27159819ebedce4d0479ec800e91d56706311f..b395e5620c0b986ec808675ccf3062e9a534ae43 100644 (file)
@@ -67,9 +67,6 @@ struct thread_struct {
        .current_ds = KERNEL_DS, \
 }
 
-/* Return saved PC of a blocked thread. */
-unsigned long thread_saved_pc(struct task_struct *t);
-
 /* Do necessary setup to start up a newly executed thread. */
 static inline void start_thread(struct pt_regs * regs, unsigned long pc,
                                    unsigned long sp)
index b58ee90184334224b756360e769c47a0d10e088a..f04dc5a4306245ffc0b53b3c88707332b8a25a0f 100644 (file)
@@ -89,9 +89,7 @@ struct thread_struct {
 #include <linux/types.h>
 #include <asm/fpumacro.h>
 
-/* Return saved PC of a blocked thread. */
 struct task_struct;
-unsigned long thread_saved_pc(struct task_struct *);
 
 /* On Uniprocessor, even in RMO processes see TSO semantics */
 #ifdef CONFIG_SMP
index b6dac8e980f07183f9ea300abcfb033d0251b5c0..9245f93398c76a59a073963253609c1fc54bfeec 100644 (file)
@@ -176,14 +176,6 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
        printk("\n");
 }
 
-/*
- * Note: sparc64 has a pretty intricated thread_saved_pc, check it out.
- */
-unsigned long thread_saved_pc(struct task_struct *tsk)
-{
-       return task_thread_info(tsk)->kpc;
-}
-
 /*
  * Free current thread data structures etc..
  */
index 1badc493e62ee71c2e538758aec3c446780ea671..b96104da5bd6116b119872eaa79e013636aa328c 100644 (file)
@@ -400,25 +400,6 @@ core_initcall(sparc_sysrq_init);
 
 #endif
 
-unsigned long thread_saved_pc(struct task_struct *tsk)
-{
-       struct thread_info *ti = task_thread_info(tsk);
-       unsigned long ret = 0xdeadbeefUL;
-       
-       if (ti && ti->ksp) {
-               unsigned long *sp;
-               sp = (unsigned long *)(ti->ksp + STACK_BIAS);
-               if (((unsigned long)sp & (sizeof(long) - 1)) == 0UL &&
-                   sp[14]) {
-                       unsigned long *fp;
-                       fp = (unsigned long *)(sp[14] + STACK_BIAS);
-                       if (((unsigned long)fp & (sizeof(long) - 1)) == 0UL)
-                               ret = fp[15];
-               }
-       }
-       return ret;
-}
-
 /* Free current thread data structures etc.. */
 void exit_thread(struct task_struct *tsk)
 {
index 0bc9968b97a19296a48d4dfb215cd3464b69f9a6..f71e5206650bf91578677720a7701a3d766d0436 100644 (file)
@@ -214,13 +214,6 @@ static inline void release_thread(struct task_struct *dead_task)
 
 extern void prepare_exit_to_usermode(struct pt_regs *regs, u32 flags);
 
-
-/*
- * Return saved (kernel) PC of a blocked thread.
- * Only used in a printk() in kernel/sched/core.c, so don't work too hard.
- */
-#define thread_saved_pc(t)   ((t)->thread.pc)
-
 unsigned long get_wchan(struct task_struct *p);
 
 /* Return initial ksp value for given task. */
index 2d1e0dd5bb0bf55a7e543f9f7f9e0eefd6ab4cac..f6d1a3f747a9b58b3f33ce0ee25c3bf889985c0b 100644 (file)
@@ -58,8 +58,6 @@ static inline void release_thread(struct task_struct *task)
 {
 }
 
-extern unsigned long thread_saved_pc(struct task_struct *t);
-
 static inline void mm_copy_segments(struct mm_struct *from_mm,
                                    struct mm_struct *new_mm)
 {
index 64a1fd06f3fde02d964c4fedcb9e73328085c94d..7b56401173250e1dbb61208106c35c58e324812b 100644 (file)
@@ -56,12 +56,6 @@ union thread_union cpu0_irqstack
        __attribute__((__section__(".data..init_irqstack"))) =
                { INIT_THREAD_INFO(init_task) };
 
-unsigned long thread_saved_pc(struct task_struct *task)
-{
-       /* FIXME: Need to look up userspace_pid by cpu */
-       return os_process_pc(userspace_pid[0]);
-}
-
 /* Changed in setup_arch, which is called in early boot */
 static char host_info[(__NEW_UTS_LEN + 1) * 5];
 
index 3cada998a402a7893ffd2fc709916f4fcbc3f970..a28b671f15499590d3b7243eb7909436aa27b814 100644 (file)
@@ -860,8 +860,6 @@ extern unsigned long KSTK_ESP(struct task_struct *task);
 
 #endif /* CONFIG_X86_64 */
 
-extern unsigned long thread_saved_pc(struct task_struct *tsk);
-
 extern void start_thread(struct pt_regs *regs, unsigned long new_ip,
                                               unsigned long new_sp);
 
index 0bb88428cbf2697c89a60311051cc5351ea55fde..3ca198080ea9294486ae9a1121e7815dfba7cb19 100644 (file)
@@ -544,17 +544,6 @@ unsigned long arch_randomize_brk(struct mm_struct *mm)
        return randomize_page(mm->brk, 0x02000000);
 }
 
-/*
- * Return saved PC of a blocked thread.
- * What is this good for? it will be always the scheduler or ret_from_fork.
- */
-unsigned long thread_saved_pc(struct task_struct *tsk)
-{
-       struct inactive_task_frame *frame =
-               (struct inactive_task_frame *) READ_ONCE(tsk->thread.sp);
-       return READ_ONCE_NOCHECK(frame->ret_addr);
-}
-
 /*
  * Called from fs/proc with a reference on @p to find the function
  * which called into schedule(). This needs to be done carefully
index 4b17240599093a6ec34c45e053ae665050d18e14..a4eb27918cebf99dc2415f7eec49c4838d6f9f41 100644 (file)
@@ -514,7 +514,7 @@ int tboot_force_iommu(void)
        if (!tboot_enabled())
                return 0;
 
-       if (!intel_iommu_tboot_noforce)
+       if (intel_iommu_tboot_noforce)
                return 1;
 
        if (no_iommu || swiotlb || dmar_disabled)
index 003eeee3fbc636d91aed849aaa39bb0c24370227..30ee8c608853d4fb4b238a01319589d38ce018b7 100644 (file)
@@ -213,8 +213,6 @@ struct mm_struct;
 #define release_segments(mm)   do { } while(0)
 #define forget_segments()      do { } while (0)
 
-#define thread_saved_pc(tsk)   (task_pt_regs(tsk)->pc)
-
 extern unsigned long get_wchan(struct task_struct *p);
 
 #define KSTK_EIP(tsk)          (task_pt_regs(tsk)->pc)
index 888e7801c6381edd8d995503643917b2f452282e..26b0810fb8eac14b0a39c27bdf53f2398c767fb3 100644 (file)
@@ -240,20 +240,21 @@ fallback:
        return bvl;
 }
 
-static void __bio_free(struct bio *bio)
+void bio_uninit(struct bio *bio)
 {
        bio_disassociate_task(bio);
 
        if (bio_integrity(bio))
                bio_integrity_free(bio);
 }
+EXPORT_SYMBOL(bio_uninit);
 
 static void bio_free(struct bio *bio)
 {
        struct bio_set *bs = bio->bi_pool;
        void *p;
 
-       __bio_free(bio);
+       bio_uninit(bio);
 
        if (bs) {
                bvec_free(bs->bvec_pool, bio->bi_io_vec, BVEC_POOL_IDX(bio));
@@ -271,6 +272,11 @@ static void bio_free(struct bio *bio)
        }
 }
 
+/*
+ * Users of this function have their own bio allocation. Subsequently,
+ * they must remember to pair any call to bio_init() with bio_uninit()
+ * when IO has completed, or when the bio is released.
+ */
 void bio_init(struct bio *bio, struct bio_vec *table,
              unsigned short max_vecs)
 {
@@ -297,7 +303,7 @@ void bio_reset(struct bio *bio)
 {
        unsigned long flags = bio->bi_flags & (~0UL << BIO_RESET_BITS);
 
-       __bio_free(bio);
+       bio_uninit(bio);
 
        memset(bio, 0, BIO_RESET_BYTES);
        bio->bi_flags = flags;
index c4a091e874269fd9ac79a025f9d37f250ac95520..e437fba1209d925cca7bf7f33b5651c3eeeda21a 100644 (file)
@@ -106,9 +106,10 @@ struct etnaviv_gem_submit {
        struct etnaviv_gpu *gpu;
        struct ww_acquire_ctx ticket;
        struct dma_fence *fence;
+       u32 flags;
        unsigned int nr_bos;
        struct etnaviv_gem_submit_bo bos[0];
-       u32 flags;
+       /* No new members here, the previous one is variable-length! */
 };
 
 int etnaviv_gem_wait_bo(struct etnaviv_gpu *gpu, struct drm_gem_object *obj,
index de80ee1b71dfa2e8380b6e74b2d8cc6ed4aa6f25..1013765274da4a4853c21b302dbc646dacba5760 100644 (file)
@@ -172,7 +172,7 @@ static int submit_fence_sync(const struct etnaviv_gem_submit *submit)
        for (i = 0; i < submit->nr_bos; i++) {
                struct etnaviv_gem_object *etnaviv_obj = submit->bos[i].obj;
                bool write = submit->bos[i].flags & ETNA_SUBMIT_BO_WRITE;
-               bool explicit = !(submit->flags & ETNA_SUBMIT_NO_IMPLICIT);
+               bool explicit = !!(submit->flags & ETNA_SUBMIT_NO_IMPLICIT);
 
                ret = etnaviv_gpu_fence_sync_obj(etnaviv_obj, context, write,
                                                 explicit);
index d689e511744e8f2fc9508e2d7345827c6a70bbb2..4bd1467c17b17c6225e27e05ccbde63444012f39 100644 (file)
@@ -292,6 +292,8 @@ static int per_file_stats(int id, void *ptr, void *data)
        struct file_stats *stats = data;
        struct i915_vma *vma;
 
+       lockdep_assert_held(&obj->base.dev->struct_mutex);
+
        stats->count++;
        stats->total += obj->base.size;
        if (!obj->bind_count)
@@ -476,6 +478,8 @@ static int i915_gem_object_info(struct seq_file *m, void *data)
                struct drm_i915_gem_request *request;
                struct task_struct *task;
 
+               mutex_lock(&dev->struct_mutex);
+
                memset(&stats, 0, sizeof(stats));
                stats.file_priv = file->driver_priv;
                spin_lock(&file->table_lock);
@@ -487,7 +491,6 @@ static int i915_gem_object_info(struct seq_file *m, void *data)
                 * still alive (e.g. get_pid(current) => fork() => exit()).
                 * Therefore, we need to protect this ->comm access using RCU.
                 */
-               mutex_lock(&dev->struct_mutex);
                request = list_first_entry_or_null(&file_priv->mm.request_list,
                                                   struct drm_i915_gem_request,
                                                   client_link);
@@ -497,6 +500,7 @@ static int i915_gem_object_info(struct seq_file *m, void *data)
                                PIDTYPE_PID);
                print_file_stats(m, task ? task->comm : "<unknown>", stats);
                rcu_read_unlock();
+
                mutex_unlock(&dev->struct_mutex);
        }
        mutex_unlock(&dev->filelist_mutex);
index a3e59c8ef27baf4f3584ff5016635d8005735af6..9ad13eeed904d4d012c3fe93f6124b5ed5884b04 100644 (file)
@@ -546,11 +546,12 @@ repeat:
 }
 
 static int
-i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
+i915_gem_execbuffer_relocate_entry(struct i915_vma *vma,
                                   struct eb_vmas *eb,
                                   struct drm_i915_gem_relocation_entry *reloc,
                                   struct reloc_cache *cache)
 {
+       struct drm_i915_gem_object *obj = vma->obj;
        struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
        struct drm_gem_object *target_obj;
        struct drm_i915_gem_object *target_i915_obj;
@@ -628,6 +629,16 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
                return -EINVAL;
        }
 
+       /*
+        * If we write into the object, we need to force the synchronisation
+        * barrier, either with an asynchronous clflush or if we executed the
+        * patching using the GPU (though that should be serialised by the
+        * timeline). To be completely sure, and since we are required to
+        * do relocations we are already stalling, disable the user's opt
+        * of our synchronisation.
+        */
+       vma->exec_entry->flags &= ~EXEC_OBJECT_ASYNC;
+
        ret = relocate_entry(obj, reloc, cache, target_offset);
        if (ret)
                return ret;
@@ -678,7 +689,7 @@ i915_gem_execbuffer_relocate_vma(struct i915_vma *vma,
                do {
                        u64 offset = r->presumed_offset;
 
-                       ret = i915_gem_execbuffer_relocate_entry(vma->obj, eb, r, &cache);
+                       ret = i915_gem_execbuffer_relocate_entry(vma, eb, r, &cache);
                        if (ret)
                                goto out;
 
@@ -726,7 +737,7 @@ i915_gem_execbuffer_relocate_vma_slow(struct i915_vma *vma,
 
        reloc_cache_init(&cache, eb->i915);
        for (i = 0; i < entry->relocation_count; i++) {
-               ret = i915_gem_execbuffer_relocate_entry(vma->obj, eb, &relocs[i], &cache);
+               ret = i915_gem_execbuffer_relocate_entry(vma, eb, &relocs[i], &cache);
                if (ret)
                        break;
        }
index 1aba47024656817190168984f1d076ceea710e9b..f066e2d785f5c9d30fbe544b3caec698a3b4f8d1 100644 (file)
@@ -650,6 +650,11 @@ int i915_vma_unbind(struct i915_vma *vma)
                                break;
                }
 
+               if (!ret) {
+                       ret = i915_gem_active_retire(&vma->last_fence,
+                                                    &vma->vm->i915->drm.struct_mutex);
+               }
+
                __i915_vma_unpin(vma);
                if (ret)
                        return ret;
index 13db8a2851edd475cc1e44adedd44796c3ccbca1..1f013d45c9e9a3959dfa19300ba76fc37820592a 100644 (file)
@@ -321,6 +321,7 @@ void vmw_cmdbuf_res_man_destroy(struct vmw_cmdbuf_res_manager *man)
        list_for_each_entry_safe(entry, next, &man->list, head)
                vmw_cmdbuf_res_free(man, entry);
 
+       drm_ht_remove(&man->resources);
        kfree(man);
 }
 
index 63cacf5d6cf23ab5611ba4219207a47c00662226..0f1219fa85617bc72b39c8f450ee4edfed2f364f 100644 (file)
@@ -3879,11 +3879,9 @@ static void irte_ga_prepare(void *entry,
                            u8 vector, u32 dest_apicid, int devid)
 {
        struct irte_ga *irte = (struct irte_ga *) entry;
-       struct iommu_dev_data *dev_data = search_dev_data(devid);
 
        irte->lo.val                      = 0;
        irte->hi.val                      = 0;
-       irte->lo.fields_remap.guest_mode  = dev_data ? dev_data->use_vapic : 0;
        irte->lo.fields_remap.int_type    = delivery_mode;
        irte->lo.fields_remap.dm          = dest_mode;
        irte->hi.fields.vector            = vector;
@@ -3939,10 +3937,10 @@ static void irte_ga_set_affinity(void *entry, u16 devid, u16 index,
        struct irte_ga *irte = (struct irte_ga *) entry;
        struct iommu_dev_data *dev_data = search_dev_data(devid);
 
-       if (!dev_data || !dev_data->use_vapic) {
+       if (!dev_data || !dev_data->use_vapic ||
+           !irte->lo.fields_remap.guest_mode) {
                irte->hi.fields.vector = vector;
                irte->lo.fields_remap.destination = dest_apicid;
-               irte->lo.fields_remap.guest_mode = 0;
                modify_irte_ga(devid, index, irte, NULL);
        }
 }
index 7d893228c40f50dd7d0017fca004b504fa27f567..b4b75dad816ad95c0028f1b64b2be73e8993caac 100644 (file)
@@ -1927,7 +1927,7 @@ struct dm_raid_superblock {
        /********************************************************************
         * BELOW FOLLOW V1.9.0 EXTENSIONS TO THE PRISTINE SUPERBLOCK FORMAT!!!
         *
-        * FEATURE_FLAG_SUPPORTS_V190 in the features member indicates that those exist
+        * FEATURE_FLAG_SUPPORTS_V190 in the compat_features member indicates that those exist
         */
 
        __le32 flags; /* Flags defining array states for reshaping */
@@ -2092,6 +2092,11 @@ static void super_sync(struct mddev *mddev, struct md_rdev *rdev)
        sb->layout = cpu_to_le32(mddev->layout);
        sb->stripe_sectors = cpu_to_le32(mddev->chunk_sectors);
 
+       /********************************************************************
+        * BELOW FOLLOW V1.9.0 EXTENSIONS TO THE PRISTINE SUPERBLOCK FORMAT!!!
+        *
+        * FEATURE_FLAG_SUPPORTS_V190 in the compat_features member indicates that those exist
+        */
        sb->new_level = cpu_to_le32(mddev->new_level);
        sb->new_layout = cpu_to_le32(mddev->new_layout);
        sb->new_stripe_sectors = cpu_to_le32(mddev->new_chunk_sectors);
@@ -2438,8 +2443,14 @@ static int super_validate(struct raid_set *rs, struct md_rdev *rdev)
        mddev->bitmap_info.default_offset = mddev->bitmap_info.offset;
 
        if (!test_and_clear_bit(FirstUse, &rdev->flags)) {
-               /* Retrieve device size stored in superblock to be prepared for shrink */
-               rdev->sectors = le64_to_cpu(sb->sectors);
+               /*
+                * Retrieve rdev size stored in superblock to be prepared for shrink.
+                * Check extended superblock members are present otherwise the size
+                * will not be set!
+                */
+               if (le32_to_cpu(sb->compat_features) & FEATURE_FLAG_SUPPORTS_V190)
+                       rdev->sectors = le64_to_cpu(sb->sectors);
+
                rdev->recovery_offset = le64_to_cpu(sb->disk_recovery_offset);
                if (rdev->recovery_offset == MaxSector)
                        set_bit(In_sync, &rdev->flags);
index 17ad50daed08ef5022b8648ef2e8701208c85a9d..28808e5ec0fd68346609eb33ac552359f5cff760 100644 (file)
@@ -1094,6 +1094,19 @@ static void process_prepared_discard_passdown_pt1(struct dm_thin_new_mapping *m)
                return;
        }
 
+       /*
+        * Increment the unmapped blocks.  This prevents a race between the
+        * passdown io and reallocation of freed blocks.
+        */
+       r = dm_pool_inc_data_range(pool->pmd, m->data_block, data_end);
+       if (r) {
+               metadata_operation_failed(pool, "dm_pool_inc_data_range", r);
+               bio_io_error(m->bio);
+               cell_defer_no_holder(tc, m->cell);
+               mempool_free(m, pool->mapping_pool);
+               return;
+       }
+
        discard_parent = bio_alloc(GFP_NOIO, 1);
        if (!discard_parent) {
                DMWARN("%s: unable to allocate top level discard bio for passdown. Skipping passdown.",
@@ -1114,19 +1127,6 @@ static void process_prepared_discard_passdown_pt1(struct dm_thin_new_mapping *m)
                        end_discard(&op, r);
                }
        }
-
-       /*
-        * Increment the unmapped blocks.  This prevents a race between the
-        * passdown io and reallocation of freed blocks.
-        */
-       r = dm_pool_inc_data_range(pool->pmd, m->data_block, data_end);
-       if (r) {
-               metadata_operation_failed(pool, "dm_pool_inc_data_range", r);
-               bio_io_error(m->bio);
-               cell_defer_no_holder(tc, m->cell);
-               mempool_free(m, pool->mapping_pool);
-               return;
-       }
 }
 
 static void process_prepared_discard_passdown_pt2(struct dm_thin_new_mapping *m)
index 4472ce11f98d74e03fb50c5bab8302274535d423..8c32040b9c09f988274ac8cc9354e4b56419787f 100644 (file)
@@ -45,7 +45,7 @@ int cxl_context_init(struct cxl_context *ctx, struct cxl_afu *afu, bool master)
        mutex_init(&ctx->mapping_lock);
        ctx->mapping = NULL;
 
-       if (cxl_is_psl8(afu)) {
+       if (cxl_is_power8()) {
                spin_lock_init(&ctx->sste_lock);
 
                /*
@@ -189,7 +189,7 @@ int cxl_context_iomap(struct cxl_context *ctx, struct vm_area_struct *vma)
                if (start + len > ctx->afu->adapter->ps_size)
                        return -EINVAL;
 
-               if (cxl_is_psl9(ctx->afu)) {
+               if (cxl_is_power9()) {
                        /*
                         * Make sure there is a valid problem state
                         * area space for this AFU.
@@ -324,7 +324,7 @@ static void reclaim_ctx(struct rcu_head *rcu)
 {
        struct cxl_context *ctx = container_of(rcu, struct cxl_context, rcu);
 
-       if (cxl_is_psl8(ctx->afu))
+       if (cxl_is_power8())
                free_page((u64)ctx->sstp);
        if (ctx->ff_page)
                __free_page(ctx->ff_page);
index c8568ea7c5186745dd36774ec16cf909c4b6f996..a03f8e7535e58fe77d899b7a40e86e780792f50c 100644 (file)
@@ -357,6 +357,7 @@ static const cxl_p2n_reg_t CXL_PSL_WED_An     = {0x0A0};
 #define CXL_PSL9_DSISR_An_PF_RGP  0x0000000000000090ULL  /* PTE not found (Radix Guest (parent)) 0b10010000 */
 #define CXL_PSL9_DSISR_An_PF_HRH  0x0000000000000094ULL  /* PTE not found (HPT/Radix Host)       0b10010100 */
 #define CXL_PSL9_DSISR_An_PF_STEG 0x000000000000009CULL  /* PTE not found (STEG VA)              0b10011100 */
+#define CXL_PSL9_DSISR_An_URTCH   0x00000000000000B4ULL  /* Unsupported Radix Tree Configuration 0b10110100 */
 
 /****** CXL_PSL_TFC_An ******************************************************/
 #define CXL_PSL_TFC_An_A  (1ull << (63-28)) /* Acknowledge non-translation fault */
@@ -844,24 +845,15 @@ static inline bool cxl_is_power8(void)
 
 static inline bool cxl_is_power9(void)
 {
-       /* intermediate solution */
-       if (!cxl_is_power8() &&
-          (cpu_has_feature(CPU_FTRS_POWER9) ||
-           cpu_has_feature(CPU_FTR_POWER9_DD1)))
+       if (pvr_version_is(PVR_POWER9))
                return true;
        return false;
 }
 
-static inline bool cxl_is_psl8(struct cxl_afu *afu)
+static inline bool cxl_is_power9_dd1(void)
 {
-       if (afu->adapter->caia_major == 1)
-               return true;
-       return false;
-}
-
-static inline bool cxl_is_psl9(struct cxl_afu *afu)
-{
-       if (afu->adapter->caia_major == 2)
+       if ((pvr_version_is(PVR_POWER9)) &&
+           cpu_has_feature(CPU_FTR_POWER9_DD1))
                return true;
        return false;
 }
index 5344448f514e16ac7b8b50a68b6173461a66783c..c79e39bad7a42673ed0a2273236519c654120f57 100644 (file)
@@ -187,7 +187,7 @@ static struct mm_struct *get_mem_context(struct cxl_context *ctx)
 
 static bool cxl_is_segment_miss(struct cxl_context *ctx, u64 dsisr)
 {
-       if ((cxl_is_psl8(ctx->afu)) && (dsisr & CXL_PSL_DSISR_An_DS))
+       if ((cxl_is_power8() && (dsisr & CXL_PSL_DSISR_An_DS)))
                return true;
 
        return false;
@@ -195,16 +195,23 @@ static bool cxl_is_segment_miss(struct cxl_context *ctx, u64 dsisr)
 
 static bool cxl_is_page_fault(struct cxl_context *ctx, u64 dsisr)
 {
-       if ((cxl_is_psl8(ctx->afu)) && (dsisr & CXL_PSL_DSISR_An_DM))
-               return true;
+       u64 crs; /* Translation Checkout Response Status */
 
-       if ((cxl_is_psl9(ctx->afu)) &&
-          ((dsisr & CXL_PSL9_DSISR_An_CO_MASK) &
-               (CXL_PSL9_DSISR_An_PF_SLR | CXL_PSL9_DSISR_An_PF_RGC |
-                CXL_PSL9_DSISR_An_PF_RGP | CXL_PSL9_DSISR_An_PF_HRH |
-                CXL_PSL9_DSISR_An_PF_STEG)))
+       if ((cxl_is_power8()) && (dsisr & CXL_PSL_DSISR_An_DM))
                return true;
 
+       if (cxl_is_power9()) {
+               crs = (dsisr & CXL_PSL9_DSISR_An_CO_MASK);
+               if ((crs == CXL_PSL9_DSISR_An_PF_SLR) ||
+                   (crs == CXL_PSL9_DSISR_An_PF_RGC) ||
+                   (crs == CXL_PSL9_DSISR_An_PF_RGP) ||
+                   (crs == CXL_PSL9_DSISR_An_PF_HRH) ||
+                   (crs == CXL_PSL9_DSISR_An_PF_STEG) ||
+                   (crs == CXL_PSL9_DSISR_An_URTCH)) {
+                       return true;
+               }
+       }
+
        return false;
 }
 
index 1703655072b1ed312fc9970d0f71eb5fb75baf46..c1ba0d42cbc865467334c25a0b62a3f1f8932a21 100644 (file)
@@ -329,8 +329,15 @@ static int __init init_cxl(void)
 
        cxl_debugfs_init();
 
-       if ((rc = register_cxl_calls(&cxl_calls)))
-               goto err;
+       /*
+        * we don't register the callback on P9. slb callback is only
+        * used for the PSL8 MMU and CX4.
+        */
+       if (cxl_is_power8()) {
+               rc = register_cxl_calls(&cxl_calls);
+               if (rc)
+                       goto err;
+       }
 
        if (cpu_has_feature(CPU_FTR_HVMODE)) {
                cxl_ops = &cxl_native_ops;
@@ -347,7 +354,8 @@ static int __init init_cxl(void)
 
        return 0;
 err1:
-       unregister_cxl_calls(&cxl_calls);
+       if (cxl_is_power8())
+               unregister_cxl_calls(&cxl_calls);
 err:
        cxl_debugfs_exit();
        cxl_file_exit();
@@ -366,7 +374,8 @@ static void exit_cxl(void)
 
        cxl_debugfs_exit();
        cxl_file_exit();
-       unregister_cxl_calls(&cxl_calls);
+       if (cxl_is_power8())
+               unregister_cxl_calls(&cxl_calls);
        idr_destroy(&cxl_adapter_idr);
 }
 
index 8d6ea9712dbd1830fcdc5d6eecda3d28d69a9376..2b2f8894149df307b04a8cd17b2309873e962fc3 100644 (file)
@@ -105,11 +105,16 @@ static int native_afu_reset(struct cxl_afu *afu)
                           CXL_AFU_Cntl_An_RS_MASK | CXL_AFU_Cntl_An_ES_MASK,
                           false);
 
-       /* Re-enable any masked interrupts */
-       serr = cxl_p1n_read(afu, CXL_PSL_SERR_An);
-       serr &= ~CXL_PSL_SERR_An_IRQ_MASKS;
-       cxl_p1n_write(afu, CXL_PSL_SERR_An, serr);
-
+       /*
+        * Re-enable any masked interrupts when the AFU is not
+        * activated to avoid side effects after attaching a process
+        * in dedicated mode.
+        */
+       if (afu->current_mode == 0) {
+               serr = cxl_p1n_read(afu, CXL_PSL_SERR_An);
+               serr &= ~CXL_PSL_SERR_An_IRQ_MASKS;
+               cxl_p1n_write(afu, CXL_PSL_SERR_An, serr);
+       }
 
        return rc;
 }
@@ -139,9 +144,9 @@ int cxl_psl_purge(struct cxl_afu *afu)
 
        pr_devel("PSL purge request\n");
 
-       if (cxl_is_psl8(afu))
+       if (cxl_is_power8())
                trans_fault = CXL_PSL_DSISR_TRANS;
-       if (cxl_is_psl9(afu))
+       if (cxl_is_power9())
                trans_fault = CXL_PSL9_DSISR_An_TF;
 
        if (!cxl_ops->link_ok(afu->adapter, afu)) {
@@ -603,7 +608,7 @@ static u64 calculate_sr(struct cxl_context *ctx)
                if (!test_tsk_thread_flag(current, TIF_32BIT))
                        sr |= CXL_PSL_SR_An_SF;
        }
-       if (cxl_is_psl9(ctx->afu)) {
+       if (cxl_is_power9()) {
                if (radix_enabled())
                        sr |= CXL_PSL_SR_An_XLAT_ror;
                else
@@ -1117,10 +1122,10 @@ static irqreturn_t native_handle_psl_slice_error(struct cxl_context *ctx,
 
 static bool cxl_is_translation_fault(struct cxl_afu *afu, u64 dsisr)
 {
-       if ((cxl_is_psl8(afu)) && (dsisr & CXL_PSL_DSISR_TRANS))
+       if ((cxl_is_power8()) && (dsisr & CXL_PSL_DSISR_TRANS))
                return true;
 
-       if ((cxl_is_psl9(afu)) && (dsisr & CXL_PSL9_DSISR_An_TF))
+       if ((cxl_is_power9()) && (dsisr & CXL_PSL9_DSISR_An_TF))
                return true;
 
        return false;
@@ -1194,10 +1199,10 @@ static void native_irq_wait(struct cxl_context *ctx)
                if (ph != ctx->pe)
                        return;
                dsisr = cxl_p2n_read(ctx->afu, CXL_PSL_DSISR_An);
-               if (cxl_is_psl8(ctx->afu) &&
+               if (cxl_is_power8() &&
                   ((dsisr & CXL_PSL_DSISR_PENDING) == 0))
                        return;
-               if (cxl_is_psl9(ctx->afu) &&
+               if (cxl_is_power9() &&
                   ((dsisr & CXL_PSL9_DSISR_PENDING) == 0))
                        return;
                /*
index 6dc1ee5b92c97121c86cbe12f0f46dab77a7c957..1eb9859809bff6ae367f849afc76fc21d8fc844d 100644 (file)
@@ -436,7 +436,7 @@ static int init_implementation_adapter_regs_psl9(struct cxl *adapter, struct pci
        /* nMMU_ID Defaults to: b’000001001’*/
        xsl_dsnctl |= ((u64)0x09 << (63-28));
 
-       if (cxl_is_power9() && !cpu_has_feature(CPU_FTR_POWER9_DD1)) {
+       if (!(cxl_is_power9_dd1())) {
                /*
                 * Used to identify CAPI packets which should be sorted into
                 * the Non-Blocking queues by the PHB. This field should match
@@ -491,7 +491,7 @@ static int init_implementation_adapter_regs_psl9(struct cxl *adapter, struct pci
        cxl_p1_write(adapter, CXL_PSL9_APCDEDTYPE, 0x40000003FFFF0000ULL);
 
        /* Disable vc dd1 fix */
-       if ((cxl_is_power9() && cpu_has_feature(CPU_FTR_POWER9_DD1)))
+       if (cxl_is_power9_dd1())
                cxl_p1_write(adapter, CXL_PSL9_GP_CT, 0x0400000000000001ULL);
 
        return 0;
@@ -1439,8 +1439,7 @@ int cxl_pci_reset(struct cxl *adapter)
         * The adapter is about to be reset, so ignore errors.
         * Not supported on P9 DD1
         */
-       if ((cxl_is_power8()) ||
-           ((cxl_is_power9() && !cpu_has_feature(CPU_FTR_POWER9_DD1))))
+       if ((cxl_is_power8()) || (!(cxl_is_power9_dd1())))
                cxl_data_cache_flush(adapter);
 
        /* pcie_warm_reset requests a fundamental pci reset which includes a
@@ -1750,7 +1749,6 @@ static const struct cxl_service_layer_ops psl9_ops = {
        .debugfs_add_adapter_regs = cxl_debugfs_add_adapter_regs_psl9,
        .debugfs_add_afu_regs = cxl_debugfs_add_afu_regs_psl9,
        .psl_irq_dump_registers = cxl_native_irq_dump_regs_psl9,
-       .err_irq_dump_registers = cxl_native_err_irq_dump_regs,
        .debugfs_stop_trace = cxl_stop_trace_psl9,
        .write_timebase_ctrl = write_timebase_ctrl_psl9,
        .timebase_read = timebase_read_psl9,
@@ -1889,8 +1887,7 @@ static void cxl_pci_remove_adapter(struct cxl *adapter)
         * Flush adapter datacache as its about to be removed.
         * Not supported on P9 DD1.
         */
-       if ((cxl_is_power8()) ||
-           ((cxl_is_power9() && !cpu_has_feature(CPU_FTR_POWER9_DD1))))
+       if ((cxl_is_power8()) || (!(cxl_is_power9_dd1())))
                cxl_data_cache_flush(adapter);
 
        cxl_deconfigure_adapter(adapter);
index 62ee439d58829574d732e84b8afba738871a43e5..53a1cb551defabe29e763e578632c33ac851547f 100644 (file)
@@ -756,6 +756,7 @@ irqreturn_t arcnet_interrupt(int irq, void *dev_id)
        struct net_device *dev = dev_id;
        struct arcnet_local *lp;
        int recbuf, status, diagstatus, didsomething, boguscount;
+       unsigned long flags;
        int retval = IRQ_NONE;
 
        arc_printk(D_DURING, dev, "\n");
@@ -765,7 +766,7 @@ irqreturn_t arcnet_interrupt(int irq, void *dev_id)
        lp = netdev_priv(dev);
        BUG_ON(!lp);
 
-       spin_lock(&lp->lock);
+       spin_lock_irqsave(&lp->lock, flags);
 
        /* RESET flag was enabled - if device is not running, we must
         * clear it right away (but nothing else).
@@ -774,7 +775,7 @@ irqreturn_t arcnet_interrupt(int irq, void *dev_id)
                if (lp->hw.status(dev) & RESETflag)
                        lp->hw.command(dev, CFLAGScmd | RESETclear);
                lp->hw.intmask(dev, 0);
-               spin_unlock(&lp->lock);
+               spin_unlock_irqrestore(&lp->lock, flags);
                return retval;
        }
 
@@ -998,7 +999,7 @@ irqreturn_t arcnet_interrupt(int irq, void *dev_id)
        udelay(1);
        lp->hw.intmask(dev, lp->intmask);
 
-       spin_unlock(&lp->lock);
+       spin_unlock_irqrestore(&lp->lock, flags);
        return retval;
 }
 EXPORT_SYMBOL(arcnet_interrupt);
index 2056878fb087d6d3bc3bf6564220065f30322283..4fa2e46b48d3e561e3883fdc1a023493bc39ed30 100644 (file)
@@ -212,7 +212,7 @@ static int ack_tx(struct net_device *dev, int acked)
        ackpkt->soft.cap.proto = 0; /* using protocol 0 for acknowledge */
        ackpkt->soft.cap.mes.ack = acked;
 
-       arc_printk(D_PROTO, dev, "Ackknowledge for cap packet %x.\n",
+       arc_printk(D_PROTO, dev, "Acknowledge for cap packet %x.\n",
                   *((int *)&ackpkt->soft.cap.cookie[0]));
 
        ackskb->protocol = cpu_to_be16(ETH_P_ARCNET);
index 239de38fbd6a588bbb0e90e3452ea60ca1e5a161..47f80b83dcf42a47666f05a688a680b7690251cf 100644 (file)
@@ -135,6 +135,7 @@ static int com20020pci_probe(struct pci_dev *pdev,
        for (i = 0; i < ci->devcount; i++) {
                struct com20020_pci_channel_map *cm = &ci->chan_map_tbl[i];
                struct com20020_dev *card;
+               int dev_id_mask = 0xf;
 
                dev = alloc_arcdev(device);
                if (!dev) {
@@ -166,6 +167,7 @@ static int com20020pci_probe(struct pci_dev *pdev,
                arcnet_outb(0x00, ioaddr, COM20020_REG_W_COMMAND);
                arcnet_inb(ioaddr, COM20020_REG_R_DIAGSTAT);
 
+               SET_NETDEV_DEV(dev, &pdev->dev);
                dev->base_addr = ioaddr;
                dev->dev_addr[0] = node;
                dev->irq = pdev->irq;
@@ -179,8 +181,8 @@ static int com20020pci_probe(struct pci_dev *pdev,
 
                /* Get the dev_id from the PLX rotary coder */
                if (!strncmp(ci->name, "EAE PLX-PCI MA1", 15))
-                       dev->dev_id = 0xc;
-               dev->dev_id ^= inb(priv->misc + ci->rotary) >> 4;
+                       dev_id_mask = 0x3;
+               dev->dev_id = (inb(priv->misc + ci->rotary) >> 4) & dev_id_mask;
 
                snprintf(dev->name, sizeof(dev->name), "arc%d-%d", dev->dev_id, i);
 
index 13d9ad4b3f5c977e99f3ac2f38d3f244de3ae203..78043a9c5981e5a0b8d562879de0f77e0763aa75 100644 (file)
@@ -246,8 +246,6 @@ int com20020_found(struct net_device *dev, int shared)
                return -ENODEV;
        }
 
-       dev->base_addr = ioaddr;
-
        arc_printk(D_NORMAL, dev, "%s: station %02Xh found at %03lXh, IRQ %d.\n",
                   lp->card_name, dev->dev_addr[0], dev->base_addr, dev->irq);
 
index a851f95c307a3331a889972bc3c60273219f8a7b..349a46593abff6e4ea80ddd3a79301dac9cca517 100644 (file)
@@ -12729,7 +12729,7 @@ static int bnx2x_set_mc_list(struct bnx2x *bp)
        } else {
                /* If no mc addresses are required, flush the configuration */
                rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
-               if (rc)
+               if (rc < 0)
                        BNX2X_ERR("Failed to clear multicast configuration %d\n",
                                  rc);
        }
index 03f55daecb20b70bec8924eb64f2146a90591590..74e8e215524d72029a4a48dfc18897a9bf369f6b 100644 (file)
@@ -1301,10 +1301,11 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
                cp_cons = NEXT_CMP(cp_cons);
        }
 
-       if (unlikely(agg_bufs > MAX_SKB_FRAGS)) {
+       if (unlikely(agg_bufs > MAX_SKB_FRAGS || TPA_END_ERRORS(tpa_end1))) {
                bnxt_abort_tpa(bp, bnapi, cp_cons, agg_bufs);
-               netdev_warn(bp->dev, "TPA frags %d exceeded MAX_SKB_FRAGS %d\n",
-                           agg_bufs, (int)MAX_SKB_FRAGS);
+               if (agg_bufs > MAX_SKB_FRAGS)
+                       netdev_warn(bp->dev, "TPA frags %d exceeded MAX_SKB_FRAGS %d\n",
+                                   agg_bufs, (int)MAX_SKB_FRAGS);
                return NULL;
        }
 
@@ -1562,6 +1563,45 @@ next_rx_no_prod:
        return rc;
 }
 
+/* In netpoll mode, if we are using a combined completion ring, we need to
+ * discard the rx packets and recycle the buffers.
+ */
+static int bnxt_force_rx_discard(struct bnxt *bp, struct bnxt_napi *bnapi,
+                                u32 *raw_cons, u8 *event)
+{
+       struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
+       u32 tmp_raw_cons = *raw_cons;
+       struct rx_cmp_ext *rxcmp1;
+       struct rx_cmp *rxcmp;
+       u16 cp_cons;
+       u8 cmp_type;
+
+       cp_cons = RING_CMP(tmp_raw_cons);
+       rxcmp = (struct rx_cmp *)
+                       &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
+
+       tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons);
+       cp_cons = RING_CMP(tmp_raw_cons);
+       rxcmp1 = (struct rx_cmp_ext *)
+                       &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
+
+       if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
+               return -EBUSY;
+
+       cmp_type = RX_CMP_TYPE(rxcmp);
+       if (cmp_type == CMP_TYPE_RX_L2_CMP) {
+               rxcmp1->rx_cmp_cfa_code_errors_v2 |=
+                       cpu_to_le32(RX_CMPL_ERRORS_CRC_ERROR);
+       } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
+               struct rx_tpa_end_cmp_ext *tpa_end1;
+
+               tpa_end1 = (struct rx_tpa_end_cmp_ext *)rxcmp1;
+               tpa_end1->rx_tpa_end_cmp_errors_v2 |=
+                       cpu_to_le32(RX_TPA_END_CMP_ERRORS);
+       }
+       return bnxt_rx_pkt(bp, bnapi, raw_cons, event);
+}
+
 #define BNXT_GET_EVENT_PORT(data)      \
        ((data) &                       \
         ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_MASK)
@@ -1744,7 +1784,11 @@ static int bnxt_poll_work(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
                        if (unlikely(tx_pkts > bp->tx_wake_thresh))
                                rx_pkts = budget;
                } else if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) {
-                       rc = bnxt_rx_pkt(bp, bnapi, &raw_cons, &event);
+                       if (likely(budget))
+                               rc = bnxt_rx_pkt(bp, bnapi, &raw_cons, &event);
+                       else
+                               rc = bnxt_force_rx_discard(bp, bnapi, &raw_cons,
+                                                          &event);
                        if (likely(rc >= 0))
                                rx_pkts += rc;
                        else if (rc == -EBUSY)  /* partial completion */
@@ -6663,12 +6707,11 @@ static void bnxt_poll_controller(struct net_device *dev)
        struct bnxt *bp = netdev_priv(dev);
        int i;
 
-       for (i = 0; i < bp->cp_nr_rings; i++) {
-               struct bnxt_irq *irq = &bp->irq_tbl[i];
+       /* Only process tx rings/combined rings in netpoll mode. */
+       for (i = 0; i < bp->tx_nr_rings; i++) {
+               struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
 
-               disable_irq(irq->vector);
-               irq->handler(irq->vector, bp->bnapi[i]);
-               enable_irq(irq->vector);
+               napi_schedule(&txr->bnapi->napi);
        }
 }
 #endif
index 3ef42dbc63273f756bb7e52fe7da65ba1968eea4..d46a85041083c3708860b71cb661afe9123028d0 100644 (file)
@@ -374,12 +374,16 @@ struct rx_tpa_end_cmp_ext {
 
        __le32 rx_tpa_end_cmp_errors_v2;
        #define RX_TPA_END_CMP_V2                               (0x1 << 0)
-       #define RX_TPA_END_CMP_ERRORS                           (0x7fff << 1)
+       #define RX_TPA_END_CMP_ERRORS                           (0x3 << 1)
        #define RX_TPA_END_CMPL_ERRORS_SHIFT                     1
 
        u32 rx_tpa_end_cmp_start_opaque;
 };
 
+#define TPA_END_ERRORS(rx_tpa_end_ext)                                 \
+       ((rx_tpa_end_ext)->rx_tpa_end_cmp_errors_v2 &                   \
+        cpu_to_le32(RX_TPA_END_CMP_ERRORS))
+
 #define DB_IDX_MASK                                            0xffffff
 #define DB_IDX_VALID                                           (0x1 << 26)
 #define DB_IRQ_DIS                                             (0x1 << 27)
index dc0850b3b517b9b02e3cd9a42cf98425a55d0df3..8870a9a798ca4e0245e6b05ac8d4ee2cf8499b46 100644 (file)
@@ -2,6 +2,7 @@ config FSL_FMAN
        tristate "FMan support"
        depends on FSL_SOC || ARCH_LAYERSCAPE || COMPILE_TEST
        select GENERIC_ALLOCATOR
+       depends on HAS_DMA
        select PHYLIB
        default n
        help
index 9f89c4137d2137f78bcda8f79a918b8db61a5189..0744452a0b188190a1da2fe0e14651aad31748de 100644 (file)
@@ -3334,6 +3334,9 @@ static int mlxsw_sp_inetaddr_vlan_event(struct net_device *vlan_dev,
        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(vlan_dev);
        u16 vid = vlan_dev_vlan_id(vlan_dev);
 
+       if (netif_is_bridge_port(vlan_dev))
+               return 0;
+
        if (mlxsw_sp_port_dev_check(real_dev))
                return mlxsw_sp_inetaddr_vport_event(vlan_dev, real_dev, event,
                                                     vid);
index 2ae85245478087d2d640617bd79bfbfabd5f0763..a9ce82d3e9cf4b8875474cd62f02ccbfc322bc20 100644 (file)
@@ -1505,8 +1505,8 @@ static int ofdpa_port_ipv4_nh(struct ofdpa_port *ofdpa_port,
                *index = entry->index;
                resolved = false;
        } else if (removing) {
-               ofdpa_neigh_del(trans, found);
                *index = found->index;
+               ofdpa_neigh_del(trans, found);
        } else if (updating) {
                ofdpa_neigh_update(found, trans, NULL, false);
                resolved = !is_zero_ether_addr(found->eth_dst);
index 78efb2822b8648c6e6f02ffa764aad89570ffbd0..78f9e43420e0df217d8f118876831e54c204f191 100644 (file)
@@ -4172,7 +4172,7 @@ found:
         * recipients
         */
        if (is_mc_recip) {
-               MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_IN_LEN);
+               MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_EXT_IN_LEN);
                unsigned int depth, i;
 
                memset(inbuf, 0, sizeof(inbuf));
@@ -4320,7 +4320,7 @@ static int efx_ef10_filter_remove_internal(struct efx_nic *efx,
                        efx_ef10_filter_set_entry(table, filter_idx, NULL, 0);
                } else {
                        efx_mcdi_display_error(efx, MC_CMD_FILTER_OP,
-                                              MC_CMD_FILTER_OP_IN_LEN,
+                                              MC_CMD_FILTER_OP_EXT_IN_LEN,
                                               NULL, 0, rc);
                }
        }
@@ -4453,7 +4453,7 @@ static s32 efx_ef10_filter_rfs_insert(struct efx_nic *efx,
                                      struct efx_filter_spec *spec)
 {
        struct efx_ef10_filter_table *table = efx->filter_state;
-       MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_IN_LEN);
+       MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_EXT_IN_LEN);
        struct efx_filter_spec *saved_spec;
        unsigned int hash, i, depth = 1;
        bool replacing = false;
@@ -4940,7 +4940,7 @@ not_restored:
 static void efx_ef10_filter_table_remove(struct efx_nic *efx)
 {
        struct efx_ef10_filter_table *table = efx->filter_state;
-       MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_IN_LEN);
+       MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_EXT_IN_LEN);
        struct efx_filter_spec *spec;
        unsigned int filter_idx;
        int rc;
@@ -5105,6 +5105,7 @@ static int efx_ef10_filter_insert_addr_list(struct efx_nic *efx,
 
        /* Insert/renew filters */
        for (i = 0; i < addr_count; i++) {
+               EFX_WARN_ON_PARANOID(ids[i] != EFX_EF10_FILTER_ID_INVALID);
                efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, filter_flags, 0);
                efx_filter_set_eth_local(&spec, vlan->vid, addr_list[i].addr);
                rc = efx_ef10_filter_insert(efx, &spec, true);
@@ -5122,11 +5123,11 @@ static int efx_ef10_filter_insert_addr_list(struct efx_nic *efx,
                                }
                                return rc;
                        } else {
-                               /* mark as not inserted, and carry on */
-                               rc = EFX_EF10_FILTER_ID_INVALID;
+                               /* keep invalid ID, and carry on */
                        }
+               } else {
+                       ids[i] = efx_ef10_filter_get_unsafe_id(rc);
                }
-               ids[i] = efx_ef10_filter_get_unsafe_id(rc);
        }
 
        if (multicast && rollback) {
index 1562ab4151e192a079fc2a54dec7f8c101bcd109..56ba411421f0a77bae5b4568fb273464472741f5 100644 (file)
@@ -90,7 +90,7 @@ int ti_cm_get_macid(struct device *dev, int slave, u8 *mac_addr)
        if (of_device_is_compatible(dev->of_node, "ti,dm816-emac"))
                return cpsw_am33xx_cm_get_macid(dev, 0x30, slave, mac_addr);
 
-       if (of_machine_is_compatible("ti,am4372"))
+       if (of_machine_is_compatible("ti,am43"))
                return cpsw_am33xx_cm_get_macid(dev, 0x630, slave, mac_addr);
 
        if (of_machine_is_compatible("ti,dra7"))
index 82d6c022ca859735c412eadad487556eb34b6f33..643c539a08badf1c723ca3460003293395945db5 100644 (file)
@@ -776,7 +776,7 @@ static int netvsc_set_channels(struct net_device *net,
            channels->rx_count || channels->tx_count || channels->other_count)
                return -EINVAL;
 
-       if (count > net->num_tx_queues || count > net->num_rx_queues)
+       if (count > net->num_tx_queues || count > VRSS_CHANNEL_MAX)
                return -EINVAL;
 
        if (!nvdev || nvdev->destroy)
@@ -1203,7 +1203,7 @@ static int netvsc_set_rxfh(struct net_device *dev, const u32 *indir,
        rndis_dev = ndev->extension;
        if (indir) {
                for (i = 0; i < ITAB_NUM; i++)
-                       if (indir[i] >= dev->num_rx_queues)
+                       if (indir[i] >= VRSS_CHANNEL_MAX)
                                return -EINVAL;
 
                for (i = 0; i < ITAB_NUM; i++)
index 67bf7ebae5c6dba9b9ea28c0c9c0801093f55eb7..72b801803aa4d450328a5df8c4c782fbb1d5e4cc 100644 (file)
 #define MACVLAN_HASH_SIZE      (1<<MACVLAN_HASH_BITS)
 #define MACVLAN_BC_QUEUE_LEN   1000
 
+#define MACVLAN_F_PASSTHRU     1
+#define MACVLAN_F_ADDRCHANGE   2
+
 struct macvlan_port {
        struct net_device       *dev;
        struct hlist_head       vlan_hash[MACVLAN_HASH_SIZE];
        struct list_head        vlans;
        struct sk_buff_head     bc_queue;
        struct work_struct      bc_work;
-       bool                    passthru;
+       u32                     flags;
        int                     count;
        struct hlist_head       vlan_source_hash[MACVLAN_HASH_SIZE];
        DECLARE_BITMAP(mc_filter, MACVLAN_MC_FILTER_SZ);
+       unsigned char           perm_addr[ETH_ALEN];
 };
 
 struct macvlan_source_entry {
@@ -66,6 +70,31 @@ struct macvlan_skb_cb {
 
 static void macvlan_port_destroy(struct net_device *dev);
 
+static inline bool macvlan_passthru(const struct macvlan_port *port)
+{
+       return port->flags & MACVLAN_F_PASSTHRU;
+}
+
+static inline void macvlan_set_passthru(struct macvlan_port *port)
+{
+       port->flags |= MACVLAN_F_PASSTHRU;
+}
+
+static inline bool macvlan_addr_change(const struct macvlan_port *port)
+{
+       return port->flags & MACVLAN_F_ADDRCHANGE;
+}
+
+static inline void macvlan_set_addr_change(struct macvlan_port *port)
+{
+       port->flags |= MACVLAN_F_ADDRCHANGE;
+}
+
+static inline void macvlan_clear_addr_change(struct macvlan_port *port)
+{
+       port->flags &= ~MACVLAN_F_ADDRCHANGE;
+}
+
 /* Hash Ethernet address */
 static u32 macvlan_eth_hash(const unsigned char *addr)
 {
@@ -181,11 +210,12 @@ static void macvlan_hash_change_addr(struct macvlan_dev *vlan,
 static bool macvlan_addr_busy(const struct macvlan_port *port,
                              const unsigned char *addr)
 {
-       /* Test to see if the specified multicast address is
+       /* Test to see if the specified address is
         * currently in use by the underlying device or
         * another macvlan.
         */
-       if (ether_addr_equal_64bits(port->dev->dev_addr, addr))
+       if (!macvlan_passthru(port) && !macvlan_addr_change(port) &&
+           ether_addr_equal_64bits(port->dev->dev_addr, addr))
                return true;
 
        if (macvlan_hash_lookup(port, addr))
@@ -445,7 +475,7 @@ static rx_handler_result_t macvlan_handle_frame(struct sk_buff **pskb)
        }
 
        macvlan_forward_source(skb, port, eth->h_source);
-       if (port->passthru)
+       if (macvlan_passthru(port))
                vlan = list_first_or_null_rcu(&port->vlans,
                                              struct macvlan_dev, list);
        else
@@ -574,7 +604,7 @@ static int macvlan_open(struct net_device *dev)
        struct net_device *lowerdev = vlan->lowerdev;
        int err;
 
-       if (vlan->port->passthru) {
+       if (macvlan_passthru(vlan->port)) {
                if (!(vlan->flags & MACVLAN_FLAG_NOPROMISC)) {
                        err = dev_set_promiscuity(lowerdev, 1);
                        if (err < 0)
@@ -649,7 +679,7 @@ static int macvlan_stop(struct net_device *dev)
        dev_uc_unsync(lowerdev, dev);
        dev_mc_unsync(lowerdev, dev);
 
-       if (vlan->port->passthru) {
+       if (macvlan_passthru(vlan->port)) {
                if (!(vlan->flags & MACVLAN_FLAG_NOPROMISC))
                        dev_set_promiscuity(lowerdev, -1);
                goto hash_del;
@@ -672,6 +702,7 @@ static int macvlan_sync_address(struct net_device *dev, unsigned char *addr)
 {
        struct macvlan_dev *vlan = netdev_priv(dev);
        struct net_device *lowerdev = vlan->lowerdev;
+       struct macvlan_port *port = vlan->port;
        int err;
 
        if (!(dev->flags & IFF_UP)) {
@@ -682,7 +713,7 @@ static int macvlan_sync_address(struct net_device *dev, unsigned char *addr)
                if (macvlan_addr_busy(vlan->port, addr))
                        return -EBUSY;
 
-               if (!vlan->port->passthru) {
+               if (!macvlan_passthru(port)) {
                        err = dev_uc_add(lowerdev, addr);
                        if (err)
                                return err;
@@ -692,6 +723,15 @@ static int macvlan_sync_address(struct net_device *dev, unsigned char *addr)
 
                macvlan_hash_change_addr(vlan, addr);
        }
+       if (macvlan_passthru(port) && !macvlan_addr_change(port)) {
+               /* Since addr_change isn't set, we are here due to lower
+                * device change.  Save the lower-dev address so we can
+                * restore it later.
+                */
+               ether_addr_copy(vlan->port->perm_addr,
+                               lowerdev->dev_addr);
+       }
+       macvlan_clear_addr_change(port);
        return 0;
 }
 
@@ -703,7 +743,12 @@ static int macvlan_set_mac_address(struct net_device *dev, void *p)
        if (!is_valid_ether_addr(addr->sa_data))
                return -EADDRNOTAVAIL;
 
+       /* If the addresses are the same, this is a no-op */
+       if (ether_addr_equal(dev->dev_addr, addr->sa_data))
+               return 0;
+
        if (vlan->mode == MACVLAN_MODE_PASSTHRU) {
+               macvlan_set_addr_change(vlan->port);
                dev_set_mac_address(vlan->lowerdev, addr);
                return 0;
        }
@@ -928,7 +973,7 @@ static int macvlan_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
        /* Support unicast filter only on passthru devices.
         * Multicast filter should be allowed on all devices.
         */
-       if (!vlan->port->passthru && is_unicast_ether_addr(addr))
+       if (!macvlan_passthru(vlan->port) && is_unicast_ether_addr(addr))
                return -EOPNOTSUPP;
 
        if (flags & NLM_F_REPLACE)
@@ -952,7 +997,7 @@ static int macvlan_fdb_del(struct ndmsg *ndm, struct nlattr *tb[],
        /* Support unicast filter only on passthru devices.
         * Multicast filter should be allowed on all devices.
         */
-       if (!vlan->port->passthru && is_unicast_ether_addr(addr))
+       if (!macvlan_passthru(vlan->port) && is_unicast_ether_addr(addr))
                return -EOPNOTSUPP;
 
        if (is_unicast_ether_addr(addr))
@@ -1120,8 +1165,8 @@ static int macvlan_port_create(struct net_device *dev)
        if (port == NULL)
                return -ENOMEM;
 
-       port->passthru = false;
        port->dev = dev;
+       ether_addr_copy(port->perm_addr, dev->dev_addr);
        INIT_LIST_HEAD(&port->vlans);
        for (i = 0; i < MACVLAN_HASH_SIZE; i++)
                INIT_HLIST_HEAD(&port->vlan_hash[i]);
@@ -1161,6 +1206,18 @@ static void macvlan_port_destroy(struct net_device *dev)
                kfree_skb(skb);
        }
 
+       /* If the lower device address has been changed by passthru
+        * macvlan, put it back.
+        */
+       if (macvlan_passthru(port) &&
+           !ether_addr_equal(port->dev->dev_addr, port->perm_addr)) {
+               struct sockaddr sa;
+
+               sa.sa_family = port->dev->type;
+               memcpy(&sa.sa_data, port->perm_addr, port->dev->addr_len);
+               dev_set_mac_address(port->dev, &sa);
+       }
+
        kfree(port);
 }
 
@@ -1326,7 +1383,7 @@ int macvlan_common_newlink(struct net *src_net, struct net_device *dev,
        port = macvlan_port_get_rtnl(lowerdev);
 
        /* Only 1 macvlan device can be created in passthru mode */
-       if (port->passthru) {
+       if (macvlan_passthru(port)) {
                /* The macvlan port must be not created this time,
                 * still goto destroy_macvlan_port for readability.
                 */
@@ -1352,7 +1409,7 @@ int macvlan_common_newlink(struct net *src_net, struct net_device *dev,
                        err = -EINVAL;
                        goto destroy_macvlan_port;
                }
-               port->passthru = true;
+               macvlan_set_passthru(port);
                eth_hw_addr_inherit(dev, lowerdev);
        }
 
@@ -1434,7 +1491,7 @@ static int macvlan_changelink(struct net_device *dev,
        if (data && data[IFLA_MACVLAN_FLAGS]) {
                __u16 flags = nla_get_u16(data[IFLA_MACVLAN_FLAGS]);
                bool promisc = (flags ^ vlan->flags) & MACVLAN_FLAG_NOPROMISC;
-               if (vlan->port->passthru && promisc) {
+               if (macvlan_passthru(vlan->port) && promisc) {
                        int err;
 
                        if (flags & MACVLAN_FLAG_NOPROMISC)
@@ -1597,7 +1654,7 @@ static int macvlan_device_event(struct notifier_block *unused,
                }
                break;
        case NETDEV_CHANGEADDR:
-               if (!port->passthru)
+               if (!macvlan_passthru(port))
                        return NOTIFY_DONE;
 
                vlan = list_first_entry_or_null(&port->vlans,
index ed0d10f54f2607533868dfd10e6bc9d0e09050de..c3065236ffcca6839d1326e60b96ac280787d2ee 100644 (file)
@@ -908,7 +908,7 @@ static void decode_txts(struct dp83640_private *dp83640,
        if (overflow) {
                pr_debug("tx timestamp queue overflow, count %d\n", overflow);
                while (skb) {
-                       skb_complete_tx_timestamp(skb, NULL);
+                       kfree_skb(skb);
                        skb = skb_dequeue(&dp83640->tx_queue);
                }
                return;
index b9252b8d81ffb720272ca5f0b25910c021eb28a3..8b2038844ba96a4a86c5b42aca4fa59fe96ecca2 100644 (file)
@@ -619,6 +619,8 @@ static int ksz9031_read_status(struct phy_device *phydev)
        if ((regval & 0xFF) == 0xFF) {
                phy_init_hw(phydev);
                phydev->link = 0;
+               if (phydev->drv->config_intr && phy_interrupt_is_valid(phydev))
+                       phydev->drv->config_intr(phydev);
        }
 
        return 0;
index 51cf60092a18e33924f52c568d91a33f30828b21..4037ab27734ae76fcfedafc8ce40f74f1549fcfb 100644 (file)
@@ -1722,6 +1722,18 @@ static const struct driver_info lenovo_info = {
        .tx_fixup = ax88179_tx_fixup,
 };
 
+static const struct driver_info belkin_info = {
+       .description = "Belkin USB Ethernet Adapter",
+       .bind   = ax88179_bind,
+       .unbind = ax88179_unbind,
+       .status = ax88179_status,
+       .link_reset = ax88179_link_reset,
+       .reset  = ax88179_reset,
+       .flags  = FLAG_ETHER | FLAG_FRAMING_AX,
+       .rx_fixup = ax88179_rx_fixup,
+       .tx_fixup = ax88179_tx_fixup,
+};
+
 static const struct usb_device_id products[] = {
 {
        /* ASIX AX88179 10/100/1000 */
@@ -1751,6 +1763,10 @@ static const struct usb_device_id products[] = {
        /* Lenovo OneLinkDock Gigabit LAN */
        USB_DEVICE(0x17ef, 0x304b),
        .driver_info = (unsigned long)&lenovo_info,
+}, {
+       /* Belkin B2B128 USB 3.0 Hub + Gigabit Ethernet Adapter */
+       USB_DEVICE(0x050d, 0x0128),
+       .driver_info = (unsigned long)&belkin_info,
 },
        { },
 };
index 0156fe8cac172a909cfe4ed5b9567572132b888d..364fa9d11d1a1e74c59f830fe23a9b9e5b37fcee 100644 (file)
@@ -383,7 +383,7 @@ static int veth_newlink(struct net *src_net, struct net_device *dev,
                tbp = tb;
        }
 
-       if (tbp[IFLA_IFNAME]) {
+       if (ifmp && tbp[IFLA_IFNAME]) {
                nla_strlcpy(ifname, tbp[IFLA_IFNAME], IFNAMSIZ);
                name_assign_type = NET_NAME_USER;
        } else {
@@ -402,7 +402,7 @@ static int veth_newlink(struct net *src_net, struct net_device *dev,
                return PTR_ERR(peer);
        }
 
-       if (tbp[IFLA_ADDRESS] == NULL)
+       if (!ifmp || !tbp[IFLA_ADDRESS])
                eth_hw_addr_random(peer);
 
        if (ifmp && (dev->ifindex != 0))
index a871f45ecc79a438b2b43465d3719f240ff25cb5..143d8a95a60d97b9036cd092d15b0bd8f8e361f8 100644 (file)
@@ -1797,6 +1797,7 @@ static void virtnet_freeze_down(struct virtio_device *vdev)
        flush_work(&vi->config_work);
 
        netif_device_detach(vi->dev);
+       netif_tx_disable(vi->dev);
        cancel_delayed_work_sync(&vi->refill);
 
        if (netif_running(vi->dev)) {
index 530586be05b4357dc8ee6439b7a9e225bce012c6..5b1d2e8402d9d5482085018186d947b59cbb35a4 100644 (file)
@@ -199,6 +199,7 @@ struct xenvif_queue { /* Per-queue data for xenvif */
        unsigned long   remaining_credit;
        struct timer_list credit_timeout;
        u64 credit_window_start;
+       bool rate_limited;
 
        /* Statistics */
        struct xenvif_stats stats;
index 8397f6c9245158e8b3ff005bc58a419e4250169d..e322a862ddfe70b4e1b2fbdbffd8cd78cbdc4b24 100644 (file)
@@ -106,7 +106,11 @@ static int xenvif_poll(struct napi_struct *napi, int budget)
 
        if (work_done < budget) {
                napi_complete_done(napi, work_done);
-               xenvif_napi_schedule_or_enable_events(queue);
+               /* If the queue is rate-limited, it shall be
+                * rescheduled in the timer callback.
+                */
+               if (likely(!queue->rate_limited))
+                       xenvif_napi_schedule_or_enable_events(queue);
        }
 
        return work_done;
index 602d408fa25e98a4651716b1390d2507bced4605..5042ff8d449af70b2a05ac6e166eb8acbf7ae44c 100644 (file)
@@ -180,6 +180,7 @@ static void tx_add_credit(struct xenvif_queue *queue)
                max_credit = ULONG_MAX; /* wrapped: clamp to ULONG_MAX */
 
        queue->remaining_credit = min(max_credit, max_burst);
+       queue->rate_limited = false;
 }
 
 void xenvif_tx_credit_callback(unsigned long data)
@@ -686,8 +687,10 @@ static bool tx_credit_exceeded(struct xenvif_queue *queue, unsigned size)
                msecs_to_jiffies(queue->credit_usec / 1000);
 
        /* Timer could already be pending in rare cases. */
-       if (timer_pending(&queue->credit_timeout))
+       if (timer_pending(&queue->credit_timeout)) {
+               queue->rate_limited = true;
                return true;
+       }
 
        /* Passed the point where we can replenish credit? */
        if (time_after_eq64(now, next_credit)) {
@@ -702,6 +705,7 @@ static bool tx_credit_exceeded(struct xenvif_queue *queue, unsigned size)
                mod_timer(&queue->credit_timeout,
                          next_credit);
                queue->credit_window_start = next_credit;
+               queue->rate_limited = true;
 
                return true;
        }
index 951042a375d6b22dbd34988e38fef7114593c366..40c7581caeb00d30a60c7b9152ac67dd99a888d6 100644 (file)
@@ -1805,7 +1805,8 @@ static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown)
        if (pci_is_enabled(pdev)) {
                u32 csts = readl(dev->bar + NVME_REG_CSTS);
 
-               if (dev->ctrl.state == NVME_CTRL_LIVE)
+               if (dev->ctrl.state == NVME_CTRL_LIVE ||
+                   dev->ctrl.state == NVME_CTRL_RESETTING)
                        nvme_start_freeze(&dev->ctrl);
                dead = !!((csts & NVME_CSTS_CFS) || !(csts & NVME_CSTS_RDY) ||
                        pdev->error_state  != pci_channel_io_normal);
index 519599dddd3692ee373a9eb00d95d5757556ad42..0a7404ef9335bf7ea19926e2896c9b737951afe5 100644 (file)
@@ -263,7 +263,10 @@ __blkdev_direct_IO_simple(struct kiocb *iocb, struct iov_iter *iter,
                kfree(vecs);
 
        if (unlikely(bio.bi_error))
-               return bio.bi_error;
+               ret = bio.bi_error;
+
+       bio_uninit(&bio);
+
        return ret;
 }
 
index c14758e08d738eec44bf08c79acee71366ce0e70..390ac9c39c5932ef93f3ae8d3a5615f2737848af 100644 (file)
@@ -753,7 +753,6 @@ static void nfs4_callback_free_slot(struct nfs4_session *session,
         * A single slot, so highest used slotid is either 0 or -1
         */
        nfs4_free_slot(tbl, slot);
-       nfs4_slot_tbl_drain_complete(tbl);
        spin_unlock(&tbl->slot_tbl_lock);
 }
 
index 32ccd7754f8a2875933d1f9c532b54c656971bfd..2ac00bf4ecf146815bff44755f4406161e569007 100644 (file)
@@ -1946,29 +1946,6 @@ nfs_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry)
 }
 EXPORT_SYMBOL_GPL(nfs_link);
 
-static void
-nfs_complete_rename(struct rpc_task *task, struct nfs_renamedata *data)
-{
-       struct dentry *old_dentry = data->old_dentry;
-       struct dentry *new_dentry = data->new_dentry;
-       struct inode *old_inode = d_inode(old_dentry);
-       struct inode *new_inode = d_inode(new_dentry);
-
-       nfs_mark_for_revalidate(old_inode);
-
-       switch (task->tk_status) {
-       case 0:
-               if (new_inode != NULL)
-                       nfs_drop_nlink(new_inode);
-               d_move(old_dentry, new_dentry);
-               nfs_set_verifier(new_dentry,
-                                       nfs_save_change_attribute(data->new_dir));
-               break;
-       case -ENOENT:
-               nfs_dentry_handle_enoent(old_dentry);
-       }
-}
-
 /*
  * RENAME
  * FIXME: Some nfsds, like the Linux user space nfsd, may generate a
@@ -1999,7 +1976,7 @@ int nfs_rename(struct inode *old_dir, struct dentry *old_dentry,
 {
        struct inode *old_inode = d_inode(old_dentry);
        struct inode *new_inode = d_inode(new_dentry);
-       struct dentry *dentry = NULL;
+       struct dentry *dentry = NULL, *rehash = NULL;
        struct rpc_task *task;
        int error = -EBUSY;
 
@@ -2022,8 +1999,10 @@ int nfs_rename(struct inode *old_dir, struct dentry *old_dentry,
                 * To prevent any new references to the target during the
                 * rename, we unhash the dentry in advance.
                 */
-               if (!d_unhashed(new_dentry))
+               if (!d_unhashed(new_dentry)) {
                        d_drop(new_dentry);
+                       rehash = new_dentry;
+               }
 
                if (d_count(new_dentry) > 2) {
                        int err;
@@ -2040,6 +2019,7 @@ int nfs_rename(struct inode *old_dir, struct dentry *old_dentry,
                                goto out;
 
                        new_dentry = dentry;
+                       rehash = NULL;
                        new_inode = NULL;
                }
        }
@@ -2048,8 +2028,7 @@ int nfs_rename(struct inode *old_dir, struct dentry *old_dentry,
        if (new_inode != NULL)
                NFS_PROTO(new_inode)->return_delegation(new_inode);
 
-       task = nfs_async_rename(old_dir, new_dir, old_dentry, new_dentry,
-                                       nfs_complete_rename);
+       task = nfs_async_rename(old_dir, new_dir, old_dentry, new_dentry, NULL);
        if (IS_ERR(task)) {
                error = PTR_ERR(task);
                goto out;
@@ -2059,9 +2038,27 @@ int nfs_rename(struct inode *old_dir, struct dentry *old_dentry,
        if (error == 0)
                error = task->tk_status;
        rpc_put_task(task);
+       nfs_mark_for_revalidate(old_inode);
 out:
+       if (rehash)
+               d_rehash(rehash);
        trace_nfs_rename_exit(old_dir, old_dentry,
                        new_dir, new_dentry, error);
+       if (!error) {
+               if (new_inode != NULL)
+                       nfs_drop_nlink(new_inode);
+               /*
+                * The d_move() should be here instead of in an async RPC completion
+                * handler because we need the proper locks to move the dentry.  If
+                * we're interrupted by a signal, the async RPC completion handler
+                * should mark the directories for revalidation.
+                */
+               d_move(old_dentry, new_dentry);
+               nfs_set_verifier(new_dentry,
+                                       nfs_save_change_attribute(new_dir));
+       } else if (error == -ENOENT)
+               nfs_dentry_handle_enoent(old_dentry);
+
        /* new dentry created? */
        if (dentry)
                dput(dentry);
index c08c46a3b8cde00ef5aa40fae87ed2fce06faea1..dbfa18900e25a38a0998a2d429644a860c559ac2 100644 (file)
@@ -2589,7 +2589,8 @@ static inline void nfs4_exclusive_attrset(struct nfs4_opendata *opendata,
 
        /* Except MODE, it seems harmless of setting twice. */
        if (opendata->o_arg.createmode != NFS4_CREATE_EXCLUSIVE &&
-               attrset[1] & FATTR4_WORD1_MODE)
+               (attrset[1] & FATTR4_WORD1_MODE ||
+                attrset[2] & FATTR4_WORD2_MODE_UMASK))
                sattr->ia_valid &= ~ATTR_MODE;
 
        if (attrset[2] & FATTR4_WORD2_SECURITY_LABEL)
@@ -8416,6 +8417,7 @@ static void nfs4_layoutget_release(void *calldata)
        size_t max_pages = max_response_pages(server);
 
        dprintk("--> %s\n", __func__);
+       nfs4_sequence_free_slot(&lgp->res.seq_res);
        nfs4_free_pages(lgp->args.layout.pages, max_pages);
        pnfs_put_layout_hdr(NFS_I(inode)->layout);
        put_nfs_open_context(lgp->args.ctx);
@@ -8490,7 +8492,6 @@ nfs4_proc_layoutget(struct nfs4_layoutget *lgp, long *timeout, gfp_t gfp_flags)
        /* if layoutp->len is 0, nfs4_layoutget_prepare called rpc_exit */
        if (status == 0 && lgp->res.layoutp->len)
                lseg = pnfs_layout_process(lgp);
-       nfs4_sequence_free_slot(&lgp->res.seq_res);
        rpc_put_task(task);
        dprintk("<-- %s status=%d\n", __func__, status);
        if (status)
index b34de036501bc90e48be043aec38485f7f755e55..cbf82b0d446759a4934fbf7329e6a767ad5530ce 100644 (file)
@@ -2134,6 +2134,8 @@ again:
        put_rpccred(cred);
        switch (status) {
        case 0:
+       case -EINTR:
+       case -ERESTARTSYS:
                break;
        case -ETIMEDOUT:
                if (clnt->cl_softrtry)
index 7a44533f4bbf24134a95bdc030bde5779f28457a..a2a65120c9d04dbc0d4c5dd0de5baa1c133b8ad5 100644 (file)
@@ -330,15 +330,9 @@ static int ovl_copy_up_locked(struct dentry *workdir, struct dentry *upperdir,
                .link = link
        };
 
-       upper = lookup_one_len(dentry->d_name.name, upperdir,
-                              dentry->d_name.len);
-       err = PTR_ERR(upper);
-       if (IS_ERR(upper))
-               goto out;
-
        err = security_inode_copy_up(dentry, &new_creds);
        if (err < 0)
-               goto out1;
+               goto out;
 
        if (new_creds)
                old_creds = override_creds(new_creds);
@@ -362,7 +356,7 @@ static int ovl_copy_up_locked(struct dentry *workdir, struct dentry *upperdir,
        }
 
        if (err)
-               goto out2;
+               goto out;
 
        if (S_ISREG(stat->mode)) {
                struct path upperpath;
@@ -398,10 +392,23 @@ static int ovl_copy_up_locked(struct dentry *workdir, struct dentry *upperdir,
        /*
         * Store identifier of lower inode in upper inode xattr to
         * allow lookup of the copy up origin inode.
+        *
+        * Don't set origin when we are breaking the association with a lower
+        * hard link.
         */
-       err = ovl_set_origin(dentry, lowerpath->dentry, temp);
-       if (err)
+       if (S_ISDIR(stat->mode) || stat->nlink == 1) {
+               err = ovl_set_origin(dentry, lowerpath->dentry, temp);
+               if (err)
+                       goto out_cleanup;
+       }
+
+       upper = lookup_one_len(dentry->d_name.name, upperdir,
+                              dentry->d_name.len);
+       if (IS_ERR(upper)) {
+               err = PTR_ERR(upper);
+               upper = NULL;
                goto out_cleanup;
+       }
 
        if (tmpfile)
                err = ovl_do_link(temp, udir, upper, true);
@@ -416,17 +423,15 @@ static int ovl_copy_up_locked(struct dentry *workdir, struct dentry *upperdir,
 
        /* Restore timestamps on parent (best effort) */
        ovl_set_timestamps(upperdir, pstat);
-out2:
+out:
        dput(temp);
-out1:
        dput(upper);
-out:
        return err;
 
 out_cleanup:
        if (!tmpfile)
                ovl_cleanup(wdir, temp);
-       goto out2;
+       goto out;
 }
 
 /*
index d1b04b0e99cf8c293d4ded6eccb2b0aa2fce0d41..a7e29fa0981f148602dcb11dd53e07e1c46147d3 100644 (file)
@@ -426,6 +426,7 @@ extern void bio_advance(struct bio *, unsigned);
 
 extern void bio_init(struct bio *bio, struct bio_vec *table,
                     unsigned short max_vecs);
+extern void bio_uninit(struct bio *);
 extern void bio_reset(struct bio *);
 void bio_chain(struct bio *, struct bio *);
 
index 661e5c2a8e2a49e30d2fe3895f234e321ec81c71..082dc1bd0801c0b7f8fe61916d13a4c48a68f4c3 100644 (file)
@@ -167,7 +167,6 @@ static inline void hash_del_rcu(struct hlist_node *node)
 /**
  * hash_for_each_possible_rcu - iterate over all possible objects hashing to the
  * same bucket in an rcu enabled hashtable
- * in a rcu enabled hashtable
  * @name: hashtable to iterate
  * @obj: the type * to use as a loop cursor for each entry
  * @member: the name of the hlist_node within the struct
index 7e7e2b0d29157047fa0d3596b3f97cf501d754f9..62f5a259e597572ad4b8981ca04dd44a790760bd 100644 (file)
@@ -1850,8 +1850,9 @@ static inline struct xfrm_offload *xfrm_offload(struct sk_buff *skb)
 }
 #endif
 
-#ifdef CONFIG_XFRM_OFFLOAD
 void __net_init xfrm_dev_init(void);
+
+#ifdef CONFIG_XFRM_OFFLOAD
 int validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t features);
 int xfrm_dev_state_add(struct net *net, struct xfrm_state *x,
                       struct xfrm_user_offload *xuo);
@@ -1877,10 +1878,6 @@ static inline void xfrm_dev_state_free(struct xfrm_state *x)
        }
 }
 #else
-static inline void __net_init xfrm_dev_init(void)
-{
-}
-
 static inline int validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t features)
 {
        return 0;
index 7caf44c7fa51bcdec2b7db2efb7439beede648dc..295cd3ef633049cfc5baf00a2668da89e9ffc580 100644 (file)
@@ -112,24 +112,7 @@ enum machine_type {
 #define N_TXTADDR(x) (N_MAGIC(x) == QMAGIC ? PAGE_SIZE : 0)
 #endif
 
-/* Address of data segment in memory after it is loaded.
-   Note that it is up to you to define SEGMENT_SIZE
-   on machines not listed here.  */
-#if defined(vax) || defined(hp300) || defined(pyr)
-#define SEGMENT_SIZE page_size
-#endif
-#ifdef sony
-#define        SEGMENT_SIZE    0x2000
-#endif /* Sony.  */
-#ifdef is68k
-#define SEGMENT_SIZE 0x20000
-#endif
-#if defined(m68k) && defined(PORTAR)
-#define PAGE_SIZE 0x400
-#define SEGMENT_SIZE PAGE_SIZE
-#endif
-
-#ifdef linux
+/* Address of data segment in memory after it is loaded. */
 #ifndef __KERNEL__
 #include <unistd.h>
 #endif
@@ -142,7 +125,6 @@ enum machine_type {
 #endif
 #endif
 #endif
-#endif
 
 #define _N_SEGMENT_ROUND(x) ALIGN(x, SEGMENT_SIZE)
 
@@ -260,13 +242,7 @@ struct relocation_info
   unsigned int r_extern:1;
   /* Four bits that aren't used, but when writing an object file
      it is desirable to clear them.  */
-#ifdef NS32K
-  unsigned r_bsr:1;
-  unsigned r_disp:1;
-  unsigned r_pad:2;
-#else
   unsigned int r_pad:4;
-#endif
 };
 #endif /* no N_RELOCATION_INFO_DECLARED.  */
 
index 339c8a1371de0201df0f7ac799280168ea4d22e3..a8a725697bed693e8e77f225eea5dcc7db46dad0 100644 (file)
@@ -989,6 +989,11 @@ static int check_xadd(struct bpf_verifier_env *env, struct bpf_insn *insn)
        if (err)
                return err;
 
+       if (is_pointer_value(env, insn->src_reg)) {
+               verbose("R%d leaks addr into mem\n", insn->src_reg);
+               return -EACCES;
+       }
+
        /* check whether atomic_add can read the memory */
        err = check_mem_access(env, insn->dst_reg, insn->off,
                               BPF_SIZE(insn->code), BPF_READ, -1);
index 9e5841dc14b5fa62e0c1470df704c3dc9b99f8ad..b308be30dfb9b93307305a9c47d2156f6d90600a 100644 (file)
@@ -4337,9 +4337,6 @@ static int ftrace_process_regex(struct ftrace_iterator *iter,
 
        command = strsep(&next, ":");
 
-       if (WARN_ON_ONCE(!tr))
-               return -EINVAL;
-
        mutex_lock(&ftrace_cmd_mutex);
        list_for_each_entry(p, &ftrace_commands, list) {
                if (strcmp(p->name, command) == 0) {
index 1122f151466f64425089b9b9ecbd4b1a7584ba8a..091e801145c9996812fbaedafae1e912c123ecd7 100644 (file)
@@ -6881,6 +6881,9 @@ ftrace_trace_snapshot_callback(struct trace_array *tr, struct ftrace_hash *hash,
        char *number;
        int ret;
 
+       if (!tr)
+               return -ENODEV;
+
        /* hash funcs only work with set_ftrace_filter */
        if (!enable)
                return -EINVAL;
index a3bddbfd0874a26bcdbc84b3b4713cb206a0998b..a0910c0cdf2eabfa690d7cf434a5467faa5e01e4 100644 (file)
@@ -654,6 +654,9 @@ ftrace_trace_onoff_callback(struct trace_array *tr, struct ftrace_hash *hash,
 {
        struct ftrace_probe_ops *ops;
 
+       if (!tr)
+               return -ENODEV;
+
        /* we register both traceon and traceoff to this callback */
        if (strcmp(cmd, "traceon") == 0)
                ops = param ? &traceon_count_probe_ops : &traceon_probe_ops;
@@ -670,6 +673,9 @@ ftrace_stacktrace_callback(struct trace_array *tr, struct ftrace_hash *hash,
 {
        struct ftrace_probe_ops *ops;
 
+       if (!tr)
+               return -ENODEV;
+
        ops = param ? &stacktrace_count_probe_ops : &stacktrace_probe_ops;
 
        return ftrace_trace_probe_callback(tr, ops, hash, glob, cmd,
@@ -682,6 +688,9 @@ ftrace_dump_callback(struct trace_array *tr, struct ftrace_hash *hash,
 {
        struct ftrace_probe_ops *ops;
 
+       if (!tr)
+               return -ENODEV;
+
        ops = &dump_probe_ops;
 
        /* Only dump once. */
@@ -695,6 +704,9 @@ ftrace_cpudump_callback(struct trace_array *tr, struct ftrace_hash *hash,
 {
        struct ftrace_probe_ops *ops;
 
+       if (!tr)
+               return -ENODEV;
+
        ops = &cpudump_probe_ops;
 
        /* Only dump once. */
index c129fca6ec993a85aeb30c7e3aa69254e3f5ac16..b53c8d36916351156d8042b05b13bc5b8b717388 100644 (file)
@@ -707,20 +707,16 @@ static int create_trace_kprobe(int argc, char **argv)
                pr_info("Probe point is not specified.\n");
                return -EINVAL;
        }
-       if (isdigit(argv[1][0])) {
-               /* an address specified */
-               ret = kstrtoul(&argv[1][0], 0, (unsigned long *)&addr);
-               if (ret) {
-                       pr_info("Failed to parse address.\n");
-                       return ret;
-               }
-       } else {
+
+       /* try to parse an address. if that fails, try to read the
+        * input as a symbol. */
+       if (kstrtoul(argv[1], 0, (unsigned long *)&addr)) {
                /* a symbol specified */
                symbol = argv[1];
                /* TODO: support .init module functions */
                ret = traceprobe_split_symbol_offset(symbol, &offset);
                if (ret) {
-                       pr_info("Failed to parse symbol.\n");
+                       pr_info("Failed to parse either an address or a symbol.\n");
                        return ret;
                }
                if (offset && is_return &&
index 76aa04d4c9257482181c9b2885530fb4dc417edd..b4a751e8f9d69763c1d1fbd10083819b50db3ab4 100644 (file)
@@ -409,7 +409,9 @@ static const struct file_operations stack_trace_fops = {
 static int
 stack_trace_filter_open(struct inode *inode, struct file *file)
 {
-       return ftrace_regex_open(&trace_ops, FTRACE_ITER_FILTER,
+       struct ftrace_ops *ops = inode->i_private;
+
+       return ftrace_regex_open(ops, FTRACE_ITER_FILTER,
                                 inode, file);
 }
 
@@ -476,7 +478,7 @@ static __init int stack_trace_init(void)
                        NULL, &stack_trace_fops);
 
        trace_create_file("stack_trace_filter", 0444, d_tracer,
-                       NULL, &stack_trace_filter_fops);
+                         &trace_ops, &stack_trace_filter_fops);
 
        if (stack_trace_filter_buf[0])
                ftrace_set_early_filter(&trace_ops, stack_trace_filter_buf, 1);
index 7243421c9783bf060dac1938fc1f2f59a4afafea..416137c64bf809328898f42fff41cd27ccf036d6 100644 (file)
@@ -4767,6 +4767,13 @@ struct packet_offload *gro_find_complete_by_type(__be16 type)
 }
 EXPORT_SYMBOL(gro_find_complete_by_type);
 
+static void napi_skb_free_stolen_head(struct sk_buff *skb)
+{
+       skb_dst_drop(skb);
+       secpath_reset(skb);
+       kmem_cache_free(skbuff_head_cache, skb);
+}
+
 static gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb)
 {
        switch (ret) {
@@ -4780,13 +4787,10 @@ static gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb)
                break;
 
        case GRO_MERGED_FREE:
-               if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD) {
-                       skb_dst_drop(skb);
-                       secpath_reset(skb);
-                       kmem_cache_free(skbuff_head_cache, skb);
-               } else {
+               if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD)
+                       napi_skb_free_stolen_head(skb);
+               else
                        __kfree_skb(skb);
-               }
                break;
 
        case GRO_HELD:
@@ -4858,10 +4862,16 @@ static gro_result_t napi_frags_finish(struct napi_struct *napi,
                break;
 
        case GRO_DROP:
-       case GRO_MERGED_FREE:
                napi_reuse_skb(napi, skb);
                break;
 
+       case GRO_MERGED_FREE:
+               if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD)
+                       napi_skb_free_stolen_head(skb);
+               else
+                       napi_reuse_skb(napi, skb);
+               break;
+
        case GRO_MERGED:
        case GRO_CONSUMED:
                break;
@@ -7783,9 +7793,9 @@ struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
        } else {
                netdev_stats_to_stats64(storage, &dev->stats);
        }
-       storage->rx_dropped += atomic_long_read(&dev->rx_dropped);
-       storage->tx_dropped += atomic_long_read(&dev->tx_dropped);
-       storage->rx_nohandler += atomic_long_read(&dev->rx_nohandler);
+       storage->rx_dropped += (unsigned long)atomic_long_read(&dev->rx_dropped);
+       storage->tx_dropped += (unsigned long)atomic_long_read(&dev->tx_dropped);
+       storage->rx_nohandler += (unsigned long)atomic_long_read(&dev->rx_nohandler);
        return storage;
 }
 EXPORT_SYMBOL(dev_get_stats);
index 7a3fd25e8913a99d0fcbb256bc9001f6f1d4dd6f..532b36e9ce2a196805411cc2d1d2c26a2d83acfb 100644 (file)
@@ -964,7 +964,8 @@ static int __ip_append_data(struct sock *sk,
                csummode = CHECKSUM_PARTIAL;
 
        cork->length += length;
-       if ((((length + fragheaderlen) > mtu) || (skb && skb_is_gso(skb))) &&
+       if ((((length + (skb ? skb->len : fragheaderlen)) > mtu) ||
+            (skb && skb_is_gso(skb))) &&
            (sk->sk_protocol == IPPROTO_UDP) &&
            (rt->dst.dev->features & NETIF_F_UFO) && !dst_xfrm(&rt->dst) &&
            (sk->sk_type == SOCK_DGRAM) && !sk->sk_no_check_tx) {
index b5ea036ca78144b86622cb0944d0b840f7225ec5..40aca7803cf2db25361dc7ef5c83ee6ea955c335 100644 (file)
@@ -2330,6 +2330,8 @@ int tcp_disconnect(struct sock *sk, int flags)
        tcp_init_send_head(sk);
        memset(&tp->rx_opt, 0, sizeof(tp->rx_opt));
        __sk_dst_reset(sk);
+       dst_release(sk->sk_rx_dst);
+       sk->sk_rx_dst = NULL;
        tcp_saved_syn_free(tp);
 
        /* Clean up fastopen related fields */
index 686c92375e81d50787adbfb423afba76903948ad..1d2dbace42ffadda98d6b324170101bd31d7c9a7 100644 (file)
@@ -3369,6 +3369,7 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event,
        struct net_device *dev = netdev_notifier_info_to_dev(ptr);
        struct netdev_notifier_changeupper_info *info;
        struct inet6_dev *idev = __in6_dev_get(dev);
+       struct net *net = dev_net(dev);
        int run_pending = 0;
        int err;
 
@@ -3384,7 +3385,7 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event,
        case NETDEV_CHANGEMTU:
                /* if MTU under IPV6_MIN_MTU stop IPv6 on this interface. */
                if (dev->mtu < IPV6_MIN_MTU) {
-                       addrconf_ifdown(dev, 1);
+                       addrconf_ifdown(dev, dev != net->loopback_dev);
                        break;
                }
 
@@ -3500,7 +3501,7 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event,
                         * IPV6_MIN_MTU stop IPv6 on this interface.
                         */
                        if (dev->mtu < IPV6_MIN_MTU)
-                               addrconf_ifdown(dev, 1);
+                               addrconf_ifdown(dev, dev != net->loopback_dev);
                }
                break;
 
index e011122ebd43c190aec3812099345ec852444284..5c786f5ab961c5230ce325eebd465dcc5da93904 100644 (file)
@@ -250,8 +250,14 @@ ipv4_connected:
         */
 
        err = ip6_datagram_dst_update(sk, true);
-       if (err)
+       if (err) {
+               /* Reset daddr and dport so that udp_v6_early_demux()
+                * fails to find this socket
+                */
+               memset(&sk->sk_v6_daddr, 0, sizeof(sk->sk_v6_daddr));
+               inet->inet_dport = 0;
                goto out;
+       }
 
        sk->sk_state = TCP_ESTABLISHED;
        sk_set_txhash(sk);
index d950d43ba255442cf3079546a46b95693029f10c..f02f131f6435a967de395b9a7069051c93a039d7 100644 (file)
 #include <net/ipv6.h>
 #include <linux/icmpv6.h>
 
+static __u16 esp6_nexthdr_esp_offset(struct ipv6hdr *ipv6_hdr, int nhlen)
+{
+       int off = sizeof(struct ipv6hdr);
+       struct ipv6_opt_hdr *exthdr;
+
+       if (likely(ipv6_hdr->nexthdr == NEXTHDR_ESP))
+               return offsetof(struct ipv6hdr, nexthdr);
+
+       while (off < nhlen) {
+               exthdr = (void *)ipv6_hdr + off;
+               if (exthdr->nexthdr == NEXTHDR_ESP)
+                       return off;
+
+               off += ipv6_optlen(exthdr);
+       }
+
+       return 0;
+}
+
 static struct sk_buff **esp6_gro_receive(struct sk_buff **head,
                                         struct sk_buff *skb)
 {
@@ -38,6 +57,7 @@ static struct sk_buff **esp6_gro_receive(struct sk_buff **head,
        struct xfrm_state *x;
        __be32 seq;
        __be32 spi;
+       int nhoff;
        int err;
 
        skb_pull(skb, offset);
@@ -72,6 +92,11 @@ static struct sk_buff **esp6_gro_receive(struct sk_buff **head,
 
        xo->flags |= XFRM_GRO;
 
+       nhoff = esp6_nexthdr_esp_offset(ipv6_hdr(skb), offset);
+       if (!nhoff)
+               goto out;
+
+       IP6CB(skb)->nhoff = nhoff;
        XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip6 = NULL;
        XFRM_SPI_SKB_CB(skb)->family = AF_INET6;
        XFRM_SPI_SKB_CB(skb)->daddroff = offsetof(struct ipv6hdr, daddr);
index bf8a58a1c32d83a9605844075da5815be23a6bf1..1699acb2fa2c835cb9fb18cee9148f2732878c5d 100644 (file)
@@ -1390,7 +1390,7 @@ emsgsize:
         */
 
        cork->length += length;
-       if ((((length + fragheaderlen) > mtu) ||
+       if ((((length + (skb ? skb->len : headersize)) > mtu) ||
             (skb && skb_is_gso(skb))) &&
            (sk->sk_protocol == IPPROTO_UDP) &&
            (rt->dst.dev->features & NETIF_F_UFO) && !dst_xfrm(&rt->dst) &&
index 7cebd954d5bb4263017f8f2d577ecfb88ca7c093..322bd62e688bfb5d2a88766a9537af51db0a69e8 100644 (file)
@@ -3722,7 +3722,11 @@ static int ip6_route_dev_notify(struct notifier_block *this,
                net->ipv6.ip6_blk_hole_entry->dst.dev = dev;
                net->ipv6.ip6_blk_hole_entry->rt6i_idev = in6_dev_get(dev);
 #endif
-        } else if (event == NETDEV_UNREGISTER) {
+        } else if (event == NETDEV_UNREGISTER &&
+                   dev->reg_state != NETREG_UNREGISTERED) {
+               /* NETDEV_UNREGISTER could be fired for multiple times by
+                * netdev_wait_allrefs(). Make sure we only call this once.
+                */
                in6_dev_put(net->ipv6.ip6_null_entry->rt6i_idev);
 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
                in6_dev_put(net->ipv6.ip6_prohibit_entry->rt6i_idev);
index 2378503577b0c8823049b7d17f857466481077b3..f8ad15891cd75576765560bd76b45417b5836f57 100644 (file)
@@ -305,7 +305,7 @@ static int ipip6_tunnel_get_prl(struct ip_tunnel *t,
         * we try harder to allocate.
         */
        kp = (cmax <= 1 || capable(CAP_NET_ADMIN)) ?
-               kcalloc(cmax, sizeof(*kp), GFP_KERNEL) :
+               kcalloc(cmax, sizeof(*kp), GFP_KERNEL | __GFP_NOWARN) :
                NULL;
 
        rcu_read_lock();
index 06ec39b796092ad5e8954c0cfd10e75205ffce54..75703fda23e7703b0df9f379d48208c0bf202c69 100644 (file)
@@ -879,7 +879,8 @@ static struct sock *__udp6_lib_demux_lookup(struct net *net,
        struct sock *sk;
 
        udp_portaddr_for_each_entry_rcu(sk, &hslot2->head) {
-               if (INET6_MATCH(sk, net, rmt_addr, loc_addr, ports, dif))
+               if (sk->sk_state == TCP_ESTABLISHED &&
+                   INET6_MATCH(sk, net, rmt_addr, loc_addr, ports, dif))
                        return sk;
                /* Only check first socket in chain */
                break;
index 08a807b29298f5a2c14c30eaedcba87f3057431d..3ef5d913e7a3b5a7407bb926d5ff354ae778ccb8 100644 (file)
@@ -43,8 +43,8 @@ int xfrm6_transport_finish(struct sk_buff *skb, int async)
                return 1;
 #endif
 
-       ipv6_hdr(skb)->payload_len = htons(skb->len);
        __skb_push(skb, skb->data - skb_network_header(skb));
+       ipv6_hdr(skb)->payload_len = htons(skb->len - sizeof(struct ipv6hdr));
 
        if (xo && (xo->flags & XFRM_GRO)) {
                skb_mac_header_rebuild(skb);
index 512dc43d0ce6814f0a6d0507805025d75234eece..b1432b6680338cf60942f3dd5567a6c7a11a603f 100644 (file)
@@ -1157,6 +1157,7 @@ static struct xfrm_state * pfkey_msg2xfrm_state(struct net *net,
                        goto out;
        }
 
+       err = -ENOBUFS;
        key = ext_hdrs[SADB_EXT_KEY_AUTH - 1];
        if (sa->sadb_sa_auth) {
                int keysize = 0;
@@ -1168,8 +1169,10 @@ static struct xfrm_state * pfkey_msg2xfrm_state(struct net *net,
                if (key)
                        keysize = (key->sadb_key_bits + 7) / 8;
                x->aalg = kmalloc(sizeof(*x->aalg) + keysize, GFP_KERNEL);
-               if (!x->aalg)
+               if (!x->aalg) {
+                       err = -ENOMEM;
                        goto out;
+               }
                strcpy(x->aalg->alg_name, a->name);
                x->aalg->alg_key_len = 0;
                if (key) {
@@ -1188,8 +1191,10 @@ static struct xfrm_state * pfkey_msg2xfrm_state(struct net *net,
                                goto out;
                        }
                        x->calg = kmalloc(sizeof(*x->calg), GFP_KERNEL);
-                       if (!x->calg)
+                       if (!x->calg) {
+                               err = -ENOMEM;
                                goto out;
+                       }
                        strcpy(x->calg->alg_name, a->name);
                        x->props.calgo = sa->sadb_sa_encrypt;
                } else {
@@ -1203,8 +1208,10 @@ static struct xfrm_state * pfkey_msg2xfrm_state(struct net *net,
                        if (key)
                                keysize = (key->sadb_key_bits + 7) / 8;
                        x->ealg = kmalloc(sizeof(*x->ealg) + keysize, GFP_KERNEL);
-                       if (!x->ealg)
+                       if (!x->ealg) {
+                               err = -ENOMEM;
                                goto out;
+                       }
                        strcpy(x->ealg->alg_name, a->name);
                        x->ealg->alg_key_len = 0;
                        if (key) {
@@ -1249,8 +1256,10 @@ static struct xfrm_state * pfkey_msg2xfrm_state(struct net *net,
                struct xfrm_encap_tmpl *natt;
 
                x->encap = kmalloc(sizeof(*x->encap), GFP_KERNEL);
-               if (!x->encap)
+               if (!x->encap) {
+                       err = -ENOMEM;
                        goto out;
+               }
 
                natt = x->encap;
                n_type = ext_hdrs[SADB_X_EXT_NAT_T_TYPE-1];
@@ -2755,6 +2764,8 @@ static int pfkey_spdflush(struct sock *sk, struct sk_buff *skb, const struct sad
        int err, err2;
 
        err = xfrm_policy_flush(net, XFRM_POLICY_TYPE_MAIN, true);
+       if (!err)
+               xfrm_garbage_collect(net);
        err2 = unicast_flush_resp(sk, hdr);
        if (err || err2) {
                if (err == -ESRCH) /* empty table - old silent behavior */
index e88342fde1bc409aed6a3c86e7a628030eaac66f..cfdbfa18a95eb01deceecdad56be7de07547db2a 100644 (file)
@@ -1019,7 +1019,8 @@ static struct Qdisc *qdisc_create(struct net_device *dev,
                return sch;
        }
        /* ops->init() failed, we call ->destroy() like qdisc_create_dflt() */
-       ops->destroy(sch);
+       if (ops->destroy)
+               ops->destroy(sch);
 err_out3:
        dev_put(dev);
        kfree((char *) sch - sch->padded);
index abf81b329dc1f3276e1cf5631ebae4ec31a229de..55b2ac3009955aacd09db22c7fc4d082807c3ed9 100644 (file)
@@ -4,8 +4,7 @@
 
 obj-$(CONFIG_XFRM) := xfrm_policy.o xfrm_state.o xfrm_hash.o \
                      xfrm_input.o xfrm_output.o \
-                     xfrm_sysctl.o xfrm_replay.o
-obj-$(CONFIG_XFRM_OFFLOAD) += xfrm_device.o
+                     xfrm_sysctl.o xfrm_replay.o xfrm_device.o
 obj-$(CONFIG_XFRM_STATISTICS) += xfrm_proc.o
 obj-$(CONFIG_XFRM_ALGO) += xfrm_algo.o
 obj-$(CONFIG_XFRM_USER) += xfrm_user.o
index 574e6f32f94f29a496ef20f1a673a0e718a1a31b..5aba03685d7da54931c9287c071a13403c6edec5 100644 (file)
@@ -22,6 +22,7 @@
 #include <net/xfrm.h>
 #include <linux/notifier.h>
 
+#ifdef CONFIG_XFRM_OFFLOAD
 int validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t features)
 {
        int err;
@@ -137,6 +138,7 @@ ok:
        return true;
 }
 EXPORT_SYMBOL_GPL(xfrm_dev_offload_ok);
+#endif
 
 int xfrm_dev_register(struct net_device *dev)
 {
index ed4e52d95172e2d8dcca603cdf62cfb55517f3c9..643a18f720321c52f0103baf010f3c5921d68482 100644 (file)
@@ -1006,10 +1006,6 @@ int xfrm_policy_flush(struct net *net, u8 type, bool task_valid)
                err = -ESRCH;
 out:
        spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
-
-       if (cnt)
-               xfrm_garbage_collect(net);
-
        return err;
 }
 EXPORT_SYMBOL(xfrm_policy_flush);
index 38614df33ec8d5751bd88a48d0068926b9032556..86116e9aaf3d9cee13b7c96d4d420e33842c1270 100644 (file)
@@ -2027,6 +2027,7 @@ static int xfrm_flush_policy(struct sk_buff *skb, struct nlmsghdr *nlh,
                        return 0;
                return err;
        }
+       xfrm_garbage_collect(net);
 
        c.data.type = type;
        c.event = nlh->nlmsg_type;
index d6fb2d5d01a70ef21eb533f1b86cb9c9c8f2be84..60ce1cfc300f9cb419acbc67425076c120829f13 100644 (file)
@@ -295,6 +295,8 @@ struct hda_codec {
 
 #define list_for_each_codec(c, bus) \
        list_for_each_entry(c, &(bus)->core.codec_list, core.list)
+#define list_for_each_codec_safe(c, n, bus)                            \
+       list_for_each_entry_safe(c, n, &(bus)->core.codec_list, core.list)
 
 /* snd_hda_codec_read/write optional flags */
 #define HDA_RW_NO_RESPONSE_FALLBACK    (1 << 0)
index 3715a5725613bd8b8bdbcafc79b380044cacb491..1c60beb5b70a63a0ab25a828392a3cb175a4da8b 100644 (file)
@@ -1337,8 +1337,12 @@ EXPORT_SYMBOL_GPL(azx_probe_codecs);
 /* configure each codec instance */
 int azx_codec_configure(struct azx *chip)
 {
-       struct hda_codec *codec;
-       list_for_each_codec(codec, &chip->bus) {
+       struct hda_codec *codec, *next;
+
+       /* use _safe version here since snd_hda_codec_configure() deregisters
+        * the device upon error and deletes itself from the bus list.
+        */
+       list_for_each_codec_safe(codec, next, &chip->bus) {
                snd_hda_codec_configure(codec);
        }
        return 0;
index 2842c82363c0435f90edfe06793df254c36d43ef..71545b56b4c82bf3cca37e3d99474c8cad78971f 100644 (file)
@@ -3174,6 +3174,7 @@ static int check_dyn_adc_switch(struct hda_codec *codec)
                                                spec->input_paths[i][nums]);
                                        spec->input_paths[i][nums] =
                                                spec->input_paths[i][n];
+                                       spec->input_paths[i][n] = 0;
                                }
                        }
                        nums++;
index cabb19b1e3718b289910fcc921c698f44f162768..0ff8c55c0464a62bddd2bf6bd99ffc5041798ac3 100644 (file)
@@ -3748,6 +3748,72 @@ static struct bpf_test tests[] = {
                .result = REJECT,
                .errstr = "invalid bpf_context access",
        },
+       {
+               "leak pointer into ctx 1",
+               .insns = {
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
+                                   offsetof(struct __sk_buff, cb[0])),
+                       BPF_LD_MAP_FD(BPF_REG_2, 0),
+                       BPF_STX_XADD(BPF_DW, BPF_REG_1, BPF_REG_2,
+                                     offsetof(struct __sk_buff, cb[0])),
+                       BPF_EXIT_INSN(),
+               },
+               .fixup_map1 = { 2 },
+               .errstr_unpriv = "R2 leaks addr into mem",
+               .result_unpriv = REJECT,
+               .result = ACCEPT,
+       },
+       {
+               "leak pointer into ctx 2",
+               .insns = {
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
+                                   offsetof(struct __sk_buff, cb[0])),
+                       BPF_STX_XADD(BPF_DW, BPF_REG_1, BPF_REG_10,
+                                     offsetof(struct __sk_buff, cb[0])),
+                       BPF_EXIT_INSN(),
+               },
+               .errstr_unpriv = "R10 leaks addr into mem",
+               .result_unpriv = REJECT,
+               .result = ACCEPT,
+       },
+       {
+               "leak pointer into ctx 3",
+               .insns = {
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_LD_MAP_FD(BPF_REG_2, 0),
+                       BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2,
+                                     offsetof(struct __sk_buff, cb[0])),
+                       BPF_EXIT_INSN(),
+               },
+               .fixup_map1 = { 1 },
+               .errstr_unpriv = "R2 leaks addr into ctx",
+               .result_unpriv = REJECT,
+               .result = ACCEPT,
+       },
+       {
+               "leak pointer into map val",
+               .insns = {
+                       BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+                       BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+                       BPF_LD_MAP_FD(BPF_REG_1, 0),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+                                    BPF_FUNC_map_lookup_elem),
+                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
+                       BPF_MOV64_IMM(BPF_REG_3, 0),
+                       BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
+                       BPF_STX_XADD(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .fixup_map1 = { 4 },
+               .errstr_unpriv = "R6 leaks addr into mem",
+               .result_unpriv = REJECT,
+               .result = ACCEPT,
+       },
        {
                "helper access to map: full range",
                .insns = {