Merge branch 'stable' of git://git.kernel.org/pub/scm/linux/kernel/git/cmetcalf/linux...
author     Linus Torvalds <torvalds@linux-foundation.org>
           Thu, 9 Jul 2015 00:15:55 +0000 (17:15 -0700)
committer  Linus Torvalds <torvalds@linux-foundation.org>
           Thu, 9 Jul 2015 00:15:55 +0000 (17:15 -0700)
Pull arch/tile fix from Chris Metcalf:
 "This fix eliminates a "section mismatch" warning caused by the new
  __ex_table checking code in modpost"

* 'stable' of git://git.kernel.org/pub/scm/linux/kernel/git/cmetcalf/linux-tile:
  modpost: work correctly with tile coldtext sections

37 files changed:
arch/arm/Kconfig
arch/arm/Kconfig.debug
arch/arm/include/asm/io.h
arch/arm/include/asm/memory.h
arch/arm/include/asm/pgtable-2level.h
arch/arm/kernel/armksyms.c
arch/arm/kernel/entry-armv.S
arch/arm/kernel/smp.c
arch/arm/lib/memcpy.S
arch/arm/lib/memset.S
arch/arm/mm/ioremap.c
arch/arm/mm/mmu.c
arch/arm/mm/nommu.c
arch/arm/vdso/vdsomunge.c
arch/x86/lib/usercopy.c
drivers/misc/mei/bus.c
drivers/misc/mei/init.c
drivers/misc/mei/nfc.c
fs/ext4/extents.c
fs/ext4/inode.c
fs/ext4/mballoc.c
fs/ext4/migrate.c
include/linux/buffer_head.h
kernel/auditsc.c
kernel/events/core.c
kernel/events/internal.h
kernel/events/ring_buffer.c
kernel/module.c
tools/include/linux/compiler.h
tools/include/linux/export.h [deleted file]
tools/include/linux/rbtree.h [new file with mode: 0644]
tools/include/linux/rbtree_augmented.h [new file with mode: 0644]
tools/lib/rbtree.c [new file with mode: 0644]
tools/perf/MANIFEST
tools/perf/util/Build
tools/perf/util/include/linux/rbtree.h [deleted file]
tools/perf/util/include/linux/rbtree_augmented.h [deleted file]

diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index a750c1425c3a64e8a1f2d138b8bc4d9fd2224b8e..1c5021002fe40b06a2e57daad459bf1b4181b822 100644 (file)
@@ -1693,6 +1693,12 @@ config HIGHMEM
 config HIGHPTE
        bool "Allocate 2nd-level pagetables from highmem"
        depends on HIGHMEM
+       help
+         The VM uses one page of physical memory for each page table.
+         For systems with a lot of processes, this can use a lot of
+         precious low memory, eventually leading to low memory being
+         consumed by page tables.  Setting this option will allow
+         user-space 2nd level page tables to reside in high memory.
 
 config HW_PERF_EVENTS
        bool "Enable hardware performance counter support for perf events"
diff --git a/arch/arm/Kconfig.debug b/arch/arm/Kconfig.debug
index f1b15797136649872ab20aa87b89cdcf667b9a05..a2e16f940394c301cdcbdaa00bbd8488127867f2 100644 (file)
@@ -1635,7 +1635,7 @@ config PID_IN_CONTEXTIDR
 
 config DEBUG_SET_MODULE_RONX
        bool "Set loadable kernel module data as NX and text as RO"
-       depends on MODULES
+       depends on MODULES && MMU
        ---help---
          This option helps catch unintended modifications to loadable
          kernel module's text and read-only data. It also prevents execution
diff --git a/arch/arm/include/asm/io.h b/arch/arm/include/asm/io.h
index 1c3938f26beba9837d968be7f75d08dfa2b13136..485982084fe96aef7218aeb134527b41f90beb1e 100644 (file)
@@ -140,16 +140,11 @@ static inline u32 __raw_readl(const volatile void __iomem *addr)
  * The _caller variety takes a __builtin_return_address(0) value for
  * /proc/vmalloc to use - and should only be used in non-inline functions.
  */
-extern void __iomem *__arm_ioremap_pfn_caller(unsigned long, unsigned long,
-       size_t, unsigned int, void *);
 extern void __iomem *__arm_ioremap_caller(phys_addr_t, size_t, unsigned int,
        void *);
-
 extern void __iomem *__arm_ioremap_pfn(unsigned long, unsigned long, size_t, unsigned int);
-extern void __iomem *__arm_ioremap(phys_addr_t, size_t, unsigned int);
 extern void __iomem *__arm_ioremap_exec(phys_addr_t, size_t, bool cached);
 extern void __iounmap(volatile void __iomem *addr);
-extern void __arm_iounmap(volatile void __iomem *addr);
 
 extern void __iomem * (*arch_ioremap_caller)(phys_addr_t, size_t,
        unsigned int, void *);
@@ -321,21 +316,24 @@ extern void _memset_io(volatile void __iomem *, int, size_t);
 static inline void memset_io(volatile void __iomem *dst, unsigned c,
        size_t count)
 {
-       memset((void __force *)dst, c, count);
+       extern void mmioset(void *, unsigned int, size_t);
+       mmioset((void __force *)dst, c, count);
 }
 #define memset_io(dst,c,count) memset_io(dst,c,count)
 
 static inline void memcpy_fromio(void *to, const volatile void __iomem *from,
        size_t count)
 {
-       memcpy(to, (const void __force *)from, count);
+       extern void mmiocpy(void *, const void *, size_t);
+       mmiocpy(to, (const void __force *)from, count);
 }
 #define memcpy_fromio(to,from,count) memcpy_fromio(to,from,count)
 
 static inline void memcpy_toio(volatile void __iomem *to, const void *from,
        size_t count)
 {
-       memcpy((void __force *)to, from, count);
+       extern void mmiocpy(void *, const void *, size_t);
+       mmiocpy((void __force *)to, from, count);
 }
 #define memcpy_toio(to,from,count) memcpy_toio(to,from,count)
 
@@ -348,18 +346,61 @@ static inline void memcpy_toio(volatile void __iomem *to, const void *from,
 #endif /* readl */
 
 /*
- * ioremap and friends.
+ * ioremap() and friends.
+ *
+ * ioremap() takes a resource address, and size.  Due to the ARM memory
+ * types, it is important to use the correct ioremap() function as each
+ * mapping has specific properties.
+ *
+ * Function            Memory type     Cacheability    Cache hint
+ * ioremap()           Device          n/a             n/a
+ * ioremap_nocache()   Device          n/a             n/a
+ * ioremap_cache()     Normal          Writeback       Read allocate
+ * ioremap_wc()                Normal          Non-cacheable   n/a
+ * ioremap_wt()                Normal          Non-cacheable   n/a
+ *
+ * All device mappings have the following properties:
+ * - no access speculation
+ * - no repetition (eg, on return from an exception)
+ * - number, order and size of accesses are maintained
+ * - unaligned accesses are "unpredictable"
+ * - writes may be delayed before they hit the endpoint device
  *
- * ioremap takes a PCI memory address, as specified in
- * Documentation/io-mapping.txt.
+ * ioremap_nocache() is the same as ioremap() as there are too many device
+ * drivers using this for device registers, and documentation which tells
+ * people to use it for such for this to be any different.  This is not a
+ * safe fallback for memory-like mappings, or memory regions where the
+ * compiler may generate unaligned accesses - eg, via inlining its own
+ * memcpy.
  *
+ * All normal memory mappings have the following properties:
+ * - reads can be repeated with no side effects
+ * - repeated reads return the last value written
+ * - reads can fetch additional locations without side effects
+ * - writes can be repeated (in certain cases) with no side effects
+ * - writes can be merged before accessing the target
+ * - unaligned accesses can be supported
+ * - ordering is not guaranteed without explicit dependencies or barrier
+ *   instructions
+ * - writes may be delayed before they hit the endpoint memory
+ *
+ * The cache hint is only a performance hint: CPUs may alias these hints.
+ * Eg, a CPU not implementing read allocate but implementing write allocate
+ * will provide a write allocate mapping instead.
  */
-#define ioremap(cookie,size)           __arm_ioremap((cookie), (size), MT_DEVICE)
-#define ioremap_nocache(cookie,size)   __arm_ioremap((cookie), (size), MT_DEVICE)
-#define ioremap_cache(cookie,size)     __arm_ioremap((cookie), (size), MT_DEVICE_CACHED)
-#define ioremap_wc(cookie,size)                __arm_ioremap((cookie), (size), MT_DEVICE_WC)
-#define ioremap_wt(cookie,size)                __arm_ioremap((cookie), (size), MT_DEVICE)
-#define iounmap                                __arm_iounmap
+void __iomem *ioremap(resource_size_t res_cookie, size_t size);
+#define ioremap ioremap
+#define ioremap_nocache ioremap
+
+void __iomem *ioremap_cache(resource_size_t res_cookie, size_t size);
+#define ioremap_cache ioremap_cache
+
+void __iomem *ioremap_wc(resource_size_t res_cookie, size_t size);
+#define ioremap_wc ioremap_wc
+#define ioremap_wt ioremap_wc
+
+void iounmap(volatile void __iomem *iomem_cookie);
+#define iounmap iounmap
 
 /*
  * io{read,write}{16,32}be() macros
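
The table added above documents the mapping types behind the new C-level ioremap() entry points. For illustration only, a minimal sketch of how a driver would pair ioremap() with readl()/writel() and release the mapping with iounmap(); the device base, size and register offset below are hypothetical, not part of this patch:

/* Illustrative sketch only; MY_DEV_BASE/MY_DEV_SIZE/MY_CTRL_REG are invented. */
#include <linux/errno.h>
#include <linux/io.h>

#define MY_DEV_BASE	0x40001000	/* hypothetical physical base address */
#define MY_DEV_SIZE	0x100
#define MY_CTRL_REG	0x04		/* hypothetical register offset */

static void __iomem *regs;

static int my_dev_map(void)
{
	regs = ioremap(MY_DEV_BASE, MY_DEV_SIZE);	/* device-type mapping */
	if (!regs)
		return -ENOMEM;
	writel(0x1, regs + MY_CTRL_REG);		/* ordered, non-speculated MMIO write */
	return 0;
}

static void my_dev_unmap(void)
{
	iounmap(regs);
}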
diff --git a/arch/arm/include/asm/memory.h b/arch/arm/include/asm/memory.h
index 3a72d69b3255e8fa0d53c41ae4df90c126777020..6f225acc07c56bdef12a3ca68ae526d8fe836bf4 100644 (file)
@@ -275,7 +275,7 @@ static inline void *phys_to_virt(phys_addr_t x)
  */
 #define __pa(x)                        __virt_to_phys((unsigned long)(x))
 #define __va(x)                        ((void *)__phys_to_virt((phys_addr_t)(x)))
-#define pfn_to_kaddr(pfn)      __va((pfn) << PAGE_SHIFT)
+#define pfn_to_kaddr(pfn)      __va((phys_addr_t)(pfn) << PAGE_SHIFT)
 
 extern phys_addr_t (*arch_virt_to_idmap)(unsigned long x);
 
diff --git a/arch/arm/include/asm/pgtable-2level.h b/arch/arm/include/asm/pgtable-2level.h
index bfd662e49a25efb04b1929f7dba98a0befe3e684..aeddd28b3595515aa70fb849bd65179a7b800443 100644 (file)
 
 /*
  * These are the memory types, defined to be compatible with
- * pre-ARMv6 CPUs cacheable and bufferable bits:   XXCB
+ * pre-ARMv6 CPUs cacheable and bufferable bits: n/a,n/a,C,B
+ * ARMv6+ without TEX remapping, they are a table index.
+ * ARMv6+ with TEX remapping, they correspond to n/a,TEX(0),C,B
+ *
+ * MT type             Pre-ARMv6       ARMv6+ type / cacheable status
+ * UNCACHED            Uncached        Strongly ordered
+ * BUFFERABLE          Bufferable      Normal memory / non-cacheable
+ * WRITETHROUGH                Writethrough    Normal memory / write through
+ * WRITEBACK           Writeback       Normal memory / write back, read alloc
+ * MINICACHE           Minicache       N/A
+ * WRITEALLOC          Writeback       Normal memory / write back, write alloc
+ * DEV_SHARED          Uncached        Device memory (shared)
+ * DEV_NONSHARED       Uncached        Device memory (non-shared)
+ * DEV_WC              Bufferable      Normal memory / non-cacheable
+ * DEV_CACHED          Writeback       Normal memory / write back, read alloc
+ * VECTORS             Variable        Normal memory / variable
+ *
+ * All normal memory mappings have the following properties:
+ * - reads can be repeated with no side effects
+ * - repeated reads return the last value written
+ * - reads can fetch additional locations without side effects
+ * - writes can be repeated (in certain cases) with no side effects
+ * - writes can be merged before accessing the target
+ * - unaligned accesses can be supported
+ *
+ * All device mappings have the following properties:
+ * - no access speculation
+ * - no repetition (eg, on return from an exception)
+ * - number, order and size of accesses are maintained
+ * - unaligned accesses are "unpredictable"
  */
 #define L_PTE_MT_UNCACHED      (_AT(pteval_t, 0x00) << 2)      /* 0000 */
 #define L_PTE_MT_BUFFERABLE    (_AT(pteval_t, 0x01) << 2)      /* 0001 */
diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c
index a88671cfe1ffb1e1ee43b06a7c8c6fd704f38580..5e5a51a99e68ec77b38b8101611bc853eb8496af 100644 (file)
@@ -50,6 +50,9 @@ extern void __aeabi_ulcmp(void);
 
 extern void fpundefinstr(void);
 
+void mmioset(void *, unsigned int, size_t);
+void mmiocpy(void *, const void *, size_t);
+
        /* platform dependent support */
 EXPORT_SYMBOL(arm_delay_ops);
 
@@ -88,6 +91,9 @@ EXPORT_SYMBOL(memmove);
 EXPORT_SYMBOL(memchr);
 EXPORT_SYMBOL(__memzero);
 
+EXPORT_SYMBOL(mmioset);
+EXPORT_SYMBOL(mmiocpy);
+
 #ifdef CONFIG_MMU
 EXPORT_SYMBOL(copy_page);
 
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index 7dac3086e361c8e3680d5209e5b72184c9c48cdb..cb4fb1e69778603d41356f3ed7a98695f4cc0cdb 100644 (file)
@@ -410,7 +410,7 @@ ENDPROC(__fiq_abt)
        zero_fp
 
        .if     \trace
-#ifdef CONFIG_IRQSOFF_TRACER
+#ifdef CONFIG_TRACE_IRQFLAGS
        bl      trace_hardirqs_off
 #endif
        ct_user_exit save = 0
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index 90dfbedfbfb8527b74ddcfcef125beb151f875f5..3d6b7821cff8c952e73c72cad5a1f8d90cdf1758 100644 (file)
@@ -578,7 +578,7 @@ void handle_IPI(int ipinr, struct pt_regs *regs)
        struct pt_regs *old_regs = set_irq_regs(regs);
 
        if ((unsigned)ipinr < NR_IPI) {
-               trace_ipi_entry(ipi_types[ipinr]);
+               trace_ipi_entry_rcuidle(ipi_types[ipinr]);
                __inc_irq_stat(cpu, ipi_irqs[ipinr]);
        }
 
@@ -637,7 +637,7 @@ void handle_IPI(int ipinr, struct pt_regs *regs)
        }
 
        if ((unsigned)ipinr < NR_IPI)
-               trace_ipi_exit(ipi_types[ipinr]);
+               trace_ipi_exit_rcuidle(ipi_types[ipinr]);
        set_irq_regs(old_regs);
 }
 
diff --git a/arch/arm/lib/memcpy.S b/arch/arm/lib/memcpy.S
index 7797e81e40e00f72a85ba90dc5abcc1227504b44..64111bd4440b1aa3702c469ce349b303a0244ebd 100644 (file)
 
 /* Prototype: void *memcpy(void *dest, const void *src, size_t n); */
 
+ENTRY(mmiocpy)
 ENTRY(memcpy)
 
 #include "copy_template.S"
 
 ENDPROC(memcpy)
+ENDPROC(mmiocpy)
diff --git a/arch/arm/lib/memset.S b/arch/arm/lib/memset.S
index a4ee97b5a2bfcfd3099353b44e8678c4df5bff6b..3c65e3bd790fe1f7aec59d9568cb4c0799be0e9a 100644 (file)
@@ -16,6 +16,7 @@
        .text
        .align  5
 
+ENTRY(mmioset)
 ENTRY(memset)
 UNWIND( .fnstart         )
        ands    r3, r0, #3              @ 1 unaligned?
@@ -133,3 +134,4 @@ UNWIND( .fnstart            )
        b       1b
 UNWIND( .fnend   )
 ENDPROC(memset)
+ENDPROC(mmioset)
diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c
index d1e5ad7ab3bc6ae9b3a538a3601bccf929b0e998..0c81056c1dd7edac30e1d86bdd2103d1e2d6832f 100644 (file)
@@ -255,7 +255,7 @@ remap_area_supersections(unsigned long virt, unsigned long pfn,
 }
 #endif
 
-void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn,
+static void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn,
        unsigned long offset, size_t size, unsigned int mtype, void *caller)
 {
        const struct mem_type *type;
@@ -363,7 +363,7 @@ __arm_ioremap_pfn(unsigned long pfn, unsigned long offset, size_t size,
                  unsigned int mtype)
 {
        return __arm_ioremap_pfn_caller(pfn, offset, size, mtype,
-                       __builtin_return_address(0));
+                                       __builtin_return_address(0));
 }
 EXPORT_SYMBOL(__arm_ioremap_pfn);
 
@@ -371,13 +371,26 @@ void __iomem * (*arch_ioremap_caller)(phys_addr_t, size_t,
                                      unsigned int, void *) =
        __arm_ioremap_caller;
 
-void __iomem *
-__arm_ioremap(phys_addr_t phys_addr, size_t size, unsigned int mtype)
+void __iomem *ioremap(resource_size_t res_cookie, size_t size)
+{
+       return arch_ioremap_caller(res_cookie, size, MT_DEVICE,
+                                  __builtin_return_address(0));
+}
+EXPORT_SYMBOL(ioremap);
+
+void __iomem *ioremap_cache(resource_size_t res_cookie, size_t size)
+{
+       return arch_ioremap_caller(res_cookie, size, MT_DEVICE_CACHED,
+                                  __builtin_return_address(0));
+}
+EXPORT_SYMBOL(ioremap_cache);
+
+void __iomem *ioremap_wc(resource_size_t res_cookie, size_t size)
 {
-       return arch_ioremap_caller(phys_addr, size, mtype,
-               __builtin_return_address(0));
+       return arch_ioremap_caller(res_cookie, size, MT_DEVICE_WC,
+                                  __builtin_return_address(0));
 }
-EXPORT_SYMBOL(__arm_ioremap);
+EXPORT_SYMBOL(ioremap_wc);
 
 /*
  * Remap an arbitrary physical address space into the kernel virtual
@@ -431,11 +444,11 @@ void __iounmap(volatile void __iomem *io_addr)
 
 void (*arch_iounmap)(volatile void __iomem *) = __iounmap;
 
-void __arm_iounmap(volatile void __iomem *io_addr)
+void iounmap(volatile void __iomem *cookie)
 {
-       arch_iounmap(io_addr);
+       arch_iounmap(cookie);
 }
-EXPORT_SYMBOL(__arm_iounmap);
+EXPORT_SYMBOL(iounmap);
 
 #ifdef CONFIG_PCI
 static int pci_ioremap_mem_type = MT_DEVICE;
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 6ca7d9aa896f4e14804e1096407b16e9b1020456..870838a46d524141a9cb1d65980c1ba7456465cb 100644 (file)
@@ -1072,6 +1072,7 @@ void __init sanity_check_meminfo(void)
        int highmem = 0;
        phys_addr_t vmalloc_limit = __pa(vmalloc_min - 1) + 1;
        struct memblock_region *reg;
+       bool should_use_highmem = false;
 
        for_each_memblock(memory, reg) {
                phys_addr_t block_start = reg->base;
@@ -1090,6 +1091,7 @@ void __init sanity_check_meminfo(void)
                                pr_notice("Ignoring RAM at %pa-%pa (!CONFIG_HIGHMEM)\n",
                                          &block_start, &block_end);
                                memblock_remove(reg->base, reg->size);
+                               should_use_highmem = true;
                                continue;
                        }
 
@@ -1100,6 +1102,7 @@ void __init sanity_check_meminfo(void)
                                          &block_start, &block_end, &vmalloc_limit);
                                memblock_remove(vmalloc_limit, overlap_size);
                                block_end = vmalloc_limit;
+                               should_use_highmem = true;
                        }
                }
 
@@ -1134,6 +1137,9 @@ void __init sanity_check_meminfo(void)
                }
        }
 
+       if (should_use_highmem)
+               pr_notice("Consider using a HIGHMEM enabled kernel.\n");
+
        high_memory = __va(arm_lowmem_limit - 1) + 1;
 
        /*
@@ -1494,6 +1500,7 @@ void __init paging_init(const struct machine_desc *mdesc)
        build_mem_type_table();
        prepare_page_table();
        map_lowmem();
+       memblock_set_current_limit(arm_lowmem_limit);
        dma_contiguous_remap();
        devicemaps_init(mdesc);
        kmap_init();
diff --git a/arch/arm/mm/nommu.c b/arch/arm/mm/nommu.c
index afd7e05d95f1884becf11913aa4b075048a4d4f8..1dd10936d68d0422b328128fbb6a234d77adee1d 100644 (file)
@@ -351,30 +351,43 @@ void __iomem *__arm_ioremap_pfn(unsigned long pfn, unsigned long offset,
 }
 EXPORT_SYMBOL(__arm_ioremap_pfn);
 
-void __iomem *__arm_ioremap_pfn_caller(unsigned long pfn, unsigned long offset,
-                          size_t size, unsigned int mtype, void *caller)
+void __iomem *__arm_ioremap_caller(phys_addr_t phys_addr, size_t size,
+                                  unsigned int mtype, void *caller)
 {
-       return __arm_ioremap_pfn(pfn, offset, size, mtype);
+       return (void __iomem *)phys_addr;
 }
 
-void __iomem *__arm_ioremap(phys_addr_t phys_addr, size_t size,
-                           unsigned int mtype)
+void __iomem * (*arch_ioremap_caller)(phys_addr_t, size_t, unsigned int, void *);
+
+void __iomem *ioremap(resource_size_t res_cookie, size_t size)
 {
-       return (void __iomem *)phys_addr;
+       return __arm_ioremap_caller(res_cookie, size, MT_DEVICE,
+                                   __builtin_return_address(0));
 }
-EXPORT_SYMBOL(__arm_ioremap);
+EXPORT_SYMBOL(ioremap);
 
-void __iomem * (*arch_ioremap_caller)(phys_addr_t, size_t, unsigned int, void *);
+void __iomem *ioremap_cache(resource_size_t res_cookie, size_t size)
+{
+       return __arm_ioremap_caller(res_cookie, size, MT_DEVICE_CACHED,
+                                   __builtin_return_address(0));
+}
+EXPORT_SYMBOL(ioremap_cache);
 
-void __iomem *__arm_ioremap_caller(phys_addr_t phys_addr, size_t size,
-                                  unsigned int mtype, void *caller)
+void __iomem *ioremap_wc(resource_size_t res_cookie, size_t size)
+{
+       return __arm_ioremap_caller(res_cookie, size, MT_DEVICE_WC,
+                                   __builtin_return_address(0));
+}
+EXPORT_SYMBOL(ioremap_wc);
+
+void __iounmap(volatile void __iomem *addr)
 {
-       return __arm_ioremap(phys_addr, size, mtype);
 }
+EXPORT_SYMBOL(__iounmap);
 
 void (*arch_iounmap)(volatile void __iomem *);
 
-void __arm_iounmap(volatile void __iomem *addr)
+void iounmap(volatile void __iomem *addr)
 {
 }
-EXPORT_SYMBOL(__arm_iounmap);
+EXPORT_SYMBOL(iounmap);
diff --git a/arch/arm/vdso/vdsomunge.c b/arch/arm/vdso/vdsomunge.c
index 9005b07296c8b38c728ff6466ebd365fda1753ed..aedec81d11988181e3e6f0631113fd01e4737ac8 100644 (file)
  * it does.
  */
 
-#define _GNU_SOURCE
-
 #include <byteswap.h>
 #include <elf.h>
 #include <errno.h>
-#include <error.h>
 #include <fcntl.h>
+#include <stdarg.h>
 #include <stdbool.h>
 #include <stdio.h>
 #include <stdlib.h>
 #define EF_ARM_ABI_FLOAT_HARD 0x400
 #endif
 
+static int failed;
+static const char *argv0;
 static const char *outfile;
 
+static void fail(const char *fmt, ...)
+{
+       va_list ap;
+
+       failed = 1;
+       fprintf(stderr, "%s: ", argv0);
+       va_start(ap, fmt);
+       vfprintf(stderr, fmt, ap);
+       va_end(ap);
+       exit(EXIT_FAILURE);
+}
+
 static void cleanup(void)
 {
-       if (error_message_count > 0 && outfile != NULL)
+       if (failed && outfile != NULL)
                unlink(outfile);
 }
 
@@ -119,68 +131,66 @@ int main(int argc, char **argv)
        int infd;
 
        atexit(cleanup);
+       argv0 = argv[0];
 
        if (argc != 3)
-               error(EXIT_FAILURE, 0, "Usage: %s [infile] [outfile]", argv[0]);
+               fail("Usage: %s [infile] [outfile]\n", argv[0]);
 
        infile = argv[1];
        outfile = argv[2];
 
        infd = open(infile, O_RDONLY);
        if (infd < 0)
-               error(EXIT_FAILURE, errno, "Cannot open %s", infile);
+               fail("Cannot open %s: %s\n", infile, strerror(errno));
 
        if (fstat(infd, &stat) != 0)
-               error(EXIT_FAILURE, errno, "Failed stat for %s", infile);
+               fail("Failed stat for %s: %s\n", infile, strerror(errno));
 
        inbuf = mmap(NULL, stat.st_size, PROT_READ, MAP_PRIVATE, infd, 0);
        if (inbuf == MAP_FAILED)
-               error(EXIT_FAILURE, errno, "Failed to map %s", infile);
+               fail("Failed to map %s: %s\n", infile, strerror(errno));
 
        close(infd);
 
        inhdr = inbuf;
 
        if (memcmp(&inhdr->e_ident, ELFMAG, SELFMAG) != 0)
-               error(EXIT_FAILURE, 0, "Not an ELF file");
+               fail("Not an ELF file\n");
 
        if (inhdr->e_ident[EI_CLASS] != ELFCLASS32)
-               error(EXIT_FAILURE, 0, "Unsupported ELF class");
+               fail("Unsupported ELF class\n");
 
        swap = inhdr->e_ident[EI_DATA] != HOST_ORDER;
 
        if (read_elf_half(inhdr->e_type, swap) != ET_DYN)
-               error(EXIT_FAILURE, 0, "Not a shared object");
+               fail("Not a shared object\n");
 
-       if (read_elf_half(inhdr->e_machine, swap) != EM_ARM) {
-               error(EXIT_FAILURE, 0, "Unsupported architecture %#x",
-                     inhdr->e_machine);
-       }
+       if (read_elf_half(inhdr->e_machine, swap) != EM_ARM)
+               fail("Unsupported architecture %#x\n", inhdr->e_machine);
 
        e_flags = read_elf_word(inhdr->e_flags, swap);
 
        if (EF_ARM_EABI_VERSION(e_flags) != EF_ARM_EABI_VER5) {
-               error(EXIT_FAILURE, 0, "Unsupported EABI version %#x",
-                     EF_ARM_EABI_VERSION(e_flags));
+               fail("Unsupported EABI version %#x\n",
+                    EF_ARM_EABI_VERSION(e_flags));
        }
 
        if (e_flags & EF_ARM_ABI_FLOAT_HARD)
-               error(EXIT_FAILURE, 0,
-                     "Unexpected hard-float flag set in e_flags");
+               fail("Unexpected hard-float flag set in e_flags\n");
 
        clear_soft_float = !!(e_flags & EF_ARM_ABI_FLOAT_SOFT);
 
        outfd = open(outfile, O_RDWR | O_CREAT | O_TRUNC, S_IRUSR | S_IWUSR);
        if (outfd < 0)
-               error(EXIT_FAILURE, errno, "Cannot open %s", outfile);
+               fail("Cannot open %s: %s\n", outfile, strerror(errno));
 
        if (ftruncate(outfd, stat.st_size) != 0)
-               error(EXIT_FAILURE, errno, "Cannot truncate %s", outfile);
+               fail("Cannot truncate %s: %s\n", outfile, strerror(errno));
 
        outbuf = mmap(NULL, stat.st_size, PROT_READ | PROT_WRITE, MAP_SHARED,
                      outfd, 0);
        if (outbuf == MAP_FAILED)
-               error(EXIT_FAILURE, errno, "Failed to map %s", outfile);
+               fail("Failed to map %s: %s\n", outfile, strerror(errno));
 
        close(outfd);
 
@@ -195,7 +205,7 @@ int main(int argc, char **argv)
        }
 
        if (msync(outbuf, stat.st_size, MS_SYNC) != 0)
-               error(EXIT_FAILURE, errno, "Failed to sync %s", outfile);
+               fail("Failed to sync %s: %s\n", outfile, strerror(errno));
 
        return EXIT_SUCCESS;
 }
diff --git a/arch/x86/lib/usercopy.c b/arch/x86/lib/usercopy.c
index ddf9ecb53cc3e23171ea889dc47339e67ba5d2c8..e342586db6e4b08230ce6a596c91e27bb5fcf2db 100644 (file)
@@ -20,7 +20,7 @@ copy_from_user_nmi(void *to, const void __user *from, unsigned long n)
        unsigned long ret;
 
        if (__range_not_ok(from, n, TASK_SIZE))
-               return 0;
+               return n;
 
        /*
         * Even though this function is typically called from NMI/IRQ context
diff --git a/drivers/misc/mei/bus.c b/drivers/misc/mei/bus.c
index 357b6ae4d207d785176c22d19a2c572617b8220f..458aa5a09c522816febc6abef20d034a67cfea5d 100644 (file)
@@ -552,22 +552,6 @@ void mei_cl_bus_rx_event(struct mei_cl *cl)
        schedule_work(&device->event_work);
 }
 
-void mei_cl_bus_remove_devices(struct mei_device *dev)
-{
-       struct mei_cl *cl, *next;
-
-       mutex_lock(&dev->device_lock);
-       list_for_each_entry_safe(cl, next, &dev->device_list, device_link) {
-               if (cl->device)
-                       mei_cl_remove_device(cl->device);
-
-               list_del(&cl->device_link);
-               mei_cl_unlink(cl);
-               kfree(cl);
-       }
-       mutex_unlock(&dev->device_lock);
-}
-
 int __init mei_cl_bus_init(void)
 {
        return bus_register(&mei_cl_bus_type);
diff --git a/drivers/misc/mei/init.c b/drivers/misc/mei/init.c
index 94514b2c7a50277ba4adf8604ce6084b01309967..00c3865ca3b1d42042d78d5558eac547ea8c03a8 100644 (file)
@@ -333,8 +333,6 @@ void mei_stop(struct mei_device *dev)
 
        mei_nfc_host_exit(dev);
 
-       mei_cl_bus_remove_devices(dev);
-
        mutex_lock(&dev->device_lock);
 
        mei_wd_stop(dev);
diff --git a/drivers/misc/mei/nfc.c b/drivers/misc/mei/nfc.c
index b983c4ecad3800d34e21a453e034ed15d84574f4..290ef3037437816114e6d373d4889c6ee2855c1b 100644 (file)
@@ -402,11 +402,12 @@ void mei_nfc_host_exit(struct mei_device *dev)
 
        cldev->priv_data = NULL;
 
-       mutex_lock(&dev->device_lock);
        /* Need to remove the device here
         * since mei_nfc_free will unlink the clients
         */
        mei_cl_remove_device(cldev);
+
+       mutex_lock(&dev->device_lock);
        mei_nfc_free(ndev);
        mutex_unlock(&dev->device_lock);
 }
diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
index aadb7282883493597f8dae099f20c3e86694bea8..2553aa8b608d84d1673190ea634c5e84c86d9f0b 100644 (file)
@@ -504,7 +504,7 @@ __read_extent_tree_block(const char *function, unsigned int line,
        struct buffer_head              *bh;
        int                             err;
 
-       bh = sb_getblk(inode->i_sb, pblk);
+       bh = sb_getblk_gfp(inode->i_sb, pblk, __GFP_MOVABLE | GFP_NOFS);
        if (unlikely(!bh))
                return ERR_PTR(-ENOMEM);
 
@@ -1089,7 +1089,7 @@ static int ext4_ext_split(handle_t *handle, struct inode *inode,
                err = -EIO;
                goto cleanup;
        }
-       bh = sb_getblk(inode->i_sb, newblock);
+       bh = sb_getblk_gfp(inode->i_sb, newblock, __GFP_MOVABLE | GFP_NOFS);
        if (unlikely(!bh)) {
                err = -ENOMEM;
                goto cleanup;
@@ -1283,7 +1283,7 @@ static int ext4_ext_grow_indepth(handle_t *handle, struct inode *inode,
        if (newblock == 0)
                return err;
 
-       bh = sb_getblk(inode->i_sb, newblock);
+       bh = sb_getblk_gfp(inode->i_sb, newblock, __GFP_MOVABLE | GFP_NOFS);
        if (unlikely(!bh))
                return -ENOMEM;
        lock_buffer(bh);
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 41f8e55afcd11491c5f25bf0989f74c12b9baf69..cecf9aa1081134d255455ee329a4e118b8355216 100644 (file)
@@ -1323,7 +1323,7 @@ static void ext4_da_page_release_reservation(struct page *page,
                                             unsigned int offset,
                                             unsigned int length)
 {
-       int to_release = 0;
+       int to_release = 0, contiguous_blks = 0;
        struct buffer_head *head, *bh;
        unsigned int curr_off = 0;
        struct inode *inode = page->mapping->host;
@@ -1344,14 +1344,23 @@ static void ext4_da_page_release_reservation(struct page *page,
 
                if ((offset <= curr_off) && (buffer_delay(bh))) {
                        to_release++;
+                       contiguous_blks++;
                        clear_buffer_delay(bh);
+               } else if (contiguous_blks) {
+                       lblk = page->index <<
+                              (PAGE_CACHE_SHIFT - inode->i_blkbits);
+                       lblk += (curr_off >> inode->i_blkbits) -
+                               contiguous_blks;
+                       ext4_es_remove_extent(inode, lblk, contiguous_blks);
+                       contiguous_blks = 0;
                }
                curr_off = next_off;
        } while ((bh = bh->b_this_page) != head);
 
-       if (to_release) {
+       if (contiguous_blks) {
                lblk = page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
-               ext4_es_remove_extent(inode, lblk, to_release);
+               lblk += (curr_off >> inode->i_blkbits) - contiguous_blks;
+               ext4_es_remove_extent(inode, lblk, contiguous_blks);
        }
 
        /* If we have released all the blocks belonging to a cluster, then we
@@ -4344,7 +4353,12 @@ static void ext4_update_other_inodes_time(struct super_block *sb,
        int inode_size = EXT4_INODE_SIZE(sb);
 
        oi.orig_ino = orig_ino;
-       ino = (orig_ino & ~(inodes_per_block - 1)) + 1;
+       /*
+        * Calculate the first inode in the inode table block.  Inode
+        * numbers are one-based.  That is, the first inode in a block
+        * (assuming 4k blocks and 256 byte inodes) is (n*16 + 1).
+        */
+       ino = ((orig_ino - 1) & ~(inodes_per_block - 1)) + 1;
        for (i = 0; i < inodes_per_block; i++, ino++, buf += inode_size) {
                if (ino == orig_ino)
                        continue;
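
The new comment depends on inode numbers being one-based. A quick worked check, using the comment's own assumption of 16 inodes per block, shows why subtracting one first matters. Inodes 17..32 share the second inode-table block, so for orig_ino = 32:

	old:  ino = (32 & ~15) + 1        = 33   (first inode of the following block, wrong)
	new:  ino = ((32 - 1) & ~15) + 1  = 17   (correct first inode of this block)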
diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
index f6aedf88da437ee324c314bb1020baa51db0423c..34b610ea503053674b3b87ffd3503d6c641c573d 100644 (file)
@@ -4816,18 +4816,12 @@ do_more:
                /*
                 * blocks being freed are metadata. these blocks shouldn't
                 * be used until this transaction is committed
+                *
+                * We use __GFP_NOFAIL because ext4_free_blocks() is not allowed
+                * to fail.
                 */
-       retry:
-               new_entry = kmem_cache_alloc(ext4_free_data_cachep, GFP_NOFS);
-               if (!new_entry) {
-                       /*
-                        * We use a retry loop because
-                        * ext4_free_blocks() is not allowed to fail.
-                        */
-                       cond_resched();
-                       congestion_wait(BLK_RW_ASYNC, HZ/50);
-                       goto retry;
-               }
+               new_entry = kmem_cache_alloc(ext4_free_data_cachep,
+                               GFP_NOFS|__GFP_NOFAIL);
                new_entry->efd_start_cluster = bit;
                new_entry->efd_group = block_group;
                new_entry->efd_count = count_clusters;
diff --git a/fs/ext4/migrate.c b/fs/ext4/migrate.c
index b52374e4210221bc02c9cb3ad23f8027f21e77ad..6163ad21cb0ef6b184bdfceb700c1dfd89317590 100644 (file)
@@ -620,6 +620,7 @@ int ext4_ind_migrate(struct inode *inode)
        struct ext4_inode_info          *ei = EXT4_I(inode);
        struct ext4_extent              *ex;
        unsigned int                    i, len;
+       ext4_lblk_t                     start, end;
        ext4_fsblk_t                    blk;
        handle_t                        *handle;
        int                             ret;
@@ -633,6 +634,14 @@ int ext4_ind_migrate(struct inode *inode)
                                       EXT4_FEATURE_RO_COMPAT_BIGALLOC))
                return -EOPNOTSUPP;
 
+       /*
+        * In order to get correct extent info, force all delayed allocation
+        * blocks to be allocated, otherwise delayed allocation blocks may not
+        * be reflected and bypass the checks on extent header.
+        */
+       if (test_opt(inode->i_sb, DELALLOC))
+               ext4_alloc_da_blocks(inode);
+
        handle = ext4_journal_start(inode, EXT4_HT_MIGRATE, 1);
        if (IS_ERR(handle))
                return PTR_ERR(handle);
@@ -650,11 +659,13 @@ int ext4_ind_migrate(struct inode *inode)
                goto errout;
        }
        if (eh->eh_entries == 0)
-               blk = len = 0;
+               blk = len = start = end = 0;
        else {
                len = le16_to_cpu(ex->ee_len);
                blk = ext4_ext_pblock(ex);
-               if (len > EXT4_NDIR_BLOCKS) {
+               start = le32_to_cpu(ex->ee_block);
+               end = start + len - 1;
+               if (end >= EXT4_NDIR_BLOCKS) {
                        ret = -EOPNOTSUPP;
                        goto errout;
                }
@@ -662,7 +673,7 @@ int ext4_ind_migrate(struct inode *inode)
 
        ext4_clear_inode_flag(inode, EXT4_INODE_EXTENTS);
        memset(ei->i_data, 0, sizeof(ei->i_data));
-       for (i=0; i < len; i++)
+       for (i = start; i <= end; i++)
                ei->i_data[i] = cpu_to_le32(blk++);
        ext4_mark_inode_dirty(handle, inode);
 errout:
diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h
index 73b45225a7ca1bbe749ea3b62e38a35add22936b..e6797ded700ec8f7ef6a8fccccddf71212715077 100644 (file)
@@ -317,6 +317,13 @@ sb_getblk(struct super_block *sb, sector_t block)
        return __getblk_gfp(sb->s_bdev, block, sb->s_blocksize, __GFP_MOVABLE);
 }
 
+
+static inline struct buffer_head *
+sb_getblk_gfp(struct super_block *sb, sector_t block, gfp_t gfp)
+{
+       return __getblk_gfp(sb->s_bdev, block, sb->s_blocksize, gfp);
+}
+
 static inline struct buffer_head *
 sb_find_get_block(struct super_block *sb, sector_t block)
 {
diff --git a/kernel/auditsc.c b/kernel/auditsc.c
index 09c65640cad6156f0ea702bc32ba9847b9465249..e85bdfd15fedd4c8fed46818edae6191673bff07 100644 (file)
@@ -1021,8 +1021,7 @@ static int audit_log_single_execve_arg(struct audit_context *context,
         * for strings that are too long, we should not have created
         * any.
         */
-       if (unlikely((len == 0) || len > MAX_ARG_STRLEN - 1)) {
-               WARN_ON(1);
+       if (WARN_ON_ONCE(len < 0 || len > MAX_ARG_STRLEN - 1)) {
                send_sig(SIGKILL, current, 0);
                return -1;
        }
diff --git a/kernel/events/core.c b/kernel/events/core.c
index e965cfae420725645349c4facd145828e4e59d13..d3dae3419b99566c127f1682b29f39bb184bbdb1 100644 (file)
@@ -4358,14 +4358,6 @@ static void ring_buffer_wakeup(struct perf_event *event)
        rcu_read_unlock();
 }
 
-static void rb_free_rcu(struct rcu_head *rcu_head)
-{
-       struct ring_buffer *rb;
-
-       rb = container_of(rcu_head, struct ring_buffer, rcu_head);
-       rb_free(rb);
-}
-
 struct ring_buffer *ring_buffer_get(struct perf_event *event)
 {
        struct ring_buffer *rb;
diff --git a/kernel/events/internal.h b/kernel/events/internal.h
index 2deb24c7a40dd979313eb1fcff58044d2d948888..2bbad9c1274c3199338e653bbb5c8bd640815fa7 100644 (file)
@@ -11,6 +11,7 @@
 struct ring_buffer {
        atomic_t                        refcount;
        struct rcu_head                 rcu_head;
+       struct irq_work                 irq_work;
 #ifdef CONFIG_PERF_USE_VMALLOC
        struct work_struct              work;
        int                             page_order;     /* allocation order  */
@@ -55,6 +56,15 @@ struct ring_buffer {
 };
 
 extern void rb_free(struct ring_buffer *rb);
+
+static inline void rb_free_rcu(struct rcu_head *rcu_head)
+{
+       struct ring_buffer *rb;
+
+       rb = container_of(rcu_head, struct ring_buffer, rcu_head);
+       rb_free(rb);
+}
+
 extern struct ring_buffer *
 rb_alloc(int nr_pages, long watermark, int cpu, int flags);
 extern void perf_event_wakeup(struct perf_event *event);
diff --git a/kernel/events/ring_buffer.c b/kernel/events/ring_buffer.c
index 96472824a752f76fe651ec1bfb7ab7a52411a12c..b2be01b1aa9dcb7a70792fa381c264b229a106d0 100644 (file)
@@ -221,6 +221,8 @@ void perf_output_end(struct perf_output_handle *handle)
        rcu_read_unlock();
 }
 
+static void rb_irq_work(struct irq_work *work);
+
 static void
 ring_buffer_init(struct ring_buffer *rb, long watermark, int flags)
 {
@@ -241,6 +243,16 @@ ring_buffer_init(struct ring_buffer *rb, long watermark, int flags)
 
        INIT_LIST_HEAD(&rb->event_list);
        spin_lock_init(&rb->event_lock);
+       init_irq_work(&rb->irq_work, rb_irq_work);
+}
+
+static void ring_buffer_put_async(struct ring_buffer *rb)
+{
+       if (!atomic_dec_and_test(&rb->refcount))
+               return;
+
+       rb->rcu_head.next = (void *)rb;
+       irq_work_queue(&rb->irq_work);
 }
 
 /*
@@ -319,7 +331,7 @@ err_put:
        rb_free_aux(rb);
 
 err:
-       ring_buffer_put(rb);
+       ring_buffer_put_async(rb);
        handle->event = NULL;
 
        return NULL;
@@ -370,7 +382,7 @@ void perf_aux_output_end(struct perf_output_handle *handle, unsigned long size,
 
        local_set(&rb->aux_nest, 0);
        rb_free_aux(rb);
-       ring_buffer_put(rb);
+       ring_buffer_put_async(rb);
 }
 
 /*
@@ -557,7 +569,18 @@ static void __rb_free_aux(struct ring_buffer *rb)
 void rb_free_aux(struct ring_buffer *rb)
 {
        if (atomic_dec_and_test(&rb->aux_refcount))
+               irq_work_queue(&rb->irq_work);
+}
+
+static void rb_irq_work(struct irq_work *work)
+{
+       struct ring_buffer *rb = container_of(work, struct ring_buffer, irq_work);
+
+       if (!atomic_read(&rb->aux_refcount))
                __rb_free_aux(rb);
+
+       if (rb->rcu_head.next == (void *)rb)
+               call_rcu(&rb->rcu_head, rb_free_rcu);
 }
 
 #ifndef CONFIG_PERF_USE_VMALLOC
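
The new ring_buffer_put_async() and rb_irq_work() above defer the final free through an irq_work so that the put can be issued from the AUX output paths shown above. The sketch below shows only the general deferral idiom; every name in it is hypothetical and it is not the perf implementation:

/* Minimal sketch of deferring teardown out of a restricted context via irq_work. */
#include <linux/atomic.h>
#include <linux/irq_work.h>
#include <linux/kernel.h>
#include <linux/slab.h>

struct my_buf {
	atomic_t	refcount;
	struct irq_work	free_work;
};

static void my_buf_free_work(struct irq_work *work)
{
	struct my_buf *buf = container_of(work, struct my_buf, free_work);

	kfree(buf);				/* runs later in IRQ context, not NMI */
}

static struct my_buf *my_buf_alloc(void)
{
	struct my_buf *buf = kzalloc(sizeof(*buf), GFP_KERNEL);

	if (buf) {
		atomic_set(&buf->refcount, 1);
		init_irq_work(&buf->free_work, my_buf_free_work);
	}
	return buf;
}

static void my_buf_put_async(struct my_buf *buf)
{
	if (atomic_dec_and_test(&buf->refcount))
		irq_work_queue(&buf->free_work);	/* safe to call from NMI context */
}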
diff --git a/kernel/module.c b/kernel/module.c
index 3e0e19763d246a998acb109c6979e4df593e3958..4d2b82e610e2a48f429a700f0529d6dc5942700a 100644 (file)
@@ -3557,6 +3557,7 @@ static int load_module(struct load_info *info, const char __user *uargs,
        mutex_lock(&module_mutex);
        /* Unlink carefully: kallsyms could be walking list. */
        list_del_rcu(&mod->list);
+       mod_tree_remove(mod);
        wake_up_all(&module_wq);
        /* Wait for RCU-sched synchronizing before releasing mod->list. */
        synchronize_sched();
diff --git a/tools/include/linux/compiler.h b/tools/include/linux/compiler.h
index f0e72674c52d2c9b88b46cb281db39fe8fb68d8d..9098083869c85a62bc4df5bcf8df96f5ffcc7e00 100644 (file)
 
 #define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
 
+#include <linux/types.h>
+
+static __always_inline void __read_once_size(const volatile void *p, void *res, int size)
+{
+       switch (size) {
+       case 1: *(__u8 *)res = *(volatile __u8 *)p; break;
+       case 2: *(__u16 *)res = *(volatile __u16 *)p; break;
+       case 4: *(__u32 *)res = *(volatile __u32 *)p; break;
+       case 8: *(__u64 *)res = *(volatile __u64 *)p; break;
+       default:
+               barrier();
+               __builtin_memcpy((void *)res, (const void *)p, size);
+               barrier();
+       }
+}
+
+static __always_inline void __write_once_size(volatile void *p, void *res, int size)
+{
+       switch (size) {
+       case 1: *(volatile __u8 *)p = *(__u8 *)res; break;
+       case 2: *(volatile __u16 *)p = *(__u16 *)res; break;
+       case 4: *(volatile __u32 *)p = *(__u32 *)res; break;
+       case 8: *(volatile __u64 *)p = *(__u64 *)res; break;
+       default:
+               barrier();
+               __builtin_memcpy((void *)p, (const void *)res, size);
+               barrier();
+       }
+}
+
+/*
+ * Prevent the compiler from merging or refetching reads or writes. The
+ * compiler is also forbidden from reordering successive instances of
+ * READ_ONCE, WRITE_ONCE and ACCESS_ONCE (see below), but only when the
+ * compiler is aware of some particular ordering.  One way to make the
+ * compiler aware of ordering is to put the two invocations of READ_ONCE,
+ * WRITE_ONCE or ACCESS_ONCE() in different C statements.
+ *
+ * In contrast to ACCESS_ONCE these two macros will also work on aggregate
+ * data types like structs or unions. If the size of the accessed data
+ * type exceeds the word size of the machine (e.g., 32 bits or 64 bits)
+ * READ_ONCE() and WRITE_ONCE()  will fall back to memcpy and print a
+ * compile-time warning.
+ *
+ * Their two major use cases are: (1) Mediating communication between
+ * process-level code and irq/NMI handlers, all running on the same CPU,
+ * and (2) Ensuring that the compiler does not  fold, spindle, or otherwise
+ * mutilate accesses that either do not require ordering or that interact
+ * with an explicit memory barrier or atomic instruction that provides the
+ * required ordering.
+ */
+
+#define READ_ONCE(x) \
+       ({ union { typeof(x) __val; char __c[1]; } __u; __read_once_size(&(x), __u.__c, sizeof(x)); __u.__val; })
+
+#define WRITE_ONCE(x, val) \
+       ({ union { typeof(x) __val; char __c[1]; } __u = { .__val = (val) }; __write_once_size(&(x), __u.__c, sizeof(x)); __u.__val; })
+
 #endif /* _TOOLS_LINUX_COMPILER_H */
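
The comment block above spells out when READ_ONCE()/WRITE_ONCE() are needed. A minimal sketch of the first use case, process-level code communicating with an interrupt-style handler, assuming the file is built inside tools/ where this header is on the include path; the flag and function names are hypothetical:

/* Illustrative only: prevent the compiler from caching or tearing the flag. */
#include <linux/compiler.h>

static int done;			/* hypothetical flag set by a handler */

void handler(void)
{
	WRITE_ONCE(done, 1);		/* single, non-torn store */
}

void wait_for_done(void)
{
	while (!READ_ONCE(done))	/* forces a fresh load each iteration */
		;
}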
diff --git a/tools/include/linux/export.h b/tools/include/linux/export.h
deleted file mode 100644 (file)
index d07e586..0000000
+++ /dev/null
@@ -1,10 +0,0 @@
-#ifndef _TOOLS_LINUX_EXPORT_H_
-#define _TOOLS_LINUX_EXPORT_H_
-
-#define EXPORT_SYMBOL(sym)
-#define EXPORT_SYMBOL_GPL(sym)
-#define EXPORT_SYMBOL_GPL_FUTURE(sym)
-#define EXPORT_UNUSED_SYMBOL(sym)
-#define EXPORT_UNUSED_SYMBOL_GPL(sym)
-
-#endif
diff --git a/tools/include/linux/rbtree.h b/tools/include/linux/rbtree.h
new file mode 100644 (file)
index 0000000..1125822
--- /dev/null
@@ -0,0 +1,104 @@
+/*
+  Red Black Trees
+  (C) 1999  Andrea Arcangeli <andrea@suse.de>
+
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 2 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program; if not, write to the Free Software
+  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+
+  linux/include/linux/rbtree.h
+
+  To use rbtrees you'll have to implement your own insert and search cores.
+  This will avoid us to use callbacks and to drop drammatically performances.
+  I know it's not the cleaner way,  but in C (not in C++) to get
+  performances and genericity...
+
+  See Documentation/rbtree.txt for documentation and samples.
+*/
+
+#ifndef __TOOLS_LINUX_PERF_RBTREE_H
+#define __TOOLS_LINUX_PERF_RBTREE_H
+
+#include <linux/kernel.h>
+#include <linux/stddef.h>
+
+struct rb_node {
+       unsigned long  __rb_parent_color;
+       struct rb_node *rb_right;
+       struct rb_node *rb_left;
+} __attribute__((aligned(sizeof(long))));
+    /* The alignment might seem pointless, but allegedly CRIS needs it */
+
+struct rb_root {
+       struct rb_node *rb_node;
+};
+
+
+#define rb_parent(r)   ((struct rb_node *)((r)->__rb_parent_color & ~3))
+
+#define RB_ROOT        (struct rb_root) { NULL, }
+#define        rb_entry(ptr, type, member) container_of(ptr, type, member)
+
+#define RB_EMPTY_ROOT(root)  ((root)->rb_node == NULL)
+
+/* 'empty' nodes are nodes that are known not to be inserted in an rbtree */
+#define RB_EMPTY_NODE(node)  \
+       ((node)->__rb_parent_color == (unsigned long)(node))
+#define RB_CLEAR_NODE(node)  \
+       ((node)->__rb_parent_color = (unsigned long)(node))
+
+
+extern void rb_insert_color(struct rb_node *, struct rb_root *);
+extern void rb_erase(struct rb_node *, struct rb_root *);
+
+
+/* Find logical next and previous nodes in a tree */
+extern struct rb_node *rb_next(const struct rb_node *);
+extern struct rb_node *rb_prev(const struct rb_node *);
+extern struct rb_node *rb_first(const struct rb_root *);
+extern struct rb_node *rb_last(const struct rb_root *);
+
+/* Postorder iteration - always visit the parent after its children */
+extern struct rb_node *rb_first_postorder(const struct rb_root *);
+extern struct rb_node *rb_next_postorder(const struct rb_node *);
+
+/* Fast replacement of a single node without remove/rebalance/add/rebalance */
+extern void rb_replace_node(struct rb_node *victim, struct rb_node *new,
+                           struct rb_root *root);
+
+static inline void rb_link_node(struct rb_node *node, struct rb_node *parent,
+                               struct rb_node **rb_link)
+{
+       node->__rb_parent_color = (unsigned long)parent;
+       node->rb_left = node->rb_right = NULL;
+
+       *rb_link = node;
+}
+
+#define rb_entry_safe(ptr, type, member) \
+       ({ typeof(ptr) ____ptr = (ptr); \
+          ____ptr ? rb_entry(____ptr, type, member) : NULL; \
+       })
+
+
+/*
+ * Handy for checking that we are not deleting an entry that is
+ * already in a list, found in block/{blk-throttle,cfq-iosched}.c,
+ * probably should be moved to lib/rbtree.c...
+ */
+static inline void rb_erase_init(struct rb_node *n, struct rb_root *root)
+{
+       rb_erase(n, root);
+       RB_CLEAR_NODE(n);
+}
+#endif /* __TOOLS_LINUX_PERF_RBTREE_H */
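
As the header comment above says, callers implement their own insert and search cores. A minimal sketch in the style of Documentation/rbtree.txt, with a hypothetical node type keyed by a long (names are illustrative only):

#include <linux/rbtree.h>

struct mynode {
	struct rb_node node;
	long key;
};

static struct mynode *my_search(struct rb_root *root, long key)
{
	struct rb_node *n = root->rb_node;

	while (n) {
		struct mynode *data = rb_entry(n, struct mynode, node);

		if (key < data->key)
			n = n->rb_left;
		else if (key > data->key)
			n = n->rb_right;
		else
			return data;
	}
	return NULL;
}

static void my_insert(struct rb_root *root, struct mynode *new)
{
	struct rb_node **link = &root->rb_node, *parent = NULL;

	while (*link) {
		struct mynode *this = rb_entry(*link, struct mynode, node);

		parent = *link;
		if (new->key < this->key)
			link = &(*link)->rb_left;
		else
			link = &(*link)->rb_right;
	}

	rb_link_node(&new->node, parent, link);		/* attach at the leaf... */
	rb_insert_color(&new->node, root);		/* ...then rebalance/recolor */
}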
diff --git a/tools/include/linux/rbtree_augmented.h b/tools/include/linux/rbtree_augmented.h
new file mode 100644 (file)
index 0000000..43be941
--- /dev/null
@@ -0,0 +1,245 @@
+/*
+  Red Black Trees
+  (C) 1999  Andrea Arcangeli <andrea@suse.de>
+  (C) 2002  David Woodhouse <dwmw2@infradead.org>
+  (C) 2012  Michel Lespinasse <walken@google.com>
+
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 2 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program; if not, write to the Free Software
+  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+
+  tools/linux/include/linux/rbtree_augmented.h
+
+  Copied from:
+  linux/include/linux/rbtree_augmented.h
+*/
+
+#ifndef _TOOLS_LINUX_RBTREE_AUGMENTED_H
+#define _TOOLS_LINUX_RBTREE_AUGMENTED_H
+
+#include <linux/compiler.h>
+#include <linux/rbtree.h>
+
+/*
+ * Please note - only struct rb_augment_callbacks and the prototypes for
+ * rb_insert_augmented() and rb_erase_augmented() are intended to be public.
+ * The rest are implementation details you are not expected to depend on.
+ *
+ * See Documentation/rbtree.txt for documentation and samples.
+ */
+
+struct rb_augment_callbacks {
+       void (*propagate)(struct rb_node *node, struct rb_node *stop);
+       void (*copy)(struct rb_node *old, struct rb_node *new);
+       void (*rotate)(struct rb_node *old, struct rb_node *new);
+};
+
+extern void __rb_insert_augmented(struct rb_node *node, struct rb_root *root,
+       void (*augment_rotate)(struct rb_node *old, struct rb_node *new));
+/*
+ * Fixup the rbtree and update the augmented information when rebalancing.
+ *
+ * On insertion, the user must update the augmented information on the path
+ * leading to the inserted node, then call rb_link_node() as usual and
+ * rb_augment_inserted() instead of the usual rb_insert_color() call.
+ * If rb_augment_inserted() rebalances the rbtree, it will callback into
+ * a user provided function to update the augmented information on the
+ * affected subtrees.
+ */
+static inline void
+rb_insert_augmented(struct rb_node *node, struct rb_root *root,
+                   const struct rb_augment_callbacks *augment)
+{
+       __rb_insert_augmented(node, root, augment->rotate);
+}
+
+#define RB_DECLARE_CALLBACKS(rbstatic, rbname, rbstruct, rbfield,      \
+                            rbtype, rbaugmented, rbcompute)            \
+static inline void                                                     \
+rbname ## _propagate(struct rb_node *rb, struct rb_node *stop)         \
+{                                                                      \
+       while (rb != stop) {                                            \
+               rbstruct *node = rb_entry(rb, rbstruct, rbfield);       \
+               rbtype augmented = rbcompute(node);                     \
+               if (node->rbaugmented == augmented)                     \
+                       break;                                          \
+               node->rbaugmented = augmented;                          \
+               rb = rb_parent(&node->rbfield);                         \
+       }                                                               \
+}                                                                      \
+static inline void                                                     \
+rbname ## _copy(struct rb_node *rb_old, struct rb_node *rb_new)                \
+{                                                                      \
+       rbstruct *old = rb_entry(rb_old, rbstruct, rbfield);            \
+       rbstruct *new = rb_entry(rb_new, rbstruct, rbfield);            \
+       new->rbaugmented = old->rbaugmented;                            \
+}                                                                      \
+static void                                                            \
+rbname ## _rotate(struct rb_node *rb_old, struct rb_node *rb_new)      \
+{                                                                      \
+       rbstruct *old = rb_entry(rb_old, rbstruct, rbfield);            \
+       rbstruct *new = rb_entry(rb_new, rbstruct, rbfield);            \
+       new->rbaugmented = old->rbaugmented;                            \
+       old->rbaugmented = rbcompute(old);                              \
+}                                                                      \
+rbstatic const struct rb_augment_callbacks rbname = {                  \
+       rbname ## _propagate, rbname ## _copy, rbname ## _rotate        \
+};
+
+
+#define        RB_RED          0
+#define        RB_BLACK        1
+
+#define __rb_parent(pc)    ((struct rb_node *)(pc & ~3))
+
+#define __rb_color(pc)     ((pc) & 1)
+#define __rb_is_black(pc)  __rb_color(pc)
+#define __rb_is_red(pc)    (!__rb_color(pc))
+#define rb_color(rb)       __rb_color((rb)->__rb_parent_color)
+#define rb_is_red(rb)      __rb_is_red((rb)->__rb_parent_color)
+#define rb_is_black(rb)    __rb_is_black((rb)->__rb_parent_color)
+
+static inline void rb_set_parent(struct rb_node *rb, struct rb_node *p)
+{
+       rb->__rb_parent_color = rb_color(rb) | (unsigned long)p;
+}
+
+static inline void rb_set_parent_color(struct rb_node *rb,
+                                      struct rb_node *p, int color)
+{
+       rb->__rb_parent_color = (unsigned long)p | color;
+}
+
+static inline void
+__rb_change_child(struct rb_node *old, struct rb_node *new,
+                 struct rb_node *parent, struct rb_root *root)
+{
+       if (parent) {
+               if (parent->rb_left == old)
+                       parent->rb_left = new;
+               else
+                       parent->rb_right = new;
+       } else
+               root->rb_node = new;
+}
+
+extern void __rb_erase_color(struct rb_node *parent, struct rb_root *root,
+       void (*augment_rotate)(struct rb_node *old, struct rb_node *new));
+
+static __always_inline struct rb_node *
+__rb_erase_augmented(struct rb_node *node, struct rb_root *root,
+                    const struct rb_augment_callbacks *augment)
+{
+       struct rb_node *child = node->rb_right, *tmp = node->rb_left;
+       struct rb_node *parent, *rebalance;
+       unsigned long pc;
+
+       if (!tmp) {
+               /*
+                * Case 1: node to erase has no more than 1 child (easy!)
+                *
+                * Note that if there is one child it must be red due to 5)
+                * and node must be black due to 4). We adjust colors locally
+                * so as to bypass __rb_erase_color() later on.
+                */
+               pc = node->__rb_parent_color;
+               parent = __rb_parent(pc);
+               __rb_change_child(node, child, parent, root);
+               if (child) {
+                       child->__rb_parent_color = pc;
+                       rebalance = NULL;
+               } else
+                       rebalance = __rb_is_black(pc) ? parent : NULL;
+               tmp = parent;
+       } else if (!child) {
+               /* Still case 1, but this time the child is node->rb_left */
+               tmp->__rb_parent_color = pc = node->__rb_parent_color;
+               parent = __rb_parent(pc);
+               __rb_change_child(node, tmp, parent, root);
+               rebalance = NULL;
+               tmp = parent;
+       } else {
+               struct rb_node *successor = child, *child2;
+               tmp = child->rb_left;
+               if (!tmp) {
+                       /*
+                        * Case 2: node's successor is its right child
+                        *
+                        *    (n)          (s)
+                        *    / \          / \
+                        *  (x) (s)  ->  (x) (c)
+                        *        \
+                        *        (c)
+                        */
+                       parent = successor;
+                       child2 = successor->rb_right;
+                       augment->copy(node, successor);
+               } else {
+                       /*
+                        * Case 3: node's successor is leftmost under
+                        * node's right child subtree
+                        *
+                        *    (n)          (s)
+                        *    / \          / \
+                        *  (x) (y)  ->  (x) (y)
+                        *      /            /
+                        *    (p)          (p)
+                        *    /            /
+                        *  (s)          (c)
+                        *    \
+                        *    (c)
+                        */
+                       do {
+                               parent = successor;
+                               successor = tmp;
+                               tmp = tmp->rb_left;
+                       } while (tmp);
+                       parent->rb_left = child2 = successor->rb_right;
+                       successor->rb_right = child;
+                       rb_set_parent(child, successor);
+                       augment->copy(node, successor);
+                       augment->propagate(parent, successor);
+               }
+
+               successor->rb_left = tmp = node->rb_left;
+               rb_set_parent(tmp, successor);
+
+               pc = node->__rb_parent_color;
+               tmp = __rb_parent(pc);
+               __rb_change_child(node, successor, tmp, root);
+               if (child2) {
+                       successor->__rb_parent_color = pc;
+                       rb_set_parent_color(child2, parent, RB_BLACK);
+                       rebalance = NULL;
+               } else {
+                       unsigned long pc2 = successor->__rb_parent_color;
+                       successor->__rb_parent_color = pc;
+                       rebalance = __rb_is_black(pc2) ? parent : NULL;
+               }
+               tmp = successor;
+       }
+
+       augment->propagate(tmp, NULL);
+       return rebalance;
+}
+
+static __always_inline void
+rb_erase_augmented(struct rb_node *node, struct rb_root *root,
+                  const struct rb_augment_callbacks *augment)
+{
+       struct rb_node *rebalance = __rb_erase_augmented(node, root, augment);
+       if (rebalance)
+               __rb_erase_color(rebalance, root, augment->rotate);
+}
+
+#endif /* _TOOLS_LINUX_RBTREE_AUGMENTED_H */
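
RB_DECLARE_CALLBACKS() above generates the propagate/copy/rotate callbacks from a single per-node compute function. A sketch of the classic "maximum value in subtree" augmentation, with all names hypothetical; the generated mynode_callbacks would then be passed to rb_insert_augmented() and rb_erase_augmented():

#include <linux/rbtree_augmented.h>

struct mynode {
	struct rb_node	rb;
	unsigned long	val;		/* key stored in this node */
	unsigned long	subtree_max;	/* augmented data: max val in this subtree */
};

static unsigned long mynode_compute_max(struct mynode *node)
{
	unsigned long max = node->val;

	if (node->rb.rb_left) {
		struct mynode *l = rb_entry(node->rb.rb_left, struct mynode, rb);

		if (l->subtree_max > max)
			max = l->subtree_max;
	}
	if (node->rb.rb_right) {
		struct mynode *r = rb_entry(node->rb.rb_right, struct mynode, rb);

		if (r->subtree_max > max)
			max = r->subtree_max;
	}
	return max;
}

RB_DECLARE_CALLBACKS(static, mynode_callbacks, struct mynode, rb,
		     unsigned long, subtree_max, mynode_compute_max)

/* e.g. removal: rb_erase_augmented(&node->rb, &root, &mynode_callbacks); */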
diff --git a/tools/lib/rbtree.c b/tools/lib/rbtree.c
new file mode 100644 (file)
index 0000000..17c2b59
--- /dev/null
@@ -0,0 +1,548 @@
+/*
+  Red Black Trees
+  (C) 1999  Andrea Arcangeli <andrea@suse.de>
+  (C) 2002  David Woodhouse <dwmw2@infradead.org>
+  (C) 2012  Michel Lespinasse <walken@google.com>
+
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 2 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program; if not, write to the Free Software
+  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+
+  linux/lib/rbtree.c
+*/
+
+#include <linux/rbtree_augmented.h>
+
+/*
+ * red-black trees properties:  http://en.wikipedia.org/wiki/Rbtree
+ *
+ *  1) A node is either red or black
+ *  2) The root is black
+ *  3) All leaves (NULL) are black
+ *  4) Both children of every red node are black
+ *  5) Every simple path from root to leaves contains the same number
+ *     of black nodes.
+ *
+ *  4 and 5 give the O(log n) guarantee, since 4 implies you cannot have two
+ *  consecutive red nodes in a path and every red node is therefore followed by
+ *  a black. So if B is the number of black nodes on every simple path (as per
+ *  5), then the longest possible path due to 4 is 2B.
+ *
+ *  We shall indicate color with case, where black nodes are uppercase and red
+ *  nodes are lowercase. Nodes of unknown color are drawn lowercase within
+ *  parentheses, with an accompanying text comment.
+ */
+
+static inline void rb_set_black(struct rb_node *rb)
+{
+       rb->__rb_parent_color |= RB_BLACK;
+}
+
+static inline struct rb_node *rb_red_parent(struct rb_node *red)
+{
+       return (struct rb_node *)red->__rb_parent_color;
+}
+
+/*
+ * Helper function for rotations:
+ * - old's parent and color get assigned to new
+ * - old gets assigned new as a parent and 'color' as a color.
+ */
+static inline void
+__rb_rotate_set_parents(struct rb_node *old, struct rb_node *new,
+                       struct rb_root *root, int color)
+{
+       struct rb_node *parent = rb_parent(old);
+       new->__rb_parent_color = old->__rb_parent_color;
+       rb_set_parent_color(old, new, color);
+       __rb_change_child(old, new, parent, root);
+}
+
+static __always_inline void
+__rb_insert(struct rb_node *node, struct rb_root *root,
+           void (*augment_rotate)(struct rb_node *old, struct rb_node *new))
+{
+       struct rb_node *parent = rb_red_parent(node), *gparent, *tmp;
+
+       while (true) {
+               /*
+                * Loop invariant: node is red
+                *
+                * If there is a black parent, we are done.
+                * Otherwise, take some corrective action as we don't
+                * want a red root or two consecutive red nodes.
+                */
+               if (!parent) {
+                       rb_set_parent_color(node, NULL, RB_BLACK);
+                       break;
+               } else if (rb_is_black(parent))
+                       break;
+
+               gparent = rb_red_parent(parent);
+
+               tmp = gparent->rb_right;
+               if (parent != tmp) {    /* parent == gparent->rb_left */
+                       if (tmp && rb_is_red(tmp)) {
+                               /*
+                                * Case 1 - color flips
+                                *
+                                *       G            g
+                                *      / \          / \
+                                *     p   u  -->   P   U
+                                *    /            /
+                                *   n            n
+                                *
+                                * However, since g's parent might be red, and
+                                * 4) does not allow this, we need to recurse
+                                * at g.
+                                */
+                               rb_set_parent_color(tmp, gparent, RB_BLACK);
+                               rb_set_parent_color(parent, gparent, RB_BLACK);
+                               node = gparent;
+                               parent = rb_parent(node);
+                               rb_set_parent_color(node, parent, RB_RED);
+                               continue;
+                       }
+
+                       tmp = parent->rb_right;
+                       if (node == tmp) {
+                               /*
+                                * Case 2 - left rotate at parent
+                                *
+                                *      G             G
+                                *     / \           / \
+                                *    p   U  -->    n   U
+                                *     \           /
+                                *      n         p
+                                *
+                                * This still leaves us in violation of 4), the
+                                * continuation into Case 3 will fix that.
+                                */
+                               parent->rb_right = tmp = node->rb_left;
+                               node->rb_left = parent;
+                               if (tmp)
+                                       rb_set_parent_color(tmp, parent,
+                                                           RB_BLACK);
+                               rb_set_parent_color(parent, node, RB_RED);
+                               augment_rotate(parent, node);
+                               parent = node;
+                               tmp = node->rb_right;
+                       }
+
+                       /*
+                        * Case 3 - right rotate at gparent
+                        *
+                        *        G           P
+                        *       / \         / \
+                        *      p   U  -->  n   g
+                        *     /                 \
+                        *    n                   U
+                        */
+                       gparent->rb_left = tmp;  /* == parent->rb_right */
+                       parent->rb_right = gparent;
+                       if (tmp)
+                               rb_set_parent_color(tmp, gparent, RB_BLACK);
+                       __rb_rotate_set_parents(gparent, parent, root, RB_RED);
+                       augment_rotate(gparent, parent);
+                       break;
+               } else {
+                       tmp = gparent->rb_left;
+                       if (tmp && rb_is_red(tmp)) {
+                               /* Case 1 - color flips */
+                               rb_set_parent_color(tmp, gparent, RB_BLACK);
+                               rb_set_parent_color(parent, gparent, RB_BLACK);
+                               node = gparent;
+                               parent = rb_parent(node);
+                               rb_set_parent_color(node, parent, RB_RED);
+                               continue;
+                       }
+
+                       tmp = parent->rb_left;
+                       if (node == tmp) {
+                               /* Case 2 - right rotate at parent */
+                               parent->rb_left = tmp = node->rb_right;
+                               node->rb_right = parent;
+                               if (tmp)
+                                       rb_set_parent_color(tmp, parent,
+                                                           RB_BLACK);
+                               rb_set_parent_color(parent, node, RB_RED);
+                               augment_rotate(parent, node);
+                               parent = node;
+                               tmp = node->rb_left;
+                       }
+
+                       /* Case 3 - left rotate at gparent */
+                       gparent->rb_right = tmp;  /* == parent->rb_left */
+                       parent->rb_left = gparent;
+                       if (tmp)
+                               rb_set_parent_color(tmp, gparent, RB_BLACK);
+                       __rb_rotate_set_parents(gparent, parent, root, RB_RED);
+                       augment_rotate(gparent, parent);
+                       break;
+               }
+       }
+}
+
+/*
+ * Inline version for rb_erase() use - we want to be able to inline
+ * and eliminate the dummy_rotate callback there
+ */
+static __always_inline void
+____rb_erase_color(struct rb_node *parent, struct rb_root *root,
+       void (*augment_rotate)(struct rb_node *old, struct rb_node *new))
+{
+       struct rb_node *node = NULL, *sibling, *tmp1, *tmp2;
+
+       while (true) {
+               /*
+                * Loop invariants:
+                * - node is black (or NULL on first iteration)
+                * - node is not the root (parent is not NULL)
+                * - All leaf paths going through parent and node have a
+                *   black node count that is 1 lower than other leaf paths.
+                */
+               sibling = parent->rb_right;
+               if (node != sibling) {  /* node == parent->rb_left */
+                       if (rb_is_red(sibling)) {
+                               /*
+                                * Case 1 - left rotate at parent
+                                *
+                                *     P               S
+                                *    / \             / \
+                                *   N   s    -->    p   Sr
+                                *      / \         / \
+                                *     Sl  Sr      N   Sl
+                                */
+                               parent->rb_right = tmp1 = sibling->rb_left;
+                               sibling->rb_left = parent;
+                               rb_set_parent_color(tmp1, parent, RB_BLACK);
+                               __rb_rotate_set_parents(parent, sibling, root,
+                                                       RB_RED);
+                               augment_rotate(parent, sibling);
+                               sibling = tmp1;
+                       }
+                       tmp1 = sibling->rb_right;
+                       if (!tmp1 || rb_is_black(tmp1)) {
+                               tmp2 = sibling->rb_left;
+                               if (!tmp2 || rb_is_black(tmp2)) {
+                                       /*
+                                        * Case 2 - sibling color flip
+                                        * (p could be either color here)
+                                        *
+                                        *    (p)           (p)
+                                        *    / \           / \
+                                        *   N   S    -->  N   s
+                                        *      / \           / \
+                                        *     Sl  Sr        Sl  Sr
+                                        *
+                                        * This leaves us violating 5) which
+                                        * can be fixed by flipping p to black
+                                        * if it was red, or by recursing at p.
+                                        * p is red when coming from Case 1.
+                                        */
+                                       rb_set_parent_color(sibling, parent,
+                                                           RB_RED);
+                                       if (rb_is_red(parent))
+                                               rb_set_black(parent);
+                                       else {
+                                               node = parent;
+                                               parent = rb_parent(node);
+                                               if (parent)
+                                                       continue;
+                                       }
+                                       break;
+                               }
+                               /*
+                                * Case 3 - right rotate at sibling
+                                * (p could be either color here)
+                                *
+                                *   (p)           (p)
+                                *   / \           / \
+                                *  N   S    -->  N   Sl
+                                *     / \             \
+                                *    sl  Sr            s
+                                *                       \
+                                *                        Sr
+                                */
+                               sibling->rb_left = tmp1 = tmp2->rb_right;
+                               tmp2->rb_right = sibling;
+                               parent->rb_right = tmp2;
+                               if (tmp1)
+                                       rb_set_parent_color(tmp1, sibling,
+                                                           RB_BLACK);
+                               augment_rotate(sibling, tmp2);
+                               tmp1 = sibling;
+                               sibling = tmp2;
+                       }
+                       /*
+                        * Case 4 - left rotate at parent + color flips
+                        * (p and sl could be either color here.
+                        *  After rotation, p becomes black, s acquires
+                        *  p's color, and sl keeps its color)
+                        *
+                        *      (p)             (s)
+                        *      / \             / \
+                        *     N   S     -->   P   Sr
+                        *        / \         / \
+                        *      (sl) sr      N  (sl)
+                        */
+                       parent->rb_right = tmp2 = sibling->rb_left;
+                       sibling->rb_left = parent;
+                       rb_set_parent_color(tmp1, sibling, RB_BLACK);
+                       if (tmp2)
+                               rb_set_parent(tmp2, parent);
+                       __rb_rotate_set_parents(parent, sibling, root,
+                                               RB_BLACK);
+                       augment_rotate(parent, sibling);
+                       break;
+               } else {
+                       sibling = parent->rb_left;
+                       if (rb_is_red(sibling)) {
+                               /* Case 1 - right rotate at parent */
+                               parent->rb_left = tmp1 = sibling->rb_right;
+                               sibling->rb_right = parent;
+                               rb_set_parent_color(tmp1, parent, RB_BLACK);
+                               __rb_rotate_set_parents(parent, sibling, root,
+                                                       RB_RED);
+                               augment_rotate(parent, sibling);
+                               sibling = tmp1;
+                       }
+                       tmp1 = sibling->rb_left;
+                       if (!tmp1 || rb_is_black(tmp1)) {
+                               tmp2 = sibling->rb_right;
+                               if (!tmp2 || rb_is_black(tmp2)) {
+                                       /* Case 2 - sibling color flip */
+                                       rb_set_parent_color(sibling, parent,
+                                                           RB_RED);
+                                       if (rb_is_red(parent))
+                                               rb_set_black(parent);
+                                       else {
+                                               node = parent;
+                                               parent = rb_parent(node);
+                                               if (parent)
+                                                       continue;
+                                       }
+                                       break;
+                               }
+                               /* Case 3 - left rotate at sibling */
+                               sibling->rb_right = tmp1 = tmp2->rb_left;
+                               tmp2->rb_left = sibling;
+                               parent->rb_left = tmp2;
+                               if (tmp1)
+                                       rb_set_parent_color(tmp1, sibling,
+                                                           RB_BLACK);
+                               augment_rotate(sibling, tmp2);
+                               tmp1 = sibling;
+                               sibling = tmp2;
+                       }
+                       /* Case 4 - right rotate at parent + color flips */
+                       parent->rb_left = tmp2 = sibling->rb_right;
+                       sibling->rb_right = parent;
+                       rb_set_parent_color(tmp1, sibling, RB_BLACK);
+                       if (tmp2)
+                               rb_set_parent(tmp2, parent);
+                       __rb_rotate_set_parents(parent, sibling, root,
+                                               RB_BLACK);
+                       augment_rotate(parent, sibling);
+                       break;
+               }
+       }
+}
+
+/* Non-inline version for rb_erase_augmented() use */
+void __rb_erase_color(struct rb_node *parent, struct rb_root *root,
+       void (*augment_rotate)(struct rb_node *old, struct rb_node *new))
+{
+       ____rb_erase_color(parent, root, augment_rotate);
+}
+
+/*
+ * Non-augmented rbtree manipulation functions.
+ *
+ * We use dummy augmented callbacks here, and have the compiler optimize them
+ * out of the rb_insert_color() and rb_erase() function definitions.
+ */
+
+static inline void dummy_propagate(struct rb_node *node, struct rb_node *stop) {}
+static inline void dummy_copy(struct rb_node *old, struct rb_node *new) {}
+static inline void dummy_rotate(struct rb_node *old, struct rb_node *new) {}
+
+static const struct rb_augment_callbacks dummy_callbacks = {
+       dummy_propagate, dummy_copy, dummy_rotate
+};
+
+void rb_insert_color(struct rb_node *node, struct rb_root *root)
+{
+       __rb_insert(node, root, dummy_rotate);
+}
+
+void rb_erase(struct rb_node *node, struct rb_root *root)
+{
+       struct rb_node *rebalance;
+       rebalance = __rb_erase_augmented(node, root, &dummy_callbacks);
+       if (rebalance)
+               ____rb_erase_color(rebalance, root, dummy_rotate);
+}
+
+/*
+ * Augmented rbtree manipulation functions.
+ *
+ * This instantiates the same __always_inline functions as in the non-augmented
+ * case, but this time with user-defined callbacks.
+ */
+
+void __rb_insert_augmented(struct rb_node *node, struct rb_root *root,
+       void (*augment_rotate)(struct rb_node *old, struct rb_node *new))
+{
+       __rb_insert(node, root, augment_rotate);
+}
+
+/*
+ * This function returns the first node (in sort order) of the tree.
+ */
+struct rb_node *rb_first(const struct rb_root *root)
+{
+       struct rb_node  *n;
+
+       n = root->rb_node;
+       if (!n)
+               return NULL;
+       while (n->rb_left)
+               n = n->rb_left;
+       return n;
+}
+
+struct rb_node *rb_last(const struct rb_root *root)
+{
+       struct rb_node  *n;
+
+       n = root->rb_node;
+       if (!n)
+               return NULL;
+       while (n->rb_right)
+               n = n->rb_right;
+       return n;
+}
+
+struct rb_node *rb_next(const struct rb_node *node)
+{
+       struct rb_node *parent;
+
+       if (RB_EMPTY_NODE(node))
+               return NULL;
+
+       /*
+        * If we have a right-hand child, go down and then left as far
+        * as we can.
+        */
+       if (node->rb_right) {
+               node = node->rb_right;
+               while (node->rb_left)
+                       node = node->rb_left;
+               return (struct rb_node *)node;
+       }
+
+       /*
+        * No right-hand children. Everything down and left is smaller than us,
+        * so any 'next' node must be in the general direction of our parent.
+        * Go up the tree; any time the ancestor is a right-hand child of its
+        * parent, keep going up. First time it's a left-hand child of its
+        * parent, said parent is our 'next' node.
+        */
+       while ((parent = rb_parent(node)) && node == parent->rb_right)
+               node = parent;
+
+       return parent;
+}
+
+struct rb_node *rb_prev(const struct rb_node *node)
+{
+       struct rb_node *parent;
+
+       if (RB_EMPTY_NODE(node))
+               return NULL;
+
+       /*
+        * If we have a left-hand child, go down and then right as far
+        * as we can.
+        */
+       if (node->rb_left) {
+               node = node->rb_left;
+               while (node->rb_right)
+                       node = node->rb_right;
+               return (struct rb_node *)node;
+       }
+
+       /*
+        * No left-hand children. Go up till we find an ancestor which
+        * is a right-hand child of its parent.
+        */
+       while ((parent = rb_parent(node)) && node == parent->rb_left)
+               node = parent;
+
+       return parent;
+}
+
+void rb_replace_node(struct rb_node *victim, struct rb_node *new,
+                    struct rb_root *root)
+{
+       struct rb_node *parent = rb_parent(victim);
+
+       /* Set the surrounding nodes to point to the replacement */
+       __rb_change_child(victim, new, parent, root);
+       if (victim->rb_left)
+               rb_set_parent(victim->rb_left, new);
+       if (victim->rb_right)
+               rb_set_parent(victim->rb_right, new);
+
+       /* Copy the pointers/colour from the victim to the replacement */
+       *new = *victim;
+}
+
+static struct rb_node *rb_left_deepest_node(const struct rb_node *node)
+{
+       for (;;) {
+               if (node->rb_left)
+                       node = node->rb_left;
+               else if (node->rb_right)
+                       node = node->rb_right;
+               else
+                       return (struct rb_node *)node;
+       }
+}
+
+struct rb_node *rb_next_postorder(const struct rb_node *node)
+{
+       const struct rb_node *parent;
+       if (!node)
+               return NULL;
+       parent = rb_parent(node);
+
+       /* If we're sitting on node, we've already seen our children */
+       if (parent && node == parent->rb_left && parent->rb_right) {
+               /* If we are the parent's left node, go to the parent's right
+                * node then all the way down to the left */
+               return rb_left_deepest_node(parent->rb_right);
+       } else
+               /* Otherwise we are the parent's right node, and the parent
+                * should be next */
+               return (struct rb_node *)parent;
+}
+
+struct rb_node *rb_first_postorder(const struct rb_root *root)
+{
+       if (!root->rb_node)
+               return NULL;
+
+       return rb_left_deepest_node(root->rb_node);
+}
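
As a quick orientation for the non-augmented API that perf consumes, here is a minimal, hedged usage sketch; it is not part of this patch. "struct mytype", "keystring" and "my_insert" are hypothetical names, while rb_link_node(), rb_insert_color(), rb_first(), rb_next(), rb_entry(), rb_erase() and RB_ROOT are the real entry points provided by tools/include/linux/rbtree.h and the file above.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <linux/rbtree.h>	/* resolved via tools/include when building perf */

struct mytype {
	struct rb_node node;
	const char *keystring;
};

/* Insert while keeping the tree sorted by keystring; returns 0 on duplicate. */
static int my_insert(struct rb_root *root, struct mytype *data)
{
	struct rb_node **new = &root->rb_node, *parent = NULL;

	/* Walk down to the leaf position where the new key belongs. */
	while (*new) {
		struct mytype *this = rb_entry(*new, struct mytype, node);
		int result = strcmp(data->keystring, this->keystring);

		parent = *new;
		if (result < 0)
			new = &(*new)->rb_left;
		else if (result > 0)
			new = &(*new)->rb_right;
		else
			return 0;
	}

	/* Link the node in, then recolor/rebalance. */
	rb_link_node(&data->node, parent, new);
	rb_insert_color(&data->node, root);
	return 1;
}

int main(void)
{
	struct rb_root root = RB_ROOT;
	const char *keys[] = { "perf", "rbtree", "tools" };
	struct rb_node *n;
	unsigned int i;

	for (i = 0; i < 3; i++) {
		struct mytype *data = calloc(1, sizeof(*data));

		data->keystring = keys[i];
		my_insert(&root, data);
	}

	/* In-order walk: prints the keys in sorted order. */
	for (n = rb_first(&root); n; n = rb_next(n))
		printf("%s\n", rb_entry(n, struct mytype, node)->keystring);

	/* Tear down: rb_erase() detaches a node, the caller frees it. */
	while ((n = rb_first(&root))) {
		struct mytype *data = rb_entry(n, struct mytype, node);

		rb_erase(n, &root);
		free(data);
	}
	return 0;
}

For whole-tree teardown, rb_first_postorder()/rb_next_postorder() above can be used instead, since postorder visits children before their parent and so allows freeing nodes without touching already-freed memory.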
index fe50a1b34aa0035dec38a07a752ae33df9ad0e5d..09dc0aabb5154cb0906b6299afed4a3d0fc009c8 100644 (file)
@@ -18,6 +18,7 @@ tools/arch/x86/include/asm/atomic.h
 tools/arch/x86/include/asm/rmwcc.h
 tools/lib/traceevent
 tools/lib/api
+tools/lib/rbtree.c
 tools/lib/symbol/kallsyms.c
 tools/lib/symbol/kallsyms.h
 tools/lib/util/find_next_bit.c
@@ -44,6 +45,8 @@ tools/include/linux/kernel.h
 tools/include/linux/list.h
 tools/include/linux/log2.h
 tools/include/linux/poison.h
+tools/include/linux/rbtree.h
+tools/include/linux/rbtree_augmented.h
 tools/include/linux/types.h
 include/asm-generic/bitops/arch_hweight.h
 include/asm-generic/bitops/const_hweight.h
@@ -51,12 +54,10 @@ include/asm-generic/bitops/fls64.h
 include/asm-generic/bitops/__fls.h
 include/asm-generic/bitops/fls.h
 include/linux/perf_event.h
-include/linux/rbtree.h
 include/linux/list.h
 include/linux/hash.h
 include/linux/stringify.h
 lib/hweight.c
-lib/rbtree.c
 include/linux/swab.h
 arch/*/include/asm/unistd*.h
 arch/*/include/uapi/asm/unistd*.h
@@ -65,7 +66,6 @@ arch/*/lib/memcpy*.S
 arch/*/lib/memset*.S
 include/linux/poison.h
 include/linux/hw_breakpoint.h
-include/linux/rbtree_augmented.h
 include/uapi/linux/perf_event.h
 include/uapi/linux/const.h
 include/uapi/linux/swab.h
index 586a59d46022a9fc8901807f5c02be4e2551db25..601d11440596dfd2b1d5244f56e972487d3fecff 100644 (file)
@@ -139,7 +139,7 @@ $(OUTPUT)util/find_next_bit.o: ../lib/util/find_next_bit.c FORCE
        $(call rule_mkdir)
        $(call if_changed_dep,cc_o_c)
 
-$(OUTPUT)util/rbtree.o: ../../lib/rbtree.c FORCE
+$(OUTPUT)util/rbtree.o: ../lib/rbtree.c FORCE
        $(call rule_mkdir)
        $(call if_changed_dep,cc_o_c)
 
diff --git a/tools/perf/util/include/linux/rbtree.h b/tools/perf/util/include/linux/rbtree.h
deleted file mode 100644 (file)
index f06d89f..0000000
+++ /dev/null
@@ -1,16 +0,0 @@
-#ifndef __TOOLS_LINUX_PERF_RBTREE_H
-#define __TOOLS_LINUX_PERF_RBTREE_H
-#include <stdbool.h>
-#include "../../../../include/linux/rbtree.h"
-
-/*
- * Handy for checking that we are not deleting an entry that is
- * already in a list, found in block/{blk-throttle,cfq-iosched}.c,
- * probably should be moved to lib/rbtree.c...
- */
-static inline void rb_erase_init(struct rb_node *n, struct rb_root *root)
-{
-       rb_erase(n, root);
-       RB_CLEAR_NODE(n);
-}
-#endif /* __TOOLS_LINUX_PERF_RBTREE_H */
diff --git a/tools/perf/util/include/linux/rbtree_augmented.h b/tools/perf/util/include/linux/rbtree_augmented.h
deleted file mode 100644 (file)
index 9d6fcdf..0000000
+++ /dev/null
@@ -1,2 +0,0 @@
-#include <stdbool.h>
-#include "../../../../include/linux/rbtree_augmented.h"