/* include/linux/stringhash.h */
#ifndef __LINUX_STRINGHASH_H
#define __LINUX_STRINGHASH_H

#include <linux/compiler.h>	/* For __pure */
#include <linux/types.h>	/* For u32, u64 */

/*
 * Routines for hashing strings of bytes to a 32-bit hash value.
 *
 * These hash functions are NOT GUARANTEED STABLE between kernel
 * versions, architectures, or even repeated boots of the same kernel.
 * (E.g. they may depend on boot-time hardware detection or be
 * deliberately randomized.)
 *
 * They are also not intended to be secure against collisions caused by
 * malicious inputs; much slower hash functions are required for that.
 *
 * They are optimized for pathname components, meaning short strings.
 * Even if a majority of files have longer names, the dynamic profile of
 * pathname components skews short due to short directory names.
 * (E.g. /usr/lib/libsesquipedalianism.so.3.141.)
 */

/*
 * Version 1: one byte at a time.  Example of use:
 *
 *	unsigned long hash = init_name_hash();
 *	while (*p)
 *		hash = partial_name_hash(tolower(*p++), hash);
 *	hash = end_name_hash(hash);
 *
 * Although this is designed for bytes, fs/hfsplus/unicode.c
 * abuses it to hash 16-bit values.
 */

/* Hash courtesy of the R5 hash in reiserfs modulo sign bits */
#define init_name_hash()		0

/* partial hash update function. Assume roughly 4 bits per character */
static inline unsigned long
partial_name_hash(unsigned long c, unsigned long prevhash)
{
	return (prevhash + (c << 4) + (c >> 4)) * 11;
}

/*
 * Finally: cut down the number of bits to an int value (and try to avoid
 * losing bits)
 */
static inline unsigned long end_name_hash(unsigned long hash)
{
	return (unsigned int)hash;
}
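
/*
 * Illustrative sketch (not part of this header's API; the helper name
 * below is made up for the example): how the Version 1 primitives above
 * compose into a hash of a full NUL-terminated string, without the
 * tolower() case-folding shown in the comment earlier.
 */
static inline unsigned long example_name_hash_bytewise(const char *p)
{
	unsigned long hash = init_name_hash();

	while (*p)
		hash = partial_name_hash((unsigned char)*p++, hash);
	return end_name_hash(hash);
}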

/*
 * Version 2: One word (32 or 64 bits) at a time.
 * If CONFIG_DCACHE_WORD_ACCESS is defined (meaning <asm/word-at-a-time.h>
 * exists, which describes major Linux platforms like x86 and ARM), then
 * this computes a different hash function much faster.
 *
 * If not set, this falls back to a wrapper around the preceding.
 */
extern unsigned int __pure full_name_hash(const char *, unsigned int);

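/*
 * Example of use (illustrative only): hashing a name whose length is
 * already known, with hypothetical variables name and len:
 *
 *	unsigned int hash = full_name_hash(name, len);
 */
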
/*
 * A hash_len is a u64 with the hash of a string in the low
 * half and the length in the high half.
 */
#define hashlen_hash(hashlen) ((u32)(hashlen))
#define hashlen_len(hashlen)  ((u32)((hashlen) >> 32))
#define hashlen_create(hash, len) ((u64)(len)<<32 | (u32)(hash))

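/*
 * Example (illustrative only): packing a hash and a known length into a
 * hash_len and unpacking them again.
 *
 *	u64 hashlen = hashlen_create(full_name_hash(name, len), len);
 *
 *	hashlen_hash(hashlen) recovers the 32-bit hash, and
 *	hashlen_len(hashlen) recovers len.
 */
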
/* Return the "hash_len" (hash and length) of a null-terminated string */
extern u64 __pure hashlen_string(const char *name);

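/*
 * Example of use (illustrative only): one pass over a NUL-terminated
 * string yields both its hash and its length:
 *
 *	u64 hashlen = hashlen_string(name);
 *	unsigned int hash = hashlen_hash(hashlen);
 *	unsigned int len = hashlen_len(hashlen);
 */
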
#endif	/* __LINUX_STRINGHASH_H */