Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | /* |
2 | * random.c -- A strong random number generator | |
3 | * | |
9e95ce27 | 4 | * Copyright Matt Mackall <mpm@selenic.com>, 2003, 2004, 2005 |
1da177e4 LT |
5 | * |
6 | * Copyright Theodore Ts'o, 1994, 1995, 1996, 1997, 1998, 1999. All | |
7 | * rights reserved. | |
8 | * | |
9 | * Redistribution and use in source and binary forms, with or without | |
10 | * modification, are permitted provided that the following conditions | |
11 | * are met: | |
12 | * 1. Redistributions of source code must retain the above copyright | |
13 | * notice, and the entire permission notice in its entirety, | |
14 | * including the disclaimer of warranties. | |
15 | * 2. Redistributions in binary form must reproduce the above copyright | |
16 | * notice, this list of conditions and the following disclaimer in the | |
17 | * documentation and/or other materials provided with the distribution. | |
18 | * 3. The name of the author may not be used to endorse or promote | |
19 | * products derived from this software without specific prior | |
20 | * written permission. | |
21 | * | |
22 | * ALTERNATIVELY, this product may be distributed under the terms of | |
23 | * the GNU General Public License, in which case the provisions of the GPL are | |
24 | * required INSTEAD OF the above restrictions. (This clause is | |
25 | * necessary due to a potential bad interaction between the GPL and | |
26 | * the restrictions contained in a BSD-style copyright.) | |
27 | * | |
28 | * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED | |
29 | * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES | |
30 | * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ALL OF | |
31 | * WHICH ARE HEREBY DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE | |
32 | * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR | |
33 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT | |
34 | * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR | |
35 | * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF | |
36 | * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | |
37 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE | |
38 | * USE OF THIS SOFTWARE, EVEN IF NOT ADVISED OF THE POSSIBILITY OF SUCH | |
39 | * DAMAGE. | |
40 | */ | |
41 | ||
42 | /* | |
43 | * (now, with legal B.S. out of the way.....) | |
44 | * | |
45 | * This routine gathers environmental noise from device drivers, etc., | |
46 | * and returns good random numbers, suitable for cryptographic use. | |
47 | * Besides the obvious cryptographic uses, these numbers are also good | |
48 | * for seeding TCP sequence numbers, and other places where it is | |
49 | * desirable to have numbers which are not only random, but hard to | |
50 | * predict by an attacker. | |
51 | * | |
52 | * Theory of operation | |
53 | * =================== | |
54 | * | |
55 | * Computers are very predictable devices. Hence it is extremely hard | |
56 | * to produce truly random numbers on a computer --- as opposed to | |
57 | * pseudo-random numbers, which can easily be generated by using an | |
58 | * algorithm. Unfortunately, it is very easy for attackers to guess | |
59 | * the sequence of pseudo-random number generators, and for some | |
60 | * applications this is not acceptable. So instead, we must try to | |
61 | * gather "environmental noise" from the computer's environment, which | |
62 | * must be hard for outside attackers to observe, and use that to | |
63 | * generate random numbers. In a Unix environment, this is best done | |
64 | * from inside the kernel. | |
65 | * | |
66 | * Sources of randomness from the environment include inter-keyboard | |
67 | * timings, inter-interrupt timings from some interrupts, and other | |
68 | * events which are both (a) non-deterministic and (b) hard for an | |
69 | * outside observer to measure. Randomness from these sources is | |
70 | * added to an "entropy pool", which is mixed using a CRC-like function. | |
71 | * This is not cryptographically strong, but it is adequate assuming | |
72 | * the randomness is not chosen maliciously, and it is fast enough that | |
73 | * the overhead of doing it on every interrupt is very reasonable. | |
74 | * As random bytes are mixed into the entropy pool, the routines keep | |
75 | * an *estimate* of how many bits of randomness have been stored into | |
76 | * the random number generator's internal state. | |
77 | * | |
78 | * When random bytes are desired, they are obtained by taking the SHA | |
79 | * hash of the contents of the "entropy pool". The SHA hash avoids | |
80 | * exposing the internal state of the entropy pool. It is believed to | |
81 | * be computationally infeasible to derive any useful information | |
82 | * about the input of SHA from its output. Even if it is possible to | |
83 | * analyze SHA in some clever way, as long as the amount of data | |
84 | * returned from the generator is less than the inherent entropy in | |
85 | * the pool, the output data is totally unpredictable. For this | |
86 | * reason, the routine decreases its internal estimate of how many | |
87 | * bits of "true randomness" are contained in the entropy pool as it | |
88 | * outputs random numbers. | |
89 | * | |
90 | * If this estimate goes to zero, the routine can still generate | |
91 | * random numbers; however, an attacker may (at least in theory) be | |
92 | * able to infer the future output of the generator from prior | |
93 | * outputs. This requires successful cryptanalysis of SHA, which is | |
94 | * not believed to be feasible, but there is a remote possibility. | |
95 | * Nonetheless, these numbers should be useful for the vast majority | |
96 | * of purposes. | |
97 | * | |
98 | * Exported interfaces ---- output | |
99 | * =============================== | |
100 | * | |
101 | * There are three exported interfaces; the first is one designed to | |
102 | * be used from within the kernel: | |
103 | * | |
104 | * void get_random_bytes(void *buf, int nbytes); | |
105 | * | |
106 | * This interface will return the requested number of random bytes, | |
107 | * and place them in the requested buffer. | |
108 | * | |
109 | * The two other interfaces are two character devices /dev/random and | |
110 | * /dev/urandom. /dev/random is suitable for use when very high | |
111 | * quality randomness is desired (for example, for key generation or | |
112 | * one-time pads), as it will only return a maximum of the number of | |
113 | * bits of randomness (as estimated by the random number generator) | |
114 | * contained in the entropy pool. | |
115 | * | |
116 | * The /dev/urandom device does not have this limit, and will return | |
117 | * as many bytes as are requested. As more and more random bytes are | |
118 | * requested without giving time for the entropy pool to recharge, | |
119 | * this will result in random numbers that are merely cryptographically | |
120 | * strong. For many applications, however, this is acceptable. | |
121 | * | |
122 | * Exported interfaces ---- input | |
123 | * ============================== | |
124 | * | |
125 | * The current exported interfaces for gathering environmental noise | |
126 | * from the devices are: | |
127 | * | |
128 | * void add_input_randomness(unsigned int type, unsigned int code, | |
129 | * unsigned int value); | |
130 | * void add_interrupt_randomness(int irq); | |
131 | * | |
132 | * add_input_randomness() uses the input layer interrupt timing, as well as | |
133 | * the event type information from the hardware. | |
134 | * | |
135 | * add_interrupt_randomness() uses the inter-interrupt timing as random | |
136 | * inputs to the entropy pool. Note that not all interrupts are good | |
137 | * sources of randomness! For example, the timer interrupt is not a | |
138 | * good choice, because the periodicity of the interrupts is too | |
139 | * regular, and hence predictable to an attacker. Disk interrupts are | |
140 | * a better measure, since the timing of the disk interrupts is more | |
141 | * unpredictable. | |
142 | * | |
143 | * All of these routines try to estimate how many bits of randomness a | |
144 | * particular randomness source contributes. They do this by keeping track of the | |
145 | * first and second order deltas of the event timings. | |
146 | * | |
147 | * Ensuring unpredictability at system startup | |
148 | * ============================================ | |
149 | * | |
150 | * When any operating system starts up, it will go through a sequence | |
151 | * of actions that are fairly predictable by an adversary, especially | |
152 | * if the start-up does not involve interaction with a human operator. | |
153 | * This reduces the actual number of bits of unpredictability in the | |
154 | * entropy pool below the value in entropy_count. In order to | |
155 | * counteract this effect, it helps to carry information in the | |
156 | * entropy pool across shut-downs and start-ups. To do this, put the | |
157 | * following lines in an appropriate script which is run during the boot | |
158 | * sequence: | |
159 | * | |
160 | * echo "Initializing random number generator..." | |
161 | * random_seed=/var/run/random-seed | |
162 | * # Carry a random seed from start-up to start-up | |
163 | * # Load and then save the whole entropy pool | |
164 | * if [ -f $random_seed ]; then | |
165 | * cat $random_seed >/dev/urandom | |
166 | * else | |
167 | * touch $random_seed | |
168 | * fi | |
169 | * chmod 600 $random_seed | |
170 | * dd if=/dev/urandom of=$random_seed count=1 bs=512 | |
171 | * | |
172 | * and the following lines in an appropriate script which is run as | |
173 | * the system is shut down: | |
174 | * | |
175 | * # Carry a random seed from shut-down to start-up | |
176 | * # Save the whole entropy pool | |
177 | * echo "Saving random seed..." | |
178 | * random_seed=/var/run/random-seed | |
179 | * touch $random_seed | |
180 | * chmod 600 $random_seed | |
181 | * dd if=/dev/urandom of=$random_seed count=1 bs=512 | |
182 | * | |
183 | * For example, on most modern systems using the System V init | |
184 | * scripts, such code fragments would be found in | |
185 | * /etc/rc.d/init.d/random. On older Linux systems, the correct script | |
186 | * location might be in /etc/rc.d/rc.local or /etc/rc.d/rc.0. | |
187 | * | |
188 | * Effectively, these commands cause the contents of the entropy pool | |
189 | * to be saved at shut-down time and reloaded into the entropy pool at | |
190 | * start-up. (The 'dd' in the addition to the bootup script is to | |
191 | * make sure that /var/run/random-seed is different for every start-up, | |
192 | * even if the system crashes without executing rc.0.) Even with | |
193 | * complete knowledge of the start-up activities, predicting the state | |
194 | * of the entropy pool requires knowledge of the previous history of | |
195 | * the system. | |
196 | * | |
197 | * Configuring the /dev/random driver under Linux | |
198 | * ============================================== | |
199 | * | |
200 | * The /dev/random driver under Linux uses minor numbers 8 and 9 of | |
201 | * the /dev/mem major number (#1). So if your system does not have | |
202 | * /dev/random and /dev/urandom created already, they can be created | |
203 | * by using the commands: | |
204 | * | |
205 | * mknod /dev/random c 1 8 | |
206 | * mknod /dev/urandom c 1 9 | |
207 | * | |
208 | * Acknowledgements: | |
209 | * ================= | |
210 | * | |
211 | * Ideas for constructing this random number generator were derived | |
212 | * from Pretty Good Privacy's random number generator, and from private | |
213 | * discussions with Phil Karn. Colin Plumb provided a faster random | |
214 | * number generator, which sped up the mixing function of the entropy | |
215 | * pool, taken from PGPfone. Dale Worley has also contributed many | |
216 | * useful ideas and suggestions to improve this driver. | |
217 | * | |
218 | * Any flaws in the design are solely my responsibility, and should | |
219 | * not be attributed to Phil, Colin, or any of the authors of PGP. | |
220 | * | |
221 | * Further background information on this topic may be obtained from | |
222 | * RFC 1750, "Randomness Recommendations for Security", by Donald | |
223 | * Eastlake, Steve Crocker, and Jeff Schiller. | |
224 | */ | |
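As a hedged aside to the interface description above, here is a minimal user-space sketch of consuming the /dev/urandom device; it is not part of this driver, the key size is chosen purely for illustration, and error handling is deliberately terse.

```c
/* Minimal user-space sketch (not driver code): read 16 random bytes
 * from the /dev/urandom character device described above. */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	unsigned char key[16];
	ssize_t n, off = 0;
	int fd = open("/dev/urandom", O_RDONLY);

	if (fd < 0)
		return 1;
	while (off < (ssize_t)sizeof(key)) {
		n = read(fd, key + off, sizeof(key) - off);
		if (n <= 0)
			return 1;	/* bail out on error or EOF */
		off += n;
	}
	close(fd);
	for (off = 0; off < (ssize_t)sizeof(key); off++)
		printf("%02x", key[off]);
	printf("\n");
	return 0;
}
```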
225 | ||
226 | #include <linux/utsname.h> | |
1da177e4 LT |
227 | #include <linux/module.h> |
228 | #include <linux/kernel.h> | |
229 | #include <linux/major.h> | |
230 | #include <linux/string.h> | |
231 | #include <linux/fcntl.h> | |
232 | #include <linux/slab.h> | |
233 | #include <linux/random.h> | |
234 | #include <linux/poll.h> | |
235 | #include <linux/init.h> | |
236 | #include <linux/fs.h> | |
237 | #include <linux/genhd.h> | |
238 | #include <linux/interrupt.h> | |
27ac792c | 239 | #include <linux/mm.h> |
1da177e4 LT |
240 | #include <linux/spinlock.h> |
241 | #include <linux/percpu.h> | |
242 | #include <linux/cryptohash.h> | |
243 | ||
d178a1eb YL |
244 | #ifdef CONFIG_GENERIC_HARDIRQS |
245 | # include <linux/irq.h> | |
246 | #endif | |
247 | ||
1da177e4 LT |
248 | #include <asm/processor.h> |
249 | #include <asm/uaccess.h> | |
250 | #include <asm/irq.h> | |
251 | #include <asm/io.h> | |
252 | ||
253 | /* | |
254 | * Configuration information | |
255 | */ | |
256 | #define INPUT_POOL_WORDS 128 | |
257 | #define OUTPUT_POOL_WORDS 32 | |
258 | #define SEC_XFER_SIZE 512 | |
259 | ||
260 | /* | |
261 | * The minimum number of bits of entropy before we wake up a read on | |
262 | * /dev/random. Should be enough to do a significant reseed. | |
263 | */ | |
264 | static int random_read_wakeup_thresh = 64; | |
265 | ||
266 | /* | |
267 | * If the entropy count falls under this number of bits, then we | |
268 | * should wake up processes which are selecting or polling on write | |
269 | * access to /dev/random. | |
270 | */ | |
271 | static int random_write_wakeup_thresh = 128; | |
272 | ||
273 | /* | |
274 | * When the input pool goes over trickle_thresh, start dropping most | |
275 | * samples to avoid wasting CPU time and reduce lock contention. | |
276 | */ | |
277 | ||
6c036527 | 278 | static int trickle_thresh __read_mostly = INPUT_POOL_WORDS * 28; |
1da177e4 | 279 | |
90b75ee5 | 280 | static DEFINE_PER_CPU(int, trickle_count); |
1da177e4 LT |
281 | |
282 | /* | |
283 | * A pool of size .poolwords is stirred with a primitive polynomial | |
284 | * of degree .poolwords over GF(2). The taps for various sizes are | |
285 | * defined below. They are chosen to be evenly spaced (minimum RMS | |
286 | * distance from evenly spaced; the numbers in the comments are a | |
287 | * scaled squared error sum) except for the last tap, which is 1 to | |
288 | * get the twisting happening as fast as possible. | |
289 | */ | |
290 | static struct poolinfo { | |
291 | int poolwords; | |
292 | int tap1, tap2, tap3, tap4, tap5; | |
293 | } poolinfo_table[] = { | |
294 | /* x^128 + x^103 + x^76 + x^51 + x^25 + x + 1 -- 105 */ | |
295 | { 128, 103, 76, 51, 25, 1 }, | |
296 | /* x^32 + x^26 + x^20 + x^14 + x^7 + x + 1 -- 15 */ | |
297 | { 32, 26, 20, 14, 7, 1 }, | |
298 | #if 0 | |
299 | /* x^2048 + x^1638 + x^1231 + x^819 + x^411 + x + 1 -- 115 */ | |
300 | { 2048, 1638, 1231, 819, 411, 1 }, | |
301 | ||
302 | /* x^1024 + x^817 + x^615 + x^412 + x^204 + x + 1 -- 290 */ | |
303 | { 1024, 817, 615, 412, 204, 1 }, | |
304 | ||
305 | /* x^1024 + x^819 + x^616 + x^410 + x^207 + x^2 + 1 -- 115 */ | |
306 | { 1024, 819, 616, 410, 207, 2 }, | |
307 | ||
308 | /* x^512 + x^411 + x^308 + x^208 + x^104 + x + 1 -- 225 */ | |
309 | { 512, 411, 308, 208, 104, 1 }, | |
310 | ||
311 | /* x^512 + x^409 + x^307 + x^206 + x^102 + x^2 + 1 -- 95 */ | |
312 | { 512, 409, 307, 206, 102, 2 }, | |
313 | /* x^512 + x^409 + x^309 + x^205 + x^103 + x^2 + 1 -- 95 */ | |
314 | { 512, 409, 309, 205, 103, 2 }, | |
315 | ||
316 | /* x^256 + x^205 + x^155 + x^101 + x^52 + x + 1 -- 125 */ | |
317 | { 256, 205, 155, 101, 52, 1 }, | |
318 | ||
319 | /* x^128 + x^103 + x^78 + x^51 + x^27 + x^2 + 1 -- 70 */ | |
320 | { 128, 103, 78, 51, 27, 2 }, | |
321 | ||
322 | /* x^64 + x^52 + x^39 + x^26 + x^14 + x + 1 -- 15 */ | |
323 | { 64, 52, 39, 26, 14, 1 }, | |
324 | #endif | |
325 | }; | |
326 | ||
327 | #define POOLBITS poolwords*32 | |
328 | #define POOLBYTES poolwords*4 | |
329 | ||
330 | /* | |
331 | * For the purposes of better mixing, we use the CRC-32 polynomial as | |
332 | * well to make a twisted Generalized Feedback Shift Register | |
333 | * | |
334 | * (See M. Matsumoto & Y. Kurita, 1992. Twisted GFSR generators. ACM | |
335 | * Transactions on Modeling and Computer Simulation 2(3):179-194. | |
336 | * Also see M. Matsumoto & Y. Kurita, 1994. Twisted GFSR generators | |
337 | * II. ACM Transactions on Modeling and Computer Simulation 4:254-266) | |
338 | * | |
339 | * Thanks to Colin Plumb for suggesting this. | |
340 | * | |
341 | * We have not analyzed the resultant polynomial to prove it primitive; | |
342 | * in fact it almost certainly isn't. Nonetheless, the irreducible factors | |
343 | * of a random large-degree polynomial over GF(2) are more than large enough | |
344 | * that periodicity is not a concern. | |
345 | * | |
346 | * The input hash is much less sensitive than the output hash. All | |
347 | * that we want of it is that it be a good non-cryptographic hash; | |
348 | * i.e. it not produce collisions when fed "random" data of the sort | |
349 | * we expect to see. As long as the pool state differs for different | |
350 | * inputs, we have preserved the input entropy and done a good job. | |
351 | * The fact that an intelligent attacker can construct inputs that | |
352 | * will produce controlled alterations to the pool's state is not | |
353 | * important because we don't consider such inputs to contribute any | |
354 | * randomness. The only property we need with respect to them is that | |
355 | * the attacker can't increase his/her knowledge of the pool's state. | |
356 | * Since all additions are reversible (knowing the final state and the | |
357 | * input, you can reconstruct the initial state), if an attacker has | |
358 | * any uncertainty about the initial state, he/she can only shuffle | |
359 | * that uncertainty about, but never cause any collisions (which would | |
360 | * decrease the uncertainty). | |
361 | * | |
362 | * The chosen system lets the state of the pool be (essentially) the input | |
363 | * modulo the generator polynomial. Now, for random primitive polynomials, | |
364 | * this is a universal class of hash functions, meaning that the chance | |
365 | * of a collision is limited by the attacker's knowledge of the generator | |
366 | * polynomial, so if it is chosen at random, an attacker can never force | |
367 | * a collision. Here, we use a fixed polynomial, but we *can* assume that | |
368 | * ###--> it is unknown to the processes generating the input entropy. <-### | |
369 | * Because of this important property, this is a good, collision-resistant | |
370 | * hash; hash collisions will occur no more often than chance. | |
371 | */ | |
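As a hedged illustration of the "CRC-like" twist referred to above: the twist_table[] used by mix_pool_bytes_extract() further down is simply the effect of three steps of the reflected CRC-32 reduction (polynomial 0xedb88320) for each possible 3-bit remainder. The stand-alone sketch below regenerates those eight constants; the driver itself just hard-codes the table.

```c
/* Illustrative sketch (not driver code): derive the 8-entry twist table
 * from the bit-reversed CRC-32 polynomial.  Each entry is the effect of
 * reducing the three bits shifted out by "w >> 3". */
#include <stdio.h>

int main(void)
{
	const unsigned int poly = 0xedb88320u;	/* reflected CRC-32 polynomial */
	unsigned int j, k, w;

	for (j = 0; j < 8; j++) {
		w = j;
		for (k = 0; k < 3; k++)
			w = (w & 1) ? (w >> 1) ^ poly : w >> 1;
		printf("0x%08x%s", w, j == 7 ? "\n" : ", ");
	}
	return 0;	/* prints the same constants as twist_table[] below */
}
```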
372 | ||
373 | /* | |
374 | * Static global variables | |
375 | */ | |
376 | static DECLARE_WAIT_QUEUE_HEAD(random_read_wait); | |
377 | static DECLARE_WAIT_QUEUE_HEAD(random_write_wait); | |
9a6f70bb | 378 | static struct fasync_struct *fasync; |
1da177e4 LT |
379 | |
380 | #if 0 | |
90b75ee5 | 381 | static int debug; |
1da177e4 | 382 | module_param(debug, bool, 0644); |
90b75ee5 MM |
383 | #define DEBUG_ENT(fmt, arg...) do { \ |
384 | if (debug) \ | |
385 | printk(KERN_DEBUG "random %04d %04d %04d: " \ | |
386 | fmt,\ | |
387 | input_pool.entropy_count,\ | |
388 | blocking_pool.entropy_count,\ | |
389 | nonblocking_pool.entropy_count,\ | |
390 | ## arg); } while (0) | |
1da177e4 LT |
391 | #else |
392 | #define DEBUG_ENT(fmt, arg...) do {} while (0) | |
393 | #endif | |
394 | ||
395 | /********************************************************************** | |
396 | * | |
397 | * OS independent entropy store. Here are the functions which handle | |
398 | * storing entropy in an entropy pool. | |
399 | * | |
400 | **********************************************************************/ | |
401 | ||
402 | struct entropy_store; | |
403 | struct entropy_store { | |
43358209 | 404 | /* read-only data: */ |
1da177e4 LT |
405 | struct poolinfo *poolinfo; |
406 | __u32 *pool; | |
407 | const char *name; | |
408 | int limit; | |
409 | struct entropy_store *pull; | |
410 | ||
411 | /* read-write data: */ | |
43358209 | 412 | spinlock_t lock; |
1da177e4 | 413 | unsigned add_ptr; |
cda796a3 | 414 | int entropy_count; |
1da177e4 LT |
415 | int input_rotate; |
416 | }; | |
417 | ||
418 | static __u32 input_pool_data[INPUT_POOL_WORDS]; | |
419 | static __u32 blocking_pool_data[OUTPUT_POOL_WORDS]; | |
420 | static __u32 nonblocking_pool_data[OUTPUT_POOL_WORDS]; | |
421 | ||
422 | static struct entropy_store input_pool = { | |
423 | .poolinfo = &poolinfo_table[0], | |
424 | .name = "input", | |
425 | .limit = 1, | |
e4d91918 | 426 | .lock = __SPIN_LOCK_UNLOCKED(&input_pool.lock), |
1da177e4 LT |
427 | .pool = input_pool_data |
428 | }; | |
429 | ||
430 | static struct entropy_store blocking_pool = { | |
431 | .poolinfo = &poolinfo_table[1], | |
432 | .name = "blocking", | |
433 | .limit = 1, | |
434 | .pull = &input_pool, | |
e4d91918 | 435 | .lock = __SPIN_LOCK_UNLOCKED(&blocking_pool.lock), |
1da177e4 LT |
436 | .pool = blocking_pool_data |
437 | }; | |
438 | ||
439 | static struct entropy_store nonblocking_pool = { | |
440 | .poolinfo = &poolinfo_table[1], | |
441 | .name = "nonblocking", | |
442 | .pull = &input_pool, | |
e4d91918 | 443 | .lock = __SPIN_LOCK_UNLOCKED(&nonblocking_pool.lock), |
1da177e4 LT |
444 | .pool = nonblocking_pool_data |
445 | }; | |
446 | ||
447 | /* | |
e68e5b66 | 448 | * This function adds bytes into the entropy "pool". It does not |
1da177e4 | 449 | * update the entropy estimate. The caller should call |
adc782da | 450 | * credit_entropy_bits if this is appropriate. |
1da177e4 LT |
451 | * |
452 | * The pool is stirred with a primitive polynomial of the appropriate | |
453 | * degree, and then twisted. We twist by three bits at a time because | |
454 | * it's cheap to do so and helps slightly in the expected case where | |
455 | * the entropy is concentrated in the low-order bits. | |
456 | */ | |
e68e5b66 MM |
457 | static void mix_pool_bytes_extract(struct entropy_store *r, const void *in, |
458 | int nbytes, __u8 out[64]) | |
1da177e4 LT |
459 | { |
460 | static __u32 const twist_table[8] = { | |
461 | 0x00000000, 0x3b6e20c8, 0x76dc4190, 0x4db26158, | |
462 | 0xedb88320, 0xd6d6a3e8, 0x9b64c2b0, 0xa00ae278 }; | |
993ba211 | 463 | unsigned long i, j, tap1, tap2, tap3, tap4, tap5; |
feee7697 | 464 | int input_rotate; |
1da177e4 | 465 | int wordmask = r->poolinfo->poolwords - 1; |
e68e5b66 | 466 | const char *bytes = in; |
6d38b827 | 467 | __u32 w; |
1da177e4 LT |
468 | unsigned long flags; |
469 | ||
470 | /* Taps are constant, so we can load them without holding r->lock. */ | |
471 | tap1 = r->poolinfo->tap1; | |
472 | tap2 = r->poolinfo->tap2; | |
473 | tap3 = r->poolinfo->tap3; | |
474 | tap4 = r->poolinfo->tap4; | |
475 | tap5 = r->poolinfo->tap5; | |
1da177e4 LT |
476 | |
477 | spin_lock_irqsave(&r->lock, flags); | |
1da177e4 | 478 | input_rotate = r->input_rotate; |
993ba211 | 479 | i = r->add_ptr; |
1da177e4 | 480 | |
e68e5b66 MM |
481 | /* mix one byte at a time to simplify size handling and churn faster */ |
482 | while (nbytes--) { | |
483 | w = rol32(*bytes++, input_rotate & 31); | |
993ba211 | 484 | i = (i - 1) & wordmask; |
1da177e4 LT |
485 | |
486 | /* XOR in the various taps */ | |
993ba211 | 487 | w ^= r->pool[i]; |
1da177e4 LT |
488 | w ^= r->pool[(i + tap1) & wordmask]; |
489 | w ^= r->pool[(i + tap2) & wordmask]; | |
490 | w ^= r->pool[(i + tap3) & wordmask]; | |
491 | w ^= r->pool[(i + tap4) & wordmask]; | |
492 | w ^= r->pool[(i + tap5) & wordmask]; | |
993ba211 MM |
493 | |
494 | /* Mix the result back in with a twist */ | |
1da177e4 | 495 | r->pool[i] = (w >> 3) ^ twist_table[w & 7]; |
feee7697 MM |
496 | |
497 | /* | |
498 | * Normally, we add 7 bits of rotation to the pool. | |
499 | * At the beginning of the pool, add an extra 7 bits | |
500 | * rotation, so that successive passes spread the | |
501 | * input bits across the pool evenly. | |
502 | */ | |
503 | input_rotate += i ? 7 : 14; | |
1da177e4 LT |
504 | } |
505 | ||
506 | r->input_rotate = input_rotate; | |
993ba211 | 507 | r->add_ptr = i; |
1da177e4 | 508 | |
993ba211 MM |
509 | if (out) |
510 | for (j = 0; j < 16; j++) | |
e68e5b66 | 511 | ((__u32 *)out)[j] = r->pool[(i - j) & wordmask]; |
1da177e4 LT |
512 | |
513 | spin_unlock_irqrestore(&r->lock, flags); | |
514 | } | |
515 | ||
e68e5b66 | 516 | static void mix_pool_bytes(struct entropy_store *r, const void *in, int bytes) |
1da177e4 | 517 | { |
e68e5b66 | 518 | mix_pool_bytes_extract(r, in, bytes, NULL); |
1da177e4 LT |
519 | } |
520 | ||
521 | /* | |
522 | * Credit (or debit) the entropy store with n bits of entropy | |
523 | */ | |
adc782da | 524 | static void credit_entropy_bits(struct entropy_store *r, int nbits) |
1da177e4 LT |
525 | { |
526 | unsigned long flags; | |
8b76f46a | 527 | int entropy_count; |
1da177e4 | 528 | |
adc782da MM |
529 | if (!nbits) |
530 | return; | |
531 | ||
1da177e4 LT |
532 | spin_lock_irqsave(&r->lock, flags); |
533 | ||
adc782da | 534 | DEBUG_ENT("added %d entropy credits to %s\n", nbits, r->name); |
8b76f46a AM |
535 | entropy_count = r->entropy_count; |
536 | entropy_count += nbits; | |
537 | if (entropy_count < 0) { | |
adc782da | 538 | DEBUG_ENT("negative entropy/overflow\n"); |
8b76f46a AM |
539 | entropy_count = 0; |
540 | } else if (entropy_count > r->poolinfo->POOLBITS) | |
541 | entropy_count = r->poolinfo->POOLBITS; | |
542 | r->entropy_count = entropy_count; | |
1da177e4 | 543 | |
88c730da | 544 | /* should we wake readers? */ |
8b76f46a | 545 | if (r == &input_pool && entropy_count >= random_read_wakeup_thresh) { |
88c730da | 546 | wake_up_interruptible(&random_read_wait); |
9a6f70bb JD |
547 | kill_fasync(&fasync, SIGIO, POLL_IN); |
548 | } | |
1da177e4 LT |
549 | spin_unlock_irqrestore(&r->lock, flags); |
550 | } | |
551 | ||
552 | /********************************************************************* | |
553 | * | |
554 | * Entropy input management | |
555 | * | |
556 | *********************************************************************/ | |
557 | ||
558 | /* There is one of these per entropy source */ | |
559 | struct timer_rand_state { | |
560 | cycles_t last_time; | |
90b75ee5 | 561 | long last_delta, last_delta2; |
1da177e4 LT |
562 | unsigned dont_count_entropy:1; |
563 | }; | |
564 | ||
d7e51e66 | 565 | #ifndef CONFIG_GENERIC_HARDIRQS |
2f983570 YL |
566 | |
567 | static struct timer_rand_state *irq_timer_state[NR_IRQS]; | |
568 | ||
569 | static struct timer_rand_state *get_timer_rand_state(unsigned int irq) | |
570 | { | |
571 | return irq_timer_state[irq]; | |
572 | } | |
573 | ||
574 | static void set_timer_rand_state(unsigned int irq, | |
575 | struct timer_rand_state *state) | |
576 | { | |
577 | irq_timer_state[irq] = state; | |
578 | } | |
579 | ||
580 | #else | |
581 | ||
582 | static struct timer_rand_state *get_timer_rand_state(unsigned int irq) | |
583 | { | |
584 | struct irq_desc *desc; | |
585 | ||
586 | desc = irq_to_desc(irq); | |
587 | ||
588 | return desc->timer_rand_state; | |
589 | } | |
590 | ||
591 | static void set_timer_rand_state(unsigned int irq, | |
592 | struct timer_rand_state *state) | |
593 | { | |
594 | struct irq_desc *desc; | |
595 | ||
596 | desc = irq_to_desc(irq); | |
597 | ||
598 | desc->timer_rand_state = state; | |
599 | } | |
0b8f1efa | 600 | #endif |
3060d6fe | 601 | |
3060d6fe YL |
602 | static struct timer_rand_state input_timer_state; |
603 | ||
1da177e4 LT |
604 | /* |
605 | * This function adds entropy to the entropy "pool" by using timing | |
606 | * delays. It uses the timer_rand_state structure to make an estimate | |
607 | * of how many bits of entropy this call has added to the pool. | |
608 | * | |
609 | * The number "num" is also added to the pool - it should somehow describe | |
610 | * the type of event which just happened. This is currently 0-255 for | |
611 | * keyboard scan codes, and 256 upwards for interrupts. | |
612 | * | |
613 | */ | |
614 | static void add_timer_randomness(struct timer_rand_state *state, unsigned num) | |
615 | { | |
616 | struct { | |
617 | cycles_t cycles; | |
618 | long jiffies; | |
619 | unsigned num; | |
620 | } sample; | |
621 | long delta, delta2, delta3; | |
622 | ||
623 | preempt_disable(); | |
624 | /* if over the trickle threshold, use only 1 in 4096 samples */ | |
625 | if (input_pool.entropy_count > trickle_thresh && | |
626 | (__get_cpu_var(trickle_count)++ & 0xfff)) | |
627 | goto out; | |
628 | ||
629 | sample.jiffies = jiffies; | |
630 | sample.cycles = get_cycles(); | |
631 | sample.num = num; | |
e68e5b66 | 632 | mix_pool_bytes(&input_pool, &sample, sizeof(sample)); |
1da177e4 LT |
633 | |
634 | /* | |
635 | * Calculate number of bits of randomness we probably added. | |
636 | * We take into account the first, second and third-order deltas | |
637 | * in order to make our estimate. | |
638 | */ | |
639 | ||
640 | if (!state->dont_count_entropy) { | |
641 | delta = sample.jiffies - state->last_time; | |
642 | state->last_time = sample.jiffies; | |
643 | ||
644 | delta2 = delta - state->last_delta; | |
645 | state->last_delta = delta; | |
646 | ||
647 | delta3 = delta2 - state->last_delta2; | |
648 | state->last_delta2 = delta2; | |
649 | ||
650 | if (delta < 0) | |
651 | delta = -delta; | |
652 | if (delta2 < 0) | |
653 | delta2 = -delta2; | |
654 | if (delta3 < 0) | |
655 | delta3 = -delta3; | |
656 | if (delta > delta2) | |
657 | delta = delta2; | |
658 | if (delta > delta3) | |
659 | delta = delta3; | |
660 | ||
661 | /* | |
662 | * delta is now minimum absolute delta. | |
663 | * Round down by 1 bit on general principles, | |
664 | * and limit the entropy estimate to 11 bits. | |
665 | */ | |
adc782da MM |
666 | credit_entropy_bits(&input_pool, |
667 | min_t(int, fls(delta>>1), 11)); | |
1da177e4 | 668 | } |
1da177e4 LT |
669 | out: |
670 | preempt_enable(); | |
671 | } | |
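To make the heuristic above concrete, here is a hedged, stand-alone worked example of the delta-based credit; the sample timings are invented, and a local fls() stand-in replaces the kernel helper.

```c
/* Stand-alone worked example (not driver code) of the entropy estimate
 * in add_timer_randomness(); the timings below are invented. */
#include <stdio.h>

static int fls_example(long x)	/* stand-in for the kernel's fls() */
{
	int r = 0;

	while (x) {
		x >>= 1;
		r++;
	}
	return r;
}

int main(void)
{
	long delta = 100;			/* jiffies since last event */
	long last_delta = 160, last_delta2 = -30;
	long delta2 = delta - last_delta;	/* -60 */
	long delta3 = delta2 - last_delta2;	/* -30 */
	int bits;

	if (delta < 0)
		delta = -delta;
	if (delta2 < 0)
		delta2 = -delta2;
	if (delta3 < 0)
		delta3 = -delta3;
	if (delta > delta2)
		delta = delta2;
	if (delta > delta3)
		delta = delta3;			/* minimum absolute delta: 30 */

	bits = fls_example(delta >> 1);		/* fls(15) = 4 */
	if (bits > 11)
		bits = 11;
	printf("credit %d bits to the input pool\n", bits);	/* prints 4 */
	return 0;
}
```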
672 | ||
d251575a | 673 | void add_input_randomness(unsigned int type, unsigned int code, |
1da177e4 LT |
674 | unsigned int value) |
675 | { | |
676 | static unsigned char last_value; | |
677 | ||
678 | /* ignore autorepeat and the like */ | |
679 | if (value == last_value) | |
680 | return; | |
681 | ||
682 | DEBUG_ENT("input event\n"); | |
683 | last_value = value; | |
684 | add_timer_randomness(&input_timer_state, | |
685 | (type << 4) ^ code ^ (code >> 4) ^ value); | |
686 | } | |
80fc9f53 | 687 | EXPORT_SYMBOL_GPL(add_input_randomness); |
1da177e4 LT |
688 | |
689 | void add_interrupt_randomness(int irq) | |
690 | { | |
3060d6fe YL |
691 | struct timer_rand_state *state; |
692 | ||
693 | state = get_timer_rand_state(irq); | |
694 | ||
695 | if (state == NULL) | |
1da177e4 LT |
696 | return; |
697 | ||
698 | DEBUG_ENT("irq event %d\n", irq); | |
3060d6fe | 699 | add_timer_randomness(state, 0x100 + irq); |
1da177e4 LT |
700 | } |
701 | ||
9361401e | 702 | #ifdef CONFIG_BLOCK |
1da177e4 LT |
703 | void add_disk_randomness(struct gendisk *disk) |
704 | { | |
705 | if (!disk || !disk->random) | |
706 | return; | |
707 | /* first major is 1, so we get >= 0x200 here */ | |
f331c029 TH |
708 | DEBUG_ENT("disk event %d:%d\n", |
709 | MAJOR(disk_devt(disk)), MINOR(disk_devt(disk))); | |
1da177e4 | 710 | |
f331c029 | 711 | add_timer_randomness(disk->random, 0x100 + disk_devt(disk)); |
1da177e4 | 712 | } |
9361401e | 713 | #endif |
1da177e4 LT |
714 | |
715 | #define EXTRACT_SIZE 10 | |
716 | ||
717 | /********************************************************************* | |
718 | * | |
719 | * Entropy extraction routines | |
720 | * | |
721 | *********************************************************************/ | |
722 | ||
90b75ee5 | 723 | static ssize_t extract_entropy(struct entropy_store *r, void *buf, |
1da177e4 LT |
724 | size_t nbytes, int min, int rsvd); |
725 | ||
726 | /* | |
727 | * This utility inline function is responsible for transferring entropy | |
728 | * from the primary pool to the secondary extraction pool. We make | |
729 | * sure we pull enough for a 'catastrophic reseed'. | |
730 | */ | |
731 | static void xfer_secondary_pool(struct entropy_store *r, size_t nbytes) | |
732 | { | |
733 | __u32 tmp[OUTPUT_POOL_WORDS]; | |
734 | ||
735 | if (r->pull && r->entropy_count < nbytes * 8 && | |
736 | r->entropy_count < r->poolinfo->POOLBITS) { | |
5a021e9f | 737 | /* If we're unlimited, always leave two wakeups' worth of bits reserved */ |
1da177e4 | 738 | int rsvd = r->limit ? 0 : random_read_wakeup_thresh/4; |
5a021e9f MM |
739 | int bytes = nbytes; |
740 | ||
741 | /* pull at least as many bytes as one wakeup's worth of bits */ | |
742 | bytes = max_t(int, bytes, random_read_wakeup_thresh / 8); | |
743 | /* but never more than the buffer size */ | |
744 | bytes = min_t(int, bytes, sizeof(tmp)); | |
1da177e4 LT |
745 | |
746 | DEBUG_ENT("going to reseed %s with %d bits " | |
747 | "(%d of %d requested)\n", | |
748 | r->name, bytes * 8, nbytes * 8, r->entropy_count); | |
749 | ||
90b75ee5 MM |
750 | bytes = extract_entropy(r->pull, tmp, bytes, |
751 | random_read_wakeup_thresh / 8, rsvd); | |
e68e5b66 | 752 | mix_pool_bytes(r, tmp, bytes); |
adc782da | 753 | credit_entropy_bits(r, bytes*8); |
1da177e4 LT |
754 | } |
755 | } | |
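A hedged worked example of the sizing logic above, using the defaults defined earlier in this file (random_read_wakeup_thresh = 64 bits, a 32-word transfer buffer); the request size is invented.

```c
/* Worked illustration (not driver code) of xfer_secondary_pool()'s
 * byte accounting under this file's default thresholds. */
#include <stdio.h>

int main(void)
{
	int wakeup_bits = 64;		/* random_read_wakeup_thresh          */
	int tmp_bytes = 32 * 4;		/* sizeof(tmp): OUTPUT_POOL_WORDS     */
	int nbytes = 16;		/* caller asked the pool for 16 bytes */
	int limited = 0;		/* e.g. refilling the nonblocking pool */
	int rsvd = limited ? 0 : wakeup_bits / 4;	/* 16 bytes reserved */
	int bytes = nbytes;

	if (bytes < wakeup_bits / 8)	/* at least one wakeup's worth        */
		bytes = wakeup_bits / 8;
	if (bytes > tmp_bytes)		/* but never more than the buffer     */
		bytes = tmp_bytes;

	printf("pull %d bytes from the input pool, leaving %d bytes reserved\n",
	       bytes, rsvd);		/* pull 16, reserve 16 */
	return 0;
}
```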
756 | ||
757 | /* | |
758 | * These functions extract randomness from the "entropy pool", and | |
759 | * return it in a buffer. | |
760 | * | |
761 | * The min parameter specifies the minimum amount we can pull before | |
762 | * failing to avoid races that defeat catastrophic reseeding while the | |
763 | * reserved parameter indicates how much entropy we must leave in the | |
764 | * pool after each pull to avoid starving other readers. | |
765 | * | |
766 | * Note: extract_entropy() assumes that .poolwords is a multiple of 16 words. | |
767 | */ | |
768 | ||
769 | static size_t account(struct entropy_store *r, size_t nbytes, int min, | |
770 | int reserved) | |
771 | { | |
772 | unsigned long flags; | |
773 | ||
1da177e4 LT |
774 | /* Hold lock while accounting */ |
775 | spin_lock_irqsave(&r->lock, flags); | |
776 | ||
cda796a3 | 777 | BUG_ON(r->entropy_count > r->poolinfo->POOLBITS); |
1da177e4 LT |
778 | DEBUG_ENT("trying to extract %d bits from %s\n", |
779 | nbytes * 8, r->name); | |
780 | ||
781 | /* Can we pull enough? */ | |
782 | if (r->entropy_count / 8 < min + reserved) { | |
783 | nbytes = 0; | |
784 | } else { | |
785 | /* If limited, never pull more than available */ | |
786 | if (r->limit && nbytes + reserved >= r->entropy_count / 8) | |
787 | nbytes = r->entropy_count/8 - reserved; | |
788 | ||
90b75ee5 | 789 | if (r->entropy_count / 8 >= nbytes + reserved) |
1da177e4 LT |
790 | r->entropy_count -= nbytes*8; |
791 | else | |
792 | r->entropy_count = reserved; | |
793 | ||
9a6f70bb | 794 | if (r->entropy_count < random_write_wakeup_thresh) { |
1da177e4 | 795 | wake_up_interruptible(&random_write_wait); |
9a6f70bb JD |
796 | kill_fasync(&fasync, SIGIO, POLL_OUT); |
797 | } | |
1da177e4 LT |
798 | } |
799 | ||
800 | DEBUG_ENT("debiting %d entropy credits from %s%s\n", | |
801 | nbytes * 8, r->name, r->limit ? "" : " (unlimited)"); | |
802 | ||
803 | spin_unlock_irqrestore(&r->lock, flags); | |
804 | ||
805 | return nbytes; | |
806 | } | |
807 | ||
808 | static void extract_buf(struct entropy_store *r, __u8 *out) | |
809 | { | |
602b6aee | 810 | int i; |
e68e5b66 MM |
811 | __u32 hash[5], workspace[SHA_WORKSPACE_WORDS]; |
812 | __u8 extract[64]; | |
1da177e4 | 813 | |
1c0ad3d4 | 814 | /* Generate a hash across the pool, 16 words (512 bits) at a time */ |
ffd8d3fa | 815 | sha_init(hash); |
1c0ad3d4 MM |
816 | for (i = 0; i < r->poolinfo->poolwords; i += 16) |
817 | sha_transform(hash, (__u8 *)(r->pool + i), workspace); | |
818 | ||
1da177e4 | 819 | /* |
1c0ad3d4 MM |
820 | * We mix the hash back into the pool to prevent backtracking |
821 | * attacks (where the attacker knows the state of the pool | |
822 | * plus the current outputs, and attempts to find previous | |
823 | * outputs), unless the hash function can be inverted. By | |
824 | * mixing at least a SHA1 worth of hash data back, we make | |
825 | * brute-forcing the feedback as hard as brute-forcing the | |
826 | * hash. | |
1da177e4 | 827 | */ |
e68e5b66 | 828 | mix_pool_bytes_extract(r, hash, sizeof(hash), extract); |
1da177e4 LT |
829 | |
830 | /* | |
1c0ad3d4 MM |
831 | * To avoid duplicates, we atomically extract a portion of the |
832 | * pool while mixing, and hash one final time. | |
1da177e4 | 833 | */ |
e68e5b66 | 834 | sha_transform(hash, extract, workspace); |
ffd8d3fa MM |
835 | memset(extract, 0, sizeof(extract)); |
836 | memset(workspace, 0, sizeof(workspace)); | |
1da177e4 LT |
837 | |
838 | /* | |
1c0ad3d4 MM |
839 | * In case the hash function has some recognizable output |
840 | * pattern, we fold it in half. Thus, we always feed back | |
841 | * twice as much data as we output. | |
1da177e4 | 842 | */ |
ffd8d3fa MM |
843 | hash[0] ^= hash[3]; |
844 | hash[1] ^= hash[4]; | |
845 | hash[2] ^= rol32(hash[2], 16); | |
846 | memcpy(out, hash, EXTRACT_SIZE); | |
847 | memset(hash, 0, sizeof(hash)); | |
1da177e4 LT |
848 | } |
849 | ||
90b75ee5 | 850 | static ssize_t extract_entropy(struct entropy_store *r, void *buf, |
1da177e4 LT |
851 | size_t nbytes, int min, int reserved) |
852 | { | |
853 | ssize_t ret = 0, i; | |
854 | __u8 tmp[EXTRACT_SIZE]; | |
855 | ||
856 | xfer_secondary_pool(r, nbytes); | |
857 | nbytes = account(r, nbytes, min, reserved); | |
858 | ||
859 | while (nbytes) { | |
860 | extract_buf(r, tmp); | |
861 | i = min_t(int, nbytes, EXTRACT_SIZE); | |
862 | memcpy(buf, tmp, i); | |
863 | nbytes -= i; | |
864 | buf += i; | |
865 | ret += i; | |
866 | } | |
867 | ||
868 | /* Wipe data just returned from memory */ | |
869 | memset(tmp, 0, sizeof(tmp)); | |
870 | ||
871 | return ret; | |
872 | } | |
873 | ||
874 | static ssize_t extract_entropy_user(struct entropy_store *r, void __user *buf, | |
875 | size_t nbytes) | |
876 | { | |
877 | ssize_t ret = 0, i; | |
878 | __u8 tmp[EXTRACT_SIZE]; | |
879 | ||
880 | xfer_secondary_pool(r, nbytes); | |
881 | nbytes = account(r, nbytes, 0, 0); | |
882 | ||
883 | while (nbytes) { | |
884 | if (need_resched()) { | |
885 | if (signal_pending(current)) { | |
886 | if (ret == 0) | |
887 | ret = -ERESTARTSYS; | |
888 | break; | |
889 | } | |
890 | schedule(); | |
891 | } | |
892 | ||
893 | extract_buf(r, tmp); | |
894 | i = min_t(int, nbytes, EXTRACT_SIZE); | |
895 | if (copy_to_user(buf, tmp, i)) { | |
896 | ret = -EFAULT; | |
897 | break; | |
898 | } | |
899 | ||
900 | nbytes -= i; | |
901 | buf += i; | |
902 | ret += i; | |
903 | } | |
904 | ||
905 | /* Wipe data just returned from memory */ | |
906 | memset(tmp, 0, sizeof(tmp)); | |
907 | ||
908 | return ret; | |
909 | } | |
910 | ||
911 | /* | |
912 | * This function is the exported kernel interface. It returns some | |
913 | * number of good random numbers, suitable for seeding TCP sequence | |
914 | * numbers, etc. | |
915 | */ | |
916 | void get_random_bytes(void *buf, int nbytes) | |
917 | { | |
918 | extract_entropy(&nonblocking_pool, buf, nbytes, 0, 0); | |
919 | } | |
1da177e4 LT |
920 | EXPORT_SYMBOL(get_random_bytes); |
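For context, a hedged sketch of how other kernel code typically consumes this export; the structure and function names below are invented for illustration.

```c
/* Hedged usage sketch (not part of this file): a hypothetical caller
 * seeding per-session key material with get_random_bytes(). */
#include <linux/random.h>
#include <linux/types.h>

struct example_session {		/* invented example structure */
	u8 secret[16];
	u32 nonce;
};

static void example_session_init(struct example_session *s)
{
	/* Both fields are filled from the nonblocking pool. */
	get_random_bytes(s->secret, sizeof(s->secret));
	get_random_bytes(&s->nonce, sizeof(s->nonce));
}
```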
921 | ||
922 | /* | |
923 | * init_std_data - initialize pool with system data | |
924 | * | |
925 | * @r: pool to initialize | |
926 | * | |
927 | * This function clears the pool's entropy count and mixes some system | |
928 | * data into the pool to prepare it for use. The pool is not cleared | |
929 | * as that can only decrease the entropy in the pool. | |
930 | */ | |
931 | static void init_std_data(struct entropy_store *r) | |
932 | { | |
f8595815 | 933 | ktime_t now; |
1da177e4 LT |
934 | unsigned long flags; |
935 | ||
936 | spin_lock_irqsave(&r->lock, flags); | |
937 | r->entropy_count = 0; | |
938 | spin_unlock_irqrestore(&r->lock, flags); | |
939 | ||
f8595815 | 940 | now = ktime_get_real(); |
e68e5b66 MM |
941 | mix_pool_bytes(r, &now, sizeof(now)); |
942 | mix_pool_bytes(r, utsname(), sizeof(*(utsname()))); | |
1da177e4 LT |
943 | } |
944 | ||
53c3f63e | 945 | static int rand_initialize(void) |
1da177e4 LT |
946 | { |
947 | init_std_data(&input_pool); | |
948 | init_std_data(&blocking_pool); | |
949 | init_std_data(&nonblocking_pool); | |
950 | return 0; | |
951 | } | |
952 | module_init(rand_initialize); | |
953 | ||
954 | void rand_initialize_irq(int irq) | |
955 | { | |
956 | struct timer_rand_state *state; | |
957 | ||
3060d6fe YL |
958 | state = get_timer_rand_state(irq); |
959 | ||
960 | if (state) | |
1da177e4 LT |
961 | return; |
962 | ||
963 | /* | |
f8595815 | 964 | * If kzalloc returns null, we just won't use that entropy |
1da177e4 LT |
965 | * source. |
966 | */ | |
f8595815 ED |
967 | state = kzalloc(sizeof(struct timer_rand_state), GFP_KERNEL); |
968 | if (state) | |
3060d6fe | 969 | set_timer_rand_state(irq, state); |
1da177e4 LT |
970 | } |
971 | ||
9361401e | 972 | #ifdef CONFIG_BLOCK |
1da177e4 LT |
973 | void rand_initialize_disk(struct gendisk *disk) |
974 | { | |
975 | struct timer_rand_state *state; | |
976 | ||
977 | /* | |
f8595815 | 978 | * If kzalloc returns null, we just won't use that entropy |
1da177e4 LT |
979 | * source. |
980 | */ | |
f8595815 ED |
981 | state = kzalloc(sizeof(struct timer_rand_state), GFP_KERNEL); |
982 | if (state) | |
1da177e4 | 983 | disk->random = state; |
1da177e4 | 984 | } |
9361401e | 985 | #endif |
1da177e4 LT |
986 | |
987 | static ssize_t | |
90b75ee5 | 988 | random_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos) |
1da177e4 LT |
989 | { |
990 | ssize_t n, retval = 0, count = 0; | |
991 | ||
992 | if (nbytes == 0) | |
993 | return 0; | |
994 | ||
995 | while (nbytes > 0) { | |
996 | n = nbytes; | |
997 | if (n > SEC_XFER_SIZE) | |
998 | n = SEC_XFER_SIZE; | |
999 | ||
1000 | DEBUG_ENT("reading %d bits\n", n*8); | |
1001 | ||
1002 | n = extract_entropy_user(&blocking_pool, buf, n); | |
1003 | ||
1004 | DEBUG_ENT("read got %d bits (%d still needed)\n", | |
1005 | n*8, (nbytes-n)*8); | |
1006 | ||
1007 | if (n == 0) { | |
1008 | if (file->f_flags & O_NONBLOCK) { | |
1009 | retval = -EAGAIN; | |
1010 | break; | |
1011 | } | |
1012 | ||
1013 | DEBUG_ENT("sleeping?\n"); | |
1014 | ||
1015 | wait_event_interruptible(random_read_wait, | |
1016 | input_pool.entropy_count >= | |
1017 | random_read_wakeup_thresh); | |
1018 | ||
1019 | DEBUG_ENT("awake\n"); | |
1020 | ||
1021 | if (signal_pending(current)) { | |
1022 | retval = -ERESTARTSYS; | |
1023 | break; | |
1024 | } | |
1025 | ||
1026 | continue; | |
1027 | } | |
1028 | ||
1029 | if (n < 0) { | |
1030 | retval = n; | |
1031 | break; | |
1032 | } | |
1033 | count += n; | |
1034 | buf += n; | |
1035 | nbytes -= n; | |
1036 | break; /* This break makes the device work */ | |
1037 | /* like a named pipe */ | |
1038 | } | |
1039 | ||
1040 | /* | |
1041 | * If we gave the user some bytes, update the access time. | |
1042 | */ | |
1043 | if (count) | |
1044 | file_accessed(file); | |
1045 | ||
1046 | return (count ? count : retval); | |
1047 | } | |
1048 | ||
1049 | static ssize_t | |
90b75ee5 | 1050 | urandom_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos) |
1da177e4 LT |
1051 | { |
1052 | return extract_entropy_user(&nonblocking_pool, buf, nbytes); | |
1053 | } | |
1054 | ||
1055 | static unsigned int | |
1056 | random_poll(struct file *file, poll_table * wait) | |
1057 | { | |
1058 | unsigned int mask; | |
1059 | ||
1060 | poll_wait(file, &random_read_wait, wait); | |
1061 | poll_wait(file, &random_write_wait, wait); | |
1062 | mask = 0; | |
1063 | if (input_pool.entropy_count >= random_read_wakeup_thresh) | |
1064 | mask |= POLLIN | POLLRDNORM; | |
1065 | if (input_pool.entropy_count < random_write_wakeup_thresh) | |
1066 | mask |= POLLOUT | POLLWRNORM; | |
1067 | return mask; | |
1068 | } | |
1069 | ||
7f397dcd MM |
1070 | static int |
1071 | write_pool(struct entropy_store *r, const char __user *buffer, size_t count) | |
1da177e4 | 1072 | { |
1da177e4 LT |
1073 | size_t bytes; |
1074 | __u32 buf[16]; | |
1075 | const char __user *p = buffer; | |
1da177e4 | 1076 | |
7f397dcd MM |
1077 | while (count > 0) { |
1078 | bytes = min(count, sizeof(buf)); | |
1079 | if (copy_from_user(&buf, p, bytes)) | |
1080 | return -EFAULT; | |
1da177e4 | 1081 | |
7f397dcd | 1082 | count -= bytes; |
1da177e4 LT |
1083 | p += bytes; |
1084 | ||
e68e5b66 | 1085 | mix_pool_bytes(r, buf, bytes); |
91f3f1e3 | 1086 | cond_resched(); |
1da177e4 | 1087 | } |
7f397dcd MM |
1088 | |
1089 | return 0; | |
1090 | } | |
1091 | ||
90b75ee5 MM |
1092 | static ssize_t random_write(struct file *file, const char __user *buffer, |
1093 | size_t count, loff_t *ppos) | |
7f397dcd MM |
1094 | { |
1095 | size_t ret; | |
1096 | struct inode *inode = file->f_path.dentry->d_inode; | |
1097 | ||
1098 | ret = write_pool(&blocking_pool, buffer, count); | |
1099 | if (ret) | |
1100 | return ret; | |
1101 | ret = write_pool(&nonblocking_pool, buffer, count); | |
1102 | if (ret) | |
1103 | return ret; | |
1104 | ||
1105 | inode->i_mtime = current_fs_time(inode->i_sb); | |
1106 | mark_inode_dirty(inode); | |
1107 | return (ssize_t)count; | |
1da177e4 LT |
1108 | } |
1109 | ||
43ae4860 | 1110 | static long random_ioctl(struct file *f, unsigned int cmd, unsigned long arg) |
1da177e4 LT |
1111 | { |
1112 | int size, ent_count; | |
1113 | int __user *p = (int __user *)arg; | |
1114 | int retval; | |
1115 | ||
1116 | switch (cmd) { | |
1117 | case RNDGETENTCNT: | |
43ae4860 MM |
1118 | /* inherently racy, no point locking */ |
1119 | if (put_user(input_pool.entropy_count, p)) | |
1da177e4 LT |
1120 | return -EFAULT; |
1121 | return 0; | |
1122 | case RNDADDTOENTCNT: | |
1123 | if (!capable(CAP_SYS_ADMIN)) | |
1124 | return -EPERM; | |
1125 | if (get_user(ent_count, p)) | |
1126 | return -EFAULT; | |
adc782da | 1127 | credit_entropy_bits(&input_pool, ent_count); |
1da177e4 LT |
1128 | return 0; |
1129 | case RNDADDENTROPY: | |
1130 | if (!capable(CAP_SYS_ADMIN)) | |
1131 | return -EPERM; | |
1132 | if (get_user(ent_count, p++)) | |
1133 | return -EFAULT; | |
1134 | if (ent_count < 0) | |
1135 | return -EINVAL; | |
1136 | if (get_user(size, p++)) | |
1137 | return -EFAULT; | |
7f397dcd MM |
1138 | retval = write_pool(&input_pool, (const char __user *)p, |
1139 | size); | |
1da177e4 LT |
1140 | if (retval < 0) |
1141 | return retval; | |
adc782da | 1142 | credit_entropy_bits(&input_pool, ent_count); |
1da177e4 LT |
1143 | return 0; |
1144 | case RNDZAPENTCNT: | |
1145 | case RNDCLEARPOOL: | |
1146 | /* Clear the entropy pool counters. */ | |
1147 | if (!capable(CAP_SYS_ADMIN)) | |
1148 | return -EPERM; | |
53c3f63e | 1149 | rand_initialize(); |
1da177e4 LT |
1150 | return 0; |
1151 | default: | |
1152 | return -EINVAL; | |
1153 | } | |
1154 | } | |
1155 | ||
9a6f70bb JD |
1156 | static int random_fasync(int fd, struct file *filp, int on) |
1157 | { | |
1158 | return fasync_helper(fd, filp, on, &fasync); | |
1159 | } | |
1160 | ||
2b8693c0 | 1161 | const struct file_operations random_fops = { |
1da177e4 LT |
1162 | .read = random_read, |
1163 | .write = random_write, | |
1164 | .poll = random_poll, | |
43ae4860 | 1165 | .unlocked_ioctl = random_ioctl, |
9a6f70bb | 1166 | .fasync = random_fasync, |
1da177e4 LT |
1167 | }; |
1168 | ||
2b8693c0 | 1169 | const struct file_operations urandom_fops = { |
1da177e4 LT |
1170 | .read = urandom_read, |
1171 | .write = random_write, | |
43ae4860 | 1172 | .unlocked_ioctl = random_ioctl, |
9a6f70bb | 1173 | .fasync = random_fasync, |
1da177e4 LT |
1174 | }; |
1175 | ||
1176 | /*************************************************************** | |
1177 | * Random UUID interface | |
1178 | * | |
1179 | * Used here for a Boot ID, but can be useful for other kernel | |
1180 | * drivers. | |
1181 | ***************************************************************/ | |
1182 | ||
1183 | /* | |
1184 | * Generate random UUID | |
1185 | */ | |
1186 | void generate_random_uuid(unsigned char uuid_out[16]) | |
1187 | { | |
1188 | get_random_bytes(uuid_out, 16); | |
1189 | /* Set UUID version to 4 --- truly random generation */ | |
1190 | uuid_out[6] = (uuid_out[6] & 0x0F) | 0x40; | |
1191 | /* Set the UUID variant to DCE */ | |
1192 | uuid_out[8] = (uuid_out[8] & 0x3F) | 0x80; | |
1193 | } | |
1da177e4 LT |
1194 | EXPORT_SYMBOL(generate_random_uuid); |
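To show exactly which bits the two masks above pin down, here is a hedged user-space sketch that applies the same version-4 / DCE-variant fix-up to an arbitrary 16-byte buffer and prints it in the standard UUID form; it is illustrative only.

```c
/* Illustrative sketch (not driver code): the version-4 / DCE variant
 * fix-up applied to an arbitrary 16-byte buffer. */
#include <stdio.h>

static void fixup_uuid_v4(unsigned char u[16])
{
	u[6] = (u[6] & 0x0F) | 0x40;	/* version nibble -> 4              */
	u[8] = (u[8] & 0x3F) | 0x80;	/* top two variant bits -> binary 10 */
}

int main(void)
{
	unsigned char u[16] = { 0 };	/* pretend these came from the RNG */
	int i;

	fixup_uuid_v4(u);
	for (i = 0; i < 16; i++)
		printf("%02x%s", u[i],
		       (i == 3 || i == 5 || i == 7 || i == 9) ? "-" : "");
	printf("\n");	/* 00000000-0000-4000-8000-000000000000 */
	return 0;
}
```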
1195 | ||
1196 | /******************************************************************** | |
1197 | * | |
1198 | * Sysctl interface | |
1199 | * | |
1200 | ********************************************************************/ | |
1201 | ||
1202 | #ifdef CONFIG_SYSCTL | |
1203 | ||
1204 | #include <linux/sysctl.h> | |
1205 | ||
1206 | static int min_read_thresh = 8, min_write_thresh; | |
1207 | static int max_read_thresh = INPUT_POOL_WORDS * 32; | |
1208 | static int max_write_thresh = INPUT_POOL_WORDS * 32; | |
1209 | static char sysctl_bootid[16]; | |
1210 | ||
1211 | /* | |
1212 | * These functions are used to return both the bootid UUID and a random | |
1213 | * UUID. The difference is in whether table->data is NULL; if it is, | |
1214 | * then a new UUID is generated and returned to the user. | |
1215 | * | |
1216 | * If the user accesses this via the proc interface, it will be returned | |
1217 | * as an ASCII string in the standard UUID format. If accessed via the | |
1218 | * sysctl system call, it is returned as 16 bytes of binary data. | |
1219 | */ | |
1220 | static int proc_do_uuid(ctl_table *table, int write, struct file *filp, | |
1221 | void __user *buffer, size_t *lenp, loff_t *ppos) | |
1222 | { | |
1223 | ctl_table fake_table; | |
1224 | unsigned char buf[64], tmp_uuid[16], *uuid; | |
1225 | ||
1226 | uuid = table->data; | |
1227 | if (!uuid) { | |
1228 | uuid = tmp_uuid; | |
1229 | uuid[8] = 0; | |
1230 | } | |
1231 | if (uuid[8] == 0) | |
1232 | generate_random_uuid(uuid); | |
1233 | ||
1234 | sprintf(buf, "%02x%02x%02x%02x-%02x%02x-%02x%02x-%02x%02x-" | |
1235 | "%02x%02x%02x%02x%02x%02x", | |
1236 | uuid[0], uuid[1], uuid[2], uuid[3], | |
1237 | uuid[4], uuid[5], uuid[6], uuid[7], | |
1238 | uuid[8], uuid[9], uuid[10], uuid[11], | |
1239 | uuid[12], uuid[13], uuid[14], uuid[15]); | |
1240 | fake_table.data = buf; | |
1241 | fake_table.maxlen = sizeof(buf); | |
1242 | ||
1243 | return proc_dostring(&fake_table, write, filp, buffer, lenp, ppos); | |
1244 | } | |
1245 | ||
f221e726 | 1246 | static int uuid_strategy(ctl_table *table, |
1da177e4 | 1247 | void __user *oldval, size_t __user *oldlenp, |
1f29bcd7 | 1248 | void __user *newval, size_t newlen) |
1da177e4 LT |
1249 | { |
1250 | unsigned char tmp_uuid[16], *uuid; | |
1251 | unsigned int len; | |
1252 | ||
1253 | if (!oldval || !oldlenp) | |
1254 | return 1; | |
1255 | ||
1256 | uuid = table->data; | |
1257 | if (!uuid) { | |
1258 | uuid = tmp_uuid; | |
1259 | uuid[8] = 0; | |
1260 | } | |
1261 | if (uuid[8] == 0) | |
1262 | generate_random_uuid(uuid); | |
1263 | ||
1264 | if (get_user(len, oldlenp)) | |
1265 | return -EFAULT; | |
1266 | if (len) { | |
1267 | if (len > 16) | |
1268 | len = 16; | |
1269 | if (copy_to_user(oldval, uuid, len) || | |
1270 | put_user(len, oldlenp)) | |
1271 | return -EFAULT; | |
1272 | } | |
1273 | return 1; | |
1274 | } | |
1275 | ||
1276 | static int sysctl_poolsize = INPUT_POOL_WORDS * 32; | |
1277 | ctl_table random_table[] = { | |
1278 | { | |
1279 | .ctl_name = RANDOM_POOLSIZE, | |
1280 | .procname = "poolsize", | |
1281 | .data = &sysctl_poolsize, | |
1282 | .maxlen = sizeof(int), | |
1283 | .mode = 0444, | |
1284 | .proc_handler = &proc_dointvec, | |
1285 | }, | |
1286 | { | |
1287 | .ctl_name = RANDOM_ENTROPY_COUNT, | |
1288 | .procname = "entropy_avail", | |
1289 | .maxlen = sizeof(int), | |
1290 | .mode = 0444, | |
1291 | .proc_handler = &proc_dointvec, | |
1292 | .data = &input_pool.entropy_count, | |
1293 | }, | |
1294 | { | |
1295 | .ctl_name = RANDOM_READ_THRESH, | |
1296 | .procname = "read_wakeup_threshold", | |
1297 | .data = &random_read_wakeup_thresh, | |
1298 | .maxlen = sizeof(int), | |
1299 | .mode = 0644, | |
1300 | .proc_handler = &proc_dointvec_minmax, | |
1301 | .strategy = &sysctl_intvec, | |
1302 | .extra1 = &min_read_thresh, | |
1303 | .extra2 = &max_read_thresh, | |
1304 | }, | |
1305 | { | |
1306 | .ctl_name = RANDOM_WRITE_THRESH, | |
1307 | .procname = "write_wakeup_threshold", | |
1308 | .data = &random_write_wakeup_thresh, | |
1309 | .maxlen = sizeof(int), | |
1310 | .mode = 0644, | |
1311 | .proc_handler = &proc_dointvec_minmax, | |
1312 | .strategy = &sysctl_intvec, | |
1313 | .extra1 = &min_write_thresh, | |
1314 | .extra2 = &max_write_thresh, | |
1315 | }, | |
1316 | { | |
1317 | .ctl_name = RANDOM_BOOT_ID, | |
1318 | .procname = "boot_id", | |
1319 | .data = &sysctl_bootid, | |
1320 | .maxlen = 16, | |
1321 | .mode = 0444, | |
1322 | .proc_handler = &proc_do_uuid, | |
1323 | .strategy = &uuid_strategy, | |
1324 | }, | |
1325 | { | |
1326 | .ctl_name = RANDOM_UUID, | |
1327 | .procname = "uuid", | |
1328 | .maxlen = 16, | |
1329 | .mode = 0444, | |
1330 | .proc_handler = &proc_do_uuid, | |
1331 | .strategy = &uuid_strategy, | |
1332 | }, | |
1333 | { .ctl_name = 0 } | |
1334 | }; | |
1335 | #endif /* CONFIG_SYSCTL */ | |
1336 | ||
1337 | /******************************************************************** | |
1338 | * | |
1339 | * Random functions for networking | |
1340 | * | |
1341 | ********************************************************************/ | |
1342 | ||
1343 | /* | |
1344 | * TCP initial sequence number picking. This uses the random number | |
1345 | * generator to pick an initial secret value. This value is hashed | |
1346 | * along with the TCP endpoint information to provide a unique | |
1347 | * starting point for each pair of TCP endpoints. This defeats | |
1348 | * attacks which rely on guessing the initial TCP sequence number. | |
1349 | * This algorithm was suggested by Steve Bellovin. | |
1350 | * | |
1351 | * Using a very strong hash was taking an appreciable amount of the total | |
1352 | * TCP connection establishment time, so this is a weaker hash, | |
1353 | * compensated for by changing the secret periodically. | |
1354 | */ | |
1355 | ||
1356 | /* F, G and H are basic MD4 functions: selection, majority, parity */ | |
1357 | #define F(x, y, z) ((z) ^ ((x) & ((y) ^ (z)))) | |
1358 | #define G(x, y, z) (((x) & (y)) + (((x) ^ (y)) & (z))) | |
1359 | #define H(x, y, z) ((x) ^ (y) ^ (z)) | |
1360 | ||
1361 | /* | |
1362 | * The generic round function. The application is so specific that | |
1363 | * we don't bother protecting all the arguments with parens, as is generally | |
1364 | * good macro practice, in favor of extra legibility. | |
1365 | * Rotation is separate from addition to prevent recomputation | |
1366 | */ | |
1367 | #define ROUND(f, a, b, c, d, x, s) \ | |
1368 | (a += f(b, c, d) + x, a = (a << s) | (a >> (32 - s))) | |
1369 | #define K1 0 | |
1370 | #define K2 013240474631UL | |
1371 | #define K3 015666365641UL | |
1372 | ||
1373 | #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) | |
1374 | ||
90b75ee5 | 1375 | static __u32 twothirdsMD4Transform(__u32 const buf[4], __u32 const in[12]) |
1da177e4 LT |
1376 | { |
1377 | __u32 a = buf[0], b = buf[1], c = buf[2], d = buf[3]; | |
1378 | ||
1379 | /* Round 1 */ | |
1380 | ROUND(F, a, b, c, d, in[ 0] + K1, 3); | |
1381 | ROUND(F, d, a, b, c, in[ 1] + K1, 7); | |
1382 | ROUND(F, c, d, a, b, in[ 2] + K1, 11); | |
1383 | ROUND(F, b, c, d, a, in[ 3] + K1, 19); | |
1384 | ROUND(F, a, b, c, d, in[ 4] + K1, 3); | |
1385 | ROUND(F, d, a, b, c, in[ 5] + K1, 7); | |
1386 | ROUND(F, c, d, a, b, in[ 6] + K1, 11); | |
1387 | ROUND(F, b, c, d, a, in[ 7] + K1, 19); | |
1388 | ROUND(F, a, b, c, d, in[ 8] + K1, 3); | |
1389 | ROUND(F, d, a, b, c, in[ 9] + K1, 7); | |
1390 | ROUND(F, c, d, a, b, in[10] + K1, 11); | |
1391 | ROUND(F, b, c, d, a, in[11] + K1, 19); | |
1392 | ||
1393 | /* Round 2 */ | |
1394 | ROUND(G, a, b, c, d, in[ 1] + K2, 3); | |
1395 | ROUND(G, d, a, b, c, in[ 3] + K2, 5); | |
1396 | ROUND(G, c, d, a, b, in[ 5] + K2, 9); | |
1397 | ROUND(G, b, c, d, a, in[ 7] + K2, 13); | |
1398 | ROUND(G, a, b, c, d, in[ 9] + K2, 3); | |
1399 | ROUND(G, d, a, b, c, in[11] + K2, 5); | |
1400 | ROUND(G, c, d, a, b, in[ 0] + K2, 9); | |
1401 | ROUND(G, b, c, d, a, in[ 2] + K2, 13); | |
1402 | ROUND(G, a, b, c, d, in[ 4] + K2, 3); | |
1403 | ROUND(G, d, a, b, c, in[ 6] + K2, 5); | |
1404 | ROUND(G, c, d, a, b, in[ 8] + K2, 9); | |
1405 | ROUND(G, b, c, d, a, in[10] + K2, 13); | |
1406 | ||
1407 | /* Round 3 */ | |
1408 | ROUND(H, a, b, c, d, in[ 3] + K3, 3); | |
1409 | ROUND(H, d, a, b, c, in[ 7] + K3, 9); | |
1410 | ROUND(H, c, d, a, b, in[11] + K3, 11); | |
1411 | ROUND(H, b, c, d, a, in[ 2] + K3, 15); | |
1412 | ROUND(H, a, b, c, d, in[ 6] + K3, 3); | |
1413 | ROUND(H, d, a, b, c, in[10] + K3, 9); | |
1414 | ROUND(H, c, d, a, b, in[ 1] + K3, 11); | |
1415 | ROUND(H, b, c, d, a, in[ 5] + K3, 15); | |
1416 | ROUND(H, a, b, c, d, in[ 9] + K3, 3); | |
1417 | ROUND(H, d, a, b, c, in[ 0] + K3, 9); | |
1418 | ROUND(H, c, d, a, b, in[ 4] + K3, 11); | |
1419 | ROUND(H, b, c, d, a, in[ 8] + K3, 15); | |
1420 | ||
1421 | return buf[1] + b; /* "most hashed" word */ | |
1422 | /* Alternative: return sum of all words? */ | |
1423 | } | |
1424 | #endif | |
1425 | ||
1426 | #undef ROUND | |
1427 | #undef F | |
1428 | #undef G | |
1429 | #undef H | |
1430 | #undef K1 | |
1431 | #undef K2 | |
1432 | #undef K3 | |
1433 | ||
1434 | /* This should not be set so low that ISNs wrap too fast. */ | |
1435 | #define REKEY_INTERVAL (300 * HZ) | |
1436 | /* | |
1437 | * Bit layout of the tcp sequence numbers (before adding current time): | |
1438 | * bits 24-31: increased after every key exchange | |
1439 | * bits 0-23: hash(source, dest) | |
1440 | * | |
1441 | * The implementation is similar to the algorithm described | |
1442 | * in the Appendix of RFC 1185, except that | |
1443 | * - it uses a 1 MHz clock instead of a 250 kHz clock | |
1444 | * - it performs a rekey every 5 minutes, which is equivalent | |
1445 | * to a (source,dest) tuple dependent forward jump of the | |
1446 | * clock by 0..2^(HASH_BITS+1) | |
1447 | * | |
1448 | * Thus the average ISN wraparound time is 68 minutes instead of | |
1449 | * 4.55 hours. | |
1450 | * | |
1451 | * SMP cleanup and lock avoidance with poor man's RCU. | |
1452 | * Manfred Spraul <manfred@colorfullife.com> | |
1453 | * | |
1454 | */ | |
1455 | #define COUNT_BITS 8 | |
1456 | #define COUNT_MASK ((1 << COUNT_BITS) - 1) | |
1457 | #define HASH_BITS 24 | |
1458 | #define HASH_MASK ((1 << HASH_BITS) - 1) | |
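/*
 * Worked example (illustrative, not from the original file): with
 * COUNT_BITS = 8 and HASH_BITS = 24, a rekey counter of 0x2a is stored
 * pre-shifted as 0x2a000000, and an endpoint hash of 0x00123456 gives
 *
 *	seq = 0x2a000000 + 0x00123456 = 0x2a123456
 *
 * before the clock value is added on top.
 */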
1459 | ||
1460 | static struct keydata { | |
1461 | __u32 count; /* already shifted to the final position */ | |
1462 | __u32 secret[12]; | |
1463 | } ____cacheline_aligned ip_keydata[2]; | |
1464 | ||
1465 | static unsigned int ip_cnt; | |
1466 | ||
65f27f38 | 1467 | static void rekey_seq_generator(struct work_struct *work); |
1da177e4 | 1468 | |
65f27f38 | 1469 | static DECLARE_DELAYED_WORK(rekey_work, rekey_seq_generator); |
1da177e4 LT |
1470 | |
1471 | /* | |
1472 | * Lock avoidance: | |
1473 | * The ISN generation runs lockless - it's just a hash over random data. | |
1474 | * State changes happen every 5 minutes when the random key is replaced. | |
1475 | * Synchronization is performed by having two copies of the hash function | |
1476 | * state and rekey_seq_generator always updates the inactive copy. | |
1477 | * The copy is then activated by updating ip_cnt. | |
1478 | * The implementation breaks down if someone blocks the thread | |
1479 | * that processes SYN requests for more than 5 minutes. That should never | |
1480 | * happen, and even if it does, the worst case is that a not perfectly | |
1481 | * compliant ISN is generated; nothing fatal. | |
1482 | */ | |
65f27f38 | 1483 | static void rekey_seq_generator(struct work_struct *work) |
1da177e4 LT |
1484 | { |
1485 | struct keydata *keyptr = &ip_keydata[1 ^ (ip_cnt & 1)]; | |
1486 | ||
1487 | get_random_bytes(keyptr->secret, sizeof(keyptr->secret)); | |
1488 | keyptr->count = (ip_cnt & COUNT_MASK) << HASH_BITS; | |
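	/*
	 * Publish the new secret and count before ip_cnt is advanced; this
	 * pairs with the smp_rmb() in get_keyptr() so readers that see the
	 * new ip_cnt also see the new keydata.
	 */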
1489 | smp_wmb(); | |
1490 | ip_cnt++; | |
417b43d4 AB |
1491 | schedule_delayed_work(&rekey_work, |
1492 | round_jiffies_relative(REKEY_INTERVAL)); | |
1da177e4 LT |
1493 | } |
1494 | ||
1495 | static inline struct keydata *get_keyptr(void) | |
1496 | { | |
1497 | struct keydata *keyptr = &ip_keydata[ip_cnt & 1]; | |
1498 | ||
1499 | smp_rmb(); | |
1500 | ||
1501 | return keyptr; | |
1502 | } | |
1503 | ||
1504 | static __init int seqgen_init(void) | |
1505 | { | |
1506 | rekey_seq_generator(NULL); | |
1507 | return 0; | |
1508 | } | |
1509 | late_initcall(seqgen_init); | |
1510 | ||
1511 | #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) | |
b09b845c AV |
1512 | __u32 secure_tcpv6_sequence_number(__be32 *saddr, __be32 *daddr, |
1513 | __be16 sport, __be16 dport) | |
1da177e4 | 1514 | { |
1da177e4 LT |
1515 | __u32 seq; |
1516 | __u32 hash[12]; | |
1517 | struct keydata *keyptr = get_keyptr(); | |
1518 | ||
1519 | /* The procedure is the same as for IPv4, but addresses are longer. | |
1520 | * Thus we must use twothirdsMD4Transform. | |
1521 | */ | |
1522 | ||
1523 | memcpy(hash, saddr, 16); | |
90b75ee5 MM |
1524 | hash[4] = ((__force u16)sport << 16) + (__force u16)dport; |
1525 | memcpy(&hash[5], keyptr->secret, sizeof(__u32) * 7); | |
1da177e4 | 1526 | |
b09b845c | 1527 | seq = twothirdsMD4Transform((const __u32 *)daddr, hash) & HASH_MASK; |
1da177e4 LT |
1528 | seq += keyptr->count; |
1529 | ||
6dd10a62 | 1530 | seq += ktime_to_ns(ktime_get_real()); |
1da177e4 LT |
1531 | |
1532 | return seq; | |
1533 | } | |
1534 | EXPORT_SYMBOL(secure_tcpv6_sequence_number); | |
1535 | #endif | |
1536 | ||
1537 | /* The code below is shamelessly stolen from secure_tcp_sequence_number(). | |
1538 | * All blames to Andrey V. Savochkin <saw@msu.ru>. | |
1539 | */ | |
b09b845c | 1540 | __u32 secure_ip_id(__be32 daddr) |
1da177e4 LT |
1541 | { |
1542 | struct keydata *keyptr; | |
1543 | __u32 hash[4]; | |
1544 | ||
1545 | keyptr = get_keyptr(); | |
1546 | ||
1547 | /* | |
1548 | * Pick a unique starting offset for each IP destination. | |
1549 | * The dest ip address is placed in the starting vector, | |
1550 | * which is then hashed with random data. | |
1551 | */ | |
b09b845c | 1552 | hash[0] = (__force __u32)daddr; |
1da177e4 LT |
1553 | hash[1] = keyptr->secret[9]; |
1554 | hash[2] = keyptr->secret[10]; | |
1555 | hash[3] = keyptr->secret[11]; | |
1556 | ||
1557 | return half_md4_transform(hash, keyptr->secret); | |
1558 | } | |
1559 | ||
1560 | #ifdef CONFIG_INET | |
1561 | ||
b09b845c AV |
1562 | __u32 secure_tcp_sequence_number(__be32 saddr, __be32 daddr, |
1563 | __be16 sport, __be16 dport) | |
1da177e4 | 1564 | { |
1da177e4 LT |
1565 | __u32 seq; |
1566 | __u32 hash[4]; | |
1567 | struct keydata *keyptr = get_keyptr(); | |
1568 | ||
1569 | /* | |
1570 | * Pick a unique starting offset for each pair of TCP connection endpoints | |
1571 | * (saddr, daddr, sport, dport). | |
1572 | * Note that the words are placed into the starting vector, which is | |
1573 | * then mixed with a partial MD4 over random data. | |
1574 | */ | |
90b75ee5 MM |
1575 | hash[0] = (__force u32)saddr; |
1576 | hash[1] = (__force u32)daddr; | |
1577 | hash[2] = ((__force u16)sport << 16) + (__force u16)dport; | |
1578 | hash[3] = keyptr->secret[11]; | |
1da177e4 LT |
1579 | |
1580 | seq = half_md4_transform(hash, keyptr->secret) & HASH_MASK; | |
1581 | seq += keyptr->count; | |
1582 | /* | |
1583 | * As close as possible to RFC 793, which | |
1584 | * suggests using a 250 kHz clock. | |
1585 | * Further reading shows this assumes 2 Mb/s networks. | |
9b42c336 ED |
1586 | * For 10 Mb/s Ethernet, a 1 MHz clock is appropriate. |
1587 | * For 10 Gb/s Ethernet, a 1 GHz clock should be ok, but | |
1588 | * we also need to limit the resolution so that the u32 seq | |
1589 | * wraps around less than once per MSL (2 minutes). | |
1590 | * Choosing a clock of 64 ns period is OK: 2^32 * 64 ns gives a period of about 274 s. | |
1da177e4 | 1591 | */ |
6dd10a62 | 1592 | seq += ktime_to_ns(ktime_get_real()) >> 6; |
90b75ee5 | 1593 | |
1da177e4 LT |
1594 | return seq; |
1595 | } | |
1596 | ||
a7f5e7f1 | 1597 | /* Generate secure starting point for ephemeral IPV4 transport port search */ |
b09b845c | 1598 | u32 secure_ipv4_port_ephemeral(__be32 saddr, __be32 daddr, __be16 dport) |
1da177e4 LT |
1599 | { |
1600 | struct keydata *keyptr = get_keyptr(); | |
1601 | u32 hash[4]; | |
1602 | ||
1603 | /* | |
1604 | * Pick a unique starting offset for each ephemeral port search | |
1605 | * (saddr, daddr, dport) and 48bits of random data. | |
1606 | */ | |
b09b845c AV |
1607 | hash[0] = (__force u32)saddr; |
1608 | hash[1] = (__force u32)daddr; | |
1609 | hash[2] = (__force u32)dport ^ keyptr->secret[10]; | |
1da177e4 LT |
1610 | hash[3] = keyptr->secret[11]; |
1611 | ||
1612 | return half_md4_transform(hash, keyptr->secret); | |
1613 | } | |
9f593653 | 1614 | EXPORT_SYMBOL_GPL(secure_ipv4_port_ephemeral); |
1da177e4 LT |
1615 | |
1616 | #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) | |
90b75ee5 MM |
1617 | u32 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr, |
1618 | __be16 dport) | |
1da177e4 LT |
1619 | { |
1620 | struct keydata *keyptr = get_keyptr(); | |
1621 | u32 hash[12]; | |
1622 | ||
1623 | memcpy(hash, saddr, 16); | |
b09b845c | 1624 | hash[4] = (__force u32)dport; |
90b75ee5 | 1625 | memcpy(&hash[5], keyptr->secret, sizeof(__u32) * 7); |
1da177e4 | 1626 | |
b09b845c | 1627 | return twothirdsMD4Transform((const __u32 *)daddr, hash); |
1da177e4 | 1628 | } |
1da177e4 LT |
1629 | #endif |
1630 | ||
c4365c92 ACM |
1631 | #if defined(CONFIG_IP_DCCP) || defined(CONFIG_IP_DCCP_MODULE) |
1632 | /* Similar to secure_tcp_sequence_number but generates a 48 bit value: | |
1633 | * bits 32-47: increased after every key exchange | |
1634 | * bits 0-31: hash(source, dest) | |
1635 | */ | |
b09b845c AV |
1636 | u64 secure_dccp_sequence_number(__be32 saddr, __be32 daddr, |
1637 | __be16 sport, __be16 dport) | |
c4365c92 | 1638 | { |
c4365c92 ACM |
1639 | u64 seq; |
1640 | __u32 hash[4]; | |
1641 | struct keydata *keyptr = get_keyptr(); | |
1642 | ||
b09b845c AV |
1643 | hash[0] = (__force u32)saddr; |
1644 | hash[1] = (__force u32)daddr; | |
1645 | hash[2] = ((__force u16)sport << 16) + (__force u16)dport; | |
c4365c92 ACM |
1646 | hash[3] = keyptr->secret[11]; |
1647 | ||
1648 | seq = half_md4_transform(hash, keyptr->secret); | |
1649 | seq |= ((u64)keyptr->count) << (32 - HASH_BITS); | |
1650 | ||
6dd10a62 | 1651 | seq += ktime_to_ns(ktime_get_real()); |
c4365c92 | 1652 | seq &= (1ull << 48) - 1; |
90b75ee5 | 1653 | |
c4365c92 ACM |
1654 | return seq; |
1655 | } | |
c4365c92 ACM |
1656 | EXPORT_SYMBOL(secure_dccp_sequence_number); |
1657 | #endif | |
1658 | ||
1da177e4 LT |
1659 | #endif /* CONFIG_INET */ |
1660 | ||
1661 | ||
1662 | /* | |
1663 | * Get a random word for internal kernel use only. Similar to urandom but | |
1664 | * with the goal of minimal entropy pool depletion. As a result, the random | |
1665 | * value is not cryptographically secure, but for several uses the cost of | |
1666 | * depleting entropy is too high. | |
1667 | */ | |
8a0a9bd4 | 1668 | DEFINE_PER_CPU(__u32 [4], get_random_int_hash); |
1da177e4 LT |
1669 | unsigned int get_random_int(void) |
1670 | { | |
8a0a9bd4 LT |
1671 | struct keydata *keyptr; |
1672 | __u32 *hash = get_cpu_var(get_random_int_hash); | |
1673 | int ret; | |
1674 | ||
1675 | keyptr = get_keyptr(); | |
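	/*
	 * Cheap per-cpu mixing: fold the pid, current time, cycle counter and
	 * a stack address into the running hash before the keyed half-MD4.
	 */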
1676 | hash[0] += current->pid + jiffies + get_cycles() + (int)(long)&ret; | |
1677 | ||
1678 | ret = half_md4_transform(hash, keyptr->secret); | |
1679 | put_cpu_var(get_random_int_hash); | |
1680 | ||
1681 | return ret; | |
1da177e4 LT |
1682 | } |
1683 | ||
1684 | /* | |
1685 | * randomize_range() returns a start address such that | |
1686 | * | |
1687 | * [...... <range> .....] | |
1688 | * start end | |
1689 | * | |
1690 | * a <range> with size "len" starting at the return value is inside the | |
1691 | * area defined by [start, end], but is otherwise randomized. | |
1692 | */ | |
1693 | unsigned long | |
1694 | randomize_range(unsigned long start, unsigned long end, unsigned long len) | |
1695 | { | |
1696 | unsigned long range = end - len - start; | |
1697 | ||
1698 | if (end <= start + len) | |
1699 | return 0; | |
1700 | return PAGE_ALIGN(get_random_int() % range + start); | |
1701 | } |
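
/*
 * Example use of randomize_range() (an illustrative sketch only; real
 * callers live elsewhere in the kernel, typically in arch code): pick a
 * page-aligned, randomized base for a region of 'len' bytes somewhere
 * inside [min_addr, max_addr].
 *
 *	unsigned long base = randomize_range(min_addr, max_addr, len);
 *	if (!base)
 *		base = min_addr;	(range too small, fall back to the start)
 */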