/*
 * random.c -- A strong random number generator
 *
 * Copyright Matt Mackall <mpm@selenic.com>, 2003, 2004, 2005
 *
 * Copyright Theodore Ts'o, 1994, 1995, 1996, 1997, 1998, 1999.  All
 * rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, and the entire permission notice in its entirety,
 *    including the disclaimer of warranties.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote
 *    products derived from this software without specific prior
 *    written permission.
 *
 * ALTERNATIVELY, this product may be distributed under the terms of
 * the GNU General Public License, in which case the provisions of the GPL are
 * required INSTEAD OF the above restrictions.  (This clause is
 * necessary due to a potential bad interaction between the GPL and
 * the restrictions contained in a BSD-style copyright.)
 *
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ALL OF
 * WHICH ARE HEREBY DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
 * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
 * USE OF THIS SOFTWARE, EVEN IF NOT ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 *
 * (now, with legal B.S. out of the way.....)
 *
 * This routine gathers environmental noise from device drivers, etc.,
 * and returns good random numbers, suitable for cryptographic use.
 * Besides the obvious cryptographic uses, these numbers are also good
 * for seeding TCP sequence numbers, and other places where it is
 * desirable to have numbers which are not only random, but hard to
 * predict by an attacker.
 *
 * Theory of operation
 * ===================
 *
 * Computers are very predictable devices.  Hence it is extremely hard
 * to produce truly random numbers on a computer --- as opposed to
 * pseudo-random numbers, which can easily be generated by using an
 * algorithm.  Unfortunately, it is very easy for attackers to guess
 * the sequence of pseudo-random number generators, and for some
 * applications this is not acceptable.  So instead, we must try to
 * gather "environmental noise" from the computer's environment, which
 * must be hard for outside attackers to observe, and use that to
 * generate random numbers.  In a Unix environment, this is best done
 * from inside the kernel.
 *
 * Sources of randomness from the environment include inter-keyboard
 * timings, inter-interrupt timings from some interrupts, and other
 * events which are both (a) non-deterministic and (b) hard for an
 * outside observer to measure.  Randomness from these sources is
 * added to an "entropy pool", which is mixed using a CRC-like function.
 * This is not cryptographically strong, but it is adequate assuming
 * the randomness is not chosen maliciously, and it is fast enough that
 * the overhead of doing it on every interrupt is very reasonable.
 * As random bytes are mixed into the entropy pool, the routines keep
 * an *estimate* of how many bits of randomness have been stored into
 * the random number generator's internal state.
 *
 * When random bytes are desired, they are obtained by taking the SHA
 * hash of the contents of the "entropy pool".  The SHA hash avoids
 * exposing the internal state of the entropy pool.  It is believed to
 * be computationally infeasible to derive any useful information
 * about the input of SHA from its output.  Even if it is possible to
 * analyze SHA in some clever way, as long as the amount of data
 * returned from the generator is less than the inherent entropy in
 * the pool, the output data is totally unpredictable.  For this
 * reason, the routine decreases its internal estimate of how many
 * bits of "true randomness" are contained in the entropy pool as it
 * outputs random numbers.
 *
 * If this estimate goes to zero, the routine can still generate
 * random numbers; however, an attacker may (at least in theory) be
 * able to infer the future output of the generator from prior
 * outputs.  This requires successful cryptanalysis of SHA, which is
 * not believed to be feasible, but there is a remote possibility.
 * Nonetheless, these numbers should be useful for the vast majority
 * of purposes.
 *
 * Exported interfaces ---- output
 * ===============================
 *
 * There are three exported interfaces; the first is one designed to
 * be used from within the kernel:
 *
 *	void get_random_bytes(void *buf, int nbytes);
 *
 * This interface will return the requested number of random bytes,
 * and place it in the requested buffer.
 *
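 * As a rough illustration of the kernel-side interface (the names
 * my_key and my_setup are made up for this example and do not exist
 * in the kernel), a driver that needs a fresh 128-bit secret could do:
 *
 *	static u8 my_key[16];
 *
 *	static void my_setup(void)
 *	{
 *		get_random_bytes(my_key, sizeof(my_key));
 *	}
 *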
109 * The two other interfaces are two character devices /dev/random and
110 * /dev/urandom. /dev/random is suitable for use when very high
111 * quality randomness is desired (for example, for key generation or
112 * one-time pads), as it will only return a maximum of the number of
113 * bits of randomness (as estimated by the random number generator)
114 * contained in the entropy pool.
116 * The /dev/urandom device does not have this limit, and will return
117 * as many bytes as are requested. As more and more random bytes are
118 * requested without giving time for the entropy pool to recharge,
119 * this will result in random numbers that are merely cryptographically
120 * strong. For many applications, however, this is acceptable.
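 * For illustration only, a user-space consumer can pull bytes from
 * either device with ordinary reads, e.g. to grab 32 bytes of key
 * material (file name made up for the example):
 *
 *	dd if=/dev/urandom of=key.bin bs=32 count=1
 *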
 * Exported interfaces ---- input
 * ==============================
 *
 * The current exported interfaces for gathering environmental noise
 * from the devices are:
 *
 *	void add_device_randomness(const void *buf, unsigned int size);
 *	void add_input_randomness(unsigned int type, unsigned int code,
 *				  unsigned int value);
 *	void add_interrupt_randomness(int irq, int irq_flags);
 *	void add_disk_randomness(struct gendisk *disk);
 *
 * add_device_randomness() is for adding data to the random pool that
 * is likely to differ between two devices (or possibly even per boot).
 * This would be things like MAC addresses or serial numbers, or the
 * read-out of the RTC.  This does *not* add any actual entropy to the
 * pool, but it initializes the pool to different values for devices
 * that might otherwise be identical and have very little entropy
 * available to them (particularly common in the embedded world).
 *
 * add_input_randomness() uses the input layer interrupt timing, as well
 * as the event type information from the hardware.
 *
 * add_interrupt_randomness() uses the interrupt timing as random
 * inputs to the entropy pool.  Using the cycle counters and the irq source
 * as inputs, it feeds the randomness roughly once a second.
 *
 * add_disk_randomness() uses what amounts to the seek time of block
 * layer request events, on a per-disk_devt basis, as input to the
 * entropy pool.  Note that high-speed solid state drives with very low
 * seek times do not make for good sources of entropy, as their seek
 * times are usually fairly consistent.
 *
 * All of these routines try to estimate how many bits of randomness a
 * particular randomness source provides.  They do this by keeping track
 * of the first and second order deltas of the event timings.
 *
 * Ensuring unpredictability at system startup
 * ============================================
 *
 * When any operating system starts up, it will go through a sequence
 * of actions that are fairly predictable by an adversary, especially
 * if the start-up does not involve interaction with a human operator.
 * This reduces the actual number of bits of unpredictability in the
 * entropy pool below the value in entropy_count.  In order to
 * counteract this effect, it helps to carry information in the
 * entropy pool across shut-downs and start-ups.  To do this, put the
 * following lines in an appropriate script which is run during the boot
 * sequence:
 *
 *	echo "Initializing random number generator..."
 *	random_seed=/var/run/random-seed
 *	# Carry a random seed from start-up to start-up
 *	# Load and then save the whole entropy pool
 *	if [ -f $random_seed ]; then
 *		cat $random_seed >/dev/urandom
 *	else
 *		touch $random_seed
 *	fi
 *	chmod 600 $random_seed
 *	dd if=/dev/urandom of=$random_seed count=1 bs=512
 *
 * and the following lines in an appropriate script which is run as
 * the system is shutdown:
 *
 *	# Carry a random seed from shut-down to start-up
 *	# Save the whole entropy pool
 *	echo "Saving random seed..."
 *	random_seed=/var/run/random-seed
 *	touch $random_seed
 *	chmod 600 $random_seed
 *	dd if=/dev/urandom of=$random_seed count=1 bs=512
 *
 * For example, on most modern systems using the System V init
 * scripts, such code fragments would be found in
 * /etc/rc.d/init.d/random.  On older Linux systems, the correct script
 * location might be in /etc/rc.d/rc.local or /etc/rc.d/rc.0.
 *
 * Effectively, these commands cause the contents of the entropy pool
 * to be saved at shut-down time and reloaded into the entropy pool at
 * start-up.  (The 'dd' in the addition to the bootup script is to
 * make sure that /etc/random-seed is different for every start-up,
 * even if the system crashes without executing rc.0.)  Even with
 * complete knowledge of the start-up activities, predicting the state
 * of the entropy pool requires knowledge of the previous history of
 * the system.
 *
 * Configuring the /dev/random driver under Linux
 * ==============================================
 *
 * The /dev/random driver under Linux uses minor numbers 8 and 9 of
 * the /dev/mem major number (#1).  So if your system does not have
 * /dev/random and /dev/urandom created already, they can be created
 * by using the commands:
 *
 *	mknod /dev/random c 1 8
 *	mknod /dev/urandom c 1 9
 *
 * Acknowledgements:
 * =================
 *
 * Ideas for constructing this random number generator were derived
 * from Pretty Good Privacy's random number generator, and from private
 * discussions with Phil Karn.  Colin Plumb provided a faster random
 * number generator, which speeds up the mixing function of the entropy
 * pool, taken from PGPfone.  Dale Worley has also contributed many
 * useful ideas and suggestions to improve this driver.
 *
 * Any flaws in the design are solely my responsibility, and should
 * not be attributed to Phil, Colin, or any of the authors of PGP.
 *
 * Further background information on this topic may be obtained from
 * RFC 1750, "Randomness Recommendations for Security", by Donald
 * Eastlake, Steve Crocker, and Jeff Schiller.
 */

#include <linux/utsname.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/major.h>
#include <linux/string.h>
#include <linux/fcntl.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/poll.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/genhd.h>
#include <linux/interrupt.h>
#include <linux/mm.h>
#include <linux/nodemask.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/percpu.h>
#include <linux/cryptohash.h>
#include <linux/fips.h>
#include <linux/ptrace.h>
#include <linux/kmemcheck.h>
#include <linux/workqueue.h>
#include <linux/irq.h>
#include <linux/syscalls.h>
#include <linux/completion.h>
#include <linux/uuid.h>
#include <crypto/chacha20.h>

#include <asm/processor.h>
#include <linux/uaccess.h>
#include <asm/irq_regs.h>

#define CREATE_TRACE_POINTS
#include <trace/events/random.h>

/* #define ADD_INTERRUPT_BENCH */

/*
 * Configuration information
 */
#define INPUT_POOL_SHIFT	12
#define INPUT_POOL_WORDS	(1 << (INPUT_POOL_SHIFT-5))
#define OUTPUT_POOL_SHIFT	10
#define OUTPUT_POOL_WORDS	(1 << (OUTPUT_POOL_SHIFT-5))
#define SEC_XFER_SIZE		512
#define EXTRACT_SIZE		10

#define DEBUG_RANDOM_BOOT 0

#define LONGS(x) (((x) + sizeof(unsigned long) - 1)/sizeof(unsigned long))
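
/*
 * Worked example of the size arithmetic above: INPUT_POOL_WORDS is
 * 1 << (12 - 5) = 128 32-bit words, i.e. 512 bytes or 4096 bits of
 * input pool; OUTPUT_POOL_WORDS is 1 << (10 - 5) = 32 words, i.e.
 * 1024 bits of output (blocking) pool.
 */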

/*
 * To allow fractional bits to be tracked, the entropy_count field is
 * denominated in units of 1/8th bits.
 *
 * 2*(ENTROPY_SHIFT + log2(poolbits)) must be <= 31, or the multiply in
 * credit_entropy_bits() needs to be 64 bits wide.
 */
#define ENTROPY_SHIFT 3
#define ENTROPY_BITS(r) ((r)->entropy_count >> ENTROPY_SHIFT)
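
/*
 * For example, with ENTROPY_SHIFT of 3 an entropy_count of 96 stands
 * for 96 / 8 = 12 whole bits of credited entropy, which is what
 * ENTROPY_BITS() reports.
 */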

/*
 * The minimum number of bits of entropy before we wake up a read on
 * /dev/random.  Should be enough to do a significant reseed.
 */
static int random_read_wakeup_bits = 64;

/*
 * If the entropy count falls under this number of bits, then we
 * should wake up processes which are selecting or polling on write
 * access to /dev/random.
 */
static int random_write_wakeup_bits = 28 * OUTPUT_POOL_WORDS;

/*
 * Variable is currently unused but left for user space compatibility.
 */
static int random_min_urandom_seed = 60;

/*
 * Originally, we used a primitive polynomial of degree .poolwords
 * over GF(2).  The taps for various sizes are defined below.  They
 * were chosen to be evenly spaced except for the last tap, which is 1
 * to get the twisting happening as fast as possible.
 *
 * For the purposes of better mixing, we use the CRC-32 polynomial as
 * well to make a (modified) twisted Generalized Feedback Shift
 * Register.  (See M. Matsumoto & Y. Kurita, 1992.  Twisted GFSR
 * generators.  ACM Transactions on Modeling and Computer Simulation
 * 2(3):179-194.  Also see M. Matsumoto & Y. Kurita, 1994.  Twisted
 * GFSR generators II.  ACM Transactions on Modeling and Computer
 * Simulation 4:254-266)
 *
 * Thanks to Colin Plumb for suggesting this.
 *
 * The mixing operation is much less sensitive than the output hash,
 * where we use SHA-1.  All that we want of the mixing operation is that
 * it be a good non-cryptographic hash; i.e. it not produce collisions
 * when fed "random" data of the sort we expect to see.  As long as
 * the pool state differs for different inputs, we have preserved the
 * input entropy and done a good job.  The fact that an intelligent
 * attacker can construct inputs that will produce controlled
 * alterations to the pool's state is not important because we don't
 * consider such inputs to contribute any randomness.  The only
 * property we need with respect to them is that the attacker can't
 * increase his/her knowledge of the pool's state.  Since all
 * additions are reversible (knowing the final state and the input,
 * you can reconstruct the initial state), if an attacker has any
 * uncertainty about the initial state, he/she can only shuffle that
 * uncertainty about, but never cause any collisions (which would
 * decrease the uncertainty).
 *
 * Our mixing functions were analyzed by Lacharme, Roeck, Strubel, and
 * Videau in their paper, "The Linux Pseudorandom Number Generator
 * Revisited" (see: http://eprint.iacr.org/2012/251.pdf).  In their
 * paper, they point out that we are not using a true Twisted GFSR,
 * since Matsumoto & Kurita used a trinomial feedback polynomial (that
 * is, with only three taps, instead of the six that we are using).
 * As a result, the resulting polynomial is neither primitive nor
 * irreducible, and hence does not have a maximal period over
 * GF(2**32).  They suggest a slight change to the generator
 * polynomial which improves the resulting TGFSR polynomial to be
 * irreducible, which we have made here.
 */
static struct poolinfo {
	int poolbitshift, poolwords, poolbytes, poolbits, poolfracbits;
#define S(x) ilog2(x)+5, (x), (x)*4, (x)*32, (x) << (ENTROPY_SHIFT+5)
	int tap1, tap2, tap3, tap4, tap5;
} poolinfo_table[] = {
	/* was: x^128 + x^103 + x^76 + x^51 +x^25 + x + 1 */
	/* x^128 + x^104 + x^76 + x^51 +x^25 + x + 1 */
	{ S(128),	104,	76,	51,	25,	1 },
	/* was: x^32 + x^26 + x^20 + x^14 + x^7 + x + 1 */
	/* x^32 + x^26 + x^19 + x^14 + x^7 + x + 1 */
	{ S(32),	26,	19,	14,	7,	1 },
#if 0
	/* x^2048 + x^1638 + x^1231 + x^819 + x^411 + x + 1  -- 115 */
	{ S(2048),	1638,	1231,	819,	411,	1 },

	/* x^1024 + x^817 + x^615 + x^412 + x^204 + x + 1 -- 290 */
	{ S(1024),	817,	615,	412,	204,	1 },

	/* x^1024 + x^819 + x^616 + x^410 + x^207 + x^2 + 1 -- 115 */
	{ S(1024),	819,	616,	410,	207,	2 },

	/* x^512 + x^411 + x^308 + x^208 + x^104 + x + 1 -- 225 */
	{ S(512),	411,	308,	208,	104,	1 },

	/* x^512 + x^409 + x^307 + x^206 + x^102 + x^2 + 1 -- 95 */
	{ S(512),	409,	307,	206,	102,	2 },
	/* x^512 + x^409 + x^309 + x^205 + x^103 + x^2 + 1 -- 95 */
	{ S(512),	409,	309,	205,	103,	2 },

	/* x^256 + x^205 + x^155 + x^101 + x^52 + x + 1 -- 125 */
	{ S(256),	205,	155,	101,	52,	1 },

	/* x^128 + x^103 + x^78 + x^51 + x^27 + x^2 + 1 -- 70 */
	{ S(128),	103,	78,	51,	27,	2 },

	/* x^64 + x^52 + x^39 + x^26 + x^14 + x + 1 -- 15 */
	{ S(64),	52,	39,	26,	14,	1 },
#endif
};

/*
 * Static global variables
 */
static DECLARE_WAIT_QUEUE_HEAD(random_read_wait);
static DECLARE_WAIT_QUEUE_HEAD(random_write_wait);
static struct fasync_struct *fasync;

static DEFINE_SPINLOCK(random_ready_list_lock);
static LIST_HEAD(random_ready_list);

struct crng_state {
	__u32		state[16];
	unsigned long	init_time;
	spinlock_t	lock;
};

struct crng_state primary_crng = {
	.lock = __SPIN_LOCK_UNLOCKED(primary_crng.lock),
};

/*
 * crng_init =  0 --> Uninitialized
 *		1 --> Initialized
 *		2 --> Initialized from input_pool
 *
 * crng_init is protected by primary_crng->lock, and only increases
 * its value (from 0->1->2).
 */
static int crng_init = 0;
#define crng_ready() (likely(crng_init > 0))
static int crng_init_cnt = 0;
#define CRNG_INIT_CNT_THRESH (2*CHACHA20_KEY_SIZE)
static void _extract_crng(struct crng_state *crng,
			  __u8 out[CHACHA20_BLOCK_SIZE]);
static void _crng_backtrack_protect(struct crng_state *crng,
				    __u8 tmp[CHACHA20_BLOCK_SIZE], int used);
static void process_random_ready_list(void);

/**********************************************************************
 *
 * OS independent entropy store.  Here are the functions which handle
 * storing entropy in an entropy pool.
 *
 **********************************************************************/

struct entropy_store;
struct entropy_store {
	/* read-only data: */
	const struct poolinfo *poolinfo;
	__u32 *pool;
	const char *name;
	struct entropy_store *pull;
	struct work_struct push_work;

	/* read-write data: */
	unsigned long last_pulled;
	spinlock_t lock;
	unsigned short add_ptr;
	unsigned short input_rotate;
	int entropy_count;
	int entropy_total;
	unsigned int initialized:1;
	unsigned int last_data_init:1;
	__u8 last_data[EXTRACT_SIZE];
};

static ssize_t extract_entropy(struct entropy_store *r, void *buf,
			       size_t nbytes, int min, int rsvd);
static ssize_t _extract_entropy(struct entropy_store *r, void *buf,
				size_t nbytes, int fips);

static void crng_reseed(struct crng_state *crng, struct entropy_store *r);
static void push_to_pool(struct work_struct *work);
static __u32 input_pool_data[INPUT_POOL_WORDS] __latent_entropy;
static __u32 blocking_pool_data[OUTPUT_POOL_WORDS] __latent_entropy;

static struct entropy_store input_pool = {
	.poolinfo = &poolinfo_table[0],
	.name = "input",
	.lock = __SPIN_LOCK_UNLOCKED(input_pool.lock),
	.pool = input_pool_data
};

static struct entropy_store blocking_pool = {
	.poolinfo = &poolinfo_table[1],
	.name = "blocking",
	.pull = &input_pool,
	.lock = __SPIN_LOCK_UNLOCKED(blocking_pool.lock),
	.pool = blocking_pool_data,
	.push_work = __WORK_INITIALIZER(blocking_pool.push_work,
					push_to_pool),
};

static __u32 const twist_table[8] = {
	0x00000000, 0x3b6e20c8, 0x76dc4190, 0x4db26158,
	0xedb88320, 0xd6d6a3e8, 0x9b64c2b0, 0xa00ae278 };
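
/*
 * Note on the table above: _mix_pool_bytes() below computes
 * (w >> 3) ^ twist_table[w & 7], so the entry selected by the three
 * low-order bits folds those bits back into the word via the CRC-32
 * style constants mentioned in the header comment.
 */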

/*
 * This function adds bytes into the entropy "pool".  It does not
 * update the entropy estimate.  The caller should call
 * credit_entropy_bits if this is appropriate.
 *
 * The pool is stirred with a primitive polynomial of the appropriate
 * degree, and then twisted.  We twist by three bits at a time because
 * it's cheap to do so and helps slightly in the expected case where
 * the entropy is concentrated in the low-order bits.
 */
static void _mix_pool_bytes(struct entropy_store *r, const void *in,
			    int nbytes)
{
	unsigned long i, tap1, tap2, tap3, tap4, tap5;
	int input_rotate;
	int wordmask = r->poolinfo->poolwords - 1;
	const char *bytes = in;
	__u32 w;

	tap1 = r->poolinfo->tap1;
	tap2 = r->poolinfo->tap2;
	tap3 = r->poolinfo->tap3;
	tap4 = r->poolinfo->tap4;
	tap5 = r->poolinfo->tap5;

	input_rotate = r->input_rotate;
	i = r->add_ptr;

	/* mix one byte at a time to simplify size handling and churn faster */
	while (nbytes--) {
		w = rol32(*bytes++, input_rotate);
		i = (i - 1) & wordmask;

		/* XOR in the various taps */
		w ^= r->pool[i];
		w ^= r->pool[(i + tap1) & wordmask];
		w ^= r->pool[(i + tap2) & wordmask];
		w ^= r->pool[(i + tap3) & wordmask];
		w ^= r->pool[(i + tap4) & wordmask];
		w ^= r->pool[(i + tap5) & wordmask];

		/* Mix the result back in with a twist */
		r->pool[i] = (w >> 3) ^ twist_table[w & 7];

		/*
		 * Normally, we add 7 bits of rotation to the pool.
		 * At the beginning of the pool, add an extra 7 bits
		 * rotation, so that successive passes spread the
		 * input bits across the pool evenly.
		 */
		input_rotate = (input_rotate + (i ? 7 : 14)) & 31;
	}

	r->input_rotate = input_rotate;
	r->add_ptr = i;
}

static void __mix_pool_bytes(struct entropy_store *r, const void *in,
			     int nbytes)
{
	trace_mix_pool_bytes_nolock(r->name, nbytes, _RET_IP_);
	_mix_pool_bytes(r, in, nbytes);
}

static void mix_pool_bytes(struct entropy_store *r, const void *in,
			   int nbytes)
{
	unsigned long flags;

	trace_mix_pool_bytes(r->name, nbytes, _RET_IP_);
	spin_lock_irqsave(&r->lock, flags);
	_mix_pool_bytes(r, in, nbytes);
	spin_unlock_irqrestore(&r->lock, flags);
}

struct fast_pool {
	__u32		pool[4];
	unsigned long	last;
	unsigned short	reg_idx;
	unsigned char	count;
};

/*
 * This is a fast mixing routine used by the interrupt randomness
 * collector.  It's hardcoded for a 128-bit pool and assumes that any
 * locks that might be needed are taken by the caller.
 */
static void fast_mix(struct fast_pool *f)
{
	__u32 a = f->pool[0],	b = f->pool[1];
	__u32 c = f->pool[2],	d = f->pool[3];

	a += b;			c += d;
	b = rol32(b, 6);	d = rol32(d, 27);
	d ^= a;			b ^= c;

	a += b;			c += d;
	b = rol32(b, 16);	d = rol32(d, 14);
	d ^= a;			b ^= c;

	a += b;			c += d;
	b = rol32(b, 6);	d = rol32(d, 27);
	d ^= a;			b ^= c;

	a += b;			c += d;
	b = rol32(b, 16);	d = rol32(d, 14);
	d ^= a;			b ^= c;

	f->pool[0] = a;  f->pool[1] = b;
	f->pool[2] = c;  f->pool[3] = d;
	f->count++;
}

static void process_random_ready_list(void)
{
	unsigned long flags;
	struct random_ready_callback *rdy, *tmp;

	spin_lock_irqsave(&random_ready_list_lock, flags);
	list_for_each_entry_safe(rdy, tmp, &random_ready_list, list) {
		struct module *owner = rdy->owner;

		list_del_init(&rdy->list);
		rdy->func(rdy);
		module_put(owner);
	}
	spin_unlock_irqrestore(&random_ready_list_lock, flags);
}

/*
 * Credit (or debit) the entropy store with n bits of entropy.
 * Use credit_entropy_bits_safe() if the value comes from userspace
 * or otherwise should be checked for extreme values.
 */
static void credit_entropy_bits(struct entropy_store *r, int nbits)
{
	int entropy_count, orig;
	const int pool_size = r->poolinfo->poolfracbits;
	int nfrac = nbits << ENTROPY_SHIFT;

	if (!nbits)
		return;

retry:
	entropy_count = orig = ACCESS_ONCE(r->entropy_count);
	if (nfrac < 0) {
		/* Debit */
		entropy_count += nfrac;
	} else {
		/*
		 * Credit: we have to account for the possibility of
		 * overwriting already present entropy.  Even in the
		 * ideal case of pure Shannon entropy, new contributions
		 * approach the full value asymptotically:
		 *
		 * entropy <- entropy + (pool_size - entropy) *
		 *	(1 - exp(-add_entropy/pool_size))
		 *
		 * For add_entropy <= pool_size/2 then
		 * (1 - exp(-add_entropy/pool_size)) >=
		 *    (add_entropy/pool_size)*0.7869...
		 * so we can approximate the exponential with
		 * 3/4*add_entropy/pool_size and still be on the
		 * safe side by adding at most pool_size/2 at a time.
		 *
		 * The use of pool_size-2 in the while statement is to
		 * prevent rounding artifacts from making the loop
		 * arbitrarily long; this limits the loop to log2(pool_size)*2
		 * turns no matter how large nbits is.
		 */
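		/*
		 * Rough worked example of the approximation above: with a
		 * 4096-bit pool that is currently half full, crediting 64
		 * fresh bits adds roughly 3/4 * 64 * (4096 - 2048)/4096
		 * = 24 bits to the estimate rather than the full 64.
		 */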
		int pnfrac = nfrac;
		const int s = r->poolinfo->poolbitshift + ENTROPY_SHIFT + 2;
		/* The +2 corresponds to the /4 in the denominator */

		do {
			unsigned int anfrac = min(pnfrac, pool_size/2);
			unsigned int add =
				((pool_size - entropy_count)*anfrac*3) >> s;

			entropy_count += add;
			pnfrac -= anfrac;
		} while (unlikely(entropy_count < pool_size-2 && pnfrac));
	}

	if (unlikely(entropy_count < 0)) {
		pr_warn("random: negative entropy/overflow: pool %s count %d\n",
			r->name, entropy_count);
		WARN_ON(1);
		entropy_count = 0;
	} else if (entropy_count > pool_size)
		entropy_count = pool_size;
	if (cmpxchg(&r->entropy_count, orig, entropy_count) != orig)
		goto retry;

	r->entropy_total += nbits;
	if (!r->initialized && r->entropy_total > 128) {
		r->initialized = 1;
		r->entropy_total = 0;
	}

	trace_credit_entropy_bits(r->name, nbits,
				  entropy_count >> ENTROPY_SHIFT,
				  r->entropy_total, _RET_IP_);

	if (r == &input_pool) {
		int entropy_bits = entropy_count >> ENTROPY_SHIFT;

		if (crng_init < 2 && entropy_bits >= 128) {
			crng_reseed(&primary_crng, r);
			entropy_bits = r->entropy_count >> ENTROPY_SHIFT;
		}

		/* should we wake readers? */
		if (entropy_bits >= random_read_wakeup_bits) {
			wake_up_interruptible(&random_read_wait);
			kill_fasync(&fasync, SIGIO, POLL_IN);
		}
		/* If the input pool is getting full, send some
		 * entropy to the blocking pool until it is 75% full.
		 */
		if (entropy_bits > random_write_wakeup_bits &&
		    r->initialized &&
		    r->entropy_total >= 2*random_read_wakeup_bits) {
			struct entropy_store *other = &blocking_pool;

			if (other->entropy_count <=
			    3 * other->poolinfo->poolfracbits / 4) {
				schedule_work(&other->push_work);
				r->entropy_total = 0;
			}
		}
	}
}

static int credit_entropy_bits_safe(struct entropy_store *r, int nbits)
{
	const int nbits_max = (int)(~0U >> (ENTROPY_SHIFT + 1));

	if (nbits < 0)
		return -EINVAL;

	/* Cap the value to avoid overflows */
	nbits = min(nbits, nbits_max);

	credit_entropy_bits(r, nbits);
	return 0;
}

/*********************************************************************
 *
 * CRNG using CHACHA20
 *
 *********************************************************************/

#define CRNG_RESEED_INTERVAL (300*HZ)

static DECLARE_WAIT_QUEUE_HEAD(crng_init_wait);

#ifdef CONFIG_NUMA
/*
 * Hack to deal with crazy userspace programs when they are all trying
 * to access /dev/urandom in parallel.  The programs are almost
 * certainly doing something terribly wrong, but we'll work around
 * their brain damage.
 */
static struct crng_state **crng_node_pool __read_mostly;
#endif

static void crng_initialize(struct crng_state *crng)
{
	int		i;
	unsigned long	rv;

	memcpy(&crng->state[0], "expand 32-byte k", 16);
	if (crng == &primary_crng)
		_extract_entropy(&input_pool, &crng->state[4],
				 sizeof(__u32) * 12, 0);
	else
		get_random_bytes(&crng->state[4], sizeof(__u32) * 12);
	for (i = 4; i < 16; i++) {
		if (!arch_get_random_seed_long(&rv) &&
		    !arch_get_random_long(&rv))
			rv = random_get_entropy();
		crng->state[i] ^= rv;
	}
	crng->init_time = jiffies - CRNG_RESEED_INTERVAL - 1;
}

static int crng_fast_load(const char *cp, size_t len)
{
	unsigned long flags;
	unsigned char *p;

	if (!spin_trylock_irqsave(&primary_crng.lock, flags))
		return 0;
	if (crng_ready()) {
		spin_unlock_irqrestore(&primary_crng.lock, flags);
		return 0;
	}
	p = (unsigned char *) &primary_crng.state[4];
	while (len > 0 && crng_init_cnt < CRNG_INIT_CNT_THRESH) {
		p[crng_init_cnt % CHACHA20_KEY_SIZE] ^= *cp;
		cp++; crng_init_cnt++; len--;
	}
	if (crng_init_cnt >= CRNG_INIT_CNT_THRESH) {
		crng_init = 1;
		wake_up_interruptible(&crng_init_wait);
		pr_notice("random: fast init done\n");
	}
	spin_unlock_irqrestore(&primary_crng.lock, flags);
	return 1;
}

static void crng_reseed(struct crng_state *crng, struct entropy_store *r)
{
	unsigned long	flags;
	int		i, num;
	union {
		__u8	block[CHACHA20_BLOCK_SIZE];
		__u32	key[8];
	} buf;

	if (r) {
		num = extract_entropy(r, &buf, 32, 16, 0);
		if (num == 0)
			return;
	} else {
		_extract_crng(&primary_crng, buf.block);
		_crng_backtrack_protect(&primary_crng, buf.block,
					CHACHA20_KEY_SIZE);
	}
	spin_lock_irqsave(&primary_crng.lock, flags);
	for (i = 0; i < 8; i++) {
		unsigned long	rv;
		if (!arch_get_random_seed_long(&rv) &&
		    !arch_get_random_long(&rv))
			rv = random_get_entropy();
		crng->state[i+4] ^= buf.key[i] ^ rv;
	}
	memzero_explicit(&buf, sizeof(buf));
	crng->init_time = jiffies;
	if (crng == &primary_crng && crng_init < 2) {
		crng_init = 2;
		process_random_ready_list();
		wake_up_interruptible(&crng_init_wait);
		pr_notice("random: crng init done\n");
	}
	spin_unlock_irqrestore(&primary_crng.lock, flags);
}

static inline void crng_wait_ready(void)
{
	wait_event_interruptible(crng_init_wait, crng_ready());
}

static void _extract_crng(struct crng_state *crng,
			  __u8 out[CHACHA20_BLOCK_SIZE])
{
	unsigned long v, flags;

	if (crng_init > 1 &&
	    time_after(jiffies, crng->init_time + CRNG_RESEED_INTERVAL))
		crng_reseed(crng, crng == &primary_crng ? &input_pool : NULL);
	spin_lock_irqsave(&crng->lock, flags);
	if (arch_get_random_long(&v))
		crng->state[14] ^= v;
	chacha20_block(&crng->state[0], out);
	if (crng->state[12] == 0)
		crng->state[13]++;
	spin_unlock_irqrestore(&crng->lock, flags);
}

static void extract_crng(__u8 out[CHACHA20_BLOCK_SIZE])
{
	struct crng_state *crng = NULL;

#ifdef CONFIG_NUMA
	if (crng_node_pool)
		crng = crng_node_pool[numa_node_id()];
	if (crng == NULL)
#endif
		crng = &primary_crng;
	_extract_crng(crng, out);
}

/*
 * Use the leftover bytes from the CRNG block output (if there is
 * enough) to mutate the CRNG key to provide backtracking protection.
 */
static void _crng_backtrack_protect(struct crng_state *crng,
				    __u8 tmp[CHACHA20_BLOCK_SIZE], int used)
{
	unsigned long	flags;
	__u32		*s, *d;
	int		i;

	used = round_up(used, sizeof(__u32));
	if (used + CHACHA20_KEY_SIZE > CHACHA20_BLOCK_SIZE) {
		extract_crng(tmp);
		used = 0;
	}
	spin_lock_irqsave(&crng->lock, flags);
	s = (__u32 *) &tmp[used];
	d = &crng->state[4];
	for (i=0; i < 8; i++)
		*d++ ^= *s++;
	spin_unlock_irqrestore(&crng->lock, flags);
}

static void crng_backtrack_protect(__u8 tmp[CHACHA20_BLOCK_SIZE], int used)
{
	struct crng_state *crng = NULL;

#ifdef CONFIG_NUMA
	if (crng_node_pool)
		crng = crng_node_pool[numa_node_id()];
	if (crng == NULL)
#endif
		crng = &primary_crng;
	_crng_backtrack_protect(crng, tmp, used);
}

static ssize_t extract_crng_user(void __user *buf, size_t nbytes)
{
	ssize_t ret = 0, i = CHACHA20_BLOCK_SIZE;
	__u8 tmp[CHACHA20_BLOCK_SIZE];
	int large_request = (nbytes > 256);

	while (nbytes) {
		if (large_request && need_resched()) {
			if (signal_pending(current)) {
				if (ret == 0)
					ret = -ERESTARTSYS;
				break;
			}
			schedule();
		}

		extract_crng(tmp);
		i = min_t(int, nbytes, CHACHA20_BLOCK_SIZE);
		if (copy_to_user(buf, tmp, i)) {
			ret = -EFAULT;
			break;
		}

		nbytes -= i;
		buf += i;
		ret += i;
	}
	crng_backtrack_protect(tmp, i);

	/* Wipe data just written to memory */
	memzero_explicit(tmp, sizeof(tmp));

	return ret;
}

/*********************************************************************
 *
 * Entropy input management
 *
 *********************************************************************/

/* There is one of these per entropy source */
struct timer_rand_state {
	cycles_t last_time;
	long last_delta, last_delta2;
	unsigned dont_count_entropy:1;
};

#define INIT_TIMER_RAND_STATE { INITIAL_JIFFIES, };

/*
 * Add device- or boot-specific data to the input pool to help
 * initialize it.
 *
 * None of this adds any entropy; it is meant to avoid the problem of
 * the entropy pool having similar initial state across largely
 * identical devices.
 */
void add_device_randomness(const void *buf, unsigned int size)
{
	unsigned long time = random_get_entropy() ^ jiffies;
	unsigned long flags;

	trace_add_device_randomness(size, _RET_IP_);
	spin_lock_irqsave(&input_pool.lock, flags);
	_mix_pool_bytes(&input_pool, buf, size);
	_mix_pool_bytes(&input_pool, &time, sizeof(time));
	spin_unlock_irqrestore(&input_pool.lock, flags);
}
EXPORT_SYMBOL(add_device_randomness);

static struct timer_rand_state input_timer_state = INIT_TIMER_RAND_STATE;

/*
 * This function adds entropy to the entropy "pool" by using timing
 * delays.  It uses the timer_rand_state structure to make an estimate
 * of how many bits of entropy this call has added to the pool.
 *
 * The number "num" is also added to the pool - it should somehow describe
 * the type of event which just happened.  This is currently 0-255 for
 * keyboard scan codes, and 256 upwards for interrupts.
 */
static void add_timer_randomness(struct timer_rand_state *state, unsigned num)
{
	struct entropy_store	*r;
	struct {
		long jiffies;
		unsigned cycles;
		unsigned num;
	} sample;
	long delta, delta2, delta3;

	preempt_disable();

	sample.jiffies = jiffies;
	sample.cycles = random_get_entropy();
	sample.num = num;
	r = &input_pool;
	mix_pool_bytes(r, &sample, sizeof(sample));

	/*
	 * Calculate number of bits of randomness we probably added.
	 * We take into account the first, second and third-order deltas
	 * in order to make our estimate.
	 */

	if (!state->dont_count_entropy) {
		delta = sample.jiffies - state->last_time;
		state->last_time = sample.jiffies;

		delta2 = delta - state->last_delta;
		state->last_delta = delta;

		delta3 = delta2 - state->last_delta2;
		state->last_delta2 = delta2;

		if (delta < 0)
			delta = -delta;
		if (delta2 < 0)
			delta2 = -delta2;
		if (delta3 < 0)
			delta3 = -delta3;
		if (delta > delta2)
			delta = delta2;
		if (delta > delta3)
			delta = delta3;

		/*
		 * delta is now minimum absolute delta.
		 * Round down by 1 bit on general principles,
		 * and limit entropy estimate to 12 bits.
		 */
		credit_entropy_bits(r, min_t(int, fls(delta>>1), 11));
	}
	preempt_enable();
}

void add_input_randomness(unsigned int type, unsigned int code,
				 unsigned int value)
{
	static unsigned char last_value;

	/* ignore autorepeat and the like */
	if (value == last_value)
		return;

	last_value = value;
	add_timer_randomness(&input_timer_state,
			     (type << 4) ^ code ^ (code >> 4) ^ value);
	trace_add_input_randomness(ENTROPY_BITS(&input_pool));
}
EXPORT_SYMBOL_GPL(add_input_randomness);

static DEFINE_PER_CPU(struct fast_pool, irq_randomness);

#ifdef ADD_INTERRUPT_BENCH
static unsigned long avg_cycles, avg_deviation;

#define AVG_SHIFT 8     /* Exponential average factor k=1/256 */
#define FIXED_1_2 (1 << (AVG_SHIFT-1))

static void add_interrupt_bench(cycles_t start)
{
	long delta = random_get_entropy() - start;

	/* Use a weighted moving average */
	delta = delta - ((avg_cycles + FIXED_1_2) >> AVG_SHIFT);
	avg_cycles += delta;
	/* And average deviation */
	delta = abs(delta) - ((avg_deviation + FIXED_1_2) >> AVG_SHIFT);
	avg_deviation += delta;
}
#else
#define add_interrupt_bench(x)
#endif

static __u32 get_reg(struct fast_pool *f, struct pt_regs *regs)
{
	__u32 *ptr = (__u32 *) regs;

	if (regs == NULL)
		return 0;
	if (f->reg_idx >= sizeof(struct pt_regs) / sizeof(__u32))
		f->reg_idx = 0;
	return *(ptr + f->reg_idx++);
}

void add_interrupt_randomness(int irq, int irq_flags)
{
	struct entropy_store	*r;
	struct fast_pool	*fast_pool = this_cpu_ptr(&irq_randomness);
	struct pt_regs		*regs = get_irq_regs();
	unsigned long		now = jiffies;
	cycles_t		cycles = random_get_entropy();
	__u32			c_high, j_high;
	__u64			ip;
	unsigned long		seed;
	int			credit = 0;

	if (cycles == 0)
		cycles = get_reg(fast_pool, regs);
	c_high = (sizeof(cycles) > 4) ? cycles >> 32 : 0;
	j_high = (sizeof(now) > 4) ? now >> 32 : 0;
	fast_pool->pool[0] ^= cycles ^ j_high ^ irq;
	fast_pool->pool[1] ^= now ^ c_high;
	ip = regs ? instruction_pointer(regs) : _RET_IP_;
	fast_pool->pool[2] ^= ip;
	fast_pool->pool[3] ^= (sizeof(ip) > 4) ? ip >> 32 :
		get_reg(fast_pool, regs);

	fast_mix(fast_pool);
	add_interrupt_bench(cycles);

	if (!crng_ready()) {
		if ((fast_pool->count >= 64) &&
		    crng_fast_load((char *) fast_pool->pool,
				   sizeof(fast_pool->pool))) {
			fast_pool->count = 0;
			fast_pool->last = now;
		}
		return;
	}

	if ((fast_pool->count < 64) &&
	    !time_after(now, fast_pool->last + HZ))
		return;

	r = &input_pool;
	if (!spin_trylock(&r->lock))
		return;

	fast_pool->last = now;
	__mix_pool_bytes(r, &fast_pool->pool, sizeof(fast_pool->pool));

	/*
	 * If we have architectural seed generator, produce a seed and
	 * add it to the pool.  For the sake of paranoia don't let the
	 * architectural seed generator dominate the input from the
	 * interrupt noise.
	 */
	if (arch_get_random_seed_long(&seed)) {
		__mix_pool_bytes(r, &seed, sizeof(seed));
		credit = 1;
	}
	spin_unlock(&r->lock);

	fast_pool->count = 0;

	/* award one bit for the contents of the fast pool */
	credit_entropy_bits(r, credit + 1);
}
EXPORT_SYMBOL_GPL(add_interrupt_randomness);

#ifdef CONFIG_BLOCK
void add_disk_randomness(struct gendisk *disk)
{
	if (!disk || !disk->random)
		return;
	/* first major is 1, so we get >= 0x200 here */
	add_timer_randomness(disk->random, 0x100 + disk_devt(disk));
	trace_add_disk_randomness(disk_devt(disk), ENTROPY_BITS(&input_pool));
}
EXPORT_SYMBOL_GPL(add_disk_randomness);
#endif

/*********************************************************************
 *
 * Entropy extraction routines
 *
 *********************************************************************/

/*
 * This utility inline function is responsible for transferring entropy
 * from the primary pool to the secondary extraction pool.  We make
 * sure we pull enough for a 'catastrophic reseed'.
 */
static void _xfer_secondary_pool(struct entropy_store *r, size_t nbytes);
static void xfer_secondary_pool(struct entropy_store *r, size_t nbytes)
{
	if (!r->pull ||
	    r->entropy_count >= (nbytes << (ENTROPY_SHIFT + 3)) ||
	    r->entropy_count > r->poolinfo->poolfracbits)
		return;

	_xfer_secondary_pool(r, nbytes);
}

static void _xfer_secondary_pool(struct entropy_store *r, size_t nbytes)
{
	__u32	tmp[OUTPUT_POOL_WORDS];
	int	bytes = nbytes;

	/* pull at least as much as a wakeup */
	bytes = max_t(int, bytes, random_read_wakeup_bits / 8);
	/* but never more than the buffer size */
	bytes = min_t(int, bytes, sizeof(tmp));

	trace_xfer_secondary_pool(r->name, bytes * 8, nbytes * 8,
				  ENTROPY_BITS(r), ENTROPY_BITS(r->pull));
	bytes = extract_entropy(r->pull, tmp, bytes,
				random_read_wakeup_bits / 8, 0);
	mix_pool_bytes(r, tmp, bytes);
	credit_entropy_bits(r, bytes*8);
}

/*
 * Used as a workqueue function so that when the input pool is getting
 * full, we can "spill over" some entropy to the output pools.  That
 * way the output pools can store some of the excess entropy instead
 * of letting it go to waste.
 */
static void push_to_pool(struct work_struct *work)
{
	struct entropy_store *r = container_of(work, struct entropy_store,
					       push_work);
	BUG_ON(!r);
	_xfer_secondary_pool(r, random_read_wakeup_bits/8);
	trace_push_to_pool(r->name, r->entropy_count >> ENTROPY_SHIFT,
			   r->pull->entropy_count >> ENTROPY_SHIFT);
}

/*
 * This function decides how many bytes to actually take from the
 * given pool, and also debits the entropy count accordingly.
 */
static size_t account(struct entropy_store *r, size_t nbytes, int min,
		      int reserved)
{
	int entropy_count, orig, have_bytes;
	size_t ibytes, nfrac;

	BUG_ON(r->entropy_count > r->poolinfo->poolfracbits);

	/* Can we pull enough? */
retry:
	entropy_count = orig = ACCESS_ONCE(r->entropy_count);
	ibytes = nbytes;
	/* never pull more than available */
	have_bytes = entropy_count >> (ENTROPY_SHIFT + 3);

	if ((have_bytes -= reserved) < 0)
		have_bytes = 0;
	ibytes = min_t(size_t, ibytes, have_bytes);
	if (ibytes < min)
		ibytes = 0;

	if (unlikely(entropy_count < 0)) {
		pr_warn("random: negative entropy count: pool %s count %d\n",
			r->name, entropy_count);
		WARN_ON(1);
		entropy_count = 0;
	}
	nfrac = ibytes << (ENTROPY_SHIFT + 3);
	if ((size_t) entropy_count > nfrac)
		entropy_count -= nfrac;
	else
		entropy_count = 0;

	if (cmpxchg(&r->entropy_count, orig, entropy_count) != orig)
		goto retry;

	trace_debit_entropy(r->name, 8 * ibytes);
	if (ibytes &&
	    (r->entropy_count >> ENTROPY_SHIFT) < random_write_wakeup_bits) {
		wake_up_interruptible(&random_write_wait);
		kill_fasync(&fasync, SIGIO, POLL_OUT);
	}

	return ibytes;
}

/*
 * This function does the actual extraction for extract_entropy and
 * extract_entropy_user.
 *
 * Note: we assume that .poolwords is a multiple of 16 words.
 */
static void extract_buf(struct entropy_store *r, __u8 *out)
{
	int i;
	union {
		__u32 w[5];
		unsigned long l[LONGS(20)];
	} hash;
	__u32 workspace[SHA_WORKSPACE_WORDS];
	unsigned long flags;

	/*
	 * If we have an architectural hardware random number
	 * generator, use it for SHA's initial vector
	 */
	sha_init(hash.w);
	for (i = 0; i < LONGS(20); i++) {
		unsigned long v;
		if (!arch_get_random_long(&v))
			break;
		hash.l[i] = v;
	}

	/* Generate a hash across the pool, 16 words (512 bits) at a time */
	spin_lock_irqsave(&r->lock, flags);
	for (i = 0; i < r->poolinfo->poolwords; i += 16)
		sha_transform(hash.w, (__u8 *)(r->pool + i), workspace);

	/*
	 * We mix the hash back into the pool to prevent backtracking
	 * attacks (where the attacker knows the state of the pool
	 * plus the current outputs, and attempts to find previous
	 * outputs), unless the hash function can be inverted.  By
	 * mixing at least a SHA1 worth of hash data back, we make
	 * brute-forcing the feedback as hard as brute-forcing the
	 * hash.
	 */
	__mix_pool_bytes(r, hash.w, sizeof(hash.w));
	spin_unlock_irqrestore(&r->lock, flags);

	memzero_explicit(workspace, sizeof(workspace));

	/*
	 * In case the hash function has some recognizable output
	 * pattern, we fold it in half.  Thus, we always feed back
	 * twice as much data as we output.
	 */
	hash.w[0] ^= hash.w[3];
	hash.w[1] ^= hash.w[4];
	hash.w[2] ^= rol32(hash.w[2], 16);

	memcpy(out, &hash, EXTRACT_SIZE);
	memzero_explicit(&hash, sizeof(hash));
}

static ssize_t _extract_entropy(struct entropy_store *r, void *buf,
				size_t nbytes, int fips)
{
	ssize_t ret = 0, i;
	__u8 tmp[EXTRACT_SIZE];
	unsigned long flags;

	while (nbytes) {
		extract_buf(r, tmp);

		if (fips) {
			spin_lock_irqsave(&r->lock, flags);
			if (!memcmp(tmp, r->last_data, EXTRACT_SIZE))
				panic("Hardware RNG duplicated output!\n");
			memcpy(r->last_data, tmp, EXTRACT_SIZE);
			spin_unlock_irqrestore(&r->lock, flags);
		}
		i = min_t(int, nbytes, EXTRACT_SIZE);
		memcpy(buf, tmp, i);
		nbytes -= i;
		buf += i;
		ret += i;
	}

	/* Wipe data just returned from memory */
	memzero_explicit(tmp, sizeof(tmp));

	return ret;
}

/*
 * This function extracts randomness from the "entropy pool", and
 * returns it in a buffer.
 *
 * The min parameter specifies the minimum amount we can pull before
 * failing to avoid races that defeat catastrophic reseeding while the
 * reserved parameter indicates how much entropy we must leave in the
 * pool after each pull to avoid starving other readers.
 */
static ssize_t extract_entropy(struct entropy_store *r, void *buf,
			       size_t nbytes, int min, int reserved)
{
	__u8 tmp[EXTRACT_SIZE];
	unsigned long flags;

	/* if last_data isn't primed, we need EXTRACT_SIZE extra bytes */
	if (fips_enabled) {
		spin_lock_irqsave(&r->lock, flags);
		if (!r->last_data_init) {
			r->last_data_init = 1;
			spin_unlock_irqrestore(&r->lock, flags);
			trace_extract_entropy(r->name, EXTRACT_SIZE,
					      ENTROPY_BITS(r), _RET_IP_);
			xfer_secondary_pool(r, EXTRACT_SIZE);
			extract_buf(r, tmp);
			spin_lock_irqsave(&r->lock, flags);
			memcpy(r->last_data, tmp, EXTRACT_SIZE);
		}
		spin_unlock_irqrestore(&r->lock, flags);
	}

	trace_extract_entropy(r->name, nbytes, ENTROPY_BITS(r), _RET_IP_);
	xfer_secondary_pool(r, nbytes);
	nbytes = account(r, nbytes, min, reserved);

	return _extract_entropy(r, buf, nbytes, fips_enabled);
}

/*
 * This function extracts randomness from the "entropy pool", and
 * returns it in a userspace buffer.
 */
static ssize_t extract_entropy_user(struct entropy_store *r, void __user *buf,
				    size_t nbytes)
{
	ssize_t ret = 0, i;
	__u8 tmp[EXTRACT_SIZE];
	int large_request = (nbytes > 256);

	trace_extract_entropy_user(r->name, nbytes, ENTROPY_BITS(r), _RET_IP_);
	xfer_secondary_pool(r, nbytes);
	nbytes = account(r, nbytes, 0, 0);

	while (nbytes) {
		if (large_request && need_resched()) {
			if (signal_pending(current)) {
				if (ret == 0)
					ret = -ERESTARTSYS;
				break;
			}
			schedule();
		}

		extract_buf(r, tmp);
		i = min_t(int, nbytes, EXTRACT_SIZE);
		if (copy_to_user(buf, tmp, i)) {
			ret = -EFAULT;
			break;
		}

		nbytes -= i;
		buf += i;
		ret += i;
	}

	/* Wipe data just returned from memory */
	memzero_explicit(tmp, sizeof(tmp));

	return ret;
}

/*
 * This function is the exported kernel interface.  It returns some
 * number of good random numbers, suitable for key generation, seeding
 * TCP sequence numbers, etc.  It does not rely on the hardware random
 * number generator.  For random bytes direct from the hardware RNG
 * (when available), use get_random_bytes_arch().
 */
void get_random_bytes(void *buf, int nbytes)
{
	__u8 tmp[CHACHA20_BLOCK_SIZE];

#if DEBUG_RANDOM_BOOT > 0
	if (!crng_ready())
		printk(KERN_NOTICE "random: %pF get_random_bytes called "
		       "with crng_init = %d\n", (void *) _RET_IP_, crng_init);
#endif
	trace_get_random_bytes(nbytes, _RET_IP_);

	while (nbytes >= CHACHA20_BLOCK_SIZE) {
		extract_crng(buf);
		buf += CHACHA20_BLOCK_SIZE;
		nbytes -= CHACHA20_BLOCK_SIZE;
	}

	if (nbytes > 0) {
		extract_crng(tmp);
		memcpy(buf, tmp, nbytes);
		crng_backtrack_protect(tmp, nbytes);
	} else
		crng_backtrack_protect(tmp, CHACHA20_BLOCK_SIZE);
	memzero_explicit(tmp, sizeof(tmp));
}
EXPORT_SYMBOL(get_random_bytes);

/*
 * Add a callback function that will be invoked when the nonblocking
 * pool is initialised.
 *
 * returns: 0 if callback is successfully added
 *	    -EALREADY if pool is already initialised (callback not called)
 *	    -ENOENT if module for callback is not alive
 */
int add_random_ready_callback(struct random_ready_callback *rdy)
{
	struct module *owner;
	unsigned long flags;
	int err = -EALREADY;

	if (crng_ready())
		return err;

	owner = rdy->owner;
	if (!try_module_get(owner))
		return -ENOENT;

	spin_lock_irqsave(&random_ready_list_lock, flags);
	if (crng_ready())
		goto out;

	owner = NULL;

	list_add(&rdy->list, &random_ready_list);
	err = 0;

out:
	spin_unlock_irqrestore(&random_ready_list_lock, flags);

	module_put(owner);

	return err;
}
EXPORT_SYMBOL(add_random_ready_callback);

/*
 * Delete a previously registered readiness callback function.
 */
void del_random_ready_callback(struct random_ready_callback *rdy)
{
	unsigned long flags;
	struct module *owner = NULL;

	spin_lock_irqsave(&random_ready_list_lock, flags);
	if (!list_empty(&rdy->list)) {
		list_del_init(&rdy->list);
		owner = rdy->owner;
	}
	spin_unlock_irqrestore(&random_ready_list_lock, flags);

	module_put(owner);
}
EXPORT_SYMBOL(del_random_ready_callback);

/*
 * This function will use the architecture-specific hardware random
 * number generator if it is available.  The arch-specific hw RNG will
 * almost certainly be faster than what we can do in software, but it
 * is impossible to verify that it is implemented securely (as
 * opposed to, say, the AES encryption of a sequence number using a
 * key known by the NSA).  So it's useful if we need the speed, but
 * only if we're willing to trust the hardware manufacturer not to
 * have put in a back door.
 */
void get_random_bytes_arch(void *buf, int nbytes)
{
	char *p = buf;

	trace_get_random_bytes_arch(nbytes, _RET_IP_);
	while (nbytes) {
		unsigned long v;
		int chunk = min(nbytes, (int)sizeof(unsigned long));

		if (!arch_get_random_long(&v))
			break;

		memcpy(p, &v, chunk);
		p += chunk;
		nbytes -= chunk;
	}

	if (nbytes)
		get_random_bytes(p, nbytes);
}
EXPORT_SYMBOL(get_random_bytes_arch);

/*
 * init_std_data - initialize pool with system data
 *
 * @r: pool to initialize
 *
 * This function clears the pool's entropy count and mixes some system
 * data into the pool to prepare it for use. The pool is not cleared
 * as that can only decrease the entropy in the pool.
 */
static void init_std_data(struct entropy_store *r)
{
	int i;
	ktime_t now = ktime_get_real();
	unsigned long rv;

	r->last_pulled = jiffies;
	mix_pool_bytes(r, &now, sizeof(now));
	for (i = r->poolinfo->poolbytes; i > 0; i -= sizeof(rv)) {
		if (!arch_get_random_seed_long(&rv) &&
		    !arch_get_random_long(&rv))
			rv = random_get_entropy();
		mix_pool_bytes(r, &rv, sizeof(rv));
	}
	mix_pool_bytes(r, utsname(), sizeof(*(utsname())));
}

/*
 * Note that setup_arch() may call add_device_randomness()
 * long before we get here. This allows seeding of the pools
 * with some platform dependent data very early in the boot
 * process. But it limits our options here. We must use
 * statically allocated structures that already have all
 * initializations complete at compile time. We should also
 * take care not to overwrite the precious per platform data
 * we were given.
 */
static int rand_initialize(void)
{
#ifdef CONFIG_NUMA
	int i;
	struct crng_state *crng;
	struct crng_state **pool;
#endif

	init_std_data(&input_pool);
	init_std_data(&blocking_pool);
	crng_initialize(&primary_crng);

#ifdef CONFIG_NUMA
	pool = kcalloc(nr_node_ids, sizeof(*pool), GFP_KERNEL|__GFP_NOFAIL);
	for_each_online_node(i) {
		crng = kmalloc_node(sizeof(struct crng_state),
				    GFP_KERNEL | __GFP_NOFAIL, i);
		spin_lock_init(&crng->lock);
		crng_initialize(crng);
		pool[i] = crng;
	}
	mb();
	crng_node_pool = pool;
#endif
	return 0;
}
early_initcall(rand_initialize);

#ifdef CONFIG_BLOCK
void rand_initialize_disk(struct gendisk *disk)
{
	struct timer_rand_state *state;

	/*
	 * If kzalloc returns null, we just won't use that entropy
	 * source.
	 */
	state = kzalloc(sizeof(struct timer_rand_state), GFP_KERNEL);
	if (state) {
		state->last_time = INITIAL_JIFFIES;
		disk->random = state;
	}
}
#endif

static ssize_t
_random_read(int nonblock, char __user *buf, size_t nbytes)
{
	ssize_t n;

	if (nbytes == 0)
		return 0;

	nbytes = min_t(size_t, nbytes, SEC_XFER_SIZE);
	while (1) {
		n = extract_entropy_user(&blocking_pool, buf, nbytes);
		if (n < 0)
			return n;
		trace_random_read(n*8, (nbytes-n)*8,
				  ENTROPY_BITS(&blocking_pool),
				  ENTROPY_BITS(&input_pool));
		if (n > 0)
			return n;

		/* Pool is (near) empty.  Maybe wait and retry. */
		if (nonblock)
			return -EAGAIN;

		wait_event_interruptible(random_read_wait,
			ENTROPY_BITS(&input_pool) >=
			random_read_wakeup_bits);
		if (signal_pending(current))
			return -ERESTARTSYS;
	}
}

static ssize_t
random_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos)
{
	return _random_read(file->f_flags & O_NONBLOCK, buf, nbytes);
}

static ssize_t
urandom_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos)
{
	unsigned long flags;
	static int maxwarn = 10;
	int ret;

	if (!crng_ready() && maxwarn > 0) {
		maxwarn--;
		printk(KERN_NOTICE "random: %s: uninitialized urandom read "
		       "(%zd bytes read)\n",
		       current->comm, nbytes);
		spin_lock_irqsave(&primary_crng.lock, flags);
		crng_init_cnt = 0;
		spin_unlock_irqrestore(&primary_crng.lock, flags);
	}
	nbytes = min_t(size_t, nbytes, INT_MAX >> (ENTROPY_SHIFT + 3));
	ret = extract_crng_user(buf, nbytes);
	trace_urandom_read(8 * nbytes, 0, ENTROPY_BITS(&input_pool));
	return ret;
}

static unsigned int
random_poll(struct file *file, poll_table * wait)
{
	unsigned int mask;

	poll_wait(file, &random_read_wait, wait);
	poll_wait(file, &random_write_wait, wait);
	mask = 0;
	if (ENTROPY_BITS(&input_pool) >= random_read_wakeup_bits)
		mask |= POLLIN | POLLRDNORM;
	if (ENTROPY_BITS(&input_pool) < random_write_wakeup_bits)
		mask |= POLLOUT | POLLWRNORM;
	return mask;
}

static ssize_t
write_pool(struct entropy_store *r, const char __user *buffer, size_t count)
{
	size_t bytes;
	__u32 buf[16];
	const char __user *p = buffer;

	while (count > 0) {
		bytes = min(count, sizeof(buf));
		if (copy_from_user(&buf, p, bytes))
			return -EFAULT;

		count -= bytes;
		p += bytes;

		mix_pool_bytes(r, buf, bytes);
		cond_resched();
	}

	return 0;
}
random_write(struct file
*file
, const char __user
*buffer
,
1772 size_t count
, loff_t
*ppos
)
1776 ret
= write_pool(&input_pool
, buffer
, count
);
1780 return (ssize_t
)count
;

static long random_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
{
	int size, ent_count;
	int __user *p = (int __user *)arg;
	int retval;

	switch (cmd) {
	case RNDGETENTCNT:
		/* inherently racy, no point locking */
		ent_count = ENTROPY_BITS(&input_pool);
		if (put_user(ent_count, p))
			return -EFAULT;
		return 0;
	case RNDADDTOENTCNT:
		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;
		if (get_user(ent_count, p))
			return -EFAULT;
		return credit_entropy_bits_safe(&input_pool, ent_count);
	case RNDADDENTROPY:
		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;
		if (get_user(ent_count, p++))
			return -EFAULT;
		if (ent_count < 0)
			return -EINVAL;
		if (get_user(size, p++))
			return -EFAULT;
		retval = write_pool(&input_pool, (const char __user *)p,
				    size);
		if (retval < 0)
			return retval;
		return credit_entropy_bits_safe(&input_pool, ent_count);
	case RNDZAPENTCNT:
	case RNDCLEARPOOL:
		/*
		 * Clear the entropy pool counters. We no longer clear
		 * the entropy pool, as that's silly.
		 */
		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;
		input_pool.entropy_count = 0;
		blocking_pool.entropy_count = 0;
		return 0;
	default:
		return -EINVAL;
	}
}

static int random_fasync(int fd, struct file *filp, int on)
{
	return fasync_helper(fd, filp, on, &fasync);
}

const struct file_operations random_fops = {
	.read  = random_read,
	.write = random_write,
	.poll  = random_poll,
	.unlocked_ioctl = random_ioctl,
	.fasync = random_fasync,
	.llseek = noop_llseek,
};

const struct file_operations urandom_fops = {
	.read  = urandom_read,
	.write = random_write,
	.unlocked_ioctl = random_ioctl,
	.fasync = random_fasync,
	.llseek = noop_llseek,
};

SYSCALL_DEFINE3(getrandom, char __user *, buf, size_t, count,
		unsigned int, flags)
{
	if (flags & ~(GRND_NONBLOCK|GRND_RANDOM))
		return -EINVAL;

	if (count > INT_MAX)
		count = INT_MAX;

	if (flags & GRND_RANDOM)
		return _random_read(flags & GRND_NONBLOCK, buf, count);

	if (!crng_ready()) {
		if (flags & GRND_NONBLOCK)
			return -EAGAIN;
		crng_wait_ready();
		if (signal_pending(current))
			return -ERESTARTSYS;
	}
	return urandom_read(NULL, buf, count, NULL);
}

/********************************************************************
 *
 * Sysctl interface
 *
 ********************************************************************/

#ifdef CONFIG_SYSCTL

#include <linux/sysctl.h>

static int min_read_thresh = 8, min_write_thresh;
static int max_read_thresh = OUTPUT_POOL_WORDS * 32;
static int max_write_thresh = INPUT_POOL_WORDS * 32;
static char sysctl_bootid[16];

/*
 * This function is used to return both the bootid UUID, and random
 * UUID.  The difference is in whether table->data is NULL; if it is,
 * then a new UUID is generated and returned to the user.
 *
 * If the user accesses this via the proc interface, the UUID will be
 * returned as an ASCII string in the standard UUID format; if via the
 * sysctl system call, as 16 bytes of binary data.
 */
static int proc_do_uuid(struct ctl_table *table, int write,
			void __user *buffer, size_t *lenp, loff_t *ppos)
{
	struct ctl_table fake_table;
	unsigned char buf[64], tmp_uuid[16], *uuid;

	uuid = table->data;
	if (!uuid) {
		uuid = tmp_uuid;
		generate_random_uuid(uuid);
	} else {
		static DEFINE_SPINLOCK(bootid_spinlock);

		spin_lock(&bootid_spinlock);
		if (!uuid[8])
			generate_random_uuid(uuid);
		spin_unlock(&bootid_spinlock);
	}

	sprintf(buf, "%pU", uuid);

	fake_table.data = buf;
	fake_table.maxlen = sizeof(buf);

	return proc_dostring(&fake_table, write, buffer, lenp, ppos);
}

/*
 * Return entropy available scaled to integral bits
 */
static int proc_do_entropy(struct ctl_table *table, int write,
			   void __user *buffer, size_t *lenp, loff_t *ppos)
{
	struct ctl_table fake_table;
	int entropy_count;

	entropy_count = *(int *)table->data >> ENTROPY_SHIFT;

	fake_table.data = &entropy_count;
	fake_table.maxlen = sizeof(entropy_count);

	return proc_dointvec(&fake_table, write, buffer, lenp, ppos);
}

static int sysctl_poolsize = INPUT_POOL_WORDS * 32;
extern struct ctl_table random_table[];
struct ctl_table random_table[] = {
	{
		.procname	= "poolsize",
		.data		= &sysctl_poolsize,
		.maxlen		= sizeof(int),
		.mode		= 0444,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "entropy_avail",
		.maxlen		= sizeof(int),
		.mode		= 0444,
		.proc_handler	= proc_do_entropy,
		.data		= &input_pool.entropy_count,
	},
	{
		.procname	= "read_wakeup_threshold",
		.data		= &random_read_wakeup_bits,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= &min_read_thresh,
		.extra2		= &max_read_thresh,
	},
	{
		.procname	= "write_wakeup_threshold",
		.data		= &random_write_wakeup_bits,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= &min_write_thresh,
		.extra2		= &max_write_thresh,
	},
	{
		.procname	= "urandom_min_reseed_secs",
		.data		= &random_min_urandom_seed,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "boot_id",
		.data		= &sysctl_bootid,
		.maxlen		= 16,
		.mode		= 0444,
		.proc_handler	= proc_do_uuid,
	},
	{
		.procname	= "uuid",
		.maxlen		= 16,
		.mode		= 0444,
		.proc_handler	= proc_do_uuid,
	},
#ifdef ADD_INTERRUPT_BENCH
	{
		.procname	= "add_interrupt_avg_cycles",
		.data		= &avg_cycles,
		.maxlen		= sizeof(avg_cycles),
		.mode		= 0444,
		.proc_handler	= proc_doulongvec_minmax,
	},
	{
		.procname	= "add_interrupt_avg_deviation",
		.data		= &avg_deviation,
		.maxlen		= sizeof(avg_deviation),
		.mode		= 0444,
		.proc_handler	= proc_doulongvec_minmax,
	},
#endif
	{ }
};
#endif 	/* CONFIG_SYSCTL */

struct batched_entropy {
	union {
		u64 entropy_u64[CHACHA20_BLOCK_SIZE / sizeof(u64)];
		u32 entropy_u32[CHACHA20_BLOCK_SIZE / sizeof(u32)];
	};
	unsigned int position;
};

/*
 * Get a random word for internal kernel use only. The quality of the random
 * number is either as good as RDRAND or as good as /dev/urandom, with the
 * goal of being quite fast and not depleting entropy.
 */
static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u64);
u64 get_random_u64(void)
{
	u64 ret;
	struct batched_entropy *batch;

#if BITS_PER_LONG == 64
	if (arch_get_random_long((unsigned long *)&ret))
		return ret;
#else
	if (arch_get_random_long((unsigned long *)&ret) &&
	    arch_get_random_long((unsigned long *)&ret + 1))
		return ret;
#endif

	batch = &get_cpu_var(batched_entropy_u64);
	if (batch->position % ARRAY_SIZE(batch->entropy_u64) == 0) {
		extract_crng((u8 *)batch->entropy_u64);
		batch->position = 0;
	}
	ret = batch->entropy_u64[batch->position++];
	put_cpu_var(batched_entropy_u64);
	return ret;
}
EXPORT_SYMBOL(get_random_u64);

static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u32);
u32 get_random_u32(void)
{
	u32 ret;
	struct batched_entropy *batch;

	if (arch_get_random_int(&ret))
		return ret;

	batch = &get_cpu_var(batched_entropy_u32);
	if (batch->position % ARRAY_SIZE(batch->entropy_u32) == 0) {
		extract_crng((u8 *)batch->entropy_u32);
		batch->position = 0;
	}
	ret = batch->entropy_u32[batch->position++];
	put_cpu_var(batched_entropy_u32);
	return ret;
}
EXPORT_SYMBOL(get_random_u32);

/**
 * randomize_page - Generate a random, page aligned address
 * @start:	The smallest acceptable address the caller will take.
 * @range:	The size of the area, starting at @start, within which the
 *		random address must fall.
 *
 * If @start + @range would overflow, @range is capped.
 *
 * NOTE: Historical use of randomize_range, which this replaces, presumed that
 * @start was already page aligned.  We now align it regardless.
 *
 * Return: A page aligned address within [start, start + range).  On error,
 * @start is returned.
 */
unsigned long
randomize_page(unsigned long start, unsigned long range)
{
	if (!PAGE_ALIGNED(start)) {
		range -= PAGE_ALIGN(start) - start;
		start = PAGE_ALIGN(start);
	}

	if (start > ULONG_MAX - range)
		range = ULONG_MAX - start;

	range >>= PAGE_SHIFT;

	if (range == 0)
		return start;

	return start + (get_random_long() % range << PAGE_SHIFT);
}

/* Interface for in-kernel drivers of true hardware RNGs.
 * Those devices may produce endless random bits and will be throttled
 * when our pool is full.
 */
void add_hwgenerator_randomness(const char *buffer, size_t count,
				size_t entropy)
{
	struct entropy_store *poolp = &input_pool;

	if (!crng_ready()) {
		crng_fast_load(buffer, count);
		return;
	}

	/* Suspend writing if we're above the trickle threshold.
	 * We'll be woken up again once below random_write_wakeup_thresh,
	 * or when the calling thread is about to terminate.
	 */
	wait_event_interruptible(random_write_wait, kthread_should_stop() ||
			ENTROPY_BITS(&input_pool) <= random_write_wakeup_bits);
	mix_pool_bytes(poolp, buffer, count);
	credit_entropy_bits(poolp, entropy);
}
EXPORT_SYMBOL_GPL(add_hwgenerator_randomness);