/*
 * random.c -- A strong random number generator
 *
 * Copyright (C) 2017 Jason A. Donenfeld <Jason@zx2c4.com>. All
 * Rights Reserved.
 *
 * Copyright Matt Mackall <mpm@selenic.com>, 2003, 2004, 2005
 *
 * Copyright Theodore Ts'o, 1994, 1995, 1996, 1997, 1998, 1999. All
 * rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, and the entire permission notice in its entirety,
 *    including the disclaimer of warranties.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote
 *    products derived from this software without specific prior
 *    written permission.
 *
 * ALTERNATIVELY, this product may be distributed under the terms of
 * the GNU General Public License, in which case the provisions of the GPL are
 * required INSTEAD OF the above restrictions.  (This clause is
 * necessary due to a potential bad interaction between the GPL and
 * the restrictions contained in a BSD-style copyright.)
 *
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ALL OF
 * WHICH ARE HEREBY DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
 * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
 * USE OF THIS SOFTWARE, EVEN IF NOT ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 */

/*
 * (now, with legal B.S. out of the way.....)
 *
 * This routine gathers environmental noise from device drivers, etc.,
 * and returns good random numbers, suitable for cryptographic use.
 * Besides the obvious cryptographic uses, these numbers are also good
 * for seeding TCP sequence numbers, and other places where it is
 * desirable to have numbers which are not only random, but hard to
 * predict by an attacker.
 *
 * Theory of operation
 * ===================
 *
 * Computers are very predictable devices.  Hence it is extremely hard
 * to produce truly random numbers on a computer --- as opposed to
 * pseudo-random numbers, which can easily be generated by an
 * algorithm.  Unfortunately, it is very easy for attackers to guess
 * the output sequence of pseudo-random number generators, and for some
 * applications this is not acceptable.  So instead, we must try to
 * gather "environmental noise" from the computer's environment, which
 * must be hard for outside attackers to observe, and use that to
 * generate random numbers.  In a Unix environment, this is best done
 * from inside the kernel.
 *
 * Sources of randomness from the environment include inter-keystroke
 * timings, inter-interrupt timings from some interrupts, and other
 * events which are both (a) non-deterministic and (b) hard for an
 * outside observer to measure.  Randomness from these sources is
 * added to an "entropy pool", which is mixed using a CRC-like function.
 * This is not cryptographically strong, but it is adequate assuming
 * the randomness is not chosen maliciously, and it is fast enough that
 * the overhead of doing it on every interrupt is very reasonable.
 * As random bytes are mixed into the entropy pool, the routines keep
 * an *estimate* of how many bits of randomness have been stored into
 * the random number generator's internal state.
 *
 * When random bytes are desired, they are obtained by taking the SHA
 * hash of the contents of the "entropy pool".  The SHA hash avoids
 * exposing the internal state of the entropy pool.  It is believed to
 * be computationally infeasible to derive any useful information
 * about the input of SHA from its output.  Even if it is possible to
 * analyze SHA in some clever way, as long as the amount of data
 * returned from the generator is less than the inherent entropy in
 * the pool, the output data is totally unpredictable.  For this
 * reason, the routine decreases its internal estimate of how many
 * bits of "true randomness" are contained in the entropy pool as it
 * outputs random numbers.
 *
 * If this estimate goes to zero, the routine can still generate
 * random numbers; however, an attacker may (at least in theory) be
 * able to infer the future output of the generator from prior
 * outputs.  This requires successful cryptanalysis of SHA, which is
 * not believed to be feasible, but there is a remote possibility.
 * Nonetheless, these numbers should be useful for the vast majority
 * of purposes.
 *
 * Exported interfaces ---- output
 * ===============================
 *
 * There are four exported interfaces; two for use within the kernel,
 * and two for use from userspace.
 *
 * Exported interfaces ---- userspace output
 * -----------------------------------------
 *
 * The userspace interfaces are two character devices /dev/random and
 * /dev/urandom.  /dev/random is suitable for use when very high
 * quality randomness is desired (for example, for key generation or
 * one-time pads), as it will only return a maximum of the number of
 * bits of randomness (as estimated by the random number generator)
 * contained in the entropy pool.
 *
 * The /dev/urandom device does not have this limit, and will return
 * as many bytes as are requested.  As more and more random bytes are
 * requested without giving time for the entropy pool to recharge,
 * this will result in random numbers that are merely cryptographically
 * strong.  For many applications, however, this is acceptable.
 *
 * Exported interfaces ---- kernel output
 * --------------------------------------
 *
 * The primary kernel interface is
 *
 *	void get_random_bytes(void *buf, int nbytes);
 *
 * This interface will return the requested number of random bytes,
 * and place it in the requested buffer.  This is equivalent to a
 * read from /dev/urandom.
 *
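 * For example, a minimal sketch of generating a random 128-bit key
 * (the buffer name here is illustrative, not part of this file):
 *
 *	u8 key[16];
 *
 *	get_random_bytes(key, sizeof(key));
 *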
 * For less critical applications, there are the functions:
 *
 *	u32 get_random_u32()
 *	u64 get_random_u64()
 *	unsigned int get_random_int()
 *	unsigned long get_random_long()
 *
 * These are produced by a cryptographic RNG seeded from get_random_bytes,
 * and so do not deplete the entropy pool as much.  These are recommended
 * for most in-kernel operations *if the result is going to be stored in
 * the kernel*.
 *
 * Specifically, the get_random_int() family does not attempt to do
 * "anti-backtracking".  If you capture the state of the kernel (e.g.
 * by snapshotting the VM), you can figure out previous get_random_int()
 * return values.  But if the value is stored in the kernel anyway,
 * this is not a problem.
 *
 * It *is* safe to expose get_random_int() output to attackers (e.g. as
 * network cookies); given outputs 1..n, it's not feasible to predict
 * outputs 0 or n+1.  The only concern is an attacker who breaks into
 * the kernel later; the get_random_int() engine is not reseeded as
 * often as the get_random_bytes() one.
 *
 * get_random_bytes() is needed for keys that need to stay secret after
 * they are erased from the kernel.  For example, any key that will
 * be wrapped and stored encrypted.  And session encryption keys: we'd
 * like to know that after the session is closed and the keys erased,
 * the plaintext is unrecoverable to someone who recorded the ciphertext.
 *
 * But for network ports/cookies, stack canaries, PRNG seeds, address
 * space layout randomization, session *authentication* keys, or other
 * applications where the sensitive data is stored in the kernel in
 * plaintext for as long as it's sensitive, the get_random_int() family
 * is just fine.
 *
 * Consider ASLR.  We want to keep the address space secret from an
 * outside attacker while the process is running, but once the address
 * space is torn down, it's of no use to an attacker any more.  And it's
 * stored in kernel data structures as long as it's alive, so worrying
 * about an attacker's ability to extrapolate it from the get_random_int()
 * CRNG is silly.
 *
 * Even some cryptographic keys are safe to generate with get_random_int().
 * In particular, keys for SipHash are generally fine.  Here, knowledge
 * of the key authorizes you to do something to a kernel object (inject
 * packets to a network connection, or flood a hash table), and the
 * key is stored with the object being protected.  Once it goes away,
 * we no longer care if anyone knows the key.
 *
 * prandom_u32()
 * -------------
 *
 * For even weaker applications, see the pseudorandom generator
 * prandom_u32(), prandom_max(), and prandom_bytes().  If the random
 * numbers aren't security-critical at all, these are *far* cheaper.
 * Useful for self-tests, random error simulation, randomized backoffs,
 * and any other application where you trust that nobody is trying to
 * maliciously mess with you by guessing the "random" numbers.
 *
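 * For example, a randomized retry backoff might be sketched as follows
 * (base_delay_ms is a hypothetical variable, not defined here):
 *
 *	unsigned int jitter_ms = prandom_u32() % 100;
 *
 *	msleep(base_delay_ms + jitter_ms);
 *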
 * Exported interfaces ---- input
 * ==============================
 *
 * The current exported interfaces for gathering environmental noise
 * from the devices are:
 *
 *	void add_device_randomness(const void *buf, unsigned int size);
 *	void add_input_randomness(unsigned int type, unsigned int code,
 *				  unsigned int value);
 *	void add_interrupt_randomness(int irq, int irq_flags);
 *	void add_disk_randomness(struct gendisk *disk);
 *
 * add_device_randomness() is for adding data to the random pool that
 * is likely to differ between two devices (or possibly even per boot).
 * This would be things like MAC addresses or serial numbers, or the
 * read-out of the RTC.  This does *not* add any actual entropy to the
 * pool, but it initializes the pool to different values for devices
 * that might otherwise be identical and have very little entropy
 * available to them (particularly common in the embedded world).
 *
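 * A network driver might, for example, feed in its hardware address at
 * probe time (a sketch; "ndev" is an illustrative net_device pointer):
 *
 *	add_device_randomness(ndev->dev_addr, ETH_ALEN);
 *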
 * add_input_randomness() uses the input layer interrupt timing, as well as
 * the event type information from the hardware.
 *
 * add_interrupt_randomness() uses the interrupt timing as random
 * inputs to the entropy pool.  Using the cycle counters and the irq source
 * as inputs, it feeds the randomness roughly once a second.
 *
 * add_disk_randomness() uses what amounts to the seek time of block
 * layer request events, on a per-disk_devt basis, as input to the
 * entropy pool.  Note that high-speed solid state drives with very low
 * seek times do not make for good sources of entropy, as their seek
 * times are usually fairly consistent.
 *
 * All of these routines try to estimate how many bits of randomness
 * each event from a particular randomness source carries.  They do
 * this by keeping track of the first and second order deltas of the
 * event timings.
 *
 * Ensuring unpredictability at system startup
 * ============================================
 *
 * When any operating system starts up, it will go through a sequence
 * of actions that are fairly predictable by an adversary, especially
 * if the start-up does not involve interaction with a human operator.
 * This reduces the actual number of bits of unpredictability in the
 * entropy pool below the value in entropy_count.  In order to
 * counteract this effect, it helps to carry information in the
 * entropy pool across shut-downs and start-ups.  To do this, put the
 * following lines in an appropriate script which is run during the boot
 * sequence:
 *
 *	echo "Initializing random number generator..."
 *	random_seed=/var/run/random-seed
 *	# Carry a random seed from start-up to start-up
 *	# Load and then save the whole entropy pool
 *	if [ -f $random_seed ]; then
 *		cat $random_seed >/dev/urandom
 *	else
 *		touch $random_seed
 *	fi
 *	chmod 600 $random_seed
 *	dd if=/dev/urandom of=$random_seed count=1 bs=512
 *
 * and the following lines in an appropriate script which is run as
 * the system is shut down:
 *
 *	# Carry a random seed from shut-down to start-up
 *	# Save the whole entropy pool
 *	echo "Saving random seed..."
 *	random_seed=/var/run/random-seed
 *	touch $random_seed
 *	chmod 600 $random_seed
 *	dd if=/dev/urandom of=$random_seed count=1 bs=512
 *
 * For example, on most modern systems using the System V init
 * scripts, such code fragments would be found in
 * /etc/rc.d/init.d/random.  On older Linux systems, the correct script
 * location might be in /etc/rc.d/rc.local or /etc/rc.d/rc.0.
 *
 * Effectively, these commands cause the contents of the entropy pool
 * to be saved at shut-down time and reloaded into the entropy pool at
 * start-up.  (The 'dd' in the addition to the bootup script is to
 * make sure that the seed file is different for every start-up,
 * even if the system crashes without executing rc.0.)  Even with
 * complete knowledge of the start-up activities, predicting the state
 * of the entropy pool requires knowledge of the previous history of
 * the system.
 *
 * Configuring the /dev/random driver under Linux
 * ==============================================
 *
 * The /dev/random driver under Linux uses minor numbers 8 and 9 of
 * the /dev/mem major number (#1).  So if your system does not have
 * /dev/random and /dev/urandom created already, they can be created
 * by using the commands:
 *
 *	mknod /dev/random c 1 8
 *	mknod /dev/urandom c 1 9
 *
 * Acknowledgements:
 * =================
 *
 * Ideas for constructing this random number generator were derived
 * from Pretty Good Privacy's random number generator, and from private
 * discussions with Phil Karn.  Colin Plumb provided a faster random
 * number generator, which sped up the mixing function of the entropy
 * pool, taken from PGPfone.  Dale Worley has also contributed many
 * useful ideas and suggestions to improve this driver.
 *
 * Any flaws in the design are solely my responsibility, and should
 * not be attributed to Phil, Colin, or any of the authors of PGP.
 *
 * Further background information on this topic may be obtained from
 * RFC 1750, "Randomness Recommendations for Security", by Donald
 * Eastlake, Steve Crocker, and Jeff Schiller.
 */

#include <linux/utsname.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/major.h>
#include <linux/string.h>
#include <linux/fcntl.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/poll.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/genhd.h>
#include <linux/interrupt.h>
#include <linux/mm.h>
#include <linux/nodemask.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/percpu.h>
#include <linux/cryptohash.h>
#include <linux/fips.h>
#include <linux/freezer.h>
#include <linux/ptrace.h>
#include <linux/workqueue.h>
#include <linux/irq.h>
#include <linux/ratelimit.h>
#include <linux/syscalls.h>
#include <linux/completion.h>
#include <linux/uuid.h>
#include <crypto/chacha.h>

#include <asm/processor.h>
#include <linux/uaccess.h>
#include <asm/irq.h>
#include <asm/irq_regs.h>
#include <asm/io.h>

#define CREATE_TRACE_POINTS
#include <trace/events/random.h>

/* #define ADD_INTERRUPT_BENCH */

/*
 * Configuration information
 */
#define INPUT_POOL_SHIFT	12
#define INPUT_POOL_WORDS	(1 << (INPUT_POOL_SHIFT-5))
#define OUTPUT_POOL_SHIFT	10
#define OUTPUT_POOL_WORDS	(1 << (OUTPUT_POOL_SHIFT-5))
#define SEC_XFER_SIZE		512
#define EXTRACT_SIZE		10


#define LONGS(x) (((x) + sizeof(unsigned long) - 1)/sizeof(unsigned long))

/*
 * To allow fractional bits to be tracked, the entropy_count field is
 * denominated in units of 1/8th bits.
 *
 * 2*(ENTROPY_SHIFT + poolbitshift) must be <= 31, or the multiply in
 * credit_entropy_bits() needs to be 64 bits wide.
 */
#define ENTROPY_SHIFT 3
#define ENTROPY_BITS(r) ((r)->entropy_count >> ENTROPY_SHIFT)
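/*
 * For example, with ENTROPY_SHIFT == 3, crediting one full bit of
 * entropy adds 1 << 3 == 8 to entropy_count, and ENTROPY_BITS()
 * recovers whole bits by shifting the fractional count back down.
 */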

/*
 * The minimum number of bits of entropy before we wake up a read on
 * /dev/random.  Should be enough to do a significant reseed.
 */
static int random_read_wakeup_bits = 64;

/*
 * If the entropy count falls under this number of bits, then we
 * should wake up processes which are selecting or polling on write
 * access to /dev/random.
 */
static int random_write_wakeup_bits = 28 * OUTPUT_POOL_WORDS;

/*
 * Originally, we used a primitive polynomial of degree .poolwords
 * over GF(2).  The taps for various sizes are defined below.  They
 * were chosen to be evenly spaced except for the last tap, which is 1
 * to get the twisting happening as fast as possible.
 *
 * For the purposes of better mixing, we use the CRC-32 polynomial as
 * well to make a (modified) twisted Generalized Feedback Shift
 * Register.  (See M. Matsumoto & Y. Kurita, 1992.  Twisted GFSR
 * generators.  ACM Transactions on Modeling and Computer Simulation
 * 2(3):179-194.  Also see M. Matsumoto & Y. Kurita, 1994.  Twisted
 * GFSR generators II.  ACM Transactions on Modeling and Computer
 * Simulation 4:254-266)
 *
 * Thanks to Colin Plumb for suggesting this.
 *
 * The mixing operation is much less sensitive than the output hash,
 * where we use SHA-1.  All that we want of the mixing operation is
 * that it be a good non-cryptographic hash; i.e. that it not produce
 * collisions when fed "random" data of the sort we expect to see.  As
 * long as the pool state differs for different inputs, we have
 * preserved the input entropy and done a good job.  The fact that an
 * intelligent attacker can construct inputs that will produce
 * controlled alterations to the pool's state is not important because
 * we don't consider such inputs to contribute any randomness.  The
 * only property we need with respect to them is that the attacker
 * can't increase his/her knowledge of the pool's state.  Since all
 * additions are reversible (knowing the final state and the input,
 * you can reconstruct the initial state), if an attacker has any
 * uncertainty about the initial state, he/she can only shuffle that
 * uncertainty about, but never cause any collisions (which would
 * decrease the uncertainty).
 *
 * Our mixing functions were analyzed by Lacharme, Roeck, Strubel, and
 * Videau in their paper, "The Linux Pseudorandom Number Generator
 * Revisited" (see: http://eprint.iacr.org/2012/251.pdf).  In their
 * paper, they point out that we are not using a true Twisted GFSR,
 * since Matsumoto & Kurita used a trinomial feedback polynomial (that
 * is, with only three taps, instead of the six that we are using).
 * As a result, the resulting polynomial is neither primitive nor
 * irreducible, and hence does not have a maximal period over
 * GF(2**32).  They suggest a slight change to the generator
 * polynomial which improves the resulting TGFSR polynomial to be
 * irreducible, which we have made here.
 */
static const struct poolinfo {
	int poolbitshift, poolwords, poolbytes, poolfracbits;
#define S(x) ilog2(x)+5, (x), (x)*4, (x) << (ENTROPY_SHIFT+5)
	int tap1, tap2, tap3, tap4, tap5;
} poolinfo_table[] = {
	/* was: x^128 + x^103 + x^76 + x^51 +x^25 + x + 1 */
	/* x^128 + x^104 + x^76 + x^51 +x^25 + x + 1 */
	{ S(128),	104,	76,	51,	25,	1 },
	/* was: x^32 + x^26 + x^20 + x^14 + x^7 + x + 1 */
	/* x^32 + x^26 + x^19 + x^14 + x^7 + x + 1 */
	{ S(32),	26,	19,	14,	7,	1 },
#if 0
	/* x^2048 + x^1638 + x^1231 + x^819 + x^411 + x + 1  -- 115 */
	{ S(2048),	1638,	1231,	819,	411,	1 },

	/* x^1024 + x^817 + x^615 + x^412 + x^204 + x + 1 -- 290 */
	{ S(1024),	817,	615,	412,	204,	1 },

	/* x^1024 + x^819 + x^616 + x^410 + x^207 + x^2 + 1 -- 115 */
	{ S(1024),	819,	616,	410,	207,	2 },

	/* x^512 + x^411 + x^308 + x^208 + x^104 + x + 1 -- 225 */
	{ S(512),	411,	308,	208,	104,	1 },

	/* x^512 + x^409 + x^307 + x^206 + x^102 + x^2 + 1 -- 95 */
	{ S(512),	409,	307,	206,	102,	2 },
	/* x^512 + x^409 + x^309 + x^205 + x^103 + x^2 + 1 -- 95 */
	{ S(512),	409,	309,	205,	103,	2 },

	/* x^256 + x^205 + x^155 + x^101 + x^52 + x + 1 -- 125 */
	{ S(256),	205,	155,	101,	52,	1 },

	/* x^128 + x^103 + x^78 + x^51 + x^27 + x^2 + 1 -- 70 */
	{ S(128),	103,	78,	51,	27,	2 },

	/* x^64 + x^52 + x^39 + x^26 + x^14 + x + 1 -- 15 */
	{ S(64),	52,	39,	26,	14,	1 },
#endif
};

/*
 * Static global variables
 */
static DECLARE_WAIT_QUEUE_HEAD(random_read_wait);
static DECLARE_WAIT_QUEUE_HEAD(random_write_wait);
static struct fasync_struct *fasync;

static DEFINE_SPINLOCK(random_ready_list_lock);
static LIST_HEAD(random_ready_list);

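/*
 * state[] follows the standard ChaCha layout: words 0-3 hold the
 * "expand 32-byte k" constant, words 4-11 the 256-bit key, word 12
 * the block counter, and words 13-15 the nonce; see crng_initialize()
 * and _extract_crng(), which bumps state[13] when state[12] wraps.
 */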
struct crng_state {
	__u32		state[16];
	unsigned long	init_time;
	spinlock_t	lock;
};

static struct crng_state primary_crng = {
	.lock = __SPIN_LOCK_UNLOCKED(primary_crng.lock),
};

/*
 * crng_init =  0 --> Uninitialized
 *		1 --> Initialized
 *		2 --> Initialized from input_pool
 *
 * crng_init is protected by primary_crng->lock, and only increases
 * its value (from 0->1->2).
 */
static int crng_init = 0;
#define crng_ready() (likely(crng_init > 1))
static int crng_init_cnt = 0;
static unsigned long crng_global_init_time = 0;
#define CRNG_INIT_CNT_THRESH (2*CHACHA_KEY_SIZE)
static void _extract_crng(struct crng_state *crng, __u8 out[CHACHA_BLOCK_SIZE]);
static void _crng_backtrack_protect(struct crng_state *crng,
				    __u8 tmp[CHACHA_BLOCK_SIZE], int used);
static void process_random_ready_list(void);
static void _get_random_bytes(void *buf, int nbytes);

static struct ratelimit_state unseeded_warning =
	RATELIMIT_STATE_INIT("warn_unseeded_randomness", HZ, 3);
static struct ratelimit_state urandom_warning =
	RATELIMIT_STATE_INIT("warn_urandom_randomness", HZ, 3);

static int ratelimit_disable __read_mostly;

module_param_named(ratelimit_disable, ratelimit_disable, int, 0644);
MODULE_PARM_DESC(ratelimit_disable, "Disable random ratelimit suppression");

/**********************************************************************
 *
 * OS independent entropy store.   Here are the functions which handle
 * storing entropy in an entropy pool.
 *
 **********************************************************************/

struct entropy_store;
struct entropy_store {
	/* read-only data: */
	const struct poolinfo *poolinfo;
	__u32 *pool;
	const char *name;
	struct entropy_store *pull;
	struct work_struct push_work;

	/* read-write data: */
	unsigned long last_pulled;
	spinlock_t lock;
	unsigned short add_ptr;
	unsigned short input_rotate;
	int entropy_count;
	unsigned int initialized:1;
	unsigned int last_data_init:1;
	__u8 last_data[EXTRACT_SIZE];
};

static ssize_t extract_entropy(struct entropy_store *r, void *buf,
			       size_t nbytes, int min, int rsvd);
static ssize_t _extract_entropy(struct entropy_store *r, void *buf,
				size_t nbytes, int fips);

static void crng_reseed(struct crng_state *crng, struct entropy_store *r);
static void push_to_pool(struct work_struct *work);
static __u32 input_pool_data[INPUT_POOL_WORDS] __latent_entropy;
static __u32 blocking_pool_data[OUTPUT_POOL_WORDS] __latent_entropy;

static struct entropy_store input_pool = {
	.poolinfo = &poolinfo_table[0],
	.name = "input",
	.lock = __SPIN_LOCK_UNLOCKED(input_pool.lock),
	.pool = input_pool_data
};

static struct entropy_store blocking_pool = {
	.poolinfo = &poolinfo_table[1],
	.name = "blocking",
	.pull = &input_pool,
	.lock = __SPIN_LOCK_UNLOCKED(blocking_pool.lock),
	.pool = blocking_pool_data,
	.push_work = __WORK_INITIALIZER(blocking_pool.push_work,
					push_to_pool),
};

static __u32 const twist_table[8] = {
	0x00000000, 0x3b6e20c8, 0x76dc4190, 0x4db26158,
	0xedb88320, 0xd6d6a3e8, 0x9b64c2b0, 0xa00ae278 };

/*
 * This function adds bytes into the entropy "pool".  It does not
 * update the entropy estimate.  The caller should call
 * credit_entropy_bits if this is appropriate.
 *
 * The pool is stirred with a primitive polynomial of the appropriate
 * degree, and then twisted.  We twist by three bits at a time because
 * it's cheap to do so and helps slightly in the expected case where
 * the entropy is concentrated in the low-order bits.
 */
static void _mix_pool_bytes(struct entropy_store *r, const void *in,
			    int nbytes)
{
	unsigned long i, tap1, tap2, tap3, tap4, tap5;
	int input_rotate;
	int wordmask = r->poolinfo->poolwords - 1;
	const char *bytes = in;
	__u32 w;

	tap1 = r->poolinfo->tap1;
	tap2 = r->poolinfo->tap2;
	tap3 = r->poolinfo->tap3;
	tap4 = r->poolinfo->tap4;
	tap5 = r->poolinfo->tap5;

	input_rotate = r->input_rotate;
	i = r->add_ptr;

	/* mix one byte at a time to simplify size handling and churn faster */
	while (nbytes--) {
		w = rol32(*bytes++, input_rotate);
		i = (i - 1) & wordmask;

		/* XOR in the various taps */
		w ^= r->pool[i];
		w ^= r->pool[(i + tap1) & wordmask];
		w ^= r->pool[(i + tap2) & wordmask];
		w ^= r->pool[(i + tap3) & wordmask];
		w ^= r->pool[(i + tap4) & wordmask];
		w ^= r->pool[(i + tap5) & wordmask];

		/* Mix the result back in with a twist */
		r->pool[i] = (w >> 3) ^ twist_table[w & 7];

		/*
		 * Normally, we add 7 bits of rotation to the pool.
		 * At the beginning of the pool, add an extra 7 bits
		 * rotation, so that successive passes spread the
		 * input bits across the pool evenly.
		 */
		input_rotate = (input_rotate + (i ? 7 : 14)) & 31;
	}

	r->input_rotate = input_rotate;
	r->add_ptr = i;
}

static void __mix_pool_bytes(struct entropy_store *r, const void *in,
			     int nbytes)
{
	trace_mix_pool_bytes_nolock(r->name, nbytes, _RET_IP_);
	_mix_pool_bytes(r, in, nbytes);
}

static void mix_pool_bytes(struct entropy_store *r, const void *in,
			   int nbytes)
{
	unsigned long flags;

	trace_mix_pool_bytes(r->name, nbytes, _RET_IP_);
	spin_lock_irqsave(&r->lock, flags);
	_mix_pool_bytes(r, in, nbytes);
	spin_unlock_irqrestore(&r->lock, flags);
}

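/*
 * Per-CPU scratch pool fed by add_interrupt_randomness(); "count"
 * tracks how many fast_mix() passes have happened since the pool
 * was last spilled into the input pool.
 */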
struct fast_pool {
	__u32		pool[4];
	unsigned long	last;
	unsigned short	reg_idx;
	unsigned char	count;
};

/*
 * This is a fast mixing routine used by the interrupt randomness
 * collector.  It's hardcoded for a 128-bit pool and assumes that any
 * locks that might be needed are taken by the caller.
 */
static void fast_mix(struct fast_pool *f)
{
	__u32 a = f->pool[0],	b = f->pool[1];
	__u32 c = f->pool[2],	d = f->pool[3];

	a += b;			c += d;
	b = rol32(b, 6);	d = rol32(d, 27);
	d ^= a;			b ^= c;

	a += b;			c += d;
	b = rol32(b, 16);	d = rol32(d, 14);
	d ^= a;			b ^= c;

	a += b;			c += d;
	b = rol32(b, 6);	d = rol32(d, 27);
	d ^= a;			b ^= c;

	a += b;			c += d;
	b = rol32(b, 16);	d = rol32(d, 14);
	d ^= a;			b ^= c;

	f->pool[0] = a;  f->pool[1] = b;
	f->pool[2] = c;  f->pool[3] = d;
	f->count++;
}

static void process_random_ready_list(void)
{
	unsigned long flags;
	struct random_ready_callback *rdy, *tmp;

	spin_lock_irqsave(&random_ready_list_lock, flags);
	list_for_each_entry_safe(rdy, tmp, &random_ready_list, list) {
		struct module *owner = rdy->owner;

		list_del_init(&rdy->list);
		rdy->func(rdy);
		module_put(owner);
	}
	spin_unlock_irqrestore(&random_ready_list_lock, flags);
}

/*
 * Credit (or debit) the entropy store with n bits of entropy.
 * Use credit_entropy_bits_safe() if the value comes from userspace
 * or otherwise should be checked for extreme values.
 */
static void credit_entropy_bits(struct entropy_store *r, int nbits)
{
	int entropy_count, orig, has_initialized = 0;
	const int pool_size = r->poolinfo->poolfracbits;
	int nfrac = nbits << ENTROPY_SHIFT;

	if (!nbits)
		return;

retry:
	entropy_count = orig = READ_ONCE(r->entropy_count);
	if (nfrac < 0) {
		/* Debit */
		entropy_count += nfrac;
	} else {
		/*
		 * Credit: we have to account for the possibility of
		 * overwriting already present entropy.	 Even in the
		 * ideal case of pure Shannon entropy, new contributions
		 * approach the full value asymptotically:
		 *
		 * entropy <- entropy + (pool_size - entropy) *
		 *	(1 - exp(-add_entropy/pool_size))
		 *
		 * For add_entropy <= pool_size/2 then
		 * (1 - exp(-add_entropy/pool_size)) >=
		 *    (add_entropy/pool_size)*0.7869...
		 * so we can approximate the exponential with
		 * 3/4*add_entropy/pool_size and still be on the
		 * safe side by adding at most pool_size/2 at a time.
		 *
		 * The use of pool_size-2 in the while statement is to
		 * prevent rounding artifacts from making the loop
		 * arbitrarily long; this limits the loop to log2(pool_size)*2
		 * turns no matter how large nbits is.
		 */
		int pnfrac = nfrac;
		const int s = r->poolinfo->poolbitshift + ENTROPY_SHIFT + 2;
		/* The +2 corresponds to the /4 in the denominator */
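		/*
		 * Worked example, assuming the 4096-bit input pool
		 * (poolbitshift = 12, so s = 17 and pool_size = 32768
		 * fractional bits): crediting one bit (nfrac = 8) to an
		 * empty pool adds (32768 * 8 * 3) >> 17 = 6 fractional
		 * bits, i.e. 3/4 of a bit, exactly as the 3/4
		 * approximation above predicts.
		 */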

		do {
			unsigned int anfrac = min(pnfrac, pool_size/2);
			unsigned int add =
				((pool_size - entropy_count)*anfrac*3) >> s;

			entropy_count += add;
			pnfrac -= anfrac;
		} while (unlikely(entropy_count < pool_size-2 && pnfrac));
	}

	if (unlikely(entropy_count < 0)) {
		pr_warn("random: negative entropy/overflow: pool %s count %d\n",
			r->name, entropy_count);
		WARN_ON(1);
		entropy_count = 0;
	} else if (entropy_count > pool_size)
		entropy_count = pool_size;
	if ((r == &blocking_pool) && !r->initialized &&
	    (entropy_count >> ENTROPY_SHIFT) > 128)
		has_initialized = 1;
	if (cmpxchg(&r->entropy_count, orig, entropy_count) != orig)
		goto retry;

	if (has_initialized) {
		r->initialized = 1;
		wake_up_interruptible(&random_read_wait);
		kill_fasync(&fasync, SIGIO, POLL_IN);
	}

	trace_credit_entropy_bits(r->name, nbits,
				  entropy_count >> ENTROPY_SHIFT, _RET_IP_);

	if (r == &input_pool) {
		int entropy_bits = entropy_count >> ENTROPY_SHIFT;
		struct entropy_store *other = &blocking_pool;

		if (crng_init < 2) {
			if (entropy_bits < 128)
				return;
			crng_reseed(&primary_crng, r);
			entropy_bits = r->entropy_count >> ENTROPY_SHIFT;
		}

		/* initialize the blocking pool if necessary */
		if (entropy_bits >= random_read_wakeup_bits &&
		    !other->initialized) {
			schedule_work(&other->push_work);
			return;
		}

		/* should we wake readers? */
		if (entropy_bits >= random_read_wakeup_bits &&
		    wq_has_sleeper(&random_read_wait)) {
			wake_up_interruptible(&random_read_wait);
			kill_fasync(&fasync, SIGIO, POLL_IN);
		}
		/* If the input pool is getting full, and the blocking
		 * pool has room, send some entropy to the blocking
		 * pool.
		 */
		if (!work_pending(&other->push_work) &&
		    (ENTROPY_BITS(r) > 6 * r->poolinfo->poolbytes) &&
		    (ENTROPY_BITS(other) <= 6 * other->poolinfo->poolbytes))
			schedule_work(&other->push_work);
	}
}

static int credit_entropy_bits_safe(struct entropy_store *r, int nbits)
{
	const int nbits_max = r->poolinfo->poolwords * 32;

	if (nbits < 0)
		return -EINVAL;

	/* Cap the value to avoid overflows */
	nbits = min(nbits, nbits_max);

	credit_entropy_bits(r, nbits);
	return 0;
}

/*********************************************************************
 *
 * CRNG using CHACHA20
 *
 *********************************************************************/

#define CRNG_RESEED_INTERVAL (300*HZ)
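/* i.e. a CRNG in use is reseeded once five minutes have elapsed since its last reseed */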

static DECLARE_WAIT_QUEUE_HEAD(crng_init_wait);

#ifdef CONFIG_NUMA
/*
 * Hack to deal with crazy userspace programs when they are all trying
 * to access /dev/urandom in parallel.  The programs are almost
 * certainly doing something terribly wrong, but we'll work around
 * their brain damage.
 */
static struct crng_state **crng_node_pool __read_mostly;
#endif

static void invalidate_batched_entropy(void);
static void numa_crng_init(void);

static bool trust_cpu __ro_after_init = IS_ENABLED(CONFIG_RANDOM_TRUST_CPU);
static int __init parse_trust_cpu(char *arg)
{
	return kstrtobool(arg, &trust_cpu);
}
early_param("random.trust_cpu", parse_trust_cpu);

static void crng_initialize(struct crng_state *crng)
{
	int		i;
	int		arch_init = 1;
	unsigned long	rv;

	memcpy(&crng->state[0], "expand 32-byte k", 16);
	if (crng == &primary_crng)
		_extract_entropy(&input_pool, &crng->state[4],
				 sizeof(__u32) * 12, 0);
	else
		_get_random_bytes(&crng->state[4], sizeof(__u32) * 12);
	for (i = 4; i < 16; i++) {
		if (!arch_get_random_seed_long(&rv) &&
		    !arch_get_random_long(&rv)) {
			rv = random_get_entropy();
			arch_init = 0;
		}
		crng->state[i] ^= rv;
	}
	if (trust_cpu && arch_init && crng == &primary_crng) {
		invalidate_batched_entropy();
		numa_crng_init();
		crng_init = 2;
		pr_notice("random: crng done (trusting CPU's manufacturer)\n");
	}
	crng->init_time = jiffies - CRNG_RESEED_INTERVAL - 1;
}

#ifdef CONFIG_NUMA
static void do_numa_crng_init(struct work_struct *work)
{
	int i;
	struct crng_state *crng;
	struct crng_state **pool;

	pool = kcalloc(nr_node_ids, sizeof(*pool), GFP_KERNEL|__GFP_NOFAIL);
	for_each_online_node(i) {
		crng = kmalloc_node(sizeof(struct crng_state),
				    GFP_KERNEL | __GFP_NOFAIL, i);
		spin_lock_init(&crng->lock);
		crng_initialize(crng);
		pool[i] = crng;
	}
	mb();
	if (cmpxchg(&crng_node_pool, NULL, pool)) {
		for_each_node(i)
			kfree(pool[i]);
		kfree(pool);
	}
}

static DECLARE_WORK(numa_crng_init_work, do_numa_crng_init);

static void numa_crng_init(void)
{
	schedule_work(&numa_crng_init_work);
}
#else
static void numa_crng_init(void) {}
#endif

/*
 * crng_fast_load() can be called by code in the interrupt service
 * path.  So we can't afford to dilly-dally.
 */
static int crng_fast_load(const char *cp, size_t len)
{
	unsigned long flags;
	unsigned char *p;

	if (!spin_trylock_irqsave(&primary_crng.lock, flags))
		return 0;
	if (crng_init != 0) {
		spin_unlock_irqrestore(&primary_crng.lock, flags);
		return 0;
	}
	p = (unsigned char *) &primary_crng.state[4];
	while (len > 0 && crng_init_cnt < CRNG_INIT_CNT_THRESH) {
		p[crng_init_cnt % CHACHA_KEY_SIZE] ^= *cp;
		cp++; crng_init_cnt++; len--;
	}
	spin_unlock_irqrestore(&primary_crng.lock, flags);
	if (crng_init_cnt >= CRNG_INIT_CNT_THRESH) {
		invalidate_batched_entropy();
		crng_init = 1;
		wake_up_interruptible(&crng_init_wait);
		pr_notice("random: fast init done\n");
	}
	return 1;
}

/*
 * crng_slow_load() is called by add_device_randomness, which has two
 * attributes.  (1) We can't trust that the buffer passed to it is
 * unpredictable (so it might not have any entropy at all), and (2) it
 * doesn't have the performance constraints of crng_fast_load().
 *
 * So we do something more comprehensive which is guaranteed to touch
 * all of the primary_crng's state, and which uses a LFSR with a
 * period of 255 as part of the mixing algorithm.  Finally, we do
 * *not* advance crng_init_cnt, since the buffer we get may be
 * something like a fixed DMI table (for example), which may well be
 * unique to the machine, but is otherwise unvarying.
 */
static int crng_slow_load(const char *cp, size_t len)
{
	unsigned long		flags;
	static unsigned char	lfsr = 1;
	unsigned char		tmp;
	unsigned		i, max = CHACHA_KEY_SIZE;
	const char *		src_buf = cp;
	char *			dest_buf = (char *) &primary_crng.state[4];

	if (!spin_trylock_irqsave(&primary_crng.lock, flags))
		return 0;
	if (crng_init != 0) {
		spin_unlock_irqrestore(&primary_crng.lock, flags);
		return 0;
	}
	if (len > max)
		max = len;

	for (i = 0; i < max ; i++) {
		tmp = lfsr;
		lfsr >>= 1;
		if (tmp & 1)
			lfsr ^= 0xE1;
		tmp = dest_buf[i % CHACHA_KEY_SIZE];
		dest_buf[i % CHACHA_KEY_SIZE] ^= src_buf[i % len] ^ lfsr;
		lfsr += (tmp << 3) | (tmp >> 5);
	}
	spin_unlock_irqrestore(&primary_crng.lock, flags);
	return 1;
}

static void crng_reseed(struct crng_state *crng, struct entropy_store *r)
{
	unsigned long	flags;
	int		i, num;
	union {
		__u8	block[CHACHA_BLOCK_SIZE];
		__u32	key[8];
	} buf;

	if (r) {
		num = extract_entropy(r, &buf, 32, 16, 0);
		if (num == 0)
			return;
	} else {
		_extract_crng(&primary_crng, buf.block);
		_crng_backtrack_protect(&primary_crng, buf.block,
					CHACHA_KEY_SIZE);
	}
	spin_lock_irqsave(&crng->lock, flags);
	for (i = 0; i < 8; i++) {
		unsigned long	rv;
		if (!arch_get_random_seed_long(&rv) &&
		    !arch_get_random_long(&rv))
			rv = random_get_entropy();
		crng->state[i+4] ^= buf.key[i] ^ rv;
	}
	memzero_explicit(&buf, sizeof(buf));
	crng->init_time = jiffies;
	spin_unlock_irqrestore(&crng->lock, flags);
	if (crng == &primary_crng && crng_init < 2) {
		invalidate_batched_entropy();
		numa_crng_init();
		crng_init = 2;
		process_random_ready_list();
		wake_up_interruptible(&crng_init_wait);
		pr_notice("random: crng init done\n");
		if (unseeded_warning.missed) {
			pr_notice("random: %d get_random_xx warning(s) missed due to ratelimiting\n",
				  unseeded_warning.missed);
			unseeded_warning.missed = 0;
		}
		if (urandom_warning.missed) {
			pr_notice("random: %d urandom warning(s) missed due to ratelimiting\n",
				  urandom_warning.missed);
			urandom_warning.missed = 0;
		}
	}
}

static void _extract_crng(struct crng_state *crng,
			  __u8 out[CHACHA_BLOCK_SIZE])
{
	unsigned long v, flags;

	if (crng_ready() &&
	    (time_after(crng_global_init_time, crng->init_time) ||
	     time_after(jiffies, crng->init_time + CRNG_RESEED_INTERVAL)))
		crng_reseed(crng, crng == &primary_crng ? &input_pool : NULL);
	spin_lock_irqsave(&crng->lock, flags);
	if (arch_get_random_long(&v))
		crng->state[14] ^= v;
	chacha20_block(&crng->state[0], out);
	if (crng->state[12] == 0)
		crng->state[13]++;
	spin_unlock_irqrestore(&crng->lock, flags);
}

static void extract_crng(__u8 out[CHACHA_BLOCK_SIZE])
{
	struct crng_state *crng = NULL;

#ifdef CONFIG_NUMA
	if (crng_node_pool)
		crng = crng_node_pool[numa_node_id()];
	if (crng == NULL)
#endif
		crng = &primary_crng;
	_extract_crng(crng, out);
}

/*
 * Use the leftover bytes from the CRNG block output (if there is
 * enough) to mutate the CRNG key to provide backtracking protection.
 */
static void _crng_backtrack_protect(struct crng_state *crng,
				    __u8 tmp[CHACHA_BLOCK_SIZE], int used)
{
	unsigned long	flags;
	__u32		*s, *d;
	int		i;

	used = round_up(used, sizeof(__u32));
	if (used + CHACHA_KEY_SIZE > CHACHA_BLOCK_SIZE) {
		extract_crng(tmp);
		used = 0;
	}
	spin_lock_irqsave(&crng->lock, flags);
	s = (__u32 *) &tmp[used];
	d = &crng->state[4];
	for (i = 0; i < 8; i++)
		*d++ ^= *s++;
	spin_unlock_irqrestore(&crng->lock, flags);
}

static void crng_backtrack_protect(__u8 tmp[CHACHA_BLOCK_SIZE], int used)
{
	struct crng_state *crng = NULL;

#ifdef CONFIG_NUMA
	if (crng_node_pool)
		crng = crng_node_pool[numa_node_id()];
	if (crng == NULL)
#endif
		crng = &primary_crng;
	_crng_backtrack_protect(crng, tmp, used);
}

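/*
 * Pump CRNG output to a userspace buffer, one ChaCha block at a time,
 * honoring reschedule and pending-signal checks on large (> 256 byte)
 * reads, then feed the leftover block bytes back for backtrack
 * protection.
 */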
static ssize_t extract_crng_user(void __user *buf, size_t nbytes)
{
	ssize_t ret = 0, i = CHACHA_BLOCK_SIZE;
	__u8 tmp[CHACHA_BLOCK_SIZE] __aligned(4);
	int large_request = (nbytes > 256);

	while (nbytes) {
		if (large_request && need_resched()) {
			if (signal_pending(current)) {
				if (ret == 0)
					ret = -ERESTARTSYS;
				break;
			}
			schedule();
		}

		extract_crng(tmp);
		i = min_t(int, nbytes, CHACHA_BLOCK_SIZE);
		if (copy_to_user(buf, tmp, i)) {
			ret = -EFAULT;
			break;
		}

		nbytes -= i;
		buf += i;
		ret += i;
	}
	crng_backtrack_protect(tmp, i);

	/* Wipe data just written to memory */
	memzero_explicit(tmp, sizeof(tmp));

	return ret;
}


/*********************************************************************
 *
 * Entropy input management
 *
 *********************************************************************/

/* There is one of these per entropy source */
struct timer_rand_state {
	cycles_t last_time;
	long last_delta, last_delta2;
};

#define INIT_TIMER_RAND_STATE { INITIAL_JIFFIES, }

/*
 * Add device- or boot-specific data to the input pool to help
 * initialize it.
 *
 * None of this adds any entropy; it is meant to avoid the problem of
 * the entropy pool having similar initial state across largely
 * identical devices.
 */
void add_device_randomness(const void *buf, unsigned int size)
{
	unsigned long time = random_get_entropy() ^ jiffies;
	unsigned long flags;

	if (!crng_ready() && size)
		crng_slow_load(buf, size);

	trace_add_device_randomness(size, _RET_IP_);
	spin_lock_irqsave(&input_pool.lock, flags);
	_mix_pool_bytes(&input_pool, buf, size);
	_mix_pool_bytes(&input_pool, &time, sizeof(time));
	spin_unlock_irqrestore(&input_pool.lock, flags);
}
EXPORT_SYMBOL(add_device_randomness);

static struct timer_rand_state input_timer_state = INIT_TIMER_RAND_STATE;

/*
 * This function adds entropy to the entropy "pool" by using timing
 * delays.  It uses the timer_rand_state structure to make an estimate
 * of how many bits of entropy this call has added to the pool.
 *
 * The number "num" is also added to the pool - it should somehow describe
 * the type of event which just happened.  This is currently 0-255 for
 * keyboard scan codes, and 256 upwards for interrupts.
 */
static void add_timer_randomness(struct timer_rand_state *state, unsigned num)
{
	struct entropy_store	*r;
	struct {
		long jiffies;
		unsigned cycles;
		unsigned num;
	} sample;
	long delta, delta2, delta3;

	sample.jiffies = jiffies;
	sample.cycles = random_get_entropy();
	sample.num = num;
	r = &input_pool;
	mix_pool_bytes(r, &sample, sizeof(sample));

	/*
	 * Calculate number of bits of randomness we probably added.
	 * We take into account the first, second and third-order deltas
	 * in order to make our estimate.
	 */
	delta = sample.jiffies - state->last_time;
	state->last_time = sample.jiffies;

	delta2 = delta - state->last_delta;
	state->last_delta = delta;

	delta3 = delta2 - state->last_delta2;
	state->last_delta2 = delta2;

	if (delta < 0)
		delta = -delta;
	if (delta2 < 0)
		delta2 = -delta2;
	if (delta3 < 0)
		delta3 = -delta3;
	if (delta > delta2)
		delta = delta2;
	if (delta > delta3)
		delta = delta3;

	/*
	 * delta is now minimum absolute delta.
	 * Round down by 1 bit on general principles,
	 * and limit the entropy estimate to 11 bits
	 * (the cap applied by the min_t() below).
	 */
	credit_entropy_bits(r, min_t(int, fls(delta>>1), 11));
}

void add_input_randomness(unsigned int type, unsigned int code,
			  unsigned int value)
{
	static unsigned char last_value;

	/* ignore autorepeat and the like */
	if (value == last_value)
		return;

	last_value = value;
	add_timer_randomness(&input_timer_state,
			     (type << 4) ^ code ^ (code >> 4) ^ value);
	trace_add_input_randomness(ENTROPY_BITS(&input_pool));
}
EXPORT_SYMBOL_GPL(add_input_randomness);

static DEFINE_PER_CPU(struct fast_pool, irq_randomness);

#ifdef ADD_INTERRUPT_BENCH
static unsigned long avg_cycles, avg_deviation;

#define AVG_SHIFT 8	/* Exponential average factor k=1/256 */
#define FIXED_1_2 (1 << (AVG_SHIFT-1))

static void add_interrupt_bench(cycles_t start)
{
	long delta = random_get_entropy() - start;

	/* Use a weighted moving average */
	delta = delta - ((avg_cycles + FIXED_1_2) >> AVG_SHIFT);
	avg_cycles += delta;
	/* And average deviation */
	delta = abs(delta) - ((avg_deviation + FIXED_1_2) >> AVG_SHIFT);
	avg_deviation += delta;
}
#else
#define add_interrupt_bench(x)
#endif

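/*
 * Return the next word of the interrupted context's saved register
 * state, cycling through pt_regs; used as a fallback source of timing
 * jitter when no cycle counter is available.
 */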
1294 static __u32 get_reg(struct fast_pool *f, struct pt_regs *regs)
1295 {
1296 __u32 *ptr = (__u32 *) regs;
1297 unsigned int idx;
1298
1299 if (regs == NULL)
1300 return 0;
1301 idx = READ_ONCE(f->reg_idx);
1302 if (idx >= sizeof(struct pt_regs) / sizeof(__u32))
1303 idx = 0;
1304 ptr += idx++;
1305 WRITE_ONCE(f->reg_idx, idx);
1306 return *ptr;
1307 }
1308
1309 void add_interrupt_randomness(int irq, int irq_flags)
1310 {
1311 struct entropy_store *r;
1312 struct fast_pool *fast_pool = this_cpu_ptr(&irq_randomness);
1313 struct pt_regs *regs = get_irq_regs();
1314 unsigned long now = jiffies;
1315 cycles_t cycles = random_get_entropy();
1316 __u32 c_high, j_high;
1317 __u64 ip;
1318 unsigned long seed;
1319 int credit = 0;
1320
1321 if (cycles == 0)
1322 cycles = get_reg(fast_pool, regs);
1323 c_high = (sizeof(cycles) > 4) ? cycles >> 32 : 0;
1324 j_high = (sizeof(now) > 4) ? now >> 32 : 0;
1325 fast_pool->pool[0] ^= cycles ^ j_high ^ irq;
1326 fast_pool->pool[1] ^= now ^ c_high;
1327 ip = regs ? instruction_pointer(regs) : _RET_IP_;
1328 fast_pool->pool[2] ^= ip;
1329 fast_pool->pool[3] ^= (sizeof(ip) > 4) ? ip >> 32 :
1330 get_reg(fast_pool, regs);
1331
1332 fast_mix(fast_pool);
1333 add_interrupt_bench(cycles);
1334
1335 if (unlikely(crng_init == 0)) {
1336 if ((fast_pool->count >= 64) &&
1337 crng_fast_load((char *) fast_pool->pool,
1338 sizeof(fast_pool->pool))) {
1339 fast_pool->count = 0;
1340 fast_pool->last = now;
1341 }
1342 return;
1343 }
1344
1345 if ((fast_pool->count < 64) &&
1346 !time_after(now, fast_pool->last + HZ))
1347 return;
1348
1349 r = &input_pool;
1350 if (!spin_trylock(&r->lock))
1351 return;
1352
1353 fast_pool->last = now;
1354 __mix_pool_bytes(r, &fast_pool->pool, sizeof(fast_pool->pool));
1355
1356 /*
1357 * If we have architectural seed generator, produce a seed and
1358 * add it to the pool. For the sake of paranoia don't let the
1359 * architectural seed generator dominate the input from the
1360 * interrupt noise.
1361 */
1362 if (arch_get_random_seed_long(&seed)) {
1363 __mix_pool_bytes(r, &seed, sizeof(seed));
1364 credit = 1;
1365 }
1366 spin_unlock(&r->lock);
1367
1368 fast_pool->count = 0;
1369
1370 /* award one bit for the contents of the fast pool */
1371 credit_entropy_bits(r, credit + 1);
1372 }
1373 EXPORT_SYMBOL_GPL(add_interrupt_randomness);
1374
1375 #ifdef CONFIG_BLOCK
1376 void add_disk_randomness(struct gendisk *disk)
1377 {
1378 if (!disk || !disk->random)
1379 return;
1380 /* first major is 1, so we get >= 0x200 here */
1381 add_timer_randomness(disk->random, 0x100 + disk_devt(disk));
1382 trace_add_disk_randomness(disk_devt(disk), ENTROPY_BITS(&input_pool));
1383 }
1384 EXPORT_SYMBOL_GPL(add_disk_randomness);
1385 #endif
1386
1387 /*********************************************************************
1388 *
1389 * Entropy extraction routines
1390 *
1391 *********************************************************************/
1392
1393 /*
1394 * This utility inline function is responsible for transferring entropy
1395 * from the primary pool to the secondary extraction pool. We make
1396 * sure we pull enough for a 'catastrophic reseed'.
1397 */
1398 static void _xfer_secondary_pool(struct entropy_store *r, size_t nbytes);
1399 static void xfer_secondary_pool(struct entropy_store *r, size_t nbytes)
1400 {
1401 if (!r->pull ||
1402 r->entropy_count >= (nbytes << (ENTROPY_SHIFT + 3)) ||
1403 r->entropy_count > r->poolinfo->poolfracbits)
1404 return;
1405
1406 _xfer_secondary_pool(r, nbytes);
1407 }
1408
1409 static void _xfer_secondary_pool(struct entropy_store *r, size_t nbytes)
1410 {
1411 __u32 tmp[OUTPUT_POOL_WORDS];
1412
1413 int bytes = nbytes;
1414
1415 /* pull at least as much as a wakeup */
1416 bytes = max_t(int, bytes, random_read_wakeup_bits / 8);
1417 /* but never more than the buffer size */
1418 bytes = min_t(int, bytes, sizeof(tmp));
1419
1420 trace_xfer_secondary_pool(r->name, bytes * 8, nbytes * 8,
1421 ENTROPY_BITS(r), ENTROPY_BITS(r->pull));
1422 bytes = extract_entropy(r->pull, tmp, bytes,
1423 random_read_wakeup_bits / 8, 0);
1424 mix_pool_bytes(r, tmp, bytes);
1425 credit_entropy_bits(r, bytes*8);
1426 }
1427
1428 /*
1429 * Used as a workqueue function so that when the input pool is getting
1430 * full, we can "spill over" some entropy to the output pools. That
1431 * way the output pools can store some of the excess entropy instead
1432 * of letting it go to waste.
1433 */
1434 static void push_to_pool(struct work_struct *work)
1435 {
1436 struct entropy_store *r = container_of(work, struct entropy_store,
1437 push_work);
1438 BUG_ON(!r);
1439 _xfer_secondary_pool(r, random_read_wakeup_bits/8);
1440 trace_push_to_pool(r->name, r->entropy_count >> ENTROPY_SHIFT,
1441 r->pull->entropy_count >> ENTROPY_SHIFT);
1442 }
1443
1444 /*
1445 * This function decides how many bytes to actually take from the
1446 * given pool, and also debits the entropy count accordingly.
1447 */
1448 static size_t account(struct entropy_store *r, size_t nbytes, int min,
1449 int reserved)
1450 {
1451 int entropy_count, orig, have_bytes;
1452 size_t ibytes, nfrac;
1453
1454 BUG_ON(r->entropy_count > r->poolinfo->poolfracbits);
1455
1456 /* Can we pull enough? */
1457 retry:
1458 entropy_count = orig = READ_ONCE(r->entropy_count);
1459 ibytes = nbytes;
1460 /* never pull more than available */
1461 have_bytes = entropy_count >> (ENTROPY_SHIFT + 3);
1462
1463 if ((have_bytes -= reserved) < 0)
1464 have_bytes = 0;
1465 ibytes = min_t(size_t, ibytes, have_bytes);
1466 if (ibytes < min)
1467 ibytes = 0;
1468
1469 if (unlikely(entropy_count < 0)) {
1470 pr_warn("random: negative entropy count: pool %s count %d\n",
1471 r->name, entropy_count);
1472 WARN_ON(1);
1473 entropy_count = 0;
1474 }
1475 nfrac = ibytes << (ENTROPY_SHIFT + 3);
1476 if ((size_t) entropy_count > nfrac)
1477 entropy_count -= nfrac;
1478 else
1479 entropy_count = 0;
1480
1481 if (cmpxchg(&r->entropy_count, orig, entropy_count) != orig)
1482 goto retry;
1483
1484 trace_debit_entropy(r->name, 8 * ibytes);
1485 if (ibytes &&
1486 (r->entropy_count >> ENTROPY_SHIFT) < random_write_wakeup_bits) {
1487 wake_up_interruptible(&random_write_wait);
1488 kill_fasync(&fasync, SIGIO, POLL_OUT);
1489 }
1490
1491 return ibytes;
1492 }
1493
1494 /*
1495 * This function does the actual extraction for extract_entropy and
1496 * extract_entropy_user.
1497 *
1498 * Note: we assume that .poolwords is a multiple of 16 words.
1499 */
1500 static void extract_buf(struct entropy_store *r, __u8 *out)
1501 {
1502 int i;
1503 union {
1504 __u32 w[5];
1505 unsigned long l[LONGS(20)];
1506 } hash;
1507 __u32 workspace[SHA_WORKSPACE_WORDS];
1508 unsigned long flags;
1509
1510 /*
1511 * If we have an architectural hardware random number
1512 * generator, use it for SHA's initial vector
1513 */
1514 sha_init(hash.w);
1515 for (i = 0; i < LONGS(20); i++) {
1516 unsigned long v;
1517 if (!arch_get_random_long(&v))
1518 break;
1519 hash.l[i] = v;
1520 }
1521
1522 /* Generate a hash across the pool, 16 words (512 bits) at a time */
1523 spin_lock_irqsave(&r->lock, flags);
1524 for (i = 0; i < r->poolinfo->poolwords; i += 16)
1525 sha_transform(hash.w, (__u8 *)(r->pool + i), workspace);
1526
1527 /*
1528 * We mix the hash back into the pool to prevent backtracking
1529 * attacks (where the attacker knows the state of the pool
1530 * plus the current outputs, and attempts to find previous
1531 * ouputs), unless the hash function can be inverted. By
1532 * mixing at least a SHA1 worth of hash data back, we make
1533 * brute-forcing the feedback as hard as brute-forcing the
1534 * hash.
1535 */
1536 __mix_pool_bytes(r, hash.w, sizeof(hash.w));
1537 spin_unlock_irqrestore(&r->lock, flags);
1538
1539 memzero_explicit(workspace, sizeof(workspace));
1540
1541 /*
1542 * In case the hash function has some recognizable output
1543 * pattern, we fold it in half. Thus, we always feed back
1544 * twice as much data as we output.
1545 */
1546 hash.w[0] ^= hash.w[3];
1547 hash.w[1] ^= hash.w[4];
1548 hash.w[2] ^= rol32(hash.w[2], 16);
1549
1550 memcpy(out, &hash, EXTRACT_SIZE);
1551 memzero_explicit(&hash, sizeof(hash));
1552 }
1553
1554 static ssize_t _extract_entropy(struct entropy_store *r, void *buf,
1555 size_t nbytes, int fips)
1556 {
1557 ssize_t ret = 0, i;
1558 __u8 tmp[EXTRACT_SIZE];
1559 unsigned long flags;
1560
1561 while (nbytes) {
1562 extract_buf(r, tmp);
1563
1564 if (fips) {
1565 spin_lock_irqsave(&r->lock, flags);
1566 if (!memcmp(tmp, r->last_data, EXTRACT_SIZE))
1567 panic("Hardware RNG duplicated output!\n");
1568 memcpy(r->last_data, tmp, EXTRACT_SIZE);
1569 spin_unlock_irqrestore(&r->lock, flags);
1570 }
1571 i = min_t(int, nbytes, EXTRACT_SIZE);
1572 memcpy(buf, tmp, i);
1573 nbytes -= i;
1574 buf += i;
1575 ret += i;
1576 }
1577
1578 /* Wipe data just returned from memory */
1579 memzero_explicit(tmp, sizeof(tmp));
1580
1581 return ret;
1582 }
1583
1584 /*
1585 * This function extracts randomness from the "entropy pool", and
1586 * returns it in a buffer.
1587 *
1588 * The min parameter specifies the minimum amount we can pull before
1589 * failing to avoid races that defeat catastrophic reseeding while the
1590 * reserved parameter indicates how much entropy we must leave in the
1591 * pool after each pull to avoid starving other readers.
1592 */
1593 static ssize_t extract_entropy(struct entropy_store *r, void *buf,
1594 size_t nbytes, int min, int reserved)
1595 {
1596 __u8 tmp[EXTRACT_SIZE];
1597 unsigned long flags;
1598
1599 /* if last_data isn't primed, we need EXTRACT_SIZE extra bytes */
1600 if (fips_enabled) {
1601 spin_lock_irqsave(&r->lock, flags);
1602 if (!r->last_data_init) {
1603 r->last_data_init = 1;
1604 spin_unlock_irqrestore(&r->lock, flags);
1605 trace_extract_entropy(r->name, EXTRACT_SIZE,
1606 ENTROPY_BITS(r), _RET_IP_);
1607 xfer_secondary_pool(r, EXTRACT_SIZE);
1608 extract_buf(r, tmp);
1609 spin_lock_irqsave(&r->lock, flags);
1610 memcpy(r->last_data, tmp, EXTRACT_SIZE);
1611 }
1612 spin_unlock_irqrestore(&r->lock, flags);
1613 }
1614
1615 trace_extract_entropy(r->name, nbytes, ENTROPY_BITS(r), _RET_IP_);
1616 xfer_secondary_pool(r, nbytes);
1617 nbytes = account(r, nbytes, min, reserved);
1618
1619 return _extract_entropy(r, buf, nbytes, fips_enabled);
1620 }
1621
1622 /*
1623 * This function extracts randomness from the "entropy pool", and
1624 * returns it in a userspace buffer.
1625 */
1626 static ssize_t extract_entropy_user(struct entropy_store *r, void __user *buf,
1627 size_t nbytes)
1628 {
1629 ssize_t ret = 0, i;
1630 __u8 tmp[EXTRACT_SIZE];
1631 int large_request = (nbytes > 256);
1632
1633 trace_extract_entropy_user(r->name, nbytes, ENTROPY_BITS(r), _RET_IP_);
1634 if (!r->initialized && r->pull) {
1635 xfer_secondary_pool(r, ENTROPY_BITS(r->pull)/8);
1636 if (!r->initialized)
1637 return 0;
1638 }
1639 xfer_secondary_pool(r, nbytes);
1640 nbytes = account(r, nbytes, 0, 0);
1641
1642 while (nbytes) {
1643 if (large_request && need_resched()) {
1644 if (signal_pending(current)) {
1645 if (ret == 0)
1646 ret = -ERESTARTSYS;
1647 break;
1648 }
1649 schedule();
1650 }
1651
1652 extract_buf(r, tmp);
1653 i = min_t(int, nbytes, EXTRACT_SIZE);
1654 if (copy_to_user(buf, tmp, i)) {
1655 ret = -EFAULT;
1656 break;
1657 }
1658
1659 nbytes -= i;
1660 buf += i;
1661 ret += i;
1662 }
1663
1664 /* Wipe data just returned from memory */
1665 memzero_explicit(tmp, sizeof(tmp));
1666
1667 return ret;
1668 }
1669
1670 #define warn_unseeded_randomness(previous) \
1671 _warn_unseeded_randomness(__func__, (void *) _RET_IP_, (previous))
1672
1673 static void _warn_unseeded_randomness(const char *func_name, void *caller,
1674 void **previous)
1675 {
1676 #ifdef CONFIG_WARN_ALL_UNSEEDED_RANDOM
1677 const bool print_once = false;
1678 #else
1679 static bool print_once __read_mostly;
1680 #endif
1681
1682 if (print_once ||
1683 crng_ready() ||
1684 (previous && (caller == READ_ONCE(*previous))))
1685 return;
1686 WRITE_ONCE(*previous, caller);
1687 #ifndef CONFIG_WARN_ALL_UNSEEDED_RANDOM
1688 print_once = true;
1689 #endif
1690 if (__ratelimit(&unseeded_warning))
1691 pr_notice("random: %s called from %pS with crng_init=%d\n",
1692 func_name, caller, crng_init);
1693 }
1694
1695 /*
1696 * This function is the exported kernel interface. It returns some
1697 * number of good random numbers, suitable for key generation, seeding
1698 * TCP sequence numbers, etc. It does not rely on the hardware random
1699 * number generator. For random bytes direct from the hardware RNG
1700 * (when available), use get_random_bytes_arch(). In order to ensure
1701 * that the randomness provided by this function is okay, the function
1702 * wait_for_random_bytes() should be called and return 0 at least once
1703 * at any point prior.
1704 */
1705 static void _get_random_bytes(void *buf, int nbytes)
1706 {
1707 __u8 tmp[CHACHA_BLOCK_SIZE] __aligned(4);
1708
1709 trace_get_random_bytes(nbytes, _RET_IP_);
1710
1711 while (nbytes >= CHACHA_BLOCK_SIZE) {
1712 extract_crng(buf);
1713 buf += CHACHA_BLOCK_SIZE;
1714 nbytes -= CHACHA_BLOCK_SIZE;
1715 }
1716
1717 if (nbytes > 0) {
1718 extract_crng(tmp);
1719 memcpy(buf, tmp, nbytes);
1720 crng_backtrack_protect(tmp, nbytes);
1721 } else
1722 crng_backtrack_protect(tmp, CHACHA_BLOCK_SIZE);
1723 memzero_explicit(tmp, sizeof(tmp));
1724 }
1725
1726 void get_random_bytes(void *buf, int nbytes)
1727 {
1728 static void *previous;
1729
1730 warn_unseeded_randomness(&previous);
1731 _get_random_bytes(buf, nbytes);
1732 }
1733 EXPORT_SYMBOL(get_random_bytes);
1734
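/*
 * A hedged sketch of typical use: a caller that needs key material
 * waits for the CRNG first (the 32-byte key length is illustrative):
 *
 *	u8 key[32];
 *	int err;
 *
 *	err = wait_for_random_bytes();	// may return -ERESTARTSYS
 *	if (err)
 *		return err;
 *	get_random_bytes(key, sizeof(key));
 */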
1735
1736 /*
1737 * Each time the timer fires, we expect that we got an unpredictable
1738 * jump in the cycle counter. Even if the timer is running on another
1739 * CPU, the timer activity will be touching the stack of the CPU that is
1740 * generating entropy..
1741 *
1742 * Note that we don't re-arm the timer in the timer itself - we are
1743 * happy to be scheduled away, since that just makes the load more
1744 * complex, but we do not want the timer to keep ticking unless the
1745 * entropy loop is running.
1746 *
1747 * So the re-arming always happens in the entropy loop itself.
1748 */
1749 static void entropy_timer(struct timer_list *t)
1750 {
1751 credit_entropy_bits(&input_pool, 1);
1752 }
1753
1754 /*
1755 * If we have an actual cycle counter, see if we can
1756 * generate enough entropy with timing noise
1757 */
1758 static void try_to_generate_entropy(void)
1759 {
1760 struct {
1761 unsigned long now;
1762 struct timer_list timer;
1763 } stack;
1764
1765 stack.now = random_get_entropy();
1766
1767 /* Slow counter - or none. Don't even bother */
1768 if (stack.now == random_get_entropy())
1769 return;
1770
1771 timer_setup_on_stack(&stack.timer, entropy_timer, 0);
1772 while (!crng_ready()) {
1773 if (!timer_pending(&stack.timer))
1774 mod_timer(&stack.timer, jiffies+1);
1775 mix_pool_bytes(&input_pool, &stack.now, sizeof(stack.now));
1776 schedule();
1777 stack.now = random_get_entropy();
1778 }
1779
1780 del_timer_sync(&stack.timer);
1781 destroy_timer_on_stack(&stack.timer);
1782 mix_pool_bytes(&input_pool, &stack.now, sizeof(stack.now));
1783 }
1784
1785 /*
1786 * Wait for the urandom pool to be seeded and thus guaranteed to supply
1787 * cryptographically secure random numbers. This applies to: the /dev/urandom
1788 * device, the get_random_bytes function, and the get_random_{u32,u64,int,long}
1789 * family of functions. Using any of these functions without first calling
1790 * this function forfeits the guarantee of security.
1791 *
1792 * Returns: 0 if the urandom pool has been seeded.
1793 * -ERESTARTSYS if the function was interrupted by a signal.
1794 */
1795 int wait_for_random_bytes(void)
1796 {
1797 if (likely(crng_ready()))
1798 return 0;
1799
1800 do {
1801 int ret;
1802 ret = wait_event_interruptible_timeout(crng_init_wait, crng_ready(), HZ);
1803 if (ret)
1804 return ret > 0 ? 0 : ret;
1805
1806 try_to_generate_entropy();
1807 } while (!crng_ready());
1808
1809 return 0;
1810 }
1811 EXPORT_SYMBOL(wait_for_random_bytes);
1812
1813 /*
1814 * Returns whether or not the urandom pool has been seeded and thus guaranteed
1815 * to supply cryptographically secure random numbers. This applies to: the
1816 * /dev/urandom device, the get_random_bytes function, and the get_random_{u32,
1817 * u64,int,long} family of functions.
1818 *
1819 * Returns: true if the urandom pool has been seeded.
1820 * false if the urandom pool has not been seeded.
1821 */
1822 bool rng_is_initialized(void)
1823 {
1824 return crng_ready();
1825 }
1826 EXPORT_SYMBOL(rng_is_initialized);
1827
1828 /*
1829 * Add a callback function that will be invoked when the nonblocking
1830 * pool is initialised.
1831 *
1832 * returns: 0 if callback is successfully added
1833 * -EALREADY if pool is already initialised (callback not called)
1834 * -ENOENT if module for callback is not alive
1835 */
1836 int add_random_ready_callback(struct random_ready_callback *rdy)
1837 {
1838 struct module *owner;
1839 unsigned long flags;
1840 int err = -EALREADY;
1841
1842 if (crng_ready())
1843 return err;
1844
1845 owner = rdy->owner;
1846 if (!try_module_get(owner))
1847 return -ENOENT;
1848
1849 spin_lock_irqsave(&random_ready_list_lock, flags);
1850 if (crng_ready())
1851 goto out;
1852
1853 owner = NULL;
1854
1855 list_add(&rdy->list, &random_ready_list);
1856 err = 0;
1857
1858 out:
1859 spin_unlock_irqrestore(&random_ready_list_lock, flags);
1860
1861 module_put(owner);
1862
1863 return err;
1864 }
1865 EXPORT_SYMBOL(add_random_ready_callback);
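
/*
 * A sketch of how a module might use this interface; the callback name
 * and body are illustrative:
 *
 *	static void my_seed_ready(struct random_ready_callback *rdy)
 *	{
 *		// CRNG is seeded; safe to draw long-term keys now.
 *	}
 *
 *	static struct random_ready_callback my_rdy = {
 *		.owner = THIS_MODULE,
 *		.func  = my_seed_ready,
 *	};
 *
 *	int err = add_random_ready_callback(&my_rdy);
 *
 *	if (err == -EALREADY)
 *		my_seed_ready(&my_rdy);	// pool was already initialised
 *	else if (err)
 *		return err;
 */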
1866
1867 /*
1868 * Delete a previously registered readiness callback function.
1869 */
1870 void del_random_ready_callback(struct random_ready_callback *rdy)
1871 {
1872 unsigned long flags;
1873 struct module *owner = NULL;
1874
1875 spin_lock_irqsave(&random_ready_list_lock, flags);
1876 if (!list_empty(&rdy->list)) {
1877 list_del_init(&rdy->list);
1878 owner = rdy->owner;
1879 }
1880 spin_unlock_irqrestore(&random_ready_list_lock, flags);
1881
1882 module_put(owner);
1883 }
1884 EXPORT_SYMBOL(del_random_ready_callback);
1885
1886 /*
1887 * This function will use the architecture-specific hardware random
1888 * number generator if it is available. The arch-specific hw RNG will
1889 * almost certainly be faster than what we can do in software, but it
1890 * is impossible to verify that it is implemented securely (as
1891 * opposed to, say, the AES encryption of a sequence number using a
1892 * key known by the NSA). So it's useful if we need the speed, but
1893 * only if we're willing to trust the hardware manufacturer not to
1894 * have put in a back door.
1895 *
1896 * Return number of bytes filled in.
1897 */
1898 int __must_check get_random_bytes_arch(void *buf, int nbytes)
1899 {
1900 int left = nbytes;
1901 char *p = buf;
1902
1903 trace_get_random_bytes_arch(left, _RET_IP_);
1904 while (left) {
1905 unsigned long v;
1906 int chunk = min_t(int, left, sizeof(unsigned long));
1907
1908 if (!arch_get_random_long(&v))
1909 break;
1910
1911 memcpy(p, &v, chunk);
1912 p += chunk;
1913 left -= chunk;
1914 }
1915
1916 return nbytes - left;
1917 }
1918 EXPORT_SYMBOL(get_random_bytes_arch);
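
/*
 * Because the return value may be short (or zero) when no arch RNG is
 * available, callers need a software fallback; a minimal sketch:
 *
 *	u8 seed[16];
 *
 *	if (get_random_bytes_arch(seed, sizeof(seed)) != sizeof(seed))
 *		get_random_bytes(seed, sizeof(seed));
 */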
1919
1920 /*
1921 * init_std_data - initialize pool with system data
1922 *
1923 * @r: pool to initialize
1924 *
1925 * This function clears the pool's entropy count and mixes some system
1926 * data into the pool to prepare it for use. The pool is not cleared
1927 * as that can only decrease the entropy in the pool.
1928 */
1929 static void __init init_std_data(struct entropy_store *r)
1930 {
1931 int i;
1932 ktime_t now = ktime_get_real();
1933 unsigned long rv;
1934
1935 r->last_pulled = jiffies;
1936 mix_pool_bytes(r, &now, sizeof(now));
1937 for (i = r->poolinfo->poolbytes; i > 0; i -= sizeof(rv)) {
1938 if (!arch_get_random_seed_long(&rv) &&
1939 !arch_get_random_long(&rv))
1940 rv = random_get_entropy();
1941 mix_pool_bytes(r, &rv, sizeof(rv));
1942 }
1943 mix_pool_bytes(r, utsname(), sizeof(*(utsname())));
1944 }
1945
1946 /*
1947 * Note that setup_arch() may call add_device_randomness()
1948 * long before we get here. This allows seeding of the pools
1949 * with some platform dependent data very early in the boot
1950 * process. But it limits our options here. We must use
1951 * statically allocated structures that already have all
1952 * initializations complete at compile time. We should also
1953 * take care not to overwrite the precious per platform data
1954 * we were given.
1955 */
1956 int __init rand_initialize(void)
1957 {
1958 init_std_data(&input_pool);
1959 init_std_data(&blocking_pool);
1960 crng_initialize(&primary_crng);
1961 crng_global_init_time = jiffies;
1962 if (ratelimit_disable) {
1963 urandom_warning.interval = 0;
1964 unseeded_warning.interval = 0;
1965 }
1966 return 0;
1967 }
1968
1969 #ifdef CONFIG_BLOCK
1970 void rand_initialize_disk(struct gendisk *disk)
1971 {
1972 struct timer_rand_state *state;
1973
1974 /*
1975 * If kzalloc returns null, we just won't use that entropy
1976 * source.
1977 */
1978 state = kzalloc(sizeof(struct timer_rand_state), GFP_KERNEL);
1979 if (state) {
1980 state->last_time = INITIAL_JIFFIES;
1981 disk->random = state;
1982 }
1983 }
1984 #endif
1985
1986 static ssize_t
1987 _random_read(int nonblock, char __user *buf, size_t nbytes)
1988 {
1989 ssize_t n;
1990
1991 if (nbytes == 0)
1992 return 0;
1993
1994 nbytes = min_t(size_t, nbytes, SEC_XFER_SIZE);
1995 while (1) {
1996 n = extract_entropy_user(&blocking_pool, buf, nbytes);
1997 if (n < 0)
1998 return n;
1999 trace_random_read(n*8, (nbytes-n)*8,
2000 ENTROPY_BITS(&blocking_pool),
2001 ENTROPY_BITS(&input_pool));
2002 if (n > 0)
2003 return n;
2004
2005 /* Pool is (near) empty. Maybe wait and retry. */
2006 if (nonblock)
2007 return -EAGAIN;
2008
2009 wait_event_interruptible(random_read_wait,
2010 blocking_pool.initialized &&
2011 (ENTROPY_BITS(&input_pool) >= random_read_wakeup_bits));
2012 if (signal_pending(current))
2013 return -ERESTARTSYS;
2014 }
2015 }
2016
2017 static ssize_t
2018 random_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos)
2019 {
2020 return _random_read(file->f_flags & O_NONBLOCK, buf, nbytes);
2021 }
2022
2023 static ssize_t
2024 urandom_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos)
2025 {
2026 unsigned long flags;
2027 static int maxwarn = 10;
2028 int ret;
2029
2030 if (!crng_ready() && maxwarn > 0) {
2031 maxwarn--;
2032 if (__ratelimit(&urandom_warning))
2033 pr_notice("random: %s: uninitialized urandom read (%zd bytes read)\n",
2034 current->comm,
2035 nbytes);
2036 spin_lock_irqsave(&primary_crng.lock, flags);
2037 crng_init_cnt = 0;
2038 spin_unlock_irqrestore(&primary_crng.lock, flags);
2039 }
2040 nbytes = min_t(size_t, nbytes, INT_MAX >> (ENTROPY_SHIFT + 3));
2041 ret = extract_crng_user(buf, nbytes);
2042 trace_urandom_read(8 * nbytes, 0, ENTROPY_BITS(&input_pool));
2043 return ret;
2044 }
2045
2046 static __poll_t
2047 random_poll(struct file *file, poll_table * wait)
2048 {
2049 __poll_t mask;
2050
2051 poll_wait(file, &random_read_wait, wait);
2052 poll_wait(file, &random_write_wait, wait);
2053 mask = 0;
2054 if (ENTROPY_BITS(&input_pool) >= random_read_wakeup_bits)
2055 mask |= EPOLLIN | EPOLLRDNORM;
2056 if (ENTROPY_BITS(&input_pool) < random_write_wakeup_bits)
2057 mask |= EPOLLOUT | EPOLLWRNORM;
2058 return mask;
2059 }
2060
2061 static int
2062 write_pool(struct entropy_store *r, const char __user *buffer, size_t count)
2063 {
2064 size_t bytes;
2065 __u32 t, buf[16];
2066 const char __user *p = buffer;
2067
2068 while (count > 0) {
2069 int b, i = 0;
2070
2071 bytes = min(count, sizeof(buf));
2072 if (copy_from_user(&buf, p, bytes))
2073 return -EFAULT;
2074
2075 for (b = bytes ; b > 0 ; b -= sizeof(__u32), i++) {
2076 if (!arch_get_random_int(&t))
2077 break;
2078 buf[i] ^= t;
2079 }
2080
2081 count -= bytes;
2082 p += bytes;
2083
2084 mix_pool_bytes(r, buf, bytes);
2085 cond_resched();
2086 }
2087
2088 return 0;
2089 }
2090
2091 static ssize_t random_write(struct file *file, const char __user *buffer,
2092 size_t count, loff_t *ppos)
2093 {
2094 ssize_t ret;
2095
2096 ret = write_pool(&input_pool, buffer, count);
2097 if (ret)
2098 return ret;
2099
2100 return (ssize_t)count;
2101 }
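
/*
 * Note that any user may write to /dev/random or /dev/urandom: the data
 * is mixed into the input pool, but no entropy is credited for it. A
 * hedged userspace sketch (app_state is an arbitrary caller buffer):
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	int fd = open("/dev/random", O_WRONLY);
 *
 *	if (fd >= 0) {
 *		write(fd, app_state, sizeof(app_state));  // mixed, not credited
 *		close(fd);
 *	}
 *
 * Crediting entropy as well requires the privileged RNDADDENTROPY ioctl
 * handled below.
 */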
2102
2103 static long random_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
2104 {
2105 int size, ent_count;
2106 int __user *p = (int __user *)arg;
2107 int retval;
2108
2109 switch (cmd) {
2110 case RNDGETENTCNT:
2111 /* inherently racy, no point locking */
2112 ent_count = ENTROPY_BITS(&input_pool);
2113 if (put_user(ent_count, p))
2114 return -EFAULT;
2115 return 0;
2116 case RNDADDTOENTCNT:
2117 if (!capable(CAP_SYS_ADMIN))
2118 return -EPERM;
2119 if (get_user(ent_count, p))
2120 return -EFAULT;
2121 return credit_entropy_bits_safe(&input_pool, ent_count);
2122 case RNDADDENTROPY:
2123 if (!capable(CAP_SYS_ADMIN))
2124 return -EPERM;
2125 if (get_user(ent_count, p++))
2126 return -EFAULT;
2127 if (ent_count < 0)
2128 return -EINVAL;
2129 if (get_user(size, p++))
2130 return -EFAULT;
2131 retval = write_pool(&input_pool, (const char __user *)p,
2132 size);
2133 if (retval < 0)
2134 return retval;
2135 return credit_entropy_bits_safe(&input_pool, ent_count);
2136 case RNDZAPENTCNT:
2137 case RNDCLEARPOOL:
2138 /*
2139 * Clear the entropy pool counters. We no longer clear
2140 * the entropy pool, as that's silly.
2141 */
2142 if (!capable(CAP_SYS_ADMIN))
2143 return -EPERM;
2144 input_pool.entropy_count = 0;
2145 blocking_pool.entropy_count = 0;
2146 return 0;
2147 case RNDRESEEDCRNG:
2148 if (!capable(CAP_SYS_ADMIN))
2149 return -EPERM;
2150 if (crng_init < 2)
2151 return -ENODATA;
2152 crng_reseed(&primary_crng, NULL);
2153 crng_global_init_time = jiffies - 1;
2154 return 0;
2155 default:
2156 return -EINVAL;
2157 }
2158 }
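
/*
 * A hedged userspace sketch of RNDADDENTROPY, the only interface that
 * both mixes data and credits entropy (requires CAP_SYS_ADMIN; the
 * sizes, the 256-bit credit, and the "gathered" source buffer are all
 * illustrative):
 *
 *	#include <fcntl.h>
 *	#include <string.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/random.h>
 *
 *	struct {
 *		struct rand_pool_info info;
 *		__u32 buf[8];		// 32 bytes of gathered entropy
 *	} req = {
 *		// entropy_count is in bits, buf_size in bytes
 *		.info = { .entropy_count = 256, .buf_size = 32 },
 *	};
 *	int fd = open("/dev/random", O_WRONLY);
 *
 *	memcpy(req.buf, gathered, 32);
 *	ioctl(fd, RNDADDENTROPY, &req.info);
 */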
2159
2160 static int random_fasync(int fd, struct file *filp, int on)
2161 {
2162 return fasync_helper(fd, filp, on, &fasync);
2163 }
2164
2165 const struct file_operations random_fops = {
2166 .read = random_read,
2167 .write = random_write,
2168 .poll = random_poll,
2169 .unlocked_ioctl = random_ioctl,
2170 .fasync = random_fasync,
2171 .llseek = noop_llseek,
2172 };
2173
2174 const struct file_operations urandom_fops = {
2175 .read = urandom_read,
2176 .write = random_write,
2177 .unlocked_ioctl = random_ioctl,
2178 .fasync = random_fasync,
2179 .llseek = noop_llseek,
2180 };
2181
2182 SYSCALL_DEFINE3(getrandom, char __user *, buf, size_t, count,
2183 unsigned int, flags)
2184 {
2185 int ret;
2186
2187 if (flags & ~(GRND_NONBLOCK|GRND_RANDOM))
2188 return -EINVAL;
2189
2190 if (count > INT_MAX)
2191 count = INT_MAX;
2192
2193 if (flags & GRND_RANDOM)
2194 return _random_read(flags & GRND_NONBLOCK, buf, count);
2195
2196 if (!crng_ready()) {
2197 if (flags & GRND_NONBLOCK)
2198 return -EAGAIN;
2199 ret = wait_for_random_bytes();
2200 if (unlikely(ret))
2201 return ret;
2202 }
2203 return urandom_read(NULL, buf, count, NULL);
2204 }
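
/*
 * Userspace reaches this syscall through the glibc (>= 2.25) wrapper;
 * a minimal sketch with an illustrative buffer size:
 *
 *	#include <sys/random.h>
 *
 *	unsigned char key[32];
 *
 *	// flags == 0: blocks only until the CRNG is seeded, never after
 *	if (getrandom(key, sizeof(key), 0) != (ssize_t)sizeof(key))
 *		return -1;	// error or short read
 */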
2205
2206 /********************************************************************
2207 *
2208 * Sysctl interface
2209 *
2210 ********************************************************************/
2211
2212 #ifdef CONFIG_SYSCTL
2213
2214 #include <linux/sysctl.h>
2215
2216 static int min_read_thresh = 8, min_write_thresh;
2217 static int max_read_thresh = OUTPUT_POOL_WORDS * 32;
2218 static int max_write_thresh = INPUT_POOL_WORDS * 32;
2219 static int random_min_urandom_seed = 60;
2220 static char sysctl_bootid[16];
2221
2222 /*
2223 * This function is used to return both the bootid UUID, and random
2224 * UUID. The difference is in whether table->data is NULL; if it is,
2225 * then a new UUID is generated and returned to the user.
2226 *
2227 * If the user accesses this via the proc interface, the UUID will be
2228 * returned as an ASCII string in the standard UUID format; if via the
2229 * sysctl system call, as 16 bytes of binary data.
2230 */
2231 static int proc_do_uuid(struct ctl_table *table, int write,
2232 void __user *buffer, size_t *lenp, loff_t *ppos)
2233 {
2234 struct ctl_table fake_table;
2235 unsigned char buf[64], tmp_uuid[16], *uuid;
2236
2237 uuid = table->data;
2238 if (!uuid) {
2239 uuid = tmp_uuid;
2240 generate_random_uuid(uuid);
2241 } else {
2242 static DEFINE_SPINLOCK(bootid_spinlock);
2243
2244 spin_lock(&bootid_spinlock);
2245 if (!uuid[8])
2246 generate_random_uuid(uuid);
2247 spin_unlock(&bootid_spinlock);
2248 }
2249
2250 sprintf(buf, "%pU", uuid);
2251
2252 fake_table.data = buf;
2253 fake_table.maxlen = sizeof(buf);
2254
2255 return proc_dostring(&fake_table, write, buffer, lenp, ppos);
2256 }
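
/*
 * Seen from userspace: boot_id is generated once per boot, while uuid
 * yields a fresh value on every read. A small sketch:
 *
 *	#include <stdio.h>
 *
 *	char uuid[37];
 *	FILE *f = fopen("/proc/sys/kernel/random/uuid", "r");
 *
 *	if (f && fscanf(f, "%36s", uuid) == 1)
 *		printf("fresh UUID: %s\n", uuid);
 */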
2257
2258 /*
2259 * Return entropy available scaled to integral bits
2260 */
2261 static int proc_do_entropy(struct ctl_table *table, int write,
2262 void __user *buffer, size_t *lenp, loff_t *ppos)
2263 {
2264 struct ctl_table fake_table;
2265 int entropy_count;
2266
2267 entropy_count = *(int *)table->data >> ENTROPY_SHIFT;
2268
2269 fake_table.data = &entropy_count;
2270 fake_table.maxlen = sizeof(entropy_count);
2271
2272 return proc_dointvec(&fake_table, write, buffer, lenp, ppos);
2273 }
2274
2275 static int sysctl_poolsize = INPUT_POOL_WORDS * 32;
2276 extern struct ctl_table random_table[];
2277 struct ctl_table random_table[] = {
2278 {
2279 .procname = "poolsize",
2280 .data = &sysctl_poolsize,
2281 .maxlen = sizeof(int),
2282 .mode = 0444,
2283 .proc_handler = proc_dointvec,
2284 },
2285 {
2286 .procname = "entropy_avail",
2287 .maxlen = sizeof(int),
2288 .mode = 0444,
2289 .proc_handler = proc_do_entropy,
2290 .data = &input_pool.entropy_count,
2291 },
2292 {
2293 .procname = "read_wakeup_threshold",
2294 .data = &random_read_wakeup_bits,
2295 .maxlen = sizeof(int),
2296 .mode = 0644,
2297 .proc_handler = proc_dointvec_minmax,
2298 .extra1 = &min_read_thresh,
2299 .extra2 = &max_read_thresh,
2300 },
2301 {
2302 .procname = "write_wakeup_threshold",
2303 .data = &random_write_wakeup_bits,
2304 .maxlen = sizeof(int),
2305 .mode = 0644,
2306 .proc_handler = proc_dointvec_minmax,
2307 .extra1 = &min_write_thresh,
2308 .extra2 = &max_write_thresh,
2309 },
2310 {
2311 .procname = "urandom_min_reseed_secs",
2312 .data = &random_min_urandom_seed,
2313 .maxlen = sizeof(int),
2314 .mode = 0644,
2315 .proc_handler = proc_dointvec,
2316 },
2317 {
2318 .procname = "boot_id",
2319 .data = &sysctl_bootid,
2320 .maxlen = 16,
2321 .mode = 0444,
2322 .proc_handler = proc_do_uuid,
2323 },
2324 {
2325 .procname = "uuid",
2326 .maxlen = 16,
2327 .mode = 0444,
2328 .proc_handler = proc_do_uuid,
2329 },
2330 #ifdef ADD_INTERRUPT_BENCH
2331 {
2332 .procname = "add_interrupt_avg_cycles",
2333 .data = &avg_cycles,
2334 .maxlen = sizeof(avg_cycles),
2335 .mode = 0444,
2336 .proc_handler = proc_doulongvec_minmax,
2337 },
2338 {
2339 .procname = "add_interrupt_avg_deviation",
2340 .data = &avg_deviation,
2341 .maxlen = sizeof(avg_deviation),
2342 .mode = 0444,
2343 .proc_handler = proc_doulongvec_minmax,
2344 },
2345 #endif
2346 { }
2347 };
2348 #endif /* CONFIG_SYSCTL */
2349
2350 struct batched_entropy {
2351 union {
2352 u64 entropy_u64[CHACHA_BLOCK_SIZE / sizeof(u64)];
2353 u32 entropy_u32[CHACHA_BLOCK_SIZE / sizeof(u32)];
2354 };
2355 unsigned int position;
2356 spinlock_t batch_lock;
2357 };
2358
2359 /*
2360 * Get a random word for internal kernel use only. The quality of the random
2361 * number is either as good as RDRAND or as good as /dev/urandom, with the
2362 * goal of being quite fast and not depleting entropy. To guarantee
2363 * that the randomness provided by this function is cryptographically
2364 * secure, wait_for_random_bytes() should be called, and return 0, at
2365 * least once at some prior point.
2366 */
2367 static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u64) = {
2368 .batch_lock = __SPIN_LOCK_UNLOCKED(batched_entropy_u64.lock),
2369 };
2370
2371 u64 get_random_u64(void)
2372 {
2373 u64 ret;
2374 unsigned long flags;
2375 struct batched_entropy *batch;
2376 static void *previous;
2377
2378 #if BITS_PER_LONG == 64
2379 if (arch_get_random_long((unsigned long *)&ret))
2380 return ret;
2381 #else
2382 if (arch_get_random_long((unsigned long *)&ret) &&
2383 arch_get_random_long((unsigned long *)&ret + 1))
2384 return ret;
2385 #endif
2386
2387 warn_unseeded_randomness(&previous);
2388
2389 batch = raw_cpu_ptr(&batched_entropy_u64);
2390 spin_lock_irqsave(&batch->batch_lock, flags);
2391 if (batch->position % ARRAY_SIZE(batch->entropy_u64) == 0) {
2392 extract_crng((u8 *)batch->entropy_u64);
2393 batch->position = 0;
2394 }
2395 ret = batch->entropy_u64[batch->position++];
2396 spin_unlock_irqrestore(&batch->batch_lock, flags);
2397 return ret;
2398 }
2399 EXPORT_SYMBOL(get_random_u64);
2400
2401 static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u32) = {
2402 .batch_lock = __SPIN_LOCK_UNLOCKED(batched_entropy_u32.lock),
2403 };
2404 u32 get_random_u32(void)
2405 {
2406 u32 ret;
2407 unsigned long flags;
2408 struct batched_entropy *batch;
2409 static void *previous;
2410
2411 if (arch_get_random_int(&ret))
2412 return ret;
2413
2414 warn_unseeded_randomness(&previous);
2415
2416 batch = raw_cpu_ptr(&batched_entropy_u32);
2417 spin_lock_irqsave(&batch->batch_lock, flags);
2418 if (batch->position % ARRAY_SIZE(batch->entropy_u32) == 0) {
2419 extract_crng((u8 *)batch->entropy_u32);
2420 batch->position = 0;
2421 }
2422 ret = batch->entropy_u32[batch->position++];
2423 spin_unlock_irqrestore(&batch->batch_lock, flags);
2424 return ret;
2425 }
2426 EXPORT_SYMBOL(get_random_u32);
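
/*
 * These batched helpers suit fast, non-cryptographic uses such as
 * randomized backoff or layout jitter, where a little modulo bias is
 * harmless; a hedged sketch:
 *
 *	// delay by up to one second of jiffies-based jitter
 *	unsigned long delay = get_random_u32() % HZ;
 *
 * Callers that need key material should instead use get_random_bytes()
 * after wait_for_random_bytes() has succeeded.
 */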
2427
2428 /* It's important to invalidate all potential batched entropy that might
2429 * be stored before the crng is initialized, which we can do lazily by
2430 * simply resetting the counter to zero so that it's re-extracted on the
2431 * next usage. */
2432 static void invalidate_batched_entropy(void)
2433 {
2434 int cpu;
2435 unsigned long flags;
2436
2437 for_each_possible_cpu (cpu) {
2438 struct batched_entropy *batched_entropy;
2439
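/*
 * Interrupts stay disabled from the irqsave on the u32 batch until
 * the irqrestore on the u64 batch below; the bare unlock/lock pair
 * in between deliberately leaves them off while both per-cpu
 * batches are reset.
 */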
2440 batched_entropy = per_cpu_ptr(&batched_entropy_u32, cpu);
2441 spin_lock_irqsave(&batched_entropy->batch_lock, flags);
2442 batched_entropy->position = 0;
2443 spin_unlock(&batched_entropy->batch_lock);
2444
2445 batched_entropy = per_cpu_ptr(&batched_entropy_u64, cpu);
2446 spin_lock(&batched_entropy->batch_lock);
2447 batched_entropy->position = 0;
2448 spin_unlock_irqrestore(&batched_entropy->batch_lock, flags);
2449 }
2450 }
2451
2452 /**
2453 * randomize_page - Generate a random, page aligned address
2454 * @start: The smallest acceptable address the caller will take.
2455 * @range: The size of the area, starting at @start, within which the
2456 * random address must fall.
2457 *
2458 * If @start + @range would overflow, @range is capped.
2459 *
2460 * NOTE: Historical use of randomize_range, which this replaces, presumed that
2461 * @start was already page aligned. We now align it regardless.
2462 *
2463 * Return: A page aligned address within [start, start + range). On error,
2464 * @start is returned.
2465 */
2466 unsigned long
2467 randomize_page(unsigned long start, unsigned long range)
2468 {
2469 if (!PAGE_ALIGNED(start)) {
2470 range -= PAGE_ALIGN(start) - start;
2471 start = PAGE_ALIGN(start);
2472 }
2473
2474 if (start > ULONG_MAX - range)
2475 range = ULONG_MAX - start;
2476
2477 range >>= PAGE_SHIFT;
2478
2479 if (range == 0)
2480 return start;
2481
2482 return start + (get_random_long() % range << PAGE_SHIFT);
2483 }
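
/*
 * A sketch of an ASLR-style caller; the constants are illustrative
 * (TASK_UNMAPPED_BASE is per-arch, SZ_64M is from <linux/sizes.h>):
 *
 *	unsigned long base;
 *
 *	// place a mapping somewhere in a 64 MB window above the minimum
 *	base = randomize_page(TASK_UNMAPPED_BASE, SZ_64M);
 */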
2484
2485 /* Interface for in-kernel drivers of true hardware RNGs.
2486 * Those devices may produce endless random bits and will be throttled
2487 * when our pool is full.
2488 */
2489 void add_hwgenerator_randomness(const char *buffer, size_t count,
2490 size_t entropy)
2491 {
2492 struct entropy_store *poolp = &input_pool;
2493
2494 if (unlikely(crng_init == 0)) {
2495 crng_fast_load(buffer, count);
2496 return;
2497 }
2498
2499 /* Suspend writing if we're above the trickle threshold.
2500 * We'll be woken up again once below random_write_wakeup_bits,
2501 * or when the calling thread is about to terminate.
2502 */
2503 wait_event_freezable(random_write_wait,
2504 kthread_should_stop() ||
2505 ENTROPY_BITS(&input_pool) <= random_write_wakeup_bits);
2506 mix_pool_bytes(poolp, buffer, count);
2507 credit_entropy_bits(poolp, entropy);
2508 }
2509 EXPORT_SYMBOL_GPL(add_hwgenerator_randomness);
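
/*
 * A sketch of a feeder loop such as the hwrng core runs in a kthread;
 * my_hwrng_read() is hypothetical, and the half-bit-of-entropy-per-bit
 * credit is only a conservative illustration:
 *
 *	u8 buf[32];
 *	size_t n;
 *
 *	while (!kthread_should_stop()) {
 *		n = my_hwrng_read(buf, sizeof(buf));
 *		if (n)
 *			add_hwgenerator_randomness(buf, n, n * 8 / 2);
 *	}
 */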
2510
2511 /* Handle a random seed passed in by the bootloader.
2512 * If the seed is trustworthy, it is treated like input from a hardware RNG;
2513 * otherwise it is treated as untrusted device data.
2514 * The decision is controlled by CONFIG_RANDOM_TRUST_BOOTLOADER.
2515 */
2516 void add_bootloader_randomness(const void *buf, unsigned int size)
2517 {
2518 if (IS_ENABLED(CONFIG_RANDOM_TRUST_BOOTLOADER))
2519 add_hwgenerator_randomness(buf, size, size * 8);
2520 else
2521 add_device_randomness(buf, size);
2522 }
2523 EXPORT_SYMBOL_GPL(add_bootloader_randomness);