// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
/*
 * Copyright (C) 2017-2022 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
 * Copyright Matt Mackall <mpm@selenic.com>, 2003, 2004, 2005
 * Copyright Theodore Ts'o, 1994, 1995, 1996, 1997, 1998, 1999. All rights reserved.
 *
 * This driver produces cryptographically secure pseudorandom data. It is divided
 * into roughly six sections, each with a section header:
 *
 *   - Initialization and readiness waiting.
 *   - Fast key erasure RNG, the "crng".
 *   - Entropy accumulation and extraction routines.
 *   - Entropy collection routines.
 *   - Userspace reader/writer interfaces.
 *   - Sysctl interface.
 *
 * The high level overview is that there is one input pool, into which
 * various pieces of data are hashed. Prior to initialization, some of that
 * data is then "credited" as having a certain number of bits of entropy.
 * When enough bits of entropy are available, the hash is finalized and
 * handed as a key to a stream cipher that expands it indefinitely for
 * various consumers. This key is periodically refreshed as the various
 * entropy collectors, described below, add data to the input pool.
 */
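/*
 * A sketch of that flow (the labels here are ours, not from the text above):
 *
 *	collectors --> input pool (BLAKE2s) --enough credited bits-->
 *	extract_entropy() --> base_crng key --> per-cpu crngs --> consumers
 */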
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/utsname.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/major.h>
#include <linux/string.h>
#include <linux/fcntl.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/poll.h>
#include <linux/init.h>
#include <linux/genhd.h>
#include <linux/interrupt.h>
#include <linux/nodemask.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/percpu.h>
#include <linux/ptrace.h>
#include <linux/workqueue.h>
#include <linux/irq.h>
#include <linux/ratelimit.h>
#include <linux/syscalls.h>
#include <linux/completion.h>
#include <linux/uuid.h>
#include <linux/uaccess.h>
#include <linux/siphash.h>
#include <linux/uio.h>
#include <crypto/chacha.h>
#include <crypto/blake2s.h>
#include <asm/processor.h>
#include <asm/irq_regs.h>
/*********************************************************************
 *
 * Initialization and readiness waiting.
 *
 * Much of the RNG infrastructure is devoted to various dependencies
 * being able to wait until the RNG has collected enough entropy and
 * is ready for safe consumption.
 *
 *********************************************************************/
/*
 * crng_init is protected by base_crng->lock, and only increases
 * its value (from empty->early->ready).
 */
static enum {
	CRNG_EMPTY = 0, /* Little to no entropy collected */
	CRNG_EARLY = 1, /* At least POOL_EARLY_BITS collected */
	CRNG_READY = 2  /* Fully initialized with POOL_READY_BITS collected */
} crng_init __read_mostly = CRNG_EMPTY;
#define crng_ready() (likely(crng_init >= CRNG_READY))

/* Various types of waiters for crng_init->CRNG_READY transition. */
static DECLARE_WAIT_QUEUE_HEAD(crng_init_wait);
static struct fasync_struct *fasync;
static DEFINE_SPINLOCK(random_ready_chain_lock);
static RAW_NOTIFIER_HEAD(random_ready_chain);
/* Control how we warn userspace. */
static struct ratelimit_state urandom_warning =
	RATELIMIT_STATE_INIT_FLAGS("urandom_warning", HZ, 3, RATELIMIT_MSG_ON_RELEASE);
static int ratelimit_disable __read_mostly =
	IS_ENABLED(CONFIG_WARN_ALL_UNSEEDED_RANDOM);
module_param_named(ratelimit_disable, ratelimit_disable, int, 0644);
MODULE_PARM_DESC(ratelimit_disable, "Disable random ratelimit suppression");
/*
 * Returns whether or not the input pool has been seeded and thus guaranteed
 * to supply cryptographically secure random numbers. This applies to: the
 * /dev/urandom device, the get_random_bytes function, and the get_random_{u32,
 * u64,int,long} family of functions.
 *
 * Returns: true if the input pool has been seeded.
 *          false if the input pool has not been seeded.
 */
bool rng_is_initialized(void)
{
	return crng_ready();
}
EXPORT_SYMBOL(rng_is_initialized);
/* Used by wait_for_random_bytes(), and considered an entropy collector, below. */
static void try_to_generate_entropy(void);
/*
 * Wait for the input pool to be seeded and thus guaranteed to supply
 * cryptographically secure random numbers. This applies to: the /dev/urandom
 * device, the get_random_bytes function, and the get_random_{u32,u64,int,long}
 * family of functions. Using any of these functions without first calling
 * this function forfeits the guarantee of security.
 *
 * Returns: 0 if the input pool has been seeded.
 *          -ERESTARTSYS if the function was interrupted by a signal.
 */
int wait_for_random_bytes(void)
{
	while (!crng_ready()) {
		int ret;

		try_to_generate_entropy();
		ret = wait_event_interruptible_timeout(crng_init_wait, crng_ready(), HZ);
		if (ret)
			return ret > 0 ? 0 : ret;
	}
	return 0;
}
EXPORT_SYMBOL(wait_for_random_bytes);
/*
 * Add a callback function that will be invoked when the input
 * pool is initialised.
 *
 * returns: 0 if callback is successfully added
 *	    -EALREADY if pool is already initialised (callback not called)
 */
int __cold register_random_ready_notifier(struct notifier_block *nb)
{
	unsigned long flags;
	int ret = -EALREADY;

	if (crng_ready())
		return ret;

	spin_lock_irqsave(&random_ready_chain_lock, flags);
	if (!crng_ready())
		ret = raw_notifier_chain_register(&random_ready_chain, nb);
	spin_unlock_irqrestore(&random_ready_chain_lock, flags);
	return ret;
}
/*
 * Delete a previously registered readiness callback function.
 */
int __cold unregister_random_ready_notifier(struct notifier_block *nb)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&random_ready_chain_lock, flags);
	ret = raw_notifier_chain_unregister(&random_ready_chain, nb);
	spin_unlock_irqrestore(&random_ready_chain_lock, flags);
	return ret;
}
static void __cold process_random_ready_list(void)
{
	unsigned long flags;

	spin_lock_irqsave(&random_ready_chain_lock, flags);
	raw_notifier_call_chain(&random_ready_chain, 0, NULL);
	spin_unlock_irqrestore(&random_ready_chain_lock, flags);
}
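/*
 * Illustrative sketch (not part of this driver): how a subsystem might
 * use the readiness notifier above. The callback name is hypothetical.
 * Note the chain is invoked under random_ready_chain_lock with irqs
 * saved, so the callback must not sleep.
 */
#if 0
static int example_rng_ready_cb(struct notifier_block *nb,
				unsigned long action, void *data)
{
	pr_info("example: RNG is now seeded\n");
	return NOTIFY_DONE;
}

static struct notifier_block example_rng_nb = { .notifier_call = example_rng_ready_cb };

static void example_setup(void)
{
	/* -EALREADY means the pool was seeded before we could register. */
	if (register_random_ready_notifier(&example_rng_nb) == -EALREADY)
		pr_info("example: RNG was already seeded\n");
}
#endif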
#define warn_unseeded_randomness() \
	if (IS_ENABLED(CONFIG_WARN_ALL_UNSEEDED_RANDOM) && !crng_ready()) \
		printk_deferred(KERN_NOTICE "random: %s called from %pS with crng_init=%d\n", \
				__func__, (void *)_RET_IP_, crng_init)
/*********************************************************************
 *
 * Fast key erasure RNG, the "crng".
 *
 * These functions expand entropy from the entropy extractor into
 * long streams for external consumption using the "fast key erasure"
 * RNG described at <https://blog.cr.yp.to/20170723-random.html>.
 *
 * There are a few exported interfaces for use by other drivers:
 *
 *	void get_random_bytes(void *buf, size_t len)
 *	u32 get_random_u32()
 *	u64 get_random_u64()
 *	unsigned int get_random_int()
 *	unsigned long get_random_long()
 *
 * These interfaces will return the requested number of random bytes
 * into the given buffer or as a return value. This is equivalent to
 * a read from /dev/urandom. The u32, u64, int, and long family of
 * functions may be higher performance for one-off random integers,
 * because they do a bit of buffering and do not invoke reseeding
 * until the buffer is emptied.
 *
 *********************************************************************/
enum {
	CRNG_RESEED_START_INTERVAL = HZ,
	CRNG_RESEED_INTERVAL = 60 * HZ
};

static struct {
	u8 key[CHACHA_KEY_SIZE] __aligned(__alignof__(long));
	unsigned long birth;
	unsigned long generation;
	spinlock_t lock;
} base_crng = {
	.lock = __SPIN_LOCK_UNLOCKED(base_crng.lock)
};

struct crng {
	u8 key[CHACHA_KEY_SIZE];
	unsigned long generation;
	local_lock_t lock;
};

static DEFINE_PER_CPU(struct crng, crngs) = {
	.generation = ULONG_MAX,
	.lock = INIT_LOCAL_LOCK(crngs.lock),
};
/* Used by crng_reseed() and crng_make_state() to extract a new seed from the input pool. */
static void extract_entropy(void *buf, size_t len);
/* This extracts a new crng key from the input pool. */
static void crng_reseed(void)
{
	unsigned long flags;
	unsigned long next_gen;
	u8 key[CHACHA_KEY_SIZE];

	extract_entropy(key, sizeof(key));

	/*
	 * We copy the new key into the base_crng, overwriting the old one,
	 * and update the generation counter. We avoid hitting ULONG_MAX,
	 * because the per-cpu crngs are initialized to ULONG_MAX, so this
	 * forces new CPUs that come online to always initialize.
	 */
	spin_lock_irqsave(&base_crng.lock, flags);
	memcpy(base_crng.key, key, sizeof(base_crng.key));
	next_gen = base_crng.generation + 1;
	if (next_gen == ULONG_MAX)
		++next_gen;
	WRITE_ONCE(base_crng.generation, next_gen);
	WRITE_ONCE(base_crng.birth, jiffies);
	if (!crng_ready())
		crng_init = CRNG_READY;
	spin_unlock_irqrestore(&base_crng.lock, flags);
	memzero_explicit(key, sizeof(key));
}
/*
 * This generates a ChaCha block using the provided key, and then
 * immediately overwrites that key with half the block. It returns
 * the resultant ChaCha state to the user, along with the second
 * half of the block containing 32 bytes of random data that may
 * be used; random_data_len may not be greater than 32.
 *
 * The returned ChaCha state contains within it a copy of the old
 * key value, at index 4, so the state should always be zeroed out
 * immediately after using in order to maintain forward secrecy.
 * If the state cannot be erased in a timely manner, then it is
 * safer to set the random_data parameter to &chacha_state[4] so
 * that this function overwrites it before returning.
 */
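/*
 * For reference, a sketch of the 16-word ChaCha state as set up below
 * by chacha_init_consts() and the memcpy()/memset() calls:
 *
 *	words  0..3	"expand 32-byte k" constants
 *	words  4..11	256-bit key (later overwritten by half the output block)
 *	words 12..15	block counter and nonce, zeroed here
 */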
static void crng_fast_key_erasure(u8 key[CHACHA_KEY_SIZE],
				  u32 chacha_state[CHACHA_STATE_WORDS],
				  u8 *random_data, size_t random_data_len)
{
	u8 first_block[CHACHA_BLOCK_SIZE];

	BUG_ON(random_data_len > 32);

	chacha_init_consts(chacha_state);
	memcpy(&chacha_state[4], key, CHACHA_KEY_SIZE);
	memset(&chacha_state[12], 0, sizeof(u32) * 4);
	chacha20_block(chacha_state, first_block);

	memcpy(key, first_block, CHACHA_KEY_SIZE);
	memcpy(random_data, first_block + CHACHA_KEY_SIZE, random_data_len);
	memzero_explicit(first_block, sizeof(first_block));
}
/*
 * Return whether the crng seed is considered to be sufficiently old
 * that a reseeding is needed. This happens if the last reseeding
 * was CRNG_RESEED_INTERVAL ago, or during early boot, at an interval
 * proportional to the uptime.
 */
static bool crng_has_old_seed(void)
{
	static bool early_boot = true;
	unsigned long interval = CRNG_RESEED_INTERVAL;

	if (unlikely(READ_ONCE(early_boot))) {
		time64_t uptime = ktime_get_seconds();
		if (uptime >= CRNG_RESEED_INTERVAL / HZ * 2)
			WRITE_ONCE(early_boot, false);
		else
			interval = max_t(unsigned int, CRNG_RESEED_START_INTERVAL,
					 (unsigned int)uptime / 2 * HZ);
	}
	return time_is_before_jiffies(READ_ONCE(base_crng.birth) + interval);
}
/*
 * This function returns a ChaCha state that you may use for generating
 * random data. It also returns up to 32 bytes on its own of random data
 * that may be used; random_data_len may not be greater than 32.
 */
static void crng_make_state(u32 chacha_state[CHACHA_STATE_WORDS],
			    u8 *random_data, size_t random_data_len)
{
	unsigned long flags;
	struct crng *crng;

	BUG_ON(random_data_len > 32);

	/*
	 * For the fast path, we check whether we're ready, unlocked first, and
	 * then re-check once locked later. In the case where we're really not
	 * ready, we do fast key erasure with the base_crng directly, extracting
	 * when crng_init is CRNG_EMPTY.
	 */
	if (!crng_ready()) {
		bool ready;

		spin_lock_irqsave(&base_crng.lock, flags);
		ready = crng_ready();
		if (!ready) {
			if (crng_init == CRNG_EMPTY)
				extract_entropy(base_crng.key, sizeof(base_crng.key));
			crng_fast_key_erasure(base_crng.key, chacha_state,
					      random_data, random_data_len);
		}
		spin_unlock_irqrestore(&base_crng.lock, flags);
		if (!ready)
			return;
	}

	/*
	 * If the base_crng is old enough, we reseed, which in turn bumps the
	 * generation counter that we check below.
	 */
	if (unlikely(crng_has_old_seed()))
		crng_reseed();

	local_lock_irqsave(&crngs.lock, flags);
	crng = raw_cpu_ptr(&crngs);

	/*
	 * If our per-cpu crng is older than the base_crng, then it means
	 * somebody reseeded the base_crng. In that case, we do fast key
	 * erasure on the base_crng, and use its output as the new key
	 * for our per-cpu crng. This brings us up to date with base_crng.
	 */
	if (unlikely(crng->generation != READ_ONCE(base_crng.generation))) {
		spin_lock(&base_crng.lock);
		crng_fast_key_erasure(base_crng.key, chacha_state,
				      crng->key, sizeof(crng->key));
		crng->generation = base_crng.generation;
		spin_unlock(&base_crng.lock);
	}

	/*
	 * Finally, when we've made it this far, our per-cpu crng has an up
	 * to date key, and we can do fast key erasure with it to produce
	 * some random data and a ChaCha state for the caller. All other
	 * branches of this function are "unlikely", so most of the time we
	 * should wind up here immediately.
	 */
	crng_fast_key_erasure(crng->key, chacha_state, random_data, random_data_len);
	local_unlock_irqrestore(&crngs.lock, flags);
}
static void _get_random_bytes(void *buf, size_t len)
{
	u32 chacha_state[CHACHA_STATE_WORDS];
	u8 tmp[CHACHA_BLOCK_SIZE];
	size_t first_block_len;

	if (!len)
		return;

	first_block_len = min_t(size_t, 32, len);
	crng_make_state(chacha_state, buf, first_block_len);
	len -= first_block_len;
	buf += first_block_len;

	while (len) {
		if (len < CHACHA_BLOCK_SIZE) {
			chacha20_block(chacha_state, tmp);
			memcpy(buf, tmp, len);
			memzero_explicit(tmp, sizeof(tmp));
			break;
		}

		chacha20_block(chacha_state, buf);
		if (unlikely(chacha_state[12] == 0))
			++chacha_state[13];
		len -= CHACHA_BLOCK_SIZE;
		buf += CHACHA_BLOCK_SIZE;
	}

	memzero_explicit(chacha_state, sizeof(chacha_state));
}
/*
 * This function is the exported kernel interface. It returns some
 * number of good random numbers, suitable for key generation, seeding
 * TCP sequence numbers, etc. It does not rely on the hardware random
 * number generator. For random bytes direct from the hardware RNG
 * (when available), use get_random_bytes_arch(). In order to ensure
 * that the randomness provided by this function is okay, the function
 * wait_for_random_bytes() should be called and return 0 at least once
 * at any point prior.
 */
void get_random_bytes(void *buf, size_t len)
{
	warn_unseeded_randomness();
	_get_random_bytes(buf, len);
}
EXPORT_SYMBOL(get_random_bytes);
static ssize_t get_random_bytes_user(struct iov_iter *iter)
{
	u32 chacha_state[CHACHA_STATE_WORDS];
	u8 block[CHACHA_BLOCK_SIZE];
	size_t ret = 0, copied;

	if (unlikely(!iov_iter_count(iter)))
		return 0;

	/*
	 * Immediately overwrite the ChaCha key at index 4 with random
	 * bytes, in case userspace causes copy_to_iter() below to sleep
	 * forever, so that we still retain forward secrecy in that case.
	 */
	crng_make_state(chacha_state, (u8 *)&chacha_state[4], CHACHA_KEY_SIZE);
	/*
	 * However, if we're doing a read of len <= 32, we don't need to
	 * use chacha_state after, so we can simply return those bytes to
	 * the user directly.
	 */
	if (iov_iter_count(iter) <= CHACHA_KEY_SIZE) {
		ret = copy_to_iter(&chacha_state[4], CHACHA_KEY_SIZE, iter);
		goto out_zero_chacha;
	}

	for (;;) {
		chacha20_block(chacha_state, block);
		if (unlikely(chacha_state[12] == 0))
			++chacha_state[13];

		copied = copy_to_iter(block, sizeof(block), iter);
		ret += copied;
		if (!iov_iter_count(iter) || copied != sizeof(block))
			break;

		BUILD_BUG_ON(PAGE_SIZE % sizeof(block) != 0);
		if (ret % PAGE_SIZE == 0) {
			if (signal_pending(current))
				break;
			cond_resched();
		}
	}

	memzero_explicit(block, sizeof(block));
out_zero_chacha:
	memzero_explicit(chacha_state, sizeof(chacha_state));
	return ret ? ret : -EFAULT;
}
/*
 * Batched entropy returns random integers. The quality of the random
 * number is as good as /dev/urandom. In order to ensure that the randomness
 * provided by this function is okay, the function wait_for_random_bytes()
 * should be called and return 0 at least once at any point prior.
 */
#define DEFINE_BATCHED_ENTROPY(type) \
struct batch_ ##type { \
	/* \
	 * We make this 1.5x a ChaCha block, so that we get the \
	 * remaining 32 bytes from fast key erasure, plus one full \
	 * block from the detached ChaCha state. We can increase \
	 * the size of this later if needed so long as we keep the \
	 * formula of (integer_blocks + 0.5) * CHACHA_BLOCK_SIZE. \
	 */ \
	type entropy[CHACHA_BLOCK_SIZE * 3 / (2 * sizeof(type))]; \
	local_lock_t lock; \
	unsigned long generation; \
	unsigned int position; \
}; \
\
static DEFINE_PER_CPU(struct batch_ ##type, batched_entropy_ ##type) = { \
	.lock = INIT_LOCAL_LOCK(batched_entropy_ ##type.lock), \
	.position = UINT_MAX \
}; \
\
type get_random_ ##type(void) \
{ \
	type ret; \
	unsigned long flags; \
	struct batch_ ##type *batch; \
	unsigned long next_gen; \
\
	warn_unseeded_randomness(); \
\
	if (!crng_ready()) { \
		_get_random_bytes(&ret, sizeof(ret)); \
		return ret; \
	} \
\
	local_lock_irqsave(&batched_entropy_ ##type.lock, flags); \
	batch = raw_cpu_ptr(&batched_entropy_##type); \
\
	next_gen = READ_ONCE(base_crng.generation); \
	if (batch->position >= ARRAY_SIZE(batch->entropy) || \
	    next_gen != batch->generation) { \
		_get_random_bytes(batch->entropy, sizeof(batch->entropy)); \
		batch->position = 0; \
		batch->generation = next_gen; \
	} \
\
	ret = batch->entropy[batch->position]; \
	batch->entropy[batch->position] = 0; \
	++batch->position; \
	local_unlock_irqrestore(&batched_entropy_ ##type.lock, flags); \
	return ret; \
} \
EXPORT_SYMBOL(get_random_ ##type);
DEFINE_BATCHED_ENTROPY(u64)
DEFINE_BATCHED_ENTROPY(u32)
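/*
 * Illustrative sketch (not part of this driver): one-off integers come
 * from the per-cpu batch above, which is cheaper than calling
 * get_random_bytes() for 4 or 8 bytes at a time. The function name is
 * hypothetical.
 */
#if 0
static unsigned long example_random_backoff(void)
{
	/* Uniform u32 reduced to a bounded, non-cryptographic jitter. */
	return get_random_u32() % HZ;
}
#endif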
/*
 * This function is called when the CPU is coming up, with entry
 * CPUHP_RANDOM_PREPARE, which comes before CPUHP_WORKQUEUE_PREP.
 */
int __cold random_prepare_cpu(unsigned int cpu)
{
	/*
	 * When the cpu comes back online, immediately invalidate both
	 * the per-cpu crng and all batches, so that we serve fresh
	 * randomness.
	 */
	per_cpu_ptr(&crngs, cpu)->generation = ULONG_MAX;
	per_cpu_ptr(&batched_entropy_u32, cpu)->position = UINT_MAX;
	per_cpu_ptr(&batched_entropy_u64, cpu)->position = UINT_MAX;
	return 0;
}
/*
 * This function will use the architecture-specific hardware random
 * number generator if it is available. It is not recommended for
 * use. Use get_random_bytes() instead. It returns the number of
 * bytes filled in.
 */
size_t __must_check get_random_bytes_arch(void *buf, size_t len)
{
	size_t left = len;
	u8 *p = buf;

	while (left) {
		unsigned long v;
		size_t block_len = min_t(size_t, left, sizeof(unsigned long));

		if (!arch_get_random_long(&v))
			break;

		memcpy(p, &v, block_len);
		p += block_len;
		left -= block_len;
	}

	return len - left;
}
EXPORT_SYMBOL(get_random_bytes_arch);
/**********************************************************************
 *
 * Entropy accumulation and extraction routines.
 *
 * Callers may add entropy via:
 *
 *	static void mix_pool_bytes(const void *buf, size_t len)
 *
 * After which, if added entropy should be credited:
 *
 *	static void credit_init_bits(size_t bits)
 *
 * Finally, extract entropy via:
 *
 *	static void extract_entropy(void *buf, size_t len)
 *
 **********************************************************************/
enum {
	POOL_BITS = BLAKE2S_HASH_SIZE * 8,
	POOL_READY_BITS = POOL_BITS, /* When crng_init->CRNG_READY */
	POOL_EARLY_BITS = POOL_READY_BITS / 2 /* When crng_init->CRNG_EARLY */
};

static struct {
	struct blake2s_state hash;
	spinlock_t lock;
	unsigned int init_bits;
} input_pool = {
	.hash.h = { BLAKE2S_IV0 ^ (0x01010000 | BLAKE2S_HASH_SIZE),
		    BLAKE2S_IV1, BLAKE2S_IV2, BLAKE2S_IV3, BLAKE2S_IV4,
		    BLAKE2S_IV5, BLAKE2S_IV6, BLAKE2S_IV7 },
	.hash.outlen = BLAKE2S_HASH_SIZE,
	.lock = __SPIN_LOCK_UNLOCKED(input_pool.lock),
};
static void _mix_pool_bytes(const void *buf, size_t len)
{
	blake2s_update(&input_pool.hash, buf, len);
}
/*
 * This function adds bytes into the input pool. It does not
 * update the initialization bit counter; the caller should call
 * credit_init_bits if this is appropriate.
 */
static void mix_pool_bytes(const void *buf, size_t len)
{
	unsigned long flags;

	spin_lock_irqsave(&input_pool.lock, flags);
	_mix_pool_bytes(buf, len);
	spin_unlock_irqrestore(&input_pool.lock, flags);
}
/*
 * This is an HKDF-like construction for using the hashed collected entropy
 * as a PRF key, that's then expanded block-by-block.
 */
static void extract_entropy(void *buf, size_t len)
{
	unsigned long flags;
	u8 seed[BLAKE2S_HASH_SIZE], next_key[BLAKE2S_HASH_SIZE];
	struct {
		unsigned long rdseed[32 / sizeof(long)];
		size_t counter;
	} block;
	size_t i;

	for (i = 0; i < ARRAY_SIZE(block.rdseed); ++i) {
		if (!arch_get_random_seed_long(&block.rdseed[i]) &&
		    !arch_get_random_long(&block.rdseed[i]))
			block.rdseed[i] = random_get_entropy();
	}

	spin_lock_irqsave(&input_pool.lock, flags);

	/* seed = HASHPRF(last_key, entropy_input) */
	blake2s_final(&input_pool.hash, seed);

	/* next_key = HASHPRF(seed, RDSEED || 0) */
	block.counter = 0;
	blake2s(next_key, (u8 *)&block, seed, sizeof(next_key), sizeof(block), sizeof(seed));
	blake2s_init_key(&input_pool.hash, BLAKE2S_HASH_SIZE, next_key, sizeof(next_key));

	spin_unlock_irqrestore(&input_pool.lock, flags);
	memzero_explicit(next_key, sizeof(next_key));

	while (len) {
		i = min_t(size_t, len, BLAKE2S_HASH_SIZE);
		/* output = HASHPRF(seed, RDSEED || ++counter) */
		++block.counter;
		blake2s(buf, (u8 *)&block, seed, i, sizeof(block), sizeof(seed));
		len -= i;
		buf += i;
	}

	memzero_explicit(seed, sizeof(seed));
	memzero_explicit(&block, sizeof(block));
}
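/*
 * In short, a sketch of the scheme above (H_k = BLAKE2s keyed/seeded
 * with k, "n" the block counter):
 *
 *	seed     = H_{last_key}(all pool input so far)
 *	next_key = H_seed(RDSEED || 0)		<- rekeys the pool hash
 *	output_n = H_seed(RDSEED || n), n >= 1	<- the extracted bytes
 */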
#define credit_init_bits(bits) if (!crng_ready()) _credit_init_bits(bits)

static void __cold _credit_init_bits(size_t bits)
{
	unsigned int new, orig, add;
	unsigned long flags;

	if (!bits)
		return;

	add = min_t(size_t, bits, POOL_BITS);

	do {
		orig = READ_ONCE(input_pool.init_bits);
		new = min_t(unsigned int, POOL_BITS, orig + add);
	} while (cmpxchg(&input_pool.init_bits, orig, new) != orig);

	if (orig < POOL_READY_BITS && new >= POOL_READY_BITS) {
		crng_reseed(); /* Sets crng_init to CRNG_READY under base_crng.lock. */
		process_random_ready_list();
		wake_up_interruptible(&crng_init_wait);
		kill_fasync(&fasync, SIGIO, POLL_IN);
		pr_notice("crng init done\n");
		if (urandom_warning.missed)
			pr_notice("%d urandom warning(s) missed due to ratelimiting\n",
				  urandom_warning.missed);
	} else if (orig < POOL_EARLY_BITS && new >= POOL_EARLY_BITS) {
		spin_lock_irqsave(&base_crng.lock, flags);
		/* Check if crng_init is CRNG_EMPTY, to avoid race with crng_reseed(). */
		if (crng_init == CRNG_EMPTY) {
			extract_entropy(base_crng.key, sizeof(base_crng.key));
			crng_init = CRNG_EARLY;
		}
		spin_unlock_irqrestore(&base_crng.lock, flags);
	}
}
/**********************************************************************
 *
 * Entropy collection routines.
 *
 * The following exported functions are used for pushing entropy into
 * the above entropy accumulation routines:
 *
 *	void add_device_randomness(const void *buf, size_t len);
 *	void add_hwgenerator_randomness(const void *buf, size_t len, size_t entropy);
 *	void add_bootloader_randomness(const void *buf, size_t len);
 *	void add_interrupt_randomness(int irq);
 *	void add_input_randomness(unsigned int type, unsigned int code, unsigned int value);
 *	void add_disk_randomness(struct gendisk *disk);
 *
 * add_device_randomness() adds data to the input pool that
 * is likely to differ between two devices (or possibly even per boot).
 * This would be things like MAC addresses or serial numbers, or the
 * read-out of the RTC. This does *not* credit any actual entropy to
 * the pool, but it initializes the pool to different values for devices
 * that might otherwise be identical and have very little entropy
 * available to them (particularly common in the embedded world).
 *
 * add_hwgenerator_randomness() is for true hardware RNGs, and will credit
 * entropy as specified by the caller. If the entropy pool is full it will
 * block until more entropy is needed.
 *
 * add_bootloader_randomness() is called by bootloader drivers, such as EFI
 * and device tree, and credits its input depending on whether or not the
 * configuration option CONFIG_RANDOM_TRUST_BOOTLOADER is set.
 *
 * add_interrupt_randomness() uses the interrupt timing as random
 * inputs to the entropy pool. Using the cycle counters and the irq source
 * as inputs, it feeds the input pool roughly once a second or after 64
 * interrupts, crediting 1 bit of entropy for whichever comes first.
 *
 * add_input_randomness() uses the input layer interrupt timing, as well
 * as the event type information from the hardware.
 *
 * add_disk_randomness() uses what amounts to the seek time of block
 * layer request events, on a per-disk_devt basis, as input to the
 * entropy pool. Note that high-speed solid state drives with very low
 * seek times do not make for good sources of entropy, as their seek
 * times are usually fairly consistent.
 *
 * The last two routines try to estimate how many bits of entropy
 * to credit. They do this by keeping track of the first and second
 * order deltas of the event timings.
 *
 **********************************************************************/
static bool trust_cpu __initdata = IS_ENABLED(CONFIG_RANDOM_TRUST_CPU);
static bool trust_bootloader __initdata = IS_ENABLED(CONFIG_RANDOM_TRUST_BOOTLOADER);
static int __init parse_trust_cpu(char *arg)
{
	return kstrtobool(arg, &trust_cpu);
}
static int __init parse_trust_bootloader(char *arg)
{
	return kstrtobool(arg, &trust_bootloader);
}
early_param("random.trust_cpu", parse_trust_cpu);
early_param("random.trust_bootloader", parse_trust_bootloader);
/*
 * The first collection of entropy occurs at system boot while interrupts
 * are still turned off. Here we push in latent entropy, RDSEED, a timestamp,
 * utsname(), and the command line. Depending on the above configuration knob,
 * RDSEED may be considered sufficient for initialization. Note that much
 * earlier setup may already have pushed entropy into the input pool by the
 * time we get here.
 */
int __init random_init(const char *command_line)
{
	ktime_t now = ktime_get_real();
	unsigned int i, arch_bits;
	unsigned long entropy;

#if defined(LATENT_ENTROPY_PLUGIN)
	static const u8 compiletime_seed[BLAKE2S_BLOCK_SIZE] __initconst __latent_entropy;
	_mix_pool_bytes(compiletime_seed, sizeof(compiletime_seed));
#endif

	for (i = 0, arch_bits = BLAKE2S_BLOCK_SIZE * 8;
	     i < BLAKE2S_BLOCK_SIZE; i += sizeof(entropy)) {
		if (!arch_get_random_seed_long_early(&entropy) &&
		    !arch_get_random_long_early(&entropy)) {
			entropy = random_get_entropy();
			arch_bits -= sizeof(entropy) * 8;
		}
		_mix_pool_bytes(&entropy, sizeof(entropy));
	}
	_mix_pool_bytes(&now, sizeof(now));
	_mix_pool_bytes(utsname(), sizeof(*(utsname())));
	_mix_pool_bytes(command_line, strlen(command_line));
	add_latent_entropy();

	if (crng_ready())
		crng_reseed();
	else if (trust_cpu)
		_credit_init_bits(arch_bits);

	return 0;
}
/*
 * Add device- or boot-specific data to the input pool to help
 * initialize it.
 *
 * None of this adds any entropy; it is meant to avoid the problem of
 * the entropy pool having similar initial state across largely
 * identical devices.
 */
void add_device_randomness(const void *buf, size_t len)
{
	unsigned long entropy = random_get_entropy();
	unsigned long flags;

	spin_lock_irqsave(&input_pool.lock, flags);
	_mix_pool_bytes(&entropy, sizeof(entropy));
	_mix_pool_bytes(buf, len);
	spin_unlock_irqrestore(&input_pool.lock, flags);
}
EXPORT_SYMBOL(add_device_randomness);
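/*
 * Illustrative sketch (not part of this driver): a NIC driver might mix
 * in its MAC address at probe time. No entropy is credited; it only
 * diversifies otherwise-identical embedded devices. The function name
 * is hypothetical.
 */
#if 0
static void example_nic_probe(struct net_device *dev)
{
	add_device_randomness(dev->dev_addr, ETH_ALEN);
}
#endif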
/*
 * Interface for in-kernel drivers of true hardware RNGs.
 * Those devices may produce endless random bits and will be throttled
 * when our pool is full.
 */
void add_hwgenerator_randomness(const void *buf, size_t len, size_t entropy)
{
	mix_pool_bytes(buf, len);
	credit_init_bits(entropy);

	/*
	 * Throttle writing to once every CRNG_RESEED_INTERVAL, unless
	 * we're not yet initialized.
	 */
	if (!kthread_should_stop() && crng_ready())
		schedule_timeout_interruptible(CRNG_RESEED_INTERVAL);
}
EXPORT_SYMBOL_GPL(add_hwgenerator_randomness);
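/*
 * Illustrative sketch (not part of this driver): a hardware RNG driver's
 * feed thread. example_read_hw_rng() and the claim of 4 bits of entropy
 * per byte are hypothetical; the throttling sleep happens inside
 * add_hwgenerator_randomness() once the crng is ready.
 */
#if 0
static int example_hwrng_fill(void *unused)
{
	u8 buf[64];

	while (!kthread_should_stop()) {
		example_read_hw_rng(buf, sizeof(buf));
		add_hwgenerator_randomness(buf, sizeof(buf), sizeof(buf) * 4);
	}
	return 0;
}
#endif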
/*
 * Handle random seed passed by bootloader, and credit it if
 * CONFIG_RANDOM_TRUST_BOOTLOADER is set.
 */
void __init add_bootloader_randomness(const void *buf, size_t len)
{
	mix_pool_bytes(buf, len);
	if (trust_bootloader)
		credit_init_bits(len * 8);
}
struct fast_pool {
	struct work_struct mix;
	unsigned long pool[4];
	unsigned long last;
	unsigned int count;
};

static DEFINE_PER_CPU(struct fast_pool, irq_randomness) = {
#ifdef CONFIG_64BIT
#define FASTMIX_PERM SIPHASH_PERMUTATION
	.pool = { SIPHASH_CONST_0, SIPHASH_CONST_1, SIPHASH_CONST_2, SIPHASH_CONST_3 }
#else
#define FASTMIX_PERM HSIPHASH_PERMUTATION
	.pool = { HSIPHASH_CONST_0, HSIPHASH_CONST_1, HSIPHASH_CONST_2, HSIPHASH_CONST_3 }
#endif
};
/*
 * This is [Half]SipHash-1-x, starting from an empty key. Because
 * the key is fixed, it assumes that its inputs are non-malicious,
 * and therefore this has no security on its own. s represents the
 * four-word SipHash state, while v represents a two-word input.
 */
static void fast_mix(unsigned long s[4], unsigned long v1, unsigned long v2)
{
	s[3] ^= v1;
	FASTMIX_PERM(s[0], s[1], s[2], s[3]);
	s[0] ^= v1;
	s[3] ^= v2;
	FASTMIX_PERM(s[0], s[1], s[2], s[3]);
	s[0] ^= v2;
}
/*
 * This function is called when the CPU has just come online, with
 * entry CPUHP_AP_RANDOM_ONLINE, just after CPUHP_AP_WORKQUEUE_ONLINE.
 */
int __cold random_online_cpu(unsigned int cpu)
{
	/*
	 * During CPU shutdown and before CPU onlining, add_interrupt_
	 * randomness() may schedule mix_interrupt_randomness(), and
	 * set the MIX_INFLIGHT flag. However, because the worker can
	 * be scheduled on a different CPU during this period, that
	 * flag will never be cleared. For that reason, we zero out
	 * the flag here, which runs just after workqueues are onlined
	 * for the CPU again. This also has the effect of setting the
	 * irq randomness count to zero so that new accumulated irqs
	 * are fresh.
	 */
	per_cpu_ptr(&irq_randomness, cpu)->count = 0;
	return 0;
}
static void mix_interrupt_randomness(struct work_struct *work)
{
	struct fast_pool *fast_pool = container_of(work, struct fast_pool, mix);
	/*
	 * The size of the copied stack pool is explicitly 2 longs so that we
	 * only ever ingest half of the siphash output each time, retaining
	 * the other half as the next "key" that carries over. The entropy is
	 * supposed to be sufficiently dispersed between bits so on average
	 * we don't wind up "losing" some.
	 */
	unsigned long pool[2];
	unsigned int count;

	/* Check to see if we're running on the wrong CPU due to hotplug. */
	local_irq_disable();
	if (fast_pool != this_cpu_ptr(&irq_randomness)) {
		local_irq_enable();
		return;
	}

	/*
	 * Copy the pool to the stack so that the mixer always has a
	 * consistent view, before we reenable irqs again.
	 */
	memcpy(pool, fast_pool->pool, sizeof(pool));
	count = fast_pool->count;
	fast_pool->count = 0;
	fast_pool->last = jiffies;
	local_irq_enable();

	mix_pool_bytes(pool, sizeof(pool));
	credit_init_bits(clamp_t(unsigned int, (count & U16_MAX) / 64, 1, sizeof(pool) * 8));

	memzero_explicit(pool, sizeof(pool));
}
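/*
 * Crediting example (a sketch): with the usual trigger of 64 interrupts,
 * the clamp_t() above credits 64 / 64 = 1 bit; a burst of 1024 interrupts
 * before the worker runs credits 16 bits, capped at sizeof(pool) * 8.
 */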
void add_interrupt_randomness(int irq)
{
	enum { MIX_INFLIGHT = 1U << 31 };
	unsigned long entropy = random_get_entropy();
	struct fast_pool *fast_pool = this_cpu_ptr(&irq_randomness);
	struct pt_regs *regs = get_irq_regs();
	unsigned int new_count;

	fast_mix(fast_pool->pool, entropy,
		 (regs ? instruction_pointer(regs) : _RET_IP_) ^ swab(irq));
	new_count = ++fast_pool->count;

	if (new_count & MIX_INFLIGHT)
		return;

	if (new_count < 1024 && !time_is_before_jiffies(fast_pool->last + HZ))
		return;

	if (unlikely(!fast_pool->mix.func))
		INIT_WORK(&fast_pool->mix, mix_interrupt_randomness);
	fast_pool->count |= MIX_INFLIGHT;
	queue_work_on(raw_smp_processor_id(), system_highpri_wq, &fast_pool->mix);
}
EXPORT_SYMBOL_GPL(add_interrupt_randomness);
/* There is one of these per entropy source */
struct timer_rand_state {
	unsigned long last_time;
	long last_delta, last_delta2;
};
/*
 * This function adds entropy to the entropy "pool" by using timing
 * delays. It uses the timer_rand_state structure to make an estimate
 * of how many bits of entropy this call has added to the pool. The
 * value "num" is also added to the pool; it should somehow describe
 * the type of event that just happened.
 */
static void add_timer_randomness(struct timer_rand_state *state, unsigned int num)
{
	unsigned long entropy = random_get_entropy(), now = jiffies, flags;
	long delta, delta2, delta3;
	unsigned int bits;

	/*
	 * If we're in a hard IRQ, add_interrupt_randomness() will be called
	 * sometime after, so mix into the fast pool.
	 */
	if (in_hardirq()) {
		fast_mix(this_cpu_ptr(&irq_randomness)->pool, entropy, num);
	} else {
		spin_lock_irqsave(&input_pool.lock, flags);
		_mix_pool_bytes(&entropy, sizeof(entropy));
		_mix_pool_bytes(&num, sizeof(num));
		spin_unlock_irqrestore(&input_pool.lock, flags);
	}

	if (crng_ready())
		return;

	/*
	 * Calculate number of bits of randomness we probably added.
	 * We take into account the first, second and third-order deltas
	 * in order to make our estimate.
	 */
	delta = now - READ_ONCE(state->last_time);
	WRITE_ONCE(state->last_time, now);

	delta2 = delta - READ_ONCE(state->last_delta);
	WRITE_ONCE(state->last_delta, delta);

	delta3 = delta2 - READ_ONCE(state->last_delta2);
	WRITE_ONCE(state->last_delta2, delta2);

	if (delta < 0)
		delta = -delta;
	if (delta2 < 0)
		delta2 = -delta2;
	if (delta3 < 0)
		delta3 = -delta3;
	if (delta > delta2)
		delta = delta2;
	if (delta > delta3)
		delta = delta3;

	/*
	 * delta is now minimum absolute delta. Round down by 1 bit
	 * on general principles, and limit entropy estimate to 11 bits.
	 */
	bits = min(fls(delta >> 1), 11);

	/*
	 * As mentioned above, if we're in a hard IRQ, add_interrupt_randomness()
	 * will run after this, which uses a different crediting scheme of 1 bit
	 * per every 64 interrupts. In order to let that function do accounting
	 * close to the one in this function, we credit a full 64/64 bit per bit,
	 * and then subtract one to account for the extra one added.
	 */
	if (in_hardirq())
		this_cpu_ptr(&irq_randomness)->count += max(1u, bits * 64) - 1;
	else
		_credit_init_bits(bits);
}
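/*
 * Worked example (a sketch, with made-up numbers): events at jiffies
 * 100, 110, 130 and 170 give, at the last event, delta = 40,
 * delta2 = 40 - 20 = 20 and delta3 = 20 - 10 = 10. The minimum
 * absolute delta is 10, so bits = min(fls(10 >> 1), 11) = fls(5) = 3.
 */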
void add_input_randomness(unsigned int type, unsigned int code, unsigned int value)
{
	static unsigned char last_value;
	static struct timer_rand_state input_timer_state = { INITIAL_JIFFIES };

	/* Ignore autorepeat and the like. */
	if (value == last_value)
		return;

	last_value = value;
	add_timer_randomness(&input_timer_state,
			     (type << 4) ^ code ^ (code >> 4) ^ value);
}
EXPORT_SYMBOL_GPL(add_input_randomness);
void add_disk_randomness(struct gendisk *disk)
{
	if (!disk || !disk->random)
		return;
	/* First major is 1, so we get >= 0x200 here. */
	add_timer_randomness(disk->random, 0x100 + disk_devt(disk));
}
EXPORT_SYMBOL_GPL(add_disk_randomness);
void __cold rand_initialize_disk(struct gendisk *disk)
{
	struct timer_rand_state *state;

	/*
	 * If kzalloc returns null, we just won't use that entropy
	 * source.
	 */
	state = kzalloc(sizeof(struct timer_rand_state), GFP_KERNEL);
	if (state) {
		state->last_time = INITIAL_JIFFIES;
		disk->random = state;
	}
}
/*
 * Each time the timer fires, we expect that we got an unpredictable
 * jump in the cycle counter. Even if the timer is running on another
 * CPU, the timer activity will be touching the stack of the CPU that is
 * generating entropy.
 *
 * Note that we don't re-arm the timer in the timer itself - we are
 * happy to be scheduled away, since that just makes the load more
 * complex, but we do not want the timer to keep ticking unless the
 * entropy loop is running.
 *
 * So the re-arming always happens in the entropy loop itself.
 */
static void __cold entropy_timer(struct timer_list *t)
{
	credit_init_bits(1);
}
/*
 * If we have an actual cycle counter, see if we can
 * generate enough entropy with timing noise.
 */
static void __cold try_to_generate_entropy(void)
{
	struct {
		unsigned long entropy;
		struct timer_list timer;
	} stack;

	stack.entropy = random_get_entropy();

	/* Slow counter - or none. Don't even bother */
	if (stack.entropy == random_get_entropy())
		return;

	timer_setup_on_stack(&stack.timer, entropy_timer, 0);
	while (!crng_ready() && !signal_pending(current)) {
		if (!timer_pending(&stack.timer))
			mod_timer(&stack.timer, jiffies + 1);
		mix_pool_bytes(&stack.entropy, sizeof(stack.entropy));
		schedule();
		stack.entropy = random_get_entropy();
	}

	del_timer_sync(&stack.timer);
	destroy_timer_on_stack(&stack.timer);
	mix_pool_bytes(&stack.entropy, sizeof(stack.entropy));
}
/**********************************************************************
 *
 * Userspace reader/writer interfaces.
 *
 * getrandom(2) is the primary modern interface into the RNG and should
 * be used in preference to anything else.
 *
 * Reading from /dev/random has the same functionality as calling
 * getrandom(2) with flags=0. In earlier versions, however, it had
 * vastly different semantics and should therefore be avoided, to
 * prevent backwards compatibility issues.
 *
 * Reading from /dev/urandom has the same functionality as calling
 * getrandom(2) with flags=GRND_INSECURE. Because it does not block
 * waiting for the RNG to be ready, it should not be used.
 *
 * Writing to either /dev/random or /dev/urandom adds entropy to
 * the input pool but does not credit it.
 *
 * Polling on /dev/random indicates when the RNG is initialized, on
 * the read side, and when it wants new entropy, on the write side.
 *
 * Both /dev/random and /dev/urandom have the same set of ioctls for
 * adding entropy, getting the entropy count, zeroing the count, and
 * reseeding the crng.
 *
 **********************************************************************/
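/*
 * Illustrative sketch (not part of this driver): the preferred userspace
 * pattern per the comment above, using getrandom(2) with flags=0:
 *
 *	#include <sys/random.h>
 *
 *	unsigned char key[32];
 *	if (getrandom(key, sizeof(key), 0) != sizeof(key))
 *		abort(); // blocks until seeded; requests <= 256 bytes aren't cut short
 */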
SYSCALL_DEFINE3(getrandom, char __user *, ubuf, size_t, len, unsigned int, flags)
{
	struct iov_iter iter;
	struct iovec iov;
	int ret;

	if (flags & ~(GRND_NONBLOCK | GRND_RANDOM | GRND_INSECURE))
		return -EINVAL;

	/*
	 * Requesting insecure and blocking randomness at the same time makes
	 * no sense.
	 */
	if ((flags & (GRND_INSECURE | GRND_RANDOM)) == (GRND_INSECURE | GRND_RANDOM))
		return -EINVAL;

	if (!crng_ready() && !(flags & GRND_INSECURE)) {
		if (flags & GRND_NONBLOCK)
			return -EAGAIN;
		ret = wait_for_random_bytes();
		if (unlikely(ret))
			return ret;
	}

	ret = import_single_range(READ, ubuf, len, &iov, &iter);
	if (unlikely(ret))
		return ret;
	return get_random_bytes_user(&iter);
}
static __poll_t random_poll(struct file *file, poll_table *wait)
{
	poll_wait(file, &crng_init_wait, wait);
	return crng_ready() ? EPOLLIN | EPOLLRDNORM : EPOLLOUT | EPOLLWRNORM;
}
static ssize_t write_pool_user(struct iov_iter *iter)
{
	u8 block[BLAKE2S_BLOCK_SIZE];
	ssize_t ret = 0;
	size_t copied;

	if (unlikely(!iov_iter_count(iter)))
		return 0;

	for (;;) {
		copied = copy_from_iter(block, sizeof(block), iter);
		ret += copied;
		mix_pool_bytes(block, copied);
		if (!iov_iter_count(iter) || copied != sizeof(block))
			break;

		BUILD_BUG_ON(PAGE_SIZE % sizeof(block) != 0);
		if (ret % PAGE_SIZE == 0) {
			if (signal_pending(current))
				break;
			cond_resched();
		}
	}

	memzero_explicit(block, sizeof(block));
	return ret ? ret : -EFAULT;
}
static ssize_t random_write_iter(struct kiocb *kiocb, struct iov_iter *iter)
{
	return write_pool_user(iter);
}
static ssize_t urandom_read_iter(struct kiocb *kiocb, struct iov_iter *iter)
{
	static int maxwarn = 10;

	if (!crng_ready()) {
		if (!ratelimit_disable && maxwarn <= 0)
			++urandom_warning.missed;
		else if (ratelimit_disable || __ratelimit(&urandom_warning)) {
			--maxwarn;
			pr_notice("%s: uninitialized urandom read (%zu bytes read)\n",
				  current->comm, iov_iter_count(iter));
		}
	}

	return get_random_bytes_user(iter);
}
static ssize_t random_read_iter(struct kiocb *kiocb, struct iov_iter *iter)
{
	int ret;

	if (!crng_ready() &&
	    ((kiocb->ki_flags & (IOCB_NOWAIT | IOCB_NOIO)) ||
	     (kiocb->ki_filp->f_flags & O_NONBLOCK)))
		return -EAGAIN;

	ret = wait_for_random_bytes();
	if (ret != 0)
		return ret;
	return get_random_bytes_user(iter);
}
static long random_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
{
	int __user *p = (int __user *)arg;
	int ent_count;

	switch (cmd) {
	case RNDGETENTCNT:
		/* Inherently racy, no point locking. */
		if (put_user(input_pool.init_bits, p))
			return -EFAULT;
		return 0;
	case RNDADDTOENTCNT:
		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;
		if (get_user(ent_count, p))
			return -EFAULT;
		if (ent_count < 0)
			return -EINVAL;
		credit_init_bits(ent_count);
		return 0;
	case RNDADDENTROPY: {
		struct iov_iter iter;
		struct iovec iov;
		ssize_t ret;
		int len;

		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;
		if (get_user(ent_count, p++))
			return -EFAULT;
		if (ent_count < 0)
			return -EINVAL;
		if (get_user(len, p++))
			return -EFAULT;
		ret = import_single_range(WRITE, p, len, &iov, &iter);
		if (unlikely(ret))
			return ret;
		ret = write_pool_user(&iter);
		if (unlikely(ret < 0))
			return ret;
		/* Since we're crediting, enforce that it was all written into the pool. */
		if (unlikely(ret != len))
			return -EFAULT;
		credit_init_bits(ent_count);
		return 0;
	}
	case RNDZAPENTCNT:
	case RNDCLEARPOOL:
		/* No longer has any effect. */
		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;
		return 0;
	case RNDRESEEDCRNG:
		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;
		if (!crng_ready())
			return -ENODATA;
		crng_reseed();
		return 0;
	default:
		return -EINVAL;
	}
}
static int random_fasync(int fd, struct file *filp, int on)
{
	return fasync_helper(fd, filp, on, &fasync);
}
const struct file_operations random_fops = {
	.read_iter = random_read_iter,
	.write_iter = random_write_iter,
	.poll = random_poll,
	.unlocked_ioctl = random_ioctl,
	.compat_ioctl = compat_ptr_ioctl,
	.fasync = random_fasync,
	.llseek = noop_llseek,
	.splice_read = generic_file_splice_read,
	.splice_write = iter_file_splice_write,
};

const struct file_operations urandom_fops = {
	.read_iter = urandom_read_iter,
	.write_iter = random_write_iter,
	.unlocked_ioctl = random_ioctl,
	.compat_ioctl = compat_ptr_ioctl,
	.fasync = random_fasync,
	.llseek = noop_llseek,
	.splice_read = generic_file_splice_read,
	.splice_write = iter_file_splice_write,
};
/********************************************************************
 *
 * Sysctl interface.
 *
 * These are partly unused legacy knobs with dummy values to not break
 * userspace and partly still useful things. They are usually accessible
 * in /proc/sys/kernel/random/ and are as follows:
 *
 * - boot_id - a UUID representing the current boot.
 *
 * - uuid - a random UUID, different each time the file is read.
 *
 * - poolsize - the number of bits of entropy that the input pool can
 *   hold, tied to the POOL_BITS constant.
 *
 * - entropy_avail - the number of bits of entropy currently in the
 *   input pool. Always <= poolsize.
 *
 * - write_wakeup_threshold - the amount of entropy in the input pool
 *   below which write polls to /dev/random will unblock, requesting
 *   more entropy, tied to the POOL_READY_BITS constant. It is writable
 *   to avoid breaking old userspaces, but writing to it does not
 *   change any behavior of the RNG.
 *
 * - urandom_min_reseed_secs - fixed to the value CRNG_RESEED_INTERVAL.
 *   It is writable to avoid breaking old userspaces, but writing
 *   to it does not change any behavior of the RNG.
 *
 ********************************************************************/
#ifdef CONFIG_SYSCTL

#include <linux/sysctl.h>

static int sysctl_random_min_urandom_seed = CRNG_RESEED_INTERVAL / HZ;
static int sysctl_random_write_wakeup_bits = POOL_READY_BITS;
static int sysctl_poolsize = POOL_BITS;
static u8 sysctl_bootid[UUID_SIZE];
/*
 * This function is used to return both the bootid UUID, and random
 * UUID. The difference is in whether table->data is NULL; if it is,
 * then a new UUID is generated and returned to the user.
 */
static int proc_do_uuid(struct ctl_table *table, int write, void *buf,
			size_t *lenp, loff_t *ppos)
{
	u8 tmp_uuid[UUID_SIZE], *uuid;
	char uuid_string[UUID_STRING_LEN + 1];
	struct ctl_table fake_table = {
		.data = uuid_string,
		.maxlen = UUID_STRING_LEN
	};

	if (write)
		return -EPERM;

	uuid = table->data;
	if (!uuid) {
		uuid = tmp_uuid;
		generate_random_uuid(uuid);
	} else {
		static DEFINE_SPINLOCK(bootid_spinlock);

		spin_lock(&bootid_spinlock);
		if (!uuid[8])
			generate_random_uuid(uuid);
		spin_unlock(&bootid_spinlock);
	}

	snprintf(uuid_string, sizeof(uuid_string), "%pU", uuid);
	return proc_dostring(&fake_table, 0, buf, lenp, ppos);
}
/* The same as proc_dointvec, but writes don't change anything. */
static int proc_do_rointvec(struct ctl_table *table, int write, void *buf,
			    size_t *lenp, loff_t *ppos)
{
	return write ? 0 : proc_dointvec(table, 0, buf, lenp, ppos);
}
extern struct ctl_table random_table[];
struct ctl_table random_table[] = {
	{
		.procname = "poolsize",
		.data = &sysctl_poolsize,
		.maxlen = sizeof(int),
		.mode = 0444,
		.proc_handler = proc_dointvec,
	},
	{
		.procname = "entropy_avail",
		.data = &input_pool.init_bits,
		.maxlen = sizeof(int),
		.mode = 0444,
		.proc_handler = proc_dointvec,
	},
	{
		.procname = "write_wakeup_threshold",
		.data = &sysctl_random_write_wakeup_bits,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = proc_do_rointvec,
	},
	{
		.procname = "urandom_min_reseed_secs",
		.data = &sysctl_random_min_urandom_seed,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = proc_do_rointvec,
	},
	{
		.procname = "boot_id",
		.data = &sysctl_bootid,
		.mode = 0444,
		.proc_handler = proc_do_uuid,
	},
	{
		.procname = "uuid",
		.mode = 0444,
		.proc_handler = proc_do_uuid,
	},
	{ }
};
#endif	/* CONFIG_SYSCTL */