// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
/*
 * Copyright (C) 2017-2022 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
 * Copyright Matt Mackall <mpm@selenic.com>, 2003, 2004, 2005
 * Copyright Theodore Ts'o, 1994, 1995, 1996, 1997, 1998, 1999. All rights reserved.
 *
 * This driver produces cryptographically secure pseudorandom data. It is divided
 * into roughly six sections, each with a section header:
 *
 *   - Initialization and readiness waiting.
 *   - Fast key erasure RNG, the "crng".
 *   - Entropy accumulation and extraction routines.
 *   - Entropy collection routines.
 *   - Userspace reader/writer interfaces.
 *   - Sysctl interface.
 *
 * The high level overview is that there is one input pool, into which
 * various pieces of data are hashed. Prior to initialization, some of that
 * data is then "credited" as having a certain number of bits of entropy.
 * When enough bits of entropy are available, the hash is finalized and
 * handed as a key to a stream cipher that expands it indefinitely for
 * various consumers. This key is periodically refreshed as the various
 * entropy collectors, described below, add data to the input pool.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/utsname.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/major.h>
#include <linux/string.h>
#include <linux/fcntl.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/poll.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/genhd.h>
#include <linux/interrupt.h>
#include <linux/mm.h>
#include <linux/nodemask.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/percpu.h>
#include <linux/ptrace.h>
#include <linux/workqueue.h>
#include <linux/irq.h>
#include <linux/ratelimit.h>
#include <linux/syscalls.h>
#include <linux/completion.h>
#include <linux/uuid.h>
#include <linux/uaccess.h>
#include <linux/siphash.h>
#include <linux/uio.h>
#include <crypto/chacha20.h>
#include <crypto/blake2s.h>
#include <asm/processor.h>
#include <asm/irq.h>
#include <asm/irq_regs.h>
#include <asm/io.h>

/*********************************************************************
 *
 * Initialization and readiness waiting.
 *
 * Much of the RNG infrastructure is devoted to various dependencies
 * being able to wait until the RNG has collected enough entropy and
 * is ready for safe consumption.
 *
 *********************************************************************/

/*
 * crng_init is protected by base_crng->lock, and only increases
 * its value (from empty->early->ready).
 */
static enum {
	CRNG_EMPTY = 0, /* Little to no entropy collected */
	CRNG_EARLY = 1, /* At least POOL_EARLY_BITS collected */
	CRNG_READY = 2  /* Fully initialized with POOL_READY_BITS collected */
} crng_init __read_mostly = CRNG_EMPTY;
#define crng_ready() (likely(crng_init >= CRNG_READY))
/* Various types of waiters for crng_init->CRNG_READY transition. */
static DECLARE_WAIT_QUEUE_HEAD(crng_init_wait);
static struct fasync_struct *fasync;
static DEFINE_SPINLOCK(random_ready_chain_lock);
static RAW_NOTIFIER_HEAD(random_ready_chain);

/* Control how we warn userspace. */
static struct ratelimit_state urandom_warning =
	RATELIMIT_STATE_INIT_FLAGS("urandom_warning", HZ, 3, RATELIMIT_MSG_ON_RELEASE);
static int ratelimit_disable __read_mostly =
	IS_ENABLED(CONFIG_WARN_ALL_UNSEEDED_RANDOM);
module_param_named(ratelimit_disable, ratelimit_disable, int, 0644);
MODULE_PARM_DESC(ratelimit_disable, "Disable random ratelimit suppression");

/*
 * Returns whether or not the input pool has been seeded and thus guaranteed
 * to supply cryptographically secure random numbers. This applies to: the
 * /dev/urandom device, the get_random_bytes function, and the get_random_{u32,
 * u64,int,long} family of functions.
 *
 * Returns: true if the input pool has been seeded.
 *          false if the input pool has not been seeded.
 */
bool rng_is_initialized(void)
{
	return crng_ready();
}
EXPORT_SYMBOL(rng_is_initialized);

/* Used by wait_for_random_bytes(), and considered an entropy collector, below. */
static void try_to_generate_entropy(void);

/*
 * Wait for the input pool to be seeded and thus guaranteed to supply
 * cryptographically secure random numbers. This applies to: the /dev/urandom
 * device, the get_random_bytes function, and the get_random_{u32,u64,int,long}
 * family of functions. Using any of these functions without first calling
 * this function forfeits the guarantee of security.
 *
 * Returns: 0 if the input pool has been seeded.
 *          -ERESTARTSYS if the function was interrupted by a signal.
 */
int wait_for_random_bytes(void)
{
	while (!crng_ready()) {
		int ret;

		try_to_generate_entropy();
		ret = wait_event_interruptible_timeout(crng_init_wait, crng_ready(), HZ);
		if (ret)
			return ret > 0 ? 0 : ret;
	}
	return 0;
}
EXPORT_SYMBOL(wait_for_random_bytes);
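
/*
 * Illustrative sketch (not part of this driver): the intended calling
 * pattern for the above, assuming a hypothetical caller running in
 * process context where sleeping is allowed.
 */
#if 0
static int example_generate_key(u8 key[32])
{
	int ret = wait_for_random_bytes(); /* May sleep; -ERESTARTSYS on signal. */

	if (ret)
		return ret;
	get_random_bytes(key, 32); /* Now guaranteed to be seeded. */
	return 0;
}
#endif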

/*
 * Add a callback function that will be invoked when the input
 * pool is initialised.
 *
 * returns: 0 if callback is successfully added
 *	    -EALREADY if pool is already initialised (callback not called)
 */
int __cold register_random_ready_notifier(struct notifier_block *nb)
{
	unsigned long flags;
	int ret = -EALREADY;

	if (crng_ready())
		return ret;

	spin_lock_irqsave(&random_ready_chain_lock, flags);
	if (!crng_ready())
		ret = raw_notifier_chain_register(&random_ready_chain, nb);
	spin_unlock_irqrestore(&random_ready_chain_lock, flags);
	return ret;
}

/*
 * Delete a previously registered readiness callback function.
 */
int __cold unregister_random_ready_notifier(struct notifier_block *nb)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&random_ready_chain_lock, flags);
	ret = raw_notifier_chain_unregister(&random_ready_chain, nb);
	spin_unlock_irqrestore(&random_ready_chain_lock, flags);
	return ret;
}
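
/*
 * Illustrative sketch (not part of this driver) of how a caller might
 * use the readiness notifier; the callback and its work are hypothetical.
 */
#if 0
static int example_ready_cb(struct notifier_block *nb, unsigned long action, void *data)
{
	/* The input pool is now seeded; kick off deferred setup here. */
	return NOTIFY_DONE;
}

static struct notifier_block example_nb = { .notifier_call = example_ready_cb };

static void example_setup(void)
{
	/* -EALREADY means the RNG is already ready; proceed immediately. */
	if (register_random_ready_notifier(&example_nb) == -EALREADY)
		example_ready_cb(&example_nb, 0, NULL);
}
#endif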

static void __cold process_random_ready_list(void)
{
	unsigned long flags;

	spin_lock_irqsave(&random_ready_chain_lock, flags);
	raw_notifier_call_chain(&random_ready_chain, 0, NULL);
	spin_unlock_irqrestore(&random_ready_chain_lock, flags);
}

#define warn_unseeded_randomness() \
	if (IS_ENABLED(CONFIG_WARN_ALL_UNSEEDED_RANDOM) && !crng_ready()) \
		printk_deferred(KERN_NOTICE "random: %s called from %pS with crng_init=%d\n", \
				__func__, (void *)_RET_IP_, crng_init)


/*********************************************************************
 *
 * Fast key erasure RNG, the "crng".
 *
 * These functions expand entropy from the entropy extractor into
 * long streams for external consumption using the "fast key erasure"
 * RNG described at <https://blog.cr.yp.to/20170723-random.html>.
 *
 * There are a few exported interfaces for use by other drivers:
 *
 *	void get_random_bytes(void *buf, size_t len)
 *	u32 get_random_u32()
 *	u64 get_random_u64()
 *	unsigned int get_random_int()
 *	unsigned long get_random_long()
 *
 * These interfaces will return the requested number of random bytes
 * into the given buffer or as a return value. This is equivalent to
 * a read from /dev/urandom. The u32, u64, int, and long family of
 * functions may be higher performance for one-off random integers,
 * because they do a bit of buffering and do not invoke reseeding
 * until the buffer is emptied.
 *
 *********************************************************************/

enum {
	CRNG_RESEED_START_INTERVAL = HZ,
	CRNG_RESEED_INTERVAL = 60 * HZ
};

static struct {
	u8 key[CHACHA20_KEY_SIZE] __aligned(__alignof__(long));
	unsigned long birth;
	unsigned long generation;
	spinlock_t lock;
} base_crng = {
	.lock = __SPIN_LOCK_UNLOCKED(base_crng.lock)
};

struct crng {
	u8 key[CHACHA20_KEY_SIZE];
	unsigned long generation;
};

static DEFINE_PER_CPU(struct crng, crngs) = {
	.generation = ULONG_MAX
};

/* Used by crng_reseed() and crng_make_state() to extract a new seed from the input pool. */
static void extract_entropy(void *buf, size_t len);

/* This extracts a new crng key from the input pool. */
static void crng_reseed(void)
{
	unsigned long flags;
	unsigned long next_gen;
	u8 key[CHACHA20_KEY_SIZE];

	extract_entropy(key, sizeof(key));

	/*
	 * We copy the new key into the base_crng, overwriting the old one,
	 * and update the generation counter. We avoid hitting ULONG_MAX,
	 * because the per-cpu crngs are initialized to ULONG_MAX, so this
	 * forces new CPUs that come online to always initialize.
	 */
	spin_lock_irqsave(&base_crng.lock, flags);
	memcpy(base_crng.key, key, sizeof(base_crng.key));
	next_gen = base_crng.generation + 1;
	if (next_gen == ULONG_MAX)
		++next_gen;
	WRITE_ONCE(base_crng.generation, next_gen);
	WRITE_ONCE(base_crng.birth, jiffies);
	if (!crng_ready())
		crng_init = CRNG_READY;
	spin_unlock_irqrestore(&base_crng.lock, flags);
	memzero_explicit(key, sizeof(key));
}
/*
 * This generates a ChaCha block using the provided key, and then
 * immediately overwrites that key with half the block. It returns
 * the resultant ChaCha state to the user, along with the second
 * half of the block containing 32 bytes of random data that may
 * be used; random_data_len may not be greater than 32.
 *
 * The returned ChaCha state contains within it a copy of the old
 * key value, at index 4, so the state should always be zeroed out
 * immediately after using in order to maintain forward secrecy.
 * If the state cannot be erased in a timely manner, then it is
 * safer to set the random_data parameter to &chacha_state[4] so
 * that this function overwrites it before returning.
 */
static void crng_fast_key_erasure(u8 key[CHACHA20_KEY_SIZE],
				  u32 chacha_state[CHACHA20_BLOCK_SIZE / sizeof(u32)],
				  u8 *random_data, size_t random_data_len)
{
	u8 first_block[CHACHA20_BLOCK_SIZE];

	BUG_ON(random_data_len > 32);

	chacha_init_consts(chacha_state);
	memcpy(&chacha_state[4], key, CHACHA20_KEY_SIZE);
	memset(&chacha_state[12], 0, sizeof(u32) * 4);
	chacha20_block(chacha_state, first_block);

	memcpy(key, first_block, CHACHA20_KEY_SIZE);
	memcpy(random_data, first_block + CHACHA20_KEY_SIZE, random_data_len);
	memzero_explicit(first_block, sizeof(first_block));
}
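
/*
 * For clarity, the layout of the single block generated above is:
 *
 *   first_block[ 0..31] -> becomes the new key (the old key is erased)
 *   first_block[32..63] -> up to 32 bytes handed back as random_data
 *
 * Erasing the key with the first half of its own output is what gives
 * the construction its forward secrecy: recovering the current key
 * reveals nothing about previously output blocks.
 */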

/*
 * Return whether the crng seed is considered to be sufficiently old
 * that a reseeding is needed. This happens if the last reseeding
 * was CRNG_RESEED_INTERVAL ago, or during early boot, at an interval
 * proportional to the uptime.
 */
static bool crng_has_old_seed(void)
{
	static bool early_boot = true;
	unsigned long interval = CRNG_RESEED_INTERVAL;

	if (unlikely(READ_ONCE(early_boot))) {
		time64_t uptime = ktime_get_seconds();
		if (uptime >= CRNG_RESEED_INTERVAL / HZ * 2)
			WRITE_ONCE(early_boot, false);
		else
			interval = max_t(unsigned int, CRNG_RESEED_START_INTERVAL,
					 (unsigned int)uptime / 2 * HZ);
	}
	return time_is_before_jiffies(READ_ONCE(base_crng.birth) + interval);
}

/*
 * This function returns a ChaCha state that you may use for generating
 * random data. It also returns up to 32 bytes on its own of random data
 * that may be used; random_data_len may not be greater than 32.
 */
static void crng_make_state(u32 chacha_state[CHACHA20_BLOCK_SIZE / sizeof(u32)],
			    u8 *random_data, size_t random_data_len)
{
	unsigned long flags;
	struct crng *crng;

	BUG_ON(random_data_len > 32);

	/*
	 * For the fast path, we check whether we're ready, unlocked first, and
	 * then re-check once locked later. In the case where we're really not
	 * ready, we do fast key erasure with the base_crng directly, extracting
	 * when crng_init is CRNG_EMPTY.
	 */
	if (!crng_ready()) {
		bool ready;

		spin_lock_irqsave(&base_crng.lock, flags);
		ready = crng_ready();
		if (!ready) {
			if (crng_init == CRNG_EMPTY)
				extract_entropy(base_crng.key, sizeof(base_crng.key));
			crng_fast_key_erasure(base_crng.key, chacha_state,
					      random_data, random_data_len);
		}
		spin_unlock_irqrestore(&base_crng.lock, flags);
		if (!ready)
			return;
	}

	/*
	 * If the base_crng is old enough, we reseed, which in turn bumps the
	 * generation counter that we check below.
	 */
	if (unlikely(crng_has_old_seed()))
		crng_reseed();

	local_irq_save(flags);
	crng = raw_cpu_ptr(&crngs);

	/*
	 * If our per-cpu crng is older than the base_crng, then it means
	 * somebody reseeded the base_crng. In that case, we do fast key
	 * erasure on the base_crng, and use its output as the new key
	 * for our per-cpu crng. This brings us up to date with base_crng.
	 */
	if (unlikely(crng->generation != READ_ONCE(base_crng.generation))) {
		spin_lock(&base_crng.lock);
		crng_fast_key_erasure(base_crng.key, chacha_state,
				      crng->key, sizeof(crng->key));
		crng->generation = base_crng.generation;
		spin_unlock(&base_crng.lock);
	}

	/*
	 * Finally, when we've made it this far, our per-cpu crng has an up
	 * to date key, and we can do fast key erasure with it to produce
	 * some random data and a ChaCha state for the caller. All other
	 * branches of this function are "unlikely", so most of the time we
	 * should wind up here immediately.
	 */
	crng_fast_key_erasure(crng->key, chacha_state, random_data, random_data_len);
	local_irq_restore(flags);
}

static void _get_random_bytes(void *buf, size_t len)
{
	u32 chacha_state[CHACHA20_BLOCK_SIZE / sizeof(u32)];
	u8 tmp[CHACHA20_BLOCK_SIZE];
	size_t first_block_len;

	if (!len)
		return;

	first_block_len = min_t(size_t, 32, len);
	crng_make_state(chacha_state, buf, first_block_len);
	len -= first_block_len;
	buf += first_block_len;

	while (len) {
		if (len < CHACHA20_BLOCK_SIZE) {
			chacha20_block(chacha_state, tmp);
			memcpy(buf, tmp, len);
			memzero_explicit(tmp, sizeof(tmp));
			break;
		}

		chacha20_block(chacha_state, buf);
		if (unlikely(chacha_state[12] == 0))
			++chacha_state[13];
		len -= CHACHA20_BLOCK_SIZE;
		buf += CHACHA20_BLOCK_SIZE;
	}

	memzero_explicit(chacha_state, sizeof(chacha_state));
}

/*
 * This function is the exported kernel interface.  It returns some
 * number of good random numbers, suitable for key generation, seeding
 * TCP sequence numbers, etc.  It does not rely on the hardware random
 * number generator.  For random bytes direct from the hardware RNG
 * (when available), use get_random_bytes_arch(). In order to ensure
 * that the randomness provided by this function is okay, the function
 * wait_for_random_bytes() should be called and return 0 at least once
 * at any point prior.
 */
void get_random_bytes(void *buf, size_t len)
{
	warn_unseeded_randomness();
	_get_random_bytes(buf, len);
}
EXPORT_SYMBOL(get_random_bytes);

static ssize_t get_random_bytes_user(struct iov_iter *iter)
{
	u32 chacha_state[CHACHA20_BLOCK_SIZE / sizeof(u32)];
	u8 block[CHACHA20_BLOCK_SIZE];
	size_t ret = 0, copied;

	if (unlikely(!iov_iter_count(iter)))
		return 0;

	/*
	 * Immediately overwrite the ChaCha key at index 4 with random
	 * bytes, in case userspace causes copy_to_user() below to sleep
	 * forever, so that we still retain forward secrecy in that case.
	 */
	crng_make_state(chacha_state, (u8 *)&chacha_state[4], CHACHA20_KEY_SIZE);
	/*
	 * However, if we're doing a read of len <= 32, we don't need to
	 * use chacha_state after, so we can simply return those bytes to
	 * the user directly.
	 */
	if (iov_iter_count(iter) <= CHACHA20_KEY_SIZE) {
		ret = copy_to_iter(&chacha_state[4], CHACHA20_KEY_SIZE, iter);
		goto out_zero_chacha;
	}

	for (;;) {
		chacha20_block(chacha_state, block);
		if (unlikely(chacha_state[12] == 0))
			++chacha_state[13];

		copied = copy_to_iter(block, sizeof(block), iter);
		ret += copied;
		if (!iov_iter_count(iter) || copied != sizeof(block))
			break;

		BUILD_BUG_ON(PAGE_SIZE % sizeof(block) != 0);
		if (ret % PAGE_SIZE == 0) {
			if (signal_pending(current))
				break;
			cond_resched();
		}
	}

	memzero_explicit(block, sizeof(block));
out_zero_chacha:
	memzero_explicit(chacha_state, sizeof(chacha_state));
	return ret ? ret : -EFAULT;
}

/*
 * Batched entropy returns random integers. The quality of the random
 * number is as good as /dev/urandom. In order to ensure that the
 * randomness provided by this function is okay, the function
 * wait_for_random_bytes() should be called and return 0 at least once
 * at any point prior.
 */

#define DEFINE_BATCHED_ENTROPY(type)						\
struct batch_ ##type {								\
	/*									\
	 * We make this 1.5x a ChaCha block, so that we get the			\
	 * remaining 32 bytes from fast key erasure, plus one full		\
	 * block from the detached ChaCha state. We can increase		\
	 * the size of this later if needed so long as we keep the		\
	 * formula of (integer_blocks + 0.5) * CHACHA20_BLOCK_SIZE.		\
	 */									\
	type entropy[CHACHA20_BLOCK_SIZE * 3 / (2 * sizeof(type))];		\
	unsigned long generation;						\
	unsigned int position;							\
};										\
										\
static DEFINE_PER_CPU(struct batch_ ##type, batched_entropy_ ##type) = {	\
	.position = UINT_MAX							\
};										\
										\
type get_random_ ##type(void)							\
{										\
	type ret;								\
	unsigned long flags;							\
	struct batch_ ##type *batch;						\
	unsigned long next_gen;							\
										\
	warn_unseeded_randomness();						\
										\
	if (!crng_ready()) {							\
		_get_random_bytes(&ret, sizeof(ret));				\
		return ret;							\
	}									\
										\
	local_irq_save(flags);							\
	batch = raw_cpu_ptr(&batched_entropy_##type);				\
										\
	next_gen = READ_ONCE(base_crng.generation);				\
	if (batch->position >= ARRAY_SIZE(batch->entropy) ||			\
	    next_gen != batch->generation) {					\
		_get_random_bytes(batch->entropy, sizeof(batch->entropy));	\
		batch->position = 0;						\
		batch->generation = next_gen;					\
	}									\
										\
	ret = batch->entropy[batch->position];					\
	batch->entropy[batch->position] = 0;					\
	++batch->position;							\
	local_irq_restore(flags);						\
	return ret;								\
}										\
EXPORT_SYMBOL(get_random_ ##type);

DEFINE_BATCHED_ENTROPY(u64)
DEFINE_BATCHED_ENTROPY(u32)
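
/*
 * Illustrative sketch (not part of this driver): the batched interfaces
 * above are meant for one-off integers, where they are cheaper than a
 * full get_random_bytes() call. The function below is hypothetical.
 */
#if 0
static unsigned long example_random_backoff(void)
{
	/* A random delay of one to two seconds, in jiffies. */
	return HZ + get_random_u32() % HZ;
}
#endif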

#ifdef CONFIG_SMP
/*
 * This function is called when the CPU is coming up, with entry
 * CPUHP_RANDOM_PREPARE, which comes before CPUHP_WORKQUEUE_PREP.
 */
int __cold random_prepare_cpu(unsigned int cpu)
{
	/*
	 * When the cpu comes back online, immediately invalidate both
	 * the per-cpu crng and all batches, so that we serve fresh
	 * randomness.
	 */
	per_cpu_ptr(&crngs, cpu)->generation = ULONG_MAX;
	per_cpu_ptr(&batched_entropy_u32, cpu)->position = UINT_MAX;
	per_cpu_ptr(&batched_entropy_u64, cpu)->position = UINT_MAX;
	return 0;
}
#endif

/*
 * This function will use the architecture-specific hardware random
 * number generator if it is available. It is not recommended for
 * use. Use get_random_bytes() instead. It returns the number of
 * bytes filled in.
 */
size_t __must_check get_random_bytes_arch(void *buf, size_t len)
{
	size_t left = len;
	u8 *p = buf;

	while (left) {
		unsigned long v;
		size_t block_len = min_t(size_t, left, sizeof(unsigned long));

		if (!arch_get_random_long(&v))
			break;

		memcpy(p, &v, block_len);
		p += block_len;
		left -= block_len;
	}

	return len - left;
}
EXPORT_SYMBOL(get_random_bytes_arch);


/**********************************************************************
 *
 * Entropy accumulation and extraction routines.
 *
 * Callers may add entropy via:
 *
 *     static void mix_pool_bytes(const void *buf, size_t len)
 *
 * After which, if added entropy should be credited:
 *
 *     static void credit_init_bits(size_t bits)
 *
 * Finally, extract entropy via:
 *
 *     static void extract_entropy(void *buf, size_t len)
 *
 **********************************************************************/

enum {
	POOL_BITS = BLAKE2S_HASH_SIZE * 8,
	POOL_READY_BITS = POOL_BITS, /* When crng_init->CRNG_READY */
	POOL_EARLY_BITS = POOL_READY_BITS / 2 /* When crng_init->CRNG_EARLY */
};

static struct {
	struct blake2s_state hash;
	spinlock_t lock;
	unsigned int init_bits;
} input_pool = {
	.hash.h = { BLAKE2S_IV0 ^ (0x01010000 | BLAKE2S_HASH_SIZE),
		    BLAKE2S_IV1, BLAKE2S_IV2, BLAKE2S_IV3, BLAKE2S_IV4,
		    BLAKE2S_IV5, BLAKE2S_IV6, BLAKE2S_IV7 },
	.hash.outlen = BLAKE2S_HASH_SIZE,
	.lock = __SPIN_LOCK_UNLOCKED(input_pool.lock),
};

static void _mix_pool_bytes(const void *buf, size_t len)
{
	blake2s_update(&input_pool.hash, buf, len);
}

/*
 * This function adds bytes into the input pool. It does not
 * update the initialization bit counter; the caller should call
 * credit_init_bits if this is appropriate.
 */
static void mix_pool_bytes(const void *buf, size_t len)
{
	unsigned long flags;

	spin_lock_irqsave(&input_pool.lock, flags);
	_mix_pool_bytes(buf, len);
	spin_unlock_irqrestore(&input_pool.lock, flags);
}

/*
 * This is an HKDF-like construction for using the hashed collected entropy
 * as a PRF key, that's then expanded block-by-block.
 */
static void extract_entropy(void *buf, size_t len)
{
	unsigned long flags;
	u8 seed[BLAKE2S_HASH_SIZE], next_key[BLAKE2S_HASH_SIZE];
	struct {
		unsigned long rdseed[32 / sizeof(long)];
		size_t counter;
	} block;
	size_t i;

	for (i = 0; i < ARRAY_SIZE(block.rdseed); ++i) {
		if (!arch_get_random_seed_long(&block.rdseed[i]) &&
		    !arch_get_random_long(&block.rdseed[i]))
			block.rdseed[i] = random_get_entropy();
	}

	spin_lock_irqsave(&input_pool.lock, flags);

	/* seed = HASHPRF(last_key, entropy_input) */
	blake2s_final(&input_pool.hash, seed);

	/* next_key = HASHPRF(seed, RDSEED || 0) */
	block.counter = 0;
	blake2s(next_key, (u8 *)&block, seed, sizeof(next_key), sizeof(block), sizeof(seed));
	blake2s_init_key(&input_pool.hash, BLAKE2S_HASH_SIZE, next_key, sizeof(next_key));

	spin_unlock_irqrestore(&input_pool.lock, flags);
	memzero_explicit(next_key, sizeof(next_key));

	while (len) {
		i = min_t(size_t, len, BLAKE2S_HASH_SIZE);
		/* output = HASHPRF(seed, RDSEED || ++counter) */
		++block.counter;
		blake2s(buf, (u8 *)&block, seed, i, sizeof(block), sizeof(seed));
		len -= i;
		buf += i;
	}

	memzero_explicit(seed, sizeof(seed));
	memzero_explicit(&block, sizeof(block));
}

#define credit_init_bits(bits) if (!crng_ready()) _credit_init_bits(bits)

static void __cold _credit_init_bits(size_t bits)
{
	unsigned int new, orig, add;
	unsigned long flags;

	if (!bits)
		return;

	add = min_t(size_t, bits, POOL_BITS);

	do {
		orig = READ_ONCE(input_pool.init_bits);
		new = min_t(unsigned int, POOL_BITS, orig + add);
	} while (cmpxchg(&input_pool.init_bits, orig, new) != orig);

	if (orig < POOL_READY_BITS && new >= POOL_READY_BITS) {
		crng_reseed(); /* Sets crng_init to CRNG_READY under base_crng.lock. */
		process_random_ready_list();
		wake_up_interruptible(&crng_init_wait);
		kill_fasync(&fasync, SIGIO, POLL_IN);
		pr_notice("crng init done\n");
		if (urandom_warning.missed)
			pr_notice("%d urandom warning(s) missed due to ratelimiting\n",
				  urandom_warning.missed);
	} else if (orig < POOL_EARLY_BITS && new >= POOL_EARLY_BITS) {
		spin_lock_irqsave(&base_crng.lock, flags);
		/* Check if crng_init is CRNG_EMPTY, to avoid race with crng_reseed(). */
		if (crng_init == CRNG_EMPTY) {
			extract_entropy(base_crng.key, sizeof(base_crng.key));
			crng_init = CRNG_EARLY;
		}
		spin_unlock_irqrestore(&base_crng.lock, flags);
	}
}


/**********************************************************************
 *
 * Entropy collection routines.
 *
 * The following exported functions are used for pushing entropy into
 * the above entropy accumulation routines:
 *
 *	void add_device_randomness(const void *buf, size_t len);
 *	void add_hwgenerator_randomness(const void *buf, size_t len, size_t entropy);
 *	void add_bootloader_randomness(const void *buf, size_t len);
 *	void add_interrupt_randomness(int irq);
 *	void add_input_randomness(unsigned int type, unsigned int code, unsigned int value);
 *	void add_disk_randomness(struct gendisk *disk);
 *
 * add_device_randomness() adds data to the input pool that
 * is likely to differ between two devices (or possibly even per boot).
 * This would be things like MAC addresses or serial numbers, or the
 * read-out of the RTC. This does *not* credit any actual entropy to
 * the pool, but it initializes the pool to different values for devices
 * that might otherwise be identical and have very little entropy
 * available to them (particularly common in the embedded world).
 *
 * add_hwgenerator_randomness() is for true hardware RNGs, and will credit
 * entropy as specified by the caller. If the entropy pool is full it will
 * block until more entropy is needed.
 *
 * add_bootloader_randomness() is called by bootloader drivers, such as EFI
 * and device tree, and credits its input depending on whether or not the
 * configuration option CONFIG_RANDOM_TRUST_BOOTLOADER is set.
 *
 * add_interrupt_randomness() uses the interrupt timing as random
 * inputs to the entropy pool. Using the cycle counters and the irq source
 * as inputs, it feeds the input pool roughly once a second or after 64
 * interrupts, crediting 1 bit of entropy for whichever comes first.
 *
 * add_input_randomness() uses the input layer interrupt timing, as well
 * as the event type information from the hardware.
 *
 * add_disk_randomness() uses what amounts to the seek time of block
 * layer request events, on a per-disk_devt basis, as input to the
 * entropy pool. Note that high-speed solid state drives with very low
 * seek times do not make for good sources of entropy, as their seek
 * times are usually fairly consistent.
 *
 * The last two routines try to estimate how many bits of entropy
 * to credit. They do this by keeping track of the first and second
 * order deltas of the event timings.
 *
 **********************************************************************/

static bool trust_cpu __initdata = IS_ENABLED(CONFIG_RANDOM_TRUST_CPU);
static bool trust_bootloader __initdata = IS_ENABLED(CONFIG_RANDOM_TRUST_BOOTLOADER);
static int __init parse_trust_cpu(char *arg)
{
	return kstrtobool(arg, &trust_cpu);
}
static int __init parse_trust_bootloader(char *arg)
{
	return kstrtobool(arg, &trust_bootloader);
}
early_param("random.trust_cpu", parse_trust_cpu);
early_param("random.trust_bootloader", parse_trust_bootloader);

/*
 * The first collection of entropy occurs at system boot while interrupts
 * are still turned off. Here we push in latent entropy, RDSEED, a timestamp,
 * utsname(), and the command line. Depending on the above configuration knob,
 * RDSEED may be considered sufficient for initialization. Note that much
 * earlier setup may already have pushed entropy into the input pool by the
 * time we get here.
 */
int __init random_init(const char *command_line)
{
	ktime_t now = ktime_get_real();
	unsigned int i, arch_bits;
	unsigned long entropy;

#if defined(LATENT_ENTROPY_PLUGIN)
	static const u8 compiletime_seed[BLAKE2S_BLOCK_SIZE] __initconst __latent_entropy;
	_mix_pool_bytes(compiletime_seed, sizeof(compiletime_seed));
#endif

	for (i = 0, arch_bits = BLAKE2S_BLOCK_SIZE * 8;
	     i < BLAKE2S_BLOCK_SIZE; i += sizeof(entropy)) {
		if (!arch_get_random_seed_long_early(&entropy) &&
		    !arch_get_random_long_early(&entropy)) {
			entropy = random_get_entropy();
			arch_bits -= sizeof(entropy) * 8;
		}
		_mix_pool_bytes(&entropy, sizeof(entropy));
	}
	_mix_pool_bytes(&now, sizeof(now));
	_mix_pool_bytes(utsname(), sizeof(*(utsname())));
	_mix_pool_bytes(command_line, strlen(command_line));
	add_latent_entropy();

	if (crng_ready())
		crng_reseed();
	else if (trust_cpu)
		_credit_init_bits(arch_bits);

	return 0;
}

/*
 * Add device- or boot-specific data to the input pool to help
 * initialize it.
 *
 * None of this adds any entropy; it is meant to avoid the problem of
 * the entropy pool having similar initial state across largely
 * identical devices.
 */
void add_device_randomness(const void *buf, size_t len)
{
	unsigned long entropy = random_get_entropy();
	unsigned long flags;

	spin_lock_irqsave(&input_pool.lock, flags);
	_mix_pool_bytes(&entropy, sizeof(entropy));
	_mix_pool_bytes(buf, len);
	spin_unlock_irqrestore(&input_pool.lock, flags);
}
EXPORT_SYMBOL(add_device_randomness);
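
/*
 * Illustrative sketch (not part of this driver): a network driver might
 * stir in its MAC address at probe time; nothing is credited. The
 * surrounding driver and its netdev variable are hypothetical:
 *
 *	add_device_randomness(netdev->dev_addr, netdev->addr_len);
 */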

/*
 * Interface for in-kernel drivers of true hardware RNGs.
 * Those devices may produce endless random bits and will be throttled
 * when our pool is full.
 */
void add_hwgenerator_randomness(const void *buf, size_t len, size_t entropy)
{
	mix_pool_bytes(buf, len);
	credit_init_bits(entropy);

	/*
	 * Throttle writing to once every CRNG_RESEED_INTERVAL, unless
	 * we're not yet initialized.
	 */
	if (!kthread_should_stop() && crng_ready())
		schedule_timeout_interruptible(CRNG_RESEED_INTERVAL);
}
EXPORT_SYMBOL_GPL(add_hwgenerator_randomness);
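
/*
 * Illustrative sketch (not part of this driver) of a hw_random-style
 * feeder thread. The TRNG read helper is hypothetical; note that
 * add_hwgenerator_randomness() deliberately sleeps once the pool is
 * full, so the loop is self-throttling.
 */
#if 0
static int example_hwrng_fill(void *unused)
{
	u8 buf[32];

	while (!kthread_should_stop()) {
		example_read_trng(buf, sizeof(buf)); /* Hypothetical TRNG read. */
		/* Credit the full 256 bits only if the TRNG is trusted. */
		add_hwgenerator_randomness(buf, sizeof(buf), sizeof(buf) * 8);
	}
	return 0;
}
#endif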

/*
 * Handle random seed passed by bootloader, and credit it if
 * CONFIG_RANDOM_TRUST_BOOTLOADER is set.
 */
void __init add_bootloader_randomness(const void *buf, size_t len)
{
	mix_pool_bytes(buf, len);
	if (trust_bootloader)
		credit_init_bits(len * 8);
}

struct fast_pool {
	unsigned long pool[4];
	unsigned long last;
	unsigned int count;
	struct timer_list mix;
};

static void mix_interrupt_randomness(struct timer_list *work);

static DEFINE_PER_CPU(struct fast_pool, irq_randomness) = {
#ifdef CONFIG_64BIT
#define FASTMIX_PERM SIPHASH_PERMUTATION
	.pool = { SIPHASH_CONST_0, SIPHASH_CONST_1, SIPHASH_CONST_2, SIPHASH_CONST_3 },
#else
#define FASTMIX_PERM HSIPHASH_PERMUTATION
	.pool = { HSIPHASH_CONST_0, HSIPHASH_CONST_1, HSIPHASH_CONST_2, HSIPHASH_CONST_3 },
#endif
	.mix = __TIMER_INITIALIZER(mix_interrupt_randomness, 0)
};

/*
 * This is [Half]SipHash-1-x, starting from an empty key. Because
 * the key is fixed, it assumes that its inputs are non-malicious,
 * and therefore this has no security on its own. s represents the
 * four-word SipHash state, while v represents a two-word input.
 */
static void fast_mix(unsigned long s[4], unsigned long v1, unsigned long v2)
{
	s[3] ^= v1;
	FASTMIX_PERM(s[0], s[1], s[2], s[3]);
	s[0] ^= v1;
	s[3] ^= v2;
	FASTMIX_PERM(s[0], s[1], s[2], s[3]);
	s[0] ^= v2;
}

#ifdef CONFIG_SMP
/*
 * This function is called when the CPU has just come online, with
 * entry CPUHP_AP_RANDOM_ONLINE, just after CPUHP_AP_WORKQUEUE_ONLINE.
 */
int __cold random_online_cpu(unsigned int cpu)
{
	/*
	 * During CPU shutdown and before CPU onlining, add_interrupt_
	 * randomness() may schedule mix_interrupt_randomness(), and
	 * set the MIX_INFLIGHT flag. However, because the worker can
	 * be scheduled on a different CPU during this period, that
	 * flag will never be cleared. For that reason, we zero out
	 * the flag here, which runs just after workqueues are onlined
	 * for the CPU again. This also has the effect of setting the
	 * irq randomness count to zero so that new accumulated irqs
	 * are fresh.
	 */
	per_cpu_ptr(&irq_randomness, cpu)->count = 0;
	return 0;
}
#endif

static void mix_interrupt_randomness(struct timer_list *work)
{
	struct fast_pool *fast_pool = container_of(work, struct fast_pool, mix);
	/*
	 * The size of the copied stack pool is explicitly 2 longs so that we
	 * only ever ingest half of the siphash output each time, retaining
	 * the other half as the next "key" that carries over. The entropy is
	 * supposed to be sufficiently dispersed between bits so on average
	 * we don't wind up "losing" some.
	 */
	unsigned long pool[2];
	unsigned int count;

	/* Check to see if we're running on the wrong CPU due to hotplug. */
	local_irq_disable();
	if (fast_pool != this_cpu_ptr(&irq_randomness)) {
		local_irq_enable();
		return;
	}

	/*
	 * Copy the pool to the stack so that the mixer always has a
	 * consistent view, before we reenable irqs again.
	 */
	memcpy(pool, fast_pool->pool, sizeof(pool));
	count = fast_pool->count;
	fast_pool->count = 0;
	fast_pool->last = jiffies;
	local_irq_enable();

	mix_pool_bytes(pool, sizeof(pool));
	credit_init_bits(clamp_t(unsigned int, (count & U16_MAX) / 64, 1, sizeof(pool) * 8));

	memzero_explicit(pool, sizeof(pool));
}

void add_interrupt_randomness(int irq)
{
	enum { MIX_INFLIGHT = 1U << 31 };
	unsigned long entropy = random_get_entropy();
	struct fast_pool *fast_pool = this_cpu_ptr(&irq_randomness);
	struct pt_regs *regs = get_irq_regs();
	unsigned int new_count;

	fast_mix(fast_pool->pool, entropy,
		 (regs ? instruction_pointer(regs) : _RET_IP_) ^ swab(irq));
	new_count = ++fast_pool->count;

	if (new_count & MIX_INFLIGHT)
		return;

	if (new_count < 1024 && !time_is_before_jiffies(fast_pool->last + HZ))
		return;

	fast_pool->count |= MIX_INFLIGHT;
	if (!timer_pending(&fast_pool->mix)) {
		fast_pool->mix.expires = jiffies;
		add_timer_on(&fast_pool->mix, raw_smp_processor_id());
	}
}
EXPORT_SYMBOL_GPL(add_interrupt_randomness);

/* There is one of these per entropy source */
struct timer_rand_state {
	unsigned long last_time;
	long last_delta, last_delta2;
};

/*
 * This function adds entropy to the entropy "pool" by using timing
 * delays. It uses the timer_rand_state structure to make an estimate
 * of how many bits of entropy this call has added to the pool. The
 * value "num" is also added to the pool; it should somehow describe
 * the type of event that just happened.
 */
static void add_timer_randomness(struct timer_rand_state *state, unsigned int num)
{
	unsigned long entropy = random_get_entropy(), now = jiffies, flags;
	long delta, delta2, delta3;
	unsigned int bits;

	/*
	 * If we're in a hard IRQ, add_interrupt_randomness() will be called
	 * sometime after, so mix into the fast pool.
	 */
	if (in_irq()) {
		fast_mix(this_cpu_ptr(&irq_randomness)->pool, entropy, num);
	} else {
		spin_lock_irqsave(&input_pool.lock, flags);
		_mix_pool_bytes(&entropy, sizeof(entropy));
		_mix_pool_bytes(&num, sizeof(num));
		spin_unlock_irqrestore(&input_pool.lock, flags);
	}

	if (crng_ready())
		return;

	/*
	 * Calculate number of bits of randomness we probably added.
	 * We take into account the first, second and third-order deltas
	 * in order to make our estimate.
	 */
	delta = now - READ_ONCE(state->last_time);
	WRITE_ONCE(state->last_time, now);

	delta2 = delta - READ_ONCE(state->last_delta);
	WRITE_ONCE(state->last_delta, delta);

	delta3 = delta2 - READ_ONCE(state->last_delta2);
	WRITE_ONCE(state->last_delta2, delta2);

	if (delta < 0)
		delta = -delta;
	if (delta2 < 0)
		delta2 = -delta2;
	if (delta3 < 0)
		delta3 = -delta3;
	if (delta > delta2)
		delta = delta2;
	if (delta > delta3)
		delta = delta3;

	/*
	 * delta is now minimum absolute delta. Round down by 1 bit
	 * on general principles, and limit entropy estimate to 11 bits.
	 */
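	/*
	 * Worked example (illustrative, not from the original source):
	 * deltas of 13, 5, and 9 give a minimum absolute delta of 5, so
	 * fls(5 >> 1) = fls(2) = 2 bits are credited (capped at 11).
	 */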
	bits = min(fls(delta >> 1), 11);

	/*
	 * As mentioned above, if we're in a hard IRQ, add_interrupt_randomness()
	 * will run after this, which uses a different crediting scheme of 1 bit
	 * per every 64 interrupts. In order to let that function do accounting
	 * close to the one in this function, we credit a full 64/64 bit per bit,
	 * and then subtract one to account for the extra one added.
	 */
	if (in_irq())
		this_cpu_ptr(&irq_randomness)->count += max(1u, bits * 64) - 1;
	else
		_credit_init_bits(bits);
}

void add_input_randomness(unsigned int type, unsigned int code, unsigned int value)
{
	static unsigned char last_value;
	static struct timer_rand_state input_timer_state = { INITIAL_JIFFIES };

	/* Ignore autorepeat and the like. */
	if (value == last_value)
		return;

	last_value = value;
	add_timer_randomness(&input_timer_state,
			     (type << 4) ^ code ^ (code >> 4) ^ value);
}
EXPORT_SYMBOL_GPL(add_input_randomness);

#ifdef CONFIG_BLOCK
void add_disk_randomness(struct gendisk *disk)
{
	if (!disk || !disk->random)
		return;
	/* First major is 1, so we get >= 0x200 here. */
	add_timer_randomness(disk->random, 0x100 + disk_devt(disk));
}
EXPORT_SYMBOL_GPL(add_disk_randomness);

void __cold rand_initialize_disk(struct gendisk *disk)
{
	struct timer_rand_state *state;

	/*
	 * If kzalloc returns null, we just won't use that entropy
	 * source.
	 */
	state = kzalloc(sizeof(struct timer_rand_state), GFP_KERNEL);
	if (state) {
		state->last_time = INITIAL_JIFFIES;
		disk->random = state;
	}
}
#endif

/*
 * Each time the timer fires, we expect that we got an unpredictable
 * jump in the cycle counter. Even if the timer is running on another
 * CPU, the timer activity will be touching the stack of the CPU that is
 * generating entropy.
 *
 * Note that we don't re-arm the timer in the timer itself - we are
 * happy to be scheduled away, since that just makes the load more
 * complex, but we do not want the timer to keep ticking unless the
 * entropy loop is running.
 *
 * So the re-arming always happens in the entropy loop itself.
 */
static void __cold entropy_timer(struct timer_list *t)
{
	credit_init_bits(1);
}

/*
 * If we have an actual cycle counter, see if we can
 * generate enough entropy with timing noise.
 */
static void __cold try_to_generate_entropy(void)
{
	struct {
		unsigned long entropy;
		struct timer_list timer;
	} stack;

	stack.entropy = random_get_entropy();

	/* Slow counter - or none. Don't even bother. */
	if (stack.entropy == random_get_entropy())
		return;

	timer_setup_on_stack(&stack.timer, entropy_timer, 0);
	while (!crng_ready() && !signal_pending(current)) {
		if (!timer_pending(&stack.timer))
			mod_timer(&stack.timer, jiffies + 1);
		mix_pool_bytes(&stack.entropy, sizeof(stack.entropy));
		schedule();
		stack.entropy = random_get_entropy();
	}

	del_timer_sync(&stack.timer);
	destroy_timer_on_stack(&stack.timer);
	mix_pool_bytes(&stack.entropy, sizeof(stack.entropy));
}


/**********************************************************************
 *
 * Userspace reader/writer interfaces.
 *
 * getrandom(2) is the primary modern interface into the RNG and should
 * be used in preference to anything else.
 *
 * Reading from /dev/random has the same functionality as calling
 * getrandom(2) with flags=0. In earlier versions, however, it had
 * vastly different semantics and should therefore be avoided, to
 * prevent backwards compatibility issues.
 *
 * Reading from /dev/urandom has the same functionality as calling
 * getrandom(2) with flags=GRND_INSECURE. Because it does not block
 * waiting for the RNG to be ready, it should not be used.
 *
 * Writing to either /dev/random or /dev/urandom adds entropy to
 * the input pool but does not credit it.
 *
 * Polling on /dev/random indicates when the RNG is initialized, on
 * the read side, and when it wants new entropy, on the write side.
 *
 * Both /dev/random and /dev/urandom have the same set of ioctls for
 * adding entropy, getting the entropy count, zeroing the count, and
 * reseeding the crng.
 *
 **********************************************************************/

SYSCALL_DEFINE3(getrandom, char __user *, ubuf, size_t, len, unsigned int, flags)
{
	struct iov_iter iter;
	struct iovec iov;
	int ret;

	if (flags & ~(GRND_NONBLOCK | GRND_RANDOM | GRND_INSECURE))
		return -EINVAL;

	/*
	 * Requesting insecure and blocking randomness at the same time makes
	 * no sense.
	 */
	if ((flags & (GRND_INSECURE | GRND_RANDOM)) == (GRND_INSECURE | GRND_RANDOM))
		return -EINVAL;

	if (!crng_ready() && !(flags & GRND_INSECURE)) {
		if (flags & GRND_NONBLOCK)
			return -EAGAIN;
		ret = wait_for_random_bytes();
		if (unlikely(ret))
			return ret;
	}

	ret = import_single_range(READ, ubuf, len, &iov, &iter);
	if (unlikely(ret))
		return ret;
	return get_random_bytes_user(&iter);
}
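
/*
 * Illustrative sketch (not part of this file): the userspace side of the
 * syscall above, compiled separately against glibc's getrandom(2) wrapper.
 */
#if 0
#include <sys/random.h>

static int example_fill(void *buf, size_t len)
{
	/* flags=0 blocks until the RNG is initialized, then never again. */
	ssize_t ret = getrandom(buf, len, 0);

	return ret == (ssize_t)len ? 0 : -1;
}
#endif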

static __poll_t random_poll(struct file *file, poll_table *wait)
{
	poll_wait(file, &crng_init_wait, wait);
	return crng_ready() ? EPOLLIN | EPOLLRDNORM : EPOLLOUT | EPOLLWRNORM;
}

static ssize_t write_pool_user(struct iov_iter *iter)
{
	u8 block[BLAKE2S_BLOCK_SIZE];
	ssize_t ret = 0;
	size_t copied;

	if (unlikely(!iov_iter_count(iter)))
		return 0;

	for (;;) {
		copied = copy_from_iter(block, sizeof(block), iter);
		ret += copied;
		mix_pool_bytes(block, copied);
		if (!iov_iter_count(iter) || copied != sizeof(block))
			break;

		BUILD_BUG_ON(PAGE_SIZE % sizeof(block) != 0);
		if (ret % PAGE_SIZE == 0) {
			if (signal_pending(current))
				break;
			cond_resched();
		}
	}

	memzero_explicit(block, sizeof(block));
	return ret ? ret : -EFAULT;
}

static ssize_t random_write_iter(struct kiocb *kiocb, struct iov_iter *iter)
{
	return write_pool_user(iter);
}

static ssize_t urandom_read_iter(struct kiocb *kiocb, struct iov_iter *iter)
{
	static int maxwarn = 10;

	if (!crng_ready()) {
		if (!ratelimit_disable && maxwarn <= 0)
			++urandom_warning.missed;
		else if (ratelimit_disable || __ratelimit(&urandom_warning)) {
			--maxwarn;
			pr_notice("%s: uninitialized urandom read (%zu bytes read)\n",
				  current->comm, iov_iter_count(iter));
		}
	}

	return get_random_bytes_user(iter);
}

static ssize_t random_read_iter(struct kiocb *kiocb, struct iov_iter *iter)
{
	int ret;

	if (!crng_ready() &&
	    ((kiocb->ki_flags & IOCB_NOWAIT) ||
	     (kiocb->ki_filp->f_flags & O_NONBLOCK)))
		return -EAGAIN;

	ret = wait_for_random_bytes();
	if (ret != 0)
		return ret;
	return get_random_bytes_user(iter);
}

static long random_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
{
	int __user *p = (int __user *)arg;
	int ent_count;

	switch (cmd) {
	case RNDGETENTCNT:
		/* Inherently racy, no point locking. */
		if (put_user(input_pool.init_bits, p))
			return -EFAULT;
		return 0;
	case RNDADDTOENTCNT:
		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;
		if (get_user(ent_count, p))
			return -EFAULT;
		if (ent_count < 0)
			return -EINVAL;
		credit_init_bits(ent_count);
		return 0;
	case RNDADDENTROPY: {
		struct iov_iter iter;
		struct iovec iov;
		ssize_t ret;
		int len;

		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;
		if (get_user(ent_count, p++))
			return -EFAULT;
		if (ent_count < 0)
			return -EINVAL;
		if (get_user(len, p++))
			return -EFAULT;
		ret = import_single_range(WRITE, p, len, &iov, &iter);
		if (unlikely(ret))
			return ret;
		ret = write_pool_user(&iter);
		if (unlikely(ret < 0))
			return ret;
		/* Since we're crediting, enforce that it was all written into the pool. */
		if (unlikely(ret != len))
			return -EFAULT;
		credit_init_bits(ent_count);
		return 0;
	}
	case RNDZAPENTCNT:
	case RNDCLEARPOOL:
		/* No longer has any effect. */
		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;
		return 0;
	case RNDRESEEDCRNG:
		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;
		if (!crng_ready())
			return -ENODATA;
		crng_reseed();
		return 0;
	default:
		return -EINVAL;
	}
}

static int random_fasync(int fd, struct file *filp, int on)
{
	return fasync_helper(fd, filp, on, &fasync);
}

const struct file_operations random_fops = {
	.read_iter = random_read_iter,
	.write_iter = random_write_iter,
	.poll = random_poll,
	.unlocked_ioctl = random_ioctl,
	.fasync = random_fasync,
	.llseek = noop_llseek,
	.splice_read = generic_file_splice_read,
	.splice_write = iter_file_splice_write,
};

const struct file_operations urandom_fops = {
	.read_iter = urandom_read_iter,
	.write_iter = random_write_iter,
	.unlocked_ioctl = random_ioctl,
	.fasync = random_fasync,
	.llseek = noop_llseek,
	.splice_read = generic_file_splice_read,
	.splice_write = iter_file_splice_write,
};


/********************************************************************
 *
 * Sysctl interface.
 *
 * These are partly unused legacy knobs with dummy values to not break
 * userspace and partly still useful things. They are usually accessible
 * in /proc/sys/kernel/random/ and are as follows:
 *
 * - boot_id - a UUID representing the current boot.
 *
 * - uuid - a random UUID, different each time the file is read.
 *
 * - poolsize - the number of bits of entropy that the input pool can
 *   hold, tied to the POOL_BITS constant.
 *
 * - entropy_avail - the number of bits of entropy currently in the
 *   input pool. Always <= poolsize.
 *
 * - write_wakeup_threshold - the amount of entropy in the input pool
 *   below which write polls to /dev/random will unblock, requesting
 *   more entropy, tied to the POOL_READY_BITS constant. It is writable
 *   to avoid breaking old userspaces, but writing to it does not
 *   change any behavior of the RNG.
 *
 * - urandom_min_reseed_secs - fixed to the value CRNG_RESEED_INTERVAL.
 *   It is writable to avoid breaking old userspaces, but writing
 *   to it does not change any behavior of the RNG.
 *
 ********************************************************************/

#ifdef CONFIG_SYSCTL

#include <linux/sysctl.h>

static int sysctl_random_min_urandom_seed = CRNG_RESEED_INTERVAL / HZ;
static int sysctl_random_write_wakeup_bits = POOL_READY_BITS;
static int sysctl_poolsize = POOL_BITS;
static u8 sysctl_bootid[UUID_SIZE];

/*
 * This function is used to return both the bootid UUID, and random
 * UUID. The difference is in whether table->data is NULL; if it is,
 * then a new UUID is generated and returned to the user.
 */
static int proc_do_uuid(struct ctl_table *table, int write, void __user *buf,
			size_t *lenp, loff_t *ppos)
{
	u8 tmp_uuid[UUID_SIZE], *uuid;
	char uuid_string[UUID_STRING_LEN + 1];
	struct ctl_table fake_table = {
		.data = uuid_string,
		.maxlen = UUID_STRING_LEN
	};

	if (write)
		return -EPERM;

	uuid = table->data;
	if (!uuid) {
		uuid = tmp_uuid;
		generate_random_uuid(uuid);
	} else {
		static DEFINE_SPINLOCK(bootid_spinlock);

		spin_lock(&bootid_spinlock);
		if (!uuid[8])
			generate_random_uuid(uuid);
		spin_unlock(&bootid_spinlock);
	}

	snprintf(uuid_string, sizeof(uuid_string), "%pU", uuid);
	return proc_dostring(&fake_table, 0, buf, lenp, ppos);
}

/* The same as proc_dointvec, but writes don't change anything. */
static int proc_do_rointvec(struct ctl_table *table, int write, void __user *buf,
			    size_t *lenp, loff_t *ppos)
{
	return write ? 0 : proc_dointvec(table, 0, buf, lenp, ppos);
}

extern struct ctl_table random_table[];
struct ctl_table random_table[] = {
	{
		.procname	= "poolsize",
		.data		= &sysctl_poolsize,
		.maxlen		= sizeof(int),
		.mode		= 0444,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "entropy_avail",
		.data		= &input_pool.init_bits,
		.maxlen		= sizeof(int),
		.mode		= 0444,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "write_wakeup_threshold",
		.data		= &sysctl_random_write_wakeup_bits,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_do_rointvec,
	},
	{
		.procname	= "urandom_min_reseed_secs",
		.data		= &sysctl_random_min_urandom_seed,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_do_rointvec,
	},
	{
		.procname	= "boot_id",
		.data		= &sysctl_bootid,
		.mode		= 0444,
		.proc_handler	= proc_do_uuid,
	},
	{
		.procname	= "uuid",
		.mode		= 0444,
		.proc_handler	= proc_do_uuid,
	},
	{ }
};
#endif	/* CONFIG_SYSCTL */