portage-conf/patches/sys-kernel/gentoo-sources/0001-lrng.patch

From 96a130d620cb621ab97332b7b276e388e66a7e74 Mon Sep 17 00:00:00 2001
From: Peter Jung <admin@ptr1337.dev>
Date: Mon, 22 Jan 2024 15:49:47 +0100
Subject: [PATCH] lrng
Signed-off-by: Peter Jung <admin@ptr1337.dev>
---
MAINTAINERS | 7 +
crypto/drbg.c | 16 +-
drivers/char/Kconfig | 2 +
drivers/char/Makefile | 5 +-
drivers/char/lrng/Kconfig | 1017 +++++++++++++++++
drivers/char/lrng/Makefile | 39 +
drivers/char/lrng/lrng_definitions.h | 163 +++
drivers/char/lrng/lrng_drng_atomic.c | 130 +++
drivers/char/lrng/lrng_drng_atomic.h | 23 +
drivers/char/lrng/lrng_drng_chacha20.c | 195 ++++
drivers/char/lrng/lrng_drng_chacha20.h | 42 +
drivers/char/lrng/lrng_drng_drbg.c | 179 +++
drivers/char/lrng/lrng_drng_drbg.h | 13 +
drivers/char/lrng/lrng_drng_kcapi.c | 208 ++++
drivers/char/lrng/lrng_drng_kcapi.h | 13 +
drivers/char/lrng/lrng_drng_mgr.c | 742 ++++++++++++
drivers/char/lrng/lrng_drng_mgr.h | 86 ++
drivers/char/lrng/lrng_es_aux.c | 335 ++++++
drivers/char/lrng/lrng_es_aux.h | 44 +
drivers/char/lrng/lrng_es_cpu.c | 281 +++++
drivers/char/lrng/lrng_es_cpu.h | 17 +
drivers/char/lrng/lrng_es_irq.c | 730 ++++++++++++
drivers/char/lrng/lrng_es_irq.h | 24 +
drivers/char/lrng/lrng_es_jent.c | 356 ++++++
drivers/char/lrng/lrng_es_jent.h | 17 +
drivers/char/lrng/lrng_es_krng.c | 100 ++
drivers/char/lrng/lrng_es_krng.h | 17 +
drivers/char/lrng/lrng_es_mgr.c | 506 ++++++++
drivers/char/lrng/lrng_es_mgr.h | 56 +
drivers/char/lrng/lrng_es_mgr_cb.h | 87 ++
drivers/char/lrng/lrng_es_sched.c | 566 +++++++++
drivers/char/lrng/lrng_es_sched.h | 20 +
drivers/char/lrng/lrng_es_timer_common.c | 144 +++
drivers/char/lrng/lrng_es_timer_common.h | 83 ++
drivers/char/lrng/lrng_hash_kcapi.c | 140 +++
drivers/char/lrng/lrng_health.c | 447 ++++++++
drivers/char/lrng/lrng_health.h | 42 +
drivers/char/lrng/lrng_interface_aux.c | 210 ++++
drivers/char/lrng/lrng_interface_dev.c | 35 +
drivers/char/lrng/lrng_interface_dev_common.c | 315 +++++
drivers/char/lrng/lrng_interface_dev_common.h | 51 +
drivers/char/lrng/lrng_interface_hwrand.c | 68 ++
drivers/char/lrng/lrng_interface_kcapi.c | 129 +++
.../char/lrng/lrng_interface_random_kernel.c | 248 ++++
.../char/lrng/lrng_interface_random_kernel.h | 17 +
.../char/lrng/lrng_interface_random_user.c | 104 ++
drivers/char/lrng/lrng_numa.c | 124 ++
drivers/char/lrng/lrng_numa.h | 15 +
drivers/char/lrng/lrng_proc.c | 74 ++
drivers/char/lrng/lrng_proc.h | 15 +
drivers/char/lrng/lrng_selftest.c | 397 +++++++
drivers/char/lrng/lrng_sha.h | 14 +
drivers/char/lrng/lrng_sha1.c | 88 ++
drivers/char/lrng/lrng_sha256.c | 72 ++
drivers/char/lrng/lrng_switch.c | 286 +++++
drivers/char/lrng/lrng_sysctl.c | 140 +++
drivers/char/lrng/lrng_sysctl.h | 15 +
drivers/char/lrng/lrng_testing.c | 901 +++++++++++++++
drivers/char/lrng/lrng_testing.h | 85 ++
include/crypto/drbg.h | 7 +
include/linux/lrng.h | 251 ++++
kernel/sched/core.c | 3 +
62 files changed, 10549 insertions(+), 7 deletions(-)
create mode 100644 drivers/char/lrng/Kconfig
create mode 100644 drivers/char/lrng/Makefile
create mode 100644 drivers/char/lrng/lrng_definitions.h
create mode 100644 drivers/char/lrng/lrng_drng_atomic.c
create mode 100644 drivers/char/lrng/lrng_drng_atomic.h
create mode 100644 drivers/char/lrng/lrng_drng_chacha20.c
create mode 100644 drivers/char/lrng/lrng_drng_chacha20.h
create mode 100644 drivers/char/lrng/lrng_drng_drbg.c
create mode 100644 drivers/char/lrng/lrng_drng_drbg.h
create mode 100644 drivers/char/lrng/lrng_drng_kcapi.c
create mode 100644 drivers/char/lrng/lrng_drng_kcapi.h
create mode 100644 drivers/char/lrng/lrng_drng_mgr.c
create mode 100644 drivers/char/lrng/lrng_drng_mgr.h
create mode 100644 drivers/char/lrng/lrng_es_aux.c
create mode 100644 drivers/char/lrng/lrng_es_aux.h
create mode 100644 drivers/char/lrng/lrng_es_cpu.c
create mode 100644 drivers/char/lrng/lrng_es_cpu.h
create mode 100644 drivers/char/lrng/lrng_es_irq.c
create mode 100644 drivers/char/lrng/lrng_es_irq.h
create mode 100644 drivers/char/lrng/lrng_es_jent.c
create mode 100644 drivers/char/lrng/lrng_es_jent.h
create mode 100644 drivers/char/lrng/lrng_es_krng.c
create mode 100644 drivers/char/lrng/lrng_es_krng.h
create mode 100644 drivers/char/lrng/lrng_es_mgr.c
create mode 100644 drivers/char/lrng/lrng_es_mgr.h
create mode 100644 drivers/char/lrng/lrng_es_mgr_cb.h
create mode 100644 drivers/char/lrng/lrng_es_sched.c
create mode 100644 drivers/char/lrng/lrng_es_sched.h
create mode 100644 drivers/char/lrng/lrng_es_timer_common.c
create mode 100644 drivers/char/lrng/lrng_es_timer_common.h
create mode 100644 drivers/char/lrng/lrng_hash_kcapi.c
create mode 100644 drivers/char/lrng/lrng_health.c
create mode 100644 drivers/char/lrng/lrng_health.h
create mode 100644 drivers/char/lrng/lrng_interface_aux.c
create mode 100644 drivers/char/lrng/lrng_interface_dev.c
create mode 100644 drivers/char/lrng/lrng_interface_dev_common.c
create mode 100644 drivers/char/lrng/lrng_interface_dev_common.h
create mode 100644 drivers/char/lrng/lrng_interface_hwrand.c
create mode 100644 drivers/char/lrng/lrng_interface_kcapi.c
create mode 100644 drivers/char/lrng/lrng_interface_random_kernel.c
create mode 100644 drivers/char/lrng/lrng_interface_random_kernel.h
create mode 100644 drivers/char/lrng/lrng_interface_random_user.c
create mode 100644 drivers/char/lrng/lrng_numa.c
create mode 100644 drivers/char/lrng/lrng_numa.h
create mode 100644 drivers/char/lrng/lrng_proc.c
create mode 100644 drivers/char/lrng/lrng_proc.h
create mode 100644 drivers/char/lrng/lrng_selftest.c
create mode 100644 drivers/char/lrng/lrng_sha.h
create mode 100644 drivers/char/lrng/lrng_sha1.c
create mode 100644 drivers/char/lrng/lrng_sha256.c
create mode 100644 drivers/char/lrng/lrng_switch.c
create mode 100644 drivers/char/lrng/lrng_sysctl.c
create mode 100644 drivers/char/lrng/lrng_sysctl.h
create mode 100644 drivers/char/lrng/lrng_testing.c
create mode 100644 drivers/char/lrng/lrng_testing.h
create mode 100644 include/linux/lrng.h
diff --git a/MAINTAINERS b/MAINTAINERS
index 8d1052fa6a69..e94319001cd9 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -12466,6 +12466,13 @@ S: Supported
B: mailto:linux-next@vger.kernel.org and the appropriate development tree
T: git git://git.kernel.org/pub/scm/linux/kernel/git/next/linux-next.git/
+LINUX RANDOM NUMBER GENERATOR (LRNG) DRIVER
+M: Stephan Mueller <smueller@chronox.de>
+S: Maintained
+W: https://www.chronox.de/lrng.html
+F: drivers/char/lrng/
+F: include/linux/lrng.h
+
LIS3LV02D ACCELEROMETER DRIVER
M: Eric Piel <eric.piel@tremplin-utc.net>
S: Maintained
diff --git a/crypto/drbg.c b/crypto/drbg.c
index 3addce90930c..b6cc200e807d 100644
--- a/crypto/drbg.c
+++ b/crypto/drbg.c
@@ -115,7 +115,7 @@
* HMAC-SHA512 / SHA256 / AES 256 over other ciphers. Thus, the
* favored DRBGs are the latest entries in this array.
*/
-static const struct drbg_core drbg_cores[] = {
+const struct drbg_core drbg_cores[] = {
#ifdef CONFIG_CRYPTO_DRBG_CTR
{
.flags = DRBG_CTR | DRBG_STRENGTH128,
@@ -180,6 +180,7 @@ static const struct drbg_core drbg_cores[] = {
},
#endif /* CONFIG_CRYPTO_DRBG_HMAC */
};
+EXPORT_SYMBOL(drbg_cores);
static int drbg_uninstantiate(struct drbg_state *drbg);
@@ -195,7 +196,7 @@ static int drbg_uninstantiate(struct drbg_state *drbg);
* Return: normalized strength in *bytes* value or 32 as default
* to counter programming errors
*/
-static inline unsigned short drbg_sec_strength(drbg_flag_t flags)
+unsigned short drbg_sec_strength(drbg_flag_t flags)
{
switch (flags & DRBG_STRENGTH_MASK) {
case DRBG_STRENGTH128:
@@ -208,6 +209,7 @@ static inline unsigned short drbg_sec_strength(drbg_flag_t flags)
return 32;
}
}
+EXPORT_SYMBOL(drbg_sec_strength);
/*
* FIPS 140-2 continuous self test for the noise source
@@ -1236,7 +1238,7 @@ static int drbg_seed(struct drbg_state *drbg, struct drbg_string *pers,
}
/* Free all substructures in a DRBG state without the DRBG state structure */
-static inline void drbg_dealloc_state(struct drbg_state *drbg)
+void drbg_dealloc_state(struct drbg_state *drbg)
{
if (!drbg)
return;
@@ -1257,12 +1259,13 @@ static inline void drbg_dealloc_state(struct drbg_state *drbg)
drbg->fips_primed = false;
}
}
+EXPORT_SYMBOL(drbg_dealloc_state);
/*
* Allocate all sub-structures for a DRBG state.
* The DRBG state structure must already be allocated.
*/
-static inline int drbg_alloc_state(struct drbg_state *drbg)
+int drbg_alloc_state(struct drbg_state *drbg)
{
int ret = -ENOMEM;
unsigned int sb_size = 0;
@@ -1343,6 +1346,7 @@ static inline int drbg_alloc_state(struct drbg_state *drbg)
drbg_dealloc_state(drbg);
return ret;
}
+EXPORT_SYMBOL(drbg_alloc_state);
/*************************************************************************
* DRBG interface functions
@@ -1877,8 +1881,7 @@ static int drbg_kcapi_sym_ctr(struct drbg_state *drbg,
*
* return: flags
*/
-static inline void drbg_convert_tfm_core(const char *cra_driver_name,
- int *coreref, bool *pr)
+void drbg_convert_tfm_core(const char *cra_driver_name, int *coreref, bool *pr)
{
int i = 0;
size_t start = 0;
@@ -1905,6 +1908,7 @@ static inline void drbg_convert_tfm_core(const char *cra_driver_name,
}
}
}
+EXPORT_SYMBOL(drbg_convert_tfm_core);
static int drbg_kcapi_init(struct crypto_tfm *tfm)
{
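
The hunk above removes the static (and inline) qualifiers from several DRBG helpers and exports them so that code outside crypto/drbg.c (the LRNG SP800-90A backend lrng_drng_drbg.c added below) can instantiate a DRBG state directly. The following is a minimal sketch of how such a consumer might use the exports, built only from the signatures visible in this hunk; the core name, function names, and error handling are illustrative:

#include <crypto/drbg.h>
#include <linux/printk.h>
#include <linux/slab.h>

/* Illustrative consumer of the exported DRBG helpers. */
static struct drbg_state *example_drbg_create(const char *core_name)
{
	struct drbg_state *drbg;
	int coreref = 0;
	bool pr = false;

	/* Map a DRBG driver name such as "drbg_nopr_hmac_sha512" to an
	 * index into the now-exported drbg_cores[] array. */
	drbg_convert_tfm_core(core_name, &coreref, &pr);

	drbg = kzalloc(sizeof(*drbg), GFP_KERNEL);
	if (!drbg)
		return NULL;
	drbg->core = &drbg_cores[coreref];

	/* Allocate the DRBG sub-structures for the chosen core. */
	if (drbg_alloc_state(drbg) < 0) {
		kfree(drbg);
		return NULL;
	}

	pr_info("DRBG security strength: %u bytes\n",
		drbg_sec_strength(drbg->core->flags));
	return drbg;
}

/* Teardown mirrors the allocation path. */
static void example_drbg_destroy(struct drbg_state *drbg)
{
	drbg_dealloc_state(drbg);
	kfree(drbg);
}
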
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
index 7c8dd0abcfdf..ffdd6ca797cd 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -422,4 +422,6 @@ config ADI
and SSM (Silicon Secured Memory). Intended consumers of this
driver include crash and makedumpfile.
+source "drivers/char/lrng/Kconfig"
+
endmenu
diff --git a/drivers/char/Makefile b/drivers/char/Makefile
index e9b360cdc99a..f00df53befb3 100644
--- a/drivers/char/Makefile
+++ b/drivers/char/Makefile
@@ -3,7 +3,8 @@
# Makefile for the kernel character device drivers.
#
-obj-y += mem.o random.o
+obj-y += mem.o
+obj-$(CONFIG_RANDOM_DEFAULT_IMPL) += random.o
obj-$(CONFIG_TTY_PRINTK) += ttyprintk.o
obj-y += misc.o
obj-$(CONFIG_ATARI_DSP56K) += dsp56k.o
@@ -43,3 +44,5 @@ obj-$(CONFIG_PS3_FLASH) += ps3flash.o
obj-$(CONFIG_XILLYBUS_CLASS) += xillybus/
obj-$(CONFIG_POWERNV_OP_PANEL) += powernv-op-panel.o
obj-$(CONFIG_ADI) += adi.o
+
+obj-$(CONFIG_LRNG) += lrng/
diff --git a/drivers/char/lrng/Kconfig b/drivers/char/lrng/Kconfig
new file mode 100644
index 000000000000..a8bbefafb35c
--- /dev/null
+++ b/drivers/char/lrng/Kconfig
@@ -0,0 +1,1017 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Linux Random Number Generator configuration
+#
+
+config RANDOM_DEFAULT_IMPL
+ bool "Kernel RNG Default Implementation"
+ default y
+ help
+ The default random number generator as provided with
+ drivers/char/random.c is selected with this option.
+
+config LRNG_AUTO_SELECTED
+ bool
+ default y if !RANDOM_DEFAULT_IMPL
+ default n if RANDOM_DEFAULT_IMPL
+ select LRNG
+
+config LRNG
+ bool "Linux Random Number Generator"
+ default n
+ select CRYPTO_LIB_SHA256 if CRYPTO
+ help
+ The Linux Random Number Generator (LRNG) generates entropy
+ from different entropy sources. Each entropy source can
+ be enabled and configured independently. The interrupt
+ entropy source can be configured to be SP800-90B compliant.
+ The entire LRNG can be configured to be SP800-90C compliant.
+ Runtime-switchable cryptographic support is available.
+ The LRNG delivers significant entropy during boot.
+
+ Taken together, the LRNG can be configured for full SP800-90A/B/C compliance.
+
+menu "Linux Random Number Generator Configuration"
+ depends on LRNG
+
+if LRNG
+
+config LRNG_SHA256
+ bool
+ default y if CRYPTO_LIB_SHA256
+
+config LRNG_SHA1
+ bool
+ default y if !CRYPTO_LIB_SHA256
+
+config LRNG_COMMON_DEV_IF
+ bool
+
+config LRNG_DRNG_ATOMIC
+ bool
+ select LRNG_DRNG_CHACHA20
+
+config LRNG_SYSCTL
+ bool
+ depends on SYSCTL
+
+config LRNG_RANDOM_IF
+ bool
+ default n if RANDOM_DEFAULT_IMPL
+ default y if !RANDOM_DEFAULT_IMPL
+ select LRNG_COMMON_DEV_IF
+ select LRNG_DRNG_ATOMIC
+ select LRNG_SYSCTL
+
+menu "Specific DRNG seeding strategies"
+
+config LRNG_AIS2031_NTG1_SEEDING_STRATEGY
+ bool "AIS 20/31 NTG.1 seeding strategy"
+ default n
+ help
+ When enabling this option, two entropy sources must
+ deliver 220 bits of entropy each to consider a DRNG
+ as fully seeded. Any two entropy sources can be used
+ to fulfill this requirement. If a specific entropy source
+ should not contribute to this seeding strategy, that
+ entropy source must be configured to provide less than
+ 220 bits of entropy.
+
+ The strategy is consistent with the requirements for
+ NTG.1 compliance in the German AIS 20/31 draft from 2022
+ and is only enforced with lrng_es_mgr.ntg1=1.
+
+ Compliance with the German AIS 20/31 from 2011 is always
+ present when using /dev/random with the flag O_SYNC or
+ getrandom(2) with GRND_RANDOM.
+
+ If unsure, say N.
+
+endmenu # "Specific DRNG seeding strategies"
+
+menu "LRNG Interfaces"
+
+config LRNG_KCAPI_IF
+ tristate "Interface with Kernel Crypto API"
+ depends on CRYPTO_RNG
+ help
+ The LRNG can be registered with the kernel crypto API's
+ random number generator framework. This offers a random
+ number generator with the name "lrng" and a priority
+ intended to be higher than that of the existing RNG
+ implementations.
+
+config LRNG_HWRAND_IF
+ tristate "Interface with Hardware Random Number Generator Framework"
+ depends on HW_RANDOM
+ select LRNG_DRNG_ATOMIC
+ help
+ The LRNG can be registered with the hardware random number
+ generator framework. This offers a random number generator
+ with the name "lrng" that is accessible via the framework.
+ For example, it allows pulling data from the LRNG via the
+ /dev/hwrng file.
+
+config LRNG_DEV_IF
+ bool "Character device file interface"
+ select LRNG_COMMON_DEV_IF
+ help
+ The LRNG can create a character device file that operates
+ identically to /dev/random including IOCTL, read and write
+ operations.
+
+endmenu # "LRNG Interfaces"
+
+menu "Entropy Source Configuration"
+
+config LRNG_RUNTIME_ES_CONFIG
+ bool "Enable runtime configuration of entropy sources"
+ help
+ When enabling this option, the LRNG provides a mechanism
+ to alter the entropy rate of each entropy source at boot
+ time and at runtime.
+
+ Each entropy source's entropy rate can be changed with
+ a kernel command line option. When no option is provided,
+ the default specified at kernel compilation time applies.
+
+comment "Common Timer-based Entropy Source Configuration"
+
+config LRNG_IRQ_DFLT_TIMER_ES
+ bool
+
+config LRNG_SCHED_DFLT_TIMER_ES
+ bool
+
+config LRNG_TIMER_COMMON
+ bool
+
+choice
+ prompt "Default Timer-based Entropy Source"
+ default LRNG_IRQ_DFLT_TIMER_ES
+ depends on LRNG_TIMER_COMMON
+ help
+ Select the timer-based entropy source that is credited
+ with entropy. The other timer-based entropy sources may
+ be operational and provide data, but are credited with no
+ entropy.
+
+ config LRNG_IRQ_DFLT_TIMER_ES
+ bool "Interrupt Entropy Source"
+ depends on LRNG_IRQ
+ help
+ The interrupt entropy source is selected as a timer-based
+ entropy source to provide entropy.
+
+ config LRNG_SCHED_DFLT_TIMER_ES
+ bool "Scheduler Entropy Source"
+ depends on LRNG_SCHED
+ help
+ The scheduler entropy source is selected as timer-based
+ entropy source to provide entropy.
+endchoice
+
+choice
+ prompt "LRNG Entropy Collection Pool Size"
+ default LRNG_COLLECTION_SIZE_1024
+ depends on LRNG_TIMER_COMMON
+ help
+ Select the size of the LRNG entropy collection pool
+ that stores data for the interrupt and the scheduler
+ entropy sources without performing a compression
+ operation. The larger the collection size, the faster
+ the average interrupt handling will be. The collection
+ size represents the number of bytes of the per-CPU memory
+ used to batch up entropy event data.
+
+ The default value is good for regular operations. Choose
+ larger sizes for servers that have no memory limitations.
+ If runtime memory is precious, choose a smaller size.
+
+ The collection size is unrelated to the entropy rate
+ or the amount of entropy the LRNG can process.
+
+ config LRNG_COLLECTION_SIZE_32
+ depends on LRNG_CONTINUOUS_COMPRESSION_ENABLED
+ depends on !LRNG_SWITCHABLE_CONTINUOUS_COMPRESSION
+ depends on !CRYPTO_FIPS
+ bool "32 interrupt events"
+
+ config LRNG_COLLECTION_SIZE_256
+ depends on !CRYPTO_FIPS
+ bool "256 interrupt events"
+
+ config LRNG_COLLECTION_SIZE_512
+ bool "512 interrupt events"
+
+ config LRNG_COLLECTION_SIZE_1024
+ bool "1024 interrupt events (default)"
+
+ config LRNG_COLLECTION_SIZE_2048
+ bool "2048 interrupt events"
+
+ config LRNG_COLLECTION_SIZE_4096
+ bool "4096 interrupt events"
+
+ config LRNG_COLLECTION_SIZE_8192
+ bool "8192 interrupt events"
+
+endchoice
+
+config LRNG_COLLECTION_SIZE
+ int
+ default 32 if LRNG_COLLECTION_SIZE_32
+ default 256 if LRNG_COLLECTION_SIZE_256
+ default 512 if LRNG_COLLECTION_SIZE_512
+ default 1024 if LRNG_COLLECTION_SIZE_1024
+ default 2048 if LRNG_COLLECTION_SIZE_2048
+ default 4096 if LRNG_COLLECTION_SIZE_4096
+ default 8192 if LRNG_COLLECTION_SIZE_8192
+
+config LRNG_HEALTH_TESTS
+ bool "Enable internal entropy source online health tests"
+ depends on LRNG_TIMER_COMMON
+ help
+ The online health tests are applied to the interrupt
+ entropy source and to the scheduler entropy source to
+ validate the noise source at runtime and detect fatal
+ errors. These tests include SP800-90B compliant tests
+ which are invoked if the system is booted with fips=1.
+ In case of fatal errors during active SP800-90B tests,
+ the issue is logged and the noise data is discarded.
+ These tests are required for full compliance of the
+ interrupt entropy source with SP800-90B.
+
+ If both the scheduler and the interrupt entropy sources
+ are enabled, the health tests are applied to each
+ independently.
+
+ If unsure, say Y.
+
+config LRNG_RCT_BROKEN
+ bool "SP800-90B RCT with dangerous low cutoff value"
+ depends on LRNG_HEALTH_TESTS
+ depends on BROKEN
+ default n
+ help
+ This option enables a dangerously low SP800-90B repetitive
+ count test (RCT) cutoff value which makes it very likely
+ that the RCT is triggered to raise a self test failure.
+
+ This option is ONLY intended for developers wanting to
+ test the effectiveness of the SP800-90B RCT health test.
+
+ If unsure, say N.
+
+config LRNG_APT_BROKEN
+ bool "SP800-90B APT with dangerous low cutoff value"
+ depends on LRNG_HEALTH_TESTS
+ depends on BROKEN
+ default n
+ help
+ This option enables a dangerously low SP800-90B adaptive
+ proportion test (APT) cutoff value which makes it very
+ likely that the APT is triggered to raise a self test
+ failure.
+
+ This option is ONLY intended for developers wanting to
+ test the effectiveness of the SP800-90B APT health test.
+
+ If unsure, say N.
+
+# Default taken from SP800-90B sec 4.4.1 - significance level 2^-30
+config LRNG_RCT_CUTOFF
+ int
+ default 31 if !LRNG_RCT_BROKEN
+ default 1 if LRNG_RCT_BROKEN
+
+# Default taken from SP800-90B sec 4.4.1 - significance level 2^-80
+config LRNG_RCT_CUTOFF_PERMANENT
+ int
+ default 81 if !LRNG_RCT_BROKEN
+ default 2 if LRNG_RCT_BROKEN
+
+# Default taken from SP800-90B sec 4.4.2 - significance level 2^-30
+config LRNG_APT_CUTOFF
+ int
+ default 325 if !LRNG_APT_BROKEN
+ default 32 if LRNG_APT_BROKEN
+
+# Default taken from SP800-90B sec 4.4.2 - significance level 2^-80
+config LRNG_APT_CUTOFF_PERMANENT
+ int
+ default 371 if !LRNG_APT_BROKEN
+ default 33 if LRNG_APT_BROKEN
+
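+# A sketch of the arithmetic behind these defaults, assuming H = 1 bit
+# of entropy per sample: SP800-90B sec 4.4.1 defines the RCT cutoff as
+# C = 1 + ceil(-log2(alpha) / H), giving 31 for alpha = 2^-30 and 81
+# for alpha = 2^-80. SP800-90B sec 4.4.2 defines the APT cutoff as the
+# binomial quantile CRITBINOM(W, 2^-H, 1 - alpha) over the window
+# W = 512, giving 325 and 371 for the same significance levels.
+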
+comment "Interrupt Entropy Source"
+
+config LRNG_IRQ
+ bool "Enable Interrupt Entropy Source as LRNG Seed Source"
+ default y
+ depends on !RANDOM_DEFAULT_IMPL
+ select LRNG_TIMER_COMMON
+ help
+ The LRNG models an entropy source based on the timing of the
+ occurrence of interrupts. Enable this option to enable this
+ IRQ entropy source.
+
+ The IRQ entropy source is triggered every time an interrupt
+ arrives and thus causes the interrupt handler to execute
+ slightly longer. Disabling the IRQ entropy source implies
+ that the performance penalty on the interrupt handler added
+ by the LRNG is eliminated. Yet, this entropy source is
+ considered to be an internal entropy source of the LRNG.
+ Thus, only disable it if you have ensured that other
+ entropy sources are available to supply the LRNG with
+ entropy.
+
+ If you disable the IRQ entropy source, you MUST ensure
+ that one or more entropy sources collectively have the
+ capability to deliver sufficient entropy with one
+ invocation at a rate compliant with the security strength
+ of the DRNG (usually 256 bits of entropy). In addition,
+ if those entropy sources do not deliver sufficient
+ entropy during the first request, the reseed must be
+ triggered from user space or kernel space when sufficient
+ entropy is considered to be present.
+
+ If unsure, say Y.
+
+choice
+ prompt "Continuous entropy compression boot time setting"
+ default LRNG_CONTINUOUS_COMPRESSION_ENABLED
+ depends on LRNG_IRQ
+ help
+ Select the default behavior of the interrupt entropy source
+ continuous compression operation.
+
+ The LRNG IRQ ES collects entropy data during each interrupt.
+ For performance reasons, an amount of entropy data defined by
+ the LRNG entropy collection pool size is concatenated into
+ an array. When that array is filled up, a hash is calculated
+ to compress the entropy. That hash is calculated in
+ interrupt context.
+
+ In case such hash calculation in interrupt context is deemed
+ too time-consuming, the continuous compression operation
+ can be disabled. If disabled, the collection of entropy will
+ not trigger a hash compression operation in interrupt context.
+ The compression happens only when the DRNG is reseeded, which
+ occurs in process context. This implies that old entropy data
+ collected after the last DRNG-reseed is overwritten with newer
+ entropy data once the collection pool is full instead of
+ retaining its entropy with the compression operation.
+
+ config LRNG_CONTINUOUS_COMPRESSION_ENABLED
+ bool "Enable continuous compression (default)"
+
+ config LRNG_CONTINUOUS_COMPRESSION_DISABLED
+ bool "Disable continuous compression"
+
+endchoice
+
+config LRNG_ENABLE_CONTINUOUS_COMPRESSION
+ bool
+ default y if LRNG_CONTINUOUS_COMPRESSION_ENABLED
+ default n if LRNG_CONTINUOUS_COMPRESSION_DISABLED
+
+config LRNG_SWITCHABLE_CONTINUOUS_COMPRESSION
+ bool "Runtime-switchable continuous entropy compression"
+ depends on LRNG_IRQ
+ help
+ By default, the behavior of the interrupt entropy source's
+ continuous compression operation is hard-wired into the
+ kernel. Enable this option to make it configurable at boot
+ time.
+
+ To modify the default behavior of the continuous
+ compression operation, use the kernel command line option
+ lrng_sw_noise.lrng_pcpu_continuous_compression.
+
+ If unsure, say N.
+
+config LRNG_IRQ_ENTROPY_RATE
+ int "Interrupt Entropy Source Entropy Rate"
+ depends on LRNG_IRQ
+ range 256 4294967295 if LRNG_IRQ_DFLT_TIMER_ES
+ range 4294967295 4294967295 if !LRNG_IRQ_DFLT_TIMER_ES
+ default 256 if LRNG_IRQ_DFLT_TIMER_ES
+ default 4294967295 if !LRNG_IRQ_DFLT_TIMER_ES
+ help
+ The LRNG will collect the configured number of interrupts to
+ obtain 256 bits of entropy. This value can be set to any value
+ between 256 and 4294967295. The LRNG guarantees that this value
+ is not lower than 256. This lower limit implies that one
+ interrupt event is credited with one bit of entropy. This value
+ is subject to increase by the oversampling factor if no
+ high-resolution timer is found.
+
+ In order to effectively disable the interrupt entropy source,
+ the option has to be set to 4294967295. In this case, the
+ interrupt entropy source will still deliver data but without
+ being credited with entropy.
+
+comment "Jitter RNG Entropy Source"
+
+config LRNG_JENT
+ bool "Enable Jitter RNG as LRNG Seed Source"
+ depends on CRYPTO
+ select CRYPTO_JITTERENTROPY
+ help
+ The LRNG may use the Jitter RNG as an entropy source. Enabling
+ this option enables the use of the Jitter RNG. Its default
+ entropy level is 16 bits of entropy per 256 data bits delivered
+ by the Jitter RNG. This entropy level can be changed at boot
+ time or at runtime with the lrng_base.jitterrng configuration
+ variable.
+
+choice
+ prompt "Jitter RNG Async Block Number"
+ default LRNG_JENT_ENTROPY_BLOCKS_NO_128
+ depends on LRNG_JENT
+ help
+ Select the number of Jitter RNG entropy blocks the asynchronous
+ collection operation will fill. A caller for Jitter RNG entropy
+ will be given data from the pre-filled blocks if available to
+ prevent the Jitter RNG from utilizing the CPU too much in a
+ possible hot code path.
+
+ The number specifies the number of 256/384 bit blocks that will
+ be held in memory and asynchronously filled with Jitter RNG data.
+
+ The asynchronous entropy collection can also be disabled at
+ kernel startup time by setting the command line option
+ lrng_es_jent.jent_async_enabled=0. Setting this option at
+ runtime is also allowed via the corresponding SysFS interface.
+ This runtime configuration is only available when SysFS and
+ CONFIG_LRNG_RUNTIME_ES_CONFIG are enabled.
+
+ config LRNG_JENT_ENTROPY_BLOCKS_DISABLED
+ bool "Async collection disabled"
+
+ # Any block number is allowed, provided it is a power of 2 and
+ # equal or larger than 4 (4 is due to the division in
+ # lrng_jent_async_get when deciding to wake up the monitor).
+ config LRNG_JENT_ENTROPY_BLOCKS_NO_32
+ bool "32 blocks"
+
+ config LRNG_JENT_ENTROPY_BLOCKS_NO_64
+ bool "64 blocks"
+
+ config LRNG_JENT_ENTROPY_BLOCKS_NO_128
+ bool "128 blocks (default)"
+
+ config LRNG_JENT_ENTROPY_BLOCKS_NO_256
+ bool "256 blocks"
+
+ config LRNG_JENT_ENTROPY_BLOCKS_NO_512
+ bool "512 blocks"
+
+ config LRNG_JENT_ENTROPY_BLOCKS_NO_1024
+ bool "1024 blocks"
+
+endchoice
+
+config LRNG_JENT_ENTROPY_BLOCKS
+ int
+ default 0 if LRNG_JENT_ENTROPY_BLOCKS_DISABLED
+ default 32 if LRNG_JENT_ENTROPY_BLOCKS_NO_32
+ default 64 if LRNG_JENT_ENTROPY_BLOCKS_NO_64
+ default 128 if LRNG_JENT_ENTROPY_BLOCKS_NO_128
+ default 256 if LRNG_JENT_ENTROPY_BLOCKS_NO_256
+ default 512 if LRNG_JENT_ENTROPY_BLOCKS_NO_512
+ default 1024 if LRNG_JENT_ENTROPY_BLOCKS_NO_1024
+
+config LRNG_JENT_ENTROPY_RATE
+ int "Jitter RNG Entropy Source Entropy Rate"
+ depends on LRNG_JENT
+ range 0 256
+ default 16
+ help
+ The option defines the amount of entropy the LRNG applies to 256
+ bits of data obtained from the Jitter RNG entropy source. The
+ LRNG enforces the limit that this value must be in the range
+ between 0 and 256.
+
+ When configuring this value to 0, the Jitter RNG entropy source
+ will provide 256 bits of data without being credited to contain
+ entropy.
+
+comment "CPU Entropy Source"
+
+config LRNG_CPU
+ bool "Enable CPU Entropy Source as LRNG Seed Source"
+ default y
+ help
+ Current CPUs commonly contain entropy sources which can be
+ used to seed the LRNG. For example, the Intel RDSEED
+ instruction or the POWER DARN instruction will be used
+ to seed the LRNG if this option is enabled.
+
+ Note, if this option is enabled and the underlying CPU
+ does not offer such an entropy source, the LRNG will
+ automatically detect this and ignore the hardware.
+
+config LRNG_CPU_FULL_ENT_MULTIPLIER
+ int
+ default 1 if !LRNG_TEST_CPU_ES_COMPRESSION
+ default 123 if LRNG_TEST_CPU_ES_COMPRESSION
+
+config LRNG_CPU_ENTROPY_RATE
+ int "CPU Entropy Source Entropy Rate"
+ depends on LRNG_CPU
+ range 0 256
+ default 8
+ help
+ The option defines the amount of entropy the LRNG applies to 256
+ bits of data obtained from the CPU entropy source. The LRNG
+ enforces the limit that this value must be in the range between
+ 0 and 256.
+
+ When configuring this value to 0, the CPU entropy source will
+ provide 256 bits of data without being credited to contain
+ entropy.
+
+ Note, this option is overridden when the option
+ CONFIG_RANDOM_TRUST_CPU is set.
+
+comment "Scheduler Entropy Source"
+
+config LRNG_SCHED
+ bool "Enable Scheduer Entropy Source as LRNG Seed Source"
+ select LRNG_TIMER_COMMON
+ help
+ The LRNG models an entropy source based on the timing of the
+ occurrence of scheduler-triggered context switches. Enable
+ this option to enable this scheduler entropy source.
+
+ The scheduler entropy source is triggered every time a
+ context switch occurs and thus causes the scheduler to
+ execute slightly longer. Disabling the scheduler entropy
+ source implies that the performance penalty on the scheduler
+ added by the LRNG is eliminated. Yet, this entropy source is
+ considered to be an internal entropy source of the LRNG.
+ Thus, only disable it if you have ensured that other entropy
+ sources are available to supply the LRNG with entropy.
+
+ If you disable the scheduler entropy source, you MUST
+ ensure that one or more entropy sources collectively have
+ the capability to deliver sufficient entropy with one
+ invocation at a rate compliant with the security strength
+ of the DRNG (usually 256 bits of entropy). In addition, if
+ those entropy sources do not deliver sufficient entropy
+ during the first request, the reseed must be triggered from
+ user space or kernel space when sufficient entropy is
+ considered to be present.
+
+ If unsure, say Y.
+
+config LRNG_SCHED_ENTROPY_RATE
+ int "Scheduler Entropy Source Entropy Rate"
+ depends on LRNG_SCHED
+ range 256 4294967295 if LRNG_SCHED_DFLT_TIMER_ES
+ range 4294967295 4294967295 if !LRNG_SCHED_DFLT_TIMER_ES
+ default 256 if LRNG_SCHED_DFLT_TIMER_ES
+ default 4294967295 if !LRNG_SCHED_DFLT_TIMER_ES
+ help
+ The LRNG will collect the configured number of context switches
+ triggered by the scheduler to obtain 256 bits of entropy. This
+ value can be set to any value between 256 and 4294967295.
+ The LRNG guarantees that this value is not lower than 256.
+ This lower limit implies that one context switch event is
+ credited with one bit of entropy. This value is subject to
+ increase by the oversampling factor if no high-resolution
+ timer is found.
+
+ In order to effectively disable the scheduler entropy source,
+ the option has to be set to 4294967295. In this case, the
+ scheduler entropy source will still deliver data but without
+ being credited with entropy.
+
+comment "Kernel RNG Entropy Source"
+
+config LRNG_KERNEL_RNG
+ bool "Enable Kernel RNG as LRNG Seed Source"
+ depends on RANDOM_DEFAULT_IMPL
+ help
+ The LRNG may use the kernel RNG (random.c) as an entropy
+ source.
+
+config LRNG_KERNEL_RNG_ENTROPY_RATE
+ int "Kernel RNG Entropy Source Entropy Rate"
+ depends on LRNG_KERNEL_RNG
+ range 0 256
+ default 256
+ help
+ The option defines the amount of entropy the LRNG applies to 256
+ bits of data obtained from the kernel RNG entropy source. The
+ LRNG enforces the limit that this value must be in the range
+ between 0 and 256.
+
+ When configuring this value to 0, the kernel RNG entropy source
+ will provide 256 bits of data without being credited to contain
+ entropy.
+
+ Note: This value is set to 0 automatically when booting the
+ kernel in FIPS mode (with the fips=1 kernel command line
+ option), because random.c is not SP800-90B compliant.
+
+endmenu # "Entropy Source Configuration"
+
+config LRNG_DRNG_CHACHA20
+ tristate
+
+config LRNG_DRBG
+ tristate
+ depends on CRYPTO
+ select CRYPTO_DRBG_MENU
+
+config LRNG_DRNG_KCAPI
+ tristate
+ depends on CRYPTO
+ select CRYPTO_RNG
+
+config LRNG_SWITCH
+ bool
+
+menuconfig LRNG_SWITCH_HASH
+ bool "Support conditioning hash runtime switching"
+ select LRNG_SWITCH
+ help
+ The LRNG uses a default message digest. With this
+ configuration option, other message digests can be selected
+ and loaded at runtime.
+
+if LRNG_SWITCH_HASH
+
+config LRNG_HASH_KCAPI
+ tristate "Kernel crypto API hashing support for LRNG"
+ select CRYPTO_HASH
+ select CRYPTO_SHA512
+ help
+ Enable the kernel crypto API support for entropy compression
+ and conditioning functions.
+
+endif # LRNG_SWITCH_HASH
+
+menuconfig LRNG_SWITCH_DRNG
+ bool "Support DRNG runtime switching"
+ select LRNG_SWITCH
+ help
+ The LRNG uses a default DRNG. With this configuration
+ option, other DRNGs can be selected and loaded at
+ runtime.
+
+if LRNG_SWITCH_DRNG
+
+config LRNG_SWITCH_DRNG_CHACHA20
+ tristate "ChaCha20-based DRNG support for LRNG"
+ depends on !LRNG_DFLT_DRNG_CHACHA20
+ select LRNG_DRNG_CHACHA20
+ help
+ Enable the ChaCha20-based DRNG. This DRNG implementation
+ does not depend on the kernel crypto API presence.
+
+config LRNG_SWITCH_DRBG
+ tristate "SP800-90A support for the LRNG"
+ depends on !LRNG_DFLT_DRNG_DRBG
+ select LRNG_DRBG
+ help
+ Enable the SP800-90A DRBG support for the LRNG. Once the
+ module is loaded, output from /dev/random, /dev/urandom,
+ getrandom(2), or get_random_bytes_full is provided by a DRBG.
+
+config LRNG_SWITCH_DRNG_KCAPI
+ tristate "Kernel Crypto API support for the LRNG"
+ depends on !LRNG_DFLT_DRNG_KCAPI
+ depends on !LRNG_SWITCH_DRBG
+ select LRNG_DRNG_KCAPI
+ help
+ Enable the support for generic pseudo-random number
+ generators offered by the kernel crypto API with the
+ LRNG. Once the module is loaded, output from /dev/random,
+ /dev/urandom, getrandom(2), or get_random_bytes is
+ provided by the selected kernel crypto API RNG.
+
+endif # LRNG_SWITCH_DRNG
+
+choice
+ prompt "LRNG Default DRNG"
+ default LRNG_DFLT_DRNG_CHACHA20
+ help
+ Select the default deterministic random number generator
+ that is used by the LRNG. When enabling the switchable
+ cryptographic mechanism support, this DRNG can be
+ replaced at runtime.
+
+ config LRNG_DFLT_DRNG_CHACHA20
+ bool "ChaCha20-based DRNG"
+ select LRNG_DRNG_CHACHA20
+
+ config LRNG_DFLT_DRNG_DRBG
+ depends on RANDOM_DEFAULT_IMPL
+ bool "SP800-90A DRBG"
+ select LRNG_DRBG
+
+ config LRNG_DFLT_DRNG_KCAPI
+ depends on RANDOM_DEFAULT_IMPL
+ bool "Kernel Crypto API DRNG"
+ select LRNG_DRNG_KCAPI
+endchoice
+
+menuconfig LRNG_TESTING_MENU
+ bool "LRNG testing interfaces"
+ depends on DEBUG_FS
+ help
+ Enable one or more of the following test interfaces.
+
+ If unsure, say N.
+
+if LRNG_TESTING_MENU
+
+config LRNG_TESTING
+ bool
+
+config LRNG_TESTING_RECORDING
+ bool
+
+comment "Interrupt Entropy Source Test Interfaces"
+
+config LRNG_RAW_HIRES_ENTROPY
+ bool "Interface to obtain raw unprocessed IRQ noise source data"
+ default y
+ depends on LRNG_IRQ
+ select LRNG_TESTING
+ select LRNG_TESTING_RECORDING
+ help
+ The test interface allows a privileged process to capture
+ the raw unconditioned high resolution time stamp noise that
+ is collected by the LRNG for statistical analysis. Extracted
+ noise data is not used to seed the LRNG.
+
+ The raw noise data can be obtained using the lrng_raw_hires
+ debugfs file. Using the option lrng_testing.boot_raw_hires_test=1
+ the raw noise of the first 1000 entropy events since boot
+ can be sampled.
+
+config LRNG_RAW_JIFFIES_ENTROPY
+ bool "Entropy test interface to Jiffies of IRQ noise source"
+ depends on LRNG_IRQ
+ select LRNG_TESTING
+ select LRNG_TESTING_RECORDING
+ help
+ The test interface allows a privileged process to capture
+ the raw unconditioned Jiffies values that are collected by
+ the LRNG for statistical analysis. This data is used for
+ seeding the LRNG if a high-resolution time stamp is not
+ available. If a high-resolution time stamp is detected,
+ the Jiffies value is not collected by the LRNG and no
+ data is provided via the test interface. Extracted noise
+ data is not used to seed the random number generator.
+
+ The raw noise data can be obtained using the lrng_raw_jiffies
+ debugfs file. Using the option lrng_testing.boot_raw_jiffies_test=1
+ the raw noise of the first 1000 entropy events since boot
+ can be sampled.
+
+config LRNG_RAW_IRQ_ENTROPY
+ bool "Entropy test interface to IRQ number noise source"
+ depends on LRNG_IRQ
+ select LRNG_TESTING
+ select LRNG_TESTING_RECORDING
+ help
+ The test interface allows a privileged process to capture
+ the raw unconditioned interrupt number that is collected by
+ the LRNG for statistical analysis. Extracted noise data is
+ not used to seed the random number generator.
+
+ The raw noise data can be obtained using the lrng_raw_irq
+ debugfs file. Using the option lrng_testing.boot_raw_irq_test=1
+ the raw noise of the first 1000 entropy events since boot
+ can be sampled.
+
+config LRNG_RAW_RETIP_ENTROPY
+ bool "Entropy test interface to RETIP value of IRQ noise source"
+ depends on LRNG_IRQ
+ select LRNG_TESTING
+ select LRNG_TESTING_RECORDING
+ help
+ The test interface allows a privileged process to capture
+ the raw unconditioned return instruction pointer value
+ that is collected by the LRNG for statistical analysis.
+ Extracted noise data is not used to seed the random number
+ generator.
+
+ The raw noise data can be obtained using the lrng_raw_retip
+ debugfs file. Using the option lrng_testing.boot_raw_retip_test=1
+ the raw noise of the first 1000 entropy events since boot
+ can be sampled.
+
+config LRNG_RAW_REGS_ENTROPY
+ bool "Entropy test interface to IRQ register value noise source"
+ depends on LRNG_IRQ
+ select LRNG_TESTING
+ select LRNG_TESTING_RECORDING
+ help
+ The test interface allows a privileged process to capture
+ the raw unconditioned interrupt register value that is
+ collected by the LRNG for statistical analysis. Extracted noise
+ data is not used to seed the random number generator.
+
+ The raw noise data can be obtained using the lrng_raw_regs
+ debugfs file. Using the option lrng_testing.boot_raw_regs_test=1
+ the raw noise of the first 1000 entropy events since boot
+ can be sampled.
+
+config LRNG_RAW_ARRAY
+ bool "Test interface to LRNG raw entropy IRQ storage array"
+ depends on LRNG_IRQ
+ select LRNG_TESTING
+ select LRNG_TESTING_RECORDING
+ help
+ The test interface allows a privileged process to capture
+ the raw noise data that is collected by the LRNG
+ in the per-CPU array for statistical analysis. The purpose
+ of this interface is to verify that the array handling code
+ truly only concatenates data and provides the same entropy
+ rate as the raw unconditioned noise source when assessing
+ the collected data byte-wise.
+
+ The data can be obtained using the lrng_raw_array debugfs
+ file. Using the option lrng_testing.boot_raw_array=1
+ the raw noise of the first 1000 entropy events since boot
+ can be sampled.
+
+config LRNG_IRQ_PERF
+ bool "LRNG interrupt entropy source performance monitor"
+ depends on LRNG_IRQ
+ select LRNG_TESTING
+ select LRNG_TESTING_RECORDING
+ help
+ With this option, the performance monitor of the LRNG
+ interrupt handling code is enabled. The file provides
+ the execution time of the interrupt handler in
+ cycles.
+
+ The interrupt performance data can be obtained using
+ the lrng_irq_perf debugfs file. Using the option
+ lrng_testing.boot_irq_perf=1 the performance data of
+ the first 1000 entropy events since boot can be sampled.
+
+comment "Scheduler Entropy Source Test Interfaces"
+
+config LRNG_RAW_SCHED_HIRES_ENTROPY
+ bool "Interface to obtain raw unprocessed scheduler noise source data"
+ depends on LRNG_SCHED
+ select LRNG_TESTING
+ select LRNG_TESTING_RECORDING
+ help
+ The test interface allows a privileged process to capture
+ the raw unconditioned high resolution time stamp noise that
+ is collected by the LRNG for the Scheduler-based noise source
+ for statistical analysis. Extracted noise data is not used to
+ seed the LRNG.
+
+ The raw noise data can be obtained using the lrng_raw_sched_hires
+ debugfs file. Using the option
+ lrng_testing.boot_raw_sched_hires_test=1 the raw noise of the
+ first 1000 entropy events since boot can be sampled.
+
+config LRNG_RAW_SCHED_PID_ENTROPY
+ bool "Entropy test interface to PID value"
+ depends on LRNG_SCHED
+ select LRNG_TESTING
+ select LRNG_TESTING_RECORDING
+ help
+ The test interface allows a privileged process to capture
+ the raw unconditioned PID value that is collected by the
+ LRNG for statistical analysis. Extracted noise
+ data is not used to seed the random number generator.
+
+ The raw noise data can be obtained using the
+ lrng_raw_sched_pid debugfs file. Using the option
+ lrng_testing.boot_raw_sched_pid_test=1
+ the raw noise of the first 1000 entropy events since boot
+ can be sampled.
+
+config LRNG_RAW_SCHED_START_TIME_ENTROPY
+ bool "Entropy test interface to task start time value"
+ depends on LRNG_SCHED
+ select LRNG_TESTING
+ select LRNG_TESTING_RECORDING
+ help
+ The test interface allows a privileged process to capture
+ the raw unconditioned task start time value that is collected
+ by the LRNG for statistical analysis. Extracted noise
+ data is not used to seed the random number generator.
+
+ The raw noise data can be obtained using the
+ lrng_raw_sched_starttime debugfs file. Using the option
+ lrng_testing.boot_raw_sched_starttime_test=1
+ the raw noise of the first 1000 entropy events since boot
+ can be sampled.
+
+config LRNG_RAW_SCHED_NVCSW_ENTROPY
+ bool "Entropy test interface to task context switch numbers"
+ depends on LRNG_SCHED
+ select LRNG_TESTING
+ select LRNG_TESTING_RECORDING
+ help
+ The test interface allows a privileged process to capture
+ the raw unconditioned task numbers of context switches that
+ are collected by the LRNG for statistical analysis. Extracted
+ noise data is not used to seed the random number generator.
+
+ The raw noise data can be obtained using the
+ lrng_raw_sched_nvcsw debugfs file. Using the option
+ lrng_testing.boot_raw_sched_nvcsw_test=1
+ the raw noise of the first 1000 entropy events since boot
+ can be sampled.
+
+config LRNG_SCHED_PERF
+ bool "LRNG scheduler entropy source performance monitor"
+ depends on LRNG_SCHED
+ select LRNG_TESTING
+ select LRNG_TESTING_RECORDING
+ help
+ With this option, the performance monitor of the LRNG
+ scheduler event handling code is enabled. The file provides
+ the execution time of the scheduler event handler in cycles.
+
+ The scheduler performance data can be obtained using
+ the lrng_sched_perf debugfs file. Using the option
+ lrng_testing.boot_sched_perf=1 the performance data of
+ the first 1000 entropy events since boot can be sampled.
+
+comment "Auxiliary Test Interfaces"
+
+config LRNG_ACVT_HASH
+ bool "Enable LRNG ACVT Hash interface"
+ select LRNG_TESTING
+ help
+ With this option, the LRNG built-in hash function used for
+ auxiliary pool management and prior to switching the
+ cryptographic backends is made available for ACVT. The
+ interface allows writing of the data to be hashed
+ into the interface. The read operation triggers the hash
+ operation to generate a message digest.
+
+ The ACVT interface is available with the lrng_acvt_hash
+ debugfs file.
+
+config LRNG_RUNTIME_MAX_WO_RESEED_CONFIG
+ bool "Enable runtime configuration of max reseed threshold"
+ help
+ When enabling this option, the LRNG provides an interface
+ allowing the maximum number of DRNG generate operations
+ without a full-entropy reseed to be set at runtime. The
+ interface is lrng_drng.max_wo_reseed.
+
+config LRNG_RUNTIME_FORCE_SEEDING_DISABLE
+ bool "Enable runtime configuration of force seeding"
+ help
+ When enabling this option, the LRNG provides an interface
+ allowing the forced seeding to be disabled when the DRNG
+ is not fully seeded but entropy is available.
+
+config LRNG_TEST_CPU_ES_COMPRESSION
+ bool "Force CPU ES compression operation"
+ help
+ When enabling this option, the CPU ES compression operation
+ is forced by setting an arbitrary value > 1 for the data
+ multiplier even when the CPU ES would deliver full entropy.
+ This allows testing of the compression operation. It
+ therefore forces more data to be pulled from the CPU ES
+ than may be required.
+
+endif # LRNG_TESTING_MENU
+
+config LRNG_SELFTEST
+ bool "Enable power-on and on-demand self-tests"
+ help
+ The power-on self-tests are executed during boot time
+ covering the ChaCha20 DRNG, the hash operation used for
+ processing the entropy pools and the auxiliary pool, and
+ the time stamp management of the LRNG.
+
+ The on-demand self-tests are triggered by writing any
+ value into the SysFS file selftest_status. At the same
+ time, when reading this file, the test status is
+ returned. A zero indicates that all tests were executed
+ successfully.
+
+ If unsure, say Y.
+
+if LRNG_SELFTEST
+
+config LRNG_SELFTEST_PANIC
+ bool "Panic the kernel upon self-test failure"
+ help
+ If the option is enabled, the kernel is terminated if an
+ LRNG power-on self-test failure is detected.
+
+endif # LRNG_SELFTEST
+
+endif # LRNG
+
+endmenu # LRNG
diff --git a/drivers/char/lrng/Makefile b/drivers/char/lrng/Makefile
new file mode 100644
index 000000000000..f61fc40f4620
--- /dev/null
+++ b/drivers/char/lrng/Makefile
@@ -0,0 +1,39 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for the Entropy Source and DRNG Manager.
+#
+
+obj-y += lrng_es_mgr.o lrng_drng_mgr.o \
+ lrng_es_aux.o
+obj-$(CONFIG_LRNG_SHA256) += lrng_sha256.o
+obj-$(CONFIG_LRNG_SHA1) += lrng_sha1.o
+
+obj-$(CONFIG_SYSCTL) += lrng_proc.o
+obj-$(CONFIG_LRNG_SYSCTL) += lrng_sysctl.o
+obj-$(CONFIG_NUMA) += lrng_numa.o
+
+obj-$(CONFIG_LRNG_SWITCH) += lrng_switch.o
+obj-$(CONFIG_LRNG_HASH_KCAPI) += lrng_hash_kcapi.o
+obj-$(CONFIG_LRNG_DRNG_CHACHA20) += lrng_drng_chacha20.o
+obj-$(CONFIG_LRNG_DRBG) += lrng_drng_drbg.o
+obj-$(CONFIG_LRNG_DRNG_KCAPI) += lrng_drng_kcapi.o
+obj-$(CONFIG_LRNG_DRNG_ATOMIC) += lrng_drng_atomic.o
+
+obj-$(CONFIG_LRNG_TIMER_COMMON) += lrng_es_timer_common.o
+obj-$(CONFIG_LRNG_IRQ) += lrng_es_irq.o
+obj-$(CONFIG_LRNG_KERNEL_RNG) += lrng_es_krng.o
+obj-$(CONFIG_LRNG_SCHED) += lrng_es_sched.o
+obj-$(CONFIG_LRNG_CPU) += lrng_es_cpu.o
+obj-$(CONFIG_LRNG_JENT) += lrng_es_jent.o
+
+obj-$(CONFIG_LRNG_HEALTH_TESTS) += lrng_health.o
+obj-$(CONFIG_LRNG_TESTING) += lrng_testing.o
+obj-$(CONFIG_LRNG_SELFTEST) += lrng_selftest.o
+
+obj-$(CONFIG_LRNG_COMMON_DEV_IF) += lrng_interface_dev_common.o
+obj-$(CONFIG_LRNG_RANDOM_IF) += lrng_interface_random_user.o \
+ lrng_interface_random_kernel.o \
+ lrng_interface_aux.o
+obj-$(CONFIG_LRNG_KCAPI_IF) += lrng_interface_kcapi.o
+obj-$(CONFIG_LRNG_DEV_IF) += lrng_interface_dev.o
+obj-$(CONFIG_LRNG_HWRAND_IF) += lrng_interface_hwrand.o
diff --git a/drivers/char/lrng/lrng_definitions.h b/drivers/char/lrng/lrng_definitions.h
new file mode 100644
index 000000000000..f6eb48e285cc
--- /dev/null
+++ b/drivers/char/lrng/lrng_definitions.h
@@ -0,0 +1,163 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause */
+/*
+ * Copyright (C) 2022 - 2023, Stephan Mueller <smueller@chronox.de>
+ */
+
+#ifndef _LRNG_DEFINITIONS_H
+#define _LRNG_DEFINITIONS_H
+
+#include <crypto/sha1.h>
+#include <crypto/sha2.h>
+#include <linux/slab.h>
+
+/*************************** General LRNG parameter ***************************/
+
+/*
+ * Specific settings for different use cases
+ */
+#ifdef CONFIG_CRYPTO_FIPS
+# define LRNG_OVERSAMPLE_ES_BITS 64
+# define LRNG_SEED_BUFFER_INIT_ADD_BITS 128
+#else /* CONFIG_CRYPTO_FIPS */
+# define LRNG_OVERSAMPLE_ES_BITS 0
+# define LRNG_SEED_BUFFER_INIT_ADD_BITS 0
+#endif /* CONFIG_CRYPTO_FIPS */
+
+/* Security strength of LRNG -- this must match DRNG security strength */
+#define LRNG_DRNG_SECURITY_STRENGTH_BYTES 32
+#define LRNG_DRNG_SECURITY_STRENGTH_BITS (LRNG_DRNG_SECURITY_STRENGTH_BYTES * 8)
+#define LRNG_DRNG_INIT_SEED_SIZE_BITS \
+ (LRNG_DRNG_SECURITY_STRENGTH_BITS + LRNG_SEED_BUFFER_INIT_ADD_BITS)
+#define LRNG_DRNG_INIT_SEED_SIZE_BYTES (LRNG_DRNG_INIT_SEED_SIZE_BITS >> 3)
+
+/*
+ * SP800-90A defines a maximum request size of 1<<16 bytes. The given value is
+ * considered a safer margin.
+ *
+ * This value is allowed to be changed.
+ */
+#define LRNG_DRNG_MAX_REQSIZE (1<<12)
+
+/*
+ * SP800-90A defines a maximum number of requests between reseeds of 2^48.
+ * The given value is considered a much safer margin, balancing requests for
+ * frequent reseeds with the need to conserve entropy. This value MUST NOT be
+ * larger than INT_MAX because it is used in an atomic_t.
+ *
+ * This value is allowed to be changed.
+ */
+#define LRNG_DRNG_RESEED_THRESH (1<<20)
+
+/*
+ * Maximum DRNG generation operations without reseed having full entropy
+ * This value defines the absolute maximum value of DRNG generation operations
+ * without a reseed holding full entropy. LRNG_DRNG_RESEED_THRESH is the
+ * threshold when a new reseed is attempted. But it is possible that this fails
+ * to deliver full entropy. In this case the DRNG will continue to provide data
+ * even though it was not reseeded with full entropy. To avoid in the extreme
+ * case that no reseed is performed for too long, this threshold is enforced.
+ * If that absolute low value is reached, the LRNG is marked as not operational.
+ *
+ * This value is allowed to be changed.
+ */
+#define LRNG_DRNG_MAX_WITHOUT_RESEED (1<<30)
+
+/*
+ * Min required seed entropy is 128 bits covering the minimum entropy
+ * requirement of SP800-131A and the German BSI's TR02102.
+ *
+ * This value is allowed to be changed.
+ */
+#define LRNG_FULL_SEED_ENTROPY_BITS LRNG_DRNG_SECURITY_STRENGTH_BITS
+#define LRNG_MIN_SEED_ENTROPY_BITS 128
+#define LRNG_INIT_ENTROPY_BITS 32
+
+/* AIS 20/31: NTG.1.4 minimum entropy rate for one entropy source */
+#define LRNG_AIS2031_NPTRNG_MIN_ENTROPY 220
+
+/*
+ * Wakeup value
+ *
+ * This value is allowed to be changed but must not be larger than the
+ * digest size of the hash operation used to update the aux_pool.
+ */
+#ifdef CONFIG_LRNG_SHA256
+# define LRNG_ATOMIC_DIGEST_SIZE SHA256_DIGEST_SIZE
+#else
+# define LRNG_ATOMIC_DIGEST_SIZE SHA1_DIGEST_SIZE
+#endif
+#define LRNG_WRITE_WAKEUP_ENTROPY LRNG_ATOMIC_DIGEST_SIZE
+
+/*
+ * If the switching support is configured, we must provide support up to
+ * the largest digest size. Without switching support, we know it is only
+ * the built-in digest size.
+ */
+#ifdef CONFIG_LRNG_SWITCH
+# define LRNG_MAX_DIGESTSIZE 64
+#else
+# define LRNG_MAX_DIGESTSIZE LRNG_ATOMIC_DIGEST_SIZE
+#endif
+
+/*
+ * Oversampling factor of timer-based events to obtain
+ * LRNG_DRNG_SECURITY_STRENGTH_BYTES. This factor is used when a
+ * high-resolution time stamp is not available. In this case, jiffies and
+ * register contents are used to fill the entropy pool. These noise sources
+ * are much less entropic than the high-resolution timer. The assumed entropy
+ * content is then LRNG_[IRQ|SCHED]_ENTROPY_BITS divided by
+ * LRNG_ES_OVERSAMPLING_FACTOR.
+ *
+ * This value is allowed to be changed.
+ */
+#define LRNG_ES_OVERSAMPLING_FACTOR 10
+
+/* Alignmask that is intended to be identical to CRYPTO_MINALIGN */
+#define LRNG_KCAPI_ALIGN ARCH_KMALLOC_MINALIGN
+
+/*
+ * This definition must provide a buffer that is equal to SHASH_DESC_ON_STACK
+ * as it will be casted into a struct shash_desc.
+ */
+#define LRNG_POOL_SIZE (sizeof(struct shash_desc) + HASH_MAX_DESCSIZE)
+
+/*
+ * Identification of a permanent health failure.
+ *
+ * Allow the given number of back-to-back health failures until incurring a
+ * permanent health failure. The chosen value implies an alpha of 2^-60,
+ * considering that the alpha of one health failure is 2^-30.
+ */
+#define LRNG_PERMANENT_HEALTH_FAILURES 2
+
+/****************************** Helper code ***********************************/
+
+static inline u32 lrng_fast_noise_entropylevel(u32 ent_bits, u32 requested_bits)
+{
+ /* Obtain entropy statement */
+ ent_bits = ent_bits * requested_bits / LRNG_DRNG_SECURITY_STRENGTH_BITS;
+ /* Cap entropy to buffer size in bits */
+ ent_bits = min_t(u32, ent_bits, requested_bits);
+ return ent_bits;
+}
+
+/* Convert entropy in bits into nr. of events with the same entropy content. */
+static inline u32 lrng_entropy_to_data(u32 entropy_bits, u32 entropy_rate)
+{
+ return ((entropy_bits * entropy_rate) /
+ LRNG_DRNG_SECURITY_STRENGTH_BITS);
+}
+
+/* Convert number of events into entropy value. */
+static inline u32 lrng_data_to_entropy(u32 num, u32 entropy_rate)
+{
+ return ((num * LRNG_DRNG_SECURITY_STRENGTH_BITS) /
+ entropy_rate);
+}
+
+static inline u32 atomic_read_u32(atomic_t *v)
+{
+ return (u32)atomic_read(v);
+}
+
+#endif /* _LRNG_DEFINITIONS_H */
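
A worked example for the two conversion helpers above (an entropy rate is expressed as the number of events required to obtain 256 bits of entropy); the wrapper function is illustrative, while the numbers follow directly from the formulas:

#include "lrng_definitions.h"

static void lrng_conversion_example(void)
{
	/* Default IRQ ES rate of 256 events per 256 bits: obtaining
	 * 256 bits of entropy requires 256 events, i.e. each event
	 * is credited with one bit. */
	u32 events = lrng_entropy_to_data(256, 256);	/* == 256 */

	/* At a rate of 1024 events per 256 bits, 1024 collected
	 * events are credited with 256 bits of entropy, i.e. each
	 * event is credited with a quarter bit. */
	u32 bits = lrng_data_to_entropy(1024, 1024);	/* == 256 */

	(void)events;
	(void)bits;
}
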
diff --git a/drivers/char/lrng/lrng_drng_atomic.c b/drivers/char/lrng/lrng_drng_atomic.c
new file mode 100644
index 000000000000..290d346ea128
--- /dev/null
+++ b/drivers/char/lrng/lrng_drng_atomic.c
@@ -0,0 +1,130 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause
+/*
+ * LRNG DRNG for atomic contexts
+ *
+ * Copyright (C) 2022, Stephan Mueller <smueller@chronox.de>
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/lrng.h>
+
+#include "lrng_drng_atomic.h"
+#include "lrng_drng_chacha20.h"
+#include "lrng_es_aux.h"
+#include "lrng_es_mgr.h"
+#include "lrng_sha.h"
+
+static struct chacha20_state chacha20_atomic = {
+ LRNG_CC20_INIT_RFC7539(.block)
+};
+
+/*
+ * DRNG usable in atomic context. This DRNG will always use the ChaCha20
+ * DRNG. It will never benefit from a DRNG switch like the "regular" DRNG. If
+ * there has been no DRNG switch, the atomic DRNG is identical to the
+ * "regular" DRNG.
+ *
+ * This separate instance is needed because DRNGs other than the ChaCha20
+ * DRNG may sleep and thus cannot be used in atomic context.
+ */
+static struct lrng_drng lrng_drng_atomic = {
+ LRNG_DRNG_STATE_INIT(lrng_drng_atomic,
+ &chacha20_atomic, NULL,
+ &lrng_cc20_drng_cb, &lrng_sha_hash_cb),
+ .spin_lock = __SPIN_LOCK_UNLOCKED(lrng_drng_atomic.spin_lock)
+};
+
+struct lrng_drng *lrng_get_atomic(void)
+{
+ return &lrng_drng_atomic;
+}
+
+void lrng_drng_atomic_reset(void)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&lrng_drng_atomic.spin_lock, flags);
+ lrng_drng_reset(&lrng_drng_atomic);
+ spin_unlock_irqrestore(&lrng_drng_atomic.spin_lock, flags);
+}
+
+void lrng_drng_atomic_force_reseed(void)
+{
+ lrng_drng_atomic.force_reseed = lrng_drng_atomic.fully_seeded;
+}
+
+static bool lrng_drng_atomic_must_reseed(struct lrng_drng *drng)
+{
+ return (!drng->fully_seeded ||
+ atomic_read(&lrng_drng_atomic.requests) == 0 ||
+ drng->force_reseed ||
+ time_after(jiffies,
+ drng->last_seeded + lrng_drng_reseed_max_time * HZ));
+}
+
+void lrng_drng_atomic_seed_drng(struct lrng_drng *regular_drng)
+{
+ u8 seedbuf[LRNG_DRNG_SECURITY_STRENGTH_BYTES]
+ __aligned(LRNG_KCAPI_ALIGN);
+ int ret;
+
+ if (!lrng_drng_atomic_must_reseed(&lrng_drng_atomic))
+ return;
+
+ /*
+ * Reseed the atomic DRNG from the "regular" DRNG while that regular
+ * DRNG is being reseeded. Therefore, this code operates in non-atomic
+ * context and thus can use the lrng_drng_get function to get random
+ * numbers from the just freshly seeded regular DRNG.
+ */
+ ret = lrng_drng_get(regular_drng, seedbuf, sizeof(seedbuf));
+
+ if (ret < 0) {
+ pr_warn("Error generating random numbers for atomic DRNG: %d\n",
+ ret);
+ } else {
+ unsigned long flags;
+
+ spin_lock_irqsave(&lrng_drng_atomic.spin_lock, flags);
+ lrng_drng_inject(&lrng_drng_atomic, seedbuf, ret,
+ regular_drng->fully_seeded, "atomic");
+ spin_unlock_irqrestore(&lrng_drng_atomic.spin_lock, flags);
+ }
+ memzero_explicit(&seedbuf, sizeof(seedbuf));
+}
+
+static void lrng_drng_atomic_get(u8 *outbuf, u32 outbuflen)
+{
+ struct lrng_drng *drng = &lrng_drng_atomic;
+ unsigned long flags;
+
+ if (!outbuf || !outbuflen)
+ return;
+
+ outbuflen = min_t(size_t, outbuflen, INT_MAX);
+
+ while (outbuflen) {
+ u32 todo = min_t(u32, outbuflen, LRNG_DRNG_MAX_REQSIZE);
+ int ret;
+
+ atomic_dec(&drng->requests);
+
+ spin_lock_irqsave(&drng->spin_lock, flags);
+ ret = drng->drng_cb->drng_generate(drng->drng, outbuf, todo);
+ spin_unlock_irqrestore(&drng->spin_lock, flags);
+ if (ret <= 0) {
+ pr_warn("getting random data from DRNG failed (%d)\n",
+ ret);
+ return;
+ }
+ outbuflen -= ret;
+ outbuf += ret;
+ }
+}
+
+void lrng_get_random_bytes(void *buf, int nbytes)
+{
+ lrng_drng_atomic_get((u8 *)buf, (u32)nbytes);
+ lrng_debug_report_seedlevel("lrng_get_random_bytes");
+}
+EXPORT_SYMBOL(lrng_get_random_bytes);
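
The lrng_get_random_bytes() interface exported above is the LRNG's atomic-safe in-kernel entry point, always backed by the ChaCha20 atomic DRNG defined in this file. A sketch of a kernel consumer follows; the module scaffolding and the use of the buffer are illustrative:

#include <linux/lrng.h>
#include <linux/module.h>

static int __init lrng_consumer_example_init(void)
{
	u8 key[32];

	/* Usable even in atomic context because the backing ChaCha20
	 * DRNG never sleeps; does not block waiting for full seeding
	 * (cf. lrng_debug_report_seedlevel above). */
	lrng_get_random_bytes(key, sizeof(key));

	/* ... use key, e.g. as an ephemeral symmetric key ... */
	return 0;
}
module_init(lrng_consumer_example_init);

MODULE_LICENSE("GPL");
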
diff --git a/drivers/char/lrng/lrng_drng_atomic.h b/drivers/char/lrng/lrng_drng_atomic.h
new file mode 100644
index 000000000000..7ae10f20b4b8
--- /dev/null
+++ b/drivers/char/lrng/lrng_drng_atomic.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause */
+/*
+ * Copyright (C) 2022, Stephan Mueller <smueller@chronox.de>
+ */
+
+#ifndef _LRNG_DRNG_ATOMIC_H
+#define _LRNG_DRNG_ATOMIC_H
+
+#include "lrng_drng_mgr.h"
+
+#ifdef CONFIG_LRNG_DRNG_ATOMIC
+void lrng_drng_atomic_reset(void);
+void lrng_drng_atomic_seed_drng(struct lrng_drng *drng);
+void lrng_drng_atomic_force_reseed(void);
+struct lrng_drng *lrng_get_atomic(void);
+#else /* CONFIG_LRNG_DRNG_ATOMIC */
+static inline void lrng_drng_atomic_reset(void) { }
+static inline void lrng_drng_atomic_seed_drng(struct lrng_drng *drng) { }
+static inline void lrng_drng_atomic_force_reseed(void) { }
+static inline struct lrng_drng *lrng_get_atomic(void) { return NULL; }
+#endif /* CONFIG_LRNG_DRNG_ATOMIC */
+
+#endif /* _LRNG_DRNG_ATOMIC_H */
diff --git a/drivers/char/lrng/lrng_drng_chacha20.c b/drivers/char/lrng/lrng_drng_chacha20.c
new file mode 100644
index 000000000000..31be102e3007
--- /dev/null
+++ b/drivers/char/lrng/lrng_drng_chacha20.c
@@ -0,0 +1,195 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause
+/*
+ * Backend for the LRNG providing the cryptographic primitives using
+ * ChaCha20 cipher implementations.
+ *
+ * Copyright (C) 2022, Stephan Mueller <smueller@chronox.de>
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <crypto/chacha.h>
+#include <linux/lrng.h>
+#include <linux/random.h>
+#include <linux/slab.h>
+
+#include "lrng_drng_chacha20.h"
+
+/******************************* ChaCha20 DRNG *******************************/
+
+#define CHACHA_BLOCK_WORDS (CHACHA_BLOCK_SIZE / sizeof(u32))
+
+/*
+ * Update of the ChaCha20 state by either using an unused buffer part or by
+ * generating one fresh ChaCha20 block. The key-sized half of that block is
+ * XORed into the key part of the state, which makes up half of the ChaCha20
+ * state. This shall ensure backtracking resistance as well as a proper mix
+ * of the ChaCha20 state once the key is injected.
+ */
+static void lrng_chacha20_update(struct chacha20_state *chacha20_state,
+ __le32 *buf, u32 used_words)
+{
+ struct chacha20_block *chacha20 = &chacha20_state->block;
+ u32 i;
+ __le32 tmp[CHACHA_BLOCK_WORDS];
+
+ BUILD_BUG_ON(sizeof(struct chacha20_block) != CHACHA_BLOCK_SIZE);
+ BUILD_BUG_ON(CHACHA_BLOCK_SIZE != 2 * CHACHA_KEY_SIZE);
+
+ if (used_words > CHACHA_KEY_SIZE_WORDS) {
+ chacha20_block(&chacha20->constants[0], (u8 *)tmp);
+ for (i = 0; i < CHACHA_KEY_SIZE_WORDS; i++)
+ chacha20->key.u[i] ^= le32_to_cpu(tmp[i]);
+ memzero_explicit(tmp, sizeof(tmp));
+ } else {
+ for (i = 0; i < CHACHA_KEY_SIZE_WORDS; i++)
+ chacha20->key.u[i] ^= le32_to_cpu(buf[i + used_words]);
+ }
+
+ /* Deterministic increment of nonce as required in RFC 7539 chapter 4 */
+ chacha20->nonce[0]++;
+ if (chacha20->nonce[0] == 0) {
+ chacha20->nonce[1]++;
+ if (chacha20->nonce[1] == 0)
+ chacha20->nonce[2]++;
+ }
+
+ /* Leave the counter untouched as its start value is undefined in the RFC */
+}
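+
+/*
+ * Illustrative sketch: assume the generate path consumed only the first
+ * four words of a freshly generated block; the update then folds the
+ * unused words buf[4..11] into the key:
+ *
+ *	__le32 buf[CHACHA_BLOCK_WORDS];
+ *
+ *	chacha20_block(&chacha20->constants[0], (u8 *)buf);
+ *	lrng_chacha20_update(chacha20_state, buf, 4);
+ *
+ * With used_words > CHACHA_KEY_SIZE_WORDS, a fresh block is generated and
+ * XORed in instead.
+ */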
+
+/*
+ * Seed the ChaCha20 DRNG by injecting the input data into the key part of
+ * the ChaCha20 state. If the input data is longer than the ChaCha20 key size,
+ * perform a ChaCha20 operation after processing each key-sized input chunk.
+ * This operation shall spread out the entropy into the ChaCha20 state before
+ * new entropy is injected into the key part.
+ */
+static int lrng_cc20_drng_seed_helper(void *drng, const u8 *inbuf, u32 inbuflen)
+{
+ struct chacha20_state *chacha20_state = (struct chacha20_state *)drng;
+ struct chacha20_block *chacha20 = &chacha20_state->block;
+
+ while (inbuflen) {
+ u32 i, todo = min_t(u32, inbuflen, CHACHA_KEY_SIZE);
+
+ for (i = 0; i < todo; i++)
+ chacha20->key.b[i] ^= inbuf[i];
+
+ /* Break potential dependencies between the inbuf key blocks */
+ lrng_chacha20_update(chacha20_state, NULL,
+ CHACHA_BLOCK_WORDS);
+ inbuf += todo;
+ inbuflen -= todo;
+ }
+
+ return 0;
+}
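+
+/*
+ * Worked example: seeding with a 64-byte seed processes two 32-byte
+ * chunks; each chunk is XORed into the key and immediately followed by a
+ * state update, so later chunks are mixed with earlier ones instead of
+ * simply overwriting them.
+ */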
+
+/*
+ * ChaCha20 DRNG generation of random numbers: the stream output of ChaCha20
+ * is the random number. After the completion of the generation of the
+ * stream, the entire ChaCha20 state is updated.
+ *
+ * Note, as ChaCha20 implements a 32 bit counter, we must ensure that this
+ * function is only invoked for at most 2^32 - 1 ChaCha20 blocks before a
+ * reseed or an update happens. This is ensured by the variable outbuflen,
+ * which is a 32 bit integer defining the number of bytes to be generated
+ * by the ChaCha20 DRNG. At the end of this function, an update operation
+ * is invoked, which implies that the 32 bit counter will never overflow in
+ * this implementation.
+ */
+static int lrng_cc20_drng_generate_helper(void *drng, u8 *outbuf, u32 outbuflen)
+{
+ struct chacha20_state *chacha20_state = (struct chacha20_state *)drng;
+ struct chacha20_block *chacha20 = &chacha20_state->block;
+ __le32 aligned_buf[CHACHA_BLOCK_WORDS];
+ u32 ret = outbuflen, used = CHACHA_BLOCK_WORDS;
+ int zeroize_buf = 0;
+
+ while (outbuflen >= CHACHA_BLOCK_SIZE) {
+ chacha20_block(&chacha20->constants[0], outbuf);
+ outbuf += CHACHA_BLOCK_SIZE;
+ outbuflen -= CHACHA_BLOCK_SIZE;
+ }
+
+ if (outbuflen) {
+ chacha20_block(&chacha20->constants[0], (u8 *)aligned_buf);
+ memcpy(outbuf, aligned_buf, outbuflen);
+ used = ((outbuflen + sizeof(aligned_buf[0]) - 1) /
+ sizeof(aligned_buf[0]));
+ zeroize_buf = 1;
+ }
+
+ lrng_chacha20_update(chacha20_state, aligned_buf, used);
+
+ if (zeroize_buf)
+ memzero_explicit(aligned_buf, sizeof(aligned_buf));
+
+ return ret;
+}
+
+/*
+ * Allocation of the DRNG state
+ */
+static void *lrng_cc20_drng_alloc(u32 sec_strength)
+{
+ struct chacha20_state *state = NULL;
+
+ if (sec_strength > CHACHA_KEY_SIZE) {
+ pr_err("Security strength of ChaCha20 DRNG (%u bits) lower than requested by LRNG (%u bits)\n",
+ CHACHA_KEY_SIZE * 8, sec_strength * 8);
+ return ERR_PTR(-EINVAL);
+ }
+ if (sec_strength < CHACHA_KEY_SIZE)
+ pr_warn("Security strength of ChaCha20 DRNG (%u bits) higher than requested by LRNG (%u bits)\n",
+ CHACHA_KEY_SIZE * 8, sec_strength * 8);
+
+ state = kmalloc(sizeof(struct chacha20_state), GFP_KERNEL);
+ if (!state)
+ return ERR_PTR(-ENOMEM);
+ pr_debug("memory for ChaCha20 core allocated\n");
+
+ lrng_cc20_init_rfc7539(&state->block);
+
+ return state;
+}
+
+static void lrng_cc20_drng_dealloc(void *drng)
+{
+ struct chacha20_state *chacha20_state = (struct chacha20_state *)drng;
+
+ pr_debug("ChaCha20 core zeroized and freed\n");
+ kfree_sensitive(chacha20_state);
+}
+
+static const char *lrng_cc20_drng_name(void)
+{
+ return "ChaCha20 DRNG";
+}
+
+const struct lrng_drng_cb lrng_cc20_drng_cb = {
+ .drng_name = lrng_cc20_drng_name,
+ .drng_alloc = lrng_cc20_drng_alloc,
+ .drng_dealloc = lrng_cc20_drng_dealloc,
+ .drng_seed = lrng_cc20_drng_seed_helper,
+ .drng_generate = lrng_cc20_drng_generate_helper,
+};
+
+#if !defined(CONFIG_LRNG_DFLT_DRNG_CHACHA20) && \
+ !defined(CONFIG_LRNG_DRNG_ATOMIC)
+static int __init lrng_cc20_drng_init(void)
+{
+ return lrng_set_drng_cb(&lrng_cc20_drng_cb);
+}
+
+static void __exit lrng_cc20_drng_exit(void)
+{
+ lrng_set_drng_cb(NULL);
+}
+
+late_initcall(lrng_cc20_drng_init);
+module_exit(lrng_cc20_drng_exit);
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_AUTHOR("Stephan Mueller <smueller@chronox.de>");
+MODULE_DESCRIPTION("Entropy Source and DRNG Manager - ChaCha20-based DRNG backend");
+#endif /* !CONFIG_LRNG_DFLT_DRNG_CHACHA20 && !CONFIG_LRNG_DRNG_ATOMIC */
diff --git a/drivers/char/lrng/lrng_drng_chacha20.h b/drivers/char/lrng/lrng_drng_chacha20.h
new file mode 100644
index 000000000000..fee6571281b6
--- /dev/null
+++ b/drivers/char/lrng/lrng_drng_chacha20.h
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause */
+/*
+ * LRNG ChaCha20 definitions
+ *
+ * Copyright (C) 2022, Stephan Mueller <smueller@chronox.de>
+ */
+
+#ifndef _LRNG_CHACHA20_H
+#define _LRNG_CHACHA20_H
+
+#include <crypto/chacha.h>
+
+/* State according to RFC 7539 section 2.3 */
+struct chacha20_block {
+ u32 constants[4];
+ union {
+#define CHACHA_KEY_SIZE_WORDS (CHACHA_KEY_SIZE / sizeof(u32))
+ u32 u[CHACHA_KEY_SIZE_WORDS];
+ u8 b[CHACHA_KEY_SIZE];
+ } key;
+ u32 counter;
+ u32 nonce[3];
+};
+
+struct chacha20_state {
+ struct chacha20_block block;
+};
+
+static inline void lrng_cc20_init_rfc7539(struct chacha20_block *chacha20)
+{
+ chacha_init_consts(chacha20->constants);
+}
+
+#define LRNG_CC20_INIT_RFC7539(x) \
+ x.constants[0] = 0x61707865, \
+ x.constants[1] = 0x3320646e, \
+ x.constants[2] = 0x79622d32, \
+ x.constants[3] = 0x6b206574,
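+
+/*
+ * Note: the four constants above are the little-endian words of the ASCII
+ * string "expand 32-byte k" mandated by RFC 7539 section 2.3.
+ */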
+
+extern const struct lrng_drng_cb lrng_cc20_drng_cb;
+
+#endif /* _LRNG_CHACHA20_H */
diff --git a/drivers/char/lrng/lrng_drng_drbg.c b/drivers/char/lrng/lrng_drng_drbg.c
new file mode 100644
index 000000000000..63de720d9d78
--- /dev/null
+++ b/drivers/char/lrng/lrng_drng_drbg.c
@@ -0,0 +1,179 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause
+/*
+ * Backend for the LRNG providing the cryptographic primitives using the
+ * kernel crypto API and its DRBG.
+ *
+ * Copyright (C) 2022, Stephan Mueller <smueller@chronox.de>
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <crypto/drbg.h>
+#include <linux/lrng.h>
+#include <linux/init.h>
+#include <linux/module.h>
+
+#include "lrng_drng_drbg.h"
+
+/*
+ * Define a DRBG plus a hash / MAC used to extract data from the entropy pool.
+ * For LRNG_HASH_NAME you can use a hash or a MAC (HMAC or CMAC) of your choice
+ * (note, you should use the suggested selections below -- using SHA-1 or MD5
+ * is not wise). The idea is that the cipher primitive can be selected to be
+ * the same as the one used for the DRBG, i.e. the LRNG only uses one cipher
+ * primitive with the same cipher implementation given the options offered
+ * below. This means that if the CTR DRBG is selected and AES-NI is present,
+ * both the CTR DRBG and the selected cmac(aes) use AES-NI.
+ *
+ * The security strength of each DRBG is 256 bits according to SP800-57
+ * section 5.6.1.
+ *
+ * This definition is allowed to be changed.
+ */
+#ifdef CONFIG_CRYPTO_DRBG_CTR
+static unsigned int lrng_drbg_type = 0;
+#elif defined CONFIG_CRYPTO_DRBG_HMAC
+static unsigned int lrng_drbg_type = 1;
+#elif defined CONFIG_CRYPTO_DRBG_HASH
+static unsigned int lrng_drbg_type = 2;
+#else
+#error "Unknown DRBG in use"
+#endif
+
+/* The parameter must be r/o in sysfs as otherwise races appear. */
+module_param(lrng_drbg_type, uint, 0444);
+MODULE_PARM_DESC(lrng_drbg_type, "DRBG type used for LRNG (0->CTR_DRBG, 1->HMAC_DRBG, 2->Hash_DRBG)");
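+
+/*
+ * Example (module name assumed from this file, not verified against the
+ * Makefile): selecting the HMAC DRBG when the backend is built as a
+ * module:
+ *
+ *	modprobe lrng_drng_drbg lrng_drbg_type=1
+ */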
+
+struct lrng_drbg {
+ const char *hash_name;
+ const char *drbg_core;
+};
+
+static const struct lrng_drbg lrng_drbg_types[] = {
+ { /* CTR_DRBG with AES-256 using derivation function */
+ .drbg_core = "drbg_nopr_ctr_aes256",
+ }, { /* HMAC_DRBG with SHA-512 */
+ .drbg_core = "drbg_nopr_hmac_sha512",
+ }, { /* Hash_DRBG with SHA-512 using derivation function */
+ .drbg_core = "drbg_nopr_sha512"
+ }
+};
+
+static int lrng_drbg_drng_seed_helper(void *drng, const u8 *inbuf, u32 inbuflen)
+{
+ struct drbg_state *drbg = (struct drbg_state *)drng;
+ LIST_HEAD(seedlist);
+ struct drbg_string data;
+ int ret;
+
+ drbg_string_fill(&data, inbuf, inbuflen);
+ list_add_tail(&data.list, &seedlist);
+ ret = drbg->d_ops->update(drbg, &seedlist, drbg->seeded);
+
+ if (ret >= 0)
+ drbg->seeded = DRBG_SEED_STATE_FULL;
+
+ return ret;
+}
+
+static int lrng_drbg_drng_generate_helper(void *drng, u8 *outbuf, u32 outbuflen)
+{
+ struct drbg_state *drbg = (struct drbg_state *)drng;
+
+ return drbg->d_ops->generate(drbg, outbuf, outbuflen, NULL);
+}
+
+static void *lrng_drbg_drng_alloc(u32 sec_strength)
+{
+ struct drbg_state *drbg;
+ int coreref = -1;
+ bool pr = false;
+ int ret;
+
+ drbg_convert_tfm_core(lrng_drbg_types[lrng_drbg_type].drbg_core,
+ &coreref, &pr);
+ if (coreref < 0)
+ return ERR_PTR(-EFAULT);
+
+ drbg = kzalloc(sizeof(struct drbg_state), GFP_KERNEL);
+ if (!drbg)
+ return ERR_PTR(-ENOMEM);
+
+ drbg->core = &drbg_cores[coreref];
+ drbg->seeded = DRBG_SEED_STATE_UNSEEDED;
+ ret = drbg_alloc_state(drbg);
+ if (ret)
+ goto err;
+
+ if (sec_strength > drbg_sec_strength(drbg->core->flags)) {
+ pr_err("Security strength of DRBG (%u bits) lower than requested by LRNG (%u bits)\n",
+ drbg_sec_strength(drbg->core->flags) * 8,
+ sec_strength * 8);
+ goto dealloc;
+ }
+
+ if (sec_strength < drbg_sec_strength(drbg->core->flags))
+ pr_warn("Security strength of DRBG (%u bits) higher than requested by LRNG (%u bits)\n",
+ drbg_sec_strength(drbg->core->flags) * 8,
+ sec_strength * 8);
+
+ pr_info("DRBG with %s core allocated\n", drbg->core->backend_cra_name);
+
+ return drbg;
+
+dealloc:
+ if (drbg->d_ops)
+ drbg->d_ops->crypto_fini(drbg);
+ drbg_dealloc_state(drbg);
+err:
+ kfree(drbg);
+ return ERR_PTR(-EINVAL);
+}
+
+static void lrng_drbg_drng_dealloc(void *drng)
+{
+ struct drbg_state *drbg = (struct drbg_state *)drng;
+
+ if (drbg && drbg->d_ops)
+ drbg->d_ops->crypto_fini(drbg);
+ drbg_dealloc_state(drbg);
+ kfree_sensitive(drbg);
+ pr_info("DRBG deallocated\n");
+}
+
+static const char *lrng_drbg_name(void)
+{
+ return lrng_drbg_types[lrng_drbg_type].drbg_core;
+}
+
+const struct lrng_drng_cb lrng_drbg_cb = {
+ .drng_name = lrng_drbg_name,
+ .drng_alloc = lrng_drbg_drng_alloc,
+ .drng_dealloc = lrng_drbg_drng_dealloc,
+ .drng_seed = lrng_drbg_drng_seed_helper,
+ .drng_generate = lrng_drbg_drng_generate_helper,
+};
+
+#ifndef CONFIG_LRNG_DFLT_DRNG_DRBG
+static int __init lrng_drbg_init(void)
+{
+ if (lrng_drbg_type >= ARRAY_SIZE(lrng_drbg_types)) {
+ pr_err("lrng_drbg_type parameter too large (given %u - max: %lu)",
+ lrng_drbg_type,
+ (unsigned long)ARRAY_SIZE(lrng_drbg_types) - 1);
+ return -EAGAIN;
+ }
+ return lrng_set_drng_cb(&lrng_drbg_cb);
+}
+
+static void __exit lrng_drbg_exit(void)
+{
+ lrng_set_drng_cb(NULL);
+}
+
+late_initcall(lrng_drbg_init);
+module_exit(lrng_drbg_exit);
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_AUTHOR("Stephan Mueller <smueller@chronox.de>");
+MODULE_DESCRIPTION("Entropy Source and DRNG Manager - SP800-90A DRBG backend");
+#endif /* CONFIG_LRNG_DFLT_DRNG_DRBG */
diff --git a/drivers/char/lrng/lrng_drng_drbg.h b/drivers/char/lrng/lrng_drng_drbg.h
new file mode 100644
index 000000000000..b3d556caf294
--- /dev/null
+++ b/drivers/char/lrng/lrng_drng_drbg.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause */
+/*
+ * LRNG SP800-90A definitions
+ *
+ * Copyright (C) 2022, Stephan Mueller <smueller@chronox.de>
+ */
+
+#ifndef _LRNG_DRBG_H
+#define _LRNG_DRBG_H
+
+extern const struct lrng_drng_cb lrng_drbg_cb;
+
+#endif /* _LRNG_DRBG_H */
diff --git a/drivers/char/lrng/lrng_drng_kcapi.c b/drivers/char/lrng/lrng_drng_kcapi.c
new file mode 100644
index 000000000000..a204bcf52a9a
--- /dev/null
+++ b/drivers/char/lrng/lrng_drng_kcapi.c
@@ -0,0 +1,208 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause
+/*
+ * Backend for the LRNG providing the cryptographic primitives using the
+ * kernel crypto API.
+ *
+ * Copyright (C) 2022, Stephan Mueller <smueller@chronox.de>
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/lrng.h>
+#include <crypto/hash.h>
+#include <crypto/rng.h>
+#include <linux/init.h>
+#include <linux/module.h>
+
+#include "lrng_drng_kcapi.h"
+
+static char *drng_name = NULL;
+module_param(drng_name, charp, 0444);
+MODULE_PARM_DESC(drng_name, "Kernel crypto API name of DRNG");
+
+static char *seed_hash = NULL;
+module_param(seed_hash, charp, 0444);
+MODULE_PARM_DESC(seed_hash,
+ "Kernel crypto API name of hash with output size equal to seedsize of DRNG to bring seed string to the size required by the DRNG");
+
+struct lrng_drng_info {
+ struct crypto_rng *kcapi_rng;
+ struct crypto_shash *hash_tfm;
+};
+
+static int lrng_kcapi_drng_seed_helper(void *drng, const u8 *inbuf,
+ u32 inbuflen)
+{
+ struct lrng_drng_info *lrng_drng_info = (struct lrng_drng_info *)drng;
+ struct crypto_rng *kcapi_rng = lrng_drng_info->kcapi_rng;
+ struct crypto_shash *hash_tfm = lrng_drng_info->hash_tfm;
+ SHASH_DESC_ON_STACK(shash, hash_tfm);
+ u32 digestsize;
+ u8 digest[HASH_MAX_DIGESTSIZE] __aligned(8);
+ int ret;
+
+ if (!hash_tfm)
+ return crypto_rng_reset(kcapi_rng, inbuf, inbuflen);
+
+ shash->tfm = hash_tfm;
+ digestsize = crypto_shash_digestsize(hash_tfm);
+
+ ret = crypto_shash_digest(shash, inbuf, inbuflen, digest);
+ shash_desc_zero(shash);
+ if (ret)
+ return ret;
+
+ ret = crypto_rng_reset(kcapi_rng, digest, digestsize);
+ if (ret)
+ return ret;
+
+ memzero_explicit(digest, digestsize);
+ return 0;
+}
+
+static int lrng_kcapi_drng_generate_helper(void *drng, u8 *outbuf,
+ u32 outbuflen)
+{
+ struct lrng_drng_info *lrng_drng_info = (struct lrng_drng_info *)drng;
+ struct crypto_rng *kcapi_rng = lrng_drng_info->kcapi_rng;
+ int ret = crypto_rng_get_bytes(kcapi_rng, outbuf, outbuflen);
+
+ if (ret < 0)
+ return ret;
+
+ return outbuflen;
+}
+
+static void *lrng_kcapi_drng_alloc(u32 sec_strength)
+{
+ struct lrng_drng_info *lrng_drng_info;
+ struct crypto_rng *kcapi_rng;
+ u32 time = random_get_entropy();
+ int seedsize, rv;
+ void *ret = ERR_PTR(-ENOMEM);
+
+ if (!drng_name) {
+ pr_err("DRNG name missing\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (!memcmp(drng_name, "stdrng", 6) ||
+ !memcmp(drng_name, "lrng", 4) ||
+ !memcmp(drng_name, "drbg", 4) ||
+ !memcmp(drng_name, "jitterentropy_rng", 17)) {
+ pr_err("Refusing to load the requested random number generator\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ lrng_drng_info = kzalloc(sizeof(*lrng_drng_info), GFP_KERNEL);
+ if (!lrng_drng_info)
+ return ERR_PTR(-ENOMEM);
+
+ kcapi_rng = crypto_alloc_rng(drng_name, 0, 0);
+ if (IS_ERR(kcapi_rng)) {
+ pr_err("DRNG %s cannot be allocated\n", drng_name);
+ ret = ERR_CAST(kcapi_rng);
+ goto free;
+ }
+
+ lrng_drng_info->kcapi_rng = kcapi_rng;
+
+ seedsize = crypto_rng_seedsize(kcapi_rng);
+ if (seedsize) {
+ struct crypto_shash *hash_tfm;
+
+ if (!seed_hash) {
+ switch (seedsize) {
+ case 32:
+ seed_hash = "sha256";
+ break;
+ case 48:
+ seed_hash = "sha384";
+ break;
+ case 64:
+ seed_hash = "sha512";
+ break;
+ default:
+ pr_err("Seed size %d cannot be processed\n",
+ seedsize);
+ goto dealloc;
+ }
+ }
+
+ hash_tfm = crypto_alloc_shash(seed_hash, 0, 0);
+ if (IS_ERR(hash_tfm)) {
+ ret = ERR_CAST(hash_tfm);
+ goto dealloc;
+ }
+
+ if (seedsize != crypto_shash_digestsize(hash_tfm)) {
+ pr_err("Seed hash output size not equal to DRNG seed size\n");
+ crypto_free_shash(hash_tfm);
+ ret = ERR_PTR(-EINVAL);
+ goto dealloc;
+ }
+
+ lrng_drng_info->hash_tfm = hash_tfm;
+
+ pr_info("Seed hash %s allocated\n", seed_hash);
+ }
+
+ rv = lrng_kcapi_drng_seed_helper(lrng_drng_info, (u8 *)(&time),
+ sizeof(time));
+ if (rv) {
+ ret = ERR_PTR(rv);
+ goto dealloc;
+ }
+
+ pr_info("Kernel crypto API DRNG %s allocated\n", drng_name);
+
+ return lrng_drng_info;
+
+dealloc:
+ crypto_free_rng(kcapi_rng);
+free:
+ kfree(lrng_drng_info);
+ return ret;
+}
+
+static void lrng_kcapi_drng_dealloc(void *drng)
+{
+ struct lrng_drng_info *lrng_drng_info = (struct lrng_drng_info *)drng;
+ struct crypto_rng *kcapi_rng = lrng_drng_info->kcapi_rng;
+
+ crypto_free_rng(kcapi_rng);
+ if (lrng_drng_info->hash_tfm)
+ crypto_free_shash(lrng_drng_info->hash_tfm);
+ kfree(lrng_drng_info);
+ pr_info("DRNG %s deallocated\n", drng_name);
+}
+
+static const char *lrng_kcapi_drng_name(void)
+{
+ return drng_name;
+}
+
+const struct lrng_drng_cb lrng_kcapi_drng_cb = {
+ .drng_name = lrng_kcapi_drng_name,
+ .drng_alloc = lrng_kcapi_drng_alloc,
+ .drng_dealloc = lrng_kcapi_drng_dealloc,
+ .drng_seed = lrng_kcapi_drng_seed_helper,
+ .drng_generate = lrng_kcapi_drng_generate_helper,
+};
+
+#ifndef CONFIG_LRNG_DFLT_DRNG_KCAPI
+static int __init lrng_kcapi_init(void)
+{
+ return lrng_set_drng_cb(&lrng_kcapi_drng_cb);
+}
+static void __exit lrng_kcapi_exit(void)
+{
+ lrng_set_drng_cb(NULL);
+}
+
+late_initcall(lrng_kcapi_init);
+module_exit(lrng_kcapi_exit);
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_AUTHOR("Stephan Mueller <smueller@chronox.de>");
+MODULE_DESCRIPTION("Entropy Source and DRNG Manager - kernel crypto API DRNG backend");
+#endif /* CONFIG_LRNG_DFLT_DRNG_KCAPI */
diff --git a/drivers/char/lrng/lrng_drng_kcapi.h b/drivers/char/lrng/lrng_drng_kcapi.h
new file mode 100644
index 000000000000..5db25aaf830c
--- /dev/null
+++ b/drivers/char/lrng/lrng_drng_kcapi.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause */
+/*
+ * LRNG kernel crypto API DRNG definition
+ *
+ * Copyright (C) 2022, Stephan Mueller <smueller@chronox.de>
+ */
+
+#ifndef _LRNG_KCAPI_DRNG_H
+#define _LRNG_KCAPI_DRNG_H
+
+extern const struct lrng_drng_cb lrng_kcapi_drng_cb;
+
+#endif /* _LRNG_KCAPI_DRNG_H */
diff --git a/drivers/char/lrng/lrng_drng_mgr.c b/drivers/char/lrng/lrng_drng_mgr.c
new file mode 100644
index 000000000000..69ad26431ac2
--- /dev/null
+++ b/drivers/char/lrng/lrng_drng_mgr.c
@@ -0,0 +1,742 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause
+/*
+ * LRNG DRNG management
+ *
+ * Copyright (C) 2022, Stephan Mueller <smueller@chronox.de>
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/lrng.h>
+#include <linux/fips.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+
+#include "lrng_drng_atomic.h"
+#include "lrng_drng_chacha20.h"
+#include "lrng_drng_drbg.h"
+#include "lrng_drng_kcapi.h"
+#include "lrng_drng_mgr.h"
+#include "lrng_es_aux.h"
+#include "lrng_es_mgr.h"
+#include "lrng_interface_random_kernel.h"
+#include "lrng_numa.h"
+#include "lrng_sha.h"
+
+/*
+ * Maximum number of seconds between reseed intervals of the DRNG. Note,
+ * this is enforced with the next request of random numbers from the
+ * DRNG. Setting this value to zero implies a reseeding attempt before every
+ * generated random number.
+ */
+int lrng_drng_reseed_max_time = 600;
+
+/*
+ * Is the LRNG available for general-purpose use (i.e. is at least
+ * lrng_drng_init fully allocated)?
+ */
+static atomic_t lrng_avail = ATOMIC_INIT(0);
+
+/* Guard protecting all crypto callback update operation of all DRNGs. */
+DEFINE_MUTEX(lrng_crypto_cb_update);
+
+/*
+ * Default hash callback that provides the crypto primitive right from the
+ * kernel start. It must not perform any memory allocation operation, but
+ * simply perform the hash calculation.
+ */
+const struct lrng_hash_cb *lrng_default_hash_cb = &lrng_sha_hash_cb;
+
+/*
+ * Default DRNG callback that provides the crypto primitive which is
+ * allocated during the late kernel boot stage. So, it is permissible for
+ * the callback to perform memory allocation operations.
+ */
+const struct lrng_drng_cb *lrng_default_drng_cb =
+#if defined(CONFIG_LRNG_DFLT_DRNG_CHACHA20)
+ &lrng_cc20_drng_cb;
+#elif defined(CONFIG_LRNG_DFLT_DRNG_DRBG)
+ &lrng_drbg_cb;
+#elif defined(CONFIG_LRNG_DFLT_DRNG_KCAPI)
+ &lrng_kcapi_drng_cb;
+#else
+#error "Unknown default DRNG selected"
+#endif
+
+/* DRNG for non-atomic use cases */
+static struct lrng_drng lrng_drng_init = {
+ LRNG_DRNG_STATE_INIT(lrng_drng_init, NULL, NULL, NULL,
+ &lrng_sha_hash_cb),
+ .lock = __MUTEX_INITIALIZER(lrng_drng_init.lock),
+};
+
+/* Prediction-resistance DRNG: only deliver as much data as received entropy */
+static struct lrng_drng lrng_drng_pr = {
+ LRNG_DRNG_STATE_INIT(lrng_drng_pr, NULL, NULL, NULL,
+ &lrng_sha_hash_cb),
+ .lock = __MUTEX_INITIALIZER(lrng_drng_pr.lock),
+};
+
+static u32 max_wo_reseed = LRNG_DRNG_MAX_WITHOUT_RESEED;
+#ifdef CONFIG_LRNG_RUNTIME_MAX_WO_RESEED_CONFIG
+module_param(max_wo_reseed, uint, 0444);
+MODULE_PARM_DESC(max_wo_reseed,
+ "Maximum number of DRNG generate operation without full reseed\n");
+#endif
+
+static bool force_seeding = true;
+#ifdef CONFIG_LRNG_RUNTIME_FORCE_SEEDING_DISABLE
+module_param(force_seeding, bool, 0444);
+MODULE_PARM_DESC(force_seeding,
+ "Allow disabling of the forced seeding when insufficient entropy is available\n");
+#endif
+
+/* Wait queue to wait until the LRNG is initialized - can freely be used */
+DECLARE_WAIT_QUEUE_HEAD(lrng_init_wait);
+
+/********************************** Helper ************************************/
+
+bool lrng_get_available(void)
+{
+ return likely(atomic_read(&lrng_avail));
+}
+
+struct lrng_drng *lrng_drng_init_instance(void)
+{
+ return &lrng_drng_init;
+}
+
+struct lrng_drng *lrng_drng_pr_instance(void)
+{
+ return &lrng_drng_pr;
+}
+
+struct lrng_drng *lrng_drng_node_instance(void)
+{
+ struct lrng_drng **lrng_drng = lrng_drng_instances();
+ int node = numa_node_id();
+
+ if (lrng_drng && lrng_drng[node])
+ return lrng_drng[node];
+
+ return lrng_drng_init_instance();
+}
+
+void lrng_drng_reset(struct lrng_drng *drng)
+{
+ /* Ensure reseed during next call */
+ atomic_set(&drng->requests, 1);
+ atomic_set(&drng->requests_since_fully_seeded, 0);
+ drng->last_seeded = jiffies;
+ drng->fully_seeded = false;
+ /* Do not set force, as this flag is used for the emergency reseeding */
+ drng->force_reseed = false;
+ pr_debug("reset DRNG\n");
+}
+
+/* Initialize the DRNG, except the mutex lock */
+int lrng_drng_alloc_common(struct lrng_drng *drng,
+ const struct lrng_drng_cb *drng_cb)
+{
+ if (!drng || !drng_cb)
+ return -EINVAL;
+ if (!IS_ERR_OR_NULL(drng->drng))
+ return 0;
+
+ drng->drng_cb = drng_cb;
+ drng->drng = drng_cb->drng_alloc(LRNG_DRNG_SECURITY_STRENGTH_BYTES);
+ if (IS_ERR(drng->drng))
+ return -PTR_ERR(drng->drng);
+
+ lrng_drng_reset(drng);
+ return 0;
+}
+
+/* Initialize the default DRNG during boot and perform its seeding */
+int lrng_drng_initalize(void)
+{
+ int ret;
+
+ if (lrng_get_available())
+ return 0;
+
+ /* Catch programming error */
+ WARN_ON(lrng_drng_init.hash_cb != lrng_default_hash_cb);
+
+ mutex_lock(&lrng_drng_init.lock);
+ if (lrng_get_available()) {
+ mutex_unlock(&lrng_drng_init.lock);
+ return 0;
+ }
+
+ /* Initialize the PR DRNG inside init lock as it guards lrng_avail. */
+ mutex_lock(&lrng_drng_pr.lock);
+ ret = lrng_drng_alloc_common(&lrng_drng_pr, lrng_default_drng_cb);
+ mutex_unlock(&lrng_drng_pr.lock);
+
+ if (!ret) {
+ ret = lrng_drng_alloc_common(&lrng_drng_init,
+ lrng_default_drng_cb);
+ if (!ret)
+ atomic_set(&lrng_avail, 1);
+ }
+ mutex_unlock(&lrng_drng_init.lock);
+ if (ret)
+ return ret;
+
+ pr_debug("LRNG for general use is available\n");
+
+ /* Seed the DRNG with any entropy available */
+ if (lrng_pool_trylock()) {
+ pr_info("Initial DRNG initialized triggering first seeding\n");
+ lrng_drng_seed_work(NULL);
+ } else {
+ pr_info("Initial DRNG initialized without seeding\n");
+ }
+
+ return 0;
+}
+
+static int __init lrng_drng_make_available(void)
+{
+ return lrng_drng_initalize();
+}
+late_initcall(lrng_drng_make_available);
+
+bool lrng_sp80090c_compliant(void)
+{
+ /* SP800-90C compliant oversampling is only requested in FIPS mode */
+ return fips_enabled;
+}
+
+/************************* Random Number Generation ***************************/
+
+/* Inject a data buffer into the DRNG - caller must hold its lock */
+void lrng_drng_inject(struct lrng_drng *drng, const u8 *inbuf, u32 inbuflen,
+ bool fully_seeded, const char *drng_type)
+{
+ BUILD_BUG_ON(LRNG_DRNG_RESEED_THRESH > INT_MAX);
+ pr_debug("seeding %s DRNG with %u bytes\n", drng_type, inbuflen);
+ if (drng->drng_cb->drng_seed(drng->drng, inbuf, inbuflen) < 0) {
+ pr_warn("seeding of %s DRNG failed\n", drng_type);
+ drng->force_reseed = true;
+ } else {
+ int gc = LRNG_DRNG_RESEED_THRESH - atomic_read(&drng->requests);
+
+ pr_debug("%s DRNG stats since last seeding: %lu secs; generate calls: %d\n",
+ drng_type,
+ (time_after(jiffies, drng->last_seeded) ?
+ (jiffies - drng->last_seeded) : 0) / HZ, gc);
+
+ /* Count the numbers of generate ops since last fully seeded */
+ if (fully_seeded)
+ atomic_set(&drng->requests_since_fully_seeded, 0);
+ else
+ atomic_add(gc, &drng->requests_since_fully_seeded);
+
+ drng->last_seeded = jiffies;
+ atomic_set(&drng->requests, LRNG_DRNG_RESEED_THRESH);
+ drng->force_reseed = false;
+
+ if (!drng->fully_seeded) {
+ drng->fully_seeded = fully_seeded;
+ if (drng->fully_seeded)
+ pr_debug("%s DRNG fully seeded\n", drng_type);
+ }
+ }
+}
+
+/*
+ * Perform the seeding of the DRNG with data from the entropy sources.
+ * The function returns the entropy injected into the DRNG in bits.
+ */
+static u32 lrng_drng_seed_es_nolock(struct lrng_drng *drng, bool init_ops,
+ const char *drng_type)
+{
+ struct entropy_buf seedbuf __aligned(LRNG_KCAPI_ALIGN),
+ collected_seedbuf;
+ u32 collected_entropy = 0;
+ unsigned int i, num_es_delivered = 0;
+ bool forced = drng->force_reseed;
+
+ for_each_lrng_es(i)
+ collected_seedbuf.e_bits[i] = 0;
+
+ do {
+ /* Count the number of ES which delivered entropy */
+ num_es_delivered = 0;
+
+ if (collected_entropy)
+ pr_debug("Force fully seeding level for %s DRNG by repeatedly pull entropy from available entropy sources\n",
+ drng_type);
+
+ lrng_fill_seed_buffer(&seedbuf,
+ lrng_get_seed_entropy_osr(drng->fully_seeded),
+ forced && !drng->fully_seeded);
+
+ collected_entropy += lrng_entropy_rate_eb(&seedbuf);
+
+ /* Sum iterations up. */
+ for_each_lrng_es(i) {
+ collected_seedbuf.e_bits[i] += seedbuf.e_bits[i];
+ num_es_delivered += !!seedbuf.e_bits[i];
+ }
+
+ lrng_drng_inject(drng, (u8 *)&seedbuf, sizeof(seedbuf),
+ lrng_fully_seeded(drng->fully_seeded,
+ collected_entropy,
+ &collected_seedbuf),
+ drng_type);
+
+ /*
+ * Set the seeding state of the LRNG
+ *
+ * Do not call lrng_init_ops(seedbuf) here as the atomic DRNG
+ * does not serve common users.
+ */
+ if (init_ops)
+ lrng_init_ops(&collected_seedbuf);
+
+ /*
+ * Emergency reseeding: If we reached the minimum seed threshold
+ * multiple times but never the fully seeded level while the entropy
+ * sources still deliver entropy, keep pulling entropy until at
+ * least one DRNG reaches the fully seeded level. This operation is
+ * stopped once the ES no longer deliver entropy, as the fully
+ * seeded level cannot be reached in that case.
+ *
+ * The emergency reseeding implies that the consecutively injected
+ * entropy can be added up. This is applicable because the entire
+ * operation is atomic, which means that the DRNG is not producing
+ * data while this is ongoing.
+ */
+ } while (force_seeding && forced && !drng->fully_seeded &&
+ num_es_delivered >= (lrng_ntg1_2022_compliant() ? 2 : 1));
+
+ memzero_explicit(&seedbuf, sizeof(seedbuf));
+
+ return collected_entropy;
+}
+
+static void lrng_drng_seed_es(struct lrng_drng *drng)
+{
+ mutex_lock(&drng->lock);
+ lrng_drng_seed_es_nolock(drng, true, "regular");
+ mutex_unlock(&drng->lock);
+}
+
+static void lrng_drng_seed(struct lrng_drng *drng)
+{
+ BUILD_BUG_ON(LRNG_MIN_SEED_ENTROPY_BITS >
+ LRNG_DRNG_SECURITY_STRENGTH_BITS);
+
+ /* (Re-)Seed DRNG */
+ lrng_drng_seed_es(drng);
+ /* (Re-)Seed atomic DRNG from regular DRNG */
+ lrng_drng_atomic_seed_drng(drng);
+}
+
+static void lrng_drng_seed_work_one(struct lrng_drng *drng, u32 node)
+{
+ pr_debug("reseed triggered by system events for DRNG on NUMA node %d\n",
+ node);
+ lrng_drng_seed(drng);
+ if (drng->fully_seeded) {
+ /* Prevent reseed storm */
+ drng->last_seeded += node * 100 * HZ;
+ }
+}
+
+/*
+ * DRNG reseed trigger: Kernel thread handler triggered by the schedule_work()
+ */
+static void __lrng_drng_seed_work(bool force)
+{
+ struct lrng_drng **lrng_drng;
+ u32 node;
+
+ /*
+ * If the DRNG is not yet initialized, let us try to seed the atomic
+ * DRNG.
+ */
+ if (!lrng_get_available()) {
+ struct lrng_drng *atomic;
+ unsigned long flags;
+
+ if (wq_has_sleeper(&lrng_init_wait)) {
+ lrng_init_ops(NULL);
+ return;
+ }
+ atomic = lrng_get_atomic();
+ if (!atomic || atomic->fully_seeded)
+ return;
+
+ atomic->force_reseed |= force;
+ spin_lock_irqsave(&atomic->spin_lock, flags);
+ lrng_drng_seed_es_nolock(atomic, false, "atomic");
+ spin_unlock_irqrestore(&atomic->spin_lock, flags);
+
+ return;
+ }
+
+ lrng_drng = lrng_drng_instances();
+ if (lrng_drng) {
+ for_each_online_node(node) {
+ struct lrng_drng *drng = lrng_drng[node];
+
+ if (drng && !drng->fully_seeded) {
+ drng->force_reseed |= force;
+ lrng_drng_seed_work_one(drng, node);
+ return;
+ }
+ }
+ } else {
+ if (!lrng_drng_init.fully_seeded) {
+ lrng_drng_init.force_reseed |= force;
+ lrng_drng_seed_work_one(&lrng_drng_init, 0);
+ return;
+ }
+ }
+
+ if (!lrng_drng_pr.fully_seeded) {
+ lrng_drng_pr.force_reseed |= force;
+ lrng_drng_seed_work_one(&lrng_drng_pr, 0);
+ return;
+ }
+
+ lrng_pool_all_numa_nodes_seeded(true);
+}
+
+void lrng_drng_seed_work(struct work_struct *dummy)
+{
+ __lrng_drng_seed_work(false);
+
+ /* Allow the seeding operation to be called again */
+ lrng_pool_unlock();
+}
+
+/* Force all DRNGs to reseed before next generation */
+void lrng_drng_force_reseed(void)
+{
+ struct lrng_drng **lrng_drng = lrng_drng_instances();
+ u32 node;
+
+ /*
+ * If the initial DRNG is over the reseed threshold, allow a forced
+ * reseed only for the initial DRNG as this is the fallback for all. It
+ * must be kept seeded before all others to keep the LRNG operational.
+ */
+ if (!lrng_drng ||
+ (atomic_read_u32(&lrng_drng_init.requests_since_fully_seeded) >
+ LRNG_DRNG_RESEED_THRESH)) {
+ lrng_drng_init.force_reseed = lrng_drng_init.fully_seeded;
+ pr_debug("force reseed of initial DRNG\n");
+ return;
+ }
+ for_each_online_node(node) {
+ struct lrng_drng *drng = lrng_drng[node];
+
+ if (!drng)
+ continue;
+
+ drng->force_reseed = drng->fully_seeded;
+ pr_debug("force reseed of DRNG on node %u\n", node);
+ }
+ lrng_drng_atomic_force_reseed();
+}
+EXPORT_SYMBOL(lrng_drng_force_reseed);
+
+static bool lrng_drng_must_reseed(struct lrng_drng *drng)
+{
+ return (atomic_dec_and_test(&drng->requests) ||
+ drng->force_reseed ||
+ time_after(jiffies,
+ drng->last_seeded + lrng_drng_reseed_max_time * HZ));
+}
+
+/*
+ * lrng_drng_get() - Get random data out of the DRNG which is reseeded
+ * frequently.
+ *
+ * @drng: DRNG instance
+ * @outbuf: buffer for storing random data
+ * @outbuflen: length of outbuf
+ *
+ * Return:
+ * * < 0 in error case (DRNG generation or update failed)
+ * * >=0 returning the returned number of bytes
+ */
+int lrng_drng_get(struct lrng_drng *drng, u8 *outbuf, u32 outbuflen)
+{
+ u32 processed = 0;
+ bool pr = (drng == &lrng_drng_pr);
+
+ if (!outbuf || !outbuflen)
+ return 0;
+
+ if (!lrng_get_available())
+ return -EOPNOTSUPP;
+
+ outbuflen = min_t(size_t, outbuflen, INT_MAX);
+
+ /* If DRNG operated without proper reseed for too long, block LRNG */
+ BUILD_BUG_ON(LRNG_DRNG_MAX_WITHOUT_RESEED < LRNG_DRNG_RESEED_THRESH);
+ if (atomic_read_u32(&drng->requests_since_fully_seeded) > max_wo_reseed)
+ lrng_unset_fully_seeded(drng);
+
+ while (outbuflen) {
+ u32 todo = min_t(u32, outbuflen, LRNG_DRNG_MAX_REQSIZE);
+ int ret;
+
+ /* In normal operation, check whether to reseed */
+ if (!pr && lrng_drng_must_reseed(drng)) {
+ if (!lrng_pool_trylock()) {
+ drng->force_reseed = true;
+ } else {
+ lrng_drng_seed(drng);
+ lrng_pool_unlock();
+ }
+ }
+
+ mutex_lock(&drng->lock);
+
+ if (pr) {
+ /* If async reseed did not deliver entropy, try now */
+ if (!drng->fully_seeded) {
+ u32 coll_ent_bits;
+
+ /* If we cannot get the pool lock, try again. */
+ if (!lrng_pool_trylock()) {
+ mutex_unlock(&drng->lock);
+ continue;
+ }
+
+ coll_ent_bits = lrng_drng_seed_es_nolock(
+ drng, true, "regular");
+
+ lrng_pool_unlock();
+
+ /* If no new entropy was received, stop now. */
+ if (!coll_ent_bits) {
+ mutex_unlock(&drng->lock);
+ goto out;
+ }
+
+ /* Produce no more data than received entropy */
+ todo = min_t(u32, todo, coll_ent_bits >> 3);
+ }
+
+ /* Do not produce more than DRNG security strength */
+ todo = min_t(u32, todo, lrng_security_strength() >> 3);
+ }
+ ret = drng->drng_cb->drng_generate(drng->drng,
+ outbuf + processed, todo);
+
+ mutex_unlock(&drng->lock);
+ if (ret <= 0) {
+ pr_warn("getting random data from DRNG failed (%d)\n",
+ ret);
+ return -EFAULT;
+ }
+ processed += ret;
+ outbuflen -= ret;
+
+ if (pr) {
+ /* Force the async reseed for PR DRNG */
+ lrng_unset_fully_seeded(drng);
+ if (outbuflen)
+ cond_resched();
+ }
+ }
+
+out:
+ return processed;
+}
+
+int lrng_drng_get_sleep(u8 *outbuf, u32 outbuflen, bool pr)
+{
+ struct lrng_drng **lrng_drng = lrng_drng_instances();
+ struct lrng_drng *drng = &lrng_drng_init;
+ int ret, node = numa_node_id();
+
+ might_sleep();
+
+ if (pr)
+ drng = &lrng_drng_pr;
+ else if (lrng_drng && lrng_drng[node] && lrng_drng[node]->fully_seeded)
+ drng = lrng_drng[node];
+
+ ret = lrng_drng_initalize();
+ if (ret)
+ return ret;
+
+ return lrng_drng_get(drng, outbuf, outbuflen);
+}
+
+/* Reset LRNG such that all existing entropy is gone */
+static void _lrng_reset(struct work_struct *work)
+{
+ struct lrng_drng **lrng_drng = lrng_drng_instances();
+
+ if (!lrng_drng) {
+ mutex_lock(&lrng_drng_init.lock);
+ lrng_drng_reset(&lrng_drng_init);
+ mutex_unlock(&lrng_drng_init.lock);
+ } else {
+ u32 node;
+
+ for_each_online_node(node) {
+ struct lrng_drng *drng = lrng_drng[node];
+
+ if (!drng)
+ continue;
+ mutex_lock(&drng->lock);
+ lrng_drng_reset(drng);
+ mutex_unlock(&drng->lock);
+ }
+ }
+
+ mutex_lock(&lrng_drng_pr.lock);
+ lrng_drng_reset(&lrng_drng_pr);
+ mutex_unlock(&lrng_drng_pr.lock);
+
+ lrng_drng_atomic_reset();
+ lrng_set_entropy_thresh(LRNG_INIT_ENTROPY_BITS);
+
+ lrng_reset_state();
+}
+
+static DECLARE_WORK(lrng_reset_work, _lrng_reset);
+
+void lrng_reset(void)
+{
+ schedule_work(&lrng_reset_work);
+}
+
+/******************* Generic LRNG kernel output interfaces ********************/
+
+void lrng_force_fully_seeded(void)
+{
+ if (lrng_pool_all_numa_nodes_seeded_get())
+ return;
+
+ lrng_pool_lock();
+ __lrng_drng_seed_work(true);
+ lrng_pool_unlock();
+}
+
+static int lrng_drng_sleep_while_not_all_nodes_seeded(unsigned int nonblock)
+{
+ lrng_force_fully_seeded();
+ if (lrng_pool_all_numa_nodes_seeded_get())
+ return 0;
+ if (nonblock)
+ return -EAGAIN;
+ wait_event_interruptible(lrng_init_wait,
+ lrng_pool_all_numa_nodes_seeded_get());
+ return 0;
+}
+
+int lrng_drng_sleep_while_nonoperational(int nonblock)
+{
+ lrng_force_fully_seeded();
+ if (likely(lrng_state_operational()))
+ return 0;
+ if (nonblock)
+ return -EAGAIN;
+ return wait_event_interruptible(lrng_init_wait,
+ lrng_state_operational());
+}
+
+int lrng_drng_sleep_while_non_min_seeded(void)
+{
+ lrng_force_fully_seeded();
+ if (likely(lrng_state_min_seeded()))
+ return 0;
+ return wait_event_interruptible(lrng_init_wait,
+ lrng_state_min_seeded());
+}
+
+ssize_t lrng_get_seed(u64 *buf, size_t nbytes, unsigned int flags)
+{
+ struct entropy_buf *eb = (struct entropy_buf *)(buf + 2);
+ u64 buflen = sizeof(struct entropy_buf) + 2 * sizeof(u64);
+ u64 collected_bits = 0;
+ int ret;
+
+ /* Ensure buffer is aligned as required */
+ BUILD_BUG_ON(sizeof(buflen) > LRNG_KCAPI_ALIGN);
+ if (nbytes < sizeof(buflen))
+ return -EINVAL;
+
+ /* Write buffer size into first word */
+ buf[0] = buflen;
+ if (nbytes < buflen)
+ return -EMSGSIZE;
+
+ ret = lrng_drng_sleep_while_not_all_nodes_seeded(
+ flags & LRNG_GET_SEED_NONBLOCK);
+ if (ret)
+ return ret;
+
+ /* Try to get the pool lock and sleep on it to get it. */
+ lrng_pool_lock();
+
+ /* If an LRNG DRNG becomes unseeded, give this DRNG precedence. */
+ if (!lrng_pool_all_numa_nodes_seeded_get()) {
+ lrng_pool_unlock();
+ return 0;
+ }
+
+ /*
+ * Try to get seed data - a rarely used busyloop is cheaper than a wait
+ * queue that is constantly woken up by the hot code path of
+ * lrng_init_ops.
+ */
+ for (;;) {
+ lrng_fill_seed_buffer(eb,
+ lrng_get_seed_entropy_osr(flags &
+ LRNG_GET_SEED_FULLY_SEEDED),
+ false);
+ collected_bits = lrng_entropy_rate_eb(eb);
+
+ /* Break the collection loop if we got entropy, ... */
+ if (collected_bits ||
+ /* ... a DRNG becomes unseeded, give DRNG precedence, ... */
+ !lrng_pool_all_numa_nodes_seeded_get() ||
+ /* ... if the caller does not want a blocking behavior. */
+ (flags & LRNG_GET_SEED_NONBLOCK))
+ break;
+
+ schedule();
+ }
+
+ lrng_pool_unlock();
+
+ /* Write collected entropy size into second word */
+ buf[1] = collected_bits;
+
+ return (ssize_t)buflen;
+}
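+
+/*
+ * Summary of the buffer layout produced above:
+ *
+ *	buf[0]  - size of the filled buffer in bytes
+ *	buf[1]  - amount of collected entropy in bits
+ *	buf + 2 - struct entropy_buf holding the seed data
+ */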
+
+void lrng_get_random_bytes_full(void *buf, int nbytes)
+{
+ lrng_drng_sleep_while_nonoperational(0);
+ lrng_drng_get_sleep((u8 *)buf, (u32)nbytes, false);
+}
+EXPORT_SYMBOL(lrng_get_random_bytes_full);
+
+void lrng_get_random_bytes_min(void *buf, int nbytes)
+{
+ lrng_drng_sleep_while_non_min_seeded();
+ lrng_drng_get_sleep((u8 *)buf, (u32)nbytes, false);
+}
+EXPORT_SYMBOL(lrng_get_random_bytes_min);
+
+int lrng_get_random_bytes_pr(void *buf, int nbytes)
+{
+ lrng_drng_sleep_while_nonoperational(0);
+ return lrng_drng_get_sleep((u8 *)buf, (u32)nbytes, true);
+}
+EXPORT_SYMBOL(lrng_get_random_bytes_pr);
diff --git a/drivers/char/lrng/lrng_drng_mgr.h b/drivers/char/lrng/lrng_drng_mgr.h
new file mode 100644
index 000000000000..8b1de38275cd
--- /dev/null
+++ b/drivers/char/lrng/lrng_drng_mgr.h
@@ -0,0 +1,86 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause */
+/*
+ * Copyright (C) 2022, Stephan Mueller <smueller@chronox.de>
+ */
+
+#ifndef _LRNG_DRNG_H
+#define _LRNG_DRNG_H
+
+#include <linux/mutex.h>
+#include <linux/spinlock.h>
+#include <linux/workqueue.h>
+
+#include "lrng_definitions.h"
+
+extern struct wait_queue_head lrng_init_wait;
+extern int lrng_drng_reseed_max_time;
+extern struct mutex lrng_crypto_cb_update;
+extern const struct lrng_drng_cb *lrng_default_drng_cb;
+extern const struct lrng_hash_cb *lrng_default_hash_cb;
+
+/* DRNG state handle */
+struct lrng_drng {
+ void *drng; /* DRNG handle */
+ void *hash; /* Hash handle */
+ const struct lrng_drng_cb *drng_cb; /* DRNG callbacks */
+ const struct lrng_hash_cb *hash_cb; /* Hash callbacks */
+ atomic_t requests; /* Number of DRNG requests */
+ atomic_t requests_since_fully_seeded; /* Number DRNG requests since
+ * last fully seeded
+ */
+ unsigned long last_seeded; /* Last time it was seeded */
+ bool fully_seeded; /* Is DRNG fully seeded? */
+ bool force_reseed; /* Force a reseed */
+
+ rwlock_t hash_lock; /* Lock hash_cb replacement */
+ /* Lock write operations on DRNG state, DRNG replacement of drng_cb */
+ struct mutex lock; /* Non-atomic DRNG operation */
+ spinlock_t spin_lock; /* Atomic DRNG operation */
+};
+
+#define LRNG_DRNG_STATE_INIT(x, d, h, d_cb, h_cb) \
+ .drng = d, \
+ .hash = h, \
+ .drng_cb = d_cb, \
+ .hash_cb = h_cb, \
+ .requests = ATOMIC_INIT(LRNG_DRNG_RESEED_THRESH),\
+ .requests_since_fully_seeded = ATOMIC_INIT(0), \
+ .last_seeded = 0, \
+ .fully_seeded = false, \
+ .force_reseed = true, \
+ .hash_lock = __RW_LOCK_UNLOCKED(x.hash_lock)
+
+struct lrng_drng *lrng_drng_init_instance(void);
+struct lrng_drng *lrng_drng_pr_instance(void);
+struct lrng_drng *lrng_drng_node_instance(void);
+
+void lrng_reset(void);
+int lrng_drng_alloc_common(struct lrng_drng *drng,
+ const struct lrng_drng_cb *crypto_cb);
+int lrng_drng_initalize(void);
+bool lrng_sp80090c_compliant(void);
+bool lrng_get_available(void);
+void lrng_drng_reset(struct lrng_drng *drng);
+void lrng_drng_inject(struct lrng_drng *drng, const u8 *inbuf, u32 inbuflen,
+ bool fully_seeded, const char *drng_type);
+int lrng_drng_get(struct lrng_drng *drng, u8 *outbuf, u32 outbuflen);
+int lrng_drng_sleep_while_nonoperational(int nonblock);
+int lrng_drng_sleep_while_non_min_seeded(void);
+int lrng_drng_get_sleep(u8 *outbuf, u32 outbuflen, bool pr);
+void lrng_drng_seed_work(struct work_struct *dummy);
+void lrng_drng_force_reseed(void);
+void lrng_force_fully_seeded(void);
+
+static inline u32 lrng_compress_osr(void)
+{
+ return lrng_sp80090c_compliant() ? LRNG_OVERSAMPLE_ES_BITS : 0;
+}
+
+static inline u32 lrng_reduce_by_osr(u32 entropy_bits)
+{
+ u32 osr_bits = lrng_compress_osr();
+
+ return (entropy_bits >= osr_bits) ? (entropy_bits - osr_bits) : 0;
+}
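+
+/*
+ * Worked example (the actual LRNG_OVERSAMPLE_ES_BITS value is defined in
+ * lrng_definitions.h and only assumed here): with an oversampling rate of
+ * 64 bits in SP800-90C mode, lrng_reduce_by_osr(256) returns 192; outside
+ * of that mode the input passes through unchanged.
+ */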
+
+#endif /* _LRNG_DRNG_H */
diff --git a/drivers/char/lrng/lrng_es_aux.c b/drivers/char/lrng/lrng_es_aux.c
new file mode 100644
index 000000000000..245bc829998b
--- /dev/null
+++ b/drivers/char/lrng/lrng_es_aux.c
@@ -0,0 +1,335 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause
+/*
+ * LRNG Slow Entropy Source: Auxiliary entropy pool
+ *
+ * Copyright (C) 2022, Stephan Mueller <smueller@chronox.de>
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/lrng.h>
+
+#include "lrng_es_aux.h"
+#include "lrng_es_mgr.h"
+#include "lrng_sysctl.h"
+
+/*
+ * This is the auxiliary pool
+ *
+ * The aux pool array is aligned to 8 bytes to accommodate the kernel crypto API
+ * cipher implementations of the hash functions used to read the pool: for some
+ * accelerated implementations, we need an alignment to avoid a realignment
+ * which involves memcpy(). The alignment to 8 bytes should satisfy all crypto
+ * implementations.
+ */
+struct lrng_pool {
+ u8 aux_pool[LRNG_POOL_SIZE]; /* Aux pool: digest state */
+ atomic_t aux_entropy_bits;
+ atomic_t digestsize; /* Digest size of used hash */
+ bool initialized; /* Aux pool initialized? */
+
+ /* Serialize read of entropy pool and update of aux pool */
+ spinlock_t lock;
+};
+
+static struct lrng_pool lrng_pool __aligned(LRNG_KCAPI_ALIGN) = {
+ .aux_entropy_bits = ATOMIC_INIT(0),
+ .digestsize = ATOMIC_INIT(LRNG_ATOMIC_DIGEST_SIZE),
+ .initialized = false,
+ .lock = __SPIN_LOCK_UNLOCKED(lrng_pool.lock)
+};
+
+/********************************** Helper ***********************************/
+
+/* Entropy in bits present in aux pool */
+static u32 lrng_aux_avail_entropy(u32 __unused)
+{
+ /* Cap available entropy with max entropy */
+ u32 avail_bits = min_t(u32, lrng_get_digestsize(),
+ atomic_read_u32(&lrng_pool.aux_entropy_bits));
+
+ /* Consider oversampling rate due to aux pool conditioning */
+ return lrng_reduce_by_osr(avail_bits);
+}
+
+/* Set the digest size of the used hash in bytes */
+static void lrng_set_digestsize(u32 digestsize)
+{
+ struct lrng_pool *pool = &lrng_pool;
+ u32 ent_bits = atomic_xchg_relaxed(&pool->aux_entropy_bits, 0),
+ old_digestsize = lrng_get_digestsize();
+
+ atomic_set(&lrng_pool.digestsize, digestsize);
+
+ /*
+ * Update the write wakeup threshold which must not be larger
+ * than the digest size of the current conditioning hash.
+ */
+ digestsize = lrng_reduce_by_osr(digestsize << 3);
+ lrng_sysctl_update_max_write_thresh(digestsize);
+ lrng_write_wakeup_bits = digestsize;
+
+ /*
+ * In case the new digest is larger than the old one, cap the available
+ * entropy to the old message digest used to process the existing data.
+ */
+ ent_bits = min_t(u32, ent_bits, old_digestsize);
+ atomic_add(ent_bits, &pool->aux_entropy_bits);
+}
+
+static int __init lrng_init_wakeup_bits(void)
+{
+ u32 digestsize = lrng_reduce_by_osr(lrng_get_digestsize());
+
+ lrng_sysctl_update_max_write_thresh(digestsize);
+ lrng_write_wakeup_bits = digestsize;
+ return 0;
+}
+core_initcall(lrng_init_wakeup_bits);
+
+/* Obtain the digest size provided by the used hash in bits */
+u32 lrng_get_digestsize(void)
+{
+ return atomic_read_u32(&lrng_pool.digestsize) << 3;
+}
+
+/* Set entropy content in user-space controllable aux pool */
+void lrng_pool_set_entropy(u32 entropy_bits)
+{
+ atomic_set(&lrng_pool.aux_entropy_bits, entropy_bits);
+}
+
+static void lrng_aux_reset(void)
+{
+ lrng_pool_set_entropy(0);
+}
+
+/*
+ * Replace old with new hash for auxiliary pool handling
+ *
+ * Assumption: the caller must guarantee that the new_cb is available during the
+ * entire operation (e.g. it must hold the write lock against pointer updating).
+ */
+static int
+lrng_aux_switch_hash(struct lrng_drng *drng, int __unused,
+ const struct lrng_hash_cb *new_cb, void *new_hash,
+ const struct lrng_hash_cb *old_cb)
+{
+ struct lrng_drng *init_drng = lrng_drng_init_instance();
+ struct lrng_pool *pool = &lrng_pool;
+ struct shash_desc *shash = (struct shash_desc *)pool->aux_pool;
+ u8 digest[LRNG_MAX_DIGESTSIZE];
+ int ret;
+
+ if (!IS_ENABLED(CONFIG_LRNG_SWITCH))
+ return -EOPNOTSUPP;
+
+ if (unlikely(!pool->initialized))
+ return 0;
+
+ /* We only switch if the processed DRNG is the initial DRNG. */
+ if (init_drng != drng)
+ return 0;
+
+ /* Get the aux pool hash with old digest ... */
+ ret = old_cb->hash_final(shash, digest) ?:
+ /* ... re-initialize the hash with the new digest ... */
+ new_cb->hash_init(shash, new_hash) ?:
+ /*
+ * ... feed the old hash into the new state. We may feed
+ * uninitialized memory into the new state, but this is not
+ * considered an issue and is even beneficial as it adds some
+ * more uncertainty here.
+ */
+ new_cb->hash_update(shash, digest, sizeof(digest));
+ if (!ret) {
+ lrng_set_digestsize(new_cb->hash_digestsize(new_hash));
+ pr_debug("Re-initialize aux entropy pool with hash %s\n",
+ new_cb->hash_name());
+ }
+
+ memzero_explicit(digest, sizeof(digest));
+ return ret;
+}
+
+/* Insert data into auxiliary pool by using the hash update function. */
+static int
+lrng_aux_pool_insert_locked(const u8 *inbuf, u32 inbuflen, u32 entropy_bits)
+{
+ struct lrng_pool *pool = &lrng_pool;
+ struct shash_desc *shash = (struct shash_desc *)pool->aux_pool;
+ struct lrng_drng *drng = lrng_drng_init_instance();
+ const struct lrng_hash_cb *hash_cb;
+ unsigned long flags;
+ void *hash;
+ int ret;
+
+ entropy_bits = min_t(u32, entropy_bits, inbuflen << 3);
+
+ read_lock_irqsave(&drng->hash_lock, flags);
+ hash_cb = drng->hash_cb;
+ hash = drng->hash;
+
+ if (unlikely(!pool->initialized)) {
+ ret = hash_cb->hash_init(shash, hash);
+ if (ret)
+ goto out;
+ pool->initialized = true;
+ }
+
+ ret = hash_cb->hash_update(shash, inbuf, inbuflen);
+ if (ret)
+ goto out;
+
+ /*
+ * Cap the available entropy to the hash output size compliant to
+ * SP800-90B section 3.1.5.1 table 1.
+ */
+ entropy_bits += atomic_read_u32(&pool->aux_entropy_bits);
+ atomic_set(&pool->aux_entropy_bits,
+ min_t(u32, entropy_bits,
+ hash_cb->hash_digestsize(hash) << 3));
+
+out:
+ read_unlock_irqrestore(&drng->hash_lock, flags);
+ return ret;
+}
+
+int lrng_pool_insert_aux(const u8 *inbuf, u32 inbuflen, u32 entropy_bits)
+{
+ struct lrng_pool *pool = &lrng_pool;
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&pool->lock, flags);
+ ret = lrng_aux_pool_insert_locked(inbuf, inbuflen, entropy_bits);
+ spin_unlock_irqrestore(&pool->lock, flags);
+
+ lrng_es_add_entropy();
+
+ return ret;
+}
+EXPORT_SYMBOL(lrng_pool_insert_aux);
+
+/************************* Get data from entropy pool *************************/
+
+/*
+ * Get auxiliary entropy pool and its entropy content for seed buffer.
+ * Caller must hold lrng_pool.lock.
+ * @outbuf: buffer to store data in with size requested_bits
+ * @requested_bits: Requested amount of entropy
+ * @return: amount of entropy in outbuf in bits.
+ */
+static u32 lrng_aux_get_pool(u8 *outbuf, u32 requested_bits)
+{
+ struct lrng_pool *pool = &lrng_pool;
+ struct shash_desc *shash = (struct shash_desc *)pool->aux_pool;
+ struct lrng_drng *drng = lrng_drng_init_instance();
+ const struct lrng_hash_cb *hash_cb;
+ unsigned long flags;
+ void *hash;
+ u32 collected_ent_bits, returned_ent_bits, unused_bits = 0,
+ digestsize, digestsize_bits, requested_bits_osr;
+ u8 aux_output[LRNG_MAX_DIGESTSIZE];
+
+ if (unlikely(!pool->initialized))
+ return 0;
+
+ read_lock_irqsave(&drng->hash_lock, flags);
+
+ hash_cb = drng->hash_cb;
+ hash = drng->hash;
+ digestsize = hash_cb->hash_digestsize(hash);
+ digestsize_bits = digestsize << 3;
+
+ /* Cap to maximum entropy that can ever be generated with given hash */
+ lrng_cap_requested(digestsize_bits, requested_bits);
+
+ /* Ensure that no more than the size of aux_pool can be requested */
+ requested_bits = min_t(u32, requested_bits, (LRNG_MAX_DIGESTSIZE << 3));
+ requested_bits_osr = requested_bits + lrng_compress_osr();
+
+ /* Cap entropy with entropy counter from aux pool and the used digest */
+ collected_ent_bits = min_t(u32, digestsize_bits,
+ atomic_xchg_relaxed(&pool->aux_entropy_bits, 0));
+
+ /* If we collected too much entropy, put the overflow back */
+ if (collected_ent_bits > requested_bits_osr) {
+ /* Amount of bits we collected too much */
+ unused_bits = collected_ent_bits - requested_bits_osr;
+ /* Put entropy back */
+ atomic_add(unused_bits, &pool->aux_entropy_bits);
+ /* Fix collected entropy */
+ collected_ent_bits = requested_bits_osr;
+ }
+
+ /* Apply oversampling: discount requested oversampling rate */
+ returned_ent_bits = lrng_reduce_by_osr(collected_ent_bits);
+
+ pr_debug("obtained %u bits by collecting %u bits of entropy from aux pool, %u bits of entropy remaining\n",
+ returned_ent_bits, collected_ent_bits, unused_bits);
+
+ /* Get the digest for the aux pool to be returned to the caller ... */
+ if (hash_cb->hash_final(shash, aux_output) ||
+ /*
+ * ... and re-initialize the aux state. Do not add the aux pool
+ * digest for backward secrecy as it will be added with the
+ * insertion of the complete seed buffer after it has been filled.
+ */
+ hash_cb->hash_init(shash, hash)) {
+ returned_ent_bits = 0;
+ } else {
+ /*
+ * Do not truncate the output size exactly to collected_ent_bits
+ * as the aux pool may contain data that is not credited with
+ * entropy, but we want to use them to stir the DRNG state.
+ */
+ memcpy(outbuf, aux_output, requested_bits >> 3);
+ }
+
+ read_unlock_irqrestore(&drng->hash_lock, flags);
+ memzero_explicit(aux_output, digestsize);
+ return returned_ent_bits;
+}
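+
+/*
+ * Worked example of the accounting above (oversampling rate of 64 bits
+ * assumed for SP800-90C mode): with requested_bits = 256,
+ * requested_bits_osr = 320; if the pool holds 384 credited bits and the
+ * digest is large enough, 64 bits are put back, collected_ent_bits becomes
+ * 320, and returned_ent_bits = 320 - 64 = 256.
+ */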
+
+static void lrng_aux_get_backtrack(struct entropy_buf *eb, u32 requested_bits,
+ bool __unused)
+{
+ struct lrng_pool *pool = &lrng_pool;
+ unsigned long flags;
+
+ /* Ensure aux pool extraction and backtracking op are atomic */
+ spin_lock_irqsave(&pool->lock, flags);
+
+ eb->e_bits[lrng_ext_es_aux] = lrng_aux_get_pool(eb->e[lrng_ext_es_aux],
+ requested_bits);
+
+ /* Mix the extracted data back into pool for backtracking resistance */
+ if (lrng_aux_pool_insert_locked((u8 *)eb,
+ sizeof(struct entropy_buf), 0))
+ pr_warn("Backtracking resistance operation failed\n");
+
+ spin_unlock_irqrestore(&pool->lock, flags);
+}
+
+static void lrng_aux_es_state(unsigned char *buf, size_t buflen)
+{
+ const struct lrng_drng *lrng_drng_init = lrng_drng_init_instance();
+
+ /* Assume the lrng_drng_init lock is taken by caller */
+ snprintf(buf, buflen,
+ " Hash for operating entropy pool: %s\n"
+ " Available entropy: %u\n",
+ lrng_drng_init->hash_cb->hash_name(),
+ lrng_aux_avail_entropy(0));
+}
+
+struct lrng_es_cb lrng_es_aux = {
+ .name = "Auxiliary",
+ .get_ent = lrng_aux_get_backtrack,
+ .curr_entropy = lrng_aux_avail_entropy,
+ .max_entropy = lrng_get_digestsize,
+ .state = lrng_aux_es_state,
+ .reset = lrng_aux_reset,
+ .switch_hash = lrng_aux_switch_hash,
+};
diff --git a/drivers/char/lrng/lrng_es_aux.h b/drivers/char/lrng/lrng_es_aux.h
new file mode 100644
index 000000000000..bc41e6474aad
--- /dev/null
+++ b/drivers/char/lrng/lrng_es_aux.h
@@ -0,0 +1,44 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause */
+/*
+ * Copyright (C) 2022, Stephan Mueller <smueller@chronox.de>
+ */
+
+#ifndef _LRNG_ES_AUX_H
+#define _LRNG_ES_AUX_H
+
+#include "lrng_drng_mgr.h"
+#include "lrng_es_mgr_cb.h"
+
+u32 lrng_get_digestsize(void);
+void lrng_pool_set_entropy(u32 entropy_bits);
+int lrng_pool_insert_aux(const u8 *inbuf, u32 inbuflen, u32 entropy_bits);
+
+extern struct lrng_es_cb lrng_es_aux;
+
+/****************************** Helper code ***********************************/
+
+/* Obtain the security strength of the LRNG in bits */
+static inline u32 lrng_security_strength(void)
+{
+ /*
+ * We use a hash to read the entropy in the entropy pool. According to
+ * SP800-90B table 1, the entropy can be at most the digest size.
+ * Considering this together with the last sentence in section 3.1.5.1.2,
+ * the security strength of an (approved) hash is equal to its output
+ * size. On the other hand the entropy cannot be larger than the
+ * security strength of the used DRBG.
+ */
+ return min_t(u32, LRNG_FULL_SEED_ENTROPY_BITS, lrng_get_digestsize());
+}
+
+static inline u32 lrng_get_seed_entropy_osr(bool fully_seeded)
+{
+ u32 requested_bits = lrng_security_strength();
+
+ /* Apply oversampling during initialization according to SP800-90C */
+ if (lrng_sp80090c_compliant() && !fully_seeded)
+ requested_bits += LRNG_SEED_BUFFER_INIT_ADD_BITS;
+ return requested_bits;
+}
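+
+/*
+ * Worked example (LRNG_SEED_BUFFER_INIT_ADD_BITS assumed to be 128): with
+ * a security strength of 256 bits, an SP800-90C compliant system requests
+ * 256 + 128 = 384 bits of entropy until the DRNG is fully seeded, and 256
+ * bits for every reseed thereafter.
+ */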
+
+#endif /* _LRNG_ES_AUX_H */
diff --git a/drivers/char/lrng/lrng_es_cpu.c b/drivers/char/lrng/lrng_es_cpu.c
new file mode 100644
index 000000000000..96a621c16feb
--- /dev/null
+++ b/drivers/char/lrng/lrng_es_cpu.c
@@ -0,0 +1,281 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause
+/*
+ * LRNG Fast Entropy Source: CPU-based entropy source
+ *
+ * Copyright (C) 2022, Stephan Mueller <smueller@chronox.de>
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/lrng.h>
+#include <crypto/hash.h>
+#include <linux/module.h>
+#include <linux/random.h>
+#include <asm/archrandom.h>
+
+#include "lrng_definitions.h"
+#include "lrng_es_aux.h"
+#include "lrng_es_cpu.h"
+
+/*
+ * Estimated entropy of the data is one 32nd of LRNG_DRNG_SECURITY_STRENGTH_BITS.
+ * As we have no ability to review the implementation of those noise sources,
+ * it is prudent to have a conservative estimate here.
+ */
+#define LRNG_ARCHRANDOM_DEFAULT_STRENGTH CONFIG_LRNG_CPU_ENTROPY_RATE
+#define LRNG_ARCHRANDOM_TRUST_CPU_STRENGTH LRNG_DRNG_SECURITY_STRENGTH_BITS
+#ifdef CONFIG_RANDOM_TRUST_CPU
+static u32 cpu_entropy = LRNG_ARCHRANDOM_TRUST_CPU_STRENGTH;
+#else
+static u32 cpu_entropy = LRNG_ARCHRANDOM_DEFAULT_STRENGTH;
+#endif
+#ifdef CONFIG_LRNG_RUNTIME_ES_CONFIG
+module_param(cpu_entropy, uint, 0644);
+MODULE_PARM_DESC(cpu_entropy, "Entropy in bits of 256 data bits from CPU noise source (e.g. RDSEED)");
+#endif
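+
+/*
+ * Worked example: with the default rate of
+ * LRNG_DRNG_SECURITY_STRENGTH_BITS / 32 = 8 bits of entropy per 256 data
+ * bits, a request for 256 bits of seed material is credited with 8 bits of
+ * entropy, unless random.trust_cpu or the module parameter raises the rate.
+ */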
+
+static int __init lrng_parse_trust_cpu(char *arg)
+{
+ int ret;
+ bool trust_cpu = false;
+
+ ret = kstrtobool(arg, &trust_cpu);
+ if (ret)
+ return ret;
+
+ if (trust_cpu)
+ cpu_entropy = LRNG_ARCHRANDOM_TRUST_CPU_STRENGTH;
+ else
+ cpu_entropy = LRNG_ARCHRANDOM_DEFAULT_STRENGTH;
+
+ lrng_force_fully_seeded();
+
+ return 0;
+}
+early_param("random.trust_cpu", lrng_parse_trust_cpu);
+
+static u32 lrng_cpu_entropylevel(u32 requested_bits)
+{
+ return lrng_fast_noise_entropylevel(cpu_entropy, requested_bits);
+}
+
+static u32 lrng_cpu_poolsize(void)
+{
+ return lrng_cpu_entropylevel(lrng_security_strength());
+}
+
+static u32 lrng_get_cpu_data(u8 *outbuf, u32 requested_bits)
+{
+ size_t longs = 0;
+ u32 i, req = requested_bits >> 3;
+
+ /* operate on full blocks */
+ BUILD_BUG_ON(LRNG_DRNG_SECURITY_STRENGTH_BYTES % sizeof(unsigned long));
+ BUILD_BUG_ON(LRNG_SEED_BUFFER_INIT_ADD_BITS % sizeof(unsigned long));
+ /* ensure we have aligned buffers */
+ BUILD_BUG_ON(LRNG_KCAPI_ALIGN % sizeof(unsigned long));
+
+ for (i = 0; i < req; i += longs) {
+ longs = arch_get_random_seed_longs(
+ (unsigned long *)(outbuf + i), req - i);
+ if (longs)
+ continue;
+ longs = arch_get_random_longs((unsigned long *)(outbuf + i),
+ req - i);
+ if (!longs) {
+ cpu_entropy = 0;
+ return 0;
+ }
+ }
+
+ return requested_bits;
+}
+
+static u32 lrng_get_cpu_data_compress(u8 *outbuf, u32 requested_bits,
+ u32 data_multiplier)
+{
+ SHASH_DESC_ON_STACK(shash, NULL);
+ const struct lrng_hash_cb *hash_cb;
+ struct lrng_drng *drng = lrng_drng_node_instance();
+ unsigned long flags;
+ u32 ent_bits = 0, i, partial_bits = 0, digestsize, digestsize_bits,
+ full_bits;
+ void *hash;
+
+ read_lock_irqsave(&drng->hash_lock, flags);
+ hash_cb = drng->hash_cb;
+ hash = drng->hash;
+
+ digestsize = hash_cb->hash_digestsize(hash);
+ digestsize_bits = digestsize << 3;
+ /* Cap to maximum entropy that can ever be generated with given hash */
+ lrng_cap_requested(digestsize_bits, requested_bits);
+ full_bits = requested_bits * data_multiplier;
+
+ /* Calculate oversampling for SP800-90C */
+ if (lrng_sp80090c_compliant()) {
+ /* Complete amount of bits to be pulled */
+ full_bits += LRNG_OVERSAMPLE_ES_BITS * data_multiplier;
+ /* Full blocks that will be pulled */
+ data_multiplier = full_bits / requested_bits;
+ /* Partial block in bits to be pulled */
+ partial_bits = full_bits - (data_multiplier * requested_bits);
+ }
+
+ if (hash_cb->hash_init(shash, hash))
+ goto out;
+
+ /* Hash all data from the CPU entropy source */
+ for (i = 0; i < data_multiplier; i++) {
+ ent_bits = lrng_get_cpu_data(outbuf, requested_bits);
+ if (!ent_bits)
+ goto out;
+
+ if (hash_cb->hash_update(shash, outbuf, ent_bits >> 3))
+ goto err;
+ }
+
+ /* Hash partial block, if applicable */
+ ent_bits = lrng_get_cpu_data(outbuf, partial_bits);
+ if (ent_bits &&
+ hash_cb->hash_update(shash, outbuf, ent_bits >> 3))
+ goto err;
+
+ pr_debug("pulled %u bits from CPU RNG entropy source\n", full_bits);
+ ent_bits = requested_bits;
+
+ /* Generate the compressed data to be returned to the caller */
+ if (requested_bits < digestsize_bits) {
+ u8 digest[LRNG_MAX_DIGESTSIZE];
+
+ if (hash_cb->hash_final(shash, digest))
+ goto err;
+
+ /* Truncate output data to requested size */
+ memcpy(outbuf, digest, requested_bits >> 3);
+ memzero_explicit(digest, digestsize);
+ } else {
+ if (hash_cb->hash_final(shash, outbuf))
+ goto err;
+ }
+
+out:
+ hash_cb->hash_desc_zero(shash);
+ read_unlock_irqrestore(&drng->hash_lock, flags);
+ return ent_bits;
+
+err:
+ ent_bits = 0;
+ goto out;
+}
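+
+/*
+ * Worked example (assuming requested_bits == 256, the data multiplier of 512
+ * used for RDRAND-only x86 systems, and LRNG_OVERSAMPLE_ES_BITS == 64):
+ * without SP800-90C compliance, 256 * 512 == 131072 bits are pulled from the
+ * CPU and compressed by the hash; with SP800-90C compliance, another
+ * 64 * 512 bits are pulled, and the calculation above splits the total into
+ * full requested_bits-sized blocks plus a possible partial block.
+ */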
+
+/*
+ * If the CPU entropy source does not return full entropy, return the
+ * multiplier defining how much data shall be sampled from it.
+ */
+static u32 lrng_cpu_multiplier(void)
+{
+ static u32 data_multiplier = 0;
+ unsigned long v;
+
+ if (data_multiplier > 0)
+ return data_multiplier;
+
+ if (IS_ENABLED(CONFIG_X86) && !arch_get_random_seed_longs(&v, 1)) {
+ /*
+ * Intel SPEC: pulling 512 blocks from RDRAND ensures
+		 * one reseed, making it logically equivalent to RDSEED.
+ */
+ data_multiplier = 512;
+ } else if (IS_ENABLED(CONFIG_PPC)) {
+ /*
+ * PowerISA defines DARN to deliver at least 0.5 bits of
+ * entropy per data bit.
+ */
+ data_multiplier = 2;
+ } else if (IS_ENABLED(CONFIG_RISCV)) {
+ /*
+ * riscv-crypto-spec-scalar-1.0.0-rc6.pdf section 4.2 defines
+ * this requirement.
+ */
+ data_multiplier = 2;
+ } else {
+ /* CPU provides full entropy */
+ data_multiplier = CONFIG_LRNG_CPU_FULL_ENT_MULTIPLIER;
+ }
+ return data_multiplier;
+}
+
+static int
+lrng_cpu_switch_hash(struct lrng_drng *drng, int node,
+ const struct lrng_hash_cb *new_cb, void *new_hash,
+ const struct lrng_hash_cb *old_cb)
+{
+ u32 digestsize, multiplier;
+
+ if (!IS_ENABLED(CONFIG_LRNG_SWITCH))
+ return -EOPNOTSUPP;
+
+ digestsize = lrng_get_digestsize();
+ multiplier = lrng_cpu_multiplier();
+
+ /*
+	 * It would be a security violation if the new digestsize is smaller
+	 * than the set CPU entropy rate.
+ */
+ WARN_ON(multiplier > 1 && digestsize < cpu_entropy);
+ cpu_entropy = min_t(u32, digestsize, cpu_entropy);
+ return 0;
+}
+
+/*
+ * lrng_cpu_get() - Get CPU entropy source entropy
+ *
+ * @eb: entropy buffer to store entropy
+ * @requested_bits: requested entropy in bits
+ */
+static void lrng_cpu_get(struct entropy_buf *eb, u32 requested_bits,
+ bool __unused)
+{
+ u32 ent_bits, data_multiplier = lrng_cpu_multiplier();
+
+ if (data_multiplier <= 1) {
+ ent_bits = lrng_get_cpu_data(eb->e[lrng_ext_es_cpu],
+ requested_bits);
+ } else {
+ ent_bits = lrng_get_cpu_data_compress(eb->e[lrng_ext_es_cpu],
+ requested_bits,
+ data_multiplier);
+ }
+
+ ent_bits = lrng_cpu_entropylevel(ent_bits);
+ pr_debug("obtained %u bits of entropy from CPU RNG entropy source\n",
+ ent_bits);
+ eb->e_bits[lrng_ext_es_cpu] = ent_bits;
+}
+
+static void lrng_cpu_es_state(unsigned char *buf, size_t buflen)
+{
+ const struct lrng_drng *lrng_drng_init = lrng_drng_init_instance();
+ u32 data_multiplier = lrng_cpu_multiplier();
+
+ /* Assume the lrng_drng_init lock is taken by caller */
+ snprintf(buf, buflen,
+ " Hash for compressing data: %s\n"
+ " Available entropy: %u\n"
+ " Data multiplier: %u\n",
+ (data_multiplier <= 1) ?
+ "N/A" : lrng_drng_init->hash_cb->hash_name(),
+ lrng_cpu_poolsize(),
+ data_multiplier);
+}
+
+struct lrng_es_cb lrng_es_cpu = {
+ .name = "CPU",
+ .get_ent = lrng_cpu_get,
+ .curr_entropy = lrng_cpu_entropylevel,
+ .max_entropy = lrng_cpu_poolsize,
+ .state = lrng_cpu_es_state,
+ .reset = NULL,
+ .switch_hash = lrng_cpu_switch_hash,
+};
diff --git a/drivers/char/lrng/lrng_es_cpu.h b/drivers/char/lrng/lrng_es_cpu.h
new file mode 100644
index 000000000000..8dbb4d9a2926
--- /dev/null
+++ b/drivers/char/lrng/lrng_es_cpu.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause */
+/*
+ * Copyright (C) 2022, Stephan Mueller <smueller@chronox.de>
+ */
+
+#ifndef _LRNG_ES_CPU_H
+#define _LRNG_ES_CPU_H
+
+#include "lrng_es_mgr_cb.h"
+
+#ifdef CONFIG_LRNG_CPU
+
+extern struct lrng_es_cb lrng_es_cpu;
+
+#endif /* CONFIG_LRNG_CPU */
+
+#endif /* _LRNG_ES_CPU_H */
diff --git a/drivers/char/lrng/lrng_es_irq.c b/drivers/char/lrng/lrng_es_irq.c
new file mode 100644
index 000000000000..08b0358444f8
--- /dev/null
+++ b/drivers/char/lrng/lrng_es_irq.c
@@ -0,0 +1,730 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause
+/*
+ * LRNG Slow Entropy Source: Interrupt data collection
+ *
+ * Copyright (C) 2022 - 2023, Stephan Mueller <smueller@chronox.de>
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <asm/irq_regs.h>
+#include <asm/ptrace.h>
+#include <crypto/hash.h>
+#include <linux/gcd.h>
+#include <linux/module.h>
+#include <linux/random.h>
+
+#include "lrng_es_aux.h"
+#include "lrng_es_irq.h"
+#include "lrng_es_timer_common.h"
+#include "lrng_health.h"
+#include "lrng_numa.h"
+#include "lrng_testing.h"
+
+/*
+ * Number of interrupts to be recorded to assume that DRNG security strength
+ * bits of entropy are received.
+ * Note: a value below the DRNG security strength should not be defined as this
+ * may imply the DRNG can never be fully seeded in case other noise
+ * sources are unavailable.
+ */
+#define LRNG_IRQ_ENTROPY_BITS LRNG_UINT32_C(CONFIG_LRNG_IRQ_ENTROPY_RATE)
+
+
+/* Number of interrupts required for LRNG_DRNG_SECURITY_STRENGTH_BITS entropy */
+static u32 lrng_irq_entropy_bits = LRNG_IRQ_ENTROPY_BITS;
+
+static u32 irq_entropy __read_mostly = LRNG_IRQ_ENTROPY_BITS;
+#ifdef CONFIG_LRNG_RUNTIME_ES_CONFIG
+module_param(irq_entropy, uint, 0444);
+MODULE_PARM_DESC(irq_entropy,
+ "How many interrupts must be collected for obtaining 256 bits of entropy\n");
+#endif
+
+/* Per-CPU array holding concatenated IRQ entropy events */
+static DEFINE_PER_CPU(u32 [LRNG_DATA_ARRAY_SIZE], lrng_irq_array)
+ __aligned(LRNG_KCAPI_ALIGN);
+static DEFINE_PER_CPU(u32, lrng_irq_array_ptr) = 0;
+static DEFINE_PER_CPU(atomic_t, lrng_irq_array_irqs) = ATOMIC_INIT(0);
+
+/*
+ * The entropy collection is performed by executing the following steps:
+ * 1. fill up the per-CPU array holding the time stamps
+ * 2. once the per-CPU array is full, a compression of the data into
+ * the entropy pool is performed - this happens in interrupt context
+ *
+ * If step 2 is not desired in interrupt context, the following boolean
+ * needs to be set to false. This implies that old entropy data in the
+ * per-CPU array collected since the last DRNG reseed is overwritten with
+ * new entropy data instead of retaining the entropy with the compression
+ * operation.
+ *
+ * Impact on entropy:
+ *
+ * If continuous compression is enabled, the maximum entropy that is collected
+ * per CPU between DRNG reseeds is equal to the digest size of the used hash.
+ *
+ * If continuous compression is disabled, the maximum number of entropy events
+ * that can be collected per CPU is equal to LRNG_DATA_ARRAY_SIZE. This amount
+ * of events is converted into an entropy statement which then represents the
+ * maximum amount of entropy collectible per CPU between DRNG reseeds.
+ */
+static bool lrng_irq_continuous_compression __read_mostly =
+ IS_ENABLED(CONFIG_LRNG_ENABLE_CONTINUOUS_COMPRESSION);
+
+#ifdef CONFIG_LRNG_SWITCHABLE_CONTINUOUS_COMPRESSION
+module_param(lrng_irq_continuous_compression, bool, 0444);
+MODULE_PARM_DESC(lrng_irq_continuous_compression,
+ "Perform entropy compression if per-CPU entropy data array is full\n");
+#endif
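+
+/*
+ * Illustration (assuming a SHA-256 hash and lrng_irq_entropy_bits == 256):
+ * with continuous compression enabled, each CPU retains at most 256 bits of
+ * entropy between DRNG reseeds. With it disabled, at most
+ * LRNG_DATA_NUM_VALUES time stamps survive in the per-CPU array, and
+ * lrng_data_to_entropy() converts that event count into the per-CPU entropy
+ * cap.
+ */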
+
+/*
+ * Per-CPU entropy pool with compressed entropy event
+ *
+ * The per-CPU entropy pool is defined as the hash state. New data is simply
+ * inserted into the entropy pool by performing a hash update operation.
+ * To read the entropy pool, a hash final must be invoked. However, before
+ * the entropy pool is released again after a hash final, the hash init must
+ * be performed.
+ */
+static DEFINE_PER_CPU(u8 [LRNG_POOL_SIZE], lrng_irq_pool)
+ __aligned(LRNG_KCAPI_ALIGN);
+/*
+ * Lock to allow other CPUs to read the pool - as this is only done during
+ * reseed which is infrequent, this lock is hardly contended.
+ */
+static DEFINE_PER_CPU(spinlock_t, lrng_irq_lock);
+static DEFINE_PER_CPU(bool, lrng_irq_lock_init) = false;
+
+static bool lrng_irq_pool_online(int cpu)
+{
+ return per_cpu(lrng_irq_lock_init, cpu);
+}
+
+static void __init lrng_irq_check_compression_state(void)
+{
+ /* One pool must hold sufficient entropy for disabled compression */
+ if (!lrng_irq_continuous_compression) {
+ u32 max_ent = min_t(u32, lrng_get_digestsize(),
+ lrng_data_to_entropy(LRNG_DATA_NUM_VALUES,
+ lrng_irq_entropy_bits));
+ if (max_ent < lrng_security_strength()) {
+ pr_warn("Force continuous compression operation to ensure LRNG can hold enough entropy\n");
+ lrng_irq_continuous_compression = true;
+ }
+ }
+}
+
+void __init lrng_irq_es_init(bool highres_timer)
+{
+ /* Set a minimum number of interrupts that must be collected */
+ irq_entropy = max_t(u32, LRNG_IRQ_ENTROPY_BITS, irq_entropy);
+
+ if (highres_timer) {
+ lrng_irq_entropy_bits = irq_entropy;
+ } else {
+ u32 new_entropy = irq_entropy * LRNG_ES_OVERSAMPLING_FACTOR;
+
+ lrng_irq_entropy_bits = (irq_entropy < new_entropy) ?
+ new_entropy : irq_entropy;
+ pr_warn("operating without high-resolution timer and applying IRQ oversampling factor %u\n",
+ LRNG_ES_OVERSAMPLING_FACTOR);
+ }
+
+ lrng_irq_check_compression_state();
+}
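+
+/*
+ * Illustration (assuming CONFIG_LRNG_IRQ_ENTROPY_RATE == 256 and
+ * LRNG_ES_OVERSAMPLING_FACTOR == 10): with a high-resolution timer, 256
+ * interrupts are credited with 256 bits of entropy; without one, the
+ * oversampling raises the requirement to 2560 interrupts for the same
+ * entropy claim.
+ */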
+
+/*
+ * Reset all per-CPU pools - reset entropy estimator but leave the pool data
+ * that may or may not have entropy unchanged.
+ */
+static void lrng_irq_reset(void)
+{
+ int cpu;
+
+ /* Trigger GCD calculation anew. */
+ lrng_gcd_set(0);
+
+ for_each_online_cpu(cpu)
+ atomic_set(per_cpu_ptr(&lrng_irq_array_irqs, cpu), 0);
+}
+
+static u32 lrng_irq_avail_pool_size(void)
+{
+ u32 max_size = 0, max_pool = lrng_get_digestsize();
+ int cpu;
+
+ if (!lrng_irq_continuous_compression)
+ max_pool = min_t(u32, max_pool, LRNG_DATA_NUM_VALUES);
+
+ for_each_online_cpu(cpu) {
+ if (lrng_irq_pool_online(cpu))
+ max_size += max_pool;
+ }
+
+ return max_size;
+}
+
+/* Return entropy of unused IRQs present in all per-CPU pools. */
+static u32 lrng_irq_avail_entropy(u32 __unused)
+{
+ u32 digestsize_irqs, irq = 0;
+ int cpu;
+
+ /* Only deliver entropy when SP800-90B self test is completed */
+ if (!lrng_sp80090b_startup_complete_es(lrng_int_es_irq))
+ return 0;
+
+ /* Obtain the cap of maximum numbers of IRQs we count */
+ digestsize_irqs = lrng_entropy_to_data(lrng_get_digestsize(),
+ lrng_irq_entropy_bits);
+ if (!lrng_irq_continuous_compression) {
+ /* Cap to max. number of IRQs the array can hold */
+ digestsize_irqs = min_t(u32, digestsize_irqs,
+ LRNG_DATA_NUM_VALUES);
+ }
+
+ for_each_online_cpu(cpu) {
+ if (!lrng_irq_pool_online(cpu))
+ continue;
+ irq += min_t(u32, digestsize_irqs,
+ atomic_read_u32(per_cpu_ptr(&lrng_irq_array_irqs,
+ cpu)));
+ }
+
+ /* Consider oversampling rate */
+ return lrng_reduce_by_osr(lrng_data_to_entropy(irq,
+ lrng_irq_entropy_bits));
+}
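+
+/*
+ * Worked example for the data/entropy conversion (assuming
+ * lrng_irq_entropy_bits == 256, i.e. 256 interrupts per 256 bits of
+ * entropy): 512 unused interrupts across all per-CPU pools translate into
+ * lrng_data_to_entropy(512, 256) == 512 bits, which are then reduced by the
+ * configured oversampling rate before being reported.
+ */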
+
+/*
+ * Trigger a switch of the hash implementation for the per-CPU pool.
+ *
+ * For each per-CPU pool, obtain the message digest with the old hash
+ * implementation, initialize the per-CPU pool again with the new hash
+ * implementation and inject the message digest into the new state.
+ *
+ * Assumption: the caller must guarantee that the new_cb is available during the
+ * entire operation (e.g. it must hold the lock against pointer updating).
+ */
+static int
+lrng_irq_switch_hash(struct lrng_drng *drng, int node,
+ const struct lrng_hash_cb *new_cb, void *new_hash,
+ const struct lrng_hash_cb *old_cb)
+{
+ u8 digest[LRNG_MAX_DIGESTSIZE];
+ u32 digestsize_irqs, found_irqs;
+ int ret = 0, cpu;
+
+ if (!IS_ENABLED(CONFIG_LRNG_SWITCH))
+ return -EOPNOTSUPP;
+
+ for_each_online_cpu(cpu) {
+ struct shash_desc *pcpu_shash;
+
+ /*
+ * Only switch the per-CPU pools for the current node because
+ * the hash_cb only applies NUMA-node-wide.
+ */
+ if (cpu_to_node(cpu) != node || !lrng_irq_pool_online(cpu))
+ continue;
+
+ pcpu_shash = (struct shash_desc *)per_cpu_ptr(lrng_irq_pool,
+ cpu);
+
+ digestsize_irqs = old_cb->hash_digestsize(pcpu_shash);
+ digestsize_irqs = lrng_entropy_to_data(digestsize_irqs << 3,
+ lrng_irq_entropy_bits);
+
+ if (pcpu_shash->tfm == new_hash)
+ continue;
+
+ /* Get the per-CPU pool hash with old digest ... */
+ ret = old_cb->hash_final(pcpu_shash, digest) ?:
+ /* ... re-initialize the hash with the new digest ... */
+ new_cb->hash_init(pcpu_shash, new_hash) ?:
+ /*
+ * ... feed the old hash into the new state. We may feed
+			 * uninitialized memory into the new state, but this is
+			 * considered not to be an issue and even beneficial as
+			 * it adds some more uncertainty here.
+ */
+ new_cb->hash_update(pcpu_shash, digest, sizeof(digest));
+ if (ret)
+ goto out;
+
+ /*
+ * In case the new digest is larger than the old one, cap
+ * the available entropy to the old message digest used to
+ * process the existing data.
+ */
+ found_irqs = atomic_xchg_relaxed(
+ per_cpu_ptr(&lrng_irq_array_irqs, cpu), 0);
+ found_irqs = min_t(u32, found_irqs, digestsize_irqs);
+ atomic_add_return_relaxed(found_irqs,
+ per_cpu_ptr(&lrng_irq_array_irqs, cpu));
+
+ pr_debug("Re-initialize per-CPU interrupt entropy pool for CPU %d on NUMA node %d with hash %s\n",
+ cpu, node, new_cb->hash_name());
+ }
+
+out:
+ memzero_explicit(digest, sizeof(digest));
+ return ret;
+}
+
+/*
+ * When reading the per-CPU message digest, make sure we use the crypto
+ * callbacks defined for the NUMA node the per-CPU pool is defined for because
+ * the LRNG crypto switch support is only atomic per NUMA node.
+ */
+static u32
+lrng_irq_pool_hash_one(const struct lrng_hash_cb *pcpu_hash_cb,
+ void *pcpu_hash, int cpu, u8 *digest, u32 *digestsize)
+{
+ struct shash_desc *pcpu_shash =
+ (struct shash_desc *)per_cpu_ptr(lrng_irq_pool, cpu);
+ spinlock_t *lock = per_cpu_ptr(&lrng_irq_lock, cpu);
+ unsigned long flags;
+ u32 digestsize_irqs, found_irqs;
+
+ /* Lock guarding against reading / writing to per-CPU pool */
+ spin_lock_irqsave(lock, flags);
+
+ *digestsize = pcpu_hash_cb->hash_digestsize(pcpu_hash);
+ digestsize_irqs = lrng_entropy_to_data(*digestsize << 3,
+ lrng_irq_entropy_bits);
+
+ /* Obtain entropy statement like for the entropy pool */
+ found_irqs = atomic_xchg_relaxed(
+ per_cpu_ptr(&lrng_irq_array_irqs, cpu), 0);
+ /* Cap to maximum amount of data we can hold in hash */
+ found_irqs = min_t(u32, found_irqs, digestsize_irqs);
+
+ /* Cap to maximum amount of data we can hold in array */
+ if (!lrng_irq_continuous_compression)
+ found_irqs = min_t(u32, found_irqs, LRNG_DATA_NUM_VALUES);
+
+ /* Store all not-yet compressed data in data array into hash, ... */
+ if (pcpu_hash_cb->hash_update(pcpu_shash,
+ (u8 *)per_cpu_ptr(lrng_irq_array, cpu),
+ LRNG_DATA_ARRAY_SIZE * sizeof(u32)) ?:
+ /* ... get the per-CPU pool digest, ... */
+ pcpu_hash_cb->hash_final(pcpu_shash, digest) ?:
+ /* ... re-initialize the hash, ... */
+ pcpu_hash_cb->hash_init(pcpu_shash, pcpu_hash) ?:
+ /* ... feed the old hash into the new state. */
+ pcpu_hash_cb->hash_update(pcpu_shash, digest, *digestsize))
+ found_irqs = 0;
+
+ spin_unlock_irqrestore(lock, flags);
+ return found_irqs;
+}
+
+/*
+ * Hash all per-CPU pools and return the digest to be used as seed data for
+ * seeding a DRNG. The caller must guarantee backtracking resistance.
+ * The function will only copy as much data as entropy is available into the
+ * caller-provided output buffer.
+ *
+ * This function handles the translation from the number of received interrupts
+ * into an entropy statement. The conversion depends on LRNG_IRQ_ENTROPY_BITS
+ * which defines how many interrupts must be received to obtain 256 bits of
+ * entropy. With this value, the function lrng_data_to_entropy converts a given
+ * data size (received interrupts, requested amount of data, etc.) into an
+ * entropy statement. lrng_entropy_to_data does the reverse.
+ *
+ * @eb: entropy buffer to store entropy
+ * @requested_bits: Requested amount of entropy
+ * @fully_seeded: indicator whether LRNG is fully seeded
+ */
+static void lrng_irq_pool_hash(struct entropy_buf *eb, u32 requested_bits,
+ bool fully_seeded)
+{
+ SHASH_DESC_ON_STACK(shash, NULL);
+ const struct lrng_hash_cb *hash_cb;
+ struct lrng_drng **lrng_drng = lrng_drng_instances();
+ struct lrng_drng *drng = lrng_drng_init_instance();
+ u8 digest[LRNG_MAX_DIGESTSIZE];
+ unsigned long flags, flags2;
+ u32 found_irqs, collected_irqs = 0, collected_ent_bits, requested_irqs,
+ returned_ent_bits;
+ int ret, cpu;
+ void *hash;
+
+ /* Only deliver entropy when SP800-90B self test is completed */
+ if (!lrng_sp80090b_startup_complete_es(lrng_int_es_irq)) {
+ eb->e_bits[lrng_int_es_irq] = 0;
+ return;
+ }
+
+ /* Lock guarding replacement of per-NUMA hash */
+ read_lock_irqsave(&drng->hash_lock, flags);
+
+ hash_cb = drng->hash_cb;
+ hash = drng->hash;
+
+	/* The hash state is filled with all per-CPU pool hashes. */
+ ret = hash_cb->hash_init(shash, hash);
+ if (ret)
+ goto err;
+
+ /* Cap to maximum entropy that can ever be generated with given hash */
+ lrng_cap_requested(hash_cb->hash_digestsize(hash) << 3, requested_bits);
+ requested_irqs = lrng_entropy_to_data(requested_bits +
+ lrng_compress_osr(),
+ lrng_irq_entropy_bits);
+
+ /*
+ * Harvest entropy from each per-CPU hash state - even though we may
+ * have collected sufficient entropy, we will hash all per-CPU pools.
+ */
+ for_each_online_cpu(cpu) {
+ struct lrng_drng *pcpu_drng = drng;
+ u32 digestsize, pcpu_unused_irqs = 0;
+ int node = cpu_to_node(cpu);
+
+ /* If pool is not online, then no entropy is present. */
+ if (!lrng_irq_pool_online(cpu))
+ continue;
+
+ if (lrng_drng && lrng_drng[node])
+ pcpu_drng = lrng_drng[node];
+
+ if (pcpu_drng == drng) {
+ found_irqs = lrng_irq_pool_hash_one(hash_cb, hash,
+ cpu, digest,
+ &digestsize);
+ } else {
+ read_lock_irqsave(&pcpu_drng->hash_lock, flags2);
+ found_irqs =
+ lrng_irq_pool_hash_one(pcpu_drng->hash_cb,
+ pcpu_drng->hash, cpu,
+ digest, &digestsize);
+ read_unlock_irqrestore(&pcpu_drng->hash_lock, flags2);
+ }
+
+ /* Inject the digest into the state of all per-CPU pools */
+ ret = hash_cb->hash_update(shash, digest, digestsize);
+ if (ret)
+ goto err;
+
+ collected_irqs += found_irqs;
+ if (collected_irqs > requested_irqs) {
+ pcpu_unused_irqs = collected_irqs - requested_irqs;
+ atomic_add_return_relaxed(pcpu_unused_irqs,
+ per_cpu_ptr(&lrng_irq_array_irqs, cpu));
+ collected_irqs = requested_irqs;
+ }
+ pr_debug("%u interrupts used from entropy pool of CPU %d, %u interrupts remain unused\n",
+ found_irqs - pcpu_unused_irqs, cpu, pcpu_unused_irqs);
+ }
+
+ ret = hash_cb->hash_final(shash, digest);
+ if (ret)
+ goto err;
+
+ collected_ent_bits = lrng_data_to_entropy(collected_irqs,
+ lrng_irq_entropy_bits);
+ /* Apply oversampling: discount requested oversampling rate */
+ returned_ent_bits = lrng_reduce_by_osr(collected_ent_bits);
+
+ pr_debug("obtained %u bits by collecting %u bits of entropy from entropy pool noise source\n",
+ returned_ent_bits, collected_ent_bits);
+
+ /*
+ * Truncate to available entropy as implicitly allowed by SP800-90B
+ * section 3.1.5.1.1 table 1 which awards truncated hashes full
+ * entropy.
+ *
+ * During boot time, we read requested_bits data with
+ * returned_ent_bits entropy. In case our conservative entropy
+ * estimate underestimates the available entropy we can transport as
+ * much available entropy as possible.
+ */
+ memcpy(eb->e[lrng_int_es_irq], digest,
+ fully_seeded ? returned_ent_bits >> 3 : requested_bits >> 3);
+ eb->e_bits[lrng_int_es_irq] = returned_ent_bits;
+
+out:
+ hash_cb->hash_desc_zero(shash);
+ read_unlock_irqrestore(&drng->hash_lock, flags);
+ memzero_explicit(digest, sizeof(digest));
+ return;
+
+err:
+ eb->e_bits[lrng_int_es_irq] = 0;
+ goto out;
+}
+
+/* Compress the lrng_irq_array array into lrng_irq_pool */
+static void lrng_irq_array_compress(void)
+{
+ struct shash_desc *shash =
+ (struct shash_desc *)this_cpu_ptr(lrng_irq_pool);
+ struct lrng_drng *drng = lrng_drng_node_instance();
+ const struct lrng_hash_cb *hash_cb;
+ spinlock_t *lock = this_cpu_ptr(&lrng_irq_lock);
+ unsigned long flags, flags2;
+ void *hash;
+ bool init = false;
+
+ read_lock_irqsave(&drng->hash_lock, flags);
+ hash_cb = drng->hash_cb;
+ hash = drng->hash;
+
+ if (unlikely(!this_cpu_read(lrng_irq_lock_init))) {
+ init = true;
+ spin_lock_init(lock);
+ this_cpu_write(lrng_irq_lock_init, true);
+ pr_debug("Initializing per-CPU entropy pool for CPU %d on NUMA node %d with hash %s\n",
+ raw_smp_processor_id(), numa_node_id(),
+ hash_cb->hash_name());
+ }
+
+ spin_lock_irqsave(lock, flags2);
+
+ if (unlikely(init) && hash_cb->hash_init(shash, hash)) {
+ this_cpu_write(lrng_irq_lock_init, false);
+ pr_warn("Initialization of hash failed\n");
+ } else if (lrng_irq_continuous_compression) {
+ /* Add entire per-CPU data array content into entropy pool. */
+ if (hash_cb->hash_update(shash,
+ (u8 *)this_cpu_ptr(lrng_irq_array),
+ LRNG_DATA_ARRAY_SIZE * sizeof(u32)))
+ pr_warn_ratelimited("Hashing of entropy data failed\n");
+ }
+
+ spin_unlock_irqrestore(lock, flags2);
+ read_unlock_irqrestore(&drng->hash_lock, flags);
+}
+
+/* Compress data array into hash */
+static void lrng_irq_array_to_hash(u32 ptr)
+{
+ u32 *array = this_cpu_ptr(lrng_irq_array);
+
+ /*
+ * During boot time the hash operation is triggered more often than
+ * during regular operation.
+ */
+ if (unlikely(!lrng_state_fully_seeded())) {
+ if ((ptr & 31) && (ptr < LRNG_DATA_WORD_MASK))
+ return;
+ } else if (ptr < LRNG_DATA_WORD_MASK) {
+ return;
+ }
+
+ if (lrng_raw_array_entropy_store(*array)) {
+ u32 i;
+
+ /*
+		 * If we fed even a part of the array to external analysis, we
+		 * mark the entire array and the per-CPU pool as having no
+		 * entropy. This is due to the non-IID property of the data as
+		 * we do not fully know whether the existing dependencies
+		 * diminish the entropy below what we expect it to have.
+ */
+ atomic_set(this_cpu_ptr(&lrng_irq_array_irqs), 0);
+
+ for (i = 1; i < LRNG_DATA_ARRAY_SIZE; i++)
+ lrng_raw_array_entropy_store(*(array + i));
+ } else {
+ lrng_irq_array_compress();
+ /* Ping pool handler about received entropy */
+ if (lrng_sp80090b_startup_complete_es(lrng_int_es_irq))
+ lrng_es_add_entropy();
+ }
+}
+
+/*
+ * Concatenate a full 32-bit word at the end of the time array even when the
+ * current ptr is not aligned to sizeof(data).
+ */
+static void _lrng_irq_array_add_u32(u32 data)
+{
+ /* Increment pointer by number of slots taken for input value */
+ u32 pre_ptr, mask, ptr = this_cpu_add_return(lrng_irq_array_ptr,
+ LRNG_DATA_SLOTS_PER_UINT);
+ unsigned int pre_array;
+
+ /*
+	 * This function injects a unit into the array - guarantee that the
+	 * array unit size is equal to the size of the input data type.
+ */
+ BUILD_BUG_ON(LRNG_DATA_ARRAY_MEMBER_BITS != (sizeof(data) << 3));
+
+ /*
+ * The following logic requires at least two units holding
+ * the data as otherwise the pointer would immediately wrap when
+	 * injecting a u32 word.
+ */
+ BUILD_BUG_ON(LRNG_DATA_NUM_VALUES <= LRNG_DATA_SLOTS_PER_UINT);
+
+ lrng_data_split_u32(&ptr, &pre_ptr, &mask);
+
+ /* MSB of data go into previous unit */
+ pre_array = lrng_data_idx2array(pre_ptr);
+ /* zeroization of slot to ensure the following OR adds the data */
+ this_cpu_and(lrng_irq_array[pre_array], ~(0xffffffff & ~mask));
+ this_cpu_or(lrng_irq_array[pre_array], data & ~mask);
+
+ /* Invoke compression as we just filled data array completely */
+ if (unlikely(pre_ptr > ptr))
+ lrng_irq_array_to_hash(LRNG_DATA_WORD_MASK);
+
+ /* LSB of data go into current unit */
+ this_cpu_write(lrng_irq_array[lrng_data_idx2array(ptr)],
+ data & mask);
+
+ if (likely(pre_ptr <= ptr))
+ lrng_irq_array_to_hash(ptr);
+}
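+
+/*
+ * Illustration (assuming LRNG_DATA_SLOTSIZE_BITS == 8, i.e.
+ * LRNG_DATA_SLOTS_PER_UINT == 4): injecting a u32 advances the pointer by
+ * four slots. If the pointer was mid-word before the injection, the most
+ * significant bits of the value land in the remainder of the previous array
+ * word (pre_ptr) and the least significant bits in the current word (ptr),
+ * which is why the wrap check between pre_ptr and ptr is needed.
+ */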
+
+/* Concatenate a 32-bit word at the end of the per-CPU array */
+void lrng_irq_array_add_u32(u32 data)
+{
+ /*
+ * Disregard entropy-less data without continuous compression to
+ * avoid it overwriting data with entropy when array ptr wraps.
+ */
+ if (lrng_irq_continuous_compression)
+ _lrng_irq_array_add_u32(data);
+}
+
+/* Concatenate data of max LRNG_DATA_SLOTSIZE_MASK at the end of time array */
+static void lrng_irq_array_add_slot(u32 data)
+{
+ /* Get slot */
+ u32 ptr = this_cpu_inc_return(lrng_irq_array_ptr) &
+ LRNG_DATA_WORD_MASK;
+ unsigned int array = lrng_data_idx2array(ptr);
+ unsigned int slot = lrng_data_idx2slot(ptr);
+
+ BUILD_BUG_ON(LRNG_DATA_ARRAY_MEMBER_BITS % LRNG_DATA_SLOTSIZE_BITS);
+ /* Ensure consistency of values */
+ BUILD_BUG_ON(LRNG_DATA_ARRAY_MEMBER_BITS !=
+ sizeof(lrng_irq_array[0]) << 3);
+
+ /* zeroization of slot to ensure the following OR adds the data */
+ this_cpu_and(lrng_irq_array[array],
+ ~(lrng_data_slot_val(0xffffffff & LRNG_DATA_SLOTSIZE_MASK,
+ slot)));
+ /* Store data into slot */
+ this_cpu_or(lrng_irq_array[array], lrng_data_slot_val(data, slot));
+
+ lrng_irq_array_to_hash(ptr);
+}
+
+static void
+lrng_time_process_common(u32 time, void(*add_time)(u32 data))
+{
+ enum lrng_health_res health_test;
+
+ if (lrng_raw_hires_entropy_store(time))
+ return;
+
+ health_test = lrng_health_test(time, lrng_int_es_irq);
+ if (health_test > lrng_health_fail_use)
+ return;
+
+ if (health_test == lrng_health_pass)
+ atomic_inc_return(this_cpu_ptr(&lrng_irq_array_irqs));
+
+ add_time(time);
+}
+
+/*
+ * Batching up of entropy in per-CPU array before injecting into entropy pool.
+ */
+static void lrng_time_process(void)
+{
+ u32 now_time = random_get_entropy();
+
+ if (unlikely(!lrng_gcd_tested())) {
+ /* When GCD is unknown, we process the full time stamp */
+ lrng_time_process_common(now_time, _lrng_irq_array_add_u32);
+ lrng_gcd_add_value(now_time);
+ } else {
+ /* GCD is known and applied */
+ lrng_time_process_common((now_time / lrng_gcd_get()) &
+ LRNG_DATA_SLOTSIZE_MASK,
+ lrng_irq_array_add_slot);
+ }
+
+ lrng_perf_time(now_time);
+}
+
+/* Hot code path - Callback for interrupt handler */
+void add_interrupt_randomness(int irq)
+{
+ if (lrng_highres_timer()) {
+ lrng_time_process();
+ } else {
+ struct pt_regs *regs = get_irq_regs();
+ static atomic_t reg_idx = ATOMIC_INIT(0);
+ u64 ip;
+ u32 tmp;
+
+ if (regs) {
+ u32 *ptr = (u32 *)regs;
+ int reg_ptr = atomic_add_return_relaxed(1, &reg_idx);
+ size_t n = (sizeof(struct pt_regs) / sizeof(u32));
+
+ ip = instruction_pointer(regs);
+ tmp = *(ptr + (reg_ptr % n));
+ tmp = lrng_raw_regs_entropy_store(tmp) ? 0 : tmp;
+ _lrng_irq_array_add_u32(tmp);
+ } else {
+ ip = _RET_IP_;
+ }
+
+ lrng_time_process();
+
+ /*
+ * The XOR operation combining the different values is not
+ * considered to destroy entropy since the entirety of all
+ * processed values delivers the entropy (and not each
+		 * value separately from the others).
+ */
+ tmp = lrng_raw_jiffies_entropy_store(jiffies) ? 0 : jiffies;
+ tmp ^= lrng_raw_irq_entropy_store(irq) ? 0 : irq;
+ tmp ^= lrng_raw_retip_entropy_store(ip) ? 0 : ip;
+ tmp ^= ip >> 32;
+ _lrng_irq_array_add_u32(tmp);
+ }
+}
+EXPORT_SYMBOL(add_interrupt_randomness);
+
+static void lrng_irq_es_state(unsigned char *buf, size_t buflen)
+{
+ const struct lrng_drng *lrng_drng_init = lrng_drng_init_instance();
+
+ /* Assume the lrng_drng_init lock is taken by caller */
+ snprintf(buf, buflen,
+ " Hash for operating entropy pool: %s\n"
+ " Available entropy: %u\n"
+ " per-CPU interrupt collection size: %u\n"
+ " Standards compliance: %s\n"
+ " High-resolution timer: %s\n"
+ " Continuous compression: %s\n"
+ " Health test passed: %s\n",
+ lrng_drng_init->hash_cb->hash_name(),
+ lrng_irq_avail_entropy(0),
+ LRNG_DATA_NUM_VALUES,
+ lrng_sp80090b_compliant(lrng_int_es_irq) ? "SP800-90B " : "",
+ lrng_highres_timer() ? "true" : "false",
+ lrng_irq_continuous_compression ? "true" : "false",
+ lrng_sp80090b_startup_complete_es(lrng_int_es_irq) ? "true" :
+ "false");
+}
+
+struct lrng_es_cb lrng_es_irq = {
+ .name = "IRQ",
+ .get_ent = lrng_irq_pool_hash,
+ .curr_entropy = lrng_irq_avail_entropy,
+ .max_entropy = lrng_irq_avail_pool_size,
+ .state = lrng_irq_es_state,
+ .reset = lrng_irq_reset,
+ .switch_hash = lrng_irq_switch_hash,
+};
diff --git a/drivers/char/lrng/lrng_es_irq.h b/drivers/char/lrng/lrng_es_irq.h
new file mode 100644
index 000000000000..2cd746611cf0
--- /dev/null
+++ b/drivers/char/lrng/lrng_es_irq.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause */
+/*
+ * Copyright (C) 2022, Stephan Mueller <smueller@chronox.de>
+ */
+
+#ifndef _LRNG_ES_IRQ_H
+#define _LRNG_ES_IRQ_H
+
+#include <linux/lrng.h>
+
+#include "lrng_es_mgr_cb.h"
+
+#ifdef CONFIG_LRNG_IRQ
+void lrng_irq_es_init(bool highres_timer);
+void lrng_irq_array_add_u32(u32 data);
+
+extern struct lrng_es_cb lrng_es_irq;
+
+#else /* CONFIG_LRNG_IRQ */
+static inline void lrng_irq_es_init(bool highres_timer) { }
+static inline void lrng_irq_array_add_u32(u32 data) { }
+#endif /* CONFIG_LRNG_IRQ */
+
+#endif /* _LRNG_ES_IRQ_H */
diff --git a/drivers/char/lrng/lrng_es_jent.c b/drivers/char/lrng/lrng_es_jent.c
new file mode 100644
index 000000000000..250a8fc6e249
--- /dev/null
+++ b/drivers/char/lrng/lrng_es_jent.c
@@ -0,0 +1,356 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause
+/*
+ * LRNG Fast Entropy Source: Jitter RNG
+ *
+ * Copyright (C) 2022 - 2023, Stephan Mueller <smueller@chronox.de>
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <crypto/rng.h>
+#include <linux/fips.h>
+#include <linux/kthread.h>
+#include <linux/module.h>
+#include <linux/types.h>
+
+#include "lrng_definitions.h"
+#include "lrng_es_aux.h"
+#include "lrng_es_jent.h"
+#include "lrng_es_mgr.h"
+
+/*
+ * Estimated entropy of data is a 16th of LRNG_DRNG_SECURITY_STRENGTH_BITS.
+ * Albeit a full entropy assessment is provided for the noise source,
+ * indicating that it provides high entropy rates, and considering that it
+ * deactivates itself when it detects insufficient hardware, the chosen
+ * underestimation of entropy is considered acceptable to all reviewers.
+ */
+static u32 jent_entropy = CONFIG_LRNG_JENT_ENTROPY_RATE;
+#ifdef CONFIG_LRNG_RUNTIME_ES_CONFIG
+module_param(jent_entropy, uint, 0644);
+MODULE_PARM_DESC(jent_entropy,
+ "Entropy in bits of 256 data bits from Jitter RNG noise source");
+#endif
+
+static bool lrng_jent_initialized = false;
+static struct crypto_rng *jent;
+
+#if (CONFIG_LRNG_JENT_ENTROPY_BLOCKS != 0)
+
+/* Entropy buffer filled by Jitter RNG thread - must be power of 2 */
+#define LRNG_JENT_ENTROPY_BLOCKS_MASK (CONFIG_LRNG_JENT_ENTROPY_BLOCKS - 1)
+
+struct jent_entropy_es {
+ uint8_t e[LRNG_DRNG_INIT_SEED_SIZE_BYTES];
+ uint32_t e_bits;
+};
+
+/* Buffer that is filled with Jitter RNG data by a thread. */
+static struct jent_entropy_es
+ lrng_jent_async[CONFIG_LRNG_JENT_ENTROPY_BLOCKS] __aligned(sizeof(u64));
+
+/* State of each Jitter RNG buffer entry to ensure atomic access. */
+enum lrng_jent_async_state {
+ buffer_empty,
+ buffer_filling,
+ buffer_filled,
+ buffer_reading,
+};
+static atomic_t lrng_jent_async_set[CONFIG_LRNG_JENT_ENTROPY_BLOCKS];
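+
+/*
+ * Sketch of the per-slot life cycle enforced with atomic_cmpxchg():
+ *
+ *	buffer_empty   -> buffer_filling   (worker claims the slot)
+ *	buffer_filling -> buffer_filled    (worker stored fresh data)
+ *	buffer_filled  -> buffer_reading   (consumer claims the slot)
+ *	buffer_reading -> buffer_empty     (consumer wiped the slot)
+ *
+ * A slot is only ever owned by one party at a time; losing the cmpxchg means
+ * another context currently owns the slot and the caller moves on.
+ */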
+
+/* Jitter RNG buffer work handler. */
+static struct work_struct lrng_jent_async_work;
+
+/* Is the asynchronous operation enabled? */
+static bool lrng_es_jent_async_enabled = true;
+
+#else /* CONFIG_LRNG_JENT_ENTROPY_BLOCKS */
+
+/* The asynchronous operation is disabled by compile time option. */
+static bool lrng_es_jent_async_enabled = false;
+
+#endif /* CONFIG_LRNG_JENT_ENTROPY_BLOCKS */
+
+static u32 lrng_jent_entropylevel(u32 requested_bits)
+{
+ return lrng_fast_noise_entropylevel(lrng_jent_initialized ?
+ jent_entropy : 0, requested_bits);
+}
+
+static u32 lrng_jent_poolsize(void)
+{
+ return lrng_jent_entropylevel(lrng_security_strength());
+}
+
+static void __lrng_jent_get(u8 *e, u32 *e_bits, u32 requested_bits)
+{
+ int ret;
+ u32 ent_bits = lrng_jent_entropylevel(requested_bits);
+ unsigned long flags;
+ static DEFINE_SPINLOCK(lrng_jent_lock);
+
+ if (!lrng_jent_initialized)
+ goto err;
+
+ spin_lock_irqsave(&lrng_jent_lock, flags);
+ ret = crypto_rng_get_bytes(jent, e, requested_bits >> 3);
+ spin_unlock_irqrestore(&lrng_jent_lock, flags);
+
+ if (ret) {
+ pr_debug("Jitter RNG failed with %d\n", ret);
+ goto err;
+ }
+
+ pr_debug("obtained %u bits of entropy from Jitter RNG noise source\n",
+ ent_bits);
+
+ *e_bits = ent_bits;
+ return;
+
+err:
+ *e_bits = 0;
+}
+
+/*
+ * lrng_jent_get() - Get Jitter RNG entropy
+ *
+ * @eb: entropy buffer to store entropy
+ * @requested_bits: requested entropy in bits
+ */
+static void lrng_jent_get(struct entropy_buf *eb, u32 requested_bits,
+ bool __unused)
+{
+ __lrng_jent_get(eb->e[lrng_ext_es_jitter],
+ &eb->e_bits[lrng_ext_es_jitter], requested_bits);
+}
+
+#if (CONFIG_LRNG_JENT_ENTROPY_BLOCKS != 0)
+
+/* Fill the Jitter RNG buffer with random data. */
+static void lrng_jent_async_monitor(struct work_struct *__unused)
+{
+ unsigned int i, requested_bits = lrng_get_seed_entropy_osr(true);
+
+ pr_debug("Jitter RNG block filling started\n");
+
+ for (i = 0; i < CONFIG_LRNG_JENT_ENTROPY_BLOCKS; i++) {
+ /* Ensure atomic access to the Jitter RNG buffer slot. */
+ if (atomic_cmpxchg(&lrng_jent_async_set[i],
+ buffer_empty, buffer_filling) !=
+ buffer_empty)
+ continue;
+
+ /*
+ * Always gather entropy data including
+ * potential oversampling factor.
+ */
+ __lrng_jent_get(lrng_jent_async[i].e,
+ &lrng_jent_async[i].e_bits, requested_bits);
+
+ atomic_set(&lrng_jent_async_set[i], buffer_filled);
+
+ pr_debug("Jitter RNG ES monitor: filled slot %u with %u bits of entropy\n",
+ i, requested_bits);
+ }
+
+ pr_debug("Jitter RNG block filling completed\n");
+}
+
+static void lrng_jent_async_monitor_schedule(void)
+{
+ if (lrng_es_jent_async_enabled)
+ schedule_work(&lrng_jent_async_work);
+}
+
+static void lrng_jent_async_fini(void)
+{
+ /* Reset state */
+ memzero_explicit(lrng_jent_async, sizeof(lrng_jent_async));
+}
+
+/* Get Jitter RNG data from the buffer */
+static void lrng_jent_async_get(struct entropy_buf *eb, uint32_t requested_bits,
+ bool __unused)
+{
+ static atomic_t idx = ATOMIC_INIT(-1);
+ unsigned int slot;
+
+ (void)requested_bits;
+
+ if (!lrng_jent_initialized) {
+ eb->e_bits[lrng_ext_es_jitter] = 0;
+ return;
+ }
+
+ /* CONFIG_LRNG_JENT_ENTROPY_BLOCKS must be a power of 2 */
+ BUILD_BUG_ON((CONFIG_LRNG_JENT_ENTROPY_BLOCKS &
+ LRNG_JENT_ENTROPY_BLOCKS_MASK) != 0);
+
+ slot = ((unsigned int)atomic_inc_return(&idx)) &
+ LRNG_JENT_ENTROPY_BLOCKS_MASK;
+
+ /* Ensure atomic access to the Jitter RNG buffer slot. */
+ if (atomic_cmpxchg(&lrng_jent_async_set[slot],
+ buffer_filled, buffer_reading) != buffer_filled) {
+ pr_debug("Jitter RNG ES monitor: buffer slot %u exhausted\n",
+ slot);
+ lrng_jent_get(eb, requested_bits, __unused);
+ lrng_jent_async_monitor_schedule();
+ return;
+ }
+
+ pr_debug("Jitter RNG ES monitor: used slot %u\n", slot);
+ memcpy(eb->e[lrng_ext_es_jitter], lrng_jent_async[slot].e,
+ LRNG_DRNG_INIT_SEED_SIZE_BYTES);
+ eb->e_bits[lrng_ext_es_jitter] = lrng_jent_async[slot].e_bits;
+
+ pr_debug("obtained %u bits of entropy from Jitter RNG noise source\n",
+ eb->e_bits[lrng_ext_es_jitter]);
+
+ memzero_explicit(&lrng_jent_async[slot],
+ sizeof(struct jent_entropy_es));
+
+ atomic_set(&lrng_jent_async_set[slot], buffer_empty);
+
+ /* Ensure division in the following check works */
+ BUILD_BUG_ON(CONFIG_LRNG_JENT_ENTROPY_BLOCKS < 4);
+ if (!(slot % (CONFIG_LRNG_JENT_ENTROPY_BLOCKS / 4)) && slot)
+ lrng_jent_async_monitor_schedule();
+}
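+
+/*
+ * Refill cadence example (assuming CONFIG_LRNG_JENT_ENTROPY_BLOCKS == 64,
+ * a power of two as required): the monitor work is scheduled whenever the
+ * consumed slot index is a non-zero multiple of 64 / 4 == 16, i.e. after
+ * slots 16, 32 and 48, so the ring is topped up roughly every quarter turn.
+ */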
+
+static void lrng_jent_get_check(struct entropy_buf *eb,
+ uint32_t requested_bits, bool __unused)
+{
+ if (lrng_es_jent_async_enabled &&
+ (requested_bits == lrng_get_seed_entropy_osr(true))) {
+ lrng_jent_async_get(eb, requested_bits, __unused);
+ } else {
+ lrng_jent_get(eb, requested_bits, __unused);
+ }
+}
+
+static void lrng_jent_async_init(void)
+{
+ unsigned int i;
+
+ if (!lrng_es_jent_async_enabled)
+ return;
+
+ for (i = 0; i < CONFIG_LRNG_JENT_ENTROPY_BLOCKS; i++)
+ atomic_set(&lrng_jent_async_set[i], buffer_empty);
+}
+
+static void lrng_jent_async_init_complete(void)
+{
+ lrng_jent_async_init();
+ INIT_WORK(&lrng_jent_async_work, lrng_jent_async_monitor);
+}
+
+#if (defined(CONFIG_SYSFS) && defined(CONFIG_LRNG_RUNTIME_ES_CONFIG))
+/* Initialize or deinitialize the Jitter RNG async collection */
+static int lrng_jent_async_sysfs_set(const char *val,
+ const struct kernel_param *kp)
+{
+ static const char val_dflt[] = "1";
+ int ret;
+ bool setting;
+
+ if (!val)
+ val = val_dflt;
+
+ ret = kstrtobool(val, &setting);
+ if (ret)
+ return ret;
+
+ if (setting) {
+ if (!lrng_es_jent_async_enabled) {
+ lrng_es_jent_async_enabled = 1;
+ lrng_jent_async_init();
+ pr_devel("Jitter RNG async data collection enabled\n");
+ lrng_jent_async_monitor_schedule();
+ }
+ } else {
+ if (lrng_es_jent_async_enabled) {
+ lrng_es_jent_async_enabled = 0;
+ lrng_jent_async_fini();
+ pr_devel("Jitter RNG async data collection disabled\n");
+ }
+ }
+
+ return 0;
+}
+
+static const struct kernel_param_ops lrng_jent_async_sysfs = {
+ .set = lrng_jent_async_sysfs_set,
+ .get = param_get_bool,
+};
+module_param_cb(jent_async_enabled, &lrng_jent_async_sysfs,
+ &lrng_es_jent_async_enabled, 0644);
+MODULE_PARM_DESC(lrng_es_jent_async_enabled,
+ "Enable Jitter RNG entropy buffer asynchronous collection");
+#endif /* CONFIG_SYSFS && CONFIG_LRNG_RUNTIME_ES_CONFIG */
+
+#else /* CONFIG_LRNG_JENT_ENTROPY_BLOCKS */
+
+static void lrng_jent_get_check(struct entropy_buf *eb,
+ uint32_t requested_bits, bool __unused)
+{
+ lrng_jent_get(eb, requested_bits, __unused);
+}
+
+static inline void __init lrng_jent_async_init_complete(void) { }
+
+#endif /* CONFIG_LRNG_JENT_ENTROPY_BLOCKS */
+
+static void lrng_jent_es_state(unsigned char *buf, size_t buflen)
+{
+ snprintf(buf, buflen,
+ " Available entropy: %u\n"
+ " Enabled: %s\n"
+		 " Jitter RNG async collection: %s\n"
+ lrng_jent_poolsize(),
+ lrng_jent_initialized ? "true" : "false",
+ lrng_es_jent_async_enabled ? "true" : "false");
+}
+
+static int __init lrng_jent_initialize(void)
+{
+ jent = crypto_alloc_rng("jitterentropy_rng", 0, 0);
+ if (IS_ERR(jent)) {
+ pr_err("Cannot allocate Jitter RNG\n");
+ return PTR_ERR(jent);
+ }
+
+ lrng_jent_async_init_complete();
+
+ lrng_jent_initialized = true;
+ pr_debug("Jitter RNG working on current system\n");
+
+ /*
+	 * In FIPS mode, the Jitter RNG is defined to provide full entropy
+ * unless a different value has been specified at the command line
+ * (i.e. the user overrides the default), and the default value is
+ * larger than zero (if it is zero, it is assumed that an RBG2(P) or
+ * RBG2(NP) construction is attempted that intends to exclude the
+ * Jitter RNG).
+ */
+ if (fips_enabled && CONFIG_LRNG_JENT_ENTROPY_RATE > 0 &&
+ jent_entropy == CONFIG_LRNG_JENT_ENTROPY_RATE)
+ jent_entropy = LRNG_DRNG_SECURITY_STRENGTH_BITS;
+
+ if (jent_entropy)
+ lrng_force_fully_seeded();
+
+ return 0;
+}
+device_initcall(lrng_jent_initialize);
+
+struct lrng_es_cb lrng_es_jent = {
+ .name = "JitterRNG",
+ .get_ent = lrng_jent_get_check,
+ .curr_entropy = lrng_jent_entropylevel,
+ .max_entropy = lrng_jent_poolsize,
+ .state = lrng_jent_es_state,
+ .reset = NULL,
+ .switch_hash = NULL,
+};
diff --git a/drivers/char/lrng/lrng_es_jent.h b/drivers/char/lrng/lrng_es_jent.h
new file mode 100644
index 000000000000..32882d4bdf99
--- /dev/null
+++ b/drivers/char/lrng/lrng_es_jent.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause */
+/*
+ * Copyright (C) 2022, Stephan Mueller <smueller@chronox.de>
+ */
+
+#ifndef _LRNG_ES_JENT_H
+#define _LRNG_ES_JENT_H
+
+#include "lrng_es_mgr_cb.h"
+
+#ifdef CONFIG_LRNG_JENT
+
+extern struct lrng_es_cb lrng_es_jent;
+
+#endif /* CONFIG_LRNG_JENT */
+
+#endif /* _LRNG_ES_JENT_H */
diff --git a/drivers/char/lrng/lrng_es_krng.c b/drivers/char/lrng/lrng_es_krng.c
new file mode 100644
index 000000000000..519ba640cc75
--- /dev/null
+++ b/drivers/char/lrng/lrng_es_krng.c
@@ -0,0 +1,100 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause
+/*
+ * LRNG Fast Entropy Source: Linux kernel RNG (random.c)
+ *
+ * Copyright (C) 2022, Stephan Mueller <smueller@chronox.de>
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/fips.h>
+#include <linux/module.h>
+#include <linux/random.h>
+#include <linux/types.h>
+
+#include "lrng_es_aux.h"
+#include "lrng_es_krng.h"
+
+static u32 krng_entropy = CONFIG_LRNG_KERNEL_RNG_ENTROPY_RATE;
+#ifdef CONFIG_LRNG_RUNTIME_ES_CONFIG
+module_param(krng_entropy, uint, 0644);
+MODULE_PARM_DESC(krng_entropy, "Entropy in bits of 256 data bits from the kernel RNG noise source");
+#endif
+
+static atomic_t lrng_krng_initial_rate = ATOMIC_INIT(0);
+
+static u32 lrng_krng_fips_entropylevel(u32 entropylevel)
+{
+ return fips_enabled ? 0 : entropylevel;
+}
+
+static int lrng_krng_adjust_entropy(void)
+{
+ u32 entropylevel;
+
+ krng_entropy = atomic_read_u32(&lrng_krng_initial_rate);
+
+ entropylevel = lrng_krng_fips_entropylevel(krng_entropy);
+ pr_debug("Kernel RNG is fully seeded, setting entropy rate to %u bits of entropy\n",
+ entropylevel);
+ lrng_drng_force_reseed();
+ if (entropylevel)
+ lrng_es_add_entropy();
+ return 0;
+}
+
+static u32 lrng_krng_entropylevel(u32 requested_bits)
+{
+ static bool init = false;
+
+ if (unlikely(!init) && rng_is_initialized()) {
+ init = true;
+ lrng_krng_adjust_entropy();
+ }
+
+ return lrng_fast_noise_entropylevel(
+ lrng_krng_fips_entropylevel(krng_entropy), requested_bits);
+}
+
+static u32 lrng_krng_poolsize(void)
+{
+ return lrng_krng_entropylevel(lrng_security_strength());
+}
+
+/*
+ * lrng_krng_get() - Get kernel RNG entropy
+ *
+ * @eb: entropy buffer to store entropy
+ * @requested_bits: requested entropy in bits
+ */
+static void lrng_krng_get(struct entropy_buf *eb, u32 requested_bits,
+ bool __unused)
+{
+ u32 ent_bits = lrng_krng_entropylevel(requested_bits);
+
+ get_random_bytes(eb->e[lrng_ext_es_krng], requested_bits >> 3);
+
+ pr_debug("obtained %u bits of entropy from kernel RNG noise source\n",
+ ent_bits);
+
+ eb->e_bits[lrng_ext_es_krng] = ent_bits;
+}
+
+static void lrng_krng_es_state(unsigned char *buf, size_t buflen)
+{
+ snprintf(buf, buflen,
+ " Available entropy: %u\n"
+ " Entropy Rate per 256 data bits: %u\n",
+ lrng_krng_poolsize(),
+ lrng_krng_entropylevel(256));
+}
+
+struct lrng_es_cb lrng_es_krng = {
+ .name = "KernelRNG",
+ .get_ent = lrng_krng_get,
+ .curr_entropy = lrng_krng_entropylevel,
+ .max_entropy = lrng_krng_poolsize,
+ .state = lrng_krng_es_state,
+ .reset = NULL,
+ .switch_hash = NULL,
+};
diff --git a/drivers/char/lrng/lrng_es_krng.h b/drivers/char/lrng/lrng_es_krng.h
new file mode 100644
index 000000000000..cf982b9eea05
--- /dev/null
+++ b/drivers/char/lrng/lrng_es_krng.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause */
+/*
+ * Copyright (C) 2022, Stephan Mueller <smueller@chronox.de>
+ */
+
+#ifndef _LRNG_ES_RANDOM_H
+#define _LRNG_ES_RANDOM_H
+
+#include "lrng_es_mgr_cb.h"
+
+#ifdef CONFIG_LRNG_KERNEL_RNG
+
+extern struct lrng_es_cb lrng_es_krng;
+
+#endif /* CONFIG_LRNG_KERNEL_RNG */
+
+#endif /* _LRNG_ES_RANDOM_H */
diff --git a/drivers/char/lrng/lrng_es_mgr.c b/drivers/char/lrng/lrng_es_mgr.c
new file mode 100644
index 000000000000..8d01bedd3043
--- /dev/null
+++ b/drivers/char/lrng/lrng_es_mgr.c
@@ -0,0 +1,506 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause
+/*
+ * LRNG Entropy sources management
+ *
+ * Copyright (C) 2022 - 2023, Stephan Mueller <smueller@chronox.de>
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/random.h>
+#include <linux/utsname.h>
+#include <linux/workqueue.h>
+#include <asm/archrandom.h>
+
+#include "lrng_drng_atomic.h"
+#include "lrng_drng_mgr.h"
+#include "lrng_es_aux.h"
+#include "lrng_es_cpu.h"
+#include "lrng_es_irq.h"
+#include "lrng_es_jent.h"
+#include "lrng_es_krng.h"
+#include "lrng_es_mgr.h"
+#include "lrng_es_sched.h"
+#include "lrng_interface_dev_common.h"
+#include "lrng_interface_random_kernel.h"
+
+struct lrng_state {
+ bool can_invalidate; /* Can invalidate batched entropy? */
+ bool perform_seedwork; /* Can seed work be performed? */
+ bool lrng_operational; /* Is DRNG operational? */
+ bool lrng_fully_seeded; /* Is DRNG fully seeded? */
+ bool lrng_min_seeded; /* Is DRNG minimally seeded? */
+ bool all_online_numa_node_seeded;/* All NUMA DRNGs seeded? */
+
+ /*
+	 * To ensure that external entropy providers cannot dominate the
+	 * internal noise sources, but also cannot be dominated by them, the
+	 * following booleans are intended to allow the external providers to
+	 * deliver seed data once whenever a DRNG reseed occurs. This
+	 * triggering of the external noise sources is performed even when the
+	 * entropy pool has sufficient entropy.
+ */
+
+ atomic_t boot_entropy_thresh; /* Reseed threshold */
+	struct mutex reseed_in_progress;	/* Flag indicating reseed in progress */
+ struct work_struct lrng_seed_work; /* (re)seed work queue */
+};
+
+static struct lrng_state lrng_state = {
+ false, false, false, false, false, false,
+ .boot_entropy_thresh = ATOMIC_INIT(LRNG_INIT_ENTROPY_BITS),
+ .reseed_in_progress =
+ __MUTEX_INITIALIZER(lrng_state.reseed_in_progress),
+};
+
+/*
+ * If the entropy count falls under this number of bits, then we
+ * should wake up processes which are selecting or polling on write
+ * access to /dev/random.
+ */
+u32 lrng_write_wakeup_bits = (LRNG_WRITE_WAKEUP_ENTROPY << 3);
+
+/*
+ * The entries must be in the same order as defined by enum lrng_internal_es and
+ * enum lrng_external_es
+ */
+struct lrng_es_cb *lrng_es[] = {
+#ifdef CONFIG_LRNG_IRQ
+ &lrng_es_irq,
+#endif
+#ifdef CONFIG_LRNG_SCHED
+ &lrng_es_sched,
+#endif
+#ifdef CONFIG_LRNG_JENT
+ &lrng_es_jent,
+#endif
+#ifdef CONFIG_LRNG_CPU
+ &lrng_es_cpu,
+#endif
+#ifdef CONFIG_LRNG_KERNEL_RNG
+ &lrng_es_krng,
+#endif
+ &lrng_es_aux
+};
+
+static bool ntg1 = false;
+#ifdef CONFIG_LRNG_AIS2031_NTG1_SEEDING_STRATEGY
+module_param(ntg1, bool, 0444);
+MODULE_PARM_DESC(ntg1, "Enable AIS20/31 NTG.1 compliant seeding strategy\n");
+#endif
+
+/* Only panic the kernel on permanent health failure if this variable is true */
+static bool lrng_panic_on_permanent_health_failure = false;
+module_param(lrng_panic_on_permanent_health_failure, bool, 0444);
+MODULE_PARM_DESC(lrng_panic_on_permanent_health_failure, "Panic on reaching permanent health failure - only required if LRNG is part of a FIPS 140-3 module\n");
+
+/********************************** Helper ***********************************/
+
+bool lrng_enforce_panic_on_permanent_health_failure(void)
+{
+ return lrng_panic_on_permanent_health_failure;
+}
+
+bool lrng_ntg1_2022_compliant(void)
+{
+ /* Implies use of /dev/random w/ O_SYNC / getrandom w/ GRND_RANDOM */
+ return ntg1;
+}
+
+void lrng_debug_report_seedlevel(const char *name)
+{
+#ifdef CONFIG_WARN_ALL_UNSEEDED_RANDOM
+ static void *previous = NULL;
+ void *caller = (void *) _RET_IP_;
+ struct lrng_drng *atomic = lrng_get_atomic();
+
+ if (READ_ONCE(previous) == caller)
+ return;
+
+ if (atomic && !atomic->fully_seeded)
+ pr_notice("%pS %s called without reaching minimally seeded level (available entropy %u)\n",
+ caller, name, lrng_avail_entropy());
+
+ WRITE_ONCE(previous, caller);
+#endif
+}
+
+/*
+ * Reading of the LRNG pool is only allowed by one caller. The reading is
+ * only performed to (re)seed DRNGs. Thus, if this "lock" is already taken,
+ * the reseeding operation is in progress. The caller is not intended to wait
+ * but to continue with its other operation.
+ */
+int lrng_pool_trylock(void)
+{
+ return mutex_trylock(&lrng_state.reseed_in_progress);
+}
+
+void lrng_pool_lock(void)
+{
+ mutex_lock(&lrng_state.reseed_in_progress);
+}
+
+void lrng_pool_unlock(void)
+{
+ mutex_unlock(&lrng_state.reseed_in_progress);
+}
+
+/* Set new entropy threshold for reseeding during boot */
+void lrng_set_entropy_thresh(u32 new_entropy_bits)
+{
+ atomic_set(&lrng_state.boot_entropy_thresh, new_entropy_bits);
+}
+
+/*
+ * Reset LRNG state - the entropy counters are reset, but the data that may
+ * or may not have entropy remains in the pools as this data will not hurt.
+ */
+void lrng_reset_state(void)
+{
+ u32 i;
+
+ for_each_lrng_es(i) {
+ if (lrng_es[i]->reset)
+ lrng_es[i]->reset();
+ }
+ lrng_state.lrng_operational = false;
+ lrng_state.lrng_fully_seeded = false;
+ lrng_state.lrng_min_seeded = false;
+ lrng_state.all_online_numa_node_seeded = false;
+ pr_debug("reset LRNG\n");
+}
+
+/* Set flag that all DRNGs are fully seeded */
+void lrng_pool_all_numa_nodes_seeded(bool set)
+{
+ lrng_state.all_online_numa_node_seeded = set;
+ if (set)
+ wake_up_all(&lrng_init_wait);
+}
+
+bool lrng_pool_all_numa_nodes_seeded_get(void)
+{
+ return lrng_state.all_online_numa_node_seeded;
+}
+
+/* Return boolean whether LRNG reached the minimally seeded level */
+bool lrng_state_min_seeded(void)
+{
+ return lrng_state.lrng_min_seeded;
+}
+
+/* Return boolean whether LRNG reached the fully seeded level */
+bool lrng_state_fully_seeded(void)
+{
+ return lrng_state.lrng_fully_seeded;
+}
+
+/* Return boolean whether LRNG is considered fully operational */
+bool lrng_state_operational(void)
+{
+ return lrng_state.lrng_operational;
+}
+
+static void lrng_init_wakeup(void)
+{
+ wake_up_all(&lrng_init_wait);
+ lrng_init_wakeup_dev();
+ lrng_kick_random_ready();
+}
+
+static u32 lrng_avail_entropy_thresh(void)
+{
+ u32 ent_thresh = lrng_security_strength();
+
+ /*
+ * Apply oversampling during initialization according to SP800-90C as
+ * we request a larger buffer from the ES.
+ */
+ if (lrng_sp80090c_compliant() &&
+ !lrng_state.all_online_numa_node_seeded)
+ ent_thresh += LRNG_SEED_BUFFER_INIT_ADD_BITS;
+
+ return ent_thresh;
+}
+
+bool lrng_fully_seeded(bool fully_seeded, u32 collected_entropy,
+ struct entropy_buf *eb)
+{
+	/* AIS20/31 NTG.1: two entropy sources, each delivering 220 bits */
+ if (ntg1) {
+ u32 i, result = 0, ent_thresh = lrng_avail_entropy_thresh();
+
+ for_each_lrng_es(i) {
+ result += (eb ? eb->e_bits[i] :
+ lrng_es[i]->curr_entropy(ent_thresh)) >=
+ LRNG_AIS2031_NPTRNG_MIN_ENTROPY;
+ }
+
+ return (result >= 2);
+ }
+
+ return (collected_entropy >= lrng_get_seed_entropy_osr(fully_seeded));
+}
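+
+/*
+ * NTG.1 worked example: with eb->e_bits == { 180, 230, 240, 0, ... }, two
+ * entropy sources meet the 220-bit minimum, so the check above yields
+ * result == 2 and the LRNG counts as fully seeded regardless of the
+ * SP800-90C oversampling threshold.
+ */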
+
+u32 lrng_entropy_rate_eb(struct entropy_buf *eb)
+{
+ u32 i, collected_entropy = 0;
+
+ for_each_lrng_es(i)
+ collected_entropy += eb->e_bits[i];
+
+ return collected_entropy;
+}
+
+/* Mark one DRNG as not fully seeded */
+void lrng_unset_fully_seeded(struct lrng_drng *drng)
+{
+ drng->fully_seeded = false;
+ lrng_pool_all_numa_nodes_seeded(false);
+
+ /*
+ * The init DRNG instance must always be fully seeded as this instance
+ * is the fall-back if any of the per-NUMA node DRNG instances is
+ * insufficiently seeded. Thus, we mark the entire LRNG as
+ * non-operational if the initial DRNG becomes not fully seeded.
+ */
+ if (drng == lrng_drng_init_instance() && lrng_state_operational()) {
+ pr_debug("LRNG set to non-operational\n");
+ lrng_state.lrng_operational = false;
+ lrng_state.lrng_fully_seeded = false;
+
+ /* If sufficient entropy is available, reseed now. */
+ lrng_es_add_entropy();
+ }
+}
+
+/* Policy to enable LRNG operational mode */
+static void lrng_set_operational(void)
+{
+ /*
+ * LRNG is operational if the initial DRNG is fully seeded. This state
+ * can only occur if either the external entropy sources provided
+ * sufficient entropy, or the SP800-90B startup test completed for
+	 * the internal ES so that they also supply entropy data.
+ */
+ if (lrng_state.lrng_fully_seeded) {
+ lrng_state.lrng_operational = true;
+ lrng_init_wakeup();
+ pr_info("LRNG fully operational\n");
+ }
+}
+
+/* Available entropy in the entire LRNG considering all entropy sources */
+u32 lrng_avail_entropy(void)
+{
+ u32 i, ent = 0, ent_thresh = lrng_avail_entropy_thresh();
+
+ BUILD_BUG_ON(ARRAY_SIZE(lrng_es) != lrng_ext_es_last);
+ for_each_lrng_es(i)
+ ent += lrng_es[i]->curr_entropy(ent_thresh);
+ return ent;
+}
+
+u32 lrng_avail_entropy_aux(void)
+{
+ u32 ent_thresh = lrng_avail_entropy_thresh();
+
+ return lrng_es[lrng_ext_es_aux]->curr_entropy(ent_thresh);
+}
+
+/*
+ * lrng_init_ops() - Set seed stages of LRNG
+ *
+ * Set the slow noise source reseed trigger threshold. The initial threshold
+ * is set to the minimum data size that can be read from the pool: a word. Upon
+ * reaching this value, the next seed threshold of 128 bits is set, followed
+ * by 256 bits.
+ *
+ * @eb: buffer containing the size of entropy currently injected into DRNG - if
+ * NULL, the function obtains the available entropy from the ES.
+ */
+void lrng_init_ops(struct entropy_buf *eb)
+{
+ struct lrng_state *state = &lrng_state;
+ u32 i, requested_bits, seed_bits = 0;
+
+ if (state->lrng_operational)
+ return;
+
+ requested_bits = ntg1 ?
+ /* Approximation so that two ES should deliver 220 bits each */
+ (lrng_avail_entropy() + LRNG_AIS2031_NPTRNG_MIN_ENTROPY) :
+ /* Apply SP800-90C oversampling if applicable */
+ lrng_get_seed_entropy_osr(state->all_online_numa_node_seeded);
+
+ if (eb) {
+ seed_bits = lrng_entropy_rate_eb(eb);
+ } else {
+ u32 ent_thresh = lrng_avail_entropy_thresh();
+
+ for_each_lrng_es(i)
+ seed_bits += lrng_es[i]->curr_entropy(ent_thresh);
+ }
+
+ /* DRNG is seeded with full security strength */
+ if (state->lrng_fully_seeded) {
+ lrng_set_operational();
+ lrng_set_entropy_thresh(requested_bits);
+ } else if (lrng_fully_seeded(state->all_online_numa_node_seeded,
+ seed_bits, eb)) {
+ if (state->can_invalidate)
+ invalidate_batched_entropy();
+
+ state->lrng_fully_seeded = true;
+ lrng_set_operational();
+ state->lrng_min_seeded = true;
+ pr_info("LRNG fully seeded with %u bits of entropy\n",
+ seed_bits);
+ lrng_set_entropy_thresh(requested_bits);
+ } else if (!state->lrng_min_seeded) {
+
+ /* DRNG is seeded with at least 128 bits of entropy */
+ if (seed_bits >= LRNG_MIN_SEED_ENTROPY_BITS) {
+ if (state->can_invalidate)
+ invalidate_batched_entropy();
+
+ state->lrng_min_seeded = true;
+ pr_info("LRNG minimally seeded with %u bits of entropy\n",
+ seed_bits);
+ lrng_set_entropy_thresh(requested_bits);
+ lrng_init_wakeup();
+
+ /* DRNG is seeded with at least LRNG_INIT_ENTROPY_BITS bits */
+ } else if (seed_bits >= LRNG_INIT_ENTROPY_BITS) {
+ pr_info("LRNG initial entropy level %u bits of entropy\n",
+ seed_bits);
+ lrng_set_entropy_thresh(LRNG_MIN_SEED_ENTROPY_BITS);
+ }
+ }
+}
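+
+/*
+ * Seed level progression sketch (assuming LRNG_INIT_ENTROPY_BITS == 32,
+ * LRNG_MIN_SEED_ENTROPY_BITS == 128 and a 256-bit security strength): the
+ * reseed threshold starts at 32 bits, is raised to 128 bits once the initial
+ * entropy level is reached, and finally to the full (possibly SP800-90C
+ * oversampled) request size once the LRNG is minimally or fully seeded.
+ */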
+
+void __init lrng_rand_initialize_early(void)
+{
+ struct seed {
+ unsigned long data[((LRNG_MAX_DIGESTSIZE +
+ sizeof(unsigned long) - 1) /
+ sizeof(unsigned long))];
+ struct new_utsname utsname;
+ } seed __aligned(LRNG_KCAPI_ALIGN);
+ size_t longs = 0;
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(seed.data); i += longs) {
+ longs = arch_get_random_seed_longs(seed.data + i,
+ ARRAY_SIZE(seed.data) - i);
+ if (longs)
+ continue;
+ longs = arch_get_random_longs(seed.data + i,
+ ARRAY_SIZE(seed.data) - i);
+ if (longs)
+ continue;
+ longs = 1;
+ }
+ memcpy(&seed.utsname, init_utsname(), sizeof(*(init_utsname())));
+
+ lrng_pool_insert_aux((u8 *)&seed, sizeof(seed), 0);
+ memzero_explicit(&seed, sizeof(seed));
+
+ lrng_force_fully_seeded();
+}
+
+void __init lrng_rand_initialize(void)
+{
+ unsigned long entropy = random_get_entropy();
+ ktime_t time = ktime_get_real();
+
+ lrng_pool_insert_aux((u8 *)&entropy, sizeof(entropy), 0);
+ lrng_pool_insert_aux((u8 *)&time, sizeof(time), 0);
+
+ /* Initialize the seed work queue */
+ INIT_WORK(&lrng_state.lrng_seed_work, lrng_drng_seed_work);
+ lrng_state.perform_seedwork = true;
+
+ invalidate_batched_entropy();
+
+ lrng_state.can_invalidate = true;
+}
+
+#ifndef CONFIG_LRNG_RANDOM_IF
+static int __init lrng_rand_initialize_call(void)
+{
+ lrng_rand_initialize_early();
+ lrng_rand_initialize();
+ return 0;
+}
+
+early_initcall(lrng_rand_initialize_call);
+#endif
+
+/* Interface requesting a reseed of the DRNG */
+void lrng_es_add_entropy(void)
+{
+ /*
+ * Once all DRNGs are fully seeded, the system-triggered arrival of
+ * entropy will not cause any reseeding any more.
+ */
+ if (likely(lrng_state.all_online_numa_node_seeded))
+ return;
+
+ /* Only trigger the DRNG reseed if we have collected entropy. */
+ if (lrng_avail_entropy() <
+ atomic_read_u32(&lrng_state.boot_entropy_thresh))
+ return;
+
+ /* Ensure that the seeding only occurs once at any given time. */
+ if (!lrng_pool_trylock())
+ return;
+
+ /* Seed the DRNG with any available noise. */
+ if (lrng_state.perform_seedwork)
+ schedule_work(&lrng_state.lrng_seed_work);
+ else
+ lrng_drng_seed_work(NULL);
+}
+
+/* Fill the seed buffer with data from the noise sources */
+void lrng_fill_seed_buffer(struct entropy_buf *eb, u32 requested_bits,
+ bool force)
+{
+ struct lrng_state *state = &lrng_state;
+ u32 i, req_ent = lrng_sp80090c_compliant() ?
+ lrng_security_strength() : LRNG_MIN_SEED_ENTROPY_BITS;
+
+	/* Guarantee that the requested number of bits is a whole number of bytes */
+ BUILD_BUG_ON(LRNG_DRNG_SECURITY_STRENGTH_BITS % 8);
+
+ /* always reseed the DRNG with the current time stamp */
+ eb->now = random_get_entropy();
+
+ /*
+ * Require at least 128 bits of entropy for any reseed. If the LRNG is
+ * operated SP800-90C compliant we want to comply with SP800-90A section
+ * 9.2 mandating that DRNG is reseeded with the security strength.
+ */
+ if (!force &&
+ state->lrng_fully_seeded && (lrng_avail_entropy() < req_ent)) {
+ for_each_lrng_es(i)
+ eb->e_bits[i] = 0;
+
+ goto wakeup;
+ }
+
+ /* Concatenate the output of the entropy sources. */
+ for_each_lrng_es(i) {
+ lrng_es[i]->get_ent(eb, requested_bits,
+ state->lrng_fully_seeded);
+ }
+
+ /* allow external entropy provider to provide seed */
+ lrng_state_exseed_allow_all();
+
+wakeup:
+ lrng_writer_wakeup();
+}
diff --git a/drivers/char/lrng/lrng_es_mgr.h b/drivers/char/lrng/lrng_es_mgr.h
new file mode 100644
index 000000000000..7c4fbcb595f4
--- /dev/null
+++ b/drivers/char/lrng/lrng_es_mgr.h
@@ -0,0 +1,56 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause */
+/*
+ * Copyright (C) 2022, Stephan Mueller <smueller@chronox.de>
+ */
+
+#ifndef _LRNG_ES_MGR_H
+#define _LRNG_ES_MGR_H
+
+#include "lrng_es_mgr_cb.h"
+
+/*************************** General LRNG parameter ***************************/
+
+#define LRNG_DRNG_BLOCKSIZE 64 /* Maximum of DRNG block sizes */
+
+/* Helper to concatenate a macro with an integer type */
+#define LRNG_PASTER(x, y) x ## y
+#define LRNG_UINT32_C(x) LRNG_PASTER(x, U)
+
+/************************* Entropy sources management *************************/
+
+extern struct lrng_es_cb *lrng_es[];
+
+#define for_each_lrng_es(ctr) \
+ for ((ctr) = 0; (ctr) < lrng_ext_es_last; (ctr)++)
+
+bool lrng_enforce_panic_on_permanent_health_failure(void);
+bool lrng_ntg1_2022_compliant(void);
+bool lrng_pool_all_numa_nodes_seeded_get(void);
+bool lrng_state_min_seeded(void);
+void lrng_debug_report_seedlevel(const char *name);
+void lrng_rand_initialize_early(void);
+void lrng_rand_initialize(void);
+bool lrng_state_operational(void);
+
+extern u32 lrng_write_wakeup_bits;
+void lrng_set_entropy_thresh(u32 new);
+u32 lrng_avail_entropy(void);
+u32 lrng_avail_entropy_aux(void);
+void lrng_reset_state(void);
+
+bool lrng_state_fully_seeded(void);
+
+int lrng_pool_trylock(void);
+void lrng_pool_lock(void);
+void lrng_pool_unlock(void);
+void lrng_pool_all_numa_nodes_seeded(bool set);
+
+bool lrng_fully_seeded(bool fully_seeded, u32 collected_entropy,
+ struct entropy_buf *eb);
+u32 lrng_entropy_rate_eb(struct entropy_buf *eb);
+void lrng_unset_fully_seeded(struct lrng_drng *drng);
+void lrng_fill_seed_buffer(struct entropy_buf *eb, u32 requested_bits,
+ bool force);
+void lrng_init_ops(struct entropy_buf *eb);
+
+#endif /* _LRNG_ES_MGR_H */
diff --git a/drivers/char/lrng/lrng_es_mgr_cb.h b/drivers/char/lrng/lrng_es_mgr_cb.h
new file mode 100644
index 000000000000..08b24e1b7766
--- /dev/null
+++ b/drivers/char/lrng/lrng_es_mgr_cb.h
@@ -0,0 +1,87 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause */
+/*
+ * Copyright (C) 2022, Stephan Mueller <smueller@chronox.de>
+ *
+ * Definition of an entropy source.
+ */
+
+#ifndef _LRNG_ES_MGR_CB_H
+#define _LRNG_ES_MGR_CB_H
+
+#include <linux/lrng.h>
+
+#include "lrng_definitions.h"
+#include "lrng_drng_mgr.h"
+
+enum lrng_internal_es {
+#ifdef CONFIG_LRNG_IRQ
+ lrng_int_es_irq, /* IRQ-based entropy source */
+#endif
+#ifdef CONFIG_LRNG_SCHED
+ lrng_int_es_sched, /* Scheduler entropy source */
+#endif
+ lrng_int_es_last, /* MUST be the last entry */
+};
+
+enum lrng_external_es {
+ lrng_ext_link = lrng_int_es_last - 1, /* Link entry */
+#ifdef CONFIG_LRNG_JENT
+ lrng_ext_es_jitter, /* Jitter RNG */
+#endif
+#ifdef CONFIG_LRNG_CPU
+ lrng_ext_es_cpu, /* CPU-based, e.g. RDSEED */
+#endif
+#ifdef CONFIG_LRNG_KERNEL_RNG
+ lrng_ext_es_krng, /* random.c */
+#endif
+ lrng_ext_es_aux, /* MUST BE LAST ES! */
+ lrng_ext_es_last /* MUST be the last entry */
+};
+
+struct entropy_buf {
+ u8 e[lrng_ext_es_last][LRNG_DRNG_INIT_SEED_SIZE_BYTES];
+ u32 now, e_bits[lrng_ext_es_last];
+};
+
+/*
+ * struct lrng_es_cb - callback defining an entropy source
+ * @name: Name of the entropy source.
+ * @get_ent: Fetch entropy into the entropy_buf. The ES shall only deliver
+ * data if its internal initialization is complete, including any
+ * SP800-90B startup testing or similar.
+ * @curr_entropy: Return amount of currently available entropy.
+ * @max_entropy: Maximum amount of entropy the entropy source is able to
+ * maintain.
+ * @state: Buffer with human-readable ES state.
+ * @reset: Reset entropy source (drop all entropy and reinitialize).
+ * This callback may be NULL.
+ * @switch_hash: callback to switch from an old hash callback definition to
+ * a new one. This callback may be NULL.
+ */
+struct lrng_es_cb {
+ const char *name;
+ void (*get_ent)(struct entropy_buf *eb, u32 requested_bits,
+ bool fully_seeded);
+ u32 (*curr_entropy)(u32 requested_bits);
+ u32 (*max_entropy)(void);
+ void (*state)(unsigned char *buf, size_t buflen);
+ void (*reset)(void);
+ int (*switch_hash)(struct lrng_drng *drng, int node,
+ const struct lrng_hash_cb *new_cb, void *new_hash,
+ const struct lrng_hash_cb *old_cb);
+};
+
+/* Allow entropy sources to tell the ES manager that new entropy is there */
+void lrng_es_add_entropy(void);
+
+/* Cap to maximum entropy that can ever be generated with given hash */
+#define lrng_cap_requested(__digestsize_bits, __requested_bits) \
+ do { \
+ if (__digestsize_bits < __requested_bits) { \
+ pr_debug("Cannot satisfy requested entropy %u due to insufficient hash size %u\n",\
+ __requested_bits, __digestsize_bits); \
+ __requested_bits = __digestsize_bits; \
+ } \
+ } while (0)
+
+#endif /* _LRNG_ES_MGR_CB_H */
diff --git a/drivers/char/lrng/lrng_es_sched.c b/drivers/char/lrng/lrng_es_sched.c
new file mode 100644
index 000000000000..333c5b1ff4ea
--- /dev/null
+++ b/drivers/char/lrng/lrng_es_sched.c
@@ -0,0 +1,566 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause
+/*
+ * LRNG Slow Entropy Source: Scheduler-based data collection
+ *
+ * Copyright (C) 2022, Stephan Mueller <smueller@chronox.de>
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <asm/irq_regs.h>
+#include <asm/ptrace.h>
+#include <linux/lrng.h>
+#include <crypto/hash.h>
+#include <linux/module.h>
+#include <linux/random.h>
+
+#include "lrng_es_aux.h"
+#include "lrng_es_sched.h"
+#include "lrng_es_timer_common.h"
+#include "lrng_health.h"
+#include "lrng_numa.h"
+#include "lrng_testing.h"
+
+/*
+ * Number of scheduler-based context switches to be recorded to assume that
+ * DRNG security strength bits of entropy are received.
+ * Note: a value below the DRNG security strength should not be defined as this
+ * may imply the DRNG can never be fully seeded in case other noise
+ * sources are unavailable.
+ */
+#define LRNG_SCHED_ENTROPY_BITS \
+ LRNG_UINT32_C(CONFIG_LRNG_SCHED_ENTROPY_RATE)
+
+/* Number of events required for LRNG_DRNG_SECURITY_STRENGTH_BITS entropy */
+static u32 lrng_sched_entropy_bits = LRNG_SCHED_ENTROPY_BITS;
+
+static u32 sched_entropy __read_mostly = LRNG_SCHED_ENTROPY_BITS;
+#ifdef CONFIG_LRNG_RUNTIME_ES_CONFIG
+module_param(sched_entropy, uint, 0444);
+MODULE_PARM_DESC(sched_entropy,
+ "How many scheduler-based context switches must be collected for obtaining 256 bits of entropy\n");
+#endif
+
+/* Per-CPU array holding concatenated entropy events */
+static DEFINE_PER_CPU(u32 [LRNG_DATA_ARRAY_SIZE], lrng_sched_array)
+ __aligned(LRNG_KCAPI_ALIGN);
+static DEFINE_PER_CPU(u32, lrng_sched_array_ptr) = 0;
+static DEFINE_PER_CPU(atomic_t, lrng_sched_array_events) = ATOMIC_INIT(0);
+
+/*
+ * Per-CPU entropy pool with compressed entropy event
+ *
+ * The per-CPU entropy pool is defined as the hash state. New data is simply
+ * inserted into the entropy pool by performing a hash update operation.
+ * To read the entropy pool, a hash final must be invoked. However, before
+ * the entropy pool is released again after a hash final, the hash init must
+ * be performed.
+ */
+static DEFINE_PER_CPU(u8 [LRNG_POOL_SIZE], lrng_sched_pool)
+ __aligned(LRNG_KCAPI_ALIGN);
+/*
+ * Lock to allow other CPUs to read the pool - as this is only done during
+ * reseed which is infrequent, this lock is hardly contended.
+ */
+static DEFINE_PER_CPU(spinlock_t, lrng_sched_lock);
+static DEFINE_PER_CPU(bool, lrng_sched_lock_init) = false;
+
+static bool lrng_sched_pool_online(int cpu)
+{
+ return per_cpu(lrng_sched_lock_init, cpu);
+}
+
+static void __init lrng_sched_check_compression_state(void)
+{
+ /* One pool should hold sufficient entropy for disabled compression */
+ u32 max_ent = min_t(u32, lrng_get_digestsize(),
+ lrng_data_to_entropy(LRNG_DATA_NUM_VALUES,
+ lrng_sched_entropy_bits));
+ if (max_ent < lrng_security_strength()) {
+ pr_devel("Scheduler entropy source will never provide %u bits of entropy required for fully seeding the DRNG all by itself\n",
+ lrng_security_strength());
+ }
+}
+
+void __init lrng_sched_es_init(bool highres_timer)
+{
+ /* Set a minimum number of scheduler events that must be collected */
+ sched_entropy = max_t(u32, LRNG_SCHED_ENTROPY_BITS, sched_entropy);
+
+ if (highres_timer) {
+ lrng_sched_entropy_bits = sched_entropy;
+ } else {
+ u32 new_entropy = sched_entropy * LRNG_ES_OVERSAMPLING_FACTOR;
+
+ lrng_sched_entropy_bits = (sched_entropy < new_entropy) ?
+ new_entropy : sched_entropy;
+ pr_warn("operating without high-resolution timer and applying oversampling factor %u\n",
+ LRNG_ES_OVERSAMPLING_FACTOR);
+ }
+
+ lrng_sched_check_compression_state();
+}
+
+static u32 lrng_sched_avail_pool_size(void)
+{
+ u32 max_pool = lrng_get_digestsize(),
+ max_size = min_t(u32, max_pool, LRNG_DATA_NUM_VALUES);
+ int cpu;
+
+ for_each_online_cpu(cpu)
+ max_size += max_pool;
+
+ return max_size;
+}
+
+/* Return entropy of unused scheduler events present in all per-CPU pools. */
+static u32 lrng_sched_avail_entropy(u32 __unused)
+{
+ u32 digestsize_events, events = 0;
+ int cpu;
+
+ /* Only deliver entropy when SP800-90B self test is completed */
+ if (!lrng_sp80090b_startup_complete_es(lrng_int_es_sched))
+ return 0;
+
+ /* Obtain the cap of maximum numbers of scheduler events we count */
+ digestsize_events = lrng_entropy_to_data(lrng_get_digestsize(),
+ lrng_sched_entropy_bits);
+ /* Cap to max. number of scheduler events the array can hold */
+ digestsize_events = min_t(u32, digestsize_events, LRNG_DATA_NUM_VALUES);
+
+ for_each_online_cpu(cpu) {
+ events += min_t(u32, digestsize_events,
+ atomic_read_u32(per_cpu_ptr(&lrng_sched_array_events,
+ cpu)));
+ }
+
+ /* Consider oversampling rate */
+ return lrng_reduce_by_osr(
+ lrng_data_to_entropy(events, lrng_sched_entropy_bits));
+}
+
+/*
+ * Reset all per-CPU pools - reset entropy estimator but leave the pool data
+ * that may or may not have entropy unchanged.
+ */
+static void lrng_sched_reset(void)
+{
+ int cpu;
+
+ /* Trigger GCD calculation anew. */
+ lrng_gcd_set(0);
+
+ for_each_online_cpu(cpu)
+ atomic_set(per_cpu_ptr(&lrng_sched_array_events, cpu), 0);
+}
+
+/*
+ * Trigger a switch of the hash implementation for the per-CPU pool.
+ *
+ * For each per-CPU pool, obtain the message digest with the old hash
+ * implementation, initialize the per-CPU pool again with the new hash
+ * implementation and inject the message digest into the new state.
+ *
+ * Assumption: the caller must guarantee that the new_cb is available during the
+ * entire operation (e.g. it must hold the lock against pointer updating).
+ */
+static int
+lrng_sched_switch_hash(struct lrng_drng *drng, int node,
+ const struct lrng_hash_cb *new_cb, void *new_hash,
+ const struct lrng_hash_cb *old_cb)
+{
+ u8 digest[LRNG_MAX_DIGESTSIZE];
+ u32 digestsize_events, found_events;
+ int ret = 0, cpu;
+
+ if (!IS_ENABLED(CONFIG_LRNG_SWITCH))
+ return -EOPNOTSUPP;
+
+ for_each_online_cpu(cpu) {
+ struct shash_desc *pcpu_shash;
+
+ /*
+ * Only switch the per-CPU pools for the current node because
+ * the hash_cb only applies NUMA-node-wide.
+ */
+ if (cpu_to_node(cpu) != node || !lrng_sched_pool_online(cpu))
+ continue;
+
+ pcpu_shash = (struct shash_desc *)per_cpu_ptr(lrng_sched_pool,
+ cpu);
+
+ digestsize_events = old_cb->hash_digestsize(pcpu_shash);
+ digestsize_events = lrng_entropy_to_data(digestsize_events << 3,
+ lrng_sched_entropy_bits);
+
+ if (pcpu_shash->tfm == new_hash)
+ continue;
+
+ /* Get the per-CPU pool hash with old digest ... */
+ ret = old_cb->hash_final(pcpu_shash, digest) ?:
+ /* ... re-initialize the hash with the new digest ... */
+ new_cb->hash_init(pcpu_shash, new_hash) ?:
+ /*
+ * ... feed the old hash into the new state. We may feed
+ * uninitialized memory into the new state, but this is
+ * considered no issue and even good as we have some more
+ * uncertainty here.
+ */
+ new_cb->hash_update(pcpu_shash, digest, sizeof(digest));
+ if (ret)
+ goto out;
+
+ /*
+ * In case the new digest is larger than the old one, cap
+ * the available entropy to the old message digest used to
+ * process the existing data.
+ */
+ found_events = atomic_xchg_relaxed(
+ per_cpu_ptr(&lrng_sched_array_events, cpu), 0);
+ found_events = min_t(u32, found_events, digestsize_events);
+ atomic_add_return_relaxed(found_events,
+ per_cpu_ptr(&lrng_sched_array_events, cpu));
+
+ pr_debug("Re-initialize per-CPU scheduler entropy pool for CPU %d on NUMA node %d with hash %s\n",
+ cpu, node, new_cb->hash_name());
+ }
+
+out:
+ memzero_explicit(digest, sizeof(digest));
+ return ret;
+}
+
+static u32
+lrng_sched_pool_hash_one(const struct lrng_hash_cb *pcpu_hash_cb,
+ void *pcpu_hash, int cpu, u8 *digest, u32 *digestsize)
+{
+ struct shash_desc *pcpu_shash =
+ (struct shash_desc *)per_cpu_ptr(lrng_sched_pool, cpu);
+ spinlock_t *lock = per_cpu_ptr(&lrng_sched_lock, cpu);
+ unsigned long flags;
+ u32 digestsize_events, found_events;
+
+ if (unlikely(!per_cpu(lrng_sched_lock_init, cpu))) {
+ if (pcpu_hash_cb->hash_init(pcpu_shash, pcpu_hash)) {
+ pr_warn("Initialization of hash failed\n");
+ return 0;
+ }
+ spin_lock_init(lock);
+ per_cpu(lrng_sched_lock_init, cpu) = true;
+ pr_debug("Initializing per-CPU scheduler entropy pool for CPU %d with hash %s\n",
+ raw_smp_processor_id(), pcpu_hash_cb->hash_name());
+ }
+
+ /* Lock guarding against reading / writing to per-CPU pool */
+ spin_lock_irqsave(lock, flags);
+
+ *digestsize = pcpu_hash_cb->hash_digestsize(pcpu_hash);
+ digestsize_events = lrng_entropy_to_data(*digestsize << 3,
+ lrng_sched_entropy_bits);
+
+ /* Obtain entropy statement like for the entropy pool */
+ found_events = atomic_xchg_relaxed(
+ per_cpu_ptr(&lrng_sched_array_events, cpu), 0);
+ /* Cap to maximum amount of data we can hold in hash */
+ found_events = min_t(u32, found_events, digestsize_events);
+
+ /* Cap to maximum amount of data we can hold in array */
+ found_events = min_t(u32, found_events, LRNG_DATA_NUM_VALUES);
+
+	/* Feed all not-yet-compressed data in the data array into the hash, ... */
+ if (pcpu_hash_cb->hash_update(pcpu_shash,
+ (u8 *)per_cpu_ptr(lrng_sched_array, cpu),
+ LRNG_DATA_ARRAY_SIZE * sizeof(u32)) ?:
+ /* ... get the per-CPU pool digest, ... */
+ pcpu_hash_cb->hash_final(pcpu_shash, digest) ?:
+ /* ... re-initialize the hash, ... */
+ pcpu_hash_cb->hash_init(pcpu_shash, pcpu_hash) ?:
+ /* ... feed the old hash into the new state. */
+ pcpu_hash_cb->hash_update(pcpu_shash, digest, *digestsize))
+ found_events = 0;
+
+ spin_unlock_irqrestore(lock, flags);
+ return found_events;
+}
+
+/*
+ * Hash all per-CPU arrays and return the digest to be used as seed data for
+ * seeding a DRNG. The caller must guarantee backtracking resistance.
+ * The function will only copy as much data as entropy is available into the
+ * caller-provided output buffer.
+ *
+ * This function handles the translation from the number of received scheduler
+ * events into an entropy statement. The conversion depends on
+ * LRNG_SCHED_ENTROPY_BITS which defines how many scheduler events must be
+ * received to obtain 256 bits of entropy. With this value, the function
+ * lrng_data_to_entropy converts a given data size (received scheduler events,
+ * requested amount of data, etc.) into an entropy statement.
+ * lrng_entropy_to_data does the reverse.
+ *
+ * @eb: entropy buffer to store entropy
+ * @requested_bits: Requested amount of entropy
+ * @fully_seeded: indicator whether LRNG is fully seeded
+ */
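+/*
+ * Worked example (hypothetical rate, assuming the linear scaling of events
+ * per LRNG_DRNG_SECURITY_STRENGTH_BITS performed by lrng_data_to_entropy()
+ * and lrng_entropy_to_data()): with a rate of 4096 events per 256 bits,
+ * 1024 collected events are credited with 1024 * 256 / 4096 = 64 bits of
+ * entropy; conversely, a request for 64 bits maps back to 1024 events.
+ */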
+static void lrng_sched_pool_hash(struct entropy_buf *eb, u32 requested_bits,
+ bool fully_seeded)
+{
+ SHASH_DESC_ON_STACK(shash, NULL);
+ const struct lrng_hash_cb *hash_cb;
+ struct lrng_drng **lrng_drng = lrng_drng_instances();
+ struct lrng_drng *drng = lrng_drng_init_instance();
+ u8 digest[LRNG_MAX_DIGESTSIZE];
+ unsigned long flags, flags2;
+ u32 found_events, collected_events = 0, collected_ent_bits,
+ requested_events, returned_ent_bits;
+ int ret, cpu;
+ void *hash;
+
+ /* Only deliver entropy when SP800-90B self test is completed */
+ if (!lrng_sp80090b_startup_complete_es(lrng_int_es_sched)) {
+ eb->e_bits[lrng_int_es_sched] = 0;
+ return;
+ }
+
+ /* Lock guarding replacement of per-NUMA hash */
+ read_lock_irqsave(&drng->hash_lock, flags);
+
+ hash_cb = drng->hash_cb;
+ hash = drng->hash;
+
+	/* The hash state is filled with all per-CPU pool hashes. */
+ ret = hash_cb->hash_init(shash, hash);
+ if (ret)
+ goto err;
+
+ /* Cap to maximum entropy that can ever be generated with given hash */
+ lrng_cap_requested(hash_cb->hash_digestsize(hash) << 3, requested_bits);
+ requested_events = lrng_entropy_to_data(requested_bits +
+ lrng_compress_osr(),
+ lrng_sched_entropy_bits);
+
+ /*
+ * Harvest entropy from each per-CPU hash state - even though we may
+ * have collected sufficient entropy, we will hash all per-CPU pools.
+ */
+ for_each_online_cpu(cpu) {
+ struct lrng_drng *pcpu_drng = drng;
+ u32 digestsize, unused_events = 0;
+ int node = cpu_to_node(cpu);
+
+ if (lrng_drng && lrng_drng[node])
+ pcpu_drng = lrng_drng[node];
+
+ if (pcpu_drng == drng) {
+ found_events = lrng_sched_pool_hash_one(hash_cb, hash,
+ cpu, digest,
+ &digestsize);
+ } else {
+ read_lock_irqsave(&pcpu_drng->hash_lock, flags2);
+ found_events =
+ lrng_sched_pool_hash_one(pcpu_drng->hash_cb,
+ pcpu_drng->hash, cpu,
+ digest, &digestsize);
+ read_unlock_irqrestore(&pcpu_drng->hash_lock, flags2);
+ }
+
+		/* Inject the per-CPU pool digest into the overall hash state */
+ ret = hash_cb->hash_update(shash, digest, digestsize);
+ if (ret)
+ goto err;
+
+ collected_events += found_events;
+ if (collected_events > requested_events) {
+ unused_events = collected_events - requested_events;
+ atomic_add_return_relaxed(unused_events,
+ per_cpu_ptr(&lrng_sched_array_events, cpu));
+ collected_events = requested_events;
+ }
+ pr_debug("%u scheduler-based events used from entropy array of CPU %d, %u scheduler-based events remain unused\n",
+ found_events - unused_events, cpu, unused_events);
+ }
+
+ ret = hash_cb->hash_final(shash, digest);
+ if (ret)
+ goto err;
+
+ collected_ent_bits = lrng_data_to_entropy(collected_events,
+ lrng_sched_entropy_bits);
+ /* Apply oversampling: discount requested oversampling rate */
+ returned_ent_bits = lrng_reduce_by_osr(collected_ent_bits);
+
+ pr_debug("obtained %u bits by collecting %u bits of entropy from scheduler-based noise source\n",
+ returned_ent_bits, collected_ent_bits);
+
+ /*
+ * Truncate to available entropy as implicitly allowed by SP800-90B
+ * section 3.1.5.1.1 table 1 which awards truncated hashes full
+ * entropy.
+ *
+ * During boot time, we read requested_bits data with
+ * returned_ent_bits entropy. In case our conservative entropy
+ * estimate underestimates the available entropy we can transport as
+ * much available entropy as possible.
+ */
+ memcpy(eb->e[lrng_int_es_sched], digest,
+ fully_seeded ? returned_ent_bits >> 3 : requested_bits >> 3);
+ eb->e_bits[lrng_int_es_sched] = returned_ent_bits;
+
+out:
+ hash_cb->hash_desc_zero(shash);
+ read_unlock_irqrestore(&drng->hash_lock, flags);
+ memzero_explicit(digest, sizeof(digest));
+ return;
+
+err:
+ eb->e_bits[lrng_int_es_sched] = 0;
+ goto out;
+}
+
+/*
+ * Concatenate full 32 bit word at the end of time array even when current
+ * ptr is not aligned to sizeof(data).
+ */
+static void lrng_sched_array_add_u32(u32 data)
+{
+ /* Increment pointer by number of slots taken for input value */
+ u32 pre_ptr, mask, ptr = this_cpu_add_return(lrng_sched_array_ptr,
+ LRNG_DATA_SLOTS_PER_UINT);
+ unsigned int pre_array;
+
+ lrng_data_split_u32(&ptr, &pre_ptr, &mask);
+
+ /* MSB of data go into previous unit */
+ pre_array = lrng_data_idx2array(pre_ptr);
+ /* zeroization of slot to ensure the following OR adds the data */
+ this_cpu_and(lrng_sched_array[pre_array], ~(0xffffffff & ~mask));
+ this_cpu_or(lrng_sched_array[pre_array], data & ~mask);
+
+ /*
+ * Continuous compression is not allowed for scheduler noise source,
+ * so do not call lrng_sched_array_to_hash here.
+ */
+
+ /* LSB of data go into current unit */
+ this_cpu_write(lrng_sched_array[lrng_data_idx2array(ptr)],
+ data & mask);
+}
+
+/* Concatenate data of max LRNG_DATA_SLOTSIZE_MASK at the end of time array */
+static void lrng_sched_array_add_slot(u32 data)
+{
+ /* Get slot */
+ u32 ptr = this_cpu_inc_return(lrng_sched_array_ptr) &
+ LRNG_DATA_WORD_MASK;
+ unsigned int array = lrng_data_idx2array(ptr);
+ unsigned int slot = lrng_data_idx2slot(ptr);
+
+ /* zeroization of slot to ensure the following OR adds the data */
+ this_cpu_and(lrng_sched_array[array],
+ ~(lrng_data_slot_val(0xffffffff & LRNG_DATA_SLOTSIZE_MASK,
+ slot)));
+ /* Store data into slot */
+ this_cpu_or(lrng_sched_array[array], lrng_data_slot_val(data, slot));
+
+ /*
+ * Continuous compression is not allowed for scheduler noise source,
+ * so do not call lrng_sched_array_to_hash here.
+ */
+}
+
+static void
+lrng_time_process_common(u32 time, void(*add_time)(u32 data))
+{
+ enum lrng_health_res health_test;
+
+ if (lrng_raw_sched_hires_entropy_store(time))
+ return;
+
+ health_test = lrng_health_test(time, lrng_int_es_sched);
+ if (health_test > lrng_health_fail_use)
+ return;
+
+ if (health_test == lrng_health_pass)
+ atomic_inc_return(this_cpu_ptr(&lrng_sched_array_events));
+
+ add_time(time);
+
+ /*
+ * We cannot call lrng_es_add_entropy() as this would call a schedule
+ * operation that is not permissible in scheduler context.
+ * As the scheduler ES provides a high bandwidth of entropy, we assume
+ * that other reseed triggers happen to pick up the scheduler ES
+ * entropy in due time.
+ */
+}
+
+/* Batching up of entropy in per-CPU array */
+static void lrng_sched_time_process(void)
+{
+ u32 now_time = random_get_entropy();
+
+ if (unlikely(!lrng_gcd_tested())) {
+ /* When GCD is unknown, we process the full time stamp */
+ lrng_time_process_common(now_time, lrng_sched_array_add_u32);
+ lrng_gcd_add_value(now_time);
+ } else {
+ /* GCD is known and applied */
+ lrng_time_process_common((now_time / lrng_gcd_get()) &
+ LRNG_DATA_SLOTSIZE_MASK,
+ lrng_sched_array_add_slot);
+ }
+
+ lrng_sched_perf_time(now_time);
+}
+
+void add_sched_randomness(const struct task_struct *p, int cpu)
+{
+ if (lrng_highres_timer()) {
+ lrng_sched_time_process();
+ } else {
+ u32 tmp = cpu;
+
+ tmp ^= lrng_raw_sched_pid_entropy_store(p->pid) ?
+ 0 : (u32)p->pid;
+ tmp ^= lrng_raw_sched_starttime_entropy_store(p->start_time) ?
+ 0 : (u32)p->start_time;
+ tmp ^= lrng_raw_sched_nvcsw_entropy_store(p->nvcsw) ?
+ 0 : (u32)p->nvcsw;
+
+ lrng_sched_time_process();
+ lrng_sched_array_add_u32(tmp);
+ }
+}
+EXPORT_SYMBOL(add_sched_randomness);
+
+static void lrng_sched_es_state(unsigned char *buf, size_t buflen)
+{
+ const struct lrng_drng *lrng_drng_init = lrng_drng_init_instance();
+
+ /* Assume the lrng_drng_init lock is taken by caller */
+ snprintf(buf, buflen,
+ " Hash for operating entropy pool: %s\n"
+ " Available entropy: %u\n"
+ " per-CPU scheduler event collection size: %u\n"
+ " Standards compliance: %s\n"
+ " High-resolution timer: %s\n"
+ " Health test passed: %s\n",
+ lrng_drng_init->hash_cb->hash_name(),
+ lrng_sched_avail_entropy(0),
+ LRNG_DATA_NUM_VALUES,
+ lrng_sp80090b_compliant(lrng_int_es_sched) ? "SP800-90B " : "",
+ lrng_highres_timer() ? "true" : "false",
+ lrng_sp80090b_startup_complete_es(lrng_int_es_sched) ?
+ "true" :
+ "false");
+}
+
+struct lrng_es_cb lrng_es_sched = {
+ .name = "Scheduler",
+ .get_ent = lrng_sched_pool_hash,
+ .curr_entropy = lrng_sched_avail_entropy,
+ .max_entropy = lrng_sched_avail_pool_size,
+ .state = lrng_sched_es_state,
+ .reset = lrng_sched_reset,
+ .switch_hash = lrng_sched_switch_hash,
+};
diff --git a/drivers/char/lrng/lrng_es_sched.h b/drivers/char/lrng/lrng_es_sched.h
new file mode 100644
index 000000000000..f1e596dd89d9
--- /dev/null
+++ b/drivers/char/lrng/lrng_es_sched.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause */
+/*
+ * Copyright (C) 2022, Stephan Mueller <smueller@chronox.de>
+ */
+
+#ifndef _LRNG_ES_SCHED_H
+#define _LRNG_ES_SCHED_H
+
+#include "lrng_es_mgr_cb.h"
+
+#ifdef CONFIG_LRNG_SCHED
+void lrng_sched_es_init(bool highres_timer);
+
+extern struct lrng_es_cb lrng_es_sched;
+
+#else /* CONFIG_LRNG_SCHED */
+static inline void lrng_sched_es_init(bool highres_timer) { }
+#endif /* CONFIG_LRNG_SCHED */
+
+#endif /* _LRNG_ES_SCHED_H */
diff --git a/drivers/char/lrng/lrng_es_timer_common.c b/drivers/char/lrng/lrng_es_timer_common.c
new file mode 100644
index 000000000000..70f3ff074fe6
--- /dev/null
+++ b/drivers/char/lrng/lrng_es_timer_common.c
@@ -0,0 +1,144 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause
+/*
+ * LRNG Slow Entropy Source: Common time stamp handling (GCD calculation)
+ *
+ * Copyright (C) 2022, Stephan Mueller <smueller@chronox.de>
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/gcd.h>
+#include <linux/module.h>
+
+#include "lrng_es_irq.h"
+#include "lrng_es_sched.h"
+#include "lrng_es_timer_common.h"
+#include "lrng_health.h"
+
+/* Is high-resolution timer present? */
+static bool lrng_highres_timer_val = false;
+
+/* Number of time stamps analyzed to calculate a GCD */
+#define LRNG_GCD_WINDOW_SIZE 100
+static u32 lrng_gcd_history[LRNG_GCD_WINDOW_SIZE];
+static atomic_t lrng_gcd_history_ptr = ATOMIC_INIT(-1);
+
+/* The common divisor for all timestamps */
+static u32 lrng_gcd_timer = 0;
+
+bool lrng_gcd_tested(void)
+{
+ return (lrng_gcd_timer != 0);
+}
+
+u32 lrng_gcd_get(void)
+{
+ return lrng_gcd_timer;
+}
+
+/* Set the GCD for use in IRQ ES - if 0, the GCD calculation is restarted. */
+void lrng_gcd_set(u32 running_gcd)
+{
+ lrng_gcd_timer = running_gcd;
+ /* Ensure that update to global variable lrng_gcd_timer is visible */
+ mb();
+}
+
+static void lrng_gcd_set_check(u32 running_gcd)
+{
+ if (!lrng_gcd_tested()) {
+ lrng_gcd_set(running_gcd);
+ pr_debug("Setting GCD to %u\n", running_gcd);
+ }
+}
+
+u32 lrng_gcd_analyze(u32 *history, size_t nelem)
+{
+ u32 running_gcd = 0;
+ size_t i;
+
+ /* Now perform the analysis on the accumulated time data. */
+ for (i = 0; i < nelem; i++) {
+ /*
+ * NOTE: this would be the place to add more analysis on the
+ * appropriateness of the timer like checking the presence
+ * of sufficient variations in the timer.
+ */
+
+ /*
+		 * This calculates the GCD of all the time values, that is
+		 * gcd(time_1, time_2, ..., time_nelem).
+		 *
+		 * Some timers increment by a fixed (non-1) amount each step.
+		 * This code detects such increments so that the common factor
+		 * can later be divided out of the time stamps.
+ */
+ running_gcd = (u32)gcd(history[i], running_gcd);
+
+ /* Zeroize data */
+ history[i] = 0;
+ }
+
+ return running_gcd;
+}
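+
+/*
+ * Example: if the timer only ever advances in steps of 10 (accumulated
+ * values 1000, 1010, 1030, ...), the running GCD converges to 10; once set,
+ * subsequent time stamps are divided by it before their LSBs are collected
+ * (see lrng_sched_time_process()).
+ */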
+
+void lrng_gcd_add_value(u32 time)
+{
+ u32 ptr = (u32)atomic_inc_return_relaxed(&lrng_gcd_history_ptr);
+
+ if (ptr < LRNG_GCD_WINDOW_SIZE) {
+ lrng_gcd_history[ptr] = time;
+ } else if (ptr == LRNG_GCD_WINDOW_SIZE) {
+ u32 gcd = lrng_gcd_analyze(lrng_gcd_history,
+ LRNG_GCD_WINDOW_SIZE);
+
+ if (!gcd)
+ gcd = 1;
+
+ /*
+ * Ensure that we have variations in the time stamp below the
+		 * given value. This is just a safety measure to prevent the
+		 * GCD from becoming too large.
+ */
+ if (gcd >= 1000) {
+ pr_warn("calculated GCD is larger than expected: %u\n",
+ gcd);
+ gcd = 1000;
+ }
+
+ /* Adjust all deltas by the observed (small) common factor. */
+ lrng_gcd_set_check(gcd);
+ atomic_set(&lrng_gcd_history_ptr, 0);
+ }
+}
+
+/* Return boolean whether LRNG identified presence of high-resolution timer */
+bool lrng_highres_timer(void)
+{
+ return lrng_highres_timer_val;
+}
+
+static int __init lrng_init_time_source(void)
+{
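+	/*
+	 * Read the time stamp twice: the timer is treated as high-resolution
+	 * if at least one reading carries variation in the LSBs that fill a
+	 * collection slot, so a single unlucky zero reading does not
+	 * misclassify a high-resolution timer.
+	 */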
+ if ((random_get_entropy() & LRNG_DATA_SLOTSIZE_MASK) ||
+ (random_get_entropy() & LRNG_DATA_SLOTSIZE_MASK)) {
+ /*
+ * As the highres timer is identified here, previous interrupts
+ * obtained during boot time are treated like a lowres-timer
+ * would have been present.
+ */
+ lrng_highres_timer_val = true;
+ } else {
+ lrng_health_disable();
+ lrng_highres_timer_val = false;
+ }
+
+ lrng_irq_es_init(lrng_highres_timer_val);
+ lrng_sched_es_init(lrng_highres_timer_val);
+
+ /* Ensure that changes to global variables are visible */
+ mb();
+
+ return 0;
+}
+core_initcall(lrng_init_time_source);
diff --git a/drivers/char/lrng/lrng_es_timer_common.h b/drivers/char/lrng/lrng_es_timer_common.h
new file mode 100644
index 000000000000..9ed954e20493
--- /dev/null
+++ b/drivers/char/lrng/lrng_es_timer_common.h
@@ -0,0 +1,83 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause */
+/*
+ * LRNG Slow Noise Source: Time stamp array handling
+ *
+ * Copyright (C) 2022, Stephan Mueller <smueller@chronox.de>
+ */
+
+#ifndef _LRNG_ES_TIMER_COMMON_H
+#define _LRNG_ES_TIMER_COMMON_H
+
+bool lrng_gcd_tested(void);
+void lrng_gcd_set(u32 running_gcd);
+u32 lrng_gcd_get(void);
+u32 lrng_gcd_analyze(u32 *history, size_t nelem);
+void lrng_gcd_add_value(u32 time);
+bool lrng_highres_timer(void);
+
+/*
+ * To limit the impact on the interrupt handling, the LRNG concatenates
+ * entropic LSB parts of the time stamps in a per-CPU array and only
+ * injects them into the entropy pool when the array is full.
+ */
+
+/* Store multiple integers in one u32 */
+#define LRNG_DATA_SLOTSIZE_BITS (8)
+#define LRNG_DATA_SLOTSIZE_MASK ((1 << LRNG_DATA_SLOTSIZE_BITS) - 1)
+#define LRNG_DATA_ARRAY_MEMBER_BITS (4 << 3) /* ((sizeof(u32)) << 3) */
+#define LRNG_DATA_SLOTS_PER_UINT (LRNG_DATA_ARRAY_MEMBER_BITS / \
+ LRNG_DATA_SLOTSIZE_BITS)
+
+/*
+ * Number of time values to store in the array - in small environments
+ * only one atomic_t variable per CPU is used.
+ */
+#define LRNG_DATA_NUM_VALUES (CONFIG_LRNG_COLLECTION_SIZE)
+/* Mask of LSB of time stamp to store */
+#define LRNG_DATA_WORD_MASK (LRNG_DATA_NUM_VALUES - 1)
+
+#define LRNG_DATA_SLOTS_MASK (LRNG_DATA_SLOTS_PER_UINT - 1)
+#define LRNG_DATA_ARRAY_SIZE (LRNG_DATA_NUM_VALUES / \
+ LRNG_DATA_SLOTS_PER_UINT)
+
+/* Starting bit index of slot */
+static inline unsigned int lrng_data_slot2bitindex(unsigned int slot)
+{
+ return (LRNG_DATA_SLOTSIZE_BITS * slot);
+}
+
+/* Convert index into the array index */
+static inline unsigned int lrng_data_idx2array(unsigned int idx)
+{
+ return idx / LRNG_DATA_SLOTS_PER_UINT;
+}
+
+/* Convert index into the slot of a given array index */
+static inline unsigned int lrng_data_idx2slot(unsigned int idx)
+{
+ return idx & LRNG_DATA_SLOTS_MASK;
+}
+
+/* Convert value into slot value */
+static inline unsigned int lrng_data_slot_val(unsigned int val,
+ unsigned int slot)
+{
+ return val << lrng_data_slot2bitindex(slot);
+}
+
+/*
+ * Return the pointers for the previous and current units to inject a u32 into.
+ * Also return the mask with which the u32 word is to be split between them.
+ */
+static inline void lrng_data_split_u32(u32 *ptr, u32 *pre_ptr, u32 *mask)
+{
+ /* ptr to previous unit */
+ *pre_ptr = (*ptr - LRNG_DATA_SLOTS_PER_UINT) & LRNG_DATA_WORD_MASK;
+ *ptr &= LRNG_DATA_WORD_MASK;
+
+ /* mask to split data into the two parts for the two units */
+ *mask = ((1 << (*pre_ptr & (LRNG_DATA_SLOTS_PER_UINT - 1)) *
+ LRNG_DATA_SLOTSIZE_BITS)) - 1;
+}
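+
+/*
+ * Worked example using the definitions above (LRNG_DATA_SLOTSIZE_BITS == 8,
+ * hence LRNG_DATA_SLOTS_PER_UINT == 4): index 10 maps to array word
+ * 10 / 4 == 2 and slot 10 & 3 == 2, and lrng_data_slot_val(0xab, 2) yields
+ * 0xab0000. In lrng_data_split_u32(), (pre_ptr & 3) == 2 yields a mask of
+ * 0xffff: the two LSB slots of a value go into the current array word and
+ * the two MSB slots into the previous one.
+ */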
+
+#endif /* _LRNG_ES_TIMER_COMMON_H */
diff --git a/drivers/char/lrng/lrng_hash_kcapi.c b/drivers/char/lrng/lrng_hash_kcapi.c
new file mode 100644
index 000000000000..13e62db9b6c8
--- /dev/null
+++ b/drivers/char/lrng/lrng_hash_kcapi.c
@@ -0,0 +1,140 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause
+/*
+ * Backend for providing the hash primitive using the kernel crypto API.
+ *
+ * Copyright (C) 2022, Stephan Mueller <smueller@chronox.de>
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/lrng.h>
+#include <crypto/hash.h>
+#include <linux/module.h>
+
+
+static char *lrng_hash_name = "sha512";
+
+/* The parameter must be r/o in sysfs as otherwise races appear. */
+module_param(lrng_hash_name, charp, 0444);
+MODULE_PARM_DESC(lrng_hash_name, "Kernel crypto API hash name");
+
+struct lrng_hash_info {
+ struct crypto_shash *tfm;
+};
+
+static const char *lrng_kcapi_hash_name(void)
+{
+ return lrng_hash_name;
+}
+
+static void _lrng_kcapi_hash_free(struct lrng_hash_info *lrng_hash)
+{
+ struct crypto_shash *tfm = lrng_hash->tfm;
+
+ crypto_free_shash(tfm);
+ kfree(lrng_hash);
+}
+
+static void *lrng_kcapi_hash_alloc(const char *name)
+{
+ struct lrng_hash_info *lrng_hash;
+ struct crypto_shash *tfm;
+ int ret;
+
+ if (!name) {
+ pr_err("Hash name missing\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ tfm = crypto_alloc_shash(name, 0, 0);
+ if (IS_ERR(tfm)) {
+ pr_err("could not allocate hash %s\n", name);
+ return ERR_CAST(tfm);
+ }
+
+ ret = sizeof(struct lrng_hash_info);
+ lrng_hash = kmalloc(ret, GFP_KERNEL);
+ if (!lrng_hash) {
+ crypto_free_shash(tfm);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ lrng_hash->tfm = tfm;
+
+ pr_info("Hash %s allocated\n", name);
+
+ return lrng_hash;
+}
+
+static void *lrng_kcapi_hash_name_alloc(void)
+{
+ return lrng_kcapi_hash_alloc(lrng_kcapi_hash_name());
+}
+
+static u32 lrng_kcapi_hash_digestsize(void *hash)
+{
+ struct lrng_hash_info *lrng_hash = (struct lrng_hash_info *)hash;
+ struct crypto_shash *tfm = lrng_hash->tfm;
+
+ return crypto_shash_digestsize(tfm);
+}
+
+static void lrng_kcapi_hash_dealloc(void *hash)
+{
+ struct lrng_hash_info *lrng_hash = (struct lrng_hash_info *)hash;
+
+ _lrng_kcapi_hash_free(lrng_hash);
+ pr_info("Hash deallocated\n");
+}
+
+static int lrng_kcapi_hash_init(struct shash_desc *shash, void *hash)
+{
+ struct lrng_hash_info *lrng_hash = (struct lrng_hash_info *)hash;
+ struct crypto_shash *tfm = lrng_hash->tfm;
+
+ shash->tfm = tfm;
+ return crypto_shash_init(shash);
+}
+
+static int lrng_kcapi_hash_update(struct shash_desc *shash, const u8 *inbuf,
+ u32 inbuflen)
+{
+ return crypto_shash_update(shash, inbuf, inbuflen);
+}
+
+static int lrng_kcapi_hash_final(struct shash_desc *shash, u8 *digest)
+{
+ return crypto_shash_final(shash, digest);
+}
+
+static void lrng_kcapi_hash_zero(struct shash_desc *shash)
+{
+ shash_desc_zero(shash);
+}
+
+static const struct lrng_hash_cb lrng_kcapi_hash_cb = {
+ .hash_name = lrng_kcapi_hash_name,
+ .hash_alloc = lrng_kcapi_hash_name_alloc,
+ .hash_dealloc = lrng_kcapi_hash_dealloc,
+ .hash_digestsize = lrng_kcapi_hash_digestsize,
+ .hash_init = lrng_kcapi_hash_init,
+ .hash_update = lrng_kcapi_hash_update,
+ .hash_final = lrng_kcapi_hash_final,
+ .hash_desc_zero = lrng_kcapi_hash_zero,
+};
+
+static int __init lrng_kcapi_init(void)
+{
+ return lrng_set_hash_cb(&lrng_kcapi_hash_cb);
+}
+
+static void __exit lrng_kcapi_exit(void)
+{
+ lrng_set_hash_cb(NULL);
+}
+
+late_initcall(lrng_kcapi_init);
+module_exit(lrng_kcapi_exit);
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_AUTHOR("Stephan Mueller <smueller@chronox.de>");
+MODULE_DESCRIPTION("Entropy Source and DRNG Manager - Kernel crypto API hash backend");
diff --git a/drivers/char/lrng/lrng_health.c b/drivers/char/lrng/lrng_health.c
new file mode 100644
index 000000000000..2c884d1c15c1
--- /dev/null
+++ b/drivers/char/lrng/lrng_health.c
@@ -0,0 +1,447 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause
+/*
+ * Entropy Source and DRNG Manager (LRNG) Health Testing
+ *
+ * Copyright (C) 2022 - 2023, Stephan Mueller <smueller@chronox.de>
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/fips.h>
+#include <linux/module.h>
+
+#include "lrng_definitions.h"
+#include "lrng_es_mgr.h"
+#include "lrng_health.h"
+
+/* Stuck Test */
+struct lrng_stuck_test {
+ u32 last_time; /* Stuck test: time of previous IRQ */
+ u32 last_delta; /* Stuck test: delta of previous IRQ */
+	u32 last_delta2;	/* Stuck test: 2nd time derivative of prev IRQ */
+};
+
+/* Repetition Count Test */
+struct lrng_rct {
+ atomic_t rct_count; /* Number of stuck values */
+};
+
+/* Adaptive Proportion Test */
+struct lrng_apt {
+ /* Data window size */
+#define LRNG_APT_WINDOW_SIZE 512
+ /* LSB of time stamp to process */
+#define LRNG_APT_LSB 16
+#define LRNG_APT_WORD_MASK (LRNG_APT_LSB - 1)
+ atomic_t apt_count; /* APT counter */
+ atomic_t apt_base; /* APT base reference */
+
+ atomic_t apt_trigger;
+ bool apt_base_set; /* Is APT base set? */
+};
+
+/* Health data collected for one entropy source */
+struct lrng_health_es_state {
+ struct lrng_rct rct;
+ struct lrng_apt apt;
+
+ /* SP800-90B startup health tests */
+#define LRNG_SP80090B_STARTUP_SAMPLES 1024
+#define LRNG_SP80090B_STARTUP_BLOCKS ((LRNG_SP80090B_STARTUP_SAMPLES + \
+ LRNG_APT_WINDOW_SIZE - 1) / \
+ LRNG_APT_WINDOW_SIZE)
+ bool sp80090b_startup_done;
+ atomic_t sp80090b_startup_blocks;
+};
+
+#define LRNG_HEALTH_ES_INIT(x) \
+ x.rct.rct_count = ATOMIC_INIT(0), \
+ x.apt.apt_count = ATOMIC_INIT(0), \
+ x.apt.apt_base = ATOMIC_INIT(-1), \
+ x.apt.apt_trigger = ATOMIC_INIT(LRNG_APT_WINDOW_SIZE), \
+ x.apt.apt_base_set = false, \
+ x.sp80090b_startup_blocks = ATOMIC_INIT(LRNG_SP80090B_STARTUP_BLOCKS), \
+ x.sp80090b_startup_done = false,
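+
+/*
+ * The macro expands to designated initializers for the given array member,
+ * e.g. LRNG_HEALTH_ES_INIT(.es_state[lrng_int_es_irq]) yields
+ * .es_state[lrng_int_es_irq].rct.rct_count = ATOMIC_INIT(0), and so on,
+ * which is why it ends with a trailing comma.
+ */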
+
+/* The health test code must operate lock-less */
+struct lrng_health {
+ bool health_test_enabled;
+ struct lrng_health_es_state es_state[lrng_int_es_last];
+};
+
+static struct lrng_health lrng_health = {
+ .health_test_enabled = true,
+
+#ifdef CONFIG_LRNG_IRQ
+ LRNG_HEALTH_ES_INIT(.es_state[lrng_int_es_irq])
+#endif
+#ifdef CONFIG_LRNG_SCHED
+ LRNG_HEALTH_ES_INIT(.es_state[lrng_int_es_sched])
+#endif
+};
+
+static DEFINE_PER_CPU(struct lrng_stuck_test[lrng_int_es_last],
+ lrng_stuck_test_array);
+
+static bool lrng_sp80090b_health_requested(void)
+{
+ /* Health tests are only requested in FIPS mode */
+ return fips_enabled;
+}
+
+static bool lrng_sp80090b_health_enabled(void)
+{
+ struct lrng_health *health = &lrng_health;
+
+ return lrng_sp80090b_health_requested() && health->health_test_enabled;
+}
+
+/***************************************************************************
+ * SP800-90B Compliance
+ *
+ * If the Linux-RNG is booted into FIPS mode, the following interfaces
+ * provide an SP800-90B compliant noise source:
+ *
+ * * /dev/random
+ * * getrandom(2)
+ * * get_random_bytes_full
+ *
+ * All other interfaces, including /dev/urandom or get_random_bytes without
+ * the add_random_ready_callback cannot claim to use an SP800-90B compliant
+ * noise source.
+ ***************************************************************************/
+
+/*
+ * Perform SP800-90B startup testing
+ */
+static void lrng_sp80090b_startup(struct lrng_health *health,
+ enum lrng_internal_es es)
+{
+ struct lrng_health_es_state *es_state = &health->es_state[es];
+
+ if (!es_state->sp80090b_startup_done &&
+ atomic_dec_and_test(&es_state->sp80090b_startup_blocks)) {
+ es_state->sp80090b_startup_done = true;
+ pr_info("SP800-90B startup health tests for internal entropy source %u completed\n",
+ es);
+ lrng_drng_force_reseed();
+
+ /*
+ * We cannot call lrng_es_add_entropy() as this may cause a
+ * schedule operation while in scheduler context for the
+ * scheduler ES.
+ */
+ }
+}
+
+/*
+ * Handle failure of SP800-90B startup testing
+ */
+static void lrng_sp80090b_startup_failure(struct lrng_health *health,
+ enum lrng_internal_es es)
+{
+ struct lrng_health_es_state *es_state = &health->es_state[es];
+
+ /* Reset of LRNG and its entropy - NOTE: we are in atomic context */
+ lrng_reset();
+
+ /*
+ * Reset the SP800-90B startup test.
+ *
+ * NOTE SP800-90B section 4.3 bullet 4 does not specify what
+ * exactly is to be done in case of failure! Thus, we do what
+ * makes sense, i.e. restarting the health test and thus gating
+ * the output function of /dev/random and getrandom(2).
+ */
+ atomic_set(&es_state->sp80090b_startup_blocks,
+ LRNG_SP80090B_STARTUP_BLOCKS);
+}
+
+/*
+ * Handle failure of SP800-90B runtime testing
+ */
+static void lrng_sp80090b_runtime_failure(struct lrng_health *health,
+ enum lrng_internal_es es)
+{
+ struct lrng_health_es_state *es_state = &health->es_state[es];
+
+ lrng_sp80090b_startup_failure(health, es);
+ es_state->sp80090b_startup_done = false;
+}
+
+static void lrng_rct_reset(struct lrng_rct *rct);
+static void lrng_apt_reset(struct lrng_apt *apt, unsigned int time_masked);
+static void lrng_apt_restart(struct lrng_apt *apt);
+static void lrng_sp80090b_permanent_failure(struct lrng_health *health,
+ enum lrng_internal_es es)
+{
+ struct lrng_health_es_state *es_state = &health->es_state[es];
+ struct lrng_apt *apt = &es_state->apt;
+ struct lrng_rct *rct = &es_state->rct;
+
+ if (lrng_enforce_panic_on_permanent_health_failure()) {
+ panic("SP800-90B permanent health test failure for internal entropy source %u\n",
+ es);
+ }
+
+ pr_err("SP800-90B permanent health test failure for internal entropy source %u - invalidating all existing entropy and initiate SP800-90B startup\n",
+ es);
+ lrng_sp80090b_runtime_failure(health, es);
+
+ lrng_rct_reset(rct);
+ lrng_apt_reset(apt, 0);
+ lrng_apt_restart(apt);
+}
+
+static void lrng_sp80090b_failure(struct lrng_health *health,
+ enum lrng_internal_es es)
+{
+ struct lrng_health_es_state *es_state = &health->es_state[es];
+
+ if (es_state->sp80090b_startup_done) {
+ pr_warn("SP800-90B runtime health test failure for internal entropy source %u - invalidating all existing entropy and initiate SP800-90B startup\n", es);
+ lrng_sp80090b_runtime_failure(health, es);
+ } else {
+ pr_warn("SP800-90B startup test failure for internal entropy source %u - resetting\n", es);
+ lrng_sp80090b_startup_failure(health, es);
+ }
+}
+
+bool lrng_sp80090b_startup_complete_es(enum lrng_internal_es es)
+{
+ struct lrng_health *health = &lrng_health;
+ struct lrng_health_es_state *es_state = &health->es_state[es];
+
+ if (!lrng_sp80090b_health_enabled())
+ return true;
+
+ return es_state->sp80090b_startup_done;
+}
+
+bool lrng_sp80090b_compliant(enum lrng_internal_es es)
+{
+ struct lrng_health *health = &lrng_health;
+ struct lrng_health_es_state *es_state = &health->es_state[es];
+
+ return lrng_sp80090b_health_enabled() &&
+ es_state->sp80090b_startup_done;
+}
+
+/***************************************************************************
+ * Adaptive Proportion Test
+ *
+ * This test complies with SP800-90B section 4.4.2.
+ ***************************************************************************/
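+
+/*
+ * How the test below operates (following SP800-90B section 4.4.2): the
+ * first sample of each LRNG_APT_WINDOW_SIZE window becomes the reference
+ * value (apt_base); every subsequent sample in the window that matches the
+ * reference increments apt_count. Reaching the configured cutoff indicates
+ * insufficient variation in the samples and thus a health test failure.
+ */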
+
+/*
+ * Reset the APT counter
+ *
+ * @apt [in] Reference to APT state
+ * @time_masked [in] Masked time stamp to set as the new APT base
+ */
+static void lrng_apt_reset(struct lrng_apt *apt, unsigned int time_masked)
+{
+ /* Reset APT */
+ atomic_set(&apt->apt_count, 0);
+ atomic_set(&apt->apt_base, time_masked);
+}
+
+static void lrng_apt_restart(struct lrng_apt *apt)
+{
+ atomic_set(&apt->apt_trigger, LRNG_APT_WINDOW_SIZE);
+}
+
+/*
+ * Insert a new entropy event into APT
+ *
+ * This function is void as it does not decide about the fate of a time
+ * stamp. An APT failure can only happen at the same time as a stuck test
+ * failure. Thus, the stuck failure will already decide how the time stamp
+ * is handled.
+ *
+ * @health [in] Reference to health state
+ * @now_time [in] Time stamp to process
+ * @es [in] Entropy source the time stamp belongs to
+ */
+static void lrng_apt_insert(struct lrng_health *health,
+ unsigned int now_time, enum lrng_internal_es es)
+{
+ struct lrng_health_es_state *es_state = &health->es_state[es];
+ struct lrng_apt *apt = &es_state->apt;
+
+ if (!lrng_sp80090b_health_requested())
+ return;
+
+ now_time &= LRNG_APT_WORD_MASK;
+
+ /* Initialization of APT */
+ if (!apt->apt_base_set) {
+ atomic_set(&apt->apt_base, now_time);
+ apt->apt_base_set = true;
+ return;
+ }
+
+ if (now_time == (unsigned int)atomic_read(&apt->apt_base)) {
+ u32 apt_val = (u32)atomic_inc_return_relaxed(&apt->apt_count);
+
+ if (apt_val >= CONFIG_LRNG_APT_CUTOFF_PERMANENT)
+ lrng_sp80090b_permanent_failure(health, es);
+ else if (apt_val >= CONFIG_LRNG_APT_CUTOFF)
+ lrng_sp80090b_failure(health, es);
+ }
+
+ if (atomic_dec_and_test(&apt->apt_trigger)) {
+ lrng_apt_restart(apt);
+ lrng_apt_reset(apt, now_time);
+ lrng_sp80090b_startup(health, es);
+ }
+}
+
+/***************************************************************************
+ * Repetition Count Test
+ *
+ * The LRNG uses an enhanced version of the Repetition Count Test
+ * (RCT) specified in SP800-90B section 4.4.1. Instead of counting identical
+ * back-to-back values, the input to the RCT is the counting of the stuck
+ * values while filling the entropy pool.
+ *
+ * The RCT is applied with an alpha of 2^-30 compliant to FIPS 140-2 IG 9.8.
+ *
+ * During the counting operation, the LRNG always calculates the RCT
+ * cut-off value of C. If that value exceeds the allowed cut-off value,
+ * the LRNG will invalidate all entropy for the entropy pool which implies
+ * that no data can be extracted from the entropy pool unless new entropy
+ * is received.
+ ***************************************************************************/
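+
+/*
+ * Cutoff arithmetic (SP800-90B section 4.4.1): C = 1 + ceil(-log2(alpha)/H);
+ * with alpha = 2^-30 and an assumed entropy rate of H = 1 bit per sample
+ * this yields C = 31. As noted below, rct_count starts at zero, so the
+ * cutoff value configured at build time corresponds to C - 1.
+ */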
+
+static void lrng_rct_reset(struct lrng_rct *rct)
+{
+ /* Reset RCT */
+ atomic_set(&rct->rct_count, 0);
+}
+
+/*
+ * Hot code path - Insert data for Repetition Count Test
+ *
+ * @health: Reference to health information
+ * @es: Entropy source the stuck decision belongs to
+ * @stuck: Decision of stuck test
+ */
+static void lrng_rct(struct lrng_health *health, enum lrng_internal_es es,
+ int stuck)
+{
+ struct lrng_health_es_state *es_state = &health->es_state[es];
+ struct lrng_rct *rct = &es_state->rct;
+
+ if (!lrng_sp80090b_health_requested())
+ return;
+
+ if (stuck) {
+ u32 rct_count = atomic_add_return_relaxed(1, &rct->rct_count);
+
+ /*
+ * The cutoff value is based on the following consideration:
+ * alpha = 2^-30 as recommended in FIPS 140-2 IG 9.8.
+		 * In addition, we assume an entropy value H of 1 bit as this
+		 * is the minimum entropy required to provide full entropy.
+		 *
+		 * Note, rct_count (which corresponds to value B in the
+		 * pseudo code of SP800-90B section 4.4.1) starts with zero.
+ * Hence we need to subtract one from the cutoff value as
+ * calculated following SP800-90B.
+ */
+ if (rct_count >= CONFIG_LRNG_RCT_CUTOFF_PERMANENT)
+ lrng_sp80090b_permanent_failure(health, es);
+ else if (rct_count >= CONFIG_LRNG_RCT_CUTOFF)
+ lrng_sp80090b_failure(health, es);
+ } else {
+ lrng_rct_reset(rct);
+ }
+}
+
+/***************************************************************************
+ * Stuck Test
+ *
+ * Checking the:
+ * 1st derivative of the event occurrence (time delta)
+ * 2nd derivative of the event occurrence (delta of time deltas)
+ * 3rd derivative of the event occurrence (delta of delta of time deltas)
+ *
+ * All values must always be non-zero. The stuck test is only meaningful with
+ * high-resolution time stamps; it is disabled, together with the other health
+ * tests, if no high-resolution timer is identified after initialization.
+ ***************************************************************************/
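+
+/*
+ * Example: a timer advancing by a constant step (time stamps 100, 110, 120)
+ * produces first derivatives 10, 10 and thus a second derivative of 0; the
+ * event is flagged as stuck and - in SP800-90B mode - its time stamp is
+ * dropped.
+ */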
+
+static u32 lrng_delta(u32 prev, u32 next)
+{
+ /*
+ * Note that this (unsigned) subtraction does yield the correct value
+ * in the wraparound-case, i.e. when next < prev.
+ */
+ return (next - prev);
+}
+
+/*
+ * Hot code path
+ *
+ * @es: Entropy source the event belongs to
+ * @now_time: Event time
+ * @return: 0 event occurrence not stuck (good time stamp)
+ * != 0 event occurrence stuck (reject time stamp)
+ */
+static int lrng_irq_stuck(enum lrng_internal_es es, u32 now_time)
+{
+ struct lrng_stuck_test *stuck = this_cpu_ptr(lrng_stuck_test_array);
+ u32 delta = lrng_delta(stuck[es].last_time, now_time);
+ u32 delta2 = lrng_delta(stuck[es].last_delta, delta);
+ u32 delta3 = lrng_delta(stuck[es].last_delta2, delta2);
+
+ stuck[es].last_time = now_time;
+ stuck[es].last_delta = delta;
+ stuck[es].last_delta2 = delta2;
+
+ if (!delta || !delta2 || !delta3)
+ return 1;
+
+ return 0;
+}
+
+/***************************************************************************
+ * Health test interfaces
+ ***************************************************************************/
+
+/*
+ * Disable all health tests
+ */
+void lrng_health_disable(void)
+{
+ struct lrng_health *health = &lrng_health;
+
+ health->health_test_enabled = false;
+
+ if (lrng_sp80090b_health_requested())
+ pr_warn("SP800-90B compliance requested but the Linux RNG is NOT SP800-90B compliant\n");
+}
+
+/*
+ * Hot code path - Perform health test on time stamp received from an event
+ *
+ * @now_time Time stamp
+ */
+enum lrng_health_res lrng_health_test(u32 now_time, enum lrng_internal_es es)
+{
+ struct lrng_health *health = &lrng_health;
+ int stuck;
+
+ if (!health->health_test_enabled)
+ return lrng_health_pass;
+
+ lrng_apt_insert(health, now_time, es);
+
+ stuck = lrng_irq_stuck(es, now_time);
+ lrng_rct(health, es, stuck);
+ if (stuck) {
+ /* SP800-90B disallows using a failing health test time stamp */
+ return lrng_sp80090b_health_requested() ?
+ lrng_health_fail_drop : lrng_health_fail_use;
+ }
+
+ return lrng_health_pass;
+}
diff --git a/drivers/char/lrng/lrng_health.h b/drivers/char/lrng/lrng_health.h
new file mode 100644
index 000000000000..4f9f5033fc30
--- /dev/null
+++ b/drivers/char/lrng/lrng_health.h
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause */
+/*
+ * Copyright (C) 2022, Stephan Mueller <smueller@chronox.de>
+ */
+
+#ifndef _LRNG_HEALTH_H
+#define _LRNG_HEALTH_H
+
+#include "lrng_es_mgr.h"
+
+enum lrng_health_res {
+ lrng_health_pass, /* Health test passes on time stamp */
+ lrng_health_fail_use, /* Time stamp unhealthy, but mix in */
+ lrng_health_fail_drop /* Time stamp unhealthy, drop it */
+};
+
+#ifdef CONFIG_LRNG_HEALTH_TESTS
+bool lrng_sp80090b_startup_complete_es(enum lrng_internal_es es);
+bool lrng_sp80090b_compliant(enum lrng_internal_es es);
+
+enum lrng_health_res lrng_health_test(u32 now_time, enum lrng_internal_es es);
+void lrng_health_disable(void);
+#else /* CONFIG_LRNG_HEALTH_TESTS */
+static inline bool lrng_sp80090b_startup_complete_es(enum lrng_internal_es es)
+{
+ return true;
+}
+
+static inline bool lrng_sp80090b_compliant(enum lrng_internal_es es)
+{
+ return false;
+}
+
+static inline enum lrng_health_res
+lrng_health_test(u32 now_time, enum lrng_internal_es es)
+{
+ return lrng_health_pass;
+}
+static inline void lrng_health_disable(void) { }
+#endif /* CONFIG_LRNG_HEALTH_TESTS */
+
+#endif /* _LRNG_HEALTH_H */
diff --git a/drivers/char/lrng/lrng_interface_aux.c b/drivers/char/lrng/lrng_interface_aux.c
new file mode 100644
index 000000000000..12eb1d8d9b46
--- /dev/null
+++ b/drivers/char/lrng/lrng_interface_aux.c
@@ -0,0 +1,210 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause
+/*
+ * LRNG auxiliary interfaces
+ *
+ * Copyright (C) 2022 Stephan Mueller <smueller@chronox.de>
+ * Copyright (C) 2017 Jason A. Donenfeld <Jason@zx2c4.com>. All
+ * Rights Reserved.
+ * Copyright (C) 2016 Jason Cooper <jason@lakedaemon.net>
+ */
+
+#include <linux/lrng.h>
+#include <linux/mm.h>
+#include <linux/random.h>
+
+#include "lrng_es_mgr.h"
+#include "lrng_interface_random_kernel.h"
+
+/*
+ * Fill a buffer with random numbers and tokenize it to provide random numbers
+ * to callers in fixed chunks. This approach is provided to be consistent with
+ * the Linux kernel interface requirements. Yet, this approach violates the
+ * backtracking resistance of the random number generator. Thus, the provided
+ * random numbers are not considered to be as strong as those requested directly
+ * from the LRNG.
+ */
+struct batched_entropy {
+ union {
+ u64 entropy_u64[LRNG_DRNG_BLOCKSIZE / sizeof(u64)];
+ u32 entropy_u32[LRNG_DRNG_BLOCKSIZE / sizeof(u32)];
+ u16 entropy_u16[LRNG_DRNG_BLOCKSIZE / sizeof(u16)];
+ u8 entropy_u8[LRNG_DRNG_BLOCKSIZE / sizeof(u8)];
+ };
+ unsigned int position;
+ spinlock_t batch_lock;
+};
+
+/*
+ * Get a random word for internal kernel use only. The quality of the random
+ * number is as good as /dev/urandom, but there is no backtrack protection,
+ * with the goal of being quite fast and not depleting entropy.
+ */
+static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u64) = {
+ .batch_lock = __SPIN_LOCK_UNLOCKED(batched_entropy_u64.lock),
+};
+
+u64 get_random_u64(void)
+{
+ u64 ret;
+ unsigned long flags;
+ struct batched_entropy *batch;
+
+ lrng_debug_report_seedlevel("get_random_u64");
+
+ batch = raw_cpu_ptr(&batched_entropy_u64);
+ spin_lock_irqsave(&batch->batch_lock, flags);
+ if (batch->position % ARRAY_SIZE(batch->entropy_u64) == 0) {
+ lrng_get_random_bytes(batch->entropy_u64, LRNG_DRNG_BLOCKSIZE);
+ batch->position = 0;
+ }
+ ret = batch->entropy_u64[batch->position++];
+ spin_unlock_irqrestore(&batch->batch_lock, flags);
+ return ret;
+}
+EXPORT_SYMBOL(get_random_u64);
+
+static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u32) = {
+ .batch_lock = __SPIN_LOCK_UNLOCKED(batched_entropy_u32.lock),
+};
+
+u32 get_random_u32(void)
+{
+ u32 ret;
+ unsigned long flags;
+ struct batched_entropy *batch;
+
+ lrng_debug_report_seedlevel("get_random_u32");
+
+ batch = raw_cpu_ptr(&batched_entropy_u32);
+ spin_lock_irqsave(&batch->batch_lock, flags);
+ if (batch->position % ARRAY_SIZE(batch->entropy_u32) == 0) {
+ lrng_get_random_bytes(batch->entropy_u32, LRNG_DRNG_BLOCKSIZE);
+ batch->position = 0;
+ }
+ ret = batch->entropy_u32[batch->position++];
+ spin_unlock_irqrestore(&batch->batch_lock, flags);
+ return ret;
+}
+EXPORT_SYMBOL(get_random_u32);
+
+static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u16) = {
+ .batch_lock = __SPIN_LOCK_UNLOCKED(batched_entropy_u16.lock),
+};
+
+u16 get_random_u16(void)
+{
+ u16 ret;
+ unsigned long flags;
+ struct batched_entropy *batch;
+
+ lrng_debug_report_seedlevel("get_random_u16");
+
+ batch = raw_cpu_ptr(&batched_entropy_u16);
+ spin_lock_irqsave(&batch->batch_lock, flags);
+ if (batch->position % ARRAY_SIZE(batch->entropy_u16) == 0) {
+ lrng_get_random_bytes(batch->entropy_u16, LRNG_DRNG_BLOCKSIZE);
+ batch->position = 0;
+ }
+ ret = batch->entropy_u16[batch->position++];
+ spin_unlock_irqrestore(&batch->batch_lock, flags);
+ return ret;
+}
+EXPORT_SYMBOL(get_random_u16);
+
+static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u8) = {
+ .batch_lock = __SPIN_LOCK_UNLOCKED(batched_entropy_u8.lock),
+};
+
+u8 get_random_u8(void)
+{
+ u8 ret;
+ unsigned long flags;
+ struct batched_entropy *batch;
+
+ lrng_debug_report_seedlevel("get_random_u8");
+
+ batch = raw_cpu_ptr(&batched_entropy_u8);
+ spin_lock_irqsave(&batch->batch_lock, flags);
+ if (batch->position % ARRAY_SIZE(batch->entropy_u8) == 0) {
+ lrng_get_random_bytes(batch->entropy_u8, LRNG_DRNG_BLOCKSIZE);
+ batch->position = 0;
+ }
+ ret = batch->entropy_u8[batch->position++];
+ spin_unlock_irqrestore(&batch->batch_lock, flags);
+ return ret;
+}
+EXPORT_SYMBOL(get_random_u8);
+
+/* Taken directly from random.c */
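+/*
+ * This is the multiply-and-shift bounded-range technique (due to Lemire):
+ * the 64-bit product (u64)ceil * rand32 is uniform over [0, ceil * 2^32),
+ * so its upper 32 bits are uniform over [0, ceil) except for a small bias
+ * when the lower 32 bits fall below 2^32 mod ceil -- the value that
+ * "-ceil % ceil" computes in 32-bit arithmetic. Such samples are rejected
+ * and redrawn.
+ */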
+u32 __get_random_u32_below(u32 ceil)
+{
+ u64 mult = (u64)ceil * get_random_u32();
+
+ if (unlikely((u32)mult < ceil)) {
+ u32 bound = -ceil % ceil;
+ while (unlikely((u32)mult < bound))
+ mult = (u64)ceil * get_random_u32();
+ }
+ return mult >> 32;
+}
+EXPORT_SYMBOL(__get_random_u32_below);
+
+#ifdef CONFIG_SMP
+/*
+ * This function is called when the CPU is coming up, with entry
+ * CPUHP_RANDOM_PREPARE, which comes before CPUHP_WORKQUEUE_PREP.
+ */
+int random_prepare_cpu(unsigned int cpu)
+{
+ /*
+ * When the cpu comes back online, immediately invalidate all batches,
+ * so that we serve fresh randomness.
+ */
+ per_cpu_ptr(&batched_entropy_u8, cpu)->position = 0;
+ per_cpu_ptr(&batched_entropy_u16, cpu)->position = 0;
+ per_cpu_ptr(&batched_entropy_u32, cpu)->position = 0;
+ per_cpu_ptr(&batched_entropy_u64, cpu)->position = 0;
+ return 0;
+}
+
+int random_online_cpu(unsigned int cpu)
+{
+ return 0;
+}
+#endif
+
+/*
+ * It's important to invalidate all potential batched entropy that might
+ * be stored before the DRNG is initialized, which we can do lazily by
+ * simply resetting the position counter to zero so that the batch is
+ * re-extracted on the next use.
+ */
+void invalidate_batched_entropy(void)
+{
+ int cpu;
+ unsigned long flags;
+
+ for_each_possible_cpu(cpu) {
+ struct batched_entropy *batched_entropy;
+
+ batched_entropy = per_cpu_ptr(&batched_entropy_u8, cpu);
+ spin_lock_irqsave(&batched_entropy->batch_lock, flags);
+ batched_entropy->position = 0;
+ spin_unlock_irqrestore(&batched_entropy->batch_lock, flags);
+
+ batched_entropy = per_cpu_ptr(&batched_entropy_u16, cpu);
+ spin_lock_irqsave(&batched_entropy->batch_lock, flags);
+ batched_entropy->position = 0;
+ spin_unlock_irqrestore(&batched_entropy->batch_lock, flags);
+
+ batched_entropy = per_cpu_ptr(&batched_entropy_u32, cpu);
+ spin_lock_irqsave(&batched_entropy->batch_lock, flags);
+ batched_entropy->position = 0;
+ spin_unlock_irqrestore(&batched_entropy->batch_lock, flags);
+
+ batched_entropy = per_cpu_ptr(&batched_entropy_u64, cpu);
+		spin_lock_irqsave(&batched_entropy->batch_lock, flags);
+ batched_entropy->position = 0;
+ spin_unlock_irqrestore(&batched_entropy->batch_lock, flags);
+ }
+}
diff --git a/drivers/char/lrng/lrng_interface_dev.c b/drivers/char/lrng/lrng_interface_dev.c
new file mode 100644
index 000000000000..e60060d402b3
--- /dev/null
+++ b/drivers/char/lrng/lrng_interface_dev.c
@@ -0,0 +1,35 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause
+/*
+ * LRNG user space device file interface
+ *
+ * Copyright (C) 2022, Stephan Mueller <smueller@chronox.de>
+ */
+
+#include <linux/miscdevice.h>
+#include <linux/module.h>
+
+#include "lrng_interface_dev_common.h"
+
+static const struct file_operations lrng_fops = {
+ .read = lrng_drng_read_block,
+ .write = lrng_drng_write,
+ .poll = lrng_random_poll,
+ .unlocked_ioctl = lrng_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
+ .fasync = lrng_fasync,
+ .llseek = noop_llseek,
+};
+
+static struct miscdevice lrng_miscdev = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "lrng",
+ .nodename = "lrng",
+ .fops = &lrng_fops,
+ .mode = 0666
+};
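+
+/*
+ * Registering this misc device creates /dev/lrng (world read/writable as
+ * per .mode above). Illustrative user space sketch:
+ *
+ *   int fd = open("/dev/lrng", O_RDONLY);
+ *   ssize_t n = read(fd, buf, sizeof(buf));
+ *
+ * As .read is wired to lrng_drng_read_block, the read blocks until the
+ * LRNG is fully operational unless O_NONBLOCK is set.
+ */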
+
+static int __init lrng_dev_if_mod_init(void)
+{
+ return misc_register(&lrng_miscdev);
+}
+device_initcall(lrng_dev_if_mod_init);
diff --git a/drivers/char/lrng/lrng_interface_dev_common.c b/drivers/char/lrng/lrng_interface_dev_common.c
new file mode 100644
index 000000000000..f69e86ecd983
--- /dev/null
+++ b/drivers/char/lrng/lrng_interface_dev_common.c
@@ -0,0 +1,315 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause
+/*
+ * LRNG User and kernel space interfaces
+ *
+ * Copyright (C) 2022, Stephan Mueller <smueller@chronox.de>
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/random.h>
+#include <linux/slab.h>
+
+#include "lrng_drng_mgr.h"
+#include "lrng_es_aux.h"
+#include "lrng_es_mgr.h"
+#include "lrng_interface_dev_common.h"
+
+DECLARE_WAIT_QUEUE_HEAD(lrng_write_wait);
+static struct fasync_struct *fasync;
+
+static bool lrng_seed_hw = true; /* Allow HW to provide seed */
+static bool lrng_seed_user = true; /* Allow user space to provide seed */
+
+/********************************** Helper ***********************************/
+
+static u32 lrng_get_aux_ent(void)
+{
+ return lrng_es[lrng_ext_es_aux]->curr_entropy(0);
+}
+
+/* Is the DRNG seed level too low? */
+bool lrng_need_entropy(void)
+{
+ return (lrng_get_aux_ent() < lrng_write_wakeup_bits);
+}
+
+void lrng_writer_wakeup(void)
+{
+ if (lrng_need_entropy() && wq_has_sleeper(&lrng_write_wait)) {
+ wake_up_interruptible(&lrng_write_wait);
+ kill_fasync(&fasync, SIGIO, POLL_OUT);
+ }
+}
+
+void lrng_init_wakeup_dev(void)
+{
+ kill_fasync(&fasync, SIGIO, POLL_IN);
+}
+
+/* External entropy provider is allowed to provide seed data */
+bool lrng_state_exseed_allow(enum lrng_external_noise_source source)
+{
+ if (source == lrng_noise_source_hw)
+ return lrng_seed_hw;
+ return lrng_seed_user;
+}
+
+/* Enable / disable external entropy provider to furnish seed */
+void lrng_state_exseed_set(enum lrng_external_noise_source source, bool type)
+{
+ /*
+ * If the LRNG is not yet operational, allow all entropy sources
+	 * to deliver data unconditionally to get fully seeded as soon as
+	 * possible.
+ */
+ if (!lrng_state_operational())
+ return;
+
+ if (source == lrng_noise_source_hw)
+ lrng_seed_hw = type;
+ else
+ lrng_seed_user = type;
+}
+
+void lrng_state_exseed_allow_all(void)
+{
+ lrng_state_exseed_set(lrng_noise_source_hw, true);
+ lrng_state_exseed_set(lrng_noise_source_user, true);
+}
+
+/************************ LRNG user output interfaces *************************/
+
+ssize_t lrng_read_seed(char __user *buf, size_t nbytes, unsigned int flags)
+{
+ ssize_t ret = 0;
+ u64 t[(sizeof(struct entropy_buf) + 3 * sizeof(u64) - 1) / sizeof(u64)];
+
+ memset(t, 0, sizeof(t));
+ ret = lrng_get_seed(t, min_t(size_t, nbytes, sizeof(t)), flags);
+ if (ret == -EMSGSIZE && copy_to_user(buf, t, sizeof(u64)))
+ ret = -EFAULT;
+ else if (ret > 0 && copy_to_user(buf, t, ret))
+ ret = -EFAULT;
+
+ memzero_explicit(t, sizeof(t));
+
+ return ret;
+}
+
+ssize_t lrng_read_common(char __user *buf, size_t nbytes, bool pr)
+{
+ ssize_t ret = 0;
+ u8 tmpbuf[LRNG_DRNG_BLOCKSIZE] __aligned(LRNG_KCAPI_ALIGN);
+ u8 *tmp_large = NULL, *tmp = tmpbuf;
+ u32 tmplen = sizeof(tmpbuf);
+
+ if (nbytes == 0)
+ return 0;
+
+ /*
+	 * Satisfy large read requests -- as the common case is smaller
+	 * request sizes, such as 16 or 32 bytes, avoid the kmalloc overhead
+	 * for those by using the stack variable tmpbuf.
+ */
+ if (!CONFIG_BASE_SMALL && (nbytes > sizeof(tmpbuf))) {
+ tmplen = min_t(u32, nbytes, LRNG_DRNG_MAX_REQSIZE);
+ tmp_large = kmalloc(tmplen + LRNG_KCAPI_ALIGN, GFP_KERNEL);
+ if (!tmp_large)
+ tmplen = sizeof(tmpbuf);
+ else
+ tmp = PTR_ALIGN(tmp_large, LRNG_KCAPI_ALIGN);
+ }
+
+ while (nbytes) {
+ u32 todo = min_t(u32, nbytes, tmplen);
+ int rc = 0;
+
+ /* Reschedule if we received a large request. */
+ if ((tmp_large) && need_resched()) {
+ if (signal_pending(current)) {
+ if (ret == 0)
+ ret = -ERESTARTSYS;
+ break;
+ }
+ schedule();
+ }
+
+ rc = lrng_drng_get_sleep(tmp, todo, pr);
+ if (rc <= 0) {
+ if (rc < 0)
+ ret = rc;
+ break;
+ }
+ if (copy_to_user(buf, tmp, rc)) {
+ ret = -EFAULT;
+ break;
+ }
+
+ nbytes -= rc;
+ buf += rc;
+ ret += rc;
+ }
+
+ /* Wipe data just returned from memory */
+ if (tmp_large)
+ kfree_sensitive(tmp_large);
+ else
+ memzero_explicit(tmpbuf, sizeof(tmpbuf));
+
+ return ret;
+}
+
+ssize_t lrng_read_common_block(int nonblock, int pr,
+ char __user *buf, size_t nbytes)
+{
+ int ret;
+
+ if (nbytes == 0)
+ return 0;
+
+ ret = lrng_drng_sleep_while_nonoperational(nonblock);
+ if (ret)
+ return ret;
+
+ return lrng_read_common(buf, nbytes, !!pr);
+}
+
+ssize_t lrng_drng_read_block(struct file *file, char __user *buf, size_t nbytes,
+ loff_t *ppos)
+{
+ return lrng_read_common_block(file->f_flags & O_NONBLOCK,
+ file->f_flags & O_SYNC, buf, nbytes);
+}
+
+__poll_t lrng_random_poll(struct file *file, poll_table *wait)
+{
+ __poll_t mask;
+
+ poll_wait(file, &lrng_init_wait, wait);
+ poll_wait(file, &lrng_write_wait, wait);
+ mask = 0;
+ if (lrng_state_operational())
+ mask |= EPOLLIN | EPOLLRDNORM;
+ if (lrng_need_entropy() ||
+ lrng_state_exseed_allow(lrng_noise_source_user)) {
+ lrng_state_exseed_set(lrng_noise_source_user, false);
+ mask |= EPOLLOUT | EPOLLWRNORM;
+ }
+ return mask;
+}
+
+ssize_t lrng_drng_write_common(const char __user *buffer, size_t count,
+ u32 entropy_bits)
+{
+ ssize_t ret = 0;
+ u8 buf[64] __aligned(LRNG_KCAPI_ALIGN);
+ const char __user *p = buffer;
+ u32 orig_entropy_bits = entropy_bits;
+
+ if (!lrng_get_available()) {
+ ret = lrng_drng_initalize();
+		/* Bail out if the DRNG could not be initialized */
+		if (ret)
+			return ret;
+ }
+
+ count = min_t(size_t, count, INT_MAX);
+ while (count > 0) {
+ size_t bytes = min_t(size_t, count, sizeof(buf));
+ u32 ent = min_t(u32, bytes<<3, entropy_bits);
+
+ if (copy_from_user(&buf, p, bytes))
+ return -EFAULT;
+ /* Inject data into entropy pool */
+ lrng_pool_insert_aux(buf, bytes, ent);
+
+ count -= bytes;
+ p += bytes;
+ ret += bytes;
+ entropy_bits -= ent;
+
+ cond_resched();
+ }
+
+ /* Force reseed of DRNG during next data request. */
+ if (!orig_entropy_bits)
+ lrng_drng_force_reseed();
+
+ return ret;
+}
+
+ssize_t lrng_drng_write(struct file *file, const char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ return lrng_drng_write_common(buffer, count, 0);
+}
+
+long lrng_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
+{
+ u32 digestsize_bits;
+ int size, ent_count_bits, ret;
+ int __user *p = (int __user *)arg;
+
+ switch (cmd) {
+ case RNDGETENTCNT:
+ ent_count_bits = lrng_avail_entropy_aux();
+ if (put_user(ent_count_bits, p))
+ return -EFAULT;
+ return 0;
+ case RNDADDTOENTCNT:
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+ if (get_user(ent_count_bits, p))
+ return -EFAULT;
+ ent_count_bits = (int)lrng_get_aux_ent() + ent_count_bits;
+ if (ent_count_bits < 0)
+ ent_count_bits = 0;
+ digestsize_bits = lrng_get_digestsize();
+ if (ent_count_bits > digestsize_bits)
+ ent_count_bits = digestsize_bits;
+ lrng_pool_set_entropy(ent_count_bits);
+ return 0;
+ case RNDADDENTROPY:
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+ if (get_user(ent_count_bits, p++))
+ return -EFAULT;
+ if (ent_count_bits < 0)
+ return -EINVAL;
+ if (get_user(size, p++))
+ return -EFAULT;
+ if (size < 0)
+ return -EINVAL;
+ /* there cannot be more entropy than data */
+ ent_count_bits = min(ent_count_bits, size<<3);
+ ret = lrng_drng_write_common((const char __user *)p, size,
+ ent_count_bits);
+ return (ret < 0) ? ret : 0;
+ case RNDZAPENTCNT:
+ case RNDCLEARPOOL:
+ /* Clear the entropy pool counter. */
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+ lrng_pool_set_entropy(0);
+ return 0;
+ case RNDRESEEDCRNG:
+ /*
+		 * We leave the capability check here since it is present
+		 * in the upstream RNG implementation. Yet, user space
+		 * can trigger a reseed as easily as by writing into
+		 * /dev/random or /dev/urandom, where no privilege is needed.
+ */
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+ /* Force a reseed of all DRNGs */
+ lrng_drng_force_reseed();
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+EXPORT_SYMBOL(lrng_ioctl);
+
+int lrng_fasync(int fd, struct file *filp, int on)
+{
+ return fasync_helper(fd, filp, on, &fasync);
+}
diff --git a/drivers/char/lrng/lrng_interface_dev_common.h b/drivers/char/lrng/lrng_interface_dev_common.h
new file mode 100644
index 000000000000..9e6603ad8af4
--- /dev/null
+++ b/drivers/char/lrng/lrng_interface_dev_common.h
@@ -0,0 +1,51 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause */
+/*
+ * Copyright (C) 2022, Stephan Mueller <smueller@chronox.de>
+ */
+
+#ifndef _LRNG_INTERFACE_DEV_COMMON_H
+#define _LRNG_INTERFACE_DEV_COMMON_H
+
+#include <linux/poll.h>
+#include <linux/wait.h>
+
+/******************* Upstream functions hooked into the LRNG ******************/
+enum lrng_external_noise_source {
+ lrng_noise_source_hw,
+ lrng_noise_source_user
+};
+
+#ifdef CONFIG_LRNG_COMMON_DEV_IF
+void lrng_writer_wakeup(void);
+void lrng_init_wakeup_dev(void);
+void lrng_state_exseed_set(enum lrng_external_noise_source source, bool type);
+void lrng_state_exseed_allow_all(void);
+#else /* CONFIG_LRNG_COMMON_DEV_IF */
+static inline void lrng_writer_wakeup(void) { }
+static inline void lrng_init_wakeup_dev(void) { }
+static inline void
+lrng_state_exseed_set(enum lrng_external_noise_source source, bool type) { }
+static inline void lrng_state_exseed_allow_all(void) { }
+#endif /* CONFIG_LRNG_COMMON_DEV_IF */
+
+/****** Downstream service functions to actual interface implementations ******/
+
+bool lrng_state_exseed_allow(enum lrng_external_noise_source source);
+int lrng_fasync(int fd, struct file *filp, int on);
+long lrng_ioctl(struct file *f, unsigned int cmd, unsigned long arg);
+ssize_t lrng_drng_write(struct file *file, const char __user *buffer,
+ size_t count, loff_t *ppos);
+ssize_t lrng_drng_write_common(const char __user *buffer, size_t count,
+ u32 entropy_bits);
+__poll_t lrng_random_poll(struct file *file, poll_table *wait);
+ssize_t lrng_read_common_block(int nonblock, int pr,
+ char __user *buf, size_t nbytes);
+ssize_t lrng_drng_read_block(struct file *file, char __user *buf, size_t nbytes,
+ loff_t *ppos);
+ssize_t lrng_read_seed(char __user *buf, size_t nbytes, unsigned int flags);
+ssize_t lrng_read_common(char __user *buf, size_t nbytes, bool pr);
+bool lrng_need_entropy(void);
+
+extern struct wait_queue_head lrng_write_wait;
+
+#endif /* _LRNG_INTERFACE_DEV_COMMON_H */
diff --git a/drivers/char/lrng/lrng_interface_hwrand.c b/drivers/char/lrng/lrng_interface_hwrand.c
new file mode 100644
index 000000000000..e841eea13348
--- /dev/null
+++ b/drivers/char/lrng/lrng_interface_hwrand.c
@@ -0,0 +1,68 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause
+/*
+ * LRNG interface with the HW-Random framework
+ *
+ * Copyright (C) 2022, Stephan Mueller <smueller@chronox.de>
+ */
+
+#include <linux/lrng.h>
+#include <linux/hw_random.h>
+#include <linux/module.h>
+
+static int lrng_hwrand_if_random(struct hwrng *rng, void *buf, size_t max,
+ bool wait)
+{
+ /*
+	 * lrng_get_random_bytes_full is not called as we cannot block.
+	 *
+	 * Note: We should either adjust .quality below depending on
+	 * rng_is_initialized() or block here, but neither is supported by
+	 * the hw_random framework.
+ */
+ lrng_get_random_bytes(buf, max);
+ return (int)max;
+}
+
+static struct hwrng lrng_hwrand = {
+ .name = "lrng",
+ .init = NULL,
+ .cleanup = NULL,
+ .read = lrng_hwrand_if_random,
+
+ /*
+	 * We set .quality only in case the LRNG does not provide the common
+	 * interfaces or does not use the legacy RNG as entropy source. This
+	 * avoids the hw_random framework automatically spawning its hwrng
+	 * kernel thread to feed data into add_hwgenerator_randomness. When
+	 * the LRNG implements the common interfaces, this function feeds the
+	 * data directly into the LRNG. If the LRNG uses the legacy RNG as
+	 * entropy source, add_hwgenerator_randomness is implemented by the
+	 * legacy RNG, but still eventually feeds the data into the LRNG. We
+	 * should avoid such circular loops.
+	 *
+	 * The .quality value is measured in entropy bits per 1024 bits of
+	 * data. We can specify full entropy (1024) here, because the LRNG is
+	 * designed to provide full entropy.
+ */
+#if !defined(CONFIG_LRNG_RANDOM_IF) && \
+ !defined(CONFIG_LRNG_KERNEL_RNG)
+ .quality = 1024,
+#endif
+};
+
+static int __init lrng_hwrand_if_mod_init(void)
+{
+ return hwrng_register(&lrng_hwrand);
+}
+
+static void __exit lrng_hwrand_if_mod_exit(void)
+{
+ hwrng_unregister(&lrng_hwrand);
+}
+
+module_init(lrng_hwrand_if_mod_init);
+module_exit(lrng_hwrand_if_mod_exit);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_AUTHOR("Stephan Mueller <smueller@chronox.de>");
+MODULE_DESCRIPTION("Entropy Source and DRNG Manager HW-Random Interface");
diff --git a/drivers/char/lrng/lrng_interface_kcapi.c b/drivers/char/lrng/lrng_interface_kcapi.c
new file mode 100644
index 000000000000..4cb511f8088e
--- /dev/null
+++ b/drivers/char/lrng/lrng_interface_kcapi.c
@@ -0,0 +1,129 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause
+/*
+ * LRNG interface with the RNG framework of the kernel crypto API
+ *
+ * Copyright (C) 2022, Stephan Mueller <smueller@chronox.de>
+ */
+
+#include <linux/lrng.h>
+#include <linux/module.h>
+#include <crypto/internal/rng.h>
+
+#include "lrng_drng_mgr.h"
+#include "lrng_es_aux.h"
+
+static int lrng_kcapi_if_init(struct crypto_tfm *tfm)
+{
+ return 0;
+}
+
+static void lrng_kcapi_if_cleanup(struct crypto_tfm *tfm) { }
+
+static int lrng_kcapi_if_reseed(const u8 *src, unsigned int slen)
+{
+ int ret;
+
+ if (!slen)
+ return 0;
+
+ /* Insert caller-provided data without crediting entropy */
+ ret = lrng_pool_insert_aux((u8 *)src, slen, 0);
+ if (ret)
+ return ret;
+
+ /* Make sure the new data is immediately available to DRNG */
+ lrng_drng_force_reseed();
+
+ return 0;
+}
+
+static int lrng_kcapi_if_random(struct crypto_rng *tfm,
+ const u8 *src, unsigned int slen,
+ u8 *rdata, unsigned int dlen)
+{
+ int ret = lrng_kcapi_if_reseed(src, slen);
+
+ if (!ret)
+ lrng_get_random_bytes_full(rdata, dlen);
+
+ return ret;
+}
+
+static int lrng_kcapi_if_reset(struct crypto_rng *tfm,
+ const u8 *seed, unsigned int slen)
+{
+ return lrng_kcapi_if_reseed(seed, slen);
+}
+
+static struct rng_alg lrng_alg = {
+ .generate = lrng_kcapi_if_random,
+ .seed = lrng_kcapi_if_reset,
+ .seedsize = 0,
+ .base = {
+ .cra_name = "stdrng",
+ .cra_driver_name = "lrng",
+ .cra_priority = 500,
+ .cra_ctxsize = 0,
+ .cra_module = THIS_MODULE,
+ .cra_init = lrng_kcapi_if_init,
+ .cra_exit = lrng_kcapi_if_cleanup,
+
+ }
+};
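+
+/*
+ * Illustrative sketch of how another kernel subsystem can obtain this DRNG
+ * via the crypto API -- "stdrng" resolves to the registered RNG with the
+ * highest cra_priority, while "lrng" requests this implementation
+ * explicitly:
+ *
+ *   struct crypto_rng *rng = crypto_alloc_rng("lrng", 0, 0);
+ *
+ *   if (!IS_ERR(rng))
+ *           crypto_rng_get_bytes(rng, outbuf, outlen);
+ */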
+
+#ifdef CONFIG_LRNG_DRNG_ATOMIC
+static int lrng_kcapi_if_random_atomic(struct crypto_rng *tfm,
+ const u8 *src, unsigned int slen,
+ u8 *rdata, unsigned int dlen)
+{
+ int ret = lrng_kcapi_if_reseed(src, slen);
+
+ if (!ret)
+ lrng_get_random_bytes(rdata, dlen);
+
+ return ret;
+}
+
+static struct rng_alg lrng_alg_atomic = {
+ .generate = lrng_kcapi_if_random_atomic,
+ .seed = lrng_kcapi_if_reset,
+ .seedsize = 0,
+ .base = {
+ .cra_name = "lrng_atomic",
+ .cra_driver_name = "lrng_atomic",
+ .cra_priority = 100,
+ .cra_ctxsize = 0,
+ .cra_module = THIS_MODULE,
+ .cra_init = lrng_kcapi_if_init,
+ .cra_exit = lrng_kcapi_if_cleanup,
+
+ }
+};
+#endif /* CONFIG_LRNG_DRNG_ATOMIC */
+
+static int __init lrng_kcapi_if_mod_init(void)
+{
+ return
+#ifdef CONFIG_LRNG_DRNG_ATOMIC
+ crypto_register_rng(&lrng_alg_atomic) ?:
+#endif
+ crypto_register_rng(&lrng_alg);
+}
+
+static void __exit lrng_kcapi_if_mod_exit(void)
+{
+ crypto_unregister_rng(&lrng_alg);
+#ifdef CONFIG_LRNG_DRNG_ATOMIC
+ crypto_unregister_rng(&lrng_alg_atomic);
+#endif
+}
+
+module_init(lrng_kcapi_if_mod_init);
+module_exit(lrng_kcapi_if_mod_exit);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_AUTHOR("Stephan Mueller <smueller@chronox.de>");
+MODULE_DESCRIPTION("Entropy Source and DRNG Manager kernel crypto API RNG framework interface");
+MODULE_ALIAS_CRYPTO("lrng");
+MODULE_ALIAS_CRYPTO("lrng_atomic");
+MODULE_ALIAS_CRYPTO("stdrng");
diff --git a/drivers/char/lrng/lrng_interface_random_kernel.c b/drivers/char/lrng/lrng_interface_random_kernel.c
new file mode 100644
index 000000000000..fabf2109ceaf
--- /dev/null
+++ b/drivers/char/lrng/lrng_interface_random_kernel.c
@@ -0,0 +1,248 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause
+/*
+ * LRNG Kernel space interfaces API/ABI compliant to linux/random.h
+ *
+ * Copyright (C) 2022, Stephan Mueller <smueller@chronox.de>
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/blkdev.h>
+#include <linux/hw_random.h>
+#include <linux/kthread.h>
+#include <linux/lrng.h>
+#include <linux/random.h>
+
+#include "lrng_es_aux.h"
+#include "lrng_es_irq.h"
+#include "lrng_es_mgr.h"
+#include "lrng_interface_dev_common.h"
+#include "lrng_interface_random_kernel.h"
+
+static ATOMIC_NOTIFIER_HEAD(random_ready_notifier);
+
+/********************************** Helper ***********************************/
+
+static bool lrng_trust_bootloader __initdata =
+ IS_ENABLED(CONFIG_RANDOM_TRUST_BOOTLOADER);
+
+static int __init lrng_parse_trust_bootloader(char *arg)
+{
+ return kstrtobool(arg, &lrng_trust_bootloader);
+}
+early_param("random.trust_bootloader", lrng_parse_trust_bootloader);
+
+void __init random_init_early(const char *command_line)
+{
+ lrng_rand_initialize_early();
+ lrng_pool_insert_aux(command_line, strlen(command_line), 0);
+}
+
+void __init random_init(void)
+{
+ lrng_rand_initialize();
+}
+
+/*
+ * Add a callback function that will be invoked when the LRNG is initialised,
+ * or immediately if it already has been. Only use this if you are absolutely
+ * sure it is required. Most users should instead be able to test
+ * `rng_is_initialized()` on demand, or make use of `get_random_bytes_wait()`.
+ */
+int __cold execute_with_initialized_rng(struct notifier_block *nb)
+{
+ unsigned long flags;
+ int ret = 0;
+
+ spin_lock_irqsave(&random_ready_notifier.lock, flags);
+ if (rng_is_initialized())
+ nb->notifier_call(nb, 0, NULL);
+ else
+ ret = raw_notifier_chain_register(
+ (struct raw_notifier_head *)&random_ready_notifier.head,
+ nb);
+ spin_unlock_irqrestore(&random_ready_notifier.lock, flags);
+ return ret;
+}
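+
+/*
+ * Illustrative caller sketch (all names below are hypothetical and not part
+ * of this patch):
+ *
+ *   static int my_rng_ready_cb(struct notifier_block *nb,
+ *                              unsigned long action, void *data)
+ *   {
+ *           ... start work that requires a seeded RNG ...
+ *           return 0;
+ *   }
+ *
+ *   static struct notifier_block my_rng_ready_nb = {
+ *           .notifier_call = my_rng_ready_cb,
+ *   };
+ *
+ *   execute_with_initialized_rng(&my_rng_ready_nb);
+ */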
+
+void lrng_kick_random_ready(void)
+{
+ atomic_notifier_call_chain(&random_ready_notifier, 0, NULL);
+}
+
+/************************ LRNG kernel input interfaces ************************/
+
+/*
+ * add_hwgenerator_randomness() - Interface for in-kernel drivers of true
+ * hardware RNGs.
+ *
+ * Those devices may produce endless random bits and will be throttled
+ * when our pool is full.
+ *
+ * @buffer: buffer holding the entropic data from HW noise sources to be
+ * inserted into the entropy pool.
+ * @count: length of buffer
+ * @entropy_bits: amount of entropy in buffer (value is in bits)
+ */
+void add_hwgenerator_randomness(const void *buffer, size_t count,
+ size_t entropy_bits, bool sleep_after)
+{
+ /*
+ * Suspend writing if we are fully loaded with entropy or if caller
+ * did not provide any entropy. We'll be woken up again once below
+ * lrng_write_wakeup_thresh, or when the calling thread is about to
+ * terminate.
+ */
+ wait_event_interruptible(lrng_write_wait,
+ (lrng_need_entropy() && entropy_bits) ||
+ lrng_state_exseed_allow(lrng_noise_source_hw) ||
+ !sleep_after ||
+ kthread_should_stop());
+ lrng_state_exseed_set(lrng_noise_source_hw, false);
+ lrng_pool_insert_aux(buffer, count, entropy_bits);
+}
+EXPORT_SYMBOL_GPL(add_hwgenerator_randomness);
+
+/*
+ * add_bootloader_randomness() - Handle random seed passed by bootloader.
+ *
+ * If the seed is trustworthy, it is credited with entropy as if it came from
+ * a hardware RNG. Otherwise it is treated as uncredited device data.
+ * The decision is controlled by CONFIG_RANDOM_TRUST_BOOTLOADER.
+ *
+ * @buf: buffer holding the seed data passed by the bootloader to be
+ * inserted into the entropy pool.
+ * @size: length of buffer
+ */
+void __init add_bootloader_randomness(const void *buf, size_t size)
+{
+ lrng_pool_insert_aux(buf, size, lrng_trust_bootloader ? size * 8 : 0);
+}
+
+/*
+ * Callback for HID layer -- use the HID event values to stir the entropy pool
+ */
+void add_input_randomness(unsigned int type, unsigned int code,
+ unsigned int value)
+{
+ static unsigned char last_value;
+
+ /* ignore autorepeat and the like */
+ if (value == last_value)
+ return;
+
+ last_value = value;
+
+ lrng_irq_array_add_u32((type << 4) ^ code ^ (code >> 4) ^ value);
+}
+EXPORT_SYMBOL_GPL(add_input_randomness);
+
+/*
+ * add_device_randomness() - Add device- or boot-specific data to the entropy
+ * pool to help initialize it.
+ *
+ * None of this adds any entropy; it is meant to avoid the problem of
+ * the entropy pool having similar initial state across largely
+ * identical devices.
+ *
+ * @buf: buffer holding the device- or boot-specific data to be inserted into
+ * the entropy pool.
+ * @size: length of buffer
+ */
+void add_device_randomness(const void *buf, size_t size)
+{
+ lrng_pool_insert_aux((u8 *)buf, size, 0);
+}
+EXPORT_SYMBOL(add_device_randomness);
+
+#ifdef CONFIG_BLOCK
+void rand_initialize_disk(struct gendisk *disk) { }
+void add_disk_randomness(struct gendisk *disk) { }
+EXPORT_SYMBOL(add_disk_randomness);
+#endif
+
+#ifndef CONFIG_LRNG_IRQ
+void add_interrupt_randomness(int irq) { }
+EXPORT_SYMBOL(add_interrupt_randomness);
+#endif
+
+#if IS_ENABLED(CONFIG_VMGENID)
+static BLOCKING_NOTIFIER_HEAD(lrng_vmfork_chain);
+
+/*
+ * Handle a new VM generation ID, which is unique but not secret, so we
+ * don't credit it with entropy; we do, however, immediately force a
+ * reseed so that the DRNG uses it posthaste.
+ */
+void add_vmfork_randomness(const void *unique_vm_id, size_t size)
+{
+ add_device_randomness(unique_vm_id, size);
+ if (lrng_state_operational())
+ lrng_drng_force_reseed();
+ blocking_notifier_call_chain(&lrng_vmfork_chain, 0, NULL);
+}
+#if IS_MODULE(CONFIG_VMGENID)
+EXPORT_SYMBOL_GPL(add_vmfork_randomness);
+#endif
+
+int register_random_vmfork_notifier(struct notifier_block *nb)
+{
+ return blocking_notifier_chain_register(&lrng_vmfork_chain, nb);
+}
+EXPORT_SYMBOL_GPL(register_random_vmfork_notifier);
+
+int unregister_random_vmfork_notifier(struct notifier_block *nb)
+{
+ return blocking_notifier_chain_unregister(&lrng_vmfork_chain, nb);
+}
+EXPORT_SYMBOL_GPL(unregister_random_vmfork_notifier);
+#endif
+
+/*********************** LRNG kernel output interfaces ************************/
+
+/*
+ * get_random_bytes() - Provider of cryptographic strong random numbers for
+ * kernel-internal usage.
+ *
+ * This function is appropriate for all in-kernel use cases. However,
+ * it will always use the ChaCha20 DRNG.
+ *
+ * @buf: buffer to store the random bytes
+ * @nbytes: size of the buffer
+ */
+void get_random_bytes(void *buf, size_t nbytes)
+{
+ lrng_get_random_bytes(buf, nbytes);
+}
+EXPORT_SYMBOL(get_random_bytes);
+
+/*
+ * wait_for_random_bytes() - Wait for the LRNG to be seeded and thus
+ * guaranteed to supply cryptographically secure random numbers.
+ *
+ * This applies to: the /dev/urandom device, the get_random_bytes function,
+ * and the get_random_{u32,u64,int,long} family of functions. Using any of
+ * these functions without first calling this function forfeits the guarantee
+ * of security.
+ *
+ * Return:
+ * * 0 if the LRNG has been seeded.
+ * * -ERESTARTSYS if the function was interrupted by a signal.
+ */
+int wait_for_random_bytes(void)
+{
+ return lrng_drng_sleep_while_non_min_seeded();
+}
+EXPORT_SYMBOL(wait_for_random_bytes);
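+
+/*
+ * Illustrative in-kernel usage pattern (a sketch):
+ *
+ *   int err = wait_for_random_bytes();
+ *
+ *   if (err)
+ *           return err;     (interrupted by a signal)
+ *   get_random_bytes(key, sizeof(key));
+ */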
+
+/*
+ * Returns whether or not the LRNG has been seeded.
+ *
+ * Returns: true if the LRNG is operational, i.e. fully seeded.
+ *          false otherwise.
+ */
+bool rng_is_initialized(void)
+{
+ return lrng_state_operational();
+}
+EXPORT_SYMBOL(rng_is_initialized);
diff --git a/drivers/char/lrng/lrng_interface_random_kernel.h b/drivers/char/lrng/lrng_interface_random_kernel.h
new file mode 100644
index 000000000000..ea2b5be8d7f3
--- /dev/null
+++ b/drivers/char/lrng/lrng_interface_random_kernel.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause */
+/*
+ * Copyright (C) 2022, Stephan Mueller <smueller@chronox.de>
+ */
+
+#ifndef _LRNG_INTERFACE_RANDOM_H
+#define _LRNG_INTERFACE_RANDOM_H
+
+#ifdef CONFIG_LRNG_RANDOM_IF
+void invalidate_batched_entropy(void);
+void lrng_kick_random_ready(void);
+#else /* CONFIG_LRNG_RANDOM_IF */
+static inline void invalidate_batched_entropy(void) { }
+static inline void lrng_kick_random_ready(void) { }
+#endif /* CONFIG_LRNG_RANDOM_IF */
+
+#endif /* _LRNG_INTERFACE_RANDOM_H */
diff --git a/drivers/char/lrng/lrng_interface_random_user.c b/drivers/char/lrng/lrng_interface_random_user.c
new file mode 100644
index 000000000000..d12e883804d9
--- /dev/null
+++ b/drivers/char/lrng/lrng_interface_random_user.c
@@ -0,0 +1,104 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause
+/*
+ * LRNG Common user space interfaces compliant to random(4), random(7) and
+ * getrandom(2) man pages.
+ *
+ * Copyright (C) 2022, Stephan Mueller <smueller@chronox.de>
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/random.h>
+#include <linux/syscalls.h>
+
+#include "lrng_es_mgr.h"
+#include "lrng_interface_dev_common.h"
+
+static ssize_t lrng_drng_read(struct file *file, char __user *buf,
+ size_t nbytes, loff_t *ppos)
+{
+ if (!lrng_state_min_seeded())
+ pr_notice_ratelimited("%s - use of insufficiently seeded DRNG (%zu bytes read)\n",
+ current->comm, nbytes);
+ else if (!lrng_state_operational())
+ pr_debug_ratelimited("%s - use of not fully seeded DRNG (%zu bytes read)\n",
+ current->comm, nbytes);
+
+ return lrng_read_common(buf, nbytes, false);
+}
+
+const struct file_operations random_fops = {
+ .read = lrng_drng_read_block,
+ .write = lrng_drng_write,
+ .poll = lrng_random_poll,
+ .unlocked_ioctl = lrng_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
+ .fasync = lrng_fasync,
+ .llseek = noop_llseek,
+};
+
+const struct file_operations urandom_fops = {
+ .read = lrng_drng_read,
+ .write = lrng_drng_write,
+ .unlocked_ioctl = lrng_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
+ .fasync = lrng_fasync,
+ .llseek = noop_llseek,
+};
+
+/*
+ * GRND_SEED
+ *
+ * This flag requests that the data be provided directly from the entropy
+ * sources.
+ *
+ * The behavior of the call is exactly as outlined for the function
+ * lrng_get_seed in lrng.h.
+ */
+#define GRND_SEED 0x0010
+
+/*
+ * GRND_FULLY_SEEDED
+ *
+ * This flag indicates whether the caller wants to reseed a DRNG that is already
+ * fully seeded. See lrng_get_seed in lrng.h for details.
+ */
+#define GRND_FULLY_SEEDED 0x0020
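+
+/*
+ * Illustrative getrandom(2) invocations against this implementation (a
+ * sketch; GRND_SEED and GRND_FULLY_SEEDED are LRNG-specific and must be
+ * defined by the caller):
+ *
+ *   getrandom(buf, len, 0);              blocks until fully seeded
+ *   getrandom(buf, len, GRND_INSECURE);  never blocks, best effort
+ *   getrandom(buf, len, GRND_SEED | GRND_NONBLOCK);
+ *                                        raw seed data; fails instead of
+ *                                        blocking when entropy is short
+ */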
+
+SYSCALL_DEFINE3(getrandom, char __user *, buf, size_t, count,
+ unsigned int, flags)
+{
+ if (flags & ~(GRND_NONBLOCK|GRND_RANDOM|GRND_INSECURE|
+ GRND_SEED|GRND_FULLY_SEEDED))
+ return -EINVAL;
+
+ /*
+ * Requesting insecure and blocking randomness at the same time makes
+ * no sense.
+ */
+ if ((flags &
+ (GRND_INSECURE|GRND_RANDOM)) == (GRND_INSECURE|GRND_RANDOM))
+ return -EINVAL;
+ if ((flags &
+ (GRND_INSECURE|GRND_SEED)) == (GRND_INSECURE|GRND_SEED))
+ return -EINVAL;
+ if ((flags &
+ (GRND_RANDOM|GRND_SEED)) == (GRND_RANDOM|GRND_SEED))
+ return -EINVAL;
+
+ if (count > INT_MAX)
+ count = INT_MAX;
+
+ if (flags & GRND_INSECURE) {
+ return lrng_drng_read(NULL, buf, count, NULL);
+ } else if (flags & GRND_SEED) {
+ unsigned int seed_flags = (flags & GRND_NONBLOCK) ?
+ LRNG_GET_SEED_NONBLOCK : 0;
+
+ seed_flags |= (flags & GRND_FULLY_SEEDED) ?
+ LRNG_GET_SEED_FULLY_SEEDED : 0;
+ return lrng_read_seed(buf, count, seed_flags);
+ }
+
+ return lrng_read_common_block(flags & GRND_NONBLOCK,
+ flags & GRND_RANDOM, buf, count);
+}
diff --git a/drivers/char/lrng/lrng_numa.c b/drivers/char/lrng/lrng_numa.c
new file mode 100644
index 000000000000..d74dd8df2843
--- /dev/null
+++ b/drivers/char/lrng/lrng_numa.c
@@ -0,0 +1,124 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause
+/*
+ * LRNG NUMA support
+ *
+ * Copyright (C) 2022, Stephan Mueller <smueller@chronox.de>
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/lrng.h>
+#include <linux/slab.h>
+
+#include "lrng_drng_mgr.h"
+#include "lrng_es_irq.h"
+#include "lrng_es_mgr.h"
+#include "lrng_numa.h"
+#include "lrng_proc.h"
+
+static struct lrng_drng **lrng_drng __read_mostly = NULL;
+
+struct lrng_drng **lrng_drng_instances(void)
+{
+ /* counterpart to cmpxchg_release in _lrng_drngs_numa_alloc */
+ return READ_ONCE(lrng_drng);
+}
+
+/* Allocate the data structures for the per-NUMA node DRNGs */
+static void _lrng_drngs_numa_alloc(struct work_struct *work)
+{
+ struct lrng_drng **drngs;
+ struct lrng_drng *lrng_drng_init = lrng_drng_init_instance();
+ u32 node;
+ bool init_drng_used = false;
+
+ mutex_lock(&lrng_crypto_cb_update);
+
+ /* per-NUMA-node DRNGs are already present */
+ if (lrng_drng)
+ goto unlock;
+
+ /* Make sure the initial DRNG is initialized and its drng_cb is set */
+ if (lrng_drng_initalize())
+ goto err;
+
+ drngs = kcalloc(nr_node_ids, sizeof(void *), GFP_KERNEL|__GFP_NOFAIL);
+ for_each_online_node(node) {
+ struct lrng_drng *drng;
+
+ if (!init_drng_used) {
+ drngs[node] = lrng_drng_init;
+ init_drng_used = true;
+ continue;
+ }
+
+ drng = kmalloc_node(sizeof(struct lrng_drng),
+ GFP_KERNEL|__GFP_NOFAIL, node);
+		memset(drng, 0, sizeof(*drng));
+
+ if (lrng_drng_alloc_common(drng, lrng_drng_init->drng_cb)) {
+ kfree(drng);
+ goto err;
+ }
+
+ drng->hash_cb = lrng_drng_init->hash_cb;
+ drng->hash = lrng_drng_init->hash_cb->hash_alloc();
+ if (IS_ERR(drng->hash)) {
+ lrng_drng_init->drng_cb->drng_dealloc(drng->drng);
+ kfree(drng);
+ goto err;
+ }
+
+ mutex_init(&drng->lock);
+ rwlock_init(&drng->hash_lock);
+
+ /*
+		 * Do not seed the NUMA-node DRNGs from the previous DRNGs as
+		 * this would complicate the code. Let each new DRNG simply
+		 * reseed itself from the entropy sources.
+ */
+ drngs[node] = drng;
+
+ lrng_pool_inc_numa_node();
+ pr_info("DRNG and entropy pool read hash for NUMA node %d allocated\n",
+ node);
+ }
+
+ /* counterpart to READ_ONCE in lrng_drng_instances */
+ if (!cmpxchg_release(&lrng_drng, NULL, drngs)) {
+ lrng_pool_all_numa_nodes_seeded(false);
+ goto unlock;
+ }
+
+err:
+ for_each_online_node(node) {
+ struct lrng_drng *drng = drngs[node];
+
+ if (drng == lrng_drng_init)
+ continue;
+
+ if (drng) {
+ drng->hash_cb->hash_dealloc(drng->hash);
+ drng->drng_cb->drng_dealloc(drng->drng);
+ kfree(drng);
+ }
+ }
+ kfree(drngs);
+
+unlock:
+ mutex_unlock(&lrng_crypto_cb_update);
+}
+
+static DECLARE_WORK(lrng_drngs_numa_alloc_work, _lrng_drngs_numa_alloc);
+
+static void lrng_drngs_numa_alloc(void)
+{
+ schedule_work(&lrng_drngs_numa_alloc_work);
+}
+
+static int __init lrng_numa_init(void)
+{
+ lrng_drngs_numa_alloc();
+ return 0;
+}
+
+late_initcall(lrng_numa_init);
diff --git a/drivers/char/lrng/lrng_numa.h b/drivers/char/lrng/lrng_numa.h
new file mode 100644
index 000000000000..dc8dff9816ee
--- /dev/null
+++ b/drivers/char/lrng/lrng_numa.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause */
+/*
+ * Copyright (C) 2022, Stephan Mueller <smueller@chronox.de>
+ */
+
+#ifndef _LRNG_NUMA_H
+#define _LRNG_NUMA_H
+
+#ifdef CONFIG_NUMA
+struct lrng_drng **lrng_drng_instances(void);
+#else /* CONFIG_NUMA */
+static inline struct lrng_drng **lrng_drng_instances(void) { return NULL; }
+#endif /* CONFIG_NUMA */
+
+#endif /* _LRNG_NUMA_H */
diff --git a/drivers/char/lrng/lrng_proc.c b/drivers/char/lrng/lrng_proc.c
new file mode 100644
index 000000000000..a9c8d90c7d56
--- /dev/null
+++ b/drivers/char/lrng/lrng_proc.c
@@ -0,0 +1,74 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause
+/*
+ * LRNG proc interfaces
+ *
+ * Copyright (C) 2022, Stephan Mueller <smueller@chronox.de>
+ */
+
+#include <linux/lrng.h>
+#include <linux/module.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+
+#include "lrng_drng_mgr.h"
+#include "lrng_es_aux.h"
+#include "lrng_es_mgr.h"
+#include "lrng_proc.h"
+
+/* Number of online DRNGs */
+static u32 numa_drngs = 1;
+
+void lrng_pool_inc_numa_node(void)
+{
+ numa_drngs++;
+}
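+
+/*
+ * Illustrative output of "cat /proc/lrng_type" (a sketch -- the exact
+ * values depend on the configuration and runtime state):
+ *
+ *   DRNG name: ChaCha20 DRNG
+ *   LRNG security strength in bits: 256
+ *   Number of DRNG instances: 1
+ *   ...
+ *
+ * followed by one property block per entropy source.
+ */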
+
+static int lrng_proc_type_show(struct seq_file *m, void *v)
+{
+ struct lrng_drng *lrng_drng_init = lrng_drng_init_instance();
+ unsigned char buf[270];
+ u32 i;
+
+ mutex_lock(&lrng_drng_init->lock);
+ snprintf(buf, sizeof(buf),
+ "DRNG name: %s\n"
+ "LRNG security strength in bits: %d\n"
+ "Number of DRNG instances: %u\n"
+ "Standards compliance: %sNTG.1 (2011%s)\n"
+ "LRNG minimally seeded: %s\n"
+ "LRNG fully seeded: %s\n"
+ "LRNG entropy level: %u\n",
+ lrng_drng_init->drng_cb->drng_name(),
+ lrng_security_strength(),
+ numa_drngs,
+ lrng_sp80090c_compliant() ? "SP800-90C, " : "",
+ lrng_ntg1_2022_compliant() ? " / 2022" : "",
+ lrng_state_min_seeded() ? "true" : "false",
+ lrng_state_fully_seeded() ? "true" : "false",
+ lrng_avail_entropy());
+ seq_write(m, buf, strlen(buf));
+
+ for_each_lrng_es(i) {
+ snprintf(buf, sizeof(buf),
+ "Entropy Source %u properties:\n"
+ " Name: %s\n",
+ i, lrng_es[i]->name);
+ seq_write(m, buf, strlen(buf));
+
+ buf[0] = '\0';
+ lrng_es[i]->state(buf, sizeof(buf));
+ seq_write(m, buf, strlen(buf));
+ }
+
+ mutex_unlock(&lrng_drng_init->lock);
+
+ return 0;
+}
+
+static int __init lrng_proc_type_init(void)
+{
+ proc_create_single("lrng_type", 0444, NULL, &lrng_proc_type_show);
+ return 0;
+}
+
+module_init(lrng_proc_type_init);
diff --git a/drivers/char/lrng/lrng_proc.h b/drivers/char/lrng/lrng_proc.h
new file mode 100644
index 000000000000..c653274f1954
--- /dev/null
+++ b/drivers/char/lrng/lrng_proc.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause */
+/*
+ * Copyright (C) 2022, Stephan Mueller <smueller@chronox.de>
+ */
+
+#ifndef _LRNG_PROC_H
+#define _LRNG_PROC_H
+
+#ifdef CONFIG_SYSCTL
+void lrng_pool_inc_numa_node(void);
+#else
+static inline void lrng_pool_inc_numa_node(void) { }
+#endif
+
+#endif /* _LRNG_PROC_H */
diff --git a/drivers/char/lrng/lrng_selftest.c b/drivers/char/lrng/lrng_selftest.c
new file mode 100644
index 000000000000..15f1e4a2a719
--- /dev/null
+++ b/drivers/char/lrng/lrng_selftest.c
@@ -0,0 +1,397 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause
+/*
+ * LRNG power-on and on-demand self-test
+ *
+ * Copyright (C) 2022, Stephan Mueller <smueller@chronox.de>
+ */
+
+/*
+ * In addition to the self-tests below, the following LRNG components
+ * are covered with self-tests during regular operation:
+ *
+ * * power-on self-test: SP800-90A DRBG provided by the Linux kernel crypto API
+ * * power-on self-test: PRNG provided by the Linux kernel crypto API
+ * * runtime test: Raw noise source data testing including SP800-90B compliant
+ * tests when enabling CONFIG_LRNG_HEALTH_TESTS
+ *
+ * Additional developer tests present with LRNG code:
+ * * SP800-90B APT and RCT test enforcement validation when enabling
+ * CONFIG_LRNG_APT_BROKEN or CONFIG_LRNG_RCT_BROKEN.
+ * * Collection of raw entropy from the interrupt noise source when enabling
+ * CONFIG_LRNG_TESTING and pulling the data from the kernel with the provided
+ * interface.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/lrng.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+
+#include "lrng_drng_chacha20.h"
+#include "lrng_sha.h"
+
+#define LRNG_SELFTEST_PASSED 0
+#define LRNG_SEFLTEST_ERROR_TIME (1 << 0)
+#define LRNG_SEFLTEST_ERROR_CHACHA20 (1 << 1)
+#define LRNG_SEFLTEST_ERROR_HASH (1 << 2)
+#define LRNG_SEFLTEST_ERROR_GCD (1 << 3)
+#define LRNG_SELFTEST_NOT_EXECUTED 0xffffffff
+
+#ifdef CONFIG_LRNG_TIMER_COMMON
+
+#include "lrng_es_timer_common.h"
+
+static u32 lrng_data_selftest_ptr = 0;
+static u32 lrng_data_selftest[LRNG_DATA_ARRAY_SIZE];
+
+static void lrng_data_process_selftest_insert(u32 time)
+{
+ u32 ptr = lrng_data_selftest_ptr++ & LRNG_DATA_WORD_MASK;
+ unsigned int array = lrng_data_idx2array(ptr);
+ unsigned int slot = lrng_data_idx2slot(ptr);
+
+ /* zeroization of slot to ensure the following OR adds the data */
+ lrng_data_selftest[array] &=
+ ~(lrng_data_slot_val(0xffffffff & LRNG_DATA_SLOTSIZE_MASK,
+ slot));
+ lrng_data_selftest[array] |=
+ lrng_data_slot_val(time & LRNG_DATA_SLOTSIZE_MASK, slot);
+}
+
+static void lrng_data_process_selftest_u32(u32 data)
+{
+ u32 pre_ptr, ptr, mask;
+ unsigned int pre_array;
+
+ /* Increment pointer by number of slots taken for input value */
+ lrng_data_selftest_ptr += LRNG_DATA_SLOTS_PER_UINT;
+
+ /* ptr to current unit */
+ ptr = lrng_data_selftest_ptr;
+
+ lrng_data_split_u32(&ptr, &pre_ptr, &mask);
+
+ /* MSB of data go into previous unit */
+ pre_array = lrng_data_idx2array(pre_ptr);
+ /* zeroization of slot to ensure the following OR adds the data */
+ lrng_data_selftest[pre_array] &= ~(0xffffffff & ~mask);
+ lrng_data_selftest[pre_array] |= data & ~mask;
+
+ /* LSB of data go into current unit */
+ lrng_data_selftest[lrng_data_idx2array(ptr)] = data & mask;
+}
+
+static unsigned int lrng_data_process_selftest(void)
+{
+ u32 time;
+ u32 idx_zero_compare = (0 << 0) | (1 << 8) | (2 << 16) | (3 << 24);
+ u32 idx_one_compare = (4 << 0) | (5 << 8) | (6 << 16) | (7 << 24);
+ u32 idx_last_compare =
+ (((LRNG_DATA_NUM_VALUES - 4) & LRNG_DATA_SLOTSIZE_MASK) << 0) |
+ (((LRNG_DATA_NUM_VALUES - 3) & LRNG_DATA_SLOTSIZE_MASK) << 8) |
+ (((LRNG_DATA_NUM_VALUES - 2) & LRNG_DATA_SLOTSIZE_MASK) << 16) |
+ (((LRNG_DATA_NUM_VALUES - 1) & LRNG_DATA_SLOTSIZE_MASK) << 24);
+
+ (void)idx_one_compare;
+
+ /* "poison" the array to verify the operation of the zeroization */
+ lrng_data_selftest[0] = 0xffffffff;
+ lrng_data_selftest[1] = 0xffffffff;
+
+ lrng_data_process_selftest_insert(0);
+ /*
+	 * Note, when using lrng_data_process_u32() on an unaligned ptr,
+	 * the most significant bits of the data go into the previous word
+	 * and the least significant bits into the current word.
+ */
+ lrng_data_process_selftest_u32((4 << 0) | (1 << 8) | (2 << 16) |
+ (3 << 24));
+ lrng_data_process_selftest_insert(5);
+ lrng_data_process_selftest_insert(6);
+ lrng_data_process_selftest_insert(7);
+
+ if ((lrng_data_selftest[0] != idx_zero_compare) ||
+ (lrng_data_selftest[1] != idx_one_compare))
+ goto err;
+
+ /* Reset for next test */
+ lrng_data_selftest[0] = 0;
+ lrng_data_selftest[1] = 0;
+ lrng_data_selftest_ptr = 0;
+
+ for (time = 0; time < LRNG_DATA_NUM_VALUES; time++)
+ lrng_data_process_selftest_insert(time);
+
+ if ((lrng_data_selftest[0] != idx_zero_compare) ||
+ (lrng_data_selftest[1] != idx_one_compare) ||
+ (lrng_data_selftest[LRNG_DATA_ARRAY_SIZE - 1] != idx_last_compare))
+ goto err;
+
+ return LRNG_SELFTEST_PASSED;
+
+err:
+ pr_err("LRNG data array self-test FAILED\n");
+ return LRNG_SEFLTEST_ERROR_TIME;
+}
+
+static unsigned int lrng_gcd_selftest(void)
+{
+ u32 history[10];
+ unsigned int i;
+
+#define LRNG_GCD_SELFTEST 3
+ for (i = 0; i < ARRAY_SIZE(history); i++)
+ history[i] = i * LRNG_GCD_SELFTEST;
+
+ if (lrng_gcd_analyze(history, ARRAY_SIZE(history)) == LRNG_GCD_SELFTEST)
+ return LRNG_SELFTEST_PASSED;
+
+ pr_err("LRNG GCD self-test FAILED\n");
+ return LRNG_SEFLTEST_ERROR_GCD;
+}
+
+#else /* CONFIG_LRNG_TIMER_COMMON */
+
+static unsigned int lrng_data_process_selftest(void)
+{
+ return LRNG_SELFTEST_PASSED;
+}
+
+static unsigned int lrng_gcd_selftest(void)
+{
+ return LRNG_SELFTEST_PASSED;
+}
+
+#endif /* CONFIG_LRNG_TIMER_COMMON */
+
+/* The test vectors are taken from crypto/testmgr.h */
+static unsigned int lrng_hash_selftest(void)
+{
+ SHASH_DESC_ON_STACK(shash, NULL);
+ const struct lrng_hash_cb *hash_cb = &lrng_sha_hash_cb;
+ static const u8 lrng_hash_selftest_result[] =
+#ifdef CONFIG_CRYPTO_LIB_SHA256
+ { 0xba, 0x78, 0x16, 0xbf, 0x8f, 0x01, 0xcf, 0xea,
+ 0x41, 0x41, 0x40, 0xde, 0x5d, 0xae, 0x22, 0x23,
+ 0xb0, 0x03, 0x61, 0xa3, 0x96, 0x17, 0x7a, 0x9c,
+ 0xb4, 0x10, 0xff, 0x61, 0xf2, 0x00, 0x15, 0xad };
+#else /* CONFIG_CRYPTO_LIB_SHA256 */
+ { 0xa9, 0x99, 0x3e, 0x36, 0x47, 0x06, 0x81, 0x6a, 0xba, 0x3e,
+ 0x25, 0x71, 0x78, 0x50, 0xc2, 0x6c, 0x9c, 0xd0, 0xd8, 0x9d };
+#endif /* CONFIG_CRYPTO_LIB_SHA256 */
+ static const u8 hash_input[] = { 0x61, 0x62, 0x63 }; /* "abc" */
+ u8 digest[sizeof(lrng_hash_selftest_result)] __aligned(sizeof(u32));
+
+ if (sizeof(digest) != hash_cb->hash_digestsize(NULL))
+ return LRNG_SEFLTEST_ERROR_HASH;
+
+ if (!hash_cb->hash_init(shash, NULL) &&
+ !hash_cb->hash_update(shash, hash_input,
+ sizeof(hash_input)) &&
+ !hash_cb->hash_final(shash, digest) &&
+ !memcmp(digest, lrng_hash_selftest_result, sizeof(digest)))
+ return 0;
+
+ pr_err("LRNG %s Hash self-test FAILED\n", hash_cb->hash_name());
+ return LRNG_SEFLTEST_ERROR_HASH;
+}
+
+#ifdef CONFIG_LRNG_DRNG_CHACHA20
+
+static void lrng_selftest_bswap32(u32 *ptr, u32 words)
+{
+ u32 i;
+
+ /* Byte-swap data which is an LE representation */
+ for (i = 0; i < words; i++) {
+ __le32 *p = (__le32 *)ptr;
+
+ *p = cpu_to_le32(*ptr);
+ ptr++;
+ }
+}
+
+/*
+ * The test vectors were generated using the ChaCha20 DRNG from
+ * https://www.chronox.de/chacha20.html
+ */
+static unsigned int lrng_chacha20_drng_selftest(void)
+{
+ const struct lrng_drng_cb *drng_cb = &lrng_cc20_drng_cb;
+ u8 seed[CHACHA_KEY_SIZE * 2] = {
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
+ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
+ 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f,
+ 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27,
+ 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f,
+ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37,
+ 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f,
+ };
+ struct chacha20_block chacha20;
+ int ret;
+ u8 outbuf[CHACHA_KEY_SIZE * 2] __aligned(sizeof(u32));
+
+ /*
+ * Expected result when ChaCha20 DRNG state is zero:
+ * * constants are set to "expand 32-byte k"
+ * * remaining state is 0
+ * and pulling one half ChaCha20 DRNG block.
+ */
+ static const u8 expected_halfblock[CHACHA_KEY_SIZE] = {
+ 0x76, 0xb8, 0xe0, 0xad, 0xa0, 0xf1, 0x3d, 0x90,
+ 0x40, 0x5d, 0x6a, 0xe5, 0x53, 0x86, 0xbd, 0x28,
+ 0xbd, 0xd2, 0x19, 0xb8, 0xa0, 0x8d, 0xed, 0x1a,
+ 0xa8, 0x36, 0xef, 0xcc, 0x8b, 0x77, 0x0d, 0xc7 };
+
+ /*
+ * Expected result when ChaCha20 DRNG state is zero:
+ * * constants are set to "expand 32-byte k"
+ * * remaining state is 0
+ * followed by a reseed with two keyblocks
+ * 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ * 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
+ * 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
+ * 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f,
+ * 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27,
+ * 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f,
+ * 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37,
+ * 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f
+ * and pulling one ChaCha20 DRNG block.
+ */
+ static const u8 expected_oneblock[CHACHA_KEY_SIZE * 2] = {
+ 0xe3, 0xb0, 0x8a, 0xcc, 0x34, 0xc3, 0x17, 0x0e,
+ 0xc3, 0xd8, 0xc3, 0x40, 0xe7, 0x73, 0xe9, 0x0d,
+ 0xd1, 0x62, 0xa3, 0x5d, 0x7d, 0xf2, 0xf1, 0x4a,
+ 0x24, 0x42, 0xb7, 0x1e, 0xb0, 0x05, 0x17, 0x07,
+ 0xb9, 0x35, 0x10, 0x69, 0x8b, 0x46, 0xfb, 0x51,
+ 0xe9, 0x91, 0x3f, 0x46, 0xf2, 0x4d, 0xea, 0xd0,
+ 0x81, 0xc1, 0x1b, 0xa9, 0x5d, 0x52, 0x91, 0x5f,
+ 0xcd, 0xdc, 0xc6, 0xd6, 0xc3, 0x7c, 0x50, 0x23 };
+
+ /*
+ * Expected result when ChaCha20 DRNG state is zero:
+ * * constants are set to "expand 32-byte k"
+ * * remaining state is 0
+ * followed by a reseed with one key block plus one byte
+ * 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ * 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
+ * 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
+ * 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f,
+ * 0x20
+ * and pulling less than one ChaCha20 DRNG block.
+ */
+ static const u8 expected_block_nonalinged[CHACHA_KEY_SIZE + 4] = {
+ 0x9c, 0xfc, 0x5e, 0x31, 0x21, 0x62, 0x11, 0x85,
+ 0xd3, 0x77, 0xd3, 0x69, 0x0f, 0xa8, 0x16, 0x55,
+ 0xb4, 0x4c, 0xf6, 0x52, 0xf3, 0xa8, 0x37, 0x99,
+ 0x38, 0x76, 0xa0, 0x66, 0xec, 0xbb, 0xce, 0xa9,
+ 0x9c, 0x95, 0xa1, 0xfd };
+
+ BUILD_BUG_ON(sizeof(seed) % sizeof(u32));
+
+ memset(&chacha20, 0, sizeof(chacha20));
+ lrng_cc20_init_rfc7539(&chacha20);
+ lrng_selftest_bswap32((u32 *)seed, sizeof(seed) / sizeof(u32));
+
+ /* Generate with zero state */
+ ret = drng_cb->drng_generate(&chacha20, outbuf,
+ sizeof(expected_halfblock));
+ if (ret != sizeof(expected_halfblock))
+ goto err;
+ if (memcmp(outbuf, expected_halfblock, sizeof(expected_halfblock)))
+ goto err;
+
+ /* Clear state of DRNG */
+ memset(&chacha20.key.u[0], 0, 48);
+
+ /* Reseed with 2 key blocks */
+ ret = drng_cb->drng_seed(&chacha20, seed, sizeof(expected_oneblock));
+ if (ret < 0)
+ goto err;
+ ret = drng_cb->drng_generate(&chacha20, outbuf,
+ sizeof(expected_oneblock));
+ if (ret != sizeof(expected_oneblock))
+ goto err;
+ if (memcmp(outbuf, expected_oneblock, sizeof(expected_oneblock)))
+ goto err;
+
+ /* Clear state of DRNG */
+ memset(&chacha20.key.u[0], 0, 48);
+
+ /* Reseed with 1 key block and one byte */
+ ret = drng_cb->drng_seed(&chacha20, seed,
+ sizeof(expected_block_nonalinged));
+ if (ret < 0)
+ goto err;
+ ret = drng_cb->drng_generate(&chacha20, outbuf,
+ sizeof(expected_block_nonalinged));
+ if (ret != sizeof(expected_block_nonalinged))
+ goto err;
+ if (memcmp(outbuf, expected_block_nonalinged,
+ sizeof(expected_block_nonalinged)))
+ goto err;
+
+ return LRNG_SELFTEST_PASSED;
+
+err:
+ pr_err("LRNG ChaCha20 DRNG self-test FAILED\n");
+ return LRNG_SEFLTEST_ERROR_CHACHA20;
+}
+
+#else /* CONFIG_LRNG_DRNG_CHACHA20 */
+
+static unsigned int lrng_chacha20_drng_selftest(void)
+{
+ return LRNG_SELFTEST_PASSED;
+}
+
+#endif /* CONFIG_LRNG_DRNG_CHACHA20 */
+
+static unsigned int lrng_selftest_status = LRNG_SELFTEST_NOT_EXECUTED;
+
+static int lrng_selftest(void)
+{
+ unsigned int ret = lrng_data_process_selftest();
+
+ ret |= lrng_chacha20_drng_selftest();
+ ret |= lrng_hash_selftest();
+ ret |= lrng_gcd_selftest();
+
+ if (ret) {
+ if (IS_ENABLED(CONFIG_LRNG_SELFTEST_PANIC))
+ panic("LRNG self-tests failed: %u\n", ret);
+ } else {
+ pr_info("LRNG self-tests passed\n");
+ }
+
+ lrng_selftest_status = ret;
+
+ if (lrng_selftest_status)
+ return -EFAULT;
+ return 0;
+}
+
+#ifdef CONFIG_SYSFS
+/* Re-perform self-test when any value is written to the sysfs file. */
+static int lrng_selftest_sysfs_set(const char *val,
+ const struct kernel_param *kp)
+{
+ return lrng_selftest();
+}
+
+static const struct kernel_param_ops lrng_selftest_sysfs = {
+ .set = lrng_selftest_sysfs_set,
+ .get = param_get_uint,
+};
+module_param_cb(selftest_status, &lrng_selftest_sysfs, &lrng_selftest_status,
+ 0644);
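+
+/*
+ * Writing any value re-runs the self-test, e.g. (a sketch -- the exact
+ * sysfs path follows KBUILD_MODNAME):
+ *
+ *   echo 1 > /sys/module/lrng_selftest/parameters/selftest_status
+ *   cat /sys/module/lrng_selftest/parameters/selftest_status
+ *
+ * A status of 0 indicates that all self-tests passed.
+ */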
+#endif /* CONFIG_SYSFS */
+
+static int __init lrng_selftest_init(void)
+{
+ return lrng_selftest();
+}
+
+module_init(lrng_selftest_init);
diff --git a/drivers/char/lrng/lrng_sha.h b/drivers/char/lrng/lrng_sha.h
new file mode 100644
index 000000000000..d2f134f54773
--- /dev/null
+++ b/drivers/char/lrng/lrng_sha.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause */
+/*
+ * LRNG SHA definition usable in atomic contexts right from the start of the
+ * kernel.
+ *
+ * Copyright (C) 2022, Stephan Mueller <smueller@chronox.de>
+ */
+
+#ifndef _LRNG_SHA_H
+#define _LRNG_SHA_H
+
+extern const struct lrng_hash_cb lrng_sha_hash_cb;
+
+#endif /* _LRNG_SHA_H */
diff --git a/drivers/char/lrng/lrng_sha1.c b/drivers/char/lrng/lrng_sha1.c
new file mode 100644
index 000000000000..9cbc7a6fee49
--- /dev/null
+++ b/drivers/char/lrng/lrng_sha1.c
@@ -0,0 +1,88 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause
+/*
+ * Backend for the LRNG providing the SHA-1 implementation that can be used
+ * without the kernel crypto API available including during early boot and in
+ * atomic contexts.
+ *
+ * Copyright (C) 2022, Stephan Mueller <smueller@chronox.de>
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/lrng.h>
+#include <crypto/sha1.h>
+#include <crypto/sha1_base.h>
+
+#include "lrng_sha.h"
+
+/*
+ * If the SHA-256 support is not compiled, we fall back to SHA-1 that is always
+ * compiled and present in the kernel.
+ */
+static u32 lrng_sha1_hash_digestsize(void *hash)
+{
+ return SHA1_DIGEST_SIZE;
+}
+
+static void lrng_sha1_block_fn(struct sha1_state *sctx, const u8 *src,
+ int blocks)
+{
+ u32 temp[SHA1_WORKSPACE_WORDS];
+
+ while (blocks--) {
+ sha1_transform(sctx->state, src, temp);
+ src += SHA1_BLOCK_SIZE;
+ }
+ memzero_explicit(temp, sizeof(temp));
+}
+
+static int lrng_sha1_hash_init(struct shash_desc *shash, void *hash)
+{
+ /*
+ * We do not need a TFM - we only need sufficient space for
+ * struct sha1_state on the stack.
+ */
+ sha1_base_init(shash);
+ return 0;
+}
+
+static int lrng_sha1_hash_update(struct shash_desc *shash,
+ const u8 *inbuf, u32 inbuflen)
+{
+ return sha1_base_do_update(shash, inbuf, inbuflen, lrng_sha1_block_fn);
+}
+
+static int lrng_sha1_hash_final(struct shash_desc *shash, u8 *digest)
+{
+ return sha1_base_do_finalize(shash, lrng_sha1_block_fn) ?:
+ sha1_base_finish(shash, digest);
+}
+
+static const char *lrng_sha1_hash_name(void)
+{
+ return "SHA-1";
+}
+
+static void lrng_sha1_hash_desc_zero(struct shash_desc *shash)
+{
+ memzero_explicit(shash_desc_ctx(shash), sizeof(struct sha1_state));
+}
+
+static void *lrng_sha1_hash_alloc(void)
+{
+ pr_info("Hash %s allocated\n", lrng_sha1_hash_name());
+ return NULL;
+}
+
+static void lrng_sha1_hash_dealloc(void *hash) { }
+
+const struct lrng_hash_cb lrng_sha_hash_cb = {
+ .hash_name = lrng_sha1_hash_name,
+ .hash_alloc = lrng_sha1_hash_alloc,
+ .hash_dealloc = lrng_sha1_hash_dealloc,
+ .hash_digestsize = lrng_sha1_hash_digestsize,
+ .hash_init = lrng_sha1_hash_init,
+ .hash_update = lrng_sha1_hash_update,
+ .hash_final = lrng_sha1_hash_final,
+ .hash_desc_zero = lrng_sha1_hash_desc_zero,
+};
diff --git a/drivers/char/lrng/lrng_sha256.c b/drivers/char/lrng/lrng_sha256.c
new file mode 100644
index 000000000000..50705351a71c
--- /dev/null
+++ b/drivers/char/lrng/lrng_sha256.c
@@ -0,0 +1,72 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause
+/*
+ * Backend for the LRNG providing the SHA-256 implementation that can be used
+ * without the kernel crypto API available including during early boot and in
+ * atomic contexts.
+ *
+ * Copyright (C) 2022, Stephan Mueller <smueller@chronox.de>
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/lrng.h>
+#include <crypto/sha2.h>
+
+#include "lrng_sha.h"
+
+static u32 lrng_sha256_hash_digestsize(void *hash)
+{
+ return SHA256_DIGEST_SIZE;
+}
+
+static int lrng_sha256_hash_init(struct shash_desc *shash, void *hash)
+{
+ /*
+ * We do not need a TFM - we only need sufficient space for
+ * struct sha256_state on the stack.
+ */
+ sha256_init(shash_desc_ctx(shash));
+ return 0;
+}
+
+static int lrng_sha256_hash_update(struct shash_desc *shash,
+ const u8 *inbuf, u32 inbuflen)
+{
+ sha256_update(shash_desc_ctx(shash), inbuf, inbuflen);
+ return 0;
+}
+
+static int lrng_sha256_hash_final(struct shash_desc *shash, u8 *digest)
+{
+ sha256_final(shash_desc_ctx(shash), digest);
+ return 0;
+}
+
+static const char *lrng_sha256_hash_name(void)
+{
+ return "SHA-256";
+}
+
+static void lrng_sha256_hash_desc_zero(struct shash_desc *shash)
+{
+ memzero_explicit(shash_desc_ctx(shash), sizeof(struct sha256_state));
+}
+
+static void *lrng_sha256_hash_alloc(void)
+{
+ pr_info("Hash %s allocated\n", lrng_sha256_hash_name());
+ return NULL;
+}
+
+static void lrng_sha256_hash_dealloc(void *hash) { }
+
+const struct lrng_hash_cb lrng_sha_hash_cb = {
+ .hash_name = lrng_sha256_hash_name,
+ .hash_alloc = lrng_sha256_hash_alloc,
+ .hash_dealloc = lrng_sha256_hash_dealloc,
+ .hash_digestsize = lrng_sha256_hash_digestsize,
+ .hash_init = lrng_sha256_hash_init,
+ .hash_update = lrng_sha256_hash_update,
+ .hash_final = lrng_sha256_hash_final,
+ .hash_desc_zero = lrng_sha256_hash_desc_zero,
+};
diff --git a/drivers/char/lrng/lrng_switch.c b/drivers/char/lrng/lrng_switch.c
new file mode 100644
index 000000000000..13c70797b193
--- /dev/null
+++ b/drivers/char/lrng/lrng_switch.c
@@ -0,0 +1,286 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause
+/*
+ * LRNG DRNG switching support
+ *
+ * Copyright (C) 2022, Stephan Mueller <smueller@chronox.de>
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/lrng.h>
+
+#include "lrng_es_aux.h"
+#include "lrng_es_mgr.h"
+#include "lrng_interface_dev_common.h"
+#include "lrng_numa.h"
+
+static int __maybe_unused
+lrng_hash_switch(struct lrng_drng *drng_store, const void *cb, int node)
+{
+ const struct lrng_hash_cb *new_cb = (const struct lrng_hash_cb *)cb;
+ const struct lrng_hash_cb *old_cb = drng_store->hash_cb;
+ unsigned long flags;
+ u32 i;
+ void *new_hash, *old_hash;
+	int ret = 0;
+
+ if (node == -1)
+ return 0;
+
+ new_hash = new_cb->hash_alloc();
+ old_hash = drng_store->hash;
+
+ if (IS_ERR(new_hash)) {
+ pr_warn("could not allocate new LRNG pool hash (%ld)\n",
+ PTR_ERR(new_hash));
+ return PTR_ERR(new_hash);
+ }
+
+ if (new_cb->hash_digestsize(new_hash) > LRNG_MAX_DIGESTSIZE) {
+ pr_warn("digest size of newly requested hash too large\n");
+ new_cb->hash_dealloc(new_hash);
+ return -EINVAL;
+ }
+
+ write_lock_irqsave(&drng_store->hash_lock, flags);
+
+ /* Trigger the switch for each entropy source */
+ for_each_lrng_es(i) {
+ if (!lrng_es[i]->switch_hash)
+ continue;
+ ret = lrng_es[i]->switch_hash(drng_store, node, new_cb,
+ new_hash, old_cb);
+ if (ret) {
+ u32 j;
+
+ /* Revert all already executed operations */
+ for (j = 0; j < i; j++) {
+ if (!lrng_es[j]->switch_hash)
+ continue;
+ WARN_ON(lrng_es[j]->switch_hash(drng_store,
+ node, old_cb,
+ old_hash,
+ new_cb));
+ }
+ goto err;
+ }
+ }
+
+ drng_store->hash = new_hash;
+ drng_store->hash_cb = new_cb;
+ old_cb->hash_dealloc(old_hash);
+ pr_info("Conditioning function allocated for DRNG for NUMA node %d\n",
+ node);
+
+err:
+ write_unlock_irqrestore(&drng_store->hash_lock, flags);
+ return ret;
+}
+
+static int __maybe_unused
+lrng_drng_switch(struct lrng_drng *drng_store, const void *cb, int node)
+{
+ const struct lrng_drng_cb *new_cb = (const struct lrng_drng_cb *)cb;
+ const struct lrng_drng_cb *old_cb = drng_store->drng_cb;
+ int ret;
+ u8 seed[LRNG_DRNG_SECURITY_STRENGTH_BYTES];
+ void *new_drng = new_cb->drng_alloc(LRNG_DRNG_SECURITY_STRENGTH_BYTES);
+ void *old_drng = drng_store->drng;
+ u32 current_security_strength;
+ bool reset_drng = !lrng_get_available();
+
+ if (IS_ERR(new_drng)) {
+ pr_warn("could not allocate new DRNG for NUMA node %d (%ld)\n",
+ node, PTR_ERR(new_drng));
+ return PTR_ERR(new_drng);
+ }
+
+ current_security_strength = lrng_security_strength();
+ mutex_lock(&drng_store->lock);
+
+ /*
+	 * Pull from the existing DRNG to seed the new DRNG regardless of the
+	 * seed status of the old DRNG -- the entropy state for the DRNG is
+	 * left unchanged, which implies that the new DRNG is also reseeded
+	 * when deemed necessary. This seeding of the new DRNG shall only
+	 * ensure that the new DRNG has the same entropy as the old DRNG.
+ */
+ ret = old_cb->drng_generate(old_drng, seed, sizeof(seed));
+ mutex_unlock(&drng_store->lock);
+
+ if (ret < 0) {
+ reset_drng = true;
+ pr_warn("getting random data from DRNG failed for NUMA node %d (%d)\n",
+ node, ret);
+ } else {
+ /* seed new DRNG with data */
+ ret = new_cb->drng_seed(new_drng, seed, ret);
+ memzero_explicit(seed, sizeof(seed));
+ if (ret < 0) {
+ reset_drng = true;
+ pr_warn("seeding of new DRNG failed for NUMA node %d (%d)\n",
+ node, ret);
+ } else {
+ pr_debug("seeded new DRNG of NUMA node %d instance from old DRNG instance\n",
+ node);
+ }
+ }
+
+ mutex_lock(&drng_store->lock);
+
+ if (reset_drng)
+ lrng_drng_reset(drng_store);
+
+ drng_store->drng = new_drng;
+ drng_store->drng_cb = new_cb;
+
+ /* Reseed if previous LRNG security strength was insufficient */
+ if (current_security_strength < lrng_security_strength())
+ drng_store->force_reseed = true;
+
+ /* Force oversampling seeding as we initialize DRNG */
+ if (IS_ENABLED(CONFIG_CRYPTO_FIPS))
+ lrng_unset_fully_seeded(drng_store);
+
+ if (lrng_state_min_seeded())
+ lrng_set_entropy_thresh(lrng_get_seed_entropy_osr(
+ drng_store->fully_seeded));
+
+ old_cb->drng_dealloc(old_drng);
+
+ pr_info("DRNG of NUMA node %d switched\n", node);
+
+ mutex_unlock(&drng_store->lock);
+ return ret;
+}
+
+/*
+ * Switch the existing DRNG and hash instances over to new instances using the
+ * new crypto callbacks. The caller must hold the lrng_crypto_cb_update lock.
+ */
+static int lrng_switch(const void *cb,
+ int (*switcher)(struct lrng_drng *drng_store,
+ const void *cb, int node))
+{
+ struct lrng_drng **lrng_drng = lrng_drng_instances();
+ struct lrng_drng *lrng_drng_init = lrng_drng_init_instance();
+ struct lrng_drng *lrng_drng_pr = lrng_drng_pr_instance();
+ int ret = 0;
+
+ if (lrng_drng) {
+ u32 node;
+
+ for_each_online_node(node) {
+ if (lrng_drng[node])
+ ret |= switcher(lrng_drng[node], cb, node);
+ }
+ } else {
+ ret |= switcher(lrng_drng_init, cb, 0);
+ }
+
+ ret |= switcher(lrng_drng_pr, cb, -1);
+
+ return ret;
+}
+
+/*
+ * lrng_set_drng_cb - Register new cryptographic callback functions for DRNG
+ * Registering new callbacks implies that all old DRNG states are replaced
+ * with new DRNG states.
+ *
+ * @drng_cb: Callback functions to be registered -- if NULL, use the default
+ * callbacks defined at compile time.
+ *
+ * Return:
+ * * 0 on success
+ * * < 0 on error
+ */
+int lrng_set_drng_cb(const struct lrng_drng_cb *drng_cb)
+{
+ struct lrng_drng *lrng_drng_init = lrng_drng_init_instance();
+ int ret;
+
+ if (!IS_ENABLED(CONFIG_LRNG_SWITCH_DRNG))
+ return -EOPNOTSUPP;
+
+ if (!drng_cb)
+ drng_cb = lrng_default_drng_cb;
+
+ mutex_lock(&lrng_crypto_cb_update);
+
+ /*
+ * If a callback other than the default is set, allow it only to be
+	 * set back to the default callback. This ensures that multiple
+	 * different callbacks cannot be registered at the same time. If a
+ * callback different from the current callback and the default
+ * callback shall be set, the current callback must be deregistered
+ * (e.g. the kernel module providing it must be unloaded) and the new
+ * implementation can be registered.
+ */
+ if ((drng_cb != lrng_default_drng_cb) &&
+ (lrng_drng_init->drng_cb != lrng_default_drng_cb)) {
+ pr_warn("disallow setting new DRNG callbacks, unload the old callbacks first!\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ret = lrng_switch(drng_cb, lrng_drng_switch);
+ /* The switch may imply new entropy due to larger DRNG sec strength. */
+ if (!ret)
+ lrng_es_add_entropy();
+
+out:
+ mutex_unlock(&lrng_crypto_cb_update);
+ return ret;
+}
+EXPORT_SYMBOL(lrng_set_drng_cb);
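For illustration, a minimal out-of-tree module that switches the LRNG over to its own DRNG could look as follows. This is only a sketch: the "example-drng" state and all ex_* helpers are hypothetical placeholders; only struct lrng_drng_cb, lrng_set_drng_cb() and the NULL-restores-default convention are taken from the code above.

#include <linux/err.h>
#include <linux/lrng.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/string.h>

static const char *ex_drng_name(void) { return "example-drng"; }

static void *ex_drng_alloc(u32 sec_strength)
{
	void *state = kzalloc(64, GFP_KERNEL);

	/* drng_alloc must return an ERR_PTR on failure, not NULL. */
	return state ? state : ERR_PTR(-ENOMEM);
}

static void ex_drng_dealloc(void *drng) { kfree_sensitive(drng); }

static int ex_drng_seed(void *drng, const u8 *inbuf, u32 inbuflen)
{
	return 0;	/* A real DRNG would mix inbuf into its state here. */
}

static int ex_drng_generate(void *drng, u8 *outbuf, u32 outbuflen)
{
	memset(outbuf, 0, outbuflen);	/* Insecure placeholder output. */
	return outbuflen;		/* Number of generated bytes. */
}

static const struct lrng_drng_cb ex_drng_cb = {
	.drng_name	= ex_drng_name,
	.drng_alloc	= ex_drng_alloc,
	.drng_dealloc	= ex_drng_dealloc,
	.drng_seed	= ex_drng_seed,
	.drng_generate	= ex_drng_generate,
};

static int __init ex_drng_init(void)
{
	/* Fails with -EINVAL while another non-default DRNG is active. */
	return lrng_set_drng_cb(&ex_drng_cb);
}

static void __exit ex_drng_exit(void)
{
	/* NULL restores the compile-time default DRNG callbacks. */
	lrng_set_drng_cb(NULL);
}

module_init(ex_drng_init);
module_exit(ex_drng_exit);
MODULE_LICENSE("GPL");

The same register-then-restore pattern applies to lrng_set_hash_cb() below.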
+
+/*
+ * lrng_set_hash_cb - Register new cryptographic callback functions for hash
+ * Registering new callbacks implies that all old hash states are replaced
+ * with new hash states.
+ *
+ * @hash_cb: Callback functions to be registered -- if NULL, use the default
+ * callbacks defined at compile time.
+ *
+ * Return:
+ * * 0 on success
+ * * < 0 on error
+ */
+int lrng_set_hash_cb(const struct lrng_hash_cb *hash_cb)
+{
+ struct lrng_drng *lrng_drng_init = lrng_drng_init_instance();
+ int ret;
+
+ if (!IS_ENABLED(CONFIG_LRNG_SWITCH_HASH))
+ return -EOPNOTSUPP;
+
+ if (!hash_cb)
+ hash_cb = lrng_default_hash_cb;
+
+ mutex_lock(&lrng_crypto_cb_update);
+
+ /* Comment from lrng_set_drng_cb applies. */
+ if ((hash_cb != lrng_default_hash_cb) &&
+ (lrng_drng_init->hash_cb != lrng_default_hash_cb)) {
+ pr_warn("disallow setting new hash callbacks, unload the old callbacks first!\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ret = lrng_switch(hash_cb, lrng_hash_switch);
+ /*
+ * The switch may imply new entropy due to larger digest size. But
+ * it may also offer more room in the aux pool which means we ping
+ * any waiting entropy providers.
+ */
+ if (!ret) {
+ lrng_es_add_entropy();
+ lrng_writer_wakeup();
+ }
+
+out:
+ mutex_unlock(&lrng_crypto_cb_update);
+ return ret;
+}
+EXPORT_SYMBOL(lrng_set_hash_cb);
diff --git a/drivers/char/lrng/lrng_sysctl.c b/drivers/char/lrng/lrng_sysctl.c
new file mode 100644
index 000000000000..ecdd96a842b4
--- /dev/null
+++ b/drivers/char/lrng/lrng_sysctl.c
@@ -0,0 +1,140 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause
+/*
+ * LRNG sysctl interfaces
+ *
+ * Copyright (C) 2022, Stephan Mueller <smueller@chronox.de>
+ */
+
+#include <linux/lrng.h>
+#include <linux/proc_fs.h>
+#include <linux/sysctl.h>
+#include <linux/uuid.h>
+
+#include "lrng_drng_mgr.h"
+#include "lrng_es_mgr.h"
+#include "lrng_sysctl.h"
+
+/*
+ * This function is used to return both the boot-id UUID and a random
+ * UUID. The difference is in whether table->data is NULL; if it is,
+ * then a new UUID is generated and returned to the user.
+ *
+ * If the user accesses this via the proc interface, the UUID will be
+ * returned as an ASCII string in the standard UUID format; if via the
+ * sysctl system call, as 16 bytes of binary data.
+ */
+static int lrng_sysctl_do_uuid(struct ctl_table *table, int write,
+ void *buffer, size_t *lenp, loff_t *ppos)
+{
+ struct ctl_table fake_table;
+ unsigned char buf[64], tmp_uuid[16], *uuid;
+
+ uuid = table->data;
+ if (!uuid) {
+ uuid = tmp_uuid;
+ generate_random_uuid(uuid);
+ } else {
+ static DEFINE_SPINLOCK(bootid_spinlock);
+
+ spin_lock(&bootid_spinlock);
+ if (!uuid[8])
+ generate_random_uuid(uuid);
+ spin_unlock(&bootid_spinlock);
+ }
+
+ sprintf(buf, "%pU", uuid);
+
+ fake_table.data = buf;
+ fake_table.maxlen = sizeof(buf);
+
+ return proc_dostring(&fake_table, write, buffer, lenp, ppos);
+}
+
+static int lrng_sysctl_do_entropy(struct ctl_table *table, int write,
+ void *buffer, size_t *lenp, loff_t *ppos)
+{
+ struct ctl_table fake_table;
+ int entropy_count = lrng_avail_entropy_aux();
+
+ fake_table.data = &entropy_count;
+ fake_table.maxlen = sizeof(entropy_count);
+
+ return proc_dointvec(&fake_table, write, buffer, lenp, ppos);
+}
+
+static int lrng_sysctl_do_poolsize(struct ctl_table *table, int write,
+ void *buffer, size_t *lenp, loff_t *ppos)
+{
+ struct ctl_table fake_table;
+ u32 entropy_count = lrng_es[lrng_ext_es_aux]->max_entropy();
+
+ fake_table.data = &entropy_count;
+ fake_table.maxlen = sizeof(entropy_count);
+
+ return proc_dointvec(&fake_table, write, buffer, lenp, ppos);
+}
+
+static int lrng_min_write_thresh;
+static int lrng_max_write_thresh = (LRNG_WRITE_WAKEUP_ENTROPY << 3);
+static char lrng_sysctl_bootid[16];
+static int lrng_drng_reseed_max_min;
+
+void lrng_sysctl_update_max_write_thresh(u32 new_digestsize)
+{
+ lrng_max_write_thresh = (int)new_digestsize;
+ /* Ensure that changes to the global variable are visible */
+ mb();
+}
+
+static struct ctl_table random_table[] = {
+ {
+ .procname = "poolsize",
+ .maxlen = sizeof(int),
+ .mode = 0444,
+ .proc_handler = lrng_sysctl_do_poolsize,
+ },
+ {
+ .procname = "entropy_avail",
+ .maxlen = sizeof(int),
+ .mode = 0444,
+ .proc_handler = lrng_sysctl_do_entropy,
+ },
+ {
+ .procname = "write_wakeup_threshold",
+ .data = &lrng_write_wakeup_bits,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = &lrng_min_write_thresh,
+ .extra2 = &lrng_max_write_thresh,
+ },
+ {
+ .procname = "boot_id",
+ .data = &lrng_sysctl_bootid,
+ .maxlen = 16,
+ .mode = 0444,
+ .proc_handler = lrng_sysctl_do_uuid,
+ },
+ {
+ .procname = "uuid",
+ .maxlen = 16,
+ .mode = 0444,
+ .proc_handler = lrng_sysctl_do_uuid,
+ },
+ {
+ .procname = "urandom_min_reseed_secs",
+ .data = &lrng_drng_reseed_max_time,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
+ .extra1 = &lrng_drng_reseed_max_min,
+ },
+ { }
+};
+
+static int __init random_sysctls_init(void)
+{
+ register_sysctl_init("kernel/random", random_table);
+ return 0;
+}
+device_initcall(random_sysctls_init);
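As a usage illustration for the table registered above, a small userspace program can read the resulting procfs files; the paths follow directly from the "kernel/random" registration, while the program itself is a hypothetical sketch.

/* Sketch: read LRNG sysctl values via procfs (userspace). */
#include <stdio.h>

int main(void)
{
	char uuid[64];
	int entropy = -1;
	FILE *f;

	f = fopen("/proc/sys/kernel/random/entropy_avail", "r");
	if (f) {
		if (fscanf(f, "%d", &entropy) != 1)
			entropy = -1;
		fclose(f);
	}

	f = fopen("/proc/sys/kernel/random/uuid", "r");
	if (f) {
		/* Returned as an ASCII string in standard UUID format. */
		if (fscanf(f, "%63s", uuid) == 1)
			printf("entropy_avail: %d, uuid: %s\n", entropy, uuid);
		fclose(f);
	}

	return 0;
}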
diff --git a/drivers/char/lrng/lrng_sysctl.h b/drivers/char/lrng/lrng_sysctl.h
new file mode 100644
index 000000000000..4b487e5077ed
--- /dev/null
+++ b/drivers/char/lrng/lrng_sysctl.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause */
+/*
+ * Copyright (C) 2022, Stephan Mueller <smueller@chronox.de>
+ */
+
+#ifndef _LRNG_SYSCTL_H
+#define _LRNG_SYSCTL_H
+
+#ifdef CONFIG_LRNG_SYSCTL
+void lrng_sysctl_update_max_write_thresh(u32 new_digestsize);
+#else
+static inline void lrng_sysctl_update_max_write_thresh(u32 new_digestsize) { }
+#endif
+
+#endif /* _LRNG_SYSCTL_H */
diff --git a/drivers/char/lrng/lrng_testing.c b/drivers/char/lrng/lrng_testing.c
new file mode 100644
index 000000000000..101140085d81
--- /dev/null
+++ b/drivers/char/lrng/lrng_testing.c
@@ -0,0 +1,901 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause
+/*
+ * LRNG testing interfaces to obtain raw entropy
+ *
+ * Copyright (C) 2022, Stephan Mueller <smueller@chronox.de>
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/lrng.h>
+#include <linux/atomic.h>
+#include <linux/bug.h>
+#include <linux/debugfs.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/sched/signal.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/uaccess.h>
+#include <linux/workqueue.h>
+#include <asm/errno.h>
+
+#include "lrng_definitions.h"
+#include "lrng_drng_chacha20.h"
+#include "lrng_sha.h"
+#include "lrng_testing.h"
+
+#if defined(CONFIG_LRNG_RAW_SCHED_HIRES_ENTROPY) || \
+ defined(CONFIG_LRNG_RAW_SCHED_PID_ENTROPY) || \
+ defined(CONFIG_LRNG_RAW_SCHED_START_TIME_ENTROPY) || \
+ defined(CONFIG_LRNG_RAW_SCHED_NVCSW_ENTROPY) || \
+ defined(CONFIG_LRNG_SCHED_PERF)
+#define LRNG_TESTING_USE_BUSYLOOP
+#endif
+
+#ifdef CONFIG_LRNG_TESTING_RECORDING
+
+#define LRNG_TESTING_RINGBUFFER_SIZE 1024
+#define LRNG_TESTING_RINGBUFFER_MASK (LRNG_TESTING_RINGBUFFER_SIZE - 1)
+
+struct lrng_testing {
+ u32 lrng_testing_rb[LRNG_TESTING_RINGBUFFER_SIZE];
+ u32 rb_reader;
+ atomic_t rb_writer;
+ atomic_t lrng_testing_enabled;
+ spinlock_t lock;
+ wait_queue_head_t read_wait;
+};
+
+/*************************** Generic Data Handling ****************************/
+
+/*
+ * boot variable:
+ * 0 ==> No boot test, gathering of runtime data allowed
+ * 1 ==> Boot test enabled and ready for collecting data, gathering runtime
+ * data is disabled
+ * 2 ==> Boot test completed and disabled, gathering of runtime data is
+ * disabled
+ */
+
+static void lrng_testing_reset(struct lrng_testing *data)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&data->lock, flags);
+ data->rb_reader = 0;
+ atomic_set(&data->rb_writer, 0);
+ spin_unlock_irqrestore(&data->lock, flags);
+}
+
+static void lrng_testing_init(struct lrng_testing *data, u32 boot)
+{
+ /*
+ * The boot time testing implies we have a running test. If the
+	 * caller wants to clear it, the boot_test flag has to be unset
+	 * at runtime via sysfs to enable regular runtime testing.
+ */
+ if (boot)
+ return;
+
+ lrng_testing_reset(data);
+ atomic_set(&data->lrng_testing_enabled, 1);
+ pr_warn("Enabling data collection\n");
+}
+
+static void lrng_testing_fini(struct lrng_testing *data, u32 boot)
+{
+ /* If we have boot data, we do not reset yet to allow data to be read */
+ if (boot)
+ return;
+
+ atomic_set(&data->lrng_testing_enabled, 0);
+ lrng_testing_reset(data);
+ pr_warn("Disabling data collection\n");
+}
+
+static bool lrng_testing_store(struct lrng_testing *data, u32 value,
+ u32 *boot)
+{
+ unsigned long flags;
+
+ if (!atomic_read(&data->lrng_testing_enabled) && (*boot != 1))
+ return false;
+
+ spin_lock_irqsave(&data->lock, flags);
+
+ /*
+ * Disable entropy testing for boot time testing after ring buffer
+ * is filled.
+ */
+ if (*boot) {
+ if (((u32)atomic_read(&data->rb_writer)) >
+ LRNG_TESTING_RINGBUFFER_SIZE) {
+ *boot = 2;
+ pr_warn_once("One time data collection test disabled\n");
+ spin_unlock_irqrestore(&data->lock, flags);
+ return false;
+ }
+
+ if (atomic_read(&data->rb_writer) == 1)
+ pr_warn("One time data collection test enabled\n");
+ }
+
+ data->lrng_testing_rb[((u32)atomic_read(&data->rb_writer)) &
+ LRNG_TESTING_RINGBUFFER_MASK] = value;
+ atomic_inc(&data->rb_writer);
+
+ spin_unlock_irqrestore(&data->lock, flags);
+
+#ifndef LRNG_TESTING_USE_BUSYLOOP
+ if (wq_has_sleeper(&data->read_wait))
+ wake_up_interruptible(&data->read_wait);
+#endif
+
+ return true;
+}
+
+static bool lrng_testing_have_data(struct lrng_testing *data)
+{
+ return ((((u32)atomic_read(&data->rb_writer)) &
+ LRNG_TESTING_RINGBUFFER_MASK) !=
+ (data->rb_reader & LRNG_TESTING_RINGBUFFER_MASK));
+}
+
+static int lrng_testing_reader(struct lrng_testing *data, u32 *boot,
+ u8 *outbuf, u32 outbuflen)
+{
+ unsigned long flags;
+ int collected_data = 0;
+
+ lrng_testing_init(data, *boot);
+
+ while (outbuflen) {
+ u32 writer = (u32)atomic_read(&data->rb_writer);
+
+ spin_lock_irqsave(&data->lock, flags);
+
+ /* We have no data or reached the writer. */
+ if (!writer || (writer == data->rb_reader)) {
+
+ spin_unlock_irqrestore(&data->lock, flags);
+
+ /*
+			 * Now that we gathered all boot data, enable regular data
+ * collection.
+ */
+ if (*boot) {
+ *boot = 0;
+ goto out;
+ }
+
+#ifdef LRNG_TESTING_USE_BUSYLOOP
+ while (!lrng_testing_have_data(data))
+ ;
+#else
+ wait_event_interruptible(data->read_wait,
+ lrng_testing_have_data(data));
+#endif
+ if (signal_pending(current)) {
+ collected_data = -ERESTARTSYS;
+ goto out;
+ }
+
+ continue;
+ }
+
+ /* We copy out word-wise */
+ if (outbuflen < sizeof(u32)) {
+ spin_unlock_irqrestore(&data->lock, flags);
+ goto out;
+ }
+
+ memcpy(outbuf, &data->lrng_testing_rb[data->rb_reader],
+ sizeof(u32));
+ data->rb_reader++;
+
+ spin_unlock_irqrestore(&data->lock, flags);
+
+ outbuf += sizeof(u32);
+ outbuflen -= sizeof(u32);
+ collected_data += sizeof(u32);
+ }
+
+out:
+ lrng_testing_fini(data, *boot);
+ return collected_data;
+}
+
+static int lrng_testing_extract_user(struct file *file, char __user *buf,
+ size_t nbytes, loff_t *ppos,
+ int (*reader)(u8 *outbuf, u32 outbuflen))
+{
+ u8 *tmp, *tmp_aligned;
+ int ret = 0, large_request = (nbytes > 256);
+
+ if (!nbytes)
+ return 0;
+
+ /*
+ * The intention of this interface is for collecting at least
+ * 1000 samples due to the SP800-90B requirements. So, we make no
+	 * effort to avoid allocating more memory than is actually needed
+ * by the user. Hence, we allocate sufficient memory to always hold
+ * that amount of data.
+ */
+ tmp = kmalloc(LRNG_TESTING_RINGBUFFER_SIZE + sizeof(u32), GFP_KERNEL);
+ if (!tmp)
+ return -ENOMEM;
+
+ tmp_aligned = PTR_ALIGN(tmp, sizeof(u32));
+
+ while (nbytes) {
+ int i;
+
+ if (large_request && need_resched()) {
+ if (signal_pending(current)) {
+ if (ret == 0)
+ ret = -ERESTARTSYS;
+ break;
+ }
+ schedule();
+ }
+
+ i = min_t(int, nbytes, LRNG_TESTING_RINGBUFFER_SIZE);
+ i = reader(tmp_aligned, i);
+ if (i <= 0) {
+ if (i < 0)
+ ret = i;
+ break;
+ }
+ if (copy_to_user(buf, tmp_aligned, i)) {
+ ret = -EFAULT;
+ break;
+ }
+
+ nbytes -= i;
+ buf += i;
+ ret += i;
+ }
+
+ kfree_sensitive(tmp);
+
+ if (ret > 0)
+ *ppos += ret;
+
+ return ret;
+}
+
+#endif /* CONFIG_LRNG_TESTING_RECORDING */
+
+/************* Raw High-Resolution IRQ Timer Entropy Data Handling ************/
+
+#ifdef CONFIG_LRNG_RAW_HIRES_ENTROPY
+
+static u32 boot_raw_hires_test = 0;
+module_param(boot_raw_hires_test, uint, 0644);
+MODULE_PARM_DESC(boot_raw_hires_test, "Enable gathering boot time high resolution timer entropy of the first IRQ entropy events");
+
+static struct lrng_testing lrng_raw_hires = {
+ .rb_reader = 0,
+ .rb_writer = ATOMIC_INIT(0),
+ .lock = __SPIN_LOCK_UNLOCKED(lrng_raw_hires.lock),
+ .read_wait = __WAIT_QUEUE_HEAD_INITIALIZER(lrng_raw_hires.read_wait)
+};
+
+bool lrng_raw_hires_entropy_store(u32 value)
+{
+ return lrng_testing_store(&lrng_raw_hires, value, &boot_raw_hires_test);
+}
+
+static int lrng_raw_hires_entropy_reader(u8 *outbuf, u32 outbuflen)
+{
+ return lrng_testing_reader(&lrng_raw_hires, &boot_raw_hires_test,
+ outbuf, outbuflen);
+}
+
+static ssize_t lrng_raw_hires_read(struct file *file, char __user *to,
+ size_t count, loff_t *ppos)
+{
+ return lrng_testing_extract_user(file, to, count, ppos,
+ lrng_raw_hires_entropy_reader);
+}
+
+static const struct file_operations lrng_raw_hires_fops = {
+ .owner = THIS_MODULE,
+ .read = lrng_raw_hires_read,
+};
+
+#endif /* CONFIG_LRNG_RAW_HIRES_ENTROPY */
+
+/********************* Raw Jiffies Entropy Data Handling **********************/
+
+#ifdef CONFIG_LRNG_RAW_JIFFIES_ENTROPY
+
+static u32 boot_raw_jiffies_test = 0;
+module_param(boot_raw_jiffies_test, uint, 0644);
+MODULE_PARM_DESC(boot_raw_jiffies_test, "Enable gathering boot time jiffies entropy of the first entropy events");
+
+static struct lrng_testing lrng_raw_jiffies = {
+ .rb_reader = 0,
+ .rb_writer = ATOMIC_INIT(0),
+ .lock = __SPIN_LOCK_UNLOCKED(lrng_raw_jiffies.lock),
+ .read_wait = __WAIT_QUEUE_HEAD_INITIALIZER(lrng_raw_jiffies.read_wait)
+};
+
+bool lrng_raw_jiffies_entropy_store(u32 value)
+{
+ return lrng_testing_store(&lrng_raw_jiffies, value,
+ &boot_raw_jiffies_test);
+}
+
+static int lrng_raw_jiffies_entropy_reader(u8 *outbuf, u32 outbuflen)
+{
+ return lrng_testing_reader(&lrng_raw_jiffies, &boot_raw_jiffies_test,
+ outbuf, outbuflen);
+}
+
+static ssize_t lrng_raw_jiffies_read(struct file *file, char __user *to,
+ size_t count, loff_t *ppos)
+{
+ return lrng_testing_extract_user(file, to, count, ppos,
+ lrng_raw_jiffies_entropy_reader);
+}
+
+static const struct file_operations lrng_raw_jiffies_fops = {
+ .owner = THIS_MODULE,
+ .read = lrng_raw_jiffies_read,
+};
+
+#endif /* CONFIG_LRNG_RAW_JIFFIES_ENTROPY */
+
+/************************** Raw IRQ Data Handling ****************************/
+
+#ifdef CONFIG_LRNG_RAW_IRQ_ENTROPY
+
+static u32 boot_raw_irq_test = 0;
+module_param(boot_raw_irq_test, uint, 0644);
+MODULE_PARM_DESC(boot_raw_irq_test, "Enable gathering boot time entropy of the first IRQ entropy events");
+
+static struct lrng_testing lrng_raw_irq = {
+ .rb_reader = 0,
+ .rb_writer = ATOMIC_INIT(0),
+ .lock = __SPIN_LOCK_UNLOCKED(lrng_raw_irq.lock),
+ .read_wait = __WAIT_QUEUE_HEAD_INITIALIZER(lrng_raw_irq.read_wait)
+};
+
+bool lrng_raw_irq_entropy_store(u32 value)
+{
+ return lrng_testing_store(&lrng_raw_irq, value, &boot_raw_irq_test);
+}
+
+static int lrng_raw_irq_entropy_reader(u8 *outbuf, u32 outbuflen)
+{
+ return lrng_testing_reader(&lrng_raw_irq, &boot_raw_irq_test, outbuf,
+ outbuflen);
+}
+
+static ssize_t lrng_raw_irq_read(struct file *file, char __user *to,
+ size_t count, loff_t *ppos)
+{
+ return lrng_testing_extract_user(file, to, count, ppos,
+ lrng_raw_irq_entropy_reader);
+}
+
+static const struct file_operations lrng_raw_irq_fops = {
+ .owner = THIS_MODULE,
+ .read = lrng_raw_irq_read,
+};
+
+#endif /* CONFIG_LRNG_RAW_IRQ_ENTROPY */
+
+/************************ Raw _RET_IP_ Data Handling **************************/
+
+#ifdef CONFIG_LRNG_RAW_RETIP_ENTROPY
+
+static u32 boot_raw_retip_test = 0;
+module_param(boot_raw_retip_test, uint, 0644);
+MODULE_PARM_DESC(boot_raw_retip_test, "Enable gathering boot time entropy of the first return instruction pointer entropy events");
+
+static struct lrng_testing lrng_raw_retip = {
+ .rb_reader = 0,
+ .rb_writer = ATOMIC_INIT(0),
+ .lock = __SPIN_LOCK_UNLOCKED(lrng_raw_retip.lock),
+ .read_wait = __WAIT_QUEUE_HEAD_INITIALIZER(lrng_raw_retip.read_wait)
+};
+
+bool lrng_raw_retip_entropy_store(u32 value)
+{
+ return lrng_testing_store(&lrng_raw_retip, value, &boot_raw_retip_test);
+}
+
+static int lrng_raw_retip_entropy_reader(u8 *outbuf, u32 outbuflen)
+{
+ return lrng_testing_reader(&lrng_raw_retip, &boot_raw_retip_test,
+ outbuf, outbuflen);
+}
+
+static ssize_t lrng_raw_retip_read(struct file *file, char __user *to,
+ size_t count, loff_t *ppos)
+{
+ return lrng_testing_extract_user(file, to, count, ppos,
+ lrng_raw_retip_entropy_reader);
+}
+
+static const struct file_operations lrng_raw_retip_fops = {
+ .owner = THIS_MODULE,
+ .read = lrng_raw_retip_read,
+};
+
+#endif /* CONFIG_LRNG_RAW_RETIP_ENTROPY */
+
+/********************** Raw IRQ register Data Handling ************************/
+
+#ifdef CONFIG_LRNG_RAW_REGS_ENTROPY
+
+static u32 boot_raw_regs_test = 0;
+module_param(boot_raw_regs_test, uint, 0644);
+MODULE_PARM_DESC(boot_raw_regs_test, "Enable gathering boot time entropy of the first interrupt register entropy events");
+
+static struct lrng_testing lrng_raw_regs = {
+ .rb_reader = 0,
+ .rb_writer = ATOMIC_INIT(0),
+ .lock = __SPIN_LOCK_UNLOCKED(lrng_raw_regs.lock),
+ .read_wait = __WAIT_QUEUE_HEAD_INITIALIZER(lrng_raw_regs.read_wait)
+};
+
+bool lrng_raw_regs_entropy_store(u32 value)
+{
+ return lrng_testing_store(&lrng_raw_regs, value, &boot_raw_regs_test);
+}
+
+static int lrng_raw_regs_entropy_reader(u8 *outbuf, u32 outbuflen)
+{
+ return lrng_testing_reader(&lrng_raw_regs, &boot_raw_regs_test,
+ outbuf, outbuflen);
+}
+
+static ssize_t lrng_raw_regs_read(struct file *file, char __user *to,
+ size_t count, loff_t *ppos)
+{
+ return lrng_testing_extract_user(file, to, count, ppos,
+ lrng_raw_regs_entropy_reader);
+}
+
+static const struct file_operations lrng_raw_regs_fops = {
+ .owner = THIS_MODULE,
+ .read = lrng_raw_regs_read,
+};
+
+#endif /* CONFIG_LRNG_RAW_REGS_ENTROPY */
+
+/********************** Raw Entropy Array Data Handling ***********************/
+
+#ifdef CONFIG_LRNG_RAW_ARRAY
+
+static u32 boot_raw_array = 0;
+module_param(boot_raw_array, uint, 0644);
+MODULE_PARM_DESC(boot_raw_array, "Enable gathering boot time raw noise array data of the first entropy events");
+
+static struct lrng_testing lrng_raw_array = {
+ .rb_reader = 0,
+ .rb_writer = ATOMIC_INIT(0),
+ .lock = __SPIN_LOCK_UNLOCKED(lrng_raw_array.lock),
+ .read_wait = __WAIT_QUEUE_HEAD_INITIALIZER(lrng_raw_array.read_wait)
+};
+
+bool lrng_raw_array_entropy_store(u32 value)
+{
+ return lrng_testing_store(&lrng_raw_array, value, &boot_raw_array);
+}
+
+static int lrng_raw_array_entropy_reader(u8 *outbuf, u32 outbuflen)
+{
+ return lrng_testing_reader(&lrng_raw_array, &boot_raw_array, outbuf,
+ outbuflen);
+}
+
+static ssize_t lrng_raw_array_read(struct file *file, char __user *to,
+ size_t count, loff_t *ppos)
+{
+ return lrng_testing_extract_user(file, to, count, ppos,
+ lrng_raw_array_entropy_reader);
+}
+
+static const struct file_operations lrng_raw_array_fops = {
+ .owner = THIS_MODULE,
+ .read = lrng_raw_array_read,
+};
+
+#endif /* CONFIG_LRNG_RAW_ARRAY */
+
+/******************** Interrupt Performance Data Handling *********************/
+
+#ifdef CONFIG_LRNG_IRQ_PERF
+
+static u32 boot_irq_perf = 0;
+module_param(boot_irq_perf, uint, 0644);
+MODULE_PARM_DESC(boot_irq_perf, "Enable gathering interrupt entropy source performance data");
+
+static struct lrng_testing lrng_irq_perf = {
+ .rb_reader = 0,
+ .rb_writer = ATOMIC_INIT(0),
+ .lock = __SPIN_LOCK_UNLOCKED(lrng_irq_perf.lock),
+ .read_wait = __WAIT_QUEUE_HEAD_INITIALIZER(lrng_irq_perf.read_wait)
+};
+
+bool lrng_perf_time(u32 start)
+{
+ return lrng_testing_store(&lrng_irq_perf, random_get_entropy() - start,
+ &boot_irq_perf);
+}
+
+static int lrng_irq_perf_reader(u8 *outbuf, u32 outbuflen)
+{
+ return lrng_testing_reader(&lrng_irq_perf, &boot_irq_perf, outbuf,
+ outbuflen);
+}
+
+static ssize_t lrng_irq_perf_read(struct file *file, char __user *to,
+ size_t count, loff_t *ppos)
+{
+ return lrng_testing_extract_user(file, to, count, ppos,
+ lrng_irq_perf_reader);
+}
+
+static const struct file_operations lrng_irq_perf_fops = {
+ .owner = THIS_MODULE,
+ .read = lrng_irq_perf_read,
+};
+
+#endif /* CONFIG_LRNG_IRQ_PERF */
+
+/****** Raw High-Resolution Scheduler-based Timer Entropy Data Handling *******/
+
+#ifdef CONFIG_LRNG_RAW_SCHED_HIRES_ENTROPY
+
+static u32 boot_raw_sched_hires_test = 0;
+module_param(boot_raw_sched_hires_test, uint, 0644);
+MODULE_PARM_DESC(boot_raw_sched_hires_test, "Enable gathering boot time high resolution timer entropy of the first Scheduler-based entropy events");
+
+static struct lrng_testing lrng_raw_sched_hires = {
+ .rb_reader = 0,
+ .rb_writer = ATOMIC_INIT(0),
+ .lock = __SPIN_LOCK_UNLOCKED(lrng_raw_sched_hires.lock),
+ .read_wait =
+ __WAIT_QUEUE_HEAD_INITIALIZER(lrng_raw_sched_hires.read_wait)
+};
+
+bool lrng_raw_sched_hires_entropy_store(u32 value)
+{
+ return lrng_testing_store(&lrng_raw_sched_hires, value,
+ &boot_raw_sched_hires_test);
+}
+
+static int lrng_raw_sched_hires_entropy_reader(u8 *outbuf, u32 outbuflen)
+{
+ return lrng_testing_reader(&lrng_raw_sched_hires,
+ &boot_raw_sched_hires_test,
+ outbuf, outbuflen);
+}
+
+static ssize_t lrng_raw_sched_hires_read(struct file *file, char __user *to,
+ size_t count, loff_t *ppos)
+{
+ return lrng_testing_extract_user(file, to, count, ppos,
+ lrng_raw_sched_hires_entropy_reader);
+}
+
+static const struct file_operations lrng_raw_sched_hires_fops = {
+ .owner = THIS_MODULE,
+ .read = lrng_raw_sched_hires_read,
+};
+
+#endif /* CONFIG_LRNG_RAW_SCHED_HIRES_ENTROPY */
+
+/******************** Scheduler Performance Data Handling *********************/
+
+#ifdef CONFIG_LRNG_SCHED_PERF
+
+static u32 boot_sched_perf = 0;
+module_param(boot_sched_perf, uint, 0644);
+MODULE_PARM_DESC(boot_sched_perf, "Enable gathering scheduler-based entropy source performance data");
+
+static struct lrng_testing lrng_sched_perf = {
+ .rb_reader = 0,
+ .rb_writer = ATOMIC_INIT(0),
+ .lock = __SPIN_LOCK_UNLOCKED(lrng_sched_perf.lock),
+ .read_wait = __WAIT_QUEUE_HEAD_INITIALIZER(lrng_sched_perf.read_wait)
+};
+
+bool lrng_sched_perf_time(u32 start)
+{
+ return lrng_testing_store(&lrng_sched_perf, random_get_entropy() - start,
+ &boot_sched_perf);
+}
+
+static int lrng_sched_perf_reader(u8 *outbuf, u32 outbuflen)
+{
+ return lrng_testing_reader(&lrng_sched_perf, &boot_sched_perf, outbuf,
+ outbuflen);
+}
+
+static ssize_t lrng_sched_perf_read(struct file *file, char __user *to,
+ size_t count, loff_t *ppos)
+{
+ return lrng_testing_extract_user(file, to, count, ppos,
+ lrng_sched_perf_reader);
+}
+
+static const struct file_operations lrng_sched_perf_fops = {
+ .owner = THIS_MODULE,
+ .read = lrng_sched_perf_read,
+};
+
+#endif /* CONFIG_LRNG_SCHED_PERF */
+
+/*************** Raw Scheduler task_struct->pid Data Handling *****************/
+
+#ifdef CONFIG_LRNG_RAW_SCHED_PID_ENTROPY
+
+static u32 boot_raw_sched_pid_test = 0;
+module_param(boot_raw_sched_pid_test, uint, 0644);
+MODULE_PARM_DESC(boot_raw_sched_pid_test, "Enable gathering boot time entropy of the first PIDs collected by the scheduler entropy source");
+
+static struct lrng_testing lrng_raw_sched_pid = {
+ .rb_reader = 0,
+ .rb_writer = ATOMIC_INIT(0),
+ .lock = __SPIN_LOCK_UNLOCKED(lrng_raw_sched_pid.lock),
+ .read_wait = __WAIT_QUEUE_HEAD_INITIALIZER(lrng_raw_sched_pid.read_wait)
+};
+
+bool lrng_raw_sched_pid_entropy_store(u32 value)
+{
+ return lrng_testing_store(&lrng_raw_sched_pid, value,
+ &boot_raw_sched_pid_test);
+}
+
+static int lrng_raw_sched_pid_entropy_reader(u8 *outbuf, u32 outbuflen)
+{
+ return lrng_testing_reader(&lrng_raw_sched_pid,
+ &boot_raw_sched_pid_test, outbuf, outbuflen);
+}
+
+static ssize_t lrng_raw_sched_pid_read(struct file *file, char __user *to,
+ size_t count, loff_t *ppos)
+{
+ return lrng_testing_extract_user(file, to, count, ppos,
+ lrng_raw_sched_pid_entropy_reader);
+}
+
+static const struct file_operations lrng_raw_sched_pid_fops = {
+ .owner = THIS_MODULE,
+ .read = lrng_raw_sched_pid_read,
+};
+
+#endif /* CONFIG_LRNG_RAW_SCHED_PID_ENTROPY */
+
+/*********** Raw Scheduler task_struct->start_time Data Handling **************/
+
+#ifdef CONFIG_LRNG_RAW_SCHED_START_TIME_ENTROPY
+
+static u32 boot_raw_sched_starttime_test = 0;
+module_param(boot_raw_sched_starttime_test, uint, 0644);
+MODULE_PARM_DESC(boot_raw_sched_starttime_test, "Enable gathering boot time entropy of the first task start times collected by the scheduler entropy source");
+
+static struct lrng_testing lrng_raw_sched_starttime = {
+ .rb_reader = 0,
+ .rb_writer = ATOMIC_INIT(0),
+ .lock = __SPIN_LOCK_UNLOCKED(lrng_raw_sched_starttime.lock),
+ .read_wait = __WAIT_QUEUE_HEAD_INITIALIZER(lrng_raw_sched_starttime.read_wait)
+};
+
+bool lrng_raw_sched_starttime_entropy_store(u32 value)
+{
+ return lrng_testing_store(&lrng_raw_sched_starttime, value,
+ &boot_raw_sched_starttime_test);
+}
+
+static int lrng_raw_sched_starttime_entropy_reader(u8 *outbuf, u32 outbuflen)
+{
+ return lrng_testing_reader(&lrng_raw_sched_starttime,
+ &boot_raw_sched_starttime_test, outbuf, outbuflen);
+}
+
+static ssize_t lrng_raw_sched_starttime_read(struct file *file, char __user *to,
+ size_t count, loff_t *ppos)
+{
+ return lrng_testing_extract_user(file, to, count, ppos,
+ lrng_raw_sched_starttime_entropy_reader);
+}
+
+static const struct file_operations lrng_raw_sched_starttime_fops = {
+ .owner = THIS_MODULE,
+ .read = lrng_raw_sched_starttime_read,
+};
+
+#endif /* CONFIG_LRNG_RAW_SCHED_START_TIME_ENTROPY */
+
+/************** Raw Scheduler task_struct->nvcsw Data Handling ****************/
+
+#ifdef CONFIG_LRNG_RAW_SCHED_NVCSW_ENTROPY
+
+static u32 boot_raw_sched_nvcsw_test = 0;
+module_param(boot_raw_sched_nvcsw_test, uint, 0644);
+MODULE_PARM_DESC(boot_raw_sched_nvcsw_test, "Enable gathering boot time entropy of the first task context switch numbers collected by the scheduler entropy source");
+
+static struct lrng_testing lrng_raw_sched_nvcsw = {
+ .rb_reader = 0,
+ .rb_writer = ATOMIC_INIT(0),
+ .lock = __SPIN_LOCK_UNLOCKED(lrng_raw_sched_nvcsw.lock),
+ .read_wait = __WAIT_QUEUE_HEAD_INITIALIZER(lrng_raw_sched_nvcsw.read_wait)
+};
+
+bool lrng_raw_sched_nvcsw_entropy_store(u32 value)
+{
+ return lrng_testing_store(&lrng_raw_sched_nvcsw, value,
+ &boot_raw_sched_nvcsw_test);
+}
+
+static int lrng_raw_sched_nvcsw_entropy_reader(u8 *outbuf, u32 outbuflen)
+{
+ return lrng_testing_reader(&lrng_raw_sched_nvcsw,
+ &boot_raw_sched_nvcsw_test, outbuf, outbuflen);
+}
+
+static ssize_t lrng_raw_sched_nvcsw_read(struct file *file, char __user *to,
+ size_t count, loff_t *ppos)
+{
+ return lrng_testing_extract_user(file, to, count, ppos,
+ lrng_raw_sched_nvcsw_entropy_reader);
+}
+
+static const struct file_operations lrng_raw_sched_nvcsw_fops = {
+ .owner = THIS_MODULE,
+ .read = lrng_raw_sched_nvcsw_read,
+};
+
+#endif /* CONFIG_LRNG_RAW_SCHED_NVCSW_ENTROPY */
+
+/*********************************** ACVT ************************************/
+
+#ifdef CONFIG_LRNG_ACVT_HASH
+
+/* maximum amount of data to be hashed as defined by ACVP */
+#define LRNG_ACVT_MAX_SHA_MSG (65536 >> 3)
+
+/*
+ * As we use static variables to store the data, it is clear that the
+ * test interface is only able to handle single threaded testing. This is
+ * considered to be sufficient for testing. If the ACVT test interface
+ * were used multi-threaded, the caller would get garbage, but the
+ * kernel operation is unaffected by this.
+ */
+static u8 lrng_acvt_hash_data[LRNG_ACVT_MAX_SHA_MSG]
+ __aligned(LRNG_KCAPI_ALIGN);
+static atomic_t lrng_acvt_hash_data_size = ATOMIC_INIT(0);
+static u8 lrng_acvt_hash_digest[LRNG_ATOMIC_DIGEST_SIZE];
+
+static ssize_t lrng_acvt_hash_write(struct file *file, const char __user *buf,
+ size_t nbytes, loff_t *ppos)
+{
+ if (nbytes > LRNG_ACVT_MAX_SHA_MSG)
+ return -EINVAL;
+
+ atomic_set(&lrng_acvt_hash_data_size, (int)nbytes);
+
+ return simple_write_to_buffer(lrng_acvt_hash_data,
+ LRNG_ACVT_MAX_SHA_MSG, ppos, buf, nbytes);
+}
+
+static ssize_t lrng_acvt_hash_read(struct file *file, char __user *to,
+ size_t count, loff_t *ppos)
+{
+ SHASH_DESC_ON_STACK(shash, NULL);
+ const struct lrng_hash_cb *hash_cb = &lrng_sha_hash_cb;
+ ssize_t ret;
+
+ if (count > LRNG_ATOMIC_DIGEST_SIZE)
+ return -EINVAL;
+
+ ret = hash_cb->hash_init(shash, NULL) ?:
+ hash_cb->hash_update(shash, lrng_acvt_hash_data,
+ atomic_read_u32(&lrng_acvt_hash_data_size)) ?:
+ hash_cb->hash_final(shash, lrng_acvt_hash_digest);
+ if (ret)
+ return ret;
+
+ return simple_read_from_buffer(to, count, ppos, lrng_acvt_hash_digest,
+ sizeof(lrng_acvt_hash_digest));
+}
+
+static const struct file_operations lrng_acvt_hash_fops = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .llseek = default_llseek,
+ .read = lrng_acvt_hash_read,
+ .write = lrng_acvt_hash_write,
+};
+
+#endif /* CONFIG_LRNG_ACVT_HASH */
+
+/**************************************************************************
+ * Debugfs interface
+ **************************************************************************/
+
+static int __init lrng_raw_init(void)
+{
+ struct dentry *lrng_raw_debugfs_root;
+
+ lrng_raw_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL);
+
+#ifdef CONFIG_LRNG_RAW_HIRES_ENTROPY
+ debugfs_create_file_unsafe("lrng_raw_hires", 0400,
+ lrng_raw_debugfs_root, NULL,
+ &lrng_raw_hires_fops);
+#endif
+#ifdef CONFIG_LRNG_RAW_JIFFIES_ENTROPY
+ debugfs_create_file_unsafe("lrng_raw_jiffies", 0400,
+ lrng_raw_debugfs_root, NULL,
+ &lrng_raw_jiffies_fops);
+#endif
+#ifdef CONFIG_LRNG_RAW_IRQ_ENTROPY
+ debugfs_create_file_unsafe("lrng_raw_irq", 0400, lrng_raw_debugfs_root,
+ NULL, &lrng_raw_irq_fops);
+#endif
+#ifdef CONFIG_LRNG_RAW_RETIP_ENTROPY
+ debugfs_create_file_unsafe("lrng_raw_retip", 0400,
+ lrng_raw_debugfs_root, NULL,
+ &lrng_raw_retip_fops);
+#endif
+#ifdef CONFIG_LRNG_RAW_REGS_ENTROPY
+ debugfs_create_file_unsafe("lrng_raw_regs", 0400,
+ lrng_raw_debugfs_root, NULL,
+ &lrng_raw_regs_fops);
+#endif
+#ifdef CONFIG_LRNG_RAW_ARRAY
+ debugfs_create_file_unsafe("lrng_raw_array", 0400,
+ lrng_raw_debugfs_root, NULL,
+ &lrng_raw_array_fops);
+#endif
+#ifdef CONFIG_LRNG_IRQ_PERF
+ debugfs_create_file_unsafe("lrng_irq_perf", 0400, lrng_raw_debugfs_root,
+ NULL, &lrng_irq_perf_fops);
+#endif
+#ifdef CONFIG_LRNG_RAW_SCHED_HIRES_ENTROPY
+ debugfs_create_file_unsafe("lrng_raw_sched_hires", 0400,
+ lrng_raw_debugfs_root,
+ NULL, &lrng_raw_sched_hires_fops);
+#endif
+#ifdef CONFIG_LRNG_RAW_SCHED_PID_ENTROPY
+ debugfs_create_file_unsafe("lrng_raw_sched_pid", 0400,
+ lrng_raw_debugfs_root, NULL,
+ &lrng_raw_sched_pid_fops);
+#endif
+#ifdef CONFIG_LRNG_RAW_SCHED_START_TIME_ENTROPY
+ debugfs_create_file_unsafe("lrng_raw_sched_starttime", 0400,
+ lrng_raw_debugfs_root, NULL,
+ &lrng_raw_sched_starttime_fops);
+#endif
+#ifdef CONFIG_LRNG_RAW_SCHED_NVCSW_ENTROPY
+ debugfs_create_file_unsafe("lrng_raw_sched_nvcsw", 0400,
+ lrng_raw_debugfs_root, NULL,
+ &lrng_raw_sched_nvcsw_fops);
+#endif
+#ifdef CONFIG_LRNG_SCHED_PERF
+ debugfs_create_file_unsafe("lrng_sched_perf", 0400,
+ lrng_raw_debugfs_root, NULL,
+ &lrng_sched_perf_fops);
+#endif
+#ifdef CONFIG_LRNG_ACVT_HASH
+ debugfs_create_file_unsafe("lrng_acvt_hash", 0600,
+ lrng_raw_debugfs_root, NULL,
+ &lrng_acvt_hash_fops);
+#endif
+
+ return 0;
+}
+
+module_init(lrng_raw_init);
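To show how these debugfs files are meant to be consumed, the following userspace sketch pulls raw 32-bit samples for SP800-90B analysis. It assumes debugfs is mounted at /sys/kernel/debug, that KBUILD_MODNAME resolves to lrng_testing, and that CONFIG_LRNG_RAW_HIRES_ENTROPY is enabled; for boot-time collection, the corresponding module parameter (e.g. lrng_testing.boot_raw_hires_test=1 on the kernel command line) would be set first.

/* Sketch: collect raw high-resolution timer samples (userspace). */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	uint32_t samples[1024];	/* Matches LRNG_TESTING_RINGBUFFER_SIZE. */
	ssize_t ret, i;
	int fd;

	fd = open("/sys/kernel/debug/lrng_testing/lrng_raw_hires", O_RDONLY);
	if (fd < 0)
		return 1;

	/* The reader hands out full u32 words only. */
	ret = read(fd, samples, sizeof(samples));
	close(fd);
	if (ret < 0)
		return 1;

	for (i = 0; i < ret / (ssize_t)sizeof(uint32_t); i++)
		printf("%u\n", samples[i]);

	return 0;
}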
diff --git a/drivers/char/lrng/lrng_testing.h b/drivers/char/lrng/lrng_testing.h
new file mode 100644
index 000000000000..672a7fca4595
--- /dev/null
+++ b/drivers/char/lrng/lrng_testing.h
@@ -0,0 +1,85 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause */
+/*
+ * Copyright (C) 2022, Stephan Mueller <smueller@chronox.de>
+ */
+
+#ifndef _LRNG_TESTING_H
+#define _LRNG_TESTING_H
+
+#ifdef CONFIG_LRNG_RAW_HIRES_ENTROPY
+bool lrng_raw_hires_entropy_store(u32 value);
+#else /* CONFIG_LRNG_RAW_HIRES_ENTROPY */
+static inline bool lrng_raw_hires_entropy_store(u32 value) { return false; }
+#endif /* CONFIG_LRNG_RAW_HIRES_ENTROPY */
+
+#ifdef CONFIG_LRNG_RAW_JIFFIES_ENTROPY
+bool lrng_raw_jiffies_entropy_store(u32 value);
+#else /* CONFIG_LRNG_RAW_JIFFIES_ENTROPY */
+static inline bool lrng_raw_jiffies_entropy_store(u32 value) { return false; }
+#endif /* CONFIG_LRNG_RAW_JIFFIES_ENTROPY */
+
+#ifdef CONFIG_LRNG_RAW_IRQ_ENTROPY
+bool lrng_raw_irq_entropy_store(u32 value);
+#else /* CONFIG_LRNG_RAW_IRQ_ENTROPY */
+static inline bool lrng_raw_irq_entropy_store(u32 value) { return false; }
+#endif /* CONFIG_LRNG_RAW_IRQ_ENTROPY */
+
+#ifdef CONFIG_LRNG_RAW_RETIP_ENTROPY
+bool lrng_raw_retip_entropy_store(u32 value);
+#else /* CONFIG_LRNG_RAW_RETIP_ENTROPY */
+static inline bool lrng_raw_retip_entropy_store(u32 value) { return false; }
+#endif /* CONFIG_LRNG_RAW_RETIP_ENTROPY */
+
+#ifdef CONFIG_LRNG_RAW_REGS_ENTROPY
+bool lrng_raw_regs_entropy_store(u32 value);
+#else /* CONFIG_LRNG_RAW_REGS_ENTROPY */
+static inline bool lrng_raw_regs_entropy_store(u32 value) { return false; }
+#endif /* CONFIG_LRNG_RAW_REGS_ENTROPY */
+
+#ifdef CONFIG_LRNG_RAW_ARRAY
+bool lrng_raw_array_entropy_store(u32 value);
+#else /* CONFIG_LRNG_RAW_ARRAY */
+static inline bool lrng_raw_array_entropy_store(u32 value) { return false; }
+#endif /* CONFIG_LRNG_RAW_ARRAY */
+
+#ifdef CONFIG_LRNG_IRQ_PERF
+bool lrng_perf_time(u32 start);
+#else /* CONFIG_LRNG_IRQ_PERF */
+static inline bool lrng_perf_time(u32 start) { return false; }
+#endif /* CONFIG_LRNG_IRQ_PERF */
+
+#ifdef CONFIG_LRNG_RAW_SCHED_HIRES_ENTROPY
+bool lrng_raw_sched_hires_entropy_store(u32 value);
+#else /* CONFIG_LRNG_RAW_SCHED_HIRES_ENTROPY */
+static inline bool
+lrng_raw_sched_hires_entropy_store(u32 value) { return false; }
+#endif /* CONFIG_LRNG_RAW_SCHED_HIRES_ENTROPY */
+
+#ifdef CONFIG_LRNG_RAW_SCHED_PID_ENTROPY
+bool lrng_raw_sched_pid_entropy_store(u32 value);
+#else /* CONFIG_LRNG_RAW_SCHED_PID_ENTROPY */
+static inline bool
+lrng_raw_sched_pid_entropy_store(u32 value) { return false; }
+#endif /* CONFIG_LRNG_RAW_SCHED_PID_ENTROPY */
+
+#ifdef CONFIG_LRNG_RAW_SCHED_START_TIME_ENTROPY
+bool lrng_raw_sched_starttime_entropy_store(u32 value);
+#else /* CONFIG_LRNG_RAW_SCHED_START_TIME_ENTROPY */
+static inline bool
+lrng_raw_sched_starttime_entropy_store(u32 value) { return false; }
+#endif /* CONFIG_LRNG_RAW_SCHED_START_TIME_ENTROPY */
+
+#ifdef CONFIG_LRNG_RAW_SCHED_NVCSW_ENTROPY
+bool lrng_raw_sched_nvcsw_entropy_store(u32 value);
+#else /* CONFIG_LRNG_RAW_SCHED_NVCSW_ENTROPY */
+static inline bool
+lrng_raw_sched_nvcsw_entropy_store(u32 value) { return false; }
+#endif /* CONFIG_LRNG_RAW_SCHED_NVCSW_ENTROPY */
+
+#ifdef CONFIG_LRNG_SCHED_PERF
+bool lrng_sched_perf_time(u32 start);
+#else /* CONFIG_LRNG_SCHED_PERF */
+static inline bool lrng_sched_perf_time(u32 start) { return false; }
+#endif /* CONFIG_LRNG_SCHED_PERF */
+
+#endif /* _LRNG_TESTING_H */
diff --git a/include/crypto/drbg.h b/include/crypto/drbg.h
index af5ad51d3eef..b12ae9bdebf4 100644
--- a/include/crypto/drbg.h
+++ b/include/crypto/drbg.h
@@ -283,4 +283,11 @@ enum drbg_prefixes {
DRBG_PREFIX3
};
+extern int drbg_alloc_state(struct drbg_state *drbg);
+extern void drbg_dealloc_state(struct drbg_state *drbg);
+extern void drbg_convert_tfm_core(const char *cra_driver_name, int *coreref,
+ bool *pr);
+extern const struct drbg_core drbg_cores[];
+extern unsigned short drbg_sec_strength(drbg_flag_t flags);
+
#endif /* _DRBG_H */
diff --git a/include/linux/lrng.h b/include/linux/lrng.h
new file mode 100644
index 000000000000..c0d31a03d51f
--- /dev/null
+++ b/include/linux/lrng.h
@@ -0,0 +1,251 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause */
+/*
+ * Copyright (C) 2022, Stephan Mueller <smueller@chronox.de>
+ */
+
+#ifndef _LRNG_H
+#define _LRNG_H
+
+#include <crypto/hash.h>
+#include <linux/errno.h>
+#include <linux/sched.h>
+#include <linux/types.h>
+
+/*
+ * struct lrng_drng_cb - cryptographic callback functions defining a DRNG
+ * @drng_name:		Name of DRNG
+ * @drng_alloc:		Allocate DRNG -- the provided integer should be used
+ *			for sanity checks.
+ *			return: allocated data structure or PTR_ERR on error
+ * @drng_dealloc:	Deallocate DRNG
+ * @drng_seed:		Seed the DRNG with data of arbitrary length
+ *			drng: pointer to data structure allocated with
+ *			drng_alloc
+ *			return: >= 0 on success, < 0 on error
+ * @drng_generate:	Generate random numbers from the DRNG with arbitrary
+ *			length
+ *			return: >= 0 the number of generated bytes,
+ *			< 0 on error
+ */
+struct lrng_drng_cb {
+ const char *(*drng_name)(void);
+ void *(*drng_alloc)(u32 sec_strength);
+ void (*drng_dealloc)(void *drng);
+ int (*drng_seed)(void *drng, const u8 *inbuf, u32 inbuflen);
+ int (*drng_generate)(void *drng, u8 *outbuf, u32 outbuflen);
+};
+
+/*
+ * struct lrng_hash_cb - cryptographic callback functions defining a hash
+ * @hash_name:		Name of hash used for reading the entropy pool
+ * @hash_alloc:		Allocate the hash for reading the entropy pool
+ *			return: allocated data structure (NULL is success too)
+ *			or ERR_PTR on error
+ * @hash_dealloc:	Deallocate hash
+ * @hash_digestsize:	Return the digest size of the used hash to read out
+ *			the entropy pool
+ *			hash: pointer to data structure allocated with
+ *			hash_alloc
+ *			return: size of digest of hash in bytes
+ * @hash_init:		Initialize hash
+ *			hash: pointer to data structure allocated with
+ *			hash_alloc
+ *			return: 0 on success, < 0 on error
+ * @hash_update:	Update hash operation
+ *			hash: pointer to data structure allocated with
+ *			hash_alloc
+ *			return: 0 on success, < 0 on error
+ * @hash_final:		Final hash operation
+ *			hash: pointer to data structure allocated with
+ *			hash_alloc
+ *			return: 0 on success, < 0 on error
+ * @hash_desc_zero:	Zeroization of hash state buffer
+ *
+ * Assumptions:
+ *
+ * 1. Hash operation will not sleep
+ * 2. The hash's volatile state information is provided with *shash by the
+ *    caller.
+ */
+struct lrng_hash_cb {
+ const char *(*hash_name)(void);
+ void *(*hash_alloc)(void);
+ void (*hash_dealloc)(void *hash);
+ u32 (*hash_digestsize)(void *hash);
+ int (*hash_init)(struct shash_desc *shash, void *hash);
+ int (*hash_update)(struct shash_desc *shash, const u8 *inbuf,
+ u32 inbuflen);
+ int (*hash_final)(struct shash_desc *shash, u8 *digest);
+ void (*hash_desc_zero)(struct shash_desc *shash);
+};
+
+/* Register cryptographic backend */
+#ifdef CONFIG_LRNG_SWITCH
+int lrng_set_drng_cb(const struct lrng_drng_cb *cb);
+int lrng_set_hash_cb(const struct lrng_hash_cb *cb);
+#else /* CONFIG_LRNG_SWITCH */
+static inline int
+lrng_set_drng_cb(const struct lrng_drng_cb *cb) { return -EOPNOTSUPP; }
+static inline int
+lrng_set_hash_cb(const struct lrng_hash_cb *cb) { return -EOPNOTSUPP; }
+#endif /* CONFIG_LRNG_SWITCH */
+
+/* Callback to feed events to the scheduler entropy source */
+#ifdef CONFIG_LRNG_SCHED
+extern void add_sched_randomness(const struct task_struct *p, int cpu);
+#else
+static inline void
+add_sched_randomness(const struct task_struct *p, int cpu) { }
+#endif
+
+/*
+ * lrng_get_random_bytes() - Provider of cryptographically strong random
+ * numbers for kernel-internal usage.
+ *
+ * This function is appropriate for in-kernel use cases operating in atomic
+ * contexts. It will always use the ChaCha20 DRNG and it may be the case that
+ * it is not fully seeded when being used.
+ *
+ * @buf: buffer to store the random bytes
+ * @nbytes: size of the buffer
+ */
+#ifdef CONFIG_LRNG_DRNG_ATOMIC
+void lrng_get_random_bytes(void *buf, int nbytes);
+#endif
+
+/*
+ * lrng_get_random_bytes_full() - Provider of cryptographically strong
+ * random numbers for kernel-internal usage from a fully initialized LRNG.
+ *
+ * This function will always return random numbers from a fully seeded and
+ * fully initialized LRNG.
+ *
+ * This function is appropriate only for non-atomic use cases as this
+ * function may sleep. It provides access to the full functionality of LRNG
+ * including the switchable DRNG support, which may support other DRNGs such
+ * as the SP800-90A DRBG.
+ *
+ * @buf: buffer to store the random bytes
+ * @nbytes: size of the buffer
+ */
+#ifdef CONFIG_LRNG
+void lrng_get_random_bytes_full(void *buf, int nbytes);
+#endif
+
+/*
+ * lrng_get_random_bytes_min() - Provider of cryptographically strong
+ * random numbers for kernel-internal usage from at least a minimally seeded
+ * LRNG, which is not necessarily fully initialized yet (e.g. SP800-90C
+ * oversampling in FIPS mode is not applied yet).
+ *
+ * This function is appropriate only for non-atomic use cases as this
+ * function may sleep. It provides access to the full functionality of LRNG
+ * including the switchable DRNG support, which may support other DRNGs such
+ * as the SP800-90A DRBG.
+ *
+ * @buf: buffer to store the random bytes
+ * @nbytes: size of the buffer
+ */
+#ifdef CONFIG_LRNG
+void lrng_get_random_bytes_min(void *buf, int nbytes);
+#endif
+
+/*
+ * lrng_get_random_bytes_pr() - Provider of cryptographically strong
+ * random numbers for kernel-internal usage from a fully initialized LRNG and
+ * requiring a reseed from the entropy sources before.
+ *
+ * This function will always return random numbers from a fully seeded and
+ * fully initialized LRNG.
+ *
+ * This function is appropriate only for non-atomic use cases as this
+ * function may sleep. It provides access to the full functionality of LRNG
+ * including the switchable DRNG support, which may support other DRNGs such
+ * as the SP800-90A DRBG.
+ *
+ * This call returns no more data than the amount of entropy that was pulled
+ * from the entropy sources. Thus, it is likely that this call returns less
+ * data than requested by the caller. Also, the caller MUST be prepared for
+ * this call to return 0 bytes, i.e. for it not to generate any data.
+ *
+ * @buf: buffer to store the random bytes
+ * @nbytes: size of the buffer
+ *
+ * @return: positive number indicates amount of generated bytes, < 0 on error
+ */
+#ifdef CONFIG_LRNG
+int lrng_get_random_bytes_pr(void *buf, int nbytes);
+#endif
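Because short and even zero-byte returns are part of the contract described above, a caller of lrng_get_random_bytes_pr() typically accumulates the output in a loop. A minimal sketch (my_gather_pr_bytes() is a hypothetical wrapper):

/* Sketch: accumulate buflen fully entropy-backed bytes. */
static int my_gather_pr_bytes(u8 *buf, int buflen)
{
	int filled = 0;

	while (filled < buflen) {
		int ret = lrng_get_random_bytes_pr(buf + filled,
						   buflen - filled);

		if (ret < 0)
			return ret;
		/* ret == 0 is legal: no entropy was available this round. */
		filled += ret;
	}

	return 0;
}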
+
+/*
+ * lrng_get_seed() - Fill buffer with data from entropy sources
+ *
+ * This call allows accessing the entropy sources directly and filling the
+ * buffer with data from all available entropy sources. This filled buffer is
+ * identical to the temporary seed buffer used by the LRNG to seed its DRNGs.
+ *
+ * The call allows users to seed their DRNG directly from the entropy
+ * sources in case the caller does not want to use the LRNG's DRNGs. The
+ * buffer can be used directly to seed the caller's DRNG.
+ *
+ * The call blocks as long as one LRNG DRNG is not yet fully seeded. If
+ * LRNG_GET_SEED_NONBLOCK is specified, it does not block in this case, but
+ * returns with an error.
+ *
+ * Considering SP800-90C, there is a differentiation between the seeding
+ * requirements during instantiating a DRNG and at runtime of the DRNG. When
+ * specifying LRNG_GET_SEED_FULLY_SEEDED the caller indicates the DRNG was
+ * already fully seeded and the regular amount of entropy is requested.
+ * Otherwise, the LRNG will obtain the entropy rate required for initial
+ * seeding. The following minimum entropy rates will be obtained:
+ *
+ * * FIPS mode:
+ * * Initial seeding: 384 bits of entropy
+ * * Runtime seeding: 256 bits of entropy
+ * * Non-FIPS mode:
+ * * 128 bits of entropy in any case
+ *
+ * Albeit these are minimum entropy rates, the LRNG tries to request the
+ * given amount of entropy from each entropy source individually. If the
+ * minimum amount of entropy cannot be obtained collectively by all entropy
+ * sources, the LRNG will not fill the buffer.
+ *
+ * The return data in buf is structurally equivalent to the following
+ * definition:
+ *
+ * struct {
+ * u64 seedlen;
+ * u64 entropy_rate;
+ * struct entropy_buf seed;
+ * } __attribute((__packed__));
+ *
+ * As struct entropy_buf is not known outside of the LRNG, the LRNG fills
+ * seedlen first with the size of struct entropy_buf. If the caller-provided
+ * buffer buf is smaller than sizeof(u64), then -EINVAL is returned
+ * and buf is not touched. If it is sizeof(u64) or larger but smaller
+ * than the size of the structure above, -EMSGSIZE is returned and seedlen
+ * is filled with the required buffer size. Finally, if buf is large
+ * enough to hold all data, it is filled with the seed data and the seedlen
+ * is set to sizeof(struct entropy_buf). The entropy rate is returned with
+ * the variable entropy_rate and provides the value in bits.
+ *
+ * The seed buffer is the data that should be handed to the caller's DRNG as
+ * seed data.
+ *
+ * @buf [out] Buffer to be filled with data from the entropy sources - note, the
+ * buffer is marked as u64 to ensure it is aligned to 64 bits.
+ * @nbytes [in] Size of the buffer allocated by the caller - this value
+ * provides the size of @buf in bytes.
+ * @flags [in] Flags field to adjust the behavior
+ *
+ * @return -EINVAL or -EMSGSIZE indicating the buffer is too small, -EAGAIN when
+ * the call would block, but NONBLOCK is specified, > 0 the size of
+ * the filled buffer.
+ */
+#ifdef CONFIG_LRNG
+enum lrng_get_seed_flags {
+ LRNG_GET_SEED_NONBLOCK = 0x0001, /**< Do not block the call */
+ LRNG_GET_SEED_FULLY_SEEDED = 0x0002, /**< DRNG is fully seeded */
+};
+
+ssize_t lrng_get_seed(u64 *buf, size_t nbytes, unsigned int flags);
+#endif
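A sketch of the two-step sizing protocol documented above; my_pull_seed() and my_seed_drng() are hypothetical, and the reading that seedlen reports the required size on -EMSGSIZE follows from the description.

/* Hypothetical consumer that seeds the caller's own DRNG. */
static void my_seed_drng(const u8 *seed, u64 seedlen, u64 entropy_bits);

static int my_pull_seed(void)
{
	u64 seedlen = 0;
	u64 *buf;
	ssize_t ret;

	/* Undersized buffer: -EMSGSIZE, seedlen receives the needed size. */
	ret = lrng_get_seed(&seedlen, sizeof(seedlen), LRNG_GET_SEED_NONBLOCK);
	if (ret != -EMSGSIZE)
		return ret < 0 ? (int)ret : -EINVAL;

	buf = kmalloc(seedlen, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	ret = lrng_get_seed(buf, seedlen, LRNG_GET_SEED_NONBLOCK);
	if (ret > 0) {
		/*
		 * buf[0] = seedlen, buf[1] = entropy rate in bits, the
		 * seed data itself starts at buf[2].
		 */
		my_seed_drng((const u8 *)&buf[2], buf[0], buf[1]);
	}

	kfree_sensitive(buf);
	return ret < 0 ? (int)ret : 0;
}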
+
+#endif /* _LRNG_H */
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 9116bcc90346..b44ab31a35cf 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -6,6 +6,7 @@
*
* Copyright (C) 1991-2002 Linus Torvalds
*/
+#include <linux/lrng.h>
#include <linux/highmem.h>
#include <linux/hrtimer_api.h>
#include <linux/ktime_api.h>
@@ -3728,6 +3729,8 @@ ttwu_stat(struct task_struct *p, int cpu, int wake_flags)
{
struct rq *rq;
+ add_sched_randomness(p, cpu);
+
if (!schedstat_enabled())
return;
--
2.43.0