Diffstat (limited to 'include')
 include/linux/prandom.h | 18 ++----------------
 include/linux/random.h  | 40 ++++++++++++++++++++++++++++++++++++++++
 2 files changed, 42 insertions(+), 16 deletions(-)
diff --git a/include/linux/prandom.h b/include/linux/prandom.h
index e0a0759dd09c..1f4a0de7b019 100644
--- a/include/linux/prandom.h
+++ b/include/linux/prandom.h
@@ -23,24 +23,10 @@ void prandom_seed_full_state(struct rnd_state __percpu *pcpu_state);
 #define prandom_init_once(pcpu_state) \
 	DO_ONCE(prandom_seed_full_state, (pcpu_state))
 
-/**
- * prandom_u32_max - returns a pseudo-random number in interval [0, ep_ro)
- * @ep_ro: right open interval endpoint
- *
- * Returns a pseudo-random number that is in interval [0, ep_ro). This is
- * useful when requesting a random index of an array containing ep_ro elements,
- * for example. The result is somewhat biased when ep_ro is not a power of 2,
- * so do not use this for cryptographic purposes.
- *
- * Returns: pseudo-random number in interval [0, ep_ro)
- */
+/* Deprecated: use get_random_u32_below() instead. */
 static inline u32 prandom_u32_max(u32 ep_ro)
 {
-	if (__builtin_constant_p(ep_ro <= 1U << 8) && ep_ro <= 1U << 8)
-		return (get_random_u8() * ep_ro) >> 8;
-	if (__builtin_constant_p(ep_ro <= 1U << 16) && ep_ro <= 1U << 16)
-		return (get_random_u16() * ep_ro) >> 16;
-	return ((u64)get_random_u32() * ep_ro) >> 32;
+	return get_random_u32_below(ep_ro);
 }
 
 /*
diff --git a/include/linux/random.h b/include/linux/random.h
index 147a5e0d0b8e..3a82c0a8bc46 100644
--- a/include/linux/random.h
+++ b/include/linux/random.h
@@ -51,6 +51,46 @@ static inline unsigned long get_random_long(void)
 #endif
 }
 
+u32 __get_random_u32_below(u32 ceil);
+
+/*
+ * Returns a random integer in the interval [0, ceil), with uniform
+ * distribution, suitable for all uses. Fastest when ceil is a constant, but
+ * still fast for variable ceil as well.
+ */
+static inline u32 get_random_u32_below(u32 ceil)
+{
+	if (!__builtin_constant_p(ceil))
+		return __get_random_u32_below(ceil);
+
+	/*
+	 * For the fast path, below, all operations on ceil are precomputed by
+	 * the compiler, so this incurs no overhead for checking pow2, doing
+	 * divisions, or branching based on integer size. The resultant
+	 * algorithm does traditional reciprocal multiplication (typically
+	 * optimized by the compiler into shifts and adds), rejecting samples
+	 * whose lower half would indicate a range indivisible by ceil.
+	 */
+	BUILD_BUG_ON_MSG(!ceil, "get_random_u32_below() must take ceil > 0");
+	if (ceil <= 1)
+		return 0;
+	for (;;) {
+		if (ceil <= 1U << 8) {
+			u32 mult = ceil * get_random_u8();
+			if (likely(is_power_of_2(ceil) || (u8)mult >= (1U << 8) % ceil))
+				return mult >> 8;
+		} else if (ceil <= 1U << 16) {
+			u32 mult = ceil * get_random_u16();
+			if (likely(is_power_of_2(ceil) || (u16)mult >= (1U << 16) % ceil))
+				return mult >> 16;
+		} else {
+			u64 mult = (u64)ceil * get_random_u32();
+			if (likely(is_power_of_2(ceil) || (u32)mult >= -ceil % ceil))
+				return mult >> 32;
+		}
+	}
+}
+
 /*
  * On 64-bit architectures, protect against non-terminated C string overflows
  * by zeroing out the first byte of the canary; this leaves 56 bits of entropy.
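The rejection step is the subtle part of the new helper: multiplying a uniform w-bit
word by ceil and keeping the high bits is biased whenever 2^w is not divisible by
ceil, and discarding products whose low half falls below 2^w mod ceil removes exactly
that bias (this is why the old prandom_u32_max() was documented as unfit for
cryptographic use). The following userspace sketch demonstrates the same
multiply-and-reject technique for the 32-bit branch only; the xorshift32 generator
and the random_u32_below() name here are illustrative stand-ins for the kernel's
get_random_u32() and get_random_u32_below(), not real APIs, and the constant-ceil
fast paths through 8- and 16-bit draws are omitted.

/*
 * Userspace sketch of the multiply-and-reject loop from the diff above.
 * The xorshift32 generator stands in for get_random_u32() and is NOT
 * cryptographically secure; it only makes the example self-contained.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t rng_state = 0x9e3779b9;	/* arbitrary nonzero seed */

/* Stand-in entropy source, replacing the kernel's get_random_u32(). */
static uint32_t random_u32(void)
{
	uint32_t x = rng_state;

	x ^= x << 13;
	x ^= x >> 17;
	x ^= x << 5;
	return rng_state = x;
}

/* Uniform sample in [0, ceil); caller must pass ceil > 0, as the kernel
 * version enforces at compile time with BUILD_BUG_ON_MSG(). */
static uint32_t random_u32_below(uint32_t ceil)
{
	for (;;) {
		uint64_t mult = (uint64_t)ceil * random_u32();

		/*
		 * In 32-bit unsigned arithmetic, -ceil % ceil == 2^32 % ceil.
		 * Products whose low 32 bits fall below that threshold belong
		 * to over-represented outputs, so they are thrown away; what
		 * remains maps each output to the same number of inputs.
		 */
		if ((uint32_t)mult >= -ceil % ceil)
			return (uint32_t)(mult >> 32);
	}
}

int main(void)
{
	uint32_t counts[6] = { 0 };

	/* Roll a die 60000 times; each face should land near 10000. */
	for (int i = 0; i < 60000; i++)
		counts[random_u32_below(6)]++;
	for (int i = 0; i < 6; i++)
		printf("face %d: %u\n", i + 1, counts[i]);
	return 0;
}

Note that when ceil is a power of two, 2^32 mod ceil is zero and the rejection test
always passes, which is why the kernel version lets is_power_of_2() short-circuit
the comparison: for constant ceil the compiler folds it away entirely, leaving just
the multiply and shift.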
