rand_core/block.rs
1// Copyright 2018 Developers of the Rand project.
2//
3// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
4// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
5// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
6// option. This file may not be copied, modified, or distributed
7// except according to those terms.
8
9//! The `BlockRngCore` trait and implementation helpers
10//!
11//! The [`BlockRngCore`] trait exists to assist in the implementation of RNGs
12//! which generate a block of data in a cache instead of returning generated
13//! values directly.
14//!
15//! Usage of this trait is optional, but provides two advantages:
16//! implementations only need to concern themselves with generation of the
17//! block, not the various [`RngCore`] methods (especially [`fill_bytes`], where
18//! the optimal implementations are not trivial), and this allows
19//! `ReseedingRng` (see [`rand`](https://docs.rs/rand) crate) perform periodic
20//! reseeding with very low overhead.
21//!
22//! # Example
23//!
//! ```ignore
25//! use rand_core::block::{BlockRngCore, BlockRng};
26//!
27//! struct MyRngCore;
28//!
29//! impl BlockRngCore for MyRngCore {
30//! type Results = [u32; 16];
31//!
32//! fn generate(&mut self, results: &mut Self::Results) {
33//! unimplemented!()
34//! }
35//! }
36//!
37//! impl SeedableRng for MyRngCore {
//! type Seed = [u8; 32];
39//! fn from_seed(seed: Self::Seed) -> Self {
40//! unimplemented!()
41//! }
42//! }
43//!
44//! // optionally, also implement CryptoRng for MyRngCore
45//!
46//! // Final RNG.
47//! type MyRng = BlockRng<u32, MyRngCore>;
48//! ```
49//!
50//! [`BlockRngCore`]: crate::block::BlockRngCore
51//! [`fill_bytes`]: RngCore::fill_bytes
52
53use core::convert::AsRef;
54use core::{fmt, ptr};
55use {RngCore, CryptoRng, SeedableRng, Error};
56use impls::{fill_via_u32_chunks, fill_via_u64_chunks};
57
/// A trait for RNGs which do not generate random numbers individually, but in
/// blocks (typically `[u32; N]`). This technique is commonly used by
/// cryptographic RNGs to improve performance.
///
/// See the [module][crate::block] documentation for details.
pub trait BlockRngCore {
    /// Results element type, e.g. `u32`.
    type Item;

    /// Results type. This is the 'block' an RNG implementing `BlockRngCore`
    /// generates, which will usually be an array like `[u32; 16]`.
    type Results: AsRef<[Self::Item]> + AsMut<[Self::Item]> + Default;

    /// Generate a new block of results, overwriting the previous contents of
    /// `results`.
    fn generate(&mut self, results: &mut Self::Results);
}
74
75
/// A wrapper type implementing [`RngCore`] for some type implementing
/// [`BlockRngCore`] with `u32` array buffer; i.e. this can be used to implement
/// a full RNG from just a `generate` function.
///
/// The `core` field may be accessed directly but the results buffer may not.
/// PRNG implementations can simply use a type alias
/// (`pub type MyRng = BlockRng<MyRngCore>;`) but might prefer to use a
/// wrapper type (`pub struct MyRng(BlockRng<MyRngCore>);`); the latter must
/// re-implement `RngCore` but hides the implementation details and allows
/// extra functionality to be defined on the RNG
/// (e.g. `impl MyRng { fn set_stream(...){...} }`).
///
/// `BlockRng` has heavily optimized implementations of the [`RngCore`] methods
/// reading values from the results buffer, as well as
/// calling [`BlockRngCore::generate`] directly on the output array when
/// [`fill_bytes`] / [`try_fill_bytes`] is called on a large array. These methods
/// also handle the bookkeeping of when to generate a new batch of values.
///
/// No whole generated `u32` values are thrown away and all values are consumed
/// in-order. [`next_u32`] simply takes the next available `u32` value.
/// [`next_u64`] is implemented by combining two `u32` values, least
/// significant first. [`fill_bytes`] and [`try_fill_bytes`] consume a whole
/// number of `u32` values, converting each `u32` to a byte slice in
/// little-endian order. If the requested byte length is not a multiple of 4,
/// some bytes will be discarded.
///
/// See also [`BlockRng64`] which uses `u64` array buffers. Currently there is
/// no direct support for other buffer types.
///
/// For easy initialization `BlockRng` also implements [`SeedableRng`].
///
/// [`next_u32`]: RngCore::next_u32
/// [`next_u64`]: RngCore::next_u64
/// [`fill_bytes`]: RngCore::fill_bytes
/// [`try_fill_bytes`]: RngCore::try_fill_bytes
#[derive(Clone)]
#[cfg_attr(feature="serde1", derive(Serialize, Deserialize))]
pub struct BlockRng<R: BlockRngCore + ?Sized> {
    // Buffer of generated values; only `results[index..]` is still unconsumed.
    results: R::Results,
    // Position of the next unconsumed value; `index >= results.len()` marks
    // the buffer as empty, forcing regeneration on next use.
    index: usize,
    /// The *core* part of the RNG, implementing the `generate` function.
    pub core: R,
}
119
120// Custom Debug implementation that does not expose the contents of `results`.
121impl<R: BlockRngCore + fmt::Debug> fmt::Debug for BlockRng<R> {
122 fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
123 fmt.debug_struct("BlockRng")
124 .field("core", &self.core)
125 .field("result_len", &self.results.as_ref().len())
126 .field("index", &self.index)
127 .finish()
128 }
129}
130
131impl<R: BlockRngCore> BlockRng<R> {
132 /// Create a new `BlockRng` from an existing RNG implementing
133 /// `BlockRngCore`. Results will be generated on first use.
134 #[inline]
135 pub fn new(core: R) -> BlockRng<R>{
136 let results_empty = R::Results::default();
137 BlockRng {
138 core,
139 index: results_empty.as_ref().len(),
140 results: results_empty,
141 }
142 }
143
144 /// Get the index into the result buffer.
145 ///
146 /// If this is equal to or larger than the size of the result buffer then
147 /// the buffer is "empty" and `generate()` must be called to produce new
148 /// results.
149 #[inline(always)]
150 pub fn index(&self) -> usize {
151 self.index
152 }
153
154 /// Reset the number of available results.
155 /// This will force a new set of results to be generated on next use.
156 #[inline]
157 pub fn reset(&mut self) {
158 self.index = self.results.as_ref().len();
159 }
160
161 /// Generate a new set of results immediately, setting the index to the
162 /// given value.
163 #[inline]
164 pub fn generate_and_set(&mut self, index: usize) {
165 assert!(index < self.results.as_ref().len());
166 self.core.generate(&mut self.results);
167 self.index = index;
168 }
169}
170
171impl<R: BlockRngCore<Item=u32>> RngCore for BlockRng<R>
172where <R as BlockRngCore>::Results: AsRef<[u32]> + AsMut<[u32]>
173{
174 #[inline]
175 fn next_u32(&mut self) -> u32 {
176 if self.index >= self.results.as_ref().len() {
177 self.generate_and_set(0);
178 }
179
180 let value = self.results.as_ref()[self.index];
181 self.index += 1;
182 value
183 }
184
185 #[inline]
186 fn next_u64(&mut self) -> u64 {
187 let read_u64 = |results: &[u32], index| {
188 if cfg!(any(target_endian = "little")) {
189 // requires little-endian CPU
190 let ptr: *const u64 = results[index..index+2].as_ptr() as *const u64;
191 unsafe { ptr::read_unaligned(ptr) }
192 } else {
193 let x = u64::from(results[index]);
194 let y = u64::from(results[index + 1]);
195 (y << 32) | x
196 }
197 };
198
199 let len = self.results.as_ref().len();
200
201 let index = self.index;
202 if index < len-1 {
203 self.index += 2;
204 // Read an u64 from the current index
205 read_u64(self.results.as_ref(), index)
206 } else if index >= len {
207 self.generate_and_set(2);
208 read_u64(self.results.as_ref(), 0)
209 } else {
210 let x = u64::from(self.results.as_ref()[len-1]);
211 self.generate_and_set(1);
212 let y = u64::from(self.results.as_ref()[0]);
213 (y << 32) | x
214 }
215 }
216
217 #[inline]
218 fn fill_bytes(&mut self, dest: &mut [u8]) {
219 let mut read_len = 0;
220 while read_len < dest.len() {
221 if self.index >= self.results.as_ref().len() {
222 self.generate_and_set(0);
223 }
224 let (consumed_u32, filled_u8) =
225 fill_via_u32_chunks(&self.results.as_ref()[self.index..],
226 &mut dest[read_len..]);
227
228 self.index += consumed_u32;
229 read_len += filled_u8;
230 }
231 }
232
233 #[inline(always)]
234 fn try_fill_bytes(&mut self, dest: &mut [u8]) -> Result<(), Error> {
235 Ok(self.fill_bytes(dest))
236 }
237}
238
239impl<R: BlockRngCore + SeedableRng> SeedableRng for BlockRng<R> {
240 type Seed = R::Seed;
241
242 #[inline(always)]
243 fn from_seed(seed: Self::Seed) -> Self {
244 Self::new(R::from_seed(seed))
245 }
246
247 #[inline(always)]
248 fn seed_from_u64(seed: u64) -> Self {
249 Self::new(R::seed_from_u64(seed))
250 }
251
252 #[inline(always)]
253 fn from_rng<S: RngCore>(rng: S) -> Result<Self, Error> {
254 Ok(Self::new(R::from_rng(rng)?))
255 }
256}
257
258
259
260/// A wrapper type implementing [`RngCore`] for some type implementing
261/// [`BlockRngCore`] with `u64` array buffer; i.e. this can be used to implement
262/// a full RNG from just a `generate` function.
263///
264/// This is similar to [`BlockRng`], but specialized for algorithms that operate
265/// on `u64` values.
266///
267/// No whole generated `u64` values are thrown away and all values are consumed
268/// in-order. [`next_u64`] simply takes the next available `u64` value.
269/// [`next_u32`] is however a bit special: half of a `u64` is consumed, leaving
270/// the other half in the buffer. If the next function called is [`next_u32`]
271/// then the other half is then consumed, however both [`next_u64`] and
272/// [`fill_bytes`] discard the rest of any half-consumed `u64`s when called.
273///
274/// [`fill_bytes`] and [`try_fill_bytes`] consume a whole number of `u64`
275/// values. If the requested length is not a multiple of 8, some bytes will be
276/// discarded.
277///
278/// [`next_u32`]: RngCore::next_u32
279/// [`next_u64`]: RngCore::next_u64
280/// [`fill_bytes`]: RngCore::fill_bytes
281/// [`try_fill_bytes`]: RngCore::try_fill_bytes
282#[derive(Clone)]
283#[cfg_attr(feature="serde1", derive(Serialize, Deserialize))]
284pub struct BlockRng64<R: BlockRngCore + ?Sized> {
285 results: R::Results,
286 index: usize,
287 half_used: bool, // true if only half of the previous result is used
288 /// The *core* part of the RNG, implementing the `generate` function.
289 pub core: R,
290}
291
292// Custom Debug implementation that does not expose the contents of `results`.
293impl<R: BlockRngCore + fmt::Debug> fmt::Debug for BlockRng64<R> {
294 fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
295 fmt.debug_struct("BlockRng64")
296 .field("core", &self.core)
297 .field("result_len", &self.results.as_ref().len())
298 .field("index", &self.index)
299 .field("half_used", &self.half_used)
300 .finish()
301 }
302}
303
304impl<R: BlockRngCore> BlockRng64<R> {
305 /// Create a new `BlockRng` from an existing RNG implementing
306 /// `BlockRngCore`. Results will be generated on first use.
307 #[inline]
308 pub fn new(core: R) -> BlockRng64<R>{
309 let results_empty = R::Results::default();
310 BlockRng64 {
311 core,
312 index: results_empty.as_ref().len(),
313 half_used: false,
314 results: results_empty,
315 }
316 }
317
318 /// Get the index into the result buffer.
319 ///
320 /// If this is equal to or larger than the size of the result buffer then
321 /// the buffer is "empty" and `generate()` must be called to produce new
322 /// results.
323 #[inline(always)]
324 pub fn index(&self) -> usize {
325 self.index
326 }
327
328 /// Reset the number of available results.
329 /// This will force a new set of results to be generated on next use.
330 #[inline]
331 pub fn reset(&mut self) {
332 self.index = self.results.as_ref().len();
333 self.half_used = false;
334 }
335
336 /// Generate a new set of results immediately, setting the index to the
337 /// given value.
338 #[inline]
339 pub fn generate_and_set(&mut self, index: usize) {
340 assert!(index < self.results.as_ref().len());
341 self.core.generate(&mut self.results);
342 self.index = index;
343 self.half_used = false;
344 }
345}
346
347impl<R: BlockRngCore<Item=u64>> RngCore for BlockRng64<R>
348where <R as BlockRngCore>::Results: AsRef<[u64]> + AsMut<[u64]>
349{
350 #[inline]
351 fn next_u32(&mut self) -> u32 {
352 let mut index = self.index * 2 - self.half_used as usize;
353 if index >= self.results.as_ref().len() * 2 {
354 self.core.generate(&mut self.results);
355 self.index = 0;
356 // `self.half_used` is by definition `false`
357 self.half_used = false;
358 index = 0;
359 }
360
361 self.half_used = !self.half_used;
362 self.index += self.half_used as usize;
363
364 // Index as if this is a u32 slice.
365 unsafe {
366 let results =
367 &*(self.results.as_ref() as *const [u64] as *const [u32]);
368 if cfg!(target_endian = "little") {
369 *results.get_unchecked(index)
370 } else {
371 *results.get_unchecked(index ^ 1)
372 }
373 }
374 }
375
376 #[inline]
377 fn next_u64(&mut self) -> u64 {
378 if self.index >= self.results.as_ref().len() {
379 self.core.generate(&mut self.results);
380 self.index = 0;
381 }
382
383 let value = self.results.as_ref()[self.index];
384 self.index += 1;
385 self.half_used = false;
386 value
387 }
388
389 #[inline]
390 fn fill_bytes(&mut self, dest: &mut [u8]) {
391 let mut read_len = 0;
392 self.half_used = false;
393 while read_len < dest.len() {
394 if self.index as usize >= self.results.as_ref().len() {
395 self.core.generate(&mut self.results);
396 self.index = 0;
397 }
398
399 let (consumed_u64, filled_u8) =
400 fill_via_u64_chunks(&self.results.as_ref()[self.index as usize..],
401 &mut dest[read_len..]);
402
403 self.index += consumed_u64;
404 read_len += filled_u8;
405 }
406 }
407
408 #[inline(always)]
409 fn try_fill_bytes(&mut self, dest: &mut [u8]) -> Result<(), Error> {
410 Ok(self.fill_bytes(dest))
411 }
412}
413
414impl<R: BlockRngCore + SeedableRng> SeedableRng for BlockRng64<R> {
415 type Seed = R::Seed;
416
417 #[inline(always)]
418 fn from_seed(seed: Self::Seed) -> Self {
419 Self::new(R::from_seed(seed))
420 }
421
422 #[inline(always)]
423 fn seed_from_u64(seed: u64) -> Self {
424 Self::new(R::seed_from_u64(seed))
425 }
426
427 #[inline(always)]
428 fn from_rng<S: RngCore>(rng: S) -> Result<Self, Error> {
429 Ok(Self::new(R::from_rng(rng)?))
430 }
431}
432
// Marker: a `BlockRng` is a cryptographically secure RNG whenever its core is.
impl<R: BlockRngCore + CryptoRng> CryptoRng for BlockRng<R> {}