Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | /* |
2 | * random.c -- A strong random number generator | |
3 | * | |
9e95ce27 | 4 | * Copyright Matt Mackall <mpm@selenic.com>, 2003, 2004, 2005 |
1da177e4 LT |
5 | * |
6 | * Copyright Theodore Ts'o, 1994, 1995, 1996, 1997, 1998, 1999. All | |
7 | * rights reserved. | |
8 | * | |
9 | * Redistribution and use in source and binary forms, with or without | |
10 | * modification, are permitted provided that the following conditions | |
11 | * are met: | |
12 | * 1. Redistributions of source code must retain the above copyright | |
13 | * notice, and the entire permission notice in its entirety, | |
14 | * including the disclaimer of warranties. | |
15 | * 2. Redistributions in binary form must reproduce the above copyright | |
16 | * notice, this list of conditions and the following disclaimer in the | |
17 | * documentation and/or other materials provided with the distribution. | |
18 | * 3. The name of the author may not be used to endorse or promote | |
19 | * products derived from this software without specific prior | |
20 | * written permission. | |
21 | * | |
22 | * ALTERNATIVELY, this product may be distributed under the terms of | |
23 | * the GNU General Public License, in which case the provisions of the GPL are | |
24 | * required INSTEAD OF the above restrictions. (This clause is | |
25 | * necessary due to a potential bad interaction between the GPL and | |
26 | * the restrictions contained in a BSD-style copyright.) | |
27 | * | |
28 | * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED | |
29 | * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES | |
30 | * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ALL OF | |
31 | * WHICH ARE HEREBY DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE | |
32 | * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR | |
33 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT | |
34 | * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR | |
35 | * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF | |
36 | * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | |
37 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE | |
38 | * USE OF THIS SOFTWARE, EVEN IF NOT ADVISED OF THE POSSIBILITY OF SUCH | |
39 | * DAMAGE. | |
40 | */ | |
41 | ||
42 | /* | |
43 | * (now, with legal B.S. out of the way.....) | |
44 | * | |
45 | * This routine gathers environmental noise from device drivers, etc., | |
46 | * and returns good random numbers, suitable for cryptographic use. | |
47 | * Besides the obvious cryptographic uses, these numbers are also good | |
48 | * for seeding TCP sequence numbers, and other places where it is | |
49 | * desirable to have numbers which are not only random, but hard to | |
50 | * predict by an attacker. | |
51 | * | |
52 | * Theory of operation | |
53 | * =================== | |
54 | * | |
55 | * Computers are very predictable devices. Hence it is extremely hard | |
56 | * to produce truly random numbers on a computer --- as opposed to | |
57 | * pseudo-random numbers, which can easily be generated by using an | |
58 | * algorithm. Unfortunately, it is very easy for attackers to guess | |
59 | * the sequence of pseudo-random number generators, and for some | |
60 | * applications this is not acceptable. So instead, we must try to | |
61 | * gather "environmental noise" from the computer's environment, which | |
62 | * must be hard for outside attackers to observe, and use that to | |
63 | * generate random numbers. In a Unix environment, this is best done | |
64 | * from inside the kernel. | |
65 | * | |
66 | * Sources of randomness from the environment include inter-keyboard | |
67 | * timings, inter-interrupt timings from some interrupts, and other | |
68 | * events which are both (a) non-deterministic and (b) hard for an | |
69 | * outside observer to measure. Randomness from these sources are | |
70 | * added to an "entropy pool", which is mixed using a CRC-like function. | |
71 | * This is not cryptographically strong, but it is adequate assuming | |
72 | * the randomness is not chosen maliciously, and it is fast enough that | |
73 | * the overhead of doing it on every interrupt is very reasonable. | |
74 | * As random bytes are mixed into the entropy pool, the routines keep | |
75 | * an *estimate* of how many bits of randomness have been stored into | |
76 | * the random number generator's internal state. | |
77 | * | |
78 | * When random bytes are desired, they are obtained by taking the SHA | |
79 | * hash of the contents of the "entropy pool". The SHA hash avoids | |
80 | * exposing the internal state of the entropy pool. It is believed to | |
81 | * be computationally infeasible to derive any useful information | |
82 | * about the input of SHA from its output. Even if it is possible to | |
83 | * analyze SHA in some clever way, as long as the amount of data | |
84 | * returned from the generator is less than the inherent entropy in | |
85 | * the pool, the output data is totally unpredictable. For this | |
86 | * reason, the routine decreases its internal estimate of how many | |
87 | * bits of "true randomness" are contained in the entropy pool as it | |
88 | * outputs random numbers. | |
89 | * | |
90 | * If this estimate goes to zero, the routine can still generate | |
91 | * random numbers; however, an attacker may (at least in theory) be | |
92 | * able to infer the future output of the generator from prior | |
93 | * outputs. This requires successful cryptanalysis of SHA, which is | |
94 | * not believed to be feasible, but there is a remote possibility. | |
95 | * Nonetheless, these numbers should be useful for the vast majority | |
96 | * of purposes. | |
97 | * | |
98 | * Exported interfaces ---- output | |
99 | * =============================== | |
100 | * | |
101 | * There are three exported interfaces; the first is one designed to | |
102 | * be used from within the kernel: | |
103 | * | |
104 | * void get_random_bytes(void *buf, int nbytes); | |
105 | * | |
106 | * This interface will return the requested number of random bytes, | |
107 | * and place it in the requested buffer. | |
108 | * | |
109 | * The two other interfaces are two character devices /dev/random and | |
110 | * /dev/urandom. /dev/random is suitable for use when very high | |
111 | * quality randomness is desired (for example, for key generation or | |
112 | * one-time pads), as it will only return a maximum of the number of | |
113 | * bits of randomness (as estimated by the random number generator) | |
114 | * contained in the entropy pool. | |
115 | * | |
116 | * The /dev/urandom device does not have this limit, and will return | |
117 | * as many bytes as are requested. As more and more random bytes are | |
118 | * requested without giving time for the entropy pool to recharge, | |
119 | * this will result in random numbers that are merely cryptographically | |
120 | * strong. For many applications, however, this is acceptable. | |
121 | * | |
122 | * Exported interfaces ---- input | |
123 | * ============================== | |
124 | * | |
125 | * The current exported interfaces for gathering environmental noise | |
126 | * from the devices are: | |
127 | * | |
a2080a67 | 128 | * void add_device_randomness(const void *buf, unsigned int size); |
1da177e4 LT |
129 | * void add_input_randomness(unsigned int type, unsigned int code, |
130 | * unsigned int value); | |
775f4b29 | 131 | * void add_interrupt_randomness(int irq, int irq_flags); |
442a4fff | 132 | * void add_disk_randomness(struct gendisk *disk); |
1da177e4 | 133 | * |
a2080a67 LT |
134 | * add_device_randomness() is for adding data to the random pool that |
135 | * is likely to differ between two devices (or possibly even per boot). | |
136 | * This would be things like MAC addresses or serial numbers, or the | |
137 | * read-out of the RTC. This does *not* add any actual entropy to the | |
138 | * pool, but it initializes the pool to different values for devices | |
139 | * that might otherwise be identical and have very little entropy | |
140 | * available to them (particularly common in the embedded world). | |
141 | * | |
1da177e4 LT |
142 | * add_input_randomness() uses the input layer interrupt timing, as well as |
143 | * the event type information from the hardware. | |
144 | * | |
775f4b29 TT |
145 | * add_interrupt_randomness() uses the interrupt timing as random |
146 | * inputs to the entropy pool. Using the cycle counters and the irq source | |
147 | * as inputs, it feeds the randomness roughly once a second. | |
442a4fff JW |
148 | * |
149 | * add_disk_randomness() uses what amounts to the seek time of block | |
150 | * layer request events, on a per-disk_devt basis, as input to the | |
151 | * entropy pool. Note that high-speed solid state drives with very low | |
152 | * seek times do not make for good sources of entropy, as their seek | |
153 | * times are usually fairly consistent. | |
1da177e4 LT |
154 | * |
155 | * All of these routines try to estimate how many bits of randomness a | |
156 | * particular randomness source has.  They do this by keeping track of the | |
157 | * first and second order deltas of the event timings. | |
158 | * | |
159 | * Ensuring unpredictability at system startup | |
160 | * ============================================ | |
161 | * | |
162 | * When any operating system starts up, it will go through a sequence | |
163 | * of actions that are fairly predictable by an adversary, especially | |
164 | * if the start-up does not involve interaction with a human operator. | |
165 | * This reduces the actual number of bits of unpredictability in the | |
166 | * entropy pool below the value in entropy_count. In order to | |
167 | * counteract this effect, it helps to carry information in the | |
168 | * entropy pool across shut-downs and start-ups. To do this, put the | |
169 | * following lines an appropriate script which is run during the boot | |
170 | * sequence: | |
171 | * | |
172 | * echo "Initializing random number generator..." | |
173 | * random_seed=/var/run/random-seed | |
174 | * # Carry a random seed from start-up to start-up | |
175 | * # Load and then save the whole entropy pool | |
176 | * if [ -f $random_seed ]; then | |
177 | * cat $random_seed >/dev/urandom | |
178 | * else | |
179 | * touch $random_seed | |
180 | * fi | |
181 | * chmod 600 $random_seed | |
182 | * dd if=/dev/urandom of=$random_seed count=1 bs=512 | |
183 | * | |
184 | * and the following lines in an appropriate script which is run as | |
185 | * the system is shutdown: | |
186 | * | |
187 | * # Carry a random seed from shut-down to start-up | |
188 | * # Save the whole entropy pool | |
189 | * echo "Saving random seed..." | |
190 | * random_seed=/var/run/random-seed | |
191 | * touch $random_seed | |
192 | * chmod 600 $random_seed | |
193 | * dd if=/dev/urandom of=$random_seed count=1 bs=512 | |
194 | * | |
195 | * For example, on most modern systems using the System V init | |
196 | * scripts, such code fragments would be found in | |
197 | * /etc/rc.d/init.d/random. On older Linux systems, the correct script | |
198 | * location might be in /etc/rcb.d/rc.local or /etc/rc.d/rc.0. | |
199 | * | |
200 | * Effectively, these commands cause the contents of the entropy pool | |
201 | * to be saved at shut-down time and reloaded into the entropy pool at | |
202 | * start-up. (The 'dd' in the addition to the bootup script is to | |
203 | * make sure that /etc/random-seed is different for every start-up, | |
204 | * even if the system crashes without executing rc.0.) Even with | |
205 | * complete knowledge of the start-up activities, predicting the state | |
206 | * of the entropy pool requires knowledge of the previous history of | |
207 | * the system. | |
208 | * | |
209 | * Configuring the /dev/random driver under Linux | |
210 | * ============================================== | |
211 | * | |
212 | * The /dev/random driver under Linux uses minor numbers 8 and 9 of | |
213 | * the /dev/mem major number (#1). So if your system does not have | |
214 | * /dev/random and /dev/urandom created already, they can be created | |
215 | * by using the commands: | |
216 | * | |
217 | * mknod /dev/random c 1 8 | |
218 | * mknod /dev/urandom c 1 9 | |
219 | * | |
220 | * Acknowledgements: | |
221 | * ================= | |
222 | * | |
223 | * Ideas for constructing this random number generator were derived | |
224 | * from Pretty Good Privacy's random number generator, and from private | |
225 | * discussions with Phil Karn. Colin Plumb provided a faster random | |
226 | * number generator, which speed up the mixing function of the entropy | |
227 | * pool, taken from PGPfone. Dale Worley has also contributed many | |
228 | * useful ideas and suggestions to improve this driver. | |
229 | * | |
230 | * Any flaws in the design are solely my responsibility, and should | |
231 | * not be attributed to Phil, Colin, or any of the authors of PGP. | |
232 | * | |
233 | * Further background information on this topic may be obtained from | |
234 | * RFC 1750, "Randomness Recommendations for Security", by Donald | |
235 | * Eastlake, Steve Crocker, and Jeff Schiller. | |
236 | */ | |
237 | ||
238 | #include <linux/utsname.h> | |
1da177e4 LT |
239 | #include <linux/module.h> |
240 | #include <linux/kernel.h> | |
241 | #include <linux/major.h> | |
242 | #include <linux/string.h> | |
243 | #include <linux/fcntl.h> | |
244 | #include <linux/slab.h> | |
245 | #include <linux/random.h> | |
246 | #include <linux/poll.h> | |
247 | #include <linux/init.h> | |
248 | #include <linux/fs.h> | |
249 | #include <linux/genhd.h> | |
250 | #include <linux/interrupt.h> | |
27ac792c | 251 | #include <linux/mm.h> |
1da177e4 LT |
252 | #include <linux/spinlock.h> |
253 | #include <linux/percpu.h> | |
254 | #include <linux/cryptohash.h> | |
5b739ef8 | 255 | #include <linux/fips.h> |
775f4b29 | 256 | #include <linux/ptrace.h> |
e6d4947b | 257 | #include <linux/kmemcheck.h> |
1da177e4 | 258 | |
d178a1eb YL |
259 | #ifdef CONFIG_GENERIC_HARDIRQS |
260 | # include <linux/irq.h> | |
261 | #endif | |
262 | ||
1da177e4 LT |
263 | #include <asm/processor.h> |
264 | #include <asm/uaccess.h> | |
265 | #include <asm/irq.h> | |
775f4b29 | 266 | #include <asm/irq_regs.h> |
1da177e4 LT |
267 | #include <asm/io.h> |
268 | ||
00ce1db1 TT |
269 | #define CREATE_TRACE_POINTS |
270 | #include <trace/events/random.h> | |
271 | ||
/*
 * Configuration information
 */
#define INPUT_POOL_SHIFT	12
#define INPUT_POOL_WORDS	(1 << (INPUT_POOL_SHIFT-5))
#define OUTPUT_POOL_SHIFT	10
#define OUTPUT_POOL_WORDS	(1 << (OUTPUT_POOL_SHIFT-5))
#define SEC_XFER_SIZE		512
#define EXTRACT_SIZE		10

#define LONGS(x) (((x) + sizeof(unsigned long) - 1)/sizeof(unsigned long))

/*
 * To allow fractional bits to be tracked, the following fields contain
 * this many fractional bits:
 *
 *	entropy_count, trickle_thresh
 *
 * 2*(ENTROPY_SHIFT + log2(poolbits)) must <= 31, or the multiply in
 * credit_entropy_bits() needs to be 64 bits wide.
 */
#define ENTROPY_SHIFT 3
#define ENTROPY_BITS(r) ((r)->entropy_count >> ENTROPY_SHIFT)

/*
 * The minimum number of bits of entropy before we wake up a read on
 * /dev/random.  Should be enough to do a significant reseed.
 */
static int random_read_wakeup_thresh = 64;

/*
 * If the entropy count falls under this number of bits, then we
 * should wake up processes which are selecting or polling on write
 * access to /dev/random.
 */
static int random_write_wakeup_thresh = 128;

/*
 * The minimum number of seconds between urandom pool reseeding.  We
 * do this to limit the amount of entropy that can be drained from the
 * input pool even if there are heavy demands on /dev/urandom.
 */
static int random_min_urandom_seed = 60;

1da177e4 LT |
316 | /* |
317 | * When the input pool goes over trickle_thresh, start dropping most | |
318 | * samples to avoid wasting CPU time and reduce lock contention. | |
319 | */ | |
a283b5c4 | 320 | static const int trickle_thresh = (INPUT_POOL_WORDS * 28) << ENTROPY_SHIFT; |
1da177e4 | 321 | |
90b75ee5 | 322 | static DEFINE_PER_CPU(int, trickle_count); |
1da177e4 LT |
323 | |
324 | /* | |
325 | * A pool of size .poolwords is stirred with a primitive polynomial | |
326 | * of degree .poolwords over GF(2). The taps for various sizes are | |
327 | * defined below. They are chosen to be evenly spaced (minimum RMS | |
328 | * distance from evenly spaced; the numbers in the comments are a | |
329 | * scaled squared error sum) except for the last tap, which is 1 to | |
330 | * get the twisting happening as fast as possible. | |
331 | */ | |
9ed17b70 | 332 | |
1da177e4 | 333 | static struct poolinfo { |
a283b5c4 PA |
334 | int poolbitshift, poolwords, poolbytes, poolbits, poolfracbits; |
335 | #define S(x) ilog2(x)+5, (x), (x)*4, (x)*32, (x) << (ENTROPY_SHIFT+5) | |
1da177e4 LT |
336 | int tap1, tap2, tap3, tap4, tap5; |
337 | } poolinfo_table[] = { | |
338 | /* x^128 + x^103 + x^76 + x^51 +x^25 + x + 1 -- 105 */ | |
9ed17b70 | 339 | { S(128), 103, 76, 51, 25, 1 }, |
1da177e4 | 340 | /* x^32 + x^26 + x^20 + x^14 + x^7 + x + 1 -- 15 */ |
9ed17b70 | 341 | { S(32), 26, 20, 14, 7, 1 }, |
1da177e4 LT |
342 | #if 0 |
343 | /* x^2048 + x^1638 + x^1231 + x^819 + x^411 + x + 1 -- 115 */ | |
9ed17b70 | 344 | { S(2048), 1638, 1231, 819, 411, 1 }, |
1da177e4 LT |
345 | |
346 | /* x^1024 + x^817 + x^615 + x^412 + x^204 + x + 1 -- 290 */ | |
9ed17b70 | 347 | { S(1024), 817, 615, 412, 204, 1 }, |
1da177e4 LT |
348 | |
349 | /* x^1024 + x^819 + x^616 + x^410 + x^207 + x^2 + 1 -- 115 */ | |
9ed17b70 | 350 | { S(1024), 819, 616, 410, 207, 2 }, |
1da177e4 LT |
351 | |
352 | /* x^512 + x^411 + x^308 + x^208 + x^104 + x + 1 -- 225 */ | |
9ed17b70 | 353 | { S(512), 411, 308, 208, 104, 1 }, |
1da177e4 LT |
354 | |
355 | /* x^512 + x^409 + x^307 + x^206 + x^102 + x^2 + 1 -- 95 */ | |
9ed17b70 | 356 | { S(512), 409, 307, 206, 102, 2 }, |
1da177e4 | 357 | /* x^512 + x^409 + x^309 + x^205 + x^103 + x^2 + 1 -- 95 */ |
9ed17b70 | 358 | { S(512), 409, 309, 205, 103, 2 }, |
1da177e4 LT |
359 | |
360 | /* x^256 + x^205 + x^155 + x^101 + x^52 + x + 1 -- 125 */ | |
9ed17b70 | 361 | { S(256), 205, 155, 101, 52, 1 }, |
1da177e4 LT |
362 | |
363 | /* x^128 + x^103 + x^78 + x^51 + x^27 + x^2 + 1 -- 70 */ | |
9ed17b70 | 364 | { S(128), 103, 78, 51, 27, 2 }, |
1da177e4 LT |
365 | |
366 | /* x^64 + x^52 + x^39 + x^26 + x^14 + x + 1 -- 15 */ | |
9ed17b70 | 367 | { S(64), 52, 39, 26, 14, 1 }, |
1da177e4 LT |
368 | #endif |
369 | }; | |
370 | ||
1da177e4 LT |
371 | /* |
372 | * For the purposes of better mixing, we use the CRC-32 polynomial as | |
373 | * well to make a twisted Generalized Feedback Shift Register | |
374 | * | |
375 | * (See M. Matsumoto & Y. Kurita, 1992. Twisted GFSR generators. ACM | |
376 | * Transactions on Modeling and Computer Simulation 2(3):179-194. | |
377 | * Also see M. Matsumoto & Y. Kurita, 1994. Twisted GFSR generators | |
378 | * II.  ACM Transactions on Modeling and Computer Simulation 4:254-266) | |
379 | * | |
380 | * Thanks to Colin Plumb for suggesting this. | |
381 | * | |
382 | * We have not analyzed the resultant polynomial to prove it primitive; | |
383 | * in fact it almost certainly isn't. Nonetheless, the irreducible factors | |
384 | * of a random large-degree polynomial over GF(2) are more than large enough | |
385 | * that periodicity is not a concern. | |
386 | * | |
387 | * The input hash is much less sensitive than the output hash. All | |
388 | * that we want of it is that it be a good non-cryptographic hash; | |
389 | * i.e. it not produce collisions when fed "random" data of the sort | |
390 | * we expect to see. As long as the pool state differs for different | |
391 | * inputs, we have preserved the input entropy and done a good job. | |
392 | * The fact that an intelligent attacker can construct inputs that | |
393 | * will produce controlled alterations to the pool's state is not | |
394 | * important because we don't consider such inputs to contribute any | |
395 | * randomness. The only property we need with respect to them is that | |
396 | * the attacker can't increase his/her knowledge of the pool's state. | |
397 | * Since all additions are reversible (knowing the final state and the | |
398 | * input, you can reconstruct the initial state), if an attacker has | |
399 | * any uncertainty about the initial state, he/she can only shuffle | |
400 | * that uncertainty about, but never cause any collisions (which would | |
401 | * decrease the uncertainty). | |
402 | * | |
403 | * The chosen system lets the state of the pool be (essentially) the input | |
404 | * modulo the generator polynomial.  Now, for random primitive polynomials, | |
405 | * this is a universal class of hash functions, meaning that the chance | |
406 | * of a collision is limited by the attacker's knowledge of the generator | |
407 | * polynomial, so if it is chosen at random, an attacker can never force | |
408 | * a collision. Here, we use a fixed polynomial, but we *can* assume that | |
409 | * ###--> it is unknown to the processes generating the input entropy. <-### | |
410 | * Because of this important property, this is a good, collision-resistant | |
411 | * hash; hash collisions will occur no more often than chance. | |
412 | */ | |
413 | ||
/*
 * Static global variables
 */
static DECLARE_WAIT_QUEUE_HEAD(random_read_wait);
static DECLARE_WAIT_QUEUE_HEAD(random_write_wait);
static struct fasync_struct *fasync;
1da177e4 | 420 | |
90ab5ee9 | 421 | static bool debug; |
1da177e4 | 422 | module_param(debug, bool, 0644); |
90b75ee5 MM |
423 | #define DEBUG_ENT(fmt, arg...) do { \ |
424 | if (debug) \ | |
425 | printk(KERN_DEBUG "random %04d %04d %04d: " \ | |
426 | fmt,\ | |
427 | input_pool.entropy_count,\ | |
428 | blocking_pool.entropy_count,\ | |
429 | nonblocking_pool.entropy_count,\ | |
430 | ## arg); } while (0) | |
1da177e4 LT |
431 | |
432 | /********************************************************************** | |
433 | * | |
434 | * OS independent entropy store. Here are the functions which handle | |
435 | * storing entropy in an entropy pool. | |
436 | * | |
437 | **********************************************************************/ | |
438 | ||
439 | struct entropy_store; | |
440 | struct entropy_store { | |
43358209 | 441 | /* read-only data: */ |
30e37ec5 | 442 | const struct poolinfo *poolinfo; |
1da177e4 LT |
443 | __u32 *pool; |
444 | const char *name; | |
1da177e4 LT |
445 | struct entropy_store *pull; |
446 | ||
447 | /* read-write data: */ | |
f5c2742c | 448 | unsigned long last_pulled; |
43358209 | 449 | spinlock_t lock; |
c59974ae TT |
450 | unsigned short add_ptr; |
451 | unsigned short input_rotate; | |
cda796a3 | 452 | int entropy_count; |
775f4b29 | 453 | int entropy_total; |
775f4b29 | 454 | unsigned int initialized:1; |
c59974ae TT |
455 | unsigned int limit:1; |
456 | unsigned int last_data_init:1; | |
e954bc91 | 457 | __u8 last_data[EXTRACT_SIZE]; |
1da177e4 LT |
458 | }; |
459 | ||
460 | static __u32 input_pool_data[INPUT_POOL_WORDS]; | |
461 | static __u32 blocking_pool_data[OUTPUT_POOL_WORDS]; | |
462 | static __u32 nonblocking_pool_data[OUTPUT_POOL_WORDS]; | |
463 | ||
464 | static struct entropy_store input_pool = { | |
465 | .poolinfo = &poolinfo_table[0], | |
466 | .name = "input", | |
467 | .limit = 1, | |
eece09ec | 468 | .lock = __SPIN_LOCK_UNLOCKED(input_pool.lock), |
1da177e4 LT |
469 | .pool = input_pool_data |
470 | }; | |
471 | ||
472 | static struct entropy_store blocking_pool = { | |
473 | .poolinfo = &poolinfo_table[1], | |
474 | .name = "blocking", | |
475 | .limit = 1, | |
476 | .pull = &input_pool, | |
eece09ec | 477 | .lock = __SPIN_LOCK_UNLOCKED(blocking_pool.lock), |
1da177e4 LT |
478 | .pool = blocking_pool_data |
479 | }; | |
480 | ||
481 | static struct entropy_store nonblocking_pool = { | |
482 | .poolinfo = &poolinfo_table[1], | |
483 | .name = "nonblocking", | |
484 | .pull = &input_pool, | |
eece09ec | 485 | .lock = __SPIN_LOCK_UNLOCKED(nonblocking_pool.lock), |
1da177e4 LT |
486 | .pool = nonblocking_pool_data |
487 | }; | |
488 | ||
775f4b29 TT |
489 | static __u32 const twist_table[8] = { |
490 | 0x00000000, 0x3b6e20c8, 0x76dc4190, 0x4db26158, | |
491 | 0xedb88320, 0xd6d6a3e8, 0x9b64c2b0, 0xa00ae278 }; | |
492 | ||
1da177e4 | 493 | /* |
e68e5b66 | 494 | * This function adds bytes into the entropy "pool". It does not |
1da177e4 | 495 | * update the entropy estimate. The caller should call |
adc782da | 496 | * credit_entropy_bits if this is appropriate. |
1da177e4 LT |
497 | * |
498 | * The pool is stirred with a primitive polynomial of the appropriate | |
499 | * degree, and then twisted. We twist by three bits at a time because | |
500 | * it's cheap to do so and helps slightly in the expected case where | |
501 | * the entropy is concentrated in the low-order bits. | |
502 | */ | |
00ce1db1 TT |
503 | static void _mix_pool_bytes(struct entropy_store *r, const void *in, |
504 | int nbytes, __u8 out[64]) | |
1da177e4 | 505 | { |
993ba211 | 506 | unsigned long i, j, tap1, tap2, tap3, tap4, tap5; |
feee7697 | 507 | int input_rotate; |
1da177e4 | 508 | int wordmask = r->poolinfo->poolwords - 1; |
e68e5b66 | 509 | const char *bytes = in; |
6d38b827 | 510 | __u32 w; |
1da177e4 | 511 | |
1da177e4 LT |
512 | tap1 = r->poolinfo->tap1; |
513 | tap2 = r->poolinfo->tap2; | |
514 | tap3 = r->poolinfo->tap3; | |
515 | tap4 = r->poolinfo->tap4; | |
516 | tap5 = r->poolinfo->tap5; | |
1da177e4 | 517 | |
902c098a TT |
518 | smp_rmb(); |
519 | input_rotate = ACCESS_ONCE(r->input_rotate); | |
520 | i = ACCESS_ONCE(r->add_ptr); | |
1da177e4 | 521 | |
e68e5b66 MM |
522 | /* mix one byte at a time to simplify size handling and churn faster */ |
523 | while (nbytes--) { | |
c59974ae | 524 | w = rol32(*bytes++, input_rotate); |
993ba211 | 525 | i = (i - 1) & wordmask; |
1da177e4 LT |
526 | |
527 | /* XOR in the various taps */ | |
993ba211 | 528 | w ^= r->pool[i]; |
1da177e4 LT |
529 | w ^= r->pool[(i + tap1) & wordmask]; |
530 | w ^= r->pool[(i + tap2) & wordmask]; | |
531 | w ^= r->pool[(i + tap3) & wordmask]; | |
532 | w ^= r->pool[(i + tap4) & wordmask]; | |
533 | w ^= r->pool[(i + tap5) & wordmask]; | |
993ba211 MM |
534 | |
535 | /* Mix the result back in with a twist */ | |
1da177e4 | 536 | r->pool[i] = (w >> 3) ^ twist_table[w & 7]; |
feee7697 MM |
537 | |
538 | /* | |
539 | * Normally, we add 7 bits of rotation to the pool. | |
540 | * At the beginning of the pool, add an extra 7 bits | |
541 | * rotation, so that successive passes spread the | |
542 | * input bits across the pool evenly. | |
543 | */ | |
c59974ae | 544 | input_rotate = (input_rotate + (i ? 7 : 14)) & 31; |
1da177e4 LT |
545 | } |
546 | ||
902c098a TT |
547 | ACCESS_ONCE(r->input_rotate) = input_rotate; |
548 | ACCESS_ONCE(r->add_ptr) = i; | |
549 | smp_wmb(); | |
1da177e4 | 550 | |
993ba211 MM |
551 | if (out) |
552 | for (j = 0; j < 16; j++) | |
e68e5b66 | 553 | ((__u32 *)out)[j] = r->pool[(i - j) & wordmask]; |
1da177e4 LT |
554 | } |
555 | ||
00ce1db1 | 556 | static void __mix_pool_bytes(struct entropy_store *r, const void *in, |
902c098a | 557 | int nbytes, __u8 out[64]) |
00ce1db1 TT |
558 | { |
559 | trace_mix_pool_bytes_nolock(r->name, nbytes, _RET_IP_); | |
560 | _mix_pool_bytes(r, in, nbytes, out); | |
561 | } | |
562 | ||
563 | static void mix_pool_bytes(struct entropy_store *r, const void *in, | |
564 | int nbytes, __u8 out[64]) | |
1da177e4 | 565 | { |
902c098a TT |
566 | unsigned long flags; |
567 | ||
00ce1db1 | 568 | trace_mix_pool_bytes(r->name, nbytes, _RET_IP_); |
902c098a | 569 | spin_lock_irqsave(&r->lock, flags); |
00ce1db1 | 570 | _mix_pool_bytes(r, in, nbytes, out); |
902c098a | 571 | spin_unlock_irqrestore(&r->lock, flags); |
1da177e4 LT |
572 | } |
573 | ||
775f4b29 TT |
574 | struct fast_pool { |
575 | __u32 pool[4]; | |
576 | unsigned long last; | |
577 | unsigned short count; | |
578 | unsigned char rotate; | |
579 | unsigned char last_timer_intr; | |
580 | }; | |
581 | ||
582 | /* | |
583 | * This is a fast mixing routine used by the interrupt randomness | |
584 | * collector. It's hardcoded for an 128 bit pool and assumes that any | |
585 | * locks that might be needed are taken by the caller. | |
586 | */ | |
587 | static void fast_mix(struct fast_pool *f, const void *in, int nbytes) | |
588 | { | |
589 | const char *bytes = in; | |
590 | __u32 w; | |
591 | unsigned i = f->count; | |
592 | unsigned input_rotate = f->rotate; | |
593 | ||
594 | while (nbytes--) { | |
595 | w = rol32(*bytes++, input_rotate & 31) ^ f->pool[i & 3] ^ | |
596 | f->pool[(i + 1) & 3]; | |
597 | f->pool[i & 3] = (w >> 3) ^ twist_table[w & 7]; | |
598 | input_rotate += (i++ & 3) ? 7 : 14; | |
599 | } | |
600 | f->count = i; | |
601 | f->rotate = input_rotate; | |
602 | } | |
603 | ||
1da177e4 | 604 | /* |
a283b5c4 PA |
605 | * Credit (or debit) the entropy store with n bits of entropy. |
606 | * Use credit_entropy_bits_safe() if the value comes from userspace | |
607 | * or otherwise should be checked for extreme values. | |
1da177e4 | 608 | */ |
adc782da | 609 | static void credit_entropy_bits(struct entropy_store *r, int nbits) |
1da177e4 | 610 | { |
902c098a | 611 | int entropy_count, orig; |
30e37ec5 PA |
612 | const int pool_size = r->poolinfo->poolfracbits; |
613 | int nfrac = nbits << ENTROPY_SHIFT; | |
1da177e4 | 614 | |
adc782da MM |
615 | if (!nbits) |
616 | return; | |
617 | ||
adc782da | 618 | DEBUG_ENT("added %d entropy credits to %s\n", nbits, r->name); |
902c098a TT |
619 | retry: |
620 | entropy_count = orig = ACCESS_ONCE(r->entropy_count); | |
30e37ec5 PA |
621 | if (nfrac < 0) { |
622 | /* Debit */ | |
623 | entropy_count += nfrac; | |
624 | } else { | |
625 | /* | |
626 | * Credit: we have to account for the possibility of | |
627 | * overwriting already present entropy. Even in the | |
628 | * ideal case of pure Shannon entropy, new contributions | |
629 | * approach the full value asymptotically: | |
630 | * | |
631 | * entropy <- entropy + (pool_size - entropy) * | |
632 | * (1 - exp(-add_entropy/pool_size)) | |
633 | * | |
634 | * For add_entropy <= pool_size/2 then | |
635 | * (1 - exp(-add_entropy/pool_size)) >= | |
636 | * (add_entropy/pool_size)*0.7869... | |
637 | * so we can approximate the exponential with | |
638 | * 3/4*add_entropy/pool_size and still be on the | |
639 | * safe side by adding at most pool_size/2 at a time. | |
640 | * | |
641 | * The use of pool_size-2 in the while statement is to | |
642 | * prevent rounding artifacts from making the loop | |
643 | * arbitrarily long; this limits the loop to log2(pool_size)*2 | |
644 | * turns no matter how large nbits is. | |
645 | */ | |
646 | int pnfrac = nfrac; | |
647 | const int s = r->poolinfo->poolbitshift + ENTROPY_SHIFT + 2; | |
648 | /* The +2 corresponds to the /4 in the denominator */ | |
649 | ||
650 | do { | |
651 | unsigned int anfrac = min(pnfrac, pool_size/2); | |
652 | unsigned int add = | |
653 | ((pool_size - entropy_count)*anfrac*3) >> s; | |
654 | ||
655 | entropy_count += add; | |
656 | pnfrac -= anfrac; | |
657 | } while (unlikely(entropy_count < pool_size-2 && pnfrac)); | |
658 | } | |
00ce1db1 | 659 | |
8b76f46a | 660 | if (entropy_count < 0) { |
adc782da | 661 | DEBUG_ENT("negative entropy/overflow\n"); |
8b76f46a | 662 | entropy_count = 0; |
30e37ec5 PA |
663 | } else if (entropy_count > pool_size) |
664 | entropy_count = pool_size; | |
902c098a TT |
665 | if (cmpxchg(&r->entropy_count, orig, entropy_count) != orig) |
666 | goto retry; | |
1da177e4 | 667 | |
775f4b29 TT |
668 | if (!r->initialized && nbits > 0) { |
669 | r->entropy_total += nbits; | |
670 | if (r->entropy_total > 128) | |
671 | r->initialized = 1; | |
672 | } | |
673 | ||
a283b5c4 PA |
674 | trace_credit_entropy_bits(r->name, nbits, |
675 | entropy_count >> ENTROPY_SHIFT, | |
00ce1db1 TT |
676 | r->entropy_total, _RET_IP_); |
677 | ||
88c730da | 678 | /* should we wake readers? */ |
a283b5c4 PA |
679 | if (r == &input_pool && |
680 | (entropy_count >> ENTROPY_SHIFT) >= random_read_wakeup_thresh) { | |
88c730da | 681 | wake_up_interruptible(&random_read_wait); |
9a6f70bb JD |
682 | kill_fasync(&fasync, SIGIO, POLL_IN); |
683 | } | |
1da177e4 LT |
684 | } |
685 | ||
a283b5c4 PA |
686 | static void credit_entropy_bits_safe(struct entropy_store *r, int nbits) |
687 | { | |
688 | const int nbits_max = (int)(~0U >> (ENTROPY_SHIFT + 1)); | |
689 | ||
690 | /* Cap the value to avoid overflows */ | |
691 | nbits = min(nbits, nbits_max); | |
692 | nbits = max(nbits, -nbits_max); | |
693 | ||
694 | credit_entropy_bits(r, nbits); | |
695 | } | |
696 | ||
1da177e4 LT |
697 | /********************************************************************* |
698 | * | |
699 | * Entropy input management | |
700 | * | |
701 | *********************************************************************/ | |
702 | ||
/* There is one of these per entropy source */
struct timer_rand_state {
	cycles_t last_time;		/* timestamp of the previous event */
	long last_delta, last_delta2;	/* first- and second-order deltas between events */
	unsigned dont_count_entropy:1;	/* if set, mix the sample but credit no entropy */
};
709 | ||
a2080a67 LT |
/*
 * Add device- or boot-specific data to the input and nonblocking
 * pools to help initialize them to unique values.
 *
 * None of this adds any entropy, it is meant to avoid the
 * problem of the nonblocking pool having similar initial state
 * across largely identical devices.
 */
void add_device_randomness(const void *buf, unsigned int size)
{
	/* Fold a timestamp in alongside the caller's data so that even
	 * identical devices diverge slightly. */
	unsigned long time = random_get_entropy() ^ jiffies;
	unsigned long flags;

	trace_add_device_randomness(size, _RET_IP_);
	/* Mix into both pools under their respective locks; no entropy
	 * is credited for any of this data. */
	spin_lock_irqsave(&input_pool.lock, flags);
	_mix_pool_bytes(&input_pool, buf, size, NULL);
	_mix_pool_bytes(&input_pool, &time, sizeof(time), NULL);
	spin_unlock_irqrestore(&input_pool.lock, flags);

	spin_lock_irqsave(&nonblocking_pool.lock, flags);
	_mix_pool_bytes(&nonblocking_pool, buf, size, NULL);
	_mix_pool_bytes(&nonblocking_pool, &time, sizeof(time), NULL);
	spin_unlock_irqrestore(&nonblocking_pool.lock, flags);
}
EXPORT_SYMBOL(add_device_randomness);
735 | ||
3060d6fe YL |
/* Shared timing state for keyboard/input events (see add_input_randomness) */
static struct timer_rand_state input_timer_state;

/*
 * This function adds entropy to the entropy "pool" by using timing
 * delays.  It uses the timer_rand_state structure to make an estimate
 * of how many bits of entropy this call has added to the pool.
 *
 * The number "num" is also added to the pool - it should somehow describe
 * the type of event which just happened.  This is currently 0-255 for
 * keyboard scan codes, and 256 upwards for interrupts.
 *
 */
static void add_timer_randomness(struct timer_rand_state *state, unsigned num)
{
	struct {
		long jiffies;
		unsigned cycles;
		unsigned num;
	} sample;
	long delta, delta2, delta3;

	preempt_disable();
	/* if over the trickle threshold, use only 1 in 4096 samples */
	if (ENTROPY_BITS(&input_pool) > trickle_thresh &&
	    ((__this_cpu_inc_return(trickle_count) - 1) & 0xfff))
		goto out;

	/* Mix the raw timing sample regardless of the credit decision below. */
	sample.jiffies = jiffies;
	sample.cycles = random_get_entropy();
	sample.num = num;
	mix_pool_bytes(&input_pool, &sample, sizeof(sample), NULL);

	/*
	 * Calculate number of bits of randomness we probably added.
	 * We take into account the first, second and third-order deltas
	 * in order to make our estimate.
	 */

	if (!state->dont_count_entropy) {
		delta = sample.jiffies - state->last_time;
		state->last_time = sample.jiffies;

		delta2 = delta - state->last_delta;
		state->last_delta = delta;

		delta3 = delta2 - state->last_delta2;
		state->last_delta2 = delta2;

		/* Take absolute values, then keep the smallest of the
		 * three deltas as a conservative estimate. */
		if (delta < 0)
			delta = -delta;
		if (delta2 < 0)
			delta2 = -delta2;
		if (delta3 < 0)
			delta3 = -delta3;
		if (delta > delta2)
			delta = delta2;
		if (delta > delta3)
			delta = delta3;

		/*
		 * delta is now minimum absolute delta.
		 * Round down by 1 bit on general principles,
		 * and limit entropy estimate to 12 bits.
		 */
		credit_entropy_bits(&input_pool,
				    min_t(int, fls(delta>>1), 11));
	}
out:
	preempt_enable();
}
806 | ||
/*
 * Feed an input-layer event (keyboard, mouse, ...) into the entropy
 * pool; the event identity and its timing are both mixed in.
 */
void add_input_randomness(unsigned int type, unsigned int code,
				 unsigned int value)
{
	/* NOTE(review): last_value is unsigned char while value is
	 * unsigned int, so only the low 8 bits are remembered for the
	 * duplicate check — confirm this truncation is intentional. */
	static unsigned char last_value;

	/* ignore autorepeat and the like */
	if (value == last_value)
		return;

	DEBUG_ENT("input event\n");
	last_value = value;
	add_timer_randomness(&input_timer_state,
			     (type << 4) ^ code ^ (code >> 4) ^ value);
}
EXPORT_SYMBOL_GPL(add_input_randomness);
1da177e4 | 822 | |
775f4b29 TT |
/* Per-CPU accumulator for interrupt timing samples */
static DEFINE_PER_CPU(struct fast_pool, irq_randomness);

/*
 * Mix per-interrupt timing/IP data into the per-cpu fast pool, and
 * periodically (at most once per second, or every 1024 events) spill
 * the fast pool into a real entropy pool with one bit of credit.
 */
void add_interrupt_randomness(int irq, int irq_flags)
{
	struct entropy_store *r;
	struct fast_pool *fast_pool = &__get_cpu_var(irq_randomness);
	struct pt_regs *regs = get_irq_regs();
	unsigned long now = jiffies;
	__u32 input[4], cycles = random_get_entropy();

	input[0] = cycles ^ jiffies;
	input[1] = irq;
	if (regs) {
		__u64 ip = instruction_pointer(regs);
		input[2] = ip;
		input[3] = ip >> 32;
	}
	/* NOTE(review): when regs == NULL, input[2] and input[3] are
	 * left uninitialized and stack garbage is mixed in — presumably
	 * harmless for entropy collection, but confirm it's intended. */

	fast_mix(fast_pool, input, sizeof(input));

	/* Only spill to a real pool every 1024 events or once per second. */
	if ((fast_pool->count & 1023) &&
	    !time_after(now, fast_pool->last + HZ))
		return;

	fast_pool->last = now;

	/* Favor the nonblocking pool until it has been initialized. */
	r = nonblocking_pool.initialized ? &input_pool : &nonblocking_pool;
	__mix_pool_bytes(r, &fast_pool->pool, sizeof(fast_pool->pool), NULL);
	/*
	 * If we don't have a valid cycle counter, and we see
	 * back-to-back timer interrupts, then skip giving credit for
	 * any entropy.
	 */
	if (cycles == 0) {
		if (irq_flags & __IRQF_TIMER) {
			if (fast_pool->last_timer_intr)
				return;
			fast_pool->last_timer_intr = 1;
		} else
			fast_pool->last_timer_intr = 0;
	}
	credit_entropy_bits(r, 1);
}
866 | ||
#ifdef CONFIG_BLOCK
/*
 * Feed a block-device completion event into the entropy pool, using
 * the per-disk timer_rand_state for the timing-based estimate.
 */
void add_disk_randomness(struct gendisk *disk)
{
	dev_t devt;

	if (!disk || !disk->random)
		return;

	devt = disk_devt(disk);
	/* first major is 1, so we get >= 0x200 here */
	DEBUG_ENT("disk event %d:%d\n",
		  MAJOR(devt), MINOR(devt));

	add_timer_randomness(disk->random, 0x100 + devt);
}
#endif
1da177e4 | 879 | |
1da177e4 LT |
880 | /********************************************************************* |
881 | * | |
882 | * Entropy extraction routines | |
883 | * | |
884 | *********************************************************************/ | |
885 | ||
90b75ee5 | 886 | static ssize_t extract_entropy(struct entropy_store *r, void *buf, |
1da177e4 LT |
887 | size_t nbytes, int min, int rsvd); |
888 | ||
/*
 * This utility inline function is responsible for transferring entropy
 * from the primary pool to the secondary extraction pool. We make
 * sure we pull enough for a 'catastrophic reseed'.
 */
static void xfer_secondary_pool(struct entropy_store *r, size_t nbytes)
{
	__u32 tmp[OUTPUT_POOL_WORDS];

	/* For unlimited pools (e.g. urandom), rate-limit reseeds to at
	 * most once per random_min_urandom_seed seconds. */
	if (r->limit == 0 && random_min_urandom_seed) {
		unsigned long now = jiffies;

		if (time_before(now,
				r->last_pulled + random_min_urandom_seed * HZ))
			return;
		r->last_pulled = now;
	}
	/* Only pull if there is an upstream pool and we are short of
	 * both the request and a full pool. */
	if (r->pull &&
	    r->entropy_count < (nbytes << (ENTROPY_SHIFT + 3)) &&
	    r->entropy_count < r->poolinfo->poolfracbits) {
		/* If we're limited, always leave two wakeup worth's BITS */
		int rsvd = r->limit ? 0 : random_read_wakeup_thresh/4;
		int bytes = nbytes;

		/* pull at least as many as BYTES as wakeup BITS */
		bytes = max_t(int, bytes, random_read_wakeup_thresh / 8);
		/* but never more than the buffer size */
		bytes = min_t(int, bytes, sizeof(tmp));

		DEBUG_ENT("going to reseed %s with %d bits "
			  "(%zu of %d requested)\n",
			  r->name, bytes * 8, nbytes * 8,
			  r->entropy_count >> ENTROPY_SHIFT);

		bytes = extract_entropy(r->pull, tmp, bytes,
					random_read_wakeup_thresh / 8, rsvd);
		mix_pool_bytes(r, tmp, bytes, NULL);
		credit_entropy_bits(r, bytes*8);
	}
}
929 | ||
/*
 * These functions extracts randomness from the "entropy pool", and
 * returns it in a buffer.
 *
 * The min parameter specifies the minimum amount we can pull before
 * failing to avoid races that defeat catastrophic reseeding while the
 * reserved parameter indicates how much entropy we must leave in the
 * pool after each pull to avoid starving other readers.
 *
 * Note: extract_entropy() assumes that .poolwords is a multiple of 16 words.
 */

/*
 * Debit the pool's entropy count for an extraction of up to @nbytes.
 * Returns the number of bytes the caller may actually extract (0 if
 * fewer than min + reserved bytes of entropy are available).  The
 * debit itself is done locklessly with a cmpxchg retry loop.
 */
static size_t account(struct entropy_store *r, size_t nbytes, int min,
		      int reserved)
{
	unsigned long flags;
	int wakeup_write = 0;
	int have_bytes;
	int entropy_count, orig;
	size_t ibytes;

	/* Hold lock while accounting */
	spin_lock_irqsave(&r->lock, flags);

	BUG_ON(r->entropy_count > r->poolinfo->poolfracbits);
	DEBUG_ENT("trying to extract %zu bits from %s\n",
		  nbytes * 8, r->name);

	/* Can we pull enough? */
retry:
	entropy_count = orig = ACCESS_ONCE(r->entropy_count);
	/* Convert fixed-point entropy bits to whole bytes. */
	have_bytes = entropy_count >> (ENTROPY_SHIFT + 3);
	ibytes = nbytes;
	if (have_bytes < min + reserved) {
		ibytes = 0;
	} else {
		/* If limited, never pull more than available */
		if (r->limit && ibytes + reserved >= have_bytes)
			ibytes = have_bytes - reserved;

		if (have_bytes >= ibytes + reserved)
			entropy_count -= ibytes << (ENTROPY_SHIFT + 3);
		else
			entropy_count = reserved << (ENTROPY_SHIFT + 3);

		/* Retry if another debit/credit raced with us. */
		if (cmpxchg(&r->entropy_count, orig, entropy_count) != orig)
			goto retry;

		if ((r->entropy_count >> ENTROPY_SHIFT)
		    < random_write_wakeup_thresh)
			wakeup_write = 1;
	}

	DEBUG_ENT("debiting %zu entropy credits from %s%s\n",
		  ibytes * 8, r->name, r->limit ? "" : " (unlimited)");

	spin_unlock_irqrestore(&r->lock, flags);

	/* Wake writers only after dropping the lock. */
	if (wakeup_write) {
		wake_up_interruptible(&random_write_wait);
		kill_fasync(&fasync, SIGIO, POLL_OUT);
	}

	return ibytes;
}
995 | ||
/*
 * Produce EXTRACT_SIZE bytes of output from pool @r into @out by
 * SHA-1 hashing the whole pool, mixing the hash back in (to prevent
 * backtracking), and folding the final hash in half.
 */
static void extract_buf(struct entropy_store *r, __u8 *out)
{
	int i;
	union {
		__u32 w[5];
		unsigned long l[LONGS(20)];
	} hash;
	__u32 workspace[SHA_WORKSPACE_WORDS];
	__u8 extract[64];
	unsigned long flags;

	/* Generate a hash across the pool, 16 words (512 bits) at a time */
	sha_init(hash.w);
	spin_lock_irqsave(&r->lock, flags);
	for (i = 0; i < r->poolinfo->poolwords; i += 16)
		sha_transform(hash.w, (__u8 *)(r->pool + i), workspace);

	/*
	 * If we have an architectural hardware random number
	 * generator, mix that in, too.
	 */
	for (i = 0; i < LONGS(20); i++) {
		unsigned long v;
		if (!arch_get_random_long(&v))
			break;
		hash.l[i] ^= v;
	}

	/*
	 * We mix the hash back into the pool to prevent backtracking
	 * attacks (where the attacker knows the state of the pool
	 * plus the current outputs, and attempts to find previous
	 * outputs), unless the hash function can be inverted. By
	 * mixing at least a SHA1 worth of hash data back, we make
	 * brute-forcing the feedback as hard as brute-forcing the
	 * hash.
	 */
	__mix_pool_bytes(r, hash.w, sizeof(hash.w), extract);
	spin_unlock_irqrestore(&r->lock, flags);

	/*
	 * To avoid duplicates, we atomically extract a portion of the
	 * pool while mixing, and hash one final time.
	 */
	sha_transform(hash.w, extract, workspace);
	/* Wipe intermediate state from the stack. */
	memset(extract, 0, sizeof(extract));
	memset(workspace, 0, sizeof(workspace));

	/*
	 * In case the hash function has some recognizable output
	 * pattern, we fold it in half. Thus, we always feed back
	 * twice as much data as we output.
	 */
	hash.w[0] ^= hash.w[3];
	hash.w[1] ^= hash.w[4];
	hash.w[2] ^= rol32(hash.w[2], 16);

	memcpy(out, &hash, EXTRACT_SIZE);
	memset(&hash, 0, sizeof(hash));
}
1056 | ||
/*
 * Extract up to @nbytes of randomness from pool @r into @buf.
 * @min/@reserved are passed through to account() (see comment there).
 * Returns the number of bytes produced, which may be less than
 * requested if insufficient entropy was available.
 */
static ssize_t extract_entropy(struct entropy_store *r, void *buf,
			       size_t nbytes, int min, int reserved)
{
	ssize_t ret = 0, i;
	__u8 tmp[EXTRACT_SIZE];
	unsigned long flags;

	/* if last_data isn't primed, we need EXTRACT_SIZE extra bytes */
	if (fips_enabled) {
		spin_lock_irqsave(&r->lock, flags);
		if (!r->last_data_init) {
			/* Mark as primed first, then drop the lock while
			 * we do the (possibly slow) initial extraction. */
			r->last_data_init = 1;
			spin_unlock_irqrestore(&r->lock, flags);
			trace_extract_entropy(r->name, EXTRACT_SIZE,
					      ENTROPY_BITS(r), _RET_IP_);
			xfer_secondary_pool(r, EXTRACT_SIZE);
			extract_buf(r, tmp);
			spin_lock_irqsave(&r->lock, flags);
			memcpy(r->last_data, tmp, EXTRACT_SIZE);
		}
		spin_unlock_irqrestore(&r->lock, flags);
	}

	trace_extract_entropy(r->name, nbytes, ENTROPY_BITS(r), _RET_IP_);
	xfer_secondary_pool(r, nbytes);
	nbytes = account(r, nbytes, min, reserved);

	while (nbytes) {
		extract_buf(r, tmp);

		/* FIPS continuous test: two identical consecutive
		 * outputs indicate a broken RNG. */
		if (fips_enabled) {
			spin_lock_irqsave(&r->lock, flags);
			if (!memcmp(tmp, r->last_data, EXTRACT_SIZE))
				panic("Hardware RNG duplicated output!\n");
			memcpy(r->last_data, tmp, EXTRACT_SIZE);
			spin_unlock_irqrestore(&r->lock, flags);
		}
		i = min_t(int, nbytes, EXTRACT_SIZE);
		memcpy(buf, tmp, i);
		nbytes -= i;
		buf += i;
		ret += i;
	}

	/* Wipe data just returned from memory */
	memset(tmp, 0, sizeof(tmp));

	return ret;
}
1106 | ||
/*
 * Like extract_entropy(), but copies the output to userspace and is
 * preemption/signal aware.  Returns bytes copied, -EFAULT on a bad
 * user buffer, or -ERESTARTSYS if interrupted before any output.
 */
static ssize_t extract_entropy_user(struct entropy_store *r, void __user *buf,
				    size_t nbytes)
{
	ssize_t ret = 0, i;
	__u8 tmp[EXTRACT_SIZE];

	trace_extract_entropy_user(r->name, nbytes, ENTROPY_BITS(r), _RET_IP_);
	xfer_secondary_pool(r, nbytes);
	nbytes = account(r, nbytes, 0, 0);

	while (nbytes) {
		/* Be a good citizen on large reads: yield the CPU, and
		 * bail out early on a pending signal. */
		if (need_resched()) {
			if (signal_pending(current)) {
				if (ret == 0)
					ret = -ERESTARTSYS;
				break;
			}
			schedule();
		}

		extract_buf(r, tmp);
		i = min_t(int, nbytes, EXTRACT_SIZE);
		if (copy_to_user(buf, tmp, i)) {
			ret = -EFAULT;
			break;
		}

		nbytes -= i;
		buf += i;
		ret += i;
	}

	/* Wipe data just returned from memory */
	memset(tmp, 0, sizeof(tmp));

	return ret;
}
1144 | ||
/*
 * This function is the exported kernel interface.  It returns some
 * number of good random numbers, suitable for key generation, seeding
 * TCP sequence numbers, etc.  It does not use the hw random number
 * generator, if available; use get_random_bytes_arch() for that.
 */
void get_random_bytes(void *buf, int nbytes)
{
	trace_get_random_bytes(nbytes, _RET_IP_);
	/* min = 0, reserved = 0: never blocks, never fails short. */
	extract_entropy(&nonblocking_pool, buf, nbytes, 0, 0);
}
EXPORT_SYMBOL(get_random_bytes);
1157 | ||
/*
 * This function will use the architecture-specific hardware random
 * number generator if it is available.  The arch-specific hw RNG will
 * almost certainly be faster than what we can do in software, but it
 * is impossible to verify that it is implemented securely (as
 * opposed, to, say, the AES encryption of a sequence number using a
 * key known by the NSA).  So it's useful if we need the speed, but
 * only if we're willing to trust the hardware manufacturer not to
 * have put in a back door.
 */
void get_random_bytes_arch(void *buf, int nbytes)
{
	char *p = buf;

	trace_get_random_bytes_arch(nbytes, _RET_IP_);
	/* Fill as much as possible from the arch RNG, one long at a time. */
	while (nbytes) {
		unsigned long v;
		int chunk = min(nbytes, (int)sizeof(unsigned long));

		if (!arch_get_random_long(&v))
			break;

		memcpy(p, &v, chunk);
		p += chunk;
		nbytes -= chunk;
	}

	/* Fall back to the software pool for whatever remains. */
	if (nbytes)
		extract_entropy(&nonblocking_pool, p, nbytes, 0, 0);
}
EXPORT_SYMBOL(get_random_bytes_arch);
1189 | ||
1da177e4 LT |
1190 | |
/*
 * init_std_data - initialize pool with system data
 *
 * @r: pool to initialize
 *
 * This function clears the pool's entropy count and mixes some system
 * data into the pool to prepare it for use. The pool is not cleared
 * as that can only decrease the entropy in the pool.
 */
static void init_std_data(struct entropy_store *r)
{
	int i;
	ktime_t now = ktime_get_real();
	unsigned long rv;

	/* Reset all accounting; the pool contents themselves are kept. */
	r->entropy_count = 0;
	r->entropy_total = 0;
	r->last_data_init = 0;
	r->last_pulled = jiffies;
	mix_pool_bytes(r, &now, sizeof(now), NULL);
	/* Seed a pool's worth of data from the arch RNG, if present. */
	for (i = r->poolinfo->poolbytes; i > 0; i -= sizeof(rv)) {
		if (!arch_get_random_long(&rv))
			break;
		mix_pool_bytes(r, &rv, sizeof(rv), NULL);
	}
	/* utsname distinguishes otherwise-identical machines. */
	mix_pool_bytes(r, utsname(), sizeof(*(utsname())), NULL);
}
1218 | ||
cbc96b75 TL |
1219 | /* |
1220 | * Note that setup_arch() may call add_device_randomness() | |
1221 | * long before we get here. This allows seeding of the pools | |
1222 | * with some platform dependent data very early in the boot | |
1223 | * process. But it limits our options here. We must use | |
1224 | * statically allocated structures that already have all | |
1225 | * initializations complete at compile time. We should also | |
1226 | * take care not to overwrite the precious per platform data | |
1227 | * we were given. | |
1228 | */ | |
53c3f63e | 1229 | static int rand_initialize(void) |
1da177e4 LT |
1230 | { |
1231 | init_std_data(&input_pool); | |
1232 | init_std_data(&blocking_pool); | |
1233 | init_std_data(&nonblocking_pool); | |
1234 | return 0; | |
1235 | } | |
1236 | module_init(rand_initialize); | |
1237 | ||
#ifdef CONFIG_BLOCK
/*
 * Allocate the per-disk timing state used by add_disk_randomness().
 * Allocation failure is deliberately tolerated: the disk simply will
 * not contribute timing entropy.
 */
void rand_initialize_disk(struct gendisk *disk)
{
	struct timer_rand_state *state;

	/*
	 * If kzalloc returns null, we just won't use that entropy
	 * source.
	 */
	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (state)
		disk->random = state;
}
#endif
1da177e4 LT |
1252 | |
/*
 * read() handler for /dev/random.  Pulls from the blocking pool;
 * sleeps (unless O_NONBLOCK) until enough entropy is available, and
 * returns after the first successful chunk, pipe-style.
 */
static ssize_t
random_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos)
{
	ssize_t n, retval = 0, count = 0;

	if (nbytes == 0)
		return 0;

	while (nbytes > 0) {
		/* Cap each extraction attempt at SEC_XFER_SIZE bytes. */
		n = nbytes;
		if (n > SEC_XFER_SIZE)
			n = SEC_XFER_SIZE;

		DEBUG_ENT("reading %zu bits\n", n*8);

		n = extract_entropy_user(&blocking_pool, buf, n);

		if (n < 0) {
			retval = n;
			break;
		}

		DEBUG_ENT("read got %zd bits (%zd still needed)\n",
			  n*8, (nbytes-n)*8);

		if (n == 0) {
			/* No entropy available right now. */
			if (file->f_flags & O_NONBLOCK) {
				retval = -EAGAIN;
				break;
			}

			DEBUG_ENT("sleeping?\n");

			/* Block until the input pool reaches the read
			 * wakeup threshold, then try again. */
			wait_event_interruptible(random_read_wait,
				ENTROPY_BITS(&input_pool) >=
				random_read_wakeup_thresh);

			DEBUG_ENT("awake\n");

			if (signal_pending(current)) {
				retval = -ERESTARTSYS;
				break;
			}

			continue;
		}

		count += n;
		buf += n;
		nbytes -= n;
		break;		/* This break makes the device work */
				/* like a named pipe */
	}

	return (count ? count : retval);
}
1309 | ||
/*
 * read() handler for /dev/urandom: never blocks, always draws from
 * the nonblocking pool.
 */
static ssize_t
urandom_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos)
{
	return extract_entropy_user(&nonblocking_pool, buf, nbytes);
}
1315 | ||
1316 | static unsigned int | |
1317 | random_poll(struct file *file, poll_table * wait) | |
1318 | { | |
1319 | unsigned int mask; | |
1320 | ||
1321 | poll_wait(file, &random_read_wait, wait); | |
1322 | poll_wait(file, &random_write_wait, wait); | |
1323 | mask = 0; | |
a283b5c4 | 1324 | if (ENTROPY_BITS(&input_pool) >= random_read_wakeup_thresh) |
1da177e4 | 1325 | mask |= POLLIN | POLLRDNORM; |
a283b5c4 | 1326 | if (ENTROPY_BITS(&input_pool) < random_write_wakeup_thresh) |
1da177e4 LT |
1327 | mask |= POLLOUT | POLLWRNORM; |
1328 | return mask; | |
1329 | } | |
1330 | ||
7f397dcd MM |
/*
 * Copy @count bytes from the user buffer into pool @r in sizeof(buf)
 * chunks.  No entropy is credited here; callers decide whether to
 * credit.  Returns 0 on success or -EFAULT on a bad user pointer.
 */
static int
write_pool(struct entropy_store *r, const char __user *buffer, size_t count)
{
	size_t bytes;
	__u32 buf[16];
	const char __user *p = buffer;

	while (count > 0) {
		bytes = min(count, sizeof(buf));
		if (copy_from_user(&buf, p, bytes))
			return -EFAULT;

		count -= bytes;
		p += bytes;

		mix_pool_bytes(r, buf, bytes, NULL);
		/* Large writes can take a while; stay preemptible. */
		cond_resched();
	}

	return 0;
}
1352 | ||
90b75ee5 MM |
1353 | static ssize_t random_write(struct file *file, const char __user *buffer, |
1354 | size_t count, loff_t *ppos) | |
7f397dcd MM |
1355 | { |
1356 | size_t ret; | |
7f397dcd MM |
1357 | |
1358 | ret = write_pool(&blocking_pool, buffer, count); | |
1359 | if (ret) | |
1360 | return ret; | |
1361 | ret = write_pool(&nonblocking_pool, buffer, count); | |
1362 | if (ret) | |
1363 | return ret; | |
1364 | ||
7f397dcd | 1365 | return (ssize_t)count; |
1da177e4 LT |
1366 | } |
1367 | ||
/*
 * ioctl() handler for /dev/random and /dev/urandom.  Entropy-count
 * queries are unprivileged; everything that adds data or credit, or
 * clears the counters, requires CAP_SYS_ADMIN.
 */
static long random_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
{
	int size, ent_count;
	int __user *p = (int __user *)arg;
	int retval;

	switch (cmd) {
	case RNDGETENTCNT:
		/* inherently racy, no point locking */
		ent_count = ENTROPY_BITS(&input_pool);
		if (put_user(ent_count, p))
			return -EFAULT;
		return 0;
	case RNDADDTOENTCNT:
		/* Credit entropy without supplying data. */
		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;
		if (get_user(ent_count, p))
			return -EFAULT;
		credit_entropy_bits_safe(&input_pool, ent_count);
		return 0;
	case RNDADDENTROPY:
		/* arg points at { int ent_count; int size; byte data[]; } */
		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;
		if (get_user(ent_count, p++))
			return -EFAULT;
		if (ent_count < 0)
			return -EINVAL;
		if (get_user(size, p++))
			return -EFAULT;
		retval = write_pool(&input_pool, (const char __user *)p,
				    size);
		if (retval < 0)
			return retval;
		credit_entropy_bits_safe(&input_pool, ent_count);
		return 0;
	case RNDZAPENTCNT:
	case RNDCLEARPOOL:
		/* Clear the entropy pool counters. */
		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;
		rand_initialize();
		return 0;
	default:
		return -EINVAL;
	}
}
1414 | ||
9a6f70bb JD |
/* fasync() handler: register/unregister for SIGIO on pool wakeups. */
static int random_fasync(int fd, struct file *filp, int on)
{
	return fasync_helper(fd, filp, on, &fasync);
}
1419 | ||
/* File operations for /dev/random (blocking reads). */
const struct file_operations random_fops = {
	.read  = random_read,
	.write = random_write,
	.poll  = random_poll,
	.unlocked_ioctl = random_ioctl,
	.fasync = random_fasync,
	.llseek = noop_llseek,
};
1428 | ||
/* File operations for /dev/urandom (non-blocking reads, no poll). */
const struct file_operations urandom_fops = {
	.read  = urandom_read,
	.write = random_write,
	.unlocked_ioctl = random_ioctl,
	.fasync = random_fasync,
	.llseek = noop_llseek,
};
1436 | ||
/***************************************************************
 * Random UUID interface
 *
 * Used here for a Boot ID, but can be useful for other kernel
 * drivers.
 ***************************************************************/

/*
 * Generate random UUID (RFC 4122 version 4: 122 random bits plus
 * fixed version and variant fields).
 */
void generate_random_uuid(unsigned char uuid_out[16])
{
	get_random_bytes(uuid_out, 16);
	/* Set UUID version to 4 --- truly random generation */
	uuid_out[6] = (uuid_out[6] & 0x0F) | 0x40;
	/* Set the UUID variant to DCE */
	uuid_out[8] = (uuid_out[8] & 0x3F) | 0x80;
}
EXPORT_SYMBOL(generate_random_uuid);
1456 | ||
1457 | /******************************************************************** | |
1458 | * | |
1459 | * Sysctl interface | |
1460 | * | |
1461 | ********************************************************************/ | |
1462 | ||
1463 | #ifdef CONFIG_SYSCTL | |
1464 | ||
1465 | #include <linux/sysctl.h> | |
1466 | ||
1467 | static int min_read_thresh = 8, min_write_thresh; | |
1468 | static int max_read_thresh = INPUT_POOL_WORDS * 32; | |
1469 | static int max_write_thresh = INPUT_POOL_WORDS * 32; | |
1470 | static char sysctl_bootid[16]; | |
1471 | ||
/*
 * These functions is used to return both the bootid UUID, and random
 * UUID.  The difference is in whether table->data is NULL; if it is,
 * then a new UUID is generated and returned to the user.
 *
 * If the user accesses this via the proc interface, it will be returned
 * as an ASCII string in the standard UUID format.  If accesses via the
 * sysctl system call, it is returned as 16 bytes of binary data.
 */
static int proc_do_uuid(struct ctl_table *table, int write,
			void __user *buffer, size_t *lenp, loff_t *ppos)
{
	/* Only .data/.maxlen of fake_table are set before handing it to
	 * proc_dostring(); the remaining fields stay uninitialized. */
	struct ctl_table fake_table;
	unsigned char buf[64], tmp_uuid[16], *uuid;

	uuid = table->data;
	if (!uuid) {
		/* No backing storage: generate a throwaway UUID. */
		uuid = tmp_uuid;
		generate_random_uuid(uuid);
	} else {
		static DEFINE_SPINLOCK(bootid_spinlock);

		/* Lazily generate the boot ID exactly once; uuid[8] is
		 * nonzero after generation (variant bits are set). */
		spin_lock(&bootid_spinlock);
		if (!uuid[8])
			generate_random_uuid(uuid);
		spin_unlock(&bootid_spinlock);
	}

	sprintf(buf, "%pU", uuid);

	fake_table.data = buf;
	fake_table.maxlen = sizeof(buf);

	return proc_dostring(&fake_table, write, buffer, lenp, ppos);
}
1507 | ||
a283b5c4 PA |
1508 | /* |
1509 | * Return entropy available scaled to integral bits | |
1510 | */ | |
1511 | static int proc_do_entropy(ctl_table *table, int write, | |
1512 | void __user *buffer, size_t *lenp, loff_t *ppos) | |
1513 | { | |
1514 | ctl_table fake_table; | |
1515 | int entropy_count; | |
1516 | ||
1517 | entropy_count = *(int *)table->data >> ENTROPY_SHIFT; | |
1518 | ||
1519 | fake_table.data = &entropy_count; | |
1520 | fake_table.maxlen = sizeof(entropy_count); | |
1521 | ||
1522 | return proc_dointvec(&fake_table, write, buffer, lenp, ppos); | |
1523 | } | |
1524 | ||
/* Pool size in bits, as exposed by /proc/sys/kernel/random/poolsize. */
static int sysctl_poolsize = INPUT_POOL_WORDS * 32;

/*
 * Sysctl table for /proc/sys/kernel/random/.  Declared extern because it
 * is registered from the sysctl core outside this file.
 */
extern struct ctl_table random_table[];
struct ctl_table random_table[] = {
	{
		/* Total pool size in bits (read-only). */
		.procname	= "poolsize",
		.data		= &sysctl_poolsize,
		.maxlen		= sizeof(int),
		.mode		= 0444,
		.proc_handler	= proc_dointvec,
	},
	{
		/* Current entropy estimate, scaled to whole bits. */
		.procname	= "entropy_avail",
		.maxlen		= sizeof(int),
		.mode		= 0444,
		.proc_handler	= proc_do_entropy,
		.data		= &input_pool.entropy_count,
	},
	{
		/* Bits required before /dev/random readers are woken. */
		.procname	= "read_wakeup_threshold",
		.data		= &random_read_wakeup_thresh,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= &min_read_thresh,
		.extra2		= &max_read_thresh,
	},
	{
		/* Bits below which entropy-feeding writers are woken. */
		.procname	= "write_wakeup_threshold",
		.data		= &random_write_wakeup_thresh,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= &min_write_thresh,
		.extra2		= &max_write_thresh,
	},
	{
		/* Minimum seconds between urandom pool reseeds. */
		.procname	= "urandom_min_reseed_secs",
		.data		= &random_min_urandom_seed,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		/* UUID generated once per boot (cached in sysctl_bootid). */
		.procname	= "boot_id",
		.data		= &sysctl_bootid,
		.maxlen		= 16,
		.mode		= 0444,
		.proc_handler	= proc_do_uuid,
	},
	{
		/* Fresh random UUID on every read (.data left NULL). */
		.procname	= "uuid",
		.maxlen		= 16,
		.mode		= 0444,
		.proc_handler	= proc_do_uuid,
	},
	{ }	/* sentinel */
};
1582 | #endif /* CONFIG_SYSCTL */ | |
1583 | ||
/* Per-boot secret mixed into every get_random_int() MD5 transform.
 * Cacheline-aligned since it is read on every call from any CPU. */
static u32 random_int_secret[MD5_MESSAGE_BYTES / 4] ____cacheline_aligned;

/*
 * Seed the get_random_int() secret from the entropy pool.
 * Always returns 0 (initcall-style success).
 * NOTE(review): presumably invoked once early in boot before
 * get_random_int() users run — confirm against the call site.
 */
int random_int_secret_init(void)
{
	get_random_bytes(random_int_secret, sizeof(random_int_secret));
	return 0;
}
1da177e4 LT |
1591 | |
1592 | /* | |
1593 | * Get a random word for internal kernel use only. Similar to urandom but | |
1594 | * with the goal of minimal entropy pool depletion. As a result, the random | |
1595 | * value is not cryptographically secure but for several uses the cost of | |
1596 | * depleting entropy is too high | |
1597 | */ | |
74feec5d | 1598 | static DEFINE_PER_CPU(__u32 [MD5_DIGEST_WORDS], get_random_int_hash); |
1da177e4 LT |
1599 | unsigned int get_random_int(void) |
1600 | { | |
63d77173 | 1601 | __u32 *hash; |
6e5714ea | 1602 | unsigned int ret; |
8a0a9bd4 | 1603 | |
63d77173 PA |
1604 | if (arch_get_random_int(&ret)) |
1605 | return ret; | |
1606 | ||
1607 | hash = get_cpu_var(get_random_int_hash); | |
8a0a9bd4 | 1608 | |
61875f30 | 1609 | hash[0] += current->pid + jiffies + random_get_entropy(); |
6e5714ea DM |
1610 | md5_transform(hash, random_int_secret); |
1611 | ret = hash[0]; | |
8a0a9bd4 LT |
1612 | put_cpu_var(get_random_int_hash); |
1613 | ||
1614 | return ret; | |
1da177e4 | 1615 | } |
16c7fa05 | 1616 | EXPORT_SYMBOL(get_random_int); |
1da177e4 LT |
1617 | |
1618 | /* | |
1619 | * randomize_range() returns a start address such that | |
1620 | * | |
1621 | * [...... <range> .....] | |
1622 | * start end | |
1623 | * | |
1624 | * a <range> with size "len" starting at the return value is inside in the | |
1625 | * area defined by [start, end], but is otherwise randomized. | |
1626 | */ | |
1627 | unsigned long | |
1628 | randomize_range(unsigned long start, unsigned long end, unsigned long len) | |
1629 | { | |
1630 | unsigned long range = end - len - start; | |
1631 | ||
1632 | if (end <= start + len) | |
1633 | return 0; | |
1634 | return PAGE_ALIGN(get_random_int() % range + start); | |
1635 | } |