hwrng: core - Do not register device opportunistically
drivers/char/hw_random/core.c
/*
	Added support for the AMD Geode LX RNG
	(c) Copyright 2004-2005 Advanced Micro Devices, Inc.

	derived from

	Hardware driver for the Intel/AMD/VIA Random Number Generators (RNG)
	(c) Copyright 2003 Red Hat Inc <jgarzik@redhat.com>

	derived from

	Hardware driver for the AMD 768 Random Number Generator (RNG)
	(c) Copyright 2001 Red Hat Inc <alan@redhat.com>

	derived from

	Hardware driver for Intel i810 Random Number Generator (RNG)
	Copyright 2000,2001 Jeff Garzik <jgarzik@pobox.com>
	Copyright 2000,2001 Philipp Rumpf <prumpf@mandrakesoft.com>

	Added generic RNG API
	Copyright 2006 Michael Buesch <m@bues.ch>
	Copyright 2005 (c) MontaVista Software, Inc.

	Please read Documentation/hw_random.txt for details on use.

	----------------------------------------------------------
	This software may be used and distributed according to the terms
	of the GNU General Public License, incorporated herein by reference.

 */
32
33
34 #include <linux/device.h>
35 #include <linux/hw_random.h>
36 #include <linux/module.h>
37 #include <linux/kernel.h>
38 #include <linux/fs.h>
39 #include <linux/sched.h>
40 #include <linux/miscdevice.h>
41 #include <linux/kthread.h>
42 #include <linux/delay.h>
43 #include <linux/slab.h>
44 #include <linux/random.h>
45 #include <linux/err.h>
46 #include <asm/uaccess.h>
47
48
49 #define RNG_MODULE_NAME "hw_random"
50 #define PFX RNG_MODULE_NAME ": "
51 #define RNG_MISCDEV_MINOR 183 /* official */
52
53
54 static struct hwrng *current_rng;
55 static struct task_struct *hwrng_fill;
56 static LIST_HEAD(rng_list);
57 /* Protects rng_list and current_rng */
58 static DEFINE_MUTEX(rng_mutex);
59 /* Protects rng read functions, data_avail, rng_buffer and rng_fillbuf */
60 static DEFINE_MUTEX(reading_mutex);
61 static int data_avail;
62 static u8 *rng_buffer, *rng_fillbuf;
63 static unsigned short current_quality;
64 static unsigned short default_quality; /* = 0; default to "off" */
65
66 module_param(current_quality, ushort, 0644);
67 MODULE_PARM_DESC(current_quality,
68 "current hwrng entropy estimation per mill");
69 module_param(default_quality, ushort, 0644);
70 MODULE_PARM_DESC(default_quality,
71 "default entropy content of hwrng per mill");
72
73 static void start_khwrngd(void);
74
75 static inline int rng_get_data(struct hwrng *rng, u8 *buffer, size_t size,
76 int wait);
77
78 static size_t rng_buffer_size(void)
79 {
80 return SMP_CACHE_BYTES < 32 ? 32 : SMP_CACHE_BYTES;
81 }
82
static void add_early_randomness(struct hwrng *rng)
{
	unsigned char bytes[16];
	int bytes_read;

	mutex_lock(&reading_mutex);
	bytes_read = rng_get_data(rng, bytes, sizeof(bytes), 1);
	mutex_unlock(&reading_mutex);
	if (bytes_read > 0)
		add_device_randomness(bytes, bytes_read);
}

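/*
 * Lifetime rules (descriptive note): current_rng holds one reference on the
 * active device.  Readers take a short-lived extra reference through
 * get_current_rng()/put_rng(), and the final kref_put() runs cleanup_rng(),
 * which invokes the driver's ->cleanup() callback (if any) and signals
 * cleanup_done for hwrng_unregister().
 */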
static inline void cleanup_rng(struct kref *kref)
{
	struct hwrng *rng = container_of(kref, struct hwrng, ref);

	if (rng->cleanup)
		rng->cleanup(rng);

	complete(&rng->cleanup_done);
}

static void set_current_rng(struct hwrng *rng)
{
	BUG_ON(!mutex_is_locked(&rng_mutex));
	current_rng = rng;
}

static void drop_current_rng(void)
{
	BUG_ON(!mutex_is_locked(&rng_mutex));
	if (!current_rng)
		return;

	/* decrease last reference for triggering the cleanup */
	kref_put(&current_rng->ref, cleanup_rng);
	current_rng = NULL;
}

/* Returns ERR_PTR(), NULL or refcounted hwrng */
static struct hwrng *get_current_rng(void)
{
	struct hwrng *rng;

	if (mutex_lock_interruptible(&rng_mutex))
		return ERR_PTR(-ERESTARTSYS);

	rng = current_rng;
	if (rng)
		kref_get(&rng->ref);

	mutex_unlock(&rng_mutex);
	return rng;
}

static void put_rng(struct hwrng *rng)
{
	/*
	 * Hold rng_mutex here so we serialize in case they set_current_rng
	 * on rng again immediately.
	 */
	mutex_lock(&rng_mutex);
	if (rng)
		kref_put(&rng->ref, cleanup_rng);
	mutex_unlock(&rng_mutex);
}

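/*
 * Bring an RNG into service.  If the device is already live (its kref is
 * non-zero) we only take an extra reference and skip ->init(); otherwise the
 * driver's ->init() callback runs and the kref and cleanup completion are
 * (re)armed.  Either way, some early output is fed to the entropy pool and
 * khwrngd is started or stopped to match the new quality estimate.
 */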
static inline int hwrng_init(struct hwrng *rng)
{
	if (kref_get_unless_zero(&rng->ref))
		goto skip_init;

	if (rng->init) {
		int ret;

		ret = rng->init(rng);
		if (ret)
			return ret;
	}

	kref_init(&rng->ref);
	reinit_completion(&rng->cleanup_done);

skip_init:
	add_early_randomness(rng);

	current_quality = rng->quality ? : default_quality;
	current_quality &= 1023;

	if (current_quality == 0 && hwrng_fill)
		kthread_stop(hwrng_fill);
	if (current_quality > 0 && !hwrng_fill)
		start_khwrngd();

	return 0;
}

static int rng_dev_open(struct inode *inode, struct file *filp)
{
	/* enforce read-only access to this chrdev */
	if ((filp->f_mode & FMODE_READ) == 0)
		return -EINVAL;
	if (filp->f_mode & FMODE_WRITE)
		return -EINVAL;
	return 0;
}

static inline int rng_get_data(struct hwrng *rng, u8 *buffer, size_t size,
			       int wait)
{
	int present;

	BUG_ON(!mutex_is_locked(&reading_mutex));
	if (rng->read)
		return rng->read(rng, (void *)buffer, size, wait);

	if (rng->data_present)
		present = rng->data_present(rng, wait);
	else
		present = 1;

	if (present)
		return rng->data_read(rng, (u32 *)buffer);

	return 0;
}

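/*
 * /dev/hwrng read path.  rng_buffer is refilled under reading_mutex and
 * handed out from its tail via data_avail, so concurrent readers share a
 * single buffer.  The loop drops its RNG reference after every chunk so a
 * device can be unregistered while a reader is still in the syscall.
 */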
static ssize_t rng_dev_read(struct file *filp, char __user *buf,
			    size_t size, loff_t *offp)
{
	ssize_t ret = 0;
	int err = 0;
	int bytes_read, len;
	struct hwrng *rng;

	while (size) {
		rng = get_current_rng();
		if (IS_ERR(rng)) {
			err = PTR_ERR(rng);
			goto out;
		}
		if (!rng) {
			err = -ENODEV;
			goto out;
		}

		mutex_lock(&reading_mutex);
		if (!data_avail) {
			bytes_read = rng_get_data(rng, rng_buffer,
				rng_buffer_size(),
				!(filp->f_flags & O_NONBLOCK));
			if (bytes_read < 0) {
				err = bytes_read;
				goto out_unlock_reading;
			}
			data_avail = bytes_read;
		}

		if (!data_avail) {
			if (filp->f_flags & O_NONBLOCK) {
				err = -EAGAIN;
				goto out_unlock_reading;
			}
		} else {
			len = data_avail;
			if (len > size)
				len = size;

			data_avail -= len;

			if (copy_to_user(buf + ret, rng_buffer + data_avail,
					 len)) {
				err = -EFAULT;
				goto out_unlock_reading;
			}

			size -= len;
			ret += len;
		}

		mutex_unlock(&reading_mutex);
		put_rng(rng);

		if (need_resched())
			schedule_timeout_interruptible(1);

		if (signal_pending(current)) {
			err = -ERESTARTSYS;
			goto out;
		}
	}
out:
	return ret ? : err;

out_unlock_reading:
	mutex_unlock(&reading_mutex);
	put_rng(rng);
	goto out;
}

static const struct file_operations rng_chrdev_ops = {
	.owner		= THIS_MODULE,
	.open		= rng_dev_open,
	.read		= rng_dev_read,
	.llseek		= noop_llseek,
};

static struct miscdevice rng_miscdev = {
	.minor		= RNG_MISCDEV_MINOR,
	.name		= RNG_MODULE_NAME,
	.nodename	= "hwrng",
	.fops		= &rng_chrdev_ops,
};

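/*
 * sysfs interface: "rng_current" selects (and shows) the active device by
 * name, "rng_available" lists every registered device.  Both attributes hang
 * off the /dev/hwrng misc device created in register_miscdev().
 */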
static ssize_t hwrng_attr_current_store(struct device *dev,
					struct device_attribute *attr,
					const char *buf, size_t len)
{
	int err;
	struct hwrng *rng;

	err = mutex_lock_interruptible(&rng_mutex);
	if (err)
		return -ERESTARTSYS;
	err = -ENODEV;
	list_for_each_entry(rng, &rng_list, list) {
		if (strcmp(rng->name, buf) == 0) {
			if (rng == current_rng) {
				err = 0;
				break;
			}
			err = hwrng_init(rng);
			if (err)
				break;
			drop_current_rng();
			set_current_rng(rng);
			err = 0;
			break;
		}
	}
	mutex_unlock(&rng_mutex);

	return err ? : len;
}

static ssize_t hwrng_attr_current_show(struct device *dev,
				       struct device_attribute *attr,
				       char *buf)
{
	ssize_t ret;
	struct hwrng *rng;

	rng = get_current_rng();
	if (IS_ERR(rng))
		return PTR_ERR(rng);

	ret = snprintf(buf, PAGE_SIZE, "%s\n", rng ? rng->name : "none");
	put_rng(rng);

	return ret;
}

static ssize_t hwrng_attr_available_show(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	int err;
	struct hwrng *rng;

	err = mutex_lock_interruptible(&rng_mutex);
	if (err)
		return -ERESTARTSYS;
	buf[0] = '\0';
	list_for_each_entry(rng, &rng_list, list) {
		strlcat(buf, rng->name, PAGE_SIZE);
		strlcat(buf, " ", PAGE_SIZE);
	}
	strlcat(buf, "\n", PAGE_SIZE);
	mutex_unlock(&rng_mutex);

	return strlen(buf);
}

static DEVICE_ATTR(rng_current, S_IRUGO | S_IWUSR,
		   hwrng_attr_current_show,
		   hwrng_attr_current_store);
static DEVICE_ATTR(rng_available, S_IRUGO,
		   hwrng_attr_available_show,
		   NULL);

static void __exit unregister_miscdev(void)
{
	device_remove_file(rng_miscdev.this_device, &dev_attr_rng_available);
	device_remove_file(rng_miscdev.this_device, &dev_attr_rng_current);
	misc_deregister(&rng_miscdev);
}

static int __init register_miscdev(void)
{
	int err;

	err = misc_register(&rng_miscdev);
	if (err)
		goto out;
	err = device_create_file(rng_miscdev.this_device,
				 &dev_attr_rng_current);
	if (err)
		goto err_misc_dereg;
	err = device_create_file(rng_miscdev.this_device,
				 &dev_attr_rng_available);
	if (err)
		goto err_remove_current;
out:
	return err;

err_remove_current:
	device_remove_file(rng_miscdev.this_device, &dev_attr_rng_current);
err_misc_dereg:
	misc_deregister(&rng_miscdev);
	goto out;
}

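/*
 * khwrngd: feed the kernel entropy pool in the background.  Each
 * rng_buffer_size() read is credited with rc * current_quality * 8 / 1024
 * bits of entropy, i.e. current_quality is the estimated entropy content
 * per mill (per 1024) of the input data.
 */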
static int hwrng_fillfn(void *unused)
{
	long rc;

	while (!kthread_should_stop()) {
		struct hwrng *rng;

		rng = get_current_rng();
		if (IS_ERR(rng) || !rng)
			break;
		mutex_lock(&reading_mutex);
		rc = rng_get_data(rng, rng_fillbuf,
				  rng_buffer_size(), 1);
		mutex_unlock(&reading_mutex);
		put_rng(rng);
		if (rc <= 0) {
			pr_warn("hwrng: no data available\n");
			msleep_interruptible(10000);
			continue;
		}
		/* Outside lock, sure, but y'know: randomness. */
		add_hwgenerator_randomness((void *)rng_fillbuf, rc,
					   rc * current_quality * 8 >> 10);
	}
	hwrng_fill = NULL;
	return 0;
}

static void start_khwrngd(void)
{
	hwrng_fill = kthread_run(hwrng_fillfn, NULL, "hwrng");
	if (hwrng_fill == ERR_PTR(-ENOMEM)) {
		pr_err("hwrng_fill thread creation failed");
		hwrng_fill = NULL;
	}
}

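/*
 * hwrng_register() - register a hardware RNG with the core.
 *
 * As of "hwrng: core - Do not register device opportunistically", the
 * /dev/hwrng misc device and its sysfs attributes are created once at module
 * init by register_miscdev(), not here on first registration.
 *
 * Illustrative sketch of a provider; the names "foo_rng" and foo_rng_read()
 * are invented for this comment and are not part of the tree:
 *
 *	static int foo_rng_read(struct hwrng *rng, void *buf, size_t max,
 *				bool wait)
 *	{
 *		memset(buf, 0x5a, max);		(stand-in for hardware I/O)
 *		return max;			(bytes produced)
 *	}
 *
 *	static struct hwrng foo_rng = {
 *		.name	 = "foo",
 *		.read	 = foo_rng_read,
 *		.quality = 1000,		(entropy estimate, per mill)
 *	};
 *
 *	err = hwrng_register(&foo_rng);		(typically from probe())
 */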
int hwrng_register(struct hwrng *rng)
{
	int err = -EINVAL;
	struct hwrng *old_rng, *tmp;

	if (rng->name == NULL ||
	    (rng->data_read == NULL && rng->read == NULL))
		goto out;

	mutex_lock(&rng_mutex);

	/* kmalloc makes this safe for virt_to_page() in virtio_rng.c */
	err = -ENOMEM;
	if (!rng_buffer) {
		rng_buffer = kmalloc(rng_buffer_size(), GFP_KERNEL);
		if (!rng_buffer)
			goto out_unlock;
	}
	if (!rng_fillbuf) {
		rng_fillbuf = kmalloc(rng_buffer_size(), GFP_KERNEL);
		if (!rng_fillbuf) {
			kfree(rng_buffer);
			goto out_unlock;
		}
	}

	/* Must not register two RNGs with the same name. */
	err = -EEXIST;
	list_for_each_entry(tmp, &rng_list, list) {
		if (strcmp(tmp->name, rng->name) == 0)
			goto out_unlock;
	}

	init_completion(&rng->cleanup_done);
	complete(&rng->cleanup_done);

	old_rng = current_rng;
	err = 0;
	if (!old_rng) {
		err = hwrng_init(rng);
		if (err)
			goto out_unlock;
		set_current_rng(rng);
	}
	list_add_tail(&rng->list, &rng_list);

	if (old_rng && !rng->init) {
		/*
		 * Use a new device's input to add some randomness to
		 * the system. If this rng device isn't going to be
		 * used right away, its init function hasn't been
		 * called yet; so only use the randomness from devices
		 * that don't need an init callback.
		 */
		add_early_randomness(rng);
	}

out_unlock:
	mutex_unlock(&rng_mutex);
out:
	return err;
}
EXPORT_SYMBOL_GPL(hwrng_register);

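/*
 * Unregister an RNG.  If it was the current device, fall back to the most
 * recently registered remaining RNG, then block until the device's
 * cleanup_done completion fires so the caller can safely free it.
 */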
void hwrng_unregister(struct hwrng *rng)
{
	mutex_lock(&rng_mutex);

	list_del(&rng->list);
	if (current_rng == rng) {
		drop_current_rng();
		if (!list_empty(&rng_list)) {
			struct hwrng *tail;

			tail = list_entry(rng_list.prev, struct hwrng, list);

			if (hwrng_init(tail) == 0)
				set_current_rng(tail);
		}
	}

	if (list_empty(&rng_list)) {
		mutex_unlock(&rng_mutex);
		if (hwrng_fill)
			kthread_stop(hwrng_fill);
	} else
		mutex_unlock(&rng_mutex);

	wait_for_completion(&rng->cleanup_done);
}
EXPORT_SYMBOL_GPL(hwrng_unregister);

static int __init hwrng_modinit(void)
{
	return register_miscdev();
}

static void __exit hwrng_modexit(void)
{
	mutex_lock(&rng_mutex);
	BUG_ON(current_rng);
	kfree(rng_buffer);
	kfree(rng_fillbuf);
	mutex_unlock(&rng_mutex);

	unregister_miscdev();
}

module_init(hwrng_modinit);
module_exit(hwrng_modexit);

MODULE_DESCRIPTION("H/W Random Number Generator (RNG) driver");
MODULE_LICENSE("GPL");