random: always update the entropy pool under the spinlock
author	Theodore Ts'o <tytso@mit.edu>
	Wed, 11 Jun 2014 02:46:37 +0000 (22:46 -0400)
committer	Theodore Ts'o <tytso@mit.edu>
	Tue, 15 Jul 2014 08:49:39 +0000 (04:49 -0400)
Instead of using the lockless techniques introduced in commit
902c098a3663, use spin_trylock to try to grab the entropy pool's lock.
If we can't get the lock, just try again on the next interrupt.
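
A minimal userspace sketch of this trylock-or-defer pattern, using pthread
spinlocks as a stand-in for the kernel spinlock API (the pool layout and
helper names below are illustrative only, not the kernel's):

    #include <pthread.h>
    #include <stddef.h>

    struct entropy_pool {
            pthread_spinlock_t lock;
            unsigned char data[128];
    };

    struct fast_pool {
            unsigned int count;     /* samples accumulated by the caller */
            unsigned int data[4];
    };

    /* Hypothetical stand-in for __mix_pool_bytes(). */
    static void mix_bytes(struct entropy_pool *p, const void *in, size_t n)
    {
            (void)p; (void)in; (void)n;
    }

    static void flush_fast_pool(struct entropy_pool *p, struct fast_pool *fp)
    {
            /*
             * If the pool lock is contended, undo the count bump so the
             * flush threshold triggers again, and retry on a later event
             * instead of spinning here.
             */
            if (pthread_spin_trylock(&p->lock) != 0) {
                    fp->count--;
                    return;
            }
            mix_bytes(p, fp->data, sizeof(fp->data));
            pthread_spin_unlock(&p->lock);
    }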

Based on discussions with George Spelvin.

Signed-off-by: Theodore Ts'o <tytso@mit.edu>
Cc: George Spelvin <linux@horizon.com>
drivers/char/random.c

index 0a7ac0a7b2520a852be7e61d32702dab391fced0..922a2e4089f9edcf61c66a419d98f7d563aa8dae 100644 (file)
@@ -495,9 +495,8 @@ static void _mix_pool_bytes(struct entropy_store *r, const void *in,
        tap4 = r->poolinfo->tap4;
        tap5 = r->poolinfo->tap5;
 
-       smp_rmb();
-       input_rotate = ACCESS_ONCE(r->input_rotate);
-       i = ACCESS_ONCE(r->add_ptr);
+       input_rotate = r->input_rotate;
+       i = r->add_ptr;
 
        /* mix one byte at a time to simplify size handling and churn faster */
        while (nbytes--) {
@@ -524,9 +523,8 @@ static void _mix_pool_bytes(struct entropy_store *r, const void *in,
                input_rotate = (input_rotate + (i ? 7 : 14)) & 31;
        }
 
-       ACCESS_ONCE(r->input_rotate) = input_rotate;
-       ACCESS_ONCE(r->add_ptr) = i;
-       smp_wmb();
+       r->input_rotate = input_rotate;
+       r->add_ptr = i;
 
        if (out)
                for (j = 0; j < 16; j++)
@@ -845,7 +843,7 @@ void add_interrupt_randomness(int irq, int irq_flags)
        __u32                   input[4], c_high, j_high;
        __u64                   ip;
        unsigned long           seed;
-       int                     credit;
+       int                     credit = 0;
 
        c_high = (sizeof(cycles) > 4) ? cycles >> 32 : 0;
        j_high = (sizeof(now) > 4) ? now >> 32 : 0;
@@ -860,36 +858,40 @@ void add_interrupt_randomness(int irq, int irq_flags)
        if ((fast_pool->count & 63) && !time_after(now, fast_pool->last + HZ))
                return;
 
-       fast_pool->last = now;
-
        r = nonblocking_pool.initialized ? &input_pool : &nonblocking_pool;
+       if (!spin_trylock(&r->lock)) {
+               fast_pool->count--;
+               return;
+       }
+       fast_pool->last = now;
        __mix_pool_bytes(r, &fast_pool->pool, sizeof(fast_pool->pool), NULL);
 
+       /*
+        * If we have architectural seed generator, produce a seed and
+        * add it to the pool.  For the sake of paranoia count it as
+        * 50% entropic.
+        */
+       if (arch_get_random_seed_long(&seed)) {
+               __mix_pool_bytes(r, &seed, sizeof(seed), NULL);
+               credit += sizeof(seed) * 4;
+       }
+       spin_unlock(&r->lock);
+
        /*
         * If we don't have a valid cycle counter, and we see
         * back-to-back timer interrupts, then skip giving credit for
         * any entropy, otherwise credit 1 bit.
         */
-       credit = 1;
+       credit++;
        if (cycles == 0) {
                if (irq_flags & __IRQF_TIMER) {
                        if (fast_pool->last_timer_intr)
-                               credit = 0;
+                               credit--;
                        fast_pool->last_timer_intr = 1;
                } else
                        fast_pool->last_timer_intr = 0;
        }
 
-       /*
-        * If we have architectural seed generator, produce a seed and
-        * add it to the pool.  For the sake of paranoia count it as
-        * 50% entropic.
-        */
-       if (arch_get_random_seed_long(&seed)) {
-               __mix_pool_bytes(r, &seed, sizeof(seed), NULL);
-               credit += sizeof(seed) * 4;
-       }
-
        credit_entropy_bits(r, credit);
 }
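
For reference, the "50% entropic" accounting in the arch_get_random_seed_long()
hunk above works out as follows (illustrative arithmetic only, assuming an
LP64 kernel where sizeof(unsigned long) == 8):

    #include <stdio.h>

    int main(void)
    {
            unsigned long seed;
            int credit = 0;

            /* Credit 4 bits per byte, i.e. half of the seed's 8 bits/byte. */
            credit += sizeof(seed) * 4;
            printf("credit = %d bits\n", credit);   /* 32 on LP64 */
            return 0;
    }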
 