random: clean up interrupt entropy accounting for archs w/o cycle counters
author: Theodore Ts'o <tytso@mit.edu>
Sat, 14 Jun 2014 07:06:57 +0000 (03:06 -0400)
committer: Theodore Ts'o <tytso@mit.edu>
Tue, 15 Jul 2014 08:49:39 +0000 (04:49 -0400)
For architectures that don't have cycle counters, the algorithm for
deciding when to avoid giving entropy credit due to back-to-back timer
interrupts didn't make any sense, since we were checking every 64
interrupts.  Change it so that we only give an entropy credit if the
majority of the interrupts are not based on the timer.

Signed-off-by: Theodore Ts'o <tytso@mit.edu>
Cc: George Spelvin <linux@horizon.com>
drivers/char/random.c

index 364a8001a2bd573963459ba1e5ae0a0019b546c2..dfe918a21e32df3d13a0d904e2010c5bc9645066 100644 (file)
@@ -548,9 +548,9 @@ static void mix_pool_bytes(struct entropy_store *r, const void *in,
 struct fast_pool {
        __u32           pool[4];
        unsigned long   last;
-       unsigned short  count;
+       unsigned char   count;
+       unsigned char   notimer_count;
        unsigned char   rotate;
-       unsigned char   last_timer_intr;
 };
 
 /*
@@ -850,15 +850,23 @@ void add_interrupt_randomness(int irq, int irq_flags)
        input[3] = ip >> 32;
 
        fast_mix(fast_pool, input);
+       if ((irq_flags & __IRQF_TIMER) == 0)
+               fast_pool->notimer_count++;
 
-       if ((fast_pool->count & 63) && !time_after(now, fast_pool->last + HZ))
-               return;
+       if (cycles) {
+               if ((fast_pool->count < 64) &&
+                   !time_after(now, fast_pool->last + HZ))
+                       return;
+       } else {
+               /* CPU does not have a cycle counting register :-( */
+               if (fast_pool->count < 64)
+                       return;
+       }
 
        r = nonblocking_pool.initialized ? &input_pool : &nonblocking_pool;
-       if (!spin_trylock(&r->lock)) {
-               fast_pool->count--;
+       if (!spin_trylock(&r->lock))
                return;
-       }
+
        fast_pool->last = now;
        __mix_pool_bytes(r, &fast_pool->pool, sizeof(fast_pool->pool));
 
@@ -874,19 +882,15 @@ void add_interrupt_randomness(int irq, int irq_flags)
        spin_unlock(&r->lock);
 
        /*
-        * If we don't have a valid cycle counter, and we see
-        * back-to-back timer interrupts, then skip giving credit for
-        * any entropy, otherwise credit 1 bit.
+        * If we have a valid cycle counter or if the majority of
+        * interrupts collected were non-timer interrupts, then give
+        * an entropy credit of 1 bit.  Yes, this is being very
+        * conservative.
         */
-       credit++;
-       if (cycles == 0) {
-               if (irq_flags & __IRQF_TIMER) {
-                       if (fast_pool->last_timer_intr)
-                               credit--;
-                       fast_pool->last_timer_intr = 1;
-               } else
-                       fast_pool->last_timer_intr = 0;
-       }
+       if (cycles || (fast_pool->notimer_count >= 32))
+               credit++;
+
+       fast_pool->count = fast_pool->notimer_count = 0;
 
        credit_entropy_bits(r, credit);
 }
This page took 0.029849 seconds and 5 git commands to generate.