workqueue: update debugobjects fixup callbacks return type
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 2232ae3e3ad655ad4a697cf4ed0bd3fb07878a43..6751b18fd9acade416807846bd232815a1e4bc7e 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -437,7 +437,7 @@ static void *work_debug_hint(void *addr)
  * fixup_init is called when:
  * - an active object is initialized
  */
-static int work_fixup_init(void *addr, enum debug_obj_state state)
+static bool work_fixup_init(void *addr, enum debug_obj_state state)
 {
        struct work_struct *work = addr;
 
@@ -445,9 +445,9 @@ static int work_fixup_init(void *addr, enum debug_obj_state state)
        case ODEBUG_STATE_ACTIVE:
                cancel_work_sync(work);
                debug_object_init(work, &work_debug_descr);
-               return 1;
+               return true;
        default:
-               return 0;
+               return false;
        }
 }
 
@@ -456,7 +456,7 @@ static int work_fixup_init(void *addr, enum debug_obj_state state)
  * - an active object is activated
  * - an unknown object is activated (might be a statically initialized object)
  */
-static int work_fixup_activate(void *addr, enum debug_obj_state state)
+static bool work_fixup_activate(void *addr, enum debug_obj_state state)
 {
        struct work_struct *work = addr;
 
@@ -471,16 +471,16 @@ static int work_fixup_activate(void *addr, enum debug_obj_state state)
                if (test_bit(WORK_STRUCT_STATIC_BIT, work_data_bits(work))) {
                        debug_object_init(work, &work_debug_descr);
                        debug_object_activate(work, &work_debug_descr);
-                       return 0;
+                       return false;
                }
                WARN_ON_ONCE(1);
-               return 0;
+               return false;
 
        case ODEBUG_STATE_ACTIVE:
                WARN_ON(1);
 
        default:
-               return 0;
+               return false;
        }
 }
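For context on the ODEBUG_STATE_NOTAVAILABLE branch above: statically initialized work items never pass through debug_object_init(), so their first activation is this fixup's job. A minimal usage sketch, assuming CONFIG_DEBUG_OBJECTS_WORK is enabled (the handler and trigger names are illustrative, not part of this patch):

#include <linux/workqueue.h>

static void my_handler(struct work_struct *work)
{
        /* deferred work goes here */
}

/*
 * DECLARE_WORK() initializes the work data with WORK_STRUCT_STATIC set, so
 * the first queue_work() below reaches work_fixup_activate() in state
 * ODEBUG_STATE_NOTAVAILABLE and the debug object is initialized lazily
 * instead of hitting the WARN_ON_ONCE().
 */
static DECLARE_WORK(my_static_work, my_handler);

void my_trigger(void)
{
        queue_work(system_wq, &my_static_work);
}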
 
@@ -488,7 +488,7 @@ static int work_fixup_activate(void *addr, enum debug_obj_state state)
  * fixup_free is called when:
  * - an active object is freed
  */
-static int work_fixup_free(void *addr, enum debug_obj_state state)
+static bool work_fixup_free(void *addr, enum debug_obj_state state)
 {
        struct work_struct *work = addr;
 
@@ -496,9 +496,9 @@ static int work_fixup_free(void *addr, enum debug_obj_state state)
        case ODEBUG_STATE_ACTIVE:
                cancel_work_sync(work);
                debug_object_free(work, &work_debug_descr);
-               return 1;
+               return true;
        default:
-               return 0;
+               return false;
        }
 }
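All three converted callbacks are reached through workqueue's debugobjects descriptor, and the bool they now return tells the debugobjects core whether a fixup was actually performed (true) or nothing needed to be done (false). A sketch of that wiring as it typically appears in this part of workqueue.c (the field list may be abridged relative to the real structure):

static struct debug_obj_descr work_debug_descr = {
        .name           = "work_struct",
        .debug_hint     = work_debug_hint,
        .fixup_init     = work_fixup_init,
        .fixup_activate = work_fixup_activate,
        .fixup_free     = work_fixup_free,
};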
 
@@ -666,6 +666,35 @@ static void set_work_pool_and_clear_pending(struct work_struct *work,
         */
        smp_wmb();
        set_work_data(work, (unsigned long)pool_id << WORK_OFFQ_POOL_SHIFT, 0);
+       /*
+        * The following mb guarantees that the previous clear of a PENDING bit
+        * will not be reordered with any speculative LOADS or STORES from
+        * work->current_func, which is executed afterwards.  This possible
+        * reordering can lead to a missed execution on an attempt to queue
+        * the same @work.  E.g. consider this case:
+        *
+        *   CPU#0                         CPU#1
+        *   ----------------------------  --------------------------------
+        *
+        * 1  STORE event_indicated
+        * 2  queue_work_on() {
+        * 3    test_and_set_bit(PENDING)
+        * 4 }                             set_..._and_clear_pending() {
+        * 5                                 set_work_data() # clear bit
+        * 6                                 smp_mb()
+        * 7                               work->current_func() {
+        * 8                                  LOAD event_indicated
+        *                                 }
+        *
+        * Without an explicit full barrier, the speculative LOAD on line 8
+        * can be executed before CPU#0 does the STORE on line 1.  If that
+        * happens, CPU#0 observes the PENDING bit is still set and a new
+        * execution of @work is not queued, in the hope that CPU#1 will
+        * eventually finish the queued @work.  Meanwhile CPU#1 does not see
+        * that event_indicated is set, because the speculative LOAD was
+        * executed before the actual STORE.
+        */
+       smp_mb();
 }
 
 static void clear_work_data(struct work_struct *work)
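To make the comment's scenario concrete, a hypothetical caller that depends on the new smp_mb() could look like the sketch below. event_indicated matches the name used in the comment; the handler and function names are made up for illustration:

#include <linux/workqueue.h>
#include <linux/printk.h>

static bool event_indicated;

static void event_work_fn(struct work_struct *work)
{
        /*
         * LOAD event_indicated (line 8 of the diagram).  The smp_mb() added
         * after clearing PENDING prevents this load from being speculated
         * ahead of the other CPU's STORE in signal_event().
         */
        if (READ_ONCE(event_indicated))
                pr_info("event handled\n");
}

static DECLARE_WORK(event_work, event_work_fn);

void signal_event(void)
{
        WRITE_ONCE(event_indicated, true);      /* STORE (line 1) */
        /*
         * If PENDING is still set this queue_work() is a no-op (lines 2-3),
         * which is only safe because the work function running on the other
         * CPU is now guaranteed to observe event_indicated == true.
         */
        queue_work(system_wq, &event_work);
}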
@@ -4525,6 +4554,17 @@ static void rebind_workers(struct worker_pool *pool)
                                                  pool->attrs->cpumask) < 0);
 
        spin_lock_irq(&pool->lock);
+
+       /*
+        * XXX: CPU hotplug notifiers are weird and can call DOWN_FAILED
+        * w/o preceding DOWN_PREPARE.  Work around it.  CPU hotplug is
+        * being reworked and this can go away in time.
+        */
+       if (!(pool->flags & POOL_DISASSOCIATED)) {
+               spin_unlock_irq(&pool->lock);
+               return;
+       }
+
        pool->flags &= ~POOL_DISASSOCIATED;
 
        for_each_pool_worker(worker, pool) {
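The guard added above works because POOL_DISASSOCIATED is only set on the DOWN_PREPARE path, in unbind_workers(); if a spurious DOWN_FAILED arrives without that step, the flag is clear and the rebind is simply skipped. An abridged sketch of the intended pairing (not the literal unbind_workers() body):

static void unbind_workers(int cpu)
{
        struct worker_pool *pool;

        for_each_cpu_worker_pool(pool, cpu) {
                /* ... detach the pool's workers from the CPU ... */
                spin_lock_irq(&pool->lock);
                pool->flags |= POOL_DISASSOCIATED;      /* what rebind_workers() checks for */
                spin_unlock_irq(&pool->lock);
        }
}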