workqueue: use mod_delayed_work() instead of __cancel + queue
author Tejun Heo <tj@kernel.org>
Tue, 21 Aug 2012 20:18:24 +0000 (13:18 -0700)
committer Tejun Heo <tj@kernel.org>
Tue, 21 Aug 2012 20:18:24 +0000 (13:18 -0700)
Now that mod_delayed_work() is safe to call from IRQ handlers,
__cancel_delayed_work() followed by queue_delayed_work() can be
replaced with mod_delayed_work().

Most conversions are straightforward except for the following.

* net/core/link_watch.c: linkwatch_schedule_work() was doing quite an
  elaborate dance around its delayed_work.  Collapse it such that
  linkwatch_work is queued for immediate execution if LW_URGENT and the
  existing timer is kept otherwise.

Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: "David S. Miller" <davem@davemloft.net>
Cc: Tomi Valkeinen <tomi.valkeinen@ti.com>
block/blk-core.c
block/blk-throttle.c
drivers/block/floppy.c
drivers/infiniband/core/mad.c
drivers/input/keyboard/qt2160.c
drivers/input/mouse/synaptics_i2c.c
net/core/link_watch.c

index 4b4dbdfbca89fe5769fd4b2f6826f305fca18e26..4b8b606dbb01805c2e1f3393ace97558b22f9a46 100644 (file)
@@ -319,10 +319,8 @@ EXPORT_SYMBOL(__blk_run_queue);
  */
 void blk_run_queue_async(struct request_queue *q)
 {
-       if (likely(!blk_queue_stopped(q))) {
-               __cancel_delayed_work(&q->delay_work);
-               queue_delayed_work(kblockd_workqueue, &q->delay_work, 0);
-       }
+       if (likely(!blk_queue_stopped(q)))
+               mod_delayed_work(kblockd_workqueue, &q->delay_work, 0);
 }
 EXPORT_SYMBOL(blk_run_queue_async);
 
index 5a58e779912b909ece2ac419c3ef823e0b0f0b7b..a9664fa0b6097ace6a48d48e0992a6d83ed32a9d 100644 (file)
@@ -929,12 +929,7 @@ throtl_schedule_delayed_work(struct throtl_data *td, unsigned long delay)
 
        /* schedule work if limits changed even if no bio is queued */
        if (total_nr_queued(td) || td->limits_changed) {
-               /*
-                * We might have a work scheduled to be executed in future.
-                * Cancel that and schedule a new one.
-                */
-               __cancel_delayed_work(dwork);
-               queue_delayed_work(kthrotld_workqueue, dwork, delay);
+               mod_delayed_work(kthrotld_workqueue, dwork, delay);
                throtl_log(td, "schedule work. delay=%lu jiffies=%lu",
                                delay, jiffies);
        }
index a7d6347aaa7913b2a029014a95a2558d8360597e..55a5bc002c069a708fd6f0de38177a22ead5f4db 100644 (file)
@@ -672,7 +672,6 @@ static void __reschedule_timeout(int drive, const char *message)
 
        if (drive == current_reqD)
                drive = current_drive;
-       __cancel_delayed_work(&fd_timeout);
 
        if (drive < 0 || drive >= N_DRIVE) {
                delay = 20UL * HZ;
@@ -680,7 +679,7 @@ static void __reschedule_timeout(int drive, const char *message)
        } else
                delay = UDP->timeout;
 
-       queue_delayed_work(floppy_wq, &fd_timeout, delay);
+       mod_delayed_work(floppy_wq, &fd_timeout, delay);
        if (UDP->flags & FD_DEBUG)
                DPRINT("reschedule timeout %s\n", message);
        timeout_message = message;
index b0d0bc8a6fb6ca58c61206ff11dab9f2f5cd5e8e..b5938147fc898c4350ef408052782d1d7307c586 100644 (file)
@@ -2013,13 +2013,11 @@ static void adjust_timeout(struct ib_mad_agent_private *mad_agent_priv)
                if (time_after(mad_agent_priv->timeout,
                               mad_send_wr->timeout)) {
                        mad_agent_priv->timeout = mad_send_wr->timeout;
-                       __cancel_delayed_work(&mad_agent_priv->timed_work);
                        delay = mad_send_wr->timeout - jiffies;
                        if ((long)delay <= 0)
                                delay = 1;
-                       queue_delayed_work(mad_agent_priv->qp_info->
-                                          port_priv->wq,
-                                          &mad_agent_priv->timed_work, delay);
+                       mod_delayed_work(mad_agent_priv->qp_info->port_priv->wq,
+                                        &mad_agent_priv->timed_work, delay);
                }
        }
 }
@@ -2052,11 +2050,9 @@ static void wait_for_response(struct ib_mad_send_wr_private *mad_send_wr)
        list_add(&mad_send_wr->agent_list, list_item);
 
        /* Reschedule a work item if we have a shorter timeout */
-       if (mad_agent_priv->wait_list.next == &mad_send_wr->agent_list) {
-               __cancel_delayed_work(&mad_agent_priv->timed_work);
-               queue_delayed_work(mad_agent_priv->qp_info->port_priv->wq,
-                                  &mad_agent_priv->timed_work, delay);
-       }
+       if (mad_agent_priv->wait_list.next == &mad_send_wr->agent_list)
+               mod_delayed_work(mad_agent_priv->qp_info->port_priv->wq,
+                                &mad_agent_priv->timed_work, delay);
 }
 
 void ib_reset_mad_timeout(struct ib_mad_send_wr_private *mad_send_wr,
index e7a5e36e1203c26545ccb44dc7c4af82c9509011..76b7d430d03a4a26f18666469413be1449f8fc20 100644 (file)
@@ -156,8 +156,7 @@ static irqreturn_t qt2160_irq(int irq, void *_qt2160)
 
        spin_lock_irqsave(&qt2160->lock, flags);
 
-       __cancel_delayed_work(&qt2160->dwork);
-       schedule_delayed_work(&qt2160->dwork, 0);
+       mod_delayed_work(system_wq, &qt2160->dwork, 0);
 
        spin_unlock_irqrestore(&qt2160->lock, flags);
 
index f14675702c0f0bdd2bf36c85188c7da0a7a94ea4..063a174d3a88c4a22acb67a7e474b330e355b336 100644 (file)
@@ -376,12 +376,7 @@ static void synaptics_i2c_reschedule_work(struct synaptics_i2c *touch,
 
        spin_lock_irqsave(&touch->lock, flags);
 
-       /*
-        * If work is already scheduled then subsequent schedules will not
-        * change the scheduled time that's why we have to cancel it first.
-        */
-       __cancel_delayed_work(&touch->dwork);
-       schedule_delayed_work(&touch->dwork, delay);
+       mod_delayed_work(system_wq, &touch->dwork, delay);
 
        spin_unlock_irqrestore(&touch->lock, flags);
 }
index c3519c6d1b169a5c895efd781c7958218d7f8dc6..8e397a69005afd51726a2310e735416b31d665d3 100644 (file)
@@ -120,22 +120,13 @@ static void linkwatch_schedule_work(int urgent)
                delay = 0;
 
        /*
-        * This is true if we've scheduled it immeditately or if we don't
-        * need an immediate execution and it's already pending.
+        * If urgent, schedule immediate execution; otherwise, don't
+        * override the existing timer.
         */
-       if (schedule_delayed_work(&linkwatch_work, delay) == !delay)
-               return;
-
-       /* Don't bother if there is nothing urgent. */
-       if (!test_bit(LW_URGENT, &linkwatch_flags))
-               return;
-
-       /* It's already running which is good enough. */
-       if (!__cancel_delayed_work(&linkwatch_work))
-               return;
-
-       /* Otherwise we reschedule it again for immediate execution. */
-       schedule_delayed_work(&linkwatch_work, 0);
+       if (test_bit(LW_URGENT, &linkwatch_flags))
+               mod_delayed_work(system_wq, &linkwatch_work, 0);
+       else
+               schedule_delayed_work(&linkwatch_work, delay);
 }
 
 
This page took 0.041318 seconds and 5 git commands to generate.