diff --git a/drivers/watchdog/watchdog_dev.c b/drivers/watchdog/watchdog_dev.c
index ba2ecce4aae685df4ce30a8263fec56f3e0b729d..e2c5abbb45ffd77d0b838fcebdab0536a637af8e 100644
--- a/drivers/watchdog/watchdog_dev.c
+++ b/drivers/watchdog/watchdog_dev.c
@@ -36,6 +36,7 @@
 #include <linux/errno.h>       /* For the -ENODEV/... values */
 #include <linux/fs.h>          /* For file operations */
 #include <linux/init.h>                /* For __init/__exit/... */
+#include <linux/jiffies.h>     /* For timeout functions */
 #include <linux/kernel.h>      /* For printk/panic/... */
 #include <linux/kref.h>                /* For data references */
 #include <linux/miscdevice.h>  /* For handling misc devices */
@@ -44,6 +45,7 @@
 #include <linux/slab.h>                /* For memory functions */
 #include <linux/types.h>       /* For standard types (like size_t) */
 #include <linux/watchdog.h>    /* For watchdog specific items */
+#include <linux/workqueue.h>   /* For workqueue */
 #include <linux/uaccess.h>     /* For copy_to_user/put_user/... */
 
 #include "watchdog_core.h"
@@ -61,6 +63,9 @@ struct watchdog_core_data {
        struct cdev cdev;
        struct watchdog_device *wdd;
        struct mutex lock;
+       unsigned long last_keepalive;
+       unsigned long last_hw_keepalive;
+       struct delayed_work work;
        unsigned long status;           /* Internal status bits */
 #define _WDOG_DEV_OPEN         0       /* Opened ? */
 #define _WDOG_ALLOW_RELEASE    1       /* Did we receive the magic char ? */
@@ -71,6 +76,91 @@ static dev_t watchdog_devt;
 /* Reference to watchdog device behind /dev/watchdog */
 static struct watchdog_core_data *old_wd_data;
 
+static struct workqueue_struct *watchdog_wq;
+
+static inline bool watchdog_need_worker(struct watchdog_device *wdd)
+{
+       /* All variables in milli-seconds */
+       unsigned int hm = wdd->max_hw_heartbeat_ms;
+       unsigned int t = wdd->timeout * 1000;
+
+       /*
+        * A worker to generate heartbeat requests is needed if all of the
+        * following conditions are true.
+        * - Userspace activated the watchdog.
+        * - The driver provided a value for the maximum hardware timeout, and
+        *   thus is aware that the framework supports generating heartbeat
+        *   requests.
+        * - Userspace requests a longer timeout than the hardware can handle.
+        *
+        * Alternatively, if userspace has not opened the watchdog device,
+        * we take care of feeding the watchdog while it is running.
+        */
+       return hm && ((watchdog_active(wdd) && t > hm) ||
+                     (t && !watchdog_active(wdd) && watchdog_hw_running(wdd)));
+}
+
+static long watchdog_next_keepalive(struct watchdog_device *wdd)
+{
+       struct watchdog_core_data *wd_data = wdd->wd_data;
+       unsigned int timeout_ms = wdd->timeout * 1000;
+       unsigned long keepalive_interval;
+       unsigned long last_heartbeat;
+       unsigned long virt_timeout;
+       unsigned int hw_heartbeat_ms;
+
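+       /*
+        * The worker sends heartbeats at half the effective hardware
+        * timeout, which leaves headroom if a worker run is delayed.
+        */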
+       virt_timeout = wd_data->last_keepalive + msecs_to_jiffies(timeout_ms);
+       hw_heartbeat_ms = min(timeout_ms, wdd->max_hw_heartbeat_ms);
+       keepalive_interval = msecs_to_jiffies(hw_heartbeat_ms / 2);
+
+       if (!watchdog_active(wdd))
+               return keepalive_interval;
+
+       /*
+        * To ensure that the watchdog times out wdd->timeout seconds
+        * after the most recent ping from userspace, the last
+        * worker ping has to come in hw_heartbeat_ms before this timeout.
+        */
+       last_heartbeat = virt_timeout - msecs_to_jiffies(hw_heartbeat_ms);
+       return min_t(long, last_heartbeat - jiffies, keepalive_interval);
+}
+
+static inline void watchdog_update_worker(struct watchdog_device *wdd)
+{
+       struct watchdog_core_data *wd_data = wdd->wd_data;
+
+       if (watchdog_need_worker(wdd)) {
+               long t = watchdog_next_keepalive(wdd);
+
+               if (t > 0)
+                       mod_delayed_work(watchdog_wq, &wd_data->work, t);
+       } else {
+               cancel_delayed_work(&wd_data->work);
+       }
+}
+
+static int __watchdog_ping(struct watchdog_device *wdd)
+{
+       struct watchdog_core_data *wd_data = wdd->wd_data;
+       unsigned long earliest_keepalive = wd_data->last_hw_keepalive +
+                               msecs_to_jiffies(wdd->min_hw_heartbeat_ms);
+       int err;
+
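+       /*
+        * Honor the driver's minimum interval between hardware pings: if
+        * the previous ping was too recent, defer this one to the worker.
+        */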
+       if (time_is_after_jiffies(earliest_keepalive)) {
+               mod_delayed_work(watchdog_wq, &wd_data->work,
+                                earliest_keepalive - jiffies);
+               return 0;
+       }
+
+       wd_data->last_hw_keepalive = jiffies;
+
+       if (wdd->ops->ping)
+               err = wdd->ops->ping(wdd);  /* ping the watchdog */
+       else
+               err = wdd->ops->start(wdd); /* restart watchdog */
+
+       watchdog_update_worker(wdd);
+
+       return err;
+}
+
 /*
  *     watchdog_ping: ping the watchdog.
  *     @wdd: the watchdog device to ping
@@ -85,17 +175,28 @@ static struct watchdog_core_data *old_wd_data;
 
 static int watchdog_ping(struct watchdog_device *wdd)
 {
-       int err;
+       struct watchdog_core_data *wd_data = wdd->wd_data;
 
-       if (!watchdog_active(wdd))
+       if (!watchdog_active(wdd) && !watchdog_hw_running(wdd))
                return 0;
 
-       if (wdd->ops->ping)
-               err = wdd->ops->ping(wdd);      /* ping the watchdog */
-       else
-               err = wdd->ops->start(wdd);     /* restart watchdog */
+       wd_data->last_keepalive = jiffies;
+       return __watchdog_ping(wdd);
+}
 
-       return err;
+static void watchdog_ping_work(struct work_struct *work)
+{
+       struct watchdog_core_data *wd_data;
+       struct watchdog_device *wdd;
+
+       wd_data = container_of(to_delayed_work(work), struct watchdog_core_data,
+                              work);
+
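+       /*
+        * wd_data->wdd may be cleared during unregistration, so check it
+        * while holding the lock.
+        */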
+       mutex_lock(&wd_data->lock);
+       wdd = wd_data->wdd;
+       if (wdd && (watchdog_active(wdd) || watchdog_hw_running(wdd)))
+               __watchdog_ping(wdd);
+       mutex_unlock(&wd_data->lock);
 }
 
 /*
@@ -111,14 +212,23 @@ static int watchdog_ping(struct watchdog_device *wdd)
 
 static int watchdog_start(struct watchdog_device *wdd)
 {
+       struct watchdog_core_data *wd_data = wdd->wd_data;
+       unsigned long started_at;
        int err;
 
        if (watchdog_active(wdd))
                return 0;
 
-       err = wdd->ops->start(wdd);
-       if (err == 0)
+       started_at = jiffies;
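+       /* If the hardware is already running, a ping is enough to take over. */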
+       if (watchdog_hw_running(wdd) && wdd->ops->ping)
+               err = wdd->ops->ping(wdd);
+       else
+               err = wdd->ops->start(wdd);
+       if (err == 0) {
                set_bit(WDOG_ACTIVE, &wdd->status);
+               wd_data->last_keepalive = started_at;
+               watchdog_update_worker(wdd);
+       }
 
        return err;
 }
@@ -137,7 +247,7 @@ static int watchdog_start(struct watchdog_device *wdd)
 
 static int watchdog_stop(struct watchdog_device *wdd)
 {
-       int err;
+       int err = 0;
 
        if (!watchdog_active(wdd))
                return 0;
@@ -148,9 +258,15 @@ static int watchdog_stop(struct watchdog_device *wdd)
                return -EBUSY;
        }
 
-       err = wdd->ops->stop(wdd);
-       if (err == 0)
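+       /*
+        * Drivers without a stop callback can not stop the hardware, so
+        * flag it as still running.
+        */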
+       if (wdd->ops->stop)
+               err = wdd->ops->stop(wdd);
+       else
+               set_bit(WDOG_HW_RUNNING, &wdd->status);
+
+       if (err == 0) {
                clear_bit(WDOG_ACTIVE, &wdd->status);
+               watchdog_update_worker(wdd);
+       }
 
        return err;
 }
@@ -183,13 +299,22 @@ static unsigned int watchdog_get_status(struct watchdog_device *wdd)
 static int watchdog_set_timeout(struct watchdog_device *wdd,
                                                        unsigned int timeout)
 {
-       if (!wdd->ops->set_timeout || !(wdd->info->options & WDIOF_SETTIMEOUT))
+       int err = 0;
+
+       if (!(wdd->info->options & WDIOF_SETTIMEOUT))
                return -EOPNOTSUPP;
 
        if (watchdog_timeout_invalid(wdd, timeout))
                return -EINVAL;
 
-       return wdd->ops->set_timeout(wdd, timeout);
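+       /*
+        * Drivers may omit set_timeout and let the core manage the timeout;
+        * in that case updating wdd->timeout is sufficient.
+        */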
+       if (wdd->ops->set_timeout)
+               err = wdd->ops->set_timeout(wdd, timeout);
+       else
+               wdd->timeout = timeout;
+
+       watchdog_update_worker(wdd);
+
+       return err;
 }
 
 /*
@@ -538,7 +663,7 @@ static int watchdog_open(struct inode *inode, struct file *file)
         * If the /dev/watchdog device is open, we don't want the module
         * to be unloaded.
         */
-       if (!try_module_get(wdd->ops->owner)) {
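+       /*
+        * If the hardware is already running, a module reference was
+        * already taken when the watchdog was registered.
+        */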
+       if (!watchdog_hw_running(wdd) && !try_module_get(wdd->ops->owner)) {
                err = -EBUSY;
                goto out_clear;
        }
@@ -549,7 +674,8 @@ static int watchdog_open(struct inode *inode, struct file *file)
 
        file->private_data = wd_data;
 
-       kref_get(&wd_data->kref);
+       if (!watchdog_hw_running(wdd))
+               kref_get(&wd_data->kref);
 
        /* dev/watchdog is a virtual (and thus non-seekable) filesystem */
        return nonseekable_open(inode, file);
@@ -585,6 +711,7 @@ static int watchdog_release(struct inode *inode, struct file *file)
        struct watchdog_core_data *wd_data = file->private_data;
        struct watchdog_device *wdd;
        int err = -EBUSY;
+       bool running;
 
        mutex_lock(&wd_data->lock);
 
@@ -609,14 +736,24 @@ static int watchdog_release(struct inode *inode, struct file *file)
                watchdog_ping(wdd);
        }
 
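+       /*
+        * Cancel any pending worker ping, then let watchdog_update_worker()
+        * restart it if the hardware still needs to be fed.
+        */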
+       cancel_delayed_work_sync(&wd_data->work);
+       watchdog_update_worker(wdd);
+
        /* make sure that /dev/watchdog can be re-opened */
        clear_bit(_WDOG_DEV_OPEN, &wd_data->status);
 
 done:
+       running = wdd && watchdog_hw_running(wdd);
        mutex_unlock(&wd_data->lock);
-       /* Allow the owner module to be unloaded again */
-       module_put(wd_data->cdev.owner);
-       kref_put(&wd_data->kref, watchdog_core_data_release);
+       /*
+        * Allow the owner module to be unloaded again unless the watchdog
+        * is still running. If the watchdog is still running, it can not
+        * be stopped, and its driver must not be unloaded.
+        */
+       if (!running) {
+               module_put(wd_data->cdev.owner);
+               kref_put(&wd_data->kref, watchdog_core_data_release);
+       }
        return 0;
 }
 
@@ -658,6 +795,11 @@ static int watchdog_cdev_register(struct watchdog_device *wdd, dev_t devno)
        wd_data->wdd = wdd;
        wdd->wd_data = wd_data;
 
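+       /* Bail out if the keepalive workqueue could not be created. */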
+       if (!watchdog_wq)
+               return -ENODEV;
+
+       INIT_DELAYED_WORK(&wd_data->work, watchdog_ping_work);
+
        if (wdd->id == 0) {
                old_wd_data = wd_data;
                watchdog_miscdev.parent = wdd->parent;
@@ -688,8 +830,23 @@ static int watchdog_cdev_register(struct watchdog_device *wdd, dev_t devno)
                        old_wd_data = NULL;
                        kref_put(&wd_data->kref, watchdog_core_data_release);
                }
+               return err;
        }
-       return err;
+
+       /* Record time of most recent heartbeat as 'just before now'. */
+       wd_data->last_hw_keepalive = jiffies - 1;
+
+       /*
+        * If the watchdog is running, prevent its driver from being unloaded,
+        * and schedule an immediate ping.
+        */
+       if (watchdog_hw_running(wdd)) {
+               __module_get(wdd->ops->owner);
+               kref_get(&wd_data->kref);
+               queue_delayed_work(watchdog_wq, &wd_data->work, 0);
+       }
+
+       return 0;
 }
 
 /*
@@ -715,6 +872,8 @@ static void watchdog_cdev_unregister(struct watchdog_device *wdd)
        wdd->wd_data = NULL;
        mutex_unlock(&wd_data->lock);
 
+       cancel_delayed_work_sync(&wd_data->work);
+
        kref_put(&wd_data->kref, watchdog_core_data_release);
 }
 
@@ -780,6 +939,13 @@ int __init watchdog_dev_init(void)
 {
        int err;
 
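+       /*
+        * WQ_MEM_RECLAIM guarantees a rescuer thread, so keepalive pings
+        * can make progress even under memory pressure.
+        */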
+       watchdog_wq = alloc_workqueue("watchdogd",
+                                     WQ_HIGHPRI | WQ_MEM_RECLAIM, 0);
+       if (!watchdog_wq) {
+               pr_err("Failed to create watchdog workqueue\n");
+               return -ENOMEM;
+       }
+
        err = class_register(&watchdog_class);
        if (err < 0) {
                pr_err("couldn't register class\n");
@@ -806,4 +972,5 @@ void __exit watchdog_dev_exit(void)
 {
        unregister_chrdev_region(watchdog_devt, MAX_DOGS);
        class_unregister(&watchdog_class);
+       destroy_workqueue(watchdog_wq);
 }