staging/lustre/ldlm: Solve a race for LRU lock cancel
author Vitaly Fertman <vitaly.fertman@seagate.com>
Wed, 30 Mar 2016 23:49:06 +0000 (19:49 -0400)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Thu, 31 Mar 2016 04:38:13 +0000 (21:38 -0700)
This patch fixes a race condition in which a lock may be used again
after it has already passed the LRU cancellation policy check. In that
case the lock may hold locked or dirty pages, which makes the earlier
policy decision useless. The problem is solved by re-checking
l_last_used at cancellation time, which confirms that the lock has not
been used since the policy check was made.
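
A minimal, self-contained userspace sketch of the pattern (hypothetical
names, a pthread mutex standing in for the namespace ns_lock spinlock;
a model of the idea, not the Lustre API): the last-use time is sampled
before the unlocked policy check, and the lock is only taken off the
LRU if that sample is still current.

#include <pthread.h>
#include <stdbool.h>
#include <time.h>

/* Hypothetical stand-in for a lock hanging off an LRU list. */
struct demo_lock {
	time_t		last_used;	/* bumped every time the lock is re-used */
	bool		on_lru;
	pthread_mutex_t	lru_lock;	/* models the namespace ns_lock */
};

/* Hypothetical cancel policy; like the real pf() call, it runs without
 * the LRU lock held, so its verdict can go stale immediately. */
static bool demo_cancel_policy(struct demo_lock *lk)
{
	return lk->on_lru;
}

/*
 * Remove the lock from the LRU, but only if it has not been re-used
 * since @last_use was sampled (last_use == 0 skips that check).
 * Returns 1 if the lock was on the LRU and was removed, 0 otherwise.
 */
static int demo_remove_from_lru_check(struct demo_lock *lk, time_t last_use)
{
	int removed = 0;

	pthread_mutex_lock(&lk->lru_lock);
	if ((last_use == 0 || last_use == lk->last_used) && lk->on_lru) {
		lk->on_lru = false;
		removed = 1;
	}
	pthread_mutex_unlock(&lk->lru_lock);
	return removed;
}

/* Caller side: sample, run the unlocked policy check, then re-validate
 * the sample while removing the lock from the LRU. */
static void demo_try_cancel(struct demo_lock *lk)
{
	time_t last_use = lk->last_used;

	if (!demo_cancel_policy(lk))
		return;

	if (demo_remove_from_lru_check(lk, last_use) == 0)
		return;	/* re-used (or gone) since the check: skip the cancel */

	/* ... safe to cancel: the lock stayed idle for the whole window ... */
}

In effect l_last_used doubles as a version counter: any re-use in the
window between the policy check and the LRU removal changes it, so the
stale snapshot makes the removal fail and the cancel is skipped.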

Signed-off-by: Jinshan Xiong <jinshan.xiong@intel.com>
Signed-off-by: Vitaly Fertman <vitaly_fertman@xyratex.com>
Reviewed-on: http://review.whamcloud.com/12603
Intel-bug-id: https://jira.hpdd.intel.com/browse/LU-5781
Reviewed-by: James Simmons <uja.ornl@yahoo.com>
Signed-off-by: Oleg Drokin <green@linuxhacker.ru>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
drivers/staging/lustre/lustre/ldlm/ldlm_internal.h
drivers/staging/lustre/lustre/ldlm/ldlm_lock.c
drivers/staging/lustre/lustre/ldlm/ldlm_request.c

diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_internal.h b/drivers/staging/lustre/lustre/ldlm/ldlm_internal.h
index e31d84aad4e4694f5d58d0cd02274e8352053ebb..351f8b44947f68fdd7c2bac265435856561be87b 100644
@@ -146,7 +146,8 @@ void ldlm_lock_decref_internal(struct ldlm_lock *, __u32 mode);
 void ldlm_lock_decref_internal_nolock(struct ldlm_lock *, __u32 mode);
 int ldlm_run_ast_work(struct ldlm_namespace *ns, struct list_head *rpc_list,
                      enum ldlm_desc_ast_t ast_type);
-int ldlm_lock_remove_from_lru(struct ldlm_lock *lock);
+int ldlm_lock_remove_from_lru_check(struct ldlm_lock *lock, time_t last_use);
+#define ldlm_lock_remove_from_lru(lock) ldlm_lock_remove_from_lru_check(lock, 0)
 int ldlm_lock_remove_from_lru_nolock(struct ldlm_lock *lock);
 void ldlm_lock_destroy_nolock(struct ldlm_lock *lock);
 
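
The macro keeps the original single-argument entry point as a thin
wrapper, so existing callers compile unchanged while new callers can
pass a sampled timestamp to be verified; roughly the same shape as this
hypothetical sketch (names are illustrative, not Lustre API):

#include <time.h>

struct widget;

int widget_evict_check(struct widget *w, time_t last_use);

/* Old name preserved: passing 0 means "remove unconditionally". */
#define widget_evict(w)	widget_evict_check((w), 0)
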
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_lock.c b/drivers/staging/lustre/lustre/ldlm/ldlm_lock.c
index 27a051b085b5d0f4f5e592536828321c66c5b025..3f9b852627705056ea3f1ac31d174ca29ad36c30 100644
@@ -229,15 +229,25 @@ int ldlm_lock_remove_from_lru_nolock(struct ldlm_lock *lock)
 
 /**
  * Removes LDLM lock \a lock from LRU. Obtains the LRU lock first.
+ *
+ * If \a last_use is non-zero, it will remove the lock from LRU only if
+ * it matches lock's l_last_used.
+ *
+ * \retval 0 the lock was not in the LRU list, or \a last_use is non-zero
+ *           and does not match the lock's l_last_used (the lock is left
+ *           in the LRU);
+ * \retval 1 the lock was in the LRU list and has been removed.
  */
-int ldlm_lock_remove_from_lru(struct ldlm_lock *lock)
+int ldlm_lock_remove_from_lru_check(struct ldlm_lock *lock, time_t last_use)
 {
        struct ldlm_namespace *ns = ldlm_lock_to_ns(lock);
-       int rc;
+       int rc = 0;
 
        spin_lock(&ns->ns_lock);
-       rc = ldlm_lock_remove_from_lru_nolock(lock);
+       if (last_use == 0 || last_use == lock->l_last_used)
+               rc = ldlm_lock_remove_from_lru_nolock(lock);
        spin_unlock(&ns->ns_lock);
+
        return rc;
 }
 
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_request.c b/drivers/staging/lustre/lustre/ldlm/ldlm_request.c
index 9aa4c2dfe143d989cfd0c61222ad6dc615c8be62..5b0e396a99088aa94e16d13e4c990fa78484a685 100644
@@ -1369,6 +1369,7 @@ static int ldlm_prepare_lru_list(struct ldlm_namespace *ns,
 
        while (!list_empty(&ns->ns_unused_list)) {
                ldlm_policy_res_t result;
+               time_t last_use = 0;
 
                /* all unused locks */
                if (remained-- <= 0)
@@ -1387,6 +1388,10 @@ static int ldlm_prepare_lru_list(struct ldlm_namespace *ns,
                                /* already processed */
                                continue;
 
+                       last_use = lock->l_last_used;
+                       if (last_use == cfs_time_current())
+                               continue;
+
                        /* Somebody is already doing CANCEL. No need for this
                         * lock in LRU, do not traverse it again.
                         */
@@ -1434,11 +1439,13 @@ static int ldlm_prepare_lru_list(struct ldlm_namespace *ns,
                lock_res_and_lock(lock);
                /* Check flags again under the lock. */
                if ((lock->l_flags & LDLM_FL_CANCELING) ||
-                   (ldlm_lock_remove_from_lru(lock) == 0)) {
+                   (ldlm_lock_remove_from_lru_check(lock, last_use) == 0)) {
                        /* Another thread is removing lock from LRU, or
                         * somebody is already doing CANCEL, or there
                         * is a blocking request which will send cancel
-                        * by itself, or the lock is no longer unused.
+                        * by itself, or the lock is no longer unused or
+                        * the lock has been used since the pf() call and
+                        * pages could be put under it.
                         */
                        unlock_res_and_lock(lock);
                        lu_ref_del(&lock->l_reference,