projects
/
deliverable
/
linux.git
/ blobdiff
commit
grep
author
committer
pickaxe
?
search:
re
summary
|
shortlog
|
log
|
commit
|
commitdiff
|
tree
raw
|
inline
| side by side
thp: implement split_huge_pmd()
[deliverable/linux.git]
/
mm
/
mempolicy.c
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 87a177917cb2e60a13b09e6a53836ccd9f9275bf..5f7f9dace3546cf08256dd2674c54e25604448c6 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -493,7 +493,7 @@ static int queue_pages_pte_range(pmd_t *pmd, unsigned long addr,
pte_t *pte;
spinlock_t *ptl;
pte_t *pte;
spinlock_t *ptl;
-	split_huge_page_pmd(vma, addr, pmd);
+	split_huge_pmd(vma, pmd, addr);
if (pmd_trans_unstable(pmd))
return 0;
if (pmd_trans_unstable(pmd))
return 0;
@@ -2142,12 +2142,14 @@ bool __mpol_equal(struct mempolicy *a, struct mempolicy *b)
*
* Remember policies even when nobody has shared memory mapped.
* The policies are kept in Red-Black tree linked from the inode.
*
* Remember policies even when nobody has shared memory mapped.
* The policies are kept in Red-Black tree linked from the inode.
- * They are protected by the sp->lock spinlock, which should be held
+ * They are protected by the sp->lock rwlock, which should be held
* for any accesses to the tree.
*/
* for any accesses to the tree.
*/
-/* lookup first element intersecting start-end */
-/* Caller holds sp->lock */
+/*
+ * lookup first element intersecting start-end. Caller holds sp->lock for
+ * reading or for writing
+ */
static struct sp_node *
sp_lookup(struct shared_policy *sp, unsigned long start, unsigned long end)
{
static struct sp_node *
sp_lookup(struct shared_policy *sp, unsigned long start, unsigned long end)
{
@@ -2178,8 +2180,10 @@ sp_lookup(struct shared_policy *sp, unsigned long start, unsigned long end)
return rb_entry(n, struct sp_node, nd);
}
return rb_entry(n, struct sp_node, nd);
}
-/* Insert a new shared policy into the list. */
-/* Caller holds sp->lock */
+/*
+ * Insert a new shared policy into the list. Caller holds sp->lock for
+ * writing.
+ */
static void sp_insert(struct shared_policy *sp, struct sp_node *new)
{
struct rb_node **p = &sp->root.rb_node;
static void sp_insert(struct shared_policy *sp, struct sp_node *new)
{
struct rb_node **p = &sp->root.rb_node;
@@ -2211,13 +2215,13 @@ mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx)
if (!sp->root.rb_node)
return NULL;
if (!sp->root.rb_node)
return NULL;
-	spin_lock(&sp->lock);
+	read_lock(&sp->lock);
sn = sp_lookup(sp, idx, idx+1);
if (sn) {
mpol_get(sn->policy);
pol = sn->policy;
}
sn = sp_lookup(sp, idx, idx+1);
if (sn) {
mpol_get(sn->policy);
pol = sn->policy;
}
-	spin_unlock(&sp->lock);
+	read_unlock(&sp->lock);
return pol;
}
return pol;
}
@@ -2360,7 +2364,7 @@ static int shared_policy_replace(struct shared_policy *sp, unsigned long start,
int ret = 0;
restart:
int ret = 0;
restart:
-	spin_lock(&sp->lock);
+	write_lock(&sp->lock);
n = sp_lookup(sp, start, end);
/* Take care of old policies in the same range. */
while (n && n->start < end) {
n = sp_lookup(sp, start, end);
/* Take care of old policies in the same range. */
while (n && n->start < end) {
@@ -2393,7 +2397,7 @@ restart:
}
if (new)
sp_insert(sp, new);
}
if (new)
sp_insert(sp, new);
-	spin_unlock(&sp->lock);
+	write_unlock(&sp->lock);
ret = 0;
err_out:
ret = 0;
err_out:
@@ -2405,7 +2409,7 @@ err_out:
return ret;
alloc_new:
return ret;
alloc_new:
-	spin_unlock(&sp->lock);
+	write_unlock(&sp->lock);
ret = -ENOMEM;
n_new = kmem_cache_alloc(sn_cache, GFP_KERNEL);
if (!n_new)
ret = -ENOMEM;
n_new = kmem_cache_alloc(sn_cache, GFP_KERNEL);
if (!n_new)
@@ -2431,7 +2435,7 @@ void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol)
int ret;
sp->root = RB_ROOT; /* empty tree == default mempolicy */
int ret;
sp->root = RB_ROOT; /* empty tree == default mempolicy */
-	spin_lock_init(&sp->lock);
+	rwlock_init(&sp->lock);
if (mpol) {
struct vm_area_struct pvma;
if (mpol) {
struct vm_area_struct pvma;
@@ -2497,14 +2501,14 @@ void mpol_free_shared_policy(struct shared_policy *p)
if (!p->root.rb_node)
return;
if (!p->root.rb_node)
return;
-	spin_lock(&p->lock);
+	write_lock(&p->lock);
next = rb_first(&p->root);
while (next) {
n = rb_entry(next, struct sp_node, nd);
next = rb_next(&n->nd);
sp_delete(p, n);
}
next = rb_first(&p->root);
while (next) {
n = rb_entry(next, struct sp_node, nd);
next = rb_next(&n->nd);
sp_delete(p, n);
}
-	spin_unlock(&p->lock);
+	write_unlock(&p->lock);
}
#ifdef CONFIG_NUMA_BALANCING
}
#ifdef CONFIG_NUMA_BALANCING
This page took
0.026169 seconds
and
5
git commands to generate.