mm: move most file-based accounting to the node
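In short: this patch (part of the node-based reclaim series) converts file-backed and THP statistics such as NR_ANON_THPS and NR_SHMEM_THPS from per-zone to per-node vmstat counters, and routes LRU locking through the node. A hedged sketch of the read side, assuming the standard vmstat accessors (the helper name anon_thp_pages is made up for illustration):

	/* Sketch: after this change, THP counts are read per node via
	 * node_page_state() on a pg_data_t, not via zone_page_state(). */
	static unsigned long anon_thp_pages(struct pglist_data *pgdat)
	{
		return node_page_state(pgdat, NR_ANON_THPS);
	}

The hunks below show the corresponding write side and the locking changes in mm/huge_memory.c.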
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index d3abbf249fa048b23f41f37960f4b90d7ba4779f..121a7f808216fbc157b8b069099935b05b62aa9d 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1458,10 +1458,10 @@ int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
 }
 
 /*
- * Returns true if a given pmd maps a thp, false otherwise.
+ * Returns page table lock pointer if a given pmd maps a thp, NULL otherwise.
  *
- * Note that if it returns true, this routine returns without unlocking page
- * table lock. So callers must unlock it.
+ * Note that if it returns page table lock pointer, this routine returns without
+ * unlocking page table lock. So callers must unlock it.
  */
 spinlock_t *__pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma)
 {
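The updated comment matches the implementation: __pmd_trans_huge_lock() hands back the page table lock it took, or NULL if the pmd does not map a THP. A minimal caller sketch following that contract (example_walker and its body are hypothetical):

	static void example_walker(pmd_t *pmd, struct vm_area_struct *vma)
	{
		spinlock_t *ptl;

		ptl = __pmd_trans_huge_lock(pmd, vma);
		if (!ptl)
			return;		/* not a THP; nothing was locked */

		/* ... operate on the huge pmd while holding ptl ... */

		spin_unlock(ptl);	/* caller must drop the returned lock */
	}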
@@ -1586,7 +1586,7 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
 
        if (atomic_add_negative(-1, compound_mapcount_ptr(page))) {
                /* Last compound_mapcount is gone. */
-               __dec_zone_page_state(page, NR_ANON_THPS);
+               __dec_node_page_state(page, NR_ANON_THPS);
                if (TestClearPageDoubleMap(page)) {
                        /* No need in mapcount reference anymore */
                        for (i = 0; i < HPAGE_PMD_NR; i++)
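When the last compound mapping goes away, the THP counter is now adjusted on the page's node instead of its zone: __dec_node_page_state() resolves the page to its pg_data_t before touching NR_ANON_THPS, where the old __dec_zone_page_state() charged page_zone(page). Same call shape, different backing counter; a sketch of the switch (both helpers are real vmstat APIs, the wrapper is illustrative only):

	static void thp_unaccount(struct page *page)
	{
		/* was: __dec_zone_page_state(page, NR_ANON_THPS);  (zone counter) */
		__dec_node_page_state(page, NR_ANON_THPS);	/* node counter */
	}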
@@ -1818,7 +1818,7 @@ static void __split_huge_page(struct page *page, struct list_head *list,
        pgoff_t end = -1;
        int i;
 
-       lruvec = mem_cgroup_page_lruvec(head, zone);
+       lruvec = mem_cgroup_page_lruvec(head, zone->zone_pgdat);
 
        /* complete memcg works before add pages to LRU */
        mem_cgroup_split_huge_fixup(head);
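mem_cgroup_page_lruvec() now takes the owning node (struct pglist_data *) rather than a zone, which is why the call site passes zone->zone_pgdat. Where only the page is at hand, the page_pgdat() helper yields the same argument; a sketch (lookup_lruvec is a made-up wrapper):

	/* Sketch: the lruvec lookup is keyed by node after this series. */
	static struct lruvec *lookup_lruvec(struct page *page)
	{
		return mem_cgroup_page_lruvec(page, page_pgdat(page));
	}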
@@ -1848,7 +1848,7 @@ static void __split_huge_page(struct page *page, struct list_head *list,
                spin_unlock(&head->mapping->tree_lock);
        }
 
-       spin_unlock_irqrestore(&page_zone(head)->lru_lock, flags);
+       spin_unlock_irqrestore(zone_lru_lock(page_zone(head)), flags);
 
        unfreeze_page(head);
 
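The raw &page_zone(head)->lru_lock dereference is replaced by the zone_lru_lock() accessor because the LRU lock itself moved from struct zone to the node earlier in this series. The accessor is expected to look roughly like this (paraphrased, not part of this hunk):

	static inline spinlock_t *zone_lru_lock(struct zone *zone)
	{
		/* the lock now lives on the zone's owning node */
		return &zone->zone_pgdat->lru_lock;
	}

The identical substitutions later in the file, in split_huge_page_to_list()'s lock and unlock paths, follow the same rule.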
@@ -2034,7 +2034,7 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
                lru_add_drain();
 
        /* prevent PageLRU to go away from under us, and freeze lru stats */
-       spin_lock_irqsave(&page_zone(head)->lru_lock, flags);
+       spin_lock_irqsave(zone_lru_lock(page_zone(head)), flags);
 
        if (mapping) {
                void **pslot;
@@ -2061,7 +2061,7 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
                        list_del(page_deferred_list(head));
                }
                if (mapping)
-                       __dec_zone_page_state(page, NR_SHMEM_THPS);
+                       __dec_node_page_state(page, NR_SHMEM_THPS);
                spin_unlock(&pgdata->split_queue_lock);
                __split_huge_page(page, list, flags);
                ret = 0;
@@ -2077,7 +2077,7 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
                spin_unlock(&pgdata->split_queue_lock);
 fail:          if (mapping)
                        spin_unlock(&mapping->tree_lock);
-               spin_unlock_irqrestore(&page_zone(head)->lru_lock, flags);
+               spin_unlock_irqrestore(zone_lru_lock(page_zone(head)), flags);
                unfreeze_page(head);
                ret = -EBUSY;
        }