/*
 * Lockless get_user_pages_fast for powerpc
 *
 * Copyright (C) 2008 Nick Piggin
 * Copyright (C) 2008 Novell Inc.
 */
#undef DEBUG

#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/vmstat.h>
#include <linux/pagemap.h>
#include <linux/rwsem.h>
#include <asm/pgtable.h>

#ifdef __HAVE_ARCH_PTE_SPECIAL

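/*
 * Overview: this file implements a lockless walk of the user page tables.
 * Interrupts are disabled for the duration of the walk, which on powerpc
 * prevents the page tables themselves from being freed out from under us
 * (see the comment in __get_user_pages_fast() below).  Each level is read
 * exactly once with ACCESS_ONCE() so the walk operates on a consistent
 * snapshot, and any entry we cannot handle locklessly simply aborts the
 * fast path; the caller falls back to the regular, mmap_sem-protected
 * get_user_pages().
 */
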
/*
 * The performance critical leaf functions are made noinline otherwise gcc
 * inlines everything into a single function which results in too much
 * register pressure.
 */
static noinline int gup_pte_range(pmd_t pmd, unsigned long addr,
		unsigned long end, int write, struct page **pages, int *nr)
{
	unsigned long mask, result;
	pte_t *ptep;

	result = _PAGE_PRESENT|_PAGE_USER;
	if (write)
		result |= _PAGE_RW;
	mask = result | _PAGE_SPECIAL;

	ptep = pte_offset_kernel(&pmd, addr);
	do {
		pte_t pte = ACCESS_ONCE(*ptep);
		struct page *page;

		if ((pte_val(pte) & mask) != result)
			return 0;
		VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
		page = pte_page(pte);
		if (!page_cache_get_speculative(page))
			return 0;
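		/*
		 * We took the reference without holding the page table
		 * lock, so re-read the pte: if it changed under us (for
		 * example a racing unmap), drop the reference and bail
		 * to the slow path rather than pin the wrong page.
		 */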
		if (unlikely(pte_val(pte) != pte_val(*ptep))) {
			put_page(page);
			return 0;
		}
		pages[*nr] = page;
		(*nr)++;

	} while (ptep++, addr += PAGE_SIZE, addr != end);

	return 1;
}

static int gup_pmd_range(pud_t pud, unsigned long addr, unsigned long end,
		int write, struct page **pages, int *nr)
{
	unsigned long next;
	pmd_t *pmdp;

	pmdp = pmd_offset(&pud, addr);
	do {
		pmd_t pmd = ACCESS_ONCE(*pmdp);

		next = pmd_addr_end(addr, end);
		/*
		 * If we find a splitting transparent hugepage we return
		 * zero.  That will result in taking the slow path, which
		 * will call wait_split_huge_page() if the pmd is still in
		 * the splitting state.
		 */
		if (pmd_none(pmd) || pmd_trans_splitting(pmd))
			return 0;
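		/*
		 * A huge pmd maps the whole range covered by this entry
		 * directly, so hand it to the hugepte walker.  On powerpc,
		 * a page table slot may instead hold a hugepage directory
		 * (hugepd) describing hugepages of another size; those go
		 * through gup_hugepd() instead.
		 */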
		if (pmd_huge(pmd) || pmd_large(pmd)) {
			if (!gup_hugepte((pte_t *)pmdp, PMD_SIZE, addr, next,
					 write, pages, nr))
				return 0;
		} else if (is_hugepd(pmdp)) {
			if (!gup_hugepd((hugepd_t *)pmdp, PMD_SHIFT,
					addr, next, write, pages, nr))
				return 0;
		} else if (!gup_pte_range(pmd, addr, next, write, pages, nr))
			return 0;
	} while (pmdp++, addr = next, addr != end);

	return 1;
}

static int gup_pud_range(pgd_t pgd, unsigned long addr, unsigned long end,
		int write, struct page **pages, int *nr)
{
	unsigned long next;
	pud_t *pudp;

	pudp = pud_offset(&pgd, addr);
	do {
		pud_t pud = ACCESS_ONCE(*pudp);

		next = pud_addr_end(addr, end);
		if (pud_none(pud))
			return 0;
		if (pud_huge(pud)) {
			if (!gup_hugepte((pte_t *)pudp, PUD_SIZE, addr, next,
					 write, pages, nr))
				return 0;
		} else if (is_hugepd(pudp)) {
			if (!gup_hugepd((hugepd_t *)pudp, PUD_SHIFT,
					addr, next, write, pages, nr))
				return 0;
		} else if (!gup_pmd_range(pud, addr, next, write, pages, nr))
			return 0;
	} while (pudp++, addr = next, addr != end);

	return 1;
}

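/*
 * __get_user_pages_fast() is the atomic variant: it never sleeps, never
 * takes mmap_sem, and returns the number of pages it managed to pin
 * (which may be fewer than nr_pages, including zero).  The caller is
 * expected to fall back to the slow path for whatever remains, as
 * get_user_pages_fast() below does.
 */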
int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
			  struct page **pages)
{
	struct mm_struct *mm = current->mm;
	unsigned long addr, len, end;
	unsigned long next;
	unsigned long flags;
	pgd_t *pgdp;
	int nr = 0;

	pr_devel("%s(%lx,%x,%s)\n", __func__, start, nr_pages,
		 write ? "write" : "read");

	start &= PAGE_MASK;
	addr = start;
	len = (unsigned long) nr_pages << PAGE_SHIFT;
	end = start + len;

	if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
					start, len)))
		return 0;

	pr_devel(" aligned: %lx .. %lx\n", start, end);

143 | /* |
144 | * XXX: batch / limit 'nr', to avoid large irq off latency | |
145 | * needs some instrumenting to determine the common sizes used by | |
146 | * important workloads (eg. DB2), and whether limiting the batch size | |
147 | * will decrease performance. | |
148 | * | |
149 | * It seems like we're in the clear for the moment. Direct-IO is | |
150 | * the main guy that batches up lots of get_user_pages, and even | |
151 | * they are limited to 64-at-a-time which is not so many. | |
152 | */ | |
153 | /* | |
154 | * This doesn't prevent pagetable teardown, but does prevent | |
155 | * the pagetables from being freed on powerpc. | |
156 | * | |
157 | * So long as we atomically load page table pointers versus teardown, | |
158 | * we can follow the address down to the the page and take a ref on it. | |
159 | */ | |
95f715b0 | 160 | local_irq_save(flags); |
ce0ad7f0 | 161 | |
a4fe3ce7 DG |
	pgdp = pgd_offset(mm, addr);
	do {
		pgd_t pgd = ACCESS_ONCE(*pgdp);

		pr_devel(" %016lx: normal pgd %p\n", addr,
			 (void *)pgd_val(pgd));
		next = pgd_addr_end(addr, end);
		if (pgd_none(pgd))
			break;
		if (pgd_huge(pgd)) {
			if (!gup_hugepte((pte_t *)pgdp, PGDIR_SIZE, addr, next,
					 write, pages, &nr))
				break;
		} else if (is_hugepd(pgdp)) {
			if (!gup_hugepd((hugepd_t *)pgdp, PGDIR_SHIFT,
					addr, next, write, pages, &nr))
				break;
		} else if (!gup_pud_range(pgd, addr, next, write, pages, &nr))
			break;
	} while (pgdp++, addr = next, addr != end);

	local_irq_restore(flags);

	return nr;
}

int get_user_pages_fast(unsigned long start, int nr_pages, int write,
			struct page **pages)
{
	struct mm_struct *mm = current->mm;
	int nr, ret;

	start &= PAGE_MASK;
	nr = __get_user_pages_fast(start, nr_pages, write, pages);
	ret = nr;

	if (nr < nr_pages) {
		pr_devel(" slow path ! nr = %d\n", nr);

		/* Try to get the remaining pages with get_user_pages */
		start += nr << PAGE_SHIFT;
		pages += nr;

		down_read(&mm->mmap_sem);
		ret = get_user_pages(current, mm, start,
				     nr_pages - nr, write, 0, pages, NULL);
		up_read(&mm->mmap_sem);

		/* Have to be a bit careful with return values */
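		/*
		 * If the fast path pinned some pages, those pins must not
		 * be lost when the slow path fails: report the partial
		 * count instead of the error.  Otherwise combine the two
		 * counts into a single total.
		 */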
		if (nr > 0) {
			if (ret < 0)
				ret = nr;
			else
				ret += nr;
		}
	}

	return ret;
}

#endif /* __HAVE_ARCH_PTE_SPECIAL */
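
/*
 * Illustrative usage sketch (not part of the original file): a typical
 * caller pins a user buffer with get_user_pages_fast() and, mirroring
 * the "careful with return values" logic above, treats a short count as
 * a partial success that must be unwound.  pin_user_buffer() is a
 * hypothetical helper; the calling convention matches this file's
 * get_user_pages_fast(start, nr_pages, write, pages).
 */
static int __maybe_unused pin_user_buffer(unsigned long uaddr, int nr_pages,
					  int write, struct page **pages)
{
	int got, i;

	got = get_user_pages_fast(uaddr, nr_pages, write, pages);
	if (got == nr_pages)
		return 0;		/* every page is now pinned */

	/* Partial pin (or error): release what we did get and fail. */
	for (i = 0; i < got; i++)
		put_page(pages[i]);
	return got < 0 ? got : -EFAULT;
}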