/*
 * Lockless get_user_pages_fast for x86
 *
 * Copyright (C) 2008 Nick Piggin
 * Copyright (C) 2008 Novell Inc.
 */
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/vmstat.h>
#include <linux/highmem.h>
#include <linux/swap.h>
#include <linux/memremap.h>

#include <asm/pgtable.h>

static inline pte_t gup_get_pte(pte_t *ptep)
{
#ifndef CONFIG_X86_PAE
	return READ_ONCE(*ptep);
#else
	/*
	 * With get_user_pages_fast, we walk down the pagetables without taking
	 * any locks. For this we would like to load the pointers atomically,
	 * but that is not possible (without expensive cmpxchg8b) on PAE. What
	 * we do have is the guarantee that a pte will only either go from not
	 * present to present, or present to not present or both -- it will not
	 * switch to a completely different present page without a TLB flush in
	 * between; something that we are blocking by holding interrupts off.
	 *
	 * Setting ptes from not present to present goes:
	 * ptep->pte_high = h;
	 * smp_wmb();
	 * ptep->pte_low = l;
	 *
	 * And present to not present goes:
	 * ptep->pte_low = 0;
	 * smp_wmb();
	 * ptep->pte_high = 0;
	 *
	 * We must ensure here that the load of pte_low sees l iff pte_high
	 * sees h. We load pte_high *after* loading pte_low, which ensures we
	 * don't see an older value of pte_high. *Then* we recheck pte_low,
	 * which ensures that we haven't picked up a changed pte high. We might
	 * have got rubbish values from pte_low and pte_high, but we are
	 * guaranteed that pte_low will not have the present bit set *unless*
	 * it is 'l'. And get_user_pages_fast only operates on present ptes, so
	 * we're safe.
	 *
	 * gup_get_pte should not be used or copied outside gup.c without being
	 * very careful -- it does not atomically load the pte or anything that
	 * is likely to be useful for you.
	 */
	pte_t pte;

retry:
	pte.pte_low = ptep->pte_low;
	smp_rmb();
	pte.pte_high = ptep->pte_high;
	smp_rmb();
	if (unlikely(pte.pte_low != ptep->pte_low))
		goto retry;

	return pte;
#endif
}

static void undo_dev_pagemap(int *nr, int nr_start, struct page **pages)
{
	while ((*nr) - nr_start) {
		struct page *page = pages[--(*nr)];

		ClearPageReferenced(page);
		put_page(page);
	}
}

/*
 * The performance critical leaf functions are made noinline otherwise gcc
 * inlines everything into a single function which results in too much
 * register pressure.
 */
static noinline int gup_pte_range(pmd_t pmd, unsigned long addr,
		unsigned long end, int write, struct page **pages, int *nr)
{
	struct dev_pagemap *pgmap = NULL;
	unsigned long mask;
	int nr_start = *nr;
	pte_t *ptep;

	mask = _PAGE_PRESENT|_PAGE_USER;
	if (write)
		mask |= _PAGE_RW;

	ptep = pte_offset_map(&pmd, addr);
	do {
		pte_t pte = gup_get_pte(ptep);
		struct page *page;

		/* Similar to the PMD case, NUMA hinting must take slow path */
		if (pte_protnone(pte)) {
			pte_unmap(ptep);
			return 0;
		}

		page = pte_page(pte);
		if (pte_devmap(pte)) {
			pgmap = get_dev_pagemap(pte_pfn(pte), pgmap);
			if (unlikely(!pgmap)) {
				undo_dev_pagemap(nr, nr_start, pages);
				pte_unmap(ptep);
				return 0;
			}
		} else if ((pte_flags(pte) & (mask | _PAGE_SPECIAL)) != mask) {
			pte_unmap(ptep);
			return 0;
		}
		VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
		get_page(page);
		put_dev_pagemap(pgmap);
		SetPageReferenced(page);
		pages[*nr] = page;
		(*nr)++;

	} while (ptep++, addr += PAGE_SIZE, addr != end);
	pte_unmap(ptep - 1);

	return 1;
}

static inline void get_head_page_multiple(struct page *page, int nr)
{
	VM_BUG_ON_PAGE(page != compound_head(page), page);
	VM_BUG_ON_PAGE(page_count(page) == 0, page);
	atomic_add(nr, &page->_count);
	SetPageReferenced(page);
}

static int __gup_device_huge_pmd(pmd_t pmd, unsigned long addr,
		unsigned long end, struct page **pages, int *nr)
{
	int nr_start = *nr;
	unsigned long pfn = pmd_pfn(pmd);
	struct dev_pagemap *pgmap = NULL;

	pfn += (addr & ~PMD_MASK) >> PAGE_SHIFT;
	do {
		struct page *page = pfn_to_page(pfn);

		pgmap = get_dev_pagemap(pfn, pgmap);
		if (unlikely(!pgmap)) {
			undo_dev_pagemap(nr, nr_start, pages);
			return 0;
		}
		SetPageReferenced(page);
		pages[*nr] = page;
		get_page(page);
		put_dev_pagemap(pgmap);
		(*nr)++;
		pfn++;
	} while (addr += PAGE_SIZE, addr != end);
	return 1;
}

static noinline int gup_huge_pmd(pmd_t pmd, unsigned long addr,
		unsigned long end, int write, struct page **pages, int *nr)
{
	unsigned long mask;
	struct page *head, *page;
	int refs;

	mask = _PAGE_PRESENT|_PAGE_USER;
	if (write)
		mask |= _PAGE_RW;
	if ((pmd_flags(pmd) & mask) != mask)
		return 0;

	VM_BUG_ON(!pfn_valid(pmd_pfn(pmd)));
	if (pmd_devmap(pmd))
		return __gup_device_huge_pmd(pmd, addr, end, pages, nr);

	/* hugepages are never "special" */
	VM_BUG_ON(pmd_flags(pmd) & _PAGE_SPECIAL);

	refs = 0;
	head = pmd_page(pmd);
	page = head + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
	do {
		VM_BUG_ON_PAGE(compound_head(page) != head, page);
		pages[*nr] = page;
		(*nr)++;
		page++;
		refs++;
	} while (addr += PAGE_SIZE, addr != end);
	get_head_page_multiple(head, refs);

	return 1;
}

static int gup_pmd_range(pud_t pud, unsigned long addr, unsigned long end,
		int write, struct page **pages, int *nr)
{
	unsigned long next;
	pmd_t *pmdp;

	pmdp = pmd_offset(&pud, addr);
	do {
		pmd_t pmd = *pmdp;

		next = pmd_addr_end(addr, end);
		if (pmd_none(pmd))
			return 0;
		if (unlikely(pmd_large(pmd) || !pmd_present(pmd))) {
			/*
			 * NUMA hinting faults need to be handled in the GUP
			 * slowpath for accounting purposes and so that they
			 * can be serialised against THP migration.
			 */
			if (pmd_protnone(pmd))
				return 0;
			if (!gup_huge_pmd(pmd, addr, next, write, pages, nr))
				return 0;
		} else {
			if (!gup_pte_range(pmd, addr, next, write, pages, nr))
				return 0;
		}
	} while (pmdp++, addr = next, addr != end);

	return 1;
}

static noinline int gup_huge_pud(pud_t pud, unsigned long addr,
		unsigned long end, int write, struct page **pages, int *nr)
{
	unsigned long mask;
	struct page *head, *page;
	int refs;

	mask = _PAGE_PRESENT|_PAGE_USER;
	if (write)
		mask |= _PAGE_RW;
	if ((pud_flags(pud) & mask) != mask)
		return 0;
	/* hugepages are never "special" */
	VM_BUG_ON(pud_flags(pud) & _PAGE_SPECIAL);
	VM_BUG_ON(!pfn_valid(pud_pfn(pud)));

	refs = 0;
	head = pud_page(pud);
	page = head + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
	do {
		VM_BUG_ON_PAGE(compound_head(page) != head, page);
		pages[*nr] = page;
		(*nr)++;
		page++;
		refs++;
	} while (addr += PAGE_SIZE, addr != end);
	get_head_page_multiple(head, refs);

	return 1;
}

static int gup_pud_range(pgd_t pgd, unsigned long addr, unsigned long end,
		int write, struct page **pages, int *nr)
{
	unsigned long next;
	pud_t *pudp;

	pudp = pud_offset(&pgd, addr);
	do {
		pud_t pud = *pudp;

		next = pud_addr_end(addr, end);
		if (pud_none(pud))
			return 0;
		if (unlikely(pud_large(pud))) {
			if (!gup_huge_pud(pud, addr, next, write, pages, nr))
				return 0;
		} else {
			if (!gup_pmd_range(pud, addr, next, write, pages, nr))
				return 0;
		}
	} while (pudp++, addr = next, addr != end);

	return 1;
}

/*
 * Like get_user_pages_fast() except it's IRQ-safe in that it won't fall
 * back to the regular GUP.
 */
int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
			  struct page **pages)
{
	struct mm_struct *mm = current->mm;
	unsigned long addr, len, end;
	unsigned long next;
	unsigned long flags;
	pgd_t *pgdp;
	int nr = 0;

	start &= PAGE_MASK;
	addr = start;
	len = (unsigned long) nr_pages << PAGE_SHIFT;
	end = start + len;
	if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
					(void __user *)start, len)))
		return 0;

	/*
	 * XXX: batch / limit 'nr', to avoid large irq off latency
	 * needs some instrumenting to determine the common sizes used by
	 * important workloads (eg. DB2), and whether limiting the batch size
	 * will decrease performance.
	 *
	 * It seems like we're in the clear for the moment. Direct-IO is
	 * the main guy that batches up lots of get_user_pages, and even
	 * they are limited to 64-at-a-time which is not so many.
	 */
	/*
	 * This doesn't prevent pagetable teardown, but does prevent
	 * the pagetables and pages from being freed on x86.
	 *
	 * So long as we atomically load page table pointers versus teardown
	 * (which we do on x86, with the above PAE exception), we can follow the
	 * address down to the page and take a ref on it.
	 */
	local_irq_save(flags);
	pgdp = pgd_offset(mm, addr);
	do {
		pgd_t pgd = *pgdp;

		next = pgd_addr_end(addr, end);
		if (pgd_none(pgd))
			break;
		if (!gup_pud_range(pgd, addr, next, write, pages, &nr))
			break;
	} while (pgdp++, addr = next, addr != end);
	local_irq_restore(flags);

	return nr;
}

/**
 * get_user_pages_fast() - pin user pages in memory
 * @start:	starting user address
 * @nr_pages:	number of pages from start to pin
 * @write:	whether pages will be written to
 * @pages:	array that receives pointers to the pages pinned.
 *		Should be at least nr_pages long.
 *
 * Attempt to pin user pages in memory without taking mm->mmap_sem.
 * If not successful, it will fall back to taking the lock and
 * calling get_user_pages().
 *
 * Returns number of pages pinned. This may be fewer than the number
 * requested. If nr_pages is 0 or negative, returns 0. If no pages
 * were pinned, returns -errno.
 */
int get_user_pages_fast(unsigned long start, int nr_pages, int write,
			struct page **pages)
{
	struct mm_struct *mm = current->mm;
	unsigned long addr, len, end;
	unsigned long next;
	pgd_t *pgdp;
	int nr = 0;

	start &= PAGE_MASK;
	addr = start;
	len = (unsigned long) nr_pages << PAGE_SHIFT;

	end = start + len;
	if (end < start)
		goto slow_irqon;

#ifdef CONFIG_X86_64
	if (end >> __VIRTUAL_MASK_SHIFT)
		goto slow_irqon;
#endif

	/*
	 * XXX: batch / limit 'nr', to avoid large irq off latency
	 * needs some instrumenting to determine the common sizes used by
	 * important workloads (eg. DB2), and whether limiting the batch size
	 * will decrease performance.
	 *
	 * It seems like we're in the clear for the moment. Direct-IO is
	 * the main guy that batches up lots of get_user_pages, and even
	 * they are limited to 64-at-a-time which is not so many.
	 */
	/*
	 * This doesn't prevent pagetable teardown, but does prevent
	 * the pagetables and pages from being freed on x86.
	 *
	 * So long as we atomically load page table pointers versus teardown
	 * (which we do on x86, with the above PAE exception), we can follow the
	 * address down to the page and take a ref on it.
	 */
	local_irq_disable();
	pgdp = pgd_offset(mm, addr);
	do {
		pgd_t pgd = *pgdp;

		next = pgd_addr_end(addr, end);
		if (pgd_none(pgd))
			goto slow;
		if (!gup_pud_range(pgd, addr, next, write, pages, &nr))
			goto slow;
	} while (pgdp++, addr = next, addr != end);
	local_irq_enable();

	VM_BUG_ON(nr != (end - start) >> PAGE_SHIFT);
	return nr;

	{
		int ret;

slow:
		local_irq_enable();
slow_irqon:
		/* Try to get the remaining pages with get_user_pages */
		start += nr << PAGE_SHIFT;
		pages += nr;

		ret = get_user_pages_unlocked(start,
					      (end - start) >> PAGE_SHIFT,
					      write, 0, pages);

		/* Have to be a bit careful with return values */
		if (nr > 0) {
			if (ret < 0)
				ret = nr;
			else
				ret += nr;
		}

		return ret;
	}
}
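
For context, here is a minimal sketch of how a caller might use the get_user_pages_fast() API documented above (pin a user buffer, use it, then drop the references). The pin_user_buffer() helper, its user_addr/user_len arguments, and the error handling are illustrative assumptions, not part of gup.c:

/* Illustrative caller, not part of gup.c: pin a user buffer that the
 * kernel/device will write into, then release the page references. */
static int pin_user_buffer(unsigned long user_addr, size_t user_len)
{
	unsigned long first = user_addr >> PAGE_SHIFT;
	unsigned long last = (user_addr + user_len - 1) >> PAGE_SHIFT;
	int nr_pages = last - first + 1;
	struct page **pages;
	int i, pinned;

	pages = kcalloc(nr_pages, sizeof(*pages), GFP_KERNEL);
	if (!pages)
		return -ENOMEM;

	/* write == 1: the pinned pages will be written to */
	pinned = get_user_pages_fast(user_addr & PAGE_MASK, nr_pages, 1, pages);
	if (pinned < 0) {
		/* nothing was pinned; pinned holds -errno */
		kfree(pages);
		return pinned;
	}

	/* ... hand the (possibly partial) pinned range to the consumer ... */

	for (i = 0; i < pinned; i++)
		put_page(pages[i]);
	kfree(pages);

	return pinned == nr_pages ? 0 : -EFAULT;
}

Note that get_user_pages_fast() may pin fewer pages than requested, which is why the sketch checks the return value against nr_pages and only releases the pages that were actually pinned.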