/*
 * Copyright 2002 Andi Kleen, SuSE Labs.
 * Thanks to Ben LaHaise for precious feedback.
 */
#include <linux/highmem.h>
#include <linux/bootmem.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>

#include <asm/e820.h>
#include <asm/processor.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
#include <asm/uaccess.h>
#include <asm/pgalloc.h>
#include <asm/proto.h>
#include <asm/pat.h>

/*
 * The current flushing context - we pass it instead of 5 arguments:
 */
struct cpa_data {
        unsigned long   vaddr;
        pgprot_t        mask_set;
        pgprot_t        mask_clr;
        int             numpages;
        int             flushtlb;
        unsigned long   pfn;
        unsigned        force_split : 1;
};

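/*
 * Illustrative only: for a set_memory_ro(addr, 16) call, the struct
 * above would be filled in as roughly
 *
 *      cpa.vaddr    = addr;
 *      cpa.numpages = 16;
 *      cpa.mask_set = __pgprot(0);
 *      cpa.mask_clr = __pgprot(_PAGE_RW);
 *
 * while flushtlb and pfn are filled in by the lower levels as the
 * operation progresses.
 */
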
#ifdef CONFIG_PROC_FS
static unsigned long direct_pages_count[PG_LEVEL_NUM];

void update_page_count(int level, unsigned long pages)
{
        unsigned long flags;

        /* Protect against CPA */
        spin_lock_irqsave(&pgd_lock, flags);
        direct_pages_count[level] += pages;
        spin_unlock_irqrestore(&pgd_lock, flags);
}

static void split_page_count(int level)
{
        direct_pages_count[level]--;
        direct_pages_count[level - 1] += PTRS_PER_PTE;
}

int arch_report_meminfo(char *page)
{
        int n = sprintf(page, "DirectMap4k: %8lu kB\n",
                        direct_pages_count[PG_LEVEL_4K] << 2);
#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
        n += sprintf(page + n, "DirectMap2M: %8lu kB\n",
                        direct_pages_count[PG_LEVEL_2M] << 11);
#else
        n += sprintf(page + n, "DirectMap4M: %8lu kB\n",
                        direct_pages_count[PG_LEVEL_2M] << 12);
#endif
#ifdef CONFIG_X86_64
        if (direct_gbpages)
                n += sprintf(page + n, "DirectMap1G: %8lu kB\n",
                        direct_pages_count[PG_LEVEL_1G] << 20);
#endif
        return n;
}
#else
static inline void split_page_count(int level) { }
#endif
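
/*
 * The shifts above convert page counts to kB: << 2 because a 4k page
 * is 4 kB, << 11 because a 2M page is 2048 kB, << 12 because a 4M
 * page is 4096 kB, and << 20 because a 1G page is 1048576 kB.
 * Illustrative meminfo output (the values are made up):
 *
 *      DirectMap4k:    12288 kB
 *      DirectMap2M:  4182016 kB
 */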

#ifdef CONFIG_X86_64

static inline unsigned long highmap_start_pfn(void)
{
        return __pa(_text) >> PAGE_SHIFT;
}

static inline unsigned long highmap_end_pfn(void)
{
        return __pa(round_up((unsigned long)_end, PMD_SIZE)) >> PAGE_SHIFT;
}

#endif

#ifdef CONFIG_DEBUG_PAGEALLOC
# define debug_pagealloc 1
#else
# define debug_pagealloc 0
#endif

static inline int
within(unsigned long addr, unsigned long start, unsigned long end)
{
        return addr >= start && addr < end;
}

/*
 * Flushing functions
 */

/**
 * clflush_cache_range - flush a cache range with clflush
 * @vaddr:	virtual start address
 * @size:	number of bytes to flush
 *
 * clflush is an unordered instruction which needs fencing with mfence
 * to avoid ordering issues.
 */
void clflush_cache_range(void *vaddr, unsigned int size)
{
        void *vend = vaddr + size - 1;

        mb();

        for (; vaddr < vend; vaddr += boot_cpu_data.x86_clflush_size)
                clflush(vaddr);
        /*
         * Flush any possible final partial cacheline:
         */
        clflush(vend);

        mb();
}

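/*
 * A minimal usage sketch (the caller and buffer are hypothetical):
 * make a freshly written buffer visible to a device that reads it
 * through an uncached alias:
 *
 *      memcpy(shared_buf, data, len);
 *      clflush_cache_range(shared_buf, len);
 *
 * The mb() pair in clflush_cache_range() fences the unordered
 * clflushes against surrounding accesses.
 */
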
static void __cpa_flush_all(void *arg)
{
        unsigned long cache = (unsigned long)arg;

        /*
         * Flush everything to work around an erratum in early Athlons
         * regarding large page flushing.
         */
        __flush_tlb_all();

        if (cache && boot_cpu_data.x86_model >= 4)
                wbinvd();
}

static void cpa_flush_all(unsigned long cache)
{
        BUG_ON(irqs_disabled());

        on_each_cpu(__cpa_flush_all, (void *) cache, 1);
}

static void __cpa_flush_range(void *arg)
{
        /*
         * We could optimize this further and do individual per-page
         * TLB invalidates for a small number of pages. Caveat: we must
         * flush the high aliases on 64-bit as well.
         */
        __flush_tlb_all();
}

static void cpa_flush_range(unsigned long start, int numpages, int cache)
{
        unsigned int i, level;
        unsigned long addr;

        BUG_ON(irqs_disabled());
        WARN_ON(PAGE_ALIGN(start) != start);

        on_each_cpu(__cpa_flush_range, NULL, 1);

        if (!cache)
                return;

        /*
         * We only need to flush on one CPU:
         * clflush is a MESI-coherent instruction that
         * will cause all other CPUs to flush the same
         * cachelines:
         */
        for (i = 0, addr = start; i < numpages; i++, addr += PAGE_SIZE) {
                pte_t *pte = lookup_address(addr, &level);

                /*
                 * Only flush present addresses:
                 */
                if (pte && (pte_val(*pte) & _PAGE_PRESENT))
                        clflush_cache_range((void *) addr, PAGE_SIZE);
        }
}

/*
 * Certain areas of memory on x86 require very specific protection flags,
 * for example the BIOS area or kernel text. Callers don't always get this
 * right (again, ioremap() on BIOS memory is not uncommon) so this function
 * checks and fixes these known static required protection bits.
 */
static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
                                          unsigned long pfn)
{
        pgprot_t forbidden = __pgprot(0);

        /*
         * The BIOS area between 640k and 1Mb needs to be executable for
         * PCI BIOS based config access (CONFIG_PCI_GOBIOS) support.
         */
        if (within(pfn, BIOS_BEGIN >> PAGE_SHIFT, BIOS_END >> PAGE_SHIFT))
                pgprot_val(forbidden) |= _PAGE_NX;

        /*
         * The kernel text needs to be executable for obvious reasons.
         * This does not cover __inittext, since that is gone later on.
         * On 64-bit we do not enforce !NX on the low mapping.
         */
        if (within(address, (unsigned long)_text, (unsigned long)_etext))
                pgprot_val(forbidden) |= _PAGE_NX;

        /*
         * The .rodata section needs to be read-only. Using the pfn
         * catches all aliases.
         */
        if (within(pfn, __pa((unsigned long)__start_rodata) >> PAGE_SHIFT,
                   __pa((unsigned long)__end_rodata) >> PAGE_SHIFT))
                pgprot_val(forbidden) |= _PAGE_RW;

        prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));

        return prot;
}

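/*
 * Example of the filtering above (a sketch; the request is
 * hypothetical): asking for _PAGE_NX on a page inside the kernel text
 * gets the NX bit stripped again, so the text stays executable:
 *
 *      prot = static_protections(__pgprot(_PAGE_NX),
 *                                (unsigned long)_text,
 *                                __pa(_text) >> PAGE_SHIFT);
 *      // pgprot_val(prot) no longer contains _PAGE_NX
 */
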
/*
 * Look up the page table entry for a virtual address. Return a pointer
 * to the entry and the level of the mapping.
 *
 * Note: We return pud and pmd either when the entry is marked large
 * or when the present bit is not set. Otherwise we would return a
 * pointer to a nonexistent mapping.
 */
pte_t *lookup_address(unsigned long address, unsigned int *level)
{
        pgd_t *pgd = pgd_offset_k(address);
        pud_t *pud;
        pmd_t *pmd;

        *level = PG_LEVEL_NONE;

        if (pgd_none(*pgd))
                return NULL;

        pud = pud_offset(pgd, address);
        if (pud_none(*pud))
                return NULL;

        *level = PG_LEVEL_1G;
        if (pud_large(*pud) || !pud_present(*pud))
                return (pte_t *)pud;

        pmd = pmd_offset(pud, address);
        if (pmd_none(*pmd))
                return NULL;

        *level = PG_LEVEL_2M;
        if (pmd_large(*pmd) || !pmd_present(*pmd))
                return (pte_t *)pmd;

        *level = PG_LEVEL_4K;

        return pte_offset_kernel(pmd, address);
}
EXPORT_SYMBOL_GPL(lookup_address);

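/*
 * Typical use (a sketch; addr is any kernel virtual address): find
 * out how an address is currently mapped:
 *
 *      unsigned int level;
 *      pte_t *pte = lookup_address(addr, &level);
 *
 *      if (pte && pte_present(*pte) && level == PG_LEVEL_2M)
 *              ;       // addr lives inside a 2M mapping
 *
 * Callers must check for NULL and, per the note above, may get a pud
 * or pmd entry back for large or non-present mappings.
 */
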
/*
 * Set the new pmd in all the pgds we know about:
 */
static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
{
        /* change init_mm */
        set_pte_atomic(kpte, pte);
#ifdef CONFIG_X86_32
        if (!SHARED_KERNEL_PMD) {
                struct page *page;

                list_for_each_entry(page, &pgd_list, lru) {
                        pgd_t *pgd;
                        pud_t *pud;
                        pmd_t *pmd;

                        pgd = (pgd_t *)page_address(page) + pgd_index(address);
                        pud = pud_offset(pgd, address);
                        pmd = pmd_offset(pud, address);
                        set_pte_atomic((pte_t *)pmd, pte);
                }
        }
#endif
}

static int
try_preserve_large_page(pte_t *kpte, unsigned long address,
                        struct cpa_data *cpa)
{
        unsigned long nextpage_addr, numpages, pmask, psize, flags, addr, pfn;
        pte_t new_pte, old_pte, *tmp;
        pgprot_t old_prot, new_prot;
        int i, do_split = 1;
        unsigned int level;

        if (cpa->force_split)
                return 1;

        spin_lock_irqsave(&pgd_lock, flags);
        /*
         * Check for races; another CPU might have split this page
         * up already:
         */
        tmp = lookup_address(address, &level);
        if (tmp != kpte)
                goto out_unlock;

        switch (level) {
        case PG_LEVEL_2M:
                psize = PMD_PAGE_SIZE;
                pmask = PMD_PAGE_MASK;
                break;
#ifdef CONFIG_X86_64
        case PG_LEVEL_1G:
                psize = PUD_PAGE_SIZE;
                pmask = PUD_PAGE_MASK;
                break;
#endif
        default:
                do_split = -EINVAL;
                goto out_unlock;
        }

        /*
         * Calculate the number of pages which fit into this large
         * page starting at address:
         */
        nextpage_addr = (address + psize) & pmask;
        numpages = (nextpage_addr - address) >> PAGE_SHIFT;
        if (numpages < cpa->numpages)
                cpa->numpages = numpages;

        /*
         * We are safe now. Check whether the new pgprot is the same:
         */
        old_pte = *kpte;
        old_prot = new_prot = pte_pgprot(old_pte);

        pgprot_val(new_prot) &= ~pgprot_val(cpa->mask_clr);
        pgprot_val(new_prot) |= pgprot_val(cpa->mask_set);

        /*
         * old_pte points to the large page base address. So we need
         * to add the offset of the virtual address:
         */
        pfn = pte_pfn(old_pte) + ((address & (psize - 1)) >> PAGE_SHIFT);
        cpa->pfn = pfn;

        new_prot = static_protections(new_prot, address, pfn);

        /*
         * We need to check the full range, whether
         * static_protections() requires a different pgprot for one of
         * the pages in the range we try to preserve:
         */
        addr = address + PAGE_SIZE;
        pfn++;
        for (i = 1; i < cpa->numpages; i++, addr += PAGE_SIZE, pfn++) {
                pgprot_t chk_prot = static_protections(new_prot, addr, pfn);

                if (pgprot_val(chk_prot) != pgprot_val(new_prot))
                        goto out_unlock;
        }

        /*
         * If there are no changes, return. cpa->numpages has been
         * updated above:
         */
        if (pgprot_val(new_prot) == pgprot_val(old_prot)) {
                do_split = 0;
                goto out_unlock;
        }

        /*
         * We need to change the attributes. Check whether we can
         * change the large page in one go. We request a split when
         * the address is not aligned or the number of pages is
         * smaller than the number of pages in the large page. Note
         * that we limited the number of possible pages already to
         * the number of pages in the large page.
         */
        if (address == (nextpage_addr - psize) && cpa->numpages == numpages) {
                /*
                 * The address is aligned and the number of pages
                 * covers the full page.
                 */
                new_pte = pfn_pte(pte_pfn(old_pte), canon_pgprot(new_prot));
                __set_pmd_pte(kpte, address, new_pte);
                cpa->flushtlb = 1;
                do_split = 0;
        }

out_unlock:
        spin_unlock_irqrestore(&pgd_lock, flags);

        return do_split;
}

static LIST_HEAD(page_pool);
static unsigned long pool_size, pool_pages, pool_low;
static unsigned long pool_used, pool_failed;

static void cpa_fill_pool(struct page **ret)
{
        gfp_t gfp = GFP_KERNEL;
        unsigned long flags;
        struct page *p;

        /*
         * Avoid recursion (on debug-pagealloc) and also signal
         * our priority to get to these pagetables:
         */
        if (current->flags & PF_MEMALLOC)
                return;
        current->flags |= PF_MEMALLOC;

        /*
         * Allocate atomically from atomic contexts:
         */
        if (in_atomic() || irqs_disabled() || debug_pagealloc)
                gfp = GFP_ATOMIC | __GFP_NORETRY | __GFP_NOWARN;

        while (pool_pages < pool_size || (ret && !*ret)) {
                p = alloc_pages(gfp, 0);
                if (!p) {
                        pool_failed++;
                        break;
                }
                /*
                 * If the call site needs a page right now, provide it:
                 */
                if (ret && !*ret) {
                        *ret = p;
                        continue;
                }
                spin_lock_irqsave(&pgd_lock, flags);
                list_add(&p->lru, &page_pool);
                pool_pages++;
                spin_unlock_irqrestore(&pgd_lock, flags);
        }

        current->flags &= ~PF_MEMALLOC;
}

#define SHIFT_MB		(20 - PAGE_SHIFT)
#define ROUND_MB_GB		((1 << 10) - 1)
#define SHIFT_MB_GB		10
#define POOL_PAGES_PER_GB	16

void __init cpa_init(void)
{
        struct sysinfo si;
        unsigned long gb;

        si_meminfo(&si);
        /*
         * Calculate the number of pool pages:
         *
         * Convert totalram (nr of pages) to MiB and round to the next
         * GiB. Shift MiB to GiB and multiply the result by
         * POOL_PAGES_PER_GB:
         */
        if (debug_pagealloc) {
                gb = ((si.totalram >> SHIFT_MB) + ROUND_MB_GB) >> SHIFT_MB_GB;
                pool_size = POOL_PAGES_PER_GB * gb;
        } else {
                pool_size = 1;
        }
        pool_low = pool_size;

        cpa_fill_pool(NULL);
        printk(KERN_DEBUG
               "CPA: page pool initialized %lu of %lu pages preallocated\n",
               pool_pages, pool_size);
}

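/*
 * Worked example of the sizing above (assuming 4k pages, i.e.
 * PAGE_SHIFT == 12 and thus SHIFT_MB == 8): with si.totalram ==
 * 1048576 pages (4 GiB), 1048576 >> 8 == 4096 MiB, and
 * (4096 + 1023) >> 10 == 4, so pool_size == POOL_PAGES_PER_GB * 4
 * == 64 pages.
 */
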
static int split_large_page(pte_t *kpte, unsigned long address)
{
        unsigned long flags, pfn, pfninc = 1;
        unsigned int i, level;
        pte_t *pbase, *tmp;
        pgprot_t ref_prot;
        struct page *base;

        /*
         * Get a page from the pool. The pool list is protected by the
         * pgd_lock, which we have to take anyway for the split
         * operation:
         */
        spin_lock_irqsave(&pgd_lock, flags);
        if (list_empty(&page_pool)) {
                spin_unlock_irqrestore(&pgd_lock, flags);
                base = NULL;
                cpa_fill_pool(&base);
                if (!base)
                        return -ENOMEM;
                spin_lock_irqsave(&pgd_lock, flags);
        } else {
                base = list_first_entry(&page_pool, struct page, lru);
                list_del(&base->lru);
                pool_pages--;

                if (pool_pages < pool_low)
                        pool_low = pool_pages;
        }

        /*
         * Check for races; another CPU might have split this page
         * up for us already:
         */
        tmp = lookup_address(address, &level);
        if (tmp != kpte)
                goto out_unlock;

        pbase = (pte_t *)page_address(base);
        paravirt_alloc_pte(&init_mm, page_to_pfn(base));
        ref_prot = pte_pgprot(pte_clrhuge(*kpte));

#ifdef CONFIG_X86_64
        if (level == PG_LEVEL_1G) {
                pfninc = PMD_PAGE_SIZE >> PAGE_SHIFT;
                pgprot_val(ref_prot) |= _PAGE_PSE;
        }
#endif

        /*
         * Get the target pfn from the original entry:
         */
        pfn = pte_pfn(*kpte);
        for (i = 0; i < PTRS_PER_PTE; i++, pfn += pfninc)
                set_pte(&pbase[i], pfn_pte(pfn, ref_prot));

        if (address >= (unsigned long)__va(0) &&
            address < (unsigned long)__va(max_low_pfn_mapped << PAGE_SHIFT))
                split_page_count(level);

#ifdef CONFIG_X86_64
        if (address >= (unsigned long)__va(1UL<<32) &&
            address < (unsigned long)__va(max_pfn_mapped << PAGE_SHIFT))
                split_page_count(level);
#endif

        /*
         * Install the new, split up pagetable. Important details here:
         *
         * On Intel the NX bit of all levels must be cleared to make a
         * page executable (see section 4.13.2 of the Intel 64 and
         * IA-32 Architectures Software Developer's Manual).
         *
         * Mark the entry present. The current mapping might be
         * set to not present, which we preserved above.
         */
        ref_prot = pte_pgprot(pte_mkexec(pte_clrhuge(*kpte)));
        pgprot_val(ref_prot) |= _PAGE_PRESENT;
        __set_pmd_pte(kpte, address, mk_pte(base, ref_prot));
        base = NULL;

out_unlock:
        /*
         * If we dropped out via the lookup_address check under
         * pgd_lock then stick the page back into the pool:
         */
        if (base) {
                list_add(&base->lru, &page_pool);
                pool_pages++;
        } else
                pool_used++;
        spin_unlock_irqrestore(&pgd_lock, flags);

        return 0;
}

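/*
 * Scale of the split above (illustrative): a 2M page is replaced by a
 * page table with PTRS_PER_PTE (512 on 64-bit) 4k entries and
 * pfninc == 1; a 1G page becomes 512 2M entries, so pfninc is
 * PMD_PAGE_SIZE >> PAGE_SHIFT == 512 and _PAGE_PSE stays set in
 * ref_prot.
 */
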
static int __change_page_attr(struct cpa_data *cpa, int primary)
{
        unsigned long address = cpa->vaddr;
        int do_split, err;
        unsigned int level;
        pte_t *kpte, old_pte;

repeat:
        kpte = lookup_address(address, &level);
        if (!kpte)
                return 0;

        old_pte = *kpte;
        if (!pte_val(old_pte)) {
                if (!primary)
                        return 0;
                WARN(1, KERN_WARNING "CPA: called for zero pte. "
                       "vaddr = %lx cpa->vaddr = %lx\n", address,
                       cpa->vaddr);
                return -EINVAL;
        }

        if (level == PG_LEVEL_4K) {
                pte_t new_pte;
                pgprot_t new_prot = pte_pgprot(old_pte);
                unsigned long pfn = pte_pfn(old_pte);

                pgprot_val(new_prot) &= ~pgprot_val(cpa->mask_clr);
                pgprot_val(new_prot) |= pgprot_val(cpa->mask_set);

                new_prot = static_protections(new_prot, address, pfn);

                /*
                 * We need to keep the pfn from the existing PTE;
                 * after all we're only going to change its attributes,
                 * not the memory it points to.
                 */
                new_pte = pfn_pte(pfn, canon_pgprot(new_prot));
                cpa->pfn = pfn;
                /*
                 * Do we really change anything?
                 */
                if (pte_val(old_pte) != pte_val(new_pte)) {
                        set_pte_atomic(kpte, new_pte);
                        cpa->flushtlb = 1;
                }
                cpa->numpages = 1;
                return 0;
        }

        /*
         * Check whether we can keep the large page intact
         * and just change the pte:
         */
        do_split = try_preserve_large_page(kpte, address, cpa);
        /*
         * When the range fits into the existing large page,
         * return. cpa->numpages and cpa->flushtlb have been updated
         * in try_preserve_large_page():
         */
        if (do_split <= 0)
                return do_split;

        /*
         * We have to split the large page:
         */
        err = split_large_page(kpte, address);
        if (!err) {
                cpa->flushtlb = 1;
                goto repeat;
        }

        return err;
}

static int __change_page_attr_set_clr(struct cpa_data *cpa, int checkalias);

static int cpa_process_alias(struct cpa_data *cpa)
{
        struct cpa_data alias_cpa;
        int ret = 0;

        if (cpa->pfn >= max_pfn_mapped)
                return 0;

#ifdef CONFIG_X86_64
        if (cpa->pfn >= max_low_pfn_mapped && cpa->pfn < (1UL<<(32-PAGE_SHIFT)))
                return 0;
#endif
        /*
         * No need to redo when the primary call touched the direct
         * mapping already:
         */
        if (!(within(cpa->vaddr, PAGE_OFFSET,
                    PAGE_OFFSET + (max_low_pfn_mapped << PAGE_SHIFT))
#ifdef CONFIG_X86_64
                || within(cpa->vaddr, PAGE_OFFSET + (1UL<<32),
                    PAGE_OFFSET + (max_pfn_mapped << PAGE_SHIFT))
#endif
        )) {

                alias_cpa = *cpa;
                alias_cpa.vaddr = (unsigned long) __va(cpa->pfn << PAGE_SHIFT);

                ret = __change_page_attr_set_clr(&alias_cpa, 0);
        }

#ifdef CONFIG_X86_64
        if (ret)
                return ret;
        /*
         * No need to redo when the primary call touched the high
         * mapping already:
         */
        if (within(cpa->vaddr, (unsigned long) _text, (unsigned long) _end))
                return 0;

        /*
         * If the physical address is inside the kernel map, we need
         * to touch the high mapped kernel as well:
         */
        if (!within(cpa->pfn, highmap_start_pfn(), highmap_end_pfn()))
                return 0;

        alias_cpa = *cpa;
        alias_cpa.vaddr =
                (cpa->pfn << PAGE_SHIFT) + __START_KERNEL_map - phys_base;

        /*
         * The high mapping range is imprecise, so ignore the return value.
         */
        __change_page_attr_set_clr(&alias_cpa, 0);
#endif
        return ret;
}

static int __change_page_attr_set_clr(struct cpa_data *cpa, int checkalias)
{
        int ret, numpages = cpa->numpages;

        while (numpages) {
                /*
                 * Store the remaining nr of pages for the large page
                 * preservation check.
                 */
                cpa->numpages = numpages;

                ret = __change_page_attr(cpa, checkalias);
                if (ret)
                        return ret;

                if (checkalias) {
                        ret = cpa_process_alias(cpa);
                        if (ret)
                                return ret;
                }

                /*
                 * Adjust the number of pages with the result of the
                 * CPA operation. Either a large page has been
                 * preserved or a single page update happened.
                 */
                BUG_ON(cpa->numpages > numpages);
                numpages -= cpa->numpages;
                cpa->vaddr += cpa->numpages * PAGE_SIZE;
        }
        return 0;
}

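/*
 * Illustrative walk of the loop above (the numbers are made up): for
 * a request that starts mid-way into a 2M mapping, an iteration
 * either consumes the whole remainder of the large page at once when
 * it can be preserved (cpa->numpages comes back as e.g. 256), or
 * consumes a single 4k page after a split (cpa->numpages == 1). The
 * invariant is only that cpa->numpages never exceeds the remaining
 * count.
 */
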
static inline int cache_attr(pgprot_t attr)
{
        return pgprot_val(attr) &
                (_PAGE_PAT | _PAGE_PAT_LARGE | _PAGE_PWT | _PAGE_PCD);
}

static int change_page_attr_set_clr(unsigned long addr, int numpages,
                                    pgprot_t mask_set, pgprot_t mask_clr,
                                    int force_split)
{
        struct cpa_data cpa;
        int ret, cache, checkalias;

        /*
         * Check whether we are asked to change an unsupported
         * feature:
         */
        mask_set = canon_pgprot(mask_set);
        mask_clr = canon_pgprot(mask_clr);
        if (!pgprot_val(mask_set) && !pgprot_val(mask_clr) && !force_split)
                return 0;

        /* Ensure we are PAGE_SIZE aligned */
        if (addr & ~PAGE_MASK) {
                addr &= PAGE_MASK;
                /*
                 * People should not be passing in unaligned addresses:
                 */
                WARN_ON_ONCE(1);
        }

        cpa.vaddr = addr;
        cpa.numpages = numpages;
        cpa.mask_set = mask_set;
        cpa.mask_clr = mask_clr;
        cpa.flushtlb = 0;
        cpa.force_split = force_split;

        /* No alias checking for _NX bit modifications */
        checkalias = (pgprot_val(mask_set) | pgprot_val(mask_clr)) != _PAGE_NX;

        ret = __change_page_attr_set_clr(&cpa, checkalias);

        /*
         * Check whether we really changed something:
         */
        if (!cpa.flushtlb)
                goto out;

        /*
         * No need to flush when we did not set any of the caching
         * attributes:
         */
        cache = cache_attr(mask_set);

        /*
         * On success we use clflush, when the CPU supports it, to
         * avoid the wbinvd. If the CPU does not support clflush, and
         * in the error case, we fall back to cpa_flush_all() (which
         * uses wbinvd):
         */
        if (!ret && cpu_has_clflush)
                cpa_flush_range(addr, numpages, cache);
        else
                cpa_flush_all(cache);

out:
        cpa_fill_pool(NULL);

        return ret;
}

static inline int change_page_attr_set(unsigned long addr, int numpages,
                                       pgprot_t mask)
{
        return change_page_attr_set_clr(addr, numpages, mask, __pgprot(0), 0);
}

static inline int change_page_attr_clear(unsigned long addr, int numpages,
                                         pgprot_t mask)
{
        return change_page_attr_set_clr(addr, numpages, __pgprot(0), mask, 0);
}

int _set_memory_uc(unsigned long addr, int numpages)
{
        /*
         * For now UC_MINUS; see the comments in ioremap_nocache().
         */
        return change_page_attr_set(addr, numpages,
                                    __pgprot(_PAGE_CACHE_UC_MINUS));
}

int set_memory_uc(unsigned long addr, int numpages)
{
        /*
         * For now UC_MINUS; see the comments in ioremap_nocache().
         */
        if (reserve_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE,
                            _PAGE_CACHE_UC_MINUS, NULL))
                return -EINVAL;

        return _set_memory_uc(addr, numpages);
}
EXPORT_SYMBOL(set_memory_uc);

int _set_memory_wc(unsigned long addr, int numpages)
{
        return change_page_attr_set(addr, numpages,
                                    __pgprot(_PAGE_CACHE_WC));
}

int set_memory_wc(unsigned long addr, int numpages)
{
        if (!pat_enabled)
                return set_memory_uc(addr, numpages);

        if (reserve_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE,
                            _PAGE_CACHE_WC, NULL))
                return -EINVAL;

        return _set_memory_wc(addr, numpages);
}
EXPORT_SYMBOL(set_memory_wc);

int _set_memory_wb(unsigned long addr, int numpages)
{
        return change_page_attr_clear(addr, numpages,
                                      __pgprot(_PAGE_CACHE_MASK));
}

int set_memory_wb(unsigned long addr, int numpages)
{
        free_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE);

        return _set_memory_wb(addr, numpages);
}
EXPORT_SYMBOL(set_memory_wb);

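/*
 * Typical pairing (a sketch; vaddr and nrpages are hypothetical): a
 * driver maps a buffer uncached for device access and restores
 * write-back before freeing it:
 *
 *      if (set_memory_uc((unsigned long)vaddr, nrpages))
 *              goto err;
 *      ...
 *      set_memory_wb((unsigned long)vaddr, nrpages);
 *
 * set_memory_uc() also reserves the range in the PAT memtype tracking
 * via reserve_memtype(), which set_memory_wb() releases again through
 * free_memtype().
 */
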
int set_memory_x(unsigned long addr, int numpages)
{
        return change_page_attr_clear(addr, numpages, __pgprot(_PAGE_NX));
}
EXPORT_SYMBOL(set_memory_x);

int set_memory_nx(unsigned long addr, int numpages)
{
        return change_page_attr_set(addr, numpages, __pgprot(_PAGE_NX));
}
EXPORT_SYMBOL(set_memory_nx);

int set_memory_ro(unsigned long addr, int numpages)
{
        return change_page_attr_clear(addr, numpages, __pgprot(_PAGE_RW));
}

int set_memory_rw(unsigned long addr, int numpages)
{
        return change_page_attr_set(addr, numpages, __pgprot(_PAGE_RW));
}

int set_memory_np(unsigned long addr, int numpages)
{
        return change_page_attr_clear(addr, numpages, __pgprot(_PAGE_PRESENT));
}

int set_memory_4k(unsigned long addr, int numpages)
{
        return change_page_attr_set_clr(addr, numpages, __pgprot(0),
                                        __pgprot(0), 1);
}

int set_pages_uc(struct page *page, int numpages)
{
        unsigned long addr = (unsigned long)page_address(page);

        return set_memory_uc(addr, numpages);
}
EXPORT_SYMBOL(set_pages_uc);

int set_pages_wb(struct page *page, int numpages)
{
        unsigned long addr = (unsigned long)page_address(page);

        return set_memory_wb(addr, numpages);
}
EXPORT_SYMBOL(set_pages_wb);

int set_pages_x(struct page *page, int numpages)
{
        unsigned long addr = (unsigned long)page_address(page);

        return set_memory_x(addr, numpages);
}
EXPORT_SYMBOL(set_pages_x);

int set_pages_nx(struct page *page, int numpages)
{
        unsigned long addr = (unsigned long)page_address(page);

        return set_memory_nx(addr, numpages);
}
EXPORT_SYMBOL(set_pages_nx);

int set_pages_ro(struct page *page, int numpages)
{
        unsigned long addr = (unsigned long)page_address(page);

        return set_memory_ro(addr, numpages);
}

int set_pages_rw(struct page *page, int numpages)
{
        unsigned long addr = (unsigned long)page_address(page);

        return set_memory_rw(addr, numpages);
}

#ifdef CONFIG_DEBUG_PAGEALLOC

static int __set_pages_p(struct page *page, int numpages)
{
        struct cpa_data cpa = { .vaddr = (unsigned long) page_address(page),
                                .numpages = numpages,
                                .mask_set = __pgprot(_PAGE_PRESENT | _PAGE_RW),
                                .mask_clr = __pgprot(0)};

        return __change_page_attr_set_clr(&cpa, 1);
}

static int __set_pages_np(struct page *page, int numpages)
{
        struct cpa_data cpa = { .vaddr = (unsigned long) page_address(page),
                                .numpages = numpages,
                                .mask_set = __pgprot(0),
                                .mask_clr = __pgprot(_PAGE_PRESENT | _PAGE_RW)};

        return __change_page_attr_set_clr(&cpa, 1);
}

void kernel_map_pages(struct page *page, int numpages, int enable)
{
        if (PageHighMem(page))
                return;
        if (!enable) {
                debug_check_no_locks_freed(page_address(page),
                                           numpages * PAGE_SIZE);
        }

        /*
         * If the page allocator is not up yet, do not call c_p_a():
         */
        if (!debug_pagealloc_enabled)
                return;

        /*
         * The return value is ignored as the calls cannot fail.
         * Large pages are kept enabled at boot time, and are
         * split up quickly with DEBUG_PAGEALLOC. If a splitup
         * fails here (due to temporary memory shortage) no damage
         * is done because we just keep the largepage intact up
         * to the next attempt when it will likely be split up:
         */
        if (enable)
                __set_pages_p(page, numpages);
        else
                __set_pages_np(page, numpages);

        /*
         * We should perform an IPI and flush all TLBs,
         * but that can deadlock, so flush only the current CPU:
         */
        __flush_tlb_all();

        /*
         * Try to refill the page pool here. We can do this only after
         * the tlb flush.
         */
        cpa_fill_pool(NULL);
}

#ifdef CONFIG_DEBUG_FS
static int dpa_show(struct seq_file *m, void *v)
{
        seq_puts(m, "DEBUG_PAGEALLOC\n");
        seq_printf(m, "pool_size : %lu\n", pool_size);
        seq_printf(m, "pool_pages : %lu\n", pool_pages);
        seq_printf(m, "pool_low : %lu\n", pool_low);
        seq_printf(m, "pool_used : %lu\n", pool_used);
        seq_printf(m, "pool_failed : %lu\n", pool_failed);

        return 0;
}

static int dpa_open(struct inode *inode, struct file *filp)
{
        return single_open(filp, dpa_show, NULL);
}

static const struct file_operations dpa_fops = {
        .open           = dpa_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int __init debug_pagealloc_proc_init(void)
{
        struct dentry *de;

        de = debugfs_create_file("debug_pagealloc", 0600, NULL, NULL,
                                 &dpa_fops);
        if (!de)
                return -ENOMEM;

        return 0;
}
__initcall(debug_pagealloc_proc_init);
#endif

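/*
 * Reading the file above yields output along these lines (the values
 * are illustrative; debugfs is assumed to be mounted at
 * /sys/kernel/debug):
 *
 *      # cat /sys/kernel/debug/debug_pagealloc
 *      DEBUG_PAGEALLOC
 *      pool_size : 64
 *      pool_pages : 64
 *      pool_low : 32
 *      pool_used : 128
 *      pool_failed : 0
 */
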
#ifdef CONFIG_HIBERNATION

bool kernel_page_present(struct page *page)
{
        unsigned int level;
        pte_t *pte;

        if (PageHighMem(page))
                return false;

        pte = lookup_address((unsigned long)page_address(page), &level);
        return (pte_val(*pte) & _PAGE_PRESENT);
}

#endif /* CONFIG_HIBERNATION */

#endif /* CONFIG_DEBUG_PAGEALLOC */

/*
 * The testcases use internal knowledge of the implementation that shouldn't
 * be exposed to the rest of the kernel. Include these directly here.
 */
#ifdef CONFIG_CPA_DEBUG
#include "pageattr-test.c"
#endif