/*
 * Copyright 2002 Andi Kleen, SuSE Labs.
 * Thanks to Ben LaHaise for precious feedback.
 */
#include <linux/highmem.h>
#include <linux/bootmem.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/pfn.h>
#include <linux/percpu.h>
#include <linux/gfp.h>
#include <linux/pci.h>
#include <linux/vmalloc.h>

#include <asm/e820.h>
#include <asm/processor.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/uaccess.h>
#include <asm/pgalloc.h>
#include <asm/proto.h>
#include <asm/pat.h>

/*
 * The current flushing context - we pass it instead of 5 arguments:
 */
struct cpa_data {
        unsigned long   *vaddr;
        pgd_t           *pgd;
        pgprot_t        mask_set;
        pgprot_t        mask_clr;
        int             numpages;
        int             flags;
        unsigned long   pfn;
        unsigned        force_split : 1;
        int             curpage;
        struct page     **pages;
};

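/*
 * Illustrative example (not part of the original source): for a call like
 * set_memory_nx(addr, 16), change_page_attr_set_clr() below ends up
 * working on roughly this context:
 *
 *      cpa.vaddr    = &addr;
 *      cpa.numpages = 16;
 *      cpa.mask_set = __pgprot(_PAGE_NX);
 *      cpa.mask_clr = __pgprot(0);
 *      cpa.flags    = 0;
 */
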
/*
 * Serialize cpa() (for !DEBUG_PAGEALLOC which uses large identity mappings)
 * using cpa_lock, so that we don't allow any other CPU with stale large TLB
 * entries to change a page attribute in parallel while some other CPU is
 * splitting a large page entry along with changing the attribute.
 */
static DEFINE_SPINLOCK(cpa_lock);

#define CPA_FLUSHTLB 1
#define CPA_ARRAY 2
#define CPA_PAGES_ARRAY 4

#ifdef CONFIG_PROC_FS
static unsigned long direct_pages_count[PG_LEVEL_NUM];

void update_page_count(int level, unsigned long pages)
{
        /* Protect against CPA */
        spin_lock(&pgd_lock);
        direct_pages_count[level] += pages;
        spin_unlock(&pgd_lock);
}

static void split_page_count(int level)
{
        direct_pages_count[level]--;
        direct_pages_count[level - 1] += PTRS_PER_PTE;
}

void arch_report_meminfo(struct seq_file *m)
{
        seq_printf(m, "DirectMap4k:    %8lu kB\n",
                        direct_pages_count[PG_LEVEL_4K] << 2);
#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
        seq_printf(m, "DirectMap2M:    %8lu kB\n",
                        direct_pages_count[PG_LEVEL_2M] << 11);
#else
        seq_printf(m, "DirectMap4M:    %8lu kB\n",
                        direct_pages_count[PG_LEVEL_2M] << 12);
#endif
        if (direct_gbpages)
                seq_printf(m, "DirectMap1G:    %8lu kB\n",
                        direct_pages_count[PG_LEVEL_1G] << 20);
}
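
/*
 * Example /proc/meminfo output from the above (illustrative values only),
 * on a 64-bit machine with gbpages in use:
 *
 *      DirectMap4k:      104416 kB
 *      DirectMap2M:     8284160 kB
 *      DirectMap1G:     8388608 kB
 */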
#else
static inline void split_page_count(int level) { }
#endif

#ifdef CONFIG_X86_64

static inline unsigned long highmap_start_pfn(void)
{
        return __pa_symbol(_text) >> PAGE_SHIFT;
}

static inline unsigned long highmap_end_pfn(void)
{
        return __pa_symbol(roundup(_brk_end, PMD_SIZE)) >> PAGE_SHIFT;
}

#endif

#ifdef CONFIG_DEBUG_PAGEALLOC
# define debug_pagealloc 1
#else
# define debug_pagealloc 0
#endif

static inline int
within(unsigned long addr, unsigned long start, unsigned long end)
{
        return addr >= start && addr < end;
}

/*
 * Flushing functions
 */

/**
 * clflush_cache_range - flush a cache range with clflush
 * @vaddr:      virtual start address
 * @size:       number of bytes to flush
 *
 * clflushopt is an unordered instruction which needs fencing with mfence or
 * sfence to avoid ordering issues.
 */
void clflush_cache_range(void *vaddr, unsigned int size)
{
        unsigned long clflush_mask = boot_cpu_data.x86_clflush_size - 1;
        void *vend = vaddr + size;
        void *p;

        mb();

        for (p = (void *)((unsigned long)vaddr & ~clflush_mask);
             p < vend; p += boot_cpu_data.x86_clflush_size)
                clflushopt(p);

        mb();
}
EXPORT_SYMBOL_GPL(clflush_cache_range);
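
/*
 * Usage sketch (illustrative, not part of the original source): a caller
 * that has written data through a cacheable mapping and needs it pushed
 * out to memory for a non-coherent observer could do:
 *
 *      memcpy(dst, src, len);
 *      clflush_cache_range(dst, len);
 *
 * The mb() barriers inside the function already order the flushes against
 * surrounding loads and stores, so no extra fencing is needed here.
 */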

static void __cpa_flush_all(void *arg)
{
        unsigned long cache = (unsigned long)arg;

        /*
         * Flush all to work around errata in early Athlons regarding
         * large page flushing.
         */
        __flush_tlb_all();

        if (cache && boot_cpu_data.x86 >= 4)
                wbinvd();
}

static void cpa_flush_all(unsigned long cache)
{
        BUG_ON(irqs_disabled());

        on_each_cpu(__cpa_flush_all, (void *) cache, 1);
}

static void __cpa_flush_range(void *arg)
{
        /*
         * We could optimize that further and do individual per page
         * tlb invalidates for a low number of pages. Caveat: we must
         * flush the high aliases on 64bit as well.
         */
        __flush_tlb_all();
}

static void cpa_flush_range(unsigned long start, int numpages, int cache)
{
        unsigned int i, level;
        unsigned long addr;

        BUG_ON(irqs_disabled());
        WARN_ON(PAGE_ALIGN(start) != start);

        on_each_cpu(__cpa_flush_range, NULL, 1);

        if (!cache)
                return;

        /*
         * We only need to flush on one CPU,
         * clflush is a MESI-coherent instruction that
         * will cause all other CPUs to flush the same
         * cachelines:
         */
        for (i = 0, addr = start; i < numpages; i++, addr += PAGE_SIZE) {
                pte_t *pte = lookup_address(addr, &level);

                /*
                 * Only flush present addresses:
                 */
                if (pte && (pte_val(*pte) & _PAGE_PRESENT))
                        clflush_cache_range((void *) addr, PAGE_SIZE);
        }
}

static void cpa_flush_array(unsigned long *start, int numpages, int cache,
                            int in_flags, struct page **pages)
{
        unsigned int i, level;
        unsigned long do_wbinvd = cache && numpages >= 1024; /* 4M threshold */

        BUG_ON(irqs_disabled());

        on_each_cpu(__cpa_flush_all, (void *) do_wbinvd, 1);

        if (!cache || do_wbinvd)
                return;

        /*
         * We only need to flush on one CPU,
         * clflush is a MESI-coherent instruction that
         * will cause all other CPUs to flush the same
         * cachelines:
         */
        for (i = 0; i < numpages; i++) {
                unsigned long addr;
                pte_t *pte;

                if (in_flags & CPA_PAGES_ARRAY)
                        addr = (unsigned long)page_address(pages[i]);
                else
                        addr = start[i];

                pte = lookup_address(addr, &level);

                /*
                 * Only flush present addresses:
                 */
                if (pte && (pte_val(*pte) & _PAGE_PRESENT))
                        clflush_cache_range((void *)addr, PAGE_SIZE);
        }
}

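/*
 * Worked numbers for the threshold above (illustrative): 1024 pages * 4k
 * = 4M of data. Past that point a single WBINVD of the whole cache is
 * assumed to be cheaper than clflushing the range line by line.
 */
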
/*
 * Certain areas of memory on x86 require very specific protection flags,
 * for example the BIOS area or kernel text. Callers don't always get this
 * right (again, ioremap() on BIOS memory is not uncommon) so this function
 * checks and fixes these known static required protection bits.
 */
static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
                                   unsigned long pfn)
{
        pgprot_t forbidden = __pgprot(0);

        /*
         * The BIOS area between 640k and 1Mb needs to be executable for
         * PCI BIOS based config access (CONFIG_PCI_GOBIOS) support.
         */
#ifdef CONFIG_PCI_BIOS
        if (pcibios_enabled && within(pfn, BIOS_BEGIN >> PAGE_SHIFT, BIOS_END >> PAGE_SHIFT))
                pgprot_val(forbidden) |= _PAGE_NX;
#endif

        /*
         * The kernel text needs to be executable for obvious reasons.
         * Does not cover __inittext since that is gone later on. On
         * 64bit we do not enforce !NX on the low mapping.
         */
        if (within(address, (unsigned long)_text, (unsigned long)_etext))
                pgprot_val(forbidden) |= _PAGE_NX;

        /*
         * The .rodata section needs to be read-only. Using the pfn
         * catches all aliases.
         */
        if (within(pfn, __pa_symbol(__start_rodata) >> PAGE_SHIFT,
                   __pa_symbol(__end_rodata) >> PAGE_SHIFT))
                pgprot_val(forbidden) |= _PAGE_RW;

#if defined(CONFIG_X86_64) && defined(CONFIG_DEBUG_RODATA)
        /*
         * Once the kernel maps the text as RO (kernel_set_to_readonly is
         * set), the kernel text mappings for the large page aligned text and
         * rodata sections will always be read-only. The kernel identity
         * mappings covering the holes caused by this alignment can be
         * anything that the user asks for.
         *
         * This will preserve the large page mappings for kernel text/data
         * at no extra cost.
         */
        if (kernel_set_to_readonly &&
            within(address, (unsigned long)_text,
                   (unsigned long)__end_rodata_hpage_align)) {
                unsigned int level;

                /*
                 * Don't enforce the !RW mapping for the kernel text mapping,
                 * if the current mapping is already using small page mapping.
                 * No need to work hard to preserve large page mappings in this
                 * case.
                 *
                 * This also fixes the Linux Xen paravirt guest boot failure
                 * caused by unexpected read-only mappings for kernel identity
                 * mappings. In this paravirt guest case, the kernel text
                 * mapping and the kernel identity mapping share the same
                 * page-table pages, so we can't really use different
                 * protections for the kernel text and identity mappings. Also,
                 * these shared mappings are made of small page mappings.
                 * Thus not enforcing the !RW mapping for small page kernel
                 * text mappings helps the Linux Xen paravirt guest boot
                 * as well.
                 */
                if (lookup_address(address, &level) && (level != PG_LEVEL_4K))
                        pgprot_val(forbidden) |= _PAGE_RW;
        }
#endif

        prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));

        return prot;
}

/*
 * Lookup the page table entry for a virtual address in a specific pgd.
 * Return a pointer to the entry and the level of the mapping.
 */
pte_t *lookup_address_in_pgd(pgd_t *pgd, unsigned long address,
                             unsigned int *level)
{
        pud_t *pud;
        pmd_t *pmd;

        *level = PG_LEVEL_NONE;

        if (pgd_none(*pgd))
                return NULL;

        pud = pud_offset(pgd, address);
        if (pud_none(*pud))
                return NULL;

        *level = PG_LEVEL_1G;
        if (pud_large(*pud) || !pud_present(*pud))
                return (pte_t *)pud;

        pmd = pmd_offset(pud, address);
        if (pmd_none(*pmd))
                return NULL;

        *level = PG_LEVEL_2M;
        if (pmd_large(*pmd) || !pmd_present(*pmd))
                return (pte_t *)pmd;

        *level = PG_LEVEL_4K;

        return pte_offset_kernel(pmd, address);
}

/*
 * Lookup the page table entry for a virtual address. Return a pointer
 * to the entry and the level of the mapping.
 *
 * Note: We return pud and pmd either when the entry is marked large
 * or when the present bit is not set. Otherwise we would return a
 * pointer to a nonexisting mapping.
 */
pte_t *lookup_address(unsigned long address, unsigned int *level)
{
        return lookup_address_in_pgd(pgd_offset_k(address), address, level);
}
EXPORT_SYMBOL_GPL(lookup_address);
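
/*
 * Usage sketch (illustrative only): find out at which level a kernel
 * virtual address is currently mapped:
 *
 *      unsigned int level;
 *      pte_t *pte = lookup_address(addr, &level);
 *
 *      if (pte && (pte_val(*pte) & _PAGE_PRESENT))
 *              pr_info("%lx mapped at level %d\n", addr, level);
 *
 * Per the note above, for a large or non-present entry the returned
 * pointer is really a pud/pmd cast to pte_t *.
 */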

static pte_t *_lookup_address_cpa(struct cpa_data *cpa, unsigned long address,
                                  unsigned int *level)
{
        if (cpa->pgd)
                return lookup_address_in_pgd(cpa->pgd + pgd_index(address),
                                             address, level);

        return lookup_address(address, level);
}

/*
 * Lookup the PMD entry for a virtual address. Return a pointer to the entry
 * or NULL if not present.
 */
pmd_t *lookup_pmd_address(unsigned long address)
{
        pgd_t *pgd;
        pud_t *pud;

        pgd = pgd_offset_k(address);
        if (pgd_none(*pgd))
                return NULL;

        pud = pud_offset(pgd, address);
        if (pud_none(*pud) || pud_large(*pud) || !pud_present(*pud))
                return NULL;

        return pmd_offset(pud, address);
}

/*
 * This is necessary because __pa() does not work on some
 * kinds of memory, like vmalloc() or the alloc_remap()
 * areas on 32-bit NUMA systems.  The percpu areas can
 * end up in this kind of memory, for instance.
 *
 * This could be optimized, but it is only intended to be
 * used at initialization time, and keeping it
 * unoptimized should increase the testing coverage for
 * the more obscure platforms.
 */
phys_addr_t slow_virt_to_phys(void *__virt_addr)
{
        unsigned long virt_addr = (unsigned long)__virt_addr;
        phys_addr_t phys_addr;
        unsigned long offset;
        enum pg_level level;
        unsigned long pmask;
        pte_t *pte;

        pte = lookup_address(virt_addr, &level);
        BUG_ON(!pte);
        pmask = page_level_mask(level);
        offset = virt_addr & ~pmask;
        phys_addr = (phys_addr_t)pte_pfn(*pte) << PAGE_SHIFT;
        return (phys_addr | offset);
}
EXPORT_SYMBOL_GPL(slow_virt_to_phys);

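/*
 * Usage sketch (illustrative only): obtain the physical address of a
 * per-cpu variable, which may live in memory for which __pa() is not
 * valid:
 *
 *      phys_addr_t pa = slow_virt_to_phys(this_cpu_ptr(&some_pcpu_var));
 *
 * Unlike __pa(), this walks the page tables, so it works for any mapped
 * kernel virtual address at the cost of a full lookup.
 */
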
/*
 * Set the new pmd in all the pgds we know about:
 */
static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
{
        /* change init_mm */
        set_pte_atomic(kpte, pte);
#ifdef CONFIG_X86_32
        if (!SHARED_KERNEL_PMD) {
                struct page *page;

                list_for_each_entry(page, &pgd_list, lru) {
                        pgd_t *pgd;
                        pud_t *pud;
                        pmd_t *pmd;

                        pgd = (pgd_t *)page_address(page) + pgd_index(address);
                        pud = pud_offset(pgd, address);
                        pmd = pmd_offset(pud, address);
                        set_pte_atomic((pte_t *)pmd, pte);
                }
        }
#endif
}

static int
try_preserve_large_page(pte_t *kpte, unsigned long address,
                        struct cpa_data *cpa)
{
        unsigned long nextpage_addr, numpages, pmask, psize, addr, pfn;
        pte_t new_pte, old_pte, *tmp;
        pgprot_t old_prot, new_prot, req_prot;
        int i, do_split = 1;
        enum pg_level level;

        if (cpa->force_split)
                return 1;

        spin_lock(&pgd_lock);
        /*
         * Check for races, another CPU might have split this page
         * up already:
         */
        tmp = _lookup_address_cpa(cpa, address, &level);
        if (tmp != kpte)
                goto out_unlock;

        switch (level) {
        case PG_LEVEL_2M:
#ifdef CONFIG_X86_64
        case PG_LEVEL_1G:
#endif
                psize = page_level_size(level);
                pmask = page_level_mask(level);
                break;
        default:
                do_split = -EINVAL;
                goto out_unlock;
        }

        /*
         * Calculate the number of pages, which fit into this large
         * page starting at address:
         */
        nextpage_addr = (address + psize) & pmask;
        numpages = (nextpage_addr - address) >> PAGE_SHIFT;
        if (numpages < cpa->numpages)
                cpa->numpages = numpages;

        /*
         * We are safe now. Check whether the new pgprot is the same:
         * Convert protection attributes to 4k-format, as cpa->mask* are set
         * up accordingly.
         */
        old_pte = *kpte;
        old_prot = req_prot = pgprot_large_2_4k(pte_pgprot(old_pte));

        pgprot_val(req_prot) &= ~pgprot_val(cpa->mask_clr);
        pgprot_val(req_prot) |= pgprot_val(cpa->mask_set);

        /*
         * req_prot is in format of 4k pages. It must be converted to large
         * page format: the caching mode includes the PAT bit located at
         * different bit positions in the two formats.
         */
        req_prot = pgprot_4k_2_large(req_prot);

        /*
         * Set the PSE and GLOBAL flags only if the PRESENT flag is
         * set otherwise pmd_present/pmd_huge will return true even on
         * a non present pmd. The canon_pgprot will clear _PAGE_GLOBAL
         * for the ancient hardware that doesn't support it.
         */
        if (pgprot_val(req_prot) & _PAGE_PRESENT)
                pgprot_val(req_prot) |= _PAGE_PSE | _PAGE_GLOBAL;
        else
                pgprot_val(req_prot) &= ~(_PAGE_PSE | _PAGE_GLOBAL);

        req_prot = canon_pgprot(req_prot);

        /*
         * old_pte points to the large page base address. So we need
         * to add the offset of the virtual address:
         */
        pfn = pte_pfn(old_pte) + ((address & (psize - 1)) >> PAGE_SHIFT);
        cpa->pfn = pfn;

        new_prot = static_protections(req_prot, address, pfn);

        /*
         * We need to check the full range, whether
         * static_protections() requires a different pgprot for one of
         * the pages in the range we try to preserve:
         */
        addr = address & pmask;
        pfn = pte_pfn(old_pte);
        for (i = 0; i < (psize >> PAGE_SHIFT); i++, addr += PAGE_SIZE, pfn++) {
                pgprot_t chk_prot = static_protections(req_prot, addr, pfn);

                if (pgprot_val(chk_prot) != pgprot_val(new_prot))
                        goto out_unlock;
        }

        /*
         * If there are no changes, return. cpa->numpages has been
         * updated above:
         */
        if (pgprot_val(new_prot) == pgprot_val(old_prot)) {
                do_split = 0;
                goto out_unlock;
        }

        /*
         * We need to change the attributes. Check, whether we can
         * change the large page in one go. We request a split, when
         * the address is not aligned and the number of pages is
         * smaller than the number of pages in the large page. Note
         * that we limited the number of possible pages already to
         * the number of pages in the large page.
         */
        if (address == (address & pmask) && cpa->numpages == (psize >> PAGE_SHIFT)) {
                /*
                 * The address is aligned and the number of pages
                 * covers the full page.
                 */
                new_pte = pfn_pte(pte_pfn(old_pte), new_prot);
                __set_pmd_pte(kpte, address, new_pte);
                cpa->flags |= CPA_FLUSHTLB;
                do_split = 0;
        }

out_unlock:
        spin_unlock(&pgd_lock);

        return do_split;
}

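/*
 * Worked example (illustrative): on x86-64 a 2M page covers 512 4k pages.
 * A request covering all 512 pages and starting exactly on the 2M
 * boundary can be satisfied by rewriting the single large entry above;
 * a request for, say, 16 pages in the middle of the large page takes the
 * split path below instead.
 */
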
static int
__split_large_page(struct cpa_data *cpa, pte_t *kpte, unsigned long address,
                   struct page *base)
{
        pte_t *pbase = (pte_t *)page_address(base);
        unsigned long pfn, pfninc = 1;
        unsigned int i, level;
        pte_t *tmp;
        pgprot_t ref_prot;

        spin_lock(&pgd_lock);
        /*
         * Check for races, another CPU might have split this page
         * up for us already:
         */
        tmp = _lookup_address_cpa(cpa, address, &level);
        if (tmp != kpte) {
                spin_unlock(&pgd_lock);
                return 1;
        }

        paravirt_alloc_pte(&init_mm, page_to_pfn(base));
        ref_prot = pte_pgprot(pte_clrhuge(*kpte));

        /* promote PAT bit to correct position */
        if (level == PG_LEVEL_2M)
                ref_prot = pgprot_large_2_4k(ref_prot);

#ifdef CONFIG_X86_64
        if (level == PG_LEVEL_1G) {
                pfninc = PMD_PAGE_SIZE >> PAGE_SHIFT;
                /*
                 * Set the PSE flags only if the PRESENT flag is set
                 * otherwise pmd_present/pmd_huge will return true
                 * even on a non present pmd.
                 */
                if (pgprot_val(ref_prot) & _PAGE_PRESENT)
                        pgprot_val(ref_prot) |= _PAGE_PSE;
                else
                        pgprot_val(ref_prot) &= ~_PAGE_PSE;
        }
#endif

        /*
         * Set the GLOBAL flags only if the PRESENT flag is set
         * otherwise pmd/pte_present will return true even on a non
         * present pmd/pte. The canon_pgprot will clear _PAGE_GLOBAL
         * for the ancient hardware that doesn't support it.
         */
        if (pgprot_val(ref_prot) & _PAGE_PRESENT)
                pgprot_val(ref_prot) |= _PAGE_GLOBAL;
        else
                pgprot_val(ref_prot) &= ~_PAGE_GLOBAL;

        /*
         * Get the target pfn from the original entry:
         */
        pfn = pte_pfn(*kpte);
        for (i = 0; i < PTRS_PER_PTE; i++, pfn += pfninc)
                set_pte(&pbase[i], pfn_pte(pfn, canon_pgprot(ref_prot)));

        if (pfn_range_is_mapped(PFN_DOWN(__pa(address)),
                                PFN_DOWN(__pa(address)) + 1))
                split_page_count(level);

        /*
         * Install the new, split up pagetable.
         *
         * We use the standard kernel pagetable protections for the new
         * pagetable protections, the actual ptes set above control the
         * primary protection behavior:
         */
        __set_pmd_pte(kpte, address, mk_pte(base, __pgprot(_KERNPG_TABLE)));

        /*
         * Intel Atom errata AAH41 workaround.
         *
         * The real fix should be in hw or in a microcode update, but
         * we also probabilistically try to reduce the window of having
         * a large TLB mixed with 4K TLBs while instruction fetches are
         * going on.
         */
        __flush_tlb_all();
        spin_unlock(&pgd_lock);

        return 0;
}

static int split_large_page(struct cpa_data *cpa, pte_t *kpte,
                            unsigned long address)
{
        struct page *base;

        if (!debug_pagealloc)
                spin_unlock(&cpa_lock);
        base = alloc_pages(GFP_KERNEL | __GFP_NOTRACK, 0);
        if (!debug_pagealloc)
                spin_lock(&cpa_lock);
        if (!base)
                return -ENOMEM;

        if (__split_large_page(cpa, kpte, address, base))
                __free_page(base);

        return 0;
}

static bool try_to_free_pte_page(pte_t *pte)
{
        int i;

        for (i = 0; i < PTRS_PER_PTE; i++)
                if (!pte_none(pte[i]))
                        return false;

        free_page((unsigned long)pte);
        return true;
}

static bool try_to_free_pmd_page(pmd_t *pmd)
{
        int i;

        for (i = 0; i < PTRS_PER_PMD; i++)
                if (!pmd_none(pmd[i]))
                        return false;

        free_page((unsigned long)pmd);
        return true;
}

static bool try_to_free_pud_page(pud_t *pud)
{
        int i;

        for (i = 0; i < PTRS_PER_PUD; i++)
                if (!pud_none(pud[i]))
                        return false;

        free_page((unsigned long)pud);
        return true;
}

static bool unmap_pte_range(pmd_t *pmd, unsigned long start, unsigned long end)
{
        pte_t *pte = pte_offset_kernel(pmd, start);

        while (start < end) {
                set_pte(pte, __pte(0));

                start += PAGE_SIZE;
                pte++;
        }

        if (try_to_free_pte_page((pte_t *)pmd_page_vaddr(*pmd))) {
                pmd_clear(pmd);
                return true;
        }
        return false;
}

static void __unmap_pmd_range(pud_t *pud, pmd_t *pmd,
                              unsigned long start, unsigned long end)
{
        if (unmap_pte_range(pmd, start, end))
                if (try_to_free_pmd_page((pmd_t *)pud_page_vaddr(*pud)))
                        pud_clear(pud);
}

static void unmap_pmd_range(pud_t *pud, unsigned long start, unsigned long end)
{
        pmd_t *pmd = pmd_offset(pud, start);

        /*
         * Not on a 2MB page boundary?
         */
        if (start & (PMD_SIZE - 1)) {
                unsigned long next_page = (start + PMD_SIZE) & PMD_MASK;
                unsigned long pre_end = min_t(unsigned long, end, next_page);

                __unmap_pmd_range(pud, pmd, start, pre_end);

                start = pre_end;
                pmd++;
        }

        /*
         * Try to unmap in 2M chunks.
         */
        while (end - start >= PMD_SIZE) {
                if (pmd_large(*pmd))
                        pmd_clear(pmd);
                else
                        __unmap_pmd_range(pud, pmd, start, start + PMD_SIZE);

                start += PMD_SIZE;
                pmd++;
        }

        /*
         * 4K leftovers?
         */
        if (start < end)
                return __unmap_pmd_range(pud, pmd, start, end);

        /*
         * Try again to free the PMD page if we haven't succeeded above.
         */
        if (!pud_none(*pud))
                if (try_to_free_pmd_page((pmd_t *)pud_page_vaddr(*pud)))
                        pud_clear(pud);
}

static void unmap_pud_range(pgd_t *pgd, unsigned long start, unsigned long end)
{
        pud_t *pud = pud_offset(pgd, start);

        /*
         * Not on a GB page boundary?
         */
        if (start & (PUD_SIZE - 1)) {
                unsigned long next_page = (start + PUD_SIZE) & PUD_MASK;
                unsigned long pre_end = min_t(unsigned long, end, next_page);

                unmap_pmd_range(pud, start, pre_end);

                start = pre_end;
                pud++;
        }

        /*
         * Try to unmap in 1G chunks.
         */
        while (end - start >= PUD_SIZE) {

                if (pud_large(*pud))
                        pud_clear(pud);
                else
                        unmap_pmd_range(pud, start, start + PUD_SIZE);

                start += PUD_SIZE;
                pud++;
        }

        /*
         * 2M leftovers?
         */
        if (start < end)
                unmap_pmd_range(pud, start, end);

        /*
         * No need to try to free the PUD page because we'll free it in
         * populate_pgd's error path.
         */
}

static void unmap_pgd_range(pgd_t *root, unsigned long addr, unsigned long end)
{
        pgd_t *pgd_entry = root + pgd_index(addr);

        unmap_pud_range(pgd_entry, addr, end);

        if (try_to_free_pud_page((pud_t *)pgd_page_vaddr(*pgd_entry)))
                pgd_clear(pgd_entry);
}

static int alloc_pte_page(pmd_t *pmd)
{
        pte_t *pte = (pte_t *)get_zeroed_page(GFP_KERNEL | __GFP_NOTRACK);
        if (!pte)
                return -1;

        set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE));
        return 0;
}

static int alloc_pmd_page(pud_t *pud)
{
        pmd_t *pmd = (pmd_t *)get_zeroed_page(GFP_KERNEL | __GFP_NOTRACK);
        if (!pmd)
                return -1;

        set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE));
        return 0;
}

static void populate_pte(struct cpa_data *cpa,
                         unsigned long start, unsigned long end,
                         unsigned num_pages, pmd_t *pmd, pgprot_t pgprot)
{
        pte_t *pte;

        pte = pte_offset_kernel(pmd, start);

        while (num_pages-- && start < end) {

                /* deal with the NX bit */
                if (!(pgprot_val(pgprot) & _PAGE_NX))
                        cpa->pfn &= ~_PAGE_NX;

                set_pte(pte, pfn_pte(cpa->pfn >> PAGE_SHIFT, pgprot));

                start += PAGE_SIZE;
                cpa->pfn += PAGE_SIZE;
                pte++;
        }
}

static int populate_pmd(struct cpa_data *cpa,
                        unsigned long start, unsigned long end,
                        unsigned num_pages, pud_t *pud, pgprot_t pgprot)
{
        unsigned int cur_pages = 0;
        pmd_t *pmd;
        pgprot_t pmd_pgprot;

        /*
         * Not on a 2M boundary?
         */
        if (start & (PMD_SIZE - 1)) {
                unsigned long pre_end = start + (num_pages << PAGE_SHIFT);
                unsigned long next_page = (start + PMD_SIZE) & PMD_MASK;

                pre_end = min_t(unsigned long, pre_end, next_page);
                cur_pages = (pre_end - start) >> PAGE_SHIFT;
                cur_pages = min_t(unsigned int, num_pages, cur_pages);

                /*
                 * Need a PTE page?
                 */
                pmd = pmd_offset(pud, start);
                if (pmd_none(*pmd))
                        if (alloc_pte_page(pmd))
                                return -1;

                populate_pte(cpa, start, pre_end, cur_pages, pmd, pgprot);

                start = pre_end;
        }

        /*
         * We mapped them all?
         */
        if (num_pages == cur_pages)
                return cur_pages;

        pmd_pgprot = pgprot_4k_2_large(pgprot);

        while (end - start >= PMD_SIZE) {

                /*
                 * We cannot use a 1G page so allocate a PMD page if needed.
                 */
                if (pud_none(*pud))
                        if (alloc_pmd_page(pud))
                                return -1;

                pmd = pmd_offset(pud, start);

                set_pmd(pmd, __pmd(cpa->pfn | _PAGE_PSE |
                                   massage_pgprot(pmd_pgprot)));

                start += PMD_SIZE;
                cpa->pfn += PMD_SIZE;
                cur_pages += PMD_SIZE >> PAGE_SHIFT;
        }

        /*
         * Map trailing 4K pages.
         */
        if (start < end) {
                pmd = pmd_offset(pud, start);
                if (pmd_none(*pmd))
                        if (alloc_pte_page(pmd))
                                return -1;

                populate_pte(cpa, start, end, num_pages - cur_pages,
                             pmd, pgprot);
        }
        return num_pages;
}

static int populate_pud(struct cpa_data *cpa, unsigned long start, pgd_t *pgd,
                        pgprot_t pgprot)
{
        pud_t *pud;
        unsigned long end;
        int cur_pages = 0;
        pgprot_t pud_pgprot;

        end = start + (cpa->numpages << PAGE_SHIFT);

        /*
         * Not on a Gb page boundary? => map everything up to it with
         * smaller pages.
         */
        if (start & (PUD_SIZE - 1)) {
                unsigned long pre_end;
                unsigned long next_page = (start + PUD_SIZE) & PUD_MASK;

                pre_end = min_t(unsigned long, end, next_page);
                cur_pages = (pre_end - start) >> PAGE_SHIFT;
                cur_pages = min_t(int, (int)cpa->numpages, cur_pages);

                pud = pud_offset(pgd, start);

                /*
                 * Need a PMD page?
                 */
                if (pud_none(*pud))
                        if (alloc_pmd_page(pud))
                                return -1;

                cur_pages = populate_pmd(cpa, start, pre_end, cur_pages,
                                         pud, pgprot);
                if (cur_pages < 0)
                        return cur_pages;

                start = pre_end;
        }

        /* We mapped them all? */
        if (cpa->numpages == cur_pages)
                return cur_pages;

        pud = pud_offset(pgd, start);
        pud_pgprot = pgprot_4k_2_large(pgprot);

        /*
         * Map everything starting from the Gb boundary, possibly with 1G pages
         */
        while (end - start >= PUD_SIZE) {
                set_pud(pud, __pud(cpa->pfn | _PAGE_PSE |
                                   massage_pgprot(pud_pgprot)));

                start += PUD_SIZE;
                cpa->pfn += PUD_SIZE;
                cur_pages += PUD_SIZE >> PAGE_SHIFT;
                pud++;
        }

        /* Map trailing leftover */
        if (start < end) {
                int tmp;

                pud = pud_offset(pgd, start);
                if (pud_none(*pud))
                        if (alloc_pmd_page(pud))
                                return -1;

                tmp = populate_pmd(cpa, start, end, cpa->numpages - cur_pages,
                                   pud, pgprot);
                if (tmp < 0)
                        return cur_pages;

                cur_pages += tmp;
        }
        return cur_pages;
}

/*
 * Restrictions for kernel page table do not necessarily apply when mapping in
 * an alternate PGD.
 */
static int populate_pgd(struct cpa_data *cpa, unsigned long addr)
{
        pgprot_t pgprot = __pgprot(_KERNPG_TABLE);
        pud_t *pud = NULL;      /* shut up gcc */
        pgd_t *pgd_entry;
        int ret;

        pgd_entry = cpa->pgd + pgd_index(addr);

        /*
         * Allocate a PUD page and hand it down for mapping.
         */
        if (pgd_none(*pgd_entry)) {
                pud = (pud_t *)get_zeroed_page(GFP_KERNEL | __GFP_NOTRACK);
                if (!pud)
                        return -1;

                set_pgd(pgd_entry, __pgd(__pa(pud) | _KERNPG_TABLE));
        }

        pgprot_val(pgprot) &= ~pgprot_val(cpa->mask_clr);
        pgprot_val(pgprot) |= pgprot_val(cpa->mask_set);

        ret = populate_pud(cpa, addr, pgd_entry, pgprot);
        if (ret < 0) {
                unmap_pgd_range(cpa->pgd, addr,
                                addr + (cpa->numpages << PAGE_SHIFT));
                return ret;
        }

        cpa->numpages = ret;
        return 0;
}

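/*
 * Note (illustrative, not from the original source): cpa->pgd is only set
 * when a caller supplies its own top-level page table, so the populate_*
 * path above intentionally skips the static_protections() checks that
 * guard the kernel's own mappings, as the comment on populate_pgd() says.
 */
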
static int __cpa_process_fault(struct cpa_data *cpa, unsigned long vaddr,
                               int primary)
{
        if (cpa->pgd)
                return populate_pgd(cpa, vaddr);

        /*
         * Ignore all non primary paths.
         */
        if (!primary)
                return 0;

        /*
         * Ignore the NULL PTE for kernel identity mapping, as it is expected
         * to have holes.
         * Also set numpages to '1' indicating that we processed cpa req for
         * one virtual address page and its pfn. TBD: numpages can be set based
         * on the initial value and the level returned by lookup_address().
         */
        if (within(vaddr, PAGE_OFFSET,
                   PAGE_OFFSET + (max_pfn_mapped << PAGE_SHIFT))) {
                cpa->numpages = 1;
                cpa->pfn = __pa(vaddr) >> PAGE_SHIFT;
                return 0;
        } else {
                WARN(1, KERN_WARNING "CPA: called for zero pte. "
                        "vaddr = %lx cpa->vaddr = %lx\n", vaddr,
                        *cpa->vaddr);

                return -EFAULT;
        }
}

static int __change_page_attr(struct cpa_data *cpa, int primary)
{
        unsigned long address;
        int do_split, err;
        unsigned int level;
        pte_t *kpte, old_pte;

        if (cpa->flags & CPA_PAGES_ARRAY) {
                struct page *page = cpa->pages[cpa->curpage];
                if (unlikely(PageHighMem(page)))
                        return 0;
                address = (unsigned long)page_address(page);
        } else if (cpa->flags & CPA_ARRAY)
                address = cpa->vaddr[cpa->curpage];
        else
                address = *cpa->vaddr;
repeat:
        kpte = _lookup_address_cpa(cpa, address, &level);
        if (!kpte)
                return __cpa_process_fault(cpa, address, primary);

        old_pte = *kpte;
        if (!pte_val(old_pte))
                return __cpa_process_fault(cpa, address, primary);

        if (level == PG_LEVEL_4K) {
                pte_t new_pte;
                pgprot_t new_prot = pte_pgprot(old_pte);
                unsigned long pfn = pte_pfn(old_pte);

                pgprot_val(new_prot) &= ~pgprot_val(cpa->mask_clr);
                pgprot_val(new_prot) |= pgprot_val(cpa->mask_set);

                new_prot = static_protections(new_prot, address, pfn);

                /*
                 * Set the GLOBAL flags only if the PRESENT flag is
                 * set otherwise pte_present will return true even on
                 * a non present pte. The canon_pgprot will clear
                 * _PAGE_GLOBAL for the ancient hardware that doesn't
                 * support it.
                 */
                if (pgprot_val(new_prot) & _PAGE_PRESENT)
                        pgprot_val(new_prot) |= _PAGE_GLOBAL;
                else
                        pgprot_val(new_prot) &= ~_PAGE_GLOBAL;

                /*
                 * We need to keep the pfn from the existing PTE,
                 * after all we're only going to change its attributes,
                 * not the memory it points to.
                 */
                new_pte = pfn_pte(pfn, canon_pgprot(new_prot));
                cpa->pfn = pfn;
                /*
                 * Do we really change anything ?
                 */
                if (pte_val(old_pte) != pte_val(new_pte)) {
                        set_pte_atomic(kpte, new_pte);
                        cpa->flags |= CPA_FLUSHTLB;
                }
                cpa->numpages = 1;
                return 0;
        }

        /*
         * Check, whether we can keep the large page intact
         * and just change the pte:
         */
        do_split = try_preserve_large_page(kpte, address, cpa);
        /*
         * When the range fits into the existing large page,
         * return. cpa->numpages and cpa->flags have been updated in
         * try_preserve_large_page():
         */
        if (do_split <= 0)
                return do_split;

        /*
         * We have to split the large page:
         */
        err = split_large_page(cpa, kpte, address);
        if (!err) {
                /*
                 * Do a global flush tlb after splitting the large page
                 * and before we do the actual change page attribute in the PTE.
                 *
                 * Without this, we violate the TLB application note, which says
                 * "The TLBs may contain both ordinary and large-page
                 * translations for a 4-KByte range of linear addresses. This
                 * may occur if software modifies the paging structures so that
                 * the page size used for the address range changes. If the two
                 * translations differ with respect to page frame or attributes
                 * (e.g., permissions), processor behavior is undefined and may
                 * be implementation-specific."
                 *
                 * We do this global tlb flush inside the cpa_lock, so that we
                 * don't allow any other cpu, with stale tlb entries, to change
                 * the page attribute in parallel for an address that also
                 * falls into the just split large page entry.
                 */
                flush_tlb_all();
                goto repeat;
        }

        return err;
}

static int __change_page_attr_set_clr(struct cpa_data *cpa, int checkalias);

static int cpa_process_alias(struct cpa_data *cpa)
{
        struct cpa_data alias_cpa;
        unsigned long laddr = (unsigned long)__va(cpa->pfn << PAGE_SHIFT);
        unsigned long vaddr;
        int ret;

        if (!pfn_range_is_mapped(cpa->pfn, cpa->pfn + 1))
                return 0;

        /*
         * No need to redo, when the primary call touched the direct
         * mapping already:
         */
        if (cpa->flags & CPA_PAGES_ARRAY) {
                struct page *page = cpa->pages[cpa->curpage];
                if (unlikely(PageHighMem(page)))
                        return 0;
                vaddr = (unsigned long)page_address(page);
        } else if (cpa->flags & CPA_ARRAY)
                vaddr = cpa->vaddr[cpa->curpage];
        else
                vaddr = *cpa->vaddr;

        if (!(within(vaddr, PAGE_OFFSET,
                    PAGE_OFFSET + (max_pfn_mapped << PAGE_SHIFT)))) {

                alias_cpa = *cpa;
                alias_cpa.vaddr = &laddr;
                alias_cpa.flags &= ~(CPA_PAGES_ARRAY | CPA_ARRAY);

                ret = __change_page_attr_set_clr(&alias_cpa, 0);
                if (ret)
                        return ret;
        }

#ifdef CONFIG_X86_64
        /*
         * If the primary call didn't touch the high mapping already
         * and the physical address is inside the kernel map, we need
         * to touch the high mapped kernel as well:
         */
        if (!within(vaddr, (unsigned long)_text, _brk_end) &&
            within(cpa->pfn, highmap_start_pfn(), highmap_end_pfn())) {
                unsigned long temp_cpa_vaddr = (cpa->pfn << PAGE_SHIFT) +
                                               __START_KERNEL_map - phys_base;
                alias_cpa = *cpa;
                alias_cpa.vaddr = &temp_cpa_vaddr;
                alias_cpa.flags &= ~(CPA_PAGES_ARRAY | CPA_ARRAY);

                /*
                 * The high mapping range is imprecise, so ignore the
                 * return value.
                 */
                __change_page_attr_set_clr(&alias_cpa, 0);
        }
#endif

        return 0;
}

static int __change_page_attr_set_clr(struct cpa_data *cpa, int checkalias)
{
        int ret, numpages = cpa->numpages;

        while (numpages) {
                /*
                 * Store the remaining nr of pages for the large page
                 * preservation check.
                 */
                cpa->numpages = numpages;
                /* for array changes, we can't use large page */
                if (cpa->flags & (CPA_ARRAY | CPA_PAGES_ARRAY))
                        cpa->numpages = 1;

                if (!debug_pagealloc)
                        spin_lock(&cpa_lock);
                ret = __change_page_attr(cpa, checkalias);
                if (!debug_pagealloc)
                        spin_unlock(&cpa_lock);
                if (ret)
                        return ret;

                if (checkalias) {
                        ret = cpa_process_alias(cpa);
                        if (ret)
                                return ret;
                }

                /*
                 * Adjust the number of pages with the result of the
                 * CPA operation. Either a large page has been
                 * preserved or a single page update happened.
                 */
                BUG_ON(cpa->numpages > numpages);
                numpages -= cpa->numpages;
                if (cpa->flags & (CPA_PAGES_ARRAY | CPA_ARRAY))
                        cpa->curpage++;
                else
                        *cpa->vaddr += cpa->numpages * PAGE_SIZE;

        }
        return 0;
}

static int change_page_attr_set_clr(unsigned long *addr, int numpages,
                                    pgprot_t mask_set, pgprot_t mask_clr,
                                    int force_split, int in_flag,
                                    struct page **pages)
{
        struct cpa_data cpa;
        int ret, cache, checkalias;
        unsigned long baddr = 0;

        memset(&cpa, 0, sizeof(cpa));

        /*
         * Check, if we are requested to change a not supported
         * feature:
         */
        mask_set = canon_pgprot(mask_set);
        mask_clr = canon_pgprot(mask_clr);
        if (!pgprot_val(mask_set) && !pgprot_val(mask_clr) && !force_split)
                return 0;

        /* Ensure we are PAGE_SIZE aligned */
        if (in_flag & CPA_ARRAY) {
                int i;
                for (i = 0; i < numpages; i++) {
                        if (addr[i] & ~PAGE_MASK) {
                                addr[i] &= PAGE_MASK;
                                WARN_ON_ONCE(1);
                        }
                }
        } else if (!(in_flag & CPA_PAGES_ARRAY)) {
                /*
                 * in_flag of CPA_PAGES_ARRAY implies it is aligned.
                 * No need to check in that case
                 */
                if (*addr & ~PAGE_MASK) {
                        *addr &= PAGE_MASK;
                        /*
                         * People should not be passing in unaligned addresses:
                         */
                        WARN_ON_ONCE(1);
                }
                /*
                 * Save address for cache flush. *addr is modified in the call
                 * to __change_page_attr_set_clr() below.
                 */
                baddr = *addr;
        }

        /* Must avoid aliasing mappings in the highmem code */
        kmap_flush_unused();

        vm_unmap_aliases();

        cpa.vaddr = addr;
        cpa.pages = pages;
        cpa.numpages = numpages;
        cpa.mask_set = mask_set;
        cpa.mask_clr = mask_clr;
        cpa.flags = 0;
        cpa.curpage = 0;
        cpa.force_split = force_split;

        if (in_flag & (CPA_ARRAY | CPA_PAGES_ARRAY))
                cpa.flags |= in_flag;

        /* No alias checking for _NX bit modifications */
        checkalias = (pgprot_val(mask_set) | pgprot_val(mask_clr)) != _PAGE_NX;

        ret = __change_page_attr_set_clr(&cpa, checkalias);

        /*
         * Check whether we really changed something:
         */
        if (!(cpa.flags & CPA_FLUSHTLB))
                goto out;

        /*
         * No need to flush, when we did not set any of the caching
         * attributes:
         */
        cache = !!pgprot2cachemode(mask_set);

        /*
         * On success we use CLFLUSH, when the CPU supports it to
         * avoid the WBINVD. If the CPU does not support it and in the
         * error case we fall back to cpa_flush_all (which uses
         * WBINVD):
         */
        if (!ret && cpu_has_clflush) {
                if (cpa.flags & (CPA_PAGES_ARRAY | CPA_ARRAY)) {
                        cpa_flush_array(addr, numpages, cache,
                                        cpa.flags, pages);
                } else
                        cpa_flush_range(baddr, numpages, cache);
        } else
                cpa_flush_all(cache);

out:
        return ret;
}

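/*
 * Call-chain sketch (illustrative): set_memory_ro(addr, n) reaches the
 * function above as
 *
 *      change_page_attr_set_clr(&addr, n, __pgprot(0), __pgprot(_PAGE_RW),
 *                               0, 0, NULL);
 *
 * i.e. mask_clr = _PAGE_RW, nothing set, no forced split, no array.
 */
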
static inline int change_page_attr_set(unsigned long *addr, int numpages,
                                       pgprot_t mask, int array)
{
        return change_page_attr_set_clr(addr, numpages, mask, __pgprot(0), 0,
                (array ? CPA_ARRAY : 0), NULL);
}

static inline int change_page_attr_clear(unsigned long *addr, int numpages,
                                         pgprot_t mask, int array)
{
        return change_page_attr_set_clr(addr, numpages, __pgprot(0), mask, 0,
                (array ? CPA_ARRAY : 0), NULL);
}

static inline int cpa_set_pages_array(struct page **pages, int numpages,
                                      pgprot_t mask)
{
        return change_page_attr_set_clr(NULL, numpages, mask, __pgprot(0), 0,
                CPA_PAGES_ARRAY, pages);
}

static inline int cpa_clear_pages_array(struct page **pages, int numpages,
                                        pgprot_t mask)
{
        return change_page_attr_set_clr(NULL, numpages, __pgprot(0), mask, 0,
                CPA_PAGES_ARRAY, pages);
}

int _set_memory_uc(unsigned long addr, int numpages)
{
        /*
         * for now UC MINUS. see comments in ioremap_nocache()
         * If you really need strong UC use ioremap_uc(), but note
         * that you cannot override IO areas with set_memory_*() as
         * these helpers cannot work with IO memory.
         */
        return change_page_attr_set(&addr, numpages,
                                    cachemode2pgprot(_PAGE_CACHE_MODE_UC_MINUS),
                                    0);
}

int set_memory_uc(unsigned long addr, int numpages)
{
        int ret;

        /*
         * for now UC MINUS. see comments in ioremap_nocache()
         */
        ret = reserve_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE,
                              _PAGE_CACHE_MODE_UC_MINUS, NULL);
        if (ret)
                goto out_err;

        ret = _set_memory_uc(addr, numpages);
        if (ret)
                goto out_free;

        return 0;

out_free:
        free_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE);
out_err:
        return ret;
}
EXPORT_SYMBOL(set_memory_uc);
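
/*
 * Usage sketch (illustrative only): mark a direct-mapped kernel buffer
 * uncached before sharing it with an agent that must not see stale cached
 * data, and restore write-back caching when done:
 *
 *      set_memory_uc((unsigned long)buf, nrpages);
 *      ...
 *      set_memory_wb((unsigned long)buf, nrpages);
 *
 * The address must be page aligned; IO memory cannot be handled here and
 * needs ioremap_*() instead, as the comment in _set_memory_uc() notes.
 */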

static int _set_memory_array(unsigned long *addr, int addrinarray,
                enum page_cache_mode new_type)
{
        int i, j;
        int ret;

        /*
         * for now UC MINUS. see comments in ioremap_nocache()
         */
        for (i = 0; i < addrinarray; i++) {
                ret = reserve_memtype(__pa(addr[i]), __pa(addr[i]) + PAGE_SIZE,
                                        new_type, NULL);
                if (ret)
                        goto out_free;
        }

        ret = change_page_attr_set(addr, addrinarray,
                                   cachemode2pgprot(_PAGE_CACHE_MODE_UC_MINUS),
                                   1);

        if (!ret && new_type == _PAGE_CACHE_MODE_WC)
                ret = change_page_attr_set_clr(addr, addrinarray,
                                               cachemode2pgprot(
                                                _PAGE_CACHE_MODE_WC),
                                               __pgprot(_PAGE_CACHE_MASK),
                                               0, CPA_ARRAY, NULL);
        if (ret)
                goto out_free;

        return 0;

out_free:
        for (j = 0; j < i; j++)
                free_memtype(__pa(addr[j]), __pa(addr[j]) + PAGE_SIZE);

        return ret;
}

int set_memory_array_uc(unsigned long *addr, int addrinarray)
{
        return _set_memory_array(addr, addrinarray, _PAGE_CACHE_MODE_UC_MINUS);
}
EXPORT_SYMBOL(set_memory_array_uc);

int set_memory_array_wc(unsigned long *addr, int addrinarray)
{
        return _set_memory_array(addr, addrinarray, _PAGE_CACHE_MODE_WC);
}
EXPORT_SYMBOL(set_memory_array_wc);

ef354af4 | 1553 | int _set_memory_wc(unsigned long addr, int numpages) |
1554 | { | |
3869c4aa | 1555 | int ret; |
bdc6340f PV |
1556 | unsigned long addr_copy = addr; |
1557 | ||
3869c4aa | 1558 | ret = change_page_attr_set(&addr, numpages, |
c06814d8 JG |
1559 | cachemode2pgprot(_PAGE_CACHE_MODE_UC_MINUS), |
1560 | 0); | |
3869c4aa | 1561 | if (!ret) { |
bdc6340f | 1562 | ret = change_page_attr_set_clr(&addr_copy, numpages, |
c06814d8 JG |
1563 | cachemode2pgprot( |
1564 | _PAGE_CACHE_MODE_WC), | |
bdc6340f PV |
1565 | __pgprot(_PAGE_CACHE_MASK), |
1566 | 0, 0, NULL); | |
3869c4aa | 1567 | } |
1568 | return ret; | |
ef354af4 | 1569 | } |
1570 | ||
1571 | int set_memory_wc(unsigned long addr, int numpages) | |
1572 | { | |
9fa3ab39 | 1573 | int ret; |
1574 | ||
9fa3ab39 | 1575 | ret = reserve_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE, |
e00c8cc9 | 1576 | _PAGE_CACHE_MODE_WC, NULL); |
9fa3ab39 | 1577 | if (ret) |
1578 | goto out_err; | |
ef354af4 | 1579 | |
9fa3ab39 | 1580 | ret = _set_memory_wc(addr, numpages); |
1581 | if (ret) | |
1582 | goto out_free; | |
1583 | ||
1584 | return 0; | |
1585 | ||
1586 | out_free: | |
1587 | free_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE); | |
1588 | out_err: | |
1589 | return ret; | |
ef354af4 | 1590 | } |
1591 | EXPORT_SYMBOL(set_memory_wc); | |
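
/*
 * Illustrative sketch: converting a RAM-backed pixel buffer to
 * write-combining so CPU writes get batched.  Note the two-step dance
 * in _set_memory_wc() above (UC MINUS first, then WC) while the
 * attribute changes.  The address is a hypothetical direct-mapping
 * address.
 */
static int __maybe_unused example_wc_pixelbuf(unsigned long vaddr, int numpages)
{
	int ret = set_memory_wc(vaddr, numpages);

	if (ret)
		return ret;

	/* ... fill the buffer; writes are combined ... */

	return set_memory_wb(vaddr, numpages);
}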
1592 | ||
1219333d | 1593 | int _set_memory_wb(unsigned long addr, int numpages) |
75cbade8 | 1594 | { |
c06814d8 | 1595 | /* WB cache mode is hard-wired to all cache attribute bits being 0 */
d75586ad SL |
1596 | return change_page_attr_clear(&addr, numpages, |
1597 | __pgprot(_PAGE_CACHE_MASK), 0); | |
75cbade8 | 1598 | } |
1219333d | 1599 | |
1600 | int set_memory_wb(unsigned long addr, int numpages) | |
1601 | { | |
9fa3ab39 | 1602 | int ret; |
1603 | ||
1604 | ret = _set_memory_wb(addr, numpages); | |
1605 | if (ret) | |
1606 | return ret; | |
1607 | ||
c15238df | 1608 | free_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE); |
9fa3ab39 | 1609 | return 0; |
1219333d | 1610 | } |
75cbade8 AV |
1611 | EXPORT_SYMBOL(set_memory_wb); |
1612 | ||
d75586ad SL |
1613 | int set_memory_array_wb(unsigned long *addr, int addrinarray) |
1614 | { | |
1615 | int i; | |
a5593e0b | 1616 | int ret; |
1617 | ||
c06814d8 | 1618 | /* WB cache mode is hard-wired to all cache attribute bits being 0 */
a5593e0b | 1619 | ret = change_page_attr_clear(addr, addrinarray, |
1620 | __pgprot(_PAGE_CACHE_MASK), 1); | |
9fa3ab39 | 1621 | if (ret) |
1622 | return ret; | |
d75586ad | 1623 | |
9fa3ab39 | 1624 | for (i = 0; i < addrinarray; i++) |
1625 | free_memtype(__pa(addr[i]), __pa(addr[i]) + PAGE_SIZE); | |
c5e147cf | 1626 | |
9fa3ab39 | 1627 | return 0; |
d75586ad SL |
1628 | } |
1629 | EXPORT_SYMBOL(set_memory_array_wb); | |
1630 | ||
75cbade8 AV |
1631 | int set_memory_x(unsigned long addr, int numpages) |
1632 | { | |
583140af PA |
1633 | if (!(__supported_pte_mask & _PAGE_NX)) |
1634 | return 0; | |
1635 | ||
d75586ad | 1636 | return change_page_attr_clear(&addr, numpages, __pgprot(_PAGE_NX), 0); |
75cbade8 AV |
1637 | } |
1638 | EXPORT_SYMBOL(set_memory_x); | |
1639 | ||
1640 | int set_memory_nx(unsigned long addr, int numpages) | |
1641 | { | |
583140af PA |
1642 | if (!(__supported_pte_mask & _PAGE_NX)) |
1643 | return 0; | |
1644 | ||
d75586ad | 1645 | return change_page_attr_set(&addr, numpages, __pgprot(_PAGE_NX), 0); |
75cbade8 AV |
1646 | } |
1647 | EXPORT_SYMBOL(set_memory_nx); | |
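
/*
 * Illustrative sketch: marking a page that holds generated code
 * executable, and flipping it back to NX once it is retired.  On CPUs
 * without NX support both calls are no-ops, as above.  The address is
 * hypothetical.
 */
static int __maybe_unused example_exec_page(unsigned long vaddr)
{
	int ret = set_memory_x(vaddr, 1);

	if (ret)
		return ret;

	/* ... run the trampoline at vaddr ... */

	return set_memory_nx(vaddr, 1);
}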
1648 | ||
1649 | int set_memory_ro(unsigned long addr, int numpages) | |
1650 | { | |
d75586ad | 1651 | return change_page_attr_clear(&addr, numpages, __pgprot(_PAGE_RW), 0); |
75cbade8 | 1652 | } |
75cbade8 AV |
1653 | |
1654 | int set_memory_rw(unsigned long addr, int numpages) | |
1655 | { | |
d75586ad | 1656 | return change_page_attr_set(&addr, numpages, __pgprot(_PAGE_RW), 0); |
75cbade8 | 1657 | } |
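
/*
 * Illustrative sketch: write-protecting a mostly-read-only table and
 * briefly lifting the protection for a rare update.  The table address
 * is a hypothetical direct-mapping address.
 */
static int __maybe_unused example_protect_table(unsigned long table, int numpages)
{
	int ret;

	/* Seal the table against stray writes. */
	ret = set_memory_ro(table, numpages);
	if (ret)
		return ret;

	/* ... on the rare update path ... */
	ret = set_memory_rw(table, numpages);
	if (ret)
		return ret;
	/* ... modify the table here ... */
	return set_memory_ro(table, numpages);
}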
f62d0f00 IM |
1658 | |
1659 | int set_memory_np(unsigned long addr, int numpages) | |
1660 | { | |
d75586ad | 1661 | return change_page_attr_clear(&addr, numpages, __pgprot(_PAGE_PRESENT), 0); |
f62d0f00 | 1662 | } |
75cbade8 | 1663 | |
c9caa02c AK |
1664 | int set_memory_4k(unsigned long addr, int numpages) |
1665 | { | |
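	/*
	 * The "1" below sets cpa.force_split: any 2M/1G mapping covering
	 * the range is broken down to 4k entries; no protection bits change.
	 */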
d75586ad | 1666 | return change_page_attr_set_clr(&addr, numpages, __pgprot(0), |
9ae28475 | 1667 | __pgprot(0), 1, 0, NULL); |
c9caa02c AK |
1668 | } |
1669 | ||
75cbade8 AV |
1670 | int set_pages_uc(struct page *page, int numpages) |
1671 | { | |
1672 | unsigned long addr = (unsigned long)page_address(page); | |
75cbade8 | 1673 | |
d7c8f21a | 1674 | return set_memory_uc(addr, numpages); |
75cbade8 AV |
1675 | } |
1676 | EXPORT_SYMBOL(set_pages_uc); | |
1677 | ||
4f646254 | 1678 | static int _set_pages_array(struct page **pages, int addrinarray, |
c06814d8 | 1679 | enum page_cache_mode new_type) |
0f350755 | 1680 | { |
1681 | unsigned long start; | |
1682 | unsigned long end; | |
1683 | int i; | |
1684 | int free_idx; | |
4f646254 | 1685 | int ret; |
0f350755 | 1686 | |
1687 | for (i = 0; i < addrinarray; i++) { | |
8523acfe TH |
1688 | if (PageHighMem(pages[i])) |
1689 | continue; | |
1690 | start = page_to_pfn(pages[i]) << PAGE_SHIFT; | |
0f350755 | 1691 | end = start + PAGE_SIZE; |
4f646254 | 1692 | if (reserve_memtype(start, end, new_type, NULL)) |
0f350755 | 1693 | goto err_out; |
1694 | } | |
1695 | ||
4f646254 | 1696 | ret = cpa_set_pages_array(pages, addrinarray, |
c06814d8 JG |
1697 | cachemode2pgprot(_PAGE_CACHE_MODE_UC_MINUS)); |
1698 | if (!ret && new_type == _PAGE_CACHE_MODE_WC) | |
4f646254 | 1699 | ret = change_page_attr_set_clr(NULL, addrinarray, |
c06814d8 JG |
1700 | cachemode2pgprot( |
1701 | _PAGE_CACHE_MODE_WC), | |
4f646254 PN |
1702 | __pgprot(_PAGE_CACHE_MASK), |
1703 | 0, CPA_PAGES_ARRAY, pages); | |
1704 | if (ret) | |
1705 | goto err_out; | |
1706 | return 0; /* Success */ | |
0f350755 | 1707 | err_out: |
1708 | free_idx = i; | |
1709 | for (i = 0; i < free_idx; i++) { | |
8523acfe TH |
1710 | if (PageHighMem(pages[i])) |
1711 | continue; | |
1712 | start = page_to_pfn(pages[i]) << PAGE_SHIFT; | |
0f350755 | 1713 | end = start + PAGE_SIZE; |
1714 | free_memtype(start, end); | |
1715 | } | |
1716 | return -EINVAL; | |
1717 | } | |
4f646254 PN |
1718 | |
1719 | int set_pages_array_uc(struct page **pages, int addrinarray) | |
1720 | { | |
c06814d8 | 1721 | return _set_pages_array(pages, addrinarray, _PAGE_CACHE_MODE_UC_MINUS); |
4f646254 | 1722 | } |
0f350755 | 1723 | EXPORT_SYMBOL(set_pages_array_uc); |
1724 | ||
4f646254 PN |
1725 | int set_pages_array_wc(struct page **pages, int addrinarray) |
1726 | { | |
c06814d8 | 1727 | return _set_pages_array(pages, addrinarray, _PAGE_CACHE_MODE_WC); |
4f646254 PN |
1728 | } |
1729 | EXPORT_SYMBOL(set_pages_array_wc); | |
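
/*
 * Illustrative sketch: a GPU-style allocator converting a pool of
 * freshly allocated pages to write-combining in one pass.  Highmem
 * pages are simply skipped by _set_pages_array() above.  The pool and
 * the userspace-mapping step are hypothetical.
 */
static int __maybe_unused example_pool_wc(struct page **pages, int count)
{
	int ret = set_pages_array_wc(pages, count);

	if (ret)
		return ret;

	/* ... map the pages to userspace with pgprot_writecombine() ... */

	return set_pages_array_wb(pages, count);
}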
1730 | ||
75cbade8 AV |
1731 | int set_pages_wb(struct page *page, int numpages) |
1732 | { | |
1733 | unsigned long addr = (unsigned long)page_address(page); | |
75cbade8 | 1734 | |
d7c8f21a | 1735 | return set_memory_wb(addr, numpages); |
75cbade8 AV |
1736 | } |
1737 | EXPORT_SYMBOL(set_pages_wb); | |
1738 | ||
0f350755 | 1739 | int set_pages_array_wb(struct page **pages, int addrinarray) |
1740 | { | |
1741 | int retval; | |
1742 | unsigned long start; | |
1743 | unsigned long end; | |
1744 | int i; | |
1745 | ||
c06814d8 | 1746 | /* WB cache mode is hard-wired to all cache attribute bits being 0 */
0f350755 | 1747 | retval = cpa_clear_pages_array(pages, addrinarray, |
1748 | __pgprot(_PAGE_CACHE_MASK)); | |
9fa3ab39 | 1749 | if (retval) |
1750 | return retval; | |
0f350755 | 1751 | |
1752 | for (i = 0; i < addrinarray; i++) { | |
8523acfe TH |
1753 | if (PageHighMem(pages[i])) |
1754 | continue; | |
1755 | start = page_to_pfn(pages[i]) << PAGE_SHIFT; | |
0f350755 | 1756 | end = start + PAGE_SIZE; |
1757 | free_memtype(start, end); | |
1758 | } | |
1759 | ||
9fa3ab39 | 1760 | return 0; |
0f350755 | 1761 | } |
1762 | EXPORT_SYMBOL(set_pages_array_wb); | |
1763 | ||
75cbade8 AV |
1764 | int set_pages_x(struct page *page, int numpages) |
1765 | { | |
1766 | unsigned long addr = (unsigned long)page_address(page); | |
75cbade8 | 1767 | |
d7c8f21a | 1768 | return set_memory_x(addr, numpages); |
75cbade8 AV |
1769 | } |
1770 | EXPORT_SYMBOL(set_pages_x); | |
1771 | ||
1772 | int set_pages_nx(struct page *page, int numpages) | |
1773 | { | |
1774 | unsigned long addr = (unsigned long)page_address(page); | |
75cbade8 | 1775 | |
d7c8f21a | 1776 | return set_memory_nx(addr, numpages); |
75cbade8 AV |
1777 | } |
1778 | EXPORT_SYMBOL(set_pages_nx); | |
1779 | ||
1780 | int set_pages_ro(struct page *page, int numpages) | |
1781 | { | |
1782 | unsigned long addr = (unsigned long)page_address(page); | |
75cbade8 | 1783 | |
d7c8f21a | 1784 | return set_memory_ro(addr, numpages); |
75cbade8 | 1785 | } |
75cbade8 AV |
1786 | |
1787 | int set_pages_rw(struct page *page, int numpages) | |
1788 | { | |
1789 | unsigned long addr = (unsigned long)page_address(page); | |
e81d5dc4 | 1790 | |
d7c8f21a | 1791 | return set_memory_rw(addr, numpages); |
78c94aba IM |
1792 | } |
1793 | ||
1da177e4 | 1794 | #ifdef CONFIG_DEBUG_PAGEALLOC |
f62d0f00 IM |
1795 | |
1796 | static int __set_pages_p(struct page *page, int numpages) | |
1797 | { | |
d75586ad SL |
1798 | unsigned long tempaddr = (unsigned long) page_address(page); |
1799 | struct cpa_data cpa = { .vaddr = &tempaddr, | |
82f0712c | 1800 | .pgd = NULL, |
72e458df TG |
1801 | .numpages = numpages, |
1802 | .mask_set = __pgprot(_PAGE_PRESENT | _PAGE_RW), | |
d75586ad SL |
1803 | .mask_clr = __pgprot(0), |
1804 | .flags = 0}; | |
72932c7a | 1805 | |
55121b43 SS |
1806 | /* |
1807 | * No alias checking is needed when setting the present flag;
1808 | * otherwise we might need to break large pages for 64-bit kernel
1809 | * text mappings (which adds complexity, especially if we want to
1810 | * do this from atomic context). Let's keep it simple!
1811 | */ | |
1812 | return __change_page_attr_set_clr(&cpa, 0); | |
f62d0f00 IM |
1813 | } |
1814 | ||
1815 | static int __set_pages_np(struct page *page, int numpages) | |
1816 | { | |
d75586ad SL |
1817 | unsigned long tempaddr = (unsigned long) page_address(page); |
1818 | struct cpa_data cpa = { .vaddr = &tempaddr, | |
82f0712c | 1819 | .pgd = NULL, |
72e458df TG |
1820 | .numpages = numpages, |
1821 | .mask_set = __pgprot(0), | |
d75586ad SL |
1822 | .mask_clr = __pgprot(_PAGE_PRESENT | _PAGE_RW), |
1823 | .flags = 0}; | |
72932c7a | 1824 | |
55121b43 SS |
1825 | /* |
1826 | * No alias checking is needed when clearing the present flag;
1827 | * otherwise we might need to break large pages for 64-bit kernel
1828 | * text mappings (which adds complexity, especially if we want to
1829 | * do this from atomic context). Let's keep it simple!
1830 | */ | |
1831 | return __change_page_attr_set_clr(&cpa, 0); | |
f62d0f00 IM |
1832 | } |
1833 | ||
031bc574 | 1834 | void __kernel_map_pages(struct page *page, int numpages, int enable) |
1da177e4 LT |
1835 | { |
1836 | if (PageHighMem(page)) | |
1837 | return; | |
9f4c815c | 1838 | if (!enable) { |
f9b8404c IM |
1839 | debug_check_no_locks_freed(page_address(page), |
1840 | numpages * PAGE_SIZE); | |
9f4c815c | 1841 | } |
de5097c2 | 1842 | |
9f4c815c | 1843 | /* |
f8d8406b | 1844 | * The return value is ignored as the calls cannot fail. |
55121b43 SS |
1845 | * Large pages for identity mappings are not used at boot time,
1846 | * so no memory allocations happen during a large page split.
1da177e4 | 1847 | */ |
f62d0f00 IM |
1848 | if (enable) |
1849 | __set_pages_p(page, numpages); | |
1850 | else | |
1851 | __set_pages_np(page, numpages); | |
9f4c815c IM |
1852 | |
1853 | /* | |
e4b71dcf IM |
1854 | * We should perform an IPI and flush all TLBs,
1855 | * but that can deadlock, so flush only the current CPU:
1da177e4 LT |
1856 | */ |
1857 | __flush_tlb_all(); | |
26564600 BO |
1858 | |
1859 | arch_flush_lazy_mmu_mode(); | |
ee7ae7a1 TG |
1860 | } |
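
/*
 * Illustrative sketch of the caller side: with DEBUG_PAGEALLOC the page
 * allocator unmaps pages as they are freed (and remaps them on
 * allocation with enable == 1), so any use-after-free faults
 * immediately.  The helper name is hypothetical.
 */
static void __maybe_unused example_debug_free(struct page *page, int order)
{
	__kernel_map_pages(page, 1 << order, 0);	/* 0 == unmap */
}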
1861 | ||
8a235efa RW |
1862 | #ifdef CONFIG_HIBERNATION |
1863 | ||
1864 | bool kernel_page_present(struct page *page) | |
1865 | { | |
1866 | unsigned int level; | |
1867 | pte_t *pte; | |
1868 | ||
1869 | if (PageHighMem(page)) | |
1870 | return false; | |
1871 | ||
1872 | pte = lookup_address((unsigned long)page_address(page), &level); | |
1873 | return (pte_val(*pte) & _PAGE_PRESENT); | |
1874 | } | |
1875 | ||
1876 | #endif /* CONFIG_HIBERNATION */ | |
1877 | ||
1878 | #endif /* CONFIG_DEBUG_PAGEALLOC */ | |
d1028a15 | 1879 | |
82f0712c BP |
1880 | int kernel_map_pages_in_pgd(pgd_t *pgd, u64 pfn, unsigned long address, |
1881 | unsigned numpages, unsigned long page_flags) | |
1882 | { | |
1883 | int retval = -EINVAL; | |
1884 | ||
1885 | struct cpa_data cpa = { | |
1886 | .vaddr = &address, | |
1887 | .pfn = pfn, | |
1888 | .pgd = pgd, | |
1889 | .numpages = numpages, | |
1890 | .mask_set = __pgprot(0), | |
1891 | .mask_clr = __pgprot(0), | |
1892 | .flags = 0, | |
1893 | }; | |
1894 | ||
1895 | if (!(__supported_pte_mask & _PAGE_NX)) | |
1896 | goto out; | |
1897 | ||
1898 | if (!(page_flags & _PAGE_NX)) | |
1899 | cpa.mask_clr = __pgprot(_PAGE_NX); | |
1900 | ||
1901 | cpa.mask_set = __pgprot(_PAGE_PRESENT | page_flags); | |
1902 | ||
1903 | retval = __change_page_attr_set_clr(&cpa, 0); | |
1904 | __flush_tlb_all(); | |
1905 | ||
1906 | out: | |
1907 | return retval; | |
1908 | } | |
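
/*
 * Illustrative sketch: mapping one physical page present and writable
 * into a private page-table hierarchy, in the style of the EFI code
 * that consumes this helper.  The pgd, pfn and virtual address are
 * hypothetical.
 */
static int __maybe_unused example_map_data_page(pgd_t *pgd, u64 pfn,
						unsigned long vaddr)
{
	/*
	 * _PAGE_NX in page_flags keeps the mapping non-executable;
	 * omitting it would make the helper clear NX instead (see above).
	 */
	return kernel_map_pages_in_pgd(pgd, pfn, vaddr, 1, _PAGE_RW | _PAGE_NX);
}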
1909 | ||
42a54772 BP |
1910 | void kernel_unmap_pages_in_pgd(pgd_t *root, unsigned long address, |
1911 | unsigned numpages) | |
1912 | { | |
1913 | unmap_pgd_range(root, address, address + (numpages << PAGE_SHIFT)); | |
1914 | } | |
1915 | ||
d1028a15 AV |
1916 | /* |
1917 | * The testcases use internal knowledge of the implementation that shouldn't | |
1918 | * be exposed to the rest of the kernel. Include these directly here. | |
1919 | */ | |
1920 | #ifdef CONFIG_CPA_DEBUG | |
1921 | #include "pageattr-test.c" | |
1922 | #endif |