arch/x86/mm/pageattr.c
9f4c815c
IM
1/*
2 * Copyright 2002 Andi Kleen, SuSE Labs.
1da177e4 3 * Thanks to Ben LaHaise for precious feedback.
9f4c815c 4 */
1da177e4 5#include <linux/highmem.h>
8192206d 6#include <linux/bootmem.h>
1da177e4 7#include <linux/module.h>
9f4c815c 8#include <linux/sched.h>
9f4c815c 9#include <linux/mm.h>
76ebd054 10#include <linux/interrupt.h>
ee7ae7a1
TG
11#include <linux/seq_file.h>
12#include <linux/debugfs.h>
e59a1bb2 13#include <linux/pfn.h>
8c4bfc6e 14#include <linux/percpu.h>
5a0e3ad6 15#include <linux/gfp.h>
5bd5a452 16#include <linux/pci.h>
9f4c815c 17
950f9d95 18#include <asm/e820.h>
1da177e4
LT
19#include <asm/processor.h>
20#include <asm/tlbflush.h>
f8af095d 21#include <asm/sections.h>
93dbda7c 22#include <asm/setup.h>
9f4c815c
IM
23#include <asm/uaccess.h>
24#include <asm/pgalloc.h>
c31c7d48 25#include <asm/proto.h>
1219333d 26#include <asm/pat.h>
1da177e4 27
9df84993
IM
28/*
29 * The current flushing context - we pass it instead of 5 arguments:
30 */
72e458df 31struct cpa_data {
d75586ad 32 unsigned long *vaddr;
0fd64c23 33 pgd_t *pgd;
72e458df
TG
34 pgprot_t mask_set;
35 pgprot_t mask_clr;
65e074df 36 int numpages;
d75586ad 37 int flags;
c31c7d48 38 unsigned long pfn;
c9caa02c 39 unsigned force_split : 1;
d75586ad 40 int curpage;
9ae28475 41 struct page **pages;
72e458df
TG
42};
43
ad5ca55f
SS
44/*
45 * Serialize cpa() (for !DEBUG_PAGEALLOC, which uses large identity mappings)
46 * using cpa_lock, so that no other CPU with stale large TLB entries can
47 * change the page attributes in parallel while another CPU is splitting a
48 * large page entry and changing its attributes.
49 */
50static DEFINE_SPINLOCK(cpa_lock);
51
d75586ad
SL
52#define CPA_FLUSHTLB 1
53#define CPA_ARRAY 2
9ae28475 54#define CPA_PAGES_ARRAY 4
d75586ad 55
65280e61 56#ifdef CONFIG_PROC_FS
ce0c0e50
AK
57static unsigned long direct_pages_count[PG_LEVEL_NUM];
58
65280e61 59void update_page_count(int level, unsigned long pages)
ce0c0e50 60{
ce0c0e50 61 /* Protect against CPA */
a79e53d8 62 spin_lock(&pgd_lock);
ce0c0e50 63 direct_pages_count[level] += pages;
a79e53d8 64 spin_unlock(&pgd_lock);
65280e61
TG
65}
66
67static void split_page_count(int level)
68{
69 direct_pages_count[level]--;
70 direct_pages_count[level - 1] += PTRS_PER_PTE;
71}
72
e1759c21 73void arch_report_meminfo(struct seq_file *m)
65280e61 74{
b9c3bfc2 75 seq_printf(m, "DirectMap4k: %8lu kB\n",
a06de630
HD
76 direct_pages_count[PG_LEVEL_4K] << 2);
77#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
b9c3bfc2 78 seq_printf(m, "DirectMap2M: %8lu kB\n",
a06de630
HD
79 direct_pages_count[PG_LEVEL_2M] << 11);
80#else
b9c3bfc2 81 seq_printf(m, "DirectMap4M: %8lu kB\n",
a06de630
HD
82 direct_pages_count[PG_LEVEL_2M] << 12);
83#endif
a06de630 84 if (direct_gbpages)
b9c3bfc2 85 seq_printf(m, "DirectMap1G: %8lu kB\n",
a06de630 86 direct_pages_count[PG_LEVEL_1G] << 20);
ce0c0e50 87}
65280e61
TG
88#else
89static inline void split_page_count(int level) { }
90#endif
ce0c0e50 91
c31c7d48
TG
92#ifdef CONFIG_X86_64
93
94static inline unsigned long highmap_start_pfn(void)
95{
fc8d7826 96 return __pa_symbol(_text) >> PAGE_SHIFT;
c31c7d48
TG
97}
98
99static inline unsigned long highmap_end_pfn(void)
100{
fc8d7826 101 return __pa_symbol(roundup(_brk_end, PMD_SIZE)) >> PAGE_SHIFT;
c31c7d48
TG
102}
103
104#endif
105
92cb54a3
IM
106#ifdef CONFIG_DEBUG_PAGEALLOC
107# define debug_pagealloc 1
108#else
109# define debug_pagealloc 0
110#endif
111
ed724be6
AV
112static inline int
113within(unsigned long addr, unsigned long start, unsigned long end)
687c4825 114{
ed724be6
AV
115 return addr >= start && addr < end;
116}
117
d7c8f21a
TG
118/*
119 * Flushing functions
120 */
cd8ddf1a 121
cd8ddf1a
TG
122/**
123 * clflush_cache_range - flush a cache range with clflush
9efc31b8 124 * @vaddr: virtual start address
cd8ddf1a
TG
125 * @size: number of bytes to flush
126 *
8b80fd8b
RZ
127 * clflushopt is an unordered instruction which needs fencing with mfence or
128 * sfence to avoid ordering issues.
cd8ddf1a 129 */
4c61afcd 130void clflush_cache_range(void *vaddr, unsigned int size)
d7c8f21a 131{
4c61afcd 132 void *vend = vaddr + size - 1;
d7c8f21a 133
cd8ddf1a 134 mb();
4c61afcd
IM
135
136 for (; vaddr < vend; vaddr += boot_cpu_data.x86_clflush_size)
8b80fd8b 137 clflushopt(vaddr);
4c61afcd
IM
138 /*
139 * Flush any possible final partial cacheline:
140 */
8b80fd8b 141 clflushopt(vend);
4c61afcd 142
cd8ddf1a 143 mb();
d7c8f21a 144}
e517a5e9 145EXPORT_SYMBOL_GPL(clflush_cache_range);
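/*
 * Illustrative usage sketch (placeholder names, not taken from this file):
 * after the CPU has written a buffer that a non-coherent agent will read,
 * the whole range can be flushed in one call; "buf" and "len" are assumed
 * to be provided by the caller:
 *
 *	clflush_cache_range(buf, len);
 */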
d7c8f21a 146
af1e6844 147static void __cpa_flush_all(void *arg)
d7c8f21a 148{
6bb8383b
AK
149 unsigned long cache = (unsigned long)arg;
150
d7c8f21a
TG
151 /*
152 * Flush all to work around an erratum in early Athlons regarding
153 * large page flushing.
154 */
155 __flush_tlb_all();
156
0b827537 157 if (cache && boot_cpu_data.x86 >= 4)
d7c8f21a
TG
158 wbinvd();
159}
160
6bb8383b 161static void cpa_flush_all(unsigned long cache)
d7c8f21a
TG
162{
163 BUG_ON(irqs_disabled());
164
15c8b6c1 165 on_each_cpu(__cpa_flush_all, (void *) cache, 1);
d7c8f21a
TG
166}
167
57a6a46a
TG
168static void __cpa_flush_range(void *arg)
169{
57a6a46a
TG
170 /*
171 * We could optimize that further and do individual per page
172 * tlb invalidates for a low number of pages. Caveat: we must
173 * flush the high aliases on 64bit as well.
174 */
175 __flush_tlb_all();
57a6a46a
TG
176}
177
6bb8383b 178static void cpa_flush_range(unsigned long start, int numpages, int cache)
57a6a46a 179{
4c61afcd
IM
180 unsigned int i, level;
181 unsigned long addr;
182
57a6a46a 183 BUG_ON(irqs_disabled());
4c61afcd 184 WARN_ON(PAGE_ALIGN(start) != start);
57a6a46a 185
15c8b6c1 186 on_each_cpu(__cpa_flush_range, NULL, 1);
57a6a46a 187
6bb8383b
AK
188 if (!cache)
189 return;
190
3b233e52
TG
191 /*
192 * We only need to flush on one CPU,
193 * clflush is a MESI-coherent instruction that
194 * will cause all other CPUs to flush the same
195 * cachelines:
196 */
4c61afcd
IM
197 for (i = 0, addr = start; i < numpages; i++, addr += PAGE_SIZE) {
198 pte_t *pte = lookup_address(addr, &level);
199
200 /*
201 * Only flush present addresses:
202 */
7bfb72e8 203 if (pte && (pte_val(*pte) & _PAGE_PRESENT))
4c61afcd
IM
204 clflush_cache_range((void *) addr, PAGE_SIZE);
205 }
57a6a46a
TG
206}
207
9ae28475 208static void cpa_flush_array(unsigned long *start, int numpages, int cache,
209 int in_flags, struct page **pages)
d75586ad
SL
210{
211 unsigned int i, level;
2171787b 212 unsigned long do_wbinvd = cache && numpages >= 1024; /* 4M threshold */
d75586ad
SL
213
214 BUG_ON(irqs_disabled());
215
2171787b 216 on_each_cpu(__cpa_flush_all, (void *) do_wbinvd, 1);
d75586ad 217
2171787b 218 if (!cache || do_wbinvd)
d75586ad
SL
219 return;
220
d75586ad
SL
221 /*
222 * We only need to flush on one CPU,
223 * clflush is a MESI-coherent instruction that
224 * will cause all other CPUs to flush the same
225 * cachelines:
226 */
9ae28475 227 for (i = 0; i < numpages; i++) {
228 unsigned long addr;
229 pte_t *pte;
230
231 if (in_flags & CPA_PAGES_ARRAY)
232 addr = (unsigned long)page_address(pages[i]);
233 else
234 addr = start[i];
235
236 pte = lookup_address(addr, &level);
d75586ad
SL
237
238 /*
239 * Only flush present addresses:
240 */
241 if (pte && (pte_val(*pte) & _PAGE_PRESENT))
9ae28475 242 clflush_cache_range((void *)addr, PAGE_SIZE);
d75586ad
SL
243 }
244}
245
ed724be6
AV
246/*
247 * Certain areas of memory on x86 require very specific protection flags,
248 * for example the BIOS area or kernel text. Callers don't always get this
249 * right (again, ioremap() on BIOS memory is not uncommon) so this function
250 * checks and fixes these known static required protection bits.
251 */
c31c7d48
TG
252static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
253 unsigned long pfn)
ed724be6
AV
254{
255 pgprot_t forbidden = __pgprot(0);
256
687c4825 257 /*
ed724be6
AV
258 * The BIOS area between 640k and 1Mb needs to be executable for
259 * PCI BIOS based config access (CONFIG_PCI_GOBIOS) support.
687c4825 260 */
5bd5a452
MC
261#ifdef CONFIG_PCI_BIOS
262 if (pcibios_enabled && within(pfn, BIOS_BEGIN >> PAGE_SHIFT, BIOS_END >> PAGE_SHIFT))
ed724be6 263 pgprot_val(forbidden) |= _PAGE_NX;
5bd5a452 264#endif
ed724be6
AV
265
266 /*
267 * The kernel text needs to be executable for obvious reasons
c31c7d48
TG
268 * Does not cover __inittext since that is gone later on. On
269 * 64bit we do not enforce !NX on the low mapping
ed724be6
AV
270 */
271 if (within(address, (unsigned long)_text, (unsigned long)_etext))
272 pgprot_val(forbidden) |= _PAGE_NX;
cc0f21bb 273
cc0f21bb 274 /*
c31c7d48
TG
275 * The .rodata section needs to be read-only. Using the pfn
276 * catches all aliases.
cc0f21bb 277 */
fc8d7826
AD
278 if (within(pfn, __pa_symbol(__start_rodata) >> PAGE_SHIFT,
279 __pa_symbol(__end_rodata) >> PAGE_SHIFT))
cc0f21bb 280 pgprot_val(forbidden) |= _PAGE_RW;
ed724be6 281
55ca3cc1 282#if defined(CONFIG_X86_64) && defined(CONFIG_DEBUG_RODATA)
74e08179 283 /*
502f6604
SS
284 * Once the kernel maps the text as RO (kernel_set_to_readonly is set),
285 * kernel text mappings for the large-page-aligned text and rodata
286 * sections will always be read-only. The kernel identity mappings
287 * covering the holes caused by this alignment can be anything the user asks.
74e08179
SS
288 *
289 * This will preserve the large page mappings for kernel text/data
290 * at no extra cost.
291 */
502f6604
SS
292 if (kernel_set_to_readonly &&
293 within(address, (unsigned long)_text,
281ff33b
SS
294 (unsigned long)__end_rodata_hpage_align)) {
295 unsigned int level;
296
297 /*
298 * Don't enforce the !RW mapping for the kernel text mapping,
299 * if the current mapping is already using small page mapping.
300 * No need to work hard to preserve large page mappings in this
301 * case.
302 *
303 * This also fixes the Linux Xen paravirt guest boot failure
304 * (because of unexpected read-only mappings for kernel identity
305 * mappings). In this paravirt guest case, the kernel text
306 * mapping and the kernel identity mapping share the same
307 * page-table pages. Thus we can't really use different
308 * protections for the kernel text and identity mappings. Also,
309 * these shared mappings are made of small page mappings.
310 * Thus, this logic of not enforcing the !RW mapping for small-page
311 * kernel text mappings will help the Linux Xen paravirt guest boot
0d2eb44f 312 * as well.
281ff33b
SS
313 */
314 if (lookup_address(address, &level) && (level != PG_LEVEL_4K))
315 pgprot_val(forbidden) |= _PAGE_RW;
316 }
74e08179
SS
317#endif
318
ed724be6 319 prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));
687c4825
IM
320
321 return prot;
322}
323
426e34cc
MF
324/*
325 * Lookup the page table entry for a virtual address in a specific pgd.
326 * Return a pointer to the entry and the level of the mapping.
327 */
328pte_t *lookup_address_in_pgd(pgd_t *pgd, unsigned long address,
329 unsigned int *level)
9f4c815c 330{
1da177e4
LT
331 pud_t *pud;
332 pmd_t *pmd;
9f4c815c 333
30551bb3
TG
334 *level = PG_LEVEL_NONE;
335
1da177e4
LT
336 if (pgd_none(*pgd))
337 return NULL;
9df84993 338
1da177e4
LT
339 pud = pud_offset(pgd, address);
340 if (pud_none(*pud))
341 return NULL;
c2f71ee2
AK
342
343 *level = PG_LEVEL_1G;
344 if (pud_large(*pud) || !pud_present(*pud))
345 return (pte_t *)pud;
346
1da177e4
LT
347 pmd = pmd_offset(pud, address);
348 if (pmd_none(*pmd))
349 return NULL;
30551bb3
TG
350
351 *level = PG_LEVEL_2M;
9a14aefc 352 if (pmd_large(*pmd) || !pmd_present(*pmd))
1da177e4 353 return (pte_t *)pmd;
1da177e4 354
30551bb3 355 *level = PG_LEVEL_4K;
9df84993 356
9f4c815c
IM
357 return pte_offset_kernel(pmd, address);
358}
0fd64c23
BP
359
360/*
361 * Lookup the page table entry for a virtual address. Return a pointer
362 * to the entry and the level of the mapping.
363 *
364 * Note: We return pud and pmd either when the entry is marked large
365 * or when the present bit is not set. Otherwise we would return a
366 * pointer to a nonexistent mapping.
367 */
368pte_t *lookup_address(unsigned long address, unsigned int *level)
369{
426e34cc 370 return lookup_address_in_pgd(pgd_offset_k(address), address, level);
0fd64c23 371}
75bb8835 372EXPORT_SYMBOL_GPL(lookup_address);
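/*
 * Illustrative sketch of a typical call (placeholder variable names):
 * check whether a kernel virtual address is currently backed by a
 * present 2M mapping:
 *
 *	unsigned int level;
 *	pte_t *pte = lookup_address(addr, &level);
 *
 *	if (pte && pte_present(*pte) && level == PG_LEVEL_2M)
 *		pr_info("addr %lx uses a 2M page\n", addr);
 */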
9f4c815c 373
0fd64c23
BP
374static pte_t *_lookup_address_cpa(struct cpa_data *cpa, unsigned long address,
375 unsigned int *level)
376{
377 if (cpa->pgd)
426e34cc 378 return lookup_address_in_pgd(cpa->pgd + pgd_index(address),
0fd64c23
BP
379 address, level);
380
381 return lookup_address(address, level);
382}
383
792230c3
JG
384/*
385 * Lookup the PMD entry for a virtual address. Return a pointer to the entry
386 * or NULL if not present.
387 */
388pmd_t *lookup_pmd_address(unsigned long address)
389{
390 pgd_t *pgd;
391 pud_t *pud;
392
393 pgd = pgd_offset_k(address);
394 if (pgd_none(*pgd))
395 return NULL;
396
397 pud = pud_offset(pgd, address);
398 if (pud_none(*pud) || pud_large(*pud) || !pud_present(*pud))
399 return NULL;
400
401 return pmd_offset(pud, address);
402}
403
d7656534
DH
404/*
405 * This is necessary because __pa() does not work on some
406 * kinds of memory, like vmalloc() or the alloc_remap()
407 * areas on 32-bit NUMA systems. The percpu areas can
408 * end up in this kind of memory, for instance.
409 *
410 * This could be optimized, but it is only intended to be
411 * used at initialization time, and keeping it
412 * unoptimized should increase the testing coverage for
413 * the more obscure platforms.
414 */
415phys_addr_t slow_virt_to_phys(void *__virt_addr)
416{
417 unsigned long virt_addr = (unsigned long)__virt_addr;
418 phys_addr_t phys_addr;
419 unsigned long offset;
420 enum pg_level level;
421 unsigned long psize;
422 unsigned long pmask;
423 pte_t *pte;
424
425 pte = lookup_address(virt_addr, &level);
426 BUG_ON(!pte);
427 psize = page_level_size(level);
428 pmask = page_level_mask(level);
429 offset = virt_addr & ~pmask;
d1cd1210 430 phys_addr = (phys_addr_t)pte_pfn(*pte) << PAGE_SHIFT;
d7656534
DH
431 return (phys_addr | offset);
432}
433EXPORT_SYMBOL_GPL(slow_virt_to_phys);
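/*
 * Illustrative sketch (placeholder name): translate a pointer that may
 * live in vmalloc() or percpu space, where plain __pa() is not valid:
 *
 *	phys_addr_t pa = slow_virt_to_phys(vmalloc_ptr);
 */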
434
9df84993
IM
435/*
436 * Set the new pmd in all the pgds we know about:
437 */
9a3dc780 438static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
9f4c815c 439{
9f4c815c
IM
440 /* change init_mm */
441 set_pte_atomic(kpte, pte);
44af6c41 442#ifdef CONFIG_X86_32
e4b71dcf 443 if (!SHARED_KERNEL_PMD) {
44af6c41
IM
444 struct page *page;
445
e3ed910d 446 list_for_each_entry(page, &pgd_list, lru) {
44af6c41
IM
447 pgd_t *pgd;
448 pud_t *pud;
449 pmd_t *pmd;
450
451 pgd = (pgd_t *)page_address(page) + pgd_index(address);
452 pud = pud_offset(pgd, address);
453 pmd = pmd_offset(pud, address);
454 set_pte_atomic((pte_t *)pmd, pte);
455 }
1da177e4 456 }
44af6c41 457#endif
1da177e4
LT
458}
459
9df84993
IM
460static int
461try_preserve_large_page(pte_t *kpte, unsigned long address,
462 struct cpa_data *cpa)
65e074df 463{
a79e53d8 464 unsigned long nextpage_addr, numpages, pmask, psize, addr, pfn;
65e074df 465 pte_t new_pte, old_pte, *tmp;
64edc8ed 466 pgprot_t old_prot, new_prot, req_prot;
fac84939 467 int i, do_split = 1;
f3c4fbb6 468 enum pg_level level;
65e074df 469
c9caa02c
AK
470 if (cpa->force_split)
471 return 1;
472
a79e53d8 473 spin_lock(&pgd_lock);
65e074df
TG
474 /*
475 * Check for races, another CPU might have split this page
476 * up already:
477 */
82f0712c 478 tmp = _lookup_address_cpa(cpa, address, &level);
65e074df
TG
479 if (tmp != kpte)
480 goto out_unlock;
481
482 switch (level) {
483 case PG_LEVEL_2M:
f07333fd 484#ifdef CONFIG_X86_64
65e074df 485 case PG_LEVEL_1G:
f07333fd 486#endif
f3c4fbb6
DH
487 psize = page_level_size(level);
488 pmask = page_level_mask(level);
489 break;
65e074df 490 default:
beaff633 491 do_split = -EINVAL;
65e074df
TG
492 goto out_unlock;
493 }
494
495 /*
496 * Calculate the number of pages, which fit into this large
497 * page starting at address:
498 */
499 nextpage_addr = (address + psize) & pmask;
500 numpages = (nextpage_addr - address) >> PAGE_SHIFT;
9b5cf48b
RW
501 if (numpages < cpa->numpages)
502 cpa->numpages = numpages;
65e074df
TG
503
504 /*
505 * We are safe now. Check whether the new pgprot is the same:
f5b2831d
JG
506 * Convert protection attributes to 4k-format, as cpa->mask* are set
507 * up accordingly.
65e074df
TG
508 */
509 old_pte = *kpte;
f5b2831d 510 old_prot = req_prot = pgprot_large_2_4k(pte_pgprot(old_pte));
65e074df 511
64edc8ed 512 pgprot_val(req_prot) &= ~pgprot_val(cpa->mask_clr);
513 pgprot_val(req_prot) |= pgprot_val(cpa->mask_set);
c31c7d48 514
f5b2831d
JG
515 /*
516 * req_prot is in format of 4k pages. It must be converted to large
517 * page format: the caching mode includes the PAT bit located at
518 * different bit positions in the two formats.
519 */
520 req_prot = pgprot_4k_2_large(req_prot);
521
a8aed3e0
AA
522 /*
523 * Set the PSE and GLOBAL flags only if the PRESENT flag is
524 * set otherwise pmd_present/pmd_huge will return true even on
525 * a non present pmd. The canon_pgprot will clear _PAGE_GLOBAL
526 * for the ancient hardware that doesn't support it.
527 */
f76cfa3c
AA
528 if (pgprot_val(req_prot) & _PAGE_PRESENT)
529 pgprot_val(req_prot) |= _PAGE_PSE | _PAGE_GLOBAL;
a8aed3e0 530 else
f76cfa3c 531 pgprot_val(req_prot) &= ~(_PAGE_PSE | _PAGE_GLOBAL);
a8aed3e0 532
f76cfa3c 533 req_prot = canon_pgprot(req_prot);
a8aed3e0 534
c31c7d48
TG
535 /*
536 * old_pte points to the large page base address. So we need
537 * to add the offset of the virtual address:
538 */
539 pfn = pte_pfn(old_pte) + ((address & (psize - 1)) >> PAGE_SHIFT);
540 cpa->pfn = pfn;
541
64edc8ed 542 new_prot = static_protections(req_prot, address, pfn);
65e074df 543
fac84939
TG
544 /*
545 * We need to check the full range, whether
546 * static_protection() requires a different pgprot for one of
547 * the pages in the range we try to preserve:
548 */
64edc8ed 549 addr = address & pmask;
550 pfn = pte_pfn(old_pte);
551 for (i = 0; i < (psize >> PAGE_SHIFT); i++, addr += PAGE_SIZE, pfn++) {
552 pgprot_t chk_prot = static_protections(req_prot, addr, pfn);
fac84939
TG
553
554 if (pgprot_val(chk_prot) != pgprot_val(new_prot))
555 goto out_unlock;
556 }
557
65e074df
TG
558 /*
559 * If there are no changes, return. maxpages has been updated
560 * above:
561 */
562 if (pgprot_val(new_prot) == pgprot_val(old_prot)) {
beaff633 563 do_split = 0;
65e074df
TG
564 goto out_unlock;
565 }
566
567 /*
568 * We need to change the attributes. Check, whether we can
569 * change the large page in one go. We request a split, when
570 * the address is not aligned and the number of pages is
571 * smaller than the number of pages in the large page. Note
572 * that we limited the number of possible pages already to
573 * the number of pages in the large page.
574 */
64edc8ed 575 if (address == (address & pmask) && cpa->numpages == (psize >> PAGE_SHIFT)) {
65e074df
TG
576 /*
577 * The address is aligned and the number of pages
578 * covers the full page.
579 */
a8aed3e0 580 new_pte = pfn_pte(pte_pfn(old_pte), new_prot);
65e074df 581 __set_pmd_pte(kpte, address, new_pte);
d75586ad 582 cpa->flags |= CPA_FLUSHTLB;
beaff633 583 do_split = 0;
65e074df
TG
584 }
585
586out_unlock:
a79e53d8 587 spin_unlock(&pgd_lock);
9df84993 588
beaff633 589 return do_split;
65e074df
TG
590}
591
5952886b 592static int
82f0712c
BP
593__split_large_page(struct cpa_data *cpa, pte_t *kpte, unsigned long address,
594 struct page *base)
bb5c2dbd 595{
5952886b 596 pte_t *pbase = (pte_t *)page_address(base);
a79e53d8 597 unsigned long pfn, pfninc = 1;
9df84993 598 unsigned int i, level;
ae9aae9e 599 pte_t *tmp;
9df84993 600 pgprot_t ref_prot;
bb5c2dbd 601
a79e53d8 602 spin_lock(&pgd_lock);
bb5c2dbd
IM
603 /*
604 * Check for races, another CPU might have split this page
605 * up for us already:
606 */
82f0712c 607 tmp = _lookup_address_cpa(cpa, address, &level);
ae9aae9e
WC
608 if (tmp != kpte) {
609 spin_unlock(&pgd_lock);
610 return 1;
611 }
bb5c2dbd 612
6944a9c8 613 paravirt_alloc_pte(&init_mm, page_to_pfn(base));
07cf89c0 614 ref_prot = pte_pgprot(pte_clrhuge(*kpte));
f5b2831d
JG
615
616 /* promote PAT bit to correct position */
617 if (level == PG_LEVEL_2M)
618 ref_prot = pgprot_large_2_4k(ref_prot);
bb5c2dbd 619
f07333fd
AK
620#ifdef CONFIG_X86_64
621 if (level == PG_LEVEL_1G) {
622 pfninc = PMD_PAGE_SIZE >> PAGE_SHIFT;
a8aed3e0
AA
623 /*
624 * Set the PSE flags only if the PRESENT flag is set
625 * otherwise pmd_present/pmd_huge will return true
626 * even on a non present pmd.
627 */
628 if (pgprot_val(ref_prot) & _PAGE_PRESENT)
629 pgprot_val(ref_prot) |= _PAGE_PSE;
630 else
631 pgprot_val(ref_prot) &= ~_PAGE_PSE;
f07333fd
AK
632 }
633#endif
634
a8aed3e0
AA
635 /*
636 * Set the GLOBAL flags only if the PRESENT flag is set
637 * otherwise pmd/pte_present will return true even on a non
638 * present pmd/pte. The canon_pgprot will clear _PAGE_GLOBAL
639 * for the ancient hardware that doesn't support it.
640 */
641 if (pgprot_val(ref_prot) & _PAGE_PRESENT)
642 pgprot_val(ref_prot) |= _PAGE_GLOBAL;
643 else
644 pgprot_val(ref_prot) &= ~_PAGE_GLOBAL;
645
63c1dcf4
TG
646 /*
647 * Get the target pfn from the original entry:
648 */
649 pfn = pte_pfn(*kpte);
f07333fd 650 for (i = 0; i < PTRS_PER_PTE; i++, pfn += pfninc)
a8aed3e0 651 set_pte(&pbase[i], pfn_pte(pfn, canon_pgprot(ref_prot)));
bb5c2dbd 652
8eb5779f
YL
653 if (pfn_range_is_mapped(PFN_DOWN(__pa(address)),
654 PFN_DOWN(__pa(address)) + 1))
f361a450
YL
655 split_page_count(level);
656
bb5c2dbd 657 /*
07a66d7c 658 * Install the new, split up pagetable.
4c881ca1 659 *
07a66d7c
IM
660 * We use the standard kernel pagetable protections for the new
661 * pagetable protections, the actual ptes set above control the
662 * primary protection behavior:
bb5c2dbd 663 */
07a66d7c 664 __set_pmd_pte(kpte, address, mk_pte(base, __pgprot(_KERNPG_TABLE)));
211b3d03
IM
665
666 /*
667 * Intel Atom errata AAH41 workaround.
668 *
669 * The real fix should be in hw or in a microcode update, but
670 * we also probabilistically try to reduce the window of having
671 * a large TLB mixed with 4K TLBs while instruction fetches are
672 * going on.
673 */
674 __flush_tlb_all();
ae9aae9e 675 spin_unlock(&pgd_lock);
211b3d03 676
ae9aae9e
WC
677 return 0;
678}
bb5c2dbd 679
82f0712c
BP
680static int split_large_page(struct cpa_data *cpa, pte_t *kpte,
681 unsigned long address)
ae9aae9e 682{
ae9aae9e
WC
683 struct page *base;
684
685 if (!debug_pagealloc)
686 spin_unlock(&cpa_lock);
687 base = alloc_pages(GFP_KERNEL | __GFP_NOTRACK, 0);
688 if (!debug_pagealloc)
689 spin_lock(&cpa_lock);
690 if (!base)
691 return -ENOMEM;
692
82f0712c 693 if (__split_large_page(cpa, kpte, address, base))
8311eb84 694 __free_page(base);
bb5c2dbd 695
bb5c2dbd
IM
696 return 0;
697}
698
52a628fb
BP
699static bool try_to_free_pte_page(pte_t *pte)
700{
701 int i;
702
703 for (i = 0; i < PTRS_PER_PTE; i++)
704 if (!pte_none(pte[i]))
705 return false;
706
707 free_page((unsigned long)pte);
708 return true;
709}
710
711static bool try_to_free_pmd_page(pmd_t *pmd)
712{
713 int i;
714
715 for (i = 0; i < PTRS_PER_PMD; i++)
716 if (!pmd_none(pmd[i]))
717 return false;
718
719 free_page((unsigned long)pmd);
720 return true;
721}
722
42a54772
BP
723static bool try_to_free_pud_page(pud_t *pud)
724{
725 int i;
726
727 for (i = 0; i < PTRS_PER_PUD; i++)
728 if (!pud_none(pud[i]))
729 return false;
730
731 free_page((unsigned long)pud);
732 return true;
733}
734
52a628fb
BP
735static bool unmap_pte_range(pmd_t *pmd, unsigned long start, unsigned long end)
736{
737 pte_t *pte = pte_offset_kernel(pmd, start);
738
739 while (start < end) {
740 set_pte(pte, __pte(0));
741
742 start += PAGE_SIZE;
743 pte++;
744 }
745
746 if (try_to_free_pte_page((pte_t *)pmd_page_vaddr(*pmd))) {
747 pmd_clear(pmd);
748 return true;
749 }
750 return false;
751}
752
753static void __unmap_pmd_range(pud_t *pud, pmd_t *pmd,
754 unsigned long start, unsigned long end)
755{
756 if (unmap_pte_range(pmd, start, end))
757 if (try_to_free_pmd_page((pmd_t *)pud_page_vaddr(*pud)))
758 pud_clear(pud);
759}
760
761static void unmap_pmd_range(pud_t *pud, unsigned long start, unsigned long end)
762{
763 pmd_t *pmd = pmd_offset(pud, start);
764
765 /*
766 * Not on a 2MB page boundary?
767 */
768 if (start & (PMD_SIZE - 1)) {
769 unsigned long next_page = (start + PMD_SIZE) & PMD_MASK;
770 unsigned long pre_end = min_t(unsigned long, end, next_page);
771
772 __unmap_pmd_range(pud, pmd, start, pre_end);
773
774 start = pre_end;
775 pmd++;
776 }
777
778 /*
779 * Try to unmap in 2M chunks.
780 */
781 while (end - start >= PMD_SIZE) {
782 if (pmd_large(*pmd))
783 pmd_clear(pmd);
784 else
785 __unmap_pmd_range(pud, pmd, start, start + PMD_SIZE);
786
787 start += PMD_SIZE;
788 pmd++;
789 }
790
791 /*
792 * 4K leftovers?
793 */
794 if (start < end)
795 return __unmap_pmd_range(pud, pmd, start, end);
796
797 /*
798 * Try again to free the PMD page if we haven't succeeded above.
799 */
800 if (!pud_none(*pud))
801 if (try_to_free_pmd_page((pmd_t *)pud_page_vaddr(*pud)))
802 pud_clear(pud);
803}
0bb8aeee
BP
804
805static void unmap_pud_range(pgd_t *pgd, unsigned long start, unsigned long end)
806{
807 pud_t *pud = pud_offset(pgd, start);
808
809 /*
810 * Not on a GB page boundary?
811 */
812 if (start & (PUD_SIZE - 1)) {
813 unsigned long next_page = (start + PUD_SIZE) & PUD_MASK;
814 unsigned long pre_end = min_t(unsigned long, end, next_page);
815
816 unmap_pmd_range(pud, start, pre_end);
817
818 start = pre_end;
819 pud++;
820 }
821
822 /*
823 * Try to unmap in 1G chunks?
824 */
825 while (end - start >= PUD_SIZE) {
826
827 if (pud_large(*pud))
828 pud_clear(pud);
829 else
830 unmap_pmd_range(pud, start, start + PUD_SIZE);
831
832 start += PUD_SIZE;
833 pud++;
834 }
835
836 /*
837 * 2M leftovers?
838 */
839 if (start < end)
840 unmap_pmd_range(pud, start, end);
841
842 /*
843 * No need to try to free the PUD page because we'll free it in
844 * populate_pgd's error path
845 */
846}
847
42a54772
BP
848static void unmap_pgd_range(pgd_t *root, unsigned long addr, unsigned long end)
849{
850 pgd_t *pgd_entry = root + pgd_index(addr);
851
852 unmap_pud_range(pgd_entry, addr, end);
853
854 if (try_to_free_pud_page((pud_t *)pgd_page_vaddr(*pgd_entry)))
855 pgd_clear(pgd_entry);
856}
857
f900a4b8
BP
858static int alloc_pte_page(pmd_t *pmd)
859{
860 pte_t *pte = (pte_t *)get_zeroed_page(GFP_KERNEL | __GFP_NOTRACK);
861 if (!pte)
862 return -1;
863
864 set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE));
865 return 0;
866}
867
4b23538d
BP
868static int alloc_pmd_page(pud_t *pud)
869{
870 pmd_t *pmd = (pmd_t *)get_zeroed_page(GFP_KERNEL | __GFP_NOTRACK);
871 if (!pmd)
872 return -1;
873
874 set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE));
875 return 0;
876}
877
c6b6f363
BP
878static void populate_pte(struct cpa_data *cpa,
879 unsigned long start, unsigned long end,
880 unsigned num_pages, pmd_t *pmd, pgprot_t pgprot)
881{
882 pte_t *pte;
883
884 pte = pte_offset_kernel(pmd, start);
885
886 while (num_pages-- && start < end) {
887
888 /* deal with the NX bit */
889 if (!(pgprot_val(pgprot) & _PAGE_NX))
890 cpa->pfn &= ~_PAGE_NX;
891
892 set_pte(pte, pfn_pte(cpa->pfn >> PAGE_SHIFT, pgprot));
893
894 start += PAGE_SIZE;
895 cpa->pfn += PAGE_SIZE;
896 pte++;
897 }
898}
f900a4b8
BP
899
900static int populate_pmd(struct cpa_data *cpa,
901 unsigned long start, unsigned long end,
902 unsigned num_pages, pud_t *pud, pgprot_t pgprot)
903{
904 unsigned int cur_pages = 0;
905 pmd_t *pmd;
f5b2831d 906 pgprot_t pmd_pgprot;
f900a4b8
BP
907
908 /*
909 * Not on a 2M boundary?
910 */
911 if (start & (PMD_SIZE - 1)) {
912 unsigned long pre_end = start + (num_pages << PAGE_SHIFT);
913 unsigned long next_page = (start + PMD_SIZE) & PMD_MASK;
914
915 pre_end = min_t(unsigned long, pre_end, next_page);
916 cur_pages = (pre_end - start) >> PAGE_SHIFT;
917 cur_pages = min_t(unsigned int, num_pages, cur_pages);
918
919 /*
920 * Need a PTE page?
921 */
922 pmd = pmd_offset(pud, start);
923 if (pmd_none(*pmd))
924 if (alloc_pte_page(pmd))
925 return -1;
926
927 populate_pte(cpa, start, pre_end, cur_pages, pmd, pgprot);
928
929 start = pre_end;
930 }
931
932 /*
933 * We mapped them all?
934 */
935 if (num_pages == cur_pages)
936 return cur_pages;
937
f5b2831d
JG
938 pmd_pgprot = pgprot_4k_2_large(pgprot);
939
f900a4b8
BP
940 while (end - start >= PMD_SIZE) {
941
942 /*
943 * We cannot use a 1G page so allocate a PMD page if needed.
944 */
945 if (pud_none(*pud))
946 if (alloc_pmd_page(pud))
947 return -1;
948
949 pmd = pmd_offset(pud, start);
950
f5b2831d
JG
951 set_pmd(pmd, __pmd(cpa->pfn | _PAGE_PSE |
952 massage_pgprot(pmd_pgprot)));
f900a4b8
BP
953
954 start += PMD_SIZE;
955 cpa->pfn += PMD_SIZE;
956 cur_pages += PMD_SIZE >> PAGE_SHIFT;
957 }
958
959 /*
960 * Map trailing 4K pages.
961 */
962 if (start < end) {
963 pmd = pmd_offset(pud, start);
964 if (pmd_none(*pmd))
965 if (alloc_pte_page(pmd))
966 return -1;
967
968 populate_pte(cpa, start, end, num_pages - cur_pages,
969 pmd, pgprot);
970 }
971 return num_pages;
972}
4b23538d
BP
973
974static int populate_pud(struct cpa_data *cpa, unsigned long start, pgd_t *pgd,
975 pgprot_t pgprot)
976{
977 pud_t *pud;
978 unsigned long end;
979 int cur_pages = 0;
f5b2831d 980 pgprot_t pud_pgprot;
4b23538d
BP
981
982 end = start + (cpa->numpages << PAGE_SHIFT);
983
984 /*
985 * Not on a Gb page boundary? => map everything up to it with
986 * smaller pages.
987 */
988 if (start & (PUD_SIZE - 1)) {
989 unsigned long pre_end;
990 unsigned long next_page = (start + PUD_SIZE) & PUD_MASK;
991
992 pre_end = min_t(unsigned long, end, next_page);
993 cur_pages = (pre_end - start) >> PAGE_SHIFT;
994 cur_pages = min_t(int, (int)cpa->numpages, cur_pages);
995
996 pud = pud_offset(pgd, start);
997
998 /*
999 * Need a PMD page?
1000 */
1001 if (pud_none(*pud))
1002 if (alloc_pmd_page(pud))
1003 return -1;
1004
1005 cur_pages = populate_pmd(cpa, start, pre_end, cur_pages,
1006 pud, pgprot);
1007 if (cur_pages < 0)
1008 return cur_pages;
1009
1010 start = pre_end;
1011 }
1012
1013 /* We mapped them all? */
1014 if (cpa->numpages == cur_pages)
1015 return cur_pages;
1016
1017 pud = pud_offset(pgd, start);
f5b2831d 1018 pud_pgprot = pgprot_4k_2_large(pgprot);
4b23538d
BP
1019
1020 /*
1021 * Map everything starting from the Gb boundary, possibly with 1G pages
1022 */
1023 while (end - start >= PUD_SIZE) {
f5b2831d
JG
1024 set_pud(pud, __pud(cpa->pfn | _PAGE_PSE |
1025 massage_pgprot(pud_pgprot)));
4b23538d
BP
1026
1027 start += PUD_SIZE;
1028 cpa->pfn += PUD_SIZE;
1029 cur_pages += PUD_SIZE >> PAGE_SHIFT;
1030 pud++;
1031 }
1032
1033 /* Map trailing leftover */
1034 if (start < end) {
1035 int tmp;
1036
1037 pud = pud_offset(pgd, start);
1038 if (pud_none(*pud))
1039 if (alloc_pmd_page(pud))
1040 return -1;
1041
1042 tmp = populate_pmd(cpa, start, end, cpa->numpages - cur_pages,
1043 pud, pgprot);
1044 if (tmp < 0)
1045 return cur_pages;
1046
1047 cur_pages += tmp;
1048 }
1049 return cur_pages;
1050}
f3f72966
BP
1051
1052/*
1053 * Restrictions for kernel page table do not necessarily apply when mapping in
1054 * an alternate PGD.
1055 */
1056static int populate_pgd(struct cpa_data *cpa, unsigned long addr)
1057{
1058 pgprot_t pgprot = __pgprot(_KERNPG_TABLE);
f3f72966 1059 pud_t *pud = NULL; /* shut up gcc */
42a54772 1060 pgd_t *pgd_entry;
f3f72966
BP
1061 int ret;
1062
1063 pgd_entry = cpa->pgd + pgd_index(addr);
1064
1065 /*
1066 * Allocate a PUD page and hand it down for mapping.
1067 */
1068 if (pgd_none(*pgd_entry)) {
1069 pud = (pud_t *)get_zeroed_page(GFP_KERNEL | __GFP_NOTRACK);
1070 if (!pud)
1071 return -1;
1072
1073 set_pgd(pgd_entry, __pgd(__pa(pud) | _KERNPG_TABLE));
f3f72966
BP
1074 }
1075
1076 pgprot_val(pgprot) &= ~pgprot_val(cpa->mask_clr);
1077 pgprot_val(pgprot) |= pgprot_val(cpa->mask_set);
1078
1079 ret = populate_pud(cpa, addr, pgd_entry, pgprot);
0bb8aeee 1080 if (ret < 0) {
42a54772 1081 unmap_pgd_range(cpa->pgd, addr,
0bb8aeee 1082 addr + (cpa->numpages << PAGE_SHIFT));
f3f72966 1083 return ret;
0bb8aeee 1084 }
42a54772 1085
f3f72966
BP
1086 cpa->numpages = ret;
1087 return 0;
1088}
1089
a1e46212
SS
1090static int __cpa_process_fault(struct cpa_data *cpa, unsigned long vaddr,
1091 int primary)
1092{
82f0712c
BP
1093 if (cpa->pgd)
1094 return populate_pgd(cpa, vaddr);
1095
a1e46212
SS
1096 /*
1097 * Ignore all non-primary paths.
1098 */
1099 if (!primary)
1100 return 0;
1101
1102 /*
1103 * Ignore the NULL PTE for kernel identity mapping, as it is expected
1104 * to have holes.
1105 * Also set numpages to '1' indicating that we processed cpa req for
1106 * one virtual address page and its pfn. TBD: numpages can be set based
1107 * on the initial value and the level returned by lookup_address().
1108 */
1109 if (within(vaddr, PAGE_OFFSET,
1110 PAGE_OFFSET + (max_pfn_mapped << PAGE_SHIFT))) {
1111 cpa->numpages = 1;
1112 cpa->pfn = __pa(vaddr) >> PAGE_SHIFT;
1113 return 0;
1114 } else {
1115 WARN(1, KERN_WARNING "CPA: called for zero pte. "
1116 "vaddr = %lx cpa->vaddr = %lx\n", vaddr,
1117 *cpa->vaddr);
1118
1119 return -EFAULT;
1120 }
1121}
1122
c31c7d48 1123static int __change_page_attr(struct cpa_data *cpa, int primary)
9f4c815c 1124{
d75586ad 1125 unsigned long address;
da7bfc50
HH
1126 int do_split, err;
1127 unsigned int level;
c31c7d48 1128 pte_t *kpte, old_pte;
1da177e4 1129
8523acfe
TH
1130 if (cpa->flags & CPA_PAGES_ARRAY) {
1131 struct page *page = cpa->pages[cpa->curpage];
1132 if (unlikely(PageHighMem(page)))
1133 return 0;
1134 address = (unsigned long)page_address(page);
1135 } else if (cpa->flags & CPA_ARRAY)
d75586ad
SL
1136 address = cpa->vaddr[cpa->curpage];
1137 else
1138 address = *cpa->vaddr;
97f99fed 1139repeat:
82f0712c 1140 kpte = _lookup_address_cpa(cpa, address, &level);
1da177e4 1141 if (!kpte)
a1e46212 1142 return __cpa_process_fault(cpa, address, primary);
c31c7d48
TG
1143
1144 old_pte = *kpte;
a1e46212
SS
1145 if (!pte_val(old_pte))
1146 return __cpa_process_fault(cpa, address, primary);
9f4c815c 1147
30551bb3 1148 if (level == PG_LEVEL_4K) {
c31c7d48 1149 pte_t new_pte;
626c2c9d 1150 pgprot_t new_prot = pte_pgprot(old_pte);
c31c7d48 1151 unsigned long pfn = pte_pfn(old_pte);
86f03989 1152
72e458df
TG
1153 pgprot_val(new_prot) &= ~pgprot_val(cpa->mask_clr);
1154 pgprot_val(new_prot) |= pgprot_val(cpa->mask_set);
86f03989 1155
c31c7d48 1156 new_prot = static_protections(new_prot, address, pfn);
86f03989 1157
a8aed3e0
AA
1158 /*
1159 * Set the GLOBAL flags only if the PRESENT flag is
1160 * set otherwise pte_present will return true even on
1161 * a non present pte. The canon_pgprot will clear
1162 * _PAGE_GLOBAL for the ancient hardware that doesn't
1163 * support it.
1164 */
1165 if (pgprot_val(new_prot) & _PAGE_PRESENT)
1166 pgprot_val(new_prot) |= _PAGE_GLOBAL;
1167 else
1168 pgprot_val(new_prot) &= ~_PAGE_GLOBAL;
1169
626c2c9d
AV
1170 /*
1171 * We need to keep the pfn from the existing PTE,
1172 * after all we're only going to change its attributes,
1173 * not the memory it points to.
1174 */
c31c7d48
TG
1175 new_pte = pfn_pte(pfn, canon_pgprot(new_prot));
1176 cpa->pfn = pfn;
f4ae5da0
TG
1177 /*
1178 * Do we really change anything ?
1179 */
1180 if (pte_val(old_pte) != pte_val(new_pte)) {
1181 set_pte_atomic(kpte, new_pte);
d75586ad 1182 cpa->flags |= CPA_FLUSHTLB;
f4ae5da0 1183 }
9b5cf48b 1184 cpa->numpages = 1;
65e074df 1185 return 0;
1da177e4 1186 }
65e074df
TG
1187
1188 /*
1189 * Check, whether we can keep the large page intact
1190 * and just change the pte:
1191 */
beaff633 1192 do_split = try_preserve_large_page(kpte, address, cpa);
65e074df
TG
1193 /*
1194 * When the range fits into the existing large page,
9b5cf48b 1195 * return. cpa->numpages and the CPA_FLUSHTLB flag have been updated in
65e074df
TG
1196 * try_preserve_large_page():
1197 */
87f7f8fe
IM
1198 if (do_split <= 0)
1199 return do_split;
65e074df
TG
1200
1201 /*
1202 * We have to split the large page:
1203 */
82f0712c 1204 err = split_large_page(cpa, kpte, address);
87f7f8fe 1205 if (!err) {
ad5ca55f
SS
1206 /*
1207 * Do a global flush tlb after splitting the large page
1208 * and before we do the actual change page attribute in the PTE.
1209 *
1210 * Without this, we violate the TLB application note, which says
1211 * "The TLBs may contain both ordinary and large-page
1212 * translations for a 4-KByte range of linear addresses. This
1213 * may occur if software modifies the paging structures so that
1214 * the page size used for the address range changes. If the two
1215 * translations differ with respect to page frame or attributes
1216 * (e.g., permissions), processor behavior is undefined and may
1217 * be implementation-specific."
1218 *
1219 * We do this global TLB flush inside the cpa_lock, so that no
1220 * other CPU with stale TLB entries can change, in parallel, the
1221 * page attributes of a page that also falls into the just-split
1222 * large page entry.
1223 */
1224 flush_tlb_all();
87f7f8fe
IM
1225 goto repeat;
1226 }
beaff633 1227
87f7f8fe 1228 return err;
9f4c815c 1229}
1da177e4 1230
c31c7d48
TG
1231static int __change_page_attr_set_clr(struct cpa_data *cpa, int checkalias);
1232
1233static int cpa_process_alias(struct cpa_data *cpa)
1da177e4 1234{
c31c7d48 1235 struct cpa_data alias_cpa;
992f4c1c 1236 unsigned long laddr = (unsigned long)__va(cpa->pfn << PAGE_SHIFT);
e933a73f 1237 unsigned long vaddr;
992f4c1c 1238 int ret;
44af6c41 1239
8eb5779f 1240 if (!pfn_range_is_mapped(cpa->pfn, cpa->pfn + 1))
c31c7d48 1241 return 0;
626c2c9d 1242
f34b439f
TG
1243 /*
1244 * No need to redo, when the primary call touched the direct
1245 * mapping already:
1246 */
8523acfe
TH
1247 if (cpa->flags & CPA_PAGES_ARRAY) {
1248 struct page *page = cpa->pages[cpa->curpage];
1249 if (unlikely(PageHighMem(page)))
1250 return 0;
1251 vaddr = (unsigned long)page_address(page);
1252 } else if (cpa->flags & CPA_ARRAY)
d75586ad
SL
1253 vaddr = cpa->vaddr[cpa->curpage];
1254 else
1255 vaddr = *cpa->vaddr;
1256
1257 if (!(within(vaddr, PAGE_OFFSET,
a1e46212 1258 PAGE_OFFSET + (max_pfn_mapped << PAGE_SHIFT)))) {
44af6c41 1259
f34b439f 1260 alias_cpa = *cpa;
992f4c1c 1261 alias_cpa.vaddr = &laddr;
9ae28475 1262 alias_cpa.flags &= ~(CPA_PAGES_ARRAY | CPA_ARRAY);
d75586ad 1263
f34b439f 1264 ret = __change_page_attr_set_clr(&alias_cpa, 0);
992f4c1c
TH
1265 if (ret)
1266 return ret;
f34b439f 1267 }
44af6c41 1268
44af6c41 1269#ifdef CONFIG_X86_64
488fd995 1270 /*
992f4c1c
TH
1271 * If the primary call didn't touch the high mapping already
1272 * and the physical address is inside the kernel map, we need
0879750f 1273 * to touch the high mapped kernel as well:
488fd995 1274 */
992f4c1c
TH
1275 if (!within(vaddr, (unsigned long)_text, _brk_end) &&
1276 within(cpa->pfn, highmap_start_pfn(), highmap_end_pfn())) {
1277 unsigned long temp_cpa_vaddr = (cpa->pfn << PAGE_SHIFT) +
1278 __START_KERNEL_map - phys_base;
1279 alias_cpa = *cpa;
1280 alias_cpa.vaddr = &temp_cpa_vaddr;
1281 alias_cpa.flags &= ~(CPA_PAGES_ARRAY | CPA_ARRAY);
c31c7d48 1282
992f4c1c
TH
1283 /*
1284 * The high mapping range is imprecise, so ignore the
1285 * return value.
1286 */
1287 __change_page_attr_set_clr(&alias_cpa, 0);
1288 }
488fd995 1289#endif
992f4c1c
TH
1290
1291 return 0;
1da177e4
LT
1292}
1293
c31c7d48 1294static int __change_page_attr_set_clr(struct cpa_data *cpa, int checkalias)
ff31452b 1295{
65e074df 1296 int ret, numpages = cpa->numpages;
ff31452b 1297
65e074df
TG
1298 while (numpages) {
1299 /*
1300 * Store the remaining nr of pages for the large page
1301 * preservation check.
1302 */
9b5cf48b 1303 cpa->numpages = numpages;
d75586ad 1304 /* for array changes, we can't use large page */
9ae28475 1305 if (cpa->flags & (CPA_ARRAY | CPA_PAGES_ARRAY))
d75586ad 1306 cpa->numpages = 1;
c31c7d48 1307
ad5ca55f
SS
1308 if (!debug_pagealloc)
1309 spin_lock(&cpa_lock);
c31c7d48 1310 ret = __change_page_attr(cpa, checkalias);
ad5ca55f
SS
1311 if (!debug_pagealloc)
1312 spin_unlock(&cpa_lock);
ff31452b
TG
1313 if (ret)
1314 return ret;
ff31452b 1315
c31c7d48
TG
1316 if (checkalias) {
1317 ret = cpa_process_alias(cpa);
1318 if (ret)
1319 return ret;
1320 }
1321
65e074df
TG
1322 /*
1323 * Adjust the number of pages with the result of the
1324 * CPA operation. Either a large page has been
1325 * preserved or a single page update happened.
1326 */
9b5cf48b
RW
1327 BUG_ON(cpa->numpages > numpages);
1328 numpages -= cpa->numpages;
9ae28475 1329 if (cpa->flags & (CPA_PAGES_ARRAY | CPA_ARRAY))
d75586ad
SL
1330 cpa->curpage++;
1331 else
1332 *cpa->vaddr += cpa->numpages * PAGE_SIZE;
1333
65e074df 1334 }
ff31452b
TG
1335 return 0;
1336}
1337
d75586ad 1338static int change_page_attr_set_clr(unsigned long *addr, int numpages,
c9caa02c 1339 pgprot_t mask_set, pgprot_t mask_clr,
9ae28475 1340 int force_split, int in_flag,
1341 struct page **pages)
ff31452b 1342{
72e458df 1343 struct cpa_data cpa;
cacf8906 1344 int ret, cache, checkalias;
fa526d0d 1345 unsigned long baddr = 0;
331e4065 1346
82f0712c
BP
1347 memset(&cpa, 0, sizeof(cpa));
1348
331e4065
TG
1349 /*
1350 * Check if we are requested to change an unsupported
1351 * feature:
1352 */
1353 mask_set = canon_pgprot(mask_set);
1354 mask_clr = canon_pgprot(mask_clr);
c9caa02c 1355 if (!pgprot_val(mask_set) && !pgprot_val(mask_clr) && !force_split)
331e4065
TG
1356 return 0;
1357
69b1415e 1358 /* Ensure we are PAGE_SIZE aligned */
9ae28475 1359 if (in_flag & CPA_ARRAY) {
d75586ad
SL
1360 int i;
1361 for (i = 0; i < numpages; i++) {
1362 if (addr[i] & ~PAGE_MASK) {
1363 addr[i] &= PAGE_MASK;
1364 WARN_ON_ONCE(1);
1365 }
1366 }
9ae28475 1367 } else if (!(in_flag & CPA_PAGES_ARRAY)) {
1368 /*
1369 * in_flag of CPA_PAGES_ARRAY implies it is aligned.
1370 * No need to cehck in that case
1371 */
1372 if (*addr & ~PAGE_MASK) {
1373 *addr &= PAGE_MASK;
1374 /*
1375 * People should not be passing in unaligned addresses:
1376 */
1377 WARN_ON_ONCE(1);
1378 }
fa526d0d
JS
1379 /*
1380 * Save address for cache flush. *addr is modified in the call
1381 * to __change_page_attr_set_clr() below.
1382 */
1383 baddr = *addr;
69b1415e
TG
1384 }
1385
5843d9a4
NP
1386 /* Must avoid aliasing mappings in the highmem code */
1387 kmap_flush_unused();
1388
db64fe02
NP
1389 vm_unmap_aliases();
1390
72e458df 1391 cpa.vaddr = addr;
9ae28475 1392 cpa.pages = pages;
72e458df
TG
1393 cpa.numpages = numpages;
1394 cpa.mask_set = mask_set;
1395 cpa.mask_clr = mask_clr;
d75586ad
SL
1396 cpa.flags = 0;
1397 cpa.curpage = 0;
c9caa02c 1398 cpa.force_split = force_split;
72e458df 1399
9ae28475 1400 if (in_flag & (CPA_ARRAY | CPA_PAGES_ARRAY))
1401 cpa.flags |= in_flag;
d75586ad 1402
af96e443
TG
1403 /* No alias checking for _NX bit modifications */
1404 checkalias = (pgprot_val(mask_set) | pgprot_val(mask_clr)) != _PAGE_NX;
1405
1406 ret = __change_page_attr_set_clr(&cpa, checkalias);
ff31452b 1407
f4ae5da0
TG
1408 /*
1409 * Check whether we really changed something:
1410 */
d75586ad 1411 if (!(cpa.flags & CPA_FLUSHTLB))
1ac2f7d5 1412 goto out;
cacf8906 1413
6bb8383b
AK
1414 /*
1415 * No need to flush, when we did not set any of the caching
1416 * attributes:
1417 */
c06814d8 1418 cache = !!pgprot2cachemode(mask_set);
6bb8383b 1419
57a6a46a 1420 /*
b82ad3d3
BP
1421 * On success we use CLFLUSH, when the CPU supports it, to
1422 * avoid the WBINVD. If the CPU does not support it, and in the
f026cfa8 1423 * error case, we fall back to cpa_flush_all() (which uses
b82ad3d3 1424 * WBINVD):
57a6a46a 1425 */
f026cfa8 1426 if (!ret && cpu_has_clflush) {
9ae28475 1427 if (cpa.flags & (CPA_PAGES_ARRAY | CPA_ARRAY)) {
1428 cpa_flush_array(addr, numpages, cache,
1429 cpa.flags, pages);
1430 } else
fa526d0d 1431 cpa_flush_range(baddr, numpages, cache);
d75586ad 1432 } else
6bb8383b 1433 cpa_flush_all(cache);
cacf8906 1434
76ebd054 1435out:
ff31452b
TG
1436 return ret;
1437}
1438
d75586ad
SL
1439static inline int change_page_attr_set(unsigned long *addr, int numpages,
1440 pgprot_t mask, int array)
75cbade8 1441{
d75586ad 1442 return change_page_attr_set_clr(addr, numpages, mask, __pgprot(0), 0,
9ae28475 1443 (array ? CPA_ARRAY : 0), NULL);
75cbade8
AV
1444}
1445
d75586ad
SL
1446static inline int change_page_attr_clear(unsigned long *addr, int numpages,
1447 pgprot_t mask, int array)
72932c7a 1448{
d75586ad 1449 return change_page_attr_set_clr(addr, numpages, __pgprot(0), mask, 0,
9ae28475 1450 (array ? CPA_ARRAY : 0), NULL);
72932c7a
TG
1451}
1452
0f350755 1453static inline int cpa_set_pages_array(struct page **pages, int numpages,
1454 pgprot_t mask)
1455{
1456 return change_page_attr_set_clr(NULL, numpages, mask, __pgprot(0), 0,
1457 CPA_PAGES_ARRAY, pages);
1458}
1459
1460static inline int cpa_clear_pages_array(struct page **pages, int numpages,
1461 pgprot_t mask)
1462{
1463 return change_page_attr_set_clr(NULL, numpages, __pgprot(0), mask, 0,
1464 CPA_PAGES_ARRAY, pages);
1465}
1466
1219333d 1467int _set_memory_uc(unsigned long addr, int numpages)
72932c7a 1468{
de33c442
SS
1469 /*
1470 * for now UC MINUS. see comments in ioremap_nocache()
1471 */
d75586ad 1472 return change_page_attr_set(&addr, numpages,
c06814d8
JG
1473 cachemode2pgprot(_PAGE_CACHE_MODE_UC_MINUS),
1474 0);
75cbade8 1475}
1219333d 1476
1477int set_memory_uc(unsigned long addr, int numpages)
1478{
9fa3ab39 1479 int ret;
1480
de33c442
SS
1481 /*
1482 * for now UC MINUS. see comments in ioremap_nocache()
1483 */
9fa3ab39 1484 ret = reserve_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE,
e00c8cc9 1485 _PAGE_CACHE_MODE_UC_MINUS, NULL);
9fa3ab39 1486 if (ret)
1487 goto out_err;
1488
1489 ret = _set_memory_uc(addr, numpages);
1490 if (ret)
1491 goto out_free;
1492
1493 return 0;
1219333d 1494
9fa3ab39 1495out_free:
1496 free_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE);
1497out_err:
1498 return ret;
1219333d 1499}
75cbade8
AV
1500EXPORT_SYMBOL(set_memory_uc);
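/*
 * Illustrative sketch (placeholder names): a driver temporarily marking a
 * page-aligned kernel buffer uncached, then restoring write-back later:
 *
 *	set_memory_uc((unsigned long)buf, nr_pages);
 *	...
 *	set_memory_wb((unsigned long)buf, nr_pages);
 */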
1501
2d070eff 1502static int _set_memory_array(unsigned long *addr, int addrinarray,
c06814d8 1503 enum page_cache_mode new_type)
d75586ad 1504{
9fa3ab39 1505 int i, j;
1506 int ret;
1507
d75586ad
SL
1508 /*
1509 * for now UC MINUS. see comments in ioremap_nocache()
1510 */
1511 for (i = 0; i < addrinarray; i++) {
9fa3ab39 1512 ret = reserve_memtype(__pa(addr[i]), __pa(addr[i]) + PAGE_SIZE,
4f646254 1513 new_type, NULL);
9fa3ab39 1514 if (ret)
1515 goto out_free;
d75586ad
SL
1516 }
1517
9fa3ab39 1518 ret = change_page_attr_set(addr, addrinarray,
c06814d8
JG
1519 cachemode2pgprot(_PAGE_CACHE_MODE_UC_MINUS),
1520 1);
4f646254 1521
c06814d8 1522 if (!ret && new_type == _PAGE_CACHE_MODE_WC)
4f646254 1523 ret = change_page_attr_set_clr(addr, addrinarray,
c06814d8
JG
1524 cachemode2pgprot(
1525 _PAGE_CACHE_MODE_WC),
4f646254
PN
1526 __pgprot(_PAGE_CACHE_MASK),
1527 0, CPA_ARRAY, NULL);
9fa3ab39 1528 if (ret)
1529 goto out_free;
1530
1531 return 0;
1532
1533out_free:
1534 for (j = 0; j < i; j++)
1535 free_memtype(__pa(addr[j]), __pa(addr[j]) + PAGE_SIZE);
1536
1537 return ret;
d75586ad 1538}
4f646254
PN
1539
1540int set_memory_array_uc(unsigned long *addr, int addrinarray)
1541{
c06814d8 1542 return _set_memory_array(addr, addrinarray, _PAGE_CACHE_MODE_UC_MINUS);
4f646254 1543}
d75586ad
SL
1544EXPORT_SYMBOL(set_memory_array_uc);
1545
4f646254
PN
1546int set_memory_array_wc(unsigned long *addr, int addrinarray)
1547{
c06814d8 1548 return _set_memory_array(addr, addrinarray, _PAGE_CACHE_MODE_WC);
4f646254
PN
1549}
1550EXPORT_SYMBOL(set_memory_array_wc);
1551
ef354af4 1552int _set_memory_wc(unsigned long addr, int numpages)
1553{
3869c4aa 1554 int ret;
bdc6340f
PV
1555 unsigned long addr_copy = addr;
1556
3869c4aa 1557 ret = change_page_attr_set(&addr, numpages,
c06814d8
JG
1558 cachemode2pgprot(_PAGE_CACHE_MODE_UC_MINUS),
1559 0);
3869c4aa 1560 if (!ret) {
bdc6340f 1561 ret = change_page_attr_set_clr(&addr_copy, numpages,
c06814d8
JG
1562 cachemode2pgprot(
1563 _PAGE_CACHE_MODE_WC),
bdc6340f
PV
1564 __pgprot(_PAGE_CACHE_MASK),
1565 0, 0, NULL);
3869c4aa 1566 }
1567 return ret;
ef354af4 1568}
1569
1570int set_memory_wc(unsigned long addr, int numpages)
1571{
9fa3ab39 1572 int ret;
1573
499f8f84 1574 if (!pat_enabled)
ef354af4 1575 return set_memory_uc(addr, numpages);
1576
9fa3ab39 1577 ret = reserve_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE,
e00c8cc9 1578 _PAGE_CACHE_MODE_WC, NULL);
9fa3ab39 1579 if (ret)
1580 goto out_err;
ef354af4 1581
9fa3ab39 1582 ret = _set_memory_wc(addr, numpages);
1583 if (ret)
1584 goto out_free;
1585
1586 return 0;
1587
1588out_free:
1589 free_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE);
1590out_err:
1591 return ret;
ef354af4 1592}
1593EXPORT_SYMBOL(set_memory_wc);
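/*
 * Illustrative sketch (placeholder names): marking a frame-buffer-like
 * region write-combining; when PAT is disabled this transparently falls
 * back to set_memory_uc() as shown above:
 *
 *	if (set_memory_wc((unsigned long)fb, fb_pages))
 *		pr_warn("could not set WC on buffer\n");
 */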
1594
1219333d 1595int _set_memory_wb(unsigned long addr, int numpages)
75cbade8 1596{
c06814d8 1597 /* WB cache mode is hard wired to all cache attribute bits being 0 */
d75586ad
SL
1598 return change_page_attr_clear(&addr, numpages,
1599 __pgprot(_PAGE_CACHE_MASK), 0);
75cbade8 1600}
1219333d 1601
1602int set_memory_wb(unsigned long addr, int numpages)
1603{
9fa3ab39 1604 int ret;
1605
1606 ret = _set_memory_wb(addr, numpages);
1607 if (ret)
1608 return ret;
1609
c15238df 1610 free_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE);
9fa3ab39 1611 return 0;
1219333d 1612}
75cbade8
AV
1613EXPORT_SYMBOL(set_memory_wb);
1614
d75586ad
SL
1615int set_memory_array_wb(unsigned long *addr, int addrinarray)
1616{
1617 int i;
a5593e0b 1618 int ret;
1619
c06814d8 1620 /* WB cache mode is hard wired to all cache attribute bits being 0 */
a5593e0b 1621 ret = change_page_attr_clear(addr, addrinarray,
1622 __pgprot(_PAGE_CACHE_MASK), 1);
9fa3ab39 1623 if (ret)
1624 return ret;
d75586ad 1625
9fa3ab39 1626 for (i = 0; i < addrinarray; i++)
1627 free_memtype(__pa(addr[i]), __pa(addr[i]) + PAGE_SIZE);
c5e147cf 1628
9fa3ab39 1629 return 0;
d75586ad
SL
1630}
1631EXPORT_SYMBOL(set_memory_array_wb);
1632
75cbade8
AV
1633int set_memory_x(unsigned long addr, int numpages)
1634{
583140af
PA
1635 if (!(__supported_pte_mask & _PAGE_NX))
1636 return 0;
1637
d75586ad 1638 return change_page_attr_clear(&addr, numpages, __pgprot(_PAGE_NX), 0);
75cbade8
AV
1639}
1640EXPORT_SYMBOL(set_memory_x);
1641
1642int set_memory_nx(unsigned long addr, int numpages)
1643{
583140af
PA
1644 if (!(__supported_pte_mask & _PAGE_NX))
1645 return 0;
1646
d75586ad 1647 return change_page_attr_set(&addr, numpages, __pgprot(_PAGE_NX), 0);
75cbade8
AV
1648}
1649EXPORT_SYMBOL(set_memory_nx);
1650
1651int set_memory_ro(unsigned long addr, int numpages)
1652{
d75586ad 1653 return change_page_attr_clear(&addr, numpages, __pgprot(_PAGE_RW), 0);
75cbade8 1654}
75cbade8
AV
1655
1656int set_memory_rw(unsigned long addr, int numpages)
1657{
d75586ad 1658 return change_page_attr_set(&addr, numpages, __pgprot(_PAGE_RW), 0);
75cbade8 1659}
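/*
 * Illustrative sketch (placeholder names): write-protecting a page-aligned
 * table once it has been fully initialized, and re-enabling writes when it
 * must be updated:
 *
 *	set_memory_ro((unsigned long)table, table_pages);
 *	...
 *	set_memory_rw((unsigned long)table, table_pages);
 */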
f62d0f00
IM
1660
1661int set_memory_np(unsigned long addr, int numpages)
1662{
d75586ad 1663 return change_page_attr_clear(&addr, numpages, __pgprot(_PAGE_PRESENT), 0);
f62d0f00 1664}
75cbade8 1665
c9caa02c
AK
1666int set_memory_4k(unsigned long addr, int numpages)
1667{
d75586ad 1668 return change_page_attr_set_clr(&addr, numpages, __pgprot(0),
9ae28475 1669 __pgprot(0), 1, 0, NULL);
c9caa02c
AK
1670}
1671
75cbade8
AV
1672int set_pages_uc(struct page *page, int numpages)
1673{
1674 unsigned long addr = (unsigned long)page_address(page);
75cbade8 1675
d7c8f21a 1676 return set_memory_uc(addr, numpages);
75cbade8
AV
1677}
1678EXPORT_SYMBOL(set_pages_uc);
1679
4f646254 1680static int _set_pages_array(struct page **pages, int addrinarray,
c06814d8 1681 enum page_cache_mode new_type)
0f350755 1682{
1683 unsigned long start;
1684 unsigned long end;
1685 int i;
1686 int free_idx;
4f646254 1687 int ret;
0f350755 1688
1689 for (i = 0; i < addrinarray; i++) {
8523acfe
TH
1690 if (PageHighMem(pages[i]))
1691 continue;
1692 start = page_to_pfn(pages[i]) << PAGE_SHIFT;
0f350755 1693 end = start + PAGE_SIZE;
4f646254 1694 if (reserve_memtype(start, end, new_type, NULL))
0f350755 1695 goto err_out;
1696 }
1697
4f646254 1698 ret = cpa_set_pages_array(pages, addrinarray,
c06814d8
JG
1699 cachemode2pgprot(_PAGE_CACHE_MODE_UC_MINUS));
1700 if (!ret && new_type == _PAGE_CACHE_MODE_WC)
4f646254 1701 ret = change_page_attr_set_clr(NULL, addrinarray,
c06814d8
JG
1702 cachemode2pgprot(
1703 _PAGE_CACHE_MODE_WC),
4f646254
PN
1704 __pgprot(_PAGE_CACHE_MASK),
1705 0, CPA_PAGES_ARRAY, pages);
1706 if (ret)
1707 goto err_out;
1708 return 0; /* Success */
0f350755 1709err_out:
1710 free_idx = i;
1711 for (i = 0; i < free_idx; i++) {
8523acfe
TH
1712 if (PageHighMem(pages[i]))
1713 continue;
1714 start = page_to_pfn(pages[i]) << PAGE_SHIFT;
0f350755 1715 end = start + PAGE_SIZE;
1716 free_memtype(start, end);
1717 }
1718 return -EINVAL;
1719}
4f646254
PN
1720
1721int set_pages_array_uc(struct page **pages, int addrinarray)
1722{
c06814d8 1723 return _set_pages_array(pages, addrinarray, _PAGE_CACHE_MODE_UC_MINUS);
4f646254 1724}
0f350755 1725EXPORT_SYMBOL(set_pages_array_uc);
1726
4f646254
PN
1727int set_pages_array_wc(struct page **pages, int addrinarray)
1728{
c06814d8 1729 return _set_pages_array(pages, addrinarray, _PAGE_CACHE_MODE_WC);
4f646254
PN
1730}
1731EXPORT_SYMBOL(set_pages_array_wc);
1732
75cbade8
AV
1733int set_pages_wb(struct page *page, int numpages)
1734{
1735 unsigned long addr = (unsigned long)page_address(page);
75cbade8 1736
d7c8f21a 1737 return set_memory_wb(addr, numpages);
75cbade8
AV
1738}
1739EXPORT_SYMBOL(set_pages_wb);
1740
0f350755 1741int set_pages_array_wb(struct page **pages, int addrinarray)
1742{
1743 int retval;
1744 unsigned long start;
1745 unsigned long end;
1746 int i;
1747
c06814d8 1748 /* WB cache mode is hard wired to all cache attribute bits being 0 */
0f350755 1749 retval = cpa_clear_pages_array(pages, addrinarray,
1750 __pgprot(_PAGE_CACHE_MASK));
9fa3ab39 1751 if (retval)
1752 return retval;
0f350755 1753
1754 for (i = 0; i < addrinarray; i++) {
8523acfe
TH
1755 if (PageHighMem(pages[i]))
1756 continue;
1757 start = page_to_pfn(pages[i]) << PAGE_SHIFT;
0f350755 1758 end = start + PAGE_SIZE;
1759 free_memtype(start, end);
1760 }
1761
9fa3ab39 1762 return 0;
0f350755 1763}
1764EXPORT_SYMBOL(set_pages_array_wb);
1765
75cbade8
AV
1766int set_pages_x(struct page *page, int numpages)
1767{
1768 unsigned long addr = (unsigned long)page_address(page);
75cbade8 1769
d7c8f21a 1770 return set_memory_x(addr, numpages);
75cbade8
AV
1771}
1772EXPORT_SYMBOL(set_pages_x);
1773
1774int set_pages_nx(struct page *page, int numpages)
1775{
1776 unsigned long addr = (unsigned long)page_address(page);
75cbade8 1777
d7c8f21a 1778 return set_memory_nx(addr, numpages);
75cbade8
AV
1779}
1780EXPORT_SYMBOL(set_pages_nx);
1781
1782int set_pages_ro(struct page *page, int numpages)
1783{
1784 unsigned long addr = (unsigned long)page_address(page);
75cbade8 1785
d7c8f21a 1786 return set_memory_ro(addr, numpages);
75cbade8 1787}
75cbade8
AV
1788
1789int set_pages_rw(struct page *page, int numpages)
1790{
1791 unsigned long addr = (unsigned long)page_address(page);
e81d5dc4 1792
d7c8f21a 1793 return set_memory_rw(addr, numpages);
78c94aba
IM
1794}
1795
1da177e4 1796#ifdef CONFIG_DEBUG_PAGEALLOC
f62d0f00
IM
1797
1798static int __set_pages_p(struct page *page, int numpages)
1799{
d75586ad
SL
1800 unsigned long tempaddr = (unsigned long) page_address(page);
1801 struct cpa_data cpa = { .vaddr = &tempaddr,
82f0712c 1802 .pgd = NULL,
72e458df
TG
1803 .numpages = numpages,
1804 .mask_set = __pgprot(_PAGE_PRESENT | _PAGE_RW),
d75586ad
SL
1805 .mask_clr = __pgprot(0),
1806 .flags = 0};
72932c7a 1807
55121b43
SS
1808 /*
1809 * No alias checking needed for setting the present flag. Otherwise,
1810 * we may need to break large pages for 64-bit kernel text
1811 * mappings (this adds to complexity if we want to do this from
1812 * atomic context especially). Let's keep it simple!
1813 */
1814 return __change_page_attr_set_clr(&cpa, 0);
f62d0f00
IM
1815}
1816
1817static int __set_pages_np(struct page *page, int numpages)
1818{
d75586ad
SL
1819 unsigned long tempaddr = (unsigned long) page_address(page);
1820 struct cpa_data cpa = { .vaddr = &tempaddr,
82f0712c 1821 .pgd = NULL,
72e458df
TG
1822 .numpages = numpages,
1823 .mask_set = __pgprot(0),
d75586ad
SL
1824 .mask_clr = __pgprot(_PAGE_PRESENT | _PAGE_RW),
1825 .flags = 0};
72932c7a 1826
55121b43
SS
1827 /*
1828 * No alias checking needed for clearing the present flag. Otherwise,
1829 * we may need to break large pages for 64-bit kernel text
1830 * mappings (this adds to complexity if we want to do this from
1831 * atomic context especially). Let's keep it simple!
1832 */
1833 return __change_page_attr_set_clr(&cpa, 0);
f62d0f00
IM
1834}
1835
031bc574 1836void __kernel_map_pages(struct page *page, int numpages, int enable)
1da177e4
LT
1837{
1838 if (PageHighMem(page))
1839 return;
9f4c815c 1840 if (!enable) {
f9b8404c
IM
1841 debug_check_no_locks_freed(page_address(page),
1842 numpages * PAGE_SIZE);
9f4c815c 1843 }
de5097c2 1844
9f4c815c 1845 /*
f8d8406b 1846 * The return value is ignored as the calls cannot fail.
55121b43
SS
1847 * Large pages for identity mappings are not used at boot time
1848 * and hence no memory allocations during large page split.
1da177e4 1849 */
f62d0f00
IM
1850 if (enable)
1851 __set_pages_p(page, numpages);
1852 else
1853 __set_pages_np(page, numpages);
9f4c815c
IM
1854
1855 /*
e4b71dcf
IM
1856 * We should perform an IPI and flush all tlbs,
1857 * but that can deadlock->flush only current cpu:
1da177e4
LT
1858 */
1859 __flush_tlb_all();
26564600
BO
1860
1861 arch_flush_lazy_mmu_mode();
ee7ae7a1
TG
1862}
1863
8a235efa
RW
1864#ifdef CONFIG_HIBERNATION
1865
1866bool kernel_page_present(struct page *page)
1867{
1868 unsigned int level;
1869 pte_t *pte;
1870
1871 if (PageHighMem(page))
1872 return false;
1873
1874 pte = lookup_address((unsigned long)page_address(page), &level);
1875 return (pte_val(*pte) & _PAGE_PRESENT);
1876}
1877
1878#endif /* CONFIG_HIBERNATION */
1879
1880#endif /* CONFIG_DEBUG_PAGEALLOC */
d1028a15 1881
82f0712c
BP
1882int kernel_map_pages_in_pgd(pgd_t *pgd, u64 pfn, unsigned long address,
1883 unsigned numpages, unsigned long page_flags)
1884{
1885 int retval = -EINVAL;
1886
1887 struct cpa_data cpa = {
1888 .vaddr = &address,
1889 .pfn = pfn,
1890 .pgd = pgd,
1891 .numpages = numpages,
1892 .mask_set = __pgprot(0),
1893 .mask_clr = __pgprot(0),
1894 .flags = 0,
1895 };
1896
1897 if (!(__supported_pte_mask & _PAGE_NX))
1898 goto out;
1899
1900 if (!(page_flags & _PAGE_NX))
1901 cpa.mask_clr = __pgprot(_PAGE_NX);
1902
1903 cpa.mask_set = __pgprot(_PAGE_PRESENT | page_flags);
1904
1905 retval = __change_page_attr_set_clr(&cpa, 0);
1906 __flush_tlb_all();
1907
1908out:
1909 return retval;
1910}
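/*
 * Illustrative sketch (placeholder names): mapping one page as present and
 * executable into an alternate page table, e.g. a firmware-specific pgd
 * prepared by the caller:
 *
 *	kernel_map_pages_in_pgd(my_pgd, phys >> PAGE_SHIFT, virt, 1, 0);
 */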
1911
42a54772
BP
1912void kernel_unmap_pages_in_pgd(pgd_t *root, unsigned long address,
1913 unsigned numpages)
1914{
1915 unmap_pgd_range(root, address, address + (numpages << PAGE_SHIFT));
1916}
1917
d1028a15
AV
1918/*
1919 * The testcases use internal knowledge of the implementation that shouldn't
1920 * be exposed to the rest of the kernel. Include these directly here.
1921 */
1922#ifdef CONFIG_CPA_DEBUG
1923#include "pageattr-test.c"
1924#endif