#ifndef _ASM_X86_PGTABLE_H
#define _ASM_X86_PGTABLE_H

#include <asm/page.h>
#include <asm/e820.h>

#include <asm/pgtable_types.h>

/*
 * Macro to mark a page protection value as UC-
 */
#define pgprot_noncached(prot)					\
	((boot_cpu_data.x86 > 3)				\
	 ? (__pgprot(pgprot_val(prot) | _PAGE_CACHE_UC_MINUS))	\
	 : (prot))

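/*
 * Illustrative sketch (not part of the original header): a driver mapping a
 * device buffer uncached could combine pgprot_noncached() with
 * remap_pfn_range() in its mmap handler. "mydev_pfn" and the handler name
 * are assumptions for the example only.
 *
 *	static int mydev_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 *		return remap_pfn_range(vma, vma->vm_start, mydev_pfn,
 *				       vma->vm_end - vma->vm_start,
 *				       vma->vm_page_prot);
 *	}
 */
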
#ifndef __ASSEMBLY__

#include <asm/x86_init.h>

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc.
 */
extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))

extern spinlock_t pgd_lock;
extern struct list_head pgd_list;

extern struct mm_struct *pgd_page_get_mm(struct page *page);

#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else  /* !CONFIG_PARAVIRT */
#define set_pte(ptep, pte)		native_set_pte(ptep, pte)
#define set_pte_at(mm, addr, ptep, pte)	native_set_pte_at(mm, addr, ptep, pte)
#define set_pmd_at(mm, addr, pmdp, pmd)	native_set_pmd_at(mm, addr, pmdp, pmd)

#define set_pte_atomic(ptep, pte)					\
	native_set_pte_atomic(ptep, pte)

#define set_pmd(pmdp, pmd)		native_set_pmd(pmdp, pmd)

#ifndef __PAGETABLE_PUD_FOLDED
#define set_pgd(pgdp, pgd)		native_set_pgd(pgdp, pgd)
#define pgd_clear(pgd)			native_pgd_clear(pgd)
#endif

#ifndef set_pud
# define set_pud(pudp, pud)		native_set_pud(pudp, pud)
#endif

#ifndef __PAGETABLE_PMD_FOLDED
#define pud_clear(pud)			native_pud_clear(pud)
#endif

#define pte_clear(mm, addr, ptep)	native_pte_clear(mm, addr, ptep)
#define pmd_clear(pmd)			native_pmd_clear(pmd)

#define pte_update(mm, addr, ptep)		do { } while (0)
#define pte_update_defer(mm, addr, ptep)	do { } while (0)
#define pmd_update(mm, addr, ptep)		do { } while (0)
#define pmd_update_defer(mm, addr, ptep)	do { } while (0)

#define pgd_val(x)	native_pgd_val(x)
#define __pgd(x)	native_make_pgd(x)

#ifndef __PAGETABLE_PUD_FOLDED
#define pud_val(x)	native_pud_val(x)
#define __pud(x)	native_make_pud(x)
#endif

#ifndef __PAGETABLE_PMD_FOLDED
#define pmd_val(x)	native_pmd_val(x)
#define __pmd(x)	native_make_pmd(x)
#endif

#define pte_val(x)	native_pte_val(x)
#define __pte(x)	native_make_pte(x)

#define arch_end_context_switch(prev)	do {} while (0)

#endif	/* CONFIG_PARAVIRT */

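/*
 * Illustrative note (not part of the original header): callers always use
 * the generic names; the paravirt config decides what they compile to.
 * With CONFIG_PARAVIRT=n the macros above make these direct native stores,
 * while with CONFIG_PARAVIRT=y set_pte() becomes an indirect call through
 * the hypervisor's pv_mmu_ops hooks. A sketch (pfn assumed):
 *
 *	pte_t pte = pfn_pte(pfn, PAGE_KERNEL);
 *	set_pte(ptep, pte);	// native write, or pv_mmu_ops.set_pte()
 */
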
/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not.
 */
static inline int pte_dirty(pte_t pte)
{
	return pte_flags(pte) & _PAGE_DIRTY;
}

static inline int pte_young(pte_t pte)
{
	return pte_flags(pte) & _PAGE_ACCESSED;
}

static inline int pmd_young(pmd_t pmd)
{
	return pmd_flags(pmd) & _PAGE_ACCESSED;
}

static inline int pte_write(pte_t pte)
{
	return pte_flags(pte) & _PAGE_RW;
}

static inline int pte_file(pte_t pte)
{
	return pte_flags(pte) & _PAGE_FILE;
}

static inline int pte_huge(pte_t pte)
{
	return pte_flags(pte) & _PAGE_PSE;
}

static inline int pte_global(pte_t pte)
{
	return pte_flags(pte) & _PAGE_GLOBAL;
}

static inline int pte_exec(pte_t pte)
{
	return !(pte_flags(pte) & _PAGE_NX);
}

static inline int pte_special(pte_t pte)
{
	return pte_flags(pte) & _PAGE_SPECIAL;
}

static inline unsigned long pte_pfn(pte_t pte)
{
	return (pte_val(pte) & PTE_PFN_MASK) >> PAGE_SHIFT;
}

static inline unsigned long pmd_pfn(pmd_t pmd)
{
	return (pmd_val(pmd) & PTE_PFN_MASK) >> PAGE_SHIFT;
}

static inline unsigned long pud_pfn(pud_t pud)
{
	return (pud_val(pud) & PTE_PFN_MASK) >> PAGE_SHIFT;
}

#define pte_page(pte)	pfn_to_page(pte_pfn(pte))

static inline int pmd_large(pmd_t pte)
{
	return pmd_flags(pte) & _PAGE_PSE;
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline int pmd_trans_splitting(pmd_t pmd)
{
	return pmd_val(pmd) & _PAGE_SPLITTING;
}

static inline int pmd_trans_huge(pmd_t pmd)
{
	return pmd_val(pmd) & _PAGE_PSE;
}

static inline int has_transparent_hugepage(void)
{
	return cpu_has_pse;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

static inline pte_t pte_set_flags(pte_t pte, pteval_t set)
{
	pteval_t v = native_pte_val(pte);

	return native_make_pte(v | set);
}

static inline pte_t pte_clear_flags(pte_t pte, pteval_t clear)
{
	pteval_t v = native_pte_val(pte);

	return native_make_pte(v & ~clear);
}

static inline pte_t pte_mkclean(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_DIRTY);
}

static inline pte_t pte_mkold(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_ACCESSED);
}

static inline pte_t pte_wrprotect(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_RW);
}

static inline pte_t pte_mkexec(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_NX);
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_DIRTY);
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_ACCESSED);
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_RW);
}

static inline pte_t pte_mkhuge(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_PSE);
}

static inline pte_t pte_clrhuge(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_PSE);
}

static inline pte_t pte_mkglobal(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_GLOBAL);
}

static inline pte_t pte_clrglobal(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_GLOBAL);
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_SPECIAL);
}

static inline pmd_t pmd_set_flags(pmd_t pmd, pmdval_t set)
{
	pmdval_t v = native_pmd_val(pmd);

	return __pmd(v | set);
}

static inline pmd_t pmd_clear_flags(pmd_t pmd, pmdval_t clear)
{
	pmdval_t v = native_pmd_val(pmd);

	return __pmd(v & ~clear);
}

static inline pmd_t pmd_mkold(pmd_t pmd)
{
	return pmd_clear_flags(pmd, _PAGE_ACCESSED);
}

static inline pmd_t pmd_wrprotect(pmd_t pmd)
{
	return pmd_clear_flags(pmd, _PAGE_RW);
}

static inline pmd_t pmd_mkdirty(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_DIRTY);
}

static inline pmd_t pmd_mkhuge(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_PSE);
}

static inline pmd_t pmd_mkyoung(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_ACCESSED);
}

static inline pmd_t pmd_mkwrite(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_RW);
}

static inline pmd_t pmd_mknotpresent(pmd_t pmd)
{
	return pmd_clear_flags(pmd, _PAGE_PRESENT);
}

/*
 * Mask out unsupported bits in a present pgprot.  Non-present pgprots
 * can use those bits for other purposes, so leave them be.
 */
static inline pgprotval_t massage_pgprot(pgprot_t pgprot)
{
	pgprotval_t protval = pgprot_val(pgprot);

	if (protval & _PAGE_PRESENT)
		protval &= __supported_pte_mask;

	return protval;
}

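/*
 * Illustrative sketch (not part of the original header): on a CPU without
 * NX support, __supported_pte_mask has _PAGE_NX cleared, so a present
 * mapping silently loses the NX bit rather than creating an invalid entry:
 *
 *	pgprotval_t v = massage_pgprot(PAGE_KERNEL);	// NX dropped if unsupported
 *
 * Non-present entries (e.g. swap entries) are left untouched because they
 * reuse the hardware-ignored bits for software state.
 */
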
static inline pte_t pfn_pte(unsigned long page_nr, pgprot_t pgprot)
{
	return __pte(((phys_addr_t)page_nr << PAGE_SHIFT) |
		     massage_pgprot(pgprot));
}

static inline pmd_t pfn_pmd(unsigned long page_nr, pgprot_t pgprot)
{
	return __pmd(((phys_addr_t)page_nr << PAGE_SHIFT) |
		     massage_pgprot(pgprot));
}

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pteval_t val = pte_val(pte);

	/*
	 * Chop off the NX bit (if present), and add the NX portion of
	 * the newprot (if present):
	 */
	val &= _PAGE_CHG_MASK;
	val |= massage_pgprot(newprot) & ~_PAGE_CHG_MASK;

	return __pte(val);
}

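/*
 * Illustrative sketch (not part of the original header): this is the
 * mprotect() helper. Everything in _PAGE_CHG_MASK (the pfn plus the
 * dirty/accessed state) survives; only the protection bits are replaced.
 * Write-protecting a dirty, accessed pte (pfn assumed):
 *
 *	pte_t old = pte_mkdirty(pte_mkyoung(pfn_pte(pfn, PAGE_SHARED)));
 *	pte_t new = pte_modify(old, PAGE_READONLY);
 *	// pte_dirty(new) and pte_young(new) still hold; pte_write(new) == 0
 */
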
static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
	pmdval_t val = pmd_val(pmd);

	val &= _HPAGE_CHG_MASK;
	val |= massage_pgprot(newprot) & ~_HPAGE_CHG_MASK;

	return __pmd(val);
}

/* mprotect needs to preserve PAT bits when updating vm_page_prot */
#define pgprot_modify pgprot_modify
static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
{
	pgprotval_t preservebits = pgprot_val(oldprot) & _PAGE_CHG_MASK;
	pgprotval_t addbits = pgprot_val(newprot);
	return __pgprot(preservebits | addbits);
}

#define pte_pgprot(x) __pgprot(pte_flags(x) & PTE_FLAGS_MASK)

#define canon_pgprot(p) __pgprot(massage_pgprot(p))

static inline int is_new_memtype_allowed(u64 paddr, unsigned long size,
					 unsigned long flags,
					 unsigned long new_flags)
{
	/*
	 * PAT type is always WB for untracked ranges, so no need to check.
	 */
	if (x86_platform.is_untracked_pat_range(paddr, paddr + size))
		return 1;

	/*
	 * Certain new memtypes are not allowed with certain
	 * requested memtype:
	 * - request is uncached, return cannot be write-back
	 * - request is write-combine, return cannot be write-back
	 */
	if ((flags == _PAGE_CACHE_UC_MINUS &&
	     new_flags == _PAGE_CACHE_WB) ||
	    (flags == _PAGE_CACHE_WC &&
	     new_flags == _PAGE_CACHE_WB)) {
		return 0;
	}

	return 1;
}
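
/*
 * Illustrative sketch (not part of the original header): the PAT code can
 * use this check when a physical range already tracked with one memory type
 * is requested with another. A caller that asked for write-combine must not
 * be handed a write-back mapping (paddr and size assumed):
 *
 *	is_new_memtype_allowed(paddr, size,
 *			       _PAGE_CACHE_WC, _PAGE_CACHE_WB);	// 0: refused
 *	is_new_memtype_allowed(paddr, size,
 *			       _PAGE_CACHE_UC_MINUS, _PAGE_CACHE_UC_MINUS); // 1
 */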

pmd_t *populate_extra_pmd(unsigned long vaddr);
pte_t *populate_extra_pte(unsigned long vaddr);
#endif	/* __ASSEMBLY__ */

#ifdef CONFIG_X86_32
# include <asm/pgtable_32.h>
#else
# include <asm/pgtable_64.h>
#endif

#ifndef __ASSEMBLY__
#include <linux/mm_types.h>
#include <linux/log2.h>

static inline int pte_none(pte_t pte)
{
	return !pte.pte;
}

#define __HAVE_ARCH_PTE_SAME
static inline int pte_same(pte_t a, pte_t b)
{
	return a.pte == b.pte;
}

static inline int pte_present(pte_t a)
{
	return pte_flags(a) & (_PAGE_PRESENT | _PAGE_PROTNONE |
			       _PAGE_NUMA);
}

#define pte_accessible pte_accessible
static inline int pte_accessible(pte_t a)
{
	return pte_flags(a) & _PAGE_PRESENT;
}

static inline int pte_hidden(pte_t pte)
{
	return pte_flags(pte) & _PAGE_HIDDEN;
}

static inline int pmd_present(pmd_t pmd)
{
	/*
	 * Checking for _PAGE_PSE is needed too because
	 * split_huge_page will temporarily clear the present bit (but
	 * the _PAGE_PSE flag will remain set at all times while the
	 * _PAGE_PRESENT bit is clear).
	 */
	return pmd_flags(pmd) & (_PAGE_PRESENT | _PAGE_PROTNONE | _PAGE_PSE |
				 _PAGE_NUMA);
}

static inline int pmd_none(pmd_t pmd)
{
	/* Only check low word on 32-bit platforms, since it might be
	   out of sync with upper half. */
	return (unsigned long)native_pmd_val(pmd) == 0;
}

static inline unsigned long pmd_page_vaddr(pmd_t pmd)
{
	return (unsigned long)__va(pmd_val(pmd) & PTE_PFN_MASK);
}

/*
 * Currently stuck as a macro due to indirect forward reference to
 * linux/mmzone.h's __section_mem_map_addr() definition:
 */
#define pmd_page(pmd)	pfn_to_page((pmd_val(pmd) & PTE_PFN_MASK) >> PAGE_SHIFT)

/*
 * the pmd page can be thought of as an array like this: pmd_t[PTRS_PER_PMD]
 *
 * this function returns the index of the entry in the pmd page which would
 * control the given virtual address
 */
static inline unsigned long pmd_index(unsigned long address)
{
	return (address >> PMD_SHIFT) & (PTRS_PER_PMD - 1);
}

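/*
 * Worked example (not part of the original header), assuming the x86_64
 * defaults of PMD_SHIFT == 21 and PTRS_PER_PMD == 512: the index is simply
 * virtual address bits 29..21.
 *
 *	pmd_index(0xffff880012345678UL)
 *		== (0xffff880012345678UL >> 21) & 511
 *		== 0x91		// entry 145 in the pmd page
 */
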
/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 *
 * (Currently stuck as a macro because of indirect forward reference
 * to linux/mm.h:page_to_nid())
 */
#define mk_pte(page, pgprot)   pfn_pte(page_to_pfn(page), (pgprot))

/*
 * the pte page can be thought of as an array like this: pte_t[PTRS_PER_PTE]
 *
 * this function returns the index of the entry in the pte page which would
 * control the given virtual address
 */
static inline unsigned long pte_index(unsigned long address)
{
	return (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
}

static inline pte_t *pte_offset_kernel(pmd_t *pmd, unsigned long address)
{
	return (pte_t *)pmd_page_vaddr(*pmd) + pte_index(address);
}

static inline int pmd_bad(pmd_t pmd)
{
#ifdef CONFIG_NUMA_BALANCING
	/* pmd_numa check */
	if ((pmd_flags(pmd) & (_PAGE_NUMA|_PAGE_PRESENT)) == _PAGE_NUMA)
		return 0;
#endif
	return (pmd_flags(pmd) & ~_PAGE_USER) != _KERNPG_TABLE;
}

static inline unsigned long pages_to_mb(unsigned long npg)
{
	return npg >> (20 - PAGE_SHIFT);
}

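/*
 * Worked example (not part of the original header): with 4KB pages
 * (PAGE_SHIFT == 12) this is npg >> 8, i.e. 256 pages per megabyte:
 *
 *	pages_to_mb(262144) == 1024	(1 GB worth of 4KB pages)
 */
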
#define io_remap_pfn_range(vma, vaddr, pfn, size, prot)	\
	remap_pfn_range(vma, vaddr, pfn, size, prot)

#if PAGETABLE_LEVELS > 2
static inline int pud_none(pud_t pud)
{
	return native_pud_val(pud) == 0;
}

static inline int pud_present(pud_t pud)
{
	return pud_flags(pud) & _PAGE_PRESENT;
}

static inline unsigned long pud_page_vaddr(pud_t pud)
{
	return (unsigned long)__va((unsigned long)pud_val(pud) & PTE_PFN_MASK);
}

/*
 * Currently stuck as a macro due to indirect forward reference to
 * linux/mmzone.h's __section_mem_map_addr() definition:
 */
#define pud_page(pud)	pfn_to_page(pud_val(pud) >> PAGE_SHIFT)

/* Find an entry in the second-level page table. */
static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
{
	return (pmd_t *)pud_page_vaddr(*pud) + pmd_index(address);
}

static inline int pud_large(pud_t pud)
{
	return (pud_val(pud) & (_PAGE_PSE | _PAGE_PRESENT)) ==
		(_PAGE_PSE | _PAGE_PRESENT);
}

static inline int pud_bad(pud_t pud)
{
	return (pud_flags(pud) & ~(_KERNPG_TABLE | _PAGE_USER)) != 0;
}
#else
static inline int pud_large(pud_t pud)
{
	return 0;
}
#endif	/* PAGETABLE_LEVELS > 2 */

#if PAGETABLE_LEVELS > 3
static inline int pgd_present(pgd_t pgd)
{
	return pgd_flags(pgd) & _PAGE_PRESENT;
}

static inline unsigned long pgd_page_vaddr(pgd_t pgd)
{
	return (unsigned long)__va((unsigned long)pgd_val(pgd) & PTE_PFN_MASK);
}

/*
 * Currently stuck as a macro due to indirect forward reference to
 * linux/mmzone.h's __section_mem_map_addr() definition:
 */
#define pgd_page(pgd)	pfn_to_page(pgd_val(pgd) >> PAGE_SHIFT)

/* to find an entry in a page-table-directory. */
static inline unsigned long pud_index(unsigned long address)
{
	return (address >> PUD_SHIFT) & (PTRS_PER_PUD - 1);
}

static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
{
	return (pud_t *)pgd_page_vaddr(*pgd) + pud_index(address);
}

static inline int pgd_bad(pgd_t pgd)
{
	return (pgd_flags(pgd) & ~_PAGE_USER) != _KERNPG_TABLE;
}

static inline int pgd_none(pgd_t pgd)
{
	return !native_pgd_val(pgd);
}
#endif	/* PAGETABLE_LEVELS > 3 */

#endif	/* __ASSEMBLY__ */

/*
 * the pgd page can be thought of as an array like this: pgd_t[PTRS_PER_PGD]
 *
 * this macro returns the index of the entry in the pgd page which would
 * control the given virtual address
 */
#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))

/*
 * pgd_offset() returns a (pgd_t *)
 * pgd_index() is used to get the offset into the pgd page's array of pgd_t's;
 */
#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address)))

/*
 * a shortcut which implies the use of the kernel's pgd, instead
 * of a process's
 */
#define pgd_offset_k(address) pgd_offset(&init_mm, (address))

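/*
 * Illustrative sketch (not part of the original header): the helpers above
 * compose into a full four-level lookup of a kernel virtual address. This
 * assumes the mapping exists and is a normal 4KB page; real callers must
 * also check p*d_none()/p*d_bad() and the large-page cases at each level.
 *
 *	pgd_t *pgd = pgd_offset_k(addr);
 *	pud_t *pud = pud_offset(pgd, addr);
 *	pmd_t *pmd = pmd_offset(pud, addr);
 *	pte_t *pte = pte_offset_kernel(pmd, addr);
 *	unsigned long pfn = pte_pfn(*pte);
 */
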
#define KERNEL_PGD_BOUNDARY	pgd_index(PAGE_OFFSET)
#define KERNEL_PGD_PTRS		(PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)

#ifndef __ASSEMBLY__

extern int direct_gbpages;
void init_mem_mapping(void);
void early_alloc_pgt_buf(void);

/* local pte updates need not use xchg for locking */
static inline pte_t native_local_ptep_get_and_clear(pte_t *ptep)
{
	pte_t res = *ptep;

	/* Pure native function needs no input for mm, addr */
	native_pte_clear(NULL, 0, ptep);
	return res;
}

static inline pmd_t native_local_pmdp_get_and_clear(pmd_t *pmdp)
{
	pmd_t res = *pmdp;

	native_pmd_clear(pmdp);
	return res;
}

static inline void native_set_pte_at(struct mm_struct *mm, unsigned long addr,
				     pte_t *ptep, pte_t pte)
{
	native_set_pte(ptep, pte);
}

static inline void native_set_pmd_at(struct mm_struct *mm, unsigned long addr,
				     pmd_t *pmdp, pmd_t pmd)
{
	native_set_pmd(pmdp, pmd);
}

#ifndef CONFIG_PARAVIRT
/*
 * Rules for using pte_update - it must be called after any PTE update which
 * has not been done using the set_pte / clear_pte interfaces.  It is used by
 * shadow mode hypervisors to resynchronize the shadow page tables.  Kernel PTE
 * updates should either be sets, clears, or set_pte_atomic for P->P
 * transitions, which means this hook should only be called for user PTEs.
 * This hook implies a P->P protection or access change has taken place, which
 * requires a subsequent TLB flush.  The notification can optionally be delayed
 * until the TLB flush event by using the pte_update_defer form of the
 * interface, but care must be taken to assure that the flush happens while
 * still holding the same page table lock so that the shadow and primary pages
 * do not become out of sync on SMP.
 */
#define pte_update(mm, addr, ptep)		do { } while (0)
#define pte_update_defer(mm, addr, ptep)	do { } while (0)
#endif

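/*
 * Illustrative sketch (not part of the original header): a raw PTE bit
 * twiddle that bypasses set_pte() must notify the hypervisor hook and then
 * flush while the page table lock is still held. Compare ptep_set_wrprotect()
 * below, which follows exactly this pattern (ptl and vma assumed):
 *
 *	spin_lock(ptl);				// page table lock
 *	clear_bit(_PAGE_BIT_RW, (unsigned long *)&ptep->pte);
 *	pte_update(mm, addr, ptep);		// no-op unless paravirtualized
 *	flush_tlb_page(vma, addr);
 *	spin_unlock(ptl);
 */
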
/*
 * We only update the dirty/accessed state if we set
 * the dirty bit by hand in the kernel, since the hardware
 * will do the accessed bit for us, and we don't want to
 * race with other CPUs that might be updating the dirty
 * bit at the same time.
 */
struct vm_area_struct;

#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
extern int ptep_set_access_flags(struct vm_area_struct *vma,
				 unsigned long address, pte_t *ptep,
				 pte_t entry, int dirty);

#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
extern int ptep_test_and_clear_young(struct vm_area_struct *vma,
				     unsigned long addr, pte_t *ptep);

#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
extern int ptep_clear_flush_young(struct vm_area_struct *vma,
				  unsigned long address, pte_t *ptep);

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
				       pte_t *ptep)
{
	pte_t pte = native_ptep_get_and_clear(ptep);
	pte_update(mm, addr, ptep);
	return pte;
}

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
					    unsigned long addr, pte_t *ptep,
					    int full)
{
	pte_t pte;
	if (full) {
		/*
		 * Full address destruction in progress; paravirt does not
		 * care about updates and native needs no locking
		 */
		pte = native_local_ptep_get_and_clear(ptep);
	} else {
		pte = ptep_get_and_clear(mm, addr, ptep);
	}
	return pte;
}

#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm,
				      unsigned long addr, pte_t *ptep)
{
	clear_bit(_PAGE_BIT_RW, (unsigned long *)&ptep->pte);
	pte_update(mm, addr, ptep);
}

#define flush_tlb_fix_spurious_fault(vma, address) do { } while (0)

#define mk_pmd(page, pgprot)   pfn_pmd(page_to_pfn(page), (pgprot))

#define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
extern int pmdp_set_access_flags(struct vm_area_struct *vma,
				 unsigned long address, pmd_t *pmdp,
				 pmd_t entry, int dirty);

#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
extern int pmdp_test_and_clear_young(struct vm_area_struct *vma,
				     unsigned long addr, pmd_t *pmdp);

#define __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
extern int pmdp_clear_flush_young(struct vm_area_struct *vma,
				  unsigned long address, pmd_t *pmdp);

#define __HAVE_ARCH_PMDP_SPLITTING_FLUSH
extern void pmdp_splitting_flush(struct vm_area_struct *vma,
				 unsigned long addr, pmd_t *pmdp);

#define __HAVE_ARCH_PMD_WRITE
static inline int pmd_write(pmd_t pmd)
{
	return pmd_flags(pmd) & _PAGE_RW;
}

#define __HAVE_ARCH_PMDP_GET_AND_CLEAR
static inline pmd_t pmdp_get_and_clear(struct mm_struct *mm, unsigned long addr,
				       pmd_t *pmdp)
{
	pmd_t pmd = native_pmdp_get_and_clear(pmdp);
	pmd_update(mm, addr, pmdp);
	return pmd;
}

#define __HAVE_ARCH_PMDP_SET_WRPROTECT
static inline void pmdp_set_wrprotect(struct mm_struct *mm,
				      unsigned long addr, pmd_t *pmdp)
{
	clear_bit(_PAGE_BIT_RW, (unsigned long *)pmdp);
	pmd_update(mm, addr, pmdp);
}

/*
 * clone_pgd_range(pgd_t *dst, pgd_t *src, int count);
 *
 *  dst - pointer to pgd range anywhere on a pgd page
 *  src - ""
 *  count - the number of pgds to copy.
 *
 * dst and src can be on the same page, but the range must not overlap,
 * and must not cross a page boundary.
 */
static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
{
	memcpy(dst, src, count * sizeof(pgd_t));
}

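/*
 * Illustrative sketch (not part of the original header): when a new pgd
 * page is set up, the kernel half of the address space is shared by copying
 * the kernel entries from the reference page tables; "new_pgd" is assumed
 * to be the freshly allocated pgd page:
 *
 *	clone_pgd_range(new_pgd + KERNEL_PGD_BOUNDARY,
 *			swapper_pg_dir + KERNEL_PGD_BOUNDARY,
 *			KERNEL_PGD_PTRS);
 */
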
#define PTE_SHIFT ilog2(PTRS_PER_PTE)
static inline int page_level_shift(enum pg_level level)
{
	return (PAGE_SHIFT - PTE_SHIFT) + level * PTE_SHIFT;
}
static inline unsigned long page_level_size(enum pg_level level)
{
	return 1UL << page_level_shift(level);
}
static inline unsigned long page_level_mask(enum pg_level level)
{
	return ~(page_level_size(level) - 1);
}

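/*
 * Worked example (not part of the original header), assuming 4KB pages
 * (PAGE_SHIFT == 12, PTE_SHIFT == 9) and the usual enum pg_level values
 * where PG_LEVEL_4K == 1:
 *
 *	page_level_shift(PG_LEVEL_4K) == 3 + 1 * 9 == 12	->  4KB
 *	page_level_shift(PG_LEVEL_2M) == 3 + 2 * 9 == 21	->  2MB
 *	page_level_mask(PG_LEVEL_2M)  == ~(2MB - 1)	(2MB alignment mask)
 */
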
/*
 * The x86 doesn't have any external MMU info: the kernel page
 * tables contain all the necessary information.
 */
static inline void update_mmu_cache(struct vm_area_struct *vma,
		unsigned long addr, pte_t *ptep)
{
}
static inline void update_mmu_cache_pmd(struct vm_area_struct *vma,
		unsigned long addr, pmd_t *pmd)
{
}

#include <asm-generic/pgtable.h>
#endif	/* __ASSEMBLY__ */

#endif /* _ASM_X86_PGTABLE_H */