mm: x86 pgtable: drop unneeded preprocessor ifdef
arch/x86/include/asm/pgtable.h
#ifndef _ASM_X86_PGTABLE_H
#define _ASM_X86_PGTABLE_H

#include <asm/page.h>
#include <asm/e820.h>

#include <asm/pgtable_types.h>

/*
 * Macro to mark a page protection value as UC-
 */
#define pgprot_noncached(prot)						\
	((boot_cpu_data.x86 > 3)					\
	 ? (__pgprot(pgprot_val(prot) | _PAGE_CACHE_UC_MINUS))		\
	 : (prot))

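/*
 * Example (illustrative only, not part of the original header): drivers
 * typically apply this before mapping MMIO or other uncacheable memory to
 * user space, along the lines of
 *
 *	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 *	remap_pfn_range(vma, vma->vm_start, pfn, size, vma->vm_page_prot);
 *
 * The vma/pfn/size names above are placeholders for the caller's own state.
 */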
#ifndef __ASSEMBLY__
#include <asm/x86_init.h>

void ptdump_walk_pgd_level(struct seq_file *m, pgd_t *pgd);

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */
extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]
	__visible;
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))

extern spinlock_t pgd_lock;
extern struct list_head pgd_list;

extern struct mm_struct *pgd_page_get_mm(struct page *page);

#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else  /* !CONFIG_PARAVIRT */
#define set_pte(ptep, pte)		native_set_pte(ptep, pte)
#define set_pte_at(mm, addr, ptep, pte)	native_set_pte_at(mm, addr, ptep, pte)
#define set_pmd_at(mm, addr, pmdp, pmd)	native_set_pmd_at(mm, addr, pmdp, pmd)

#define set_pte_atomic(ptep, pte)					\
	native_set_pte_atomic(ptep, pte)

#define set_pmd(pmdp, pmd)		native_set_pmd(pmdp, pmd)

#ifndef __PAGETABLE_PUD_FOLDED
#define set_pgd(pgdp, pgd)		native_set_pgd(pgdp, pgd)
#define pgd_clear(pgd)			native_pgd_clear(pgd)
#endif

#ifndef set_pud
# define set_pud(pudp, pud)		native_set_pud(pudp, pud)
#endif

#ifndef __PAGETABLE_PMD_FOLDED
#define pud_clear(pud)			native_pud_clear(pud)
#endif

#define pte_clear(mm, addr, ptep)	native_pte_clear(mm, addr, ptep)
#define pmd_clear(pmd)			native_pmd_clear(pmd)

#define pte_update(mm, addr, ptep)		do { } while (0)
#define pte_update_defer(mm, addr, ptep)	do { } while (0)
#define pmd_update(mm, addr, ptep)		do { } while (0)
#define pmd_update_defer(mm, addr, ptep)	do { } while (0)

#define pgd_val(x)	native_pgd_val(x)
#define __pgd(x)	native_make_pgd(x)

#ifndef __PAGETABLE_PUD_FOLDED
#define pud_val(x)	native_pud_val(x)
#define __pud(x)	native_make_pud(x)
#endif

#ifndef __PAGETABLE_PMD_FOLDED
#define pmd_val(x)	native_pmd_val(x)
#define __pmd(x)	native_make_pmd(x)
#endif

#define pte_val(x)	native_pte_val(x)
#define __pte(x)	native_make_pte(x)

#define arch_end_context_switch(prev)	do {} while(0)

#endif	/* CONFIG_PARAVIRT */

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */
static inline int pte_dirty(pte_t pte)
{
	return pte_flags(pte) & _PAGE_DIRTY;
}

static inline int pte_young(pte_t pte)
{
	return pte_flags(pte) & _PAGE_ACCESSED;
}

static inline int pmd_young(pmd_t pmd)
{
	return pmd_flags(pmd) & _PAGE_ACCESSED;
}

static inline int pte_write(pte_t pte)
{
	return pte_flags(pte) & _PAGE_RW;
}

static inline int pte_file(pte_t pte)
{
	return pte_flags(pte) & _PAGE_FILE;
}

static inline int pte_huge(pte_t pte)
{
	return pte_flags(pte) & _PAGE_PSE;
}

static inline int pte_global(pte_t pte)
{
	return pte_flags(pte) & _PAGE_GLOBAL;
}

static inline int pte_exec(pte_t pte)
{
	return !(pte_flags(pte) & _PAGE_NX);
}

static inline int pte_special(pte_t pte)
{
	return (pte_flags(pte) & (_PAGE_PRESENT|_PAGE_SPECIAL)) ==
		(_PAGE_PRESENT|_PAGE_SPECIAL);
}

static inline unsigned long pte_pfn(pte_t pte)
{
	return (pte_val(pte) & PTE_PFN_MASK) >> PAGE_SHIFT;
}

static inline unsigned long pmd_pfn(pmd_t pmd)
{
	return (pmd_val(pmd) & PTE_PFN_MASK) >> PAGE_SHIFT;
}

static inline unsigned long pud_pfn(pud_t pud)
{
	return (pud_val(pud) & PTE_PFN_MASK) >> PAGE_SHIFT;
}

#define pte_page(pte)	pfn_to_page(pte_pfn(pte))

static inline int pmd_large(pmd_t pte)
{
	return pmd_flags(pte) & _PAGE_PSE;
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline int pmd_trans_splitting(pmd_t pmd)
{
	return pmd_val(pmd) & _PAGE_SPLITTING;
}

static inline int pmd_trans_huge(pmd_t pmd)
{
	return pmd_val(pmd) & _PAGE_PSE;
}

static inline int has_transparent_hugepage(void)
{
	return cpu_has_pse;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

static inline pte_t pte_set_flags(pte_t pte, pteval_t set)
{
	pteval_t v = native_pte_val(pte);

	return native_make_pte(v | set);
}

static inline pte_t pte_clear_flags(pte_t pte, pteval_t clear)
{
	pteval_t v = native_pte_val(pte);

	return native_make_pte(v & ~clear);
}

static inline pte_t pte_mkclean(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_DIRTY);
}

static inline pte_t pte_mkold(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_ACCESSED);
}

static inline pte_t pte_wrprotect(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_RW);
}

static inline pte_t pte_mkexec(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_NX);
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_DIRTY | _PAGE_SOFT_DIRTY);
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_ACCESSED);
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_RW);
}

static inline pte_t pte_mkhuge(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_PSE);
}

static inline pte_t pte_clrhuge(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_PSE);
}

static inline pte_t pte_mkglobal(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_GLOBAL);
}

static inline pte_t pte_clrglobal(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_GLOBAL);
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_SPECIAL);
}

static inline pmd_t pmd_set_flags(pmd_t pmd, pmdval_t set)
{
	pmdval_t v = native_pmd_val(pmd);

	return __pmd(v | set);
}

static inline pmd_t pmd_clear_flags(pmd_t pmd, pmdval_t clear)
{
	pmdval_t v = native_pmd_val(pmd);

	return __pmd(v & ~clear);
}

static inline pmd_t pmd_mkold(pmd_t pmd)
{
	return pmd_clear_flags(pmd, _PAGE_ACCESSED);
}

static inline pmd_t pmd_wrprotect(pmd_t pmd)
{
	return pmd_clear_flags(pmd, _PAGE_RW);
}

static inline pmd_t pmd_mkdirty(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_DIRTY | _PAGE_SOFT_DIRTY);
}

static inline pmd_t pmd_mkhuge(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_PSE);
}

static inline pmd_t pmd_mkyoung(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_ACCESSED);
}

static inline pmd_t pmd_mkwrite(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_RW);
}

static inline pmd_t pmd_mknotpresent(pmd_t pmd)
{
	return pmd_clear_flags(pmd, _PAGE_PRESENT);
}

static inline int pte_soft_dirty(pte_t pte)
{
	return pte_flags(pte) & _PAGE_SOFT_DIRTY;
}

static inline int pmd_soft_dirty(pmd_t pmd)
{
	return pmd_flags(pmd) & _PAGE_SOFT_DIRTY;
}

static inline pte_t pte_mksoft_dirty(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_SOFT_DIRTY);
}

static inline pmd_t pmd_mksoft_dirty(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_SOFT_DIRTY);
}

static inline pte_t pte_file_clear_soft_dirty(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_SOFT_DIRTY);
}

static inline pte_t pte_file_mksoft_dirty(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_SOFT_DIRTY);
}

static inline int pte_file_soft_dirty(pte_t pte)
{
	return pte_flags(pte) & _PAGE_SOFT_DIRTY;
}

/*
 * Mask out unsupported bits in a present pgprot.  Non-present pgprots
 * can use those bits for other purposes, so leave them be.
 */
static inline pgprotval_t massage_pgprot(pgprot_t pgprot)
{
	pgprotval_t protval = pgprot_val(pgprot);

	if (protval & _PAGE_PRESENT)
		protval &= __supported_pte_mask;

	return protval;
}

static inline pte_t pfn_pte(unsigned long page_nr, pgprot_t pgprot)
{
	return __pte(((phys_addr_t)page_nr << PAGE_SHIFT) |
		     massage_pgprot(pgprot));
}

static inline pmd_t pfn_pmd(unsigned long page_nr, pgprot_t pgprot)
{
	return __pmd(((phys_addr_t)page_nr << PAGE_SHIFT) |
		     massage_pgprot(pgprot));
}

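/*
 * Example (illustrative only): building a kernel-writable PTE for page
 * frame number 0x1234 would look roughly like
 *
 *	pte_t pte = pfn_pte(0x1234, PAGE_KERNEL);
 *
 * massage_pgprot() above ensures that bits the running CPU cannot honour
 * (e.g. _PAGE_NX without NX support) are dropped via __supported_pte_mask
 * before the value is installed.
 */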
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pteval_t val = pte_val(pte);

	/*
	 * Chop off the NX bit (if present), and add the NX portion of
	 * the newprot (if present):
	 */
	val &= _PAGE_CHG_MASK;
	val |= massage_pgprot(newprot) & ~_PAGE_CHG_MASK;

	return __pte(val);
}

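/*
 * Example (illustrative only): mprotect()-style permission changes go
 * through pte_modify(), e.g.
 *
 *	pte = pte_modify(pte, vma->vm_page_prot);
 *
 * which keeps the page frame number and the accessed/dirty state (the bits
 * in _PAGE_CHG_MASK) and replaces only the protection bits.
 */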
static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
	pmdval_t val = pmd_val(pmd);

	val &= _HPAGE_CHG_MASK;
	val |= massage_pgprot(newprot) & ~_HPAGE_CHG_MASK;

	return __pmd(val);
}

/* mprotect needs to preserve PAT bits when updating vm_page_prot */
#define pgprot_modify pgprot_modify
static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
{
	pgprotval_t preservebits = pgprot_val(oldprot) & _PAGE_CHG_MASK;
	pgprotval_t addbits = pgprot_val(newprot);
	return __pgprot(preservebits | addbits);
}

#define pte_pgprot(x) __pgprot(pte_flags(x) & PTE_FLAGS_MASK)

#define canon_pgprot(p) __pgprot(massage_pgprot(p))

static inline int is_new_memtype_allowed(u64 paddr, unsigned long size,
					 unsigned long flags,
					 unsigned long new_flags)
{
	/*
	 * PAT type is always WB for untracked ranges, so no need to check.
	 */
	if (x86_platform.is_untracked_pat_range(paddr, paddr + size))
		return 1;

	/*
	 * Certain new memtypes are not allowed with certain
	 * requested memtype:
	 * - request is uncached, return cannot be write-back
	 * - request is write-combine, return cannot be write-back
	 */
	if ((flags == _PAGE_CACHE_UC_MINUS &&
	     new_flags == _PAGE_CACHE_WB) ||
	    (flags == _PAGE_CACHE_WC &&
	     new_flags == _PAGE_CACHE_WB)) {
		return 0;
	}

	return 1;
}

pmd_t *populate_extra_pmd(unsigned long vaddr);
pte_t *populate_extra_pte(unsigned long vaddr);
#endif	/* __ASSEMBLY__ */

#ifdef CONFIG_X86_32
# include <asm/pgtable_32.h>
#else
# include <asm/pgtable_64.h>
#endif

#ifndef __ASSEMBLY__
#include <linux/mm_types.h>
#include <linux/mmdebug.h>
#include <linux/log2.h>

static inline int pte_none(pte_t pte)
{
	return !pte.pte;
}

#define __HAVE_ARCH_PTE_SAME
static inline int pte_same(pte_t a, pte_t b)
{
	return a.pte == b.pte;
}

static inline int pte_present(pte_t a)
{
	return pte_flags(a) & (_PAGE_PRESENT | _PAGE_PROTNONE |
			       _PAGE_NUMA);
}

#define pte_present_nonuma pte_present_nonuma
static inline int pte_present_nonuma(pte_t a)
{
	return pte_flags(a) & (_PAGE_PRESENT | _PAGE_PROTNONE);
}

#define pte_accessible pte_accessible
static inline bool pte_accessible(struct mm_struct *mm, pte_t a)
{
	if (pte_flags(a) & _PAGE_PRESENT)
		return true;

	if ((pte_flags(a) & (_PAGE_PROTNONE | _PAGE_NUMA)) &&
			mm_tlb_flush_pending(mm))
		return true;

	return false;
}

static inline int pte_hidden(pte_t pte)
{
	return pte_flags(pte) & _PAGE_HIDDEN;
}

static inline int pmd_present(pmd_t pmd)
{
	/*
	 * Checking for _PAGE_PSE is needed too because
	 * split_huge_page will temporarily clear the present bit (but
	 * the _PAGE_PSE flag will remain set at all times while the
	 * _PAGE_PRESENT bit is clear).
	 */
	return pmd_flags(pmd) & (_PAGE_PRESENT | _PAGE_PROTNONE | _PAGE_PSE |
				 _PAGE_NUMA);
}

static inline int pmd_none(pmd_t pmd)
{
	/* Only check low word on 32-bit platforms, since it might be
	   out of sync with upper half. */
	return (unsigned long)native_pmd_val(pmd) == 0;
}

static inline unsigned long pmd_page_vaddr(pmd_t pmd)
{
	return (unsigned long)__va(pmd_val(pmd) & PTE_PFN_MASK);
}

/*
 * Currently stuck as a macro due to indirect forward reference to
 * linux/mmzone.h's __section_mem_map_addr() definition:
 */
#define pmd_page(pmd)	pfn_to_page((pmd_val(pmd) & PTE_PFN_MASK) >> PAGE_SHIFT)

/*
 * the pmd page can be thought of as an array like this: pmd_t[PTRS_PER_PMD]
 *
 * this function returns the index of the entry in the pmd page which would
 * control the given virtual address
 */
static inline unsigned long pmd_index(unsigned long address)
{
	return (address >> PMD_SHIFT) & (PTRS_PER_PMD - 1);
}

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 *
 * (Currently stuck as a macro because of indirect forward reference
 * to linux/mm.h:page_to_nid())
 */
#define mk_pte(page, pgprot)   pfn_pte(page_to_pfn(page), (pgprot))

/*
 * the pte page can be thought of as an array like this: pte_t[PTRS_PER_PTE]
 *
 * this function returns the index of the entry in the pte page which would
 * control the given virtual address
 */
static inline unsigned long pte_index(unsigned long address)
{
	return (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
}

static inline pte_t *pte_offset_kernel(pmd_t *pmd, unsigned long address)
{
	return (pte_t *)pmd_page_vaddr(*pmd) + pte_index(address);
}

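/*
 * Example (illustrative only): with the usual 4KB pages and 512-entry
 * tables on 64-bit (PAGE_SHIFT == 12, PMD_SHIFT == 21, PTRS_PER_PTE ==
 * PTRS_PER_PMD == 512), an address such as 0x00007f1234567000 is indexed as
 *
 *	pte_index(addr) == (addr >> 12) & 511	-> bits 12..20
 *	pmd_index(addr) == (addr >> 21) & 511	-> bits 21..29
 *
 * so consecutive 4KB pages walk through one pte page before the pmd index
 * advances.
 */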
static inline int pmd_bad(pmd_t pmd)
{
#ifdef CONFIG_NUMA_BALANCING
	/* pmd_numa check */
	if ((pmd_flags(pmd) & (_PAGE_NUMA|_PAGE_PRESENT)) == _PAGE_NUMA)
		return 0;
#endif
	return (pmd_flags(pmd) & ~_PAGE_USER) != _KERNPG_TABLE;
}

static inline unsigned long pages_to_mb(unsigned long npg)
{
	return npg >> (20 - PAGE_SHIFT);
}

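/*
 * Example (illustrative only): with 4KB pages (PAGE_SHIFT == 12) this is a
 * shift by 8, so pages_to_mb(262144) == 1024, i.e. 256K pages cover 1GB.
 */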
#if PAGETABLE_LEVELS > 2
static inline int pud_none(pud_t pud)
{
	return native_pud_val(pud) == 0;
}

static inline int pud_present(pud_t pud)
{
	return pud_flags(pud) & _PAGE_PRESENT;
}

static inline unsigned long pud_page_vaddr(pud_t pud)
{
	return (unsigned long)__va((unsigned long)pud_val(pud) & PTE_PFN_MASK);
}

/*
 * Currently stuck as a macro due to indirect forward reference to
 * linux/mmzone.h's __section_mem_map_addr() definition:
 */
#define pud_page(pud)	pfn_to_page(pud_val(pud) >> PAGE_SHIFT)

/* Find an entry in the second-level page table.. */
static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
{
	return (pmd_t *)pud_page_vaddr(*pud) + pmd_index(address);
}

static inline int pud_large(pud_t pud)
{
	return (pud_val(pud) & (_PAGE_PSE | _PAGE_PRESENT)) ==
		(_PAGE_PSE | _PAGE_PRESENT);
}

static inline int pud_bad(pud_t pud)
{
	return (pud_flags(pud) & ~(_KERNPG_TABLE | _PAGE_USER)) != 0;
}
#else
static inline int pud_large(pud_t pud)
{
	return 0;
}
#endif	/* PAGETABLE_LEVELS > 2 */

#if PAGETABLE_LEVELS > 3
static inline int pgd_present(pgd_t pgd)
{
	return pgd_flags(pgd) & _PAGE_PRESENT;
}

static inline unsigned long pgd_page_vaddr(pgd_t pgd)
{
	return (unsigned long)__va((unsigned long)pgd_val(pgd) & PTE_PFN_MASK);
}

/*
 * Currently stuck as a macro due to indirect forward reference to
 * linux/mmzone.h's __section_mem_map_addr() definition:
 */
#define pgd_page(pgd)	pfn_to_page(pgd_val(pgd) >> PAGE_SHIFT)

/* to find an entry in a page-table-directory. */
static inline unsigned long pud_index(unsigned long address)
{
	return (address >> PUD_SHIFT) & (PTRS_PER_PUD - 1);
}

static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
{
	return (pud_t *)pgd_page_vaddr(*pgd) + pud_index(address);
}

static inline int pgd_bad(pgd_t pgd)
{
	return (pgd_flags(pgd) & ~_PAGE_USER) != _KERNPG_TABLE;
}

static inline int pgd_none(pgd_t pgd)
{
	return !native_pgd_val(pgd);
}
#endif	/* PAGETABLE_LEVELS > 3 */

#endif	/* __ASSEMBLY__ */

/*
 * the pgd page can be thought of as an array like this: pgd_t[PTRS_PER_PGD]
 *
 * this macro returns the index of the entry in the pgd page which would
 * control the given virtual address
 */
#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))

/*
 * pgd_offset() returns a (pgd_t *)
 * pgd_index() is used to get the offset into the pgd page's array of pgd_t's;
 */
#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address)))
/*
 * a shortcut which implies the use of the kernel's pgd, instead
 * of a process's
 */
#define pgd_offset_k(address) pgd_offset(&init_mm, (address))

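/*
 * Example (illustrative only): a software walk down to a kernel PTE, using
 * only the helpers above, looks roughly like
 *
 *	pgd_t *pgd = pgd_offset_k(addr);
 *	pud_t *pud = pud_offset(pgd, addr);
 *	pmd_t *pmd = pmd_offset(pud, addr);
 *	pte_t *pte = pte_offset_kernel(pmd, addr);
 *
 * Each step assumes the previous entry is present (see pgd_present() and
 * friends); pud_large()/pmd_large() must also be checked, since a huge
 * mapping terminates the walk early.
 */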
#define KERNEL_PGD_BOUNDARY	pgd_index(PAGE_OFFSET)
#define KERNEL_PGD_PTRS		(PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)

#ifndef __ASSEMBLY__

extern int direct_gbpages;
void init_mem_mapping(void);
void early_alloc_pgt_buf(void);

/* local pte updates need not use xchg for locking */
static inline pte_t native_local_ptep_get_and_clear(pte_t *ptep)
{
	pte_t res = *ptep;

	/* Pure native function needs no input for mm, addr */
	native_pte_clear(NULL, 0, ptep);
	return res;
}

static inline pmd_t native_local_pmdp_get_and_clear(pmd_t *pmdp)
{
	pmd_t res = *pmdp;

	native_pmd_clear(pmdp);
	return res;
}

static inline void native_set_pte_at(struct mm_struct *mm, unsigned long addr,
				     pte_t *ptep, pte_t pte)
{
	native_set_pte(ptep, pte);
}

static inline void native_set_pmd_at(struct mm_struct *mm, unsigned long addr,
				     pmd_t *pmdp, pmd_t pmd)
{
	native_set_pmd(pmdp, pmd);
}

#ifndef CONFIG_PARAVIRT
/*
 * Rules for using pte_update - it must be called after any PTE update which
 * has not been done using the set_pte / clear_pte interfaces.  It is used by
 * shadow mode hypervisors to resynchronize the shadow page tables.  Kernel PTE
 * updates should either be sets, clears, or set_pte_atomic for P->P
 * transitions, which means this hook should only be called for user PTEs.
 * This hook implies a P->P protection or access change has taken place, which
 * requires a subsequent TLB flush.  The notification can optionally be delayed
 * until the TLB flush event by using the pte_update_defer form of the
 * interface, but care must be taken to assure that the flush happens while
 * still holding the same page table lock so that the shadow and primary pages
 * do not become out of sync on SMP.
 */
#define pte_update(mm, addr, ptep)		do { } while (0)
#define pte_update_defer(mm, addr, ptep)	do { } while (0)
#endif

/*
 * We only update the dirty/accessed state if we set
 * the dirty bit by hand in the kernel, since the hardware
 * will do the accessed bit for us, and we don't want to
 * race with other CPUs that might be updating the dirty
 * bit at the same time.
 */
struct vm_area_struct;

#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
extern int ptep_set_access_flags(struct vm_area_struct *vma,
				 unsigned long address, pte_t *ptep,
				 pte_t entry, int dirty);

#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
extern int ptep_test_and_clear_young(struct vm_area_struct *vma,
				     unsigned long addr, pte_t *ptep);

#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
extern int ptep_clear_flush_young(struct vm_area_struct *vma,
				  unsigned long address, pte_t *ptep);

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
				       pte_t *ptep)
{
	pte_t pte = native_ptep_get_and_clear(ptep);
	pte_update(mm, addr, ptep);
	return pte;
}

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
					    unsigned long addr, pte_t *ptep,
					    int full)
{
	pte_t pte;
	if (full) {
		/*
		 * Full address destruction in progress; paravirt does not
		 * care about updates and native needs no locking
		 */
		pte = native_local_ptep_get_and_clear(ptep);
	} else {
		pte = ptep_get_and_clear(mm, addr, ptep);
	}
	return pte;
}

#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm,
				      unsigned long addr, pte_t *ptep)
{
	clear_bit(_PAGE_BIT_RW, (unsigned long *)&ptep->pte);
	pte_update(mm, addr, ptep);
}

#define flush_tlb_fix_spurious_fault(vma, address) do { } while (0)

#define mk_pmd(page, pgprot)   pfn_pmd(page_to_pfn(page), (pgprot))

#define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
extern int pmdp_set_access_flags(struct vm_area_struct *vma,
				 unsigned long address, pmd_t *pmdp,
				 pmd_t entry, int dirty);

#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
extern int pmdp_test_and_clear_young(struct vm_area_struct *vma,
				     unsigned long addr, pmd_t *pmdp);

#define __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
extern int pmdp_clear_flush_young(struct vm_area_struct *vma,
				  unsigned long address, pmd_t *pmdp);

#define __HAVE_ARCH_PMDP_SPLITTING_FLUSH
extern void pmdp_splitting_flush(struct vm_area_struct *vma,
				 unsigned long addr, pmd_t *pmdp);

#define __HAVE_ARCH_PMD_WRITE
static inline int pmd_write(pmd_t pmd)
{
	return pmd_flags(pmd) & _PAGE_RW;
}

#define __HAVE_ARCH_PMDP_GET_AND_CLEAR
static inline pmd_t pmdp_get_and_clear(struct mm_struct *mm, unsigned long addr,
				       pmd_t *pmdp)
{
	pmd_t pmd = native_pmdp_get_and_clear(pmdp);
	pmd_update(mm, addr, pmdp);
	return pmd;
}

#define __HAVE_ARCH_PMDP_SET_WRPROTECT
static inline void pmdp_set_wrprotect(struct mm_struct *mm,
				      unsigned long addr, pmd_t *pmdp)
{
	clear_bit(_PAGE_BIT_RW, (unsigned long *)pmdp);
	pmd_update(mm, addr, pmdp);
}

/*
 * clone_pgd_range(pgd_t *dst, pgd_t *src, int count);
 *
 *  dst - pointer to pgd range anywhere on a pgd page
 *  src - ""
 *  count - the number of pgds to copy.
 *
 * dst and src can be on the same page, but the range must not overlap,
 * and must not cross a page boundary.
 */
static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
{
	memcpy(dst, src, count * sizeof(pgd_t));
}

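/*
 * Example (illustrative only): pgd_alloc()-style code copies the kernel
 * half of a new pgd from swapper_pg_dir with something like
 *
 *	clone_pgd_range(pgd + KERNEL_PGD_BOUNDARY,
 *			swapper_pg_dir + KERNEL_PGD_BOUNDARY,
 *			KERNEL_PGD_PTRS);
 *
 * which copies exactly the kernel-address-space entries defined above.
 */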
#define PTE_SHIFT ilog2(PTRS_PER_PTE)
static inline int page_level_shift(enum pg_level level)
{
	return (PAGE_SHIFT - PTE_SHIFT) + level * PTE_SHIFT;
}
static inline unsigned long page_level_size(enum pg_level level)
{
	return 1UL << page_level_shift(level);
}
static inline unsigned long page_level_mask(enum pg_level level)
{
	return ~(page_level_size(level) - 1);
}

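/*
 * Example (illustrative only): with 512-entry tables PTE_SHIFT is 9, so
 * page_level_shift() gives 12 for PG_LEVEL_4K and 21 for PG_LEVEL_2M;
 * page_level_size(PG_LEVEL_2M) is therefore 2MB, and
 * page_level_mask(PG_LEVEL_2M) masks an address down to the start of its
 * 2MB mapping.
 */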
/*
 * The x86 doesn't have any external MMU info: the kernel page
 * tables contain all the necessary information.
 */
static inline void update_mmu_cache(struct vm_area_struct *vma,
		unsigned long addr, pte_t *ptep)
{
}
static inline void update_mmu_cache_pmd(struct vm_area_struct *vma,
		unsigned long addr, pmd_t *pmd)
{
}

static inline pte_t pte_swp_mksoft_dirty(pte_t pte)
{
	VM_BUG_ON(pte_present_nonuma(pte));
	return pte_set_flags(pte, _PAGE_SWP_SOFT_DIRTY);
}

static inline int pte_swp_soft_dirty(pte_t pte)
{
	VM_BUG_ON(pte_present_nonuma(pte));
	return pte_flags(pte) & _PAGE_SWP_SOFT_DIRTY;
}

static inline pte_t pte_swp_clear_soft_dirty(pte_t pte)
{
	VM_BUG_ON(pte_present_nonuma(pte));
	return pte_clear_flags(pte, _PAGE_SWP_SOFT_DIRTY);
}

#include <asm-generic/pgtable.h>
#endif	/* __ASSEMBLY__ */

#endif /* _ASM_X86_PGTABLE_H */