powerpc/mm: Rework usage of _PAGE_COHERENT/NO_CACHE/GUARDED
arch/powerpc/include/asm/pgtable.h

#ifndef _ASM_POWERPC_PGTABLE_H
#define _ASM_POWERPC_PGTABLE_H
#ifdef __KERNEL__

#ifndef __ASSEMBLY__
#include <asm/processor.h>	/* For TASK_SIZE */
#include <asm/mmu.h>
#include <asm/page.h>
struct mm_struct;
#endif /* !__ASSEMBLY__ */

#if defined(CONFIG_PPC64)
# include <asm/pgtable-ppc64.h>
#else
# include <asm/pgtable-ppc32.h>
#endif

#ifndef __ASSEMBLY__

/*
 * Macros to adjust the cache-control bits in a page protection value:
 * each one clears the existing cache attributes and then marks the
 * mapping uncached and guarded, uncached write-combining, cached
 * coherent, or cached write-through, respectively.
 */

#define _PAGE_CACHE_CTL	(_PAGE_COHERENT | _PAGE_GUARDED | _PAGE_NO_CACHE | \
			 _PAGE_WRITETHRU)

#define pgprot_noncached(prot)	  (__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \
					    _PAGE_NO_CACHE | _PAGE_GUARDED))

#define pgprot_noncached_wc(prot) (__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \
					    _PAGE_NO_CACHE))

#define pgprot_cached(prot)	  (__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \
					    _PAGE_COHERENT))

#define pgprot_cached_wthru(prot) (__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \
					    _PAGE_COHERENT | _PAGE_WRITETHRU))

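/*
 * Usage sketch (illustrative only, not part of this header): a driver's
 * mmap() handler could mark an MMIO mapping uncacheable with
 * pgprot_noncached() before remapping it.  The function name foo_mmap
 * below is hypothetical.
 *
 *	static int foo_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 *		return io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
 *					  vma->vm_end - vma->vm_start,
 *					  vma->vm_page_prot);
 *	}
 */
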
struct file;
extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
				     unsigned long size, pgprot_t vma_prot);
#define __HAVE_PHYS_MEM_ACCESS_PROT

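/*
 * Defining __HAVE_PHYS_MEM_ACCESS_PROT lets generic code such as the
 * /dev/mem driver ask the architecture which protection to use for a
 * given physical mapping.  A rough sketch of such a caller (simplified,
 * not a verbatim quote):
 *
 *	vma->vm_page_prot = phys_mem_access_prot(file, vma->vm_pgoff,
 *						 vma->vm_end - vma->vm_start,
 *						 vma->vm_page_prot);
 */
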
/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas, etc.
 */
extern unsigned long empty_zero_page[];
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
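
/*
 * Illustrative check (a sketch, not lifted verbatim from mm code):
 * callers can test whether a mapped page is the shared zero page:
 *
 *	if (page == ZERO_PAGE(address))
 *		return;		(nothing private to copy or flush)
 */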

extern pgd_t swapper_pg_dir[];

extern void paging_init(void);

/*
 * kern_addr_valid is intended to indicate whether an address is a valid
 * kernel address.  Most 32-bit archs define it as always true (like this)
 * but most 64-bit archs actually perform a test.  What should we do here?
 */
#define kern_addr_valid(addr)	(1)

#define io_remap_pfn_range(vma, vaddr, pfn, size, prot)	\
	remap_pfn_range(vma, vaddr, pfn, size, prot)

#include <asm-generic/pgtable.h>

/*
 * This gets called at the end of handling a page fault, when
 * the kernel has put a new PTE into the page table for the process.
 * We use it to ensure coherency between the i-cache and d-cache
 * for the page which has just been mapped in.
 * On machines which use an MMU hash table, we use this to put a
 * corresponding HPTE into the hash table ahead of time, instead of
 * waiting for the inevitable extra hash-table miss exception.
 */
extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t);

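/*
 * Sketch of a generic call site (simplified from the core fault path,
 * not a verbatim quote): once the new PTE has been installed, the
 * architecture gets a chance to preload the hash table and fix up
 * cache coherency:
 *
 *	set_pte_at(mm, address, ptep, entry);
 *	update_mmu_cache(vma, address, entry);
 */
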
#endif /* __ASSEMBLY__ */

#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_PGTABLE_H */