Commit | Line | Data
---|---|---
1da177e4 LT
1 | /*
2 |  * Copyright 2002 Andi Kleen, SuSE Labs.
3 |  * Thanks to Ben LaHaise for precious feedback.
4 |  */
5 |
6 | #include <linux/mm.h>
7 | #include <linux/sched.h>
8 | #include <linux/highmem.h>
9 | #include <linux/module.h>
10 | #include <linux/slab.h>
11 | #include <asm/uaccess.h>
12 | #include <asm/processor.h>
13 | #include <asm/tlbflush.h>
14 | #include <asm/io.h>
15 |
16 | static inline pte_t *lookup_address(unsigned long address)
17 | {
18 |         pgd_t *pgd = pgd_offset_k(address);
19 |         pud_t *pud;
20 |         pmd_t *pmd;
21 |         pte_t *pte;
22 |         if (pgd_none(*pgd))
23 |                 return NULL;
24 |         pud = pud_offset(pgd, address);
25 |         if (!pud_present(*pud))
26 |                 return NULL;
27 |         pmd = pmd_offset(pud, address);
28 |         if (!pmd_present(*pmd))
29 |                 return NULL;
30 |         if (pmd_large(*pmd))
31 |                 return (pte_t *)pmd;
32 |         pte = pte_offset_kernel(pmd, address);
33 |         if (pte && !pte_present(*pte))
34 |                 pte = NULL;
35 |         return pte;
36 | }
37 |
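 | /* Note: for a 2MB mapping, lookup_address() returns the pmd entry
 |  * itself cast to pte_t *; callers tell the two cases apart via
 |  * _PAGE_PSE in the returned entry. A non-present pte is reported
 |  * as NULL. */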
38 | static struct page *split_large_page(unsigned long address, pgprot_t prot,
39 |                                      pgprot_t ref_prot)
40 | {
41 |         int i;
42 |         unsigned long addr;
43 |         struct page *base = alloc_pages(GFP_KERNEL, 0);
44 |         pte_t *pbase;
45 |         if (!base)
46 |                 return NULL;
4fa4f53b NP
47 |         /*
48 |          * page_private is used to track the number of entries in
49 |          * the page table page that have non-standard attributes.
50 |          */
51 |         SetPagePrivate(base);
52 |         page_private(base) = 0;
53 |
1da177e4 LT
54 |         address = __pa(address);
55 |         addr = address & LARGE_PAGE_MASK;
56 |         pbase = (pte_t *)page_address(base);
57 |         for (i = 0; i < PTRS_PER_PTE; i++, addr += PAGE_SIZE) {
58 |                 pbase[i] = pfn_pte(addr >> PAGE_SHIFT,
59 |                                    addr == address ? prot : ref_prot);
60 |         }
61 |         return base;
62 | }
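 | /* Note: the new page table covers the whole 2MB region: the entry
 |  * for the target 4k page gets `prot`, the remaining PTRS_PER_PTE - 1
 |  * entries keep `ref_prot`. The caller then bumps page_private() to
 |  * count entries with non-standard protections. */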
63 |
64 |
65 | static void flush_kernel_map(void *address)
66 | {
67 |         if (0 && address && cpu_has_clflush) {
68 |                 /* is this worth it? */
69 |                 int i;
70 |                 for (i = 0; i < PAGE_SIZE; i += boot_cpu_data.x86_clflush_size)
71 |                         asm volatile("clflush (%0)" :: "r" (address + i));
72 |         } else
73 |                 asm volatile("wbinvd":::"memory");
74 |         if (address)
75 |                 __flush_tlb_one(address);
76 |         else
77 |                 __flush_tlb_all();
78 | }
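 | /* Note: attribute changes must flush CPU caches as well as TLBs;
 |  * wbinvd writes back and invalidates all caches on the CPU. The
 |  * clflush path is deliberately compiled out ("0 &&"), likely because
 |  * a single address is not enough once several deferred pages need
 |  * flushing in one go. */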
79 |
80 |
81 | static inline void flush_map(unsigned long address)
82 | {
83 |         on_each_cpu(flush_kernel_map, (void *)address, 1, 1);
84 | }
85 |
20aaffd6 NP
86 | static struct page *deferred_pages; /* protected by init_mm.mmap_sem */
87 |
88 | static inline void save_page(struct page *fpage)
1da177e4 | 89 | {
20aaffd6 NP
90 |         fpage->lru.next = (struct list_head *)deferred_pages;
91 |         deferred_pages = fpage;
1da177e4 LT
92 | }
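 | /* Note: a split page-table page cannot be freed until after the TLB
 |  * flush, so it is chained through page->lru.next onto deferred_pages
 |  * and freed later in global_flush_tlb(). */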
93 |
94 | /*
95 |  * No more special protections in this 2/4MB area - revert to a
96 |  * large page again.
97 |  */
98 | static void revert_page(unsigned long address, pgprot_t ref_prot)
99 | {
100 |         pgd_t *pgd;
101 |         pud_t *pud;
102 |         pmd_t *pmd;
103 |         pte_t large_pte;
104 |
105 |         pgd = pgd_offset_k(address);
106 |         BUG_ON(pgd_none(*pgd));
107 |         pud = pud_offset(pgd, address);
108 |         BUG_ON(pud_none(*pud));
109 |         pmd = pmd_offset(pud, address);
110 |         BUG_ON(pmd_val(*pmd) & _PAGE_PSE);
111 |         pgprot_val(ref_prot) |= _PAGE_PSE;
112 |         large_pte = mk_pte_phys(__pa(address) & LARGE_PAGE_MASK, ref_prot);
113 |         set_pte((pte_t *)pmd, large_pte);
114 | }
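 | /* Note: the BUG_ON checks that the pmd still points to a 4k page
 |  * table (PSE clear), i.e. that there is actually a split mapping to
 |  * revert before the single 2MB PSE entry is written back. */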
115 |
116 | static int
117 | __change_page_attr(unsigned long address, unsigned long pfn, pgprot_t prot,
118 |                    pgprot_t ref_prot)
119 | {
120 |         pte_t *kpte;
121 |         struct page *kpte_page;
122 |         unsigned kpte_flags;
c728252c | 123 |         pgprot_t ref_prot2;
1da177e4 LT
124 |         kpte = lookup_address(address);
125 |         if (!kpte) return 0;
126 |         kpte_page = virt_to_page(((unsigned long)kpte) & PAGE_MASK);
127 |         kpte_flags = pte_val(*kpte);
128 |         if (pgprot_val(prot) != pgprot_val(ref_prot)) {
129 |                 if ((kpte_flags & _PAGE_PSE) == 0) {
130 |                         set_pte(kpte, pfn_pte(pfn, prot));
131 |                 } else {
132 |                         /*
4fa4f53b NP
133 |                          * split_large_page will take the reference for this
134 |                          * change_page_attr on the split page.
1da177e4 | 135 |                          */
c728252c AV
136 |
137 |                         struct page *split;
138 |                         ref_prot2 = pte_pgprot(pte_clrhuge(*kpte));
139 |
140 |                         split = split_large_page(address, prot, ref_prot2);
1da177e4 LT
141 |                         if (!split)
142 |                                 return -ENOMEM;
c728252c | 143 |                         set_pte(kpte, mk_pte(split, ref_prot2));
1da177e4 LT
144 |                         kpte_page = split;
145 |                 }
4fa4f53b | 146 |                 page_private(kpte_page)++;
1da177e4 LT
147 |         } else if ((kpte_flags & _PAGE_PSE) == 0) {
148 |                 set_pte(kpte, pfn_pte(pfn, ref_prot));
4fa4f53b NP
149 |                 BUG_ON(page_private(kpte_page) == 0);
150 |                 page_private(kpte_page)--;
1da177e4 LT
151 |         } else
152 |                 BUG();
153 |
154 |         /* on x86-64 the direct mapping set at boot is not using 4k pages */
155 |         BUG_ON(PageReserved(kpte_page));
156 |
4fa4f53b | 157 |         if (page_private(kpte_page) == 0) {
20aaffd6 | 158 |                 save_page(kpte_page);
1da177e4 LT
159 |                 revert_page(address, ref_prot);
160 |         }
161 |         return 0;
162 | }
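 | /* Note: page_private() of a split page table counts how many of its
 |  * PTEs still carry non-default protections. It is incremented when a
 |  * page gets special attributes and decremented on revert; when it
 |  * reaches zero the table is queued for freeing and the original 2MB
 |  * mapping is restored. */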
163 |
164 | /*
165 |  * Change the page attributes of a page in the linear mapping.
166 |  *
167 |  * This should be used when a page is mapped with a different caching policy
168 |  * than write-back somewhere - some CPUs do not like it when mappings with
169 |  * different caching policies exist. This changes the page attributes of the
170 |  * kernel linear mapping too.
171 |  *
172 |  * The caller needs to ensure that there are no conflicting mappings elsewhere.
173 |  * This function only deals with the kernel linear map.
174 |  *
175 |  * Caller must call global_flush_tlb() after this.
176 |  */
177 | int change_page_attr_addr(unsigned long address, int numpages, pgprot_t prot)
178 | {
179 |         int err = 0;
180 |         int i;
181 |
182 |         down_write(&init_mm.mmap_sem);
183 |         for (i = 0; i < numpages; i++, address += PAGE_SIZE) {
184 |                 unsigned long pfn = __pa(address) >> PAGE_SHIFT;
185 |
186 |                 err = __change_page_attr(address, pfn, prot, PAGE_KERNEL);
187 |                 if (err)
188 |                         break;
189 |                 /* Handle the kernel text mapping as well, since it
190 |                  * aliases part of lowmem. */
191 |                 if (__pa(address) < KERNEL_TEXT_SIZE) {
192 |                         unsigned long addr2;
df992848 | 193 |                         pgprot_t prot2;
1da177e4 | 194 |                         addr2 = __START_KERNEL_map + __pa(address);
df992848 AK
195 |                         /* Make sure the kernel mappings stay executable */
196 |                         prot2 = pte_pgprot(pte_mkexec(pfn_pte(0, prot)));
197 |                         err = __change_page_attr(addr2, pfn, prot2,
198 |                                                  PAGE_KERNEL_EXEC);
1da177e4 LT
199 |                 }
200 |         }
201 |         up_write(&init_mm.mmap_sem);
202 |         return err;
203 | }
204 |
205 | /* Don't call this for MMIO areas that may not have a mem_map entry */
206 | int change_page_attr(struct page *page, int numpages, pgprot_t prot)
207 | {
208 |         unsigned long addr = (unsigned long)page_address(page);
209 |         return change_page_attr_addr(addr, numpages, prot);
210 | }
211 |
212 | void global_flush_tlb(void)
213 | {
20aaffd6 | 214 |         struct page *dpage;
1da177e4 LT
215 |
216 |         down_read(&init_mm.mmap_sem);
20aaffd6 | 217 |         dpage = xchg(&deferred_pages, NULL);
1da177e4 | 218 |         up_read(&init_mm.mmap_sem);
20aaffd6 NP
219 |
220 |         flush_map((dpage && !dpage->lru.next) ? (unsigned long)page_address(dpage) : 0);
221 |         while (dpage) {
222 |                 struct page *tmp = dpage;
223 |                 dpage = (struct page *)dpage->lru.next;
4fa4f53b | 224 |                 ClearPagePrivate(tmp);
20aaffd6 | 225 |                 __free_page(tmp);
1da177e4 LT
226 |         }
227 | }
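 | /* Note: `dpage && !dpage->lru.next` means exactly one page-table
 |  * page was queued; its kernel address is passed so flush_kernel_map()
 |  * can do a single-page TLB flush, otherwise 0 forces a global flush.
 |  * The xchg() detaches the whole list so new deferrals can keep
 |  * accumulating under the semaphore. */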
228 |
229 | EXPORT_SYMBOL(change_page_attr);
230 | EXPORT_SYMBOL(global_flush_tlb);
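
A minimal usage sketch (illustrative only, not part of this file): a caller
that temporarily needs an uncached kernel mapping of a page pairs each
attribute change with the mandatory flush, along these lines:

        /* switch the linear-map PTE for this page to uncached */
        change_page_attr(page, 1, PAGE_KERNEL_NOCACHE);
        global_flush_tlb();

        /* ... use the uncached mapping ... */

        /* restore the default write-back attribute when finished */
        change_page_attr(page, 1, PAGE_KERNEL);
        global_flush_tlb();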