arch/x86/include/asm/fixmap.h
/*
 * fixmap.h: compile-time virtual memory allocation
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1998 Ingo Molnar
 *
 * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 * x86_32 and x86_64 integration by Gustavo F. Padovan, February 2009
 */

#ifndef _ASM_X86_FIXMAP_H
#define _ASM_X86_FIXMAP_H

#ifndef __ASSEMBLY__
#include <linux/kernel.h>
#include <asm/acpi.h>
#include <asm/apicdef.h>
#include <asm/page.h>
#ifdef CONFIG_X86_32
#include <linux/threads.h>
#include <asm/kmap_types.h>
#else
#include <asm/vsyscall.h>
#endif

/*
 * We can't declare FIXADDR_TOP as a variable for x86_64 because vsyscall
 * uses fixmaps that rely on FIXADDR_TOP for proper address calculation.
 * Because of this, unifying FIXADDR_TOP across x86 was left as later work.
 */
#ifdef CONFIG_X86_32
/* used by vmalloc.c, vsyscall.lds.S.
 *
 * Leave one empty page between vmalloc'ed areas and
 * the start of the fixmap.
 */
extern unsigned long __FIXADDR_TOP;
#define FIXADDR_TOP	((unsigned long)__FIXADDR_TOP)

#define FIXADDR_USER_START	__fix_to_virt(FIX_VDSO)
#define FIXADDR_USER_END	__fix_to_virt(FIX_VDSO - 1)
#else
#define FIXADDR_TOP	(VSYSCALL_END-PAGE_SIZE)

/* Only covers 32bit vsyscalls currently. Need another set for 64bit. */
#define FIXADDR_USER_START	((unsigned long)VSYSCALL32_VSYSCALL)
#define FIXADDR_USER_END	(FIXADDR_USER_START + PAGE_SIZE)
#endif

/*
 * Here we define all the compile-time 'special' virtual
 * addresses. The point is to have a constant address at
 * compile time, but to set the physical address only
 * in the boot process.
 * For x86_32: we allocate these special addresses
 * from the end of virtual memory (0xfffff000) backwards.
 * This also lets us do fail-safe vmalloc(), since we
 * can guarantee that these special addresses and
 * vmalloc()-ed addresses never overlap.
 *
 * These 'compile-time allocated' memory buffers are
 * fixed-size 4k pages (or larger if used with an increment
 * higher than 1). Use set_fixmap(idx,phys) to associate
 * physical memory with fixmap indices.
 *
 * TLB entries of such buffers will not be flushed across
 * task switches.
 */
enum fixed_addresses {
#ifdef CONFIG_X86_32
	FIX_HOLE,
	FIX_VDSO,
#else
	VSYSCALL_LAST_PAGE,
	VSYSCALL_FIRST_PAGE = VSYSCALL_LAST_PAGE
			    + ((VSYSCALL_END-VSYSCALL_START) >> PAGE_SHIFT) - 1,
	VSYSCALL_HPET,
#endif
	FIX_DBGP_BASE,
	FIX_EARLYCON_MEM_BASE,
#ifdef CONFIG_PROVIDE_OHCI1394_DMA_INIT
	FIX_OHCI1394_BASE,
#endif
#ifdef CONFIG_X86_LOCAL_APIC
	FIX_APIC_BASE,	/* local (CPU) APIC -- required for SMP or not */
#endif
#ifdef CONFIG_X86_IO_APIC
	FIX_IO_APIC_BASE_0,
	FIX_IO_APIC_BASE_END = FIX_IO_APIC_BASE_0 + MAX_IO_APICS - 1,
#endif
#ifdef CONFIG_X86_VISWS_APIC
	FIX_CO_CPU,	/* Cobalt timer */
	FIX_CO_APIC,	/* Cobalt APIC Redirection Table */
	FIX_LI_PCIA,	/* Lithium PCI Bridge A */
	FIX_LI_PCIB,	/* Lithium PCI Bridge B */
#endif
#ifdef CONFIG_X86_F00F_BUG
	FIX_F00F_IDT,	/* Virtual mapping for IDT */
#endif
#ifdef CONFIG_X86_CYCLONE_TIMER
	FIX_CYCLONE_TIMER, /* Cyclone timer register */
#endif
#ifdef CONFIG_X86_32
	FIX_KMAP_BEGIN,	/* reserved pte's for temporary kernel mappings */
	FIX_KMAP_END = FIX_KMAP_BEGIN+(KM_TYPE_NR*NR_CPUS)-1,
#ifdef CONFIG_PCI_MMCONFIG
	FIX_PCIE_MCFG,
#endif
#endif
#ifdef CONFIG_PARAVIRT
	FIX_PARAVIRT_BOOTMAP,
#endif
	FIX_TEXT_POKE1,	/* reserve 2 pages for text_poke() */
	FIX_TEXT_POKE0, /* first page is last, because allocation is backward */
#ifdef CONFIG_X86_MRST
	FIX_LNW_VRTC,
#endif
	__end_of_permanent_fixed_addresses,

	/*
	 * 256 temporary boot-time mappings, used by early_ioremap(),
	 * before ioremap() is functional.
	 *
	 * If necessary we round it up to the next 256-page boundary, so
	 * that we can have a single pgd entry and a single pte table
	 * (a worked example of this rounding follows the enum):
	 */
#define NR_FIX_BTMAPS		64
#define FIX_BTMAPS_SLOTS	4
#define TOTAL_FIX_BTMAPS	(NR_FIX_BTMAPS * FIX_BTMAPS_SLOTS)
	FIX_BTMAP_END =
	 (__end_of_permanent_fixed_addresses ^
	  (__end_of_permanent_fixed_addresses + TOTAL_FIX_BTMAPS - 1)) &
	 -PTRS_PER_PTE
	 ? __end_of_permanent_fixed_addresses + TOTAL_FIX_BTMAPS -
	   (__end_of_permanent_fixed_addresses & (TOTAL_FIX_BTMAPS - 1))
	 : __end_of_permanent_fixed_addresses,
	FIX_BTMAP_BEGIN = FIX_BTMAP_END + TOTAL_FIX_BTMAPS - 1,
#ifdef CONFIG_X86_32
	FIX_WP_TEST,
#endif
#ifdef CONFIG_INTEL_TXT
	FIX_TBOOT_BASE,
#endif
	__end_of_fixed_addresses
};
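
/*
 * Worked example of the FIX_BTMAP_END rounding above (illustrative
 * numbers only; the real values depend on the configuration). Assume
 * PTRS_PER_PTE == 512, TOTAL_FIX_BTMAPS == 256 and
 * __end_of_permanent_fixed_addresses == 400:
 *
 *	(400 ^ (400 + 256 - 1)) & -512  ==  (400 ^ 655) & ~511  ==  512 != 0
 *
 * so the 256 boot-time slots would straddle a pte-table boundary and
 * the index gets rounded up:
 *
 *	FIX_BTMAP_END   = 400 + 256 - (400 & 255) = 512
 *	FIX_BTMAP_BEGIN = 512 + 256 - 1           = 767
 *
 * Had the range not crossed a boundary (e.g. 516..771 with the same
 * mask), FIX_BTMAP_END would simply stay at
 * __end_of_permanent_fixed_addresses.
 */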

extern void reserve_top_address(unsigned long reserve);

#define FIXADDR_SIZE		(__end_of_permanent_fixed_addresses << PAGE_SHIFT)
#define FIXADDR_BOOT_SIZE	(__end_of_fixed_addresses << PAGE_SHIFT)
#define FIXADDR_START		(FIXADDR_TOP - FIXADDR_SIZE)
#define FIXADDR_BOOT_START	(FIXADDR_TOP - FIXADDR_BOOT_SIZE)
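
/*
 * For instance (purely illustrative numbers): with 4 KiB pages and, say,
 * 580 permanent fixmap slots, FIXADDR_SIZE would be 580 << 12 = 0x244000
 * bytes, and FIXADDR_START would sit that far below FIXADDR_TOP. The
 * "boot" variants additionally cover the temporary early_ioremap() slots,
 * so FIXADDR_BOOT_START <= FIXADDR_START.
 */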

extern int fixmaps_set;

extern pte_t *kmap_pte;
extern pgprot_t kmap_prot;
extern pte_t *pkmap_page_table;

void __native_set_fixmap(enum fixed_addresses idx, pte_t pte);
void native_set_fixmap(enum fixed_addresses idx,
		       phys_addr_t phys, pgprot_t flags);

#ifndef CONFIG_PARAVIRT
static inline void __set_fixmap(enum fixed_addresses idx,
				phys_addr_t phys, pgprot_t flags)
{
	native_set_fixmap(idx, phys, flags);
}
#endif

#define set_fixmap(idx, phys) \
	__set_fixmap(idx, phys, PAGE_KERNEL)

/*
 * Some hardware wants to get fixmapped without caching.
 */
#define set_fixmap_nocache(idx, phys) \
	__set_fixmap(idx, phys, PAGE_KERNEL_NOCACHE)

#define clear_fixmap(idx) \
	__set_fixmap(idx, 0, __pgprot(0))
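
/*
 * Typical (hypothetical) usage: map a device register page uncached at a
 * fixed slot and get its virtual address back. The physical address and
 * the choice of slot below are made up for illustration:
 *
 *	set_fixmap_nocache(FIX_DBGP_BASE, 0xfed00000);
 *	addr = fix_to_virt(FIX_DBGP_BASE);
 *
 * clear_fixmap(FIX_DBGP_BASE) tears the mapping down again.
 */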

#define __fix_to_virt(x)	(FIXADDR_TOP - ((x) << PAGE_SHIFT))
#define __virt_to_fix(x)	((FIXADDR_TOP - ((x)&PAGE_MASK)) >> PAGE_SHIFT)

extern void __this_fixmap_does_not_exist(void);

/*
 * 'index to address' translation. If anyone tries to use the idx
 * directly without translation, we catch the bug with a NULL-dereference
 * kernel oops. Illegal ranges of incoming indices are caught too.
 */
static __always_inline unsigned long fix_to_virt(const unsigned int idx)
{
	/*
	 * This branch gets completely eliminated after inlining,
	 * except when someone tries to use fixaddr indices in an
	 * illegal way (such as mixing up address types or using
	 * out-of-range indices).
	 *
	 * If it doesn't get removed, the linker will complain
	 * loudly with a reasonably clear error message.
	 */
	if (idx >= __end_of_fixed_addresses)
		__this_fixmap_does_not_exist();

	return __fix_to_virt(idx);
}
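
/*
 * E.g. a (hypothetical) call with a valid constant index such as
 * fix_to_virt(FIX_TEXT_POKE0) folds down to a plain constant address,
 * while fix_to_virt(__end_of_fixed_addresses) would leave the call to
 * __this_fixmap_does_not_exist() in place and fail at link time.
 */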

static inline unsigned long virt_to_fix(const unsigned long vaddr)
{
	BUG_ON(vaddr >= FIXADDR_TOP || vaddr < FIXADDR_START);
	return __virt_to_fix(vaddr);
}

/* Return an address with the offset within the page applied */
static __always_inline unsigned long
__set_fixmap_offset(enum fixed_addresses idx, phys_addr_t phys, pgprot_t flags)
{
	__set_fixmap(idx, phys, flags);
	return fix_to_virt(idx) + (phys & (PAGE_SIZE - 1));
}

#define set_fixmap_offset(idx, phys) \
	__set_fixmap_offset(idx, phys, PAGE_KERNEL)

#define set_fixmap_offset_nocache(idx, phys) \
	__set_fixmap_offset(idx, phys, PAGE_KERNEL_NOCACHE)
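
/*
 * Example (illustrative only): mapping something that is not page-aligned
 * keeps the low-order offset. For a made-up physical address of 0xfed00040,
 *
 *	vaddr = set_fixmap_offset_nocache(FIX_DBGP_BASE, 0xfed00040);
 *
 * returns fix_to_virt(FIX_DBGP_BASE) + 0x40, so vaddr points at the object
 * itself rather than at the start of its page.
 */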

#endif /* !__ASSEMBLY__ */
#endif /* _ASM_X86_FIXMAP_H */