Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | /* |
2 | * include/asm-s390/pgtable.h | |
3 | * | |
4 | * S390 version | |
5 | * Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation | |
6 | * Author(s): Hartmut Penner (hp@de.ibm.com) | |
7 | * Ulrich Weigand (weigand@de.ibm.com) | |
8 | * Martin Schwidefsky (schwidefsky@de.ibm.com) | |
9 | * | |
10 | * Derived from "include/asm-i386/pgtable.h" | |
11 | */ | |
12 | ||
13 | #ifndef _ASM_S390_PGTABLE_H | |
14 | #define _ASM_S390_PGTABLE_H | |
15 | ||
16 | #include <asm-generic/4level-fixup.h> | |
17 | ||
18 | /* | |
19 | * The Linux memory management assumes a three-level page table setup. For | |
20 | * s390 31 bit we "fold" the mid level into the top-level page table, so | |
21 | * that we physically have the same two-level page table as the s390 mmu | |
22 | * expects in 31 bit mode. For s390 64 bit we use three of the five levels | |
23 | * the hardware provides (region first and region second tables are not | |
24 | * used). | |
25 | * | |
26 | * The "pgd_xxx()" functions are trivial for a folded two-level | |
27 | * setup: the pgd is never bad, and a pmd always exists (as it's folded | |
28 | * into the pgd entry) | |
29 | * | |
30 | * This file contains the functions and defines necessary to modify and use | |
31 | * the S390 page table tree. | |
32 | */ | |
33 | #ifndef __ASSEMBLY__ | |
2dcea57a | 34 | #include <linux/mm_types.h> |
1da177e4 LT |
35 | #include <asm/bug.h> |
36 | #include <asm/processor.h> | |
1da177e4 | 37 | |
1da177e4 LT |
38 | extern pgd_t swapper_pg_dir[] __attribute__ ((aligned (4096))); |
39 | extern void paging_init(void); | |
2b67fc46 | 40 | extern void vmem_map_init(void); |
1da177e4 LT |
41 | |
42 | /* | |
43 | * The S390 doesn't have any external MMU info: the kernel page | |
44 | * tables contain all the necessary information. | |
45 | */ | |
46 | #define update_mmu_cache(vma, address, pte) do { } while (0) | |
47 | ||
48 | /* | |
49 | * ZERO_PAGE is a global shared page that is always zero: used | |
50 | * for zero-mapped memory areas etc.. | |
51 | */ | |
52 | extern char empty_zero_page[PAGE_SIZE]; | |
53 | #define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page)) | |
54 | #endif /* !__ASSEMBLY__ */ | |
55 | ||
56 | /* | |
57 | * PMD_SHIFT determines the size of the area a second-level page | |
58 | * table can map | |
59 | * PGDIR_SHIFT determines what a third-level page table entry can map | |
60 | */ | |
61 | #ifndef __s390x__ | |
62 | # define PMD_SHIFT 22 | |
63 | # define PGDIR_SHIFT 22 | |
64 | #else /* __s390x__ */ | |
65 | # define PMD_SHIFT 21 | |
66 | # define PGDIR_SHIFT 31 | |
67 | #endif /* __s390x__ */ | |
68 | ||
69 | #define PMD_SIZE (1UL << PMD_SHIFT) | |
70 | #define PMD_MASK (~(PMD_SIZE-1)) | |
71 | #define PGDIR_SIZE (1UL << PGDIR_SHIFT) | |
72 | #define PGDIR_MASK (~(PGDIR_SIZE-1)) | |
73 | ||
74 | /* | |
75 | * entries per page directory level: the S390 is two-level, so | |
76 | * we don't really have any PMD directory physically. | |
77 | * for S390, segment-table entries are combined into one PGD, | |
78 | * which leads to 1024 ptes per pgd | |
79 | */ | |
80 | #ifndef __s390x__ | |
81 | # define PTRS_PER_PTE 1024 | |
82 | # define PTRS_PER_PMD 1 | |
83 | # define PTRS_PER_PGD 512 | |
84 | #else /* __s390x__ */ | |
85 | # define PTRS_PER_PTE 512 | |
86 | # define PTRS_PER_PMD 1024 | |
87 | # define PTRS_PER_PGD 2048 | |
88 | #endif /* __s390x__ */ | |
89 | ||
d455a369 HD |
90 | #define FIRST_USER_ADDRESS 0 |
91 | ||
1da177e4 LT |
92 | #define pte_ERROR(e) \ |
93 | printk("%s:%d: bad pte %p.\n", __FILE__, __LINE__, (void *) pte_val(e)) | |
94 | #define pmd_ERROR(e) \ | |
95 | printk("%s:%d: bad pmd %p.\n", __FILE__, __LINE__, (void *) pmd_val(e)) | |
96 | #define pgd_ERROR(e) \ | |
97 | printk("%s:%d: bad pgd %p.\n", __FILE__, __LINE__, (void *) pgd_val(e)) | |
98 | ||
99 | #ifndef __ASSEMBLY__ | |
100 | /* | |
101 | * Just any arbitrary offset to the start of the vmalloc VM area: the | |
102 | * current 8MB value just means that there will be an 8MB "hole" after the | |
103 | * physical memory until the kernel virtual memory starts. That means that | |
104 | * any out-of-bounds memory accesses will hopefully be caught. | |
105 | * The vmalloc() routines leave a hole of 4kB between each vmalloced | |
106 | * area for the same reason. ;) | |
e39394b8 HC |
107 | * The vmalloc area starts at 4GB so that modules cannot exchange | |
108 | * syscall table entries. | |
1da177e4 | 109 | */ |
f4eb07c1 | 110 | extern unsigned long vmalloc_end; |
e39394b8 HC |
111 | |
112 | #ifdef CONFIG_64BIT | |
113 | #define VMALLOC_ADDR (max(0x100000000UL, (unsigned long) high_memory)) | |
114 | #else | |
115 | #define VMALLOC_ADDR ((unsigned long) high_memory) | |
116 | #endif | |
117 | #define VMALLOC_OFFSET (8*1024*1024) | |
118 | #define VMALLOC_START ((VMALLOC_ADDR + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1)) | |
f4eb07c1 | 119 | #define VMALLOC_END vmalloc_end |
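As a quick worked example (not part of the original header), here is how the VMALLOC_START rounding behaves for a hypothetical high_memory value on 31 bit:

```c
/* Hypothetical values, illustrative only.                            */
/* high_memory                    = 0x20000000  (512 MB)              */
/* VMALLOC_ADDR + VMALLOC_OFFSET  = 0x20800000                        */
/* ... & ~(VMALLOC_OFFSET - 1)    = 0x20800000  -> VMALLOC_START      */
/* i.e. vmalloc mappings start 8 MB above the end of physical memory. */
```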
8b62bc96 HC |
120 | |
121 | /* | |
122 | * We need some free virtual space to be able to do vmalloc. | |
123 | * VMALLOC_MIN_SIZE defines the minimum size of the vmalloc | |
124 | * area. On a machine with 2GB memory we make sure that we | |
125 | * have at least 128MB free space for vmalloc. On a machine | |
f4eb07c1 | 126 | * with 4TB we make sure we have at least 128GB. |
8b62bc96 | 127 | */ |
1da177e4 | 128 | #ifndef __s390x__ |
8b62bc96 | 129 | #define VMALLOC_MIN_SIZE 0x8000000UL |
f4eb07c1 | 130 | #define VMALLOC_END_INIT 0x80000000UL |
1da177e4 | 131 | #else /* __s390x__ */ |
f4eb07c1 HC |
132 | #define VMALLOC_MIN_SIZE 0x2000000000UL |
133 | #define VMALLOC_END_INIT 0x40000000000UL | |
1da177e4 LT |
134 | #endif /* __s390x__ */ |
135 | ||
1da177e4 LT |
136 | /* |
137 | * A 31 bit page table entry of S390 has the following format: | |
138 | * | PFRA | | OS | | |
139 | * 0 0IP0 | |
140 | * 00000000001111111111222222222233 | |
141 | * 01234567890123456789012345678901 | |
142 | * | |
143 | * I Page-Invalid Bit: Page is not available for address-translation | |
144 | * P Page-Protection Bit: Store access not possible for page | |
145 | * | |
146 | * A 31 bit segment table entry of S390 has the following format: | |
147 | * | P-table origin | |PTL | |
148 | * 0 IC | |
149 | * 00000000001111111111222222222233 | |
150 | * 01234567890123456789012345678901 | |
151 | * | |
152 | * I Segment-Invalid Bit: Segment is not available for address-translation | |
153 | * C Common-Segment Bit: Segment is not private (PoP 3-30) | |
154 | * PTL Page-Table-Length: Page-table length ((PTL+1)*16 entries -> up to 256) | |
155 | * | |
156 | * The 31 bit segment table origin of S390 has the following format: | |
157 | * | |
158 | * |S-table origin | | STL | | |
159 | * X **GPS | |
160 | * 00000000001111111111222222222233 | |
161 | * 01234567890123456789012345678901 | |
162 | * | |
163 | * X Space-Switch event: | |
164 | * G Segment-Invalid Bit: * | |
165 | * P Private-Space Bit: Segment is not private (PoP 3-30) | |
166 | * S Storage-Alteration: | |
167 | * STL Segment-Table-Length: Segment-table length ((STL+1)*16 entries -> up to 2048) | |
168 | * | |
169 | * A 64 bit page table entry of S390 has the following format: | |
170 | * | PFRA |0IP0| OS | | |
171 | * 0000000000111111111122222222223333333333444444444455555555556666 | |
172 | * 0123456789012345678901234567890123456789012345678901234567890123 | |
173 | * | |
174 | * I Page-Invalid Bit: Page is not available for address-translation | |
175 | * P Page-Protection Bit: Store access not possible for page | |
176 | * | |
177 | * A 64 bit segment table entry of S390 has the following format: | |
178 | * | P-table origin | TT | |
179 | * 0000000000111111111122222222223333333333444444444455555555556666 | |
180 | * 0123456789012345678901234567890123456789012345678901234567890123 | |
181 | * | |
182 | * I Segment-Invalid Bit: Segment is not available for address-translation | |
183 | * C Common-Segment Bit: Segment is not private (PoP 3-30) | |
184 | * P Page-Protection Bit: Store access not possible for page | |
185 | * TT Type 00 | |
186 | * | |
187 | * A 64 bit region table entry of S390 has the following format: | |
188 | * | S-table origin | TF TTTL | |
189 | * 0000000000111111111122222222223333333333444444444455555555556666 | |
190 | * 0123456789012345678901234567890123456789012345678901234567890123 | |
191 | * | |
192 | * I Segment-Invalid Bit: Segment is not available for address-translation | |
193 | * TT Type 01 | |
194 | * TF | |
195 | * TL Table length | |
196 | * | |
197 | * The 64 bit region table origin of S390 has the following format: | |
198 | * | region table origin | DTTL | |
199 | * 0000000000111111111122222222223333333333444444444455555555556666 | |
200 | * 0123456789012345678901234567890123456789012345678901234567890123 | |
201 | * | |
202 | * X Space-Switch event: | |
203 | * G Segment-Invalid Bit: | |
204 | * P Private-Space Bit: | |
205 | * S Storage-Alteration: | |
206 | * R Real space | |
207 | * TL Table-Length: | |
208 | * | |
209 | * A storage key has the following format: | |
210 | * | ACC |F|R|C|0| | |
211 | * 0 3 4 5 6 7 | |
212 | * ACC: access key | |
213 | * F : fetch protection bit | |
214 | * R : referenced bit | |
215 | * C : changed bit | |
216 | */ | |
217 | ||
218 | /* Hardware bits in the page table entry */ | |
83377484 MS |
219 | #define _PAGE_RO 0x200 /* HW read-only bit */ |
220 | #define _PAGE_INVALID 0x400 /* HW invalid bit */ | |
3610cce8 MS |
221 | |
222 | /* Software bits in the page table entry */ | |
83377484 MS |
223 | #define _PAGE_SWT 0x001 /* SW pte type bit t */ |
224 | #define _PAGE_SWX 0x002 /* SW pte type bit x */ | |
1da177e4 | 225 | |
83377484 | 226 | /* Eight different types of pages. */ |
9282ed92 GS |
227 | #define _PAGE_TYPE_EMPTY 0x400 |
228 | #define _PAGE_TYPE_NONE 0x401 | |
83377484 MS |
229 | #define _PAGE_TYPE_SWAP 0x403 |
230 | #define _PAGE_TYPE_FILE 0x601 /* bit 0x002 is used for offset !! */ | |
9282ed92 GS |
231 | #define _PAGE_TYPE_RO 0x200 |
232 | #define _PAGE_TYPE_RW 0x000 | |
c1821c2e GS |
233 | #define _PAGE_TYPE_EX_RO 0x202 |
234 | #define _PAGE_TYPE_EX_RW 0x002 | |
1da177e4 | 235 | |
83377484 MS |
236 | /* |
237 | * PTE type bits are rather complicated. handle_pte_fault uses pte_present, | |
238 | * pte_none and pte_file to find out the pte type WITHOUT holding the page | |
239 | * table lock. ptep_clear_flush on the other hand uses ipte to | |
240 | * invalidate a given pte. ipte sets the hw invalid bit and clears all tlbs | |
241 | * for the page. The page table entry is set to _PAGE_TYPE_EMPTY afterwards. | |
242 | * This change is done while holding the lock, but the intermediate step | |
243 | * of a previously valid pte with the hw invalid bit set can be observed by | |
244 | * handle_pte_fault. That makes it necessary that all valid pte types with | |
245 | * the hw invalid bit set must be distinguishable from the four pte types | |
246 | * empty, none, swap and file. | |
247 | * | |
248 | * irxt ipte irxt | |
249 | * _PAGE_TYPE_EMPTY 1000 -> 1000 | |
250 | * _PAGE_TYPE_NONE 1001 -> 1001 | |
251 | * _PAGE_TYPE_SWAP 1011 -> 1011 | |
252 | * _PAGE_TYPE_FILE 11?1 -> 11?1 | |
253 | * _PAGE_TYPE_RO 0100 -> 1100 | |
254 | * _PAGE_TYPE_RW 0000 -> 1000 | |
c1821c2e GS |
255 | * _PAGE_TYPE_EX_RO 0110 -> 1110 |
256 | * _PAGE_TYPE_EX_RW 0010 -> 1010 | |
83377484 | 257 | * |
c1821c2e | 258 | * pte_none is true for bits combinations 1000, 1010, 1100, 1110 |
83377484 MS |
259 | * pte_present is true for bits combinations 0000, 0010, 0100, 0110, 1001 |
260 | * pte_file is true for bits combinations 1101, 1111 | |
c1821c2e | 261 | * swap pte is 1011 and 0001, 0011, 0101, 0111 are invalid. |
83377484 MS |
262 | */ |
263 | ||
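To make the bit combinations above easier to follow, here is a small illustrative sketch (not part of the original header; the helper name is made up) that classifies a pte value by the i/r/x/t bits. The real predicates are the pte_none/pte_present/pte_file functions defined further down.

```c
/* Illustrative only: classify a pte by the type bits described above. */
static inline const char *example_pte_type(pte_t pte)
{
	unsigned long v = pte_val(pte);

	if (!(v & _PAGE_INVALID))
		return "valid (rw/ro/ex)";		  /* 0??? */
	if (!(v & _PAGE_SWT))
		return "empty or invalidated valid type"; /* 1000, 1010, 1100, 1110 */
	if (v & _PAGE_RO)
		return "file";				  /* 11?1 */
	return (v & _PAGE_SWX) ? "swap"			  /* 1011 */
			       : "none";		  /* 1001 */
}
```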
1da177e4 LT |
264 | #ifndef __s390x__ |
265 | ||
3610cce8 MS |
266 | /* Bits in the segment table address-space-control-element */ |
267 | #define _ASCE_SPACE_SWITCH 0x80000000UL /* space switch event */ | |
268 | #define _ASCE_ORIGIN_MASK 0x7ffff000UL /* segment table origin */ | |
269 | #define _ASCE_PRIVATE_SPACE 0x100 /* private space control */ | |
270 | #define _ASCE_ALT_EVENT 0x80 /* storage alteration event control */ | |
271 | #define _ASCE_TABLE_LENGTH 0x7f /* 128 x 64 entries = 8k */ | |
1da177e4 | 272 | |
3610cce8 MS |
273 | /* Bits in the segment table entry */ |
274 | #define _SEGMENT_ENTRY_ORIGIN 0x7fffffc0UL /* page table origin */ | |
275 | #define _SEGMENT_ENTRY_INV 0x20 /* invalid segment table entry */ | |
276 | #define _SEGMENT_ENTRY_COMMON 0x10 /* common segment bit */ | |
277 | #define _SEGMENT_ENTRY_PTL 0x0f /* page table length */ | |
1da177e4 | 278 | |
3610cce8 MS |
279 | #define _SEGMENT_ENTRY (_SEGMENT_ENTRY_PTL) |
280 | #define _SEGMENT_ENTRY_EMPTY (_SEGMENT_ENTRY_INV) | |
1da177e4 LT |
281 | |
282 | #else /* __s390x__ */ | |
283 | ||
3610cce8 MS |
284 | /* Bits in the segment/region table address-space-control-element */ |
285 | #define _ASCE_ORIGIN ~0xfffUL/* segment table origin */ | |
286 | #define _ASCE_PRIVATE_SPACE 0x100 /* private space control */ | |
287 | #define _ASCE_ALT_EVENT 0x80 /* storage alteration event control */ | |
288 | #define _ASCE_SPACE_SWITCH 0x40 /* space switch event */ | |
289 | #define _ASCE_REAL_SPACE 0x20 /* real space control */ | |
290 | #define _ASCE_TYPE_MASK 0x0c /* asce table type mask */ | |
291 | #define _ASCE_TYPE_REGION1 0x0c /* region first table type */ | |
292 | #define _ASCE_TYPE_REGION2 0x08 /* region second table type */ | |
293 | #define _ASCE_TYPE_REGION3 0x04 /* region third table type */ | |
294 | #define _ASCE_TYPE_SEGMENT 0x00 /* segment table type */ | |
295 | #define _ASCE_TABLE_LENGTH 0x03 /* region table length */ | |
296 | ||
297 | /* Bits in the region table entry */ | |
298 | #define _REGION_ENTRY_ORIGIN ~0xfffUL/* region/segment table origin */ | |
299 | #define _REGION_ENTRY_INV 0x20 /* invalid region table entry */ | |
300 | #define _REGION_ENTRY_TYPE_MASK 0x0c /* region/segment table type mask */ | |
301 | #define _REGION_ENTRY_TYPE_R1 0x0c /* region first table type */ | |
302 | #define _REGION_ENTRY_TYPE_R2 0x08 /* region second table type */ | |
303 | #define _REGION_ENTRY_TYPE_R3 0x04 /* region third table type */ | |
304 | #define _REGION_ENTRY_LENGTH 0x03 /* region table length */ | |
305 | ||
306 | #define _REGION1_ENTRY (_REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_LENGTH) | |
307 | #define _REGION1_ENTRY_EMPTY (_REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_INV) | |
308 | #define _REGION2_ENTRY (_REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_LENGTH) | |
309 | #define _REGION2_ENTRY_EMPTY (_REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_INV) | |
310 | #define _REGION3_ENTRY (_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_LENGTH) | |
311 | #define _REGION3_ENTRY_EMPTY (_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_INV) | |
312 | ||
1da177e4 | 313 | /* Bits in the segment table entry */ |
3610cce8 MS |
314 | #define _SEGMENT_ENTRY_ORIGIN ~0x7ffUL/* segment table origin */ |
315 | #define _SEGMENT_ENTRY_RO 0x200 /* page protection bit */ | |
316 | #define _SEGMENT_ENTRY_INV 0x20 /* invalid segment table entry */ | |
1da177e4 | 317 | |
3610cce8 MS |
318 | #define _SEGMENT_ENTRY (0) |
319 | #define _SEGMENT_ENTRY_EMPTY (_SEGMENT_ENTRY_INV) | |
320 | ||
321 | #endif /* __s390x__ */ | |
1da177e4 LT |
322 | |
323 | /* | |
3610cce8 MS |
324 | * A user page table pointer has the space-switch-event bit, the |
325 | * private-space-control bit and the storage-alteration-event-control | |
326 | * bit set. A kernel page table pointer doesn't need them. | |
1da177e4 | 327 | */ |
3610cce8 MS |
328 | #define _ASCE_USER_BITS (_ASCE_SPACE_SWITCH | _ASCE_PRIVATE_SPACE | \ |
329 | _ASCE_ALT_EVENT) | |
1da177e4 | 330 | |
3610cce8 | 331 | /* Bits in the storage key */ |
1da177e4 LT |
332 | #define _PAGE_CHANGED 0x02 /* HW changed bit */ |
333 | #define _PAGE_REFERENCED 0x04 /* HW referenced bit */ | |
334 | ||
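For illustration (not part of the original header; the helper name is made up), the referenced bit can be read from the storage key in the same way page_test_dirty further down reads the changed bit:

```c
/* Illustrative only: test the R bit in the page's storage key. */
static inline int example_page_referenced(struct page *page)
{
	return (page_get_storage_key(page_to_phys(page)) &
		_PAGE_REFERENCED) != 0;
}
```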
1da177e4 | 335 | /* |
9282ed92 | 336 | * Page protection definitions. |
1da177e4 | 337 | */ |
9282ed92 GS |
338 | #define PAGE_NONE __pgprot(_PAGE_TYPE_NONE) |
339 | #define PAGE_RO __pgprot(_PAGE_TYPE_RO) | |
340 | #define PAGE_RW __pgprot(_PAGE_TYPE_RW) | |
c1821c2e GS |
341 | #define PAGE_EX_RO __pgprot(_PAGE_TYPE_EX_RO) |
342 | #define PAGE_EX_RW __pgprot(_PAGE_TYPE_EX_RW) | |
9282ed92 GS |
343 | |
344 | #define PAGE_KERNEL PAGE_RW | |
345 | #define PAGE_COPY PAGE_RO | |
1da177e4 LT |
346 | |
347 | /* | |
c1821c2e GS |
348 | * Depending on the EXEC_PROTECT option, s390 can do execute protection. | |
349 | * Write permission always implies read permission. In theory, execute-only | |
350 | * could be implemented with a primary/secondary page table, but it would | |
351 | * cost an additional bit in the pte to distinguish all the different pte | |
352 | * types. To avoid that, execute permission currently implies read | |
353 | * permission as well. | |
1da177e4 LT |
354 | */ |
355 | /*xwr*/ | |
9282ed92 GS |
356 | #define __P000 PAGE_NONE |
357 | #define __P001 PAGE_RO | |
358 | #define __P010 PAGE_RO | |
359 | #define __P011 PAGE_RO | |
c1821c2e GS |
360 | #define __P100 PAGE_EX_RO |
361 | #define __P101 PAGE_EX_RO | |
362 | #define __P110 PAGE_EX_RO | |
363 | #define __P111 PAGE_EX_RO | |
9282ed92 GS |
364 | |
365 | #define __S000 PAGE_NONE | |
366 | #define __S001 PAGE_RO | |
367 | #define __S010 PAGE_RW | |
368 | #define __S011 PAGE_RW | |
c1821c2e GS |
369 | #define __S100 PAGE_EX_RO |
370 | #define __S101 PAGE_EX_RO | |
371 | #define __S110 PAGE_EX_RW | |
372 | #define __S111 PAGE_EX_RW | |
373 | ||
374 | #ifndef __s390x__ | |
3610cce8 | 375 | # define PxD_SHADOW_SHIFT 1 |
c1821c2e | 376 | #else /* __s390x__ */ |
3610cce8 | 377 | # define PxD_SHADOW_SHIFT 2 |
c1821c2e GS |
378 | #endif /* __s390x__ */ |
379 | ||
380 | static inline struct page *get_shadow_page(struct page *page) | |
381 | { | |
3610cce8 MS |
382 | if (s390_noexec && page->index) |
383 | return virt_to_page((void *)(addr_t) page->index); | |
c1821c2e GS |
384 | return NULL; |
385 | } | |
386 | ||
3610cce8 | 387 | static inline void *get_shadow_pte(void *table) |
c1821c2e | 388 | { |
3610cce8 MS |
389 | unsigned long addr, offset; |
390 | struct page *page; | |
c1821c2e | 391 | |
3610cce8 MS |
392 | addr = (unsigned long) table; |
393 | offset = addr & (PAGE_SIZE - 1); | |
394 | page = virt_to_page((void *)(addr ^ offset)); | |
395 | return (void *)(addr_t)(page->index ? (page->index | offset) : 0UL); | |
c1821c2e GS |
396 | } |
397 | ||
3610cce8 | 398 | static inline void *get_shadow_table(void *table) |
c1821c2e | 399 | { |
3610cce8 MS |
400 | unsigned long addr, offset; |
401 | struct page *page; | |
402 | ||
403 | addr = (unsigned long) table; | |
404 | offset = addr & ((PAGE_SIZE << PxD_SHADOW_SHIFT) - 1); | |
405 | page = virt_to_page((void *)(addr ^ offset)); | |
406 | return (void *)(addr_t)(page->index ? (page->index | offset) : 0UL); | |
c1821c2e | 407 | } |
1da177e4 LT |
408 | |
409 | /* | |
410 | * Certain architectures need to do special things when PTEs | |
411 | * within a page table are directly modified. Thus, the following | |
412 | * hook is made available. | |
413 | */ | |
ba8a9229 MS |
414 | static inline void set_pte_at(struct mm_struct *mm, unsigned long addr, |
415 | pte_t *pteptr, pte_t pteval) | |
1da177e4 | 416 | { |
c1821c2e GS |
417 | pte_t *shadow_pte = get_shadow_pte(pteptr); |
418 | ||
1da177e4 | 419 | *pteptr = pteval; |
c1821c2e GS |
420 | if (shadow_pte) { |
421 | if (!(pte_val(pteval) & _PAGE_INVALID) && | |
422 | (pte_val(pteval) & _PAGE_SWX)) | |
423 | pte_val(*shadow_pte) = pte_val(pteval) | _PAGE_RO; | |
424 | else | |
425 | pte_val(*shadow_pte) = _PAGE_TYPE_EMPTY; | |
426 | } | |
1da177e4 | 427 | } |
1da177e4 LT |
428 | |
429 | /* | |
430 | * pgd/pmd/pte query functions | |
431 | */ | |
432 | #ifndef __s390x__ | |
433 | ||
4448aaf0 AB |
434 | static inline int pgd_present(pgd_t pgd) { return 1; } |
435 | static inline int pgd_none(pgd_t pgd) { return 0; } | |
436 | static inline int pgd_bad(pgd_t pgd) { return 0; } | |
1da177e4 | 437 | |
1da177e4 LT |
438 | #else /* __s390x__ */ |
439 | ||
4448aaf0 | 440 | static inline int pgd_present(pgd_t pgd) |
1da177e4 | 441 | { |
3610cce8 | 442 | return pgd_val(pgd) & _REGION_ENTRY_ORIGIN; |
1da177e4 LT |
443 | } |
444 | ||
4448aaf0 | 445 | static inline int pgd_none(pgd_t pgd) |
1da177e4 | 446 | { |
3610cce8 | 447 | return pgd_val(pgd) & _REGION_ENTRY_INV; |
1da177e4 LT |
448 | } |
449 | ||
4448aaf0 | 450 | static inline int pgd_bad(pgd_t pgd) |
1da177e4 | 451 | { |
3610cce8 MS |
452 | unsigned long mask = ~_REGION_ENTRY_ORIGIN & ~_REGION_ENTRY_INV; |
453 | return (pgd_val(pgd) & mask) != _REGION3_ENTRY; | |
1da177e4 LT |
454 | } |
455 | ||
3610cce8 MS |
456 | #endif /* __s390x__ */ |
457 | ||
4448aaf0 | 458 | static inline int pmd_present(pmd_t pmd) |
1da177e4 | 459 | { |
3610cce8 | 460 | return pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN; |
1da177e4 LT |
461 | } |
462 | ||
4448aaf0 | 463 | static inline int pmd_none(pmd_t pmd) |
1da177e4 | 464 | { |
3610cce8 | 465 | return pmd_val(pmd) & _SEGMENT_ENTRY_INV; |
1da177e4 LT |
466 | } |
467 | ||
4448aaf0 | 468 | static inline int pmd_bad(pmd_t pmd) |
1da177e4 | 469 | { |
3610cce8 MS |
470 | unsigned long mask = ~_SEGMENT_ENTRY_ORIGIN & ~_SEGMENT_ENTRY_INV; |
471 | return (pmd_val(pmd) & mask) != _SEGMENT_ENTRY; | |
1da177e4 LT |
472 | } |
473 | ||
4448aaf0 | 474 | static inline int pte_none(pte_t pte) |
1da177e4 | 475 | { |
83377484 | 476 | return (pte_val(pte) & _PAGE_INVALID) && !(pte_val(pte) & _PAGE_SWT); |
1da177e4 LT |
477 | } |
478 | ||
4448aaf0 | 479 | static inline int pte_present(pte_t pte) |
1da177e4 | 480 | { |
83377484 MS |
481 | unsigned long mask = _PAGE_RO | _PAGE_INVALID | _PAGE_SWT | _PAGE_SWX; |
482 | return (pte_val(pte) & mask) == _PAGE_TYPE_NONE || | |
483 | (!(pte_val(pte) & _PAGE_INVALID) && | |
484 | !(pte_val(pte) & _PAGE_SWT)); | |
1da177e4 LT |
485 | } |
486 | ||
4448aaf0 | 487 | static inline int pte_file(pte_t pte) |
1da177e4 | 488 | { |
83377484 MS |
489 | unsigned long mask = _PAGE_RO | _PAGE_INVALID | _PAGE_SWT; |
490 | return (pte_val(pte) & mask) == _PAGE_TYPE_FILE; | |
1da177e4 LT |
491 | } |
492 | ||
ba8a9229 MS |
493 | #define __HAVE_ARCH_PTE_SAME |
494 | #define pte_same(a,b) (pte_val(a) == pte_val(b)) | |
1da177e4 LT |
495 | |
496 | /* | |
497 | * query functions pte_write/pte_dirty/pte_young only work if | |
498 | * pte_present() is true. Undefined behaviour if not.. | |
499 | */ | |
4448aaf0 | 500 | static inline int pte_write(pte_t pte) |
1da177e4 LT |
501 | { |
502 | return (pte_val(pte) & _PAGE_RO) == 0; | |
503 | } | |
504 | ||
4448aaf0 | 505 | static inline int pte_dirty(pte_t pte) |
1da177e4 LT |
506 | { |
507 | /* A pte is neither clean nor dirty on s/390. The dirty bit | |
508 | * is in the storage key. See page_test_and_clear_dirty for | |
509 | * details. | |
510 | */ | |
511 | return 0; | |
512 | } | |
513 | ||
4448aaf0 | 514 | static inline int pte_young(pte_t pte) |
1da177e4 LT |
515 | { |
516 | /* A pte is neither young nor old on s/390. The young bit | |
517 | * is in the storage key. See page_test_and_clear_young for | |
518 | * details. | |
519 | */ | |
520 | return 0; | |
521 | } | |
522 | ||
1da177e4 LT |
523 | /* |
524 | * pgd/pmd/pte modification functions | |
525 | */ | |
526 | ||
527 | #ifndef __s390x__ | |
528 | ||
4448aaf0 | 529 | static inline void pgd_clear(pgd_t * pgdp) { } |
1da177e4 | 530 | |
c1821c2e | 531 | static inline void pmd_clear_kernel(pmd_t * pmdp) |
1da177e4 | 532 | { |
3610cce8 MS |
533 | pmd_val(pmdp[0]) = _SEGMENT_ENTRY_EMPTY; |
534 | pmd_val(pmdp[1]) = _SEGMENT_ENTRY_EMPTY; | |
535 | pmd_val(pmdp[2]) = _SEGMENT_ENTRY_EMPTY; | |
536 | pmd_val(pmdp[3]) = _SEGMENT_ENTRY_EMPTY; | |
c1821c2e GS |
537 | } |
538 | ||
1da177e4 LT |
539 | #else /* __s390x__ */ |
540 | ||
c1821c2e | 541 | static inline void pgd_clear_kernel(pgd_t * pgdp) |
1da177e4 | 542 | { |
3610cce8 | 543 | pgd_val(*pgdp) = _REGION3_ENTRY_EMPTY; |
1da177e4 LT |
544 | } |
545 | ||
c1821c2e GS |
546 | static inline void pgd_clear(pgd_t * pgdp) |
547 | { | |
3610cce8 | 548 | pgd_t *shadow_pgd = get_shadow_table(pgdp); |
c1821c2e GS |
549 | |
550 | pgd_clear_kernel(pgdp); | |
551 | if (shadow_pgd) | |
552 | pgd_clear_kernel(shadow_pgd); | |
553 | } | |
554 | ||
555 | static inline void pmd_clear_kernel(pmd_t * pmdp) | |
1da177e4 | 556 | { |
3610cce8 MS |
557 | pmd_val(*pmdp) = _SEGMENT_ENTRY_EMPTY; |
558 | pmd_val1(*pmdp) = _SEGMENT_ENTRY_EMPTY; | |
1da177e4 LT |
559 | } |
560 | ||
3610cce8 MS |
561 | #endif /* __s390x__ */ |
562 | ||
c1821c2e GS |
563 | static inline void pmd_clear(pmd_t * pmdp) |
564 | { | |
3610cce8 | 565 | pmd_t *shadow_pmd = get_shadow_table(pmdp); |
c1821c2e GS |
566 | |
567 | pmd_clear_kernel(pmdp); | |
568 | if (shadow_pmd) | |
569 | pmd_clear_kernel(shadow_pmd); | |
570 | } | |
571 | ||
4448aaf0 | 572 | static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) |
1da177e4 | 573 | { |
c1821c2e GS |
574 | pte_t *shadow_pte = get_shadow_pte(ptep); |
575 | ||
9282ed92 | 576 | pte_val(*ptep) = _PAGE_TYPE_EMPTY; |
c1821c2e GS |
577 | if (shadow_pte) |
578 | pte_val(*shadow_pte) = _PAGE_TYPE_EMPTY; | |
1da177e4 LT |
579 | } |
580 | ||
581 | /* | |
582 | * The following pte modification functions only work if | |
583 | * pte_present() is true. Undefined behaviour if not.. | |
584 | */ | |
4448aaf0 | 585 | static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) |
1da177e4 LT |
586 | { |
587 | pte_val(pte) &= PAGE_MASK; | |
588 | pte_val(pte) |= pgprot_val(newprot); | |
589 | return pte; | |
590 | } | |
591 | ||
4448aaf0 | 592 | static inline pte_t pte_wrprotect(pte_t pte) |
1da177e4 | 593 | { |
9282ed92 | 594 | /* Do not clobber _PAGE_TYPE_NONE pages! */ |
1da177e4 LT |
595 | if (!(pte_val(pte) & _PAGE_INVALID)) |
596 | pte_val(pte) |= _PAGE_RO; | |
597 | return pte; | |
598 | } | |
599 | ||
4448aaf0 | 600 | static inline pte_t pte_mkwrite(pte_t pte) |
1da177e4 LT |
601 | { |
602 | pte_val(pte) &= ~_PAGE_RO; | |
603 | return pte; | |
604 | } | |
605 | ||
4448aaf0 | 606 | static inline pte_t pte_mkclean(pte_t pte) |
1da177e4 LT |
607 | { |
608 | /* The only user of pte_mkclean is the fork() code. | |
609 | We must *not* clear the *physical* page dirty bit | |
610 | just because fork() wants to clear the dirty bit in | |
611 | *one* of the page's mappings. So we just do nothing. */ | |
612 | return pte; | |
613 | } | |
614 | ||
4448aaf0 | 615 | static inline pte_t pte_mkdirty(pte_t pte) |
1da177e4 LT |
616 | { |
617 | /* We do not explicitly set the dirty bit because the | |
618 | * sske instruction is slow. It is faster to let the | |
619 | * next instruction set the dirty bit. | |
620 | */ | |
621 | return pte; | |
622 | } | |
623 | ||
4448aaf0 | 624 | static inline pte_t pte_mkold(pte_t pte) |
1da177e4 LT |
625 | { |
626 | /* S/390 doesn't keep its dirty/referenced bit in the pte. | |
627 | * There is no point in clearing the real referenced bit. | |
628 | */ | |
629 | return pte; | |
630 | } | |
631 | ||
4448aaf0 | 632 | static inline pte_t pte_mkyoung(pte_t pte) |
1da177e4 LT |
633 | { |
634 | /* S/390 doesn't keep its dirty/referenced bit in the pte. | |
635 | * There is no point in setting the real referenced bit. | |
636 | */ | |
637 | return pte; | |
638 | } | |
639 | ||
ba8a9229 MS |
640 | #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG |
641 | static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, | |
642 | unsigned long addr, pte_t *ptep) | |
1da177e4 LT |
643 | { |
644 | return 0; | |
645 | } | |
646 | ||
ba8a9229 MS |
647 | #define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH |
648 | static inline int ptep_clear_flush_young(struct vm_area_struct *vma, | |
649 | unsigned long address, pte_t *ptep) | |
1da177e4 LT |
650 | { |
651 | /* No need to flush TLB; bits are in storage key */ | |
ba8a9229 | 652 | return 0; |
1da177e4 LT |
653 | } |
654 | ||
9282ed92 | 655 | static inline void __ptep_ipte(unsigned long address, pte_t *ptep) |
1da177e4 | 656 | { |
9282ed92 | 657 | if (!(pte_val(*ptep) & _PAGE_INVALID)) { |
1da177e4 | 658 | #ifndef __s390x__ |
1da177e4 LT |
659 | /* S390 has 1mb segments, we are emulating 4MB segments */ |
660 | pte_t *pto = (pte_t *) (((unsigned long) ptep) & 0x7ffffc00); | |
9282ed92 GS |
661 | #else |
662 | /* ipte in zarch mode can do the math */ | |
663 | pte_t *pto = ptep; | |
664 | #endif | |
94c12cc7 MS |
665 | asm volatile( |
666 | " ipte %2,%3" | |
667 | : "=m" (*ptep) : "m" (*ptep), | |
668 | "a" (pto), "a" (address)); | |
1da177e4 | 669 | } |
9282ed92 GS |
670 | pte_val(*ptep) = _PAGE_TYPE_EMPTY; |
671 | } | |
672 | ||
f0e47c22 | 673 | static inline void ptep_invalidate(unsigned long address, pte_t *ptep) |
9282ed92 | 674 | { |
9282ed92 | 675 | __ptep_ipte(address, ptep); |
f0e47c22 MS |
676 | ptep = get_shadow_pte(ptep); |
677 | if (ptep) | |
678 | __ptep_ipte(address, ptep); | |
679 | } | |
680 | ||
ba8a9229 MS |
681 | /* |
682 | * This is hard to understand. ptep_get_and_clear and ptep_clear_flush | |
683 | * both clear the TLB for the unmapped pte. The reason is that | |
684 | * ptep_get_and_clear is used in common code (e.g. change_pte_range) | |
685 | * to modify an active pte. The sequence is | |
686 | * 1) ptep_get_and_clear | |
687 | * 2) set_pte_at | |
688 | * 3) flush_tlb_range | |
689 | * On s390 the tlb needs to get flushed with the modification of the pte | |
690 | * if the pte is active. The only way how this can be implemented is to | |
691 | * have ptep_get_and_clear do the tlb flush. In exchange flush_tlb_range | |
692 | * is a nop. | |
693 | */ | |
694 | #define __HAVE_ARCH_PTEP_GET_AND_CLEAR | |
695 | #define ptep_get_and_clear(__mm, __address, __ptep) \ | |
696 | ({ \ | |
697 | pte_t __pte = *(__ptep); \ | |
698 | if (atomic_read(&(__mm)->mm_users) > 1 || \ | |
699 | (__mm) != current->active_mm) \ | |
700 | ptep_invalidate(__address, __ptep); \ | |
701 | else \ | |
702 | pte_clear((__mm), (__address), (__ptep)); \ | |
703 | __pte; \ | |
704 | }) | |
705 | ||
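A minimal sketch (not part of the original header; the function name is hypothetical) of the common-code sequence described in the comment above, using the primitives defined in this file:

```c
/*
 * Illustrative only: modify an active pte the way common code such as
 * change_pte_range() does. Step 1 already flushes the TLB on s390,
 * which is why flush_tlb_range() afterwards can be a nop.
 */
static inline void example_change_prot(struct mm_struct *mm,
				       unsigned long addr, pte_t *ptep,
				       pgprot_t newprot)
{
	pte_t pte = ptep_get_and_clear(mm, addr, ptep);		/* 1 */
	set_pte_at(mm, addr, ptep, pte_modify(pte, newprot));	/* 2 */
	/* 3) flush_tlb_range() - a nop on s390 */
}
```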
706 | #define __HAVE_ARCH_PTEP_CLEAR_FLUSH | |
f0e47c22 MS |
707 | static inline pte_t ptep_clear_flush(struct vm_area_struct *vma, |
708 | unsigned long address, pte_t *ptep) | |
709 | { | |
710 | pte_t pte = *ptep; | |
711 | ptep_invalidate(address, ptep); | |
1da177e4 LT |
712 | return pte; |
713 | } | |
714 | ||
ba8a9229 MS |
715 | /* |
716 | * The batched pte unmap code uses ptep_get_and_clear_full to clear the | |
717 | * ptes. Here an optimization is possible. tlb_gather_mmu flushes all | |
718 | * tlbs of an mm if it can guarantee that the ptes of the mm_struct | |
719 | * cannot be accessed while the batched unmap is running. In this case | |
720 | * full==1 and a simple pte_clear is enough. See tlb.h. | |
721 | */ | |
722 | #define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL | |
723 | static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm, | |
724 | unsigned long addr, | |
725 | pte_t *ptep, int full) | |
1da177e4 | 726 | { |
ba8a9229 MS |
727 | pte_t pte = *ptep; |
728 | ||
729 | if (full) | |
730 | pte_clear(mm, addr, ptep); | |
731 | else | |
732 | ptep_invalidate(addr, ptep); | |
733 | return pte; | |
1da177e4 LT |
734 | } |
735 | ||
ba8a9229 MS |
736 | #define __HAVE_ARCH_PTEP_SET_WRPROTECT |
737 | #define ptep_set_wrprotect(__mm, __addr, __ptep) \ | |
738 | ({ \ | |
739 | pte_t __pte = *(__ptep); \ | |
740 | if (pte_write(__pte)) { \ | |
741 | if (atomic_read(&(__mm)->mm_users) > 1 || \ | |
742 | (__mm) != current->active_mm) \ | |
743 | ptep_invalidate(__addr, __ptep); \ | |
744 | set_pte_at(__mm, __addr, __ptep, pte_wrprotect(__pte)); \ | |
745 | } \ | |
746 | }) | |
747 | ||
748 | #define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS | |
f0e47c22 MS |
749 | #define ptep_set_access_flags(__vma, __addr, __ptep, __entry, __dirty) \ |
750 | ({ \ | |
751 | int __changed = !pte_same(*(__ptep), __entry); \ | |
752 | if (__changed) { \ | |
753 | ptep_invalidate(__addr, __ptep); \ | |
754 | set_pte_at((__vma)->vm_mm, __addr, __ptep, __entry); \ | |
755 | } \ | |
756 | __changed; \ | |
8dab5241 | 757 | }) |
1da177e4 LT |
758 | |
759 | /* | |
760 | * Test and clear dirty bit in storage key. | |
761 | * We can't clear the changed bit atomically. This is a potential | |
762 | * race against modification of the referenced bit. This function | |
763 | * should therefore only be called if the page is not mapped in any | |
764 | * address space. | |
765 | */ | |
ba8a9229 | 766 | #define __HAVE_ARCH_PAGE_TEST_DIRTY |
6c210482 | 767 | static inline int page_test_dirty(struct page *page) |
2dcea57a | 768 | { |
6c210482 MS |
769 | return (page_get_storage_key(page_to_phys(page)) & _PAGE_CHANGED) != 0; |
770 | } | |
2dcea57a | 771 | |
ba8a9229 | 772 | #define __HAVE_ARCH_PAGE_CLEAR_DIRTY |
6c210482 MS |
773 | static inline void page_clear_dirty(struct page *page) |
774 | { | |
775 | page_set_storage_key(page_to_phys(page), PAGE_DEFAULT_KEY); | |
2dcea57a | 776 | } |
1da177e4 LT |
777 | |
778 | /* | |
779 | * Test and clear referenced bit in storage key. | |
780 | */ | |
ba8a9229 | 781 | #define __HAVE_ARCH_PAGE_TEST_AND_CLEAR_YOUNG |
2dcea57a HC |
782 | static inline int page_test_and_clear_young(struct page *page) |
783 | { | |
0b2b6e1d | 784 | unsigned long physpage = page_to_phys(page); |
2dcea57a HC |
785 | int ccode; |
786 | ||
0b2b6e1d HC |
787 | asm volatile( |
788 | " rrbe 0,%1\n" | |
789 | " ipm %0\n" | |
790 | " srl %0,28\n" | |
2dcea57a HC |
791 | : "=d" (ccode) : "a" (physpage) : "cc" ); |
792 | return ccode & 2; | |
793 | } | |
1da177e4 LT |
794 | |
795 | /* | |
796 | * Conversion functions: convert a page and protection to a page entry, | |
797 | * and a page entry and page directory to the page they refer to. | |
798 | */ | |
799 | static inline pte_t mk_pte_phys(unsigned long physpage, pgprot_t pgprot) | |
800 | { | |
801 | pte_t __pte; | |
802 | pte_val(__pte) = physpage + pgprot_val(pgprot); | |
803 | return __pte; | |
804 | } | |
805 | ||
2dcea57a HC |
806 | static inline pte_t mk_pte(struct page *page, pgprot_t pgprot) |
807 | { | |
0b2b6e1d | 808 | unsigned long physpage = page_to_phys(page); |
1da177e4 | 809 | |
2dcea57a HC |
810 | return mk_pte_phys(physpage, pgprot); |
811 | } | |
812 | ||
813 | static inline pte_t pfn_pte(unsigned long pfn, pgprot_t pgprot) | |
814 | { | |
815 | unsigned long physpage = __pa((pfn) << PAGE_SHIFT); | |
816 | ||
817 | return mk_pte_phys(physpage, pgprot); | |
818 | } | |
1da177e4 | 819 | |
1da177e4 LT |
820 | #ifdef __s390x__ |
821 | ||
2dcea57a HC |
822 | static inline pmd_t pfn_pmd(unsigned long pfn, pgprot_t pgprot) |
823 | { | |
824 | unsigned long physpage = __pa((pfn) << PAGE_SHIFT); | |
825 | ||
826 | return __pmd(physpage + pgprot_val(pgprot)); | |
827 | } | |
1da177e4 LT |
828 | |
829 | #endif /* __s390x__ */ | |
830 | ||
831 | #define pte_pfn(x) (pte_val(x) >> PAGE_SHIFT) | |
832 | #define pte_page(x) pfn_to_page(pte_pfn(x)) | |
833 | ||
46a82b2d | 834 | #define pmd_page_vaddr(pmd) (pmd_val(pmd) & PAGE_MASK) |
1da177e4 | 835 | |
0b2b6e1d | 836 | #define pmd_page(pmd) pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT) |
1da177e4 | 837 | |
46a82b2d DM |
838 | #define pgd_page_vaddr(pgd) (pgd_val(pgd) & PAGE_MASK) |
839 | ||
0b2b6e1d | 840 | #define pgd_page(pgd) pfn_to_page(pgd_val(pgd) >> PAGE_SHIFT) |
1da177e4 LT |
841 | |
842 | /* to find an entry in a page-table-directory */ | |
843 | #define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1)) | |
844 | #define pgd_offset(mm, address) ((mm)->pgd+pgd_index(address)) | |
845 | ||
846 | /* to find an entry in a kernel page-table-directory */ | |
847 | #define pgd_offset_k(address) pgd_offset(&init_mm, address) | |
848 | ||
849 | #ifndef __s390x__ | |
850 | ||
851 | /* Find an entry in the second-level page table.. */ | |
4448aaf0 | 852 | static inline pmd_t * pmd_offset(pgd_t * dir, unsigned long address) |
1da177e4 LT |
853 | { |
854 | return (pmd_t *) dir; | |
855 | } | |
856 | ||
857 | #else /* __s390x__ */ | |
858 | ||
859 | /* Find an entry in the second-level page table.. */ | |
860 | #define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1)) | |
861 | #define pmd_offset(dir,addr) \ | |
46a82b2d | 862 | ((pmd_t *) pgd_page_vaddr(*(dir)) + pmd_index(addr)) |
1da177e4 LT |
863 | |
864 | #endif /* __s390x__ */ | |
865 | ||
866 | /* Find an entry in the third-level page table.. */ | |
867 | #define pte_index(address) (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE-1)) | |
868 | #define pte_offset_kernel(pmd, address) \ | |
46a82b2d | 869 | ((pte_t *) pmd_page_vaddr(*(pmd)) + pte_index(address)) |
1da177e4 LT |
870 | #define pte_offset_map(pmd, address) pte_offset_kernel(pmd, address) |
871 | #define pte_offset_map_nested(pmd, address) pte_offset_kernel(pmd, address) | |
872 | #define pte_unmap(pte) do { } while (0) | |
873 | #define pte_unmap_nested(pte) do { } while (0) | |
874 | ||
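Putting the lookup macros together, a full software walk of the page table looks roughly like this (illustrative sketch only; the helper name is made up):

```c
/* Illustrative only: walk the page table for one address in mm. */
static inline pte_t *example_lookup_pte(struct mm_struct *mm,
					unsigned long address)
{
	pgd_t *pgd = pgd_offset(mm, address);
	pmd_t *pmd;

	if (pgd_none(*pgd) || pgd_bad(*pgd))
		return NULL;
	pmd = pmd_offset(pgd, address);
	if (pmd_none(*pmd) || pmd_bad(*pmd))
		return NULL;
	return pte_offset_kernel(pmd, address);
}
```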
875 | /* | |
876 | * 31 bit swap entry format: | |
877 | * A page-table entry has some bits we have to treat in a special way. | |
878 | * Bits 0, 20 and 23 have to be zero, otherwise a specification | |
879 | * exception will occur instead of a page translation exception. The | |
880 | * specification exception has the bad habit not to store necessary | |
881 | * information in the lowcore. | |
882 | * Bit 21 and bit 22 are the page invalid bit and the page protection | |
883 | * bit. We set both to indicate a swapped page. | |
884 | * Bit 30 and 31 are used to distinguish the different page types. For | |
885 | * a swapped page these bits need to be zero. | |
886 | * This leaves the bits 1-19 and bits 24-29 to store type and offset. | |
887 | * We use the 5 bits from 25-29 for the type and the 20 bits from 1-19 | |
888 | * plus 24 for the offset. | |
889 | * 0| offset |0110|o|type |00| | |
890 | * 0 0000000001111111111 2222 2 22222 33 | |
891 | * 0 1234567890123456789 0123 4 56789 01 | |
892 | * | |
893 | * 64 bit swap entry format: | |
894 | * A page-table entry has some bits we have to treat in a special way. | |
895 | * Bits 52 and 55 have to be zero, otherwise a specification | |
896 | * exception will occur instead of a page translation exception. The | |
897 | * specification exception has the bad habit not to store necessary | |
898 | * information in the lowcore. | |
899 | * Bit 53 and bit 54 are the page invalid bit and the page protection | |
900 | * bit. We set both to indicate a swapped page. | |
901 | * Bit 62 and 63 are used to distinguish the different page types. For | |
902 | * a swapped page these bits need to be zero. | |
903 | * This leaves the bits 0-51 and bits 56-61 to store type and offset. | |
904 | * We use the 5 bits from 57-61 for the type and the 53 bits from 0-51 | |
905 | * plus 56 for the offset. | |
906 | * | offset |0110|o|type |00| | |
907 | * 0000000000111111111122222222223333333333444444444455 5555 5 55566 66 | |
908 | * 0123456789012345678901234567890123456789012345678901 2345 6 78901 23 | |
909 | */ | |
910 | #ifndef __s390x__ | |
911 | #define __SWP_OFFSET_MASK (~0UL >> 12) | |
912 | #else | |
913 | #define __SWP_OFFSET_MASK (~0UL >> 11) | |
914 | #endif | |
4448aaf0 | 915 | static inline pte_t mk_swap_pte(unsigned long type, unsigned long offset) |
1da177e4 LT |
916 | { |
917 | pte_t pte; | |
918 | offset &= __SWP_OFFSET_MASK; | |
9282ed92 | 919 | pte_val(pte) = _PAGE_TYPE_SWAP | ((type & 0x1f) << 2) | |
1da177e4 LT |
920 | ((offset & 1UL) << 7) | ((offset & ~1UL) << 11); |
921 | return pte; | |
922 | } | |
923 | ||
924 | #define __swp_type(entry) (((entry).val >> 2) & 0x1f) | |
925 | #define __swp_offset(entry) (((entry).val >> 11) | (((entry).val >> 7) & 1)) | |
926 | #define __swp_entry(type,offset) ((swp_entry_t) { pte_val(mk_swap_pte((type),(offset))) }) | |
927 | ||
928 | #define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) }) | |
929 | #define __swp_entry_to_pte(x) ((pte_t) { (x).val }) | |
930 | ||
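A worked example (not part of the original header) of the 31 bit swap encoding defined above:

```c
/*
 * Illustrative only, 31 bit: __swp_entry(3, 0x1234)
 *   type 3           -> (3 & 0x1f) << 2        = 0x0000000c
 *   offset bit 0     -> (0x1234 & 1) << 7      = 0x00000000
 *   offset bits 1..  -> (0x1234 & ~1UL) << 11  = 0x0091a000
 *   _PAGE_TYPE_SWAP                            = 0x00000403
 *   pte_val                                    = 0x0091a40f
 * __swp_type() and __swp_offset() recover 3 and 0x1234 from this value.
 */
```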
931 | #ifndef __s390x__ | |
932 | # define PTE_FILE_MAX_BITS 26 | |
933 | #else /* __s390x__ */ | |
934 | # define PTE_FILE_MAX_BITS 59 | |
935 | #endif /* __s390x__ */ | |
936 | ||
937 | #define pte_to_pgoff(__pte) \ | |
938 | ((((__pte).pte >> 12) << 7) + (((__pte).pte >> 1) & 0x7f)) | |
939 | ||
940 | #define pgoff_to_pte(__off) \ | |
941 | ((pte_t) { ((((__off) & 0x7f) << 1) + (((__off) >> 7) << 12)) \ | |
9282ed92 | 942 | | _PAGE_TYPE_FILE }) |
1da177e4 LT |
943 | |
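And a corresponding worked example (illustrative only) for the 31 bit nonlinear file pte encoding:

```c
/*
 * Illustrative only, 31 bit: pgoff_to_pte(0x123456)
 *   low 7 offset bits  -> 0x56 << 1     = 0x000000ac
 *   high offset bits   -> 0x2468 << 12  = 0x02468000
 *   _PAGE_TYPE_FILE                     = 0x00000601
 *   pte_val                             = 0x024686ad
 * pte_to_pgoff() recovers 0x123456 from this value.
 */
```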
944 | #endif /* !__ASSEMBLY__ */ | |
945 | ||
946 | #define kern_addr_valid(addr) (1) | |
947 | ||
f4eb07c1 HC |
948 | extern int add_shared_memory(unsigned long start, unsigned long size); |
949 | extern int remove_shared_memory(unsigned long start, unsigned long size); | |
950 | ||
1da177e4 LT |
951 | /* |
952 | * No page table caches to initialise | |
953 | */ | |
954 | #define pgtable_cache_init() do { } while (0) | |
955 | ||
f4eb07c1 HC |
956 | #define __HAVE_ARCH_MEMMAP_INIT |
957 | extern void memmap_init(unsigned long, int, unsigned long, unsigned long); | |
958 | ||
1da177e4 LT |
959 | #include <asm-generic/pgtable.h> |
960 | ||
961 | #endif /* _ASM_S390_PGTABLE_H */ | |
962 |