/*
 *  Copyright IBM Corp. 1999, 2000
 *  Author(s): Hartmut Penner (hp@de.ibm.com)
 *             Ulrich Weigand (weigand@de.ibm.com)
 *             Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 *  Derived from "include/asm-i386/pgtable.h"
 */
#ifndef _ASM_S390_PGTABLE_H
#define _ASM_S390_PGTABLE_H
/*
 * The Linux memory management assumes a three-level page table setup. For
 * s390 31 bit we "fold" the mid level into the top-level page table, so
 * that we physically have the same two-level page table as the s390 mmu
 * expects in 31 bit mode. For s390 64 bit we use three of the five levels
 * the hardware provides (region first and region second tables are not
 * used).
 *
 * The "pgd_xxx()" functions are trivial for a folded two-level
 * setup: the pgd is never bad, and a pmd always exists (as it's folded
 * into the pgd entry).
 *
 * This file contains the functions and defines necessary to modify and use
 * the S390 page table tree.
 */
#ifndef __ASSEMBLY__
#include <linux/sched.h>
#include <linux/mm_types.h>
#include <linux/page-flags.h>
#include <asm/bug.h>
#include <asm/page.h>

extern pgd_t swapper_pg_dir[] __attribute__ ((aligned (4096)));
extern void paging_init(void);
extern void vmem_map_init(void);
/*
 * The S390 doesn't have any external MMU info: the kernel page
 * tables contain all the necessary information.
 */
#define update_mmu_cache(vma, address, ptep)     do { } while (0)
#define update_mmu_cache_pmd(vma, address, ptep) do { } while (0)
/*
 * ZERO_PAGE is a global shared page that is always zero; used
 * for zero-mapped memory areas etc..
 */

extern unsigned long empty_zero_page;
extern unsigned long zero_page_mask;

#define ZERO_PAGE(vaddr) \
        (virt_to_page((void *)(empty_zero_page + \
         (((unsigned long)(vaddr)) & zero_page_mask))))
#define __HAVE_COLOR_ZERO_PAGE
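
/*
 * Illustration only, not part of the kernel API: with colored zero pages
 * two different user addresses may resolve to two different physical zero
 * pages. zero_page_mask selects the cache color bits, so addresses that
 * agree in those bits share one zero page. A minimal sketch:
 */
#if 0   /* example, never compiled */
static struct page *example_zero_page(unsigned long uaddr)
{
        /* picks the zero page whose color matches uaddr */
        return ZERO_PAGE(uaddr);
}
#endif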
/* TODO: s390 cannot support io_remap_pfn_range... */
#endif /* !__ASSEMBLY__ */
/*
 * PMD_SHIFT determines the size of the area a second-level page
 * table can map
 * PGDIR_SHIFT determines what a third-level page table entry can map
 */
#ifndef CONFIG_64BIT
# define PMD_SHIFT      20
# define PUD_SHIFT      20
# define PGDIR_SHIFT    20
#else /* CONFIG_64BIT */
# define PMD_SHIFT      20
# define PUD_SHIFT      31
# define PGDIR_SHIFT    42
#endif /* CONFIG_64BIT */
#define PMD_SIZE        (1UL << PMD_SHIFT)
#define PMD_MASK        (~(PMD_SIZE-1))
#define PUD_SIZE        (1UL << PUD_SHIFT)
#define PUD_MASK        (~(PUD_SIZE-1))
#define PGDIR_SIZE      (1UL << PGDIR_SHIFT)
#define PGDIR_MASK      (~(PGDIR_SIZE-1))
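
/*
 * Illustration only: the area sizes that follow from the shifts above on
 * 64 bit (PMD_SHIFT 20, PUD_SHIFT 31, PGDIR_SHIFT 42). A sketch, assuming
 * a 64 bit configuration:
 */
#if 0   /* example, never compiled */
static void example_area_sizes(void)
{
        BUILD_BUG_ON(PMD_SIZE   != (1UL << 20));  /* 1MB per segment entry */
        BUILD_BUG_ON(PUD_SIZE   != (1UL << 31));  /* 2GB per region third entry */
        BUILD_BUG_ON(PGDIR_SIZE != (1UL << 42));  /* 4TB per region second entry */
}
#endif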
/*
 * entries per page directory level: the S390 is two-level, so
 * we don't really have any PMD directory physically.
 * for S390 segment-table entries are combined to one PGD
 * that leads to 1024 pte per pgd
 */
#define PTRS_PER_PTE    256
#ifndef CONFIG_64BIT
#define PTRS_PER_PMD    1
#define PTRS_PER_PUD    1
#else /* CONFIG_64BIT */
#define PTRS_PER_PMD    2048
#define PTRS_PER_PUD    2048
#endif /* CONFIG_64BIT */
#define PTRS_PER_PGD    2048

#define FIRST_USER_ADDRESS  0
#define pte_ERROR(e) \
        printk("%s:%d: bad pte %p.\n", __FILE__, __LINE__, (void *) pte_val(e))
#define pmd_ERROR(e) \
        printk("%s:%d: bad pmd %p.\n", __FILE__, __LINE__, (void *) pmd_val(e))
#define pud_ERROR(e) \
        printk("%s:%d: bad pud %p.\n", __FILE__, __LINE__, (void *) pud_val(e))
#define pgd_ERROR(e) \
        printk("%s:%d: bad pgd %p.\n", __FILE__, __LINE__, (void *) pgd_val(e))
#ifndef __ASSEMBLY__
/*
 * The vmalloc and module area will always be on the topmost area of the kernel
 * mapping. We reserve 96MB (31bit) / 128GB (64bit) for vmalloc and modules.
 * On 64 bit kernels we have a 2GB area at the top of the vmalloc area where
 * modules will reside. That makes sure that inter module branches always
 * happen without trampolines and in addition the placement within a 2GB frame
 * is branch prediction unit friendly.
 */
extern unsigned long VMALLOC_START;
extern unsigned long VMALLOC_END;
extern struct page *vmemmap;

#define VMEM_MAX_PHYS ((unsigned long) vmemmap)

#ifdef CONFIG_64BIT
extern unsigned long MODULES_VADDR;
extern unsigned long MODULES_END;
#define MODULES_VADDR   MODULES_VADDR
#define MODULES_END     MODULES_END
#define MODULES_LEN     (1UL << 31)
#endif
/*
 * A 31 bit pagetable entry of S390 has following format:
 *  |   PFRA          |    |  OS  |
 * 0                   0IP0
 * 00000000001111111111222222222233
 * 01234567890123456789012345678901
 *
 * I Page-Invalid Bit:    Page is not available for address-translation
 * P Page-Protection Bit: Store access not possible for page
 *
 * A 31 bit segmenttable entry of S390 has following format:
 *  |   P-table origin      |  |PTL
 * 0                         IC
 * 00000000001111111111222222222233
 * 01234567890123456789012345678901
 *
 * I Segment-Invalid Bit:    Segment is not available for address-translation
 * C Common-Segment Bit:     Segment is not private (PoP 3-30)
 * PTL Page-Table-Length:    Page-table length (PTL+1*16 entries -> up to 256)
 *
 * The 31 bit segmenttable origin of S390 has following format:
 *
 *  |S-table origin   |     | STL |
 * X                   **GPS
 * 00000000001111111111222222222233
 * 01234567890123456789012345678901
 *
 * X Space-Switch event:
 * G Segment-Invalid Bit:     *
 * P Private-Space Bit:       Segment is not private (PoP 3-30)
 * S Storage-Alteration:
 * STL Segment-Table-Length:  Segment-table length (STL+1*16 entries -> up to 2048)
 *
 * A 64 bit pagetable entry of S390 has following format:
 * |                        PFRA                        |0IPC|  OS  |
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * I Page-Invalid Bit:    Page is not available for address-translation
 * P Page-Protection Bit: Store access not possible for page
 * C Change-bit override: HW is not required to set change bit
 *
 * A 64 bit segmenttable entry of S390 has following format:
 * |        P-table origin                              |      TT
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * I Segment-Invalid Bit:    Segment is not available for address-translation
 * C Common-Segment Bit:     Segment is not private (PoP 3-30)
 * P Page-Protection Bit:    Store access not possible for page
 *
 * A 64 bit region table entry of S390 has following format:
 * |        S-table origin                             |   TF  TTTL
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * I Segment-Invalid Bit:    Segment is not available for address-translation
 *
 * The 64 bit regiontable origin of S390 has following format:
 * |      region table origin                          |       DTTL
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * X Space-Switch event:
 * G Segment-Invalid Bit:
 * P Private-Space Bit:
 * S Storage-Alteration:
 *
 * A storage key has the following format:
 * | ACC |F|R|C|0|
 *  0   3 4 5 6 7
 * ACC: access key
 * F  : fetch protection bit
 * R  : referenced bit
 * C  : changed bit
 */
/* Hardware bits in the page table entry */
#define _PAGE_CO        0x100           /* HW Change-bit override */
#define _PAGE_PROTECT   0x200           /* HW read-only bit */
#define _PAGE_INVALID   0x400           /* HW invalid bit */
#define _PAGE_LARGE     0x800           /* Bit to mark a large pte */

/* Software bits in the page table entry */
#define _PAGE_PRESENT   0x001           /* SW pte present bit */
#define _PAGE_TYPE      0x002           /* SW pte type bit */
#define _PAGE_YOUNG     0x004           /* SW pte young bit */
#define _PAGE_DIRTY     0x008           /* SW pte dirty bit */
#define _PAGE_WRITE     0x010           /* SW pte write bit */
#define _PAGE_SPECIAL   0x020           /* SW associated with special page */
#define __HAVE_ARCH_PTE_SPECIAL

/* Set of bits not changed in pte_modify */
#define _PAGE_CHG_MASK          (PAGE_MASK | _PAGE_SPECIAL | _PAGE_CO | \
                                 _PAGE_DIRTY | _PAGE_YOUNG)
/*
 * handle_pte_fault uses pte_present, pte_none and pte_file to find out the
 * pte type WITHOUT holding the page table lock. The _PAGE_PRESENT bit
 * is used to distinguish present from not-present ptes. It is changed only
 * with the page table lock held.
 *
 * The following table gives the different possible bit combinations for
 * the pte hardware and software bits in the last 12 bits of a pte:
 *
 *      prot-none, clean        .11....00x01
 *      prot-none, dirty        .10....01x01
 *      read-only, clean        .01....00x01
 *      read-only, dirty        .01....01x01
 *      read-write, clean       .01....10x01
 *      read-write, dirty       .00....11x01
 *
 * pte_present is true for the bit pattern .xx...xxxxx1, (pte & 0x001) == 0x001
 * pte_none    is true for the bit pattern .10...xxxx00, (pte & 0x603) == 0x400
 * pte_file    is true for the bit pattern .11...xxxxx0, (pte & 0x601) == 0x600
 * pte_swap    is true for the bit pattern .10...xxxx10, (pte & 0x603) == 0x402
 */
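
/*
 * Illustration only: the lock-free predicates checked against the _PAGE_*
 * defines above (0x400 invalid, 0x200 protect, 0x002 type, 0x001 present).
 * A minimal sketch of the bit patterns:
 */
#if 0   /* example, never compiled */
static void example_pte_patterns(void)
{
        pte_t pte;

        pte_val(pte) = _PAGE_INVALID;                   /* empty pte: 0x400 */
        BUG_ON((pte_val(pte) & 0x603) != 0x400);        /* -> pte_none */

        pte_val(pte) = _PAGE_INVALID | _PAGE_TYPE;      /* swap pte: 0x402 */
        BUG_ON((pte_val(pte) & 0x603) != 0x402);        /* -> pte_swap */

        pte_val(pte) = _PAGE_INVALID | _PAGE_PROTECT;   /* file pte: 0x600 */
        BUG_ON((pte_val(pte) & 0x601) != 0x600);        /* -> pte_file */
}
#endif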
#ifndef CONFIG_64BIT

/* Bits in the segment table address-space-control-element */
#define _ASCE_SPACE_SWITCH      0x80000000UL    /* space switch event       */
#define _ASCE_ORIGIN_MASK       0x7ffff000UL    /* segment table origin     */
#define _ASCE_PRIVATE_SPACE     0x100   /* private space control            */
#define _ASCE_ALT_EVENT         0x80    /* storage alteration event control */
#define _ASCE_TABLE_LENGTH      0x7f    /* 128 x 64 entries = 8k            */

/* Bits in the segment table entry */
#define _SEGMENT_ENTRY_ORIGIN   0x7fffffc0UL    /* page table origin        */
#define _SEGMENT_ENTRY_PROTECT  0x200   /* page protection bit              */
#define _SEGMENT_ENTRY_INVALID  0x20    /* invalid segment table entry      */
#define _SEGMENT_ENTRY_COMMON   0x10    /* common segment bit               */
#define _SEGMENT_ENTRY_PTL      0x0f    /* page table length                */

#define _SEGMENT_ENTRY          (_SEGMENT_ENTRY_PTL)
#define _SEGMENT_ENTRY_EMPTY    (_SEGMENT_ENTRY_INVALID)

/* Page status table bits for virtualization */
#define PGSTE_ACC_BITS  0xf0000000UL
#define PGSTE_FP_BIT    0x08000000UL
#define PGSTE_PCL_BIT   0x00800000UL
#define PGSTE_HR_BIT    0x00400000UL
#define PGSTE_HC_BIT    0x00200000UL
#define PGSTE_GR_BIT    0x00040000UL
#define PGSTE_GC_BIT    0x00020000UL
#define PGSTE_UR_BIT    0x00008000UL
#define PGSTE_UC_BIT    0x00004000UL    /* user dirty (migration) */
#define PGSTE_IN_BIT    0x00002000UL    /* IPTE notify bit */
#else /* CONFIG_64BIT */

/* Bits in the segment/region table address-space-control-element */
#define _ASCE_ORIGIN            ~0xfffUL/* segment table origin             */
#define _ASCE_PRIVATE_SPACE     0x100   /* private space control            */
#define _ASCE_ALT_EVENT         0x80    /* storage alteration event control */
#define _ASCE_SPACE_SWITCH      0x40    /* space switch event               */
#define _ASCE_REAL_SPACE        0x20    /* real space control               */
#define _ASCE_TYPE_MASK         0x0c    /* asce table type mask             */
#define _ASCE_TYPE_REGION1      0x0c    /* region first table type          */
#define _ASCE_TYPE_REGION2      0x08    /* region second table type         */
#define _ASCE_TYPE_REGION3      0x04    /* region third table type          */
#define _ASCE_TYPE_SEGMENT      0x00    /* segment table type               */
#define _ASCE_TABLE_LENGTH      0x03    /* region table length              */

/* Bits in the region table entry */
#define _REGION_ENTRY_ORIGIN    ~0xfffUL/* region/segment table origin      */
#define _REGION_ENTRY_PROTECT   0x200   /* region protection bit            */
#define _REGION_ENTRY_INVALID   0x20    /* invalid region table entry       */
#define _REGION_ENTRY_TYPE_MASK 0x0c    /* region/segment table type mask   */
#define _REGION_ENTRY_TYPE_R1   0x0c    /* region first table type          */
#define _REGION_ENTRY_TYPE_R2   0x08    /* region second table type         */
#define _REGION_ENTRY_TYPE_R3   0x04    /* region third table type          */
#define _REGION_ENTRY_LENGTH    0x03    /* region third length              */

#define _REGION1_ENTRY          (_REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_LENGTH)
#define _REGION1_ENTRY_EMPTY    (_REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_INVALID)
#define _REGION2_ENTRY          (_REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_LENGTH)
#define _REGION2_ENTRY_EMPTY    (_REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_INVALID)
#define _REGION3_ENTRY          (_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_LENGTH)
#define _REGION3_ENTRY_EMPTY    (_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_INVALID)

#define _REGION3_ENTRY_LARGE    0x400   /* RTTE-format control, large page  */
#define _REGION3_ENTRY_RO       0x200   /* page protection bit              */
#define _REGION3_ENTRY_CO       0x100   /* change-recording override        */

/* Bits in the segment table entry */
#define _SEGMENT_ENTRY_ORIGIN_LARGE ~0xfffffUL /* large page address        */
#define _SEGMENT_ENTRY_ORIGIN   ~0x7ffUL/* segment table origin             */
#define _SEGMENT_ENTRY_PROTECT  0x200   /* page protection bit              */
#define _SEGMENT_ENTRY_INVALID  0x20    /* invalid segment table entry      */

#define _SEGMENT_ENTRY          (0)
#define _SEGMENT_ENTRY_EMPTY    (_SEGMENT_ENTRY_INVALID)

#define _SEGMENT_ENTRY_LARGE    0x400   /* STE-format control, large page   */
#define _SEGMENT_ENTRY_CO       0x100   /* change-recording override        */
#define _SEGMENT_ENTRY_SPLIT    0x001   /* THP splitting bit                */

#define _SEGMENT_ENTRY_SPLIT_BIT 0      /* THP splitting bit number         */

/* Set of bits not changed in pmd_modify */
#define _SEGMENT_CHG_MASK       (_SEGMENT_ENTRY_ORIGIN | _SEGMENT_ENTRY_LARGE \
                                 | _SEGMENT_ENTRY_SPLIT | _SEGMENT_ENTRY_CO)

/* Page status table bits for virtualization */
#define PGSTE_ACC_BITS  0xf000000000000000UL
#define PGSTE_FP_BIT    0x0800000000000000UL
#define PGSTE_PCL_BIT   0x0080000000000000UL
#define PGSTE_HR_BIT    0x0040000000000000UL
#define PGSTE_HC_BIT    0x0020000000000000UL
#define PGSTE_GR_BIT    0x0004000000000000UL
#define PGSTE_GC_BIT    0x0002000000000000UL
#define PGSTE_UR_BIT    0x0000800000000000UL
#define PGSTE_UC_BIT    0x0000400000000000UL    /* user dirty (migration) */
#define PGSTE_IN_BIT    0x0000200000000000UL    /* IPTE notify bit */

#endif /* CONFIG_64BIT */
/*
 * A user page table pointer has the space-switch-event bit, the
 * private-space-control bit and the storage-alteration-event-control
 * bit set. A kernel page table pointer doesn't need them.
 */
#define _ASCE_USER_BITS         (_ASCE_SPACE_SWITCH | _ASCE_PRIVATE_SPACE | \
                                 _ASCE_ALT_EVENT)
/*
 * Page protection definitions.
 */
#define PAGE_NONE       __pgprot(_PAGE_PRESENT | _PAGE_INVALID)
#define PAGE_READ       __pgprot(_PAGE_PRESENT | _PAGE_PROTECT)
#define PAGE_WRITE      __pgprot(_PAGE_PRESENT | _PAGE_WRITE | _PAGE_PROTECT)

#define PAGE_SHARED     __pgprot(_PAGE_PRESENT | _PAGE_WRITE | _PAGE_DIRTY)
#define PAGE_KERNEL     __pgprot(_PAGE_PRESENT | _PAGE_WRITE | _PAGE_DIRTY)
#define PAGE_KERNEL_RO  __pgprot(_PAGE_PRESENT | _PAGE_PROTECT)
/*
 * On s390 the page table entry has an invalid bit and a read-only bit.
 * Read permission implies execute permission and write permission
 * implies read permission.
 */
#define __P000  PAGE_NONE
#define __P001  PAGE_READ
#define __P010  PAGE_READ
#define __P011  PAGE_READ
#define __P100  PAGE_READ
#define __P101  PAGE_READ
#define __P110  PAGE_READ
#define __P111  PAGE_READ

#define __S000  PAGE_NONE
#define __S001  PAGE_READ
#define __S010  PAGE_WRITE
#define __S011  PAGE_WRITE
#define __S100  PAGE_READ
#define __S101  PAGE_READ
#define __S110  PAGE_WRITE
#define __S111  PAGE_WRITE
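
/*
 * Illustration only: mm core builds protection_map[] (mm/mmap.c) from the
 * __P (private) and __S (shared) entries above. A private read/write
 * mapping gets PAGE_READ; the first write then faults and is resolved by
 * copy-on-write. A hypothetical sketch:
 */
#if 0   /* example, never compiled */
static pgprot_t example_prot_for_rw(unsigned long vm_flags)
{
        return (vm_flags & VM_SHARED) ?
                __S011 :        /* shared  rw -> PAGE_WRITE */
                __P011;         /* private rw -> PAGE_READ, COW on write */
}
#endif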
/*
 * Segment entry (large page) protection definitions.
 */
#define SEGMENT_NONE    __pgprot(_SEGMENT_ENTRY_INVALID | \
                                 _SEGMENT_ENTRY_PROTECT)
#define SEGMENT_READ    __pgprot(_SEGMENT_ENTRY_PROTECT)
#define SEGMENT_WRITE   __pgprot(0)
static inline int mm_exclusive(struct mm_struct *mm)
{
        return likely(mm == current->active_mm &&
                      atomic_read(&mm->context.attach_count) <= 1);
}

static inline int mm_has_pgste(struct mm_struct *mm)
{
#ifdef CONFIG_PGSTE
        if (unlikely(mm->context.has_pgste))
                return 1;
#endif
        return 0;
}
/*
 * pgd/pmd/pte query functions
 */
#ifndef CONFIG_64BIT

static inline int pgd_present(pgd_t pgd) { return 1; }
static inline int pgd_none(pgd_t pgd)    { return 0; }
static inline int pgd_bad(pgd_t pgd)     { return 0; }

static inline int pud_present(pud_t pud) { return 1; }
static inline int pud_none(pud_t pud)    { return 0; }
static inline int pud_large(pud_t pud)   { return 0; }
static inline int pud_bad(pud_t pud)     { return 0; }
#else /* CONFIG_64BIT */

static inline int pgd_present(pgd_t pgd)
{
        if ((pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R2)
                return 1;
        return (pgd_val(pgd) & _REGION_ENTRY_ORIGIN) != 0UL;
}

static inline int pgd_none(pgd_t pgd)
{
        if ((pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R2)
                return 0;
        return (pgd_val(pgd) & _REGION_ENTRY_INVALID) != 0UL;
}

static inline int pgd_bad(pgd_t pgd)
{
        /*
         * With dynamic page table levels the pgd can be a region table
         * entry or a segment table entry. Check for the bits that are
         * invalid for either table entry.
         */
        unsigned long mask =
                ~_SEGMENT_ENTRY_ORIGIN & ~_REGION_ENTRY_INVALID &
                ~_REGION_ENTRY_TYPE_MASK & ~_REGION_ENTRY_LENGTH;
        return (pgd_val(pgd) & mask) != 0;
}

static inline int pud_present(pud_t pud)
{
        if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R3)
                return 1;
        return (pud_val(pud) & _REGION_ENTRY_ORIGIN) != 0UL;
}

static inline int pud_none(pud_t pud)
{
        if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R3)
                return 0;
        return (pud_val(pud) & _REGION_ENTRY_INVALID) != 0UL;
}

static inline int pud_large(pud_t pud)
{
        if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) != _REGION_ENTRY_TYPE_R3)
                return 0;
        return !!(pud_val(pud) & _REGION3_ENTRY_LARGE);
}

static inline int pud_bad(pud_t pud)
{
        /*
         * With dynamic page table levels the pud can be a region table
         * entry or a segment table entry. Check for the bits that are
         * invalid for either table entry.
         */
        unsigned long mask =
                ~_SEGMENT_ENTRY_ORIGIN & ~_REGION_ENTRY_INVALID &
                ~_REGION_ENTRY_TYPE_MASK & ~_REGION_ENTRY_LENGTH;
        return (pud_val(pud) & mask) != 0;
}

#endif /* CONFIG_64BIT */
static inline int pmd_present(pmd_t pmd)
{
        return pmd_val(pmd) != _SEGMENT_ENTRY_INVALID;
}

static inline int pmd_none(pmd_t pmd)
{
        return pmd_val(pmd) == _SEGMENT_ENTRY_INVALID;
}

static inline int pmd_large(pmd_t pmd)
{
#ifdef CONFIG_64BIT
        return (pmd_val(pmd) & _SEGMENT_ENTRY_LARGE) != 0;
#else
        return 0;
#endif
}

static inline int pmd_bad(pmd_t pmd)
{
        unsigned long mask = ~_SEGMENT_ENTRY_ORIGIN & ~_SEGMENT_ENTRY_INVALID;
        return (pmd_val(pmd) & mask) != _SEGMENT_ENTRY;
}
#define __HAVE_ARCH_PMDP_SPLITTING_FLUSH
extern void pmdp_splitting_flush(struct vm_area_struct *vma,
                                 unsigned long addr, pmd_t *pmdp);

#define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
extern int pmdp_set_access_flags(struct vm_area_struct *vma,
                                 unsigned long address, pmd_t *pmdp,
                                 pmd_t entry, int dirty);

#define __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
extern int pmdp_clear_flush_young(struct vm_area_struct *vma,
                                  unsigned long address, pmd_t *pmdp);

#define __HAVE_ARCH_PMD_WRITE
static inline int pmd_write(pmd_t pmd)
{
        return (pmd_val(pmd) & _SEGMENT_ENTRY_PROTECT) == 0;
}

static inline int pmd_young(pmd_t pmd)
{
        return 0;
}
static inline int pte_present(pte_t pte)
{
        /* Bit pattern: (pte & 0x001) == 0x001 */
        return (pte_val(pte) & _PAGE_PRESENT) != 0;
}

static inline int pte_none(pte_t pte)
{
        /* Bit pattern: pte == 0x400 */
        return pte_val(pte) == _PAGE_INVALID;
}

static inline int pte_file(pte_t pte)
{
        /* Bit pattern: (pte & 0x601) == 0x600 */
        return (pte_val(pte) & (_PAGE_INVALID | _PAGE_PROTECT | _PAGE_PRESENT))
                == (_PAGE_INVALID | _PAGE_PROTECT);
}

static inline int pte_special(pte_t pte)
{
        return (pte_val(pte) & _PAGE_SPECIAL);
}

#define __HAVE_ARCH_PTE_SAME
static inline int pte_same(pte_t a, pte_t b)
{
        return pte_val(a) == pte_val(b);
}
static inline pgste_t pgste_get_lock(pte_t *ptep)
{
        unsigned long new = 0;
#ifdef CONFIG_PGSTE
        unsigned long old;

        preempt_disable();
        asm(
                "       lg      %0,%2\n"
                "0:     lgr     %1,%0\n"
                "       nihh    %0,0xff7f\n"    /* clear PCL bit in old */
                "       oihh    %1,0x0080\n"    /* set PCL bit in new */
                "       csg     %0,%1,%2\n"
                "       jl      0b\n"
                : "=&d" (old), "=&d" (new), "=Q" (ptep[PTRS_PER_PTE])
                : "Q" (ptep[PTRS_PER_PTE]) : "cc", "memory");
#endif
        return __pgste(new);
}

static inline void pgste_set_unlock(pte_t *ptep, pgste_t pgste)
{
#ifdef CONFIG_PGSTE
        asm(
                "       nihh    %1,0xff7f\n"    /* clear PCL bit */
                "       stg     %1,%0\n"
                : "=Q" (ptep[PTRS_PER_PTE])
                : "d" (pgste_val(pgste)), "Q" (ptep[PTRS_PER_PTE])
                : "cc", "memory");
        preempt_enable();
#endif
}

static inline void pgste_set(pte_t *ptep, pgste_t pgste)
{
#ifdef CONFIG_PGSTE
        *(pgste_t *)(ptep + PTRS_PER_PTE) = pgste;
#endif
}
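
/*
 * Illustration only: the PGSTE of a pte lives in the upper half of the
 * page table, at ptep + PTRS_PER_PTE, which is what the lock primitives
 * above address as ptep[PTRS_PER_PTE]. An unlocked snapshot would be:
 */
#if 0   /* example, never compiled */
static pgste_t example_pgste_peek(pte_t *ptep)
{
        return *(pgste_t *)(ptep + PTRS_PER_PTE);
}
#endif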
static inline pgste_t pgste_update_all(pte_t *ptep, pgste_t pgste)
{
#ifdef CONFIG_PGSTE
        unsigned long address, bits;
        unsigned char skey;

        if (pte_val(*ptep) & _PAGE_INVALID)
                return pgste;
        address = pte_val(*ptep) & PAGE_MASK;
        skey = page_get_storage_key(address);
        bits = skey & (_PAGE_CHANGED | _PAGE_REFERENCED);
        /* Clear page changed & referenced bit in the storage key */
        if (bits & _PAGE_CHANGED)
                page_set_storage_key(address, skey ^ bits, 0);
        else if (bits)
                page_reset_referenced(address);
        /* Transfer page changed & referenced bit to guest bits in pgste */
        pgste_val(pgste) |= bits << 48;         /* GR bit & GC bit */
        /* Get host changed & referenced bits from pgste */
        bits |= (pgste_val(pgste) & (PGSTE_HR_BIT | PGSTE_HC_BIT)) >> 52;
        /* Transfer page changed & referenced bit to kvm user bits */
        pgste_val(pgste) |= bits << 45;         /* PGSTE_UR_BIT & PGSTE_UC_BIT */
        /* Clear relevant host bits in pgste. */
        pgste_val(pgste) &= ~(PGSTE_HR_BIT | PGSTE_HC_BIT);
        pgste_val(pgste) &= ~(PGSTE_ACC_BITS | PGSTE_FP_BIT);
        /* Copy page access key and fetch protection bit to pgste */
        pgste_val(pgste) |=
                (unsigned long) (skey & (_PAGE_ACC_BITS | _PAGE_FP_BIT)) << 56;
        /* Transfer referenced bit to pte */
        pte_val(*ptep) |= (bits & _PAGE_REFERENCED) << 1;
#endif
        return pgste;
}
static inline pgste_t pgste_update_young(pte_t *ptep, pgste_t pgste)
{
#ifdef CONFIG_PGSTE
        int young;

        if (pte_val(*ptep) & _PAGE_INVALID)
                return pgste;
        /* Get referenced bit from storage key */
        young = page_reset_referenced(pte_val(*ptep) & PAGE_MASK);
        if (young)
                pgste_val(pgste) |= PGSTE_GR_BIT;
        /* Get host referenced bit from pgste */
        if (pgste_val(pgste) & PGSTE_HR_BIT) {
                pgste_val(pgste) &= ~PGSTE_HR_BIT;
                young = 1;
        }
        /* Transfer referenced bit to kvm user bits and pte */
        if (young) {
                pgste_val(pgste) |= PGSTE_UR_BIT;
                pte_val(*ptep) |= _PAGE_YOUNG;
        }
#endif
        return pgste;
}
static inline void pgste_set_key(pte_t *ptep, pgste_t pgste, pte_t entry)
{
#ifdef CONFIG_PGSTE
        unsigned long address;
        unsigned long nkey;

        if (pte_val(entry) & _PAGE_INVALID)
                return;
        VM_BUG_ON(!(pte_val(*ptep) & _PAGE_INVALID));
        address = pte_val(entry) & PAGE_MASK;
        /*
         * Set page access key and fetch protection bit from pgste.
         * The guest C/R information is still in the PGSTE, set real
         * key C/R to 0.
         */
        nkey = (pgste_val(pgste) & (PGSTE_ACC_BITS | PGSTE_FP_BIT)) >> 56;
        page_set_storage_key(address, nkey, 0);
#endif
}
static inline void pgste_set_pte(pte_t *ptep, pte_t entry)
{
        if (!MACHINE_HAS_ESOP && (pte_val(entry) & _PAGE_WRITE)) {
                /*
                 * Without enhanced suppression-on-protection force
                 * the dirty bit on for all writable ptes.
                 */
                pte_val(entry) |= _PAGE_DIRTY;
                pte_val(entry) &= ~_PAGE_PROTECT;
        }
        *ptep = entry;
}
/**
 * struct gmap_struct - guest address space
 * @mm: pointer to the parent mm_struct
 * @table: pointer to the page directory
 * @asce: address space control element for gmap page table
 * @crst_list: list of all crst tables used in the guest address space
 */
struct gmap {
        struct list_head list;
        struct mm_struct *mm;
        unsigned long *table;
        unsigned long asce;
        void *private;
        struct list_head crst_list;
};
/**
 * struct gmap_rmap - reverse mapping for segment table entries
 * @gmap: pointer to the gmap_struct
 * @entry: pointer to a segment table entry
 * @vmaddr: virtual address in the guest address space
 */
struct gmap_rmap {
        struct list_head list;
        struct gmap *gmap;
        unsigned long *entry;
        unsigned long vmaddr;
};
/**
 * struct gmap_pgtable - gmap information attached to a page table
 * @vmaddr: address of the 1MB segment in the process virtual memory
 * @mapper: list of segment table entries mapping a page table
 */
struct gmap_pgtable {
        unsigned long vmaddr;
        struct list_head mapper;
};
/**
 * struct gmap_notifier - notify function block for page invalidation
 * @notifier_call: address of callback function
 */
struct gmap_notifier {
        struct list_head list;
        void (*notifier_call)(struct gmap *gmap, unsigned long address);
};
struct gmap *gmap_alloc(struct mm_struct *mm);
void gmap_free(struct gmap *gmap);
void gmap_enable(struct gmap *gmap);
void gmap_disable(struct gmap *gmap);
int gmap_map_segment(struct gmap *gmap, unsigned long from,
                     unsigned long to, unsigned long len);
int gmap_unmap_segment(struct gmap *gmap, unsigned long to, unsigned long len);
unsigned long __gmap_translate(unsigned long address, struct gmap *);
unsigned long gmap_translate(unsigned long address, struct gmap *);
unsigned long __gmap_fault(unsigned long address, struct gmap *);
unsigned long gmap_fault(unsigned long address, struct gmap *);
void gmap_discard(unsigned long from, unsigned long to, struct gmap *);

void gmap_register_ipte_notifier(struct gmap_notifier *);
void gmap_unregister_ipte_notifier(struct gmap_notifier *);
int gmap_ipte_notify(struct gmap *, unsigned long start, unsigned long len);
void gmap_do_ipte_notify(struct mm_struct *, unsigned long addr, pte_t *);
static inline pgste_t pgste_ipte_notify(struct mm_struct *mm,
                                        unsigned long addr,
                                        pte_t *ptep, pgste_t pgste)
{
#ifdef CONFIG_PGSTE
        if (pgste_val(pgste) & PGSTE_IN_BIT) {
                pgste_val(pgste) &= ~PGSTE_IN_BIT;
                gmap_do_ipte_notify(mm, addr, ptep);
        }
#endif
        return pgste;
}
/*
 * Certain architectures need to do special things when PTEs
 * within a page table are directly modified. Thus, the following
 * hook is made available.
 */
static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
                              pte_t *ptep, pte_t entry)
{
        pgste_t pgste;

        if (mm_has_pgste(mm)) {
                pgste = pgste_get_lock(ptep);
                pgste_set_key(ptep, pgste, entry);
                pgste_set_pte(ptep, entry);
                pgste_set_unlock(ptep, pgste);
        } else {
                if (!(pte_val(entry) & _PAGE_INVALID) && MACHINE_HAS_EDAT1)
                        pte_val(entry) |= _PAGE_CO;
                *ptep = entry;
        }
}
/*
 * query functions pte_write/pte_dirty/pte_young only work if
 * pte_present() is true. Undefined behaviour if not..
 */
static inline int pte_write(pte_t pte)
{
        return (pte_val(pte) & _PAGE_WRITE) != 0;
}

static inline int pte_dirty(pte_t pte)
{
        return (pte_val(pte) & _PAGE_DIRTY) != 0;
}

static inline int pte_young(pte_t pte)
{
#ifdef CONFIG_PGSTE
        if (pte_val(pte) & _PAGE_YOUNG)
                return 1;
#endif
        return 0;
}
/*
 * pgd/pmd/pte modification functions
 */

static inline void pgd_clear(pgd_t *pgd)
{
#ifdef CONFIG_64BIT
        if ((pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R2)
                pgd_val(*pgd) = _REGION2_ENTRY_EMPTY;
#endif
}

static inline void pud_clear(pud_t *pud)
{
#ifdef CONFIG_64BIT
        if ((pud_val(*pud) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
                pud_val(*pud) = _REGION3_ENTRY_EMPTY;
#endif
}

static inline void pmd_clear(pmd_t *pmdp)
{
        pmd_val(*pmdp) = _SEGMENT_ENTRY_INVALID;
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
        pte_val(*ptep) = _PAGE_INVALID;
}
/*
 * The following pte modification functions only work if
 * pte_present() is true. Undefined behaviour if not..
 */
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
        pte_val(pte) &= _PAGE_CHG_MASK;
        pte_val(pte) |= pgprot_val(newprot);
        if ((pte_val(pte) & _PAGE_DIRTY) && (pte_val(pte) & _PAGE_WRITE))
                pte_val(pte) &= ~_PAGE_PROTECT;
        return pte;
}
static inline pte_t pte_wrprotect(pte_t pte)
{
        pte_val(pte) &= ~_PAGE_WRITE;
        pte_val(pte) |= _PAGE_PROTECT;
        return pte;
}

static inline pte_t pte_mkwrite(pte_t pte)
{
        pte_val(pte) |= _PAGE_WRITE;
        if (pte_val(pte) & _PAGE_DIRTY)
                pte_val(pte) &= ~_PAGE_PROTECT;
        return pte;
}

static inline pte_t pte_mkclean(pte_t pte)
{
        pte_val(pte) &= ~_PAGE_DIRTY;
        pte_val(pte) |= _PAGE_PROTECT;
        return pte;
}

static inline pte_t pte_mkdirty(pte_t pte)
{
        pte_val(pte) |= _PAGE_DIRTY;
        if (pte_val(pte) & _PAGE_WRITE)
                pte_val(pte) &= ~_PAGE_PROTECT;
        return pte;
}

static inline pte_t pte_mkold(pte_t pte)
{
        pte_val(pte) &= ~_PAGE_YOUNG;
        return pte;
}

static inline pte_t pte_mkyoung(pte_t pte)
{
        pte_val(pte) |= _PAGE_YOUNG;
        return pte;
}

static inline pte_t pte_mkspecial(pte_t pte)
{
        pte_val(pte) |= _PAGE_SPECIAL;
        return pte;
}
#ifdef CONFIG_HUGETLB_PAGE
static inline pte_t pte_mkhuge(pte_t pte)
{
        pte_val(pte) |= _PAGE_LARGE;
        return pte;
}
#endif
/*
 * Get (and clear) the user dirty bit for a pte.
 */
static inline int ptep_test_and_clear_user_dirty(struct mm_struct *mm,
                                                 pte_t *ptep)
{
        pgste_t pgste;
        int dirty = 0;

        if (mm_has_pgste(mm)) {
                pgste = pgste_get_lock(ptep);
                pgste = pgste_update_all(ptep, pgste);
                dirty = !!(pgste_val(pgste) & PGSTE_UC_BIT);
                pgste_val(pgste) &= ~PGSTE_UC_BIT;
                pgste_set_unlock(ptep, pgste);
        }
        return dirty;
}
/*
 * Get (and clear) the user referenced bit for a pte.
 */
static inline int ptep_test_and_clear_user_young(struct mm_struct *mm,
                                                 pte_t *ptep)
{
        pgste_t pgste;
        int young = 0;

        if (mm_has_pgste(mm)) {
                pgste = pgste_get_lock(ptep);
                pgste = pgste_update_young(ptep, pgste);
                young = !!(pgste_val(pgste) & PGSTE_UR_BIT);
                pgste_val(pgste) &= ~PGSTE_UR_BIT;
                pgste_set_unlock(ptep, pgste);
        }
        return young;
}
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
                                            unsigned long addr, pte_t *ptep)
{
        pgste_t pgste;
        pte_t pte;

        if (mm_has_pgste(vma->vm_mm)) {
                pgste = pgste_get_lock(ptep);
                pgste = pgste_update_young(ptep, pgste);
                pte = *ptep;
                *ptep = pte_mkold(pte);
                pgste_set_unlock(ptep, pgste);
                return pte_young(pte);
        }
        return 0;
}
#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
static inline int ptep_clear_flush_young(struct vm_area_struct *vma,
                                         unsigned long address, pte_t *ptep)
{
        /* No need to flush TLB;
         * on s390 reference bits are in storage key and never in TLB.
         * With virtualization we handle the reference bit, without we
         * can simply return. */
        return ptep_test_and_clear_young(vma, address, ptep);
}
static inline void __ptep_ipte(unsigned long address, pte_t *ptep)
{
        if (!(pte_val(*ptep) & _PAGE_INVALID)) {
#ifndef CONFIG_64BIT
                /* pto must point to the start of the segment table */
                pte_t *pto = (pte_t *) (((unsigned long) ptep) & 0x7ffffc00);
#else
                /* ipte in zarch mode can do the math */
                pte_t *pto = ptep;
#endif
                asm volatile(
                        "       ipte    %2,%3"
                        : "=m" (*ptep) : "m" (*ptep),
                          "a" (pto), "a" (address));
        }
}
/*
 * This is hard to understand. ptep_get_and_clear and ptep_clear_flush
 * both clear the TLB for the unmapped pte. The reason is that
 * ptep_get_and_clear is used in common code (e.g. change_pte_range)
 * to modify an active pte. The sequence is
 *   1) ptep_get_and_clear
 *   2) set_pte_at
 *   3) flush_tlb_range
 * On s390 the tlb needs to get flushed with the modification of the pte
 * if the pte is active. The only way how this can be implemented is to
 * have ptep_get_and_clear do the tlb flush. In exchange flush_tlb_range
 * is a nop.
 */
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
                                       unsigned long address, pte_t *ptep)
{
        pgste_t pgste;
        pte_t pte;

        mm->context.flush_mm = 1;
        if (mm_has_pgste(mm)) {
                pgste = pgste_get_lock(ptep);
                pgste = pgste_ipte_notify(mm, address, ptep, pgste);
        }

        pte = *ptep;
        if (!mm_exclusive(mm))
                __ptep_ipte(address, ptep);
        pte_val(*ptep) = _PAGE_INVALID;

        if (mm_has_pgste(mm)) {
                pgste = pgste_update_all(&pte, pgste);
                pgste_set_unlock(ptep, pgste);
        }
        return pte;
}
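
/*
 * Illustration only: the common-code sequence from the comment above, for
 * a hypothetical caller changing the protection of one mapped page. Step 1
 * already flushed the TLB, which is why flush_tlb_range can be a nop here.
 */
#if 0   /* example, never compiled */
static void example_change_prot(struct mm_struct *mm, struct vm_area_struct *vma,
                                unsigned long addr, pte_t *ptep, pgprot_t newprot)
{
        pte_t pte;

        pte = ptep_get_and_clear(mm, addr, ptep);       /* 1) clear pte + TLB */
        pte = pte_modify(pte, newprot);
        set_pte_at(mm, addr, ptep, pte);                /* 2) install new pte */
        flush_tlb_range(vma, addr, addr + PAGE_SIZE);   /* 3) nop on s390 */
}
#endif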
#define __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION
static inline pte_t ptep_modify_prot_start(struct mm_struct *mm,
                                           unsigned long address,
                                           pte_t *ptep)
{
        pgste_t pgste;
        pte_t pte;

        mm->context.flush_mm = 1;
        if (mm_has_pgste(mm)) {
                pgste = pgste_get_lock(ptep);
                pgste_ipte_notify(mm, address, ptep, pgste);
        }

        pte = *ptep;
        if (!mm_exclusive(mm))
                __ptep_ipte(address, ptep);

        if (mm_has_pgste(mm)) {
                pgste = pgste_update_all(&pte, pgste);
                pgste_set(ptep, pgste);
        }
        return pte;
}
static inline void ptep_modify_prot_commit(struct mm_struct *mm,
                                           unsigned long address,
                                           pte_t *ptep, pte_t pte)
{
        pgste_t pgste;

        if (mm_has_pgste(mm)) {
                pgste = *(pgste_t *)(ptep + PTRS_PER_PTE);
                pgste_set_key(ptep, pgste, pte);
                pgste_set_pte(ptep, pte);
                pgste_set_unlock(ptep, pgste);
        } else
                *ptep = pte;
}
#define __HAVE_ARCH_PTEP_CLEAR_FLUSH
static inline pte_t ptep_clear_flush(struct vm_area_struct *vma,
                                     unsigned long address, pte_t *ptep)
{
        pgste_t pgste;
        pte_t pte;

        if (mm_has_pgste(vma->vm_mm)) {
                pgste = pgste_get_lock(ptep);
                pgste = pgste_ipte_notify(vma->vm_mm, address, ptep, pgste);
        }

        pte = *ptep;
        __ptep_ipte(address, ptep);
        pte_val(*ptep) = _PAGE_INVALID;

        if (mm_has_pgste(vma->vm_mm)) {
                pgste = pgste_update_all(&pte, pgste);
                pgste_set_unlock(ptep, pgste);
        }
        return pte;
}
/*
 * The batched pte unmap code uses ptep_get_and_clear_full to clear the
 * ptes. Here an optimization is possible. tlb_gather_mmu flushes all
 * tlbs of an mm if it can guarantee that the ptes of the mm_struct
 * cannot be accessed while the batched unmap is running. In this case
 * full==1 and a simple pte_clear is enough. See tlb.h.
 */
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
                                            unsigned long address,
                                            pte_t *ptep, int full)
{
        pgste_t pgste;
        pte_t pte;

        if (!full && mm_has_pgste(mm)) {
                pgste = pgste_get_lock(ptep);
                pgste = pgste_ipte_notify(mm, address, ptep, pgste);
        }

        pte = *ptep;
        if (!full)
                __ptep_ipte(address, ptep);
        pte_val(*ptep) = _PAGE_INVALID;

        if (!full && mm_has_pgste(mm)) {
                pgste = pgste_update_all(&pte, pgste);
                pgste_set_unlock(ptep, pgste);
        }
        return pte;
}
#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline pte_t ptep_set_wrprotect(struct mm_struct *mm,
                                       unsigned long address, pte_t *ptep)
{
        pgste_t pgste;
        pte_t pte = *ptep;

        if (pte_write(pte)) {
                mm->context.flush_mm = 1;
                if (mm_has_pgste(mm)) {
                        pgste = pgste_get_lock(ptep);
                        pgste = pgste_ipte_notify(mm, address, ptep, pgste);
                }

                if (!mm_exclusive(mm))
                        __ptep_ipte(address, ptep);
                pte = pte_wrprotect(pte);

                if (mm_has_pgste(mm)) {
                        pgste_set_pte(ptep, pte);
                        pgste_set_unlock(ptep, pgste);
                } else
                        *ptep = pte;
        }
        return pte;
}
#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
static inline int ptep_set_access_flags(struct vm_area_struct *vma,
                                        unsigned long address, pte_t *ptep,
                                        pte_t entry, int dirty)
{
        pgste_t pgste;

        if (pte_same(*ptep, entry))
                return 0;
        if (mm_has_pgste(vma->vm_mm)) {
                pgste = pgste_get_lock(ptep);
                pgste = pgste_ipte_notify(vma->vm_mm, address, ptep, pgste);
        }

        __ptep_ipte(address, ptep);

        if (mm_has_pgste(vma->vm_mm)) {
                pgste_set_pte(ptep, entry);
                pgste_set_unlock(ptep, pgste);
        } else
                *ptep = entry;
        return 1;
}
/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
static inline pte_t mk_pte_phys(unsigned long physpage, pgprot_t pgprot)
{
        pte_t __pte;
        pte_val(__pte) = physpage + pgprot_val(pgprot);
        return __pte;
}

static inline pte_t mk_pte(struct page *page, pgprot_t pgprot)
{
        unsigned long physpage = page_to_phys(page);
        pte_t __pte = mk_pte_phys(physpage, pgprot);

        if (pte_write(__pte) && PageDirty(page))
                __pte = pte_mkdirty(__pte);
        return __pte;
}
#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
#define pud_index(address) (((address) >> PUD_SHIFT) & (PTRS_PER_PUD-1))
#define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
#define pte_index(address) (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE-1))

#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
#define pgd_offset_k(address) pgd_offset(&init_mm, address)
#ifndef CONFIG_64BIT

#define pmd_deref(pmd) (pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN)
#define pud_deref(pmd) ({ BUG(); 0UL; })
#define pgd_deref(pmd) ({ BUG(); 0UL; })

#define pud_offset(pgd, address) ((pud_t *) pgd)
#define pmd_offset(pud, address) ((pmd_t *) pud + pmd_index(address))
#else /* CONFIG_64BIT */

#define pmd_deref(pmd) (pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN)
#define pud_deref(pud) (pud_val(pud) & _REGION_ENTRY_ORIGIN)
#define pgd_deref(pgd) (pgd_val(pgd) & _REGION_ENTRY_ORIGIN)

static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
{
        pud_t *pud = (pud_t *) pgd;
        if ((pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R2)
                pud = (pud_t *) pgd_deref(*pgd);
        return pud + pud_index(address);
}

static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
{
        pmd_t *pmd = (pmd_t *) pud;
        if ((pud_val(*pud) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
                pmd = (pmd_t *) pud_deref(*pud);
        return pmd + pmd_index(address);
}

#endif /* CONFIG_64BIT */
#define pfn_pte(pfn,pgprot) mk_pte_phys(__pa((pfn) << PAGE_SHIFT),(pgprot))
#define pte_pfn(x) (pte_val(x) >> PAGE_SHIFT)
#define pte_page(x) pfn_to_page(pte_pfn(x))

#define pmd_page(pmd) pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT)

/* Find an entry in the lowest level page table.. */
#define pte_offset(pmd, addr) ((pte_t *) pmd_deref(*(pmd)) + pte_index(addr))
#define pte_offset_kernel(pmd, address) pte_offset(pmd,address)
#define pte_offset_map(pmd, address) pte_offset_kernel(pmd, address)
#define pte_unmap(pte) do { } while (0)
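
/*
 * Illustration only: a software walk with the offset helpers above, for a
 * hypothetical lookup of the pte mapping "addr" in "mm". With folded or
 * missing levels pud_offset/pmd_offset simply pass the entry through.
 */
#if 0   /* example, never compiled */
static pte_t *example_walk(struct mm_struct *mm, unsigned long addr)
{
        pgd_t *pgd = pgd_offset(mm, addr);
        pud_t *pud;
        pmd_t *pmd;

        if (pgd_none(*pgd) || pgd_bad(*pgd))
                return NULL;
        pud = pud_offset(pgd, addr);
        if (pud_none(*pud) || pud_bad(*pud))
                return NULL;
        pmd = pmd_offset(pud, addr);
        if (pmd_none(*pmd) || pmd_bad(*pmd))
                return NULL;
        return pte_offset_kernel(pmd, addr);
}
#endif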
static inline void __pmd_idte(unsigned long address, pmd_t *pmdp)
{
        unsigned long sto = (unsigned long) pmdp -
                            pmd_index(address) * sizeof(pmd_t);

        if (!(pmd_val(*pmdp) & _SEGMENT_ENTRY_INVALID)) {
                asm volatile(
                        "       .insn   rrf,0xb98e0000,%2,%3,0,0"
                        : "=m" (*pmdp)
                        : "m" (*pmdp), "a" (sto),
                          "a" ((address & HPAGE_MASK))
                        : "cc"
                );
        }
}
static inline void __pmd_csp(pmd_t *pmdp)
{
        register unsigned long reg2 asm("2") = pmd_val(*pmdp);
        register unsigned long reg3 asm("3") = pmd_val(*pmdp) |
                                               _SEGMENT_ENTRY_INVALID;
        register unsigned long reg4 asm("4") = ((unsigned long) pmdp) + 5;

        asm volatile(
                "       csp %1,%3"
                : "=m" (*pmdp)
                : "d" (reg2), "d" (reg3), "d" (reg4), "m" (*pmdp) : "cc");
}
#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLB_PAGE)
static inline unsigned long massage_pgprot_pmd(pgprot_t pgprot)
{
        /*
         * pgprot is PAGE_NONE, PAGE_READ, or PAGE_WRITE (see __Pxxx / __Sxxx)
         * Convert to segment table entry format.
         */
        if (pgprot_val(pgprot) == pgprot_val(PAGE_NONE))
                return pgprot_val(SEGMENT_NONE);
        if (pgprot_val(pgprot) == pgprot_val(PAGE_READ))
                return pgprot_val(SEGMENT_READ);
        return pgprot_val(SEGMENT_WRITE);
}
static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
        pmd_val(pmd) &= _SEGMENT_CHG_MASK;
        pmd_val(pmd) |= massage_pgprot_pmd(newprot);
        return pmd;
}

static inline pmd_t mk_pmd_phys(unsigned long physpage, pgprot_t pgprot)
{
        pmd_t __pmd;
        pmd_val(__pmd) = physpage + massage_pgprot_pmd(pgprot);
        return __pmd;
}

static inline pmd_t pmd_mkwrite(pmd_t pmd)
{
        /* Do not clobber PROT_NONE pages! */
        if (!(pmd_val(pmd) & _SEGMENT_ENTRY_INVALID))
                pmd_val(pmd) &= ~_SEGMENT_ENTRY_PROTECT;
        return pmd;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLB_PAGE */
#ifdef CONFIG_TRANSPARENT_HUGEPAGE

#define __HAVE_ARCH_PGTABLE_DEPOSIT
extern void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
                                       pgtable_t pgtable);

#define __HAVE_ARCH_PGTABLE_WITHDRAW
extern pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp);

static inline int pmd_trans_splitting(pmd_t pmd)
{
        return pmd_val(pmd) & _SEGMENT_ENTRY_SPLIT;
}
static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
                              pmd_t *pmdp, pmd_t entry)
{
        if (!(pmd_val(entry) & _SEGMENT_ENTRY_INVALID) && MACHINE_HAS_EDAT1)
                pmd_val(entry) |= _SEGMENT_ENTRY_CO;
        *pmdp = entry;
}
static inline pmd_t pmd_mkhuge(pmd_t pmd)
{
        pmd_val(pmd) |= _SEGMENT_ENTRY_LARGE;
        return pmd;
}

static inline pmd_t pmd_wrprotect(pmd_t pmd)
{
        pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
        return pmd;
}

static inline pmd_t pmd_mkdirty(pmd_t pmd)
{
        /* No dirty bit in the segment table entry. */
        return pmd;
}

static inline pmd_t pmd_mkold(pmd_t pmd)
{
        /* No referenced bit in the segment table entry. */
        return pmd;
}

static inline pmd_t pmd_mkyoung(pmd_t pmd)
{
        /* No referenced bit in the segment table entry. */
        return pmd;
}
#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
                                            unsigned long address, pmd_t *pmdp)
{
        unsigned long pmd_addr = pmd_val(*pmdp) & HPAGE_MASK;
        long tmp, rc;
        int counter;

        rc = 0;
        if (MACHINE_HAS_RRBM) {
                counter = PTRS_PER_PTE >> 6;
                asm volatile(
                        "0:     .insn   rre,0xb9ae0000,%0,%3\n" /* rrbm */
                        "       ogr     %1,%0\n"
                        "       la      %3,0(%4,%3)\n"
                        "       brct    %2,0b\n"
                        : "=&d" (tmp), "+&d" (rc), "+d" (counter),
                          "+a" (pmd_addr)
                        : "a" (64 * 4096UL) : "cc");
                rc = !!rc;
        } else {
                counter = PTRS_PER_PTE;
                asm volatile(
                        "0:     rrbe    0,%2\n"
                        "       la      %2,0(%3,%2)\n"
                        "       brc     12,1f\n"
                        "       lhi     %0,1\n"
                        "1:     brct    %1,0b\n"
                        : "+d" (rc), "+d" (counter), "+a" (pmd_addr)
                        : "a" (4096UL) : "cc");
        }
        return rc;
}
#define __HAVE_ARCH_PMDP_GET_AND_CLEAR
static inline pmd_t pmdp_get_and_clear(struct mm_struct *mm,
                                       unsigned long address, pmd_t *pmdp)
{
        pmd_t pmd = *pmdp;

        __pmd_idte(address, pmdp);
        pmd_clear(pmdp);
        return pmd;
}
#define __HAVE_ARCH_PMDP_CLEAR_FLUSH
static inline pmd_t pmdp_clear_flush(struct vm_area_struct *vma,
                                     unsigned long address, pmd_t *pmdp)
{
        return pmdp_get_and_clear(vma->vm_mm, address, pmdp);
}
#define __HAVE_ARCH_PMDP_INVALIDATE
static inline void pmdp_invalidate(struct vm_area_struct *vma,
                                   unsigned long address, pmd_t *pmdp)
{
        __pmd_idte(address, pmdp);
}
#define __HAVE_ARCH_PMDP_SET_WRPROTECT
static inline void pmdp_set_wrprotect(struct mm_struct *mm,
                                      unsigned long address, pmd_t *pmdp)
{
        pmd_t pmd = *pmdp;

        if (pmd_write(pmd)) {
                __pmd_idte(address, pmdp);
                set_pmd_at(mm, address, pmdp, pmd_wrprotect(pmd));
        }
}
#define pfn_pmd(pfn, pgprot)    mk_pmd_phys(__pa((pfn) << PAGE_SHIFT), (pgprot))
#define mk_pmd(page, pgprot)    pfn_pmd(page_to_pfn(page), (pgprot))

static inline int pmd_trans_huge(pmd_t pmd)
{
        return pmd_val(pmd) & _SEGMENT_ENTRY_LARGE;
}

static inline int has_transparent_hugepage(void)
{
        return MACHINE_HAS_HPAGE ? 1 : 0;
}

static inline unsigned long pmd_pfn(pmd_t pmd)
{
        return pmd_val(pmd) >> PAGE_SHIFT;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
/*
 * 31 bit swap entry format:
 * A page-table entry has some bits we have to treat in a special way.
 * Bits 0, 20 and bit 23 have to be zero, otherwise a specification
 * exception will occur instead of a page translation exception. The
 * specification exception has the bad habit not to store necessary
 * information in the lowcore.
 * Bits 21, 22, 30 and 31 are used to indicate the page type.
 * A swap pte is indicated by bit pattern (pte & 0x603) == 0x402
 * This leaves the bits 1-19 and bits 24-29 to store type and offset.
 * We use the 5 bits from 25-29 for the type and the 20 bits from 1-19
 * plus 24 for the offset.
 * 0|     offset        |0110|o|type |00|
 * 0 0000000001111111111 2222 2 22222 33
 * 0 1234567890123456789 0123 4 56789 01
 *
 * 64 bit swap entry format:
 * A page-table entry has some bits we have to treat in a special way.
 * Bits 52 and bit 55 have to be zero, otherwise a specification
 * exception will occur instead of a page translation exception. The
 * specification exception has the bad habit not to store necessary
 * information in the lowcore.
 * Bits 53, 54, 62 and 63 are used to indicate the page type.
 * A swap pte is indicated by bit pattern (pte & 0x603) == 0x402
 * This leaves the bits 0-51 and bits 56-61 to store type and offset.
 * We use the 5 bits from 57-61 for the type and the 53 bits from 0-51
 * plus 56 for the offset.
 * |                     offset                       |0110|o|type |00|
 * 0000000000111111111122222222223333333333444444444455 5555 5 55566 66
 * 0123456789012345678901234567890123456789012345678901 2345 6 78901 23
 */
#ifndef CONFIG_64BIT
#define __SWP_OFFSET_MASK (~0UL >> 12)
#else /* CONFIG_64BIT */
#define __SWP_OFFSET_MASK (~0UL >> 11)
#endif /* CONFIG_64BIT */
static inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
{
        pte_t pte;
        offset &= __SWP_OFFSET_MASK;
        pte_val(pte) = _PAGE_INVALID | _PAGE_TYPE | ((type & 0x1f) << 2) |
                ((offset & 1UL) << 7) | ((offset & ~1UL) << 11);
        return pte;
}

#define __swp_type(entry)       (((entry).val >> 2) & 0x1f)
#define __swp_offset(entry)     (((entry).val >> 11) | (((entry).val >> 7) & 1))
#define __swp_entry(type,offset) ((swp_entry_t) { pte_val(mk_swap_pte((type),(offset))) })

#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)   ((pte_t) { (x).val })
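
/*
 * Illustration only: a swap entry round trip through the macros above.
 * The values are hypothetical; the round trip holds because the type and
 * offset fields written by mk_swap_pte do not overlap.
 */
#if 0   /* example, never compiled */
static void example_swap_roundtrip(void)
{
        swp_entry_t entry = __swp_entry(3, 0x1234);
        pte_t pte = __swp_entry_to_pte(entry);

        BUG_ON((pte_val(pte) & 0x603) != 0x402);        /* swap pte pattern */
        BUG_ON(__swp_type(entry) != 3);
        BUG_ON(__swp_offset(entry) != 0x1234);
}
#endif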
#ifndef CONFIG_64BIT
# define PTE_FILE_MAX_BITS      26
#else /* CONFIG_64BIT */
# define PTE_FILE_MAX_BITS      59
#endif /* CONFIG_64BIT */

#define pte_to_pgoff(__pte) \
        ((((__pte).pte >> 12) << 7) + (((__pte).pte >> 1) & 0x7f))

#define pgoff_to_pte(__off) \
        ((pte_t) { ((((__off) & 0x7f) << 1) + (((__off) >> 7) << 12)) \
                   | _PAGE_INVALID | _PAGE_PROTECT })
#endif /* !__ASSEMBLY__ */

#define kern_addr_valid(addr)   (1)

extern int vmem_add_mapping(unsigned long start, unsigned long size);
extern int vmem_remove_mapping(unsigned long start, unsigned long size);
extern int s390_enable_sie(void);

/*
 * No page table caches to initialise
 */
static inline void pgtable_cache_init(void) { }
static inline void check_pgt_cache(void) { }

#include <asm-generic/pgtable.h>

#endif /* _ASM_S390_PGTABLE_H */