[deliverable/linux.git] / include / asm-s390 / pgalloc.h
(gitweb capture; the commit-title line originally shown here referred to an unrelated UDP change)
1 /*
2 * include/asm-s390/pgalloc.h
3 *
4 * S390 version
5 * Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
6 * Author(s): Hartmut Penner (hp@de.ibm.com)
7 * Martin Schwidefsky (schwidefsky@de.ibm.com)
8 *
9 * Derived from "include/asm-i386/pgalloc.h"
10 * Copyright (C) 1994 Linus Torvalds
11 */
12
13 #ifndef _S390_PGALLOC_H
14 #define _S390_PGALLOC_H
15
16 #include <linux/threads.h>
17 #include <linux/gfp.h>
18 #include <linux/mm.h>
19
/* s390 keeps no quicklist/page-table cache, so there is nothing to trim. */
#define check_pgt_cache() do {} while (0)

/* Region/segment (crst) table allocation -- implemented elsewhere in arch code. */
unsigned long *crst_table_alloc(struct mm_struct *, int);
void crst_table_free(struct mm_struct *, unsigned long *);

/* Lowest-level (pte) page table allocation, plus the noexec-shadow teardown. */
unsigned long *page_table_alloc(struct mm_struct *);
void page_table_free(struct mm_struct *, unsigned long *);
void disable_noexec(struct mm_struct *, struct task_struct *);
28
/*
 * Fill n bytes at s with the entry value val, replicated every word.
 *
 * Works by seeding the first word, using one overlapping mvc to
 * propagate it through the first 256-byte chunk (destination overlaps
 * source by one word, so the seed ripples forward), then copying that
 * chunk over the remaining (n/256 - 1) chunks in a brct loop.
 *
 * NOTE(review): n is assumed to be a non-zero multiple of 256 -- all
 * callers in this file pass full table sizes; verify before reusing.
 */
static inline void clear_table(unsigned long *s, unsigned long val, size_t n)
{
	*s = val;
	n = (n / 256) - 1;
	asm volatile(
#ifdef CONFIG_64BIT
		/* 64-bit: word is 8 bytes, so replicate over the next 248 */
		" mvc 8(248,%0),0(%0)\n"
#else
		/* 31-bit: word is 4 bytes, so replicate over the next 252 */
		" mvc 4(252,%0),0(%0)\n"
#endif
		"0: mvc 256(256,%0),0(%0)\n"
		" la %0,256(%0)\n"
		" brct %1,0b\n"
		: "+a" (s), "+d" (n));
}
44
/*
 * Initialize a 2048-entry crst (region/segment) table with the given
 * empty-entry value; if a shadow table exists, initialize it the same way.
 */
static inline void crst_table_init(unsigned long *crst, unsigned long entry)
{
	unsigned long *shadow;

	clear_table(crst, entry, 2048 * sizeof(unsigned long));
	shadow = get_shadow_table(crst);
	if (shadow)
		clear_table(shadow, entry, 2048 * sizeof(unsigned long));
}
52
53 #ifndef __s390x__
54
/*
 * 31-bit: a fresh top-level table entry is always an empty segment
 * table entry (there are no region tables below the 2 GB limit).
 */
static inline unsigned long pgd_entry_type(struct mm_struct *mm)
{
	return _SEGMENT_ENTRY_EMPTY;
}
59
/*
 * 31-bit: the pud and pmd levels are folded into the pgd, so these
 * levels must never be allocated or populated on their own -- any
 * call here is a bug. The (pud_t *)2 / (pmd_t *)2 return values are
 * deliberately poisoned non-NULL pointers.
 */
#define pud_alloc_one(mm,address) ({ BUG(); ((pud_t *)2); })
#define pud_free(mm, x) do { } while (0)

#define pmd_alloc_one(mm,address) ({ BUG(); ((pmd_t *)2); })
#define pmd_free(mm, x) do { } while (0)

#define pgd_populate(mm, pgd, pud) BUG()
#define pgd_populate_kernel(mm, pgd, pud) BUG()

#define pud_populate(mm, pud, pmd) BUG()
#define pud_populate_kernel(mm, pud, pmd) BUG()
71
72 #else /* __s390x__ */
73
74 static inline unsigned long pgd_entry_type(struct mm_struct *mm)
75 {
76 if (mm->context.asce_limit <= (1UL << 31))
77 return _SEGMENT_ENTRY_EMPTY;
78 if (mm->context.asce_limit <= (1UL << 42))
79 return _REGION3_ENTRY_EMPTY;
80 return _REGION2_ENTRY_EMPTY;
81 }
82
/* Grow/shrink the page-table hierarchy to reach the given address limit. */
int crst_table_upgrade(struct mm_struct *, unsigned long limit);
void crst_table_downgrade(struct mm_struct *, unsigned long limit);
85
86 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long address)
87 {
88 unsigned long *table = crst_table_alloc(mm, mm->context.noexec);
89 if (table)
90 crst_table_init(table, _REGION3_ENTRY_EMPTY);
91 return (pud_t *) table;
92 }
93 #define pud_free(mm, pud) crst_table_free(mm, (unsigned long *) pud)
94
95 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long vmaddr)
96 {
97 unsigned long *table = crst_table_alloc(mm, mm->context.noexec);
98 if (table)
99 crst_table_init(table, _SEGMENT_ENTRY_EMPTY);
100 return (pmd_t *) table;
101 }
102 #define pmd_free(mm, pmd) crst_table_free(mm, (unsigned long *) pmd)
103
/* Point a pgd entry at the physical address of the given pud table. */
static inline void pgd_populate_kernel(struct mm_struct *mm,
				pgd_t *pgd, pud_t *pud)
{
	pgd_val(*pgd) = _REGION2_ENTRY | __pa(pud);
}
109
110 static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
111 {
112 pgd_populate_kernel(mm, pgd, pud);
113 if (mm->context.noexec) {
114 pgd = get_shadow_table(pgd);
115 pud = get_shadow_table(pud);
116 pgd_populate_kernel(mm, pgd, pud);
117 }
118 }
119
/* Point a pud entry at the physical address of the given pmd table. */
static inline void pud_populate_kernel(struct mm_struct *mm,
				pud_t *pud, pmd_t *pmd)
{
	pud_val(*pud) = _REGION3_ENTRY | __pa(pmd);
}
125
126 static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
127 {
128 pud_populate_kernel(mm, pud, pmd);
129 if (mm->context.noexec) {
130 pud = get_shadow_table(pud);
131 pmd = get_shadow_table(pmd);
132 pud_populate_kernel(mm, pud, pmd);
133 }
134 }
135
136 #endif /* __s390x__ */
137
/*
 * Allocate the top-level page table for a new mm and set up the
 * per-mm lists that track its crst and pte tables. May return NULL
 * (crst_table_alloc can fail).
 */
static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
	INIT_LIST_HEAD(&mm->context.crst_list);
	INIT_LIST_HEAD(&mm->context.pgtable_list);
	return (pgd_t *) crst_table_alloc(mm, s390_noexec);
}
#define pgd_free(mm, pgd) crst_table_free(mm, (unsigned long *) pgd)
145
/*
 * Point a pmd entry at the physical address of the given pte table.
 * (Uses '+' where the pgd/pud helpers use '|'; equivalent here since
 * the table address is aligned above the entry's flag bits.)
 */
static inline void pmd_populate_kernel(struct mm_struct *mm,
				pmd_t *pmd, pte_t *pte)
{
	pmd_val(*pmd) = _SEGMENT_ENTRY + __pa(pte);
}
151
152 static inline void pmd_populate(struct mm_struct *mm,
153 pmd_t *pmd, pgtable_t pte)
154 {
155 pmd_populate_kernel(mm, pmd, pte);
156 if (mm->context.noexec) {
157 pmd = get_shadow_table(pmd);
158 pmd_populate_kernel(mm, pmd, pte + PTRS_PER_PTE);
159 }
160 }
161
/*
 * Recover the pte-table pointer from a pmd entry: mask off the low
 * status bits with the (negated) table size, which is a power of two.
 */
#define pmd_pgtable(pmd) \
	(pgtable_t)(pmd_val(pmd) & -sizeof(pte_t)*PTRS_PER_PTE)

/*
 * page table entry allocation/free routines.
 * Kernel and user variants are identical on s390; both go through
 * the common page_table_alloc/page_table_free implementation.
 */
#define pte_alloc_one_kernel(mm, vmaddr) ((pte_t *) page_table_alloc(mm))
#define pte_alloc_one(mm, vmaddr) ((pte_t *) page_table_alloc(mm))

#define pte_free_kernel(mm, pte) page_table_free(mm, (unsigned long *) pte)
#define pte_free(mm, pte) page_table_free(mm, (unsigned long *) pte)
173
174 #endif /* _S390_PGALLOC_H */
This page took 0.053057 seconds and 5 git commands to generate.