Commit | Line | Data |
---|---|---|
3610cce8 MS |
1 | /* |
2 | * arch/s390/mm/pgtable.c | |
3 | * | |
4 | * Copyright IBM Corp. 2007 | |
5 | * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com> | |
6 | */ | |
7 | ||
8 | #include <linux/sched.h> | |
9 | #include <linux/kernel.h> | |
10 | #include <linux/errno.h> | |
11 | #include <linux/mm.h> | |
12 | #include <linux/swap.h> | |
13 | #include <linux/smp.h> | |
14 | #include <linux/highmem.h> | |
15 | #include <linux/slab.h> | |
16 | #include <linux/pagemap.h> | |
17 | #include <linux/spinlock.h> | |
18 | #include <linux/module.h> | |
19 | #include <linux/quicklist.h> | |
20 | ||
21 | #include <asm/system.h> | |
22 | #include <asm/pgtable.h> | |
23 | #include <asm/pgalloc.h> | |
24 | #include <asm/tlb.h> | |
25 | #include <asm/tlbflush.h> | |
26 | ||
#ifndef CONFIG_64BIT
/* 31-bit: crst tables are 2 pages; four 1K page tables fit in one 4K page. */
#define ALLOC_ORDER	1
#define TABLES_PER_PAGE	4
/* Low 4 bits of page->flags track which of the 4 fragments are in use. */
#define FRAG_MASK	15UL
/* Bits marking the shadow (noexec) half of each fragment pair — see
 * disable_noexec(); assumes 0b1010 selects every second fragment bit. */
#define SECOND_HALVES	10UL
#else
/* 64-bit: crst tables are 4 pages; two 2K page tables fit in one 4K page. */
#define ALLOC_ORDER	2
#define TABLES_PER_PAGE	2
/* Low 2 bits of page->flags track which of the 2 fragments are in use. */
#define FRAG_MASK	3UL
/* Bit marking the shadow (noexec) fragment — cleared by disable_noexec(). */
#define SECOND_HALVES	2UL
#endif
38 | ||
39 | unsigned long *crst_table_alloc(struct mm_struct *mm, int noexec) | |
40 | { | |
41 | struct page *page = alloc_pages(GFP_KERNEL, ALLOC_ORDER); | |
42 | ||
43 | if (!page) | |
44 | return NULL; | |
45 | page->index = 0; | |
46 | if (noexec) { | |
47 | struct page *shadow = alloc_pages(GFP_KERNEL, ALLOC_ORDER); | |
48 | if (!shadow) { | |
49 | __free_pages(page, ALLOC_ORDER); | |
50 | return NULL; | |
51 | } | |
52 | page->index = page_to_phys(shadow); | |
53 | } | |
146e4b3c MS |
54 | spin_lock(&mm->page_table_lock); |
55 | list_add(&page->lru, &mm->context.crst_list); | |
56 | spin_unlock(&mm->page_table_lock); | |
3610cce8 MS |
57 | return (unsigned long *) page_to_phys(page); |
58 | } | |
59 | ||
146e4b3c | 60 | void crst_table_free(struct mm_struct *mm, unsigned long *table) |
3610cce8 MS |
61 | { |
62 | unsigned long *shadow = get_shadow_table(table); | |
146e4b3c | 63 | struct page *page = virt_to_page(table); |
3610cce8 | 64 | |
146e4b3c MS |
65 | spin_lock(&mm->page_table_lock); |
66 | list_del(&page->lru); | |
67 | spin_unlock(&mm->page_table_lock); | |
3610cce8 MS |
68 | if (shadow) |
69 | free_pages((unsigned long) shadow, ALLOC_ORDER); | |
70 | free_pages((unsigned long) table, ALLOC_ORDER); | |
71 | } | |
72 | ||
/*
 * page table entry allocation/free routines.
 *
 * Page tables are sub-page "fragments": TABLES_PER_PAGE of them share
 * one 4K page, and the low bits of page->flags (FRAG_MASK) record
 * which fragments are taken.  Pages with free fragments sit at the
 * front of mm->context.pgtable_list; fully used pages are moved to
 * the tail.
 */
unsigned long *page_table_alloc(struct mm_struct *mm)
{
	struct page *page;
	unsigned long *table;
	unsigned long bits;

	/*
	 * With noexec a page table and its shadow are allocated as a
	 * pair, so two adjacent fragment bits (3UL) are claimed at once;
	 * otherwise a single bit (1UL) suffices.
	 */
	bits = mm->context.noexec ? 3UL : 1UL;
	spin_lock(&mm->page_table_lock);
	page = NULL;
	if (!list_empty(&mm->context.pgtable_list)) {
		page = list_first_entry(&mm->context.pgtable_list,
					struct page, lru);
		/* Front page completely used up: fall through to alloc. */
		if ((page->flags & FRAG_MASK) == ((1UL << TABLES_PER_PAGE) - 1))
			page = NULL;
	}
	if (!page) {
		/*
		 * Drop the lock across the page allocation; the new page
		 * is re-linked under the lock below.  A racing allocator
		 * may add its own page meanwhile — harmless, it only
		 * costs some fragmentation.
		 */
		spin_unlock(&mm->page_table_lock);
		page = alloc_page(GFP_KERNEL|__GFP_REPEAT);
		if (!page)
			return NULL;
		pgtable_page_ctor(page);
		page->flags &= ~FRAG_MASK;
		/* NOTE(review): table is a phys address used as a pointer —
		 * relies on the s390 kernel identity mapping. */
		table = (unsigned long *) page_to_phys(page);
		clear_table(table, _PAGE_TYPE_EMPTY, PAGE_SIZE);
		spin_lock(&mm->page_table_lock);
		list_add(&page->lru, &mm->context.pgtable_list);
	}
	table = (unsigned long *) page_to_phys(page);
	/* Walk the fragment bits to the first free slot; each fragment is
	 * 256 unsigned longs. */
	while (page->flags & bits) {
		table += 256;
		bits <<= 1;
	}
	page->flags |= bits;
	/* Page now full: park it at the tail so searches skip it. */
	if ((page->flags & FRAG_MASK) == ((1UL << TABLES_PER_PAGE) - 1))
		list_move_tail(&page->lru, &mm->context.pgtable_list);
	spin_unlock(&mm->page_table_lock);
	return table;
}
114 | ||
/*
 * Release a page table fragment obtained from page_table_alloc().
 * Clears the fragment's usage bit(s) in page->flags; the backing page
 * itself is only freed once all of its fragments are free.
 */
void page_table_free(struct mm_struct *mm, unsigned long *table)
{
	struct page *page;
	unsigned long bits;

	/* Same bit pattern as the allocation: a pair when noexec. */
	bits = mm->context.noexec ? 3UL : 1UL;
	/* Position the bit(s) at this fragment's slot within the page. */
	bits <<= (__pa(table) & (PAGE_SIZE - 1)) / 256 / sizeof(unsigned long);
	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
	spin_lock(&mm->page_table_lock);
	/* XOR clears the (known set) fragment bits. */
	page->flags ^= bits;
	if (page->flags & FRAG_MASK) {
		/* Page now has some free pgtable fragments. */
		list_move(&page->lru, &mm->context.pgtable_list);
		page = NULL;
	} else
		/* All fragments of the 4K page have been freed. */
		list_del(&page->lru);
	spin_unlock(&mm->page_table_lock);
	/* Free the fully emptied page outside the lock. */
	if (page) {
		pgtable_page_dtor(page);
		__free_page(page);
	}
}
3610cce8 | 138 | |
146e4b3c MS |
139 | void disable_noexec(struct mm_struct *mm, struct task_struct *tsk) |
140 | { | |
141 | struct page *page; | |
142 | ||
143 | spin_lock(&mm->page_table_lock); | |
144 | /* Free shadow region and segment tables. */ | |
145 | list_for_each_entry(page, &mm->context.crst_list, lru) | |
146 | if (page->index) { | |
147 | free_pages((unsigned long) page->index, ALLOC_ORDER); | |
148 | page->index = 0; | |
149 | } | |
150 | /* "Free" second halves of page tables. */ | |
151 | list_for_each_entry(page, &mm->context.pgtable_list, lru) | |
152 | page->flags &= ~SECOND_HALVES; | |
153 | spin_unlock(&mm->page_table_lock); | |
154 | mm->context.noexec = 0; | |
155 | update_mm(mm, tsk); | |
3610cce8 | 156 | } |