arch/tile: properly flush the I$ when unloading kernel modules
arch/tile/kernel/module.c
/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for
 * more details.
 *
 * Based on i386 version, copyright (C) 2001 Rusty Russell.
 */

#include <linux/moduleloader.h>
#include <linux/elf.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <asm/opcode-tile.h>
#include <asm/pgtable.h>
#include <asm/homecache.h>

#ifdef __tilegx__
# define Elf_Rela Elf64_Rela
# define ELF_R_SYM ELF64_R_SYM
# define ELF_R_TYPE ELF64_R_TYPE
#else
# define Elf_Rela Elf32_Rela
# define ELF_R_SYM ELF32_R_SYM
# define ELF_R_TYPE ELF32_R_TYPE
#endif

#ifdef MODULE_DEBUG
#define DEBUGP printk
#else
#define DEBUGP(fmt...)
#endif

/*
 * Allocate some address space in the range MEM_MODULE_START to
 * MEM_MODULE_END and populate it with memory.
 */
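/*
 * Pages are allocated individually (they may come from highmem) and
 * then mapped together into the module address range with kernel RWX
 * protection, since the loader writes the module text that the
 * processor later executes from this same mapping.
 */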
void *module_alloc(unsigned long size)
{
        struct page **pages;
        pgprot_t prot_rwx = __pgprot(_PAGE_KERNEL | _PAGE_KERNEL_EXEC);
        struct vm_struct *area;
        int i = 0;
        int npages;

        if (size == 0)
                return NULL;
        npages = (size + PAGE_SIZE - 1) / PAGE_SIZE;
        pages = kmalloc(npages * sizeof(struct page *), GFP_KERNEL);
        if (pages == NULL)
                return NULL;
        for (; i < npages; ++i) {
                pages[i] = alloc_page(GFP_KERNEL | __GFP_HIGHMEM);
                if (!pages[i])
                        goto error;
        }

        area = __get_vm_area(size, VM_ALLOC, MEM_MODULE_START, MEM_MODULE_END);
        if (!area)
                goto error;

        if (map_vm_area(area, prot_rwx, &pages)) {
                vunmap(area->addr);
                goto error;
        }

        return area->addr;

error:
        while (--i >= 0)
                __free_page(pages[i]);
        kfree(pages);
        return NULL;
}


/* Free memory returned from module_alloc */
void module_free(struct module *mod, void *module_region)
{
        vfree(module_region);

        /*
         * Globally flush the L1 icache: the address range just freed
         * may be reused for a new module, and no CPU should be able to
         * execute stale instructions still cached for those addresses.
         */
        flush_remote(0, HV_FLUSH_EVICT_L1I, cpu_online_mask,
                     0, 0, 0, NULL, NULL, 0);

        /*
         * FIXME: If module_region == mod->module_init, trim exception
         * table entries.
         */
}

/* We don't need anything special. */
int module_frob_arch_sections(Elf_Ehdr *hdr,
                              Elf_Shdr *sechdrs,
                              char *secstrings,
                              struct module *mod)
{
        return 0;
}

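/*
 * Modules for tile are expected to carry only RELA (explicit-addend)
 * relocation sections, so a plain REL section is rejected.
 */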
int apply_relocate(Elf_Shdr *sechdrs,
                   const char *strtab,
                   unsigned int symindex,
                   unsigned int relsec,
                   struct module *me)
{
        pr_err("module %s: .rel relocation unsupported\n", me->name);
        return -ENOEXEC;
}

#ifdef __tilegx__
/*
 * Validate that the high 16 bits of "value" are just the sign-extension
 * of the low 48 bits.
 */
static int validate_hw2_last(long value, struct module *me)
{
        if (((value << 16) >> 16) != value) {
                pr_warning("module %s: Out of range HW2_LAST value %#lx\n",
                           me->name, value);
                return 0;
        }
        return 1;
}

/*
 * Validate that "value" isn't too big to hold in a JumpOff relocation.
 */
static int validate_jumpoff(long value)
{
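        /*
         * create_JumpOff_X1(-1) sets every bit of the JumpOff operand
         * field; counting the leading zeros of the extracted field gives
         * the number of bits above it, so shifting left and then
         * arithmetically right by that amount sign-extends the field.
         * The extracted, sign-extended offset equals "value" only if
         * "value" actually fits in the field.
         */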
        /* Determine size of jump offset. */
        int shift = __builtin_clzl(get_JumpOff_X1(create_JumpOff_X1(-1)));

        /* Check to see if it fits into the relocation slot. */
        long f = get_JumpOff_X1(create_JumpOff_X1(value));
        f = (f << shift) >> shift;

        return f == value;
}
#endif

int apply_relocate_add(Elf_Shdr *sechdrs,
                       const char *strtab,
                       unsigned int symindex,
                       unsigned int relsec,
                       struct module *me)
{
        unsigned int i;
        Elf_Rela *rel = (void *)sechdrs[relsec].sh_addr;
        Elf_Sym *sym;
        u64 *location;
        unsigned long value;

        DEBUGP("Applying relocate section %u to %u\n", relsec,
               sechdrs[relsec].sh_info);
        for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
                /* This is where to make the change */
                location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
                        + rel[i].r_offset;
                /*
                 * This is the symbol it is referring to.
                 * Note that all undefined symbols have been resolved.
                 */
                sym = (Elf_Sym *)sechdrs[symindex].sh_addr
                        + ELF_R_SYM(rel[i].r_info);
                value = sym->st_value + rel[i].r_addend;

                switch (ELF_R_TYPE(rel[i].r_info)) {

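/*
 * create_Xxx(-1) yields a bundle with every bit of the Xxx operand
 * field set, so it doubles as a field mask: MUNGE clears that field in
 * the existing instruction bundle and ORs in the bits encoding "value".
 */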
#define MUNGE(func) (*location = ((*location & ~func(-1)) | func(value)))

#ifndef __tilegx__
                case R_TILE_32:
                        *(uint32_t *)location = value;
                        break;
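                /*
                 * "HA" holds the high 16 bits adjusted (hence +0x8000)
                 * so that (HA << 16) plus the sign-extended low half
                 * reconstructs the original value; "LO" holds the low
                 * 16 bits unchanged.
                 */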
                case R_TILE_IMM16_X0_HA:
                        value = (value + 0x8000) >> 16;
                        /*FALLTHROUGH*/
                case R_TILE_IMM16_X0_LO:
                        MUNGE(create_Imm16_X0);
                        break;
                case R_TILE_IMM16_X1_HA:
                        value = (value + 0x8000) >> 16;
                        /*FALLTHROUGH*/
                case R_TILE_IMM16_X1_LO:
                        MUNGE(create_Imm16_X1);
                        break;
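                /*
                 * Jump offsets are encoded in units of 8-byte instruction
                 * bundles, so the byte displacement is arithmetically
                 * shifted right by 3 below.
                 */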
                case R_TILE_JOFFLONG_X1:
                        value -= (unsigned long) location; /* pc-relative */
                        value = (long) value >> 3; /* count by instrs */
                        MUNGE(create_JOffLong_X1);
                        break;
#else
                case R_TILEGX_64:
                        *location = value;
                        break;
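                /*
                 * HW0, HW1, and HW2_LAST each encode one 16-bit chunk of
                 * the value (bits 0-15, 16-31, and 32-47 respectively);
                 * the cases below deliberately fall through, shifting
                 * "value" down 16 bits per level before the final MUNGE.
                 */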
                case R_TILEGX_IMM16_X0_HW2_LAST:
                        if (!validate_hw2_last(value, me))
                                return -ENOEXEC;
                        value >>= 16;
                        /*FALLTHROUGH*/
                case R_TILEGX_IMM16_X0_HW1:
                        value >>= 16;
                        /*FALLTHROUGH*/
                case R_TILEGX_IMM16_X0_HW0:
                        MUNGE(create_Imm16_X0);
                        break;
                case R_TILEGX_IMM16_X1_HW2_LAST:
                        if (!validate_hw2_last(value, me))
                                return -ENOEXEC;
                        value >>= 16;
                        /*FALLTHROUGH*/
                case R_TILEGX_IMM16_X1_HW1:
                        value >>= 16;
                        /*FALLTHROUGH*/
                case R_TILEGX_IMM16_X1_HW0:
                        MUNGE(create_Imm16_X1);
                        break;
                case R_TILEGX_JUMPOFF_X1:
                        value -= (unsigned long) location; /* pc-relative */
                        value = (long) value >> 3; /* count by instrs */
                        if (!validate_jumpoff(value)) {
                                pr_warning("module %s: Out of range jump to"
                                           " %#llx at %#llx (%p)\n", me->name,
                                           sym->st_value + rel[i].r_addend,
                                           rel[i].r_offset, location);
                                return -ENOEXEC;
                        }
                        MUNGE(create_JumpOff_X1);
                        break;
#endif

#undef MUNGE

                default:
                        pr_err("module %s: Unknown relocation: %d\n",
                               me->name, (int) ELF_R_TYPE(rel[i].r_info));
                        return -ENOEXEC;
                }
        }
        return 0;
}

int module_finalize(const Elf_Ehdr *hdr,
                    const Elf_Shdr *sechdrs,
                    struct module *me)
{
        /* FIXME: perhaps remove the "writable" bit from the TLB? */
        return 0;
}

void module_arch_cleanup(struct module *mod)
{
}