arch/sparc/kernel/unaligned_32.c
/*
 * unaligned.c: Unaligned load/store trap handling with special
 * cases for the kernel to do them more quickly.
 *
 * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
 * Copyright (C) 1996 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 */

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <asm/ptrace.h>
#include <asm/processor.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/perf_event.h>

enum direction {
        load,    /* ld, ldd, ldh, ldsh */
        store,   /* st, std, sth, stsh */
        both,    /* Swap, ldstub, etc. */
        fpload,
        fpstore,
        invalid,
};

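/*
 * The decode helpers below pick apart the trapping instruction word.
 * Per the SPARC V8 format-3 load/store encoding, op3 occupies bits
 * 24:19; bit 21 separates the store group from the load group, and
 * op3 == 0x0f is SWAP, which both loads and stores.
 */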
static inline enum direction decode_direction(unsigned int insn)
{
        unsigned long tmp = (insn >> 21) & 1;

        if(!tmp)
                return load;
        else {
                if(((insn>>19)&0x3f) == 15)
                        return both;
                else
                        return store;
        }
}

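/*
 * The access size comes from the low two bits of op3 (instruction
 * bits 20:19): 0 = word, 2 = half-word, 3 = double-word.  A value of
 * 1 would be a byte access, which can never be misaligned and so
 * should never reach this trap handler.
 */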
/* 8 = double-word, 4 = word, 2 = half-word */
static inline int decode_access_size(unsigned int insn)
{
        insn = (insn >> 19) & 3;

        if(!insn)
                return 4;
        else if(insn == 3)
                return 8;
        else if(insn == 2)
                return 2;
        else {
                printk("Impossible unaligned trap. insn=%08x\n", insn);
                die_if_kernel("Byte sized unaligned access?!?!", current->thread.kregs);
                return 4; /* just to keep gcc happy. */
        }
}

/* 0x400000 = signed, 0 = unsigned */
static inline int decode_signedness(unsigned int insn)
{
        return (insn & 0x400000);
}

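/*
 * If the instruction names a windowed register (%l0-%l7 or %i0-%i7,
 * i.e. reg >= 16), force the register windows out to the stack with a
 * burst of nested save/restore pairs, so that the in-memory window at
 * %fp is current before we read or write it.
 */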
static inline void maybe_flush_windows(unsigned int rs1, unsigned int rs2,
                                       unsigned int rd)
{
        if(rs2 >= 16 || rs1 >= 16 || rd >= 16) {
                /* Wheee... */
                __asm__ __volatile__("save %sp, -0x40, %sp\n\t"
                                     "save %sp, -0x40, %sp\n\t"
                                     "save %sp, -0x40, %sp\n\t"
                                     "save %sp, -0x40, %sp\n\t"
                                     "save %sp, -0x40, %sp\n\t"
                                     "save %sp, -0x40, %sp\n\t"
                                     "save %sp, -0x40, %sp\n\t"
                                     "restore; restore; restore; restore;\n\t"
                                     "restore; restore; restore;\n\t");
        }
}

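/*
 * Sign-extend the 13-bit simm13 field: shifting it up to bit 31 and
 * arithmetic-shifting back replicates bit 12 into the upper bits,
 * e.g. a low field of 0x1fff becomes -1, while 0x0fff stays 0x0fff.
 */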
static inline int sign_extend_imm13(int imm)
{
        return imm << 19 >> 19;
}

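/*
 * Fetch a register value from the trapping context: %g0 always reads
 * as zero, registers 1-15 (%g1-%o7) live in pt_regs, and registers
 * 16-31 (%l0-%i7) live in the register window saved at the frame
 * pointer.
 */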
static inline unsigned long fetch_reg(unsigned int reg, struct pt_regs *regs)
{
        struct reg_window32 *win;

        if(reg < 16)
                return (!reg ? 0 : regs->u_regs[reg]);

        /* Ho hum, the slightly complicated case. */
        win = (struct reg_window32 *) regs->u_regs[UREG_FP];
        return win->locals[reg - 16]; /* yes, I know what this does... */
}

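/*
 * Same as fetch_reg(), but the register window lives in user memory,
 * so reach it with get_user() and bail out on a bad or unaligned %fp.
 */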
static inline unsigned long safe_fetch_reg(unsigned int reg, struct pt_regs *regs)
{
        struct reg_window32 __user *win;
        unsigned long ret;

        if (reg < 16)
                return (!reg ? 0 : regs->u_regs[reg]);

        /* Ho hum, the slightly complicated case. */
        win = (struct reg_window32 __user *) regs->u_regs[UREG_FP];

        if ((unsigned long)win & 3)
                return -1;

        if (get_user(ret, &win->locals[reg - 16]))
                return -1;

        return ret;
}

static inline unsigned long *fetch_reg_addr(unsigned int reg, struct pt_regs *regs)
{
        struct reg_window32 *win;

        if(reg < 16)
                return &regs->u_regs[reg];
        win = (struct reg_window32 *) regs->u_regs[UREG_FP];
        return &win->locals[reg - 16];
}

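/*
 * The effective address is rs1 + simm13 when the immediate bit
 * (bit 13) is set, otherwise rs1 + rs2.
 */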
static unsigned long compute_effective_address(struct pt_regs *regs,
                                               unsigned int insn)
{
        unsigned int rs1 = (insn >> 14) & 0x1f;
        unsigned int rs2 = insn & 0x1f;
        unsigned int rd = (insn >> 25) & 0x1f;

        if(insn & 0x2000) {
                maybe_flush_windows(rs1, 0, rd);
                return (fetch_reg(rs1, regs) + sign_extend_imm13(insn));
        } else {
                maybe_flush_windows(rs1, rs2, rd);
                return (fetch_reg(rs1, regs) + fetch_reg(rs2, regs));
        }
}

unsigned long safe_compute_effective_address(struct pt_regs *regs,
                                             unsigned int insn)
{
        unsigned int rs1 = (insn >> 14) & 0x1f;
        unsigned int rs2 = insn & 0x1f;
        unsigned int rd = (insn >> 25) & 0x1f;

        if(insn & 0x2000) {
                maybe_flush_windows(rs1, 0, rd);
                return (safe_fetch_reg(rs1, regs) + sign_extend_imm13(insn));
        } else {
                maybe_flush_windows(rs1, rs2, rd);
                return (safe_fetch_reg(rs1, regs) + safe_fetch_reg(rs2, regs));
        }
}

/* This is just to make gcc think panic does return... */
static void unaligned_panic(char *str)
{
        panic(str);
}

/* una_asm.S */
extern int do_int_load(unsigned long *dest_reg, int size,
                       unsigned long *saddr, int is_signed);
extern int __do_int_store(unsigned long *dst_addr, int size,
                          unsigned long *src_val);

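/*
 * Store the named register to memory.  %g0 has no backing storage, so
 * a store of %g0 sources a zero buffer; for an 8-byte STD of %g0 the
 * second word still comes from %g1, matching the register-pair rule.
 */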
static int do_int_store(int reg_num, int size, unsigned long *dst_addr,
                        struct pt_regs *regs)
{
        unsigned long zero[2] = { 0, 0 };
        unsigned long *src_val;

        if (reg_num)
                src_val = fetch_reg_addr(reg_num, regs);
        else {
                src_val = &zero[0];
                if (size == 8)
                        zero[1] = fetch_reg(1, regs);
        }
        return __do_int_store(dst_addr, size, src_val);
}

extern void smp_capture(void);
extern void smp_release(void);

static inline void advance(struct pt_regs *regs)
{
        regs->pc = regs->npc;
        regs->npc += 4;
}

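/* Instruction bit 24 selects the FP/coprocessor load/store group of op3. */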
static inline int floating_point_load_or_store_p(unsigned int insn)
{
        return (insn >> 24) & 1;
}

static inline int ok_for_kernel(unsigned int insn)
{
        return !floating_point_load_or_store_p(insn);
}

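/*
 * A kernel-mode access faulted while being emulated.  If the faulting
 * PC has an exception-table entry, branch to its fixup handler (with
 * %g2 restored as the fixup expects); otherwise this is a genuine
 * kernel bug, so report it and die.
 */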
static void kernel_mna_trap_fault(struct pt_regs *regs, unsigned int insn)
{
        unsigned long g2 = regs->u_regs [UREG_G2];
        unsigned long fixup = search_extables_range(regs->pc, &g2);

        if (!fixup) {
                unsigned long address = compute_effective_address(regs, insn);
                if(address < PAGE_SIZE) {
                        printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference in mna handler");
                } else
                        printk(KERN_ALERT "Unable to handle kernel paging request in mna handler");
                printk(KERN_ALERT " at virtual address %08lx\n",address);
                printk(KERN_ALERT "current->{mm,active_mm}->context = %08lx\n",
                        (current->mm ? current->mm->context :
                        current->active_mm->context));
                printk(KERN_ALERT "current->{mm,active_mm}->pgd = %08lx\n",
                        (current->mm ? (unsigned long) current->mm->pgd :
                        (unsigned long) current->active_mm->pgd));
                die_if_kernel("Oops", regs);
                /* Not reached */
        }
        regs->pc = fixup;
        regs->npc = regs->pc + 4;
        regs->u_regs [UREG_G2] = g2;
}

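/*
 * Entry point for unaligned accesses taken in kernel mode: decode the
 * instruction, emulate the load or store with the helpers in una_asm.S,
 * and either step past the instruction or take the exception fixup path.
 */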
asmlinkage void kernel_unaligned_trap(struct pt_regs *regs, unsigned int insn)
{
        enum direction dir = decode_direction(insn);
        int size = decode_access_size(insn);

        if(!ok_for_kernel(insn) || dir == both) {
                printk("Unsupported unaligned load/store trap for kernel at <%08lx>.\n",
                       regs->pc);
                unaligned_panic("Wheee. Kernel does fpu/atomic unaligned load/store.");
        } else {
                unsigned long addr = compute_effective_address(regs, insn);
                int err;

                perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, 0, regs, addr);
                switch (dir) {
                case load:
                        err = do_int_load(fetch_reg_addr(((insn>>25)&0x1f),
                                                         regs),
                                          size, (unsigned long *) addr,
                                          decode_signedness(insn));
                        break;

                case store:
                        err = do_int_store(((insn>>25)&0x1f), size,
                                           (unsigned long *) addr, regs);
                        break;
                default:
                        panic("Impossible kernel unaligned trap.");
                        /* Not reached... */
                }
                if (err)
                        kernel_mna_trap_fault(regs, insn);
                else
                        advance(regs);
        }
}

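/*
 * Before emulating a user-mode access, check that the PC/NPC are
 * word-aligned and that every register-window slot the instruction
 * names can be accessed in the user's stack frame.
 */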
static inline int ok_for_user(struct pt_regs *regs, unsigned int insn,
                              enum direction dir)
{
        unsigned int reg;
        int check = (dir == load) ? VERIFY_READ : VERIFY_WRITE;
        int size = ((insn >> 19) & 3) == 3 ? 8 : 4;

        if ((regs->pc | regs->npc) & 3)
                return 0;

        /* Must access_ok() in all the necessary places. */
#define WINREG_ADDR(regnum) \
        ((void __user *)(((unsigned long *)regs->u_regs[UREG_FP])+(regnum)))

        reg = (insn >> 25) & 0x1f;
        if (reg >= 16) {
                if (!access_ok(check, WINREG_ADDR(reg - 16), size))
                        return -EFAULT;
        }
        reg = (insn >> 14) & 0x1f;
        if (reg >= 16) {
                if (!access_ok(check, WINREG_ADDR(reg - 16), size))
                        return -EFAULT;
        }
        if (!(insn & 0x2000)) {
                reg = (insn & 0x1f);
                if (reg >= 16) {
                        if (!access_ok(check, WINREG_ADDR(reg - 16), size))
                                return -EFAULT;
                }
        }
#undef WINREG_ADDR
        return 0;
}

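/* Deliver SIGBUS/BUS_ADRALN for an unaligned user access we will not fix up. */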
static void user_mna_trap_fault(struct pt_regs *regs, unsigned int insn)
{
        siginfo_t info;

        info.si_signo = SIGBUS;
        info.si_errno = 0;
        info.si_code = BUS_ADRALN;
        info.si_addr = (void __user *)safe_compute_effective_address(regs, insn);
        info.si_trapno = 0;
        send_sig_info(SIGBUS, &info, current);
}

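/*
 * Entry point for unaligned accesses taken in user mode.  Emulation is
 * only attempted when the task has opted in via SPARC_FLAG_UNALIGNED
 * and the trapping word really is a format-3 memory instruction (top
 * two bits == 3); anything else gets SIGBUS.
 */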
asmlinkage void user_unaligned_trap(struct pt_regs *regs, unsigned int insn)
{
        enum direction dir;

        lock_kernel();
        if(!(current->thread.flags & SPARC_FLAG_UNALIGNED) ||
           (((insn >> 30) & 3) != 3))
                goto kill_user;
        dir = decode_direction(insn);
        if(!ok_for_user(regs, insn, dir)) {
                goto kill_user;
        } else {
                int err, size = decode_access_size(insn);
                unsigned long addr;

                if(floating_point_load_or_store_p(insn)) {
                        printk("User FPU load/store unaligned unsupported.\n");
                        goto kill_user;
                }

                addr = compute_effective_address(regs, insn);
                perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, 0, regs, addr);
                switch(dir) {
                case load:
                        err = do_int_load(fetch_reg_addr(((insn>>25)&0x1f),
                                                         regs),
                                          size, (unsigned long *) addr,
                                          decode_signedness(insn));
                        break;

                case store:
                        err = do_int_store(((insn>>25)&0x1f), size,
                                           (unsigned long *) addr, regs);
                        break;

                case both:
                        /*
                         * This was supported in 2.4. However, we question
                         * the value of SWAP instruction across word boundaries.
                         */
                        printk("Unaligned SWAP unsupported.\n");
                        err = -EFAULT;
                        break;

                default:
                        unaligned_panic("Impossible user unaligned trap.");
                        goto out;
                }
                if (err)
                        goto kill_user;
                else
                        advance(regs);
                goto out;
        }

kill_user:
        user_mna_trap_fault(regs, insn);
out:
        unlock_kernel();
}