arch/sparc/kernel/unaligned_64.c
/*
 * unaligned.c: Unaligned load/store trap handling with special
 *              cases for the kernel to do them more quickly.
 *
 * Copyright (C) 1996,2008 David S. Miller (davem@davemloft.net)
 * Copyright (C) 1996,1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 */


#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <asm/asi.h>
#include <asm/ptrace.h>
#include <asm/pstate.h>
#include <asm/processor.h>
#include <asm/uaccess.h>
#include <linux/smp.h>
#include <linux/bitops.h>
#include <linux/perf_event.h>
#include <linux/ratelimit.h>
#include <linux/context_tracking.h>
#include <asm/fpumacro.h>
#include <asm/cacheflush.h>

#include "entry.h"

enum direction {
	load,    /* ld, ldd, ldh, ldsh */
	store,   /* st, std, sth, stsh */
	both,    /* Swap, ldstub, cas, ... */
	fpld,
	fpst,
	invalid,
};

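/* Classify the trapping instruction: bit 21 of the opcode separates
 * loads from stores, and a low op3 nibble of 0xf (the swap-style
 * instructions) is treated as a read-modify-write ("both").
 */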
static inline enum direction decode_direction(unsigned int insn)
{
	unsigned long tmp = (insn >> 21) & 1;

	if (!tmp)
		return load;
	else {
		switch ((insn>>19)&0xf) {
		case 15: /* swap* */
			return both;
		default:
			return store;
		}
	}
}

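/* Decode the access width from the opcode.  Note that ldd/std are
 * reported as 16 so callers treat them as a pair of 32-bit registers,
 * even though the memory access itself is only 8 bytes wide.
 */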
/* 16 = double-word, 8 = extra-word, 4 = word, 2 = half-word */
static inline int decode_access_size(struct pt_regs *regs, unsigned int insn)
{
	unsigned int tmp;

	tmp = ((insn >> 19) & 0xf);
	if (tmp == 11 || tmp == 14) /* ldx/stx */
		return 8;
	tmp &= 3;
	if (!tmp)
		return 4;
	else if (tmp == 3)
		return 16;	/* ldd/std - Although it is actually 8 */
	else if (tmp == 2)
		return 2;
	else {
		printk("Impossible unaligned trap. insn=%08x\n", insn);
		die_if_kernel("Byte sized unaligned access?!?!", regs);

		/* GCC should never warn that control reaches the end
		 * of this function without returning a value because
		 * die_if_kernel() is marked with attribute 'noreturn'.
		 * Alas, some versions do...
		 */

		return 0;
	}
}

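/* Work out which address space identifier the access uses: alternate
 * space forms take either the immediate ASI field or, when the i bit
 * is set, the %asi value cached in TSTATE; everything else gets the
 * default primary ASI.
 */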
static inline int decode_asi(unsigned int insn, struct pt_regs *regs)
{
	if (insn & 0x800000) {
		if (insn & 0x2000)
			return (unsigned char)(regs->tstate >> 24);	/* %asi */
		else
			return (unsigned char)(insn >> 5);	/* imm_asi */
	} else
		return ASI_P;
}

/* 0x400000 = signed, 0 = unsigned */
static inline int decode_signedness(unsigned int insn)
{
	return (insn & 0x400000);
}

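/* Registers %l0-%i7 (numbers 16-31) live in the current register
 * window, which may still be on the CPU.  Spill the windows to the
 * stack before the emulation code accesses them through memory.
 */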
static inline void maybe_flush_windows(unsigned int rs1, unsigned int rs2,
					unsigned int rd, int from_kernel)
{
	if (rs2 >= 16 || rs1 >= 16 || rd >= 16) {
		if (from_kernel != 0)
			__asm__ __volatile__("flushw");
		else
			flushw_user();
	}
}

static inline long sign_extend_imm13(long imm)
{
	return imm << 51 >> 51;
}

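/* Fetch a register value for the trapped context.  %g0 reads as zero,
 * globals and outs come straight from pt_regs, while locals and ins
 * are read from the register window saved on the stack, honouring the
 * 32-bit vs. 64-bit (STACK_BIAS) user stack frame layouts.
 */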
static unsigned long fetch_reg(unsigned int reg, struct pt_regs *regs)
{
	unsigned long value, fp;

	if (reg < 16)
		return (!reg ? 0 : regs->u_regs[reg]);

	fp = regs->u_regs[UREG_FP];

	if (regs->tstate & TSTATE_PRIV) {
		struct reg_window *win;
		win = (struct reg_window *)(fp + STACK_BIAS);
		value = win->locals[reg - 16];
	} else if (!test_thread_64bit_stack(fp)) {
		struct reg_window32 __user *win32;
		win32 = (struct reg_window32 __user *)((unsigned long)((u32)fp));
		get_user(value, &win32->locals[reg - 16]);
	} else {
		struct reg_window __user *win;
		win = (struct reg_window __user *)(fp + STACK_BIAS);
		get_user(value, &win->locals[reg - 16]);
	}
	return value;
}

static unsigned long *fetch_reg_addr(unsigned int reg, struct pt_regs *regs)
{
	unsigned long fp;

	if (reg < 16)
		return &regs->u_regs[reg];

	fp = regs->u_regs[UREG_FP];

	if (regs->tstate & TSTATE_PRIV) {
		struct reg_window *win;
		win = (struct reg_window *)(fp + STACK_BIAS);
		return &win->locals[reg - 16];
	} else if (!test_thread_64bit_stack(fp)) {
		struct reg_window32 *win32;
		win32 = (struct reg_window32 *)((unsigned long)((u32)fp));
		return (unsigned long *)&win32->locals[reg - 16];
	} else {
		struct reg_window *win;
		win = (struct reg_window *)(fp + STACK_BIAS);
		return &win->locals[reg - 16];
	}
}

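/* Recompute the memory address the faulting instruction was using:
 * rs1 plus the sign-extended 13-bit immediate when the i bit is set,
 * otherwise rs1 plus rs2.
 */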
unsigned long compute_effective_address(struct pt_regs *regs,
					unsigned int insn, unsigned int rd)
{
	unsigned int rs1 = (insn >> 14) & 0x1f;
	unsigned int rs2 = insn & 0x1f;
	int from_kernel = (regs->tstate & TSTATE_PRIV) != 0;

	if (insn & 0x2000) {
		maybe_flush_windows(rs1, 0, rd, from_kernel);
		return (fetch_reg(rs1, regs) + sign_extend_imm13(insn));
	} else {
		maybe_flush_windows(rs1, rs2, rd, from_kernel);
		return (fetch_reg(rs1, regs) + fetch_reg(rs2, regs));
	}
}

/* This is just to make gcc think die_if_kernel does return... */
static void __used unaligned_panic(char *str, struct pt_regs *regs)
{
	die_if_kernel(str, regs);
}

extern int do_int_load(unsigned long *dest_reg, int size,
		       unsigned long *saddr, int is_signed, int asi);

extern int __do_int_store(unsigned long *dst_addr, int size,
			  unsigned long src_val, int asi);

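/* Integer store emulation.  A reported size of 16 (std) is folded
 * into one 64-bit value built from the even/odd register pair, and
 * the value is byte-swapped when a little-endian ASI was rewritten
 * to its big-endian equivalent.
 */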
static inline int do_int_store(int reg_num, int size, unsigned long *dst_addr,
			       struct pt_regs *regs, int asi, int orig_asi)
{
	unsigned long zero = 0;
	unsigned long *src_val_p = &zero;
	unsigned long src_val;

	if (size == 16) {
		size = 8;
		zero = (((long)(reg_num ?
			(unsigned)fetch_reg(reg_num, regs) : 0)) << 32) |
			(unsigned)fetch_reg(reg_num + 1, regs);
	} else if (reg_num) {
		src_val_p = fetch_reg_addr(reg_num, regs);
	}
	src_val = *src_val_p;
	if (unlikely(asi != orig_asi)) {
		switch (size) {
		case 2:
			src_val = swab16(src_val);
			break;
		case 4:
			src_val = swab32(src_val);
			break;
		case 8:
			src_val = swab64(src_val);
			break;
		case 16:
		default:
			BUG();
			break;
		}
	}
	return __do_int_store(dst_addr, size, src_val, asi);
}

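/* Step the trap PC past the instruction we just emulated, keeping the
 * addresses within 32 bits for compat tasks.
 */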
static inline void advance(struct pt_regs *regs)
{
	regs->tpc = regs->tnpc;
	regs->tnpc += 4;
	if (test_thread_flag(TIF_32BIT)) {
		regs->tpc &= 0xffffffff;
		regs->tnpc &= 0xffffffff;
	}
}

static inline int floating_point_load_or_store_p(unsigned int insn)
{
	return (insn >> 24) & 1;
}

static inline int ok_for_kernel(unsigned int insn)
{
	return !floating_point_load_or_store_p(insn);
}

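/* The emulated kernel access itself faulted.  If the trapping PC has
 * an exception table entry, branch to its fixup (optionally forcing
 * the TSTATE ASI back to ASI_AIUS); otherwise report it like an
 * ordinary unhandled kernel fault and die.
 */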
static void kernel_mna_trap_fault(int fixup_tstate_asi)
{
	struct pt_regs *regs = current_thread_info()->kern_una_regs;
	unsigned int insn = current_thread_info()->kern_una_insn;
	const struct exception_table_entry *entry;

	entry = search_exception_tables(regs->tpc);
	if (!entry) {
		unsigned long address;

		address = compute_effective_address(regs, insn,
						    ((insn >> 25) & 0x1f));
		if (address < PAGE_SIZE) {
			printk(KERN_ALERT "Unable to handle kernel NULL "
			       "pointer dereference in mna handler");
		} else
			printk(KERN_ALERT "Unable to handle kernel paging "
			       "request in mna handler");
		printk(KERN_ALERT " at virtual address %016lx\n",address);
		printk(KERN_ALERT "current->{active_,}mm->context = %016lx\n",
			(current->mm ? CTX_HWBITS(current->mm->context) :
			CTX_HWBITS(current->active_mm->context)));
		printk(KERN_ALERT "current->{active_,}mm->pgd = %016lx\n",
			(current->mm ? (unsigned long) current->mm->pgd :
			(unsigned long) current->active_mm->pgd));
		die_if_kernel("Oops", regs);
		/* Not reached */
	}
	regs->tpc = entry->fixup;
	regs->tnpc = regs->tpc + 4;

	if (fixup_tstate_asi) {
		regs->tstate &= ~TSTATE_ASI;
		regs->tstate |= (ASI_AIUS << 24UL);
	}
}

static void log_unaligned(struct pt_regs *regs)
{
	static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 5);

	if (__ratelimit(&ratelimit)) {
		printk("Kernel unaligned access at TPC[%lx] %pS\n",
		       regs->tpc, (void *) regs->tpc);
	}
}

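/* Main entry point for a misaligned kernel access.  Decode the
 * instruction, quietly hand {get,put}_user() faults to the exception
 * tables, and emulate plain integer loads and stores; floating-point
 * and atomic forms are not emulated here.
 */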
asmlinkage void kernel_unaligned_trap(struct pt_regs *regs, unsigned int insn)
{
	enum direction dir = decode_direction(insn);
	int size = decode_access_size(regs, insn);
	int orig_asi, asi;

	current_thread_info()->kern_una_regs = regs;
	current_thread_info()->kern_una_insn = insn;

	orig_asi = asi = decode_asi(insn, regs);

	/* If this is a {get,put}_user() on an unaligned userspace pointer,
	 * just signal a fault and do not log the event.
	 */
	if (asi == ASI_AIUS) {
		kernel_mna_trap_fault(0);
		return;
	}

	log_unaligned(regs);

	if (!ok_for_kernel(insn) || dir == both) {
		printk("Unsupported unaligned load/store trap for kernel "
		       "at <%016lx>.\n", regs->tpc);
		unaligned_panic("Kernel does fpu/atomic "
				"unaligned load/store.", regs);

		kernel_mna_trap_fault(0);
	} else {
		unsigned long addr, *reg_addr;
		int err;

		addr = compute_effective_address(regs, insn,
						 ((insn >> 25) & 0x1f));
		perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, addr);
		switch (asi) {
		case ASI_NL:
		case ASI_AIUPL:
		case ASI_AIUSL:
		case ASI_PL:
		case ASI_SL:
		case ASI_PNFL:
		case ASI_SNFL:
			asi &= ~0x08;
			break;
		}
		switch (dir) {
		case load:
			reg_addr = fetch_reg_addr(((insn>>25)&0x1f), regs);
			err = do_int_load(reg_addr, size,
					  (unsigned long *) addr,
					  decode_signedness(insn), asi);
			if (likely(!err) && unlikely(asi != orig_asi)) {
				unsigned long val_in = *reg_addr;
				switch (size) {
				case 2:
					val_in = swab16(val_in);
					break;
				case 4:
					val_in = swab32(val_in);
					break;
				case 8:
					val_in = swab64(val_in);
					break;
				case 16:
				default:
					BUG();
					break;
				}
				*reg_addr = val_in;
			}
			break;

		case store:
			err = do_int_store(((insn>>25)&0x1f), size,
					   (unsigned long *) addr, regs,
					   asi, orig_asi);
			break;

		default:
			panic("Impossible kernel unaligned trap.");
			/* Not reached... */
		}
		if (unlikely(err))
			kernel_mna_trap_fault(1);
		else
			advance(regs);
	}
}

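/* Software emulation of the POPC (population count) instruction:
 * count the set bits of either the sign-extended immediate or the rs2
 * register and write the result to rd, which may live in the register
 * window saved on the user stack.
 */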
int handle_popc(u32 insn, struct pt_regs *regs)
{
	int from_kernel = (regs->tstate & TSTATE_PRIV) != 0;
	int ret, rd = ((insn >> 25) & 0x1f);
	u64 value;

	perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0);
	if (insn & 0x2000) {
		maybe_flush_windows(0, 0, rd, from_kernel);
		value = sign_extend_imm13(insn);
	} else {
		maybe_flush_windows(0, insn & 0x1f, rd, from_kernel);
		value = fetch_reg(insn & 0x1f, regs);
	}
	ret = hweight64(value);
	if (rd < 16) {
		if (rd)
			regs->u_regs[rd] = ret;
	} else {
		unsigned long fp = regs->u_regs[UREG_FP];

		if (!test_thread_64bit_stack(fp)) {
			struct reg_window32 __user *win32;
			win32 = (struct reg_window32 __user *)((unsigned long)((u32)fp));
			put_user(ret, &win32->locals[rd - 16]);
		} else {
			struct reg_window __user *win;
			win = (struct reg_window __user *)(fp + STACK_BIAS);
			put_user(ret, &win->locals[rd - 16]);
		}
	}
	advance(regs);
	return 1;
}

extern void do_fpother(struct pt_regs *regs);
extern void do_privact(struct pt_regs *regs);
extern void sun4v_data_access_exception(struct pt_regs *regs,
					unsigned long addr,
					unsigned long type_ctx);

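/* Emulate floating-point loads (ldf/lddf/ldqf) and quad stores (stq)
 * that trapped: the data is moved one 32-bit word at a time through
 * {get,put}_user(), byte-swapped for little-endian ASIs, and the
 * thread's saved FPU image is updated so the FP registers end up with
 * the correct contents.
 */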
int handle_ldf_stq(u32 insn, struct pt_regs *regs)
{
	unsigned long addr = compute_effective_address(regs, insn, 0);
	int freg = ((insn >> 25) & 0x1e) | ((insn >> 20) & 0x20);
	struct fpustate *f = FPUSTATE;
	int asi = decode_asi(insn, regs);
	int flag = (freg < 32) ? FPRS_DL : FPRS_DU;

	perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0);

	save_and_clear_fpu();
	current_thread_info()->xfsr[0] &= ~0x1c000;
	if (freg & 3) {
		current_thread_info()->xfsr[0] |= (6 << 14) /* invalid_fp_register */;
		do_fpother(regs);
		return 0;
	}
	if (insn & 0x200000) {
		/* STQ */
		u64 first = 0, second = 0;

		if (current_thread_info()->fpsaved[0] & flag) {
			first = *(u64 *)&f->regs[freg];
			second = *(u64 *)&f->regs[freg+2];
		}
		if (asi < 0x80) {
			do_privact(regs);
			return 1;
		}
		switch (asi) {
		case ASI_P:
		case ASI_S: break;
		case ASI_PL:
		case ASI_SL:
			{
				/* Need to convert endians */
				u64 tmp = __swab64p(&first);

				first = __swab64p(&second);
				second = tmp;
				break;
			}
		default:
			if (tlb_type == hypervisor)
				sun4v_data_access_exception(regs, addr, 0);
			else
				spitfire_data_access_exception(regs, 0, addr);
			return 1;
		}
		if (put_user (first >> 32, (u32 __user *)addr) ||
		    __put_user ((u32)first, (u32 __user *)(addr + 4)) ||
		    __put_user (second >> 32, (u32 __user *)(addr + 8)) ||
		    __put_user ((u32)second, (u32 __user *)(addr + 12))) {
			if (tlb_type == hypervisor)
				sun4v_data_access_exception(regs, addr, 0);
			else
				spitfire_data_access_exception(regs, 0, addr);
			return 1;
		}
	} else {
		/* LDF, LDDF, LDQF */
		u32 data[4] __attribute__ ((aligned(8)));
		int size, i;
		int err;

		if (asi < 0x80) {
			do_privact(regs);
			return 1;
		} else if (asi > ASI_SNFL) {
			if (tlb_type == hypervisor)
				sun4v_data_access_exception(regs, addr, 0);
			else
				spitfire_data_access_exception(regs, 0, addr);
			return 1;
		}
		switch (insn & 0x180000) {
		case 0x000000: size = 1; break;
		case 0x100000: size = 4; break;
		default: size = 2; break;
		}
		for (i = 0; i < size; i++)
			data[i] = 0;

		err = get_user (data[0], (u32 __user *) addr);
		if (!err) {
			for (i = 1; i < size; i++)
				err |= __get_user (data[i], (u32 __user *)(addr + 4*i));
		}
		if (err && !(asi & 0x2 /* NF */)) {
			if (tlb_type == hypervisor)
				sun4v_data_access_exception(regs, addr, 0);
			else
				spitfire_data_access_exception(regs, 0, addr);
			return 1;
		}
		if (asi & 0x8) /* Little */ {
			u64 tmp;

			switch (size) {
			case 1: data[0] = le32_to_cpup(data + 0); break;
			default:*(u64 *)(data + 0) = le64_to_cpup((u64 *)(data + 0));
				break;
			case 4: tmp = le64_to_cpup((u64 *)(data + 0));
				*(u64 *)(data + 0) = le64_to_cpup((u64 *)(data + 2));
				*(u64 *)(data + 2) = tmp;
				break;
			}
		}
		if (!(current_thread_info()->fpsaved[0] & FPRS_FEF)) {
			current_thread_info()->fpsaved[0] = FPRS_FEF;
			current_thread_info()->gsr[0] = 0;
		}
		if (!(current_thread_info()->fpsaved[0] & flag)) {
			if (freg < 32)
				memset(f->regs, 0, 32*sizeof(u32));
			else
				memset(f->regs+32, 0, 32*sizeof(u32));
		}
		memcpy(f->regs + freg, data, size * 4);
		current_thread_info()->fpsaved[0] |= flag;
	}
	advance(regs);
	return 1;
}

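/* A load through a non-faulting ("no fault") ASI touched an unmapped
 * address.  Architecturally it must simply return zero, so clear the
 * destination register (and its pair for ldd forms) and continue.
 */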
void handle_ld_nf(u32 insn, struct pt_regs *regs)
{
	int rd = ((insn >> 25) & 0x1f);
	int from_kernel = (regs->tstate & TSTATE_PRIV) != 0;
	unsigned long *reg;

	perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0);

	maybe_flush_windows(0, 0, rd, from_kernel);
	reg = fetch_reg_addr(rd, regs);
	if (from_kernel || rd < 16) {
		reg[0] = 0;
		if ((insn & 0x780000) == 0x180000)
			reg[1] = 0;
	} else if (!test_thread_64bit_stack(regs->u_regs[UREG_FP])) {
		put_user(0, (int __user *) reg);
		if ((insn & 0x780000) == 0x180000)
			put_user(0, ((int __user *) reg) + 1);
	} else {
		put_user(0, (unsigned long __user *) reg);
		if ((insn & 0x780000) == 0x180000)
			put_user(0, (unsigned long __user *) reg + 1);
	}
	advance(regs);
}

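/* LDDF memory-address-not-aligned trap from user space: fetch the two
 * halves with 32-bit user accesses, reassemble them (byte-swapping for
 * little-endian ASIs) and deposit the value into the saved FPU state.
 */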
void handle_lddfmna(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr)
{
	enum ctx_state prev_state = exception_enter();
	unsigned long pc = regs->tpc;
	unsigned long tstate = regs->tstate;
	u32 insn;
	u64 value;
	u8 freg;
	int flag;
	struct fpustate *f = FPUSTATE;

	if (tstate & TSTATE_PRIV)
		die_if_kernel("lddfmna from kernel", regs);
	perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, sfar);
	if (test_thread_flag(TIF_32BIT))
		pc = (u32)pc;
	if (get_user(insn, (u32 __user *) pc) != -EFAULT) {
		int asi = decode_asi(insn, regs);
		u32 first, second;
		int err;

		if ((asi > ASI_SNFL) ||
		    (asi < ASI_P))
			goto daex;
		first = second = 0;
		err = get_user(first, (u32 __user *)sfar);
		if (!err)
			err = get_user(second, (u32 __user *)(sfar + 4));
		if (err) {
			if (!(asi & 0x2))
				goto daex;
			first = second = 0;
		}
		save_and_clear_fpu();
		freg = ((insn >> 25) & 0x1e) | ((insn >> 20) & 0x20);
		value = (((u64)first) << 32) | second;
		if (asi & 0x8) /* Little */
			value = __swab64p(&value);
		flag = (freg < 32) ? FPRS_DL : FPRS_DU;
		if (!(current_thread_info()->fpsaved[0] & FPRS_FEF)) {
			current_thread_info()->fpsaved[0] = FPRS_FEF;
			current_thread_info()->gsr[0] = 0;
		}
		if (!(current_thread_info()->fpsaved[0] & flag)) {
			if (freg < 32)
				memset(f->regs, 0, 32*sizeof(u32));
			else
				memset(f->regs+32, 0, 32*sizeof(u32));
		}
		*(u64 *)(f->regs + freg) = value;
		current_thread_info()->fpsaved[0] |= flag;
	} else {
daex:
		if (tlb_type == hypervisor)
			sun4v_data_access_exception(regs, sfar, sfsr);
		else
			spitfire_data_access_exception(regs, sfsr, sfar);
		goto out;
	}
	advance(regs);
out:
	exception_exit(prev_state);
}

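/* STDF memory-address-not-aligned trap, the store-side counterpart:
 * read the 64-bit value out of the saved FPU state and write it to
 * memory as two 32-bit user stores, byte-swapped for little-endian
 * ASIs.
 */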
void handle_stdfmna(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr)
{
	enum ctx_state prev_state = exception_enter();
	unsigned long pc = regs->tpc;
	unsigned long tstate = regs->tstate;
	u32 insn;
	u64 value;
	u8 freg;
	int flag;
	struct fpustate *f = FPUSTATE;

	if (tstate & TSTATE_PRIV)
		die_if_kernel("stdfmna from kernel", regs);
	perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, sfar);
	if (test_thread_flag(TIF_32BIT))
		pc = (u32)pc;
	if (get_user(insn, (u32 __user *) pc) != -EFAULT) {
		int asi = decode_asi(insn, regs);
		freg = ((insn >> 25) & 0x1e) | ((insn >> 20) & 0x20);
		value = 0;
		flag = (freg < 32) ? FPRS_DL : FPRS_DU;
		if ((asi > ASI_SNFL) ||
		    (asi < ASI_P))
			goto daex;
		save_and_clear_fpu();
		if (current_thread_info()->fpsaved[0] & flag)
			value = *(u64 *)&f->regs[freg];
		switch (asi) {
		case ASI_P:
		case ASI_S: break;
		case ASI_PL:
		case ASI_SL:
			value = __swab64p(&value); break;
		default: goto daex;
		}
		if (put_user (value >> 32, (u32 __user *) sfar) ||
		    __put_user ((u32)value, (u32 __user *)(sfar + 4)))
			goto daex;
	} else {
daex:
		if (tlb_type == hypervisor)
			sun4v_data_access_exception(regs, sfar, sfsr);
		else
			spitfire_data_access_exception(regs, sfsr, sfar);
		goto out;
	}
	advance(regs);
out:
	exception_exit(prev_state);
}