/* align.c - handle alignment exceptions for the Power PC.
 *
 * Copyright (c) 1996 Paul Mackerras <paulus@cs.anu.edu.au>
 * Copyright (c) 1998-1999 TiVo, Inc.
 *    PowerPC 403GCX modifications.
 * Copyright (c) 1999 Grant Erickson <grant@lcse.umn.edu>
 *    PowerPC 403GCX/405GP modifications.
 * Copyright (c) 2001-2002 PPC64 team, IBM Corp
 *    64-bit and Power4 support
 * Copyright (c) 2005 Benjamin Herrenschmidt, IBM Corp
 *                    <benh@kernel.crashing.org>
 *    Merge ppc32 and ppc64 implementations
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/kernel.h>
#include <asm/processor.h>
#include <asm/uaccess.h>
#include <asm/cache.h>
#include <asm/cputable.h>
#include <asm/emulated_ops.h>
#include <asm/switch_to.h>
struct aligninfo {
	unsigned char len;
	unsigned char flags;
};

#define IS_XFORM(inst)	(((inst) >> 26) == 31)
#define IS_DSFORM(inst)	(((inst) >> 26) >= 56)

#define INVALID	{ 0, 0 }
/* Bits in the flags field */
#define LD	0	/* load */
#define ST	1	/* store */
#define SE	2	/* sign-extend value, or FP ld/st as word */
#define F	4	/* to/from fp regs */
#define U	8	/* update index register */
#define M	0x10	/* multiple load/store */
#define SW	0x20	/* byte swap */
#define S	0x40	/* single-precision fp or... */
#define SX	0x40	/* ... byte count in XER */
#define HARD	0x80	/* string, stwcx. */
#define E4	0x40	/* SPE endianness is word */
#define E8	0x80	/* SPE endianness is double word */
#define SPLT	0x80	/* VSX SPLAT load */
/* DSISR bits reported for a DCBZ instruction: */
#define DCBZ	0x5f	/* 8xx/82xx dcbz faults when cache not enabled */
/*
 * The PowerPC stores certain bits of the instruction that caused the
 * alignment exception in the DSISR register.  This array maps those
 * bits to information about the operand length and what the
 * instruction would do.
 */
static struct aligninfo aligninfo[128] = {
	{ 4, LD },		/* 00 0 0000: lwz / lwarx */
	INVALID,		/* 00 0 0001 */
	{ 4, ST },		/* 00 0 0010: stw */
	INVALID,		/* 00 0 0011 */
	{ 2, LD },		/* 00 0 0100: lhz */
	{ 2, LD+SE },		/* 00 0 0101: lha */
	{ 2, ST },		/* 00 0 0110: sth */
	{ 4, LD+M },		/* 00 0 0111: lmw */
	{ 4, LD+F+S },		/* 00 0 1000: lfs */
	{ 8, LD+F },		/* 00 0 1001: lfd */
	{ 4, ST+F+S },		/* 00 0 1010: stfs */
	{ 8, ST+F },		/* 00 0 1011: stfd */
	INVALID,		/* 00 0 1100 */
	{ 8, LD },		/* 00 0 1101: ld/ldu/lwa */
	INVALID,		/* 00 0 1110 */
	{ 8, ST },		/* 00 0 1111: std/stdu */
	{ 4, LD+U },		/* 00 1 0000: lwzu */
	INVALID,		/* 00 1 0001 */
	{ 4, ST+U },		/* 00 1 0010: stwu */
	INVALID,		/* 00 1 0011 */
	{ 2, LD+U },		/* 00 1 0100: lhzu */
	{ 2, LD+SE+U },		/* 00 1 0101: lhau */
	{ 2, ST+U },		/* 00 1 0110: sthu */
	{ 4, ST+M },		/* 00 1 0111: stmw */
	{ 4, LD+F+S+U },	/* 00 1 1000: lfsu */
	{ 8, LD+F+U },		/* 00 1 1001: lfdu */
	{ 4, ST+F+S+U },	/* 00 1 1010: stfsu */
	{ 8, ST+F+U },		/* 00 1 1011: stfdu */
	{ 16, LD+F },		/* 00 1 1100: lfdp */
	INVALID,		/* 00 1 1101 */
	{ 16, ST+F },		/* 00 1 1110: stfdp */
	INVALID,		/* 00 1 1111 */
	{ 8, LD },		/* 01 0 0000: ldx */
	INVALID,		/* 01 0 0001 */
	{ 8, ST },		/* 01 0 0010: stdx */
	INVALID,		/* 01 0 0011 */
	INVALID,		/* 01 0 0100 */
	{ 4, LD+SE },		/* 01 0 0101: lwax */
	INVALID,		/* 01 0 0110 */
	INVALID,		/* 01 0 0111 */
	{ 4, LD+M+HARD+SX },	/* 01 0 1000: lswx */
	{ 4, LD+M+HARD },	/* 01 0 1001: lswi */
	{ 4, ST+M+HARD+SX },	/* 01 0 1010: stswx */
	{ 4, ST+M+HARD },	/* 01 0 1011: stswi */
	INVALID,		/* 01 0 1100 */
	{ 8, LD+U },		/* 01 0 1101: ldu */
	INVALID,		/* 01 0 1110 */
	{ 8, ST+U },		/* 01 0 1111: stdu */
	{ 8, LD+U },		/* 01 1 0000: ldux */
	INVALID,		/* 01 1 0001 */
	{ 8, ST+U },		/* 01 1 0010: stdux */
	INVALID,		/* 01 1 0011 */
	INVALID,		/* 01 1 0100 */
	{ 4, LD+SE+U },		/* 01 1 0101: lwaux */
	INVALID,		/* 01 1 0110 */
	INVALID,		/* 01 1 0111 */
	INVALID,		/* 01 1 1000 */
	INVALID,		/* 01 1 1001 */
	INVALID,		/* 01 1 1010 */
	INVALID,		/* 01 1 1011 */
	INVALID,		/* 01 1 1100 */
	INVALID,		/* 01 1 1101 */
	INVALID,		/* 01 1 1110 */
	INVALID,		/* 01 1 1111 */
	INVALID,		/* 10 0 0000 */
	INVALID,		/* 10 0 0001 */
	INVALID,		/* 10 0 0010: stwcx. */
	INVALID,		/* 10 0 0011 */
	INVALID,		/* 10 0 0100 */
	INVALID,		/* 10 0 0101 */
	INVALID,		/* 10 0 0110 */
	INVALID,		/* 10 0 0111 */
	{ 4, LD+SW },		/* 10 0 1000: lwbrx */
	INVALID,		/* 10 0 1001 */
	{ 4, ST+SW },		/* 10 0 1010: stwbrx */
	INVALID,		/* 10 0 1011 */
	{ 2, LD+SW },		/* 10 0 1100: lhbrx */
	{ 4, LD+SE },		/* 10 0 1101  lwa */
	{ 2, ST+SW },		/* 10 0 1110: sthbrx */
	INVALID,		/* 10 0 1111 */
	INVALID,		/* 10 1 0000 */
	INVALID,		/* 10 1 0001 */
	INVALID,		/* 10 1 0010 */
	INVALID,		/* 10 1 0011 */
	INVALID,		/* 10 1 0100 */
	INVALID,		/* 10 1 0101 */
	INVALID,		/* 10 1 0110 */
	INVALID,		/* 10 1 0111 */
	INVALID,		/* 10 1 1000 */
	INVALID,		/* 10 1 1001 */
	INVALID,		/* 10 1 1010 */
	INVALID,		/* 10 1 1011 */
	INVALID,		/* 10 1 1100 */
	INVALID,		/* 10 1 1101 */
	INVALID,		/* 10 1 1110 */
	{ 0, ST+HARD },		/* 10 1 1111: dcbz */
	{ 4, LD },		/* 11 0 0000: lwzx */
	INVALID,		/* 11 0 0001 */
	{ 4, ST },		/* 11 0 0010: stwx */
	INVALID,		/* 11 0 0011 */
	{ 2, LD },		/* 11 0 0100: lhzx */
	{ 2, LD+SE },		/* 11 0 0101: lhax */
	{ 2, ST },		/* 11 0 0110: sthx */
	INVALID,		/* 11 0 0111 */
	{ 4, LD+F+S },		/* 11 0 1000: lfsx */
	{ 8, LD+F },		/* 11 0 1001: lfdx */
	{ 4, ST+F+S },		/* 11 0 1010: stfsx */
	{ 8, ST+F },		/* 11 0 1011: stfdx */
	{ 16, LD+F },		/* 11 0 1100: lfdpx */
	{ 4, LD+F+SE },		/* 11 0 1101: lfiwax */
	{ 16, ST+F },		/* 11 0 1110: stfdpx */
	{ 4, ST+F },		/* 11 0 1111: stfiwx */
	{ 4, LD+U },		/* 11 1 0000: lwzux */
	INVALID,		/* 11 1 0001 */
	{ 4, ST+U },		/* 11 1 0010: stwux */
	INVALID,		/* 11 1 0011 */
	{ 2, LD+U },		/* 11 1 0100: lhzux */
	{ 2, LD+SE+U },		/* 11 1 0101: lhaux */
	{ 2, ST+U },		/* 11 1 0110: sthux */
	INVALID,		/* 11 1 0111 */
	{ 4, LD+F+S+U },	/* 11 1 1000: lfsux */
	{ 8, LD+F+U },		/* 11 1 1001: lfdux */
	{ 4, ST+F+S+U },	/* 11 1 1010: stfsux */
	{ 8, ST+F+U },		/* 11 1 1011: stfdux */
	INVALID,		/* 11 1 1100 */
	{ 4, LD+F },		/* 11 1 1101: lfiwzx */
	INVALID,		/* 11 1 1110 */
	INVALID,		/* 11 1 1111 */
};
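/*
 * Illustrative example (derived from the table above): an unaligned lwz
 * reports index 00 0 0000 in the DSISR and selects aligninfo[0] = { 4, LD },
 * a 4-byte load with no special flags; lha (00 0 0101) selects
 * { 2, LD+SE }, a 2-byte sign-extending load.
 */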
/*
 * Create a DSISR value from the instruction
 */
static inline unsigned make_dsisr(unsigned instr)
{
	unsigned dsisr;

	/* bits  6:15 --> 22:31 */
	dsisr = (instr & 0x03ff0000) >> 16;

	if (IS_XFORM(instr)) {
		/* bits 29:30 --> 15:16 */
		dsisr |= (instr & 0x00000006) << 14;
		/* bit     25 -->    17 */
		dsisr |= (instr & 0x00000040) << 8;
		/* bits 21:24 --> 18:21 */
		dsisr |= (instr & 0x00000780) << 3;
	} else {
		/* bit      5 -->    17 */
		dsisr |= (instr & 0x04000000) >> 12;
		/* bits  1: 4 --> 18:21 */
		dsisr |= (instr & 0x78000000) >> 17;
		/* bits 30:31 --> 12:13 */
		if (IS_DSFORM(instr))
			dsisr |= (instr & 0x00000003) << 18;
	}

	return dsisr;
}
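/*
 * Worked example (illustrative): for "lhz r3,1(r4)", instr = 0xa0640001.
 * It is not X-form, so bits 6:15 give dsisr = 0x0064 (RT=3, RA=4) and
 * bits 1:4 contribute 0x1000, yielding dsisr = 0x1064.  fix_alignment()
 * below then extracts reg = (0x1064 >> 5) & 0x1f = 3, areg = 4, and the
 * table index (0x1064 >> 10) & 0x7f = 4, i.e. aligninfo[4] = { 2, LD }.
 */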
/*
 * The dcbz (data cache block zero) instruction
 * gives an alignment fault if used on non-cacheable
 * memory.  We handle the fault mainly for the
 * case when we are running with the cache disabled
 * for debugging.
 */
static int emulate_dcbz(struct pt_regs *regs, unsigned char __user *addr)
{
	long __user *p;
	int i, size;

#ifdef __powerpc64__
	size = ppc64_caches.dline_size;
#else
	size = L1_CACHE_BYTES;
#endif
	p = (long __user *) (regs->dar & -size);
	if (user_mode(regs) && !access_ok(VERIFY_WRITE, p, size))
		return -EFAULT;
	for (i = 0; i < size / sizeof(long); ++i)
		if (__put_user_inatomic(0, p+i))
			return -EFAULT;
	return 1;
}
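/*
 * Illustrative example: with a 32-byte cache line, a dcbz whose effective
 * address is 0x1007 zeroes the whole block 0x1000-0x101f, since p is
 * rounded down to a block boundary by (regs->dar & -size).
 */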
/*
 * Emulate load & store multiple instructions
 * On 64-bit machines, these instructions only affect/use the
 * bottom 4 bytes of each register, and the loads clear the
 * top 4 bytes of the affected register.
 */
#ifdef __BIG_ENDIAN__
#ifdef CONFIG_PPC64
#define REG_BYTE(rp, i)		*((u8 *)((rp) + ((i) >> 2)) + ((i) & 3) + 4)
#else
#define REG_BYTE(rp, i)		*((u8 *)(rp) + (i))
#endif
#endif

#ifdef __LITTLE_ENDIAN__
#define REG_BYTE(rp, i)		(*(((u8 *)((rp) + ((i)>>2)) + ((i)&3))))
#endif
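/*
 * Illustrative example: on 64-bit big-endian, REG_BYTE(rp, 0) addresses
 * byte 4 of rp[0], i.e. the most significant byte of the low 32-bit half
 * of the first register, so consecutive i values walk the 4-byte register
 * images in memory order.
 */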
#define SWIZ_PTR(p)		((unsigned char __user *)((p) ^ swiz))
static int emulate_multiple(struct pt_regs *regs, unsigned char __user *addr,
			    unsigned int reg, unsigned int nb,
			    unsigned int flags, unsigned int instr,
			    unsigned long swiz)
{
	unsigned long *rptr;
	unsigned int nb0, i, bswiz;
	unsigned long p;

	/*
	 * We do not try to emulate 8 bytes multiple as they aren't really
	 * available in our operating environments and we don't try to
	 * emulate multiples operations in kernel land as they should never
	 * be used/generated there at least not on unaligned boundaries
	 */
	if (unlikely((nb > 4) || !user_mode(regs)))
		return 0;

	/* lmw, stmw, lswi/x, stswi/x */
	nb0 = 0;
	if (flags & HARD) {
		if (flags & SX) {
			nb = regs->xer & 127;
			if (nb == 0)
				return 1;
		} else {
			unsigned long pc = regs->nip ^ (swiz & 4);

			if (__get_user_inatomic(instr,
						(unsigned int __user *)pc))
				return -EFAULT;
			if (swiz == 0 && (flags & SW))
				instr = cpu_to_le32(instr);
			nb = (instr >> 11) & 0x1f;
			if (nb == 0)
				nb = 32;
		}
		if (nb + reg * 4 > 128) {
			nb0 = nb + reg * 4 - 128;
			nb = 128 - reg * 4;
		}
#ifdef __LITTLE_ENDIAN__
		/*
		 *  String instructions are endian neutral but the code
		 *  below is not.  Force byte swapping on so that the
		 *  effects of swizzling are undone in the load/store
		 *  loops below.
		 */
		flags ^= SW;
#endif
	} else {
		/* lwm, stmw */
		nb = (32 - reg) * 4;
	}

	if (!access_ok((flags & ST ? VERIFY_WRITE : VERIFY_READ), addr, nb+nb0))
		return -EFAULT;	/* bad address */

	rptr = &regs->gpr[reg];
	p = (unsigned long) addr;
	bswiz = (flags & SW)? 3: 0;

	if (!(flags & ST)) {
		/*
		 * This zeroes the top 4 bytes of the affected registers
		 * in 64-bit mode, and also zeroes out any remaining
		 * bytes of the last register for lsw*.
		 */
		memset(rptr, 0, ((nb + 3) / 4) * sizeof(unsigned long));
		if (nb0 > 0)
			memset(&regs->gpr[0], 0,
			       ((nb0 + 3) / 4) * sizeof(unsigned long));

		for (i = 0; i < nb; ++i, ++p)
			if (__get_user_inatomic(REG_BYTE(rptr, i ^ bswiz),
						SWIZ_PTR(p)))
				return -EFAULT;
		if (nb0 > 0) {
			rptr = &regs->gpr[0];
			addr += nb;
			for (i = 0; i < nb0; ++i, ++p)
				if (__get_user_inatomic(REG_BYTE(rptr,
								 i ^ bswiz),
							SWIZ_PTR(p)))
					return -EFAULT;
		}
	} else {
		for (i = 0; i < nb; ++i, ++p)
			if (__put_user_inatomic(REG_BYTE(rptr, i ^ bswiz),
						SWIZ_PTR(p)))
				return -EFAULT;
		if (nb0 > 0) {
			rptr = &regs->gpr[0];
			addr += nb;
			for (i = 0; i < nb0; ++i, ++p)
				if (__put_user_inatomic(REG_BYTE(rptr,
								 i ^ bswiz),
							SWIZ_PTR(p)))
					return -EFAULT;
		}
	}
	return 1;
}
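/*
 * Illustrative example: "lswi r5,r4,6" comes through here with reg = 5
 * and nb = 6: four bytes land in r5, the remaining two in the top of r6,
 * and the memset above guarantees the unwritten bytes of r6 (and the
 * high halves in 64-bit mode) read as zero.
 */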
/*
 * Emulate floating-point pair loads and stores.
 * Only POWER6 has these instructions, and it does true little-endian,
 * so we don't need the address swizzling.
 */
static int emulate_fp_pair(unsigned char __user *addr, unsigned int reg,
			   unsigned int flags)
{
	char *ptr0 = (char *) &current->thread.TS_FPR(reg);
	char *ptr1 = (char *) &current->thread.TS_FPR(reg+1);
	int i, ret, sw = 0;

	if (reg & 1)
		return 0;	/* invalid form: FRS/FRT must be even */
	if (flags & SW)
		sw = 7;
	ret = 0;
	for (i = 0; i < 8; ++i) {
		if (!(flags & ST)) {
			ret |= __get_user(ptr0[i^sw], addr + i);
			ret |= __get_user(ptr1[i^sw], addr + i + 8);
		} else {
			ret |= __put_user(ptr0[i^sw], addr + i);
			ret |= __put_user(ptr1[i^sw], addr + i + 8);
		}
	}
	if (ret)
		return -EFAULT;
	return 1;	/* exception handled and fixed up */
}
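/*
 * Illustrative example: an unaligned "lfdp f4,0(r3)" is fixed up by
 * copying bytes 0-7 into f4 and bytes 8-15 into f5; an odd FRT such as
 * f5 is an invalid form, so the 0 return above lets the caller treat it
 * as an unhandled exception.
 */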
#ifdef CONFIG_SPE

static struct aligninfo spe_aligninfo[32] = {
	{ 8, LD+E8 },		/* 0 00 00: evldd[x] */
	{ 8, LD+E4 },		/* 0 00 01: evldw[x] */
	{ 8, LD },		/* 0 00 10: evldh[x] */
	INVALID,		/* 0 00 11 */
	{ 2, LD },		/* 0 01 00: evlhhesplat[x] */
	INVALID,		/* 0 01 01 */
	{ 2, LD },		/* 0 01 10: evlhhousplat[x] */
	{ 2, LD+SE },		/* 0 01 11: evlhhossplat[x] */
	{ 4, LD },		/* 0 10 00: evlwhe[x] */
	INVALID,		/* 0 10 01 */
	{ 4, LD },		/* 0 10 10: evlwhou[x] */
	{ 4, LD+SE },		/* 0 10 11: evlwhos[x] */
	{ 4, LD+E4 },		/* 0 11 00: evlwwsplat[x] */
	INVALID,		/* 0 11 01 */
	{ 4, LD },		/* 0 11 10: evlwhsplat[x] */
	INVALID,		/* 0 11 11 */

	{ 8, ST+E8 },		/* 1 00 00: evstdd[x] */
	{ 8, ST+E4 },		/* 1 00 01: evstdw[x] */
	{ 8, ST },		/* 1 00 10: evstdh[x] */
	INVALID,		/* 1 00 11 */
	INVALID,		/* 1 01 00 */
	INVALID,		/* 1 01 01 */
	INVALID,		/* 1 01 10 */
	INVALID,		/* 1 01 11 */
	{ 4, ST },		/* 1 10 00: evstwhe[x] */
	INVALID,		/* 1 10 01 */
	{ 4, ST },		/* 1 10 10: evstwho[x] */
	INVALID,		/* 1 10 11 */
	{ 4, ST+E4 },		/* 1 11 00: evstwwe[x] */
	INVALID,		/* 1 11 01 */
	{ 4, ST+E4 },		/* 1 11 10: evstwwo[x] */
	INVALID,		/* 1 11 11 */
};
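/*
 * Illustrative example: for evldd, (instr >> 1) & 0x1f in emulate_spe()
 * below yields 0x00, selecting { 8, LD+E8 }: an 8-byte load treated as a
 * single doubleword for endian fixup.
 */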
#define EVLDD		0x00
#define EVLDW		0x01
#define EVLDH		0x02
#define EVLHHESPLAT	0x04
#define EVLHHOUSPLAT	0x06
#define EVLHHOSSPLAT	0x07
#define EVLWHE		0x08
#define EVLWHOU		0x0A
#define EVLWHOS		0x0B
#define EVLWWSPLAT	0x0C
#define EVLWHSPLAT	0x0E
#define EVSTDD		0x10
#define EVSTDW		0x11
#define EVSTDH		0x12
#define EVSTWHE		0x18
#define EVSTWHO		0x1A
#define EVSTWWE		0x1C
#define EVSTWWO		0x1E
/*
 * Emulate SPE loads and stores.
 * Only Book-E has these instructions, and it does true little-endian,
 * so we don't need the address swizzling.
 */
static int emulate_spe(struct pt_regs *regs, unsigned int reg,
		       unsigned int instr)
{
	int ret;
	union {
		u64 ll;
		u32 w[2];
		u16 h[4];
		u8 v[8];
	} data, temp;
	unsigned char __user *p, *addr;
	unsigned long *evr = &current->thread.evr[reg];
	unsigned int nb, flags;

	instr = (instr >> 1) & 0x1f;

	/* DAR has the operand effective address */
	addr = (unsigned char __user *)regs->dar;

	nb = spe_aligninfo[instr].len;
	flags = spe_aligninfo[instr].flags;

	/* Verify the address of the operand */
	if (unlikely(user_mode(regs) &&
		     !access_ok((flags & ST ? VERIFY_WRITE : VERIFY_READ),
				addr, nb)))
		return -EFAULT;

	/* userland only */
	if (unlikely(!user_mode(regs)))
		return 0;

	flush_spe_to_thread(current);

	/* If we are loading, get the data from user space, else
	 * get it from register values
	 */
	if (flags & ST) {
		data.ll = 0;
		switch (instr) {
		case EVSTDD:
		case EVSTDW:
		case EVSTDH:
			data.w[0] = *evr;
			data.w[1] = regs->gpr[reg];
			break;
		case EVSTWHE:
			data.h[2] = *evr >> 16;
			data.h[3] = regs->gpr[reg] >> 16;
			break;
		case EVSTWHO:
			data.h[2] = *evr & 0xffff;
			data.h[3] = regs->gpr[reg] & 0xffff;
			break;
		case EVSTWWE:
			data.w[1] = *evr;
			break;
		case EVSTWWO:
			data.w[1] = regs->gpr[reg];
			break;
		default:
			return -EINVAL;
		}
	} else {
		temp.ll = data.ll = 0;
		ret = 0;
		p = addr;

		switch (nb) {
		case 8:
			ret |= __get_user_inatomic(temp.v[0], p++);
			ret |= __get_user_inatomic(temp.v[1], p++);
			ret |= __get_user_inatomic(temp.v[2], p++);
			ret |= __get_user_inatomic(temp.v[3], p++);
		case 4:
			ret |= __get_user_inatomic(temp.v[4], p++);
			ret |= __get_user_inatomic(temp.v[5], p++);
		case 2:
			ret |= __get_user_inatomic(temp.v[6], p++);
			ret |= __get_user_inatomic(temp.v[7], p++);
			if (unlikely(ret))
				return -EFAULT;
		}

		switch (instr) {
		case EVLDD:
		case EVLDW:
		case EVLDH:
			data.ll = temp.ll;
			break;
		case EVLHHESPLAT:
			data.h[0] = temp.h[3];
			data.h[2] = temp.h[3];
			break;
		case EVLHHOUSPLAT:
		case EVLHHOSSPLAT:
			data.h[1] = temp.h[3];
			data.h[3] = temp.h[3];
			break;
		case EVLWHE:
			data.h[0] = temp.h[2];
			data.h[2] = temp.h[3];
			break;
		case EVLWHOU:
		case EVLWHOS:
			data.h[1] = temp.h[2];
			data.h[3] = temp.h[3];
			break;
		case EVLWWSPLAT:
			data.w[0] = temp.w[1];
			data.w[1] = temp.w[1];
			break;
		case EVLWHSPLAT:
			data.h[0] = temp.h[2];
			data.h[1] = temp.h[2];
			data.h[2] = temp.h[3];
			data.h[3] = temp.h[3];
			break;
		default:
			return -EINVAL;
		}
	}

	if (flags & SW) {
		switch (flags & 0xf0) {
		case E8:
			data.ll = swab64(data.ll);
			break;
		case E4:
			data.w[0] = swab32(data.w[0]);
			data.w[1] = swab32(data.w[1]);
			break;
		/* Its half word endian */
		default:
			data.h[0] = swab16(data.h[0]);
			data.h[1] = swab16(data.h[1]);
			data.h[2] = swab16(data.h[2]);
			data.h[3] = swab16(data.h[3]);
			break;
		}
	}

	if (flags & SE) {
		data.w[0] = (s16)data.h[1];
		data.w[1] = (s16)data.h[3];
	}

	/* Store result to memory or update registers */
	if (flags & ST) {
		ret = 0;
		p = addr;
		switch (nb) {
		case 8:
			ret |= __put_user_inatomic(data.v[0], p++);
			ret |= __put_user_inatomic(data.v[1], p++);
			ret |= __put_user_inatomic(data.v[2], p++);
			ret |= __put_user_inatomic(data.v[3], p++);
		case 4:
			ret |= __put_user_inatomic(data.v[4], p++);
			ret |= __put_user_inatomic(data.v[5], p++);
		case 2:
			ret |= __put_user_inatomic(data.v[6], p++);
			ret |= __put_user_inatomic(data.v[7], p++);
		}
		if (unlikely(ret))
			return -EFAULT;
	} else {
		*evr = data.w[0];
		regs->gpr[reg] = data.w[1];
	}

	return 1;
}
#endif /* CONFIG_SPE */
#ifdef CONFIG_VSX
/*
 * Emulate VSX instructions...
 */
static int emulate_vsx(unsigned char __user *addr, unsigned int reg,
		       unsigned int areg, struct pt_regs *regs,
		       unsigned int flags, unsigned int length,
		       unsigned int elsize)
{
	char *ptr;
	unsigned long *lptr;
	int ret = 0;
	int sw = 0;
	int i, j;

	/* userland only */
	if (unlikely(!user_mode(regs)))
		return 0;

	flush_vsx_to_thread(current);

	if (reg < 32)
		ptr = (char *) &current->thread.fp_state.fpr[reg][0];
	else
		ptr = (char *) &current->thread.vr_state.vr[reg - 32];

	lptr = (unsigned long *) ptr;

#ifdef __LITTLE_ENDIAN__
	if (flags & SW) {
		elsize = length;
		sw = length-1;
	} else {
		/*
		 * The elements are BE ordered, even in LE mode, so process
		 * them in reverse order.
		 */
		addr += length - elsize;

		/* 8 byte memory accesses go in the top 8 bytes of the VR */
		if (length == 8)
			ptr += 8;
	}
#else
	if (flags & SW)
		sw = elsize-1;
#endif

	for (j = 0; j < length; j += elsize) {
		for (i = 0; i < elsize; ++i) {
			if (flags & ST)
				ret |= __put_user(ptr[i^sw], addr + i);
			else
				ret |= __get_user(ptr[i^sw], addr + i);
		}
		ptr += elsize;
#ifdef __LITTLE_ENDIAN__
		addr -= elsize;
#else
		addr += elsize;
#endif
	}

#ifdef __BIG_ENDIAN__
#define VSX_HI 0
#define VSX_LO 1
#else
#define VSX_HI 1
#define VSX_LO 0
#endif

	if (!ret) {
		if (flags & U)
			regs->gpr[areg] = regs->dar;

		/* Splat load copies the same data to top and bottom 8 bytes */
		if (flags & SPLT)
			lptr[VSX_LO] = lptr[VSX_HI];
		/* For 8 byte loads, zero the low 8 bytes */
		else if (!(flags & ST) && (8 == length))
			lptr[VSX_LO] = 0;
	} else
		return -EFAULT;

	return 1;
}
#endif /* CONFIG_VSX */
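/*
 * Illustrative example: a 16-byte access with 4-byte elements (elsize = 4)
 * runs the outer loop four times; on little-endian kernels the elements
 * are walked in reverse address order so the BE element layout of the
 * VSR is preserved.
 */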
/*
 * Called on alignment exception. Attempts to fixup
 *
 * Return 1 on success
 * Return 0 if unable to handle the interrupt
 * Return -EFAULT if data address is bad
 */

int fix_alignment(struct pt_regs *regs)
{
	unsigned int instr, nb, flags, instruction = 0;
	unsigned int reg, areg;
	unsigned int dsisr;
	unsigned char __user *addr;
	unsigned long p, swiz;
	int ret, i;
	union data {
		u64 ll;
		double dd;
		unsigned char v[8];
		struct {
#ifdef __LITTLE_ENDIAN__
			int	 low32;
			unsigned hi32;
#else
			unsigned hi32;
			int	 low32;
#endif
		} x32;
		struct {
#ifdef __LITTLE_ENDIAN__
			short	      low16;
			unsigned char hi48[6];
#else
			unsigned char hi48[6];
			short	      low16;
#endif
		} x16;
	} data;

	/*
	 * We require a complete register set, if not, then our assembly
	 * is broken
	 */
	CHECK_FULL_REGS(regs);

	dsisr = regs->dsisr;

	/* Some processors don't provide us with a DSISR we can use here,
	 * let's make one up from the instruction
	 */
	if (cpu_has_feature(CPU_FTR_NODSISRALIGN)) {
		unsigned long pc = regs->nip;

		if (cpu_has_feature(CPU_FTR_PPC_LE) && (regs->msr & MSR_LE))
			pc ^= 4;
		if (unlikely(__get_user_inatomic(instr,
						 (unsigned int __user *)pc)))
			return -EFAULT;
		if (cpu_has_feature(CPU_FTR_REAL_LE) && (regs->msr & MSR_LE))
			instr = cpu_to_le32(instr);
		dsisr = make_dsisr(instr);
		instruction = instr;
	}

	/* extract the operation and registers from the dsisr */
	reg = (dsisr >> 5) & 0x1f;	/* source/dest register */
	areg = dsisr & 0x1f;		/* register to update */

#ifdef CONFIG_SPE
	if ((instr >> 26) == 0x4) {
		PPC_WARN_ALIGNMENT(spe, regs);
		return emulate_spe(regs, reg, instr);
	}
#endif

	instr = (dsisr >> 10) & 0x7f;
	instr |= (dsisr >> 13) & 0x60;

	/* Lookup the operation in our table */
	nb = aligninfo[instr].len;
	flags = aligninfo[instr].flags;

	/* ldbrx/stdbrx overlap lfs/stfs in the DSISR unfortunately */
	if (IS_XFORM(instruction) && ((instruction >> 1) & 0x3ff) == 532) {
		nb = 8;
		flags = LD+SW;
	} else if (IS_XFORM(instruction) &&
		   ((instruction >> 1) & 0x3ff) == 660) {
		nb = 8;
		flags = ST+SW;
	}

	/* Byteswap little endian loads and stores */
	swiz = 0;
	if ((regs->msr & MSR_LE) != (MSR_KERNEL & MSR_LE)) {
		flags ^= SW;
#ifdef __BIG_ENDIAN__
		/*
		 * So-called "PowerPC little endian" mode works by
		 * swizzling addresses rather than by actually doing
		 * any byte-swapping.  To emulate this, we XOR each
		 * byte address with 7.  We also byte-swap, because
		 * the processor's address swizzling depends on the
		 * operand size (it xors the address with 7 for bytes,
		 * 6 for halfwords, 4 for words, 0 for doublewords) but
		 * we will xor with 7 and load/store each byte separately.
		 */
		if (cpu_has_feature(CPU_FTR_PPC_LE))
			swiz = 7;
#endif
	}

	/* DAR has the operand effective address */
	addr = (unsigned char __user *)regs->dar;

#ifdef CONFIG_VSX
	if ((instruction & 0xfc00003e) == 0x7c000018) {
		unsigned int elsize;

		/* Additional register addressing bit (64 VSX vs 32 FPR/GPR) */
		reg |= (instruction & 0x1) << 5;

		/* Simple inline decoder instead of a table */
		/* VSX has only 8 and 16 byte memory accesses */
		nb = 8;
		if (instruction & 0x200)
			nb = 16;

		/* Vector stores in little-endian mode swap individual
		   elements, so process them separately */
		elsize = 4;
		if (instruction & 0x80)
			elsize = 8;

		flags = 0;
		if ((regs->msr & MSR_LE) != (MSR_KERNEL & MSR_LE))
			flags |= SW;
		if (instruction & 0x100)
			flags |= ST;
		if (instruction & 0x040)
			flags |= U;

		/* splat load needs a special decoder */
		if ((instruction & 0x400) == 0){
			flags |= SPLT;
			nb = 8;
		}
		PPC_WARN_ALIGNMENT(vsx, regs);
		return emulate_vsx(addr, reg, areg, regs, flags, nb, elsize);
	}
#endif
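	/*
	 * Illustrative decode for the VSX branch above (assuming the usual
	 * ISA encodings): lxvd2x (op 31, XO 844) matches the 0x7c000018
	 * pattern and yields nb = 16, elsize = 8, flags = 0 (a plain
	 * 16-byte load), while lxvdsx (XO 332) has bit 0x400 clear and
	 * takes the SPLT path with nb = 8.
	 */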
	/* A size of 0 indicates an instruction we don't support, with
	 * the exception of DCBZ which is handled as a special case here
	 */
	if (instr == DCBZ) {
		PPC_WARN_ALIGNMENT(dcbz, regs);
		return emulate_dcbz(regs, addr);
	}
	if (unlikely(nb == 0))
		return 0;

	/* Load/Store Multiple instructions are handled in their own
	 * function
	 */
	if (flags & M) {
		PPC_WARN_ALIGNMENT(multiple, regs);
		return emulate_multiple(regs, addr, reg, nb,
					flags, instr, swiz);
	}

	/* Verify the address of the operand */
	if (unlikely(user_mode(regs) &&
		     !access_ok((flags & ST ? VERIFY_WRITE : VERIFY_READ),
				addr, nb)))
		return -EFAULT;

	/* Force the fprs into the save area so we can reference them */
	if (flags & F) {
		/* userland only */
		if (unlikely(!user_mode(regs)))
			return 0;
		flush_fp_to_thread(current);
	}

	/* Special case for 16-byte FP loads and stores */
	if (nb == 16) {
		PPC_WARN_ALIGNMENT(fp_pair, regs);
		return emulate_fp_pair(addr, reg, flags);
	}

	PPC_WARN_ALIGNMENT(unaligned, regs);

	/* If we are loading, get the data from user space, else
	 * get it from register values
	 */
	if (!(flags & ST)) {
		unsigned int start = 0;

		switch (nb) {
		case 4:
			start = offsetof(union data, x32.low32);
			break;
		case 2:
			start = offsetof(union data, x16.low16);
			break;
		}

		data.ll = 0;
		ret = 0;
		p = (unsigned long)addr;

		for (i = 0; i < nb; i++)
			ret |= __get_user_inatomic(data.v[start + i],
						   SWIZ_PTR(p++));

		if (unlikely(ret))
			return -EFAULT;

	} else if (flags & F) {
		data.ll = current->thread.TS_FPR(reg);
		if (flags & S) {
			/* Single-precision FP store requires conversion... */
#ifdef CONFIG_PPC_FPU
			preempt_disable();
			enable_kernel_fp();
			cvt_df(&data.dd, (float *)&data.x32.low32);
			preempt_enable();
#else
			return 0;
#endif
		}
	} else
		data.ll = regs->gpr[reg];

	if (flags & SW) {
		switch (nb) {
		case 8:
			data.ll = swab64(data.ll);
			break;
		case 4:
			data.x32.low32 = swab32(data.x32.low32);
			break;
		case 2:
			data.x16.low16 = swab16(data.x16.low16);
			break;
		}
	}

	/* Perform other misc operations like sign extension
	 * or floating point single precision conversion
	 */
	switch (flags & ~(U|SW)) {
	case LD+SE:	/* sign extending integer loads */
	case LD+F+SE:	/* sign extend for lfiwax */
		if ( nb == 2 )
			data.ll = data.x16.low16;
		else	/* nb must be 4 */
			data.ll = data.x32.low32;
		break;

	/* Single-precision FP load requires conversion... */
	case LD+F+S:
#ifdef CONFIG_PPC_FPU
		preempt_disable();
		enable_kernel_fp();
		cvt_fd((float *)&data.x32.low32, &data.dd);
		preempt_enable();
#else
		return 0;
#endif
		break;
	}

	/* Store result to memory or update registers */
	if (flags & ST) {
		unsigned int start = 0;

		switch (nb) {
		case 4:
			start = offsetof(union data, x32.low32);
			break;
		case 2:
			start = offsetof(union data, x16.low16);
			break;
		}

		ret = 0;
		p = (unsigned long)addr;

		for (i = 0; i < nb; i++)
			ret |= __put_user_inatomic(data.v[start + i],
						   SWIZ_PTR(p++));

		if (unlikely(ret))
			return -EFAULT;
	} else if (flags & F)
		current->thread.TS_FPR(reg) = data.ll;
	else
		regs->gpr[reg] = data.ll;

	/* Update RA as needed */
	if (flags & U)
		regs->gpr[areg] = regs->dar;

	return 1;
}
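/*
 * Usage note (assumed from typical powerpc configurations, not part of
 * this file): fix_alignment() is called from the alignment interrupt
 * handler (alignment_exception() in traps.c); a return of 1 makes the
 * handler step past the emulated instruction, 0 raises SIGBUS, and
 * -EFAULT raises SIGSEGV.
 */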