powerpc: Add trap_nr to thread_struct
arch/powerpc/include/asm/processor.h
#ifndef _ASM_POWERPC_PROCESSOR_H
#define _ASM_POWERPC_PROCESSOR_H

/*
 * Copyright (C) 2001 PPC 64 Team, IBM Corp
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <asm/reg.h>

#ifdef CONFIG_VSX
#define TS_FPRWIDTH 2
#else
#define TS_FPRWIDTH 1
#endif

#ifndef __ASSEMBLY__
#include <linux/compiler.h>
#include <linux/cache.h>
#include <asm/ptrace.h>
#include <asm/types.h>

/* We do _not_ want to define new machine types at all, those must die
 * in favor of using the device-tree
 * -- BenH.
 */

/* PREP sub-platform types; see residual.h for these */
#define _PREP_Motorola 0x01 /* motorola prep */
#define _PREP_Firm 0x02 /* firmworks prep */
#define _PREP_IBM 0x00 /* ibm prep */
#define _PREP_Bull 0x03 /* bull prep */

/* CHRP sub-platform types. These are arbitrary */
#define _CHRP_Motorola 0x04 /* motorola chrp, the cobra */
#define _CHRP_IBM 0x05 /* IBM chrp, the longtrail and longtrail 2 */
#define _CHRP_Pegasos 0x06 /* Genesi/bplan's Pegasos and Pegasos2 */
#define _CHRP_briq 0x07 /* TotalImpact's briQ */

#if defined(__KERNEL__) && defined(CONFIG_PPC32)

extern int _chrp_type;

#ifdef CONFIG_PPC_PREP

/* what kind of prep workstation we are */
extern int _prep_type;

#endif /* CONFIG_PPC_PREP */

#endif /* defined(__KERNEL__) && defined(CONFIG_PPC32) */

/*
 * Default implementation of macro that returns current
 * instruction pointer ("program counter").
 */
#define current_text_addr() ({ __label__ _l; _l: &&_l;})
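/*
 * This relies on two GCC extensions: local labels (__label__) and the
 * unary && operator, which takes the address of a label.  The statement
 * expression therefore evaluates to the address of the instruction at
 * the point of expansion.
 */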

/* Macros for adjusting thread priority (hardware multi-threading) */
#define HMT_very_low() asm volatile("or 31,31,31 # very low priority")
#define HMT_low() asm volatile("or 1,1,1 # low priority")
#define HMT_medium_low() asm volatile("or 6,6,6 # medium low priority")
#define HMT_medium() asm volatile("or 2,2,2 # medium priority")
#define HMT_medium_high() asm volatile("or 5,5,5 # medium high priority")
#define HMT_high() asm volatile("or 3,3,3 # high priority")
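/*
 * The "or Rx,Rx,Rx" forms above change no register state, but the Power
 * ISA reserves these particular encodings as thread-priority hints: on
 * SMT hardware they tell the core to give this hardware thread more or
 * fewer issue slots relative to its sibling.
 */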

#ifdef __KERNEL__

struct task_struct;
void start_thread(struct pt_regs *regs, unsigned long fdptr, unsigned long sp);
void release_thread(struct task_struct *);

/* Create a new kernel thread. */
extern long kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);

/* Lazy FPU handling on uni-processor */
extern struct task_struct *last_task_used_math;
extern struct task_struct *last_task_used_altivec;
extern struct task_struct *last_task_used_vsx;
extern struct task_struct *last_task_used_spe;

#ifdef CONFIG_PPC32

#if CONFIG_TASK_SIZE > CONFIG_KERNEL_START
#error User TASK_SIZE overlaps with KERNEL_START address
#endif
#define TASK_SIZE (CONFIG_TASK_SIZE)

/* This decides where the kernel will search for a free chunk of vm
 * space during mmap's.
 */
#define TASK_UNMAPPED_BASE (TASK_SIZE / 8 * 3)
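/*
 * Worked example (assuming the common CONFIG_TASK_SIZE of 0xC0000000,
 * i.e. a 3GB user space): mmap() searches begin at 0xC0000000 / 8 * 3
 * = 0x48000000, leaving the low ~1.1GB to the program image and heap.
 */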
#endif

#ifdef CONFIG_PPC64
/* 64-bit user address space is 44 bits (16TB user VM) */
#define TASK_SIZE_USER64 (0x0000100000000000UL)

/*
 * 32-bit user address space is 4GB - 1 page
 * (this 1 page is needed so referencing of 0xFFFFFFFF generates EFAULT)
 */
#define TASK_SIZE_USER32 (0x0000000100000000UL - (1*PAGE_SIZE))

#define TASK_SIZE_OF(tsk) (test_tsk_thread_flag(tsk, TIF_32BIT) ? \
		TASK_SIZE_USER32 : TASK_SIZE_USER64)
#define TASK_SIZE TASK_SIZE_OF(current)

/* This decides where the kernel will search for a free chunk of vm
 * space during mmap's.
 */
#define TASK_UNMAPPED_BASE_USER32 (PAGE_ALIGN(TASK_SIZE_USER32 / 4))
#define TASK_UNMAPPED_BASE_USER64 (PAGE_ALIGN(TASK_SIZE_USER64 / 4))

#define TASK_UNMAPPED_BASE ((is_32bit_task()) ? \
		TASK_UNMAPPED_BASE_USER32 : TASK_UNMAPPED_BASE_USER64)
#endif

#ifdef __powerpc64__

#define STACK_TOP_USER64 TASK_SIZE_USER64
#define STACK_TOP_USER32 TASK_SIZE_USER32

#define STACK_TOP (is_32bit_task() ? \
		STACK_TOP_USER32 : STACK_TOP_USER64)

#define STACK_TOP_MAX STACK_TOP_USER64

#else /* __powerpc64__ */

#define STACK_TOP TASK_SIZE
#define STACK_TOP_MAX STACK_TOP

#endif /* __powerpc64__ */

typedef struct {
	unsigned long seg;
} mm_segment_t;

#define TS_FPROFFSET 0
#define TS_VSRLOWOFFSET 1
#define TS_FPR(i) fpr[i][TS_FPROFFSET]
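/*
 * Layout note: when VSX is enabled (TS_FPRWIDTH == 2), each fpr[i] is
 * really one 128-bit VSR: the classic FP value sits in doubleword 0
 * (TS_FPROFFSET) and the low half of the vector in doubleword 1
 * (TS_VSRLOWOFFSET).  TS_FPR(i) selects just the FP part.
 */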

struct thread_struct {
	unsigned long ksp;		/* Kernel stack pointer */
	unsigned long ksp_limit;	/* if ksp <= ksp_limit stack overflow */

#ifdef CONFIG_PPC64
	unsigned long ksp_vsid;
#endif
	struct pt_regs *regs;		/* Pointer to saved register state */
	mm_segment_t fs;		/* for get_fs() validation */
#ifdef CONFIG_BOOKE
	/* BookE base exception scratch space; align on cacheline */
	unsigned long normsave[8] ____cacheline_aligned;
#endif
#ifdef CONFIG_PPC32
	void *pgdir;			/* root of page-table tree */
#endif
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
	/*
	 * The following help to manage the use of the Debug Control
	 * Registers on the BookE platforms.
	 */
	unsigned long dbcr0;
	unsigned long dbcr1;
#ifdef CONFIG_BOOKE
	unsigned long dbcr2;
#endif
	/*
	 * The stored DBSR value is the value captured at the last debug
	 * interrupt.  It is only ever read by user space (never written
	 * to) and describes the reason for the last debug trap.
	 * -- Torez
	 */
	unsigned long dbsr;
	/*
	 * The following will contain addresses used by debug applications
	 * to help trace and trap on particular address locations.
	 * The bits in the Debug Control Registers above help define which
	 * of the following registers will contain valid data and/or
	 * addresses.
	 */
	unsigned long iac1;
	unsigned long iac2;
#if CONFIG_PPC_ADV_DEBUG_IACS > 2
	unsigned long iac3;
	unsigned long iac4;
#endif
	unsigned long dac1;
	unsigned long dac2;
#if CONFIG_PPC_ADV_DEBUG_DVCS > 0
	unsigned long dvc1;
	unsigned long dvc2;
#endif
#endif
	/* FP and VSX 0-31 register set */
	double fpr[32][TS_FPRWIDTH];
	struct {
		unsigned int pad;
		unsigned int val;	/* Floating point status */
	} fpscr;
	int fpexc_mode;			/* floating-point exception mode */
	unsigned int align_ctl;		/* alignment handling control */
#ifdef CONFIG_PPC64
	unsigned long start_tb;		/* Start purr when proc switched in */
	unsigned long accum_tb;		/* Total accumulated purr for process */
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	struct perf_event *ptrace_bps[HBP_NUM];
	/*
	 * Helps identify source of single-step exception and subsequent
	 * hw-breakpoint enablement
	 */
	struct perf_event *last_hit_ubp;
#endif /* CONFIG_HAVE_HW_BREAKPOINT */
#endif
	unsigned long dabr;		/* Data address breakpoint register */
	unsigned long trap_nr;		/* last trap # on this thread */
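	/*
	 * trap_nr is the field this patch introduces.  It records the
	 * last trap taken by the thread; the values are presumably the
	 * exception vector numbers (e.g. 0x300 for a data storage
	 * interrupt, 0xd00 for trace, in classic Book3S numbering), so
	 * later code can tell which kind of fault got the thread here.
	 */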
#ifdef CONFIG_ALTIVEC
	/* Complete AltiVec register set */
	vector128 vr[32] __attribute__((aligned(16)));
	/* AltiVec status */
	vector128 vscr __attribute__((aligned(16)));
	unsigned long vrsave;
	int used_vr;			/* set if process has used altivec */
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_VSX
	/* VSR status */
	int used_vsr;			/* set if process has used VSX */
#endif /* CONFIG_VSX */
#ifdef CONFIG_SPE
	unsigned long evr[32];		/* upper 32-bits of SPE regs */
	u64 acc;			/* Accumulator */
	unsigned long spefscr;		/* SPE & eFP status */
	int used_spe;			/* set if process has used spe */
#endif /* CONFIG_SPE */
#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
	void *kvm_shadow_vcpu;		/* KVM internal data */
#endif /* CONFIG_KVM_BOOK3S_32_HANDLER */
#if defined(CONFIG_KVM) && defined(CONFIG_BOOKE)
	struct kvm_vcpu *kvm_vcpu;
#endif
#ifdef CONFIG_PPC64
	unsigned long dscr;
	int dscr_inherit;
#endif
};

#define ARCH_MIN_TASKALIGN 16

#define INIT_SP (sizeof(init_stack) + (unsigned long) &init_stack)
#define INIT_SP_LIMIT \
	(_ALIGN_UP(sizeof(init_thread_info), 16) + (unsigned long) &init_stack)
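/*
 * The initial kernel stack grows down from the top of init_stack, so
 * INIT_SP is the high end of that buffer.  INIT_SP_LIMIT sits just
 * above the thread_info at the low end: once ksp drops below it, the
 * next push would start overwriting thread_info, which is exactly the
 * "if ksp <= ksp_limit" overflow condition noted in thread_struct.
 */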

#ifdef CONFIG_SPE
#define SPEFSCR_INIT .spefscr = SPEFSCR_FINVE | SPEFSCR_FDBZE | SPEFSCR_FUNFE | SPEFSCR_FOVFE,
#else
#define SPEFSCR_INIT
#endif

#ifdef CONFIG_PPC32
#define INIT_THREAD { \
	.ksp = INIT_SP, \
	.ksp_limit = INIT_SP_LIMIT, \
	.fs = KERNEL_DS, \
	.pgdir = swapper_pg_dir, \
	.fpexc_mode = MSR_FE0 | MSR_FE1, \
	SPEFSCR_INIT \
}
#else
#define INIT_THREAD { \
	.ksp = INIT_SP, \
	.ksp_limit = INIT_SP_LIMIT, \
	.regs = (struct pt_regs *)INIT_SP - 1, /* XXX bogus, I think */ \
	.fs = KERNEL_DS, \
	.fpr = {{0}}, \
	.fpscr = { .val = 0, }, \
	.fpexc_mode = 0, \
}
#endif

/*
 * Return saved PC of a blocked thread. For now, this is the "user" PC
 */
#define thread_saved_pc(tsk) \
	((tsk)->thread.regs ? (tsk)->thread.regs->nip : 0)

#define task_pt_regs(tsk) ((struct pt_regs *)(tsk)->thread.regs)

unsigned long get_wchan(struct task_struct *p);

#define KSTK_EIP(tsk) ((tsk)->thread.regs ? (tsk)->thread.regs->nip : 0)
#define KSTK_ESP(tsk) ((tsk)->thread.regs ? (tsk)->thread.regs->gpr[1] : 0)

/* Get/set floating-point exception mode */
#define GET_FPEXC_CTL(tsk, adr) get_fpexc_mode((tsk), (adr))
#define SET_FPEXC_CTL(tsk, val) set_fpexc_mode((tsk), (val))

extern int get_fpexc_mode(struct task_struct *tsk, unsigned long adr);
extern int set_fpexc_mode(struct task_struct *tsk, unsigned int val);

#define GET_ENDIAN(tsk, adr) get_endian((tsk), (adr))
#define SET_ENDIAN(tsk, val) set_endian((tsk), (val))

extern int get_endian(struct task_struct *tsk, unsigned long adr);
extern int set_endian(struct task_struct *tsk, unsigned int val);

#define GET_UNALIGN_CTL(tsk, adr) get_unalign_ctl((tsk), (adr))
#define SET_UNALIGN_CTL(tsk, val) set_unalign_ctl((tsk), (val))

extern int get_unalign_ctl(struct task_struct *tsk, unsigned long adr);
extern int set_unalign_ctl(struct task_struct *tsk, unsigned int val);

static inline unsigned int __unpack_fe01(unsigned long msr_bits)
{
	return ((msr_bits & MSR_FE0) >> 10) | ((msr_bits & MSR_FE1) >> 8);
}

static inline unsigned long __pack_fe01(unsigned int fpmode)
{
	return ((fpmode << 10) & MSR_FE0) | ((fpmode << 8) & MSR_FE1);
}
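/*
 * Worked example: MSR_FE0 and MSR_FE1 are MSR bits 11 and 8, and
 * fpmode is the two-bit prctl encoding (PR_FP_EXC_DISABLED = 0 up to
 * PR_FP_EXC_PRECISE = 3).  So __pack_fe01(PR_FP_EXC_PRECISE) yields
 * ((3 << 10) & MSR_FE0) | ((3 << 8) & MSR_FE1) = MSR_FE0 | MSR_FE1,
 * i.e. precise floating-point exceptions; __unpack_fe01() is the
 * inverse mapping.
 */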

#ifdef CONFIG_PPC64
#define cpu_relax() do { HMT_low(); HMT_medium(); barrier(); } while (0)
#else
#define cpu_relax() barrier()
#endif
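/*
 * On SMT processors a tight busy-wait in one hardware thread starves
 * its sibling of issue slots.  Dropping to low priority for the spin
 * and restoring medium on the way out makes cpu_relax() cheap for the
 * other thread without inserting any real delay here; on 32-bit parts
 * a plain compiler barrier is all that is needed.
 */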

/* Check that a certain kernel stack pointer is valid in task_struct p */
int validate_sp(unsigned long sp, struct task_struct *p,
		unsigned long nbytes);

/*
 * Prefetch macros.
 */
#define ARCH_HAS_PREFETCH
#define ARCH_HAS_PREFETCHW
#define ARCH_HAS_SPINLOCK_PREFETCH

static inline void prefetch(const void *x)
{
	if (unlikely(!x))
		return;

	__asm__ __volatile__ ("dcbt 0,%0" : : "r" (x));
}

static inline void prefetchw(const void *x)
{
	if (unlikely(!x))
		return;

	__asm__ __volatile__ ("dcbtst 0,%0" : : "r" (x));
}
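/*
 * dcbt and dcbtst are cache-touch hints and never fault, so issuing
 * them on a stale pointer is harmless.  The NULL checks above are
 * presumably just a cheap bail-out for the very common NULL argument,
 * avoiding a pointless touch of page zero.
 */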

#define spin_lock_prefetch(x) prefetchw(x)

#ifdef CONFIG_PPC64
#define HAVE_ARCH_PICK_MMAP_LAYOUT
#endif

#ifdef CONFIG_PPC64
static inline unsigned long get_clean_sp(struct pt_regs *regs, int is_32)
{
	unsigned long sp;

	if (is_32)
		sp = regs->gpr[1] & 0x0ffffffffUL;
	else
		sp = regs->gpr[1];

	return sp;
}
#else
static inline unsigned long get_clean_sp(struct pt_regs *regs, int is_32)
{
	return regs->gpr[1];
}
#endif
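/*
 * A 32-bit task on a 64-bit kernel runs with full-width registers, so
 * the upper word of r1 may contain stale bits the compat task never
 * sees.  Masking with 0x0ffffffffUL hands callers (signal delivery,
 * for instance) a stack pointer that is valid inside the task's 4GB
 * address space; on 32-bit kernels r1 is already clean.
 */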

extern unsigned long cpuidle_disable;
enum idle_boot_override {IDLE_NO_OVERRIDE = 0, IDLE_POWERSAVE_OFF};

extern int powersave_nap;	/* set if nap mode can be used in idle loop */

#ifdef CONFIG_PSERIES_IDLE
extern void update_smt_snooze_delay(int snooze);
#else
static inline void update_smt_snooze_delay(int snooze) {}
#endif

extern void flush_instruction_cache(void);
extern void hard_reset_now(void);
extern void poweroff_now(void);
extern int fix_alignment(struct pt_regs *);
extern void cvt_fd(float *from, double *to);
extern void cvt_df(double *from, float *to);
extern void _nmask_and_or_msr(unsigned long nmask, unsigned long or_val);

#ifdef CONFIG_PPC64
/*
 * We handle most unaligned accesses in hardware. On the other hand
 * unaligned DMA can be very expensive on some ppc64 IO chips (it does
 * powers of 2 writes until it reaches sufficient alignment).
 *
 * Based on this we disable the IP header alignment in network drivers.
 */
#define NET_IP_ALIGN 0
#endif

#endif /* __KERNEL__ */
#endif /* __ASSEMBLY__ */
#endif /* _ASM_POWERPC_PROCESSOR_H */