[deliverable/linux.git] include/asm-ppc64/processor.h
#ifndef __ASM_PPC64_PROCESSOR_H
#define __ASM_PPC64_PROCESSOR_H

/*
 * Copyright (C) 2001 PPC 64 Team, IBM Corp
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/stringify.h>
#ifndef __ASSEMBLY__
#include <linux/config.h>
#include <asm/atomic.h>
#include <asm/ppcdebug.h>
#include <asm/a.out.h>
#endif
#include <asm/ptrace.h>
#include <asm/types.h>
#include <asm/systemcfg.h>

/* Machine State Register (MSR) Fields */
#define MSR_SF_LG 63 /* Enable 64 bit mode */
#define MSR_ISF_LG 61 /* Interrupt 64b mode valid on 630 */
#define MSR_HV_LG 60 /* Hypervisor state */
#define MSR_VEC_LG 25 /* Enable AltiVec */
#define MSR_POW_LG 18 /* Enable Power Management */
#define MSR_WE_LG 18 /* Wait State Enable */
#define MSR_TGPR_LG 17 /* TLB Update registers in use */
#define MSR_CE_LG 17 /* Critical Interrupt Enable */
#define MSR_ILE_LG 16 /* Interrupt Little Endian */
#define MSR_EE_LG 15 /* External Interrupt Enable */
#define MSR_PR_LG 14 /* Problem State / Privilege Level */
#define MSR_FP_LG 13 /* Floating Point enable */
#define MSR_ME_LG 12 /* Machine Check Enable */
#define MSR_FE0_LG 11 /* Floating Exception mode 0 */
#define MSR_SE_LG 10 /* Single Step */
#define MSR_BE_LG 9 /* Branch Trace */
#define MSR_DE_LG 9 /* Debug Exception Enable */
#define MSR_FE1_LG 8 /* Floating Exception mode 1 */
#define MSR_IP_LG 6 /* Exception prefix 0x000/0xFFF */
#define MSR_IR_LG 5 /* Instruction Relocate */
#define MSR_DR_LG 4 /* Data Relocate */
#define MSR_PE_LG 3 /* Protection Enable */
#define MSR_PX_LG 2 /* Protection Exclusive Mode */
#define MSR_PMM_LG 2 /* Performance monitor */
#define MSR_RI_LG 1 /* Recoverable Exception */
#define MSR_LE_LG 0 /* Little Endian */

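/*
 * Note: __MASK() comes in two flavours because the assembler does not
 * accept the C "UL" suffix, while C code needs an unsigned long mask so
 * that bit positions >= 32 (e.g. MSR_SF at bit 63) are well defined.
 */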
#ifdef __ASSEMBLY__
#define __MASK(X) (1<<(X))
#else
#define __MASK(X) (1UL<<(X))
#endif

#define MSR_SF __MASK(MSR_SF_LG) /* Enable 64 bit mode */
#define MSR_ISF __MASK(MSR_ISF_LG) /* Interrupt 64b mode valid on 630 */
#define MSR_HV __MASK(MSR_HV_LG) /* Hypervisor state */
#define MSR_VEC __MASK(MSR_VEC_LG) /* Enable AltiVec */
#define MSR_POW __MASK(MSR_POW_LG) /* Enable Power Management */
#define MSR_WE __MASK(MSR_WE_LG) /* Wait State Enable */
#define MSR_TGPR __MASK(MSR_TGPR_LG) /* TLB Update registers in use */
#define MSR_CE __MASK(MSR_CE_LG) /* Critical Interrupt Enable */
#define MSR_ILE __MASK(MSR_ILE_LG) /* Interrupt Little Endian */
#define MSR_EE __MASK(MSR_EE_LG) /* External Interrupt Enable */
#define MSR_PR __MASK(MSR_PR_LG) /* Problem State / Privilege Level */
#define MSR_FP __MASK(MSR_FP_LG) /* Floating Point enable */
#define MSR_ME __MASK(MSR_ME_LG) /* Machine Check Enable */
#define MSR_FE0 __MASK(MSR_FE0_LG) /* Floating Exception mode 0 */
#define MSR_SE __MASK(MSR_SE_LG) /* Single Step */
#define MSR_BE __MASK(MSR_BE_LG) /* Branch Trace */
#define MSR_DE __MASK(MSR_DE_LG) /* Debug Exception Enable */
#define MSR_FE1 __MASK(MSR_FE1_LG) /* Floating Exception mode 1 */
#define MSR_IP __MASK(MSR_IP_LG) /* Exception prefix 0x000/0xFFF */
#define MSR_IR __MASK(MSR_IR_LG) /* Instruction Relocate */
#define MSR_DR __MASK(MSR_DR_LG) /* Data Relocate */
#define MSR_PE __MASK(MSR_PE_LG) /* Protection Enable */
#define MSR_PX __MASK(MSR_PX_LG) /* Protection Exclusive Mode */
#define MSR_PMM __MASK(MSR_PMM_LG) /* Performance monitor */
#define MSR_RI __MASK(MSR_RI_LG) /* Recoverable Exception */
#define MSR_LE __MASK(MSR_LE_LG) /* Little Endian */

#define MSR_ (MSR_ME | MSR_RI | MSR_IR | MSR_DR | MSR_ISF)
#define MSR_KERNEL (MSR_ | MSR_SF | MSR_HV)

#define MSR_USER32 (MSR_ | MSR_PR | MSR_EE)
#define MSR_USER64 (MSR_USER32 | MSR_SF)

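/*
 * MSR_KERNEL is the MSR value the kernel normally runs with;
 * MSR_USER32/MSR_USER64 are the values typically loaded into a new
 * user task's registers for 32-bit and 64-bit processes respectively.
 */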
/* Floating Point Status and Control Register (FPSCR) Fields */

#define FPSCR_FX 0x80000000 /* FPU exception summary */
#define FPSCR_FEX 0x40000000 /* FPU enabled exception summary */
#define FPSCR_VX 0x20000000 /* Invalid operation summary */
#define FPSCR_OX 0x10000000 /* Overflow exception summary */
#define FPSCR_UX 0x08000000 /* Underflow exception summary */
#define FPSCR_ZX 0x04000000 /* Zero-divide exception summary */
#define FPSCR_XX 0x02000000 /* Inexact exception summary */
#define FPSCR_VXSNAN 0x01000000 /* Invalid op for SNaN */
#define FPSCR_VXISI 0x00800000 /* Invalid op for Inf - Inf */
#define FPSCR_VXIDI 0x00400000 /* Invalid op for Inf / Inf */
#define FPSCR_VXZDZ 0x00200000 /* Invalid op for Zero / Zero */
#define FPSCR_VXIMZ 0x00100000 /* Invalid op for Inf * Zero */
#define FPSCR_VXVC 0x00080000 /* Invalid op for Compare */
#define FPSCR_FR 0x00040000 /* Fraction rounded */
#define FPSCR_FI 0x00020000 /* Fraction inexact */
#define FPSCR_FPRF 0x0001f000 /* FPU Result Flags */
#define FPSCR_FPCC 0x0000f000 /* FPU Condition Codes */
#define FPSCR_VXSOFT 0x00000400 /* Invalid op for software request */
#define FPSCR_VXSQRT 0x00000200 /* Invalid op for square root */
#define FPSCR_VXCVI 0x00000100 /* Invalid op for integer convert */
#define FPSCR_VE 0x00000080 /* Invalid op exception enable */
#define FPSCR_OE 0x00000040 /* IEEE overflow exception enable */
#define FPSCR_UE 0x00000020 /* IEEE underflow exception enable */
#define FPSCR_ZE 0x00000010 /* IEEE zero divide exception enable */
#define FPSCR_XE 0x00000008 /* FP inexact exception enable */
#define FPSCR_NI 0x00000004 /* FPU non IEEE-Mode */
#define FPSCR_RN 0x00000003 /* FPU rounding control */

/* Special Purpose Registers (SPRNs) */

#define SPRN_CTR 0x009 /* Count Register */
#define SPRN_DABR 0x3F5 /* Data Address Breakpoint Register */
#define DABR_TRANSLATION (1UL << 2)
#define SPRN_DAR 0x013 /* Data Address Register */
#define SPRN_DEC 0x016 /* Decrement Register */
#define SPRN_DSISR 0x012 /* Data Storage Interrupt Status Register */
#define DSISR_NOHPTE 0x40000000 /* no translation found */
#define DSISR_PROTFAULT 0x08000000 /* protection fault */
#define DSISR_ISSTORE 0x02000000 /* access was a store */
#define DSISR_DABRMATCH 0x00400000 /* hit data breakpoint */
#define DSISR_NOSEGMENT 0x00200000 /* STAB/SLB miss */
#define SPRN_HID0 0x3F0 /* Hardware Implementation Register 0 */
#define SPRN_MSRDORM 0x3F1 /* Hardware Implementation Register 1 */
#define SPRN_HID1 0x3F1 /* Hardware Implementation Register 1 */
#define SPRN_IABR 0x3F2 /* Instruction Address Breakpoint Register */
#define SPRN_NIADORM 0x3F3 /* Hardware Implementation Register 2 */
#define SPRN_HID4 0x3F4 /* 970 HID4 */
#define SPRN_HID5 0x3F6 /* 970 HID5 */
#define SPRN_TSC 0x3FD /* Thread switch control */
#define SPRN_TST 0x3FC /* Thread switch timeout */
#define SPRN_L2CR 0x3F9 /* Level 2 Cache Control Register */
#define SPRN_LR 0x008 /* Link Register */
#define SPRN_PIR 0x3FF /* Processor Identification Register */
#define SPRN_PIT 0x3DB /* Programmable Interval Timer */
#define SPRN_PURR 0x135 /* Processor Utilization of Resources Register */
#define SPRN_PVR 0x11F /* Processor Version Register */
#define SPRN_RPA 0x3D6 /* Required Physical Address Register */
#define SPRN_SDA 0x3BF /* Sampled Data Address Register */
#define SPRN_SDR1 0x019 /* MMU Hash Base Register */
#define SPRN_SIA 0x3BB /* Sampled Instruction Address Register */
#define SPRN_SPRG0 0x110 /* Special Purpose Register General 0 */
#define SPRN_SPRG1 0x111 /* Special Purpose Register General 1 */
#define SPRN_SPRG2 0x112 /* Special Purpose Register General 2 */
#define SPRN_SPRG3 0x113 /* Special Purpose Register General 3 */
#define SPRN_SRR0 0x01A /* Save/Restore Register 0 */
#define SPRN_SRR1 0x01B /* Save/Restore Register 1 */
#define SPRN_TBRL 0x10C /* Time Base Read Lower Register (user, R/O) */
#define SPRN_TBRU 0x10D /* Time Base Read Upper Register (user, R/O) */
#define SPRN_TBWL 0x11C /* Time Base Write Lower Register (super, W/O) */
#define SPRN_TBWU 0x11D /* Time Base Write Upper Register (super, W/O) */
#define SPRN_HIOR 0x137 /* 970 Hypervisor interrupt offset */
#define SPRN_USIA 0x3AB /* User Sampled Instruction Address Register */
#define SPRN_XER 0x001 /* Fixed Point Exception Register */
#define SPRN_VRSAVE 0x100 /* Vector save */
#define SPRN_CTRLF 0x088
#define SPRN_CTRLT 0x098
#define CTRL_RUNLATCH 0x1

/* Performance monitor SPRs */
#define SPRN_SIAR 780
#define SPRN_SDAR 781
#define SPRN_MMCRA 786
#define MMCRA_SIHV 0x10000000UL /* state of MSR HV when SIAR set */
#define MMCRA_SIPR 0x08000000UL /* state of MSR PR when SIAR set */
#define MMCRA_SAMPLE_ENABLE 0x00000001UL /* enable sampling */
#define SPRN_PMC1 787
#define SPRN_PMC2 788
#define SPRN_PMC3 789
#define SPRN_PMC4 790
#define SPRN_PMC5 791
#define SPRN_PMC6 792
#define SPRN_PMC7 793
#define SPRN_PMC8 794
#define SPRN_MMCR0 795
#define MMCR0_FC 0x80000000UL /* freeze counters; set to 1 on a perfmon exception */
#define MMCR0_FCS 0x40000000UL /* freeze in supervisor state */
#define MMCR0_KERNEL_DISABLE MMCR0_FCS
#define MMCR0_FCP 0x20000000UL /* freeze in problem state */
#define MMCR0_PROBLEM_DISABLE MMCR0_FCP
#define MMCR0_FCM1 0x10000000UL /* freeze counters while MSR mark = 1 */
#define MMCR0_FCM0 0x08000000UL /* freeze counters while MSR mark = 0 */
#define MMCR0_PMXE 0x04000000UL /* performance monitor exception enable */
#define MMCR0_FCECE 0x02000000UL /* freeze counters on enabled condition or event */
#define MMCR0_TBEE 0x00400000UL /* time base exception enable */
#define MMCR0_PMC1CE 0x00008000UL /* PMC1 count enable */
#define MMCR0_PMCjCE 0x00004000UL /* PMCj count enable */
#define MMCR0_TRIGGER 0x00002000UL /* TRIGGER enable */
#define MMCR0_PMAO 0x00000080UL /* performance monitor alert has occurred; set to 0 after handling exception */
#define MMCR0_SHRFC 0x00000040UL /* share freeze conditions between threads */
#define MMCR0_FCTI 0x00000008UL /* freeze counters in tags inactive mode */
#define MMCR0_FCTA 0x00000004UL /* freeze counters in tags active mode */
#define MMCR0_FCWAIT 0x00000002UL /* freeze counter in WAIT state */
#define MMCR0_FCHV 0x00000001UL /* freeze conditions in hypervisor mode */
#define SPRN_MMCR1 798

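/*
 * Rough programming sketch (illustrative only; details vary by CPU):
 * freeze the counters with MMCR0_FC, program the event selection in
 * MMCR0/MMCR1/MMCRA, zero the PMCs, then clear MMCR0_FC and set
 * MMCR0_PMXE to take an exception on counter overflow; the exception
 * handler clears MMCR0_PMAO once it has dealt with the alert.
 */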
/* Short-hand versions for a number of the above SPRNs */

#define CTR SPRN_CTR /* Counter Register */
#define DAR SPRN_DAR /* Data Address Register */
#define DABR SPRN_DABR /* Data Address Breakpoint Register */
#define DEC SPRN_DEC /* Decrement Register */
#define DSISR SPRN_DSISR /* Data Storage Interrupt Status Register */
#define HID0 SPRN_HID0 /* Hardware Implementation Register 0 */
#define MSRDORM SPRN_MSRDORM /* MSR Dormant Register */
#define NIADORM SPRN_NIADORM /* NIA Dormant Register */
#define TSC SPRN_TSC /* Thread switch control */
#define TST SPRN_TST /* Thread switch timeout */
#define IABR SPRN_IABR /* Instruction Address Breakpoint Register */
#define L2CR SPRN_L2CR /* PPC 750 L2 control register */
#define __LR SPRN_LR
#define PVR SPRN_PVR /* Processor Version */
#define PIR SPRN_PIR /* Processor ID */
#define PURR SPRN_PURR /* Processor Utilization of Resource Register */
#define SDR1 SPRN_SDR1 /* MMU hash base register */
#define SPR0 SPRN_SPRG0 /* Supervisor Private Registers */
#define SPR1 SPRN_SPRG1
#define SPR2 SPRN_SPRG2
#define SPR3 SPRN_SPRG3
#define SPRG0 SPRN_SPRG0
#define SPRG1 SPRN_SPRG1
#define SPRG2 SPRN_SPRG2
#define SPRG3 SPRN_SPRG3
#define SRR0 SPRN_SRR0 /* Save and Restore Register 0 */
#define SRR1 SPRN_SRR1 /* Save and Restore Register 1 */
#define TBRL SPRN_TBRL /* Time Base Read Lower Register */
#define TBRU SPRN_TBRU /* Time Base Read Upper Register */
#define TBWL SPRN_TBWL /* Time Base Write Lower Register */
#define TBWU SPRN_TBWU /* Time Base Write Upper Register */
#define XER SPRN_XER

/* Processor Version Register (PVR) field extraction */

#define PVR_VER(pvr) (((pvr) >> 16) & 0xFFFF) /* Version field */
#define PVR_REV(pvr) (((pvr) >> 0) & 0xFFFF) /* Revision field */

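/*
 * Illustrative use: read the PVR and decode it, e.g.
 *
 *	unsigned long pvr = mfspr(SPRN_PVR);
 *	if (PVR_VER(pvr) == PV_POWER4)
 *		...	// POWER4; PVR_REV(pvr) gives the chip revision
 */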
/* Processor Version Numbers */
#define PV_NORTHSTAR 0x0033
#define PV_PULSAR 0x0034
#define PV_POWER4 0x0035
#define PV_ICESTAR 0x0036
#define PV_SSTAR 0x0037
#define PV_POWER4p 0x0038
#define PV_970 0x0039
#define PV_POWER5 0x003A
#define PV_POWER5p 0x003B
#define PV_970FX 0x003C
#define PV_630 0x0040
#define PV_630p 0x0041

/* Platforms supported by PPC64 */
#define PLATFORM_PSERIES 0x0100
#define PLATFORM_PSERIES_LPAR 0x0101
#define PLATFORM_ISERIES_LPAR 0x0201
#define PLATFORM_LPAR 0x0001
#define PLATFORM_POWERMAC 0x0400
#define PLATFORM_MAPLE 0x0500

/* Compatibility with drivers coming from PPC32 world */
#define _machine (systemcfg->platform)
#define _MACH_Pmac PLATFORM_POWERMAC

/*
 * List of interrupt controllers.
 */
#define IC_INVALID 0
#define IC_OPEN_PIC 1
#define IC_PPC_XIC 2

#define XGLUE(a,b) a##b
#define GLUE(a,b) XGLUE(a,b)

#ifdef __ASSEMBLY__

#define _GLOBAL(name) \
	.section ".text"; \
	.align 2 ; \
	.globl name; \
	.globl GLUE(.,name); \
	.section ".opd","aw"; \
name: \
	.quad GLUE(.,name); \
	.quad .TOC.@tocbase; \
	.quad 0; \
	.previous; \
	.type GLUE(.,name),@function; \
GLUE(.,name):

#define _STATIC(name) \
	.section ".text"; \
	.align 2 ; \
	.section ".opd","aw"; \
name: \
	.quad GLUE(.,name); \
	.quad .TOC.@tocbase; \
	.quad 0; \
	.previous; \
	.type GLUE(.,name),@function; \
GLUE(.,name):

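/*
 * Note: in the 64-bit PowerPC ELF ABI every function has a function
 * descriptor (entry point, TOC base, environment pointer). _GLOBAL and
 * _STATIC above emit that descriptor in the .opd section under `name`
 * and place the actual code at the dot-prefixed `.name` symbol.
 */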
#else /* __ASSEMBLY__ */

/*
 * Default implementation of macro that returns current
 * instruction pointer ("program counter").
 */
#define current_text_addr() ({ __label__ _l; _l: &&_l;})

/* Macros for setting and retrieving special purpose registers */

#define mfmsr() ({unsigned long rval; \
	asm volatile("mfmsr %0" : "=r" (rval)); rval;})

#define __mtmsrd(v, l) asm volatile("mtmsrd %0," __stringify(l) \
	: : "r" (v))
#define mtmsrd(v) __mtmsrd((v), 0)

#define mfspr(rn) ({unsigned long rval; \
	asm volatile("mfspr %0," __stringify(rn) \
		: "=r" (rval)); rval;})
#define mtspr(rn, v) asm volatile("mtspr " __stringify(rn) ",%0" : : "r" (v))

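/*
 * Usage sketch (illustrative): the SPR number must be a compile-time
 * constant because it is stringified directly into the instruction, e.g.
 *
 *	unsigned long pvr = mfspr(SPRN_PVR);
 *	mtspr(SPRN_SPRG3, value);
 *
 * A variable SPR number will not work with these macros.
 */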
#define mftb() ({unsigned long rval; \
	asm volatile("mftb %0" : "=r" (rval)); rval;})

#define mttbl(v) asm volatile("mttbl %0":: "r"(v))
#define mttbu(v) asm volatile("mttbu %0":: "r"(v))

#define mfasr() ({unsigned long rval; \
	asm volatile("mfasr %0" : "=r" (rval)); rval;})

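/*
 * set_tb(): zero TBL first so that an increment cannot carry into TBU
 * between the two writes, then set the upper half, then the real lower
 * half.
 */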
static inline void set_tb(unsigned int upper, unsigned int lower)
{
	mttbl(0);
	mttbu(upper);
	mttbl(lower);
}

#define __get_SP() ({unsigned long sp; \
	asm volatile("mr %0,1": "=r" (sp)); sp;})

#ifdef __KERNEL__

extern int have_of;
extern u64 ppc64_interrupt_controller;

struct task_struct;
void start_thread(struct pt_regs *regs, unsigned long fdptr, unsigned long sp);
void release_thread(struct task_struct *);

/* Prepare to copy thread state - unlazy all lazy status */
extern void prepare_to_copy(struct task_struct *tsk);

/* Create a new kernel thread. */
extern long kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);

/* Lazy FPU handling on uni-processor */
extern struct task_struct *last_task_used_math;
extern struct task_struct *last_task_used_altivec;

/* 64-bit user address space is 41 bits (2TB of user VM) */
#define TASK_SIZE_USER64 (0x0000020000000000UL)

/*
 * 32-bit user address space is 4GB - 1 page
 * (this 1 page is needed so that referencing 0xFFFFFFFF generates EFAULT)
 */
#define TASK_SIZE_USER32 (0x0000000100000000UL - (1*PAGE_SIZE))

#define TASK_SIZE (test_thread_flag(TIF_32BIT) ? \
		TASK_SIZE_USER32 : TASK_SIZE_USER64)

/* This decides where the kernel will search for a free chunk of vm
 * space during mmap's.
 */
#define TASK_UNMAPPED_BASE_USER32 (PAGE_ALIGN(TASK_SIZE_USER32 / 4))
#define TASK_UNMAPPED_BASE_USER64 (PAGE_ALIGN(TASK_SIZE_USER64 / 4))

#define TASK_UNMAPPED_BASE ((test_thread_flag(TIF_32BIT)||(ppcdebugset(PPCDBG_BINFMT_32ADDR))) ? \
		TASK_UNMAPPED_BASE_USER32 : TASK_UNMAPPED_BASE_USER64)

typedef struct {
	unsigned long seg;
} mm_segment_t;

struct thread_struct {
	unsigned long ksp; /* Kernel stack pointer */
	unsigned long ksp_vsid;
	struct pt_regs *regs; /* Pointer to saved register state */
	mm_segment_t fs; /* for get_fs() validation */
	double fpr[32]; /* Complete floating point set */
	unsigned long fpscr; /* Floating point status (plus pad) */
	unsigned long fpexc_mode; /* Floating-point exception mode */
	unsigned long start_tb; /* Start purr when proc switched in */
	unsigned long accum_tb; /* Total accumulated purr for process */
	unsigned long vdso_base; /* base of the vDSO library */
#ifdef CONFIG_ALTIVEC
	/* Complete AltiVec register set */
	vector128 vr[32] __attribute((aligned(16)));
	/* AltiVec status */
	vector128 vscr __attribute((aligned(16)));
	unsigned long vrsave;
	int used_vr; /* set if process has used altivec */
#endif /* CONFIG_ALTIVEC */
};

#define ARCH_MIN_TASKALIGN 16

#define INIT_SP (sizeof(init_stack) + (unsigned long) &init_stack)

#define INIT_THREAD { \
	.ksp = INIT_SP, \
	.regs = (struct pt_regs *)INIT_SP - 1, \
	.fs = KERNEL_DS, \
	.fpr = {0}, \
	.fpscr = 0, \
	.fpexc_mode = MSR_FE0|MSR_FE1, \
}

/*
 * Note: the vm_start and vm_end fields here should *not*
 * be in kernel space. (Could vm_end == vm_start perhaps?)
 */
#define IOREMAP_MMAP { &ioremap_mm, 0, 0x1000, NULL, \
		PAGE_SHARED, VM_READ | VM_WRITE | VM_EXEC, \
		1, NULL, NULL }

extern struct mm_struct ioremap_mm;

/*
 * Return saved PC of a blocked thread. For now, this is the "user" PC
 */
#define thread_saved_pc(tsk) \
	((tsk)->thread.regs? (tsk)->thread.regs->nip: 0)

unsigned long get_wchan(struct task_struct *p);

#define KSTK_EIP(tsk) ((tsk)->thread.regs? (tsk)->thread.regs->nip: 0)
#define KSTK_ESP(tsk) ((tsk)->thread.regs? (tsk)->thread.regs->gpr[1]: 0)

/* Get/set floating-point exception mode */
#define GET_FPEXC_CTL(tsk, adr) get_fpexc_mode((tsk), (adr))
#define SET_FPEXC_CTL(tsk, val) set_fpexc_mode((tsk), (val))

extern int get_fpexc_mode(struct task_struct *tsk, unsigned long adr);
extern int set_fpexc_mode(struct task_struct *tsk, unsigned int val);

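/*
 * MSR_FE0 is bit 11 and MSR_FE1 is bit 8 of the MSR (see the *_LG
 * definitions above), so the helpers below convert between those two
 * MSR bits and a compact 2-bit "fpexc mode" value: FE0 becomes bit 1
 * and FE1 becomes bit 0. For example, an MSR with both FE0 and FE1 set
 * unpacks to mode 3 (precise floating-point exceptions).
 */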
static inline unsigned int __unpack_fe01(unsigned long msr_bits)
{
	return ((msr_bits & MSR_FE0) >> 10) | ((msr_bits & MSR_FE1) >> 8);
}

static inline unsigned long __pack_fe01(unsigned int fpmode)
{
	return ((fpmode << 10) & MSR_FE0) | ((fpmode << 8) & MSR_FE1);
}

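/*
 * cpu_relax(): briefly drop this hardware thread's SMT priority
 * (HMT_low) and then restore it (HMT_medium), so that a sibling thread
 * gets more execution resources while we sit in a busy-wait loop.
 */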
#define cpu_relax() do { HMT_low(); HMT_medium(); barrier(); } while (0)

/*
 * Prefetch macros.
 */
#define ARCH_HAS_PREFETCH
#define ARCH_HAS_PREFETCHW
#define ARCH_HAS_SPINLOCK_PREFETCH

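/*
 * dcbt/dcbtst are cache-touch hints: they never fault, they only ask
 * the hardware to start pulling the line into the data cache (dcbtst
 * additionally hints that the line will be written). The explicit NULL
 * check below presumably skips the common prefetch-of-NULL case (e.g.
 * a list walker prefetching the next pointer of the last element),
 * where the hint could only waste translation work.
 */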
static inline void prefetch(const void *x)
{
	if (unlikely(!x))
		return;

	__asm__ __volatile__ ("dcbt 0,%0" : : "r" (x));
}

static inline void prefetchw(const void *x)
{
	if (unlikely(!x))
		return;

	__asm__ __volatile__ ("dcbtst 0,%0" : : "r" (x));
}

#define spin_lock_prefetch(x) prefetchw(x)

#define HAVE_ARCH_PICK_MMAP_LAYOUT

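/*
 * The run latch bit in the CTRL register indicates whether this thread
 * is doing useful work; the idle loop turns it off and it is turned back
 * on when real work resumes, which the platform can use for accounting
 * and thread-priority decisions. SPRN_CTRLF is the read form of the
 * register, SPRN_CTRLT the write form.
 */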
static inline void ppc64_runlatch_on(void)
{
	unsigned long ctrl;

	ctrl = mfspr(SPRN_CTRLF);
	ctrl |= CTRL_RUNLATCH;
	mtspr(SPRN_CTRLT, ctrl);
}

static inline void ppc64_runlatch_off(void)
{
	unsigned long ctrl;

	ctrl = mfspr(SPRN_CTRLF);
	ctrl &= ~CTRL_RUNLATCH;
	mtspr(SPRN_CTRLT, ctrl);
}

#endif /* __KERNEL__ */

#endif /* __ASSEMBLY__ */

/*
 * Number of entries in the SLB. If this ever changes we should handle
 * it with a cpu feature fixup.
 */
#define SLB_NUM_ENTRIES 64

#endif /* __ASM_PPC64_PROCESSOR_H */