#ifndef _ASM_IA64_PROCESSOR_H
#define _ASM_IA64_PROCESSOR_H

/*
 * Copyright (C) 1998-2004 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 *	Stephane Eranian <eranian@hpl.hp.com>
 * Copyright (C) 1999 Asit Mallick <asit.k.mallick@intel.com>
 * Copyright (C) 1999 Don Dugger <don.dugger@intel.com>
 *
 * 11/24/98	S.Eranian	added ia64_set_iva()
 * 12/03/99	D. Mosberger	implement thread_saved_pc() via kernel unwind API
 * 06/16/00	A. Mallick	added csd/ssd/tssd for ia32 support
 */

#include <asm/intrinsics.h>
#include <asm/kregs.h>
#include <asm/ptrace.h>
#include <asm/ustack.h>

#define IA64_NUM_PHYS_STACK_REG	96
#define IA64_NUM_DBG_REGS	8

#define DEFAULT_MAP_BASE	__IA64_UL_CONST(0x2000000000000000)
#define DEFAULT_TASK_SIZE	__IA64_UL_CONST(0xa000000000000000)

/*
 * TASK_SIZE is really misnamed: it is actually the maximum user
 * space address (plus one).  On IA-64, there are five regions of 2TB
 * each (assuming 8KB page size), for a total of 10TB of user virtual
 * address space.
 */
#define TASK_SIZE_OF(tsk)	((tsk)->thread.task_size)
#define TASK_SIZE		TASK_SIZE_OF(current)

/*
 * This decides where the kernel will search for a free chunk of vm
 * space during mmap's.
 */
#define TASK_UNMAPPED_BASE	(current->thread.map_base)

#define IA64_THREAD_FPH_VALID	(__IA64_UL(1) << 0)	/* floating-point high state valid? */
#define IA64_THREAD_DBG_VALID	(__IA64_UL(1) << 1)	/* debug registers valid? */
#define IA64_THREAD_PM_VALID	(__IA64_UL(1) << 2)	/* performance registers valid? */
#define IA64_THREAD_UAC_NOPRINT	(__IA64_UL(1) << 3)	/* don't log unaligned accesses */
#define IA64_THREAD_UAC_SIGBUS	(__IA64_UL(1) << 4)	/* generate SIGBUS on unaligned acc. */
#define IA64_THREAD_MIGRATION	(__IA64_UL(1) << 5)	/* require migration
							   sync at ctx sw */
#define IA64_THREAD_FPEMU_NOPRINT (__IA64_UL(1) << 6)	/* don't log any fpswa faults */
#define IA64_THREAD_FPEMU_SIGFPE  (__IA64_UL(1) << 7)	/* send a SIGFPE for fpswa faults */

#define IA64_THREAD_UAC_SHIFT	3
#define IA64_THREAD_UAC_MASK	(IA64_THREAD_UAC_NOPRINT | IA64_THREAD_UAC_SIGBUS)
#define IA64_THREAD_FPEMU_SHIFT	6
#define IA64_THREAD_FPEMU_MASK	(IA64_THREAD_FPEMU_NOPRINT | IA64_THREAD_FPEMU_SIGFPE)


/*
 * This shift should be large enough to be able to represent 1000000000/itc_freq with good
 * accuracy while being small enough to fit 10*1000000000<<IA64_NSEC_PER_CYC_SHIFT in 64 bits
 * (this will give enough slack to represent 10 seconds worth of time as a scaled number).
 */
#define IA64_NSEC_PER_CYC_SHIFT	30
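
/*
 * Worked example (editorial note, not part of the original header): with a
 * 400 MHz ITC, nsec_per_cyc = (1000000000 << 30)/400000000 = 2684354560,
 * and a delta of 1000 cycles converts as
 * (1000 * 2684354560) >> 30 = 2500 ns.  The 10-second headroom claim holds
 * because 10*1000000000 << 30 ~= 1.07e19 < 2^64 ~= 1.84e19.
 */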

#ifndef __ASSEMBLY__

#include <linux/cache.h>
#include <linux/compiler.h>
#include <linux/threads.h>
#include <linux/types.h>

#include <asm/fpu.h>
#include <asm/page.h>
#include <asm/percpu.h>
#include <asm/rse.h>
#include <asm/unwind.h>
#include <asm/atomic.h>
#ifdef CONFIG_NUMA
#include <asm/nodedata.h>
#endif

/* like above but expressed as bitfields for more efficient access: */
struct ia64_psr {
	__u64 reserved0 : 1;
	__u64 be : 1;
	__u64 up : 1;
	__u64 ac : 1;
	__u64 mfl : 1;
	__u64 mfh : 1;
	__u64 reserved1 : 7;
	__u64 ic : 1;
	__u64 i : 1;
	__u64 pk : 1;
	__u64 reserved2 : 1;
	__u64 dt : 1;
	__u64 dfl : 1;
	__u64 dfh : 1;
	__u64 sp : 1;
	__u64 pp : 1;
	__u64 di : 1;
	__u64 si : 1;
	__u64 db : 1;
	__u64 lp : 1;
	__u64 tb : 1;
	__u64 rt : 1;
	__u64 reserved3 : 4;
	__u64 cpl : 2;
	__u64 is : 1;
	__u64 mc : 1;
	__u64 it : 1;
	__u64 id : 1;
	__u64 da : 1;
	__u64 dd : 1;
	__u64 ss : 1;
	__u64 ri : 2;
	__u64 ed : 1;
	__u64 bn : 1;
	__u64 reserved4 : 19;
};
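
/*
 * Illustrative sketch (editorial, not part of the original header): the
 * bitfield overlay is normally applied to a saved psr image such as
 * pt_regs->cr_ipsr, e.g. through the ia64_psr() cast from <asm/ptrace.h>:
 */
#if 0
static inline int
example_came_from_user (struct pt_regs *regs)
{
	return ia64_psr(regs)->cpl == 3;	/* user code runs at privilege level 3 */
}
#endif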

/*
 * CPU type, hardware bug flags, and per-CPU state.  Frequently used
 * state comes earlier:
 */
struct cpuinfo_ia64 {
	__u32 softirq_pending;
	__u64 itm_delta;	/* # of clock cycles between clock ticks */
	__u64 itm_next;		/* interval timer match value to use for next clock tick */
	__u64 nsec_per_cyc;	/* (1000000000<<IA64_NSEC_PER_CYC_SHIFT)/itc_freq */
	__u64 unimpl_va_mask;	/* mask of unimplemented virtual address bits (from PAL) */
	__u64 unimpl_pa_mask;	/* mask of unimplemented physical address bits (from PAL) */
	__u64 itc_freq;		/* frequency of ITC counter */
	__u64 proc_freq;	/* frequency of processor */
	__u64 cyc_per_usec;	/* itc_freq/1000000 */
	__u64 ptce_base;
	__u32 ptce_count[2];
	__u32 ptce_stride[2];
	struct task_struct *ksoftirqd;	/* kernel softirq daemon for this CPU */

#ifdef CONFIG_SMP
	__u64 loops_per_jiffy;
	int cpu;
	__u32 socket_id;	/* physical processor socket id */
	__u16 core_id;		/* core id */
	__u16 thread_id;	/* thread id */
	__u16 num_log;		/* Total number of logical processors on
				 * this socket that were successfully booted */
	__u8 cores_per_socket;	/* Cores per processor socket */
	__u8 threads_per_core;	/* Threads per core */
#endif

	/* CPUID-derived information: */
	__u64 ppn;
	__u64 features;
	__u8 number;
	__u8 revision;
	__u8 model;
	__u8 family;
	__u8 archrev;
	char vendor[16];
	char *model_name;

#ifdef CONFIG_NUMA
	struct ia64_node_data *node_data;
#endif
};

DECLARE_PER_CPU(struct cpuinfo_ia64, cpu_info);

/*
 * The "local" data variable.  It refers to the per-CPU data of the currently executing
 * CPU, much like "current" points to the per-task data of the currently executing task.
 * Do not use the address of local_cpu_data, since it will be different from
 * cpu_data(smp_processor_id())!
 */
#define local_cpu_data	(&__ia64_per_cpu_var(cpu_info))
#define cpu_data(cpu)	(&per_cpu(cpu_info, cpu))
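
/*
 * Usage sketch (editorial, not part of the original header): read fields of
 * the running CPU through the fast local alias, and reach another CPU's data
 * only via cpu_data(cpu), per the warning above:
 */
#if 0
static inline __u64
example_itc_freq_of (int cpu)
{
	if (cpu == smp_processor_id())
		return local_cpu_data->itc_freq;	/* fast, per-CPU mapped */
	return cpu_data(cpu)->itc_freq;			/* explicit remote access */
}
#endif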

extern void print_cpu_info (struct cpuinfo_ia64 *);

typedef struct {
	unsigned long seg;
} mm_segment_t;

#define SET_UNALIGN_CTL(task,value)								\
({												\
	(task)->thread.flags = (((task)->thread.flags & ~IA64_THREAD_UAC_MASK)			\
				| (((value) << IA64_THREAD_UAC_SHIFT) & IA64_THREAD_UAC_MASK));	\
	0;											\
})
#define GET_UNALIGN_CTL(task,addr)								\
({												\
	put_user(((task)->thread.flags & IA64_THREAD_UAC_MASK) >> IA64_THREAD_UAC_SHIFT,	\
		 (int __user *) (addr));							\
})

#define SET_FPEMU_CTL(task,value)								\
({												\
	(task)->thread.flags = (((task)->thread.flags & ~IA64_THREAD_FPEMU_MASK)		\
				| (((value) << IA64_THREAD_FPEMU_SHIFT) & IA64_THREAD_FPEMU_MASK));	\
	0;											\
})
#define GET_FPEMU_CTL(task,addr)								\
({												\
	put_user(((task)->thread.flags & IA64_THREAD_FPEMU_MASK) >> IA64_THREAD_FPEMU_SHIFT,	\
		 (int __user *) (addr));							\
})
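
/*
 * Worked example (editorial, not part of the original header): these
 * shift/mask pairs pack the prctl() unaligned-access and fp-emulation
 * controls into thread.flags.  With value == PR_UNALIGN_SIGBUS (2),
 * (2 << IA64_THREAD_UAC_SHIFT) == (1 << 4) == IA64_THREAD_UAC_SIGBUS, so
 * SET_UNALIGN_CTL(current, 2) arms SIGBUS delivery on unaligned accesses
 * and GET_UNALIGN_CTL later recovers the value 2.
 */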

#ifdef CONFIG_IA32_SUPPORT
struct desc_struct {
	unsigned int a, b;
};

#define desc_empty(desc)		(!((desc)->a | (desc)->b))
#define desc_equal(desc1, desc2)	(((desc1)->a == (desc2)->a) && ((desc1)->b == (desc2)->b))

#define GDT_ENTRY_TLS_ENTRIES	3
#define GDT_ENTRY_TLS_MIN	6
#define GDT_ENTRY_TLS_MAX	(GDT_ENTRY_TLS_MIN + GDT_ENTRY_TLS_ENTRIES - 1)

#define TLS_SIZE (GDT_ENTRY_TLS_ENTRIES * 8)

struct ia64_partial_page_list;
#endif

struct thread_struct {
	__u32 flags;		/* various thread flags (see IA64_THREAD_*) */
	/* writing on_ustack is performance-critical, so it's worth spending 8 bits on it... */
	__u8 on_ustack;		/* executing on user-stacks? */
	__u8 pad[3];
	__u64 ksp;		/* kernel stack pointer */
	__u64 map_base;		/* base address for get_unmapped_area() */
	__u64 task_size;	/* limit for task size */
	__u64 rbs_bot;		/* the base address for the RBS */
	int last_fph_cpu;	/* CPU that may hold the contents of f32-f127 */

#ifdef CONFIG_IA32_SUPPORT
	__u64 eflag;		/* IA32 EFLAGS reg */
	__u64 fsr;		/* IA32 floating pt status reg */
	__u64 fcr;		/* IA32 floating pt control reg */
	__u64 fir;		/* IA32 fp except. instr. reg */
	__u64 fdr;		/* IA32 fp except. data reg */
	__u64 old_k1;		/* old value of ar.k1 */
	__u64 old_iob;		/* old IOBase value */
	struct ia64_partial_page_list *ppl;	/* partial page list for 4K page size issue */
	/* cached TLS descriptors. */
	struct desc_struct tls_array[GDT_ENTRY_TLS_ENTRIES];

# define INIT_THREAD_IA32	.eflag =	0,			\
				.fsr =		0,			\
				.fcr =		0x17800000037fULL,	\
				.fir =		0,			\
				.fdr =		0,			\
				.old_k1 =	0,			\
				.old_iob =	0,			\
				.ppl =		NULL,
#else
# define INIT_THREAD_IA32
#endif /* CONFIG_IA32_SUPPORT */
#ifdef CONFIG_PERFMON
	void *pfm_context;		     /* pointer to detailed PMU context */
	unsigned long pfm_needs_checking;    /* when >0, pending perfmon work on kernel exit */
# define INIT_THREAD_PM		.pfm_context =		NULL,	\
				.pfm_needs_checking =	0UL,
#else
# define INIT_THREAD_PM
#endif
	__u64 dbr[IA64_NUM_DBG_REGS];
	__u64 ibr[IA64_NUM_DBG_REGS];
	struct ia64_fpreg fph[96];	/* saved/loaded on demand */
};

#define INIT_THREAD {						\
	.flags =	0,					\
	.on_ustack =	0,					\
	.ksp =		0,					\
	.map_base =	DEFAULT_MAP_BASE,			\
	.rbs_bot =	STACK_TOP - DEFAULT_USER_STACK_SIZE,	\
	.task_size =	DEFAULT_TASK_SIZE,			\
	.last_fph_cpu =	-1,					\
	INIT_THREAD_IA32					\
	INIT_THREAD_PM						\
	.dbr =		{0, },					\
	.ibr =		{0, },					\
	.fph =		{{{{0}}}, }				\
}

#define start_thread(regs,new_ip,new_sp) do {							\
	set_fs(USER_DS);									\
	regs->cr_ipsr = ((regs->cr_ipsr | (IA64_PSR_BITS_TO_SET | IA64_PSR_CPL))		\
			 & ~(IA64_PSR_BITS_TO_CLEAR | IA64_PSR_RI | IA64_PSR_IS));		\
	regs->cr_iip = new_ip;									\
	regs->ar_rsc = 0xf;		/* eager mode, privilege level 3 */			\
	regs->ar_rnat = 0;									\
	regs->ar_bspstore = current->thread.rbs_bot;						\
	regs->ar_fpsr = FPSR_DEFAULT;								\
	regs->loadrs = 0;									\
	regs->r8 = get_dumpable(current->mm);	/* set "don't zap registers" flag */		\
	regs->r12 = new_sp - 16;	/* allocate 16 byte scratch area */			\
	if (unlikely(!get_dumpable(current->mm))) {						\
		/*										\
		 * Zap scratch regs to avoid leaking bits between processes with different	\
		 * uid/privileges.								\
		 */										\
		regs->ar_pfs = 0; regs->b0 = 0; regs->pr = 0;					\
		regs->r1 = 0; regs->r9 = 0; regs->r11 = 0; regs->r13 = 0; regs->r15 = 0;	\
	}											\
} while (0)

/* Forward declarations, a strange C thing... */
struct mm_struct;
struct task_struct;

/*
 * Free all resources held by a thread. This is called after the
 * parent of DEAD_TASK has collected the exit status of the task via
 * wait().
 */
#define release_thread(dead_task)

/* Prepare to copy thread state - unlazy all lazy status */
#define prepare_to_copy(tsk)	do { } while (0)

/*
 * This is the mechanism for creating a new kernel thread.
 *
 * NOTE 1: Only a kernel-only process (i.e., the swapper or direct
 * descendants that haven't done an "execve()") should use this: it
 * will work within a system call from a "real" process, but the
 * process memory space will not be freed until both the parent and
 * the child have exited.
 *
 * NOTE 2: This MUST NOT be an inlined function.  Otherwise, we get
 * into trouble in init/main.c when the child thread returns to
 * do_basic_setup() and the timing is such that free_initmem() has
 * been called already.
 */
extern pid_t kernel_thread (int (*fn)(void *), void *arg, unsigned long flags);

/* Get wait channel for task P.  */
extern unsigned long get_wchan (struct task_struct *p);

/* Return instruction pointer of blocked task TSK.  */
#define KSTK_EIP(tsk)					\
  ({							\
	struct pt_regs *_regs = task_pt_regs(tsk);	\
	_regs->cr_iip + ia64_psr(_regs)->ri;		\
  })

/* Return stack pointer of blocked task TSK.  */
#define KSTK_ESP(tsk)	((tsk)->thread.ksp)

extern void ia64_getreg_unknown_kr (void);
extern void ia64_setreg_unknown_kr (void);

#define ia64_get_kr(regnum)					\
({								\
	unsigned long r = 0;					\
								\
	switch (regnum) {					\
	    case 0: r = ia64_getreg(_IA64_REG_AR_KR0); break;	\
	    case 1: r = ia64_getreg(_IA64_REG_AR_KR1); break;	\
	    case 2: r = ia64_getreg(_IA64_REG_AR_KR2); break;	\
	    case 3: r = ia64_getreg(_IA64_REG_AR_KR3); break;	\
	    case 4: r = ia64_getreg(_IA64_REG_AR_KR4); break;	\
	    case 5: r = ia64_getreg(_IA64_REG_AR_KR5); break;	\
	    case 6: r = ia64_getreg(_IA64_REG_AR_KR6); break;	\
	    case 7: r = ia64_getreg(_IA64_REG_AR_KR7); break;	\
	    default: ia64_getreg_unknown_kr(); break;		\
	}							\
	r;							\
})

#define ia64_set_kr(regnum, r)					\
({								\
	switch (regnum) {					\
	    case 0: ia64_setreg(_IA64_REG_AR_KR0, r); break;	\
	    case 1: ia64_setreg(_IA64_REG_AR_KR1, r); break;	\
	    case 2: ia64_setreg(_IA64_REG_AR_KR2, r); break;	\
	    case 3: ia64_setreg(_IA64_REG_AR_KR3, r); break;	\
	    case 4: ia64_setreg(_IA64_REG_AR_KR4, r); break;	\
	    case 5: ia64_setreg(_IA64_REG_AR_KR5, r); break;	\
	    case 6: ia64_setreg(_IA64_REG_AR_KR6, r); break;	\
	    case 7: ia64_setreg(_IA64_REG_AR_KR7, r); break;	\
	    default: ia64_setreg_unknown_kr(); break;		\
	}							\
})
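
/*
 * Usage sketch (editorial, not part of the original header): regnum is
 * expected to be a compile-time constant (one of the IA64_KR_* indices from
 * <asm/kregs.h>), so the switch collapses to a single getreg/setreg.  The
 * fph-ownership macros below are one real caller:
 */
#if 0
struct task_struct *owner;

owner = (struct task_struct *) ia64_get_kr(IA64_KR_FPU_OWNER);
ia64_set_kr(IA64_KR_FPU_OWNER, (unsigned long) current);
#endif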

/*
 * The following three macros can't be inline functions because we don't have struct
 * task_struct at this point.
 */

/*
 * Return TRUE if task T owns the fph partition of the CPU we're running on.
 * Must be called from code that has preemption disabled.
 */
#define ia64_is_local_fpu_owner(t)								\
({												\
	struct task_struct *__ia64_islfo_task = (t);						\
	(__ia64_islfo_task->thread.last_fph_cpu == smp_processor_id()				\
	 && __ia64_islfo_task == (struct task_struct *) ia64_get_kr(IA64_KR_FPU_OWNER));	\
})

/*
 * Mark task T as owning the fph partition of the CPU we're running on.
 * Must be called from code that has preemption disabled.
 */
#define ia64_set_local_fpu_owner(t) do {					\
	struct task_struct *__ia64_slfo_task = (t);				\
	__ia64_slfo_task->thread.last_fph_cpu = smp_processor_id();		\
	ia64_set_kr(IA64_KR_FPU_OWNER, (unsigned long) __ia64_slfo_task);	\
} while (0)

/* Mark the fph partition of task T as being invalid on all CPUs.  */
#define ia64_drop_fpu(t)	((t)->thread.last_fph_cpu = -1)
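
/*
 * Lifecycle sketch (editorial, not part of the original header): a typical
 * lazy-fph reload, run with preemption disabled as required above.  "task"
 * is a placeholder; ia64_load_fpu() is the wrapper defined further below.
 */
#if 0
if (!ia64_is_local_fpu_owner(task)) {
	ia64_load_fpu(task->thread.fph);	/* bring f32-f127 back onto this CPU */
	ia64_set_local_fpu_owner(task);		/* claim this CPU's fph partition */
}
/* on exec/exit, invalidate any cached partition on every CPU: */
ia64_drop_fpu(task);
#endif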

extern void __ia64_init_fpu (void);
extern void __ia64_save_fpu (struct ia64_fpreg *fph);
extern void __ia64_load_fpu (struct ia64_fpreg *fph);
extern void ia64_save_debug_regs (unsigned long *save_area);
extern void ia64_load_debug_regs (unsigned long *save_area);

#ifdef CONFIG_IA32_SUPPORT
extern void ia32_save_state (struct task_struct *task);
extern void ia32_load_state (struct task_struct *task);
#endif

#define ia64_fph_enable()	do { ia64_rsm(IA64_PSR_DFH); ia64_srlz_d(); } while (0)
#define ia64_fph_disable()	do { ia64_ssm(IA64_PSR_DFH); ia64_srlz_d(); } while (0)

/* load fp 0.0 into fph */
static inline void
ia64_init_fpu (void) {
	ia64_fph_enable();
	__ia64_init_fpu();
	ia64_fph_disable();
}

/* save f32-f127 at FPH */
static inline void
ia64_save_fpu (struct ia64_fpreg *fph) {
	ia64_fph_enable();
	__ia64_save_fpu(fph);
	ia64_fph_disable();
}

/* load f32-f127 from FPH */
static inline void
ia64_load_fpu (struct ia64_fpreg *fph) {
	ia64_fph_enable();
	__ia64_load_fpu(fph);
	ia64_fph_disable();
}

static inline __u64
ia64_clear_ic (void)
{
	__u64 psr;
	psr = ia64_getreg(_IA64_REG_PSR);
	ia64_stop();
	ia64_rsm(IA64_PSR_I | IA64_PSR_IC);
	ia64_srlz_i();
	return psr;
}

/*
 * Restore the psr.
 */
static inline void
ia64_set_psr (__u64 psr)
{
	ia64_stop();
	ia64_setreg(_IA64_REG_PSR_L, psr);
	ia64_srlz_i();
}
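
/*
 * Pairing sketch (editorial, not part of the original header):
 * ia64_clear_ic()/ia64_set_psr() bracket code that must run with interrupts
 * and interruption collection off, such as translation-register inserts:
 */
#if 0
{
	__u64 psr = ia64_clear_ic();	/* rsm psr.i|psr.ic, return old psr */
	/* ... critical section, e.g. ia64_itr(...) ... */
	ia64_set_psr(psr);		/* restore the saved psr.l bits */
}
#endif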

/*
 * Insert a translation into an instruction and/or data translation
 * register.
 */
static inline void
ia64_itr (__u64 target_mask, __u64 tr_num,
	  __u64 vmaddr, __u64 pte,
	  __u64 log_page_size)
{
	ia64_setreg(_IA64_REG_CR_ITIR, (log_page_size << 2));
	ia64_setreg(_IA64_REG_CR_IFA, vmaddr);
	ia64_stop();
	if (target_mask & 0x1)
		ia64_itri(tr_num, pte);
	if (target_mask & 0x2)
		ia64_itrd(tr_num, pte);
}

/*
 * Insert a translation into the instruction and/or data translation
 * cache.
 */
static inline void
ia64_itc (__u64 target_mask, __u64 vmaddr, __u64 pte,
	  __u64 log_page_size)
{
	ia64_setreg(_IA64_REG_CR_ITIR, (log_page_size << 2));
	ia64_setreg(_IA64_REG_CR_IFA, vmaddr);
	ia64_stop();
	/* as per EAS2.6, itc must be the last instruction in an instruction group */
	if (target_mask & 0x1)
		ia64_itci(pte);
	if (target_mask & 0x2)
		ia64_itcd(pte);
}

/*
 * Purge a range of addresses from instruction and/or data translation
 * register(s).
 */
static inline void
ia64_ptr (__u64 target_mask, __u64 vmaddr, __u64 log_size)
{
	if (target_mask & 0x1)
		ia64_ptri(vmaddr, (log_size << 2));
	if (target_mask & 0x2)
		ia64_ptrd(vmaddr, (log_size << 2));
}
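
/*
 * Usage sketch (editorial, not part of the original header): pin one 16 MB
 * mapping into both translation registers, then purge it again.  target_mask
 * 0x1 selects the instruction side and 0x2 the data side; log_page_size is
 * log2 of the page size (24 for 16 MB).  tr_num, vmaddr, and pte are
 * placeholders, and callers bracket the insert with ia64_clear_ic() as
 * sketched above.
 */
#if 0
ia64_itr(0x3, tr_num, vmaddr, pte, 24);	/* insert into itr[] and dtr[] */
ia64_ptr(0x3, vmaddr, 24);		/* purge it from both sides */
#endif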

/* Set the interrupt vector address.  The address must be suitably aligned (32KB).  */
static inline void
ia64_set_iva (void *ivt_addr)
{
	ia64_setreg(_IA64_REG_CR_IVA, (__u64) ivt_addr);
	ia64_srlz_i();
}

/* Set the page table address and control bits.  */
static inline void
ia64_set_pta (__u64 pta)
{
	/* Note: srlz.i implies srlz.d */
	ia64_setreg(_IA64_REG_CR_PTA, pta);
	ia64_srlz_i();
}

static inline void
ia64_eoi (void)
{
	ia64_setreg(_IA64_REG_CR_EOI, 0);
	ia64_srlz_d();
}

#define cpu_relax()	ia64_hint(ia64_hint_pause)

static inline int
ia64_get_irr(unsigned int vector)
{
	unsigned int reg = vector / 64;
	unsigned int bit = vector % 64;
	u64 irr;

	switch (reg) {
	case 0: irr = ia64_getreg(_IA64_REG_CR_IRR0); break;
	case 1: irr = ia64_getreg(_IA64_REG_CR_IRR1); break;
	case 2: irr = ia64_getreg(_IA64_REG_CR_IRR2); break;
	case 3: irr = ia64_getreg(_IA64_REG_CR_IRR3); break;
	}

	return test_bit(bit, &irr);
}

static inline void
ia64_set_lrr0 (unsigned long val)
{
	ia64_setreg(_IA64_REG_CR_LRR0, val);
	ia64_srlz_d();
}

static inline void
ia64_set_lrr1 (unsigned long val)
{
	ia64_setreg(_IA64_REG_CR_LRR1, val);
	ia64_srlz_d();
}


/*
 * Given the address to which a spill occurred, return the unat bit
 * number that corresponds to this address.
 */
static inline __u64
ia64_unat_pos (void *spill_addr)
{
	return ((__u64) spill_addr >> 3) & 0x3f;
}

/*
 * Set the NaT bit of an integer register which was spilled at address
 * SPILL_ADDR.  UNAT is the mask to be updated.
 */
static inline void
ia64_set_unat (__u64 *unat, void *spill_addr, unsigned long nat)
{
	__u64 bit = ia64_unat_pos(spill_addr);
	__u64 mask = 1UL << bit;

	*unat = (*unat & ~mask) | (nat << bit);
}
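
/*
 * Worked example (editorial, not part of the original header): the unat bit
 * index is simply bits 8:3 of the spill address, mirroring how ar.unat
 * tracks st8.spill targets.  A spill to an address ending in ...678 gives
 * (0x678 >> 3) & 0x3f == 15, so ia64_set_unat() updates bit 15 of *unat.
 */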

/*
 * Return saved PC of a blocked thread.
 * Note that the only way T can block is through a call to schedule() -> switch_to().
 */
static inline unsigned long
thread_saved_pc (struct task_struct *t)
{
	struct unw_frame_info info;
	unsigned long ip;

	unw_init_from_blocked_task(&info, t);
	if (unw_unwind(&info) < 0)
		return 0;
	unw_get_ip(&info, &ip);
	return ip;
}

/*
 * Get the current instruction/program counter value.
 */
#define current_text_addr() \
	({ void *_pc; _pc = (void *)ia64_getreg(_IA64_REG_IP); _pc; })

static inline __u64
ia64_get_ivr (void)
{
	__u64 r;
	ia64_srlz_d();
	r = ia64_getreg(_IA64_REG_CR_IVR);
	ia64_srlz_d();
	return r;
}

static inline void
ia64_set_dbr (__u64 regnum, __u64 value)
{
	__ia64_set_dbr(regnum, value);
#ifdef CONFIG_ITANIUM
	ia64_srlz_d();
#endif
}

static inline __u64
ia64_get_dbr (__u64 regnum)
{
	__u64 retval;

	retval = __ia64_get_dbr(regnum);
#ifdef CONFIG_ITANIUM
	ia64_srlz_d();
#endif
	return retval;
}

static inline __u64
ia64_rotr (__u64 w, __u64 n)
{
	return (w >> n) | (w << (64 - n));
}

#define ia64_rotl(w,n)	ia64_rotr((w), (64) - (n))
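
/*
 * Worked example (editorial, not part of the original header):
 * ia64_rotr(0xffUL, 8) == 0xff00000000000000, and since ia64_rotl(w, n)
 * rotates right by the complement, ia64_rotl(0xffUL, 8) == 0xff00.
 */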

/*
 * Take a mapped kernel address and return the equivalent address
 * in the region 7 identity mapped virtual area.
 */
static inline void *
ia64_imva (void *addr)
{
	void *result;
	result = (void *) ia64_tpa(addr);
	return __va(result);
}

#define ARCH_HAS_PREFETCH
#define ARCH_HAS_PREFETCHW
#define ARCH_HAS_SPINLOCK_PREFETCH
#define PREFETCH_STRIDE			L1_CACHE_BYTES

static inline void
prefetch (const void *x)
{
	ia64_lfetch(ia64_lfhint_none, x);
}

static inline void
prefetchw (const void *x)
{
	ia64_lfetch_excl(ia64_lfhint_none, x);
}

#define spin_lock_prefetch(x)	prefetchw(x)
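
/*
 * Usage sketch (editorial, not part of the original header): lfetch is a
 * non-faulting hint, so prefetching a possibly-NULL pointer is safe.  p,
 * head, and process() are placeholders:
 */
#if 0
for (p = head; p != NULL; p = p->next) {
	prefetch(p->next);	/* start pulling in the next node (read) */
	process(p);
}
#endif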

extern unsigned long boot_option_idle_override;

#endif /* !__ASSEMBLY__ */

#endif /* _ASM_IA64_PROCESSOR_H */