/*
 * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
 */
#ifndef _ASM_POWERPC_SYSTEM_H
#define _ASM_POWERPC_SYSTEM_H

#include <linux/kernel.h>

#include <asm/hw_irq.h>

/*
 * Memory barrier.
 * The sync instruction guarantees that all memory accesses initiated
 * by this processor have been performed (with respect to all other
 * mechanisms that access memory).  The eieio instruction is a barrier
 * providing an ordering (separately) for (a) cacheable stores and (b)
 * loads and stores to non-cacheable memory (e.g. I/O devices).
 *
 * mb() prevents loads and stores being reordered across this point.
 * rmb() prevents loads being reordered across this point.
 * wmb() prevents stores being reordered across this point.
 * read_barrier_depends() prevents data-dependent loads being reordered
 *	across this point (nop on PPC).
 *
 * We have to use the sync instructions for mb(), since lwsync doesn't
 * order loads with respect to previous stores.  Lwsync is fine for
 * rmb(), though.  Note that rmb() actually uses a sync on 32-bit
 * architectures.
 *
 * For wmb(), we use sync since wmb is used in drivers to order
 * stores to system memory with respect to writes to the device.
 * However, smp_wmb() can be a lighter-weight eieio barrier on
 * SMP since it is only used to order updates to system memory.
 */
#define mb()   __asm__ __volatile__ ("sync" : : : "memory")
#define rmb()  __asm__ __volatile__ (__stringify(LWSYNC) : : : "memory")
#define wmb()  __asm__ __volatile__ ("sync" : : : "memory")
#define read_barrier_depends()  do { } while(0)

#define set_mb(var, value)	do { var = value; mb(); } while (0)

#ifdef __KERNEL__
#ifdef CONFIG_SMP
#define smp_mb()	mb()
#define smp_rmb()	rmb()
#define smp_wmb()	__asm__ __volatile__ ("eieio" : : : "memory")
#define smp_read_barrier_depends()	read_barrier_depends()
#else
#define smp_mb()	barrier()
#define smp_rmb()	barrier()
#define smp_wmb()	barrier()
#define smp_read_barrier_depends()	do { } while(0)
#endif /* CONFIG_SMP */
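
/*
 * A minimal producer/consumer sketch of how these SMP barriers pair up
 * (illustrative only; the flag/data variables are hypothetical):
 *
 *	CPU 0 (producer)		CPU 1 (consumer)
 *	data = 42;			while (!flag)
 *	smp_wmb();				cpu_relax();
 *	flag = 1;			smp_rmb();
 *					BUG_ON(data != 42);
 *
 * smp_wmb() orders the store to data before the store to flag, and
 * smp_rmb() orders the load of flag before the load of data, so the
 * consumer cannot observe flag == 1 together with stale data.
 */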

/*
 * This is a barrier which prevents following instructions from being
 * started until the value of the argument x is known.  For example, if
 * x is a variable loaded from memory, this prevents following
 * instructions from being executed until the load has been performed.
 */
#define data_barrier(x)	\
	asm volatile("twi 0,%0,0; isync" : : "r" (x) : "memory");
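
/*
 * A sketch of where data_barrier() helps (the ring/index names are
 * hypothetical): after loading an index that guards other memory,
 * force the load to finish before dependent accesses begin:
 *
 *	idx = ring->producer_index;
 *	data_barrier(idx);
 *	entry = &ring->entries[idx];
 *
 * The twi instruction is a conditional trap on the register value with
 * a condition that never fires; the CPU cannot resolve it until the
 * load of idx completes, and the isync then discards any instructions
 * started speculatively before that point.
 */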

struct task_struct;
struct pt_regs;

#ifdef CONFIG_DEBUGGER

extern int (*__debugger)(struct pt_regs *regs);
extern int (*__debugger_ipi)(struct pt_regs *regs);
extern int (*__debugger_bpt)(struct pt_regs *regs);
extern int (*__debugger_sstep)(struct pt_regs *regs);
extern int (*__debugger_iabr_match)(struct pt_regs *regs);
extern int (*__debugger_dabr_match)(struct pt_regs *regs);
extern int (*__debugger_fault_handler)(struct pt_regs *regs);

#define DEBUGGER_BOILERPLATE(__NAME) \
static inline int __NAME(struct pt_regs *regs) \
{ \
	if (unlikely(__ ## __NAME)) \
		return __ ## __NAME(regs); \
	return 0; \
}

DEBUGGER_BOILERPLATE(debugger)
DEBUGGER_BOILERPLATE(debugger_ipi)
DEBUGGER_BOILERPLATE(debugger_bpt)
DEBUGGER_BOILERPLATE(debugger_sstep)
DEBUGGER_BOILERPLATE(debugger_iabr_match)
DEBUGGER_BOILERPLATE(debugger_dabr_match)
DEBUGGER_BOILERPLATE(debugger_fault_handler)
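
/*
 * For reference, DEBUGGER_BOILERPLATE(debugger_bpt) expands to:
 *
 *	static inline int debugger_bpt(struct pt_regs *regs)
 *	{
 *		if (unlikely(__debugger_bpt))
 *			return __debugger_bpt(regs);
 *		return 0;
 *	}
 *
 * i.e. each wrapper calls through the corresponding hook pointer if a
 * debugger has installed one, and is a no-op returning 0 otherwise.
 */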

#else
static inline int debugger(struct pt_regs *regs) { return 0; }
static inline int debugger_ipi(struct pt_regs *regs) { return 0; }
static inline int debugger_bpt(struct pt_regs *regs) { return 0; }
static inline int debugger_sstep(struct pt_regs *regs) { return 0; }
static inline int debugger_iabr_match(struct pt_regs *regs) { return 0; }
static inline int debugger_dabr_match(struct pt_regs *regs) { return 0; }
static inline int debugger_fault_handler(struct pt_regs *regs) { return 0; }
#endif

extern int set_dabr(unsigned long dabr);
extern void print_backtrace(unsigned long *);
extern void show_regs(struct pt_regs * regs);
extern void flush_instruction_cache(void);
extern void hard_reset_now(void);
extern void poweroff_now(void);

#ifdef CONFIG_6xx
extern long _get_L2CR(void);
extern long _get_L3CR(void);
extern void _set_L2CR(unsigned long);
extern void _set_L3CR(unsigned long);
#else
#define _get_L2CR()	0L
#define _get_L3CR()	0L
#define _set_L2CR(val)	do { } while(0)
#define _set_L3CR(val)	do { } while(0)
#endif

extern void via_cuda_init(void);
extern void read_rtc_time(void);
extern void pmac_find_display(void);
extern void giveup_fpu(struct task_struct *);
extern void disable_kernel_fp(void);
extern void enable_kernel_fp(void);
extern void flush_fp_to_thread(struct task_struct *);
extern void enable_kernel_altivec(void);
extern void giveup_altivec(struct task_struct *);
extern void load_up_altivec(struct task_struct *);
extern int emulate_altivec(struct pt_regs *);
extern void enable_kernel_spe(void);
extern void giveup_spe(struct task_struct *);
extern void load_up_spe(struct task_struct *);
extern int fix_alignment(struct pt_regs *);
extern void cvt_fd(float *from, double *to, struct thread_struct *thread);
extern void cvt_df(double *from, float *to, struct thread_struct *thread);

#ifndef CONFIG_SMP
extern void discard_lazy_cpu_state(void);
#else
static inline void discard_lazy_cpu_state(void)
{
}
#endif

#ifdef CONFIG_ALTIVEC
extern void flush_altivec_to_thread(struct task_struct *);
#else
static inline void flush_altivec_to_thread(struct task_struct *t)
{
}
#endif

#ifdef CONFIG_SPE
extern void flush_spe_to_thread(struct task_struct *);
#else
static inline void flush_spe_to_thread(struct task_struct *t)
{
}
#endif

extern int call_rtas(const char *, int, int, unsigned long *, ...);
extern void cacheable_memzero(void *p, unsigned int nb);
extern void *cacheable_memcpy(void *, const void *, unsigned int);
extern int do_page_fault(struct pt_regs *, unsigned long, unsigned long);
extern void bad_page_fault(struct pt_regs *, unsigned long, int);
extern int die(const char *, struct pt_regs *, long);
extern void _exception(int, struct pt_regs *, int, unsigned long);
#ifdef CONFIG_BOOKE_WDT
extern u32 booke_wdt_enabled;
extern u32 booke_wdt_period;
#endif /* CONFIG_BOOKE_WDT */

struct device_node;
extern void note_scsi_host(struct device_node *, void *);

extern struct task_struct *__switch_to(struct task_struct *,
	struct task_struct *);
#define switch_to(prev, next, last)	((last) = __switch_to((prev), (next)))

struct thread_struct;
extern struct task_struct *_switch(struct thread_struct *prev,
				   struct thread_struct *next);

/*
 * On SMP systems, when the scheduler does migration-cost autodetection,
 * it needs a way to flush as much of the CPU's caches as possible.
 *
 * TODO: fill this in!
 */
static inline void sched_cacheflush(void)
{
}

extern unsigned int rtas_data;
extern int mem_init_done;	/* set on boot once kmalloc can be called */
extern unsigned long memory_limit;
extern unsigned long klimit;

extern int powersave_nap;	/* set if nap mode can be used in idle loop */

/*
 * Atomic exchange
 *
 * Changes the memory location '*ptr' to be val and returns
 * the previous value stored there.
 */
static __inline__ unsigned long
__xchg_u32(volatile void *p, unsigned long val)
{
	unsigned long prev;

	__asm__ __volatile__(
	LWSYNC_ON_SMP
"1:	lwarx	%0,0,%2 \n"
	PPC405_ERR77(0,%2)
"	stwcx.	%3,0,%2 \n\
	bne-	1b"
	ISYNC_ON_SMP
	: "=&r" (prev), "+m" (*(volatile unsigned int *)p)
	: "r" (p), "r" (val)
	: "cc", "memory");

	return prev;
}

/*
 * Atomic exchange, without the memory barriers.
 *
 * Same as __xchg_u32() above, but the LWSYNC_ON_SMP/ISYNC_ON_SMP
 * barriers are omitted: the exchange itself is still atomic, but it
 * does not order surrounding memory accesses, so it is only suitable
 * for data that other CPUs do not access concurrently (e.g. per-CPU
 * variables).
 */
static __inline__ unsigned long
__xchg_u32_local(volatile void *p, unsigned long val)
{
	unsigned long prev;

	__asm__ __volatile__(
"1:	lwarx	%0,0,%2 \n"
	PPC405_ERR77(0,%2)
"	stwcx.	%3,0,%2 \n\
	bne-	1b"
	: "=&r" (prev), "+m" (*(volatile unsigned int *)p)
	: "r" (p), "r" (val)
	: "cc", "memory");

	return prev;
}

#ifdef CONFIG_PPC64
static __inline__ unsigned long
__xchg_u64(volatile void *p, unsigned long val)
{
	unsigned long prev;

	__asm__ __volatile__(
	LWSYNC_ON_SMP
"1:	ldarx	%0,0,%2 \n"
	PPC405_ERR77(0,%2)
"	stdcx.	%3,0,%2 \n\
	bne-	1b"
	ISYNC_ON_SMP
	: "=&r" (prev), "+m" (*(volatile unsigned long *)p)
	: "r" (p), "r" (val)
	: "cc", "memory");

	return prev;
}

static __inline__ unsigned long
__xchg_u64_local(volatile void *p, unsigned long val)
{
	unsigned long prev;

	__asm__ __volatile__(
"1:	ldarx	%0,0,%2 \n"
	PPC405_ERR77(0,%2)
"	stdcx.	%3,0,%2 \n\
	bne-	1b"
	: "=&r" (prev), "+m" (*(volatile unsigned long *)p)
	: "r" (p), "r" (val)
	: "cc", "memory");

	return prev;
}
#endif

/*
 * This function doesn't exist, so you'll get a linker error
 * if something tries to do an invalid xchg().
 */
extern void __xchg_called_with_bad_pointer(void);

static __inline__ unsigned long
__xchg(volatile void *ptr, unsigned long x, unsigned int size)
{
	switch (size) {
	case 4:
		return __xchg_u32(ptr, x);
#ifdef CONFIG_PPC64
	case 8:
		return __xchg_u64(ptr, x);
#endif
	}
	__xchg_called_with_bad_pointer();
	return x;
}

static __inline__ unsigned long
__xchg_local(volatile void *ptr, unsigned long x, unsigned int size)
{
	switch (size) {
	case 4:
		return __xchg_u32_local(ptr, x);
#ifdef CONFIG_PPC64
	case 8:
		return __xchg_u64_local(ptr, x);
#endif
	}
	__xchg_called_with_bad_pointer();
	return x;
}
#define xchg(ptr,x)							     \
  ({									     \
     __typeof__(*(ptr)) _x_ = (x);					     \
     (__typeof__(*(ptr))) __xchg((ptr), (unsigned long)_x_, sizeof(*(ptr))); \
  })
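
/*
 * Minimal usage sketch for xchg(): a test-and-set spin loop (the lock
 * variable is hypothetical, and a real unlock also needs a barrier):
 *
 *	unsigned int lock = 0;
 *
 *	while (xchg(&lock, 1) != 0)
 *		cpu_relax();
 *	... critical section ...
 *	lock = 0;
 *
 * The lwarx/stwcx. pair retries until the store succeeds, so exactly
 * one CPU observes the 0 -> 1 transition, and the LWSYNC/ISYNC
 * barriers give the acquire semantics a lock needs.
 */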

#define xchg_local(ptr,x)						     \
  ({									     \
     __typeof__(*(ptr)) _x_ = (x);					     \
     (__typeof__(*(ptr))) __xchg_local((ptr),				     \
		(unsigned long)_x_, sizeof(*(ptr)));			     \
  })
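
/*
 * xchg_local() is the barrier-free counterpart of xchg(): a sketch of
 * use on data only ever touched by the local CPU, e.g. a per-CPU slot
 * (the variable name is hypothetical):
 *
 *	old = xchg_local(&__get_cpu_var(sample), new);
 *
 * Interrupts on this CPU still see a single atomic update, because the
 * lwarx/stwcx. reservation is kept; only the sync instructions are
 * dropped.
 */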

/*
 * Compare and exchange - if *p == old, set it to new,
 * and return the old value of *p.
 */
#define __HAVE_ARCH_CMPXCHG	1

static __inline__ unsigned long
__cmpxchg_u32(volatile unsigned int *p, unsigned long old, unsigned long new)
{
	unsigned int prev;

	__asm__ __volatile__ (
	LWSYNC_ON_SMP
"1:	lwarx	%0,0,%2		# __cmpxchg_u32\n\
	cmpw	0,%0,%3\n\
	bne-	2f\n"
	PPC405_ERR77(0,%2)
"	stwcx.	%4,0,%2\n\
	bne-	1b"
	ISYNC_ON_SMP
	"\n\
2:"
	: "=&r" (prev), "+m" (*p)
	: "r" (p), "r" (old), "r" (new)
	: "cc", "memory");

	return prev;
}

static __inline__ unsigned long
__cmpxchg_u32_local(volatile unsigned int *p, unsigned long old,
			unsigned long new)
{
	unsigned int prev;

	__asm__ __volatile__ (
"1:	lwarx	%0,0,%2		# __cmpxchg_u32\n\
	cmpw	0,%0,%3\n\
	bne-	2f\n"
	PPC405_ERR77(0,%2)
"	stwcx.	%4,0,%2\n\
	bne-	1b"
	"\n\
2:"
	: "=&r" (prev), "+m" (*p)
	: "r" (p), "r" (old), "r" (new)
	: "cc", "memory");

	return prev;
}

#ifdef CONFIG_PPC64
static __inline__ unsigned long
__cmpxchg_u64(volatile unsigned long *p, unsigned long old, unsigned long new)
{
	unsigned long prev;

	__asm__ __volatile__ (
	LWSYNC_ON_SMP
"1:	ldarx	%0,0,%2		# __cmpxchg_u64\n\
	cmpd	0,%0,%3\n\
	bne-	2f\n\
	stdcx.	%4,0,%2\n\
	bne-	1b"
	ISYNC_ON_SMP
	"\n\
2:"
	: "=&r" (prev), "+m" (*p)
	: "r" (p), "r" (old), "r" (new)
	: "cc", "memory");

	return prev;
}

static __inline__ unsigned long
__cmpxchg_u64_local(volatile unsigned long *p, unsigned long old,
			unsigned long new)
{
	unsigned long prev;

	__asm__ __volatile__ (
"1:	ldarx	%0,0,%2		# __cmpxchg_u64\n\
	cmpd	0,%0,%3\n\
	bne-	2f\n\
	stdcx.	%4,0,%2\n\
	bne-	1b"
	"\n\
2:"
	: "=&r" (prev), "+m" (*p)
	: "r" (p), "r" (old), "r" (new)
	: "cc", "memory");

	return prev;
}
#endif

/* This function doesn't exist, so you'll get a linker error
   if something tries to do an invalid cmpxchg().  */
extern void __cmpxchg_called_with_bad_pointer(void);

static __inline__ unsigned long
__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new,
	  unsigned int size)
{
	switch (size) {
	case 4:
		return __cmpxchg_u32(ptr, old, new);
#ifdef CONFIG_PPC64
	case 8:
		return __cmpxchg_u64(ptr, old, new);
#endif
	}
	__cmpxchg_called_with_bad_pointer();
	return old;
}

static __inline__ unsigned long
__cmpxchg_local(volatile void *ptr, unsigned long old, unsigned long new,
	  unsigned int size)
{
	switch (size) {
	case 4:
		return __cmpxchg_u32_local(ptr, old, new);
#ifdef CONFIG_PPC64
	case 8:
		return __cmpxchg_u64_local(ptr, old, new);
#endif
	}
	__cmpxchg_called_with_bad_pointer();
	return old;
}

#define cmpxchg(ptr,o,n)						 \
  ({									 \
     __typeof__(*(ptr)) _o_ = (o);					 \
     __typeof__(*(ptr)) _n_ = (n);					 \
     (__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_,		 \
				    (unsigned long)_n_, sizeof(*(ptr))); \
  })
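
/*
 * Typical lock-free update loop built on cmpxchg() (a sketch; the
 * counter variable is hypothetical):
 *
 *	unsigned int old, new;
 *
 *	do {
 *		old = counter;
 *		new = old + 1;
 *	} while (cmpxchg(&counter, old, new) != old);
 *
 * cmpxchg() returns the value it found at the location: if another CPU
 * changed counter between the read and the cmpxchg, the return value
 * differs from old and the loop recomputes from the fresh value.
 */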


#define cmpxchg_local(ptr,o,n)						 \
  ({									 \
     __typeof__(*(ptr)) _o_ = (o);					 \
     __typeof__(*(ptr)) _n_ = (n);					 \
     (__typeof__(*(ptr))) __cmpxchg_local((ptr), (unsigned long)_o_,	 \
				    (unsigned long)_n_, sizeof(*(ptr))); \
  })

#ifdef CONFIG_PPC64
/*
 * We handle most unaligned accesses in hardware.  On the other hand
 * unaligned DMA can be very expensive on some ppc64 IO chips (it does
 * powers of 2 writes until it reaches sufficient alignment).
 *
 * Based on this we disable the IP header alignment in network drivers.
 * We also modify NET_SKB_PAD to be a cacheline in size, thus maintaining
 * cacheline alignment of buffers.
 */
#define NET_IP_ALIGN	0
#define NET_SKB_PAD	L1_CACHE_BYTES
#endif

#define arch_align_stack(x) (x)

/* Used in very early kernel initialization. */
extern unsigned long reloc_offset(void);
extern unsigned long add_reloc_offset(unsigned long);
extern void reloc_got2(unsigned long);

#define PTRRELOC(x)	((typeof(x)) add_reloc_offset((unsigned long)(x)))

static inline void create_instruction(unsigned long addr, unsigned int instr)
{
	unsigned int *p;
	p  = (unsigned int *)addr;
	*p = instr;
	asm ("dcbst 0, %0; sync; icbi 0,%0; sync; isync" : : "r" (p));
}
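
/*
 * The asm sequence above keeps the instruction cache coherent with the
 * store: dcbst pushes the modified data cache line out to memory, sync
 * waits for that to complete, icbi invalidates any stale copy of the
 * line in the instruction cache, and the final sync/isync ensure the
 * invalidation has taken effect before any subsequent instruction
 * fetch from the patched address.
 */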

/* Flags for create_branch:
 * "b"   == create_branch(addr, target, 0);
 * "ba"  == create_branch(addr, target, BRANCH_ABSOLUTE);
 * "bl"  == create_branch(addr, target, BRANCH_SET_LINK);
 * "bla" == create_branch(addr, target, BRANCH_ABSOLUTE | BRANCH_SET_LINK);
 */
#define BRANCH_SET_LINK	0x1
#define BRANCH_ABSOLUTE	0x2

static inline void create_branch(unsigned long addr,
		unsigned long target, int flags)
{
	unsigned int instruction;

	if (! (flags & BRANCH_ABSOLUTE))
		target = target - addr;

	/* Mask out the flags and target, so they don't step on each other. */
	instruction = 0x48000000 | (flags & 0x3) | (target & 0x03FFFFFC);

	create_instruction(addr, instruction);
}
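
/*
 * Worked example (the addresses are illustrative): patching a relative
 * "b" at addr = 0xc0001000 to target = 0xc0001100 gives
 *
 *	target - addr        = 0x100
 *	0x48000000 | 0x100   = 0x48000100	(b +0x100)
 *
 * The I-form branch opcode is the top 6 bits (0x48000000 == opcode 18),
 * bits 1:0 hold the AA and LK flags, and the 24-bit signed displacement
 * sits in bits 25:2, which is why target is masked with 0x03FFFFFC.
 */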

static inline void create_function_call(unsigned long addr, void * func)
{
	unsigned long func_addr;

#ifdef CONFIG_PPC64
	/*
	 * On PPC64 the function pointer actually points to the function's
	 * descriptor.  The first entry in the descriptor is the address
	 * of the function text.
	 */
	func_addr = *(unsigned long *)func;
#else
	func_addr = (unsigned long)func;
#endif
	create_branch(addr, func_addr, BRANCH_SET_LINK);
}
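
/*
 * A minimal usage sketch (both names are hypothetical): redirecting a
 * known call site at boot to an optimized routine,
 *
 *	create_function_call((unsigned long)&patch_site, optimized_memcpy);
 *
 * overwrites the instruction at patch_site with a "bl" to
 * optimized_memcpy.  Note that the 24-bit displacement limits a
 * relative branch to +/- 32MB of the call site.
 */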

#ifdef CONFIG_VIRT_CPU_ACCOUNTING
extern void account_system_vtime(struct task_struct *);
#endif

#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_SYSTEM_H */