arm: arch_timer: factor out register accessors
[deliverable/linux.git] / arch/arm/include/asm/arch_timer.h
#ifndef __ASMARM_ARCH_TIMER_H
#define __ASMARM_ARCH_TIMER_H

#include <asm/barrier.h>
#include <asm/errno.h>
#include <linux/clocksource.h>
#include <linux/types.h>

#ifdef CONFIG_ARM_ARCH_TIMER
int arch_timer_of_register(void);
int arch_timer_sched_clock_init(void);
struct timecounter *arch_timer_get_timecounter(void);

#define ARCH_TIMER_CTRL_ENABLE		(1 << 0)
#define ARCH_TIMER_CTRL_IT_MASK		(1 << 1)
#define ARCH_TIMER_CTRL_IT_STAT		(1 << 2)

#define ARCH_TIMER_REG_CTRL		0
#define ARCH_TIMER_REG_TVAL		1

#define ARCH_TIMER_PHYS_ACCESS		0
#define ARCH_TIMER_VIRT_ACCESS		1

/*
 * These register accessors are marked inline so the compiler can
 * nicely work out which register we want, and chuck away the rest of
 * the code. At least it does so with a recent GCC (4.6.3).
 */
static inline void arch_timer_reg_write(const int access, const int reg, u32 val)
{
	if (access == ARCH_TIMER_PHYS_ACCESS) {
		switch (reg) {
		case ARCH_TIMER_REG_CTRL:	/* CNTP_CTL */
			asm volatile("mcr p15, 0, %0, c14, c2, 1" : : "r" (val));
			break;
		case ARCH_TIMER_REG_TVAL:	/* CNTP_TVAL */
			asm volatile("mcr p15, 0, %0, c14, c2, 0" : : "r" (val));
			break;
		}
	}

	if (access == ARCH_TIMER_VIRT_ACCESS) {
		switch (reg) {
		case ARCH_TIMER_REG_CTRL:	/* CNTV_CTL */
			asm volatile("mcr p15, 0, %0, c14, c3, 1" : : "r" (val));
			break;
		case ARCH_TIMER_REG_TVAL:	/* CNTV_TVAL */
			asm volatile("mcr p15, 0, %0, c14, c3, 0" : : "r" (val));
			break;
		}
	}
}
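
/*
 * Illustrative sketch, not part of the original header: one way a
 * clock-event backend might use the write accessor above to program a
 * physical timer event. The helper name and the 'evt' downcount
 * parameter are hypothetical.
 */
static inline void example_arch_timer_set_event(u32 evt)
{
	/* Load the downcount, then enable the timer with its output unmasked. */
	arch_timer_reg_write(ARCH_TIMER_PHYS_ACCESS, ARCH_TIMER_REG_TVAL, evt);
	arch_timer_reg_write(ARCH_TIMER_PHYS_ACCESS, ARCH_TIMER_REG_CTRL,
			     ARCH_TIMER_CTRL_ENABLE);
}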

static inline u32 arch_timer_reg_read(const int access, const int reg)
{
	u32 val = 0;

	if (access == ARCH_TIMER_PHYS_ACCESS) {
		switch (reg) {
		case ARCH_TIMER_REG_CTRL:	/* CNTP_CTL */
			asm volatile("mrc p15, 0, %0, c14, c2, 1" : "=r" (val));
			break;
		case ARCH_TIMER_REG_TVAL:	/* CNTP_TVAL */
			asm volatile("mrc p15, 0, %0, c14, c2, 0" : "=r" (val));
			break;
		}
	}

	if (access == ARCH_TIMER_VIRT_ACCESS) {
		switch (reg) {
		case ARCH_TIMER_REG_CTRL:	/* CNTV_CTL */
			asm volatile("mrc p15, 0, %0, c14, c3, 1" : "=r" (val));
			break;
		case ARCH_TIMER_REG_TVAL:	/* CNTV_TVAL */
			asm volatile("mrc p15, 0, %0, c14, c3, 0" : "=r" (val));
			break;
		}
	}

	return val;
}
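
/*
 * Illustrative sketch, not part of the original header: a hypothetical
 * interrupt path could pair the two accessors to check CNTP_CTL.ISTATUS
 * and mask the timer output until it is reprogrammed.
 */
static inline int example_arch_timer_ack(void)
{
	u32 ctrl = arch_timer_reg_read(ARCH_TIMER_PHYS_ACCESS, ARCH_TIMER_REG_CTRL);

	if (ctrl & ARCH_TIMER_CTRL_IT_STAT) {
		ctrl |= ARCH_TIMER_CTRL_IT_MASK;
		arch_timer_reg_write(ARCH_TIMER_PHYS_ACCESS, ARCH_TIMER_REG_CTRL, ctrl);
		return 1;
	}
	return 0;
}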

static inline u32 arch_timer_get_cntfrq(void)
{
	u32 val;
	asm volatile("mrc p15, 0, %0, c14, c0, 0" : "=r" (val));	/* CNTFRQ */
	return val;
}

static inline u64 arch_counter_get_cntpct(void)
{
	u64 cval;

	asm volatile("mrrc p15, 0, %Q0, %R0, c14" : "=r" (cval));	/* CNTPCT */
	return cval;
}

static inline u64 arch_counter_get_cntvct(void)
{
	u64 cval;

	asm volatile("mrrc p15, 1, %Q0, %R0, c14" : "=r" (cval));	/* CNTVCT */
	return cval;
}
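
/*
 * Illustrative sketch, not part of the original header: the 64-bit
 * virtual count can be sampled around a region of code to measure its
 * duration in counter ticks; dividing by arch_timer_get_cntfrq() (Hz)
 * gives seconds. The helper name is hypothetical.
 */
static inline u64 example_measure_ticks(void (*fn)(void))
{
	u64 start = arch_counter_get_cntvct();

	fn();
	return arch_counter_get_cntvct() - start;
}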
#else
static inline int arch_timer_of_register(void)
{
	return -ENXIO;
}

static inline int arch_timer_sched_clock_init(void)
{
	return -ENXIO;
}

static inline struct timecounter *arch_timer_get_timecounter(void)
{
	return NULL;
}
#endif

#endif