/*
 * Virtual cpu timer based timer functions.
 *
 * Copyright IBM Corp. 2004, 2012
 * Author(s): Jan Glauber <jan.glauber@de.ibm.com>
 */
7
27f6b416 8#include <linux/kernel_stat.h>
27f6b416 9#include <linux/export.h>
1da177e4 10#include <linux/kernel.h>
1da177e4 11#include <linux/timex.h>
27f6b416
MS
12#include <linux/types.h>
13#include <linux/time.h>
1da177e4 14
76d4e00a 15#include <asm/cputime.h>
27f6b416 16#include <asm/vtimer.h>
a5725ac2 17#include <asm/vtime.h>
1da177e4 18
static void virt_timer_expire(void);

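/*
 * All timers on virt_timer_list expire against a common base:
 * virt_timer_elapsed accumulates the CPU time consumed since that
 * base was set up, and virt_timer_current caches the expiry value
 * of the earliest queued timer. A timer is due once
 * virt_timer_elapsed >= virt_timer_current.
 */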
static LIST_HEAD(virt_timer_list);
static DEFINE_SPINLOCK(virt_timer_lock);
static atomic64_t virt_timer_current;
static atomic64_t virt_timer_elapsed;

/* Read the current CPU timer value with the stpt instruction. */
static inline u64 get_vtimer(void)
{
	u64 timer;

	asm volatile("stpt %0" : "=m" (timer));
	return timer;
}

static inline void set_vtimer(u64 expires)
{
	u64 timer;

	asm volatile(
		"	stpt	%0\n"	/* Store current cpu timer value */
		"	spt	%1"	/* Set new value imm. afterwards */
		: "=m" (timer) : "m" (expires));
	S390_lowcore.system_timer += S390_lowcore.last_update_timer - timer;
	S390_lowcore.last_update_timer = expires;
}

/*
 * Advance the accumulated elapsed time and report whether the
 * earliest timer on virt_timer_list is now due.
 */
static inline int virt_timer_forward(u64 elapsed)
{
	BUG_ON(!irqs_disabled());

	if (list_empty(&virt_timer_list))
		return 0;
	elapsed = atomic64_add_return(elapsed, &virt_timer_elapsed);
	return elapsed >= atomic64_read(&virt_timer_current);
}

/*
 * Update process times based on virtual cpu times stored by entry.S
 * to the lowcore fields user_timer, system_timer & steal_timer.
 */
static int do_account_vtime(struct task_struct *tsk, int hardirq_offset)
{
	struct thread_info *ti = task_thread_info(tsk);
	u64 timer, clock, user, system, steal;

	timer = S390_lowcore.last_update_timer;
	clock = S390_lowcore.last_update_clock;
	asm volatile(
		"	stpt	%0\n"	/* Store current cpu timer value */
#ifdef CONFIG_HAVE_MARCH_Z9_109_FEATURES
		"	stckf	%1"	/* Store current tod clock value */
#else
		"	stck	%1"	/* Store current tod clock value */
#endif
		: "=m" (S390_lowcore.last_update_timer),
		  "=m" (S390_lowcore.last_update_clock));
	S390_lowcore.system_timer += timer - S390_lowcore.last_update_timer;
	S390_lowcore.steal_timer += S390_lowcore.last_update_clock - clock;

	user = S390_lowcore.user_timer - ti->user_timer;
	S390_lowcore.steal_timer -= user;
	ti->user_timer = S390_lowcore.user_timer;
	account_user_time(tsk, user, user);

	system = S390_lowcore.system_timer - ti->system_timer;
	S390_lowcore.steal_timer -= system;
	ti->system_timer = S390_lowcore.system_timer;
	account_system_time(tsk, hardirq_offset, system, system);

	steal = S390_lowcore.steal_timer;
	if ((s64) steal > 0) {
		S390_lowcore.steal_timer = 0;
		account_steal_time(steal);
	}

	return virt_timer_forward(user + system);
}

void vtime_task_switch(struct task_struct *prev)
{
	struct thread_info *ti;

	do_account_vtime(prev, 0);
	ti = task_thread_info(prev);
	ti->user_timer = S390_lowcore.user_timer;
	ti->system_timer = S390_lowcore.system_timer;
	ti = task_thread_info(current);
	S390_lowcore.user_timer = ti->user_timer;
	S390_lowcore.system_timer = ti->system_timer;
}

/*
 * On s390, accounting pending user time also implies
 * accounting system time, in order to correctly compute
 * steal time.
 */
void vtime_account_user(struct task_struct *tsk)
{
	if (do_account_vtime(tsk, HARDIRQ_OFFSET))
		virt_timer_expire();
}

/*
 * Update the per-task system time based on the virtual cpu timer
 * when entering an interrupt context; no user time is accounted
 * here, and steal time is reduced by the accounted system time.
 */
void vtime_account_irq_enter(struct task_struct *tsk)
{
	struct thread_info *ti = task_thread_info(tsk);
	u64 timer, system;

	WARN_ON_ONCE(!irqs_disabled());

	timer = S390_lowcore.last_update_timer;
	S390_lowcore.last_update_timer = get_vtimer();
	S390_lowcore.system_timer += timer - S390_lowcore.last_update_timer;

	system = S390_lowcore.system_timer - ti->system_timer;
	S390_lowcore.steal_timer -= system;
	ti->system_timer = S390_lowcore.system_timer;
	account_system_time(tsk, 0, system, system);

	virt_timer_forward(system);
}
EXPORT_SYMBOL_GPL(vtime_account_irq_enter);

void vtime_account_system(struct task_struct *tsk)
__attribute__((alias("vtime_account_irq_enter")));
EXPORT_SYMBOL_GPL(vtime_account_system);

/*
 * Sorted add to a list. The list is searched linearly until the
 * first element with a later expiry value is found.
 */
static void list_add_sorted(struct vtimer_list *timer, struct list_head *head)
{
	struct vtimer_list *tmp;

	list_for_each_entry(tmp, head, entry) {
		if (tmp->expires > timer->expires) {
			list_add_tail(&timer->entry, &tmp->entry);
			return;
		}
	}
	list_add_tail(&timer->entry, head);
}

/*
 * Handler for expired virtual CPU timer.
 */
static void virt_timer_expire(void)
{
	struct vtimer_list *timer, *tmp;
	unsigned long elapsed;
	LIST_HEAD(cb_list);

	/* walk timer list, fire all expired timers */
	spin_lock(&virt_timer_lock);
	elapsed = atomic64_read(&virt_timer_elapsed);
	list_for_each_entry_safe(timer, tmp, &virt_timer_list, entry) {
		if (timer->expires < elapsed)
			/* move expired timer to the callback queue */
			list_move_tail(&timer->entry, &cb_list);
		else
			timer->expires -= elapsed;
	}
	if (!list_empty(&virt_timer_list)) {
		timer = list_first_entry(&virt_timer_list,
					 struct vtimer_list, entry);
		atomic64_set(&virt_timer_current, timer->expires);
	}
	atomic64_sub(elapsed, &virt_timer_elapsed);
	spin_unlock(&virt_timer_lock);

	/* Do callbacks and recharge periodic timers */
	list_for_each_entry_safe(timer, tmp, &cb_list, entry) {
		list_del_init(&timer->entry);
		timer->function(timer->data);
		if (timer->interval) {
			/* Recharge interval timer */
			timer->expires = timer->interval +
				atomic64_read(&virt_timer_elapsed);
			spin_lock(&virt_timer_lock);
			list_add_sorted(timer, &virt_timer_list);
			spin_unlock(&virt_timer_lock);
		}
	}
}

void init_virt_timer(struct vtimer_list *timer)
{
	timer->function = NULL;
	INIT_LIST_HEAD(&timer->entry);
}
EXPORT_SYMBOL(init_virt_timer);

static inline int vtimer_pending(struct vtimer_list *timer)
{
	return !list_empty(&timer->entry);
}

static void internal_add_vtimer(struct vtimer_list *timer)
{
	if (list_empty(&virt_timer_list)) {
		/* First timer, just program it. */
		atomic64_set(&virt_timer_current, timer->expires);
		atomic64_set(&virt_timer_elapsed, 0);
		list_add(&timer->entry, &virt_timer_list);
	} else {
		/* Update timer against current base. */
		timer->expires += atomic64_read(&virt_timer_elapsed);
		if (likely((s64) timer->expires <
			   (s64) atomic64_read(&virt_timer_current)))
			/* The new timer expires before the current timer. */
			atomic64_set(&virt_timer_current, timer->expires);
		/* Insert new timer into the list. */
		list_add_sorted(timer, &virt_timer_list);
	}
}

static void __add_vtimer(struct vtimer_list *timer, int periodic)
{
	unsigned long flags;

	timer->interval = periodic ? timer->expires : 0;
	spin_lock_irqsave(&virt_timer_lock, flags);
	internal_add_vtimer(timer);
	spin_unlock_irqrestore(&virt_timer_lock, flags);
}

/*
 * add_virt_timer - add a one-shot virtual CPU timer
 */
void add_virt_timer(struct vtimer_list *timer)
{
	__add_vtimer(timer, 0);
}
EXPORT_SYMBOL(add_virt_timer);

/*
 * add_virt_timer_periodic - add an interval virtual CPU timer
 */
void add_virt_timer_periodic(struct vtimer_list *timer)
{
	__add_vtimer(timer, 1);
}
EXPORT_SYMBOL(add_virt_timer_periodic);
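
/*
 * Illustrative usage sketch (not part of the original file; the
 * example_* names are hypothetical). A caller initializes a
 * struct vtimer_list, fills in ->function, ->data and ->expires
 * (in CPU timer units, cf. set_vtimer()), and arms it one-shot via
 * add_virt_timer() or periodically via add_virt_timer_periodic().
 */
#if 0
static void example_vtimer_fn(unsigned long data)
{
	/* Invoked from virt_timer_expire() once the task has consumed
	 * ->expires units of virtual CPU time. */
}

static struct vtimer_list example_vtimer;

static void example_arm_vtimer(u64 expires)
{
	init_virt_timer(&example_vtimer);
	example_vtimer.function = example_vtimer_fn;
	example_vtimer.data = 0;
	example_vtimer.expires = expires;
	add_virt_timer(&example_vtimer);	/* one-shot */
}
#endif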

static int __mod_vtimer(struct vtimer_list *timer, u64 expires, int periodic)
{
	unsigned long flags;
	int rc;

	BUG_ON(!timer->function);

	if (timer->expires == expires && vtimer_pending(timer))
		return 1;
	spin_lock_irqsave(&virt_timer_lock, flags);
	rc = vtimer_pending(timer);
	if (rc)
		list_del_init(&timer->entry);
	timer->interval = periodic ? expires : 0;
	timer->expires = expires;
	internal_add_vtimer(timer);
	spin_unlock_irqrestore(&virt_timer_lock, flags);
	return rc;
}

/*
 * Returns whether it has modified a pending timer (1) or not (0).
 */
int mod_virt_timer(struct vtimer_list *timer, u64 expires)
{
	return __mod_vtimer(timer, expires, 0);
}
EXPORT_SYMBOL(mod_virt_timer);

/*
 * Returns whether it has modified a pending timer (1) or not (0).
 */
int mod_virt_timer_periodic(struct vtimer_list *timer, u64 expires)
{
	return __mod_vtimer(timer, expires, 1);
}
EXPORT_SYMBOL(mod_virt_timer_periodic);

/*
 * Delete a virtual timer.
 *
 * Returns whether the deleted timer was pending (1) or not (0).
 */
int del_virt_timer(struct vtimer_list *timer)
{
	unsigned long flags;

	if (!vtimer_pending(timer))
		return 0;
	spin_lock_irqsave(&virt_timer_lock, flags);
	list_del_init(&timer->entry);
	spin_unlock_irqrestore(&virt_timer_lock, flags);
	return 1;
}
EXPORT_SYMBOL(del_virt_timer);
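
/*
 * Illustrative sketch (not part of the original file), continuing the
 * hypothetical example_vtimer above: mod_virt_timer() re-programs a
 * timer, returning 1 if it was still pending (0 if it was armed anew);
 * del_virt_timer() removes it, returning 1 if it was pending.
 */
#if 0
static void example_rearm_and_cancel(u64 new_expires)
{
	int was_pending;

	was_pending = mod_virt_timer(&example_vtimer, new_expires);
	/* was_pending == 0 means the timer had already fired and
	 * was armed from scratch. */
	del_virt_timer(&example_vtimer);	/* cancel if still queued */
}
#endif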

/*
 * Start the virtual CPU timer on the current CPU.
 */
void vtime_init(void)
{
	/* set initial cpu timer */
	set_vtimer(VTIMER_MAX_SLICE);
}