/*
 * Virtual cpu timer based timer functions.
 *
 * Copyright IBM Corp. 2004, 2012
 * Author(s): Jan Glauber <jan.glauber@de.ibm.com>
 */

#include <linux/kernel_stat.h>
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/timex.h>
#include <linux/types.h>
#include <linux/time.h>

#include <asm/cputime.h>
#include <asm/vtimer.h>
#include <asm/vtime.h>
#include <asm/cpu_mf.h>
#include <asm/smp.h>

static void virt_timer_expire(void);

static LIST_HEAD(virt_timer_list);
static DEFINE_SPINLOCK(virt_timer_lock);
static atomic64_t virt_timer_current;
static atomic64_t virt_timer_elapsed;

DEFINE_PER_CPU(u64, mt_cycles[8]);
static DEFINE_PER_CPU(u64, mt_scaling_mult) = { 1 };
static DEFINE_PER_CPU(u64, mt_scaling_div) = { 1 };
static DEFINE_PER_CPU(u64, mt_scaling_jiffies);

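/*
 * The s390 CPU timer is a down-counting register: "stpt" stores its
 * current value, "spt" programs a new one.  set_vtimer() also folds the
 * CPU time consumed since the last update into
 * S390_lowcore.system_timer so that no time is lost when the timer is
 * re-armed.
 */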
static inline u64 get_vtimer(void)
{
	u64 timer;

	asm volatile("stpt %0" : "=m" (timer));
	return timer;
}

static inline void set_vtimer(u64 expires)
{
	u64 timer;

	asm volatile(
		"	stpt	%0\n"	/* Store current cpu timer value */
		"	spt	%1"	/* Set new value imm. afterwards */
		: "=m" (timer) : "m" (expires));
	S390_lowcore.system_timer += S390_lowcore.last_update_timer - timer;
	S390_lowcore.last_update_timer = expires;
}

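/*
 * Advance the virtual timer base by the elapsed CPU time.  The elapsed
 * time is accumulated in virt_timer_elapsed; the return value is nonzero
 * once the earliest pending expiry (cached in virt_timer_current) has
 * been reached, telling the caller to run virt_timer_expire().
 */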
static inline int virt_timer_forward(u64 elapsed)
{
	BUG_ON(!irqs_disabled());

	if (list_empty(&virt_timer_list))
		return 0;
	elapsed = atomic64_add_return(elapsed, &virt_timer_elapsed);
	return elapsed >= atomic64_read(&virt_timer_current);
}

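/*
 * Recompute the per-cpu SMT scaling factor from the core's thread
 * utilization counters (read via stcctm5; presumably counter i holds the
 * number of cycles during which i + 1 hardware threads were active).
 * With delta_i the increment of counter i since the last update, the
 * loop below computes, scaled by (smp_cpu_mtid + 1)! to stay in integer
 * arithmetic,
 *
 *	mult / div = (sum_i delta_i / (i + 1)) / (sum_i delta_i)
 *
 * i.e. the cycle-weighted average of 1/(active threads).  Raw CPU time
 * multiplied by mult/div gives the "scaled" time passed to the
 * accounting functions below.
 */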
static void update_mt_scaling(void)
{
	u64 cycles_new[8], *cycles_old;
	u64 delta, fac, mult, div;
	int i;

	stcctm5(smp_cpu_mtid + 1, cycles_new);
	cycles_old = this_cpu_ptr(mt_cycles);
	fac = 1;
	mult = div = 0;
	for (i = 0; i <= smp_cpu_mtid; i++) {
		delta = cycles_new[i] - cycles_old[i];
		div += delta;
		mult *= i + 1;
		mult += delta * fac;
		fac *= i + 1;
	}
	div *= fac;
	if (div > 0) {
		/* Update scaling factor */
		__this_cpu_write(mt_scaling_mult, mult);
		__this_cpu_write(mt_scaling_div, div);
		memcpy(cycles_old, cycles_new,
		       sizeof(u64) * (smp_cpu_mtid + 1));
	}
	__this_cpu_write(mt_scaling_jiffies, jiffies_64);
}

/*
 * Update process times based on virtual cpu times stored by entry.S
 * to the lowcore fields user_timer, system_timer & steal_clock.
 */
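/*
 * Steal time falls out by comparison: the TOD clock delta since the last
 * update is added to steal_timer and the user/system CPU-timer deltas
 * are subtracted from it again; whatever remains positive is wall-clock
 * time during which the (virtual) CPU did not run and is accounted as
 * steal time.
 */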
static int do_account_vtime(struct task_struct *tsk, int hardirq_offset)
{
	struct thread_info *ti = task_thread_info(tsk);
	u64 timer, clock, user, system, steal;
	u64 user_scaled, system_scaled;

	timer = S390_lowcore.last_update_timer;
	clock = S390_lowcore.last_update_clock;
	asm volatile(
		"	stpt	%0\n"	/* Store current cpu timer value */
#ifdef CONFIG_HAVE_MARCH_Z9_109_FEATURES
		"	stckf	%1"	/* Store current tod clock value */
#else
		"	stck	%1"	/* Store current tod clock value */
#endif
		: "=m" (S390_lowcore.last_update_timer),
		  "=m" (S390_lowcore.last_update_clock));
	S390_lowcore.system_timer += timer - S390_lowcore.last_update_timer;
	S390_lowcore.steal_timer += S390_lowcore.last_update_clock - clock;

	/* Update MT utilization calculation */
	if (smp_cpu_mtid &&
	    time_after64(jiffies_64, this_cpu_read(mt_scaling_jiffies)))
		update_mt_scaling();

	user = S390_lowcore.user_timer - ti->user_timer;
	S390_lowcore.steal_timer -= user;
	ti->user_timer = S390_lowcore.user_timer;

	system = S390_lowcore.system_timer - ti->system_timer;
	S390_lowcore.steal_timer -= system;
	ti->system_timer = S390_lowcore.system_timer;

	user_scaled = user;
	system_scaled = system;
	/* Do MT utilization scaling */
	if (smp_cpu_mtid) {
		u64 mult = __this_cpu_read(mt_scaling_mult);
		u64 div = __this_cpu_read(mt_scaling_div);

		user_scaled = (user_scaled * mult) / div;
		system_scaled = (system_scaled * mult) / div;
	}
	account_user_time(tsk, user, user_scaled);
	account_system_time(tsk, hardirq_offset, system, system_scaled);

	steal = S390_lowcore.steal_timer;
	if ((s64) steal > 0) {
		S390_lowcore.steal_timer = 0;
		account_steal_time(steal);
	}

	return virt_timer_forward(user + system);
}

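/*
 * On a context switch the accumulated times of the outgoing task are
 * flushed and the lowcore/thread_info baselines are switched over to the
 * incoming task, so that subsequent deltas are charged to the right task.
 */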
void vtime_task_switch(struct task_struct *prev)
{
	struct thread_info *ti;

	do_account_vtime(prev, 0);
	ti = task_thread_info(prev);
	ti->user_timer = S390_lowcore.user_timer;
	ti->system_timer = S390_lowcore.system_timer;
	ti = task_thread_info(current);
	S390_lowcore.user_timer = ti->user_timer;
	S390_lowcore.system_timer = ti->system_timer;
}

/*
 * In s390, accounting pending user time also implies
 * accounting system time in order to correctly compute
 * the stolen time accounting.
 */
void vtime_account_user(struct task_struct *tsk)
{
	if (do_account_vtime(tsk, HARDIRQ_OFFSET))
		virt_timer_expire();
}

/*
 * Update process times based on virtual cpu times stored by entry.S
 * to the lowcore fields user_timer, system_timer & steal_clock.
 */
void vtime_account_irq_enter(struct task_struct *tsk)
{
	struct thread_info *ti = task_thread_info(tsk);
	u64 timer, system, system_scaled;

	timer = S390_lowcore.last_update_timer;
	S390_lowcore.last_update_timer = get_vtimer();
	S390_lowcore.system_timer += timer - S390_lowcore.last_update_timer;

	/* Update MT utilization calculation */
	if (smp_cpu_mtid &&
	    time_after64(jiffies_64, this_cpu_read(mt_scaling_jiffies)))
		update_mt_scaling();

	system = S390_lowcore.system_timer - ti->system_timer;
	S390_lowcore.steal_timer -= system;
	ti->system_timer = S390_lowcore.system_timer;
	system_scaled = system;
	/* Do MT utilization scaling */
	if (smp_cpu_mtid) {
		u64 mult = __this_cpu_read(mt_scaling_mult);
		u64 div = __this_cpu_read(mt_scaling_div);

		system_scaled = (system_scaled * mult) / div;
	}
	account_system_time(tsk, 0, system, system_scaled);

	virt_timer_forward(system);
}
EXPORT_SYMBOL_GPL(vtime_account_irq_enter);

void vtime_account_system(struct task_struct *tsk)
__attribute__((alias("vtime_account_irq_enter")));
EXPORT_SYMBOL_GPL(vtime_account_system);

/*
 * Sorted add to a list. The list is searched linearly until the first
 * element with a bigger expiry value is found.
 */
static void list_add_sorted(struct vtimer_list *timer, struct list_head *head)
{
	struct vtimer_list *tmp;

	list_for_each_entry(tmp, head, entry) {
		if (tmp->expires > timer->expires) {
			list_add_tail(&timer->entry, &tmp->entry);
			return;
		}
	}
	list_add_tail(&timer->entry, head);
}

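/*
 * Expired timers are first collected on a private list under
 * virt_timer_lock; their callbacks run only after the lock has been
 * dropped, so a callback may itself add, modify or delete virtual
 * timers.  Periodic timers are re-armed relative to the current
 * virt_timer_elapsed base and sorted back into the list.
 */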
/*
 * Handler for expired virtual CPU timer.
 */
static void virt_timer_expire(void)
{
	struct vtimer_list *timer, *tmp;
	unsigned long elapsed;
	LIST_HEAD(cb_list);

	/* walk timer list, fire all expired timers */
	spin_lock(&virt_timer_lock);
	elapsed = atomic64_read(&virt_timer_elapsed);
	list_for_each_entry_safe(timer, tmp, &virt_timer_list, entry) {
		if (timer->expires < elapsed)
			/* move expired timer to the callback queue */
			list_move_tail(&timer->entry, &cb_list);
		else
			timer->expires -= elapsed;
	}
	if (!list_empty(&virt_timer_list)) {
		timer = list_first_entry(&virt_timer_list,
					 struct vtimer_list, entry);
		atomic64_set(&virt_timer_current, timer->expires);
	}
	atomic64_sub(elapsed, &virt_timer_elapsed);
	spin_unlock(&virt_timer_lock);

	/* Do callbacks and recharge periodic timers */
	list_for_each_entry_safe(timer, tmp, &cb_list, entry) {
		list_del_init(&timer->entry);
		timer->function(timer->data);
		if (timer->interval) {
			/* Recharge interval timer */
			timer->expires = timer->interval +
				atomic64_read(&virt_timer_elapsed);
			spin_lock(&virt_timer_lock);
			list_add_sorted(timer, &virt_timer_list);
			spin_unlock(&virt_timer_lock);
		}
	}
}

void init_virt_timer(struct vtimer_list *timer)
{
	timer->function = NULL;
	INIT_LIST_HEAD(&timer->entry);
}
EXPORT_SYMBOL(init_virt_timer);

static inline int vtimer_pending(struct vtimer_list *timer)
{
	return !list_empty(&timer->entry);
}

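/*
 * Pending timers store their expiry relative to the running
 * virt_timer_elapsed base; virt_timer_current caches the smallest
 * pending expiry so that virt_timer_forward() can detect expiry without
 * taking virt_timer_lock.
 */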
static void internal_add_vtimer(struct vtimer_list *timer)
{
	if (list_empty(&virt_timer_list)) {
		/* First timer, just program it. */
		atomic64_set(&virt_timer_current, timer->expires);
		atomic64_set(&virt_timer_elapsed, 0);
		list_add(&timer->entry, &virt_timer_list);
	} else {
		/* Update timer against current base. */
		timer->expires += atomic64_read(&virt_timer_elapsed);
		if (likely((s64) timer->expires <
			   (s64) atomic64_read(&virt_timer_current)))
			/* The new timer expires before the current timer. */
			atomic64_set(&virt_timer_current, timer->expires);
		/* Insert new timer into the list. */
		list_add_sorted(timer, &virt_timer_list);
	}
}

static void __add_vtimer(struct vtimer_list *timer, int periodic)
{
	unsigned long flags;

	timer->interval = periodic ? timer->expires : 0;
	spin_lock_irqsave(&virt_timer_lock, flags);
	internal_add_vtimer(timer);
	spin_unlock_irqrestore(&virt_timer_lock, flags);
}

/*
 * add_virt_timer - add a one-shot virtual CPU timer
 */
void add_virt_timer(struct vtimer_list *timer)
{
	__add_vtimer(timer, 0);
}
EXPORT_SYMBOL(add_virt_timer);

/*
 * add_virt_timer_periodic - add an interval virtual CPU timer
 */
void add_virt_timer_periodic(struct vtimer_list *timer)
{
	__add_vtimer(timer, 1);
}
EXPORT_SYMBOL(add_virt_timer_periodic);

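/*
 * Minimal usage sketch, illustrative only: it assumes the struct
 * vtimer_list layout from <asm/vtimer.h> with the entry, expires,
 * interval, function and data members used above, and a callback taking
 * an unsigned long argument; my_vtimer, my_vtimer_fn and MY_SLICE are
 * made-up names.
 *
 *	static struct vtimer_list my_vtimer;
 *
 *	static void my_vtimer_fn(unsigned long data)
 *	{
 *		... runs from virt_timer_expire() once MY_SLICE of
 *		    CPU time has been consumed ...
 *	}
 *
 *	init_virt_timer(&my_vtimer);
 *	my_vtimer.function = my_vtimer_fn;
 *	my_vtimer.data = 0;
 *	my_vtimer.expires = MY_SLICE;
 *	add_virt_timer_periodic(&my_vtimer);	(or add_virt_timer())
 */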
static int __mod_vtimer(struct vtimer_list *timer, u64 expires, int periodic)
{
	unsigned long flags;
	int rc;

	BUG_ON(!timer->function);

	if (timer->expires == expires && vtimer_pending(timer))
		return 1;
	spin_lock_irqsave(&virt_timer_lock, flags);
	rc = vtimer_pending(timer);
	if (rc)
		list_del_init(&timer->entry);
	timer->interval = periodic ? expires : 0;
	timer->expires = expires;
	internal_add_vtimer(timer);
	spin_unlock_irqrestore(&virt_timer_lock, flags);
	return rc;
}

/*
 * returns whether it has modified a pending timer (1) or not (0)
 */
int mod_virt_timer(struct vtimer_list *timer, u64 expires)
{
	return __mod_vtimer(timer, expires, 0);
}
EXPORT_SYMBOL(mod_virt_timer);

/*
 * returns whether it has modified a pending timer (1) or not (0)
 */
int mod_virt_timer_periodic(struct vtimer_list *timer, u64 expires)
{
	return __mod_vtimer(timer, expires, 1);
}
EXPORT_SYMBOL(mod_virt_timer_periodic);

/*
 * Delete a virtual timer.
 *
 * returns whether the deleted timer was pending (1) or not (0)
 */
int del_virt_timer(struct vtimer_list *timer)
{
	unsigned long flags;

	if (!vtimer_pending(timer))
		return 0;
	spin_lock_irqsave(&virt_timer_lock, flags);
	list_del_init(&timer->entry);
	spin_unlock_irqrestore(&virt_timer_lock, flags);
	return 1;
}
EXPORT_SYMBOL(del_virt_timer);

/*
 * Start the virtual CPU timer on the current CPU.
 */
void vtime_init(void)
{
	/* set initial cpu timer */
	set_vtimer(VTIMER_MAX_SLICE);
	/* Setup initial MT scaling values */
	if (smp_cpu_mtid) {
		__this_cpu_write(mt_scaling_jiffies, jiffies);
		__this_cpu_write(mt_scaling_mult, 1);
		__this_cpu_write(mt_scaling_div, 1);
		stcctm5(smp_cpu_mtid + 1, this_cpu_ptr(mt_cycles));
	}
}