[S390] Introduce get_clock_fast()
arch/s390/include/asm/timex.h
/*
 * include/asm-s390/timex.h
 *
 * S390 version
 * Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
 *
 * Derived from "include/asm-i386/timex.h"
 * Copyright (C) 1992, Linus Torvalds
 */

#ifndef _ASM_S390_TIMEX_H
#define _ASM_S390_TIMEX_H

#include <asm/lowcore.h>

/* The value of the TOD clock for 1.1.1970. */
#define TOD_UNIX_EPOCH 0x7d91048bca000000ULL
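/*
 * Derivation sketch (added note, not in the original header): the TOD
 * clock counts from 1900-01-01 00:00 UTC and bit 51 corresponds to one
 * microsecond, i.e. 4096 clock units per microsecond.  The interval
 * from 1900-01-01 to 1970-01-01 is 25567 days = 2208988800 seconds, and
 * 2208988800 * 1000000 * 4096 = 0x7d91048bca000000.
 */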
/* Inline functions for clock register access. */
static inline int set_clock(__u64 time)
{
        int cc;

        asm volatile(
                "   sck   %1\n"
                "   ipm   %0\n"
                "   srl   %0,28\n"
                : "=d" (cc) : "Q" (time) : "cc");
        return cc;
}

static inline int store_clock(__u64 *time)
{
        int cc;

        asm volatile(
                "   stck  %1\n"
                "   ipm   %0\n"
                "   srl   %0,28\n"
                : "=d" (cc), "=Q" (*time) : : "cc");
        return cc;
}
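/*
 * Illustration (added note, not in the original header): set_clock() and
 * store_clock() return the condition code of the SCK/STCK instruction.
 * A return value of 0 from store_clock() indicates the TOD clock is in
 * the set state and the stored value is usable:
 *
 *      unsigned long long clk;
 *
 *      if (store_clock(&clk) == 0)
 *              use_tod_value(clk);     (use_tod_value() is a hypothetical caller)
 */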
static inline void set_clock_comparator(__u64 time)
{
        asm volatile("sckc %0" : : "Q" (time));
}

static inline void store_clock_comparator(__u64 *time)
{
        asm volatile("stckc %0" : "=Q" (*time));
}

void clock_comparator_work(void);

static inline unsigned long long local_tick_disable(void)
{
        unsigned long long old;

        old = S390_lowcore.clock_comparator;
        S390_lowcore.clock_comparator = -1ULL;
        set_clock_comparator(S390_lowcore.clock_comparator);
        return old;
}

static inline void local_tick_enable(unsigned long long comp)
{
        S390_lowcore.clock_comparator = comp;
        set_clock_comparator(S390_lowcore.clock_comparator);
}
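/*
 * Usage sketch (added note, not in the original header): the two helpers
 * above are typically paired, saving the old clock comparator, running
 * with the local tick blocked and restoring it afterwards:
 *
 *      unsigned long long old;
 *
 *      old = local_tick_disable();
 *      do_work_without_tick();         (hypothetical placeholder)
 *      local_tick_enable(old);
 */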
#define CLOCK_TICK_RATE 1193180 /* Underlying HZ */

typedef unsigned long long cycles_t;

static inline unsigned long long get_clock(void)
{
        unsigned long long clk;

        asm volatile("stck %0" : "=Q" (clk) : : "cc");
        return clk;
}

static inline void get_clock_ext(char *clk)
{
        asm volatile("stcke %0" : "=Q" (*clk) : : "cc");
}

static inline unsigned long long get_clock_fast(void)
{
        unsigned long long clk;

        if (test_facility(25))
                asm volatile(".insn s,0xb27c0000,%0" : "=Q" (clk) : : "cc");
        else
                clk = get_clock();
        return clk;
}
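/*
 * Note (added, not in the original header): facility bit 25 is the
 * store-clock-fast facility and the .insn above encodes STCKF (opcode
 * 0xb27c), which avoids the serialization performed by STCK.  A minimal
 * sketch for timing a code section with it:
 *
 *      unsigned long long start, delta;
 *
 *      start = get_clock_fast();
 *      section_to_be_timed();                  (hypothetical placeholder)
 *      delta = get_clock_fast() - start;       (elapsed TOD clock units)
 */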
static inline unsigned long long get_clock_xt(void)
{
        unsigned char clk[16];
        get_clock_ext(clk);
        return *((unsigned long long *)&clk[1]);
}
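/*
 * Note (added, not in the original header): STCKE stores a 16-byte value
 * whose bytes 1..8 carry the same bits as the 8-byte STCK value, which
 * is why get_clock_xt() reads the unsigned long long at &clk[1].
 */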
static inline cycles_t get_cycles(void)
{
        return (cycles_t) get_clock() >> 2;
}

int get_sync_clock(unsigned long long *clock);
void init_cpu_timer(void);
unsigned long long monotonic_clock(void);

void tod_to_timeval(__u64, struct timespec *);

static inline
void stck_to_timespec(unsigned long long stck, struct timespec *ts)
{
        tod_to_timeval(stck - TOD_UNIX_EPOCH, ts);
}
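/*
 * Usage sketch (added note, not in the original header): converting the
 * current TOD clock value to a struct timespec relative to the Unix
 * epoch:
 *
 *      struct timespec ts;
 *
 *      stck_to_timespec(get_clock(), &ts);
 */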
extern u64 sched_clock_base_cc;

/**
 * get_clock_monotonic - returns current time in clock rate units
 *
 * The clock and sched_clock_base_cc get changed via stop_machine.
 * Therefore preemption must be disabled when calling this function,
 * otherwise the returned value is not guaranteed to be monotonic.
 */
static inline unsigned long long get_clock_monotonic(void)
{
        return get_clock_xt() - sched_clock_base_cc;
}
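/*
 * Usage sketch (added note, not in the original header; assumes the
 * caller has <linux/preempt.h> available): reading the monotonic clock
 * with the required preemption protection:
 *
 *      unsigned long long clk;
 *
 *      preempt_disable();
 *      clk = get_clock_monotonic();
 *      preempt_enable();
 */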
#endif