Merge remote-tracking branches 'regmap/fix/doc' and 'regmap/fix/mmio' into regmap...
[deliverable/linux.git] / arch / x86 / lib / msr-smp.c
1 #include <linux/module.h>
2 #include <linux/preempt.h>
3 #include <linux/smp.h>
4 #include <asm/msr.h>
5
6 static void __rdmsr_on_cpu(void *info)
7 {
8 struct msr_info *rv = info;
9 struct msr *reg;
10 int this_cpu = raw_smp_processor_id();
11
12 if (rv->msrs)
13 reg = per_cpu_ptr(rv->msrs, this_cpu);
14 else
15 reg = &rv->reg;
16
17 rdmsr(rv->msr_no, reg->l, reg->h);
18 }
19
20 static void __wrmsr_on_cpu(void *info)
21 {
22 struct msr_info *rv = info;
23 struct msr *reg;
24 int this_cpu = raw_smp_processor_id();
25
26 if (rv->msrs)
27 reg = per_cpu_ptr(rv->msrs, this_cpu);
28 else
29 reg = &rv->reg;
30
31 wrmsr(rv->msr_no, reg->l, reg->h);
32 }
33
34 int rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
35 {
36 int err;
37 struct msr_info rv;
38
39 memset(&rv, 0, sizeof(rv));
40
41 rv.msr_no = msr_no;
42 err = smp_call_function_single(cpu, __rdmsr_on_cpu, &rv, 1);
43 *l = rv.reg.l;
44 *h = rv.reg.h;
45
46 return err;
47 }
48 EXPORT_SYMBOL(rdmsr_on_cpu);
49
50 int rdmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 *q)
51 {
52 int err;
53 struct msr_info rv;
54
55 memset(&rv, 0, sizeof(rv));
56
57 rv.msr_no = msr_no;
58 err = smp_call_function_single(cpu, __rdmsr_on_cpu, &rv, 1);
59 *q = rv.reg.q;
60
61 return err;
62 }
63 EXPORT_SYMBOL(rdmsrl_on_cpu);
64
65 int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
66 {
67 int err;
68 struct msr_info rv;
69
70 memset(&rv, 0, sizeof(rv));
71
72 rv.msr_no = msr_no;
73 rv.reg.l = l;
74 rv.reg.h = h;
75 err = smp_call_function_single(cpu, __wrmsr_on_cpu, &rv, 1);
76
77 return err;
78 }
79 EXPORT_SYMBOL(wrmsr_on_cpu);
80
81 int wrmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 q)
82 {
83 int err;
84 struct msr_info rv;
85
86 memset(&rv, 0, sizeof(rv));
87
88 rv.msr_no = msr_no;
89 rv.reg.q = q;
90
91 err = smp_call_function_single(cpu, __wrmsr_on_cpu, &rv, 1);
92
93 return err;
94 }
95 EXPORT_SYMBOL(wrmsrl_on_cpu);
96
/*
 * Run an MSR read/write IPI handler (@msr_func) on every CPU in @mask.
 * rv.msrs points at the caller's per-CPU array, so each handler
 * invocation operates on its own CPU's slot.
 */
static void __rwmsr_on_cpus(const struct cpumask *mask, u32 msr_no,
			    struct msr *msrs,
			    void (*msr_func) (void *info))
{
	struct msr_info rv;
	int this_cpu;

	memset(&rv, 0, sizeof(rv));

	rv.msrs		= msrs;
	rv.msr_no	= msr_no;

	/* get_cpu() disables preemption so this_cpu stays valid below. */
	this_cpu = get_cpu();

	/*
	 * smp_call_function_many() does not invoke @msr_func on the
	 * calling CPU, so when the current CPU is part of @mask it must
	 * be handled by a direct call first.
	 */
	if (cpumask_test_cpu(this_cpu, mask))
		msr_func(&rv);

	smp_call_function_many(mask, msr_func, &rv, 1);
	put_cpu();
}
117
/*
 * rdmsr on a bunch of CPUs
 *
 * @mask:       which CPUs
 * @msr_no:     which MSR
 * @msrs:       per-CPU array receiving each CPU's MSR value
 *
 */
void rdmsr_on_cpus(const struct cpumask *mask, u32 msr_no, struct msr *msrs)
{
	__rwmsr_on_cpus(mask, msr_no, msrs, __rdmsr_on_cpu);
}
EXPORT_SYMBOL(rdmsr_on_cpus);
130
131 /*
132 * wrmsr on a bunch of CPUs
133 *
134 * @mask: which CPUs
135 * @msr_no: which MSR
136 * @msrs: array of MSR values
137 *
138 */
139 void wrmsr_on_cpus(const struct cpumask *mask, u32 msr_no, struct msr *msrs)
140 {
141 __rwmsr_on_cpus(mask, msr_no, msrs, __wrmsr_on_cpu);
142 }
143 EXPORT_SYMBOL(wrmsr_on_cpus);
144
/*
 * These "safe" variants are slower and should be used when the target MSR
 * may not actually exist.
 */
static void __rdmsr_safe_on_cpu(void *info)
{
	struct msr_info *rv = info;

	/* On failure rdmsr_safe()'s error code is reported via rv->err. */
	rv->err = rdmsr_safe(rv->msr_no, &rv->reg.l, &rv->reg.h);
}
153
/* IPI callback: fault-tolerant write of rv->reg to MSR rv->msr_no. */
static void __wrmsr_safe_on_cpu(void *info)
{
	struct msr_info *rv = info;

	/* On failure wrmsr_safe()'s error code is reported via rv->err. */
	rv->err = wrmsr_safe(rv->msr_no, rv->reg.l, rv->reg.h);
}
160
161 int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
162 {
163 int err;
164 struct msr_info rv;
165
166 memset(&rv, 0, sizeof(rv));
167
168 rv.msr_no = msr_no;
169 err = smp_call_function_single(cpu, __rdmsr_safe_on_cpu, &rv, 1);
170 *l = rv.reg.l;
171 *h = rv.reg.h;
172
173 return err ? err : rv.err;
174 }
175 EXPORT_SYMBOL(rdmsr_safe_on_cpu);
176
177 int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
178 {
179 int err;
180 struct msr_info rv;
181
182 memset(&rv, 0, sizeof(rv));
183
184 rv.msr_no = msr_no;
185 rv.reg.l = l;
186 rv.reg.h = h;
187 err = smp_call_function_single(cpu, __wrmsr_safe_on_cpu, &rv, 1);
188
189 return err ? err : rv.err;
190 }
191 EXPORT_SYMBOL(wrmsr_safe_on_cpu);
192
193 int wrmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 q)
194 {
195 int err;
196 struct msr_info rv;
197
198 memset(&rv, 0, sizeof(rv));
199
200 rv.msr_no = msr_no;
201 rv.reg.q = q;
202
203 err = smp_call_function_single(cpu, __wrmsr_safe_on_cpu, &rv, 1);
204
205 return err ? err : rv.err;
206 }
207 EXPORT_SYMBOL(wrmsrl_safe_on_cpu);
208
209 int rdmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 *q)
210 {
211 int err;
212 struct msr_info rv;
213
214 memset(&rv, 0, sizeof(rv));
215
216 rv.msr_no = msr_no;
217 err = smp_call_function_single(cpu, __rdmsr_safe_on_cpu, &rv, 1);
218 *q = rv.reg.q;
219
220 return err ? err : rv.err;
221 }
222 EXPORT_SYMBOL(rdmsrl_safe_on_cpu);
223
224 /*
225 * These variants are significantly slower, but allows control over
226 * the entire 32-bit GPR set.
227 */
228 static void __rdmsr_safe_regs_on_cpu(void *info)
229 {
230 struct msr_regs_info *rv = info;
231
232 rv->err = rdmsr_safe_regs(rv->regs);
233 }
234
/* IPI callback: full-GPR fault-tolerant MSR write on this CPU. */
static void __wrmsr_safe_regs_on_cpu(void *info)
{
	struct msr_regs_info *rv = info;

	/* Fault-tolerant; the result code is reported via rv->err. */
	rv->err = wrmsr_safe_regs(rv->regs);
}
241
242 int rdmsr_safe_regs_on_cpu(unsigned int cpu, u32 *regs)
243 {
244 int err;
245 struct msr_regs_info rv;
246
247 rv.regs = regs;
248 rv.err = -EIO;
249 err = smp_call_function_single(cpu, __rdmsr_safe_regs_on_cpu, &rv, 1);
250
251 return err ? err : rv.err;
252 }
253 EXPORT_SYMBOL(rdmsr_safe_regs_on_cpu);
254
255 int wrmsr_safe_regs_on_cpu(unsigned int cpu, u32 *regs)
256 {
257 int err;
258 struct msr_regs_info rv;
259
260 rv.regs = regs;
261 rv.err = -EIO;
262 err = smp_call_function_single(cpu, __wrmsr_safe_regs_on_cpu, &rv, 1);
263
264 return err ? err : rv.err;
265 }
266 EXPORT_SYMBOL(wrmsr_safe_regs_on_cpu);
This page took 0.037801 seconds and 6 git commands to generate.