Merge branch 'upstream' into for-linus
[deliverable/linux.git] / arch / s390 / kvm / gaccess.h
CommitLineData
b0c632db 1/*
 * gaccess.h -  access guest memory
b0c632db 3 *
628eb9b8 4 * Copyright IBM Corp. 2008,2009
b0c632db
HC
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License (version 2 only)
8 * as published by the Free Software Foundation.
9 *
10 * Author(s): Carsten Otte <cotte@de.ibm.com>
11 */
12
13#ifndef __KVM_S390_GACCESS_H
14#define __KVM_S390_GACCESS_H
15
16#include <linux/compiler.h>
17#include <linux/kvm_host.h>
18#include <asm/uaccess.h>
628eb9b8 19#include "kvm-s390.h"
b0c632db
HC
20
/*
 * Translate a guest real address to a host user-space pointer.
 *
 * Applies s390 prefixing first: the guest's pages [0, 8k) and
 * [prefix, prefix + 8k) are swapped, so accesses to either range are
 * redirected to the other before the gmap translation.  The result of
 * gmap_fault() is either a host address or an ERR_PTR-style error value;
 * callers must check it with IS_ERR().
 */
static inline void __user *__guestaddr_to_user(struct kvm_vcpu *vcpu,
					       unsigned long guestaddr)
{
	unsigned long prefix  = vcpu->arch.sie_block->prefix;

	/* low core (first two pages) is relocated to the prefix area ... */
	if (guestaddr < 2 * PAGE_SIZE)
		guestaddr += prefix;
	/* ... and the prefix area maps back to absolute 0 */
	else if ((guestaddr >= prefix) && (guestaddr < prefix + 2 * PAGE_SIZE))
		guestaddr -= prefix;

	/* resolve the (now absolute) guest address via the guest mapping */
	return (void __user *) gmap_fault(guestaddr, vcpu->arch.gmap);
}
33
0096369d 34static inline int get_guest_u64(struct kvm_vcpu *vcpu, unsigned long guestaddr,
b0c632db
HC
35 u64 *result)
36{
37 void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);
38
39 BUG_ON(guestaddr & 7);
40
41 if (IS_ERR((void __force *) uptr))
42 return PTR_ERR((void __force *) uptr);
43
0096369d 44 return get_user(*result, (unsigned long __user *) uptr);
b0c632db
HC
45}
46
0096369d 47static inline int get_guest_u32(struct kvm_vcpu *vcpu, unsigned long guestaddr,
b0c632db
HC
48 u32 *result)
49{
50 void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);
51
52 BUG_ON(guestaddr & 3);
53
54 if (IS_ERR((void __force *) uptr))
55 return PTR_ERR((void __force *) uptr);
56
57 return get_user(*result, (u32 __user *) uptr);
58}
59
0096369d 60static inline int get_guest_u16(struct kvm_vcpu *vcpu, unsigned long guestaddr,
b0c632db
HC
61 u16 *result)
62{
63 void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);
64
65 BUG_ON(guestaddr & 1);
66
67 if (IS_ERR(uptr))
68 return PTR_ERR(uptr);
69
70 return get_user(*result, (u16 __user *) uptr);
71}
72
0096369d 73static inline int get_guest_u8(struct kvm_vcpu *vcpu, unsigned long guestaddr,
b0c632db
HC
74 u8 *result)
75{
76 void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);
77
78 if (IS_ERR((void __force *) uptr))
79 return PTR_ERR((void __force *) uptr);
80
81 return get_user(*result, (u8 __user *) uptr);
82}
83
0096369d 84static inline int put_guest_u64(struct kvm_vcpu *vcpu, unsigned long guestaddr,
b0c632db
HC
85 u64 value)
86{
87 void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);
88
89 BUG_ON(guestaddr & 7);
90
91 if (IS_ERR((void __force *) uptr))
92 return PTR_ERR((void __force *) uptr);
93
94 return put_user(value, (u64 __user *) uptr);
95}
96
0096369d 97static inline int put_guest_u32(struct kvm_vcpu *vcpu, unsigned long guestaddr,
b0c632db
HC
98 u32 value)
99{
100 void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);
101
102 BUG_ON(guestaddr & 3);
103
104 if (IS_ERR((void __force *) uptr))
105 return PTR_ERR((void __force *) uptr);
106
107 return put_user(value, (u32 __user *) uptr);
108}
109
0096369d 110static inline int put_guest_u16(struct kvm_vcpu *vcpu, unsigned long guestaddr,
b0c632db
HC
111 u16 value)
112{
113 void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);
114
115 BUG_ON(guestaddr & 1);
116
117 if (IS_ERR((void __force *) uptr))
118 return PTR_ERR((void __force *) uptr);
119
120 return put_user(value, (u16 __user *) uptr);
121}
122
0096369d 123static inline int put_guest_u8(struct kvm_vcpu *vcpu, unsigned long guestaddr,
b0c632db
HC
124 u8 value)
125{
126 void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);
127
128 if (IS_ERR((void __force *) uptr))
129 return PTR_ERR((void __force *) uptr);
130
131 return put_user(value, (u8 __user *) uptr);
132}
133
134
0096369d
MS
135static inline int __copy_to_guest_slow(struct kvm_vcpu *vcpu,
136 unsigned long guestdest,
092670cd 137 void *from, unsigned long n)
b0c632db
HC
138{
139 int rc;
140 unsigned long i;
092670cd 141 u8 *data = from;
b0c632db
HC
142
143 for (i = 0; i < n; i++) {
144 rc = put_guest_u8(vcpu, guestdest++, *(data++));
145 if (rc < 0)
146 return rc;
147 }
148 return 0;
149}
150
/*
 * Copy @n bytes from host buffer @from to guest real address @guestdest
 * using copy_to_user() on whole chunks, faulting in each segment-table
 * (PMD-sized) region via gmap_fault() before touching it.  Does NOT
 * apply prefixing — callers must either pass addresses that need none
 * or pre-adjust them (see copy_to_guest()).  Returns 0 on success,
 * -EFAULT on a failed user copy or address overflow, or the error from
 * gmap_fault().
 */
static inline int __copy_to_guest_fast(struct kvm_vcpu *vcpu,
				       unsigned long guestdest,
				       void *from, unsigned long n)
{
	int r;
	void __user *uptr;
	unsigned long size;

	/* reject wrap-around of the guest address range */
	if (guestdest + n < guestdest)
		return -EFAULT;

	/* simple case: all within one segment table entry? */
	if ((guestdest & PMD_MASK) == ((guestdest+n) & PMD_MASK)) {
		uptr = (void __user *) gmap_fault(guestdest, vcpu->arch.gmap);

		if (IS_ERR((void __force *) uptr))
			return PTR_ERR((void __force *) uptr);

		/* copy_to_user returns the number of bytes NOT copied */
		r = copy_to_user(uptr, from, n);

		if (r)
			r = -EFAULT;

		goto out;
	}

	/* copy first segment: up to the next PMD boundary */
	uptr = (void __user *)gmap_fault(guestdest, vcpu->arch.gmap);

	if (IS_ERR((void __force *) uptr))
		return PTR_ERR((void __force *) uptr);

	size = PMD_SIZE - (guestdest & ~PMD_MASK);

	r = copy_to_user(uptr, from, size);

	if (r) {
		r = -EFAULT;
		goto out;
	}
	from += size;
	n -= size;
	guestdest += size;

	/* copy full segments, re-resolving the host address for each */
	while (n >= PMD_SIZE) {
		uptr = (void __user *)gmap_fault(guestdest, vcpu->arch.gmap);

		if (IS_ERR((void __force *) uptr))
			return PTR_ERR((void __force *) uptr);

		r = copy_to_user(uptr, from, PMD_SIZE);

		if (r) {
			r = -EFAULT;
			goto out;
		}
		from += PMD_SIZE;
		n -= PMD_SIZE;
		guestdest += PMD_SIZE;
	}

	/* copy the tail segment (r is already 0 if this is skipped) */
	if (n) {
		uptr = (void __user *)gmap_fault(guestdest, vcpu->arch.gmap);

		if (IS_ERR((void __force *) uptr))
			return PTR_ERR((void __force *) uptr);

		r = copy_to_user(uptr, from, n);

		if (r)
			r = -EFAULT;
	}
out:
	return r;
}
228
/*
 * Copy @n bytes from host buffer @from to guest ABSOLUTE address
 * @guestdest, i.e. without applying the prefix swap done by
 * copy_to_guest().
 */
static inline int copy_to_guest_absolute(struct kvm_vcpu *vcpu,
					 unsigned long guestdest,
					 void *from, unsigned long n)
{
	return __copy_to_guest_fast(vcpu, guestdest, from, n);
}
235
0096369d 236static inline int copy_to_guest(struct kvm_vcpu *vcpu, unsigned long guestdest,
092670cd 237 void *from, unsigned long n)
b0c632db 238{
0096369d 239 unsigned long prefix = vcpu->arch.sie_block->prefix;
b0c632db
HC
240
241 if ((guestdest < 2 * PAGE_SIZE) && (guestdest + n > 2 * PAGE_SIZE))
242 goto slowpath;
243
244 if ((guestdest < prefix) && (guestdest + n > prefix))
245 goto slowpath;
246
247 if ((guestdest < prefix + 2 * PAGE_SIZE)
248 && (guestdest + n > prefix + 2 * PAGE_SIZE))
249 goto slowpath;
250
251 if (guestdest < 2 * PAGE_SIZE)
252 guestdest += prefix;
253 else if ((guestdest >= prefix) && (guestdest < prefix + 2 * PAGE_SIZE))
254 guestdest -= prefix;
255
092670cd 256 return __copy_to_guest_fast(vcpu, guestdest, from, n);
b0c632db
HC
257slowpath:
258 return __copy_to_guest_slow(vcpu, guestdest, from, n);
259}
260
261static inline int __copy_from_guest_slow(struct kvm_vcpu *vcpu, void *to,
0096369d
MS
262 unsigned long guestsrc,
263 unsigned long n)
b0c632db
HC
264{
265 int rc;
266 unsigned long i;
267 u8 *data = to;
268
269 for (i = 0; i < n; i++) {
270 rc = get_guest_u8(vcpu, guestsrc++, data++);
271 if (rc < 0)
272 return rc;
273 }
274 return 0;
275}
276
/*
 * Copy @n bytes from guest real address @guestsrc to host buffer @to
 * using copy_from_user() on whole chunks, faulting in each segment-table
 * (PMD-sized) region via gmap_fault() before reading it.  Does NOT
 * apply prefixing — callers must either pass addresses that need none
 * or pre-adjust them (see copy_from_guest()).  Returns 0 on success,
 * -EFAULT on a failed user copy or address overflow, or the error from
 * gmap_fault().
 */
static inline int __copy_from_guest_fast(struct kvm_vcpu *vcpu, void *to,
					 unsigned long guestsrc,
					 unsigned long n)
{
	int r;
	void __user *uptr;
	unsigned long size;

	/* reject wrap-around of the guest address range */
	if (guestsrc + n < guestsrc)
		return -EFAULT;

	/* simple case: all within one segment table entry? */
	if ((guestsrc & PMD_MASK) == ((guestsrc+n) & PMD_MASK)) {
		uptr = (void __user *) gmap_fault(guestsrc, vcpu->arch.gmap);

		if (IS_ERR((void __force *) uptr))
			return PTR_ERR((void __force *) uptr);

		/* copy_from_user returns the number of bytes NOT copied */
		r = copy_from_user(to, uptr, n);

		if (r)
			r = -EFAULT;

		goto out;
	}

	/* copy first segment: up to the next PMD boundary */
	uptr = (void __user *)gmap_fault(guestsrc, vcpu->arch.gmap);

	if (IS_ERR((void __force *) uptr))
		return PTR_ERR((void __force *) uptr);

	size = PMD_SIZE - (guestsrc & ~PMD_MASK);

	r = copy_from_user(to, uptr, size);

	if (r) {
		r = -EFAULT;
		goto out;
	}
	to += size;
	n -= size;
	guestsrc += size;

	/* copy full segments, re-resolving the host address for each */
	while (n >= PMD_SIZE) {
		uptr = (void __user *)gmap_fault(guestsrc, vcpu->arch.gmap);

		if (IS_ERR((void __force *) uptr))
			return PTR_ERR((void __force *) uptr);

		r = copy_from_user(to, uptr, PMD_SIZE);

		if (r) {
			r = -EFAULT;
			goto out;
		}
		to += PMD_SIZE;
		n -= PMD_SIZE;
		guestsrc += PMD_SIZE;
	}

	/* copy the tail segment (r is already 0 if this is skipped) */
	if (n) {
		uptr = (void __user *)gmap_fault(guestsrc, vcpu->arch.gmap);

		if (IS_ERR((void __force *) uptr))
			return PTR_ERR((void __force *) uptr);

		r = copy_from_user(to, uptr, n);

		if (r)
			r = -EFAULT;
	}
out:
	return r;
}
354
/*
 * Copy @n bytes from guest ABSOLUTE address @guestsrc to host buffer
 * @to, i.e. without applying the prefix swap done by copy_from_guest().
 */
static inline int copy_from_guest_absolute(struct kvm_vcpu *vcpu, void *to,
					   unsigned long guestsrc,
					   unsigned long n)
{
	return __copy_from_guest_fast(vcpu, to, guestsrc, n);
}
b0c632db 361
/*
 * Copy @n bytes from guest REAL address @guestsrc to host buffer @to,
 * honouring prefixing.  A copy whose range crosses one of the prefixing
 * boundaries (end of low core, start of the prefix area, end of the
 * prefix area) cannot be remapped as a single contiguous region, so it
 * falls back to the byte-wise slow path; otherwise the whole range is
 * prefix-adjusted once and copied fast.
 */
static inline int copy_from_guest(struct kvm_vcpu *vcpu, void *to,
				  unsigned long guestsrc, unsigned long n)
{
	unsigned long prefix  = vcpu->arch.sie_block->prefix;

	/* range straddles the end of low core (8k)? */
	if ((guestsrc < 2 * PAGE_SIZE) && (guestsrc + n > 2 * PAGE_SIZE))
		goto slowpath;

	/* range straddles the start of the prefix area? */
	if ((guestsrc < prefix) && (guestsrc + n > prefix))
		goto slowpath;

	/* range straddles the end of the prefix area? */
	if ((guestsrc < prefix + 2 * PAGE_SIZE)
	    && (guestsrc + n > prefix + 2 * PAGE_SIZE))
		goto slowpath;

	/* entirely inside one region: apply the prefix swap once */
	if (guestsrc < 2 * PAGE_SIZE)
		guestsrc += prefix;
	else if ((guestsrc >= prefix) && (guestsrc < prefix + 2 * PAGE_SIZE))
		guestsrc -= prefix;

	return __copy_from_guest_fast(vcpu, to, guestsrc, n);
slowpath:
	return __copy_from_guest_slow(vcpu, to, guestsrc, n);
}
386#endif
This page took 0.290091 seconds and 5 git commands to generate.