KVM: s390: add lowcore access functions
[deliverable/linux.git] / arch / s390 / kvm / gaccess.h
CommitLineData
b0c632db 1/*
a53c8fab 2 * access guest memory
b0c632db 3 *
d95fb12f 4 * Copyright IBM Corp. 2008, 2014
b0c632db
HC
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License (version 2 only)
8 * as published by the Free Software Foundation.
9 *
10 * Author(s): Carsten Otte <cotte@de.ibm.com>
11 */
12
13#ifndef __KVM_S390_GACCESS_H
14#define __KVM_S390_GACCESS_H
15
16#include <linux/compiler.h>
17#include <linux/kvm_host.h>
d95fb12f
HC
18#include <linux/uaccess.h>
19#include <linux/ptrace.h>
628eb9b8 20#include "kvm-s390.h"
b0c632db 21
732e5633
TH
22/* Convert real to absolute address by applying the prefix of the CPU */
23static inline unsigned long kvm_s390_real_to_abs(struct kvm_vcpu *vcpu,
24 unsigned long gaddr)
25{
26 unsigned long prefix = vcpu->arch.sie_block->prefix;
27 if (gaddr < 2 * PAGE_SIZE)
28 gaddr += prefix;
29 else if (gaddr >= prefix && gaddr < prefix + 2 * PAGE_SIZE)
30 gaddr -= prefix;
31 return gaddr;
32}
33
072c9878
HC
34/**
35 * kvm_s390_logical_to_effective - convert guest logical to effective address
36 * @vcpu: guest virtual cpu
37 * @ga: guest logical address
38 *
39 * Convert a guest vcpu logical address to a guest vcpu effective address by
40 * applying the rules of the vcpu's addressing mode defined by PSW bits 31
41 * and 32 (extendended/basic addressing mode).
42 *
43 * Depending on the vcpu's addressing mode the upper 40 bits (24 bit addressing
44 * mode), 33 bits (31 bit addressing mode) or no bits (64 bit addressing mode)
45 * of @ga will be zeroed and the remaining bits will be returned.
46 */
47static inline unsigned long kvm_s390_logical_to_effective(struct kvm_vcpu *vcpu,
48 unsigned long ga)
49{
50 psw_t *psw = &vcpu->arch.sie_block->gpsw;
51
52 if (psw_bits(*psw).eaba == PSW_AMODE_64BIT)
53 return ga;
54 if (psw_bits(*psw).eaba == PSW_AMODE_31BIT)
55 return ga & ((1UL << 31) - 1);
56 return ga & ((1UL << 24) - 1);
57}
58
0a75ca27
HC
59static inline void __user *__gptr_to_uptr(struct kvm_vcpu *vcpu,
60 void __user *gptr,
61 int prefixing)
b0c632db 62{
396083a9
HC
63 unsigned long gaddr = (unsigned long) gptr;
64 unsigned long uaddr;
65
732e5633
TH
66 if (prefixing)
67 gaddr = kvm_s390_real_to_abs(vcpu, gaddr);
396083a9
HC
68 uaddr = gmap_fault(gaddr, vcpu->arch.gmap);
69 if (IS_ERR_VALUE(uaddr))
70 uaddr = -EFAULT;
0a75ca27 71 return (void __user *)uaddr;
b0c632db
HC
72}
73
396083a9
HC
/*
 * get_guest - read a simple variable (1, 2, 4 or 8 bytes) from guest memory
 * @vcpu: guest virtual cpu
 * @x: lvalue that receives the value read
 * @gptr: guest real address, typed as a pointer to the wanted size
 *
 * Returns 0 on success or a negative error code on translation failure.
 */
#define get_guest(vcpu, x, gptr)				\
({								\
	__typeof__(gptr) __guestptr =				\
		__gptr_to_uptr(vcpu, gptr, 1);			\
	int __align = sizeof(__typeof__(*(gptr))) - 1;		\
	int __rc;						\
								\
	if (IS_ERR((void __force *)__guestptr)) {		\
		__rc = PTR_ERR((void __force *)__guestptr);	\
	} else {						\
		/* lowcore accesses must be naturally aligned */\
		BUG_ON((unsigned long)__guestptr & __align);	\
		__rc = get_user(x, __guestptr);			\
	}							\
	__rc;							\
})
88
/*
 * put_guest - write a simple variable (1, 2, 4 or 8 bytes) to guest memory
 * @vcpu: guest virtual cpu
 * @x: value to write
 * @gptr: guest real address, typed as a pointer to the wanted size
 *
 * Returns 0 on success or a negative error code on translation failure.
 */
#define put_guest(vcpu, x, gptr)				\
({								\
	__typeof__(gptr) __guestptr =				\
		__gptr_to_uptr(vcpu, gptr, 1);			\
	int __align = sizeof(__typeof__(*(gptr))) - 1;		\
	int __rc;						\
								\
	if (IS_ERR((void __force *)__guestptr)) {		\
		__rc = PTR_ERR((void __force *)__guestptr);	\
	} else {						\
		/* accesses must be naturally aligned */	\
		BUG_ON((unsigned long)__guestptr & __align);	\
		__rc = put_user(x, __guestptr);			\
	}							\
	__rc;							\
})
b0c632db 103
f9dc72e8
HC
104static inline int __copy_guest(struct kvm_vcpu *vcpu, unsigned long to,
105 unsigned long from, unsigned long len,
106 int to_guest, int prefixing)
b0c632db 107{
f9dc72e8 108 unsigned long _len, rc;
0a75ca27 109 void __user *uptr;
f9dc72e8
HC
110
111 while (len) {
0a75ca27 112 uptr = to_guest ? (void __user *)to : (void __user *)from;
f9dc72e8 113 uptr = __gptr_to_uptr(vcpu, uptr, prefixing);
0a75ca27 114 if (IS_ERR((void __force *)uptr))
f9dc72e8
HC
115 return -EFAULT;
116 _len = PAGE_SIZE - ((unsigned long)uptr & (PAGE_SIZE - 1));
117 _len = min(_len, len);
118 if (to_guest)
0a75ca27 119 rc = copy_to_user((void __user *) uptr, (void *)from, _len);
f9dc72e8 120 else
0a75ca27 121 rc = copy_from_user((void *)to, (void __user *)uptr, _len);
f9dc72e8
HC
122 if (rc)
123 return -EFAULT;
124 len -= _len;
125 from += _len;
126 to += _len;
b0c632db
HC
127 }
128 return 0;
129}
130
f9dc72e8
HC
/*
 * Wrappers around __copy_guest for the four combinations of copy
 * direction (to/from guest) and guest address handling: the plain
 * variants operate on real addresses (prefixing applied), the
 * _absolute variants on absolute addresses (no prefixing).
 */
#define copy_to_guest(vcpu, to, from, size) \
	__copy_guest(vcpu, to, (unsigned long)from, size, 1, 1)
#define copy_from_guest(vcpu, to, from, size) \
	__copy_guest(vcpu, (unsigned long)to, from, size, 0, 1)
#define copy_to_guest_absolute(vcpu, to, from, size) \
	__copy_guest(vcpu, to, (unsigned long)from, size, 1, 0)
#define copy_from_guest_absolute(vcpu, to, from, size) \
	__copy_guest(vcpu, (unsigned long)to, from, size, 0, 0)
092670cd 139
d95fb12f
HC
/*
 * put_guest_lc, read_guest_lc and write_guest_lc are guest access functions
 * which shall only be used to access the lowcore of a vcpu.
 * These functions should be used for e.g. interrupt handlers where no
 * guest memory access protection facilities, like key or low address
 * protection, are applicable.
 * At a later point guest vcpu lowcore access should happen via pinned
 * prefix pages, so that these pages can be accessed directly via the
 * kernel mapping. All of these *_lc functions can be removed then.
 */

/**
 * put_guest_lc - write a simple variable to a guest vcpu's lowcore
 * @vcpu: virtual cpu
 * @x: value to copy to guest
 * @gra: vcpu's destination guest real address
 *
 * Copies a simple value from kernel space to a guest vcpu's lowcore.
 * The size of the variable may be 1, 2, 4 or 8 bytes. The destination
 * must be located in the vcpu's lowcore. Otherwise the result is undefined.
 *
 * Returns zero on success or -EFAULT on error.
 *
 * Note: an error indicates that either the kernel is out of memory or
 * the guest memory mapping is broken. In any case the best solution
 * would be to terminate the guest.
 * It is wrong to inject a guest exception.
 */
#define put_guest_lc(vcpu, x, gra)				\
({								\
	struct kvm_vcpu *__lc_vcpu = (vcpu);			\
	__typeof__(*(gra)) __lc_val = (x);			\
	/* lowcore is real storage: prefix converts to absolute */ \
	unsigned long __lc_gpa = (unsigned long)(gra) +		\
				 __lc_vcpu->arch.sie_block->prefix; \
								\
	kvm_write_guest(__lc_vcpu->kvm, __lc_gpa, &__lc_val,	\
			sizeof(__lc_val));			\
})
178
179/**
180 * write_guest_lc - copy data from kernel space to guest vcpu's lowcore
181 * @vcpu: virtual cpu
182 * @gra: vcpu's source guest real address
183 * @data: source address in kernel space
184 * @len: number of bytes to copy
185 *
186 * Copy data from kernel space to guest vcpu's lowcore. The entire range must
187 * be located within the vcpu's lowcore, otherwise the result is undefined.
188 *
189 * Returns zero on success or -EFAULT on error.
190 *
191 * Note: an error indicates that either the kernel is out of memory or
192 * the guest memory mapping is broken. In any case the best solution
193 * would be to terminate the guest.
194 * It is wrong to inject a guest exception.
195 */
196static inline __must_check
197int write_guest_lc(struct kvm_vcpu *vcpu, unsigned long gra, void *data,
198 unsigned long len)
199{
200 unsigned long gpa = gra + vcpu->arch.sie_block->prefix;
201
202 return kvm_write_guest(vcpu->kvm, gpa, data, len);
203}
204
205/**
206 * read_guest_lc - copy data from guest vcpu's lowcore to kernel space
207 * @vcpu: virtual cpu
208 * @gra: vcpu's source guest real address
209 * @data: destination address in kernel space
210 * @len: number of bytes to copy
211 *
212 * Copy data from guest vcpu's lowcore to kernel space. The entire range must
213 * be located within the vcpu's lowcore, otherwise the result is undefined.
214 *
215 * Returns zero on success or -EFAULT on error.
216 *
217 * Note: an error indicates that either the kernel is out of memory or
218 * the guest memory mapping is broken. In any case the best solution
219 * would be to terminate the guest.
220 * It is wrong to inject a guest exception.
221 */
222static inline __must_check
223int read_guest_lc(struct kvm_vcpu *vcpu, unsigned long gra, void *data,
224 unsigned long len)
225{
226 unsigned long gpa = gra + vcpu->arch.sie_block->prefix;
227
228 return kvm_read_guest(vcpu->kvm, gpa, data, len);
229}
f9dc72e8 230#endif /* __KVM_S390_GACCESS_H */
This page took 0.357877 seconds and 5 git commands to generate.