arch/powerpc/kvm/book3s_pr_papr.c
/*
 * Copyright (C) 2011. Freescale Inc. All rights reserved.
 *
 * Authors:
 *    Alexander Graf <agraf@suse.de>
 *    Paul Mackerras <paulus@samba.org>
 *
 * Description:
 *
 * Hypercall handling for running PAPR guests in PR KVM on Book 3S
 * processors.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 */

#include <asm/uaccess.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>

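/*
 * Compute the userspace address of the PTE group that pte_index falls in.
 * For PAPR guests under PR KVM the hash page table lives in host userspace
 * memory: SDR1 carries the table's base address in its upper bits and the
 * table size (HTABSIZE) in its low 5 bits.  Each HPTE is 16 bytes, so the
 * index is scaled, masked against the table size, and ORed onto the base.
 */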
static unsigned long get_pteg_addr(struct kvm_vcpu *vcpu, long pte_index)
{
        struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
        unsigned long pteg_addr;

        pte_index <<= 4;
        pte_index &= ((1 << ((vcpu_book3s->sdr1 & 0x1f) + 11)) - 1) << 7 | 0x70;
        pteg_addr = vcpu_book3s->sdr1 & 0xfffffffffffc0000ULL;
        pteg_addr |= pte_index;

        return pteg_addr;
}

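/*
 * H_ENTER: install an HPTE in the guest hash table.
 * r4 = flags (H_EXACT forces the exact slot encoded in r5), r5 = PTE index,
 * r6/r7 = the two HPTE doublewords.  The whole 8-entry PTEG is copied in,
 * modified, and copied back; on success r3 = H_SUCCESS and r4 reports the
 * slot that was actually used.
 */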
static int kvmppc_h_pr_enter(struct kvm_vcpu *vcpu)
{
        long flags = kvmppc_get_gpr(vcpu, 4);
        long pte_index = kvmppc_get_gpr(vcpu, 5);
        unsigned long pteg[2 * 8];
        unsigned long pteg_addr, i, *hpte;

        pte_index &= ~7UL;
        pteg_addr = get_pteg_addr(vcpu, pte_index);

        copy_from_user(pteg, (void __user *)pteg_addr, sizeof(pteg));
        hpte = pteg;

        if (likely((flags & H_EXACT) == 0)) {
                pte_index &= ~7UL;
                for (i = 0; ; ++i) {
                        if (i == 8)
                                return H_PTEG_FULL;
                        if ((*hpte & HPTE_V_VALID) == 0)
                                break;
                        hpte += 2;
                }
        } else {
                i = kvmppc_get_gpr(vcpu, 5) & 7UL;
                hpte += i * 2;
        }

        hpte[0] = kvmppc_get_gpr(vcpu, 6);
        hpte[1] = kvmppc_get_gpr(vcpu, 7);
        copy_to_user((void __user *)pteg_addr, pteg, sizeof(pteg));
        kvmppc_set_gpr(vcpu, 3, H_SUCCESS);
        kvmppc_set_gpr(vcpu, 4, pte_index | i);

        return EMULATE_DONE;
}

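/*
 * H_REMOVE: invalidate a single HPTE.
 * r4 = flags (H_AVPN and/or H_ANDCOND match conditions), r5 = PTE index,
 * r6 = AVPN or AND-mask.  If the entry matches, its first (valid)
 * doubleword is cleared, the old translation is flushed with tlbie, and
 * the removed HPTE is returned in r4/r5.
 */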
static int kvmppc_h_pr_remove(struct kvm_vcpu *vcpu)
{
        unsigned long flags = kvmppc_get_gpr(vcpu, 4);
        unsigned long pte_index = kvmppc_get_gpr(vcpu, 5);
        unsigned long avpn = kvmppc_get_gpr(vcpu, 6);
        unsigned long v = 0, pteg, rb;
        unsigned long pte[2];

        pteg = get_pteg_addr(vcpu, pte_index);
        copy_from_user(pte, (void __user *)pteg, sizeof(pte));

        if ((pte[0] & HPTE_V_VALID) == 0 ||
            ((flags & H_AVPN) && (pte[0] & ~0x7fUL) != avpn) ||
            ((flags & H_ANDCOND) && (pte[0] & avpn) != 0)) {
                kvmppc_set_gpr(vcpu, 3, H_NOT_FOUND);
                return EMULATE_DONE;
        }

        copy_to_user((void __user *)pteg, &v, sizeof(v));

        rb = compute_tlbie_rb(pte[0], pte[1], pte_index);
        vcpu->arch.mmu.tlbie(vcpu, rb, rb & 1 ? true : false);

        kvmppc_set_gpr(vcpu, 3, H_SUCCESS);
        kvmppc_set_gpr(vcpu, 4, pte[0]);
        kvmppc_set_gpr(vcpu, 5, pte[1]);

        return EMULATE_DONE;
}

/* Request defs for kvmppc_h_pr_bulk_remove() */
#define H_BULK_REMOVE_TYPE       0xc000000000000000ULL
#define H_BULK_REMOVE_REQUEST    0x4000000000000000ULL
#define H_BULK_REMOVE_RESPONSE   0x8000000000000000ULL
#define H_BULK_REMOVE_END        0xc000000000000000ULL
#define H_BULK_REMOVE_CODE       0x3000000000000000ULL
#define H_BULK_REMOVE_SUCCESS    0x0000000000000000ULL
#define H_BULK_REMOVE_NOT_FOUND  0x1000000000000000ULL
#define H_BULK_REMOVE_PARM       0x2000000000000000ULL
#define H_BULK_REMOVE_HW         0x3000000000000000ULL
#define H_BULK_REMOVE_RC         0x0c00000000000000ULL
#define H_BULK_REMOVE_FLAGS      0x0300000000000000ULL
#define H_BULK_REMOVE_ABSOLUTE   0x0000000000000000ULL
#define H_BULK_REMOVE_ANDCOND    0x0100000000000000ULL
#define H_BULK_REMOVE_AVPN       0x0200000000000000ULL
#define H_BULK_REMOVE_PTEX       0x00ffffffffffffffULL
#define H_BULK_REMOVE_MAX_BATCH  4

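/*
 * H_BULK_REMOVE: process up to four translation specifier pairs
 * (tsh/tsl, passed in r4..r11).  Each request is handled like H_REMOVE;
 * a per-entry response code is written back into the high bits of the
 * corresponding tsh register, and r3 reports overall success or the
 * first parameter error.
 */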
static int kvmppc_h_pr_bulk_remove(struct kvm_vcpu *vcpu)
{
        int i;
        int paramnr = 4;
        int ret = H_SUCCESS;

        for (i = 0; i < H_BULK_REMOVE_MAX_BATCH; i++) {
                unsigned long tsh = kvmppc_get_gpr(vcpu, paramnr+(2*i));
                unsigned long tsl = kvmppc_get_gpr(vcpu, paramnr+(2*i)+1);
                unsigned long pteg, rb, flags;
                unsigned long pte[2];
                unsigned long v = 0;

                if ((tsh & H_BULK_REMOVE_TYPE) == H_BULK_REMOVE_END) {
                        break; /* Exit success */
                } else if ((tsh & H_BULK_REMOVE_TYPE) !=
                           H_BULK_REMOVE_REQUEST) {
                        ret = H_PARAMETER;
                        break; /* Exit fail */
                }

                tsh &= H_BULK_REMOVE_PTEX | H_BULK_REMOVE_FLAGS;
                tsh |= H_BULK_REMOVE_RESPONSE;

                if ((tsh & H_BULK_REMOVE_ANDCOND) &&
                    (tsh & H_BULK_REMOVE_AVPN)) {
                        tsh |= H_BULK_REMOVE_PARM;
                        kvmppc_set_gpr(vcpu, paramnr+(2*i), tsh);
                        ret = H_PARAMETER;
                        break; /* Exit fail */
                }

                pteg = get_pteg_addr(vcpu, tsh & H_BULK_REMOVE_PTEX);
                copy_from_user(pte, (void __user *)pteg, sizeof(pte));

                /* tsl = AVPN */
                flags = (tsh & H_BULK_REMOVE_FLAGS) >> 26;

                if ((pte[0] & HPTE_V_VALID) == 0 ||
                    ((flags & H_AVPN) && (pte[0] & ~0x7fUL) != tsl) ||
                    ((flags & H_ANDCOND) && (pte[0] & tsl) != 0)) {
                        tsh |= H_BULK_REMOVE_NOT_FOUND;
                } else {
                        /* Splat the pteg in (userland) hpt */
                        copy_to_user((void __user *)pteg, &v, sizeof(v));

                        rb = compute_tlbie_rb(pte[0], pte[1],
                                              tsh & H_BULK_REMOVE_PTEX);
                        vcpu->arch.mmu.tlbie(vcpu, rb, rb & 1 ? true : false);
                        tsh |= H_BULK_REMOVE_SUCCESS;
                        tsh |= (pte[1] & (HPTE_R_C | HPTE_R_R)) << 43;
                }
                kvmppc_set_gpr(vcpu, paramnr+(2*i), tsh);
        }
        kvmppc_set_gpr(vcpu, 3, ret);

        return EMULATE_DONE;
}

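/*
 * H_PROTECT: change the protection and key bits of an existing HPTE.
 * r4 = flags carrying the new pp0/pp/N/key bits (plus an optional H_AVPN
 * match), r5 = PTE index, r6 = AVPN.  The second HPTE doubleword is
 * rewritten, the stale translation is flushed with tlbie, and the updated
 * entry is copied back to the guest hash table.
 */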
static int kvmppc_h_pr_protect(struct kvm_vcpu *vcpu)
{
        unsigned long flags = kvmppc_get_gpr(vcpu, 4);
        unsigned long pte_index = kvmppc_get_gpr(vcpu, 5);
        unsigned long avpn = kvmppc_get_gpr(vcpu, 6);
        unsigned long rb, pteg, r, v;
        unsigned long pte[2];

        pteg = get_pteg_addr(vcpu, pte_index);
        copy_from_user(pte, (void __user *)pteg, sizeof(pte));

        if ((pte[0] & HPTE_V_VALID) == 0 ||
            ((flags & H_AVPN) && (pte[0] & ~0x7fUL) != avpn)) {
                kvmppc_set_gpr(vcpu, 3, H_NOT_FOUND);
                return EMULATE_DONE;
        }

        v = pte[0];
        r = pte[1];
        r &= ~(HPTE_R_PP0 | HPTE_R_PP | HPTE_R_N | HPTE_R_KEY_HI |
               HPTE_R_KEY_LO);
        r |= (flags << 55) & HPTE_R_PP0;
        r |= (flags << 48) & HPTE_R_KEY_HI;
        r |= flags & (HPTE_R_PP | HPTE_R_N | HPTE_R_KEY_LO);

        pte[1] = r;

        rb = compute_tlbie_rb(v, r, pte_index);
        vcpu->arch.mmu.tlbie(vcpu, rb, rb & 1 ? true : false);
        copy_to_user((void __user *)pteg, pte, sizeof(pte));

        kvmppc_set_gpr(vcpu, 3, H_SUCCESS);

        return EMULATE_DONE;
}

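/*
 * Top-level PAPR hypercall dispatcher for PR KVM.  Handled hcalls return
 * EMULATE_DONE; EMULATE_FAIL lets the caller fall back (e.g. by deferring
 * the hcall to userspace).  H_CEDE is handled inline by blocking the vcpu
 * until it is woken.
 */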
int kvmppc_h_pr(struct kvm_vcpu *vcpu, unsigned long cmd)
{
        switch (cmd) {
        case H_ENTER:
                return kvmppc_h_pr_enter(vcpu);
        case H_REMOVE:
                return kvmppc_h_pr_remove(vcpu);
        case H_PROTECT:
                return kvmppc_h_pr_protect(vcpu);
        case H_BULK_REMOVE:
                return kvmppc_h_pr_bulk_remove(vcpu);
        case H_CEDE:
                kvm_vcpu_block(vcpu);
                vcpu->stat.halt_wakeup++;
                return EMULATE_DONE;
        }

        return EMULATE_FAIL;
}