KVM: PPC: e500: refactor core-specific TLB code
arch/powerpc/kvm/e500.h
/*
 * Copyright (C) 2008-2011 Freescale Semiconductor, Inc. All rights reserved.
 *
 * Author: Yu Liu <yu.liu@freescale.com>
 *
 * Description:
 * This file is based on arch/powerpc/kvm/44x_tlb.h and
 * arch/powerpc/include/asm/kvm_44x.h by Hollis Blanchard <hollisb@us.ibm.com>,
 * Copyright IBM Corp. 2007-2008
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 */

#ifndef KVM_E500_H
#define KVM_E500_H

#include <linux/kvm_host.h>
#include <asm/mmu-book3e.h>
#include <asm/tlb.h>

#define E500_PID_NUM   3
#define E500_TLB_NUM   2

#define E500_TLB_VALID 1
#define E500_TLB_DIRTY 2

struct tlbe_ref {
	pfn_t pfn;
	unsigned int flags; /* E500_TLB_* */
};

struct tlbe_priv {
	struct tlbe_ref ref; /* TLB0 only -- TLB1 uses tlb_refs */
};

#ifdef CONFIG_KVM_E500
struct vcpu_id_table;
#endif

struct kvmppc_e500_tlb_params {
	int entries, ways, sets;
};

struct kvmppc_vcpu_e500 {
	struct kvm_vcpu vcpu;

	/* Unmodified copy of the guest's TLB -- shared with host userspace. */
	struct kvm_book3e_206_tlb_entry *gtlb_arch;

	/* Starting entry number in gtlb_arch[] */
	int gtlb_offset[E500_TLB_NUM];

	/* KVM internal information associated with each guest TLB entry */
	struct tlbe_priv *gtlb_priv[E500_TLB_NUM];

	struct kvmppc_e500_tlb_params gtlb_params[E500_TLB_NUM];

	unsigned int gtlb_nv[E500_TLB_NUM];

	/*
	 * information associated with each host TLB entry --
	 * TLB1 only for now.  If/when guest TLB1 entries can be
	 * mapped with host TLB0, this will be used for that too.
	 *
	 * We don't want to use this for guest TLB0 because then we'd
	 * have the overhead of doing the translation again even if
	 * the entry is still in the guest TLB (e.g. we swapped out
	 * and back, and our host TLB entries got evicted).
	 */
	struct tlbe_ref *tlb_refs[E500_TLB_NUM];
	unsigned int host_tlb1_nv;

	u32 svr;
	u32 l1csr0;
	u32 l1csr1;
	u32 hid0;
	u32 hid1;
	u64 mcar;

	struct page **shared_tlb_pages;
	int num_shared_tlb_pages;

#ifdef CONFIG_KVM_E500
	u32 pid[E500_PID_NUM];

	/* vcpu id table */
	struct vcpu_id_table *idt;
#endif
};

static inline struct kvmppc_vcpu_e500 *to_e500(struct kvm_vcpu *vcpu)
{
	return container_of(vcpu, struct kvmppc_vcpu_e500, vcpu);
}

/* This geometry is the legacy default -- can be overridden by userspace */
#define KVM_E500_TLB0_WAY_SIZE		128
#define KVM_E500_TLB0_WAY_NUM		2

#define KVM_E500_TLB0_SIZE  (KVM_E500_TLB0_WAY_SIZE * KVM_E500_TLB0_WAY_NUM)
#define KVM_E500_TLB1_SIZE  16

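/*
 * Illustrative arithmetic: with the legacy default above, TLB0 has
 * KVM_E500_TLB0_WAY_NUM * KVM_E500_TLB0_WAY_SIZE = 2 * 128 = 256 entries,
 * and TLB1 has 16 entries.
 */
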
#define index_of(tlbsel, esel)	(((tlbsel) << 16) | ((esel) & 0xFFFF))
#define tlbsel_of(index)	((index) >> 16)
#define esel_of(index)		((index) & 0xFFFF)

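/*
 * Illustrative example: index_of(1, 3) packs TLB1, entry 3 into 0x10003;
 * tlbsel_of() and esel_of() recover the two fields from that value.
 */
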
#define E500_TLB_USER_PERM_MASK (MAS3_UX|MAS3_UR|MAS3_UW)
#define E500_TLB_SUPER_PERM_MASK (MAS3_SX|MAS3_SR|MAS3_SW)
#define MAS2_ATTRIB_MASK \
	  (MAS2_X0 | MAS2_X1)
#define MAS3_ATTRIB_MASK \
	  (MAS3_U0 | MAS3_U1 | MAS3_U2 | MAS3_U3 \
	   | E500_TLB_USER_PERM_MASK | E500_TLB_SUPER_PERM_MASK)

int kvmppc_e500_emul_mt_mmucsr0(struct kvmppc_vcpu_e500 *vcpu_e500,
				ulong value);
int kvmppc_e500_emul_tlbwe(struct kvm_vcpu *vcpu);
int kvmppc_e500_emul_tlbre(struct kvm_vcpu *vcpu);
int kvmppc_e500_emul_tlbivax(struct kvm_vcpu *vcpu, int ra, int rb);
int kvmppc_e500_emul_tlbsx(struct kvm_vcpu *vcpu, int rb);
int kvmppc_e500_tlb_init(struct kvmppc_vcpu_e500 *vcpu_e500);
void kvmppc_e500_tlb_uninit(struct kvmppc_vcpu_e500 *vcpu_e500);

void kvmppc_get_sregs_e500_tlb(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
int kvmppc_set_sregs_e500_tlb(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);

#ifdef CONFIG_KVM_E500
unsigned int kvmppc_e500_get_sid(struct kvmppc_vcpu_e500 *vcpu_e500,
				 unsigned int as, unsigned int gid,
				 unsigned int pr, int avoid_recursion);
#endif

/* TLB helper functions */
static inline unsigned int
get_tlb_size(const struct kvm_book3e_206_tlb_entry *tlbe)
{
	return (tlbe->mas1 >> 7) & 0x1f;
}

static inline gva_t get_tlb_eaddr(const struct kvm_book3e_206_tlb_entry *tlbe)
{
	return tlbe->mas2 & 0xfffff000;
}

static inline u64 get_tlb_bytes(const struct kvm_book3e_206_tlb_entry *tlbe)
{
	unsigned int pgsize = get_tlb_size(tlbe);
	return 1ULL << 10 << pgsize;
}

static inline gva_t get_tlb_end(const struct kvm_book3e_206_tlb_entry *tlbe)
{
	u64 bytes = get_tlb_bytes(tlbe);
	return get_tlb_eaddr(tlbe) + bytes - 1;
}

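/*
 * Illustrative example: get_tlb_bytes() maps a TSIZE value n to 1KB << n,
 * so an entry with TSIZE == 2 covers 4KB, and get_tlb_end() for such an
 * entry at effective address 0x10000000 returns 0x10000fff.
 */
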
static inline u64 get_tlb_raddr(const struct kvm_book3e_206_tlb_entry *tlbe)
{
	return tlbe->mas7_3 & ~0xfffULL;
}

static inline unsigned int
get_tlb_tid(const struct kvm_book3e_206_tlb_entry *tlbe)
{
	return (tlbe->mas1 >> 16) & 0xff;
}

static inline unsigned int
get_tlb_ts(const struct kvm_book3e_206_tlb_entry *tlbe)
{
	return (tlbe->mas1 >> 12) & 0x1;
}

static inline unsigned int
get_tlb_v(const struct kvm_book3e_206_tlb_entry *tlbe)
{
	return (tlbe->mas1 >> 31) & 0x1;
}

static inline unsigned int
get_tlb_iprot(const struct kvm_book3e_206_tlb_entry *tlbe)
{
	return (tlbe->mas1 >> 30) & 0x1;
}

static inline unsigned int
get_tlb_tsize(const struct kvm_book3e_206_tlb_entry *tlbe)
{
	return (tlbe->mas1 & MAS1_TSIZE_MASK) >> MAS1_TSIZE_SHIFT;
}

static inline unsigned int get_cur_pid(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.pid & 0xff;
}

static inline unsigned int get_cur_as(struct kvm_vcpu *vcpu)
{
	return !!(vcpu->arch.shared->msr & (MSR_IS | MSR_DS));
}

static inline unsigned int get_cur_pr(struct kvm_vcpu *vcpu)
{
	return !!(vcpu->arch.shared->msr & MSR_PR);
}

static inline unsigned int get_cur_spid(const struct kvm_vcpu *vcpu)
{
	return (vcpu->arch.shared->mas6 >> 16) & 0xff;
}

static inline unsigned int get_cur_sas(const struct kvm_vcpu *vcpu)
{
	return vcpu->arch.shared->mas6 & 0x1;
}

static inline unsigned int get_tlb_tlbsel(const struct kvm_vcpu *vcpu)
{
	/*
	 * The manual says tlbsel is 2 bits wide.  Since we only have
	 * two TLBs, only the lower bit is used.
	 */
	return (vcpu->arch.shared->mas0 >> 28) & 0x1;
}

static inline unsigned int get_tlb_nv_bit(const struct kvm_vcpu *vcpu)
{
	return vcpu->arch.shared->mas0 & 0xfff;
}

static inline unsigned int get_tlb_esel_bit(const struct kvm_vcpu *vcpu)
{
	return (vcpu->arch.shared->mas0 >> 16) & 0xfff;
}

static inline int tlbe_is_host_safe(const struct kvm_vcpu *vcpu,
				    const struct kvm_book3e_206_tlb_entry *tlbe)
{
	gpa_t gpa;

	if (!get_tlb_v(tlbe))
		return 0;

	/* Does it match current guest AS? */
	/* XXX what about IS != DS? */
	if (get_tlb_ts(tlbe) != !!(vcpu->arch.shared->msr & MSR_IS))
		return 0;

	gpa = get_tlb_raddr(tlbe);
	if (!gfn_to_memslot(vcpu->kvm, gpa >> PAGE_SHIFT))
		/* Mapping is not for RAM. */
		return 0;

	return 1;
}

static inline struct kvm_book3e_206_tlb_entry *get_entry(
	struct kvmppc_vcpu_e500 *vcpu_e500, int tlbsel, int entry)
{
	int offset = vcpu_e500->gtlb_offset[tlbsel];
	return &vcpu_e500->gtlb_arch[offset + entry];
}

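/*
 * Note: gtlb_arch[] is a single flat array holding both guest TLBs;
 * gtlb_offset[tlbsel] is where that TLB's entries start, so e.g.
 * get_entry(vcpu_e500, 1, 0) returns the first guest TLB1 entry.
 */
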
void kvmppc_e500_tlbil_one(struct kvmppc_vcpu_e500 *vcpu_e500,
			   struct kvm_book3e_206_tlb_entry *gtlbe);
void kvmppc_e500_tlbil_all(struct kvmppc_vcpu_e500 *vcpu_e500);

#ifdef CONFIG_KVM_E500
unsigned int kvmppc_e500_get_tlb_stid(struct kvm_vcpu *vcpu,
				      struct kvm_book3e_206_tlb_entry *gtlbe);

static inline unsigned int get_tlbmiss_tid(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	unsigned int tidseld = (vcpu->arch.shared->mas4 >> 16) & 0xf;

	return vcpu_e500->pid[tidseld];
}

/* Force TS=1 for all guest mappings. */
#define get_tlb_sts(gtlbe)	(MAS1_TS)
#endif /* CONFIG_KVM_E500 */

#endif /* KVM_E500_H */