KVM: PPC: Keep page physical addresses in per-slot arrays
[deliverable/linux.git] / arch / powerpc / kvm / book3s_64_mmu_hv.c
1 /*
2 * This program is free software; you can redistribute it and/or modify
3 * it under the terms of the GNU General Public License, version 2, as
4 * published by the Free Software Foundation.
5 *
6 * This program is distributed in the hope that it will be useful,
7 * but WITHOUT ANY WARRANTY; without even the implied warranty of
8 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
9 * GNU General Public License for more details.
10 *
11 * You should have received a copy of the GNU General Public License
12 * along with this program; if not, write to the Free Software
13 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
14 *
15 * Copyright 2010 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
16 */
17
18 #include <linux/types.h>
19 #include <linux/string.h>
20 #include <linux/kvm.h>
21 #include <linux/kvm_host.h>
22 #include <linux/highmem.h>
23 #include <linux/gfp.h>
24 #include <linux/slab.h>
25 #include <linux/hugetlb.h>
26 #include <linux/vmalloc.h>
27
28 #include <asm/tlbflush.h>
29 #include <asm/kvm_ppc.h>
30 #include <asm/kvm_book3s.h>
31 #include <asm/mmu-hash64.h>
32 #include <asm/hvcall.h>
33 #include <asm/synch.h>
34 #include <asm/ppc-opcode.h>
35 #include <asm/cputable.h>
36
37 /* Pages in the VRMA are 16MB pages */
38 #define VRMA_PAGE_ORDER 24
39 #define VRMA_VSID 0x1ffffffUL /* 1TB VSID reserved for VRMA */
40
41 /* POWER7 has 10-bit LPIDs, PPC970 has 6-bit LPIDs */
42 #define MAX_LPID_970 63
43 #define NR_LPIDS (LPID_RSVD + 1)
44 unsigned long lpid_inuse[BITS_TO_LONGS(NR_LPIDS)];
45
46 long kvmppc_alloc_hpt(struct kvm *kvm)
47 {
48 unsigned long hpt;
49 unsigned long lpid;
50 struct revmap_entry *rev;
51
52 /* Allocate guest's hashed page table */
53 hpt = __get_free_pages(GFP_KERNEL|__GFP_ZERO|__GFP_REPEAT|__GFP_NOWARN,
54 HPT_ORDER - PAGE_SHIFT);
55 if (!hpt) {
56 pr_err("kvm_alloc_hpt: Couldn't alloc HPT\n");
57 return -ENOMEM;
58 }
59 kvm->arch.hpt_virt = hpt;
60
61 /* Allocate reverse map array */
62 rev = vmalloc(sizeof(struct revmap_entry) * HPT_NPTE);
63 if (!rev) {
64 pr_err("kvmppc_alloc_hpt: Couldn't alloc reverse map array\n");
65 goto out_freehpt;
66 }
67 kvm->arch.revmap = rev;
68
69 /* Allocate the guest's logical partition ID */
70 do {
71 lpid = find_first_zero_bit(lpid_inuse, NR_LPIDS);
72 if (lpid >= NR_LPIDS) {
73 pr_err("kvm_alloc_hpt: No LPIDs free\n");
74 goto out_freeboth;
75 }
76 } while (test_and_set_bit(lpid, lpid_inuse));
77
78 kvm->arch.sdr1 = __pa(hpt) | (HPT_ORDER - 18);
79 kvm->arch.lpid = lpid;
80
81 pr_info("KVM guest htab at %lx, LPID %lx\n", hpt, lpid);
82 return 0;
83
84 out_freeboth:
85 vfree(rev);
86 out_freehpt:
87 free_pages(hpt, HPT_ORDER - PAGE_SHIFT);
88 return -ENOMEM;
89 }
90
91 void kvmppc_free_hpt(struct kvm *kvm)
92 {
93 clear_bit(kvm->arch.lpid, lpid_inuse);
94 vfree(kvm->arch.revmap);
95 free_pages(kvm->arch.hpt_virt, HPT_ORDER - PAGE_SHIFT);
96 }
97
98 void kvmppc_map_vrma(struct kvm *kvm, struct kvm_userspace_memory_region *mem)
99 {
100 unsigned long i;
101 unsigned long npages;
102 unsigned long pa;
103 unsigned long *hpte;
104 unsigned long hash;
105 unsigned long porder = kvm->arch.ram_porder;
106 struct revmap_entry *rev;
107 unsigned long *physp;
108
109 physp = kvm->arch.slot_phys[mem->slot];
110 npages = kvm->arch.slot_npages[mem->slot];
111
112 /* VRMA can't be > 1TB */
113 if (npages > 1ul << (40 - porder))
114 npages = 1ul << (40 - porder);
115 /* Can't use more than 1 HPTE per HPTEG */
116 if (npages > HPT_NPTEG)
117 npages = HPT_NPTEG;
118
119 for (i = 0; i < npages; ++i) {
120 pa = physp[i];
121 if (!pa)
122 break;
123 pa &= PAGE_MASK;
124 /* can't use hpt_hash since va > 64 bits */
125 hash = (i ^ (VRMA_VSID ^ (VRMA_VSID << 25))) & HPT_HASH_MASK;
126 /*
127 * We assume that the hash table is empty and no
128 * vcpus are using it at this stage. Since we create
129 * at most one HPTE per HPTEG, we just assume entry 7
130 * is available and use it.
131 */
132 hash = (hash << 3) + 7;
133 hpte = (unsigned long *) (kvm->arch.hpt_virt + (hash << 4));
134 /* HPTE low word - RPN, protection, etc. */
135 hpte[1] = pa | HPTE_R_R | HPTE_R_C | HPTE_R_M | PP_RWXX;
136 smp_wmb();
137 hpte[0] = HPTE_V_1TB_SEG | (VRMA_VSID << (40 - 16)) |
138 (i << (VRMA_PAGE_ORDER - 16)) | HPTE_V_BOLTED |
139 HPTE_V_LARGE | HPTE_V_VALID;
140
141 /* Reverse map info */
142 rev = &kvm->arch.revmap[hash];
143 rev->guest_rpte = (i << porder) | HPTE_R_R | HPTE_R_C |
144 HPTE_R_M | PP_RWXX;
145 }
146 }
147
148 int kvmppc_mmu_hv_init(void)
149 {
150 unsigned long host_lpid, rsvd_lpid;
151
152 if (!cpu_has_feature(CPU_FTR_HVMODE))
153 return -EINVAL;
154
155 memset(lpid_inuse, 0, sizeof(lpid_inuse));
156
157 if (cpu_has_feature(CPU_FTR_ARCH_206)) {
158 host_lpid = mfspr(SPRN_LPID); /* POWER7 */
159 rsvd_lpid = LPID_RSVD;
160 } else {
161 host_lpid = 0; /* PPC970 */
162 rsvd_lpid = MAX_LPID_970;
163 }
164
165 set_bit(host_lpid, lpid_inuse);
166 /* rsvd_lpid is reserved for use in partition switching */
167 set_bit(rsvd_lpid, lpid_inuse);
168
169 return 0;
170 }
171
/*
 * Per-vcpu MMU teardown hook.  Nothing to do in HV mode: all MMU state
 * is per-VM (HPT, revmap, LPID) and is freed by kvmppc_free_hpt().
 */
void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu)
{
}
175
176 static void kvmppc_mmu_book3s_64_hv_reset_msr(struct kvm_vcpu *vcpu)
177 {
178 kvmppc_set_msr(vcpu, MSR_SF | MSR_ME);
179 }
180
181 static int kvmppc_mmu_book3s_64_hv_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
182 struct kvmppc_pte *gpte, bool data)
183 {
184 return -ENOENT;
185 }
186
187 void kvmppc_mmu_book3s_hv_init(struct kvm_vcpu *vcpu)
188 {
189 struct kvmppc_mmu *mmu = &vcpu->arch.mmu;
190
191 if (cpu_has_feature(CPU_FTR_ARCH_206))
192 vcpu->arch.slb_nr = 32; /* POWER7 */
193 else
194 vcpu->arch.slb_nr = 64;
195
196 mmu->xlate = kvmppc_mmu_book3s_64_hv_xlate;
197 mmu->reset_msr = kvmppc_mmu_book3s_64_hv_reset_msr;
198
199 vcpu->arch.hflags |= BOOK3S_HFLAG_SLB;
200 }
This page took 0.080028 seconds and 5 git commands to generate.