KVM: PPC: Add support for Book3S processors in hypervisor mode
arch/powerpc/kvm/book3s_64_mmu_hv.c
/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright 2010 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 */

#include <linux/types.h>
#include <linux/string.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/highmem.h>
#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/hugetlb.h>

#include <asm/tlbflush.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/mmu-hash64.h>
#include <asm/hvcall.h>
#include <asm/synch.h>
#include <asm/ppc-opcode.h>
#include <asm/cputable.h>

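/*
 * Hash table geometry: each HPTE group (HPTEG) is 128 bytes and holds
 * 8 HPTEs of 16 bytes each, hence the "- 7" when computing the number
 * of groups from HPT_ORDER.
 */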
/* For now use fixed-size 16MB page table */
#define HPT_ORDER	24
#define HPT_NPTEG	(1ul << (HPT_ORDER - 7))	/* 128B per pteg */
#define HPT_HASH_MASK	(HPT_NPTEG - 1)

/* Pages in the VRMA are 16MB pages */
#define VRMA_PAGE_ORDER	24
#define VRMA_VSID	0x1ffffffUL	/* 1TB VSID reserved for VRMA */

#define NR_LPIDS	(LPID_RSVD + 1)
unsigned long lpid_inuse[BITS_TO_LONGS(NR_LPIDS)];

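/*
 * Allocate the guest's hashed page table (HPT) and claim an LPID
 * (logical partition ID) to identify this guest's translations.
 */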
long kvmppc_alloc_hpt(struct kvm *kvm)
{
	unsigned long hpt;
	unsigned long lpid;

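	/*
	 * The HPT must be one physically contiguous, zeroed block.
	 * This is a large (16MB) allocation, so ask the page allocator
	 * to try hard (__GFP_REPEAT) but skip the failure warning.
	 */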
	hpt = __get_free_pages(GFP_KERNEL|__GFP_ZERO|__GFP_REPEAT|__GFP_NOWARN,
			       HPT_ORDER - PAGE_SHIFT);
	if (!hpt) {
		pr_err("kvm_alloc_hpt: Couldn't alloc HPT\n");
		return -ENOMEM;
	}
	kvm->arch.hpt_virt = hpt;

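	/* Atomically claim a free LPID; loop again if another VM takes it first. */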
	do {
		lpid = find_first_zero_bit(lpid_inuse, NR_LPIDS);
		if (lpid >= NR_LPIDS) {
			pr_err("kvm_alloc_hpt: No LPIDs free\n");
			free_pages(hpt, HPT_ORDER - PAGE_SHIFT);
			return -ENOMEM;
		}
	} while (test_and_set_bit(lpid, lpid_inuse));

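	/*
	 * SDR1 combines the real address of the HPT with the HTABSIZE
	 * field, which encodes the table size as log2(bytes) - 18.
	 */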
	kvm->arch.sdr1 = __pa(hpt) | (HPT_ORDER - 18);
	kvm->arch.lpid = lpid;
	kvm->arch.host_sdr1 = mfspr(SPRN_SDR1);
	kvm->arch.host_lpid = mfspr(SPRN_LPID);
	kvm->arch.host_lpcr = mfspr(SPRN_LPCR);

	pr_info("KVM guest htab at %lx, LPID %lx\n", hpt, lpid);
	return 0;
}

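/*
 * Tear-down path: release the LPID and the HPT pages, and drop the
 * references taken on the VRMA backing pages by kvmppc_prepare_vrma().
 */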
void kvmppc_free_hpt(struct kvm *kvm)
{
	unsigned long i;
	struct kvmppc_pginfo *pginfo;

	clear_bit(kvm->arch.lpid, lpid_inuse);
	free_pages(kvm->arch.hpt_virt, HPT_ORDER - PAGE_SHIFT);

	if (kvm->arch.ram_pginfo) {
		pginfo = kvm->arch.ram_pginfo;
		kvm->arch.ram_pginfo = NULL;
		for (i = 0; i < kvm->arch.ram_npages; ++i)
			put_page(pfn_to_page(pginfo[i].pfn));
		kfree(pginfo);
	}
}

static unsigned long user_page_size(unsigned long addr)
{
	struct vm_area_struct *vma;
	unsigned long size = PAGE_SIZE;

	down_read(&current->mm->mmap_sem);
	vma = find_vma(current->mm, addr);
	/* find_vma() may return a VMA above addr; check it really contains it */
	if (vma && addr >= vma->vm_start)
		size = vma_kernel_pagesize(vma);
	up_read(&current->mm->mmap_sem);
	return size;
}

static pfn_t hva_to_pfn(unsigned long addr)
{
	struct page *page[1];
	int npages;

	might_sleep();

	npages = get_user_pages_fast(addr, 1, 1, page);

	if (unlikely(npages != 1))
		return 0;

	return page_to_pfn(page[0]);
}

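/*
 * Validate and pin the userspace memory that will back the guest's
 * VRMA (virtual real mode area); for now it must be mapped with
 * 16MB pages, and the region size must be a multiple of the page size.
 */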
long kvmppc_prepare_vrma(struct kvm *kvm,
			 struct kvm_userspace_memory_region *mem)
{
	unsigned long psize, porder;
	unsigned long i, npages;
	struct kvmppc_pginfo *pginfo;
	pfn_t pfn;
	unsigned long hva;

	/* First see what page size we have */
	psize = user_page_size(mem->userspace_addr);
	/* For now, only allow 16MB pages */
	if (psize != 1ul << VRMA_PAGE_ORDER ||
	    (mem->memory_size & (psize - 1))) {
		pr_err("bad psize=%lx memory_size=%llx @ %llx\n",
		       psize, mem->memory_size, mem->userspace_addr);
		return -EINVAL;
	}
	porder = __ilog2(psize);

	npages = mem->memory_size >> porder;
	pginfo = kzalloc(npages * sizeof(struct kvmppc_pginfo), GFP_KERNEL);
	if (!pginfo) {
		pr_err("kvmppc_prepare_vrma: couldn't alloc %lu bytes\n",
		       npages * sizeof(struct kvmppc_pginfo));
		return -ENOMEM;
	}

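	/*
	 * Pin each 16MB backing page via get_user_pages_fast() and record
	 * its pfn; on any failure, the err path below unpins them again.
	 */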
	for (i = 0; i < npages; ++i) {
		hva = mem->userspace_addr + (i << porder);
		if (user_page_size(hva) != psize)
			goto err;
		pfn = hva_to_pfn(hva);
		if (pfn == 0) {
			pr_err("oops, no pfn for hva %lx\n", hva);
			goto err;
		}
		if (pfn & ((1ul << (porder - PAGE_SHIFT)) - 1)) {
			pr_err("oops, unaligned pfn %llx\n", pfn);
			put_page(pfn_to_page(pfn));
			goto err;
		}
		pginfo[i].pfn = pfn;
	}

	kvm->arch.ram_npages = npages;
	kvm->arch.ram_psize = psize;
	kvm->arch.ram_porder = porder;
	kvm->arch.ram_pginfo = pginfo;

	return 0;

 err:
	/* Unpin any pages we had already pinned before failing */
	while (i--)
		put_page(pfn_to_page(pginfo[i].pfn));
	kfree(pginfo);
	return -EINVAL;
}

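/*
 * Enter bolted HPTEs for the VRMA, the region the guest addresses
 * while running with address translation off.  The mappings use the
 * reserved 1TB VSID defined above.
 */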
void kvmppc_map_vrma(struct kvm *kvm, struct kvm_userspace_memory_region *mem)
{
	unsigned long i;
	unsigned long npages = kvm->arch.ram_npages;
	unsigned long pfn;
	unsigned long *hpte;
	unsigned long hash;
	struct kvmppc_pginfo *pginfo = kvm->arch.ram_pginfo;

	if (!pginfo)
		return;

	/* VRMA can't be > 1TB */
	if (npages > 1ul << (40 - kvm->arch.ram_porder))
		npages = 1ul << (40 - kvm->arch.ram_porder);
	/* Can't use more than 1 HPTE per HPTEG */
	if (npages > HPT_NPTEG)
		npages = HPT_NPTEG;

	for (i = 0; i < npages; ++i) {
		pfn = pginfo[i].pfn;
		/* can't use hpt_hash since va > 64 bits */
		hash = (i ^ (VRMA_VSID ^ (VRMA_VSID << 25))) & HPT_HASH_MASK;
		/*
		 * We assume that the hash table is empty and no
		 * vcpus are using it at this stage.  Since we create
		 * at most one HPTE per HPTEG, we just assume entry 7
		 * is available and use it.
		 */
		hpte = (unsigned long *) (kvm->arch.hpt_virt + (hash << 7));
		hpte += 7 * 2;
		/* HPTE low word - RPN, protection, etc. */
		hpte[1] = (pfn << PAGE_SHIFT) | HPTE_R_R | HPTE_R_C |
			HPTE_R_M | PP_RWXX;
		wmb();
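		/*
		 * The barrier above makes the second doubleword (RPN and
		 * permissions) visible before we set HPTE_V_VALID in the
		 * first, so no cpu can see a valid but incomplete HPTE.
		 */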
		hpte[0] = HPTE_V_1TB_SEG | (VRMA_VSID << (40 - 16)) |
			(i << (VRMA_PAGE_ORDER - 16)) | HPTE_V_BOLTED |
			HPTE_V_LARGE | HPTE_V_VALID;
	}
}

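/*
 * One-time setup: mark the host's own LPID and the reserved LPID as
 * in use so they are never handed to a guest.  Requires a CPU with
 * hypervisor-mode support at ISA 2.06 level (e.g. POWER7).
 */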
int kvmppc_mmu_hv_init(void)
{
	if (!cpu_has_feature(CPU_FTR_HVMODE_206))
		return -EINVAL;
	memset(lpid_inuse, 0, sizeof(lpid_inuse));
	set_bit(mfspr(SPRN_LPID), lpid_inuse);
	set_bit(LPID_RSVD, lpid_inuse);

	return 0;
}

void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu)
{
}

static void kvmppc_mmu_book3s_64_hv_reset_msr(struct kvm_vcpu *vcpu)
{
	kvmppc_set_msr(vcpu, MSR_SF | MSR_ME);
}

static int kvmppc_mmu_book3s_64_hv_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
			struct kvmppc_pte *gpte, bool data)
{
	return -ENOENT;
}

void kvmppc_mmu_book3s_hv_init(struct kvm_vcpu *vcpu)
{
	struct kvmppc_mmu *mmu = &vcpu->arch.mmu;

	vcpu->arch.slb_nr = 32;		/* Assume POWER7 for now */

	mmu->xlate = kvmppc_mmu_book3s_64_hv_xlate;
	mmu->reset_msr = kvmppc_mmu_book3s_64_hv_reset_msr;

	vcpu->arch.hflags |= BOOK3S_HFLAG_SLB;
}