/*
 * Copyright (C) 2008-2011 Freescale Semiconductor, Inc. All rights reserved.
 *
 * Author: Yu Liu, yu.liu@freescale.com
 *         Scott Wood, scottwood@freescale.com
 *         Ashish Kalra, ashish.kalra@freescale.com
 *         Varun Sethi, varun.sethi@freescale.com
 *
 * This file is based on arch/powerpc/kvm/44x_tlb.c,
 * by Hollis Blanchard <hollisb@us.ibm.com>.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 */
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/highmem.h>
#include <linux/log2.h>
#include <linux/uaccess.h>
#include <linux/sched.h>
#include <linux/rwsem.h>
#include <linux/vmalloc.h>
#include <linux/hugetlb.h>
#include <asm/kvm_ppc.h>

#include "e500.h"
#include "trace.h"
#include "timing.h"
#define to_htlb1_esel(esel) (host_tlb_params[1].entries - (esel) - 1)

static struct kvmppc_e500_tlb_params host_tlb_params[E500_TLB_NUM];
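/*
 * KVM carves its shadow TLB1 entries out of the top of the host TLB1
 * (the bottom tlbcam_index entries belong to the host's own mappings),
 * so to_htlb1_esel() converts a 0-based shadow index into a hardware
 * ESEL counting down from the last entry.
 */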
static inline unsigned int gtlb0_get_next_victim(
		struct kvmppc_vcpu_e500 *vcpu_e500)
{
	unsigned int victim;

	victim = vcpu_e500->gtlb_nv[0]++;
	if (unlikely(vcpu_e500->gtlb_nv[0] >= vcpu_e500->gtlb_params[0].ways))
		vcpu_e500->gtlb_nv[0] = 0;

	return victim;
}
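/*
 * TLB0 is set-associative and the set is fixed by the faulting address,
 * so the round-robin victim counter above only has to cycle through the
 * ways of a set.
 */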
static inline unsigned int tlb1_max_shadow_size(void)
{
	/* reserve one entry for magic page */
	return host_tlb_params[1].entries - tlbcam_index - 1;
}
static inline int tlbe_is_writable(struct kvm_book3e_206_tlb_entry *tlbe)
{
	return tlbe->mas7_3 & (MAS3_SW|MAS3_UW);
}
static inline u32 e500_shadow_mas3_attrib(u32 mas3, int usermode)
{
	/* Mask off reserved bits. */
	mas3 &= MAS3_ATTRIB_MASK;

#ifndef CONFIG_KVM_BOOKE_HV
	if (!usermode) {
		/* Guest is in supervisor mode,
		 * so we need to translate guest
		 * supervisor permissions into user permissions. */
		mas3 &= ~E500_TLB_USER_PERM_MASK;
		mas3 |= (mas3 & E500_TLB_SUPER_PERM_MASK) << 1;
	}
	mas3 |= E500_TLB_SUPER_PERM_MASK;
#endif
	return mas3;
}
static inline u32 e500_shadow_mas2_attrib(u32 mas2, int usermode)
{
#ifdef CONFIG_SMP
	return (mas2 & MAS2_ATTRIB_MASK) | MAS2_M;
#else
	return mas2 & MAS2_ATTRIB_MASK;
#endif
}
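/*
 * On SMP hosts the shadow mapping is always made coherent (MAS2_M): the
 * vcpu may be scheduled on any physical core, so its pages can be
 * accessed from more than one cpu over time.
 */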
/*
 * writing shadow tlb entry to host TLB
 */
static inline void __write_host_tlbe(struct kvm_book3e_206_tlb_entry *stlbe,
				     uint32_t mas0)
{
	unsigned long flags;

	local_irq_save(flags);
	mtspr(SPRN_MAS0, mas0);
	mtspr(SPRN_MAS1, stlbe->mas1);
	mtspr(SPRN_MAS2, (unsigned long)stlbe->mas2);
	mtspr(SPRN_MAS3, (u32)stlbe->mas7_3);
	mtspr(SPRN_MAS7, (u32)(stlbe->mas7_3 >> 32));
#ifdef CONFIG_KVM_BOOKE_HV
	mtspr(SPRN_MAS8, stlbe->mas8);
#endif
	asm volatile("isync; tlbwe" : : : "memory");

#ifdef CONFIG_KVM_BOOKE_HV
	/* Must clear mas8 for other host tlbwe's */
	mtspr(SPRN_MAS8, 0);
	isync();
#endif
	local_irq_restore(flags);

	trace_kvm_booke206_stlb_write(mas0, stlbe->mas8, stlbe->mas1,
				      stlbe->mas2, stlbe->mas7_3);
}
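/*
 * The entry is staged in the MAS registers and committed by tlbwe.
 * Interrupts stay disabled for the whole sequence because the MAS
 * registers are per-cpu state; any interrupt-context code that touched
 * them would corrupt the half-written entry.
 */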
/*
 * Acquire a mas0 with victim hint, as if we just took a TLB miss.
 *
 * We don't care about the address we're searching for, other than that it's
 * in the right set and is not present in the TLB.  Using a zero PID and a
 * userspace address means we don't have to set and then restore MAS5, or
 * calculate a proper MAS6 value.
 */
static u32 get_host_mas0(unsigned long eaddr)
{
	unsigned long flags;
	u32 mas0;

	local_irq_save(flags);
	mtspr(SPRN_MAS6, 0);
	asm volatile("tlbsx 0, %0" : : "b" (eaddr & ~CONFIG_PAGE_OFFSET));
	mas0 = mfspr(SPRN_MAS0);
	local_irq_restore(flags);

	return mas0;
}
/* sesel is for tlb1 only */
static inline void write_host_tlbe(struct kvmppc_vcpu_e500 *vcpu_e500,
		int tlbsel, int sesel, struct kvm_book3e_206_tlb_entry *stlbe)
{
	u32 mas0;

	if (tlbsel == 0) {
		mas0 = get_host_mas0(stlbe->mas2);
		__write_host_tlbe(stlbe, mas0);
	} else {
		__write_host_tlbe(stlbe,
				  MAS0_TLBSEL(1) |
				  MAS0_ESEL(to_htlb1_esel(sesel)));
	}
}
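/*
 * For TLB0 the slot is chosen by hardware: get_host_mas0() runs a tlbsx
 * so MAS0 already carries a victim hint for the right set.  For TLB1 the
 * caller picks the shadow slot (sesel) and to_htlb1_esel() maps it onto
 * the KVM-owned region at the top of the array.
 */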
#ifdef CONFIG_KVM_E500V2
void kvmppc_map_magic(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	struct kvm_book3e_206_tlb_entry magic;
	ulong shared_page = ((ulong)vcpu->arch.shared) & PAGE_MASK;
	unsigned int stid;
	pfn_t pfn;

	pfn = (pfn_t)virt_to_phys((void *)shared_page) >> PAGE_SHIFT;
	get_page(pfn_to_page(pfn));

	preempt_disable();
	stid = kvmppc_e500_get_sid(vcpu_e500, 0, 0, 0, 0);

	magic.mas1 = MAS1_VALID | MAS1_TS | MAS1_TID(stid) |
		     MAS1_TSIZE(BOOK3E_PAGESZ_4K);
	magic.mas2 = vcpu->arch.magic_page_ea | MAS2_M;
	magic.mas7_3 = ((u64)pfn << PAGE_SHIFT) |
		       MAS3_SW | MAS3_SR | MAS3_UW | MAS3_UR;
	magic.mas8 = 0;

	__write_host_tlbe(&magic, MAS0_TLBSEL(1) | MAS0_ESEL(tlbcam_index));
	preempt_enable();
}
#endif
static void inval_gtlbe_on_host(struct kvmppc_vcpu_e500 *vcpu_e500,
		int tlbsel, int esel)
{
	struct kvm_book3e_206_tlb_entry *gtlbe =
		get_entry(vcpu_e500, tlbsel, esel);

	if (tlbsel == 1 &&
	    vcpu_e500->gtlb_priv[1][esel].ref.flags & E500_TLB_BITMAP) {
		u64 tmp = vcpu_e500->g2h_tlb1_map[esel];
		int hw_tlb_indx;
		unsigned long flags;

		local_irq_save(flags);
		while (tmp) {
			hw_tlb_indx = __ilog2_u64(tmp & -tmp);
			mtspr(SPRN_MAS0,
			      MAS0_TLBSEL(1) |
			      MAS0_ESEL(to_htlb1_esel(hw_tlb_indx)));
			mtspr(SPRN_MAS1, 0);
			asm volatile("tlbwe");
			vcpu_e500->h2g_tlb1_rmap[hw_tlb_indx] = 0;
			tmp &= tmp - 1;
		}
		mb();
		vcpu_e500->g2h_tlb1_map[esel] = 0;
		vcpu_e500->gtlb_priv[1][esel].ref.flags &= ~E500_TLB_BITMAP;
		local_irq_restore(flags);

		return;
	}

	/* Guest tlbe is backed by at most one host tlbe per shadow pid. */
	kvmppc_e500_tlbil_one(vcpu_e500, gtlbe);
}
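/*
 * The loop above walks the g2h bitmap one set bit at a time: tmp & -tmp
 * isolates the lowest set bit, __ilog2_u64() converts it to a host TLB1
 * slot number, and tmp &= tmp - 1 clears it before the next iteration.
 */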
static int tlb0_set_base(gva_t addr, int sets, int ways)
{
	int set_base;

	set_base = (addr >> PAGE_SHIFT) & (sets - 1);
	set_base *= ways;

	return set_base;
}
static int gtlb0_set_base(struct kvmppc_vcpu_e500 *vcpu_e500, gva_t addr)
{
	return tlb0_set_base(addr, vcpu_e500->gtlb_params[0].sets,
			     vcpu_e500->gtlb_params[0].ways);
}
static unsigned int get_tlb_esel(struct kvm_vcpu *vcpu, int tlbsel)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	int esel = get_tlb_esel_bit(vcpu);

	if (tlbsel == 0) {
		esel &= vcpu_e500->gtlb_params[0].ways - 1;
		esel += gtlb0_set_base(vcpu_e500, vcpu->arch.shared->mas2);
	} else {
		esel &= vcpu_e500->gtlb_params[tlbsel].entries - 1;
	}

	return esel;
}
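/*
 * For TLB0, MAS0[ESEL] names only the way; the set comes from the
 * effective address in MAS2, so the flat index into the guest TLB array
 * is way + set_base.  TLB1 is fully associative and ESEL is used as-is.
 */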
/* Search the guest TLB for a matching entry. */
static int kvmppc_e500_tlb_index(struct kvmppc_vcpu_e500 *vcpu_e500,
		gva_t eaddr, int tlbsel, unsigned int pid, int as)
{
	int size = vcpu_e500->gtlb_params[tlbsel].entries;
	unsigned int set_base, offset;
	int i;

	if (tlbsel == 0) {
		set_base = gtlb0_set_base(vcpu_e500, eaddr);
		size = vcpu_e500->gtlb_params[0].ways;
	} else {
		if (eaddr < vcpu_e500->tlb1_min_eaddr ||
				eaddr > vcpu_e500->tlb1_max_eaddr)
			return -1;
		set_base = 0;
	}

	offset = vcpu_e500->gtlb_offset[tlbsel];

	for (i = 0; i < size; i++) {
		struct kvm_book3e_206_tlb_entry *tlbe =
			&vcpu_e500->gtlb_arch[offset + set_base + i];
		unsigned int tid;

		if (eaddr < get_tlb_eaddr(tlbe))
			continue;

		if (eaddr > get_tlb_end(tlbe))
			continue;

		tid = get_tlb_tid(tlbe);
		if (tid && (tid != pid))
			continue;

		if (!get_tlb_v(tlbe))
			continue;

		if (get_tlb_ts(tlbe) != as && as != -1)
			continue;

		return set_base + i;
	}

	return -1;
}
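/*
 * The lookup only scans the ways of the one set an address can occupy
 * in TLB0; for TLB1 it scans linearly, but bails out early via the
 * cached tlb1_min_eaddr/tlb1_max_eaddr window.
 */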
static inline void kvmppc_e500_ref_setup(struct tlbe_ref *ref,
					 struct kvm_book3e_206_tlb_entry *gtlbe,
					 pfn_t pfn)
{
	ref->pfn = pfn;
	ref->flags = E500_TLB_VALID;

	if (tlbe_is_writable(gtlbe)) {
		ref->flags |= E500_TLB_DIRTY;
		kvm_set_pfn_dirty(pfn);
	}
}
static inline void kvmppc_e500_ref_release(struct tlbe_ref *ref)
{
	if (ref->flags & E500_TLB_VALID) {
		trace_kvm_booke206_ref_release(ref->pfn, ref->flags);
		ref->flags = 0;
	}
}
static void clear_tlb1_bitmap(struct kvmppc_vcpu_e500 *vcpu_e500)
{
	if (vcpu_e500->g2h_tlb1_map)
		memset(vcpu_e500->g2h_tlb1_map, 0,
		       sizeof(u64) * vcpu_e500->gtlb_params[1].entries);
	if (vcpu_e500->h2g_tlb1_rmap)
		memset(vcpu_e500->h2g_tlb1_rmap, 0,
		       sizeof(unsigned int) * host_tlb_params[1].entries);
}
static void clear_tlb_privs(struct kvmppc_vcpu_e500 *vcpu_e500)
{
	int tlbsel, i;

	for (tlbsel = 0; tlbsel <= 1; tlbsel++)
		for (i = 0; i < vcpu_e500->gtlb_params[tlbsel].entries; i++) {
			struct tlbe_ref *ref =
				&vcpu_e500->gtlb_priv[tlbsel][i].ref;
			kvmppc_e500_ref_release(ref);
		}
}
static void clear_tlb_refs(struct kvmppc_vcpu_e500 *vcpu_e500)
{
	int stlbsel = 1;
	int i;

	kvmppc_e500_tlbil_all(vcpu_e500);

	for (i = 0; i < host_tlb_params[stlbsel].entries; i++) {
		struct tlbe_ref *ref =
			&vcpu_e500->tlb_refs[stlbsel][i];
		kvmppc_e500_ref_release(ref);
	}

	clear_tlb_privs(vcpu_e500);
}
void kvmppc_core_flush_tlb(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	clear_tlb_refs(vcpu_e500);
	clear_tlb1_bitmap(vcpu_e500);
}
static inline void kvmppc_e500_deliver_tlb_miss(struct kvm_vcpu *vcpu,
		unsigned int eaddr, int as)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	unsigned int victim, tsized;
	int tlbsel;

	/* since we only have two TLBs, only lower bit is used. */
	tlbsel = (vcpu->arch.shared->mas4 >> 28) & 0x1;
	victim = (tlbsel == 0) ? gtlb0_get_next_victim(vcpu_e500) : 0;
	tsized = (vcpu->arch.shared->mas4 >> 7) & 0x1f;

	vcpu->arch.shared->mas0 = MAS0_TLBSEL(tlbsel) | MAS0_ESEL(victim)
		| MAS0_NV(vcpu_e500->gtlb_nv[tlbsel]);
	vcpu->arch.shared->mas1 = MAS1_VALID | (as ? MAS1_TS : 0)
		| MAS1_TID(get_tlbmiss_tid(vcpu))
		| MAS1_TSIZE(tsized);
	vcpu->arch.shared->mas2 = (eaddr & MAS2_EPN)
		| (vcpu->arch.shared->mas4 & MAS2_ATTRIB_MASK);
	vcpu->arch.shared->mas7_3 &= MAS3_U0 | MAS3_U1 | MAS3_U2 | MAS3_U3;
	vcpu->arch.shared->mas6 = (vcpu->arch.shared->mas6 & MAS6_SPID1)
		| (get_cur_pid(vcpu) << 16)
		| (as ? MAS6_SAS : 0);
}
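/*
 * This reproduces what the hardware itself does on a TLB miss: MAS0-MAS6
 * are preloaded from the MAS4 defaults (victim TLB, default TSIZE and
 * attributes), so the guest's miss handler only has to fill in the
 * translation and execute tlbwe.
 */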
/* TID must be supplied by the caller */
static inline void kvmppc_e500_setup_stlbe(
	struct kvm_vcpu *vcpu,
	struct kvm_book3e_206_tlb_entry *gtlbe,
	int tsize, struct tlbe_ref *ref, u64 gvaddr,
	struct kvm_book3e_206_tlb_entry *stlbe)
{
	pfn_t pfn = ref->pfn;
	u32 pr = vcpu->arch.shared->msr & MSR_PR;

	BUG_ON(!(ref->flags & E500_TLB_VALID));

	/* Force IPROT=0 for all guest mappings. */
	stlbe->mas1 = MAS1_TSIZE(tsize) | get_tlb_sts(gtlbe) | MAS1_VALID;
	stlbe->mas2 = (gvaddr & MAS2_EPN) |
		      e500_shadow_mas2_attrib(gtlbe->mas2, pr);
	stlbe->mas7_3 = ((u64)pfn << PAGE_SHIFT) |
			e500_shadow_mas3_attrib(gtlbe->mas7_3, pr);

#ifdef CONFIG_KVM_BOOKE_HV
	stlbe->mas8 = MAS8_TGS | vcpu->kvm->arch.lpid;
#endif
}
static inline void kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
	u64 gvaddr, gfn_t gfn, struct kvm_book3e_206_tlb_entry *gtlbe,
	int tlbsel, struct kvm_book3e_206_tlb_entry *stlbe,
	struct tlbe_ref *ref)
{
	struct kvm_memory_slot *slot;
	unsigned long pfn, hva;
	int pfnmap = 0;
	int tsize = BOOK3E_PAGESZ_4K;

	/*
	 * Translate guest physical to true physical, acquiring
	 * a page reference if it is normal, non-reserved memory.
	 *
	 * gfn_to_memslot() must succeed because otherwise we wouldn't
	 * have gotten this far.  Eventually we should just pass the slot
	 * pointer through from the first lookup.
	 */
	slot = gfn_to_memslot(vcpu_e500->vcpu.kvm, gfn);
	hva = gfn_to_hva_memslot(slot, gfn);

	if (tlbsel == 1) {
		struct vm_area_struct *vma;
		down_read(&current->mm->mmap_sem);

		vma = find_vma(current->mm, hva);
		if (vma && hva >= vma->vm_start &&
		    (vma->vm_flags & VM_PFNMAP)) {
			/*
			 * This VMA is a physically contiguous region (e.g.
			 * /dev/mem) that bypasses normal Linux page
			 * management.  Find the overlap between the
			 * vma and the memslot.
			 */

			unsigned long start, end;
			unsigned long slot_start, slot_end;

			pfnmap = 1;

			start = vma->vm_pgoff;
			end = start +
			      ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT);

			pfn = start + ((hva - vma->vm_start) >> PAGE_SHIFT);

			slot_start = pfn - (gfn - slot->base_gfn);
			slot_end = slot_start + slot->npages;

			if (start < slot_start)
				start = slot_start;
			if (end > slot_end)
				end = slot_end;

			tsize = (gtlbe->mas1 & MAS1_TSIZE_MASK) >>
				MAS1_TSIZE_SHIFT;

			/*
			 * e500 doesn't implement the lowest tsize bit,
			 * or 1K pages.
			 */
			tsize = max(BOOK3E_PAGESZ_4K, tsize & ~1);

			/*
			 * Now find the largest tsize (up to what the guest
			 * requested) that will cover gfn, stay within the
			 * range, and for which gfn and pfn are mutually
			 * aligned.
			 */

			for (; tsize > BOOK3E_PAGESZ_4K; tsize -= 2) {
				unsigned long gfn_start, gfn_end, tsize_pages;
				tsize_pages = 1 << (tsize - 2);

				gfn_start = gfn & ~(tsize_pages - 1);
				gfn_end = gfn_start + tsize_pages;

				if (gfn_start + pfn - gfn < start)
					continue;
				if (gfn_end + pfn - gfn > end)
					continue;
				if ((gfn & (tsize_pages - 1)) !=
				    (pfn & (tsize_pages - 1)))
					continue;

				gvaddr &= ~((tsize_pages << PAGE_SHIFT) - 1);
				pfn &= ~(tsize_pages - 1);
				break;
			}
		} else if (vma && hva >= vma->vm_start &&
			   (vma->vm_flags & VM_HUGETLB)) {
			unsigned long psize = vma_kernel_pagesize(vma);

			tsize = (gtlbe->mas1 & MAS1_TSIZE_MASK) >>
				MAS1_TSIZE_SHIFT;

			/*
			 * Take the largest page size that satisfies both host
			 * and guest mapping
			 */
			tsize = min(__ilog2(psize) - 10, tsize);

			/*
			 * e500 doesn't implement the lowest tsize bit,
			 * or 1K pages.
			 */
			tsize = max(BOOK3E_PAGESZ_4K, tsize & ~1);
		}

		up_read(&current->mm->mmap_sem);
	}

	if (likely(!pfnmap)) {
		unsigned long tsize_pages = 1 << (tsize + 10 - PAGE_SHIFT);
		pfn = gfn_to_pfn_memslot(slot, gfn);
		if (is_error_pfn(pfn)) {
			printk(KERN_ERR "Couldn't get real page for gfn %lx!\n",
					(long)gfn);
			return;
		}

		/* Align guest and physical address to page map boundaries */
		pfn &= ~(tsize_pages - 1);
		gvaddr &= ~((tsize_pages << PAGE_SHIFT) - 1);
	}

	/* Drop old ref and setup new one. */
	kvmppc_e500_ref_release(ref);
	kvmppc_e500_ref_setup(ref, gtlbe, pfn);

	kvmppc_e500_setup_stlbe(&vcpu_e500->vcpu, gtlbe, tsize,
				ref, gvaddr, stlbe);

	/* Clear i-cache for new pages */
	kvmppc_mmu_flush_icache(pfn);

	/* Drop refcount on page, so that mmu notifiers can clear it */
	kvm_release_pfn_clean(pfn);
}
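/*
 * TSIZE arithmetic used above: MAS1[TSIZE] encodes log2 of the page size
 * in KB, so a given tsize spans 1 << (tsize + 10) bytes, i.e.
 * 1 << (tsize + 10 - PAGE_SHIFT) host pages.  e500 implements only
 * power-of-4 sizes (even tsize values), hence the & ~1 masking and the
 * tsize -= 2 stepping when searching for the largest usable size.
 */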
/* XXX only map the one-one case, for now use TLB0 */
static void kvmppc_e500_tlb0_map(struct kvmppc_vcpu_e500 *vcpu_e500,
				 int esel,
				 struct kvm_book3e_206_tlb_entry *stlbe)
{
	struct kvm_book3e_206_tlb_entry *gtlbe;
	struct tlbe_ref *ref;

	gtlbe = get_entry(vcpu_e500, 0, esel);
	ref = &vcpu_e500->gtlb_priv[0][esel].ref;

	kvmppc_e500_shadow_map(vcpu_e500, get_tlb_eaddr(gtlbe),
			get_tlb_raddr(gtlbe) >> PAGE_SHIFT,
			gtlbe, 0, stlbe, ref);
}
/* Caller must ensure that the specified guest TLB entry is safe to insert into
 * the shadow TLB. */
/* XXX for both one-one and one-to-many, for now use TLB1 */
static int kvmppc_e500_tlb1_map(struct kvmppc_vcpu_e500 *vcpu_e500,
		u64 gvaddr, gfn_t gfn, struct kvm_book3e_206_tlb_entry *gtlbe,
		struct kvm_book3e_206_tlb_entry *stlbe, int esel)
{
	struct tlbe_ref *ref;
	unsigned int victim;

	victim = vcpu_e500->host_tlb1_nv++;

	if (unlikely(vcpu_e500->host_tlb1_nv >= tlb1_max_shadow_size()))
		vcpu_e500->host_tlb1_nv = 0;

	ref = &vcpu_e500->tlb_refs[1][victim];
	kvmppc_e500_shadow_map(vcpu_e500, gvaddr, gfn, gtlbe, 1, stlbe, ref);

	vcpu_e500->g2h_tlb1_map[esel] |= (u64)1 << victim;
	vcpu_e500->gtlb_priv[1][esel].ref.flags |= E500_TLB_BITMAP;
	if (vcpu_e500->h2g_tlb1_rmap[victim]) {
		unsigned int idx = vcpu_e500->h2g_tlb1_rmap[victim];
		vcpu_e500->g2h_tlb1_map[idx] &= ~(1ULL << victim);
	}
	vcpu_e500->h2g_tlb1_rmap[victim] = esel;

	return victim;
}
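/*
 * TLB1 bookkeeping: g2h_tlb1_map[esel] is a bitmap of every host slot
 * currently shadowing guest entry esel (a large guest mapping can be
 * shadowed piecewise), and h2g_tlb1_rmap[victim] records which guest
 * entry owns a host slot.  Recycling a slot therefore clears its bit in
 * the previous owner's bitmap first.
 */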
static void kvmppc_recalc_tlb1map_range(struct kvmppc_vcpu_e500 *vcpu_e500)
{
	int size = vcpu_e500->gtlb_params[1].entries;
	unsigned int offset;
	gva_t eaddr;
	int i;

	vcpu_e500->tlb1_min_eaddr = ~0UL;
	vcpu_e500->tlb1_max_eaddr = 0;
	offset = vcpu_e500->gtlb_offset[1];

	for (i = 0; i < size; i++) {
		struct kvm_book3e_206_tlb_entry *tlbe =
			&vcpu_e500->gtlb_arch[offset + i];

		if (!get_tlb_v(tlbe))
			continue;

		eaddr = get_tlb_eaddr(tlbe);
		vcpu_e500->tlb1_min_eaddr =
			min(vcpu_e500->tlb1_min_eaddr, eaddr);

		eaddr = get_tlb_end(tlbe);
		vcpu_e500->tlb1_max_eaddr =
			max(vcpu_e500->tlb1_max_eaddr, eaddr);
	}
}
static int kvmppc_need_recalc_tlb1map_range(struct kvmppc_vcpu_e500 *vcpu_e500,
		struct kvm_book3e_206_tlb_entry *gtlbe)
{
	unsigned long start, end, size;

	size = get_tlb_bytes(gtlbe);
	start = get_tlb_eaddr(gtlbe) & ~(size - 1);
	end = start + size - 1;

	return vcpu_e500->tlb1_min_eaddr == start ||
	       vcpu_e500->tlb1_max_eaddr == end;
}
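/*
 * Only an entry that currently forms the lower or upper bound of the
 * cached range can shrink it when removed; any other invalidation can
 * skip the full rescan in kvmppc_recalc_tlb1map_range().
 */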
/* This function is supposed to be called for adding a new valid tlb entry */
static void kvmppc_set_tlb1map_range(struct kvm_vcpu *vcpu,
		struct kvm_book3e_206_tlb_entry *gtlbe)
{
	unsigned long start, end, size;
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);

	if (!get_tlb_v(gtlbe))
		return;

	size = get_tlb_bytes(gtlbe);
	start = get_tlb_eaddr(gtlbe) & ~(size - 1);
	end = start + size - 1;

	vcpu_e500->tlb1_min_eaddr = min(vcpu_e500->tlb1_min_eaddr, start);
	vcpu_e500->tlb1_max_eaddr = max(vcpu_e500->tlb1_max_eaddr, end);
}
static inline int kvmppc_e500_gtlbe_invalidate(
				struct kvmppc_vcpu_e500 *vcpu_e500,
				int tlbsel, int esel)
{
	struct kvm_book3e_206_tlb_entry *gtlbe =
		get_entry(vcpu_e500, tlbsel, esel);

	if (unlikely(get_tlb_iprot(gtlbe)))
		return -1;

	if (tlbsel == 1 && kvmppc_need_recalc_tlb1map_range(vcpu_e500, gtlbe))
		kvmppc_recalc_tlb1map_range(vcpu_e500);

	gtlbe->mas1 = 0;

	return 0;
}
int kvmppc_e500_emul_mt_mmucsr0(struct kvmppc_vcpu_e500 *vcpu_e500, ulong value)
{
	int esel;

	if (value & MMUCSR0_TLB0FI)
		for (esel = 0; esel < vcpu_e500->gtlb_params[0].entries; esel++)
			kvmppc_e500_gtlbe_invalidate(vcpu_e500, 0, esel);
	if (value & MMUCSR0_TLB1FI)
		for (esel = 0; esel < vcpu_e500->gtlb_params[1].entries; esel++)
			kvmppc_e500_gtlbe_invalidate(vcpu_e500, 1, esel);

	/* Invalidate all vcpu id mappings */
	kvmppc_e500_tlbil_all(vcpu_e500);

	return EMULATE_DONE;
}
int kvmppc_e500_emul_tlbivax(struct kvm_vcpu *vcpu, int ra, int rb)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	unsigned int ia;
	int esel, tlbsel;
	gva_t ea;

	ea = ((ra) ? kvmppc_get_gpr(vcpu, ra) : 0) + kvmppc_get_gpr(vcpu, rb);

	ia = (ea >> 2) & 0x1;

	/* since we only have two TLBs, only lower bit is used. */
	tlbsel = (ea >> 3) & 0x1;

	if (ia) {
		/* invalidate all entries */
		for (esel = 0; esel < vcpu_e500->gtlb_params[tlbsel].entries;
		     esel++)
			kvmppc_e500_gtlbe_invalidate(vcpu_e500, tlbsel, esel);
	} else {
		ea &= 0xfffff000;
		esel = kvmppc_e500_tlb_index(vcpu_e500, ea, tlbsel,
				get_cur_pid(vcpu), -1);
		if (esel >= 0)
			kvmppc_e500_gtlbe_invalidate(vcpu_e500, tlbsel, esel);
	}

	/* Invalidate all vcpu id mappings */
	kvmppc_e500_tlbil_all(vcpu_e500);

	return EMULATE_DONE;
}
static void tlbilx_all(struct kvmppc_vcpu_e500 *vcpu_e500, int tlbsel,
		       int pid, int rt)
{
	struct kvm_book3e_206_tlb_entry *tlbe;
	int tid, esel;

	/* invalidate all entries */
	for (esel = 0; esel < vcpu_e500->gtlb_params[tlbsel].entries; esel++) {
		tlbe = get_entry(vcpu_e500, tlbsel, esel);
		tid = get_tlb_tid(tlbe);
		if (rt == 0 || tid == pid) {
			inval_gtlbe_on_host(vcpu_e500, tlbsel, esel);
			kvmppc_e500_gtlbe_invalidate(vcpu_e500, tlbsel, esel);
		}
	}
}
static void tlbilx_one(struct kvmppc_vcpu_e500 *vcpu_e500, int pid,
		       int ra, int rb)
{
	int tlbsel, esel;
	gva_t ea;

	ea = kvmppc_get_gpr(&vcpu_e500->vcpu, rb);
	if (ra)
		ea += kvmppc_get_gpr(&vcpu_e500->vcpu, ra);

	for (tlbsel = 0; tlbsel < 2; tlbsel++) {
		esel = kvmppc_e500_tlb_index(vcpu_e500, ea, tlbsel, pid, -1);
		if (esel >= 0) {
			inval_gtlbe_on_host(vcpu_e500, tlbsel, esel);
			kvmppc_e500_gtlbe_invalidate(vcpu_e500, tlbsel, esel);
			break;
		}
	}
}
int kvmppc_e500_emul_tlbilx(struct kvm_vcpu *vcpu, int rt, int ra, int rb)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	int pid = get_cur_spid(vcpu);

	if (rt == 0 || rt == 1) {
		tlbilx_all(vcpu_e500, 0, pid, rt);
		tlbilx_all(vcpu_e500, 1, pid, rt);
	} else if (rt == 3) {
		tlbilx_one(vcpu_e500, pid, ra, rb);
	}

	return EMULATE_DONE;
}
int kvmppc_e500_emul_tlbre(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	int tlbsel, esel;
	struct kvm_book3e_206_tlb_entry *gtlbe;

	tlbsel = get_tlb_tlbsel(vcpu);
	esel = get_tlb_esel(vcpu, tlbsel);

	gtlbe = get_entry(vcpu_e500, tlbsel, esel);
	vcpu->arch.shared->mas0 &= ~MAS0_NV(~0);
	vcpu->arch.shared->mas0 |= MAS0_NV(vcpu_e500->gtlb_nv[tlbsel]);
	vcpu->arch.shared->mas1 = gtlbe->mas1;
	vcpu->arch.shared->mas2 = gtlbe->mas2;
	vcpu->arch.shared->mas7_3 = gtlbe->mas7_3;

	return EMULATE_DONE;
}
int kvmppc_e500_emul_tlbsx(struct kvm_vcpu *vcpu, int rb)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	int as = !!get_cur_sas(vcpu);
	unsigned int pid = get_cur_spid(vcpu);
	int esel, tlbsel;
	struct kvm_book3e_206_tlb_entry *gtlbe = NULL;
	gva_t ea;

	ea = kvmppc_get_gpr(vcpu, rb);

	for (tlbsel = 0; tlbsel < 2; tlbsel++) {
		esel = kvmppc_e500_tlb_index(vcpu_e500, ea, tlbsel, pid, as);
		if (esel >= 0) {
			gtlbe = get_entry(vcpu_e500, tlbsel, esel);
			break;
		}
	}

	if (gtlbe) {
		esel &= vcpu_e500->gtlb_params[tlbsel].ways - 1;

		vcpu->arch.shared->mas0 = MAS0_TLBSEL(tlbsel) | MAS0_ESEL(esel)
			| MAS0_NV(vcpu_e500->gtlb_nv[tlbsel]);
		vcpu->arch.shared->mas1 = gtlbe->mas1;
		vcpu->arch.shared->mas2 = gtlbe->mas2;
		vcpu->arch.shared->mas7_3 = gtlbe->mas7_3;
	} else {
		int victim;

		/* since we only have two TLBs, only lower bit is used. */
		tlbsel = vcpu->arch.shared->mas4 >> 28 & 0x1;
		victim = (tlbsel == 0) ? gtlb0_get_next_victim(vcpu_e500) : 0;

		vcpu->arch.shared->mas0 = MAS0_TLBSEL(tlbsel)
			| MAS0_ESEL(victim)
			| MAS0_NV(vcpu_e500->gtlb_nv[tlbsel]);
		vcpu->arch.shared->mas1 =
			  (vcpu->arch.shared->mas6 & MAS6_SPID0)
			| (vcpu->arch.shared->mas6 & (MAS6_SAS ? MAS1_TS : 0))
			| (vcpu->arch.shared->mas4 & MAS4_TSIZED(~0));
		vcpu->arch.shared->mas2 &= MAS2_EPN;
		vcpu->arch.shared->mas2 |= vcpu->arch.shared->mas4 &
					   MAS2_ATTRIB_MASK;
		vcpu->arch.shared->mas7_3 &= MAS3_U0 | MAS3_U1 |
					     MAS3_U2 | MAS3_U3;
	}

	kvmppc_set_exit_type(vcpu, EMULATED_TLBSX_EXITS);
	return EMULATE_DONE;
}
/* sesel is for tlb1 only */
static void write_stlbe(struct kvmppc_vcpu_e500 *vcpu_e500,
			struct kvm_book3e_206_tlb_entry *gtlbe,
			struct kvm_book3e_206_tlb_entry *stlbe,
			int stlbsel, int sesel)
{
	int stid;

	preempt_disable();
	stid = kvmppc_e500_get_tlb_stid(&vcpu_e500->vcpu, gtlbe);

	stlbe->mas1 |= MAS1_TID(stid);
	write_host_tlbe(vcpu_e500, stlbsel, sesel, stlbe);
	preempt_enable();
}
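/*
 * Preemption is disabled so the shadow TID returned by
 * kvmppc_e500_get_tlb_stid() (a per-cpu id mapping on e500v2) is still
 * valid on the cpu that ends up executing the tlbwe.
 */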
int kvmppc_e500_emul_tlbwe(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	struct kvm_book3e_206_tlb_entry *gtlbe, stlbe;
	int tlbsel, esel, stlbsel, sesel;
	int recal = 0;

	tlbsel = get_tlb_tlbsel(vcpu);
	esel = get_tlb_esel(vcpu, tlbsel);

	gtlbe = get_entry(vcpu_e500, tlbsel, esel);

	if (get_tlb_v(gtlbe)) {
		inval_gtlbe_on_host(vcpu_e500, tlbsel, esel);
		if ((tlbsel == 1) &&
			kvmppc_need_recalc_tlb1map_range(vcpu_e500, gtlbe))
			recal = 1;
	}

	gtlbe->mas1 = vcpu->arch.shared->mas1;
	gtlbe->mas2 = vcpu->arch.shared->mas2;
	gtlbe->mas7_3 = vcpu->arch.shared->mas7_3;

	trace_kvm_booke206_gtlb_write(vcpu->arch.shared->mas0, gtlbe->mas1,
				      gtlbe->mas2, gtlbe->mas7_3);

	if (tlbsel == 1) {
		/*
		 * If a valid tlb1 entry is overwritten then recalculate the
		 * min/max TLB1 map address range otherwise no need to look
		 * in tlb1 array.
		 */
		if (recal)
			kvmppc_recalc_tlb1map_range(vcpu_e500);
		else
			kvmppc_set_tlb1map_range(vcpu, gtlbe);
	}

	/* Invalidate shadow mappings for the about-to-be-clobbered TLBE. */
	if (tlbe_is_host_safe(vcpu, gtlbe)) {
		u64 eaddr;
		u64 raddr;

		switch (tlbsel) {
		case 0:
			/* TLB0 */
			gtlbe->mas1 &= ~MAS1_TSIZE(~0);
			gtlbe->mas1 |= MAS1_TSIZE(BOOK3E_PAGESZ_4K);

			stlbsel = 0;
			kvmppc_e500_tlb0_map(vcpu_e500, esel, &stlbe);
			sesel = 0; /* unused */

			break;

		case 1:
			eaddr = get_tlb_eaddr(gtlbe);
			raddr = get_tlb_raddr(gtlbe);

			/* Create a 4KB mapping on the host.
			 * If the guest wanted a large page,
			 * only the first 4KB is mapped here and the rest
			 * are mapped on the fly. */
			stlbsel = 1;
			sesel = kvmppc_e500_tlb1_map(vcpu_e500, eaddr,
				    raddr >> PAGE_SHIFT, gtlbe, &stlbe, esel);
			break;

		default:
			BUG();
		}

		write_stlbe(vcpu_e500, gtlbe, &stlbe, stlbsel, sesel);
	}

	kvmppc_set_exit_type(vcpu, EMULATED_TLBWE_EXITS);
	return EMULATE_DONE;
}
static int kvmppc_e500_tlb_search(struct kvm_vcpu *vcpu,
				  gva_t eaddr, unsigned int pid, int as)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	int esel, tlbsel;

	for (tlbsel = 0; tlbsel < 2; tlbsel++) {
		esel = kvmppc_e500_tlb_index(vcpu_e500, eaddr, tlbsel, pid, as);
		if (esel >= 0)
			return index_of(tlbsel, esel);
	}

	return -1;
}
/* 'linear_address' is actually an encoding of AS|PID|EADDR . */
int kvmppc_core_vcpu_translate(struct kvm_vcpu *vcpu,
			       struct kvm_translation *tr)
{
	int index;
	gva_t eaddr;
	u8 pid;
	u8 as;

	eaddr = tr->linear_address;
	pid = (tr->linear_address >> 32) & 0xff;
	as = (tr->linear_address >> 40) & 0x1;

	index = kvmppc_e500_tlb_search(vcpu, eaddr, pid, as);
	if (index < 0) {
		tr->valid = 0;
		return 0;
	}

	tr->physical_address = kvmppc_mmu_xlate(vcpu, index, eaddr);
	/* XXX what does "writeable" and "usermode" even mean? */
	tr->valid = 1;

	return 0;
}
int kvmppc_mmu_itlb_index(struct kvm_vcpu *vcpu, gva_t eaddr)
{
	unsigned int as = !!(vcpu->arch.shared->msr & MSR_IS);

	return kvmppc_e500_tlb_search(vcpu, eaddr, get_cur_pid(vcpu), as);
}
int kvmppc_mmu_dtlb_index(struct kvm_vcpu *vcpu, gva_t eaddr)
{
	unsigned int as = !!(vcpu->arch.shared->msr & MSR_DS);

	return kvmppc_e500_tlb_search(vcpu, eaddr, get_cur_pid(vcpu), as);
}
void kvmppc_mmu_itlb_miss(struct kvm_vcpu *vcpu)
{
	unsigned int as = !!(vcpu->arch.shared->msr & MSR_IS);

	kvmppc_e500_deliver_tlb_miss(vcpu, vcpu->arch.pc, as);
}
void kvmppc_mmu_dtlb_miss(struct kvm_vcpu *vcpu)
{
	unsigned int as = !!(vcpu->arch.shared->msr & MSR_DS);

	kvmppc_e500_deliver_tlb_miss(vcpu, vcpu->arch.fault_dear, as);
}
gpa_t kvmppc_mmu_xlate(struct kvm_vcpu *vcpu, unsigned int index,
			gva_t eaddr)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	struct kvm_book3e_206_tlb_entry *gtlbe;
	u64 pgmask;

	gtlbe = get_entry(vcpu_e500, tlbsel_of(index), esel_of(index));
	pgmask = get_tlb_bytes(gtlbe) - 1;

	return get_tlb_raddr(gtlbe) | (eaddr & pgmask);
}
void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu)
{
}
void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 eaddr, gpa_t gpaddr,
			unsigned int index)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	struct tlbe_priv *priv;
	struct kvm_book3e_206_tlb_entry *gtlbe, stlbe;
	int tlbsel = tlbsel_of(index);
	int esel = esel_of(index);
	int stlbsel, sesel;

	gtlbe = get_entry(vcpu_e500, tlbsel, esel);

	switch (tlbsel) {
	case 0:
		stlbsel = 0;
		sesel = 0; /* unused */
		priv = &vcpu_e500->gtlb_priv[tlbsel][esel];

		/* Only triggers after clear_tlb_refs */
		if (unlikely(!(priv->ref.flags & E500_TLB_VALID)))
			kvmppc_e500_tlb0_map(vcpu_e500, esel, &stlbe);
		else
			kvmppc_e500_setup_stlbe(vcpu, gtlbe, BOOK3E_PAGESZ_4K,
						&priv->ref, eaddr, &stlbe);
		break;

	case 1: {
		gfn_t gfn = gpaddr >> PAGE_SHIFT;

		stlbsel = 1;
		sesel = kvmppc_e500_tlb1_map(vcpu_e500, eaddr, gfn,
					     gtlbe, &stlbe, esel);
		break;
	}

	default:
		BUG();
		break;
	}

	write_stlbe(vcpu_e500, gtlbe, &stlbe, stlbsel, sesel);
}
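/*
 * kvmppc_mmu_map() runs when the guest TLB translates an address but the
 * host TLB does not: TLB0 entries are reshadowed from the cached ref
 * when it is still valid, while TLB1 entries always go through
 * kvmppc_e500_tlb1_map(), which shadows large guest pages piecewise.
 */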
/************* MMU Notifiers *************/

int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
{
	trace_kvm_unmap_hva(hva);

	/*
	 * Flush all shadow tlb entries everywhere. This is slow, but
	 * we are 100% sure that we catch the to be unmapped page
	 */
	kvm_flush_remote_tlbs(kvm);

	return 0;
}

int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end)
{
	/* kvm_unmap_hva flushes everything anyways */
	kvm_unmap_hva(kvm, start);

	return 0;
}

int kvm_age_hva(struct kvm *kvm, unsigned long hva)
{
	/* XXX could be more clever ;) */
	return 0;
}

int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
{
	/* XXX could be more clever ;) */
	return 0;
}

void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
{
	/* The page will get remapped properly on its next fault */
	kvm_unmap_hva(kvm, hva);
}
/*****************************************/

static void free_gtlb(struct kvmppc_vcpu_e500 *vcpu_e500)
{
	int i;

	clear_tlb1_bitmap(vcpu_e500);
	kfree(vcpu_e500->g2h_tlb1_map);
	clear_tlb_refs(vcpu_e500);
	kfree(vcpu_e500->gtlb_priv[0]);
	kfree(vcpu_e500->gtlb_priv[1]);

	if (vcpu_e500->shared_tlb_pages) {
		vfree((void *)(round_down((uintptr_t)vcpu_e500->gtlb_arch,
					  PAGE_SIZE)));

		for (i = 0; i < vcpu_e500->num_shared_tlb_pages; i++) {
			set_page_dirty_lock(vcpu_e500->shared_tlb_pages[i]);
			put_page(vcpu_e500->shared_tlb_pages[i]);
		}

		vcpu_e500->num_shared_tlb_pages = 0;
		vcpu_e500->shared_tlb_pages = NULL;
	} else {
		kfree(vcpu_e500->gtlb_arch);
	}

	vcpu_e500->gtlb_arch = NULL;
}
void kvmppc_get_sregs_e500_tlb(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
{
	sregs->u.e.mas0 = vcpu->arch.shared->mas0;
	sregs->u.e.mas1 = vcpu->arch.shared->mas1;
	sregs->u.e.mas2 = vcpu->arch.shared->mas2;
	sregs->u.e.mas7_3 = vcpu->arch.shared->mas7_3;
	sregs->u.e.mas4 = vcpu->arch.shared->mas4;
	sregs->u.e.mas6 = vcpu->arch.shared->mas6;

	sregs->u.e.mmucfg = vcpu->arch.mmucfg;
	sregs->u.e.tlbcfg[0] = vcpu->arch.tlbcfg[0];
	sregs->u.e.tlbcfg[1] = vcpu->arch.tlbcfg[1];
	sregs->u.e.tlbcfg[2] = 0;
	sregs->u.e.tlbcfg[3] = 0;
}
int kvmppc_set_sregs_e500_tlb(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
{
	if (sregs->u.e.features & KVM_SREGS_E_ARCH206_MMU) {
		vcpu->arch.shared->mas0 = sregs->u.e.mas0;
		vcpu->arch.shared->mas1 = sregs->u.e.mas1;
		vcpu->arch.shared->mas2 = sregs->u.e.mas2;
		vcpu->arch.shared->mas7_3 = sregs->u.e.mas7_3;
		vcpu->arch.shared->mas4 = sregs->u.e.mas4;
		vcpu->arch.shared->mas6 = sregs->u.e.mas6;
	}

	return 0;
}
int kvm_vcpu_ioctl_config_tlb(struct kvm_vcpu *vcpu,
			      struct kvm_config_tlb *cfg)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	struct kvm_book3e_206_tlb_params params;
	char *virt;
	struct page **pages;
	struct tlbe_priv *privs[2] = {};
	u64 *g2h_bitmap = NULL;
	size_t array_len;
	u32 sets;
	int num_pages, ret, i;

	if (cfg->mmu_type != KVM_MMU_FSL_BOOKE_NOHV)
		return -EINVAL;

	if (copy_from_user(&params, (void __user *)(uintptr_t)cfg->params,
			   sizeof(params)))
		return -EFAULT;

	if (params.tlb_sizes[1] > 64)
		return -EINVAL;
	if (params.tlb_ways[1] != params.tlb_sizes[1])
		return -EINVAL;
	if (params.tlb_sizes[2] != 0 || params.tlb_sizes[3] != 0)
		return -EINVAL;
	if (params.tlb_ways[2] != 0 || params.tlb_ways[3] != 0)
		return -EINVAL;

	if (!is_power_of_2(params.tlb_ways[0]))
		return -EINVAL;

	sets = params.tlb_sizes[0] >> ilog2(params.tlb_ways[0]);
	if (!is_power_of_2(sets))
		return -EINVAL;

	array_len = params.tlb_sizes[0] + params.tlb_sizes[1];
	array_len *= sizeof(struct kvm_book3e_206_tlb_entry);

	if (cfg->array_len < array_len)
		return -EINVAL;

	num_pages = DIV_ROUND_UP(cfg->array + array_len - 1, PAGE_SIZE) -
		    cfg->array / PAGE_SIZE;
	pages = kmalloc(sizeof(struct page *) * num_pages, GFP_KERNEL);
	if (!pages)
		return -ENOMEM;

	ret = get_user_pages_fast(cfg->array, num_pages, 1, pages);
	if (ret < 0)
		goto err_pages;

	if (ret != num_pages) {
		num_pages = ret;
		ret = -EFAULT;
		goto err_put_page;
	}

	virt = vmap(pages, num_pages, VM_MAP, PAGE_KERNEL);
	if (!virt) {
		ret = -ENOMEM;
		goto err_put_page;
	}

	privs[0] = kzalloc(sizeof(struct tlbe_priv) * params.tlb_sizes[0],
			   GFP_KERNEL);
	privs[1] = kzalloc(sizeof(struct tlbe_priv) * params.tlb_sizes[1],
			   GFP_KERNEL);

	if (!privs[0] || !privs[1]) {
		ret = -ENOMEM;
		goto err_privs;
	}

	g2h_bitmap = kzalloc(sizeof(u64) * params.tlb_sizes[1],
			     GFP_KERNEL);
	if (!g2h_bitmap) {
		ret = -ENOMEM;
		goto err_privs;
	}

	free_gtlb(vcpu_e500);

	vcpu_e500->gtlb_priv[0] = privs[0];
	vcpu_e500->gtlb_priv[1] = privs[1];
	vcpu_e500->g2h_tlb1_map = g2h_bitmap;

	vcpu_e500->gtlb_arch = (struct kvm_book3e_206_tlb_entry *)
		(virt + (cfg->array & (PAGE_SIZE - 1)));

	vcpu_e500->gtlb_params[0].entries = params.tlb_sizes[0];
	vcpu_e500->gtlb_params[1].entries = params.tlb_sizes[1];

	vcpu_e500->gtlb_offset[0] = 0;
	vcpu_e500->gtlb_offset[1] = params.tlb_sizes[0];

	vcpu->arch.mmucfg = mfspr(SPRN_MMUCFG) & ~MMUCFG_LPIDSIZE;

	vcpu->arch.tlbcfg[0] &= ~(TLBnCFG_N_ENTRY | TLBnCFG_ASSOC);
	if (params.tlb_sizes[0] <= 2048)
		vcpu->arch.tlbcfg[0] |= params.tlb_sizes[0];
	vcpu->arch.tlbcfg[0] |= params.tlb_ways[0] << TLBnCFG_ASSOC_SHIFT;

	vcpu->arch.tlbcfg[1] &= ~(TLBnCFG_N_ENTRY | TLBnCFG_ASSOC);
	vcpu->arch.tlbcfg[1] |= params.tlb_sizes[1];
	vcpu->arch.tlbcfg[1] |= params.tlb_ways[1] << TLBnCFG_ASSOC_SHIFT;

	vcpu_e500->shared_tlb_pages = pages;
	vcpu_e500->num_shared_tlb_pages = num_pages;

	vcpu_e500->gtlb_params[0].ways = params.tlb_ways[0];
	vcpu_e500->gtlb_params[0].sets = sets;

	vcpu_e500->gtlb_params[1].ways = params.tlb_sizes[1];
	vcpu_e500->gtlb_params[1].sets = 1;

	kvmppc_recalc_tlb1map_range(vcpu_e500);
	return 0;

err_privs:
	kfree(privs[0]);
	kfree(privs[1]);

err_put_page:
	for (i = 0; i < num_pages; i++)
		put_page(pages[i]);

err_pages:
	kfree(pages);
	return ret;
}
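/*
 * The layout configured here backs the guest TLB with userspace memory
 * (the KVM_CAP_SW_TLB interface): the array pages are pinned with
 * get_user_pages_fast() and vmap()ed, so kernel and userspace share one
 * kvm_book3e_206_tlb_entry array without copying.
 */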
int kvm_vcpu_ioctl_dirty_tlb(struct kvm_vcpu *vcpu,
			     struct kvm_dirty_tlb *dirty)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);

	kvmppc_recalc_tlb1map_range(vcpu_e500);
	clear_tlb_refs(vcpu_e500);

	return 0;
}
int kvmppc_e500_tlb_init(struct kvmppc_vcpu_e500 *vcpu_e500)
{
	struct kvm_vcpu *vcpu = &vcpu_e500->vcpu;
	int entry_size = sizeof(struct kvm_book3e_206_tlb_entry);
	int entries = KVM_E500_TLB0_SIZE + KVM_E500_TLB1_SIZE;

	host_tlb_params[0].entries = mfspr(SPRN_TLB0CFG) & TLBnCFG_N_ENTRY;
	host_tlb_params[1].entries = mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY;

	/*
	 * This should never happen on real e500 hardware, but is
	 * architecturally possible -- e.g. in some weird nested
	 * virtualization case.
	 */
	if (host_tlb_params[0].entries == 0 ||
	    host_tlb_params[1].entries == 0) {
		pr_err("%s: need to know host tlb size\n", __func__);
		return -ENODEV;
	}

	host_tlb_params[0].ways = (mfspr(SPRN_TLB0CFG) & TLBnCFG_ASSOC) >>
				  TLBnCFG_ASSOC_SHIFT;
	host_tlb_params[1].ways = host_tlb_params[1].entries;

	if (!is_power_of_2(host_tlb_params[0].entries) ||
	    !is_power_of_2(host_tlb_params[0].ways) ||
	    host_tlb_params[0].entries < host_tlb_params[0].ways ||
	    host_tlb_params[0].ways == 0) {
		pr_err("%s: bad tlb0 host config: %u entries %u ways\n",
		       __func__, host_tlb_params[0].entries,
		       host_tlb_params[0].ways);
		return -ENODEV;
	}

	host_tlb_params[0].sets =
		host_tlb_params[0].entries / host_tlb_params[0].ways;
	host_tlb_params[1].sets = 1;

	vcpu_e500->gtlb_params[0].entries = KVM_E500_TLB0_SIZE;
	vcpu_e500->gtlb_params[1].entries = KVM_E500_TLB1_SIZE;

	vcpu_e500->gtlb_params[0].ways = KVM_E500_TLB0_WAY_NUM;
	vcpu_e500->gtlb_params[0].sets =
		KVM_E500_TLB0_SIZE / KVM_E500_TLB0_WAY_NUM;

	vcpu_e500->gtlb_params[1].ways = KVM_E500_TLB1_SIZE;
	vcpu_e500->gtlb_params[1].sets = 1;

	vcpu_e500->gtlb_arch = kmalloc(entries * entry_size, GFP_KERNEL);
	if (!vcpu_e500->gtlb_arch)
		return -ENOMEM;

	vcpu_e500->gtlb_offset[0] = 0;
	vcpu_e500->gtlb_offset[1] = KVM_E500_TLB0_SIZE;

	vcpu_e500->tlb_refs[0] =
		kzalloc(sizeof(struct tlbe_ref) * host_tlb_params[0].entries,
			GFP_KERNEL);
	if (!vcpu_e500->tlb_refs[0])
		goto err;

	vcpu_e500->tlb_refs[1] =
		kzalloc(sizeof(struct tlbe_ref) * host_tlb_params[1].entries,
			GFP_KERNEL);
	if (!vcpu_e500->tlb_refs[1])
		goto err;

	vcpu_e500->gtlb_priv[0] = kzalloc(sizeof(struct tlbe_ref) *
					  vcpu_e500->gtlb_params[0].entries,
					  GFP_KERNEL);
	if (!vcpu_e500->gtlb_priv[0])
		goto err;

	vcpu_e500->gtlb_priv[1] = kzalloc(sizeof(struct tlbe_ref) *
					  vcpu_e500->gtlb_params[1].entries,
					  GFP_KERNEL);
	if (!vcpu_e500->gtlb_priv[1])
		goto err;

	vcpu_e500->g2h_tlb1_map = kzalloc(sizeof(unsigned int) *
					  vcpu_e500->gtlb_params[1].entries,
					  GFP_KERNEL);
	if (!vcpu_e500->g2h_tlb1_map)
		goto err;

	vcpu_e500->h2g_tlb1_rmap = kzalloc(sizeof(unsigned int) *
					   host_tlb_params[1].entries,
					   GFP_KERNEL);
	if (!vcpu_e500->h2g_tlb1_rmap)
		goto err;

	/* Init TLB configuration register */
	vcpu->arch.tlbcfg[0] = mfspr(SPRN_TLB0CFG) &
			     ~(TLBnCFG_N_ENTRY | TLBnCFG_ASSOC);
	vcpu->arch.tlbcfg[0] |= vcpu_e500->gtlb_params[0].entries;
	vcpu->arch.tlbcfg[0] |=
		vcpu_e500->gtlb_params[0].ways << TLBnCFG_ASSOC_SHIFT;

	vcpu->arch.tlbcfg[1] = mfspr(SPRN_TLB1CFG) &
			     ~(TLBnCFG_N_ENTRY | TLBnCFG_ASSOC);
	vcpu->arch.tlbcfg[1] |= vcpu_e500->gtlb_params[1].entries;
	vcpu->arch.tlbcfg[1] |=
		vcpu_e500->gtlb_params[1].ways << TLBnCFG_ASSOC_SHIFT;

	kvmppc_recalc_tlb1map_range(vcpu_e500);
	return 0;

err:
	free_gtlb(vcpu_e500);
	kfree(vcpu_e500->tlb_refs[0]);
	kfree(vcpu_e500->tlb_refs[1]);

	return -ENOMEM;
}
void kvmppc_e500_tlb_uninit(struct kvmppc_vcpu_e500 *vcpu_e500)
{
	free_gtlb(vcpu_e500);
	kfree(vcpu_e500->h2g_tlb1_rmap);
	kfree(vcpu_e500->tlb_refs[0]);
	kfree(vcpu_e500->tlb_refs[1]);
}