struct kvm_rmap_desc *desc;
struct kvm_rmap_desc *prev_desc;
struct kvm_mmu_page *page;
+ struct page *release_page;
unsigned long *rmapp;
int i;
if (!is_rmap_pte(*spte))
return;
page = page_header(__pa(spte));
- kvm_release_page(pfn_to_page((*spte & PT64_BASE_ADDR_MASK) >>
- PAGE_SHIFT));
+ release_page = pfn_to_page((*spte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT);
+ if (is_writeble_pte(*spte))
+ kvm_release_page_dirty(release_page);
+ else
+ kvm_release_page_clean(release_page);
rmapp = gfn_to_rmap(kvm, page->gfns[spte - page->spt]);
if (!*rmapp) {
printk(KERN_ERR "rmap_remove: %p %llx 0->BUG\n", spte, *spte);
{
u64 *parent_pte;
+ ++kvm->stat.mmu_shadow_zapped;
while (page->multimapped || page->parent_pte) {
if (!page->multimapped)
parent_pte = page->parent_pte;
{
int level = PT32E_ROOT_LEVEL;
hpa_t table_addr = vcpu->mmu.root_hpa;
+ struct page *page;
+ page = pfn_to_page(p >> PAGE_SHIFT);
for (; ; level--) {
u32 index = PT64_INDEX(v, level);
u64 *table;
pte = table[index];
was_rmapped = is_rmap_pte(pte);
- if (is_shadow_present_pte(pte) && is_writeble_pte(pte))
+ if (is_shadow_present_pte(pte) && is_writeble_pte(pte)) {
+ kvm_release_page_clean(page);
return 0;
+ }
mark_page_dirty(vcpu->kvm, v >> PAGE_SHIFT);
page_header_update_slot(vcpu->kvm, table, v);
table[index] = p | PT_PRESENT_MASK | PT_WRITABLE_MASK |
if (!was_rmapped)
rmap_add(vcpu, &table[index], v >> PAGE_SHIFT);
else
- kvm_release_page(pfn_to_page(p >> PAGE_SHIFT));
+ kvm_release_page_clean(page);
+
return 0;
}
1, 3, &table[index]);
if (!new_table) {
pgprintk("nonpaging_map: ENOMEM\n");
- kvm_release_page(pfn_to_page(p >> PAGE_SHIFT));
+ kvm_release_page_clean(page);
return -ENOMEM;
}
paddr = gpa_to_hpa(vcpu->kvm, addr & PT64_BASE_ADDR_MASK);
if (is_error_hpa(paddr)) {
- kvm_release_page(pfn_to_page((paddr & PT64_BASE_ADDR_MASK)
- >> PAGE_SHIFT));
+ kvm_release_page_clean(pfn_to_page((paddr & PT64_BASE_ADDR_MASK)
+ >> PAGE_SHIFT));
return 1;
}
const void *new, int bytes,
int offset_in_pte)
{
- if (page->role.level != PT_PAGE_TABLE_LEVEL)
+ if (page->role.level != PT_PAGE_TABLE_LEVEL) {
+ ++vcpu->kvm->stat.mmu_pde_zapped;
return;
+ }
+ ++vcpu->kvm->stat.mmu_pte_updated;
if (page->role.glevels == PT32_ROOT_LEVEL)
paging32_update_pte(vcpu, page, spte, new, bytes,
offset_in_pte);
int npte;
pgprintk("%s: gpa %llx bytes %d\n", __FUNCTION__, gpa, bytes);
+ ++vcpu->kvm->stat.mmu_pte_write;
kvm_mmu_audit(vcpu, "pre pte write");
if (gfn == vcpu->last_pt_write_gfn
&& !last_updated_pte_accessed(vcpu)) {
pgprintk("misaligned: gpa %llx bytes %d role %x\n",
gpa, bytes, page->role.word);
kvm_mmu_zap_page(vcpu->kvm, page);
+ ++vcpu->kvm->stat.mmu_flooded;
continue;
}
page_offset = offset;
page = container_of(vcpu->kvm->active_mmu_pages.prev,
struct kvm_mmu_page, link);
kvm_mmu_zap_page(vcpu->kvm, page);
+ ++vcpu->kvm->stat.mmu_recycled;
}
}
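Note: the ++kvm->stat.mmu_* increments added above and below assume matching counter fields in the VM-wide statistics structure (added by a companion change, together with the usual debugfs entries). A rough sketch of the fields being assumed; the struct name here is hypothetical, not taken from this patch:

#include <linux/types.h>

/*
 * Sketch only: counters assumed by the kvm->stat.mmu_* increments in this
 * patch. The real fields live in the VM stats structure added elsewhere in
 * the series.
 */
struct kvm_mmu_stats_sketch {
	u32 mmu_shadow_zapped;	/* shadow pages destroyed in kvm_mmu_zap_page() */
	u32 mmu_pte_write;	/* guest writes into shadowed page tables */
	u32 mmu_pte_updated;	/* leaf sptes updated in place on such writes */
	u32 mmu_pde_zapped;	/* writes hitting non-leaf shadow pages (update skipped) */
	u32 mmu_flooded;	/* shadow pages zapped due to misaligned/flooding writes */
	u32 mmu_recycled;	/* shadow pages freed to stay under the page limit */
};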
goto out;
}
+ r = mmu_topup_memory_caches(vcpu);
+ if (r)
+ goto out;
+
er = emulate_instruction(vcpu, vcpu->run, cr2, error_code, 0);
mutex_unlock(&vcpu->kvm->lock);
return -ENOMEM;
}
+/*
+ * Calculate the number of mmu pages needed for kvm.
+ */
+unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm)
+{
+ int i;
+ unsigned int nr_mmu_pages;
+ unsigned int nr_pages = 0;
+
+ for (i = 0; i < kvm->nmemslots; i++)
+ nr_pages += kvm->memslots[i].npages;
+
+ nr_mmu_pages = nr_pages * KVM_PERMILLE_MMU_PAGES / 1000;
+ nr_mmu_pages = max(nr_mmu_pages,
+ (unsigned int) KVM_MIN_ALLOC_MMU_PAGES);
+
+ return nr_mmu_pages;
+}
+
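The new helper budgets KVM_PERMILLE_MMU_PAGES shadow pages per 1000 guest pages, clamped to at least KVM_MIN_ALLOC_MMU_PAGES. With the usual values of 20 and 64 (both constants come from a companion header change, not this hunk), a 1 GiB guest (262144 pages) gets a budget of 5242 shadow pages, while small guests are clamped to 64. A hedged sketch of the expected call site when a memory slot is created or grown; kvm_mmu_change_mmu_pages() and the n_requested_mmu_pages check are assumptions taken from the rest of the series:

/*
 * Sketch only: expected wiring in the set-memory-region path. The function
 * name is hypothetical; kvm_mmu_change_mmu_pages() is assumed from the
 * companion patches.
 */
static void kvm_refresh_mmu_page_budget(struct kvm *kvm)
{
	/* Respect an explicit shadow-cache size requested by userspace. */
	if (kvm->n_requested_mmu_pages)
		return;

	kvm_mmu_change_mmu_pages(kvm, kvm_mmu_calculate_mmu_pages(kvm));
}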
#ifdef AUDIT
static const char *audit_msg;
" valid guest gva %lx\n", audit_msg, va);
page = pfn_to_page((gpa & PT64_BASE_ADDR_MASK)
>> PAGE_SHIFT);
- kvm_release_page(page);
+ kvm_release_page_clean(page);
}
}
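The recurring conversion in this patch replaces kvm_release_page() with kvm_release_page_dirty() where the spte was writable, and kvm_release_page_clean() everywhere the page cannot have been written through the mapping (error paths, read-only sptes, audit code). A minimal sketch of the semantics those call sites rely on; the real helpers are introduced in the companion kvm_main.c change and may differ in detail:

#include <linux/mm.h>

/*
 * Sketch only (assumed behavior, not the actual implementation):
 * the dirty variant marks the page dirty before dropping the reference,
 * the clean variant just drops it.
 */
void kvm_release_page_clean(struct page *page)
{
	put_page(page);
}

void kvm_release_page_dirty(struct page *page)
{
	if (!PageReserved(page))
		SetPageDirty(page);	/* guest may have written via a writable spte */
	put_page(page);
}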