#ifndef __KVM_X86_MMU_H
#define __KVM_X86_MMU_H

#include <linux/kvm_host.h>
#include "kvm_cache_regs.h"

#define PT64_PT_BITS 9
#define PT64_ENT_PER_PAGE (1 << PT64_PT_BITS)
#define PT32_PT_BITS 10
#define PT32_ENT_PER_PAGE (1 << PT32_PT_BITS)

#define PT_WRITABLE_SHIFT 1
#define PT_USER_SHIFT 2

#define PT_PRESENT_MASK (1ULL << 0)
#define PT_WRITABLE_MASK (1ULL << PT_WRITABLE_SHIFT)
#define PT_USER_MASK (1ULL << PT_USER_SHIFT)
#define PT_PWT_MASK (1ULL << 3)
#define PT_PCD_MASK (1ULL << 4)
#define PT_ACCESSED_SHIFT 5
#define PT_ACCESSED_MASK (1ULL << PT_ACCESSED_SHIFT)
#define PT_DIRTY_SHIFT 6
#define PT_DIRTY_MASK (1ULL << PT_DIRTY_SHIFT)
#define PT_PAGE_SIZE_SHIFT 7
#define PT_PAGE_SIZE_MASK (1ULL << PT_PAGE_SIZE_SHIFT)
#define PT_PAT_MASK (1ULL << 7)
#define PT_GLOBAL_MASK (1ULL << 8)
#define PT64_NX_SHIFT 63
#define PT64_NX_MASK (1ULL << PT64_NX_SHIFT)

#define PT_PAT_SHIFT 7
#define PT_DIR_PAT_SHIFT 12
#define PT_DIR_PAT_MASK (1ULL << PT_DIR_PAT_SHIFT)

#define PT32_DIR_PSE36_SIZE 4
#define PT32_DIR_PSE36_SHIFT 13
#define PT32_DIR_PSE36_MASK \
	(((1ULL << PT32_DIR_PSE36_SIZE) - 1) << PT32_DIR_PSE36_SHIFT)

#define PT64_ROOT_LEVEL 4
#define PT32_ROOT_LEVEL 2
#define PT32E_ROOT_LEVEL 3

#define PT_PDPE_LEVEL 3
#define PT_DIRECTORY_LEVEL 2
#define PT_PAGE_TABLE_LEVEL 1
#define PT_MAX_HUGEPAGE_LEVEL (PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES - 1)

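/*
 * Mask with bits s..e, inclusive, set. For example, rsvd_bits(3, 5)
 * returns ((1ULL << 3) - 1) << 3 == 0x38, i.e. bits 3, 4 and 5 set.
 */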
static inline u64 rsvd_bits(int s, int e)
{
	return ((1ULL << (e - s + 1)) - 1) << s;
}
53 | ||
ce88decf | 54 | void kvm_mmu_set_mmio_spte_mask(u64 mmio_mask); |
b37fbea6 | 55 | |
c258b62b XG |
56 | void |
57 | reset_shadow_zero_bits_mask(struct kvm_vcpu *vcpu, struct kvm_mmu *context); | |
58 | ||
b37fbea6 | 59 | /* |
450869d6 | 60 | * Return values of handle_mmio_page_fault: |
b37fbea6 | 61 | * RET_MMIO_PF_EMULATE: it is a real mmio page fault, emulate the instruction |
f8f55942 XG |
62 | * directly. |
63 | * RET_MMIO_PF_INVALID: invalid spte is detected then let the real page | |
64 | * fault path update the mmio spte. | |
b37fbea6 | 65 | * RET_MMIO_PF_RETRY: let CPU fault again on the address. |
450869d6 | 66 | * RET_MMIO_PF_BUG: a bug was detected (and a WARN was printed). |
b37fbea6 XG |
67 | */ |
68 | enum { | |
69 | RET_MMIO_PF_EMULATE = 1, | |
f8f55942 | 70 | RET_MMIO_PF_INVALID = 2, |
b37fbea6 XG |
71 | RET_MMIO_PF_RETRY = 0, |
72 | RET_MMIO_PF_BUG = -1 | |
73 | }; | |
74 | ||
450869d6 | 75 | int handle_mmio_page_fault(struct kvm_vcpu *vcpu, u64 addr, bool direct); |
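
/*
 * A rough sketch (not the exact in-tree code) of how a caller, e.g. the
 * VMX EPT-misconfig handler, consumes these return values:
 *
 *	switch (handle_mmio_page_fault(vcpu, gpa, true)) {
 *	case RET_MMIO_PF_EMULATE:
 *		hand the access to the x86 instruction emulator;
 *	case RET_MMIO_PF_INVALID:
 *		go down the normal page fault path, which rebuilds
 *		the mmio spte;
 *	case RET_MMIO_PF_RETRY:
 *		return to the guest so the access is retried;
 *	case RET_MMIO_PF_BUG:
 *		report an internal error;
 *	}
 */
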
void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu);
void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly);

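/*
 * Number of shadow pages this VM may still allocate before it reaches its
 * n_max_mmu_pages limit (zero once the limit has been reached or exceeded).
 */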
static inline unsigned int kvm_mmu_available_pages(struct kvm *kvm)
{
	if (kvm->arch.n_max_mmu_pages > kvm->arch.n_used_mmu_pages)
		return kvm->arch.n_max_mmu_pages -
			kvm->arch.n_used_mmu_pages;

	return 0;
}

static inline int kvm_mmu_reload(struct kvm_vcpu *vcpu)
{
	if (likely(vcpu->arch.mmu.root_hpa != INVALID_PAGE))
		return 0;

	return kvm_mmu_load(vcpu);
}

static inline int is_present_gpte(unsigned long pte)
{
	return pte & PT_PRESENT_MASK;
}

/*
 * There are currently two kinds of write-protection:
 * a) write-protecting a guest page in order to synchronize guest
 *    modifications into the shadow page tables, and
 * b) write-protecting pages in order to synchronize the dirty bitmap for
 *    KVM_GET_DIRTY_LOG.
 * The differences between the two are:
 * 1) case a) clears the SPTE_MMU_WRITEABLE bit; case b) does not.
 * 2) case a) must flush the TLB immediately, under the protection of
 *    mmu-lock, so that the shadow page tables are not corrupted across
 *    vcpus. Case b) need not flush the TLB until the dirty bitmap is
 *    returned to userspace: it only write-protects pages that are already
 *    logged in the bitmap, so no page is missed, and the flush can happen
 *    outside mmu-lock.
 *
 * This creates a problem: case a) can encounter stale writable TLB entries
 * left by case b), which write-protects pages without flushing the TLB
 * immediately. To make case a) aware of this, it flushes the TLB whenever
 * it write-protects an spte whose SPTE_MMU_WRITEABLE bit is set; this
 * works because case b) never touches the SPTE_MMU_WRITEABLE bit.
 *
 * In any case, whenever an spte is updated (only permission and status
 * bits are changed), we need to check whether an spte with
 * SPTE_MMU_WRITEABLE has become read-only; if so, the TLB must be flushed.
 * Fortunately, mmu_spte_update() already handles this.
 *
 * The rules for using SPTE_MMU_WRITEABLE and PT_WRITABLE_MASK:
 * - to check whether a writable TLB entry may exist, or whether the spte
 *   can be made writable on the mmu mapping, check SPTE_MMU_WRITEABLE;
 *   this is the common case. Otherwise,
 * - when fixing a page fault on the spte, or write-protecting it for dirty
 *   logging, check PT_WRITABLE_MASK.
 *
 * TODO: introduce APIs to split these two cases.
 */
static inline int is_writable_pte(unsigned long pte)
{
	return pte & PT_WRITABLE_MASK;
}
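
/*
 * Illustrative sketch of the two rules above. These helpers are
 * hypothetical (they do not exist in the tree), and SPTE_MMU_WRITEABLE
 * itself is defined in mmu.c rather than in this header:
 *
 *	bool spte_may_have_writable_tlb_entry(u64 spte)
 *	{
 *		return spte & SPTE_MMU_WRITEABLE;	(rule 1, common case)
 *	}
 *
 *	bool spte_is_hw_writable(u64 spte)
 *	{
 *		return spte & PT_WRITABLE_MASK;		(rule 2)
 *	}
 */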
138 | ||
139 | static inline bool is_write_protection(struct kvm_vcpu *vcpu) | |
140 | { | |
141 | return kvm_read_cr0_bits(vcpu, X86_CR0_WP); | |
142 | } | |
143 | ||
/*
 * Check if a given access (described through the I/D, W/R and U/S bits of a
 * page fault error code pfec) causes a permission fault with the given PTE
 * access rights (in ACC_* format).
 *
 * Return zero if the access does not fault; return the page fault error code
 * if the access faults.
 */
static inline u8 permission_fault(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
				  unsigned pte_access, unsigned pte_pkey,
				  unsigned pfec)
{
	int cpl = kvm_x86_ops->get_cpl(vcpu);
	unsigned long rflags = kvm_x86_ops->get_rflags(vcpu);

	/*
	 * If CPL < 3, SMAP protections are disabled if EFLAGS.AC = 1.
	 *
	 * If CPL = 3, SMAP applies to all supervisor-mode data accesses
	 * (these are implicit supervisor accesses) regardless of the value
	 * of EFLAGS.AC.
	 *
	 * This computes (cpl < 3) && (rflags & X86_EFLAGS_AC), leaving
	 * the result in X86_EFLAGS_AC. We then insert it in place of
	 * the PFERR_RSVD_MASK bit; this bit will always be zero in pfec,
	 * but it will be one in index if SMAP checks are being overridden.
	 * It is important to keep this branchless.
	 */
	unsigned long smap = (cpl - 3) & (rflags & X86_EFLAGS_AC);
	int index = (pfec >> 1) +
		    (smap >> (X86_EFLAGS_AC_BIT - PFERR_RSVD_BIT + 1));
	bool fault = (mmu->permissions[index] >> pte_access) & 1;
	u32 errcode = PFERR_PRESENT_MASK;

	WARN_ON(pfec & (PFERR_PK_MASK | PFERR_RSVD_MASK));
	if (unlikely(mmu->pkru_mask)) {
		u32 pkru_bits, offset;

		/*
		 * PKRU defines 32 bits: there are 16 protection domains
		 * and 2 attribute bits per domain. pte_pkey is the index
		 * of the protection domain, so pte_pkey * 2 is the index
		 * of the first bit for that domain.
		 */
		pkru_bits = (kvm_read_pkru(vcpu) >> (pte_pkey * 2)) & 3;

		/* clear present bit, replace PFEC.RSVD with ACC_USER_MASK */
		offset = (pfec & ~1) +
			((pte_access & PT_USER_MASK) << (PFERR_RSVD_BIT - PT_USER_SHIFT));

		pkru_bits &= mmu->pkru_mask >> offset;
		errcode |= -pkru_bits & PFERR_PK_MASK;
		fault |= (pkru_bits != 0);
	}

	return -(u32)fault & errcode;
}
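
/*
 * Worked example for the branchless logic in permission_fault() above,
 * under the usual bit layout (X86_EFLAGS_AC_BIT == 18, PFERR_RSVD_BIT == 3):
 * when cpl < 3 and EFLAGS.AC is set, (cpl - 3) is negative so every bit of
 * smap is preserved, and smap >> (18 - 3 + 1) leaves a one exactly in the
 * PFERR_RSVD position of the shifted pfec. Similarly, -(u32)fault is
 * 0xffffffff when fault is true and 0 otherwise, so the final AND returns
 * either the full error code or zero without a branch.
 */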

void kvm_mmu_invalidate_zap_all_pages(struct kvm *kvm);
void kvm_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end);

void kvm_mmu_gfn_disallow_lpage(struct kvm_memory_slot *slot, gfn_t gfn);
void kvm_mmu_gfn_allow_lpage(struct kvm_memory_slot *slot, gfn_t gfn);
bool kvm_mmu_slot_gfn_write_protect(struct kvm *kvm,
				    struct kvm_memory_slot *slot, u64 gfn);
#endif