arm/arm64: KVM: don't rely on a valid GICH base address
include/kvm/arm_vgic.h
/*
 * Copyright (C) 2012 ARM Ltd.
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */

#ifndef __ASM_ARM_KVM_VGIC_H
#define __ASM_ARM_KVM_VGIC_H

#include <linux/kernel.h>
#include <linux/kvm.h>
#include <linux/irqreturn.h>
#include <linux/spinlock.h>
#include <linux/types.h>

#define VGIC_NR_IRQS_LEGACY	256
#define VGIC_NR_SGIS		16
#define VGIC_NR_PPIS		16
#define VGIC_NR_PRIVATE_IRQS	(VGIC_NR_SGIS + VGIC_NR_PPIS)

#define VGIC_V2_MAX_LRS		(1 << 6)
#define VGIC_V3_MAX_LRS		16
#define VGIC_MAX_IRQS		1024

/* Sanity checks... */
#if (KVM_MAX_VCPUS > 8)
#error	Invalid number of CPU interfaces
#endif

#if (VGIC_NR_IRQS_LEGACY & 31)
#error "VGIC_NR_IRQS must be a multiple of 32"
#endif

#if (VGIC_NR_IRQS_LEGACY > VGIC_MAX_IRQS)
#error "VGIC_NR_IRQS must be <= 1024"
#endif

/*
 * The GIC distributor registers describing interrupts have two parts:
 * - 32 per-CPU interrupts (SGI + PPI)
 * - a bunch of shared interrupts (SPI)
 */
struct vgic_bitmap {
	/*
	 * - One UL per VCPU for private interrupts (assumes UL is at
	 *   least 32 bits)
	 * - As many UL as necessary for shared interrupts.
	 *
	 * The private interrupts are accessed via the "private"
	 * field, one UL per vcpu (the state for vcpu n is in
	 * private[n]). The shared interrupts are accessed via the
	 * "shared" pointer (IRQn state is at bit n-32 in the bitmap).
	 */
	unsigned long *private;
	unsigned long *shared;
};
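
/*
 * Illustrative sketch (editor's addition, not part of the original header):
 * how the private/shared split described above translates into a lookup.
 * The helper name is hypothetical and it assumes test_bit() from
 * <linux/bitops.h> is visible to the includer; the real accessors live
 * elsewhere in the vgic code.
 */
static inline int vgic_bitmap_example_get(struct vgic_bitmap *b,
					  int cpuid, int irq)
{
	if (irq < VGIC_NR_PRIVATE_IRQS)
		return test_bit(irq, &b->private[cpuid]);

	return test_bit(irq - VGIC_NR_PRIVATE_IRQS, b->shared);
}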

struct vgic_bytemap {
	/*
	 * - 8 u32 per VCPU for private interrupts
	 * - As many u32 as necessary for shared interrupts.
	 *
	 * The private interrupts are accessed via the "private"
	 * field (the state for vcpu n is in private[n*8] to
	 * private[n*8 + 7]). The shared interrupts are accessed via
	 * the "shared" pointer (IRQn state is at byte (n-32)%4 of the
	 * shared[(n-32)/4] word).
	 */
	u32 *private;
	u32 *shared;
};
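
/*
 * Illustrative sketch (editor's addition, hypothetical helper name): a
 * byte-wide per-IRQ value (e.g. a priority) is located exactly as the
 * comment above describes.
 */
static inline u8 vgic_bytemap_example_get(struct vgic_bytemap *b,
					  int cpuid, int irq)
{
	u32 *reg;

	if (irq < VGIC_NR_PRIVATE_IRQS) {
		reg = &b->private[cpuid * 8 + irq / 4];
	} else {
		irq -= VGIC_NR_PRIVATE_IRQS;
		reg = &b->shared[irq / 4];
	}

	return (*reg >> ((irq % 4) * 8)) & 0xff;
}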

struct kvm_vcpu;

enum vgic_type {
	VGIC_V2,		/* Good ol' GICv2 */
	VGIC_V3,		/* New fancy GICv3 */
};

#define LR_STATE_PENDING	(1 << 0)
#define LR_STATE_ACTIVE		(1 << 1)
#define LR_STATE_MASK		(3 << 0)
#define LR_EOI_INT		(1 << 2)

struct vgic_lr {
	u16	irq;
	u8	source;
	u8	state;
};
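
/*
 * Illustrative sketch (editor's addition, hypothetical helper name): the
 * state field combines the LR_STATE_* and LR_EOI_INT bits above, so
 * checking whether a list register still holds a live interrupt is just:
 */
static inline bool vgic_lr_example_in_use(struct vgic_lr lr)
{
	return (lr.state & LR_STATE_MASK) != 0;
}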

struct vgic_vmcr {
	u32	ctlr;
	u32	abpr;
	u32	bpr;
	u32	pmr;
};

struct vgic_ops {
	struct vgic_lr	(*get_lr)(const struct kvm_vcpu *, int);
	void	(*set_lr)(struct kvm_vcpu *, int, struct vgic_lr);
	void	(*sync_lr_elrsr)(struct kvm_vcpu *, int, struct vgic_lr);
	u64	(*get_elrsr)(const struct kvm_vcpu *vcpu);
	u64	(*get_eisr)(const struct kvm_vcpu *vcpu);
	u32	(*get_interrupt_status)(const struct kvm_vcpu *vcpu);
	void	(*enable_underflow)(struct kvm_vcpu *vcpu);
	void	(*disable_underflow)(struct kvm_vcpu *vcpu);
	void	(*get_vmcr)(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr);
	void	(*set_vmcr)(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr);
	void	(*enable)(struct kvm_vcpu *vcpu);
};
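
/*
 * Illustrative sketch (editor's addition): each hardware backend fills in
 * one of these tables and hands it back from its probe function. The
 * instance and callback names below are assumptions for the example:
 *
 *	static const struct vgic_ops vgic_v2_ops = {
 *		.get_lr		= vgic_v2_get_lr,
 *		.set_lr		= vgic_v2_set_lr,
 *		.get_vmcr	= vgic_v2_get_vmcr,
 *		.set_vmcr	= vgic_v2_set_vmcr,
 *		.enable		= vgic_v2_enable,
 *	};
 */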

struct vgic_params {
	/* vgic type */
	enum vgic_type	type;
	/* Physical address of vgic virtual cpu interface */
	phys_addr_t	vcpu_base;
	/* Number of list registers */
	u32		nr_lr;
	/* Interrupt number */
	unsigned int	maint_irq;
	/* Virtual control interface base address */
	void __iomem	*vctrl_base;
};

struct vgic_vm_ops {
	bool	(*handle_mmio)(struct kvm_vcpu *, struct kvm_run *,
			       struct kvm_exit_mmio *);
	bool	(*queue_sgi)(struct kvm_vcpu *, int irq);
	void	(*add_sgi_source)(struct kvm_vcpu *, int irq, int source);
	int	(*init_model)(struct kvm *);
	int	(*map_resources)(struct kvm *, const struct vgic_params *);
};

struct vgic_dist {
#ifdef CONFIG_KVM_ARM_VGIC
	spinlock_t		lock;
	bool			in_kernel;
	bool			ready;

	/* vGIC model the kernel emulates for the guest (GICv2 or GICv3) */
	u32			vgic_model;

	int			nr_cpus;
	int			nr_irqs;

	/* Virtual control interface mapping */
	void __iomem		*vctrl_base;

	/* Distributor and vcpu interface mapping in the guest */
	phys_addr_t		vgic_dist_base;
	phys_addr_t		vgic_cpu_base;

	/* Distributor enabled */
	u32			enabled;

	/* Interrupt enabled (one bit per IRQ) */
	struct vgic_bitmap	irq_enabled;

	/* Level-triggered interrupt external input is asserted */
	struct vgic_bitmap	irq_level;

	/* Interrupt state is pending on the distributor */
	struct vgic_bitmap	irq_pending;

	/*
	 * Tracks writes to GICD_ISPENDRn and GICD_ICPENDRn for level-triggered
	 * interrupts.  Essentially holds the state of the flip-flop in
	 * Figure 4-10 on page 4-101 in ARM IHI 0048B.b.
	 * Once set, it is only cleared for level-triggered interrupts on
	 * guest ACKs (when we queue it) or writes to GICD_ICPENDRn.
	 */
	struct vgic_bitmap	irq_soft_pend;

	/* Level-triggered interrupt queued on VCPU interface */
	struct vgic_bitmap	irq_queued;

	/* Interrupt priority. Not used yet. */
	struct vgic_bytemap	irq_priority;

	/* Level/edge triggered */
	struct vgic_bitmap	irq_cfg;

	/*
	 * Source CPU per SGI and target CPU:
	 *
	 * Each byte represents an SGI observable on a VCPU, each bit of
	 * this byte indicating whether the corresponding VCPU has
	 * generated this interrupt. This is a GICv2 feature only.
	 *
	 * For VCPUn (n < 8), irq_sgi_sources[n*16] to [n*16 + 15] are
	 * the SGIs observable on VCPUn.
	 */
	u8			*irq_sgi_sources;

	/*
	 * Target CPU for each SPI:
	 *
	 * Array of available SPIs, each byte indicating the target
	 * VCPU for the SPI. IRQn (n >= 32) is at irq_spi_cpu[n-32].
	 */
	u8			*irq_spi_cpu;

	/*
	 * Reverse lookup of irq_spi_cpu for faster compute pending:
	 *
	 * Array of bitmaps, one per VCPU, describing whether IRQn is
	 * routed to a particular VCPU.
	 */
	struct vgic_bitmap	*irq_spi_target;

	/* Bitmap indicating which CPU has something pending */
	unsigned long		*irq_pending_on_cpu;

	struct vgic_vm_ops	vm_ops;
#endif
};
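
/*
 * Illustrative sketch (editor's addition, hypothetical helper names): how
 * the irq_sgi_sources and irq_spi_cpu arrays described above are indexed.
 * Guarded like the fields themselves, which only exist with
 * CONFIG_KVM_ARM_VGIC.
 */
#ifdef CONFIG_KVM_ARM_VGIC
static inline u8 vgic_example_sgi_sources(struct vgic_dist *dist,
					  int vcpu_id, int sgi)
{
	/* one byte per SGI and per VCPU: the SGIs of VCPUn start at n * 16 */
	return dist->irq_sgi_sources[vcpu_id * VGIC_NR_SGIS + sgi];
}

static inline u8 vgic_example_spi_target(struct vgic_dist *dist, int irq)
{
	/* SPIs start at 32, so IRQn (n >= 32) lives at irq_spi_cpu[n - 32] */
	return dist->irq_spi_cpu[irq - VGIC_NR_PRIVATE_IRQS];
}
#endif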

struct vgic_v2_cpu_if {
	u32		vgic_hcr;
	u32		vgic_vmcr;
	u32		vgic_misr;	/* Saved only */
	u64		vgic_eisr;	/* Saved only */
	u64		vgic_elrsr;	/* Saved only */
	u32		vgic_apr;
	u32		vgic_lr[VGIC_V2_MAX_LRS];
};

struct vgic_v3_cpu_if {
#ifdef CONFIG_ARM_GIC_V3
	u32		vgic_hcr;
	u32		vgic_vmcr;
	u32		vgic_misr;	/* Saved only */
	u32		vgic_eisr;	/* Saved only */
	u32		vgic_elrsr;	/* Saved only */
	u32		vgic_ap0r[4];
	u32		vgic_ap1r[4];
	u64		vgic_lr[VGIC_V3_MAX_LRS];
#endif
};

struct vgic_cpu {
#ifdef CONFIG_KVM_ARM_VGIC
	/* per IRQ to LR mapping */
	u8		*vgic_irq_lr_map;

	/* Pending interrupts on this VCPU */
	DECLARE_BITMAP(	pending_percpu, VGIC_NR_PRIVATE_IRQS);
	unsigned long	*pending_shared;

	/* Bitmap of used/free list registers */
	DECLARE_BITMAP(	lr_used, VGIC_V2_MAX_LRS);

	/* Number of list registers on this CPU */
	int		nr_lr;

	/* CPU vif control registers for world switch */
	union {
		struct vgic_v2_cpu_if	vgic_v2;
		struct vgic_v3_cpu_if	vgic_v3;
	};
#endif
};

#define LR_EMPTY	0xff

#define INT_STATUS_EOI		(1 << 0)
#define INT_STATUS_UNDERFLOW	(1 << 1)

struct kvm;
struct kvm_vcpu;
struct kvm_run;
struct kvm_exit_mmio;

#ifdef CONFIG_KVM_ARM_VGIC
int kvm_vgic_addr(struct kvm *kvm, unsigned long type, u64 *addr, bool write);
int kvm_vgic_hyp_init(void);
int kvm_vgic_map_resources(struct kvm *kvm);
int kvm_vgic_create(struct kvm *kvm, u32 type);
void kvm_vgic_destroy(struct kvm *kvm);
void kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu);
void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu);
void kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu);
int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int irq_num,
			bool level);
int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu);
bool vgic_handle_mmio(struct kvm_vcpu *vcpu, struct kvm_run *run,
		      struct kvm_exit_mmio *mmio);

#define irqchip_in_kernel(k)	(!!((k)->arch.vgic.in_kernel))
#define vgic_initialized(k)	(!!((k)->arch.vgic.nr_cpus))
#define vgic_ready(k)		((k)->arch.vgic.ready)
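
/*
 * Illustrative sketch (editor's addition): an in-kernel user such as the
 * arch timer or an emulated device would typically assert an interrupt
 * along these lines, once the vgic has been created and is ready
 * (vcpu_id and irq_num are placeholders):
 *
 *	if (irqchip_in_kernel(kvm) && vgic_ready(kvm))
 *		kvm_vgic_inject_irq(kvm, vcpu_id, irq_num, true);
 *
 * The final argument is the new level; passing false de-asserts a
 * level-triggered line.
 */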

int vgic_v2_probe(struct device_node *vgic_node,
		  const struct vgic_ops **ops,
		  const struct vgic_params **params);
#ifdef CONFIG_ARM_GIC_V3
int vgic_v3_probe(struct device_node *vgic_node,
		  const struct vgic_ops **ops,
		  const struct vgic_params **params);
#else
static inline int vgic_v3_probe(struct device_node *vgic_node,
				const struct vgic_ops **ops,
				const struct vgic_params **params)
{
	return -ENODEV;
}
#endif

#else
static inline int kvm_vgic_hyp_init(void)
{
	return 0;
}

static inline int kvm_vgic_set_addr(struct kvm *kvm, unsigned long type, u64 addr)
{
	return 0;
}

static inline int kvm_vgic_addr(struct kvm *kvm, unsigned long type, u64 *addr, bool write)
{
	return -ENXIO;
}

static inline int kvm_vgic_map_resources(struct kvm *kvm)
{
	return 0;
}

static inline int kvm_vgic_create(struct kvm *kvm, u32 type)
{
	return 0;
}

static inline void kvm_vgic_destroy(struct kvm *kvm)
{
}

static inline void kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu)
{
}

static inline int kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu)
{
	return 0;
}

static inline void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu) {}
static inline void kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu) {}

static inline int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid,
				      unsigned int irq_num, bool level)
{
	return 0;
}

static inline int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu)
{
	return 0;
}

static inline bool vgic_handle_mmio(struct kvm_vcpu *vcpu, struct kvm_run *run,
				    struct kvm_exit_mmio *mmio)
{
	return false;
}

static inline int irqchip_in_kernel(struct kvm *kvm)
{
	return 0;
}

static inline bool vgic_initialized(struct kvm *kvm)
{
	return true;
}

static inline bool vgic_ready(struct kvm *kvm)
{
	return true;
}
#endif

#endif