/*
 * Copyright (C) 2012 ARM Ltd.
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#ifndef __ASM_ARM_KVM_VGIC_H
#define __ASM_ARM_KVM_VGIC_H

#include <linux/kernel.h>
#include <linux/kvm.h>
#include <linux/irqreturn.h>
#include <linux/spinlock.h>
#include <linux/types.h>

#define VGIC_NR_IRQS_LEGACY	256
#define VGIC_NR_SGIS		16
#define VGIC_NR_PPIS		16
#define VGIC_NR_PRIVATE_IRQS	(VGIC_NR_SGIS + VGIC_NR_PPIS)

#define VGIC_V2_MAX_LRS		(1 << 6)
#define VGIC_V3_MAX_LRS		16
#define VGIC_MAX_IRQS		1024
#define VGIC_V2_MAX_CPUS	8

/* Sanity checks... */
#if (KVM_MAX_VCPUS > 255)
#error Too many KVM VCPUs, the VGIC only supports up to 255 VCPUs for now
#endif

#if (VGIC_NR_IRQS_LEGACY & 31)
#error "VGIC_NR_IRQS must be a multiple of 32"
#endif

#if (VGIC_NR_IRQS_LEGACY > VGIC_MAX_IRQS)
#error "VGIC_NR_IRQS must be <= 1024"
#endif

/*
 * The GIC distributor registers describing interrupts have two parts:
 * - 32 per-CPU interrupts (SGI + PPI)
 * - a bunch of shared interrupts (SPI)
 */
struct vgic_bitmap {
        /*
         * - One UL per VCPU for private interrupts (assumes UL is at
         *   least 32 bits)
         * - As many UL as necessary for shared interrupts.
         *
         * The private interrupts are accessed via the "private"
         * field, one UL per vcpu (the state for vcpu n is in
         * private[n]). The shared interrupts are accessed via the
         * "shared" pointer (IRQn state is at bit n-32 in the bitmap).
         */
        unsigned long *private;
        unsigned long *shared;
};
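
/*
 * Illustrative sketch, not part of the original header: one way to index the
 * split private/shared layout described above. The helper name below is
 * hypothetical; an in-tree accessor with the same logic lives in
 * virt/kvm/arm/vgic.c.
 */
static inline int vgic_bitmap_example_get(struct vgic_bitmap *b,
                                          int vcpu_id, int irq)
{
        if (irq < VGIC_NR_PRIVATE_IRQS)
                /* SGIs/PPIs: one unsigned long of state per VCPU */
                return test_bit(irq, &b->private[vcpu_id]);

        /* SPIs: a single shared bitmap, IRQn at bit n - 32 */
        return test_bit(irq - VGIC_NR_PRIVATE_IRQS, b->shared);
}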

struct vgic_bytemap {
        /*
         * - 8 u32 per VCPU for private interrupts
         * - As many u32 as necessary for shared interrupts.
         *
         * The private interrupts are accessed via the "private"
         * field (the state for vcpu n is in private[n*8] to
         * private[n*8 + 7]). The shared interrupts are accessed via
         * the "shared" pointer (IRQn state is at byte (n-32)%4 of the
         * shared[(n-32)/4] word).
         */
        u32 *private;
        u32 *shared;
};
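
/*
 * Illustrative sketch, not part of the original header (hypothetical helper):
 * locating the u32 word that holds IRQn's byte in the layout described above,
 * e.g. for a priority value stored in a vgic_bytemap.
 */
static inline u32 *vgic_bytemap_example_reg(struct vgic_bytemap *x,
                                            int vcpu_id, int irq)
{
        if (irq < VGIC_NR_PRIVATE_IRQS)
                /* 8 u32s of private state per VCPU, 4 IRQ bytes per u32 */
                return &x->private[vcpu_id * 8 + irq / 4];

        /* shared state: IRQn's byte lives in shared[(n - 32) / 4] */
        return &x->shared[(irq - VGIC_NR_PRIVATE_IRQS) / 4];
}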

struct kvm_vcpu;

enum vgic_type {
        VGIC_V2,        /* Good ol' GICv2 */
        VGIC_V3,        /* New fancy GICv3 */
};

#define LR_STATE_PENDING	(1 << 0)
#define LR_STATE_ACTIVE		(1 << 1)
#define LR_STATE_MASK		(3 << 0)
#define LR_EOI_INT		(1 << 2)

struct vgic_lr {
        u16 irq;
        u8 source;
        u8 state;
};
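
/*
 * Illustrative sketch, not part of the original header: a software-view list
 * register describing SGI 3 as pending, sent by VCPU 1. The "source" field is
 * only meaningful for GICv2 SGIs; the hardware encoding is produced by the
 * per-GIC backend, not by this struct.
 */
static inline struct vgic_lr vgic_example_pending_sgi(void)
{
        struct vgic_lr lr = {
                .irq = 3,                       /* SGI number (0..15) */
                .source = 1,                    /* originating VCPU */
                .state = LR_STATE_PENDING,      /* pending, not yet active */
        };
        return lr;
}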

struct vgic_vmcr {
        u32 ctlr;
        u32 abpr;
        u32 bpr;
        u32 pmr;
};

struct vgic_ops {
        struct vgic_lr (*get_lr)(const struct kvm_vcpu *, int);
        void (*set_lr)(struct kvm_vcpu *, int, struct vgic_lr);
        void (*sync_lr_elrsr)(struct kvm_vcpu *, int, struct vgic_lr);
        u64 (*get_elrsr)(const struct kvm_vcpu *vcpu);
        u64 (*get_eisr)(const struct kvm_vcpu *vcpu);
        void (*clear_eisr)(struct kvm_vcpu *vcpu);
        u32 (*get_interrupt_status)(const struct kvm_vcpu *vcpu);
        void (*enable_underflow)(struct kvm_vcpu *vcpu);
        void (*disable_underflow)(struct kvm_vcpu *vcpu);
        void (*get_vmcr)(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr);
        void (*set_vmcr)(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr);
        void (*enable)(struct kvm_vcpu *vcpu);
};
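
/*
 * Illustrative sketch, not part of the original header: the common VGIC code
 * keeps a single vgic_ops pointer (filled in by vgic_v2_probe() or
 * vgic_v3_probe() below) and dispatches through it, so the distributor
 * emulation never touches GICv2/GICv3 list registers directly. Hypothetical
 * wrapper:
 */
static inline struct vgic_lr vgic_example_get_lr(const struct vgic_ops *ops,
                                                 const struct kvm_vcpu *vcpu,
                                                 int lr)
{
        return ops->get_lr(vcpu, lr);   /* backend-specific LR decode */
}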

struct vgic_params {
        /* vgic type */
        enum vgic_type type;
        /* Physical address of vgic virtual cpu interface */
        phys_addr_t vcpu_base;
        /* Number of list registers */
        u32 nr_lr;
        /* Interrupt number */
        unsigned int maint_irq;
        /* Virtual control interface base address */
        void __iomem *vctrl_base;
        int max_gic_vcpus;
        /* Only needed for the legacy KVM_CREATE_IRQCHIP */
        bool can_emulate_gicv2;
};

struct vgic_vm_ops {
        bool (*handle_mmio)(struct kvm_vcpu *, struct kvm_run *,
                            struct kvm_exit_mmio *);
        bool (*queue_sgi)(struct kvm_vcpu *, int irq);
        void (*add_sgi_source)(struct kvm_vcpu *, int irq, int source);
        int (*init_model)(struct kvm *);
        int (*map_resources)(struct kvm *, const struct vgic_params *);
};

struct vgic_dist {
#ifdef CONFIG_KVM_ARM_VGIC
        spinlock_t lock;
        bool in_kernel;
        bool ready;

        /* vGIC model the kernel emulates for the guest (GICv2 or GICv3) */
        u32 vgic_model;

        int nr_cpus;
        int nr_irqs;

        /* Virtual control interface mapping */
        void __iomem *vctrl_base;

        /* Distributor and vcpu interface mapping in the guest */
        phys_addr_t vgic_dist_base;
        /* GICv2 and GICv3 use different mapped register blocks */
        union {
                phys_addr_t vgic_cpu_base;
                phys_addr_t vgic_redist_base;
        };

        /* Distributor enabled */
        u32 enabled;

        /* Interrupt enabled (one bit per IRQ) */
        struct vgic_bitmap irq_enabled;

        /* Level-triggered interrupt external input is asserted */
        struct vgic_bitmap irq_level;

        /* Interrupt state is pending on the distributor */
        struct vgic_bitmap irq_pending;

        /*
         * Tracks writes to GICD_ISPENDRn and GICD_ICPENDRn for level-triggered
         * interrupts. Essentially holds the state of the flip-flop in
         * Figure 4-10 on page 4-101 in ARM IHI 0048B.b.
         * Once set, it is only cleared for level-triggered interrupts on
         * guest ACKs (when we queue it) or writes to GICD_ICPENDRn.
         */
        struct vgic_bitmap irq_soft_pend;

        /* Level-triggered interrupt queued on VCPU interface */
        struct vgic_bitmap irq_queued;

        /* Interrupt priority. Not used yet. */
        struct vgic_bytemap irq_priority;

        /* Level/edge triggered */
        struct vgic_bitmap irq_cfg;

        /*
         * Source CPU per SGI and target CPU:
         *
         * Each byte represents an SGI observable on a VCPU, each bit of
         * this byte indicating whether the corresponding VCPU has
         * generated this interrupt. This is a GICv2 feature only.
         *
         * For VCPUn (n < 8), irq_sgi_sources[n*16] to [n*16 + 15] are
         * the SGIs observable on VCPUn.
         */
        u8 *irq_sgi_sources;

        /*
         * Target CPU for each SPI:
         *
         * Array of available SPIs, each byte indicating the target
         * VCPU for that SPI. IRQn (n >= 32) is at irq_spi_cpu[n-32].
         */
        u8 *irq_spi_cpu;

        /*
         * Reverse lookup of irq_spi_cpu for faster compute pending:
         *
         * Array of bitmaps, one per VCPU, describing whether IRQn is
         * routed to a particular VCPU.
         */
        struct vgic_bitmap *irq_spi_target;

        /* Target MPIDR for each IRQ (only needed for GICv3 IROUTERn) */
        u32 *irq_spi_mpidr;

        /* Bitmap indicating which CPU has something pending */
        unsigned long *irq_pending_on_cpu;

        struct vgic_vm_ops vm_ops;
#endif
};
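
/*
 * Illustrative sketch, not part of the original header (and assuming
 * CONFIG_KVM_ARM_VGIC): indexing the per-(VCPU, SGI) source byte array
 * described above. The helper name is hypothetical; the in-tree GICv2
 * emulation uses the same vcpu_id * VGIC_NR_SGIS + sgi computation.
 */
static inline u8 *vgic_example_sgi_sources(struct vgic_dist *dist,
                                           int vcpu_id, int sgi)
{
        /* one source byte per (VCPU, SGI) pair; VGIC_NR_SGIS == 16 */
        return &dist->irq_sgi_sources[vcpu_id * VGIC_NR_SGIS + sgi];
}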

struct vgic_v2_cpu_if {
        u32 vgic_hcr;
        u32 vgic_vmcr;
        u32 vgic_misr;  /* Saved only */
        u64 vgic_eisr;  /* Saved only */
        u64 vgic_elrsr; /* Saved only */
        u32 vgic_apr;
        u32 vgic_lr[VGIC_V2_MAX_LRS];
};

struct vgic_v3_cpu_if {
#ifdef CONFIG_ARM_GIC_V3
        u32 vgic_hcr;
        u32 vgic_vmcr;
        u32 vgic_sre;   /* Restored only, change ignored */
        u32 vgic_misr;  /* Saved only */
        u32 vgic_eisr;  /* Saved only */
        u32 vgic_elrsr; /* Saved only */
        u32 vgic_ap0r[4];
        u32 vgic_ap1r[4];
        u64 vgic_lr[VGIC_V3_MAX_LRS];
#endif
};

struct vgic_cpu {
#ifdef CONFIG_KVM_ARM_VGIC
        /* per IRQ to LR mapping */
        u8 *vgic_irq_lr_map;

        /* Pending interrupts on this VCPU */
        DECLARE_BITMAP(pending_percpu, VGIC_NR_PRIVATE_IRQS);
        unsigned long *pending_shared;

        /* Bitmap of used/free list registers */
        DECLARE_BITMAP(lr_used, VGIC_V2_MAX_LRS);

        /* Number of list registers on this CPU */
        int nr_lr;

        /* CPU vif control registers for world switch */
        union {
                struct vgic_v2_cpu_if vgic_v2;
                struct vgic_v3_cpu_if vgic_v3;
        };
#endif
};
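
/*
 * Illustrative sketch, not part of the original header (and assuming
 * CONFIG_KVM_ARM_VGIC): how the per-VCPU pending state above splits between
 * the embedded percpu bitmap (SGIs/PPIs) and the shared bitmap (SPIs).
 * Hypothetical helper name.
 */
static inline bool vgic_example_irq_pending(struct vgic_cpu *vgic_cpu, int irq)
{
        if (irq < VGIC_NR_PRIVATE_IRQS)
                return test_bit(irq, vgic_cpu->pending_percpu);

        return test_bit(irq - VGIC_NR_PRIVATE_IRQS, vgic_cpu->pending_shared);
}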

#define LR_EMPTY	0xff

#define INT_STATUS_EOI		(1 << 0)
#define INT_STATUS_UNDERFLOW	(1 << 1)

struct kvm;
struct kvm_vcpu;
struct kvm_run;
struct kvm_exit_mmio;

#ifdef CONFIG_KVM_ARM_VGIC
int kvm_vgic_addr(struct kvm *kvm, unsigned long type, u64 *addr, bool write);
int kvm_vgic_hyp_init(void);
int kvm_vgic_map_resources(struct kvm *kvm);
int kvm_vgic_get_max_vcpus(void);
int kvm_vgic_create(struct kvm *kvm, u32 type);
void kvm_vgic_destroy(struct kvm *kvm);
void kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu);
void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu);
void kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu);
int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int irq_num,
                        bool level);
void vgic_v3_dispatch_sgi(struct kvm_vcpu *vcpu, u64 reg);
int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu);
bool vgic_handle_mmio(struct kvm_vcpu *vcpu, struct kvm_run *run,
                      struct kvm_exit_mmio *mmio);

#define irqchip_in_kernel(k)	(!!((k)->arch.vgic.in_kernel))
#define vgic_initialized(k)	(!!((k)->arch.vgic.nr_cpus))
#define vgic_ready(k)		((k)->arch.vgic.ready)

int vgic_v2_probe(struct device_node *vgic_node,
                  const struct vgic_ops **ops,
                  const struct vgic_params **params);
#ifdef CONFIG_ARM_GIC_V3
int vgic_v3_probe(struct device_node *vgic_node,
                  const struct vgic_ops **ops,
                  const struct vgic_params **params);
#else
static inline int vgic_v3_probe(struct device_node *vgic_node,
                                const struct vgic_ops **ops,
                                const struct vgic_params **params)
{
        return -ENODEV;
}
#endif

#else
static inline int kvm_vgic_hyp_init(void)
{
        return 0;
}

static inline int kvm_vgic_set_addr(struct kvm *kvm, unsigned long type, u64 addr)
{
        return 0;
}

static inline int kvm_vgic_addr(struct kvm *kvm, unsigned long type, u64 *addr, bool write)
{
        return -ENXIO;
}

static inline int kvm_vgic_map_resources(struct kvm *kvm)
{
        return 0;
}

static inline int kvm_vgic_create(struct kvm *kvm, u32 type)
{
        return 0;
}

static inline void kvm_vgic_destroy(struct kvm *kvm)
{
}

static inline void kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu)
{
}

static inline int kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu)
{
        return 0;
}

static inline void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu) {}
static inline void kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu) {}

static inline int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid,
                                      unsigned int irq_num, bool level)
{
        return 0;
}

static inline int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu)
{
        return 0;
}

static inline bool vgic_handle_mmio(struct kvm_vcpu *vcpu, struct kvm_run *run,
                                    struct kvm_exit_mmio *mmio)
{
        return false;
}

static inline int irqchip_in_kernel(struct kvm *kvm)
{
        return 0;
}

static inline bool vgic_initialized(struct kvm *kvm)
{
        return true;
}

static inline bool vgic_ready(struct kvm *kvm)
{
        return true;
}

static inline int kvm_vgic_get_max_vcpus(void)
{
        return KVM_MAX_VCPUS;
}
#endif

#endif