#include <linux/dmar.h>
#include <linux/spinlock.h>
#include <linux/jiffies.h>
#include <linux/pci.h>
#include <linux/irq.h>
#include <asm/io_apic.h>
#include "intel-iommu.h"
#include "intr_remapping.h"

static struct ioapic_scope ir_ioapic[MAX_IO_APICS];
static int ir_ioapic_num;
int intr_remapping_enabled;

static struct {
        struct intel_iommu *iommu;
        u16 irte_index;
        u16 sub_handle;
        u8  irte_mask;
} irq_2_iommu[NR_IRQS];

static DEFINE_SPINLOCK(irq_2_ir_lock);
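
/*
 * Returns 1 if @irq is currently backed by an interrupt-remapping
 * table entry, 0 otherwise.
 */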
int irq_remapped(int irq)
{
        if (irq >= nr_irqs)
                return 0;

        if (!irq_2_iommu[irq].iommu)
                return 0;

        return 1;
}
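
/*
 * Copy the IRTE backing @irq into @entry. Returns 0 on success, -1 if
 * @entry is NULL, @irq is out of range, or @irq is not remapped.
 */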
int get_irte(int irq, struct irte *entry)
{
        int index;

        if (!entry || irq >= nr_irqs)
                return -1;

        spin_lock(&irq_2_ir_lock);
        if (!irq_2_iommu[irq].iommu) {
                spin_unlock(&irq_2_ir_lock);
                return -1;
        }

        index = irq_2_iommu[irq].irte_index + irq_2_iommu[irq].sub_handle;
        *entry = *(irq_2_iommu[irq].iommu->ir_table->base + index);

        spin_unlock(&irq_2_ir_lock);
        return 0;
}
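
/*
 * Find @count consecutive free IRTEs, with @count rounded up to a
 * power of two and the resulting mask recorded for later invalidation.
 * Marks the entries present and binds @irq to the block's base index.
 * Returns the base index, or -1 if the table has no free run.
 *
 * A minimal caller sketch (illustrative, not from this file):
 *        index = alloc_irte(iommu, irq, 1);
 *        if (index < 0)
 *                return -ENOSPC;
 */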
int alloc_irte(struct intel_iommu *iommu, int irq, u16 count)
{
        struct ir_table *table = iommu->ir_table;
        u16 index, start_index;
        unsigned int mask = 0;
        int i;

        if (!count)
                return -1;

        /*
         * start the IRTE search from index 0.
         */
        index = start_index = 0;

        if (count > 1) {
                count = __roundup_pow_of_two(count);
                mask = ilog2(count);
        }

        if (mask > ecap_max_handle_mask(iommu->ecap)) {
                printk(KERN_ERR
                       "Requested mask %x exceeds the max invalidation handle"
                       " mask value %Lx\n", mask,
                       ecap_max_handle_mask(iommu->ecap));
                return -1;
        }

        spin_lock(&irq_2_ir_lock);
        do {
                for (i = index; i < index + count; i++)
                        if (table->base[i].present)
                                break;
                /* empty index found */
                if (i == index + count)
                        break;

                index = (index + count) % INTR_REMAP_TABLE_ENTRIES;

                if (index == start_index) {
                        spin_unlock(&irq_2_ir_lock);
                        printk(KERN_ERR "can't allocate an IRTE\n");
                        return -1;
                }
        } while (1);

        for (i = index; i < index + count; i++)
                table->base[i].present = 1;

        irq_2_iommu[irq].iommu = iommu;
        irq_2_iommu[irq].irte_index = index;
        irq_2_iommu[irq].sub_handle = 0;
        irq_2_iommu[irq].irte_mask = mask;

        spin_unlock(&irq_2_ir_lock);

        return index;
}
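
/*
 * Queue a selective Interrupt Entry Cache invalidation covering the
 * 2^@mask IRTEs starting at @index and wait for it to complete.
 */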
static void qi_flush_iec(struct intel_iommu *iommu, int index, int mask)
{
        struct qi_desc desc;

        desc.low = QI_IEC_IIDEX(index) | QI_IEC_TYPE | QI_IEC_IM(mask)
                   | QI_IEC_SELECTIVE;
        desc.high = 0;

        qi_submit_sync(&desc, iommu);
}
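
/*
 * Return the base IRTE index for @irq and store its sub_handle in
 * @sub_handle; returns -1 if @irq is not remapped.
 */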
int map_irq_to_irte_handle(int irq, u16 *sub_handle)
{
        int index;

        spin_lock(&irq_2_ir_lock);
        if (irq >= nr_irqs || !irq_2_iommu[irq].iommu) {
                spin_unlock(&irq_2_ir_lock);
                return -1;
        }

        *sub_handle = irq_2_iommu[irq].sub_handle;
        index = irq_2_iommu[irq].irte_index;
        spin_unlock(&irq_2_ir_lock);
        return index;
}
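
/*
 * Associate @irq with an already allocated IRTE block: @index is the
 * block's base index and @subhandle the offset within the block.
 */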
int set_irte_irq(int irq, struct intel_iommu *iommu, u16 index, u16 subhandle)
{
        spin_lock(&irq_2_ir_lock);
        if (irq >= nr_irqs || irq_2_iommu[irq].iommu) {
                spin_unlock(&irq_2_ir_lock);
                return -1;
        }

        irq_2_iommu[irq].iommu = iommu;
        irq_2_iommu[irq].irte_index = index;
        irq_2_iommu[irq].sub_handle = subhandle;
        irq_2_iommu[irq].irte_mask = 0;

        spin_unlock(&irq_2_ir_lock);

        return 0;
}
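
/*
 * Drop the association between @irq and its IRTE without touching the
 * table entries themselves; the block owner clears and flushes them
 * in free_irte().
 */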
int clear_irte_irq(int irq, struct intel_iommu *iommu, u16 index)
{
        spin_lock(&irq_2_ir_lock);
        if (irq >= nr_irqs || !irq_2_iommu[irq].iommu) {
                spin_unlock(&irq_2_ir_lock);
                return -1;
        }

        irq_2_iommu[irq].iommu = NULL;
        irq_2_iommu[irq].irte_index = 0;
        irq_2_iommu[irq].sub_handle = 0;
        irq_2_iommu[irq].irte_mask = 0;

        spin_unlock(&irq_2_ir_lock);

        return 0;
}
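
/*
 * Overwrite the low half of the IRTE backing @irq with
 * @irte_modified->low, flush the cache line, and invalidate the
 * Interrupt Entry Cache for that index.
 */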
int modify_irte(int irq, struct irte *irte_modified)
{
        int index;
        struct irte *irte;
        struct intel_iommu *iommu;

        spin_lock(&irq_2_ir_lock);
        if (irq >= nr_irqs || !irq_2_iommu[irq].iommu) {
                spin_unlock(&irq_2_ir_lock);
                return -1;
        }

        iommu = irq_2_iommu[irq].iommu;

        index = irq_2_iommu[irq].irte_index + irq_2_iommu[irq].sub_handle;
        irte = &iommu->ir_table->base[index];

        set_64bit((unsigned long *)irte, irte_modified->low | (1 << 1));
        __iommu_flush_cache(iommu, irte, sizeof(*irte));

        qi_flush_iec(iommu, index, 0);

        spin_unlock(&irq_2_ir_lock);
        return 0;
}
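
/*
 * Invalidate the Interrupt Entry Cache for the whole IRTE block
 * backing @irq.
 */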
int flush_irte(int irq)
{
        int index;
        struct intel_iommu *iommu;

        spin_lock(&irq_2_ir_lock);
        if (irq >= nr_irqs || !irq_2_iommu[irq].iommu) {
                spin_unlock(&irq_2_ir_lock);
                return -1;
        }

        iommu = irq_2_iommu[irq].iommu;

        index = irq_2_iommu[irq].irte_index + irq_2_iommu[irq].sub_handle;

        qi_flush_iec(iommu, index, irq_2_iommu[irq].irte_mask);
        spin_unlock(&irq_2_ir_lock);

        return 0;
}
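
/*
 * Return the remapping hardware unit that covers the IO-APIC with the
 * given @apic id, or NULL if none was parsed from the DMAR tables.
 */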
struct intel_iommu *map_ioapic_to_ir(int apic)
{
        int i;

        for (i = 0; i < MAX_IO_APICS; i++)
                if (ir_ioapic[i].id == apic)
                        return ir_ioapic[i].iommu;
        return NULL;
}
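
/*
 * Return the remapping hardware unit whose device scope matches @dev,
 * or NULL if there is none.
 */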
struct intel_iommu *map_dev_to_ir(struct pci_dev *dev)
{
        struct dmar_drhd_unit *drhd;

        drhd = dmar_find_matched_drhd_unit(dev);
        if (!drhd)
                return NULL;

        return drhd->iommu;
}
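
/*
 * Release the IRTE block backing @irq: only the owner (sub_handle ==
 * 0) zeroes and invalidates the entries; sharers merely drop their
 * bookkeeping.
 */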
int free_irte(int irq)
{
        int index, i;
        struct irte *irte;
        struct intel_iommu *iommu;

        spin_lock(&irq_2_ir_lock);
        if (irq >= nr_irqs || !irq_2_iommu[irq].iommu) {
                spin_unlock(&irq_2_ir_lock);
                return -1;
        }

        iommu = irq_2_iommu[irq].iommu;

        index = irq_2_iommu[irq].irte_index + irq_2_iommu[irq].sub_handle;
        irte = &iommu->ir_table->base[index];

        if (!irq_2_iommu[irq].sub_handle) {
                for (i = 0; i < (1 << irq_2_iommu[irq].irte_mask); i++)
                        set_64bit((unsigned long *)(irte + i), 0);
                qi_flush_iec(iommu, index, irq_2_iommu[irq].irte_mask);
        }

        irq_2_iommu[irq].iommu = NULL;
        irq_2_iommu[irq].irte_index = 0;
        irq_2_iommu[irq].sub_handle = 0;
        irq_2_iommu[irq].irte_mask = 0;

        spin_unlock(&irq_2_ir_lock);

        return 0;
}
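
/*
 * Program the interrupt-remapping table address into the hardware,
 * globally invalidate the interrupt entry cache, and then turn on
 * interrupt-remapping via the global command register.
 */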
static void iommu_set_intr_remapping(struct intel_iommu *iommu, int mode)
{
        u64 addr;
        u32 cmd, sts;
        unsigned long flags;

        addr = virt_to_phys((void *)iommu->ir_table->base);

        spin_lock_irqsave(&iommu->register_lock, flags);

        dmar_writeq(iommu->reg + DMAR_IRTA_REG,
                    (addr) | IR_X2APIC_MODE(mode) | INTR_REMAP_TABLE_REG_SIZE);

        /* Set interrupt-remapping table pointer */
        cmd = iommu->gcmd | DMA_GCMD_SIRTP;
        writel(cmd, iommu->reg + DMAR_GCMD_REG);

        IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
                      readl, (sts & DMA_GSTS_IRTPS), sts);
        spin_unlock_irqrestore(&iommu->register_lock, flags);

        /*
         * global invalidation of interrupt entry cache before enabling
         * interrupt-remapping.
         */
        qi_global_iec(iommu);

        spin_lock_irqsave(&iommu->register_lock, flags);

        /* Enable interrupt-remapping */
        cmd = iommu->gcmd | DMA_GCMD_IRE;
        iommu->gcmd |= DMA_GCMD_IRE;
        writel(cmd, iommu->reg + DMAR_GCMD_REG);

        IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
                      readl, (sts & DMA_GSTS_IRES), sts);

        spin_unlock_irqrestore(&iommu->register_lock, flags);
}
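
/*
 * Allocate the interrupt-remapping table for @iommu and hand it to
 * the hardware.
 */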
static int setup_intr_remapping(struct intel_iommu *iommu, int mode)
{
        struct ir_table *ir_table;
        struct page *pages;

        ir_table = iommu->ir_table = kzalloc(sizeof(struct ir_table),
                                             GFP_KERNEL);
        if (!iommu->ir_table)
                return -ENOMEM;

        pages = alloc_pages(GFP_KERNEL | __GFP_ZERO, INTR_REMAP_PAGE_ORDER);
        if (!pages) {
                printk(KERN_ERR "failed to allocate pages of order %d\n",
                       INTR_REMAP_PAGE_ORDER);
                kfree(iommu->ir_table);
                return -ENOMEM;
        }

        ir_table->base = page_address(pages);

        iommu_set_intr_remapping(iommu, mode);

        return 0;
}
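
/*
 * Enable interrupt-remapping on every DRHD: verify IR (and, if @eim,
 * EIM) support, enable queued invalidation, then set up and turn on
 * the remapping tables.
 */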
int __init enable_intr_remapping(int eim)
{
        struct dmar_drhd_unit *drhd;
        int setup = 0;

        /*
         * check for the Interrupt-remapping support
         */
        for_each_drhd_unit(drhd) {
                struct intel_iommu *iommu = drhd->iommu;

                if (!ecap_ir_support(iommu->ecap))
                        continue;

                if (eim && !ecap_eim_support(iommu->ecap)) {
                        printk(KERN_INFO "DRHD %Lx: EIM not supported by "
                               "DRHD, ecap %Lx\n",
                               drhd->reg_base_addr, iommu->ecap);
                        return -1;
                }
        }

        /*
         * Enable queued invalidation for all the DRHD's.
         */
        for_each_drhd_unit(drhd) {
                int ret;
                struct intel_iommu *iommu = drhd->iommu;

                ret = dmar_enable_qi(iommu);
                if (ret) {
                        printk(KERN_ERR "DRHD %Lx: failed to enable queued "
                               "invalidation, ecap %Lx, ret %d\n",
                               drhd->reg_base_addr, iommu->ecap, ret);
                        return -1;
                }
        }

        /*
         * Setup Interrupt-remapping for all the DRHD's now.
         */
        for_each_drhd_unit(drhd) {
                struct intel_iommu *iommu = drhd->iommu;

                if (!ecap_ir_support(iommu->ecap))
                        continue;

                if (setup_intr_remapping(iommu, eim))
                        goto error;

                setup = 1;
        }

        if (!setup)
                goto error;

        intr_remapping_enabled = 1;

        return 0;

error:
        /*
         * handle error condition gracefully here!
         */
        return -1;
}
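
/*
 * Walk the device scope entries of a DRHD and record, in ir_ioapic[],
 * which IO-APICs are covered by @iommu.
 */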
static int ir_parse_ioapic_scope(struct acpi_dmar_header *header,
                                 struct intel_iommu *iommu)
{
        struct acpi_dmar_hardware_unit *drhd;
        struct acpi_dmar_device_scope *scope;
        void *start, *end;

        drhd = (struct acpi_dmar_hardware_unit *)header;

        start = (void *)(drhd + 1);
        end = ((void *)drhd) + header->length;

        while (start < end) {
                scope = start;
                if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_IOAPIC) {
                        if (ir_ioapic_num == MAX_IO_APICS) {
                                printk(KERN_WARNING "Exceeded Max IO APICS\n");
                                return -1;
                        }

                        printk(KERN_INFO "IOAPIC id %d under DRHD base"
                               " 0x%Lx\n", scope->enumeration_id,
                               drhd->address);

                        ir_ioapic[ir_ioapic_num].iommu = iommu;
                        ir_ioapic[ir_ioapic_num].id = scope->enumeration_id;
                        ir_ioapic_num++;
                }
                start += scope->length;
        }

        return 0;
}

/*
 * Finds the association between IOAPICs and their Interrupt-remapping
 * hardware units.
 */
int __init parse_ioapics_under_ir(void)
{
        struct dmar_drhd_unit *drhd;
        int ir_supported = 0;

        for_each_drhd_unit(drhd) {
                struct intel_iommu *iommu = drhd->iommu;

                if (ecap_ir_support(iommu->ecap)) {
                        if (ir_parse_ioapic_scope(drhd->hdr, iommu))
                                return -1;

                        ir_supported = 1;
                }
        }

        if (ir_supported && ir_ioapic_num != nr_ioapics) {
                printk(KERN_WARNING
                       "Not all IO-APIC's listed under remapping hardware\n");
                return -1;
        }

        return ir_supported;
}