#include <linux/interrupt.h>
#include <linux/dmar.h>
#include <linux/spinlock.h>
#include <linux/jiffies.h>
#include <linux/pci.h>
#include <linux/irq.h>
#include <asm/io_apic.h>
#include "intel-iommu.h"
#include "intr_remapping.h"

static struct ioapic_scope ir_ioapic[MAX_IO_APICS];
static int ir_ioapic_num;
int intr_remapping_enabled;

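/*
 * Per-irq bookkeeping: the remapping hardware unit serving this irq,
 * the base index of its interrupt remap table entry (IRTE), the
 * sub_handle offset used when several irqs share one IRTE block, and
 * the allocation mask (log2 of the number of contiguous IRTEs).
 */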
struct irq_2_iommu {
	struct intel_iommu *iommu;
	u16 irte_index;
	u16 sub_handle;
	u8  irte_mask;
};

#ifdef CONFIG_HAVE_DYN_ARRAY
static struct irq_2_iommu *irq_2_iommu;
DEFINE_DYN_ARRAY(irq_2_iommu, sizeof(struct irq_2_iommu), nr_irqs, PAGE_SIZE, NULL);
#else
static struct irq_2_iommu irq_2_iommu[NR_IRQS];
#endif

static DEFINE_SPINLOCK(irq_2_ir_lock);

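/* Return 1 if the irq has an IRTE assigned to it, 0 otherwise. */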
int irq_remapped(int irq)
{
	if (irq > nr_irqs)
		return 0;

	if (!irq_2_iommu[irq].iommu)
		return 0;

	return 1;
}

int get_irte(int irq, struct irte *entry)
{
	int index;

	if (!entry || irq > nr_irqs)
		return -1;

	spin_lock(&irq_2_ir_lock);
	if (!irq_2_iommu[irq].iommu) {
		spin_unlock(&irq_2_ir_lock);
		return -1;
	}

	index = irq_2_iommu[irq].irte_index + irq_2_iommu[irq].sub_handle;
	*entry = *(irq_2_iommu[irq].iommu->ir_table->base + index);

	spin_unlock(&irq_2_ir_lock);
	return 0;
}

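/*
 * Allocate 'count' contiguous IRTEs for an irq, rounding count up to a
 * power of two so the whole block can later be invalidated with a single
 * masked IEC flush. Returns the starting index on success, -1 on failure.
 * Sibling irqs can then share the block via set_irte_irq() with a
 * non-zero sub_handle.
 */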
int alloc_irte(struct intel_iommu *iommu, int irq, u16 count)
{
	struct ir_table *table = iommu->ir_table;
	u16 index, start_index;
	unsigned int mask = 0;
	int i;

	if (!count)
		return -1;

	/*
	 * start the IRTE search from index 0.
	 */
	index = start_index = 0;

	if (count > 1) {
		count = __roundup_pow_of_two(count);
		mask = ilog2(count);
	}

	if (mask > ecap_max_handle_mask(iommu->ecap)) {
		printk(KERN_ERR
		       "Requested mask %x exceeds the max invalidation handle"
		       " mask value %Lx\n", mask,
		       ecap_max_handle_mask(iommu->ecap));
		return -1;
	}

	spin_lock(&irq_2_ir_lock);
	do {
		for (i = index; i < index + count; i++)
			if (table->base[i].present)
				break;
		/* empty index found */
		if (i == index + count)
			break;

		index = (index + count) % INTR_REMAP_TABLE_ENTRIES;

		if (index == start_index) {
			spin_unlock(&irq_2_ir_lock);
			printk(KERN_ERR "can't allocate an IRTE\n");
			return -1;
		}
	} while (1);

	for (i = index; i < index + count; i++)
		table->base[i].present = 1;

	irq_2_iommu[irq].iommu = iommu;
	irq_2_iommu[irq].irte_index = index;
	irq_2_iommu[irq].sub_handle = 0;
	irq_2_iommu[irq].irte_mask = mask;

	spin_unlock(&irq_2_ir_lock);

	return index;
}

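/*
 * Queue a selective interrupt-entry-cache invalidation for the IRTEs
 * covered by 'index' and 'mask', and wait for it to complete.
 */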
static void qi_flush_iec(struct intel_iommu *iommu, int index, int mask)
{
	struct qi_desc desc;

	desc.low = QI_IEC_IIDEX(index) | QI_IEC_TYPE | QI_IEC_IM(mask)
		   | QI_IEC_SELECTIVE;
	desc.high = 0;

	qi_submit_sync(&desc, iommu);
}

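/*
 * Look up the IRTE base index assigned to an irq and report its
 * sub_handle through *sub_handle.
 */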
int map_irq_to_irte_handle(int irq, u16 *sub_handle)
{
	int index;

	spin_lock(&irq_2_ir_lock);
	if (irq >= nr_irqs || !irq_2_iommu[irq].iommu) {
		spin_unlock(&irq_2_ir_lock);
		return -1;
	}

	*sub_handle = irq_2_iommu[irq].sub_handle;
	index = irq_2_iommu[irq].irte_index;
	spin_unlock(&irq_2_ir_lock);
	return index;
}

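/*
 * Attach an irq to an already-allocated IRTE block at the given index
 * and subhandle; fails if the irq already has an IRTE.
 */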
int set_irte_irq(int irq, struct intel_iommu *iommu, u16 index, u16 subhandle)
{
	spin_lock(&irq_2_ir_lock);
	if (irq >= nr_irqs || irq_2_iommu[irq].iommu) {
		spin_unlock(&irq_2_ir_lock);
		return -1;
	}

	irq_2_iommu[irq].iommu = iommu;
	irq_2_iommu[irq].irte_index = index;
	irq_2_iommu[irq].sub_handle = subhandle;
	irq_2_iommu[irq].irte_mask = 0;

	spin_unlock(&irq_2_ir_lock);

	return 0;
}

int clear_irte_irq(int irq, struct intel_iommu *iommu, u16 index)
{
	spin_lock(&irq_2_ir_lock);
	if (irq >= nr_irqs || !irq_2_iommu[irq].iommu) {
		spin_unlock(&irq_2_ir_lock);
		return -1;
	}

	irq_2_iommu[irq].iommu = NULL;
	irq_2_iommu[irq].irte_index = 0;
	irq_2_iommu[irq].sub_handle = 0;
	irq_2_iommu[irq].irte_mask = 0;

	spin_unlock(&irq_2_ir_lock);

	return 0;
}

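/*
 * Rewrite the IRTE for an irq in place, flush the cached copy, and
 * invalidate the hardware's interrupt entry cache for that index.
 */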
int modify_irte(int irq, struct irte *irte_modified)
{
	int index;
	struct irte *irte;
	struct intel_iommu *iommu;

	spin_lock(&irq_2_ir_lock);
	if (irq >= nr_irqs || !irq_2_iommu[irq].iommu) {
		spin_unlock(&irq_2_ir_lock);
		return -1;
	}

	iommu = irq_2_iommu[irq].iommu;

	index = irq_2_iommu[irq].irte_index + irq_2_iommu[irq].sub_handle;
	irte = &iommu->ir_table->base[index];

	set_64bit((unsigned long *)irte, irte_modified->low | (1 << 1));
	__iommu_flush_cache(iommu, irte, sizeof(*irte));

	qi_flush_iec(iommu, index, 0);

	spin_unlock(&irq_2_ir_lock);
	return 0;
}

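/* Invalidate the cached interrupt entries for all IRTEs backing an irq. */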
int flush_irte(int irq)
{
	int index;
	struct intel_iommu *iommu;

	spin_lock(&irq_2_ir_lock);
	if (irq >= nr_irqs || !irq_2_iommu[irq].iommu) {
		spin_unlock(&irq_2_ir_lock);
		return -1;
	}

	iommu = irq_2_iommu[irq].iommu;

	index = irq_2_iommu[irq].irte_index + irq_2_iommu[irq].sub_handle;

	qi_flush_iec(iommu, index, irq_2_iommu[irq].irte_mask);
	spin_unlock(&irq_2_ir_lock);

	return 0;
}

struct intel_iommu *map_ioapic_to_ir(int apic)
{
	int i;

	for (i = 0; i < MAX_IO_APICS; i++)
		if (ir_ioapic[i].id == apic)
			return ir_ioapic[i].iommu;
	return NULL;
}

struct intel_iommu *map_dev_to_ir(struct pci_dev *dev)
{
	struct dmar_drhd_unit *drhd;

	drhd = dmar_find_matched_drhd_unit(dev);
	if (!drhd)
		return NULL;

	return drhd->iommu;
}

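/*
 * Release the IRTEs backing an irq. Only the sub_handle 0 owner clears
 * the entries and flushes the entry cache; sharers just drop their
 * reference to the block.
 */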
int free_irte(int irq)
{
	int index, i;
	struct irte *irte;
	struct intel_iommu *iommu;

	spin_lock(&irq_2_ir_lock);
	if (irq >= nr_irqs || !irq_2_iommu[irq].iommu) {
		spin_unlock(&irq_2_ir_lock);
		return -1;
	}

	iommu = irq_2_iommu[irq].iommu;

	index = irq_2_iommu[irq].irte_index + irq_2_iommu[irq].sub_handle;
	irte = &iommu->ir_table->base[index];

	if (!irq_2_iommu[irq].sub_handle) {
		for (i = 0; i < (1 << irq_2_iommu[irq].irte_mask); i++)
			set_64bit((unsigned long *)(irte + i), 0);
		qi_flush_iec(iommu, index, irq_2_iommu[irq].irte_mask);
	}

	irq_2_iommu[irq].iommu = NULL;
	irq_2_iommu[irq].irte_index = 0;
	irq_2_iommu[irq].sub_handle = 0;
	irq_2_iommu[irq].irte_mask = 0;

	spin_unlock(&irq_2_ir_lock);

	return 0;
}

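/*
 * Program the interrupt remap table address into the hardware, flush
 * the interrupt entry cache globally, and then turn remapping on.
 */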
static void iommu_set_intr_remapping(struct intel_iommu *iommu, int mode)
{
	u64 addr;
	u32 cmd, sts;
	unsigned long flags;

	addr = virt_to_phys((void *)iommu->ir_table->base);

	spin_lock_irqsave(&iommu->register_lock, flags);

	dmar_writeq(iommu->reg + DMAR_IRTA_REG,
		    (addr) | IR_X2APIC_MODE(mode) | INTR_REMAP_TABLE_REG_SIZE);

	/* Set interrupt-remapping table pointer */
	cmd = iommu->gcmd | DMA_GCMD_SIRTP;
	writel(cmd, iommu->reg + DMAR_GCMD_REG);

	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (sts & DMA_GSTS_IRTPS), sts);
	spin_unlock_irqrestore(&iommu->register_lock, flags);

	/*
	 * global invalidation of interrupt entry cache before enabling
	 * interrupt-remapping.
	 */
	qi_global_iec(iommu);

	spin_lock_irqsave(&iommu->register_lock, flags);

	/* Enable interrupt-remapping */
	cmd = iommu->gcmd | DMA_GCMD_IRE;
	iommu->gcmd |= DMA_GCMD_IRE;
	writel(cmd, iommu->reg + DMAR_GCMD_REG);

	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (sts & DMA_GSTS_IRES), sts);

	spin_unlock_irqrestore(&iommu->register_lock, flags);
}

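/* Allocate this iommu's interrupt remap table and switch remapping on. */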
static int setup_intr_remapping(struct intel_iommu *iommu, int mode)
{
	struct ir_table *ir_table;
	struct page *pages;

	ir_table = iommu->ir_table = kzalloc(sizeof(struct ir_table),
					     GFP_KERNEL);

	if (!iommu->ir_table)
		return -ENOMEM;

	pages = alloc_pages(GFP_KERNEL | __GFP_ZERO, INTR_REMAP_PAGE_ORDER);

	if (!pages) {
		printk(KERN_ERR "failed to allocate pages of order %d\n",
		       INTR_REMAP_PAGE_ORDER);
		kfree(iommu->ir_table);
		return -ENOMEM;
	}

	ir_table->base = page_address(pages);

	iommu_set_intr_remapping(iommu, mode);

	return 0;
}

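/*
 * Enable interrupt remapping in three passes over the DRHD units:
 * verify hardware support (including EIM when x2apic mode is asked
 * for), enable queued invalidation everywhere, then set up remapping
 * on each unit.
 */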
int __init enable_intr_remapping(int eim)
{
	struct dmar_drhd_unit *drhd;
	int setup = 0;

	/*
	 * check for the Interrupt-remapping support
	 */
	for_each_drhd_unit(drhd) {
		struct intel_iommu *iommu = drhd->iommu;

		if (!ecap_ir_support(iommu->ecap))
			continue;

		if (eim && !ecap_eim_support(iommu->ecap)) {
			printk(KERN_INFO "DRHD %Lx: EIM not supported, "
			       "ecap %Lx\n", drhd->reg_base_addr, iommu->ecap);
			return -1;
		}
	}

	/*
	 * Enable queued invalidation for all the DRHD's.
	 */
	for_each_drhd_unit(drhd) {
		int ret;
		struct intel_iommu *iommu = drhd->iommu;

		ret = dmar_enable_qi(iommu);
		if (ret) {
			printk(KERN_ERR "DRHD %Lx: failed to enable queued "
			       "invalidation, ecap %Lx, ret %d\n",
			       drhd->reg_base_addr, iommu->ecap, ret);
			return -1;
		}
	}

	/*
	 * Setup Interrupt-remapping for all the DRHD's now.
	 */
	for_each_drhd_unit(drhd) {
		struct intel_iommu *iommu = drhd->iommu;

		if (!ecap_ir_support(iommu->ecap))
			continue;

		if (setup_intr_remapping(iommu, eim))
			goto error;

		setup = 1;
	}

	if (!setup)
		goto error;

	intr_remapping_enabled = 1;

	return 0;

error:
	/*
	 * handle error condition gracefully here!
	 */
	return -1;
}

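/*
 * Walk the device scope entries of a DRHD and record which IO-APICs
 * are covered by this remapping hardware unit.
 */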
static int ir_parse_ioapic_scope(struct acpi_dmar_header *header,
				 struct intel_iommu *iommu)
{
	struct acpi_dmar_hardware_unit *drhd;
	struct acpi_dmar_device_scope *scope;
	void *start, *end;

	drhd = (struct acpi_dmar_hardware_unit *)header;

	start = (void *)(drhd + 1);
	end = ((void *)drhd) + header->length;

	while (start < end) {
		scope = start;
		if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_IOAPIC) {
			if (ir_ioapic_num == MAX_IO_APICS) {
				printk(KERN_WARNING "Exceeded Max IO APICS\n");
				return -1;
			}

			printk(KERN_INFO "IOAPIC id %d under DRHD base"
			       " 0x%Lx\n", scope->enumeration_id,
			       drhd->address);

			ir_ioapic[ir_ioapic_num].iommu = iommu;
			ir_ioapic[ir_ioapic_num].id = scope->enumeration_id;
			ir_ioapic_num++;
		}
		start += scope->length;
	}

	return 0;
}

/*
 * Find the association between IO-APICs and their Interrupt-remapping
 * hardware units.
 */
int __init parse_ioapics_under_ir(void)
{
	struct dmar_drhd_unit *drhd;
	int ir_supported = 0;

	for_each_drhd_unit(drhd) {
		struct intel_iommu *iommu = drhd->iommu;

		if (ecap_ir_support(iommu->ecap)) {
			if (ir_parse_ioapic_scope(drhd->hdr, iommu))
				return -1;

			ir_supported = 1;
		}
	}

	if (ir_supported && ir_ioapic_num != nr_ioapics) {
		printk(KERN_WARNING
		       "Not all IO-APIC's listed under remapping hardware\n");
		return -1;
	}

	return ir_supported;
}