/*
 * Copyright (C) 2007-2010 Advanced Micro Devices, Inc.
 * Author: Joerg Roedel <jroedel@suse.de>
 *         Leo Duran <leo.duran@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */
#include <linux/pci.h>
#include <linux/acpi.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/syscore_ops.h>
#include <linux/interrupt.h>
#include <linux/msi.h>
#include <linux/amd-iommu.h>
#include <linux/export.h>
#include <linux/iommu.h>
#include <asm/pci-direct.h>
#include <asm/iommu.h>
#include <asm/x86_init.h>
#include <asm/iommu_table.h>
#include <asm/io_apic.h>
#include <asm/irq_remapping.h>

#include "amd_iommu_proto.h"
#include "amd_iommu_types.h"
#include "irq_remapping.h"
/*
 * definitions for the ACPI scanning code
 */
#define IVRS_HEADER_LENGTH 48

#define ACPI_IVHD_TYPE                  0x10
#define ACPI_IVMD_TYPE_ALL              0x20
#define ACPI_IVMD_TYPE                  0x21
#define ACPI_IVMD_TYPE_RANGE            0x22

#define IVHD_DEV_ALL                    0x01
#define IVHD_DEV_SELECT                 0x02
#define IVHD_DEV_SELECT_RANGE_START     0x03
#define IVHD_DEV_RANGE_END              0x04
#define IVHD_DEV_ALIAS                  0x42
#define IVHD_DEV_ALIAS_RANGE            0x43
#define IVHD_DEV_EXT_SELECT             0x46
#define IVHD_DEV_EXT_SELECT_RANGE       0x47
#define IVHD_DEV_SPECIAL                0x48

#define IVHD_SPECIAL_IOAPIC             1
#define IVHD_SPECIAL_HPET               2

#define IVHD_FLAG_HT_TUN_EN_MASK        0x01
#define IVHD_FLAG_PASSPW_EN_MASK        0x02
#define IVHD_FLAG_RESPASSPW_EN_MASK     0x04
#define IVHD_FLAG_ISOC_EN_MASK          0x08

#define IVMD_FLAG_EXCL_RANGE            0x08
#define IVMD_FLAG_UNITY_MAP             0x01

#define ACPI_DEVFLAG_INITPASS           0x01
#define ACPI_DEVFLAG_EXTINT             0x02
#define ACPI_DEVFLAG_NMI                0x04
#define ACPI_DEVFLAG_SYSMGT1            0x10
#define ACPI_DEVFLAG_SYSMGT2            0x20
#define ACPI_DEVFLAG_LINT0              0x40
#define ACPI_DEVFLAG_LINT1              0x80
#define ACPI_DEVFLAG_ATSDIS             0x10000000
/*
 * ACPI table definitions
 *
 * These data structures are laid over the table to parse the important values
 * out of it.
 */

/*
 * structure describing one IOMMU in the ACPI table. Typically followed by one
 * or more ivhd_entrys.
 */
struct ivhd_header {
	u8 type;
	u8 flags;
	u16 length;
	u16 devid;
	u16 cap_ptr;
	u64 mmio_phys;
	u16 pci_seg;
	u16 info;
	u32 efr;
} __attribute__((packed));
/*
 * A device entry describing which devices a specific IOMMU translates and
 * which requestor ids they use.
 */
struct ivhd_entry {
	u8 type;
	u16 devid;
	u8 flags;
	u32 ext;
} __attribute__((packed));
/*
 * An AMD IOMMU memory definition structure. It defines things like exclusion
 * ranges for devices and regions that should be unity mapped.
 */
struct ivmd_header {
	u8 type;
	u8 flags;
	u16 length;
	u16 devid;
	u16 aux;
	u64 resv;
	u64 range_start;
	u64 range_length;
} __attribute__((packed));
bool amd_iommu_irq_remap __read_mostly;

static bool amd_iommu_detected;
static bool __initdata amd_iommu_disabled;

u16 amd_iommu_last_bdf;			/* largest PCI device id we have
					   to handle */
LIST_HEAD(amd_iommu_unity_map);		/* a list of required unity mappings
					   we find in ACPI */
u32 amd_iommu_unmap_flush;		/* if true, flush on every unmap */

LIST_HEAD(amd_iommu_list);		/* list of all AMD IOMMUs in the
					   system */

/* Array to assign indices to IOMMUs*/
struct amd_iommu *amd_iommus[MAX_IOMMUS];
int amd_iommus_present;

/* IOMMUs have a non-present cache? */
bool amd_iommu_np_cache __read_mostly;
bool amd_iommu_iotlb_sup __read_mostly = true;

u32 amd_iommu_max_pasid __read_mostly = ~0;

bool amd_iommu_v2_present __read_mostly;
static bool amd_iommu_pc_present __read_mostly;

bool amd_iommu_force_isolation __read_mostly;

/*
 * List of protection domains - used during resume
 */
LIST_HEAD(amd_iommu_pd_list);
spinlock_t amd_iommu_pd_lock;
/*
 * Pointer to the device table which is shared by all AMD IOMMUs
 * it is indexed by the PCI device id or the HT unit id and contains
 * information about the domain the device belongs to as well as the
 * page table root pointer.
 */
struct dev_table_entry *amd_iommu_dev_table;

/*
 * The alias table is a driver specific data structure which contains the
 * mappings of the PCI device ids to the actual requestor ids on the IOMMU.
 * More than one device can share the same requestor id.
 */
u16 *amd_iommu_alias_table;

/*
 * The rlookup table is used to find the IOMMU which is responsible
 * for a specific device. It is also indexed by the PCI device id.
 */
struct amd_iommu **amd_iommu_rlookup_table;

/*
 * This table is used to find the irq remapping table for a given device id
 * quickly.
 */
struct irq_remap_table **irq_lookup_table;

/*
 * AMD IOMMU allows up to 2^16 different protection domains. This is a bitmap
 * to know which ones are already in use.
 */
unsigned long *amd_iommu_pd_alloc_bitmap;

static u32 dev_table_size;	/* size of the device table */
static u32 alias_table_size;	/* size of the alias table */
static u32 rlookup_table_size;	/* size of the rlookup table */
enum iommu_init_state {
	IOMMU_START_STATE,
	IOMMU_IVRS_DETECTED,
	IOMMU_ACPI_FINISHED,
	IOMMU_ENABLED,
	IOMMU_PCI_INIT,
	IOMMU_INTERRUPTS_EN,
	IOMMU_DMA_OPS,
	IOMMU_INITIALIZED,
	IOMMU_NOT_FOUND,
	IOMMU_INIT_ERROR,
};
/* Early ioapic and hpet maps from kernel command line */
#define EARLY_MAP_SIZE		4
static struct devid_map __initdata early_ioapic_map[EARLY_MAP_SIZE];
static struct devid_map __initdata early_hpet_map[EARLY_MAP_SIZE];
static int __initdata early_ioapic_map_size;
static int __initdata early_hpet_map_size;
static bool __initdata cmdline_maps;

static enum iommu_init_state init_state = IOMMU_START_STATE;
static int amd_iommu_enable_interrupts(void);
static int __init iommu_go_to_state(enum iommu_init_state state);
static void init_device_table_dma(void);
static inline void update_last_devid(u16 devid)
{
	if (devid > amd_iommu_last_bdf)
		amd_iommu_last_bdf = devid;
}
static inline unsigned long tbl_size(int entry_size)
{
	unsigned shift = PAGE_SHIFT +
			 get_order(((int)amd_iommu_last_bdf + 1) * entry_size);

	return 1UL << shift;
}
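
/*
 * Worked example for tbl_size(): with amd_iommu_last_bdf = 0xffff and a
 * 32-byte device table entry, (0xffff + 1) * 32 = 2MB, so tbl_size()
 * returns 1UL << (PAGE_SHIFT + get_order(2MB)) = 2MB. The 2-byte alias
 * table and the pointer-sized rlookup table are sized with the same
 * formula, i.e. all shared tables scale with the highest device id found.
 */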
/* Access to l1 and l2 indexed register spaces */

static u32 iommu_read_l1(struct amd_iommu *iommu, u16 l1, u8 address)
{
	u32 val;

	pci_write_config_dword(iommu->dev, 0xf8, (address | l1 << 16));
	pci_read_config_dword(iommu->dev, 0xfc, &val);
	return val;
}

static void iommu_write_l1(struct amd_iommu *iommu, u16 l1, u8 address, u32 val)
{
	pci_write_config_dword(iommu->dev, 0xf8, (address | l1 << 16 | 1 << 31));
	pci_write_config_dword(iommu->dev, 0xfc, val);
	pci_write_config_dword(iommu->dev, 0xf8, (address | l1 << 16));
}

static u32 iommu_read_l2(struct amd_iommu *iommu, u8 address)
{
	u32 val;

	pci_write_config_dword(iommu->dev, 0xf0, address);
	pci_read_config_dword(iommu->dev, 0xf4, &val);
	return val;
}

static void iommu_write_l2(struct amd_iommu *iommu, u8 address, u32 val)
{
	pci_write_config_dword(iommu->dev, 0xf0, (address | 1 << 8));
	pci_write_config_dword(iommu->dev, 0xf4, val);
}
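
/*
 * Note on the four helpers above: the L1/L2 register files are not directly
 * memory-mapped. Software writes the register address (for L1 accesses the
 * L1 function number goes into bits 16..31) to an index port in PCI config
 * space (0xf8 for L1, 0xf0 for L2) and then moves the data through the
 * matching data port (0xfc resp. 0xf4). Setting the write-enable bit in the
 * index word (bit 31 for L1, bit 8 for L2) turns the following data-port
 * access into a write instead of a read.
 */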
/****************************************************************************
 *
 * AMD IOMMU MMIO register space handling functions
 *
 * These functions are used to program the IOMMU device registers in
 * MMIO space required for that driver.
 *
 ****************************************************************************/

/*
 * This function set the exclusion range in the IOMMU. DMA accesses to the
 * exclusion range are passed through untranslated
 */
static void iommu_set_exclusion_range(struct amd_iommu *iommu)
{
	u64 start = iommu->exclusion_start & PAGE_MASK;
	u64 limit = (start + iommu->exclusion_length) & PAGE_MASK;
	u64 entry;

	if (!iommu->exclusion_start)
		return;

	entry = start | MMIO_EXCL_ENABLE_MASK;
	memcpy_toio(iommu->mmio_base + MMIO_EXCL_BASE_OFFSET,
			&entry, sizeof(entry));

	entry = limit;
	memcpy_toio(iommu->mmio_base + MMIO_EXCL_LIMIT_OFFSET,
			&entry, sizeof(entry));
}
/* Programs the physical address of the device table into the IOMMU hardware */
static void iommu_set_device_table(struct amd_iommu *iommu)
{
	u64 entry;

	BUG_ON(iommu->mmio_base == NULL);

	entry = virt_to_phys(amd_iommu_dev_table);
	entry |= (dev_table_size >> 12) - 1;
	memcpy_toio(iommu->mmio_base + MMIO_DEV_TABLE_OFFSET,
			&entry, sizeof(entry));
}
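
/*
 * The low bits of the device table base address register encode the table
 * size in 4K pages, minus one. Example: the 2MB device table from the
 * tbl_size() example above yields (0x200000 >> 12) - 1 = 0x1ff.
 */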
/* Generic functions to enable/disable certain features of the IOMMU. */
static void iommu_feature_enable(struct amd_iommu *iommu, u8 bit)
{
	u32 ctrl;

	ctrl = readl(iommu->mmio_base + MMIO_CONTROL_OFFSET);
	ctrl |= (1 << bit);
	writel(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET);
}

static void iommu_feature_disable(struct amd_iommu *iommu, u8 bit)
{
	u32 ctrl;

	ctrl = readl(iommu->mmio_base + MMIO_CONTROL_OFFSET);
	ctrl &= ~(1 << bit);
	writel(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET);
}

static void iommu_set_inv_tlb_timeout(struct amd_iommu *iommu, int timeout)
{
	u32 ctrl;

	ctrl = readl(iommu->mmio_base + MMIO_CONTROL_OFFSET);
	ctrl &= ~CTRL_INV_TO_MASK;
	ctrl |= (timeout << CONTROL_INV_TIMEOUT) & CTRL_INV_TO_MASK;
	writel(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET);
}
/* Function to enable the hardware */
static void iommu_enable(struct amd_iommu *iommu)
{
	iommu_feature_enable(iommu, CONTROL_IOMMU_EN);
}

static void iommu_disable(struct amd_iommu *iommu)
{
	/* Disable command buffer */
	iommu_feature_disable(iommu, CONTROL_CMDBUF_EN);

	/* Disable event logging and event interrupts */
	iommu_feature_disable(iommu, CONTROL_EVT_INT_EN);
	iommu_feature_disable(iommu, CONTROL_EVT_LOG_EN);

	/* Disable IOMMU hardware itself */
	iommu_feature_disable(iommu, CONTROL_IOMMU_EN);
}
/*
 * mapping and unmapping functions for the IOMMU MMIO space. Each AMD IOMMU in
 * the system has one.
 */
static u8 __iomem * __init iommu_map_mmio_space(u64 address, u64 end)
{
	if (!request_mem_region(address, end, "amd_iommu")) {
		pr_err("AMD-Vi: Can not reserve memory region %llx-%llx for mmio\n",
			address, end);
		pr_err("AMD-Vi: This is a BIOS bug. Please contact your hardware vendor\n");
		return NULL;
	}

	return (u8 __iomem *)ioremap_nocache(address, end);
}

static void __init iommu_unmap_mmio_space(struct amd_iommu *iommu)
{
	if (iommu->mmio_base)
		iounmap(iommu->mmio_base);
	release_mem_region(iommu->mmio_phys, iommu->mmio_phys_end);
}
/****************************************************************************
 *
 * The functions below belong to the first pass of AMD IOMMU ACPI table
 * parsing. In this pass we try to find out the highest device id this
 * code has to handle. Upon this information the size of the shared data
 * structures is determined later.
 *
 ****************************************************************************/

/*
 * This function calculates the length of a given IVHD entry
 */
static inline int ivhd_entry_length(u8 *ivhd)
{
	return 0x04 << (*ivhd >> 6);
}
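
/*
 * The two top bits of the entry type byte select the entry size, so e.g.
 * a 4-byte entry like IVHD_DEV_SELECT gives 0x04 << (0x02 >> 6) = 4 and an
 * 8-byte entry like IVHD_DEV_ALIAS gives 0x04 << (0x42 >> 6) = 8.
 */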
/*
 * This function reads the last device id the IOMMU has to handle from the PCI
 * capability header for this IOMMU
 */
static int __init find_last_devid_on_pci(int bus, int dev, int fn, int cap_ptr)
{
	u32 cap;

	cap = read_pci_config(bus, dev, fn, cap_ptr+MMIO_RANGE_OFFSET);
	update_last_devid(PCI_DEVID(MMIO_GET_BUS(cap), MMIO_GET_LD(cap)));

	return 0;
}
/*
 * After reading the highest device id from the IOMMU PCI capability header
 * this function looks if there is a higher device id defined in the ACPI table
 */
static int __init find_last_devid_from_ivhd(struct ivhd_header *h)
{
	u8 *p = (void *)h, *end = (void *)h;
	struct ivhd_entry *dev;

	p += sizeof(*h);
	end += h->length;

	find_last_devid_on_pci(PCI_BUS_NUM(h->devid),
			PCI_SLOT(h->devid),
			PCI_FUNC(h->devid),
			h->cap_ptr);

	while (p < end) {
		dev = (struct ivhd_entry *)p;
		switch (dev->type) {
		case IVHD_DEV_SELECT:
		case IVHD_DEV_RANGE_END:
		case IVHD_DEV_ALIAS:
		case IVHD_DEV_EXT_SELECT:
			/* all the above subfield types refer to device ids */
			update_last_devid(dev->devid);
			break;
		default:
			break;
		}
		p += ivhd_entry_length(p);
	}

	WARN_ON(p != end);

	return 0;
}
/*
 * Iterate over all IVHD entries in the ACPI table and find the highest device
 * id which we need to handle. This is the first of three functions which parse
 * the ACPI table. So we check the checksum here.
 */
static int __init find_last_devid_acpi(struct acpi_table_header *table)
{
	int i;
	u8 checksum = 0, *p = (u8 *)table, *end = (u8 *)table;
	struct ivhd_header *h;

	/*
	 * Validate checksum here so we don't need to do it when
	 * we actually parse the table
	 */
	for (i = 0; i < table->length; ++i)
		checksum += p[i];
	if (checksum != 0)
		/* ACPI table corrupt */
		return -ENODEV;

	p += IVRS_HEADER_LENGTH;

	end += table->length;
	while (p < end) {
		h = (struct ivhd_header *)p;
		switch (h->type) {
		case ACPI_IVHD_TYPE:
			find_last_devid_from_ivhd(h);
			break;
		default:
			break;
		}
		p += h->length;
	}
	WARN_ON(p != end);

	return 0;
}
/****************************************************************************
 *
 * The following functions belong to the code path which parses the ACPI table
 * the second time. In this ACPI parsing iteration we allocate IOMMU specific
 * data structures, initialize the device/alias/rlookup table and also
 * basically initialize the hardware.
 *
 ****************************************************************************/

/*
 * Allocates the command buffer. This buffer is per AMD IOMMU. We can
 * write commands to that buffer later and the IOMMU will execute them
 * asynchronously
 */
static u8 * __init alloc_command_buffer(struct amd_iommu *iommu)
{
	u8 *cmd_buf = (u8 *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
			get_order(CMD_BUFFER_SIZE));

	if (cmd_buf == NULL)
		return NULL;

	iommu->cmd_buf_size = CMD_BUFFER_SIZE | CMD_BUFFER_UNINITIALIZED;

	return cmd_buf;
}
/*
 * This function resets the command buffer if the IOMMU stopped fetching
 * commands from it.
 */
void amd_iommu_reset_cmd_buffer(struct amd_iommu *iommu)
{
	iommu_feature_disable(iommu, CONTROL_CMDBUF_EN);

	writel(0x00, iommu->mmio_base + MMIO_CMD_HEAD_OFFSET);
	writel(0x00, iommu->mmio_base + MMIO_CMD_TAIL_OFFSET);

	iommu_feature_enable(iommu, CONTROL_CMDBUF_EN);
}
/*
 * This function writes the command buffer address to the hardware and
 * enables it.
 */
static void iommu_enable_command_buffer(struct amd_iommu *iommu)
{
	u64 entry;

	BUG_ON(iommu->cmd_buf == NULL);

	entry = (u64)virt_to_phys(iommu->cmd_buf);
	entry |= MMIO_CMD_SIZE_512;

	memcpy_toio(iommu->mmio_base + MMIO_CMD_BUF_OFFSET,
		    &entry, sizeof(entry));

	amd_iommu_reset_cmd_buffer(iommu);
	iommu->cmd_buf_size &= ~(CMD_BUFFER_UNINITIALIZED);
}
static void __init free_command_buffer(struct amd_iommu *iommu)
{
	free_pages((unsigned long)iommu->cmd_buf,
		   get_order(iommu->cmd_buf_size & ~(CMD_BUFFER_UNINITIALIZED)));
}
/* allocates the memory where the IOMMU will log its events to */
static u8 * __init alloc_event_buffer(struct amd_iommu *iommu)
{
	iommu->evt_buf = (u8 *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
						get_order(EVT_BUFFER_SIZE));

	if (iommu->evt_buf == NULL)
		return NULL;

	iommu->evt_buf_size = EVT_BUFFER_SIZE;

	return iommu->evt_buf;
}
static void iommu_enable_event_buffer(struct amd_iommu *iommu)
{
	u64 entry;

	BUG_ON(iommu->evt_buf == NULL);

	entry = (u64)virt_to_phys(iommu->evt_buf) | EVT_LEN_MASK;

	memcpy_toio(iommu->mmio_base + MMIO_EVT_BUF_OFFSET,
		    &entry, sizeof(entry));

	/* set head and tail to zero manually */
	writel(0x00, iommu->mmio_base + MMIO_EVT_HEAD_OFFSET);
	writel(0x00, iommu->mmio_base + MMIO_EVT_TAIL_OFFSET);

	iommu_feature_enable(iommu, CONTROL_EVT_LOG_EN);
}
static void __init free_event_buffer(struct amd_iommu *iommu)
{
	free_pages((unsigned long)iommu->evt_buf, get_order(EVT_BUFFER_SIZE));
}
/* allocates the memory where the IOMMU will log its PPR requests to */
static u8 * __init alloc_ppr_log(struct amd_iommu *iommu)
{
	iommu->ppr_log = (u8 *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
						get_order(PPR_LOG_SIZE));

	if (iommu->ppr_log == NULL)
		return NULL;

	return iommu->ppr_log;
}
static void iommu_enable_ppr_log(struct amd_iommu *iommu)
{
	u64 entry;

	if (iommu->ppr_log == NULL)
		return;

	entry = (u64)virt_to_phys(iommu->ppr_log) | PPR_LOG_SIZE_512;

	memcpy_toio(iommu->mmio_base + MMIO_PPR_LOG_OFFSET,
		    &entry, sizeof(entry));

	/* set head and tail to zero manually */
	writel(0x00, iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);
	writel(0x00, iommu->mmio_base + MMIO_PPR_TAIL_OFFSET);

	iommu_feature_enable(iommu, CONTROL_PPFLOG_EN);
	iommu_feature_enable(iommu, CONTROL_PPR_EN);
}
static void __init free_ppr_log(struct amd_iommu *iommu)
{
	if (iommu->ppr_log == NULL)
		return;

	free_pages((unsigned long)iommu->ppr_log, get_order(PPR_LOG_SIZE));
}
static void iommu_enable_gt(struct amd_iommu *iommu)
{
	if (!iommu_feature(iommu, FEATURE_GT))
		return;

	iommu_feature_enable(iommu, CONTROL_GT_EN);
}
/* sets a specific bit in the device table entry. */
static void set_dev_entry_bit(u16 devid, u8 bit)
{
	int i = (bit >> 6) & 0x03;
	int _bit = bit & 0x3f;

	amd_iommu_dev_table[devid].data[i] |= (1UL << _bit);
}

static int get_dev_entry_bit(u16 devid, u8 bit)
{
	int i = (bit >> 6) & 0x03;
	int _bit = bit & 0x3f;

	return (amd_iommu_dev_table[devid].data[i] & (1UL << _bit)) >> _bit;
}
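
/*
 * A device table entry is 256 bits wide, stored as four 64-bit words in
 * amd_iommu_dev_table[devid].data[]. The flat bit number is therefore split
 * into a word index (bit >> 6) and a bit position inside that word
 * (bit & 0x3f); e.g. bit 97 lands in data[1] at bit position 33.
 */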
void amd_iommu_apply_erratum_63(u16 devid)
{
	int sysmgt;

	sysmgt = get_dev_entry_bit(devid, DEV_ENTRY_SYSMGT1) |
		 (get_dev_entry_bit(devid, DEV_ENTRY_SYSMGT2) << 1);

	if (sysmgt == 0x01)
		set_dev_entry_bit(devid, DEV_ENTRY_IW);
}
/* Writes the specific IOMMU for a device into the rlookup table */
static void __init set_iommu_for_device(struct amd_iommu *iommu, u16 devid)
{
	amd_iommu_rlookup_table[devid] = iommu;
}
/*
 * This function takes the device specific flags read from the ACPI
 * table and sets up the device table entry with that information
 */
static void __init set_dev_entry_from_acpi(struct amd_iommu *iommu,
					   u16 devid, u32 flags, u32 ext_flags)
{
	if (flags & ACPI_DEVFLAG_INITPASS)
		set_dev_entry_bit(devid, DEV_ENTRY_INIT_PASS);
	if (flags & ACPI_DEVFLAG_EXTINT)
		set_dev_entry_bit(devid, DEV_ENTRY_EINT_PASS);
	if (flags & ACPI_DEVFLAG_NMI)
		set_dev_entry_bit(devid, DEV_ENTRY_NMI_PASS);
	if (flags & ACPI_DEVFLAG_SYSMGT1)
		set_dev_entry_bit(devid, DEV_ENTRY_SYSMGT1);
	if (flags & ACPI_DEVFLAG_SYSMGT2)
		set_dev_entry_bit(devid, DEV_ENTRY_SYSMGT2);
	if (flags & ACPI_DEVFLAG_LINT0)
		set_dev_entry_bit(devid, DEV_ENTRY_LINT0_PASS);
	if (flags & ACPI_DEVFLAG_LINT1)
		set_dev_entry_bit(devid, DEV_ENTRY_LINT1_PASS);

	amd_iommu_apply_erratum_63(devid);

	set_iommu_for_device(iommu, devid);
}
static int __init add_special_device(u8 type, u8 id, u16 *devid, bool cmd_line)
{
	struct devid_map *entry;
	struct list_head *list;

	if (type == IVHD_SPECIAL_IOAPIC)
		list = &ioapic_map;
	else if (type == IVHD_SPECIAL_HPET)
		list = &hpet_map;
	else
		return -EINVAL;

	list_for_each_entry(entry, list, list) {
		if (!(entry->id == id && entry->cmd_line))
			continue;

		pr_info("AMD-Vi: Command-line override present for %s id %d - ignoring\n",
			type == IVHD_SPECIAL_IOAPIC ? "IOAPIC" : "HPET", id);

		*devid = entry->devid;

		return 0;
	}

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	entry->id	= id;
	entry->devid	= *devid;
	entry->cmd_line	= cmd_line;

	list_add_tail(&entry->list, list);

	return 0;
}
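
/*
 * Usage note: when a map for this id was already created from the kernel
 * command line (entry->cmd_line set), the IVRS-provided device id is
 * discarded and the override is written back through *devid instead, so
 * callers must keep using the possibly-updated value afterwards.
 */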
static int __init add_early_maps(void)
{
	int i, ret;

	for (i = 0; i < early_ioapic_map_size; ++i) {
		ret = add_special_device(IVHD_SPECIAL_IOAPIC,
					 early_ioapic_map[i].id,
					 &early_ioapic_map[i].devid,
					 early_ioapic_map[i].cmd_line);
		if (ret)
			return ret;
	}

	for (i = 0; i < early_hpet_map_size; ++i) {
		ret = add_special_device(IVHD_SPECIAL_HPET,
					 early_hpet_map[i].id,
					 &early_hpet_map[i].devid,
					 early_hpet_map[i].cmd_line);
		if (ret)
			return ret;
	}

	return 0;
}
/*
 * Reads the device exclusion range from ACPI and initializes the IOMMU with
 * it
 */
static void __init set_device_exclusion_range(u16 devid, struct ivmd_header *m)
{
	struct amd_iommu *iommu = amd_iommu_rlookup_table[devid];

	if (!(m->flags & IVMD_FLAG_EXCL_RANGE))
		return;

	if (iommu) {
		/*
		 * We only can configure exclusion ranges per IOMMU, not
		 * per device. But we can enable the exclusion range per
		 * device. This is done here
		 */
		set_dev_entry_bit(devid, DEV_ENTRY_EX);
		iommu->exclusion_start = m->range_start;
		iommu->exclusion_length = m->range_length;
	}
}
/*
 * Takes a pointer to an AMD IOMMU entry in the ACPI table and
 * initializes the hardware and our data structures with it.
 */
static int __init init_iommu_from_acpi(struct amd_iommu *iommu,
				       struct ivhd_header *h)
{
	u8 *p = (u8 *)h;
	u8 *end = p, flags = 0;
	u16 devid = 0, devid_start = 0, devid_to = 0;
	u32 dev_i, ext_flags = 0;
	bool alias = false;
	struct ivhd_entry *e;
	int ret;

	ret = add_early_maps();
	if (ret)
		return ret;

	/*
	 * First save the recommended feature enable bits from ACPI
	 */
	iommu->acpi_flags = h->flags;

	/*
	 * Done. Now parse the device entries
	 */
	p += sizeof(struct ivhd_header);
	end += h->length;

	while (p < end) {
		e = (struct ivhd_entry *)p;
		switch (e->type) {
		case IVHD_DEV_ALL:

			DUMP_printk("  DEV_ALL\t\t\t first devid: %02x:%02x.%x"
				    " last device %02x:%02x.%x flags: %02x\n",
				    PCI_BUS_NUM(iommu->first_device),
				    PCI_SLOT(iommu->first_device),
				    PCI_FUNC(iommu->first_device),
				    PCI_BUS_NUM(iommu->last_device),
				    PCI_SLOT(iommu->last_device),
				    PCI_FUNC(iommu->last_device),
				    e->flags);

			for (dev_i = iommu->first_device;
					dev_i <= iommu->last_device; ++dev_i)
				set_dev_entry_from_acpi(iommu, dev_i,
							e->flags, 0);
			break;
		case IVHD_DEV_SELECT:

			DUMP_printk("  DEV_SELECT\t\t\t devid: %02x:%02x.%x "
				    "flags: %02x\n",
				    PCI_BUS_NUM(e->devid),
				    PCI_SLOT(e->devid),
				    PCI_FUNC(e->devid),
				    e->flags);

			devid = e->devid;
			set_dev_entry_from_acpi(iommu, devid, e->flags, 0);
			break;
		case IVHD_DEV_SELECT_RANGE_START:

			DUMP_printk("  DEV_SELECT_RANGE_START\t "
				    "devid: %02x:%02x.%x flags: %02x\n",
				    PCI_BUS_NUM(e->devid),
				    PCI_SLOT(e->devid),
				    PCI_FUNC(e->devid),
				    e->flags);

			devid_start = e->devid;
			flags = e->flags;
			ext_flags = 0;
			alias = false;
			break;
		case IVHD_DEV_ALIAS:

			DUMP_printk("  DEV_ALIAS\t\t\t devid: %02x:%02x.%x "
				    "flags: %02x devid_to: %02x:%02x.%x\n",
				    PCI_BUS_NUM(e->devid),
				    PCI_SLOT(e->devid),
				    PCI_FUNC(e->devid),
				    e->flags,
				    PCI_BUS_NUM(e->ext >> 8),
				    PCI_SLOT(e->ext >> 8),
				    PCI_FUNC(e->ext >> 8));

			devid = e->devid;
			devid_to = e->ext >> 8;
			set_dev_entry_from_acpi(iommu, devid, e->flags, 0);
			set_dev_entry_from_acpi(iommu, devid_to, e->flags, 0);
			amd_iommu_alias_table[devid] = devid_to;
			break;
		case IVHD_DEV_ALIAS_RANGE:

			DUMP_printk("  DEV_ALIAS_RANGE\t\t "
				    "devid: %02x:%02x.%x flags: %02x "
				    "devid_to: %02x:%02x.%x\n",
				    PCI_BUS_NUM(e->devid),
				    PCI_SLOT(e->devid),
				    PCI_FUNC(e->devid),
				    e->flags,
				    PCI_BUS_NUM(e->ext >> 8),
				    PCI_SLOT(e->ext >> 8),
				    PCI_FUNC(e->ext >> 8));

			devid_start = e->devid;
			flags = e->flags;
			devid_to = e->ext >> 8;
			ext_flags = 0;
			alias = true;
			break;
		case IVHD_DEV_EXT_SELECT:

			DUMP_printk("  DEV_EXT_SELECT\t\t devid: %02x:%02x.%x "
				    "flags: %02x ext: %08x\n",
				    PCI_BUS_NUM(e->devid),
				    PCI_SLOT(e->devid),
				    PCI_FUNC(e->devid),
				    e->flags, e->ext);

			devid = e->devid;
			set_dev_entry_from_acpi(iommu, devid, e->flags,
						e->ext);
			break;
		case IVHD_DEV_EXT_SELECT_RANGE:

			DUMP_printk("  DEV_EXT_SELECT_RANGE\t devid: "
				    "%02x:%02x.%x flags: %02x ext: %08x\n",
				    PCI_BUS_NUM(e->devid),
				    PCI_SLOT(e->devid),
				    PCI_FUNC(e->devid),
				    e->flags, e->ext);

			devid_start = e->devid;
			flags = e->flags;
			ext_flags = e->ext;
			alias = false;
			break;
		case IVHD_DEV_RANGE_END:

			DUMP_printk("  DEV_RANGE_END\t\t devid: %02x:%02x.%x\n",
				    PCI_BUS_NUM(e->devid),
				    PCI_SLOT(e->devid),
				    PCI_FUNC(e->devid));

			devid = e->devid;
			for (dev_i = devid_start; dev_i <= devid; ++dev_i) {
				if (alias) {
					amd_iommu_alias_table[dev_i] = devid_to;
					set_dev_entry_from_acpi(iommu,
						devid_to, flags, ext_flags);
				}
				set_dev_entry_from_acpi(iommu, dev_i,
							flags, ext_flags);
			}
			break;
		case IVHD_DEV_SPECIAL: {
			u8 handle, type;
			const char *var;
			u16 devid;
			int ret;

			handle = e->ext & 0xff;
			devid  = (e->ext >> 8) & 0xffff;
			type   = (e->ext >> 24) & 0xff;

			if (type == IVHD_SPECIAL_IOAPIC)
				var = "IOAPIC";
			else if (type == IVHD_SPECIAL_HPET)
				var = "HPET";
			else
				var = "UNKNOWN";

			DUMP_printk("  DEV_SPECIAL(%s[%d])\t\tdevid: %02x:%02x.%x\n",
				    var, (int)handle,
				    PCI_BUS_NUM(devid),
				    PCI_SLOT(devid),
				    PCI_FUNC(devid));

			ret = add_special_device(type, handle, &devid, false);
			if (ret)
				return ret;

			/*
			 * add_special_device might update the devid in case a
			 * command-line override is present. So call
			 * set_dev_entry_from_acpi after add_special_device.
			 */
			set_dev_entry_from_acpi(iommu, devid, e->flags, 0);

			break;
		}
		default:
			break;
		}

		p += ivhd_entry_length(p);
	}

	return 0;
}
/* Initializes the device->iommu mapping for the driver */
static int __init init_iommu_devices(struct amd_iommu *iommu)
{
	u32 i;

	for (i = iommu->first_device; i <= iommu->last_device; ++i)
		set_iommu_for_device(iommu, i);

	return 0;
}
static void __init free_iommu_one(struct amd_iommu *iommu)
{
	free_command_buffer(iommu);
	free_event_buffer(iommu);
	free_ppr_log(iommu);
	iommu_unmap_mmio_space(iommu);
}
static void __init free_iommu_all(void)
{
	struct amd_iommu *iommu, *next;

	for_each_iommu_safe(iommu, next) {
		list_del(&iommu->list);
		free_iommu_one(iommu);
		kfree(iommu);
	}
}
/*
 * Family15h Model 10h-1fh erratum 746 (IOMMU Logging May Stall Translations)
 * Workaround:
 *     BIOS should disable L2B miscellaneous clock gating by setting
 *     L2_L2B_CK_GATE_CONTROL[CKGateL2BMiscDisable](D0F2xF4_x90[2]) = 1b
 */
static void amd_iommu_erratum_746_workaround(struct amd_iommu *iommu)
{
	u32 value;

	if ((boot_cpu_data.x86 != 0x15) ||
	    (boot_cpu_data.x86_model < 0x10) ||
	    (boot_cpu_data.x86_model > 0x1f))
		return;

	pci_write_config_dword(iommu->dev, 0xf0, 0x90);
	pci_read_config_dword(iommu->dev, 0xf4, &value);

	if (value & BIT(2))
		return;

	/* Select NB indirect register 0x90 and enable writing */
	pci_write_config_dword(iommu->dev, 0xf0, 0x90 | (1 << 8));

	pci_write_config_dword(iommu->dev, 0xf4, value | 0x4);
	pr_info("AMD-Vi: Applying erratum 746 workaround for IOMMU at %s\n",
		dev_name(&iommu->dev->dev));

	/* Clear the enable writing bit */
	pci_write_config_dword(iommu->dev, 0xf0, 0x90);
}
/*
 * This function clues the initialization function for one IOMMU
 * together and also allocates the command buffer and programs the
 * hardware. It does NOT enable the IOMMU. This is done afterwards.
 */
static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h)
{
	int ret;

	spin_lock_init(&iommu->lock);

	/* Add IOMMU to internal data structures */
	list_add_tail(&iommu->list, &amd_iommu_list);
	iommu->index = amd_iommus_present++;

	if (unlikely(iommu->index >= MAX_IOMMUS)) {
		WARN(1, "AMD-Vi: System has more IOMMUs than supported by this driver\n");
		return -ENOSYS;
	}

	/* Index is fine - add IOMMU to the array */
	amd_iommus[iommu->index] = iommu;

	/*
	 * Copy data from ACPI table entry to the iommu struct
	 */
	iommu->devid     = h->devid;
	iommu->cap_ptr   = h->cap_ptr;
	iommu->pci_seg   = h->pci_seg;
	iommu->mmio_phys = h->mmio_phys;

	/* Check if IVHD EFR contains proper max banks/counters */
	if ((h->efr != 0) &&
	    ((h->efr & (0xF << 13)) != 0) &&
	    ((h->efr & (0x3F << 17)) != 0)) {
		iommu->mmio_phys_end = MMIO_REG_END_OFFSET;
	} else {
		iommu->mmio_phys_end = MMIO_CNTR_CONF_OFFSET;
	}

	iommu->mmio_base = iommu_map_mmio_space(iommu->mmio_phys,
						iommu->mmio_phys_end);
	if (!iommu->mmio_base)
		return -ENOMEM;

	iommu->cmd_buf = alloc_command_buffer(iommu);
	if (!iommu->cmd_buf)
		return -ENOMEM;

	iommu->evt_buf = alloc_event_buffer(iommu);
	if (!iommu->evt_buf)
		return -ENOMEM;

	iommu->int_enabled = false;

	ret = init_iommu_from_acpi(iommu, h);
	if (ret)
		return ret;

	ret = amd_iommu_create_irq_domain(iommu);
	if (ret)
		return ret;

	/*
	 * Make sure IOMMU is not considered to translate itself. The IVRS
	 * table tells us so, but this is a lie!
	 */
	amd_iommu_rlookup_table[iommu->devid] = NULL;

	init_iommu_devices(iommu);

	return 0;
}
/*
 * Iterates over all IOMMU entries in the ACPI table, allocates the
 * IOMMU structure and initializes it with init_iommu_one()
 */
static int __init init_iommu_all(struct acpi_table_header *table)
{
	u8 *p = (u8 *)table, *end = (u8 *)table;
	struct ivhd_header *h;
	struct amd_iommu *iommu;
	int ret;

	end += table->length;
	p += IVRS_HEADER_LENGTH;

	while (p < end) {
		h = (struct ivhd_header *)p;
		switch (h->type) {
		case ACPI_IVHD_TYPE:

			DUMP_printk("device: %02x:%02x.%01x cap: %04x "
				    "seg: %d flags: %01x info %04x\n",
				    PCI_BUS_NUM(h->devid), PCI_SLOT(h->devid),
				    PCI_FUNC(h->devid), h->cap_ptr,
				    h->pci_seg, h->flags, h->info);
			DUMP_printk("       mmio-addr: %016llx\n",
				    h->mmio_phys);

			iommu = kzalloc(sizeof(struct amd_iommu), GFP_KERNEL);
			if (iommu == NULL)
				return -ENOMEM;

			ret = init_iommu_one(iommu, h);
			if (ret)
				return ret;
			break;
		default:
			break;
		}
		p += h->length;
	}
	WARN_ON(p != end);

	return 0;
}
static void init_iommu_perf_ctr(struct amd_iommu *iommu)
{
	u64 val = 0xabcd, val2 = 0;

	if (!iommu_feature(iommu, FEATURE_PC))
		return;

	amd_iommu_pc_present = true;

	/* Check if the performance counters can be written to */
	if ((0 != amd_iommu_pc_get_set_reg_val(0, 0, 0, 0, &val, true)) ||
	    (0 != amd_iommu_pc_get_set_reg_val(0, 0, 0, 0, &val2, false)) ||
	    (val != val2)) {
		pr_err("AMD-Vi: Unable to write to IOMMU perf counter.\n");
		amd_iommu_pc_present = false;
		return;
	}

	pr_info("AMD-Vi: IOMMU performance counters supported\n");

	val = readl(iommu->mmio_base + MMIO_CNTR_CONF_OFFSET);
	iommu->max_banks = (u8) ((val >> 12) & 0x3f);
	iommu->max_counters = (u8) ((val >> 7) & 0xf);
}
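
/*
 * The probe above works by round-tripping an arbitrary pattern (0xabcd)
 * through counter register 0 of bank 0: if the value written cannot be read
 * back unchanged, performance counting is reported as unsupported and
 * amd_iommu_pc_present is cleared again. The per-IOMMU bank and counter
 * limits are then read out of the MMIO counter configuration register.
 */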
static ssize_t amd_iommu_show_cap(struct device *dev,
				  struct device_attribute *attr,
				  char *buf)
{
	struct amd_iommu *iommu = dev_get_drvdata(dev);
	return sprintf(buf, "%x\n", iommu->cap);
}
static DEVICE_ATTR(cap, S_IRUGO, amd_iommu_show_cap, NULL);

static ssize_t amd_iommu_show_features(struct device *dev,
				       struct device_attribute *attr,
				       char *buf)
{
	struct amd_iommu *iommu = dev_get_drvdata(dev);
	return sprintf(buf, "%llx\n", iommu->features);
}
static DEVICE_ATTR(features, S_IRUGO, amd_iommu_show_features, NULL);

static struct attribute *amd_iommu_attrs[] = {
	&dev_attr_cap.attr,
	&dev_attr_features.attr,
	NULL,
};

static struct attribute_group amd_iommu_group = {
	.name = "amd-iommu",
	.attrs = amd_iommu_attrs,
};

static const struct attribute_group *amd_iommu_groups[] = {
	&amd_iommu_group,
	NULL,
};
static int iommu_init_pci(struct amd_iommu *iommu)
{
	int cap_ptr = iommu->cap_ptr;
	u32 range, misc, low, high;

	iommu->dev = pci_get_bus_and_slot(PCI_BUS_NUM(iommu->devid),
					  iommu->devid & 0xff);
	if (!iommu->dev)
		return -ENODEV;

	pci_read_config_dword(iommu->dev, cap_ptr + MMIO_CAP_HDR_OFFSET,
			      &iommu->cap);
	pci_read_config_dword(iommu->dev, cap_ptr + MMIO_RANGE_OFFSET,
			      &range);
	pci_read_config_dword(iommu->dev, cap_ptr + MMIO_MISC_OFFSET,
			      &misc);

	iommu->first_device = PCI_DEVID(MMIO_GET_BUS(range),
					MMIO_GET_FD(range));
	iommu->last_device = PCI_DEVID(MMIO_GET_BUS(range),
				       MMIO_GET_LD(range));

	if (!(iommu->cap & (1 << IOMMU_CAP_IOTLB)))
		amd_iommu_iotlb_sup = false;

	/* read extended feature bits */
	low  = readl(iommu->mmio_base + MMIO_EXT_FEATURES);
	high = readl(iommu->mmio_base + MMIO_EXT_FEATURES + 4);

	iommu->features = ((u64)high << 32) | low;

	if (iommu_feature(iommu, FEATURE_GT)) {
		int glxval;
		u32 max_pasid;
		u64 pasmax;

		pasmax = iommu->features & FEATURE_PASID_MASK;
		pasmax >>= FEATURE_PASID_SHIFT;
		max_pasid = (1 << (pasmax + 1)) - 1;

		amd_iommu_max_pasid = min(amd_iommu_max_pasid, max_pasid);

		BUG_ON(amd_iommu_max_pasid & ~PASID_MASK);

		glxval = iommu->features & FEATURE_GLXVAL_MASK;
		glxval >>= FEATURE_GLXVAL_SHIFT;

		if (amd_iommu_max_glx_val == -1)
			amd_iommu_max_glx_val = glxval;
		else
			amd_iommu_max_glx_val = min(amd_iommu_max_glx_val, glxval);
	}

	if (iommu_feature(iommu, FEATURE_GT) &&
	    iommu_feature(iommu, FEATURE_PPR)) {
		iommu->is_iommu_v2   = true;
		amd_iommu_v2_present = true;
	}

	if (iommu_feature(iommu, FEATURE_PPR)) {
		iommu->ppr_log = alloc_ppr_log(iommu);
		if (!iommu->ppr_log)
			return -ENOMEM;
	}

	if (iommu->cap & (1UL << IOMMU_CAP_NPCACHE))
		amd_iommu_np_cache = true;

	init_iommu_perf_ctr(iommu);

	if (is_rd890_iommu(iommu->dev)) {
		int i, j;

		iommu->root_pdev = pci_get_bus_and_slot(iommu->dev->bus->number,
							PCI_DEVFN(0, 0));

		/*
		 * Some rd890 systems may not be fully reconfigured by the
		 * BIOS, so it's necessary for us to store this information so
		 * it can be reprogrammed on resume
		 */
		pci_read_config_dword(iommu->dev, iommu->cap_ptr + 4,
				      &iommu->stored_addr_lo);
		pci_read_config_dword(iommu->dev, iommu->cap_ptr + 8,
				      &iommu->stored_addr_hi);

		/* Low bit locks writes to configuration space */
		iommu->stored_addr_lo &= ~1;

		for (i = 0; i < 6; i++)
			for (j = 0; j < 0x12; j++)
				iommu->stored_l1[i][j] = iommu_read_l1(iommu, i, j);

		for (i = 0; i < 0x83; i++)
			iommu->stored_l2[i] = iommu_read_l2(iommu, i);
	}

	amd_iommu_erratum_746_workaround(iommu);

	iommu->iommu_dev = iommu_device_create(&iommu->dev->dev, iommu,
					       amd_iommu_groups, "ivhd%d",
					       iommu->index);

	return pci_enable_device(iommu->dev);
}
static void print_iommu_info(void)
{
	static const char * const feat_str[] = {
		"PreF", "PPR", "X2APIC", "NX", "GT", "[5]",
		"IA", "GA", "HE", "PC"
	};
	struct amd_iommu *iommu;

	for_each_iommu(iommu) {
		int i;

		pr_info("AMD-Vi: Found IOMMU at %s cap 0x%hx\n",
			dev_name(&iommu->dev->dev), iommu->cap_ptr);

		if (iommu->cap & (1 << IOMMU_CAP_EFR)) {
			pr_info("AMD-Vi: Extended features: ");
			for (i = 0; i < ARRAY_SIZE(feat_str); ++i) {
				if (iommu_feature(iommu, (1ULL << i)))
					pr_cont(" %s", feat_str[i]);
			}
			pr_cont("\n");
		}
	}
	if (irq_remapping_enabled)
		pr_info("AMD-Vi: Interrupt remapping enabled\n");
}
static int __init amd_iommu_init_pci(void)
{
	struct amd_iommu *iommu;
	int ret = 0;

	for_each_iommu(iommu) {
		ret = iommu_init_pci(iommu);
		if (ret)
			break;
	}

	init_device_table_dma();

	for_each_iommu(iommu)
		iommu_flush_all_caches(iommu);

	ret = amd_iommu_init_api();

	print_iommu_info();

	return ret;
}
/****************************************************************************
 *
 * The following functions initialize the MSI interrupts for all IOMMUs
 * in the system. It's a bit challenging because there could be multiple
 * IOMMUs per PCI BDF but we can call pci_enable_msi(x) only once per
 * pci_dev.
 *
 ****************************************************************************/

static int iommu_setup_msi(struct amd_iommu *iommu)
{
	int r;

	r = pci_enable_msi(iommu->dev);
	if (r)
		return r;

	r = request_threaded_irq(iommu->dev->irq,
				 amd_iommu_int_handler,
				 amd_iommu_int_thread,
				 0, "AMD-Vi",
				 iommu);

	if (r) {
		pci_disable_msi(iommu->dev);
		return r;
	}

	iommu->int_enabled = true;

	return 0;
}

static int iommu_init_msi(struct amd_iommu *iommu)
{
	int ret;

	if (iommu->int_enabled)
		goto enable_faults;

	if (iommu->dev->msi_cap)
		ret = iommu_setup_msi(iommu);
	else
		ret = -ENODEV;

	if (ret)
		return ret;

enable_faults:
	iommu_feature_enable(iommu, CONTROL_EVT_INT_EN);

	if (iommu->ppr_log != NULL)
		iommu_feature_enable(iommu, CONTROL_PPFINT_EN);

	return 0;
}
/****************************************************************************
 *
 * The next functions belong to the third pass of parsing the ACPI
 * table. In this last pass the memory mapping requirements are
 * gathered (like exclusion and unity mapping ranges).
 *
 ****************************************************************************/

static void __init free_unity_maps(void)
{
	struct unity_map_entry *entry, *next;

	list_for_each_entry_safe(entry, next, &amd_iommu_unity_map, list) {
		list_del(&entry->list);
		kfree(entry);
	}
}
/* called when we find an exclusion range definition in ACPI */
static int __init init_exclusion_range(struct ivmd_header *m)
{
	int i;

	switch (m->type) {
	case ACPI_IVMD_TYPE:
		set_device_exclusion_range(m->devid, m);
		break;
	case ACPI_IVMD_TYPE_ALL:
		for (i = 0; i <= amd_iommu_last_bdf; ++i)
			set_device_exclusion_range(i, m);
		break;
	case ACPI_IVMD_TYPE_RANGE:
		for (i = m->devid; i <= m->aux; ++i)
			set_device_exclusion_range(i, m);
		break;
	default:
		break;
	}

	return 0;
}
/* called for unity map ACPI definition */
static int __init init_unity_map_range(struct ivmd_header *m)
{
	struct unity_map_entry *e = NULL;
	char *s;

	e = kzalloc(sizeof(*e), GFP_KERNEL);
	if (e == NULL)
		return -ENOMEM;

	switch (m->type) {
	default:
		kfree(e);
		return 0;
	case ACPI_IVMD_TYPE:
		s = "IVMD_TYPE\t\t\t";
		e->devid_start = e->devid_end = m->devid;
		break;
	case ACPI_IVMD_TYPE_ALL:
		s = "IVMD_TYPE_ALL\t\t";
		e->devid_start = 0;
		e->devid_end = amd_iommu_last_bdf;
		break;
	case ACPI_IVMD_TYPE_RANGE:
		s = "IVMD_TYPE_RANGE\t\t";
		e->devid_start = m->devid;
		e->devid_end = m->aux;
		break;
	}
	e->address_start = PAGE_ALIGN(m->range_start);
	e->address_end = e->address_start + PAGE_ALIGN(m->range_length);
	e->prot = m->flags >> 1;

	DUMP_printk("%s devid_start: %02x:%02x.%x devid_end: %02x:%02x.%x"
		    " range_start: %016llx range_end: %016llx flags: %x\n", s,
		    PCI_BUS_NUM(e->devid_start), PCI_SLOT(e->devid_start),
		    PCI_FUNC(e->devid_start), PCI_BUS_NUM(e->devid_end),
		    PCI_SLOT(e->devid_end), PCI_FUNC(e->devid_end),
		    e->address_start, e->address_end, m->flags);

	list_add_tail(&e->list, &amd_iommu_unity_map);

	return 0;
}
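
/*
 * Note on e->prot above: IVMD flags bit 0 is the unity-map enable bit
 * itself, so shifting the flags right by one leaves the protection bits.
 * Per the IVRS specification the next two flag bits are the read and write
 * permission bits, which is the encoding the mapping code later checks
 * against.
 */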
/* iterates over all memory definitions we find in the ACPI table */
static int __init init_memory_definitions(struct acpi_table_header *table)
{
	u8 *p = (u8 *)table, *end = (u8 *)table;
	struct ivmd_header *m;

	end += table->length;
	p += IVRS_HEADER_LENGTH;

	while (p < end) {
		m = (struct ivmd_header *)p;
		if (m->flags & IVMD_FLAG_EXCL_RANGE)
			init_exclusion_range(m);
		else if (m->flags & IVMD_FLAG_UNITY_MAP)
			init_unity_map_range(m);

		p += m->length;
	}

	return 0;
}
/*
 * Init the device table to not allow DMA access for devices and
 * suppress all page faults
 */
static void init_device_table_dma(void)
{
	u32 devid;

	for (devid = 0; devid <= amd_iommu_last_bdf; ++devid) {
		set_dev_entry_bit(devid, DEV_ENTRY_VALID);
		set_dev_entry_bit(devid, DEV_ENTRY_TRANSLATION);
	}
}

static void __init uninit_device_table_dma(void)
{
	u32 devid;

	for (devid = 0; devid <= amd_iommu_last_bdf; ++devid) {
		amd_iommu_dev_table[devid].data[0] = 0ULL;
		amd_iommu_dev_table[devid].data[1] = 0ULL;
	}
}
static void init_device_table(void)
{
	u32 devid;

	if (!amd_iommu_irq_remap)
		return;

	for (devid = 0; devid <= amd_iommu_last_bdf; ++devid)
		set_dev_entry_bit(devid, DEV_ENTRY_IRQ_TBL_EN);
}
static void iommu_init_flags(struct amd_iommu *iommu)
{
	iommu->acpi_flags & IVHD_FLAG_HT_TUN_EN_MASK ?
	    iommu_feature_enable(iommu, CONTROL_HT_TUN_EN) :
	    iommu_feature_disable(iommu, CONTROL_HT_TUN_EN);

	iommu->acpi_flags & IVHD_FLAG_PASSPW_EN_MASK ?
	    iommu_feature_enable(iommu, CONTROL_PASSPW_EN) :
	    iommu_feature_disable(iommu, CONTROL_PASSPW_EN);

	iommu->acpi_flags & IVHD_FLAG_RESPASSPW_EN_MASK ?
	    iommu_feature_enable(iommu, CONTROL_RESPASSPW_EN) :
	    iommu_feature_disable(iommu, CONTROL_RESPASSPW_EN);

	iommu->acpi_flags & IVHD_FLAG_ISOC_EN_MASK ?
	    iommu_feature_enable(iommu, CONTROL_ISOC_EN) :
	    iommu_feature_disable(iommu, CONTROL_ISOC_EN);

	/*
	 * make IOMMU memory accesses cache coherent
	 */
	iommu_feature_enable(iommu, CONTROL_COHERENT_EN);

	/* Set IOTLB invalidation timeout to 1s */
	iommu_set_inv_tlb_timeout(iommu, CTRL_INV_TO_1S);
}
static void iommu_apply_resume_quirks(struct amd_iommu *iommu)
{
	int i, j;
	u32 ioc_feature_control;
	struct pci_dev *pdev = iommu->root_pdev;

	/* RD890 BIOSes may not have completely reconfigured the iommu */
	if (!is_rd890_iommu(iommu->dev) || !pdev)
		return;

	/*
	 * First, we need to ensure that the iommu is enabled. This is
	 * controlled by a register in the northbridge
	 */

	/* Select Northbridge indirect register 0x75 and enable writing */
	pci_write_config_dword(pdev, 0x60, 0x75 | (1 << 7));
	pci_read_config_dword(pdev, 0x64, &ioc_feature_control);

	/* Enable the iommu */
	if (!(ioc_feature_control & 0x1))
		pci_write_config_dword(pdev, 0x64, ioc_feature_control | 1);

	/* Restore the iommu BAR */
	pci_write_config_dword(iommu->dev, iommu->cap_ptr + 4,
			       iommu->stored_addr_lo);
	pci_write_config_dword(iommu->dev, iommu->cap_ptr + 8,
			       iommu->stored_addr_hi);

	/* Restore the l1 indirect regs for each of the 6 l1s */
	for (i = 0; i < 6; i++)
		for (j = 0; j < 0x12; j++)
			iommu_write_l1(iommu, i, j, iommu->stored_l1[i][j]);

	/* Restore the l2 indirect regs */
	for (i = 0; i < 0x83; i++)
		iommu_write_l2(iommu, i, iommu->stored_l2[i]);

	/* Lock PCI setup registers */
	pci_write_config_dword(iommu->dev, iommu->cap_ptr + 4,
			       iommu->stored_addr_lo | 1);
}
/*
 * This function finally enables all IOMMUs found in the system after
 * they have been initialized
 */
static void early_enable_iommus(void)
{
	struct amd_iommu *iommu;

	for_each_iommu(iommu) {
		iommu_disable(iommu);
		iommu_init_flags(iommu);
		iommu_set_device_table(iommu);
		iommu_enable_command_buffer(iommu);
		iommu_enable_event_buffer(iommu);
		iommu_set_exclusion_range(iommu);
		iommu_enable(iommu);
		iommu_flush_all_caches(iommu);
	}
}
static void enable_iommus_v2(void)
{
	struct amd_iommu *iommu;

	for_each_iommu(iommu) {
		iommu_enable_ppr_log(iommu);
		iommu_enable_gt(iommu);
	}
}

static void enable_iommus(void)
{
	early_enable_iommus();

	enable_iommus_v2();
}

static void disable_iommus(void)
{
	struct amd_iommu *iommu;

	for_each_iommu(iommu)
		iommu_disable(iommu);
}
/*
 * Suspend/Resume support
 * disable suspend until real resume implemented
 */

static void amd_iommu_resume(void)
{
	struct amd_iommu *iommu;

	for_each_iommu(iommu)
		iommu_apply_resume_quirks(iommu);

	/* re-load the hardware */
	enable_iommus();

	amd_iommu_enable_interrupts();
}

static int amd_iommu_suspend(void)
{
	/* disable IOMMUs to go out of the way for BIOS */
	disable_iommus();

	return 0;
}

static struct syscore_ops amd_iommu_syscore_ops = {
	.suspend = amd_iommu_suspend,
	.resume = amd_iommu_resume,
};
static void __init free_on_init_error(void)
{
	free_pages((unsigned long)irq_lookup_table,
		   get_order(rlookup_table_size));

	if (amd_iommu_irq_cache) {
		kmem_cache_destroy(amd_iommu_irq_cache);
		amd_iommu_irq_cache = NULL;
	}

	free_pages((unsigned long)amd_iommu_rlookup_table,
		   get_order(rlookup_table_size));

	free_pages((unsigned long)amd_iommu_alias_table,
		   get_order(alias_table_size));

	free_pages((unsigned long)amd_iommu_dev_table,
		   get_order(dev_table_size));

	free_iommu_all();

#ifdef CONFIG_GART_IOMMU
	/*
	 * We failed to initialize the AMD IOMMU - try fallback to GART
	 * if possible.
	 */
	gart_iommu_init();

#endif
}
/* SB IOAPIC is always on this device in AMD systems */
#define IOAPIC_SB_DEVID		((0x00 << 8) | PCI_DEVFN(0x14, 0))

static bool __init check_ioapic_information(void)
{
	const char *fw_bug = FW_BUG;
	bool ret, has_sb_ioapic;
	int idx;

	has_sb_ioapic = false;
	ret           = false;

	/*
	 * If we have map overrides on the kernel command line the
	 * messages in this function might not describe firmware bugs
	 * anymore - so be careful
	 */
	if (cmdline_maps)
		fw_bug = "";

	for (idx = 0; idx < nr_ioapics; idx++) {
		int devid, id = mpc_ioapic_id(idx);

		devid = get_ioapic_devid(id);
		if (devid < 0) {
			pr_err("%sAMD-Vi: IOAPIC[%d] not in IVRS table\n",
				fw_bug, id);
			ret = false;
		} else if (devid == IOAPIC_SB_DEVID) {
			has_sb_ioapic = true;
			ret           = true;
		}
	}

	if (!has_sb_ioapic) {
		/*
		 * We expect the SB IOAPIC to be listed in the IVRS
		 * table. The system timer is connected to the SB IOAPIC
		 * and if we don't have it in the list the system will
		 * panic at boot time. This situation usually happens
		 * when the BIOS is buggy and provides us the wrong
		 * device id for the IOAPIC in the system.
		 */
		pr_err("%sAMD-Vi: No southbridge IOAPIC found\n", fw_bug);
	}

	if (!ret)
		pr_err("AMD-Vi: Disabling interrupt remapping\n");

	return ret;
}
static void __init free_dma_resources(void)
{
	free_pages((unsigned long)amd_iommu_pd_alloc_bitmap,
		   get_order(MAX_DOMAIN_ID/8));

	free_unity_maps();
}
/*
 * This is the hardware init function for AMD IOMMU in the system.
 * This function is called either from amd_iommu_init or from the interrupt
 * remapping setup code.
 *
 * This function basically parses the ACPI table for AMD IOMMU (IVRS)
 * three times:
 *
 *	1 pass) Find the highest PCI device id the driver has to handle.
 *		Upon this information the size of the data structures is
 *		determined that needs to be allocated.
 *
 *	2 pass) Initialize the data structures just allocated with the
 *		information in the ACPI table about available AMD IOMMUs
 *		in the system. It also maps the PCI devices in the
 *		system to specific IOMMUs
 *
 *	3 pass) After the basic data structures are allocated and
 *		initialized we update them with information about memory
 *		remapping requirements parsed out of the ACPI table in
 *		this last pass.
 *
 * After everything is set up the IOMMUs are enabled and the necessary
 * hotplug and suspend notifiers are registered.
 */
static int __init early_amd_iommu_init(void)
{
	struct acpi_table_header *ivrs_base;
	acpi_size ivrs_size;
	acpi_status status;
	int i, ret = 0;

	if (!amd_iommu_detected)
		return -ENODEV;

	status = acpi_get_table_with_size("IVRS", 0, &ivrs_base, &ivrs_size);
	if (status == AE_NOT_FOUND)
		return -ENODEV;
	else if (ACPI_FAILURE(status)) {
		const char *err = acpi_format_exception(status);
		pr_err("AMD-Vi: IVRS table error: %s\n", err);
		return -EINVAL;
	}

	/*
	 * First parse ACPI tables to find the largest Bus/Dev/Func
	 * we need to handle. Upon this information the shared data
	 * structures for the IOMMUs in the system will be allocated
	 */
	ret = find_last_devid_acpi(ivrs_base);
	if (ret)
		goto out;

	dev_table_size     = tbl_size(DEV_TABLE_ENTRY_SIZE);
	alias_table_size   = tbl_size(ALIAS_TABLE_ENTRY_SIZE);
	rlookup_table_size = tbl_size(RLOOKUP_TABLE_ENTRY_SIZE);

	/* Device table - directly used by all IOMMUs */
	ret = -ENOMEM;
	amd_iommu_dev_table = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
				      get_order(dev_table_size));
	if (amd_iommu_dev_table == NULL)
		goto out;

	/*
	 * Alias table - map PCI Bus/Dev/Func to Bus/Dev/Func the
	 * IOMMU see for that device
	 */
	amd_iommu_alias_table = (void *)__get_free_pages(GFP_KERNEL,
			get_order(alias_table_size));
	if (amd_iommu_alias_table == NULL)
		goto out;

	/* IOMMU rlookup table - find the IOMMU for a specific device */
	amd_iommu_rlookup_table = (void *)__get_free_pages(
			GFP_KERNEL | __GFP_ZERO,
			get_order(rlookup_table_size));
	if (amd_iommu_rlookup_table == NULL)
		goto out;

	amd_iommu_pd_alloc_bitmap = (void *)__get_free_pages(
					    GFP_KERNEL | __GFP_ZERO,
					    get_order(MAX_DOMAIN_ID/8));
	if (amd_iommu_pd_alloc_bitmap == NULL)
		goto out;

	/*
	 * let all alias entries point to itself
	 */
	for (i = 0; i <= amd_iommu_last_bdf; ++i)
		amd_iommu_alias_table[i] = i;

	/*
	 * never allocate domain 0 because its used as the non-allocated and
	 * error value placeholder
	 */
	amd_iommu_pd_alloc_bitmap[0] = 1;

	spin_lock_init(&amd_iommu_pd_lock);

	/*
	 * now the data structures are allocated and basically initialized
	 * start the real acpi table scan
	 */
	ret = init_iommu_all(ivrs_base);
	if (ret)
		goto out;

	if (amd_iommu_irq_remap)
		amd_iommu_irq_remap = check_ioapic_information();

	if (amd_iommu_irq_remap) {
		/*
		 * Interrupt remapping enabled, create kmem_cache for the
		 * remapping tables.
		 */
		ret = -ENOMEM;
		amd_iommu_irq_cache = kmem_cache_create("irq_remap_cache",
				MAX_IRQS_PER_TABLE * sizeof(u32),
				IRQ_TABLE_ALIGNMENT,
				0, NULL);
		if (!amd_iommu_irq_cache)
			goto out;

		irq_lookup_table = (void *)__get_free_pages(
				GFP_KERNEL | __GFP_ZERO,
				get_order(rlookup_table_size));
		if (!irq_lookup_table)
			goto out;
	}

	ret = init_memory_definitions(ivrs_base);
	if (ret)
		goto out;

	/* init the device table */
	init_device_table();

out:
	/* Don't leak any ACPI memory */
	early_acpi_os_unmap_memory((char __iomem *)ivrs_base, ivrs_size);
	ivrs_base = NULL;

	return ret;
}
static int amd_iommu_enable_interrupts(void)
{
	struct amd_iommu *iommu;
	int ret = 0;

	for_each_iommu(iommu) {
		ret = iommu_init_msi(iommu);
		if (ret)
			break;
	}

	return ret;
}
static bool detect_ivrs(void)
{
	struct acpi_table_header *ivrs_base;
	acpi_size ivrs_size;
	acpi_status status;

	status = acpi_get_table_with_size("IVRS", 0, &ivrs_base, &ivrs_size);
	if (status == AE_NOT_FOUND)
		return false;
	else if (ACPI_FAILURE(status)) {
		const char *err = acpi_format_exception(status);
		pr_err("AMD-Vi: IVRS table error: %s\n", err);
		return false;
	}

	early_acpi_os_unmap_memory((char __iomem *)ivrs_base, ivrs_size);

	/* Make sure ACS will be enabled during PCI probe */
	pci_request_acs();

	return true;
}
/****************************************************************************
 *
 * AMD IOMMU Initialization State Machine
 *
 ****************************************************************************/
static int __init state_next(void)
{
	int ret = 0;

	switch (init_state) {
	case IOMMU_START_STATE:
		if (!detect_ivrs()) {
			init_state	= IOMMU_NOT_FOUND;
			ret		= -ENODEV;
		} else {
			init_state	= IOMMU_IVRS_DETECTED;
		}
		break;
	case IOMMU_IVRS_DETECTED:
		ret = early_amd_iommu_init();
		init_state = ret ? IOMMU_INIT_ERROR : IOMMU_ACPI_FINISHED;
		break;
	case IOMMU_ACPI_FINISHED:
		early_enable_iommus();
		register_syscore_ops(&amd_iommu_syscore_ops);
		x86_platform.iommu_shutdown = disable_iommus;
		init_state = IOMMU_ENABLED;
		break;
	case IOMMU_ENABLED:
		ret = amd_iommu_init_pci();
		init_state = ret ? IOMMU_INIT_ERROR : IOMMU_PCI_INIT;
		enable_iommus_v2();
		break;
	case IOMMU_PCI_INIT:
		ret = amd_iommu_enable_interrupts();
		init_state = ret ? IOMMU_INIT_ERROR : IOMMU_INTERRUPTS_EN;
		break;
	case IOMMU_INTERRUPTS_EN:
		ret = amd_iommu_init_dma_ops();
		init_state = ret ? IOMMU_INIT_ERROR : IOMMU_DMA_OPS;
		break;
	case IOMMU_DMA_OPS:
		init_state = IOMMU_INITIALIZED;
		break;
	case IOMMU_INITIALIZED:
		/* Nothing to do */
		break;
	case IOMMU_NOT_FOUND:
	case IOMMU_INIT_ERROR:
		/* Error states => do nothing */
		ret = -EINVAL;
		break;
	default:
		/* Unknown state */
		BUG();
	}

	return ret;
}

static int __init iommu_go_to_state(enum iommu_init_state state)
{
	int ret = 0;

	while (init_state != state) {
		ret = state_next();
		if (init_state == IOMMU_NOT_FOUND ||
		    init_state == IOMMU_INIT_ERROR)
			break;
	}

	return ret;
}
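
/*
 * The state machine above is driven from two sides: the interrupt remapping
 * setup code (amd_iommu_prepare()/amd_iommu_enable() below) advances it to
 * IOMMU_ACPI_FINISHED and IOMMU_ENABLED early during boot, and the regular
 * x86 DMA initialization path (amd_iommu_init()) later drives it all the
 * way to IOMMU_INITIALIZED. Each step either advances init_state or parks
 * it in one of the two terminal error states, so both callers can safely
 * share the same initialization code.
 */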
#ifdef CONFIG_IRQ_REMAP
int __init amd_iommu_prepare(void)
{
	int ret;

	amd_iommu_irq_remap = true;

	ret = iommu_go_to_state(IOMMU_ACPI_FINISHED);
	if (ret)
		return ret;
	return amd_iommu_irq_remap ? 0 : -ENODEV;
}

int __init amd_iommu_enable(void)
{
	int ret;

	ret = iommu_go_to_state(IOMMU_ENABLED);
	if (ret)
		return ret;

	irq_remapping_enabled = 1;

	return 0;
}

void amd_iommu_disable(void)
{
	amd_iommu_suspend();
}

int amd_iommu_reenable(int mode)
{
	amd_iommu_resume();

	return 0;
}

int __init amd_iommu_enable_faulting(void)
{
	/* We enable MSI later when PCI is initialized */
	return 0;
}
#endif
/*
 * This is the core init function for AMD IOMMU hardware in the system.
 * This function is called from the generic x86 DMA layer initialization
 * code.
 */
static int __init amd_iommu_init(void)
{
	int ret;

	ret = iommu_go_to_state(IOMMU_INITIALIZED);
	if (ret) {
		free_dma_resources();
		if (!irq_remapping_enabled) {
			disable_iommus();
			free_on_init_error();
		} else {
			struct amd_iommu *iommu;

			uninit_device_table_dma();
			for_each_iommu(iommu)
				iommu_flush_all_caches(iommu);
		}
	}

	return ret;
}
/****************************************************************************
 *
 * Early detect code. This code runs at IOMMU detection time in the DMA
 * layer. It just looks if there is an IVRS ACPI table to detect AMD
 * IOMMUs
 *
 ****************************************************************************/

int __init amd_iommu_detect(void)
{
	int ret;

	if (no_iommu || (iommu_detected && !gart_iommu_aperture))
		return -ENODEV;

	if (amd_iommu_disabled)
		return -ENODEV;

	ret = iommu_go_to_state(IOMMU_IVRS_DETECTED);
	if (ret)
		return ret;

	amd_iommu_detected = true;
	iommu_detected = 1;
	x86_init.iommu.iommu_init = amd_iommu_init;

	return 1;
}
/****************************************************************************
 *
 * Parsing functions for the AMD IOMMU specific kernel command line
 * options.
 *
 ****************************************************************************/

static int __init parse_amd_iommu_dump(char *str)
{
	amd_iommu_dump = true;

	return 1;
}

static int __init parse_amd_iommu_options(char *str)
{
	for (; *str; ++str) {
		if (strncmp(str, "fullflush", 9) == 0)
			amd_iommu_unmap_flush = true;
		if (strncmp(str, "off", 3) == 0)
			amd_iommu_disabled = true;
		if (strncmp(str, "force_isolation", 15) == 0)
			amd_iommu_force_isolation = true;
	}

	return 1;
}
static int __init parse_ivrs_ioapic(char *str)
{
	unsigned int bus, dev, fn;
	int ret, id, i;
	u16 devid;

	ret = sscanf(str, "[%d]=%x:%x.%x", &id, &bus, &dev, &fn);

	if (ret != 4) {
		pr_err("AMD-Vi: Invalid command line: ivrs_ioapic%s\n", str);
		return 1;
	}

	if (early_ioapic_map_size == EARLY_MAP_SIZE) {
		pr_err("AMD-Vi: Early IOAPIC map overflow - ignoring ivrs_ioapic%s\n",
			str);
		return 1;
	}

	devid = ((bus & 0xff) << 8) | ((dev & 0x1f) << 3) | (fn & 0x7);

	cmdline_maps			= true;
	i				= early_ioapic_map_size++;
	early_ioapic_map[i].id		= id;
	early_ioapic_map[i].devid	= devid;
	early_ioapic_map[i].cmd_line	= true;

	return 1;
}
static int __init parse_ivrs_hpet(char *str)
{
	unsigned int bus, dev, fn;
	int ret, id, i;
	u16 devid;

	ret = sscanf(str, "[%d]=%x:%x.%x", &id, &bus, &dev, &fn);

	if (ret != 4) {
		pr_err("AMD-Vi: Invalid command line: ivrs_hpet%s\n", str);
		return 1;
	}

	if (early_hpet_map_size == EARLY_MAP_SIZE) {
		pr_err("AMD-Vi: Early HPET map overflow - ignoring ivrs_hpet%s\n",
			str);
		return 1;
	}

	devid = ((bus & 0xff) << 8) | ((dev & 0x1f) << 3) | (fn & 0x7);

	cmdline_maps			= true;
	i				= early_hpet_map_size++;
	early_hpet_map[i].id		= id;
	early_hpet_map[i].devid		= devid;
	early_hpet_map[i].cmd_line	= true;

	return 1;
}
__setup("amd_iommu_dump", parse_amd_iommu_dump);
__setup("amd_iommu=", parse_amd_iommu_options);
__setup("ivrs_ioapic", parse_ivrs_ioapic);
__setup("ivrs_hpet", parse_ivrs_hpet);
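
/*
 * Example invocations for the overrides registered above, following the
 * "[ID]=BUS:DEV.FN" format parsed via sscanf() in parse_ivrs_ioapic() and
 * parse_ivrs_hpet():
 *
 *   ivrs_ioapic[4]=00:14.0   maps IOAPIC id 4 to devid (0x14 << 3) | 0 = 0xa0
 *   ivrs_hpet[0]=00:14.0     maps HPET number 0 to the same devid
 */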
IOMMU_INIT_FINISH(amd_iommu_detect,
		  gart_iommu_hole_init,
		  NULL,
		  NULL);
bool amd_iommu_v2_supported(void)
{
	return amd_iommu_v2_present;
}
EXPORT_SYMBOL(amd_iommu_v2_supported);
/****************************************************************************
 *
 * IOMMU EFR Performance Counter support functionality. This code allows
 * access to the IOMMU PC functionality.
 *
 ****************************************************************************/
u8 amd_iommu_pc_get_max_banks(u16 devid)
{
	struct amd_iommu *iommu;
	u8 ret = 0;

	/* locate the iommu governing the devid */
	iommu = amd_iommu_rlookup_table[devid];
	if (iommu)
		ret = iommu->max_banks;

	return ret;
}
EXPORT_SYMBOL(amd_iommu_pc_get_max_banks);
bool amd_iommu_pc_supported(void)
{
	return amd_iommu_pc_present;
}
EXPORT_SYMBOL(amd_iommu_pc_supported);
u8 amd_iommu_pc_get_max_counters(u16 devid)
{
	struct amd_iommu *iommu;
	u8 ret = 0;

	/* locate the iommu governing the devid */
	iommu = amd_iommu_rlookup_table[devid];
	if (iommu)
		ret = iommu->max_counters;

	return ret;
}
EXPORT_SYMBOL(amd_iommu_pc_get_max_counters);
int amd_iommu_pc_get_set_reg_val(u16 devid, u8 bank, u8 cntr, u8 fxn,
				    u64 *value, bool is_write)
{
	struct amd_iommu *iommu;
	u32 offset;
	u32 max_offset_lim;

	/* Make sure the IOMMU PC resource is available */
	if (!amd_iommu_pc_present)
		return -ENODEV;

	/* Locate the iommu associated with the device ID */
	iommu = amd_iommu_rlookup_table[devid];

	/* Check for valid iommu and pc register indexing */
	if (WARN_ON((iommu == NULL) || (fxn > 0x28) || (fxn & 7)))
		return -ENODEV;

	offset = (u32)(((0x40|bank) << 12) | (cntr << 8) | fxn);

	/* Limit the offset to the hw defined mmio region aperture */
	max_offset_lim = (u32)(((0x40|iommu->max_banks) << 12) |
				(iommu->max_counters << 8) | 0x28);
	if ((offset < MMIO_CNTR_REG_OFFSET) ||
	    (offset > max_offset_lim))
		return -EINVAL;

	if (is_write) {
		writel((u32)*value, iommu->mmio_base + offset);
		writel((*value >> 32), iommu->mmio_base + offset + 4);
	} else {
		/* combine the high and low halves into one 64-bit value */
		*value = readl(iommu->mmio_base + offset + 4);
		*value <<= 32;
		*value |= readl(iommu->mmio_base + offset);
	}

	return 0;
}
EXPORT_SYMBOL(amd_iommu_pc_get_set_reg_val);
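
/*
 * Example of the offset computation above: bank 1, counter 2, function 0x08
 * maps to ((0x40 | 1) << 12) | (2 << 8) | 0x08 = 0x41208. In other words,
 * the counter register file starts at MMIO offset 0x40000 with one 4K page
 * per bank and 256 bytes of register space per counter.
 */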