arch/x86/kernel/pci-dma.c
#include <linux/dma-mapping.h>
#include <linux/dma-debug.h>
#include <linux/dmar.h>
#include <linux/export.h>
#include <linux/bootmem.h>
#include <linux/gfp.h>
#include <linux/pci.h>
#include <linux/kmemleak.h>

#include <asm/proto.h>
#include <asm/dma.h>
#include <asm/iommu.h>
#include <asm/gart.h>
#include <asm/calgary.h>
#include <asm/x86_init.h>
#include <asm/iommu_table.h>

static int forbid_dac __read_mostly;

struct dma_map_ops *dma_ops = &nommu_dma_ops;
EXPORT_SYMBOL(dma_ops);

static int iommu_sac_force __read_mostly;

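/*
 * With CONFIG_IOMMU_DEBUG the IOMMU is forced on for every device and an
 * overflow of the IOMMU mapping area panics instead of failing quietly,
 * so bugs surface early; the defaults below reflect that.
 */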
#ifdef CONFIG_IOMMU_DEBUG
int panic_on_overflow __read_mostly = 1;
int force_iommu __read_mostly = 1;
#else
int panic_on_overflow __read_mostly = 0;
int force_iommu __read_mostly = 0;
#endif

int iommu_merge __read_mostly = 0;

int no_iommu __read_mostly;
/* Set this to 1 if there is a HW IOMMU in the system */
int iommu_detected __read_mostly = 0;

/*
 * This variable becomes 1 if iommu=pt is passed on the kernel command line.
 * If this variable is 1, IOMMU implementations do no DMA translation for
 * devices and allow every device to access the whole physical memory. This
 * is useful if a user wants to use an IOMMU only for KVM device assignment
 * to guests and not for driver DMA translation.
 */
int iommu_pass_through __read_mostly;

/*
 * Group multi-function PCI devices into a single device-group for the
 * iommu_device_group interface. This tells the iommu driver to pretend
 * it cannot distinguish between functions of a device, exposing only one
 * group for the device. Useful for disallowing use of individual PCI
 * functions from userspace drivers.
 */
int iommu_group_mf __read_mostly;

extern struct iommu_table_entry __iommu_table[], __iommu_table_end[];

/* Dummy device used for NULL arguments (normally ISA). */
struct device x86_dma_fallback_dev = {
        .init_name = "fallback device",
        .coherent_dma_mask = ISA_DMA_BIT_MASK,
        .dma_mask = &x86_dma_fallback_dev.coherent_dma_mask,
};
EXPORT_SYMBOL(x86_dma_fallback_dev);

/* Number of entries preallocated for DMA-API debugging */
#define PREALLOC_DMA_DEBUG_ENTRIES       32768

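/*
 * Validate @mask against the device's capabilities via dma_supported()
 * and, on success, store it through dev->dma_mask. Returns -EIO if the
 * device has no dma_mask pointer or cannot address memory under @mask.
 * Typical driver usage (illustrative):
 *
 *      if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)))
 *              return -EIO;
 */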
int dma_set_mask(struct device *dev, u64 mask)
{
        if (!dev->dma_mask || !dma_supported(dev, mask))
                return -EIO;

        *dev->dma_mask = mask;

        return 0;
}
EXPORT_SYMBOL(dma_set_mask);

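/*
 * Walk the linker-generated __iommu_table, run each entry's detection
 * hook, and early-initialize every IOMMU that reports itself present.
 * An entry flagged IOMMU_FINISH_IF_DETECTED ends the walk once it has
 * been detected.
 */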
void __init pci_iommu_alloc(void)
{
        struct iommu_table_entry *p;

        sort_iommu_table(__iommu_table, __iommu_table_end);
        check_iommu_entries(__iommu_table, __iommu_table_end);

        for (p = __iommu_table; p < __iommu_table_end; p++) {
                if (p && p->detect && p->detect() > 0) {
                        p->flags |= IOMMU_DETECTED;
                        if (p->early_init)
                                p->early_init();
                        if (p->flags & IOMMU_FINISH_IF_DETECTED)
                                break;
                }
        }
}
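
/*
 * Generic coherent-memory allocator that dma_map_ops implementations can
 * use as their ->alloc hook: try the contiguous-memory allocator (CMA)
 * first when the caller may sleep, fall back to the page allocator, and
 * retry once from ZONE_DMA if the resulting physical address does not
 * fit under the device's coherent DMA mask.
 */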
void *dma_generic_alloc_coherent(struct device *dev, size_t size,
                                 dma_addr_t *dma_addr, gfp_t flag,
                                 struct dma_attrs *attrs)
{
        unsigned long dma_mask;
        struct page *page;
        unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
        dma_addr_t addr;

        dma_mask = dma_alloc_coherent_mask(dev, flag);

        flag |= __GFP_ZERO;
again:
        page = NULL;
        if (!(flag & GFP_ATOMIC))
                page = dma_alloc_from_contiguous(dev, count, get_order(size));
        if (!page)
                page = alloc_pages_node(dev_to_node(dev), flag, get_order(size));
        if (!page)
                return NULL;

        addr = page_to_phys(page);
        if (addr + size > dma_mask) {
                __free_pages(page, get_order(size));

                if (dma_mask < DMA_BIT_MASK(32) && !(flag & GFP_DMA)) {
                        flag = (flag & ~GFP_DMA32) | GFP_DMA;
                        goto again;
                }

                return NULL;
        }

        *dma_addr = addr;
        return page_address(page);
}

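/*
 * Counterpart to dma_generic_alloc_coherent(): give the pages back to CMA
 * if they came from there, otherwise free them to the page allocator.
 */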
void dma_generic_free_coherent(struct device *dev, size_t size, void *vaddr,
                               dma_addr_t dma_addr, struct dma_attrs *attrs)
{
        unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
        struct page *page = virt_to_page(vaddr);

        if (!dma_release_from_contiguous(dev, page, count))
                free_pages((unsigned long)vaddr, get_order(size));
}

/*
 * See <Documentation/x86/x86_64/boot-options.txt> for the iommu kernel
 * parameter documentation.
 */
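/*
 * Options are parsed as a comma-separated list; e.g. booting with
 * "iommu=force,merge,nodac" (an illustrative combination) sets
 * force_iommu, iommu_merge and forbid_dac below.
 */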
static __init int iommu_setup(char *p)
{
        iommu_merge = 1;

        if (!p)
                return -EINVAL;

        while (*p) {
                if (!strncmp(p, "off", 3))
                        no_iommu = 1;
                /* gart_parse_options has more force support */
                if (!strncmp(p, "force", 5))
                        force_iommu = 1;
                if (!strncmp(p, "noforce", 7)) {
                        iommu_merge = 0;
                        force_iommu = 0;
                }

                if (!strncmp(p, "biomerge", 8)) {
                        iommu_merge = 1;
                        force_iommu = 1;
                }
                if (!strncmp(p, "panic", 5))
                        panic_on_overflow = 1;
                if (!strncmp(p, "nopanic", 7))
                        panic_on_overflow = 0;
                if (!strncmp(p, "merge", 5)) {
                        iommu_merge = 1;
                        force_iommu = 1;
                }
                if (!strncmp(p, "nomerge", 7))
                        iommu_merge = 0;
                if (!strncmp(p, "forcesac", 8))
                        iommu_sac_force = 1;
                if (!strncmp(p, "allowdac", 8))
                        forbid_dac = 0;
                if (!strncmp(p, "nodac", 5))
                        forbid_dac = 1;
                if (!strncmp(p, "usedac", 6)) {
                        forbid_dac = -1;
                        return 1;
                }
#ifdef CONFIG_SWIOTLB
                if (!strncmp(p, "soft", 4))
                        swiotlb = 1;
#endif
                if (!strncmp(p, "pt", 2))
                        iommu_pass_through = 1;
                if (!strncmp(p, "group_mf", 8))
                        iommu_group_mf = 1;

                gart_parse_options(p);

#ifdef CONFIG_CALGARY_IOMMU
                if (!strncmp(p, "calgary", 7))
                        use_calgary = 1;
#endif /* CONFIG_CALGARY_IOMMU */

                p += strcspn(p, ",");
                if (*p == ',')
                        ++p;
        }
        return 0;
}
early_param("iommu", iommu_setup);

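/*
 * Report whether the device can address memory under @mask: honour the
 * forbid_dac quirk for masks above 32 bits, defer to the dma_map_ops
 * implementation when it provides its own check, and reject masks below
 * 24 bits that not even ZONE_DMA could satisfy.
 */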
int dma_supported(struct device *dev, u64 mask)
{
        struct dma_map_ops *ops = get_dma_ops(dev);

#ifdef CONFIG_PCI
        if (mask > 0xffffffff && forbid_dac > 0) {
                dev_info(dev, "PCI: Disallowing DAC for device\n");
                return 0;
        }
#endif

        if (ops->dma_supported)
                return ops->dma_supported(dev, mask);

        /*
         * Copied from i386. Doesn't make much sense, because it will
         * only work for pci_alloc_coherent.
         * The caller just has to use GFP_DMA in this case.
         */
        if (mask < DMA_BIT_MASK(24))
                return 0;

        /*
         * Tell the device to use SAC when IOMMU force is on. This
         * allows the driver to use cheaper accesses in some cases.
         *
         * Problem with this is that if we overflow the IOMMU area and
         * return DAC as fallback address the device may not handle it
         * correctly.
         *
         * As a special case some controllers have a 39bit address
         * mode that is as efficient as 32bit (aic79xx). Don't force
         * SAC for these. Assume all masks <= 40 bits are of this
         * type. Normally this doesn't make any difference, but gives
         * more gentle handling of IOMMU overflow.
         */
        if (iommu_sac_force && (mask >= DMA_BIT_MASK(40))) {
                dev_info(dev, "Force SAC with mask %Lx\n", mask);
                return 0;
        }

        return 1;
}
EXPORT_SYMBOL(dma_supported);

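/*
 * Late IOMMU initialization: set up DMA-API debugging, run the
 * x86_init.iommu.iommu_init() hook chosen during early detection, and
 * call the late_init hook of every __iommu_table entry that was flagged
 * IOMMU_DETECTED in pci_iommu_alloc().
 */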
static int __init pci_iommu_init(void)
{
        struct iommu_table_entry *p;
        dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);

#ifdef CONFIG_PCI
        dma_debug_add_bus(&pci_bus_type);
#endif
        x86_init.iommu.iommu_init();

        for (p = __iommu_table; p < __iommu_table_end; p++) {
                if (p && (p->flags & IOMMU_DETECTED) && p->late_init)
                        p->late_init();
        }

        return 0;
}
/* Must execute after PCI subsystem */
rootfs_initcall(pci_iommu_init);

#ifdef CONFIG_PCI
/* Many VIA bridges seem to corrupt data for DAC. Disable it here */

static __devinit void via_no_dac(struct pci_dev *dev)
{
        if (forbid_dac == 0) {
                dev_info(&dev->dev, "disabling DAC on VIA PCI bridge\n");
                forbid_dac = 1;
        }
}
DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_VIA, PCI_ANY_ID,
                              PCI_CLASS_BRIDGE_PCI, 8, via_no_dac);
#endif