arch/x86/kernel/pci-dma.c
#include <linux/dma-mapping.h>
#include <linux/dma-debug.h>
#include <linux/dmar.h>
#include <linux/export.h>
#include <linux/bootmem.h>
#include <linux/gfp.h>
#include <linux/pci.h>
#include <linux/kmemleak.h>

#include <asm/proto.h>
#include <asm/dma.h>
#include <asm/iommu.h>
#include <asm/gart.h>
#include <asm/calgary.h>
#include <asm/x86_init.h>
#include <asm/iommu_table.h>

static int forbid_dac __read_mostly;

struct dma_map_ops *dma_ops = &nommu_dma_ops;
EXPORT_SYMBOL(dma_ops);

static int iommu_sac_force __read_mostly;

#ifdef CONFIG_IOMMU_DEBUG
int panic_on_overflow __read_mostly = 1;
int force_iommu __read_mostly = 1;
#else
int panic_on_overflow __read_mostly = 0;
int force_iommu __read_mostly = 0;
#endif

int iommu_merge __read_mostly = 0;

int no_iommu __read_mostly;
/* Set this to 1 if there is a HW IOMMU in the system */
int iommu_detected __read_mostly = 0;

/*
 * This variable becomes 1 if iommu=pt is passed on the kernel command line.
 * If this variable is 1, IOMMU implementations do no DMA translation for
 * devices and allow every device to access the whole physical memory. This
 * is useful if a user wants to use an IOMMU only for KVM device assignment
 * to guests and not for driver DMA translation.
 */
int iommu_pass_through __read_mostly;

extern struct iommu_table_entry __iommu_table[], __iommu_table_end[];

/* Dummy device used for NULL arguments (normally ISA). */
struct device x86_dma_fallback_dev = {
        .init_name = "fallback device",
        .coherent_dma_mask = ISA_DMA_BIT_MASK,
        .dma_mask = &x86_dma_fallback_dev.coherent_dma_mask,
};
EXPORT_SYMBOL(x86_dma_fallback_dev);

/* Number of entries preallocated for DMA-API debugging */
#define PREALLOC_DMA_DEBUG_ENTRIES       32768

int dma_set_mask(struct device *dev, u64 mask)
{
        if (!dev->dma_mask || !dma_supported(dev, mask))
                return -EIO;

        *dev->dma_mask = mask;

        return 0;
}
EXPORT_SYMBOL(dma_set_mask);
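
/*
 * Illustrative sketch, not part of this file: the usual caller of
 * dma_set_mask() is a driver probe routine that asks for a 64-bit
 * mask and falls back to 32 bits when dma_supported() refuses it
 * (pdev below is a hypothetical struct pci_dev):
 *
 *      if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
 *          dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)))
 *              return -EIO;
 */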

void __init pci_iommu_alloc(void)
{
        struct iommu_table_entry *p;

        sort_iommu_table(__iommu_table, __iommu_table_end);
        check_iommu_entries(__iommu_table, __iommu_table_end);

        for (p = __iommu_table; p < __iommu_table_end; p++) {
                if (p && p->detect && p->detect() > 0) {
                        p->flags |= IOMMU_DETECTED;
                        if (p->early_init)
                                p->early_init();
                        if (p->flags & IOMMU_FINISH_IF_DETECTED)
                                break;
                }
        }
}
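
/*
 * For context, a sketch of how __iommu_table gets populated: IOMMU
 * implementations declare entries with the IOMMU_INIT_* macros from
 * <asm/iommu_table.h>, roughly as the SWIOTLB code does (exact symbol
 * names may differ by kernel version):
 *
 *      IOMMU_INIT_FINISH(pci_swiotlb_detect_override,
 *                        pci_xen_swiotlb_detect,
 *                        pci_swiotlb_init,
 *                        pci_swiotlb_late_init);
 *
 * sort_iommu_table() above orders these entries by their declared
 * dependencies before the detection loop walks them.
 */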

void *dma_generic_alloc_coherent(struct device *dev, size_t size,
                                 dma_addr_t *dma_addr, gfp_t flag,
                                 struct dma_attrs *attrs)
{
        unsigned long dma_mask;
        struct page *page;
        unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
        dma_addr_t addr;

        dma_mask = dma_alloc_coherent_mask(dev, flag);

        flag |= __GFP_ZERO;
again:
        page = NULL;
        if (!(flag & GFP_ATOMIC))
                page = dma_alloc_from_contiguous(dev, count, get_order(size));
        if (!page)
                page = alloc_pages_node(dev_to_node(dev), flag, get_order(size));
        if (!page)
                return NULL;

        addr = page_to_phys(page);
        if (addr + size > dma_mask) {
                __free_pages(page, get_order(size));

                if (dma_mask < DMA_BIT_MASK(32) && !(flag & GFP_DMA)) {
                        flag = (flag & ~GFP_DMA32) | GFP_DMA;
                        goto again;
                }

                return NULL;
        }

        *dma_addr = addr;
        return page_address(page);
}

void dma_generic_free_coherent(struct device *dev, size_t size, void *vaddr,
                               dma_addr_t dma_addr, struct dma_attrs *attrs)
{
        unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
        struct page *page = virt_to_page(vaddr);

        if (!dma_release_from_contiguous(dev, page, count))
                free_pages((unsigned long)vaddr, get_order(size));
}
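
/*
 * Illustrative sketch, not part of this file: drivers never call these
 * two hooks directly; they reach them through dma_ops via the generic
 * DMA API. A hypothetical driver would pair them like so:
 *
 *      dma_addr_t handle;
 *      void *buf;
 *
 *      buf = dma_alloc_coherent(&pdev->dev, size, &handle, GFP_KERNEL);
 *      if (!buf)
 *              return -ENOMEM;
 *      ...
 *      dma_free_coherent(&pdev->dev, size, buf, handle);
 */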

/*
 * See <Documentation/x86/x86_64/boot-options.txt> for the iommu kernel
 * parameter documentation.
 */
static __init int iommu_setup(char *p)
{
        iommu_merge = 1;

        if (!p)
                return -EINVAL;

        while (*p) {
                if (!strncmp(p, "off", 3))
                        no_iommu = 1;
                /* gart_parse_options has more force support */
                if (!strncmp(p, "force", 5))
                        force_iommu = 1;
                if (!strncmp(p, "noforce", 7)) {
                        iommu_merge = 0;
                        force_iommu = 0;
                }

                if (!strncmp(p, "biomerge", 8)) {
                        iommu_merge = 1;
                        force_iommu = 1;
                }
                if (!strncmp(p, "panic", 5))
                        panic_on_overflow = 1;
                if (!strncmp(p, "nopanic", 7))
                        panic_on_overflow = 0;
                if (!strncmp(p, "merge", 5)) {
                        iommu_merge = 1;
                        force_iommu = 1;
                }
                if (!strncmp(p, "nomerge", 7))
                        iommu_merge = 0;
                if (!strncmp(p, "forcesac", 8))
                        iommu_sac_force = 1;
                if (!strncmp(p, "allowdac", 8))
                        forbid_dac = 0;
                if (!strncmp(p, "nodac", 5))
                        forbid_dac = 1;
                if (!strncmp(p, "usedac", 6)) {
                        forbid_dac = -1;
                        return 1;
                }
#ifdef CONFIG_SWIOTLB
                if (!strncmp(p, "soft", 4))
                        swiotlb = 1;
#endif
                if (!strncmp(p, "pt", 2))
                        iommu_pass_through = 1;

                gart_parse_options(p);

#ifdef CONFIG_CALGARY_IOMMU
                if (!strncmp(p, "calgary", 7))
                        use_calgary = 1;
#endif /* CONFIG_CALGARY_IOMMU */

                p += strcspn(p, ",");
                if (*p == ',')
                        ++p;
        }
        return 0;
}
early_param("iommu", iommu_setup);
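
/*
 * Example boot parameters accepted by the parser above; options are
 * comma separated, matching the strcspn(p, ",") walk at the end of
 * the loop:
 *
 *      iommu=off
 *      iommu=pt
 *      iommu=noforce,nomerge
 *      iommu=soft              (CONFIG_SWIOTLB only)
 */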

int dma_supported(struct device *dev, u64 mask)
{
        struct dma_map_ops *ops = get_dma_ops(dev);

#ifdef CONFIG_PCI
        if (mask > 0xffffffff && forbid_dac > 0) {
                dev_info(dev, "PCI: Disallowing DAC for device\n");
                return 0;
        }
#endif

        if (ops->dma_supported)
                return ops->dma_supported(dev, mask);

        /*
         * Copied from i386. Doesn't make much sense, because it will
         * only work for pci_alloc_coherent.
         * The caller just has to use GFP_DMA in this case.
         */
        if (mask < DMA_BIT_MASK(24))
                return 0;

        /*
         * Tell the device to use SAC when IOMMU force is on. This
         * allows the driver to use cheaper accesses in some cases.
         *
         * Problem with this is that if we overflow the IOMMU area and
         * return DAC as fallback address the device may not handle it
         * correctly.
         *
         * As a special case some controllers have a 39bit address
         * mode that is as efficient as 32bit (aic79xx). Don't force
         * SAC for these. Assume all masks <= 40 bits are of this
         * type. Normally this doesn't make any difference, but gives
         * more gentle handling of IOMMU overflow.
         */
        if (iommu_sac_force && (mask >= DMA_BIT_MASK(40))) {
                dev_info(dev, "Force SAC with mask %Lx\n", mask);
                return 0;
        }

        return 1;
}
EXPORT_SYMBOL(dma_supported);
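
/*
 * Tie-in sketch: booting with "iommu=nodac" sets forbid_dac to 1, so
 * the CONFIG_PCI check above refuses any mask wider than 32 bits:
 *
 *      dma_supported(dev, DMA_BIT_MASK(64));   returns 0
 *      dma_supported(dev, DMA_BIT_MASK(32));   returns 1
 *
 * which is why the dma_set_mask() fallback pattern shown earlier works.
 */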

static int __init pci_iommu_init(void)
{
        struct iommu_table_entry *p;

        dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);

#ifdef CONFIG_PCI
        dma_debug_add_bus(&pci_bus_type);
#endif
        x86_init.iommu.iommu_init();

        for (p = __iommu_table; p < __iommu_table_end; p++) {
                if (p && (p->flags & IOMMU_DETECTED) && p->late_init)
                        p->late_init();
        }

        return 0;
}
/* Must execute after PCI subsystem */
rootfs_initcall(pci_iommu_init);

#ifdef CONFIG_PCI
/* Many VIA bridges seem to corrupt data for DAC. Disable it here. */

static __devinit void via_no_dac(struct pci_dev *dev)
{
        if (forbid_dac == 0) {
                dev_info(&dev->dev, "disabling DAC on VIA PCI bridge\n");
                forbid_dac = 1;
        }
}
DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_VIA, PCI_ANY_ID,
                              PCI_CLASS_BRIDGE_PCI, 8, via_no_dac);
#endif