x86: dma-ops on highmem fix
arch/x86/kernel/pci-swiotlb_64.c
/* Glue code to lib/swiotlb.c */

#include <linux/pci.h>
#include <linux/cache.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>

#include <asm/gart.h>
#include <asm/swiotlb.h>
#include <asm/dma.h>

int swiotlb __read_mostly;

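/*
 * The ->map_single hook is handed a physical address; lib/swiotlb.c's
 * swiotlb_map_single() still expects a kernel virtual address, so
 * translate back with phys_to_virt() before handing off.
 */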
static dma_addr_t
swiotlb_map_single_phys(struct device *hwdev, phys_addr_t paddr, size_t size,
			int direction)
{
	return swiotlb_map_single(hwdev, phys_to_virt(paddr), size, direction);
}

const struct dma_mapping_ops swiotlb_dma_ops = {
	.mapping_error = swiotlb_dma_mapping_error,
	.alloc_coherent = swiotlb_alloc_coherent,
	.free_coherent = swiotlb_free_coherent,
	.map_single = swiotlb_map_single_phys,
	.unmap_single = swiotlb_unmap_single,
	.sync_single_for_cpu = swiotlb_sync_single_for_cpu,
	.sync_single_for_device = swiotlb_sync_single_for_device,
	.sync_single_range_for_cpu = swiotlb_sync_single_range_for_cpu,
	.sync_single_range_for_device = swiotlb_sync_single_range_for_device,
	.sync_sg_for_cpu = swiotlb_sync_sg_for_cpu,
	.sync_sg_for_device = swiotlb_sync_sg_for_device,
	.map_sg = swiotlb_map_sg,
	.unmap_sg = swiotlb_unmap_sg,
	.dma_supported = NULL,
};

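/*
 * Decide at boot whether to use the software IOTLB: enable it when no
 * hardware IOMMU was detected and memory extends beyond the 32-bit DMA
 * limit, or when it is forced on the command line.
 */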
void __init pci_swiotlb_init(void)
{
	/* don't initialize swiotlb if iommu=off (no_iommu=1) */
	if (!iommu_detected && !no_iommu && end_pfn > MAX_DMA32_PFN)
		swiotlb = 1;
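	/* "swiotlb=force" on the command line overrides the auto-detection */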
	if (swiotlb_force)
		swiotlb = 1;
	if (swiotlb) {
		printk(KERN_INFO "PCI-DMA: Using software bounce buffering for IO (SWIOTLB)\n");
		swiotlb_init();
		dma_ops = &swiotlb_dma_ops;
	}
}
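
The highmem angle: because ->map_single now takes a phys_addr_t rather than a pointer, callers can map pages that have no kernel virtual mapping by passing their physical address directly. Below is a minimal sketch of how the generic dma-mapping wrappers might dispatch into these ops; the dma_map_single()/dma_map_page() helpers and their exact signatures are assumptions for illustration, not taken from the file above.

/* Hypothetical dispatch sketch, not part of pci-swiotlb_64.c. */
static inline dma_addr_t
dma_map_single(struct device *hwdev, void *ptr, size_t size, int direction)
{
	/* Lowmem buffer: its kernel virtual address translates directly. */
	return dma_ops->map_single(hwdev, virt_to_phys(ptr), size, direction);
}

static inline dma_addr_t
dma_map_page(struct device *hwdev, struct page *page, size_t offset,
	     size_t size, int direction)
{
	/*
	 * Highmem-safe: a struct page always has a physical address,
	 * even when it has no permanent kernel virtual mapping.
	 */
	return dma_ops->map_single(hwdev, page_to_phys(page) + offset,
				   size, direction);
}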