xen/arm/arm64: introduce xen_arch_need_swiotlb
author    Stefano Stabellini <stefano.stabellini@eu.citrix.com>
          Fri, 21 Nov 2014 11:07:39 +0000 (11:07 +0000)
committer David Vrabel <david.vrabel@citrix.com>
          Thu, 4 Dec 2014 12:41:54 +0000 (12:41 +0000)
Introduce an arch-specific function to find out whether a particular DMA
mapping operation needs to bounce through the swiotlb buffer.

On ARM and ARM64, if the page involved is a foreign page and the device
is not coherent, we need to bounce because at unmap time we cannot
execute any required cache maintenance operations (we don't know how to
find the pfn from the mfn).

No change of behaviour for x86.
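
To make the condition concrete, here is a minimal standalone model of the
new check (illustrative only, not kernel code; is_coherent stands in for
is_device_dma_coherent(dev), and the frame numbers are made-up values):

	/*
	 * Model of xen_arch_need_swiotlb() on ARM: a foreign (grant-mapped)
	 * page has pfn != mfn; if the device is also not cache-coherent,
	 * the mapping must bounce through the swiotlb buffer.
	 */
	#include <stdbool.h>
	#include <stdio.h>

	static bool need_swiotlb(unsigned long pfn, unsigned long mfn,
				 bool is_coherent)
	{
		return pfn != mfn && !is_coherent;
	}

	int main(void)
	{
		/* local page, non-coherent device: no bounce needed */
		printf("%d\n", need_swiotlb(0x1000, 0x1000, false)); /* 0 */
		/* foreign page, non-coherent device: must bounce */
		printf("%d\n", need_swiotlb(0x1000, 0x8000, false)); /* 1 */
		/* foreign page, coherent device: no cache maintenance needed */
		printf("%d\n", need_swiotlb(0x1000, 0x8000, true));  /* 0 */
		return 0;
	}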

Signed-off-by: Stefano Stabellini <stefano.stabellini@eu.citrix.com>
Reviewed-by: David Vrabel <david.vrabel@citrix.com>
Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
Acked-by: Ian Campbell <ian.campbell@citrix.com>
Acked-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
arch/arm/include/asm/xen/page.h
arch/arm/xen/mm.c
arch/x86/include/asm/xen/page.h
drivers/xen/swiotlb-xen.c

diff --git a/arch/arm/include/asm/xen/page.h b/arch/arm/include/asm/xen/page.h
index 135c24a5ba262b76529fa1c3dbe088d098de84a5..68c739b3fdf41b8b70dd09170661ae95f6c352dc 100644
--- a/arch/arm/include/asm/xen/page.h
+++ b/arch/arm/include/asm/xen/page.h
@@ -107,4 +107,8 @@ static inline bool set_phys_to_machine(unsigned long pfn, unsigned long mfn)
 #define xen_remap(cookie, size) ioremap_cache((cookie), (size))
 #define xen_unmap(cookie) iounmap((cookie))
 
+bool xen_arch_need_swiotlb(struct device *dev,
+                          unsigned long pfn,
+                          unsigned long mfn);
+
 #endif /* _ASM_ARM_XEN_PAGE_H */
diff --git a/arch/arm/xen/mm.c b/arch/arm/xen/mm.c
index ab700e1e59226696d96b9d21dc8241f2ebc285bb..28ebf3ecee4ee6a85e03b4821d7654a106bad085 100644
--- a/arch/arm/xen/mm.c
+++ b/arch/arm/xen/mm.c
@@ -100,6 +100,13 @@ void __xen_dma_sync_single_for_device(struct device *hwdev,
        __xen_dma_page_cpu_to_dev(hwdev, handle, size, dir);
 }
 
+bool xen_arch_need_swiotlb(struct device *dev,
+                          unsigned long pfn,
+                          unsigned long mfn)
+{
+       return ((pfn != mfn) && !is_device_dma_coherent(dev));
+}
+
 int xen_create_contiguous_region(phys_addr_t pstart, unsigned int order,
                                 unsigned int address_bits,
                                 dma_addr_t *dma_handle)
diff --git a/arch/x86/include/asm/xen/page.h b/arch/x86/include/asm/xen/page.h
index c949923a56685463465b9281b3b4d5cb1c28a7d6..f58ef6c0613b6739351e4366a3b869c9c2ecb72b 100644
--- a/arch/x86/include/asm/xen/page.h
+++ b/arch/x86/include/asm/xen/page.h
@@ -236,4 +236,11 @@ void make_lowmem_page_readwrite(void *vaddr);
 #define xen_remap(cookie, size) ioremap((cookie), (size));
 #define xen_unmap(cookie) iounmap((cookie))
 
+static inline bool xen_arch_need_swiotlb(struct device *dev,
+                                        unsigned long pfn,
+                                        unsigned long mfn)
+{
+       return false;
+}
+
 #endif /* _ASM_X86_XEN_PAGE_H */
diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
index ad2c5eb8a9c79b7f64ce04dcf995b5422720d7b4..3725ee4ff43c0f2ed66391dd79f09b42e4a26e62 100644
--- a/drivers/xen/swiotlb-xen.c
+++ b/drivers/xen/swiotlb-xen.c
@@ -399,7 +399,9 @@ dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
         * buffering it.
         */
        if (dma_capable(dev, dev_addr, size) &&
-           !range_straddles_page_boundary(phys, size) && !swiotlb_force) {
+           !range_straddles_page_boundary(phys, size) &&
+               !xen_arch_need_swiotlb(dev, PFN_DOWN(phys), PFN_DOWN(dev_addr)) &&
+               !swiotlb_force) {
                /* we are not interested in the dma_addr returned by
                 * xen_dma_map_page, only in the potential cache flushes executed
                 * by the function. */
@@ -557,6 +559,7 @@ xen_swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
                dma_addr_t dev_addr = xen_phys_to_bus(paddr);
 
                if (swiotlb_force ||
+                   xen_arch_need_swiotlb(hwdev, PFN_DOWN(paddr), PFN_DOWN(dev_addr)) ||
                    !dma_capable(hwdev, dev_addr, sg->length) ||
                    range_straddles_page_boundary(paddr, sg->length)) {
                        phys_addr_t map = swiotlb_tbl_map_single(hwdev,
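
For reference, both call sites pass PFN_DOWN(phys), the guest frame number of
the buffer, and PFN_DOWN(dev_addr), the machine frame it maps to (dev_addr is
computed with xen_phys_to_bus(), as the scatter-gather hunk shows), so on ARM
the pfn != mfn comparison flags foreign, grant-mapped pages. A condensed view
of the resulting fast-path test in xen_swiotlb_map_page(), with explanatory
comments added (a sketch of the hunk above, not a further change):

	if (dma_capable(dev, dev_addr, size) &&            /* device can reach the address  */
	    !range_straddles_page_boundary(phys, size) &&  /* buffer stays within one page  */
	    !xen_arch_need_swiotlb(dev, PFN_DOWN(phys),
				   PFN_DOWN(dev_addr)) &&  /* not a foreign page on a
							      non-coherent device (ARM)     */
	    !swiotlb_force)                                /* bouncing not forced globally  */
		/* cache maintenance via xen_dma_map_page(), then return dev_addr directly */
	/* otherwise fall through and bounce through the swiotlb buffer */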